// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqe (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
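 * As a rough userspace sketch of that CQ reaping pattern (illustrative
 * only, mirroring liburing; cq_head/cq_tail/cqes/cq_mask stand in for
 * the mmap'ed ring fields), the acquire load makes the kernel's CQE
 * stores visible, and the release store orders the entry loads before
 * the head update:
 *
 *	unsigned head = *cq_head;
 *	unsigned tail = smp_load_acquire(cq_tail);
 *
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & cq_mask];
 *		handle_cqe(cqe);
 *		head++;
 *	}
 *	smp_store_release(cq_head, head);
 *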
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
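 * The submission side then looks roughly like the sketch below, again
 * with placeholder names for the mmap'ed fields and a ring created with
 * IORING_SETUP_SQPOLL. The release store orders the SQE stores before
 * the tail update, and the smp_mb() orders the tail store against the
 * NEED_WAKEUP flags load:
 *
 *	sqes[tail & sq_mask] = sqe;
 *	smp_store_release(sq_tail, tail + 1);
 *	smp_mb();
 *	if (READ_ONCE(*sq_flags) & IORING_SQ_NEED_WAKEUP)
 *		io_uring_enter(ring_fd, 0, 0, IORING_ENTER_SQ_WAKEUP, NULL);
 *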
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done not only
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
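 * E.g. (hypothetical kernel-side snippet), the SQ tail would be loaded
 * once into a local and only the local used afterwards, so a concurrent
 * userspace store can neither tear the load nor change the value
 * mid-operation:
 *
 *	unsigned tail = READ_ONCE(ctx->rings->sq.tail);
 *	... validate and use 'tail', never re-reading sq.tail ...
 *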
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/highmem.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/task_work.h>
#include <linux/io_uring.h>
#include <linux/io_uring/cmd.h>
#include <linux/audit.h>
#include <linux/security.h>
#include <asm/shmparam.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io-wq.h"

#include "io_uring.h"
#include "opdef.h"
#include "refs.h"
#include "tctx.h"
#include "register.h"
#include "sqpoll.h"
#include "fdinfo.h"
#include "kbuf.h"
#include "rsrc.h"
#include "cancel.h"
#include "net.h"
#include "notif.h"
#include "waitid.h"
#include "futex.h"
#include "napi.h"

#include "timeout.h"
#include "poll.h"
#include "rw.h"
#include "alloc_cache.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

#define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
			  IOSQE_IO_HARDLINK | IOSQE_ASYNC)

#define SQE_VALID_FLAGS	(SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \
			 IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)

#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
			    REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
			    REQ_F_ASYNC_DATA)

#define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK | \
				 IO_REQ_CLEAN_FLAGS)

#define IO_TCTX_REFS_CACHE_NR	(1U << 10)

#define IO_COMPL_BATCH			32
#define IO_REQ_ALLOC_BATCH		8

struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
	u32			seq;
};

/* requests with any of those set should undergo io_disarm_next() */
#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
#define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)

/*
 * No waiters. It's larger than any valid value of the tw counter
 * so that tests against ->cq_wait_nr would fail and skip wake_up().
 */
#define IO_CQ_WAKE_INIT		(-1U)
/* Forced wake up if there is a waiter regardless of ->cq_wait_nr */
#define IO_CQ_WAKE_FORCE	(IO_CQ_WAKE_INIT >> 1)

static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 bool cancel_all);

static void io_queue_sqe(struct io_kiocb *req);

struct kmem_cache *req_cachep;
static struct workqueue_struct *iou_wq __ro_after_init;

static int __read_mostly sysctl_io_uring_disabled;
static int __read_mostly sysctl_io_uring_group = -1;

#ifdef CONFIG_SYSCTL
static struct ctl_table kernel_io_uring_disabled_table[] = {
	{
		.procname	= "io_uring_disabled",
		.data		= &sysctl_io_uring_disabled,
		.maxlen		= sizeof(sysctl_io_uring_disabled),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_TWO,
	},
	{
		.procname	= "io_uring_group",
		.data		= &sysctl_io_uring_group,
		.maxlen		= sizeof(gid_t),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{},
};
#endif

static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
{
	if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
	    ctx->submit_state.cqes_count)
		__io_submit_flush_completions(ctx);
}

static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
{
	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
}

static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx)
{
	return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head);
}

static bool io_match_linked(struct io_kiocb *head)
{
	struct io_kiocb *req;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}

/*
 * As io_match_task() but protected against racing with linked timeouts.
 * User must not hold timeout_lock.
 */
bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all)
{
	bool matched;

	if (task && head->task != task)
		return false;
	if (cancel_all)
		return true;

	if (head->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = head->ctx;

		/* protect against races with linked timeouts */
		spin_lock_irq(&ctx->timeout_lock);
		matched = io_match_linked(head);
		spin_unlock_irq(&ctx->timeout_lock);
	} else {
		matched = io_match_linked(head);
	}
	return matched;
}

static inline void req_fail_link_node(struct io_kiocb *req, int res)
{
	req_set_fail(req);
	io_req_set_res(req, res, 0);
}

static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
}

static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->ref_comp);
}

static __cold void io_fallback_req_func(struct work_struct *work)
{
	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
						fallback_work.work);
	struct llist_node *node = llist_del_all(&ctx->fallback_llist);
	struct io_kiocb *req, *tmp;
	struct io_tw_state ts = { .locked = true, };

	percpu_ref_get(&ctx->refs);
	mutex_lock(&ctx->uring_lock);
	llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
		req->io_task_work.func(req, &ts);
	if (WARN_ON_ONCE(!ts.locked))
		return;
	io_submit_flush_completions(ctx);
	mutex_unlock(&ctx->uring_lock);
	percpu_ref_put(&ctx->refs);
}

static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
{
	unsigned hash_buckets = 1U << bits;
	size_t hash_size = hash_buckets * sizeof(table->hbs[0]);

	table->hbs = kmalloc(hash_size, GFP_KERNEL);
	if (!table->hbs)
		return -ENOMEM;

	table->hash_bits = bits;
	init_hash_table(table, hash_buckets);
	return 0;
}

static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
	int hash_bits;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	xa_init(&ctx->io_bl_xa);

	/*
	 * Use 5 bits less than the max cq entries, that should give us around
	 * 32 entries per hash list if totally full and uniformly spread, but
	 * don't keep too many buckets to not overconsume memory.
	 */
	hash_bits = ilog2(p->cq_entries) - 5;
	hash_bits = clamp(hash_bits, 1, 8);
	if (io_alloc_hash_table(&ctx->cancel_table, hash_bits))
		goto err;
	if (io_alloc_hash_table(&ctx->cancel_table_locked, hash_bits))
		goto err;
	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    0, GFP_KERNEL))
		goto err;

	ctx->flags = p->flags;
	atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT);
	init_waitqueue_head(&ctx->sqo_sq_wait);
	INIT_LIST_HEAD(&ctx->sqd_list);
	INIT_LIST_HEAD(&ctx->cq_overflow_list);
	INIT_LIST_HEAD(&ctx->io_buffers_cache);
	INIT_HLIST_HEAD(&ctx->io_buf_list);
	io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX,
			    sizeof(struct io_rsrc_node));
	io_alloc_cache_init(&ctx->apoll_cache, IO_ALLOC_CACHE_MAX,
			    sizeof(struct async_poll));
	io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX,
			    sizeof(struct io_async_msghdr));
	io_futex_cache_init(ctx);
	init_completion(&ctx->ref_comp);
	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->cq_wait);
	init_waitqueue_head(&ctx->poll_wq);
	init_waitqueue_head(&ctx->rsrc_quiesce_wq);
	spin_lock_init(&ctx->completion_lock);
	spin_lock_init(&ctx->timeout_lock);
	INIT_WQ_LIST(&ctx->iopoll_list);
	INIT_LIST_HEAD(&ctx->io_buffers_comp);
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	INIT_LIST_HEAD(&ctx->ltimeout_list);
	INIT_LIST_HEAD(&ctx->rsrc_ref_list);
	init_llist_head(&ctx->work_llist);
	INIT_LIST_HEAD(&ctx->tctx_list);
	ctx->submit_state.free_list.next = NULL;
	INIT_WQ_LIST(&ctx->locked_free_list);
	INIT_HLIST_HEAD(&ctx->waitid_list);
#ifdef CONFIG_FUTEX
	INIT_HLIST_HEAD(&ctx->futex_list);
#endif
	INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
	INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
	INIT_HLIST_HEAD(&ctx->cancelable_uring_cmd);
	io_napi_init(ctx);

	return ctx;
err:
	kfree(ctx->cancel_table.hbs);
	kfree(ctx->cancel_table_locked.hbs);
	xa_destroy(&ctx->io_bl_xa);
	kfree(ctx);
	return NULL;
}

static void io_account_cq_overflow(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
	ctx->cq_extra--;
}

static bool req_need_defer(struct io_kiocb *req, u32 seq)
{
	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
		struct io_ring_ctx *ctx = req->ctx;

		return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
	}

	return false;
}

static void io_clean_op(struct io_kiocb *req)
{
	if (req->flags & REQ_F_BUFFER_SELECTED) {
		spin_lock(&req->ctx->completion_lock);
		io_put_kbuf_comp(req);
		spin_unlock(&req->ctx->completion_lock);
	}

	if (req->flags & REQ_F_NEED_CLEANUP) {
		const struct io_cold_def *def = &io_cold_defs[req->opcode];

		if (def->cleanup)
			def->cleanup(req);
	}
	if ((req->flags & REQ_F_POLLED) && req->apoll) {
		kfree(req->apoll->double_poll);
		kfree(req->apoll);
		req->apoll = NULL;
	}
	if (req->flags & REQ_F_INFLIGHT) {
		struct io_uring_task *tctx = req->task->io_uring;

		atomic_dec(&tctx->inflight_tracked);
	}
	if (req->flags & REQ_F_CREDS)
		put_cred(req->creds);
	if (req->flags & REQ_F_ASYNC_DATA) {
		kfree(req->async_data);
		req->async_data = NULL;
	}
	req->flags &= ~IO_REQ_CLEAN_FLAGS;
}

static inline void io_req_track_inflight(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_INFLIGHT)) {
		req->flags |= REQ_F_INFLIGHT;
		atomic_inc(&req->task->io_uring->inflight_tracked);
	}
}

static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
{
	if (WARN_ON_ONCE(!req->link))
		return NULL;

	req->flags &= ~REQ_F_ARM_LTIMEOUT;
	req->flags |= REQ_F_LINK_TIMEOUT;

	/* linked timeouts should have two refs once prep'ed */
	io_req_set_refcount(req);
	__io_req_set_refcount(req->link, 2);
	return req->link;
}

static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
{
	if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
		return NULL;
	return __io_prep_linked_timeout(req);
}

static noinline void __io_arm_ltimeout(struct io_kiocb *req)
{
	io_queue_linked_timeout(__io_prep_linked_timeout(req));
}

static inline void io_arm_ltimeout(struct io_kiocb *req)
{
	if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
		__io_arm_ltimeout(req);
}

static void io_prep_async_work(struct io_kiocb *req)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	struct io_ring_ctx *ctx = req->ctx;

	if (!(req->flags & REQ_F_CREDS)) {
		req->flags |= REQ_F_CREDS;
		req->creds = get_current_cred();
	}

	req->work.list.next = NULL;
	req->work.flags = 0;
	if (req->flags & REQ_F_FORCE_ASYNC)
		req->work.flags |= IO_WQ_WORK_CONCURRENT;

	if (req->file && !(req->flags & REQ_F_FIXED_FILE))
		req->flags |= io_file_get_flags(req->file);

	if (req->file && (req->flags & REQ_F_ISREG)) {
		bool should_hash = def->hash_reg_file;

		/* don't serialize this request if the fs doesn't need it */
		if (should_hash && (req->file->f_flags & O_DIRECT) &&
		    (req->file->f_mode & FMODE_DIO_PARALLEL_WRITE))
			should_hash = false;
		if (should_hash || (ctx->flags & IORING_SETUP_IOPOLL))
			io_wq_hash_work(&req->work, file_inode(req->file));
	} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
		if (def->unbound_nonreg_file)
			req->work.flags |= IO_WQ_WORK_UNBOUND;
	}
}

static void io_prep_async_link(struct io_kiocb *req)
{
	struct io_kiocb *cur;

	if (req->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock_irq(&ctx->timeout_lock);
		io_for_each_link(cur, req)
			io_prep_async_work(cur);
		spin_unlock_irq(&ctx->timeout_lock);
	} else {
		io_for_each_link(cur, req)
			io_prep_async_work(cur);
	}
}

void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use)
{
	struct io_kiocb *link = io_prep_linked_timeout(req);
	struct io_uring_task *tctx = req->task->io_uring;

	BUG_ON(!tctx);
	BUG_ON(!tctx->io_wq);

	/* init ->work of the whole link before punting */
	io_prep_async_link(req);

	/*
	 * Not expected to happen, but if we do have a bug where this _can_
	 * happen, catch it here and ensure the request is marked as
	 * canceled. That will make io-wq go through the usual work cancel
	 * procedure rather than attempt to run this request (or create a new
	 * worker for it).
	 */
	if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
		req->work.flags |= IO_WQ_WORK_CANCEL;

	trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
	io_wq_enqueue(tctx->io_wq, &req->work);
	if (link)
		io_queue_linked_timeout(link);
}

static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
{
	while (!list_empty(&ctx->defer_list)) {
		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
						struct io_defer_entry, list);

		if (req_need_defer(de->req, de->seq))
			break;
		list_del_init(&de->list);
		io_req_task_queue(de->req);
		kfree(de);
	}
}

void io_eventfd_ops(struct rcu_head *rcu)
{
	struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
	int ops = atomic_xchg(&ev_fd->ops, 0);

	if (ops & BIT(IO_EVENTFD_OP_SIGNAL_BIT))
		eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);

	/* IO_EVENTFD_OP_FREE_BIT may not be set here depending on callback
	 * ordering in a race, but if references are 0 we know we have to free
	 * it regardless.
	 */
	if (atomic_dec_and_test(&ev_fd->refs)) {
		eventfd_ctx_put(ev_fd->cq_ev_fd);
		kfree(ev_fd);
	}
}

static void io_eventfd_signal(struct io_ring_ctx *ctx)
{
	struct io_ev_fd *ev_fd = NULL;

	rcu_read_lock();
	/*
	 * rcu_dereference ctx->io_ev_fd once and use it for both checking
	 * and eventfd_signal
	 */
	ev_fd = rcu_dereference(ctx->io_ev_fd);

	/*
	 * Check again if ev_fd exists in case an io_eventfd_unregister call
	 * completed between the NULL check of ctx->io_ev_fd at the start of
	 * the function and rcu_read_lock.
	 */
	if (unlikely(!ev_fd))
		goto out;
	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
		goto out;
	if (ev_fd->eventfd_async && !io_wq_current_is_worker())
		goto out;

	if (likely(eventfd_signal_allowed())) {
		eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
	} else {
		atomic_inc(&ev_fd->refs);
		if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))
			call_rcu_hurry(&ev_fd->rcu, io_eventfd_ops);
		else
			atomic_dec(&ev_fd->refs);
	}

out:
	rcu_read_unlock();
}

static void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
{
	bool skip;

	spin_lock(&ctx->completion_lock);

	/*
	 * Eventfd should only get triggered when at least one event has been
	 * posted. Some applications rely on the eventfd notification count
	 * only changing IFF a new CQE has been added to the CQ ring. There's
	 * no dependency on 1:1 relationship between how many times this
	 * function is called (and hence the eventfd count) and number of CQEs
	 * posted to the CQ ring.
	 */
	skip = ctx->cached_cq_tail == ctx->evfd_last_cq_tail;
	ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
	spin_unlock(&ctx->completion_lock);
	if (skip)
		return;

	io_eventfd_signal(ctx);
}

void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (ctx->poll_activated)
		io_poll_wq_wake(ctx);
	if (ctx->off_timeout_used)
		io_flush_timeouts(ctx);
	if (ctx->drain_active) {
		spin_lock(&ctx->completion_lock);
		io_queue_deferred(ctx);
		spin_unlock(&ctx->completion_lock);
	}
	if (ctx->has_evfd)
		io_eventfd_flush_signal(ctx);
}

static inline void __io_cq_lock(struct io_ring_ctx *ctx)
{
	if (!ctx->lockless_cq)
		spin_lock(&ctx->completion_lock);
}

static inline void io_cq_lock(struct io_ring_ctx *ctx)
	__acquires(ctx->completion_lock)
{
	spin_lock(&ctx->completion_lock);
}

static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
{
	io_commit_cqring(ctx);
	if (!ctx->task_complete) {
		if (!ctx->lockless_cq)
			spin_unlock(&ctx->completion_lock);
		/* IOPOLL rings only need to wake up if it's also SQPOLL */
		if (!ctx->syscall_iopoll)
			io_cqring_wake(ctx);
	}
	io_commit_cqring_flush(ctx);
}

static void io_cq_unlock_post(struct io_ring_ctx *ctx)
	__releases(ctx->completion_lock)
{
	io_commit_cqring(ctx);
	spin_unlock(&ctx->completion_lock);
	io_cqring_wake(ctx);
	io_commit_cqring_flush(ctx);
}

static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
{
	struct io_overflow_cqe *ocqe;
	LIST_HEAD(list);

	spin_lock(&ctx->completion_lock);
	list_splice_init(&ctx->cq_overflow_list, &list);
	clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
	spin_unlock(&ctx->completion_lock);

	while (!list_empty(&list)) {
		ocqe = list_first_entry(&list, struct io_overflow_cqe, list);
		list_del(&ocqe->list);
		kfree(ocqe);
	}
}

static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
{
	size_t cqe_size = sizeof(struct io_uring_cqe);

	if (__io_cqring_events(ctx) == ctx->cq_entries)
		return;

	if (ctx->flags & IORING_SETUP_CQE32)
		cqe_size <<= 1;

	io_cq_lock(ctx);
	while (!list_empty(&ctx->cq_overflow_list)) {
		struct io_uring_cqe *cqe;
		struct io_overflow_cqe *ocqe;

		if (!io_get_cqe_overflow(ctx, &cqe, true))
			break;
		ocqe = list_first_entry(&ctx->cq_overflow_list,
					struct io_overflow_cqe, list);
		memcpy(cqe, &ocqe->cqe, cqe_size);
		list_del(&ocqe->list);
		kfree(ocqe);
	}

	if (list_empty(&ctx->cq_overflow_list)) {
		clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
		atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
	}
	io_cq_unlock_post(ctx);
}

static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
{
	/* iopoll syncs against uring_lock, not completion_lock */
	if (ctx->flags & IORING_SETUP_IOPOLL)
		mutex_lock(&ctx->uring_lock);
	__io_cqring_overflow_flush(ctx);
	if (ctx->flags & IORING_SETUP_IOPOLL)
		mutex_unlock(&ctx->uring_lock);
}

static void io_cqring_overflow_flush(struct io_ring_ctx *ctx)
{
	if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
		io_cqring_do_overflow_flush(ctx);
}

/* can be called by any task */
static void io_put_task_remote(struct task_struct *task)
{
	struct io_uring_task *tctx = task->io_uring;

	percpu_counter_sub(&tctx->inflight, 1);
	if (unlikely(atomic_read(&tctx->in_cancel)))
		wake_up(&tctx->wait);
	put_task_struct(task);
}

/* used by a task to put its own references */
static void io_put_task_local(struct task_struct *task)
{
	task->io_uring->cached_refs++;
}

/* must be called somewhat shortly after putting a request */
static inline void io_put_task(struct task_struct *task)
{
	if (likely(task == current))
		io_put_task_local(task);
	else
		io_put_task_remote(task);
}

void io_task_refs_refill(struct io_uring_task *tctx)
{
	unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;

	percpu_counter_add(&tctx->inflight, refill);
	refcount_add(refill, &current->usage);
	tctx->cached_refs += refill;
}

static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
{
	struct io_uring_task *tctx = task->io_uring;
	unsigned int refs = tctx->cached_refs;

	if (refs) {
		tctx->cached_refs = 0;
		percpu_counter_sub(&tctx->inflight, refs);
		put_task_struct_many(task, refs);
	}
}

static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
				     s32 res, u32 cflags, u64 extra1, u64 extra2)
{
	struct io_overflow_cqe *ocqe;
	size_t ocq_size = sizeof(struct io_overflow_cqe);
	bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);

	lockdep_assert_held(&ctx->completion_lock);

	if (is_cqe32)
		ocq_size += sizeof(struct io_uring_cqe);

	ocqe = kmalloc(ocq_size, GFP_ATOMIC | __GFP_ACCOUNT);
	trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
	if (!ocqe) {
		/*
		 * If we're in ring overflow flush mode, or in task cancel mode,
		 * or cannot allocate an overflow entry, then we need to drop it
		 * on the floor.
		 */
		io_account_cq_overflow(ctx);
		set_bit(IO_CHECK_CQ_DROPPED_BIT, &ctx->check_cq);
		return false;
	}
	if (list_empty(&ctx->cq_overflow_list)) {
		set_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
		atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
	}
	ocqe->cqe.user_data = user_data;
	ocqe->cqe.res = res;
	ocqe->cqe.flags = cflags;
	if (is_cqe32) {
		ocqe->cqe.big_cqe[0] = extra1;
		ocqe->cqe.big_cqe[1] = extra2;
	}
	list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
	return true;
}

void io_req_cqe_overflow(struct io_kiocb *req)
{
	io_cqring_event_overflow(req->ctx, req->cqe.user_data,
				req->cqe.res, req->cqe.flags,
				req->big_cqe.extra1, req->big_cqe.extra2);
	memset(&req->big_cqe, 0, sizeof(req->big_cqe));
}

/*
 * writes to the cq entry need to come after reading head; the
 * control dependency is enough as we're using WRITE_ONCE to
 * fill the cq entry
 */
bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow)
{
	struct io_rings *rings = ctx->rings;
	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
	unsigned int free, queued, len;

	/*
	 * Posting into the CQ when there are pending overflowed CQEs may break
	 * ordering guarantees, which will affect links, F_MORE users and more.
	 * Force overflow the completion.
	 */
	if (!overflow && (ctx->check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)))
		return false;

	/* userspace may cheat modifying the tail, be safe and do min */
	queued = min(__io_cqring_events(ctx), ctx->cq_entries);
	free = ctx->cq_entries - queued;
	/* we need a contiguous range, limit based on the current array offset */
	len = min(free, ctx->cq_entries - off);
	if (!len)
		return false;

	if (ctx->flags & IORING_SETUP_CQE32) {
		off <<= 1;
		len <<= 1;
	}

	ctx->cqe_cached = &rings->cqes[off];
	ctx->cqe_sentinel = ctx->cqe_cached + len;
	return true;
}

static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
			    u32 cflags)
{
	struct io_uring_cqe *cqe;

	ctx->cq_extra++;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	if (likely(io_get_cqe(ctx, &cqe))) {
		trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);

		WRITE_ONCE(cqe->user_data, user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, cflags);

		if (ctx->flags & IORING_SETUP_CQE32) {
			WRITE_ONCE(cqe->big_cqe[0], 0);
			WRITE_ONCE(cqe->big_cqe[1], 0);
		}
		return true;
	}
	return false;
}

static void __io_flush_post_cqes(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	struct io_submit_state *state = &ctx->submit_state;
	unsigned int i;

	lockdep_assert_held(&ctx->uring_lock);
	for (i = 0; i < state->cqes_count; i++) {
		struct io_uring_cqe *cqe = &ctx->completion_cqes[i];

		if (!io_fill_cqe_aux(ctx, cqe->user_data, cqe->res, cqe->flags)) {
			if (ctx->lockless_cq) {
				spin_lock(&ctx->completion_lock);
				io_cqring_event_overflow(ctx, cqe->user_data,
							cqe->res, cqe->flags, 0, 0);
				spin_unlock(&ctx->completion_lock);
			} else {
				io_cqring_event_overflow(ctx, cqe->user_data,
							cqe->res, cqe->flags, 0, 0);
			}
		}
	}
	state->cqes_count = 0;
}

static bool __io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
			      bool allow_overflow)
{
	bool filled;

	io_cq_lock(ctx);
	filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
	if (!filled && allow_overflow)
		filled = io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);

	io_cq_unlock_post(ctx);
	return filled;
}

bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
{
	return __io_post_aux_cqe(ctx, user_data, res, cflags, true);
}

/*
 * A helper for multishot requests posting additional CQEs.
 * Should only be used from a task_work including IO_URING_F_MULTISHOT.
 */
bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	u64 user_data = req->cqe.user_data;
	struct io_uring_cqe *cqe;

	lockdep_assert(!io_wq_current_is_worker());

	if (!defer)
		return __io_post_aux_cqe(ctx, user_data, res, cflags, false);

	lockdep_assert_held(&ctx->uring_lock);

	if (ctx->submit_state.cqes_count == ARRAY_SIZE(ctx->completion_cqes)) {
		__io_cq_lock(ctx);
		__io_flush_post_cqes(ctx);
		/* no need to flush - flush is deferred */
		__io_cq_unlock_post(ctx);
	}

	/* For deferred completions this is not as strict as it is otherwise,
	 * however its main job is to prevent unbounded posted completions,
	 * and in that it works just as well.
	 */
	if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
		return false;

	cqe = &ctx->completion_cqes[ctx->submit_state.cqes_count++];
	cqe->user_data = user_data;
	cqe->res = res;
	cqe->flags = cflags;
	return true;
}

static void __io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_rsrc_node *rsrc_node = NULL;

	io_cq_lock(ctx);
	if (!(req->flags & REQ_F_CQE_SKIP)) {
		if (!io_fill_cqe_req(ctx, req))
			io_req_cqe_overflow(req);
	}

	/*
	 * If we're the last reference to this request, add to our locked
	 * free_list cache.
	 */
	if (req_ref_put_and_test(req)) {
		if (req->flags & IO_REQ_LINK_FLAGS) {
			if (req->flags & IO_DISARM_MASK)
				io_disarm_next(req);
			if (req->link) {
				io_req_task_queue(req->link);
				req->link = NULL;
			}
		}
		io_put_kbuf_comp(req);
		if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
			io_clean_op(req);
		io_put_file(req);

		rsrc_node = req->rsrc_node;
		/*
		 * Selected buffer deallocation in io_clean_op() assumes that
		 * we don't hold ->completion_lock. Clean them here to avoid
		 * deadlocks.
		 */
		io_put_task_remote(req->task);
		wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
		ctx->locked_free_nr++;
	}
	io_cq_unlock_post(ctx);

	if (rsrc_node) {
		io_ring_submit_lock(ctx, issue_flags);
		io_put_rsrc_node(ctx, rsrc_node);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}

void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (ctx->task_complete && ctx->submitter_task != current) {
		req->io_task_work.func = io_req_task_complete;
		io_req_task_work_add(req);
	} else if (!(issue_flags & IO_URING_F_UNLOCKED) ||
		   !(ctx->flags & IORING_SETUP_IOPOLL)) {
		__io_req_complete_post(req, issue_flags);
	} else {
		mutex_lock(&ctx->uring_lock);
		__io_req_complete_post(req, issue_flags & ~IO_URING_F_UNLOCKED);
		mutex_unlock(&ctx->uring_lock);
	}
}

void io_req_defer_failed(struct io_kiocb *req, s32 res)
	__must_hold(&ctx->uring_lock)
{
	const struct io_cold_def *def = &io_cold_defs[req->opcode];

	lockdep_assert_held(&req->ctx->uring_lock);

	req_set_fail(req);
	io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
	if (def->fail)
		def->fail(req);
	io_req_complete_defer(req);
}

/*
 * Don't initialise the fields below on every allocation, but do that in
 * advance and keep them valid across allocations.
 */
static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	req->ctx = ctx;
	req->link = NULL;
	req->async_data = NULL;
	/* not necessary, but safer to zero */
	memset(&req->cqe, 0, sizeof(req->cqe));
	memset(&req->big_cqe, 0, sizeof(req->big_cqe));
}

static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
					struct io_submit_state *state)
{
	spin_lock(&ctx->completion_lock);
	wq_list_splice(&ctx->locked_free_list, &state->free_list);
	ctx->locked_free_nr = 0;
	spin_unlock(&ctx->completion_lock);
}

/*
 * A request might get retired back into the request caches even before opcode
 * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
 * Because of that, io_alloc_req() should be called only under ->uring_lock
 * and with extra caution to not get a request that is still worked on.
 */
__cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	void *reqs[IO_REQ_ALLOC_BATCH];
	int ret, i;

	/*
	 * If we have more than a batch's worth of requests in our IRQ side
	 * locked cache, grab the lock and move them over to our submission
	 * side cache.
	 */
	if (data_race(ctx->locked_free_nr) > IO_COMPL_BATCH) {
		io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
		if (!io_req_cache_empty(ctx))
			return true;
	}

	ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);

	/*
	 * Bulk alloc is all-or-nothing. If we fail to get a batch,
	 * retry single alloc to be on the safe side.
	 */
	if (unlikely(ret <= 0)) {
		reqs[0] = kmem_cache_alloc(req_cachep, gfp);
		if (!reqs[0])
			return false;
		ret = 1;
	}

	percpu_ref_get_many(&ctx->refs, ret);
	for (i = 0; i < ret; i++) {
		struct io_kiocb *req = reqs[i];

		io_preinit_req(req, ctx);
		io_req_add_to_cache(req, ctx);
	}
	return true;
}

__cold void io_free_req(struct io_kiocb *req)
{
	/* refs were already put, restore them for io_req_task_complete() */
	req->flags &= ~REQ_F_REFCOUNT;
	/* we only want to free it, don't post CQEs */
	req->flags |= REQ_F_CQE_SKIP;
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

static void __io_req_find_next_prep(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	spin_lock(&ctx->completion_lock);
	io_disarm_next(req);
	spin_unlock(&ctx->completion_lock);
}

static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt;

	/*
	 * If LINK is set, we have dependent requests in this chain. If we
	 * didn't fail this request, queue the first one up, moving any other
	 * dependencies to the next request. In case of failure, fail the rest
	 * of the chain.
	 */
	if (unlikely(req->flags & IO_DISARM_MASK))
		__io_req_find_next_prep(req);
	nxt = req->link;
	req->link = NULL;
	return nxt;
}

static void ctx_flush_and_put(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
	if (!ctx)
		return;
	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
		atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
	if (ts->locked) {
		io_submit_flush_completions(ctx);
		mutex_unlock(&ctx->uring_lock);
		ts->locked = false;
	}
	percpu_ref_put(&ctx->refs);
}

/*
 * Run queued task_work, returning the number of entries processed in *count.
 * If more entries than max_entries are available, stop processing once this
 * is reached and return the rest of the list.
 */
struct llist_node *io_handle_tw_list(struct llist_node *node,
				     unsigned int *count,
				     unsigned int max_entries)
{
	struct io_ring_ctx *ctx = NULL;
	struct io_tw_state ts = { };

	do {
		struct llist_node *next = node->next;
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    io_task_work.node);

		if (req->ctx != ctx) {
			ctx_flush_and_put(ctx, &ts);
			ctx = req->ctx;
			/* if not contended, grab and improve batching */
			ts.locked = mutex_trylock(&ctx->uring_lock);
			percpu_ref_get(&ctx->refs);
		}
		INDIRECT_CALL_2(req->io_task_work.func,
				io_poll_task_func, io_req_rw_complete,
				req, &ts);
		node = next;
		(*count)++;
		if (unlikely(need_resched())) {
			ctx_flush_and_put(ctx, &ts);
			ctx = NULL;
			cond_resched();
		}
	} while (node && *count < max_entries);

	ctx_flush_and_put(ctx, &ts);
	return node;
}

/**
 * io_llist_xchg - swap all entries in a lock-less list
 * @head:	the head of lock-less list to delete all entries
 * @new:	new entry as the head of the list
 *
 * If list is empty, return NULL, otherwise, return the pointer to the first entry.
 * The order of entries returned is from the newest to the oldest added one.
 */
static inline struct llist_node *io_llist_xchg(struct llist_head *head,
					       struct llist_node *new)
{
	return xchg(&head->first, new);
}

static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
{
	struct llist_node *node = llist_del_all(&tctx->task_list);
	struct io_ring_ctx *last_ctx = NULL;
	struct io_kiocb *req;

	while (node) {
		req = container_of(node, struct io_kiocb, io_task_work.node);
		node = node->next;
		if (sync && last_ctx != req->ctx) {
			if (last_ctx) {
				flush_delayed_work(&last_ctx->fallback_work);
				percpu_ref_put(&last_ctx->refs);
			}
			last_ctx = req->ctx;
			percpu_ref_get(&last_ctx->refs);
		}
		if (llist_add(&req->io_task_work.node,
			      &req->ctx->fallback_llist))
			schedule_delayed_work(&req->ctx->fallback_work, 1);
	}

	if (last_ctx) {
		flush_delayed_work(&last_ctx->fallback_work);
		percpu_ref_put(&last_ctx->refs);
	}
}

struct llist_node *tctx_task_work_run(struct io_uring_task *tctx,
				      unsigned int max_entries,
				      unsigned int *count)
{
	struct llist_node *node;

	if (unlikely(current->flags & PF_EXITING)) {
		io_fallback_tw(tctx, true);
		return NULL;
	}

	node = llist_del_all(&tctx->task_list);
	if (node) {
		node = llist_reverse_order(node);
		node = io_handle_tw_list(node, count, max_entries);
	}

	/* relaxed read is enough as only the task itself sets ->in_cancel */
	if (unlikely(atomic_read(&tctx->in_cancel)))
		io_uring_drop_tctx_refs(current);

	trace_io_uring_task_work_run(tctx, *count);
	return node;
}

void tctx_task_work(struct callback_head *cb)
{
	struct io_uring_task *tctx;
	struct llist_node *ret;
	unsigned int count = 0;

	tctx = container_of(cb, struct io_uring_task, task_work);
	ret = tctx_task_work_run(tctx, UINT_MAX, &count);
	/* can't happen */
	WARN_ON_ONCE(ret);
}

static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned nr_wait, nr_tw, nr_tw_prev;
	struct llist_node *head;

	/* See comment above IO_CQ_WAKE_INIT */
	BUILD_BUG_ON(IO_CQ_WAKE_FORCE <= IORING_MAX_CQ_ENTRIES);

	/*
	 * We don't know how many requests are in the link and whether
	 * they can even be queued lazily, fall back to non-lazy.
	 */
	if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
		flags &= ~IOU_F_TWQ_LAZY_WAKE;

	head = READ_ONCE(ctx->work_llist.first);
	do {
		nr_tw_prev = 0;
		if (head) {
			struct io_kiocb *first_req = container_of(head,
							struct io_kiocb,
							io_task_work.node);
			/*
			 * Might be executed at any moment, rely on
			 * SLAB_TYPESAFE_BY_RCU to keep it alive.
			 */
			nr_tw_prev = READ_ONCE(first_req->nr_tw);
		}

		/*
		 * Theoretically, it can overflow, but that's fine as one of
		 * previous adds should've tried to wake the task.
		 */
		nr_tw = nr_tw_prev + 1;
		if (!(flags & IOU_F_TWQ_LAZY_WAKE))
			nr_tw = IO_CQ_WAKE_FORCE;

		req->nr_tw = nr_tw;
		req->io_task_work.node.next = head;
	} while (!try_cmpxchg(&ctx->work_llist.first, &head,
			      &req->io_task_work.node));

	/*
	 * cmpxchg implies a full barrier, which pairs with the barrier
	 * in set_current_state() on the io_cqring_wait() side. It's used
	 * to ensure that either we see updated ->cq_wait_nr, or waiters
	 * going to sleep will observe the work added to the list, which
	 * is similar to the wait/wake task state sync.
	 */
1343 | |
1344 | if (!head) { |
1345 | if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) |
1346 | atomic_or(IORING_SQ_TASKRUN, v: &ctx->rings->sq_flags); |
1347 | if (ctx->has_evfd) |
1348 | io_eventfd_signal(ctx); |
1349 | } |
1350 | |
1351 | nr_wait = atomic_read(v: &ctx->cq_wait_nr); |
1352 | /* not enough or no one is waiting */ |
1353 | if (nr_tw < nr_wait) |
1354 | return; |
1355 | /* the previous add has already woken it up */ |
1356 | if (nr_tw_prev >= nr_wait) |
1357 | return; |
1358 | wake_up_state(tsk: ctx->submitter_task, TASK_INTERRUPTIBLE); |
1359 | } |
1360 | |
1361 | static void io_req_normal_work_add(struct io_kiocb *req) |
1362 | { |
1363 | struct io_uring_task *tctx = req->task->io_uring; |
1364 | struct io_ring_ctx *ctx = req->ctx; |
1365 | |
1366 | /* task_work already pending, we're done */ |
1367 | if (!llist_add(new: &req->io_task_work.node, head: &tctx->task_list)) |
1368 | return; |
1369 | |
1370 | if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) |
1371 | atomic_or(IORING_SQ_TASKRUN, v: &ctx->rings->sq_flags); |
1372 | |
1373 | /* SQPOLL doesn't need the task_work added, it'll run it itself */ |
1374 | if (ctx->flags & IORING_SETUP_SQPOLL) { |
1375 | struct io_sq_data *sqd = ctx->sq_data; |
1376 | |
1377 | if (wq_has_sleeper(wq_head: &sqd->wait)) |
1378 | wake_up(&sqd->wait); |
1379 | return; |
1380 | } |
1381 | |
1382 | if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method))) |
1383 | return; |
1384 | |
1385 | io_fallback_tw(tctx, sync: false); |
1386 | } |
1387 | |
1388 | void __io_req_task_work_add(struct io_kiocb *req, unsigned flags) |
1389 | { |
1390 | if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN) { |
1391 | rcu_read_lock(); |
1392 | io_req_local_work_add(req, flags); |
1393 | rcu_read_unlock(); |
1394 | } else { |
1395 | io_req_normal_work_add(req); |
1396 | } |
1397 | } |
1398 | |
1399 | static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx) |
1400 | { |
1401 | struct llist_node *node; |
1402 | |
1403 | node = llist_del_all(head: &ctx->work_llist); |
1404 | while (node) { |
1405 | struct io_kiocb *req = container_of(node, struct io_kiocb, |
1406 | io_task_work.node); |
1407 | |
1408 | node = node->next; |
1409 | io_req_normal_work_add(req); |
1410 | } |
1411 | } |
1412 | |
1413 | static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events, |
1414 | int min_events) |
1415 | { |
1416 | if (llist_empty(head: &ctx->work_llist)) |
1417 | return false; |
1418 | if (events < min_events) |
1419 | return true; |
1420 | if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) |
1421 | atomic_or(IORING_SQ_TASKRUN, v: &ctx->rings->sq_flags); |
1422 | return false; |
1423 | } |
1424 | |
1425 | static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts, |
1426 | int min_events) |
1427 | { |
1428 | struct llist_node *node; |
1429 | unsigned int loops = 0; |
1430 | int ret = 0; |
1431 | |
1432 | if (WARN_ON_ONCE(ctx->submitter_task != current)) |
1433 | return -EEXIST; |
1434 | if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) |
1435 | atomic_andnot(IORING_SQ_TASKRUN, v: &ctx->rings->sq_flags); |
1436 | again: |
1437 | /* |
1438 | * llists are in reverse order, flip it back the right way before |
1439 | * running the pending items. |
1440 | */ |
1441 | node = llist_reverse_order(head: io_llist_xchg(head: &ctx->work_llist, NULL)); |
1442 | while (node) { |
1443 | struct llist_node *next = node->next; |
1444 | struct io_kiocb *req = container_of(node, struct io_kiocb, |
1445 | io_task_work.node); |
1446 | INDIRECT_CALL_2(req->io_task_work.func, |
1447 | io_poll_task_func, io_req_rw_complete, |
1448 | req, ts); |
1449 | ret++; |
1450 | node = next; |
1451 | } |
1452 | loops++; |
1453 | |
1454 | if (io_run_local_work_continue(ctx, events: ret, min_events)) |
1455 | goto again; |
1456 | if (ts->locked) { |
1457 | io_submit_flush_completions(ctx); |
1458 | if (io_run_local_work_continue(ctx, events: ret, min_events)) |
1459 | goto again; |
1460 | } |
1461 | |
1462 | trace_io_uring_local_work_run(ctx, count: ret, loops); |
1463 | return ret; |
1464 | } |
1465 | |
1466 | static inline int io_run_local_work_locked(struct io_ring_ctx *ctx, |
1467 | int min_events) |
1468 | { |
1469 | struct io_tw_state ts = { .locked = true, }; |
1470 | int ret; |
1471 | |
1472 | if (llist_empty(head: &ctx->work_llist)) |
1473 | return 0; |
1474 | |
1475 | ret = __io_run_local_work(ctx, ts: &ts, min_events); |
1476 | /* shouldn't happen! */ |
1477 | if (WARN_ON_ONCE(!ts.locked)) |
1478 | mutex_lock(&ctx->uring_lock); |
1479 | return ret; |
1480 | } |
1481 | |
1482 | static int io_run_local_work(struct io_ring_ctx *ctx, int min_events) |
1483 | { |
1484 | struct io_tw_state ts = {}; |
1485 | int ret; |
1486 | |
1487 | ts.locked = mutex_trylock(lock: &ctx->uring_lock); |
1488 | ret = __io_run_local_work(ctx, ts: &ts, min_events); |
1489 | if (ts.locked) |
1490 | mutex_unlock(lock: &ctx->uring_lock); |
1491 | |
1492 | return ret; |
1493 | } |
1494 | |
1495 | static void io_req_task_cancel(struct io_kiocb *req, struct io_tw_state *ts) |
1496 | { |
1497 | io_tw_lock(ctx: req->ctx, ts); |
1498 | io_req_defer_failed(req, res: req->cqe.res); |
1499 | } |
1500 | |
1501 | void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts) |
1502 | { |
1503 | io_tw_lock(ctx: req->ctx, ts); |
1504 | /* req->task == current here, checking PF_EXITING is safe */ |
1505 | if (unlikely(req->task->flags & PF_EXITING)) |
1506 | io_req_defer_failed(req, res: -EFAULT); |
1507 | else if (req->flags & REQ_F_FORCE_ASYNC) |
1508 | io_queue_iowq(req, ts_dont_use: ts); |
1509 | else |
1510 | io_queue_sqe(req); |
1511 | } |
1512 | |
1513 | void io_req_task_queue_fail(struct io_kiocb *req, int ret) |
1514 | { |
1515 | io_req_set_res(req, res: ret, cflags: 0); |
1516 | req->io_task_work.func = io_req_task_cancel; |
1517 | io_req_task_work_add(req); |
1518 | } |
1519 | |
1520 | void io_req_task_queue(struct io_kiocb *req) |
1521 | { |
1522 | req->io_task_work.func = io_req_task_submit; |
1523 | io_req_task_work_add(req); |
1524 | } |
1525 | |
1526 | void io_queue_next(struct io_kiocb *req) |
1527 | { |
1528 | struct io_kiocb *nxt = io_req_find_next(req); |
1529 | |
1530 | if (nxt) |
1531 | io_req_task_queue(req: nxt); |
1532 | } |
1533 | |
1534 | static void io_free_batch_list(struct io_ring_ctx *ctx, |
1535 | struct io_wq_work_node *node) |
1536 | __must_hold(&ctx->uring_lock) |
1537 | { |
1538 | do { |
1539 | struct io_kiocb *req = container_of(node, struct io_kiocb, |
1540 | comp_list); |
1541 | |
1542 | if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) { |
1543 | if (req->flags & REQ_F_REFCOUNT) { |
1544 | node = req->comp_list.next; |
1545 | if (!req_ref_put_and_test(req)) |
1546 | continue; |
1547 | } |
1548 | if ((req->flags & REQ_F_POLLED) && req->apoll) { |
1549 | struct async_poll *apoll = req->apoll; |
1550 | |
1551 | if (apoll->double_poll) |
1552 | kfree(objp: apoll->double_poll); |
1553 | if (!io_alloc_cache_put(cache: &ctx->apoll_cache, entry: &apoll->cache)) |
1554 | kfree(objp: apoll); |
1555 | req->flags &= ~REQ_F_POLLED; |
1556 | } |
1557 | if (req->flags & IO_REQ_LINK_FLAGS) |
1558 | io_queue_next(req); |
1559 | if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS)) |
1560 | io_clean_op(req); |
1561 | } |
1562 | io_put_file(req); |
1563 | |
1564 | io_req_put_rsrc_locked(req, ctx); |
1565 | |
1566 | io_put_task(task: req->task); |
1567 | node = req->comp_list.next; |
1568 | io_req_add_to_cache(req, ctx); |
1569 | } while (node); |
1570 | } |
1571 | |
1572 | void __io_submit_flush_completions(struct io_ring_ctx *ctx) |
1573 | __must_hold(&ctx->uring_lock) |
1574 | { |
1575 | struct io_submit_state *state = &ctx->submit_state; |
1576 | struct io_wq_work_node *node; |
1577 | |
1578 | __io_cq_lock(ctx); |
1579 | /* must come first to preserve CQE ordering in failure cases */ |
1580 | if (state->cqes_count) |
1581 | __io_flush_post_cqes(ctx); |
1582 | __wq_list_for_each(node, &state->compl_reqs) { |
1583 | struct io_kiocb *req = container_of(node, struct io_kiocb, |
1584 | comp_list); |
1585 | |
1586 | if (!(req->flags & REQ_F_CQE_SKIP) && |
1587 | unlikely(!io_fill_cqe_req(ctx, req))) { |
1588 | if (ctx->lockless_cq) { |
1589 | spin_lock(lock: &ctx->completion_lock); |
1590 | io_req_cqe_overflow(req); |
1591 | spin_unlock(lock: &ctx->completion_lock); |
1592 | } else { |
1593 | io_req_cqe_overflow(req); |
1594 | } |
1595 | } |
1596 | } |
1597 | __io_cq_unlock_post(ctx); |
1598 | |
1599 | if (!wq_list_empty(&ctx->submit_state.compl_reqs)) { |
1600 | io_free_batch_list(ctx, node: state->compl_reqs.first); |
1601 | INIT_WQ_LIST(&state->compl_reqs); |
1602 | } |
1603 | } |
1604 | |
1605 | static unsigned io_cqring_events(struct io_ring_ctx *ctx) |
1606 | { |
1607 | /* See comment at the top of this file */ |
1608 | smp_rmb(); |
1609 | return __io_cqring_events(ctx); |
1610 | } |
1611 | |
1612 | /* |
1613 | * We can't just wait for polled events to come to us, we have to actively |
1614 | * find and complete them. |
1615 | */ |
1616 | static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx) |
1617 | { |
1618 | if (!(ctx->flags & IORING_SETUP_IOPOLL)) |
1619 | return; |
1620 | |
1621 | mutex_lock(&ctx->uring_lock); |
1622 | while (!wq_list_empty(&ctx->iopoll_list)) { |
1623 | /* let it sleep and repeat later if can't complete a request */ |
1624 | if (io_do_iopoll(ctx, force_nonspin: true) == 0) |
1625 | break; |
		/*
		 * Ensure we allow local-to-the-cpu processing to take place,
		 * in this case we need to ensure that we reap all events.
		 * Also let task_work, etc. progress by releasing the mutex.
		 */
		if (need_resched()) {
			mutex_unlock(&ctx->uring_lock);
			cond_resched();
			mutex_lock(&ctx->uring_lock);
		}
1636 | } |
	mutex_unlock(&ctx->uring_lock);
1638 | } |
1639 | |
1640 | static int io_iopoll_check(struct io_ring_ctx *ctx, long min) |
1641 | { |
1642 | unsigned int nr_events = 0; |
1643 | unsigned long check_cq; |
1644 | |
1645 | if (!io_allowed_run_tw(ctx)) |
1646 | return -EEXIST; |
1647 | |
1648 | check_cq = READ_ONCE(ctx->check_cq); |
1649 | if (unlikely(check_cq)) { |
1650 | if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)) |
1651 | __io_cqring_overflow_flush(ctx); |
1652 | /* |
1653 | * Similarly do not spin if we have not informed the user of any |
1654 | * dropped CQE. |
1655 | */ |
1656 | if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) |
1657 | return -EBADR; |
1658 | } |
1659 | /* |
1660 | * Don't enter poll loop if we already have events pending. |
1661 | * If we do, we can potentially be spinning for commands that |
1662 | * already triggered a CQE (eg in error). |
1663 | */ |
1664 | if (io_cqring_events(ctx)) |
1665 | return 0; |
1666 | |
1667 | do { |
1668 | int ret = 0; |
1669 | |
1670 | /* |
1671 | * If a submit got punted to a workqueue, we can have the |
1672 | * application entering polling for a command before it gets |
1673 | * issued. That app will hold the uring_lock for the duration |
1674 | * of the poll right here, so we need to take a breather every |
1675 | * now and then to ensure that the issue has a chance to add |
1676 | * the poll to the issued list. Otherwise we can spin here |
1677 | * forever, while the workqueue is stuck trying to acquire the |
1678 | * very same mutex. |
1679 | */ |
1680 | if (wq_list_empty(&ctx->iopoll_list) || |
1681 | io_task_work_pending(ctx)) { |
1682 | u32 tail = ctx->cached_cq_tail; |
1683 | |
			(void) io_run_local_work_locked(ctx, min);
1685 | |
1686 | if (task_work_pending(current) || |
1687 | wq_list_empty(&ctx->iopoll_list)) { |
				mutex_unlock(&ctx->uring_lock);
1689 | io_run_task_work(); |
1690 | mutex_lock(&ctx->uring_lock); |
1691 | } |
1692 | /* some requests don't go through iopoll_list */ |
1693 | if (tail != ctx->cached_cq_tail || |
1694 | wq_list_empty(&ctx->iopoll_list)) |
1695 | break; |
1696 | } |
		ret = io_do_iopoll(ctx, !min);
1698 | if (unlikely(ret < 0)) |
1699 | return ret; |
1700 | |
1701 | if (task_sigpending(current)) |
1702 | return -EINTR; |
1703 | if (need_resched()) |
1704 | break; |
1705 | |
1706 | nr_events += ret; |
1707 | } while (nr_events < min); |
1708 | |
1709 | return 0; |
1710 | } |
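
/*
 * Illustrative userspace sketch (not part of the kernel build; QD, n and
 * ring_fd are the application's): with IORING_SETUP_IOPOLL, the application
 * reaps completions by calling io_uring_enter(2) with IORING_ENTER_GETEVENTS,
 * which lands in io_iopoll_check() above:
 *
 *	struct io_uring_params p = { .flags = IORING_SETUP_IOPOLL };
 *	int ring_fd = syscall(__NR_io_uring_setup, QD, &p);
 *
 *	// ... queue 'n' O_DIRECT reads/writes in the SQ ring, then:
 *	syscall(__NR_io_uring_enter, ring_fd, n, n,
 *		IORING_ENTER_GETEVENTS, NULL, 0);
 */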
1711 | |
1712 | void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts) |
1713 | { |
1714 | if (ts->locked) |
1715 | io_req_complete_defer(req); |
1716 | else |
		io_req_complete_post(req, IO_URING_F_UNLOCKED);
1718 | } |
1719 | |
1720 | /* |
1721 | * After the iocb has been issued, it's safe to be found on the poll list. |
1722 | * Adding the kiocb to the list AFTER submission ensures that we don't |
1723 | * find it from a io_do_iopoll() thread before the issuer is done |
1724 | * accessing the kiocb cookie. |
1725 | */ |
1726 | static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags) |
1727 | { |
1728 | struct io_ring_ctx *ctx = req->ctx; |
1729 | const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED; |
1730 | |
1731 | /* workqueue context doesn't hold uring_lock, grab it now */ |
1732 | if (unlikely(needs_lock)) |
1733 | mutex_lock(&ctx->uring_lock); |
1734 | |
1735 | /* |
1736 | * Track whether we have multiple files in our lists. This will impact |
1737 | * how we do polling eventually, not spinning if we're on potentially |
1738 | * different devices. |
1739 | */ |
1740 | if (wq_list_empty(&ctx->iopoll_list)) { |
1741 | ctx->poll_multi_queue = false; |
1742 | } else if (!ctx->poll_multi_queue) { |
1743 | struct io_kiocb *list_req; |
1744 | |
1745 | list_req = container_of(ctx->iopoll_list.first, struct io_kiocb, |
1746 | comp_list); |
1747 | if (list_req->file != req->file) |
1748 | ctx->poll_multi_queue = true; |
1749 | } |
1750 | |
1751 | /* |
1752 | * For fast devices, IO may have already completed. If it has, add |
1753 | * it to the front so we find it first. |
1754 | */ |
1755 | if (READ_ONCE(req->iopoll_completed)) |
		wq_list_add_head(&req->comp_list, &ctx->iopoll_list);
	else
		wq_list_add_tail(&req->comp_list, &ctx->iopoll_list);
1759 | |
	if (unlikely(needs_lock)) {
		/*
		 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
		 * in the sq thread task context or in an io worker task
		 * context. If the current task context is the sq thread, we
		 * don't need to check whether we should wake it up.
		 */
		if ((ctx->flags & IORING_SETUP_SQPOLL) &&
		    wq_has_sleeper(&ctx->sq_data->wait))
			wake_up(&ctx->sq_data->wait);

		mutex_unlock(&ctx->uring_lock);
	}
1773 | } |
1774 | |
1775 | io_req_flags_t io_file_get_flags(struct file *file) |
1776 | { |
1777 | io_req_flags_t res = 0; |
1778 | |
1779 | if (S_ISREG(file_inode(file)->i_mode)) |
1780 | res |= REQ_F_ISREG; |
1781 | if ((file->f_flags & O_NONBLOCK) || (file->f_mode & FMODE_NOWAIT)) |
1782 | res |= REQ_F_SUPPORT_NOWAIT; |
1783 | return res; |
1784 | } |
1785 | |
1786 | bool io_alloc_async_data(struct io_kiocb *req) |
1787 | { |
1788 | WARN_ON_ONCE(!io_cold_defs[req->opcode].async_size); |
	req->async_data = kmalloc(io_cold_defs[req->opcode].async_size, GFP_KERNEL);
1790 | if (req->async_data) { |
1791 | req->flags |= REQ_F_ASYNC_DATA; |
1792 | return false; |
1793 | } |
1794 | return true; |
1795 | } |
1796 | |
1797 | int io_req_prep_async(struct io_kiocb *req) |
1798 | { |
1799 | const struct io_cold_def *cdef = &io_cold_defs[req->opcode]; |
1800 | const struct io_issue_def *def = &io_issue_defs[req->opcode]; |
1801 | |
1802 | /* assign early for deferred execution for non-fixed file */ |
1803 | if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE) && !req->file) |
		req->file = io_file_get_normal(req, req->cqe.fd);
1805 | if (!cdef->prep_async) |
1806 | return 0; |
1807 | if (WARN_ON_ONCE(req_has_async_data(req))) |
1808 | return -EFAULT; |
1809 | if (!def->manual_alloc) { |
1810 | if (io_alloc_async_data(req)) |
1811 | return -EAGAIN; |
1812 | } |
1813 | return cdef->prep_async(req); |
1814 | } |
1815 | |
1816 | static u32 io_get_sequence(struct io_kiocb *req) |
1817 | { |
1818 | u32 seq = req->ctx->cached_sq_head; |
1819 | struct io_kiocb *cur; |
1820 | |
1821 | /* need original cached_sq_head, but it was increased for each req */ |
1822 | io_for_each_link(cur, req) |
1823 | seq--; |
1824 | return seq; |
1825 | } |
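
/*
 * Worked example for io_get_sequence(): cached_sq_head is bumped once for
 * every SQE consumed, including each request in a link. For the head of a
 * 3-request link submitted when cached_sq_head was 7, the loop walks all
 * three linked requests and returns 10 - 3 == 7, the sequence number the
 * link started at.
 */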
1826 | |
1827 | static __cold void io_drain_req(struct io_kiocb *req) |
1828 | __must_hold(&ctx->uring_lock) |
1829 | { |
1830 | struct io_ring_ctx *ctx = req->ctx; |
1831 | struct io_defer_entry *de; |
1832 | int ret; |
1833 | u32 seq = io_get_sequence(req); |
1834 | |
1835 | /* Still need defer if there is pending req in defer list. */ |
	spin_lock(&ctx->completion_lock);
	if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
		spin_unlock(&ctx->completion_lock);
queue:
		ctx->drain_active = false;
		io_req_task_queue(req);
		return;
	}
	spin_unlock(&ctx->completion_lock);

	io_prep_async_link(req);
	de = kmalloc(sizeof(*de), GFP_KERNEL);
	if (!de) {
		ret = -ENOMEM;
		io_req_defer_failed(req, ret);
		return;
	}

	spin_lock(&ctx->completion_lock);
	if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
		spin_unlock(&ctx->completion_lock);
		kfree(de);
		goto queue;
	}

	trace_io_uring_defer(req);
	de->req = req;
	de->seq = seq;
	list_add_tail(&de->list, &ctx->defer_list);
	spin_unlock(&ctx->completion_lock);
1866 | } |
1867 | |
1868 | static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def, |
1869 | unsigned int issue_flags) |
1870 | { |
1871 | if (req->file || !def->needs_file) |
1872 | return true; |
1873 | |
1874 | if (req->flags & REQ_F_FIXED_FILE) |
		req->file = io_file_get_fixed(req, req->cqe.fd, issue_flags);
	else
		req->file = io_file_get_normal(req, req->cqe.fd);
1878 | |
1879 | return !!req->file; |
1880 | } |
1881 | |
1882 | static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) |
1883 | { |
1884 | const struct io_issue_def *def = &io_issue_defs[req->opcode]; |
1885 | const struct cred *creds = NULL; |
1886 | int ret; |
1887 | |
1888 | if (unlikely(!io_assign_file(req, def, issue_flags))) |
1889 | return -EBADF; |
1890 | |
1891 | if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred())) |
1892 | creds = override_creds(req->creds); |
1893 | |
1894 | if (!def->audit_skip) |
		audit_uring_entry(req->opcode);
1896 | |
1897 | ret = def->issue(req, issue_flags); |
1898 | |
1899 | if (!def->audit_skip) |
		audit_uring_exit(!ret, ret);
1901 | |
1902 | if (creds) |
1903 | revert_creds(creds); |
1904 | |
1905 | if (ret == IOU_OK) { |
1906 | if (issue_flags & IO_URING_F_COMPLETE_DEFER) |
1907 | io_req_complete_defer(req); |
1908 | else |
1909 | io_req_complete_post(req, issue_flags); |
1910 | |
1911 | return 0; |
1912 | } |
1913 | |
1914 | if (ret == IOU_ISSUE_SKIP_COMPLETE) { |
1915 | ret = 0; |
1916 | io_arm_ltimeout(req); |
1917 | |
1918 | /* If the op doesn't have a file, we're not polling for it */ |
1919 | if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue) |
1920 | io_iopoll_req_issued(req, issue_flags); |
1921 | } |
1922 | return ret; |
1923 | } |
1924 | |
1925 | int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts) |
1926 | { |
	io_tw_lock(req->ctx, ts);
	return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT|
				 IO_URING_F_COMPLETE_DEFER);
1930 | } |
1931 | |
1932 | struct io_wq_work *io_wq_free_work(struct io_wq_work *work) |
1933 | { |
1934 | struct io_kiocb *req = container_of(work, struct io_kiocb, work); |
1935 | struct io_kiocb *nxt = NULL; |
1936 | |
1937 | if (req_ref_put_and_test(req)) { |
1938 | if (req->flags & IO_REQ_LINK_FLAGS) |
1939 | nxt = io_req_find_next(req); |
1940 | io_free_req(req); |
1941 | } |
1942 | return nxt ? &nxt->work : NULL; |
1943 | } |
1944 | |
1945 | void io_wq_submit_work(struct io_wq_work *work) |
1946 | { |
1947 | struct io_kiocb *req = container_of(work, struct io_kiocb, work); |
1948 | const struct io_issue_def *def = &io_issue_defs[req->opcode]; |
1949 | unsigned int issue_flags = IO_URING_F_UNLOCKED | IO_URING_F_IOWQ; |
1950 | bool needs_poll = false; |
1951 | int ret = 0, err = -ECANCELED; |
1952 | |
1953 | /* one will be dropped by ->io_wq_free_work() after returning to io-wq */ |
1954 | if (!(req->flags & REQ_F_REFCOUNT)) |
		__io_req_set_refcount(req, 2);
1956 | else |
1957 | req_ref_get(req); |
1958 | |
1959 | io_arm_ltimeout(req); |
1960 | |
1961 | /* either cancelled or io-wq is dying, so don't touch tctx->iowq */ |
1962 | if (work->flags & IO_WQ_WORK_CANCEL) { |
1963 | fail: |
		io_req_task_queue_fail(req, err);
1965 | return; |
1966 | } |
1967 | if (!io_assign_file(req, def, issue_flags)) { |
1968 | err = -EBADF; |
1969 | work->flags |= IO_WQ_WORK_CANCEL; |
1970 | goto fail; |
1971 | } |
1972 | |
	/*
	 * If DEFER_TASKRUN is set, it's only allowed to post CQEs from the
	 * submitter task context. Final request completions are handed to the
	 * right context, however this is not the case for auxiliary CQEs,
	 * which are the main means of operation for multishot requests.
	 * Don't allow any multishot execution from io-wq. It's more restrictive
	 * than necessary and also cleaner.
	 */
1981 | if (req->flags & REQ_F_APOLL_MULTISHOT) { |
1982 | err = -EBADFD; |
1983 | if (!io_file_can_poll(req)) |
1984 | goto fail; |
1985 | if (req->file->f_flags & O_NONBLOCK || |
1986 | req->file->f_mode & FMODE_NOWAIT) { |
1987 | err = -ECANCELED; |
1988 | if (io_arm_poll_handler(req, issue_flags) != IO_APOLL_OK) |
1989 | goto fail; |
1990 | return; |
1991 | } else { |
1992 | req->flags &= ~REQ_F_APOLL_MULTISHOT; |
1993 | } |
1994 | } |
1995 | |
1996 | if (req->flags & REQ_F_FORCE_ASYNC) { |
1997 | bool opcode_poll = def->pollin || def->pollout; |
1998 | |
1999 | if (opcode_poll && io_file_can_poll(req)) { |
2000 | needs_poll = true; |
2001 | issue_flags |= IO_URING_F_NONBLOCK; |
2002 | } |
2003 | } |
2004 | |
2005 | do { |
2006 | ret = io_issue_sqe(req, issue_flags); |
2007 | if (ret != -EAGAIN) |
2008 | break; |
2009 | |
2010 | /* |
2011 | * If REQ_F_NOWAIT is set, then don't wait or retry with |
2012 | * poll. -EAGAIN is final for that case. |
2013 | */ |
2014 | if (req->flags & REQ_F_NOWAIT) |
2015 | break; |
2016 | |
2017 | /* |
2018 | * We can get EAGAIN for iopolled IO even though we're |
2019 | * forcing a sync submission from here, since we can't |
2020 | * wait for request slots on the block side. |
2021 | */ |
2022 | if (!needs_poll) { |
2023 | if (!(req->ctx->flags & IORING_SETUP_IOPOLL)) |
2024 | break; |
2025 | if (io_wq_worker_stopped()) |
2026 | break; |
2027 | cond_resched(); |
2028 | continue; |
2029 | } |
2030 | |
2031 | if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK) |
2032 | return; |
2033 | /* aborted or ready, in either case retry blocking */ |
2034 | needs_poll = false; |
2035 | issue_flags &= ~IO_URING_F_NONBLOCK; |
2036 | } while (1); |
2037 | |
2038 | /* avoid locking problems by failing it from a clean context */ |
2039 | if (ret < 0) |
2040 | io_req_task_queue_fail(req, ret); |
2041 | } |
2042 | |
2043 | inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd, |
2044 | unsigned int issue_flags) |
2045 | { |
2046 | struct io_ring_ctx *ctx = req->ctx; |
2047 | struct io_fixed_file *slot; |
2048 | struct file *file = NULL; |
2049 | |
2050 | io_ring_submit_lock(ctx, issue_flags); |
2051 | |
2052 | if (unlikely((unsigned int)fd >= ctx->nr_user_files)) |
2053 | goto out; |
2054 | fd = array_index_nospec(fd, ctx->nr_user_files); |
	slot = io_fixed_file_slot(&ctx->file_table, fd);
2056 | if (!req->rsrc_node) |
2057 | __io_req_set_rsrc_node(req, ctx); |
2058 | req->flags |= io_slot_flags(slot); |
2059 | file = io_slot_file(slot); |
2060 | out: |
2061 | io_ring_submit_unlock(ctx, issue_flags); |
2062 | return file; |
2063 | } |
2064 | |
2065 | struct file *io_file_get_normal(struct io_kiocb *req, int fd) |
2066 | { |
2067 | struct file *file = fget(fd); |
2068 | |
2069 | trace_io_uring_file_get(req, fd); |
2070 | |
2071 | /* we don't allow fixed io_uring files */ |
2072 | if (file && io_is_uring_fops(file)) |
2073 | io_req_track_inflight(req); |
2074 | return file; |
2075 | } |
2076 | |
2077 | static void io_queue_async(struct io_kiocb *req, int ret) |
2078 | __must_hold(&req->ctx->uring_lock) |
2079 | { |
2080 | struct io_kiocb *linked_timeout; |
2081 | |
	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
		io_req_defer_failed(req, ret);
		return;
	}

	linked_timeout = io_prep_linked_timeout(req);

	switch (io_arm_poll_handler(req, 0)) {
	case IO_APOLL_READY:
		io_kbuf_recycle(req, 0);
		io_req_task_queue(req);
		break;
	case IO_APOLL_ABORTED:
		io_kbuf_recycle(req, 0);
		io_queue_iowq(req, NULL);
		break;
	case IO_APOLL_OK:
		break;
	}

	if (linked_timeout)
		io_queue_linked_timeout(linked_timeout);
2104 | } |
2105 | |
2106 | static inline void io_queue_sqe(struct io_kiocb *req) |
2107 | __must_hold(&req->ctx->uring_lock) |
2108 | { |
2109 | int ret; |
2110 | |
	ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
2112 | |
2113 | /* |
2114 | * We async punt it if the file wasn't marked NOWAIT, or if the file |
2115 | * doesn't support non-blocking read/write attempts |
2116 | */ |
2117 | if (unlikely(ret)) |
2118 | io_queue_async(req, ret); |
2119 | } |
2120 | |
2121 | static void io_queue_sqe_fallback(struct io_kiocb *req) |
2122 | __must_hold(&req->ctx->uring_lock) |
2123 | { |
2124 | if (unlikely(req->flags & REQ_F_FAIL)) { |
2125 | /* |
2126 | * We don't submit, fail them all, for that replace hardlinks |
2127 | * with normal links. Extra REQ_F_LINK is tolerated. |
2128 | */ |
2129 | req->flags &= ~REQ_F_HARDLINK; |
2130 | req->flags |= REQ_F_LINK; |
		io_req_defer_failed(req, req->cqe.res);
2132 | } else { |
2133 | int ret = io_req_prep_async(req); |
2134 | |
2135 | if (unlikely(ret)) { |
			io_req_defer_failed(req, ret);
2137 | return; |
2138 | } |
2139 | |
2140 | if (unlikely(req->ctx->drain_active)) |
2141 | io_drain_req(req); |
2142 | else |
2143 | io_queue_iowq(req, NULL); |
2144 | } |
2145 | } |
2146 | |
2147 | /* |
2148 | * Check SQE restrictions (opcode and flags). |
2149 | * |
2150 | * Returns 'true' if SQE is allowed, 'false' otherwise. |
2151 | */ |
2152 | static inline bool io_check_restriction(struct io_ring_ctx *ctx, |
2153 | struct io_kiocb *req, |
2154 | unsigned int sqe_flags) |
2155 | { |
2156 | if (!test_bit(req->opcode, ctx->restrictions.sqe_op)) |
2157 | return false; |
2158 | |
2159 | if ((sqe_flags & ctx->restrictions.sqe_flags_required) != |
2160 | ctx->restrictions.sqe_flags_required) |
2161 | return false; |
2162 | |
2163 | if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed | |
2164 | ctx->restrictions.sqe_flags_required)) |
2165 | return false; |
2166 | |
2167 | return true; |
2168 | } |
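
/*
 * Illustrative userspace sketch (not part of the kernel build; error
 * handling elided) of installing the restrictions io_check_restriction()
 * enforces. The ring must have been created with IORING_SETUP_R_DISABLED:
 *
 *	struct io_uring_restriction res[] = {
 *		{ .opcode = IORING_RESTRICTION_SQE_OP,
 *		  .sqe_op = IORING_OP_READ },
 *		{ .opcode = IORING_RESTRICTION_SQE_FLAGS_ALLOWED,
 *		  .sqe_flags = IOSQE_ASYNC },
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_RESTRICTIONS, res, 2);
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 */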
2169 | |
2170 | static void io_init_req_drain(struct io_kiocb *req) |
2171 | { |
2172 | struct io_ring_ctx *ctx = req->ctx; |
2173 | struct io_kiocb *head = ctx->submit_state.link.head; |
2174 | |
2175 | ctx->drain_active = true; |
2176 | if (head) { |
2177 | /* |
2178 | * If we need to drain a request in the middle of a link, drain |
2179 | * the head request and the next request/link after the current |
2180 | * link. Considering sequential execution of links, |
2181 | * REQ_F_IO_DRAIN will be maintained for every request of our |
2182 | * link. |
2183 | */ |
2184 | head->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC; |
2185 | ctx->drain_next = true; |
2186 | } |
2187 | } |
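
/*
 * Example: for a link A -> B -> C where B carries IOSQE_IO_DRAIN, the head A
 * gets REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC here and ctx->drain_next is set,
 * so the first request submitted after this link is drained as well; the
 * whole chain then executes behind the drain barrier.
 */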
2188 | |
2189 | static __cold int io_init_fail_req(struct io_kiocb *req, int err) |
2190 | { |
2191 | /* ensure per-opcode data is cleared if we fail before prep */ |
2192 | memset(&req->cmd.data, 0, sizeof(req->cmd.data)); |
2193 | return err; |
2194 | } |
2195 | |
2196 | static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, |
2197 | const struct io_uring_sqe *sqe) |
2198 | __must_hold(&ctx->uring_lock) |
2199 | { |
2200 | const struct io_issue_def *def; |
2201 | unsigned int sqe_flags; |
2202 | int personality; |
2203 | u8 opcode; |
2204 | |
2205 | /* req is partially pre-initialised, see io_preinit_req() */ |
2206 | req->opcode = opcode = READ_ONCE(sqe->opcode); |
2207 | /* same numerical values with corresponding REQ_F_*, safe to copy */ |
2208 | sqe_flags = READ_ONCE(sqe->flags); |
2209 | req->flags = (io_req_flags_t) sqe_flags; |
2210 | req->cqe.user_data = READ_ONCE(sqe->user_data); |
2211 | req->file = NULL; |
2212 | req->rsrc_node = NULL; |
2213 | req->task = current; |
2214 | |
	if (unlikely(opcode >= IORING_OP_LAST)) {
		req->opcode = 0;
		return io_init_fail_req(req, -EINVAL);
	}
	def = &io_issue_defs[opcode];
	if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
		/* enforce forwards compatibility on users */
		if (sqe_flags & ~SQE_VALID_FLAGS)
			return io_init_fail_req(req, -EINVAL);
		if (sqe_flags & IOSQE_BUFFER_SELECT) {
			if (!def->buffer_select)
				return io_init_fail_req(req, -EOPNOTSUPP);
			req->buf_index = READ_ONCE(sqe->buf_group);
		}
		if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
			ctx->drain_disabled = true;
		if (sqe_flags & IOSQE_IO_DRAIN) {
			if (ctx->drain_disabled)
				return io_init_fail_req(req, -EOPNOTSUPP);
			io_init_req_drain(req);
		}
2236 | } |
2237 | if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) { |
2238 | if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags)) |
			return io_init_fail_req(req, -EACCES);
2240 | /* knock it to the slow queue path, will be drained there */ |
2241 | if (ctx->drain_active) |
2242 | req->flags |= REQ_F_FORCE_ASYNC; |
2243 | /* if there is no link, we're at "next" request and need to drain */ |
2244 | if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) { |
2245 | ctx->drain_next = false; |
2246 | ctx->drain_active = true; |
2247 | req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC; |
2248 | } |
2249 | } |
2250 | |
	if (!def->ioprio && sqe->ioprio)
		return io_init_fail_req(req, -EINVAL);
	if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
		return io_init_fail_req(req, -EINVAL);
2255 | |
2256 | if (def->needs_file) { |
2257 | struct io_submit_state *state = &ctx->submit_state; |
2258 | |
2259 | req->cqe.fd = READ_ONCE(sqe->fd); |
2260 | |
2261 | /* |
2262 | * Plug now if we have more than 2 IO left after this, and the |
2263 | * target is potentially a read/write to block based storage. |
2264 | */ |
2265 | if (state->need_plug && def->plug) { |
2266 | state->plug_started = true; |
2267 | state->need_plug = false; |
2268 | blk_start_plug_nr_ios(&state->plug, state->submit_nr); |
2269 | } |
2270 | } |
2271 | |
	personality = READ_ONCE(sqe->personality);
	if (personality) {
		int ret;

		req->creds = xa_load(&ctx->personalities, personality);
		if (!req->creds)
			return io_init_fail_req(req, -EINVAL);
		get_cred(req->creds);
		ret = security_uring_override_creds(req->creds);
		if (ret) {
			put_cred(req->creds);
			return io_init_fail_req(req, ret);
		}
		req->flags |= REQ_F_CREDS;
	}
2287 | |
2288 | return def->prep(req, sqe); |
2289 | } |
2290 | |
2291 | static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe, |
2292 | struct io_kiocb *req, int ret) |
2293 | { |
2294 | struct io_ring_ctx *ctx = req->ctx; |
2295 | struct io_submit_link *link = &ctx->submit_state.link; |
2296 | struct io_kiocb *head = link->head; |
2297 | |
	trace_io_uring_req_failed(sqe, req, ret);
2299 | |
2300 | /* |
2301 | * Avoid breaking links in the middle as it renders links with SQPOLL |
2302 | * unusable. Instead of failing eagerly, continue assembling the link if |
2303 | * applicable and mark the head with REQ_F_FAIL. The link flushing code |
2304 | * should find the flag and handle the rest. |
2305 | */ |
	req_fail_link_node(req, ret);
	if (head && !(head->flags & REQ_F_FAIL))
		req_fail_link_node(head, -ECANCELED);
2309 | |
2310 | if (!(req->flags & IO_REQ_LINK_FLAGS)) { |
2311 | if (head) { |
2312 | link->last->link = req; |
2313 | link->head = NULL; |
2314 | req = head; |
2315 | } |
2316 | io_queue_sqe_fallback(req); |
2317 | return ret; |
2318 | } |
2319 | |
2320 | if (head) |
2321 | link->last->link = req; |
2322 | else |
2323 | link->head = req; |
2324 | link->last = req; |
2325 | return 0; |
2326 | } |
2327 | |
2328 | static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, |
2329 | const struct io_uring_sqe *sqe) |
2330 | __must_hold(&ctx->uring_lock) |
2331 | { |
2332 | struct io_submit_link *link = &ctx->submit_state.link; |
2333 | int ret; |
2334 | |
2335 | ret = io_init_req(ctx, req, sqe); |
2336 | if (unlikely(ret)) |
2337 | return io_submit_fail_init(sqe, req, ret); |
2338 | |
2339 | trace_io_uring_submit_req(req); |
2340 | |
2341 | /* |
2342 | * If we already have a head request, queue this one for async |
2343 | * submittal once the head completes. If we don't have a head but |
2344 | * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be |
2345 | * submitted sync once the chain is complete. If none of those |
2346 | * conditions are true (normal request), then just queue it. |
2347 | */ |
2348 | if (unlikely(link->head)) { |
2349 | ret = io_req_prep_async(req); |
2350 | if (unlikely(ret)) |
2351 | return io_submit_fail_init(sqe, req, ret); |
2352 | |
		trace_io_uring_link(req, link->head);
2354 | link->last->link = req; |
2355 | link->last = req; |
2356 | |
2357 | if (req->flags & IO_REQ_LINK_FLAGS) |
2358 | return 0; |
2359 | /* last request of the link, flush it */ |
2360 | req = link->head; |
2361 | link->head = NULL; |
2362 | if (req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)) |
2363 | goto fallback; |
2364 | |
2365 | } else if (unlikely(req->flags & (IO_REQ_LINK_FLAGS | |
2366 | REQ_F_FORCE_ASYNC | REQ_F_FAIL))) { |
2367 | if (req->flags & IO_REQ_LINK_FLAGS) { |
2368 | link->head = req; |
2369 | link->last = req; |
2370 | } else { |
2371 | fallback: |
2372 | io_queue_sqe_fallback(req); |
2373 | } |
2374 | return 0; |
2375 | } |
2376 | |
2377 | io_queue_sqe(req); |
2378 | return 0; |
2379 | } |
2380 | |
2381 | /* |
2382 | * Batched submission is done, ensure local IO is flushed out. |
2383 | */ |
2384 | static void io_submit_state_end(struct io_ring_ctx *ctx) |
2385 | { |
2386 | struct io_submit_state *state = &ctx->submit_state; |
2387 | |
2388 | if (unlikely(state->link.head)) |
		io_queue_sqe_fallback(state->link.head);
2390 | /* flush only after queuing links as they can generate completions */ |
2391 | io_submit_flush_completions(ctx); |
2392 | if (state->plug_started) |
2393 | blk_finish_plug(&state->plug); |
2394 | } |
2395 | |
2396 | /* |
2397 | * Start submission side cache. |
2398 | */ |
2399 | static void io_submit_state_start(struct io_submit_state *state, |
2400 | unsigned int max_ios) |
2401 | { |
2402 | state->plug_started = false; |
2403 | state->need_plug = max_ios > 2; |
2404 | state->submit_nr = max_ios; |
2405 | /* set only head, no need to init link_last in advance */ |
2406 | state->link.head = NULL; |
2407 | } |
2408 | |
2409 | static void io_commit_sqring(struct io_ring_ctx *ctx) |
2410 | { |
2411 | struct io_rings *rings = ctx->rings; |
2412 | |
2413 | /* |
2414 | * Ensure any loads from the SQEs are done at this point, |
2415 | * since once we write the new head, the application could |
2416 | * write new data to them. |
2417 | */ |
2418 | smp_store_release(&rings->sq.head, ctx->cached_sq_head); |
2419 | } |
2420 | |
2421 | /* |
2422 | * Fetch an sqe, if one is available. Note this returns a pointer to memory |
2423 | * that is mapped by userspace. This means that care needs to be taken to |
2424 | * ensure that reads are stable, as we cannot rely on userspace always |
2425 | * being a good citizen. If members of the sqe are validated and then later |
2426 | * used, it's important that those reads are done through READ_ONCE() to |
2427 | * prevent a re-load down the line. |
2428 | */ |
2429 | static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe) |
2430 | { |
2431 | unsigned mask = ctx->sq_entries - 1; |
2432 | unsigned head = ctx->cached_sq_head++ & mask; |
2433 | |
2434 | if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) { |
2435 | head = READ_ONCE(ctx->sq_array[head]); |
		if (unlikely(head >= ctx->sq_entries)) {
			/* drop invalid entries */
			spin_lock(&ctx->completion_lock);
			ctx->cq_extra--;
			spin_unlock(&ctx->completion_lock);
			WRITE_ONCE(ctx->rings->sq_dropped,
				   READ_ONCE(ctx->rings->sq_dropped) + 1);
			return false;
		}
2445 | } |
2446 | |
2447 | /* |
2448 | * The cached sq head (or cq tail) serves two purposes: |
2449 | * |
2450 | * 1) allows us to batch the cost of updating the user visible |
2451 | * head updates. |
2452 | * 2) allows the kernel side to track the head on its own, even |
2453 | * though the application is the one updating it. |
2454 | */ |
2455 | |
2456 | /* double index for 128-byte SQEs, twice as long */ |
2457 | if (ctx->flags & IORING_SETUP_SQE128) |
2458 | head <<= 1; |
2459 | *sqe = &ctx->sq_sqes[head]; |
2460 | return true; |
2461 | } |
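
/*
 * Worked example for io_get_sqe(): with sq_entries == 8 (mask 7) and
 * cached_sq_head == 10, the ring slot is 10 & 7 == 2. Without
 * IORING_SETUP_NO_SQARRAY that slot holds an index into sq_array, which in
 * turn selects the SQE; with IORING_SETUP_SQE128 the final index is doubled,
 * as each 128-byte SQE spans two regular slots.
 */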
2462 | |
2463 | int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) |
2464 | __must_hold(&ctx->uring_lock) |
2465 | { |
2466 | unsigned int entries = io_sqring_entries(ctx); |
2467 | unsigned int left; |
2468 | int ret; |
2469 | |
2470 | if (unlikely(!entries)) |
2471 | return 0; |
2472 | /* make sure SQ entry isn't read before tail */ |
2473 | ret = left = min(nr, entries); |
	io_get_task_refs(left);
	io_submit_state_start(&ctx->submit_state, left);
2476 | |
2477 | do { |
2478 | const struct io_uring_sqe *sqe; |
2479 | struct io_kiocb *req; |
2480 | |
2481 | if (unlikely(!io_alloc_req(ctx, &req))) |
2482 | break; |
2483 | if (unlikely(!io_get_sqe(ctx, &sqe))) { |
2484 | io_req_add_to_cache(req, ctx); |
2485 | break; |
2486 | } |
2487 | |
2488 | /* |
2489 | * Continue submitting even for sqe failure if the |
2490 | * ring was setup with IORING_SETUP_SUBMIT_ALL |
2491 | */ |
2492 | if (unlikely(io_submit_sqe(ctx, req, sqe)) && |
2493 | !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) { |
2494 | left--; |
2495 | break; |
2496 | } |
2497 | } while (--left); |
2498 | |
2499 | if (unlikely(left)) { |
2500 | ret -= left; |
2501 | /* try again if it submitted nothing and can't allocate a req */ |
2502 | if (!ret && io_req_cache_empty(ctx)) |
2503 | ret = -EAGAIN; |
2504 | current->io_uring->cached_refs += left; |
2505 | } |
2506 | |
2507 | io_submit_state_end(ctx); |
2508 | /* Commit SQ ring head once we've consumed and submitted all SQEs */ |
2509 | io_commit_sqring(ctx); |
2510 | return ret; |
2511 | } |
2512 | |
2513 | static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode, |
2514 | int wake_flags, void *key) |
2515 | { |
2516 | struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue, wq); |
2517 | |
2518 | /* |
2519 | * Cannot safely flush overflowed CQEs from here, ensure we wake up |
2520 | * the task, and the next invocation will do it. |
2521 | */ |
	if (io_should_wake(iowq) || io_has_work(iowq->ctx))
		return autoremove_wake_function(curr, mode, wake_flags, key);
2524 | return -1; |
2525 | } |
2526 | |
2527 | int io_run_task_work_sig(struct io_ring_ctx *ctx) |
2528 | { |
	if (!llist_empty(&ctx->work_llist)) {
2530 | __set_current_state(TASK_RUNNING); |
2531 | if (io_run_local_work(ctx, INT_MAX) > 0) |
2532 | return 0; |
2533 | } |
2534 | if (io_run_task_work() > 0) |
2535 | return 0; |
2536 | if (task_sigpending(current)) |
2537 | return -EINTR; |
2538 | return 0; |
2539 | } |
2540 | |
2541 | static bool current_pending_io(void) |
2542 | { |
2543 | struct io_uring_task *tctx = current->io_uring; |
2544 | |
2545 | if (!tctx) |
2546 | return false; |
	return percpu_counter_read_positive(&tctx->inflight);
2548 | } |
2549 | |
2550 | /* when returns >0, the caller should retry */ |
2551 | static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, |
2552 | struct io_wait_queue *iowq) |
2553 | { |
2554 | int ret; |
2555 | |
2556 | if (unlikely(READ_ONCE(ctx->check_cq))) |
2557 | return 1; |
2558 | if (unlikely(!llist_empty(&ctx->work_llist))) |
2559 | return 1; |
2560 | if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) |
2561 | return 1; |
2562 | if (unlikely(task_sigpending(current))) |
2563 | return -EINTR; |
2564 | if (unlikely(io_should_wake(iowq))) |
2565 | return 0; |
2566 | |
2567 | /* |
2568 | * Mark us as being in io_wait if we have pending requests, so cpufreq |
2569 | * can take into account that the task is waiting for IO - turns out |
2570 | * to be important for low QD IO. |
2571 | */ |
2572 | if (current_pending_io()) |
2573 | current->in_iowait = 1; |
2574 | ret = 0; |
2575 | if (iowq->timeout == KTIME_MAX) |
2576 | schedule(); |
	else if (!schedule_hrtimeout(&iowq->timeout, HRTIMER_MODE_ABS))
2578 | ret = -ETIME; |
2579 | current->in_iowait = 0; |
2580 | return ret; |
2581 | } |
2582 | |
2583 | /* |
2584 | * Wait until events become available, if we don't already have some. The |
2585 | * application must reap them itself, as they reside on the shared cq ring. |
2586 | */ |
2587 | static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, |
2588 | const sigset_t __user *sig, size_t sigsz, |
2589 | struct __kernel_timespec __user *uts) |
2590 | { |
2591 | struct io_wait_queue iowq; |
2592 | struct io_rings *rings = ctx->rings; |
2593 | int ret; |
2594 | |
2595 | if (!io_allowed_run_tw(ctx)) |
2596 | return -EEXIST; |
	if (!llist_empty(&ctx->work_llist))
2598 | io_run_local_work(ctx, min_events); |
2599 | io_run_task_work(); |
2600 | io_cqring_overflow_flush(ctx); |
2601 | /* if user messes with these they will just get an early return */ |
2602 | if (__io_cqring_events_user(ctx) >= min_events) |
2603 | return 0; |
2604 | |
	init_waitqueue_func_entry(&iowq.wq, io_wake_function);
	iowq.wq.private = current;
	INIT_LIST_HEAD(&iowq.wq.entry);
	iowq.ctx = ctx;
	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
	iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
2611 | iowq.timeout = KTIME_MAX; |
2612 | |
2613 | if (uts) { |
2614 | struct timespec64 ts; |
2615 | |
		if (get_timespec64(&ts, uts))
			return -EFAULT;

		iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
		io_napi_adjust_timeout(ctx, &iowq, &ts);
2621 | } |
2622 | |
2623 | if (sig) { |
2624 | #ifdef CONFIG_COMPAT |
		if (in_compat_syscall())
			ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
						      sigsz);
		else
#endif
			ret = set_user_sigmask(sig, sigsz);
2631 | |
2632 | if (ret) |
2633 | return ret; |
2634 | } |
2635 | |
	io_napi_busy_loop(ctx, &iowq);
2637 | |
2638 | trace_io_uring_cqring_wait(ctx, min_events); |
2639 | do { |
2640 | int nr_wait = (int) iowq.cq_tail - READ_ONCE(ctx->rings->cq.tail); |
2641 | unsigned long check_cq; |
2642 | |
		if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
			atomic_set(&ctx->cq_wait_nr, nr_wait);
			set_current_state(TASK_INTERRUPTIBLE);
		} else {
			prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
						  TASK_INTERRUPTIBLE);
		}

		ret = io_cqring_wait_schedule(ctx, &iowq);
		__set_current_state(TASK_RUNNING);
		atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT);
2654 | |
2655 | /* |
2656 | * Run task_work after scheduling and before io_should_wake(). |
2657 | * If we got woken because of task_work being processed, run it |
2658 | * now rather than let the caller do another wait loop. |
2659 | */ |
2660 | io_run_task_work(); |
		if (!llist_empty(&ctx->work_llist))
			io_run_local_work(ctx, nr_wait);
2663 | |
2664 | /* |
2665 | * Non-local task_work will be run on exit to userspace, but |
2666 | * if we're using DEFER_TASKRUN, then we could have waited |
2667 | * with a timeout for a number of requests. If the timeout |
2668 | * hits, we could have some requests ready to process. Ensure |
2669 | * this break is _after_ we have run task_work, to avoid |
2670 | * deferring running potentially pending requests until the |
2671 | * next time we wait for events. |
2672 | */ |
2673 | if (ret < 0) |
2674 | break; |
2675 | |
2676 | check_cq = READ_ONCE(ctx->check_cq); |
2677 | if (unlikely(check_cq)) { |
2678 | /* let the caller flush overflows, retry */ |
2679 | if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)) |
2680 | io_cqring_do_overflow_flush(ctx); |
2681 | if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) { |
2682 | ret = -EBADR; |
2683 | break; |
2684 | } |
2685 | } |
2686 | |
		if (io_should_wake(&iowq)) {
2688 | ret = 0; |
2689 | break; |
2690 | } |
2691 | cond_resched(); |
2692 | } while (1); |
2693 | |
2694 | if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN)) |
		finish_wait(&ctx->cq_wait, &iowq.wq);
	restore_saved_sigmask_unless(ret == -EINTR);
2697 | |
2698 | return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0; |
2699 | } |
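
/*
 * Illustrative userspace counterpart (not part of the kernel build): waiting
 * for one CQE with a 10ms timeout ends up in io_cqring_wait() via the
 * six-argument io_uring_enter(2) form with IORING_ENTER_EXT_ARG:
 *
 *	struct __kernel_timespec ts = { .tv_nsec = 10 * 1000 * 1000 };
 *	struct io_uring_getevents_arg arg = {
 *		.ts = (__u64)(uintptr_t)&ts,
 *	};
 *
 *	syscall(__NR_io_uring_enter, ring_fd, 0, 1,
 *		IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
 *		&arg, sizeof(arg));
 */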
2700 | |
2701 | void io_mem_free(void *ptr) |
2702 | { |
2703 | if (!ptr) |
2704 | return; |
2705 | |
	folio_put(virt_to_folio(ptr));
2707 | } |
2708 | |
2709 | static void io_pages_free(struct page ***pages, int npages) |
2710 | { |
2711 | struct page **page_array = *pages; |
2712 | int i; |
2713 | |
2714 | if (!page_array) |
2715 | return; |
2716 | |
2717 | for (i = 0; i < npages; i++) |
		unpin_user_page(page_array[i]);
	kvfree(page_array);
2720 | *pages = NULL; |
2721 | } |
2722 | |
2723 | static void *__io_uaddr_map(struct page ***pages, unsigned short *npages, |
2724 | unsigned long uaddr, size_t size) |
2725 | { |
2726 | struct page **page_array; |
2727 | unsigned int nr_pages; |
2728 | void *page_addr; |
2729 | int ret, i, pinned; |
2730 | |
2731 | *npages = 0; |
2732 | |
	if (uaddr & (PAGE_SIZE - 1) || !size)
		return ERR_PTR(-EINVAL);

	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (nr_pages > USHRT_MAX)
		return ERR_PTR(-EINVAL);
	page_array = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!page_array)
		return ERR_PTR(-ENOMEM);

	pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
				     page_array);
2746 | if (pinned != nr_pages) { |
2747 | ret = (pinned < 0) ? pinned : -EFAULT; |
2748 | goto free_pages; |
2749 | } |
2750 | |
2751 | page_addr = page_address(page_array[0]); |
2752 | for (i = 0; i < nr_pages; i++) { |
2753 | ret = -EINVAL; |
2754 | |
2755 | /* |
2756 | * Can't support mapping user allocated ring memory on 32-bit |
2757 | * archs where it could potentially reside in highmem. Just |
2758 | * fail those with -EINVAL, just like we did on kernels that |
2759 | * didn't support this feature. |
2760 | */ |
		if (PageHighMem(page_array[i]))
2762 | goto free_pages; |
2763 | |
2764 | /* |
2765 | * No support for discontig pages for now, should either be a |
2766 | * single normal page, or a huge page. Later on we can add |
2767 | * support for remapping discontig pages, for now we will |
2768 | * just fail them with EINVAL. |
2769 | */ |
2770 | if (page_address(page_array[i]) != page_addr) |
2771 | goto free_pages; |
2772 | page_addr += PAGE_SIZE; |
2773 | } |
2774 | |
2775 | *pages = page_array; |
2776 | *npages = nr_pages; |
2777 | return page_to_virt(page_array[0]); |
2778 | |
2779 | free_pages: |
	io_pages_free(&page_array, pinned > 0 ? pinned : 0);
	return ERR_PTR(ret);
2782 | } |
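
/*
 * Illustrative userspace sketch (not part of the kernel build) of memory
 * that satisfies __io_uaddr_map(): page-aligned and physically contiguous,
 * e.g. a single huge page per region. With IORING_SETUP_NO_MMAP the
 * addresses are handed to the kernel in the setup params:
 *
 *	void *ring_mem = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *			      MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *	void *sqe_mem = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *			     MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *
 *	struct io_uring_params p = { .flags = IORING_SETUP_NO_MMAP };
 *	p.cq_off.user_addr = (__u64)(uintptr_t)ring_mem;
 *	p.sq_off.user_addr = (__u64)(uintptr_t)sqe_mem;
 *	int ring_fd = syscall(__NR_io_uring_setup, QD, &p);
 */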
2783 | |
2784 | static void *io_rings_map(struct io_ring_ctx *ctx, unsigned long uaddr, |
2785 | size_t size) |
2786 | { |
	return __io_uaddr_map(&ctx->ring_pages, &ctx->n_ring_pages, uaddr,
			      size);
2789 | } |
2790 | |
2791 | static void *io_sqes_map(struct io_ring_ctx *ctx, unsigned long uaddr, |
2792 | size_t size) |
2793 | { |
	return __io_uaddr_map(&ctx->sqe_pages, &ctx->n_sqe_pages, uaddr,
			      size);
2796 | } |
2797 | |
2798 | static void io_rings_free(struct io_ring_ctx *ctx) |
2799 | { |
	if (!(ctx->flags & IORING_SETUP_NO_MMAP)) {
		io_mem_free(ctx->rings);
		io_mem_free(ctx->sq_sqes);
	} else {
		io_pages_free(&ctx->ring_pages, ctx->n_ring_pages);
		ctx->n_ring_pages = 0;
		io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages);
		ctx->n_sqe_pages = 0;
	}
2809 | |
2810 | ctx->rings = NULL; |
2811 | ctx->sq_sqes = NULL; |
2812 | } |
2813 | |
2814 | void *io_mem_alloc(size_t size) |
2815 | { |
2816 | gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP; |
2817 | void *ret; |
2818 | |
	ret = (void *) __get_free_pages(gfp, get_order(size));
	if (ret)
		return ret;
	return ERR_PTR(-ENOMEM);
2823 | } |
2824 | |
2825 | static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries, |
2826 | unsigned int cq_entries, size_t *sq_offset) |
2827 | { |
2828 | struct io_rings *rings; |
2829 | size_t off, sq_array_size; |
2830 | |
2831 | off = struct_size(rings, cqes, cq_entries); |
2832 | if (off == SIZE_MAX) |
2833 | return SIZE_MAX; |
2834 | if (ctx->flags & IORING_SETUP_CQE32) { |
2835 | if (check_shl_overflow(off, 1, &off)) |
2836 | return SIZE_MAX; |
2837 | } |
2838 | |
2839 | #ifdef CONFIG_SMP |
2840 | off = ALIGN(off, SMP_CACHE_BYTES); |
2841 | if (off == 0) |
2842 | return SIZE_MAX; |
2843 | #endif |
2844 | |
2845 | if (ctx->flags & IORING_SETUP_NO_SQARRAY) { |
2846 | if (sq_offset) |
2847 | *sq_offset = SIZE_MAX; |
2848 | return off; |
2849 | } |
2850 | |
2851 | if (sq_offset) |
2852 | *sq_offset = off; |
2853 | |
2854 | sq_array_size = array_size(sizeof(u32), sq_entries); |
2855 | if (sq_array_size == SIZE_MAX) |
2856 | return SIZE_MAX; |
2857 | |
2858 | if (check_add_overflow(off, sq_array_size, &off)) |
2859 | return SIZE_MAX; |
2860 | |
2861 | return off; |
2862 | } |
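
/*
 * Worked example for rings_size(): with cq_entries == 8 and 16-byte CQEs,
 * the CQ portion is sizeof(struct io_rings) + 8 * 16 bytes; with
 * IORING_SETUP_CQE32 the computed size is shifted left by one. After
 * cache-line alignment, sq_entries == 8 appends an 8 * sizeof(u32) sq_array
 * tail, unless IORING_SETUP_NO_SQARRAY is set, in which case *sq_offset is
 * left as SIZE_MAX and nothing is appended.
 */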
2863 | |
2864 | static void io_req_caches_free(struct io_ring_ctx *ctx) |
2865 | { |
2866 | struct io_kiocb *req; |
2867 | int nr = 0; |
2868 | |
2869 | mutex_lock(&ctx->uring_lock); |
	io_flush_cached_locked_reqs(ctx, &ctx->submit_state);

	while (!io_req_cache_empty(ctx)) {
		req = io_extract_req(ctx);
		kmem_cache_free(req_cachep, req);
		nr++;
	}
	if (nr)
		percpu_ref_put_many(&ctx->refs, nr);
	mutex_unlock(&ctx->uring_lock);
2880 | } |
2881 | |
2882 | static void io_rsrc_node_cache_free(struct io_cache_entry *entry) |
2883 | { |
2884 | kfree(container_of(entry, struct io_rsrc_node, cache)); |
2885 | } |
2886 | |
2887 | static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) |
2888 | { |
2889 | io_sq_thread_finish(ctx); |
2890 | /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */ |
2891 | if (WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list))) |
2892 | return; |
2893 | |
2894 | mutex_lock(&ctx->uring_lock); |
2895 | if (ctx->buf_data) |
2896 | __io_sqe_buffers_unregister(ctx); |
2897 | if (ctx->file_data) |
2898 | __io_sqe_files_unregister(ctx); |
2899 | io_cqring_overflow_kill(ctx); |
2900 | io_eventfd_unregister(ctx); |
	io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
	io_futex_cache_free(ctx);
	io_destroy_buffers(ctx);
	mutex_unlock(&ctx->uring_lock);
	if (ctx->sq_creds)
		put_cred(ctx->sq_creds);
	if (ctx->submitter_task)
		put_task_struct(ctx->submitter_task);

	/* there are no registered resources left, nobody uses it */
	if (ctx->rsrc_node)
		io_rsrc_node_destroy(ctx, ctx->rsrc_node);

	WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
	WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));

	io_alloc_cache_free(&ctx->rsrc_node_cache, io_rsrc_node_cache_free);
	if (ctx->mm_account) {
		mmdrop(ctx->mm_account);
		ctx->mm_account = NULL;
	}
	io_rings_free(ctx);
	io_kbuf_mmap_list_free(ctx);

	percpu_ref_exit(&ctx->refs);
	free_uid(ctx->user);
	io_req_caches_free(ctx);
	if (ctx->hash_map)
		io_wq_put_hash(ctx->hash_map);
	io_napi_free(ctx);
	kfree(ctx->cancel_table.hbs);
	kfree(ctx->cancel_table_locked.hbs);
	xa_destroy(&ctx->io_bl_xa);
	kfree(ctx);
2936 | } |
2937 | |
2938 | static __cold void io_activate_pollwq_cb(struct callback_head *cb) |
2939 | { |
2940 | struct io_ring_ctx *ctx = container_of(cb, struct io_ring_ctx, |
2941 | poll_wq_task_work); |
2942 | |
2943 | mutex_lock(&ctx->uring_lock); |
2944 | ctx->poll_activated = true; |
	mutex_unlock(&ctx->uring_lock);
2946 | |
2947 | /* |
2948 | * Wake ups for some events between start of polling and activation |
2949 | * might've been lost due to loose synchronisation. |
2950 | */ |
2951 | wake_up_all(&ctx->poll_wq); |
	percpu_ref_put(&ctx->refs);
2953 | } |
2954 | |
2955 | __cold void io_activate_pollwq(struct io_ring_ctx *ctx) |
2956 | { |
	spin_lock(&ctx->completion_lock);
2958 | /* already activated or in progress */ |
2959 | if (ctx->poll_activated || ctx->poll_wq_task_work.func) |
2960 | goto out; |
2961 | if (WARN_ON_ONCE(!ctx->task_complete)) |
2962 | goto out; |
2963 | if (!ctx->submitter_task) |
2964 | goto out; |
2965 | /* |
2966 | * with ->submitter_task only the submitter task completes requests, we |
2967 | * only need to sync with it, which is done by injecting a tw |
2968 | */ |
	init_task_work(&ctx->poll_wq_task_work, io_activate_pollwq_cb);
	percpu_ref_get(&ctx->refs);
	if (task_work_add(ctx->submitter_task, &ctx->poll_wq_task_work, TWA_SIGNAL))
		percpu_ref_put(&ctx->refs);
out:
	spin_unlock(&ctx->completion_lock);
2975 | } |
2976 | |
2977 | static __poll_t io_uring_poll(struct file *file, poll_table *wait) |
2978 | { |
2979 | struct io_ring_ctx *ctx = file->private_data; |
2980 | __poll_t mask = 0; |
2981 | |
2982 | if (unlikely(!ctx->poll_activated)) |
2983 | io_activate_pollwq(ctx); |
2984 | |
	poll_wait(file, &ctx->poll_wq, wait);
2986 | /* |
2987 | * synchronizes with barrier from wq_has_sleeper call in |
2988 | * io_commit_cqring |
2989 | */ |
2990 | smp_rmb(); |
2991 | if (!io_sqring_full(ctx)) |
2992 | mask |= EPOLLOUT | EPOLLWRNORM; |
2993 | |
	/*
	 * Don't flush the cqring overflow list here, just do a simple check.
	 * Otherwise there could possibly be an ABBA deadlock:
	 *      CPU0                    CPU1
	 *      ----                    ----
	 * lock(&ctx->uring_lock);
	 *                              lock(&ep->mtx);
	 *                              lock(&ctx->uring_lock);
	 * lock(&ep->mtx);
	 *
	 * Users may get EPOLLIN while seeing nothing in the cqring; this
	 * pushes them to do the flush.
	 */
3007 | |
3008 | if (__io_cqring_events_user(ctx) || io_has_work(ctx)) |
3009 | mask |= EPOLLIN | EPOLLRDNORM; |
3010 | |
3011 | return mask; |
3012 | } |
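
/*
 * Illustrative userspace sketch (not part of the kernel build): the ring fd
 * itself can be added to an epoll set, which is served by io_uring_poll()
 * above. As noted, EPOLLIN is only a hint; an empty CQ with a pending
 * overflow still requires an io_uring_enter(2) call to flush it:
 *
 *	struct epoll_event ev = { .events = EPOLLIN };
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, ring_fd, &ev);
 */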
3013 | |
3014 | struct io_tctx_exit { |
3015 | struct callback_head task_work; |
3016 | struct completion completion; |
3017 | struct io_ring_ctx *ctx; |
3018 | }; |
3019 | |
3020 | static __cold void io_tctx_exit_cb(struct callback_head *cb) |
3021 | { |
3022 | struct io_uring_task *tctx = current->io_uring; |
3023 | struct io_tctx_exit *work; |
3024 | |
3025 | work = container_of(cb, struct io_tctx_exit, task_work); |
3026 | /* |
3027 | * When @in_cancel, we're in cancellation and it's racy to remove the |
3028 | * node. It'll be removed by the end of cancellation, just ignore it. |
3029 | * tctx can be NULL if the queueing of this task_work raced with |
3030 | * work cancelation off the exec path. |
3031 | */ |
	if (tctx && !atomic_read(&tctx->in_cancel))
		io_uring_del_tctx_node((unsigned long)work->ctx);
3034 | complete(&work->completion); |
3035 | } |
3036 | |
3037 | static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data) |
3038 | { |
3039 | struct io_kiocb *req = container_of(work, struct io_kiocb, work); |
3040 | |
3041 | return req->ctx == data; |
3042 | } |
3043 | |
3044 | static __cold void io_ring_exit_work(struct work_struct *work) |
3045 | { |
3046 | struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work); |
3047 | unsigned long timeout = jiffies + HZ * 60 * 5; |
3048 | unsigned long interval = HZ / 20; |
3049 | struct io_tctx_exit exit; |
3050 | struct io_tctx_node *node; |
3051 | int ret; |
3052 | |
3053 | /* |
3054 | * If we're doing polled IO and end up having requests being |
3055 | * submitted async (out-of-line), then completions can come in while |
3056 | * we're waiting for refs to drop. We need to reap these manually, |
3057 | * as nobody else will be looking for them. |
3058 | */ |
3059 | do { |
3060 | if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) { |
3061 | mutex_lock(&ctx->uring_lock); |
3062 | io_cqring_overflow_kill(ctx); |
			mutex_unlock(&ctx->uring_lock);
3064 | } |
3065 | |
3066 | if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) |
3067 | io_move_task_work_from_local(ctx); |
3068 | |
		while (io_uring_try_cancel_requests(ctx, NULL, true))
3070 | cond_resched(); |
3071 | |
3072 | if (ctx->sq_data) { |
3073 | struct io_sq_data *sqd = ctx->sq_data; |
3074 | struct task_struct *tsk; |
3075 | |
3076 | io_sq_thread_park(sqd); |
3077 | tsk = sqd->thread; |
3078 | if (tsk && tsk->io_uring && tsk->io_uring->io_wq) |
				io_wq_cancel_cb(tsk->io_uring->io_wq,
						io_cancel_ctx_cb, ctx, true);
3081 | io_sq_thread_unpark(sqd); |
3082 | } |
3083 | |
3084 | io_req_caches_free(ctx); |
3085 | |
3086 | if (WARN_ON_ONCE(time_after(jiffies, timeout))) { |
3087 | /* there is little hope left, don't run it too often */ |
3088 | interval = HZ * 60; |
3089 | } |
3090 | /* |
3091 | * This is really an uninterruptible wait, as it has to be |
3092 | * complete. But it's also run from a kworker, which doesn't |
3093 | * take signals, so it's fine to make it interruptible. This |
3094 | * avoids scenarios where we knowingly can wait much longer |
3095 | * on completions, for example if someone does a SIGSTOP on |
3096 | * a task that needs to finish task_work to make this loop |
3097 | * complete. That's a synthetic situation that should not |
3098 | * cause a stuck task backtrace, and hence a potential panic |
3099 | * on stuck tasks if that is enabled. |
3100 | */ |
	} while (!wait_for_completion_interruptible_timeout(&ctx->ref_comp, interval));
3102 | |
	init_completion(&exit.completion);
	init_task_work(&exit.task_work, io_tctx_exit_cb);
3105 | exit.ctx = ctx; |
3106 | |
3107 | mutex_lock(&ctx->uring_lock); |
	while (!list_empty(&ctx->tctx_list)) {
3109 | WARN_ON_ONCE(time_after(jiffies, timeout)); |
3110 | |
3111 | node = list_first_entry(&ctx->tctx_list, struct io_tctx_node, |
3112 | ctx_node); |
3113 | /* don't spin on a single task if cancellation failed */ |
		list_rotate_left(&ctx->tctx_list);
		ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
3116 | if (WARN_ON_ONCE(ret)) |
3117 | continue; |
3118 | |
		mutex_unlock(&ctx->uring_lock);
3120 | /* |
3121 | * See comment above for |
3122 | * wait_for_completion_interruptible_timeout() on why this |
3123 | * wait is marked as interruptible. |
3124 | */ |
		wait_for_completion_interruptible(&exit.completion);
3126 | mutex_lock(&ctx->uring_lock); |
3127 | } |
	mutex_unlock(&ctx->uring_lock);
	spin_lock(&ctx->completion_lock);
	spin_unlock(&ctx->completion_lock);
3131 | |
3132 | /* pairs with RCU read section in io_req_local_work_add() */ |
3133 | if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) |
3134 | synchronize_rcu(); |
3135 | |
3136 | io_ring_ctx_free(ctx); |
3137 | } |
3138 | |
3139 | static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) |
3140 | { |
3141 | unsigned long index; |
3142 | struct creds *creds; |
3143 | |
3144 | mutex_lock(&ctx->uring_lock); |
	percpu_ref_kill(&ctx->refs);
	xa_for_each(&ctx->personalities, index, creds)
		io_unregister_personality(ctx, index);
	if (ctx->rings)
		io_poll_remove_all(ctx, NULL, true);
	mutex_unlock(&ctx->uring_lock);
3151 | |
3152 | /* |
3153 | * If we failed setting up the ctx, we might not have any rings |
3154 | * and therefore did not submit any requests |
3155 | */ |
3156 | if (ctx->rings) |
		io_kill_timeouts(ctx, NULL, true);
3158 | |
	flush_delayed_work(&ctx->fallback_work);
3160 | |
3161 | INIT_WORK(&ctx->exit_work, io_ring_exit_work); |
3162 | /* |
3163 | * Use system_unbound_wq to avoid spawning tons of event kworkers |
3164 | * if we're exiting a ton of rings at the same time. It just adds |
3165 | * noise and overhead, there's no discernable change in runtime |
3166 | * over using system_wq. |
3167 | */ |
	queue_work(iou_wq, &ctx->exit_work);
3169 | } |
3170 | |
3171 | static int io_uring_release(struct inode *inode, struct file *file) |
3172 | { |
3173 | struct io_ring_ctx *ctx = file->private_data; |
3174 | |
3175 | file->private_data = NULL; |
3176 | io_ring_ctx_wait_and_kill(ctx); |
3177 | return 0; |
3178 | } |
3179 | |
3180 | struct io_task_cancel { |
3181 | struct task_struct *task; |
3182 | bool all; |
3183 | }; |
3184 | |
3185 | static bool io_cancel_task_cb(struct io_wq_work *work, void *data) |
3186 | { |
3187 | struct io_kiocb *req = container_of(work, struct io_kiocb, work); |
3188 | struct io_task_cancel *cancel = data; |
3189 | |
	return io_match_task_safe(req, cancel->task, cancel->all);
3191 | } |
3192 | |
3193 | static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx, |
3194 | struct task_struct *task, |
3195 | bool cancel_all) |
3196 | { |
3197 | struct io_defer_entry *de; |
3198 | LIST_HEAD(list); |
3199 | |
	spin_lock(&ctx->completion_lock);
	list_for_each_entry_reverse(de, &ctx->defer_list, list) {
		if (io_match_task_safe(de->req, task, cancel_all)) {
			list_cut_position(&list, &ctx->defer_list, &de->list);
			break;
		}
	}
	spin_unlock(&ctx->completion_lock);
	if (list_empty(&list))
		return false;

	while (!list_empty(&list)) {
		de = list_first_entry(&list, struct io_defer_entry, list);
		list_del_init(&de->list);
		io_req_task_queue_fail(de->req, -ECANCELED);
		kfree(de);
	}
3216 | } |
3217 | return true; |
3218 | } |
3219 | |
3220 | static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx) |
3221 | { |
3222 | struct io_tctx_node *node; |
3223 | enum io_wq_cancel cret; |
3224 | bool ret = false; |
3225 | |
3226 | mutex_lock(&ctx->uring_lock); |
3227 | list_for_each_entry(node, &ctx->tctx_list, ctx_node) { |
3228 | struct io_uring_task *tctx = node->task->io_uring; |
3229 | |
3230 | /* |
3231 | * io_wq will stay alive while we hold uring_lock, because it's |
3232 | * killed after ctx nodes, which requires to take the lock. |
3233 | */ |
3234 | if (!tctx || !tctx->io_wq) |
3235 | continue; |
		cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
3237 | ret |= (cret != IO_WQ_CANCEL_NOTFOUND); |
3238 | } |
	mutex_unlock(&ctx->uring_lock);
3240 | |
3241 | return ret; |
3242 | } |
3243 | |
3244 | static bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx, |
3245 | struct task_struct *task, bool cancel_all) |
3246 | { |
3247 | struct hlist_node *tmp; |
3248 | struct io_kiocb *req; |
3249 | bool ret = false; |
3250 | |
3251 | lockdep_assert_held(&ctx->uring_lock); |
3252 | |
3253 | hlist_for_each_entry_safe(req, tmp, &ctx->cancelable_uring_cmd, |
3254 | hash_node) { |
3255 | struct io_uring_cmd *cmd = io_kiocb_to_cmd(req, |
3256 | struct io_uring_cmd); |
3257 | struct file *file = req->file; |
3258 | |
3259 | if (!cancel_all && req->task != task) |
3260 | continue; |
3261 | |
3262 | if (cmd->flags & IORING_URING_CMD_CANCELABLE) { |
3263 | /* ->sqe isn't available if no async data */ |
3264 | if (!req_has_async_data(req)) |
3265 | cmd->sqe = NULL; |
3266 | file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL); |
3267 | ret = true; |
3268 | } |
3269 | } |
3270 | io_submit_flush_completions(ctx); |
3271 | |
3272 | return ret; |
3273 | } |
3274 | |
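/*
 * Best-effort sweep of everything @task (or, with a NULL @task, any task)
 * has pending against @ctx: io-wq work, deferred (drain) requests, poll,
 * waitid and futex waits, cancelable uring_cmds and timeouts. Returns true
 * if any progress was made, so callers keep looping until a pass finds
 * nothing left to cancel.
 */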
3275 | static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx, |
3276 | struct task_struct *task, |
3277 | bool cancel_all) |
3278 | { |
3279 | struct io_task_cancel cancel = { .task = task, .all = cancel_all, }; |
3280 | struct io_uring_task *tctx = task ? task->io_uring : NULL; |
3281 | enum io_wq_cancel cret; |
3282 | bool ret = false; |
3283 | |
3284 | /* set it so io_req_local_work_add() would wake us up */ |
3285 | if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) { |
		atomic_set(&ctx->cq_wait_nr, 1);
3287 | smp_mb(); |
3288 | } |
3289 | |
3290 | /* failed during ring init, it couldn't have issued any requests */ |
3291 | if (!ctx->rings) |
3292 | return false; |
3293 | |
3294 | if (!task) { |
3295 | ret |= io_uring_try_cancel_iowq(ctx); |
3296 | } else if (tctx && tctx->io_wq) { |
3297 | /* |
3298 | * Cancels requests of all rings, not only @ctx, but |
3299 | * it's fine as the task is in exit/exec. |
3300 | */ |
		cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
				       &cancel, true);
3303 | ret |= (cret != IO_WQ_CANCEL_NOTFOUND); |
3304 | } |
3305 | |
3306 | /* SQPOLL thread does its own polling */ |
3307 | if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) || |
3308 | (ctx->sq_data && ctx->sq_data->thread == current)) { |
3309 | while (!wq_list_empty(&ctx->iopoll_list)) { |
3310 | io_iopoll_try_reap_events(ctx); |
3311 | ret = true; |
3312 | cond_resched(); |
3313 | } |
3314 | } |
3315 | |
3316 | if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) && |
3317 | io_allowed_defer_tw_run(ctx)) |
3318 | ret |= io_run_local_work(ctx, INT_MAX) > 0; |
3319 | ret |= io_cancel_defer_files(ctx, task, cancel_all); |
3320 | mutex_lock(&ctx->uring_lock); |
	ret |= io_poll_remove_all(ctx, task, cancel_all);
	ret |= io_waitid_remove_all(ctx, task, cancel_all);
	ret |= io_futex_remove_all(ctx, task, cancel_all);
	ret |= io_uring_try_cancel_uring_cmd(ctx, task, cancel_all);
	mutex_unlock(&ctx->uring_lock);
	ret |= io_kill_timeouts(ctx, task, cancel_all);
3327 | if (task) |
3328 | ret |= io_run_task_work() > 0; |
3329 | return ret; |
3330 | } |
3331 | |
3332 | static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked) |
3333 | { |
3334 | if (tracked) |
		return atomic_read(&tctx->inflight_tracked);
	return percpu_counter_sum(&tctx->inflight);
3337 | } |
3338 | |
3339 | /* |
3340 | * Find any io_uring ctx that this task has registered or done IO on, and cancel |
 * requests. @sqd must be non-NULL iff this is an SQPOLL thread cancellation.
3342 | */ |
3343 | __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd) |
3344 | { |
3345 | struct io_uring_task *tctx = current->io_uring; |
3346 | struct io_ring_ctx *ctx; |
3347 | struct io_tctx_node *node; |
3348 | unsigned long index; |
3349 | s64 inflight; |
3350 | DEFINE_WAIT(wait); |
3351 | |
3352 | WARN_ON_ONCE(sqd && sqd->thread != current); |
3353 | |
3354 | if (!current->io_uring) |
3355 | return; |
3356 | if (tctx->io_wq) |
		io_wq_exit_start(tctx->io_wq);

	atomic_inc(&tctx->in_cancel);
3360 | do { |
3361 | bool loop = false; |
3362 | |
3363 | io_uring_drop_tctx_refs(current); |
3364 | /* read completions before cancelations */ |
		inflight = tctx_inflight(tctx, !cancel_all);
3366 | if (!inflight) |
3367 | break; |
3368 | |
3369 | if (!sqd) { |
3370 | xa_for_each(&tctx->xa, index, node) { |
3371 | /* sqpoll task will cancel all its requests */ |
3372 | if (node->ctx->sq_data) |
3373 | continue; |
				loop |= io_uring_try_cancel_requests(node->ctx,
								     current, cancel_all);
3376 | } |
3377 | } else { |
3378 | list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) |
3379 | loop |= io_uring_try_cancel_requests(ctx, |
3380 | current, |
3381 | cancel_all); |
3382 | } |
3383 | |
3384 | if (loop) { |
3385 | cond_resched(); |
3386 | continue; |
3387 | } |
3388 | |
		prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
		io_run_task_work();
		io_uring_drop_tctx_refs(current);
		xa_for_each(&tctx->xa, index, node) {
			if (!llist_empty(&node->ctx->work_llist)) {
3394 | WARN_ON_ONCE(node->ctx->submitter_task && |
3395 | node->ctx->submitter_task != current); |
3396 | goto end_wait; |
3397 | } |
3398 | } |
3399 | /* |
3400 | * If we've seen completions, retry without waiting. This |
3401 | * avoids a race where a completion comes in before we did |
3402 | * prepare_to_wait(). |
3403 | */ |
		if (inflight == tctx_inflight(tctx, !cancel_all))
3405 | schedule(); |
3406 | end_wait: |
		finish_wait(&tctx->wait, &wait);
3408 | } while (1); |
3409 | |
3410 | io_uring_clean_tctx(tctx); |
3411 | if (cancel_all) { |
3412 | /* |
3413 | * We shouldn't run task_works after cancel, so just leave |
3414 | * ->in_cancel set for normal exit. |
3415 | */ |
		atomic_dec(&tctx->in_cancel);
3417 | /* for exec all current's requests should be gone, kill tctx */ |
3418 | __io_uring_free(current); |
3419 | } |
3420 | } |
3421 | |
3422 | void __io_uring_cancel(bool cancel_all) |
3423 | { |
3424 | io_uring_cancel_generic(cancel_all, NULL); |
3425 | } |
3426 | |
3427 | static void *io_uring_validate_mmap_request(struct file *file, |
3428 | loff_t pgoff, size_t sz) |
3429 | { |
3430 | struct io_ring_ctx *ctx = file->private_data; |
3431 | loff_t offset = pgoff << PAGE_SHIFT; |
3432 | struct page *page; |
3433 | void *ptr; |
3434 | |
3435 | switch (offset & IORING_OFF_MMAP_MASK) { |
3436 | case IORING_OFF_SQ_RING: |
3437 | case IORING_OFF_CQ_RING: |
3438 | /* Don't allow mmap if the ring was setup without it */ |
3439 | if (ctx->flags & IORING_SETUP_NO_MMAP) |
			return ERR_PTR(-EINVAL);
3441 | ptr = ctx->rings; |
3442 | break; |
3443 | case IORING_OFF_SQES: |
3444 | /* Don't allow mmap if the ring was setup without it */ |
3445 | if (ctx->flags & IORING_SETUP_NO_MMAP) |
			return ERR_PTR(-EINVAL);
3447 | ptr = ctx->sq_sqes; |
3448 | break; |
3449 | case IORING_OFF_PBUF_RING: { |
3450 | struct io_buffer_list *bl; |
3451 | unsigned int bgid; |
3452 | |
3453 | bgid = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT; |
3454 | bl = io_pbuf_get_bl(ctx, bgid); |
		if (IS_ERR(bl))
3456 | return bl; |
3457 | ptr = bl->buf_ring; |
3458 | io_put_bl(ctx, bl); |
3459 | break; |
3460 | } |
3461 | default: |
		return ERR_PTR(-EINVAL);
3463 | } |
3464 | |
	page = virt_to_head_page(ptr);
	if (sz > page_size(page))
		return ERR_PTR(-EINVAL);
3468 | |
3469 | return ptr; |
3470 | } |
3471 | |
3472 | #ifdef CONFIG_MMU |
3473 | |
3474 | static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma) |
3475 | { |
3476 | size_t sz = vma->vm_end - vma->vm_start; |
3477 | unsigned long pfn; |
3478 | void *ptr; |
3479 | |
	ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
3481 | if (IS_ERR(ptr)) |
3482 | return PTR_ERR(ptr); |
3483 | |
	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
3486 | } |
3487 | |
3488 | static unsigned long io_uring_mmu_get_unmapped_area(struct file *filp, |
3489 | unsigned long addr, unsigned long len, |
3490 | unsigned long pgoff, unsigned long flags) |
3491 | { |
3492 | void *ptr; |
3493 | |
3494 | /* |
	 * Do not allow mapping to a user-provided address, to avoid breaking
	 * the aliasing rules. Userspace is not able to guess the address of
	 * a kernel kmalloc()ed memory area anyway.
3498 | */ |
3499 | if (addr) |
3500 | return -EINVAL; |
3501 | |
	ptr = io_uring_validate_mmap_request(filp, pgoff, len);
3503 | if (IS_ERR(ptr)) |
3504 | return -ENOMEM; |
3505 | |
3506 | /* |
3507 | * Some architectures have strong cache aliasing requirements. |
3508 | * For such architectures we need a coherent mapping which aliases |
3509 | * kernel memory *and* userspace memory. To achieve that: |
3510 | * - use a NULL file pointer to reference physical memory, and |
3511 | * - use the kernel virtual address of the shared io_uring context |
3512 | * (instead of the userspace-provided address, which has to be 0UL |
3513 | * anyway). |
	 * - use the same pgoff that get_unmapped_area() uses to
	 *   calculate the page colouring.
3516 | * For architectures without such aliasing requirements, the |
3517 | * architecture will return any suitable mapping because addr is 0. |
3518 | */ |
3519 | filp = NULL; |
3520 | flags |= MAP_SHARED; |
3521 | pgoff = 0; /* has been translated to ptr above */ |
3522 | #ifdef SHM_COLOUR |
3523 | addr = (uintptr_t) ptr; |
3524 | pgoff = addr >> PAGE_SHIFT; |
3525 | #else |
3526 | addr = 0UL; |
3527 | #endif |
3528 | return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); |
3529 | } |
3530 | |
3531 | #else /* !CONFIG_MMU */ |
3532 | |
3533 | static int io_uring_mmap(struct file *file, struct vm_area_struct *vma) |
3534 | { |
3535 | return is_nommu_shared_mapping(vma->vm_flags) ? 0 : -EINVAL; |
3536 | } |
3537 | |
3538 | static unsigned int io_uring_nommu_mmap_capabilities(struct file *file) |
3539 | { |
3540 | return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE; |
3541 | } |
3542 | |
3543 | static unsigned long io_uring_nommu_get_unmapped_area(struct file *file, |
3544 | unsigned long addr, unsigned long len, |
3545 | unsigned long pgoff, unsigned long flags) |
3546 | { |
3547 | void *ptr; |
3548 | |
3549 | ptr = io_uring_validate_mmap_request(file, pgoff, len); |
3550 | if (IS_ERR(ptr)) |
3551 | return PTR_ERR(ptr); |
3552 | |
3553 | return (unsigned long) ptr; |
3554 | } |
3555 | |
3556 | #endif /* !CONFIG_MMU */ |
3557 | |
3558 | static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz) |
3559 | { |
3560 | if (flags & IORING_ENTER_EXT_ARG) { |
3561 | struct io_uring_getevents_arg arg; |
3562 | |
3563 | if (argsz != sizeof(arg)) |
3564 | return -EINVAL; |
		if (copy_from_user(&arg, argp, sizeof(arg)))
3566 | return -EFAULT; |
3567 | } |
3568 | return 0; |
3569 | } |
3570 | |
3571 | static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz, |
3572 | struct __kernel_timespec __user **ts, |
3573 | const sigset_t __user **sig) |
3574 | { |
3575 | struct io_uring_getevents_arg arg; |
3576 | |
3577 | /* |
3578 | * If EXT_ARG isn't set, then we have no timespec and the argp pointer |
3579 | * is just a pointer to the sigset_t. |
3580 | */ |
3581 | if (!(flags & IORING_ENTER_EXT_ARG)) { |
3582 | *sig = (const sigset_t __user *) argp; |
3583 | *ts = NULL; |
3584 | return 0; |
3585 | } |
3586 | |
3587 | /* |
3588 | * EXT_ARG is set - ensure we agree on the size of it and copy in our |
3589 | * timespec and sigset_t pointers if good. |
3590 | */ |
3591 | if (*argsz != sizeof(arg)) |
3592 | return -EINVAL; |
	if (copy_from_user(&arg, argp, sizeof(arg)))
3594 | return -EFAULT; |
3595 | if (arg.pad) |
3596 | return -EINVAL; |
3597 | *sig = u64_to_user_ptr(arg.sigmask); |
3598 | *argsz = arg.sigmask_sz; |
3599 | *ts = u64_to_user_ptr(arg.ts); |
3600 | return 0; |
3601 | } |
3602 | |
3603 | SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, |
3604 | u32, min_complete, u32, flags, const void __user *, argp, |
3605 | size_t, argsz) |
3606 | { |
3607 | struct io_ring_ctx *ctx; |
3608 | struct file *file; |
3609 | long ret; |
3610 | |
3611 | if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP | |
3612 | IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG | |
3613 | IORING_ENTER_REGISTERED_RING))) |
3614 | return -EINVAL; |
3615 | |
3616 | /* |
	 * The ring fd has been registered via IORING_REGISTER_RING_FDS, so we
	 * need only dereference our task-private array to find it.
3619 | */ |
3620 | if (flags & IORING_ENTER_REGISTERED_RING) { |
3621 | struct io_uring_task *tctx = current->io_uring; |
3622 | |
3623 | if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX)) |
3624 | return -EINVAL; |
3625 | fd = array_index_nospec(fd, IO_RINGFD_REG_MAX); |
3626 | file = tctx->registered_rings[fd]; |
3627 | if (unlikely(!file)) |
3628 | return -EBADF; |
3629 | } else { |
3630 | file = fget(fd); |
3631 | if (unlikely(!file)) |
3632 | return -EBADF; |
3633 | ret = -EOPNOTSUPP; |
3634 | if (unlikely(!io_is_uring_fops(file))) |
3635 | goto out; |
3636 | } |
3637 | |
3638 | ctx = file->private_data; |
3639 | ret = -EBADFD; |
3640 | if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED)) |
3641 | goto out; |
3642 | |
3643 | /* |
3644 | * For SQ polling, the thread will do all submissions and completions. |
3645 | * Just return the requested submit count, and wake the thread if |
3646 | * we were asked to. |
3647 | */ |
3648 | ret = 0; |
3649 | if (ctx->flags & IORING_SETUP_SQPOLL) { |
3650 | io_cqring_overflow_flush(ctx); |
3651 | |
3652 | if (unlikely(ctx->sq_data->thread == NULL)) { |
3653 | ret = -EOWNERDEAD; |
3654 | goto out; |
3655 | } |
3656 | if (flags & IORING_ENTER_SQ_WAKEUP) |
3657 | wake_up(&ctx->sq_data->wait); |
3658 | if (flags & IORING_ENTER_SQ_WAIT) |
3659 | io_sqpoll_wait_sq(ctx); |
3660 | |
3661 | ret = to_submit; |
3662 | } else if (to_submit) { |
3663 | ret = io_uring_add_tctx_node(ctx); |
3664 | if (unlikely(ret)) |
3665 | goto out; |
3666 | |
3667 | mutex_lock(&ctx->uring_lock); |
		ret = io_submit_sqes(ctx, to_submit);
		if (ret != to_submit) {
			mutex_unlock(&ctx->uring_lock);
3671 | goto out; |
3672 | } |
3673 | if (flags & IORING_ENTER_GETEVENTS) { |
3674 | if (ctx->syscall_iopoll) |
3675 | goto iopoll_locked; |
3676 | /* |
3677 | * Ignore errors, we'll soon call io_cqring_wait() and |
3678 | * it should handle ownership problems if any. |
3679 | */ |
3680 | if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) |
				(void)io_run_local_work_locked(ctx, min_complete);
		}
		mutex_unlock(&ctx->uring_lock);
3684 | } |
3685 | |
3686 | if (flags & IORING_ENTER_GETEVENTS) { |
3687 | int ret2; |
3688 | |
3689 | if (ctx->syscall_iopoll) { |
3690 | /* |
3691 | * We disallow the app entering submit/complete with |
3692 | * polling, but we still need to lock the ring to |
3693 | * prevent racing with polled issue that got punted to |
3694 | * a workqueue. |
3695 | */ |
3696 | mutex_lock(&ctx->uring_lock); |
3697 | iopoll_locked: |
3698 | ret2 = io_validate_ext_arg(flags, argp, argsz); |
3699 | if (likely(!ret2)) { |
3700 | min_complete = min(min_complete, |
3701 | ctx->cq_entries); |
				ret2 = io_iopoll_check(ctx, min_complete);
			}
			mutex_unlock(&ctx->uring_lock);
3705 | } else { |
3706 | const sigset_t __user *sig; |
3707 | struct __kernel_timespec __user *ts; |
3708 | |
			ret2 = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
3710 | if (likely(!ret2)) { |
3711 | min_complete = min(min_complete, |
3712 | ctx->cq_entries); |
				ret2 = io_cqring_wait(ctx, min_complete, sig,
						      argsz, ts);
3715 | } |
3716 | } |
3717 | |
3718 | if (!ret) { |
3719 | ret = ret2; |
3720 | |
3721 | /* |
			 * EBADR indicates that one or more CQEs were dropped.
3723 | * Once the user has been informed we can clear the bit |
3724 | * as they are obviously ok with those drops. |
3725 | */ |
3726 | if (unlikely(ret2 == -EBADR)) |
				clear_bit(IO_CHECK_CQ_DROPPED_BIT,
					  &ctx->check_cq);
3729 | } |
3730 | } |
3731 | out: |
3732 | if (!(flags & IORING_ENTER_REGISTERED_RING)) |
3733 | fput(file); |
3734 | return ret; |
3735 | } |
3736 | |
3737 | static const struct file_operations io_uring_fops = { |
3738 | .release = io_uring_release, |
3739 | .mmap = io_uring_mmap, |
3740 | #ifndef CONFIG_MMU |
3741 | .get_unmapped_area = io_uring_nommu_get_unmapped_area, |
3742 | .mmap_capabilities = io_uring_nommu_mmap_capabilities, |
3743 | #else |
3744 | .get_unmapped_area = io_uring_mmu_get_unmapped_area, |
3745 | #endif |
3746 | .poll = io_uring_poll, |
3747 | #ifdef CONFIG_PROC_FS |
3748 | .show_fdinfo = io_uring_show_fdinfo, |
3749 | #endif |
3750 | }; |
3751 | |
3752 | bool io_is_uring_fops(struct file *file) |
3753 | { |
3754 | return file->f_op == &io_uring_fops; |
3755 | } |
3756 | |
3757 | static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx, |
3758 | struct io_uring_params *p) |
3759 | { |
3760 | struct io_rings *rings; |
3761 | size_t size, sq_array_offset; |
3762 | void *ptr; |
3763 | |
3764 | /* make sure these are sane, as we already accounted them */ |
3765 | ctx->sq_entries = p->sq_entries; |
3766 | ctx->cq_entries = p->cq_entries; |
3767 | |
	size = rings_size(ctx, p->sq_entries, p->cq_entries, &sq_array_offset);
3769 | if (size == SIZE_MAX) |
3770 | return -EOVERFLOW; |
3771 | |
3772 | if (!(ctx->flags & IORING_SETUP_NO_MMAP)) |
3773 | rings = io_mem_alloc(size); |
3774 | else |
		rings = io_rings_map(ctx, p->cq_off.user_addr, size);

	if (IS_ERR(rings))
		return PTR_ERR(rings);
3779 | |
3780 | ctx->rings = rings; |
3781 | if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) |
3782 | ctx->sq_array = (u32 *)((char *)rings + sq_array_offset); |
3783 | rings->sq_ring_mask = p->sq_entries - 1; |
3784 | rings->cq_ring_mask = p->cq_entries - 1; |
3785 | rings->sq_ring_entries = p->sq_entries; |
3786 | rings->cq_ring_entries = p->cq_entries; |
3787 | |
3788 | if (p->flags & IORING_SETUP_SQE128) |
3789 | size = array_size(2 * sizeof(struct io_uring_sqe), p->sq_entries); |
3790 | else |
3791 | size = array_size(sizeof(struct io_uring_sqe), p->sq_entries); |
3792 | if (size == SIZE_MAX) { |
3793 | io_rings_free(ctx); |
3794 | return -EOVERFLOW; |
3795 | } |
3796 | |
3797 | if (!(ctx->flags & IORING_SETUP_NO_MMAP)) |
3798 | ptr = io_mem_alloc(size); |
3799 | else |
		ptr = io_sqes_map(ctx, p->sq_off.user_addr, size);
3801 | |
3802 | if (IS_ERR(ptr)) { |
3803 | io_rings_free(ctx); |
3804 | return PTR_ERR(ptr); |
3805 | } |
3806 | |
3807 | ctx->sq_sqes = ptr; |
3808 | return 0; |
3809 | } |
3810 | |
3811 | static int io_uring_install_fd(struct file *file) |
3812 | { |
3813 | int fd; |
3814 | |
3815 | fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC); |
3816 | if (fd < 0) |
3817 | return fd; |
3818 | fd_install(fd, file); |
3819 | return fd; |
3820 | } |
3821 | |
3822 | /* |
 * Allocate an anonymous fd; this is what constitutes the application
 * visible backing of an io_uring instance. The application mmaps this
3825 | * fd to gain access to the SQ/CQ ring details. |
3826 | */ |
3827 | static struct file *io_uring_get_file(struct io_ring_ctx *ctx) |
3828 | { |
3829 | /* Create a new inode so that the LSM can block the creation. */ |
	return anon_inode_create_getfile("[io_uring]", &io_uring_fops, ctx,
					 O_RDWR | O_CLOEXEC, NULL);
3832 | } |
3833 | |
3834 | static __cold int io_uring_create(unsigned entries, struct io_uring_params *p, |
3835 | struct io_uring_params __user *params) |
3836 | { |
3837 | struct io_ring_ctx *ctx; |
3838 | struct io_uring_task *tctx; |
3839 | struct file *file; |
3840 | int ret; |
3841 | |
3842 | if (!entries) |
3843 | return -EINVAL; |
3844 | if (entries > IORING_MAX_ENTRIES) { |
3845 | if (!(p->flags & IORING_SETUP_CLAMP)) |
3846 | return -EINVAL; |
3847 | entries = IORING_MAX_ENTRIES; |
3848 | } |
3849 | |
3850 | if ((p->flags & IORING_SETUP_REGISTERED_FD_ONLY) |
3851 | && !(p->flags & IORING_SETUP_NO_MMAP)) |
3852 | return -EINVAL; |
3853 | |
3854 | /* |
3855 | * Use twice as many entries for the CQ ring. It's possible for the |
3856 | * application to drive a higher depth than the size of the SQ ring, |
3857 | * since the sqes are only used at submission time. This allows for |
3858 | * some flexibility in overcommitting a bit. If the application has |
3859 | * set IORING_SETUP_CQSIZE, it will have passed in the desired number |
3860 | * of CQ ring entries manually. |
3861 | */ |
3862 | p->sq_entries = roundup_pow_of_two(entries); |
3863 | if (p->flags & IORING_SETUP_CQSIZE) { |
3864 | /* |
3865 | * If IORING_SETUP_CQSIZE is set, we do the same roundup |
3866 | * to a power-of-two, if it isn't already. We do NOT impose |
3867 | * any cq vs sq ring sizing. |
3868 | */ |
3869 | if (!p->cq_entries) |
3870 | return -EINVAL; |
3871 | if (p->cq_entries > IORING_MAX_CQ_ENTRIES) { |
3872 | if (!(p->flags & IORING_SETUP_CLAMP)) |
3873 | return -EINVAL; |
3874 | p->cq_entries = IORING_MAX_CQ_ENTRIES; |
3875 | } |
3876 | p->cq_entries = roundup_pow_of_two(p->cq_entries); |
3877 | if (p->cq_entries < p->sq_entries) |
3878 | return -EINVAL; |
3879 | } else { |
3880 | p->cq_entries = 2 * p->sq_entries; |
3881 | } |
3882 | |
3883 | ctx = io_ring_ctx_alloc(p); |
3884 | if (!ctx) |
3885 | return -ENOMEM; |
3886 | |
3887 | if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) && |
3888 | !(ctx->flags & IORING_SETUP_IOPOLL) && |
3889 | !(ctx->flags & IORING_SETUP_SQPOLL)) |
3890 | ctx->task_complete = true; |
3891 | |
3892 | if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL)) |
3893 | ctx->lockless_cq = true; |
3894 | |
3895 | /* |
3896 | * lazy poll_wq activation relies on ->task_complete for synchronisation |
3897 | * purposes, see io_activate_pollwq() |
3898 | */ |
3899 | if (!ctx->task_complete) |
3900 | ctx->poll_activated = true; |
3901 | |
3902 | /* |
	 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user space
	 * applications don't need to poll for completion events themselves;
	 * they can rely on io_sq_thread to do that work, which reduces CPU
	 * usage and uring_lock contention.
3907 | */ |
3908 | if (ctx->flags & IORING_SETUP_IOPOLL && |
3909 | !(ctx->flags & IORING_SETUP_SQPOLL)) |
3910 | ctx->syscall_iopoll = 1; |
3911 | |
3912 | ctx->compat = in_compat_syscall(); |
	if (!ns_capable_noaudit(&init_user_ns, CAP_IPC_LOCK))
3914 | ctx->user = get_uid(current_user()); |
3915 | |
3916 | /* |
3917 | * For SQPOLL, we just need a wakeup, always. For !SQPOLL, if |
3918 | * COOP_TASKRUN is set, then IPIs are never needed by the app. |
3919 | */ |
3920 | ret = -EINVAL; |
3921 | if (ctx->flags & IORING_SETUP_SQPOLL) { |
3922 | /* IPI related flags don't make sense with SQPOLL */ |
3923 | if (ctx->flags & (IORING_SETUP_COOP_TASKRUN | |
3924 | IORING_SETUP_TASKRUN_FLAG | |
3925 | IORING_SETUP_DEFER_TASKRUN)) |
3926 | goto err; |
3927 | ctx->notify_method = TWA_SIGNAL_NO_IPI; |
3928 | } else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) { |
3929 | ctx->notify_method = TWA_SIGNAL_NO_IPI; |
3930 | } else { |
3931 | if (ctx->flags & IORING_SETUP_TASKRUN_FLAG && |
3932 | !(ctx->flags & IORING_SETUP_DEFER_TASKRUN)) |
3933 | goto err; |
3934 | ctx->notify_method = TWA_SIGNAL; |
3935 | } |
3936 | |
3937 | /* |
3938 | * For DEFER_TASKRUN we require the completion task to be the same as the |
3939 | * submission task. This implies that there is only one submitter, so enforce |
3940 | * that. |
3941 | */ |
3942 | if (ctx->flags & IORING_SETUP_DEFER_TASKRUN && |
3943 | !(ctx->flags & IORING_SETUP_SINGLE_ISSUER)) { |
3944 | goto err; |
3945 | } |
3946 | |
3947 | /* |
3948 | * This is just grabbed for accounting purposes. When a process exits, |
3949 | * the mm is exited and dropped before the files, hence we need to hang |
3950 | * on to this mm purely for the purposes of being able to unaccount |
3951 | * memory (locked/pinned vm). It's not used for anything else. |
3952 | */ |
3953 | mmgrab(current->mm); |
3954 | ctx->mm_account = current->mm; |
3955 | |
3956 | ret = io_allocate_scq_urings(ctx, p); |
3957 | if (ret) |
3958 | goto err; |
3959 | |
3960 | ret = io_sq_offload_create(ctx, p); |
3961 | if (ret) |
3962 | goto err; |
3963 | |
3964 | ret = io_rsrc_init(ctx); |
3965 | if (ret) |
3966 | goto err; |
3967 | |
3968 | p->sq_off.head = offsetof(struct io_rings, sq.head); |
3969 | p->sq_off.tail = offsetof(struct io_rings, sq.tail); |
3970 | p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask); |
3971 | p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries); |
3972 | p->sq_off.flags = offsetof(struct io_rings, sq_flags); |
3973 | p->sq_off.dropped = offsetof(struct io_rings, sq_dropped); |
3974 | if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) |
3975 | p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings; |
3976 | p->sq_off.resv1 = 0; |
3977 | if (!(ctx->flags & IORING_SETUP_NO_MMAP)) |
3978 | p->sq_off.user_addr = 0; |
3979 | |
3980 | p->cq_off.head = offsetof(struct io_rings, cq.head); |
3981 | p->cq_off.tail = offsetof(struct io_rings, cq.tail); |
3982 | p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask); |
3983 | p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries); |
3984 | p->cq_off.overflow = offsetof(struct io_rings, cq_overflow); |
3985 | p->cq_off.cqes = offsetof(struct io_rings, cqes); |
3986 | p->cq_off.flags = offsetof(struct io_rings, cq_flags); |
3987 | p->cq_off.resv1 = 0; |
3988 | if (!(ctx->flags & IORING_SETUP_NO_MMAP)) |
3989 | p->cq_off.user_addr = 0; |
3990 | |
3991 | p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP | |
3992 | IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS | |
3993 | IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL | |
3994 | IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED | |
3995 | IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS | |
3996 | IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP | |
3997 | IORING_FEAT_LINKED_FILE | IORING_FEAT_REG_REG_RING; |
3998 | |
	if (copy_to_user(params, p, sizeof(*p))) {
4000 | ret = -EFAULT; |
4001 | goto err; |
4002 | } |
4003 | |
4004 | if (ctx->flags & IORING_SETUP_SINGLE_ISSUER |
4005 | && !(ctx->flags & IORING_SETUP_R_DISABLED)) |
4006 | WRITE_ONCE(ctx->submitter_task, get_task_struct(current)); |
4007 | |
4008 | file = io_uring_get_file(ctx); |
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
4011 | goto err; |
4012 | } |
4013 | |
4014 | ret = __io_uring_add_tctx_node(ctx); |
4015 | if (ret) |
4016 | goto err_fput; |
4017 | tctx = current->io_uring; |
4018 | |
4019 | /* |
4020 | * Install ring fd as the very last thing, so we don't risk someone |
4021 | * having closed it before we finish setup |
4022 | */ |
4023 | if (p->flags & IORING_SETUP_REGISTERED_FD_ONLY) |
		ret = io_ring_add_registered_file(tctx, file, 0, IO_RINGFD_REG_MAX);
4025 | else |
4026 | ret = io_uring_install_fd(file); |
4027 | if (ret < 0) |
4028 | goto err_fput; |
4029 | |
	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
4031 | return ret; |
4032 | err: |
4033 | io_ring_ctx_wait_and_kill(ctx); |
4034 | return ret; |
4035 | err_fput: |
4036 | fput(file); |
4037 | return ret; |
4038 | } |
4039 | |
4040 | /* |
 * Sets up an aio uring context, and returns the fd. The application asks for a
4042 | * ring size, we return the actual sq/cq ring sizes (among other things) in the |
4043 | * params structure passed in. |
4044 | */ |
4045 | static long io_uring_setup(u32 entries, struct io_uring_params __user *params) |
4046 | { |
4047 | struct io_uring_params p; |
4048 | int i; |
4049 | |
	if (copy_from_user(&p, params, sizeof(p)))
4051 | return -EFAULT; |
4052 | for (i = 0; i < ARRAY_SIZE(p.resv); i++) { |
4053 | if (p.resv[i]) |
4054 | return -EINVAL; |
4055 | } |
4056 | |
4057 | if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL | |
4058 | IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE | |
4059 | IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ | |
4060 | IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL | |
4061 | IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG | |
4062 | IORING_SETUP_SQE128 | IORING_SETUP_CQE32 | |
4063 | IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN | |
4064 | IORING_SETUP_NO_MMAP | IORING_SETUP_REGISTERED_FD_ONLY | |
4065 | IORING_SETUP_NO_SQARRAY)) |
4066 | return -EINVAL; |
4067 | |
	return io_uring_create(entries, &p, params);
4069 | } |
4070 | |
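/*
 * kernel.io_uring_disabled sysctl semantics, as implemented below:
 *   0 - io_uring_setup() is allowed for everyone (the historical default)
 *   1 - allowed only for CAP_SYS_ADMIN or for members of the group named
 *       by kernel.io_uring_group (when that gid is valid)
 *   2 - io_uring_setup() fails with -EPERM for everyone
 */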
4071 | static inline bool io_uring_allowed(void) |
4072 | { |
4073 | int disabled = READ_ONCE(sysctl_io_uring_disabled); |
4074 | kgid_t io_uring_group; |
4075 | |
4076 | if (disabled == 2) |
4077 | return false; |
4078 | |
4079 | if (disabled == 0 || capable(CAP_SYS_ADMIN)) |
4080 | return true; |
4081 | |
	io_uring_group = make_kgid(&init_user_ns, sysctl_io_uring_group);
	if (!gid_valid(io_uring_group))
4084 | return false; |
4085 | |
4086 | return in_group_p(io_uring_group); |
4087 | } |
4088 | |
4089 | SYSCALL_DEFINE2(io_uring_setup, u32, entries, |
4090 | struct io_uring_params __user *, params) |
4091 | { |
4092 | if (!io_uring_allowed()) |
4093 | return -EPERM; |
4094 | |
4095 | return io_uring_setup(entries, params); |
4096 | } |
4097 | |
4098 | static int __init io_uring_init(void) |
4099 | { |
4100 | #define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \ |
4101 | BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \ |
4102 | BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \ |
4103 | } while (0) |
4104 | |
4105 | #define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \ |
4106 | __BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, sizeof(etype), ename) |
4107 | #define BUILD_BUG_SQE_ELEM_SIZE(eoffset, esize, ename) \ |
4108 | __BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, esize, ename) |
4109 | BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64); |
4110 | BUILD_BUG_SQE_ELEM(0, __u8, opcode); |
4111 | BUILD_BUG_SQE_ELEM(1, __u8, flags); |
4112 | BUILD_BUG_SQE_ELEM(2, __u16, ioprio); |
4113 | BUILD_BUG_SQE_ELEM(4, __s32, fd); |
4114 | BUILD_BUG_SQE_ELEM(8, __u64, off); |
4115 | BUILD_BUG_SQE_ELEM(8, __u64, addr2); |
4116 | BUILD_BUG_SQE_ELEM(8, __u32, cmd_op); |
4117 | BUILD_BUG_SQE_ELEM(12, __u32, __pad1); |
4118 | BUILD_BUG_SQE_ELEM(16, __u64, addr); |
4119 | BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in); |
4120 | BUILD_BUG_SQE_ELEM(24, __u32, len); |
4121 | BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags); |
4122 | BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags); |
4123 | BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags); |
4124 | BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags); |
4125 | BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events); |
4126 | BUILD_BUG_SQE_ELEM(28, __u32, poll32_events); |
4127 | BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags); |
4128 | BUILD_BUG_SQE_ELEM(28, __u32, msg_flags); |
4129 | BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags); |
4130 | BUILD_BUG_SQE_ELEM(28, __u32, accept_flags); |
4131 | BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags); |
4132 | BUILD_BUG_SQE_ELEM(28, __u32, open_flags); |
4133 | BUILD_BUG_SQE_ELEM(28, __u32, statx_flags); |
4134 | BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice); |
4135 | BUILD_BUG_SQE_ELEM(28, __u32, splice_flags); |
4136 | BUILD_BUG_SQE_ELEM(28, __u32, rename_flags); |
4137 | BUILD_BUG_SQE_ELEM(28, __u32, unlink_flags); |
4138 | BUILD_BUG_SQE_ELEM(28, __u32, hardlink_flags); |
4139 | BUILD_BUG_SQE_ELEM(28, __u32, xattr_flags); |
4140 | BUILD_BUG_SQE_ELEM(28, __u32, msg_ring_flags); |
4141 | BUILD_BUG_SQE_ELEM(32, __u64, user_data); |
4142 | BUILD_BUG_SQE_ELEM(40, __u16, buf_index); |
4143 | BUILD_BUG_SQE_ELEM(40, __u16, buf_group); |
4144 | BUILD_BUG_SQE_ELEM(42, __u16, personality); |
4145 | BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in); |
4146 | BUILD_BUG_SQE_ELEM(44, __u32, file_index); |
4147 | BUILD_BUG_SQE_ELEM(44, __u16, addr_len); |
4148 | BUILD_BUG_SQE_ELEM(46, __u16, __pad3[0]); |
4149 | BUILD_BUG_SQE_ELEM(48, __u64, addr3); |
4150 | BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd); |
4151 | BUILD_BUG_SQE_ELEM(56, __u64, __pad2); |
4152 | |
4153 | BUILD_BUG_ON(sizeof(struct io_uring_files_update) != |
4154 | sizeof(struct io_uring_rsrc_update)); |
4155 | BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) > |
4156 | sizeof(struct io_uring_rsrc_update2)); |
4157 | |
4158 | /* ->buf_index is u16 */ |
4159 | BUILD_BUG_ON(offsetof(struct io_uring_buf_ring, bufs) != 0); |
4160 | BUILD_BUG_ON(offsetof(struct io_uring_buf, resv) != |
4161 | offsetof(struct io_uring_buf_ring, tail)); |
4162 | |
4163 | /* should fit into one byte */ |
4164 | BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8)); |
4165 | BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8)); |
4166 | BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS); |
4167 | |
4168 | BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof_field(struct io_kiocb, flags)); |
4169 | |
4170 | BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32)); |
4171 | |
4172 | /* top 8bits are for internal use */ |
4173 | BUILD_BUG_ON((IORING_URING_CMD_MASK & 0xff000000) != 0); |
4174 | |
4175 | io_uring_optable_init(); |
4176 | |
4177 | /* |
	 * Allow user copy in the per-command field, which starts after the
	 * file in io_kiocb and runs until the opcode field. The openat2 handling
4180 | * requires copying in user memory into the io_kiocb object in that |
4181 | * range, and HARDENED_USERCOPY will complain if we haven't |
4182 | * correctly annotated this range. |
4183 | */ |
	req_cachep = kmem_cache_create_usercopy("io_kiocb",
				sizeof(struct io_kiocb), 0,
				SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU,
				offsetof(struct io_kiocb, cmd.data),
				sizeof_field(struct io_kiocb, cmd.data), NULL);
4190 | io_buf_cachep = KMEM_CACHE(io_buffer, |
4191 | SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT); |
4192 | |
	iou_wq = alloc_workqueue("iou_exit", WQ_UNBOUND, 64);
4194 | |
4195 | #ifdef CONFIG_SYSCTL |
4196 | register_sysctl_init("kernel" , kernel_io_uring_disabled_table); |
4197 | #endif |
4198 | |
4199 | return 0; |
4200 | }; |
4201 | __initcall(io_uring_init); |
4202 | |