/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/sched.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");
/* Ordinary requests have even IDs, while interrupt IDs are odd */
#define FUSE_INT_REQ_BIT	(1ULL << 0)
#define FUSE_REQ_ID_STEP	(1ULL << 1)
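
/*
 * Illustrative example (not part of the driver logic): fuse_get_unique()
 * hands out 2, 4, 6, ... by stepping the counter by FUSE_REQ_ID_STEP.  The
 * INTERRUPT message for the request with unique ID 42 is sent with unique
 * 42 | FUSE_INT_REQ_BIT == 43, and fuse_req_hash() masks that bit back out
 * so both IDs map to the same processing-queue bucket.
 */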

static struct kmem_cache *fuse_req_cachep;

static struct fuse_dev *fuse_get_dev(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return READ_ONCE(file->private_data);
}

static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
{
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	refcount_set(&req->count, 1);
	__set_bit(FR_PENDING, &req->flags);
	req->fm = fm;
}

static struct fuse_req *fuse_request_alloc(struct fuse_mount *fm, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
	if (req)
		fuse_request_init(fm, req);

	return req;
}

static void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

static void __fuse_get_request(struct fuse_req *req)
{
	refcount_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	refcount_dec(&req->count);
}

void fuse_set_initialized(struct fuse_conn *fc)
{
	/* Make sure stores before this are seen on another CPU */
	smp_wmb();
	fc->initialized = 1;
}
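
/*
 * Sketch of the barrier pairing (illustrative): the smp_wmb() above orders
 * all connection-setup stores before the fc->initialized = 1 store; the
 * matching smp_rmb() in fuse_get_req() runs after fc->initialized has been
 * read (via fuse_block_alloc()), so an allocator that observes
 * initialized == 1 also observes the fully set-up connection.
 */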

static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}

static void fuse_drop_waiting(struct fuse_conn *fc)
{
	/*
	 * lockless check of fc->connected is okay, because atomic_dec_and_test()
	 * provides a memory barrier matched with the one in fuse_wait_aborted()
	 * to ensure no wake-up is missed.
	 */
	if (atomic_dec_and_test(&fc->num_waiting) &&
	    !READ_ONCE(fc->connected)) {
		/* wake up aborters */
		wake_up_all(&fc->blocked_waitq);
	}
}

static void fuse_put_request(struct fuse_req *req);

static struct fuse_req *fuse_get_req(struct fuse_mount *fm, bool for_background)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_req *req;
	int err;
	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		err = -EINTR;
		if (wait_event_killable_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background)))
			goto out;
	}
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	err = -ECONNREFUSED;
	if (fc->conn_error)
		goto out;

	req = fuse_request_alloc(fm, GFP_KERNEL);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
	req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);

	__set_bit(FR_WAITING, &req->flags);
	if (for_background)
		__set_bit(FR_BACKGROUND, &req->flags);

	if (unlikely(req->in.h.uid == ((uid_t)-1) ||
		     req->in.h.gid == ((gid_t)-1))) {
		fuse_put_request(req);
		return ERR_PTR(-EOVERFLOW);
	}
	return req;

 out:
	fuse_drop_waiting(fc);
	return ERR_PTR(err);
}

static void fuse_put_request(struct fuse_req *req)
{
	struct fuse_conn *fc = req->fm->fc;

	if (refcount_dec_and_test(&req->count)) {
		if (test_bit(FR_BACKGROUND, &req->flags)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->bg_lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->bg_lock);
		}

		if (test_bit(FR_WAITING, &req->flags)) {
			__clear_bit(FR_WAITING, &req->flags);
			fuse_drop_waiting(fc);
		}

		fuse_request_free(req);
	}
}

unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}
EXPORT_SYMBOL_GPL(fuse_len_args);

u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
	fiq->reqctr += FUSE_REQ_ID_STEP;
	return fiq->reqctr;
}
EXPORT_SYMBOL_GPL(fuse_get_unique);

static unsigned int fuse_req_hash(u64 unique)
{
	return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
}

/*
 * A new request is available, wake fiq->waitq
 */
static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
	wake_up(&fiq->waitq);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	spin_unlock(&fiq->lock);
}

const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
	.wake_forget_and_unlock		= fuse_dev_wake_and_unlock,
	.wake_interrupt_and_unlock	= fuse_dev_wake_and_unlock,
	.wake_pending_and_unlock	= fuse_dev_wake_and_unlock,
};
EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);
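
/*
 * fuse_iqueue_ops lets a transport other than /dev/fuse supply its own
 * queueing path; virtiofs, for example, kicks a virtqueue instead of waking
 * fiq->waitq.  A minimal sketch of such an alternative (hypothetical, for
 * illustration only; kick_my_transport is an assumed transport hook):
 *
 *	static void my_wake_and_unlock(struct fuse_iqueue *fiq)
 *	__releases(fiq->lock)
 *	{
 *		spin_unlock(&fiq->lock);
 *		kick_my_transport(fiq->priv);
 *	}
 */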

static void queue_request_and_unlock(struct fuse_iqueue *fiq,
				     struct fuse_req *req)
__releases(fiq->lock)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		fuse_len_args(req->args->in_numargs,
			      (struct fuse_arg *) req->args->in_args);
	list_add_tail(&req->list, &fiq->pending);
	fiq->ops->wake_pending_and_unlock(fiq);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	struct fuse_iqueue *fiq = &fc->iq;

	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fiq->lock);
	if (fiq->connected) {
		fiq->forget_list_tail->next = forget;
		fiq->forget_list_tail = forget;
		fiq->ops->wake_forget_and_unlock(fiq);
	} else {
		kfree(forget);
		spin_unlock(&fiq->lock);
	}
}

static void flush_bg_queue(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		spin_lock(&fiq->lock);
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request_and_unlock(fiq, req);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 */
void fuse_request_end(struct fuse_req *req)
{
	struct fuse_mount *fm = req->fm;
	struct fuse_conn *fc = fm->fc;
	struct fuse_iqueue *fiq = &fc->iq;

	if (test_and_set_bit(FR_FINISHED, &req->flags))
		goto put_request;

	/*
	 * test_and_set_bit() implies smp_mb() between bit
	 * changing and below FR_INTERRUPTED check.  Pairs with
	 * smp_mb() from queue_interrupt().
	 */
	if (test_bit(FR_INTERRUPTED, &req->flags)) {
		spin_lock(&fiq->lock);
		list_del_init(&req->intr_entry);
		spin_unlock(&fiq->lock);
	}
	WARN_ON(test_bit(FR_PENDING, &req->flags));
	WARN_ON(test_bit(FR_SENT, &req->flags));
	if (test_bit(FR_BACKGROUND, &req->flags)) {
		spin_lock(&fc->bg_lock);
		clear_bit(FR_BACKGROUND, &req->flags);
		if (fc->num_background == fc->max_background) {
			fc->blocked = 0;
			wake_up(&fc->blocked_waitq);
		} else if (!fc->blocked) {
			/*
			 * Wake up next waiter, if any.  It's okay to use
			 * waitqueue_active(), as we've already synced up
			 * fc->blocked with waiters with the wake_up() call
			 * above.
			 */
			if (waitqueue_active(&fc->blocked_waitq))
				wake_up(&fc->blocked_waitq);
		}

		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
		spin_unlock(&fc->bg_lock);
	} else {
		/* Wake up waiter sleeping in request_wait_answer() */
		wake_up(&req->waitq);
	}

	if (test_bit(FR_ASYNC, &req->flags))
		req->args->end(fm, req->args, req->out.h.error);
put_request:
	fuse_put_request(req);
}
EXPORT_SYMBOL_GPL(fuse_request_end);

static int queue_interrupt(struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &req->fm->fc->iq;

	spin_lock(&fiq->lock);
	/* Check that the request has actually been marked interrupted */
	if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
		spin_unlock(&fiq->lock);
		return -EINVAL;
	}

	if (list_empty(&req->intr_entry)) {
		list_add_tail(&req->intr_entry, &fiq->interrupts);
		/*
		 * Pairs with smp_mb() implied by test_and_set_bit()
		 * from fuse_request_end().
		 */
		smp_mb();
		if (test_bit(FR_FINISHED, &req->flags)) {
			list_del_init(&req->intr_entry);
			spin_unlock(&fiq->lock);
			return 0;
		}
		fiq->ops->wake_interrupt_and_unlock(fiq);
	} else {
		spin_unlock(&fiq->lock);
	}
	return 0;
}

static void request_wait_answer(struct fuse_req *req)
{
	struct fuse_conn *fc = req->fm->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	int err;

	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		set_bit(FR_INTERRUPTED, &req->flags);
		/* matches barrier in fuse_dev_do_read() */
		smp_mb__after_atomic();
		if (test_bit(FR_SENT, &req->flags))
			queue_interrupt(req);
	}

	if (!test_bit(FR_FORCE, &req->flags)) {
		/* Only fatal signals may interrupt this */
		err = wait_event_killable(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		spin_lock(&fiq->lock);
		/* Request is not yet in userspace, bail out */
		if (test_bit(FR_PENDING, &req->flags)) {
			list_del(&req->list);
			spin_unlock(&fiq->lock);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
		spin_unlock(&fiq->lock);
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}

static void __fuse_request_send(struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &req->fm->fc->iq;

	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
	spin_lock(&fiq->lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->lock);
		req->out.h.error = -ENOTCONN;
	} else {
		req->in.h.unique = fuse_get_unique(fiq);
		/* acquire extra reference, since request is still needed
		   after fuse_request_end() */
		__fuse_get_request(req);
		queue_request_and_unlock(fiq, req);

		request_wait_answer(req);
		/* Pairs with smp_wmb() in fuse_request_end() */
		smp_rmb();
	}
}

static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
	if (fc->minor < 4 && args->opcode == FUSE_STATFS)
		args->out_args[0].size = FUSE_COMPAT_STATFS_SIZE;

	if (fc->minor < 9) {
		switch (args->opcode) {
		case FUSE_LOOKUP:
		case FUSE_CREATE:
		case FUSE_MKNOD:
		case FUSE_MKDIR:
		case FUSE_SYMLINK:
		case FUSE_LINK:
			args->out_args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
			break;
		case FUSE_GETATTR:
		case FUSE_SETATTR:
			args->out_args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
			break;
		}
	}
	if (fc->minor < 12) {
		switch (args->opcode) {
		case FUSE_CREATE:
			args->in_args[0].size = sizeof(struct fuse_open_in);
			break;
		case FUSE_MKNOD:
			args->in_args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
			break;
		}
	}
}
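
/*
 * Example of the compat trimming above (illustrative): a server that
 * negotiated protocol minor < 9 and is replying to FUSE_LOOKUP only
 * understands the old, shorter fuse_entry_out, so the expected reply size
 * is capped at FUSE_COMPAT_ENTRY_OUT_SIZE before the request is sent.
 */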

static void fuse_force_creds(struct fuse_req *req)
{
	struct fuse_conn *fc = req->fm->fc;

	req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
	req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
}

static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
{
	req->in.h.opcode = args->opcode;
	req->in.h.nodeid = args->nodeid;
	req->args = args;
	if (args->is_ext)
		req->in.h.total_extlen = args->in_args[args->ext_idx].size / 8;
	if (args->end)
		__set_bit(FR_ASYNC, &req->flags);
}

ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_req *req;
	ssize_t ret;

	if (args->force) {
		atomic_inc(&fc->num_waiting);
		req = fuse_request_alloc(fm, GFP_KERNEL | __GFP_NOFAIL);

		if (!args->nocreds)
			fuse_force_creds(req);

		__set_bit(FR_WAITING, &req->flags);
		__set_bit(FR_FORCE, &req->flags);
	} else {
		WARN_ON(args->nocreds);
		req = fuse_get_req(fm, false);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
	fuse_adjust_compat(fc, args);
	fuse_args_to_req(req, args);

	if (!args->noreply)
		__set_bit(FR_ISREPLY, &req->flags);
	__fuse_request_send(req);
	ret = req->out.h.error;
	if (!ret && args->out_argvar) {
		BUG_ON(args->out_numargs == 0);
		ret = args->out_args[args->out_numargs - 1].size;
	}
	fuse_put_request(req);

	return ret;
}

static bool fuse_request_queue_background(struct fuse_req *req)
{
	struct fuse_mount *fm = req->fm;
	struct fuse_conn *fc = fm->fc;
	bool queued = false;

	WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__set_bit(FR_ISREPLY, &req->flags);
	spin_lock(&fc->bg_lock);
	if (likely(fc->connected)) {
		fc->num_background++;
		if (fc->num_background == fc->max_background)
			fc->blocked = 1;
		list_add_tail(&req->list, &fc->bg_queue);
		flush_bg_queue(fc);
		queued = true;
	}
	spin_unlock(&fc->bg_lock);

	return queued;
}

int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
			   gfp_t gfp_flags)
{
	struct fuse_req *req;

	if (args->force) {
		WARN_ON(!args->nocreds);
		req = fuse_request_alloc(fm, gfp_flags);
		if (!req)
			return -ENOMEM;
		__set_bit(FR_BACKGROUND, &req->flags);
	} else {
		WARN_ON(args->nocreds);
		req = fuse_get_req(fm, true);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	fuse_args_to_req(req, args);

	if (!fuse_request_queue_background(req)) {
		fuse_put_request(req);
		return -ENOTCONN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_simple_background);
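
/*
 * Typical call pattern (illustrative sketch; my_write_done is a hypothetical
 * callback): the caller fills a struct fuse_args, points args->end at a
 * completion handler, and lets the request run asynchronously.  The end
 * callback fires from fuse_request_end() once the reply arrives:
 *
 *	args->opcode = FUSE_WRITE;
 *	args->end = my_write_done;
 *	err = fuse_simple_background(fm, args, GFP_KERNEL);
 */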

static int fuse_simple_notify_reply(struct fuse_mount *fm,
				    struct fuse_args *args, u64 unique)
{
	struct fuse_req *req;
	struct fuse_iqueue *fiq = &fm->fc->iq;
	int err = 0;

	req = fuse_get_req(fm, false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	__clear_bit(FR_ISREPLY, &req->flags);
	req->in.h.unique = unique;

	fuse_args_to_req(req, args);

	spin_lock(&fiq->lock);
	if (fiq->connected) {
		queue_request_and_unlock(fiq, req);
	} else {
		err = -ENODEV;
		spin_unlock(&fiq->lock);
		fuse_put_request(req);
	}

	return err;
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			set_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while locked, caller is responsible
 * for unlocking and ending the request.
 */
static int unlock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			clear_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	struct iov_iter *iter;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	struct page *pg;
	unsigned len;
	unsigned offset;
	unsigned move_pages:1;
};
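
/*
 * A fuse_copy_state works in one of two modes, selected by how it is set up
 * (a minimal sketch; see fuse_dev_read() and fuse_dev_splice_read() below):
 *
 *	struct fuse_copy_state cs;
 *
 *	fuse_copy_init(&cs, 1, to);	// iov_iter mode: copy via cs->iter
 *
 *	fuse_copy_init(&cs, 1, NULL);	// pipe mode: set cs.pipebufs/cs.pipe,
 *	cs.pipebufs = bufs;		// move or reference pages where
 *	cs.pipe = pipe;			// possible instead of copying
 */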

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct iov_iter *iter)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->iter = iter;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (cs->write)
			buf->len = PAGE_SIZE - cs->len;
		cs->currbuf = NULL;
	} else if (cs->pg) {
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
	}
	cs->pg = NULL;
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	struct page *page;
	int err;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = pipe_buf_confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->pg = buf->page;
			cs->offset = buf->offset;
			cs->len = buf->len;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			if (cs->nr_segs >= cs->pipe->max_usage)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->pg = page;
			cs->offset = 0;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		size_t off;
		err = iov_iter_get_pages2(cs->iter, &page, PAGE_SIZE, 1, &off);
		if (err < 0)
			return err;
		BUG_ON(!err);
		cs->len = err;
		cs->offset = off;
		cs->pg = page;
	}

	return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		void *pgaddr = kmap_local_page(cs->pg);
		void *buf = pgaddr + cs->offset;

		if (cs->write)
			memcpy(buf, *val, ncpy);
		else
			memcpy(*val, buf, ncpy);

		kunmap_local(pgaddr);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->offset += ncpy;
	return ncpy;
}

static int fuse_check_folio(struct folio *folio)
{
	if (folio_mapped(folio) ||
	    folio->mapping != NULL ||
	    (folio->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_workingset |
	       1 << PG_reclaim |
	       1 << PG_waiters |
	       LRU_GEN_MASK | LRU_REFS_MASK))) {
		dump_page(&folio->page, "fuse: trying to steal weird page");
		return 1;
	}
	return 0;
}

static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct folio *oldfolio = page_folio(*pagep);
	struct folio *newfolio;
	struct pipe_buffer *buf = cs->pipebufs;

	folio_get(oldfolio);
	err = unlock_request(cs->req);
	if (err)
		goto out_put_old;

	fuse_copy_finish(cs);

	err = pipe_buf_confirm(cs->pipe, buf);
	if (err)
		goto out_put_old;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (!pipe_buf_try_steal(cs->pipe, buf))
		goto out_fallback;

	newfolio = page_folio(buf->page);

	if (!folio_test_uptodate(newfolio))
		folio_mark_uptodate(newfolio);

	folio_clear_mappedtodisk(newfolio);

	if (fuse_check_folio(newfolio) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(folio_mapped(oldfolio)))
		goto out_fallback_unlock;
	if (WARN_ON(folio_has_private(oldfolio)))
		goto out_fallback_unlock;
	if (WARN_ON(folio_test_dirty(oldfolio) ||
		    folio_test_writeback(oldfolio)))
		goto out_fallback_unlock;
	if (WARN_ON(folio_test_mlocked(oldfolio)))
		goto out_fallback_unlock;

	replace_page_cache_folio(oldfolio, newfolio);

	folio_get(newfolio);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		folio_add_lru(newfolio);

	/*
	 * Release while we have extra ref on stolen page.  Otherwise
	 * anon_pipe_buf_release() might think the page can be reused.
	 */
	pipe_buf_release(cs->pipe, buf);

	err = 0;
	spin_lock(&cs->req->waitq.lock);
	if (test_bit(FR_ABORTED, &cs->req->flags))
		err = -ENOENT;
	else
		*pagep = &newfolio->page;
	spin_unlock(&cs->req->waitq.lock);

	if (err) {
		folio_unlock(newfolio);
		folio_put(newfolio);
		goto out_put_old;
	}

	folio_unlock(oldfolio);
	/* Drop ref for ap->pages[] array */
	folio_put(oldfolio);
	cs->len = 0;

	err = 0;
out_put_old:
	/* Drop ref obtained in this function */
	folio_put(oldfolio);
	return err;

out_fallback_unlock:
	folio_unlock(newfolio);
out_fallback:
	cs->pg = buf->page;
	cs->offset = buf->offset;

	err = lock_request(cs->req);
	if (!err)
		err = 1;

	goto out_put_old;
}

static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;
	int err;

	if (cs->nr_segs >= cs->pipe->max_usage)
		return -EIO;

	get_page(page);
	err = unlock_request(cs->req);
	if (err) {
		put_page(page);
		return err;
	}

	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			/*
			 * Can't control lifetime of pipe buffers, so always
			 * copy user pages.
			 */
			if (cs->req->args->user_pages) {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			} else {
				return fuse_ref_page(cs, page, offset, count);
			}
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_local_page(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_local(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);

	for (i = 0; i < ap->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned int offset = ap->descs[i].offset;
		unsigned int count = min(nbytes, ap->descs[i].length);

		err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int forget_pending(struct fuse_iqueue *fiq)
{
	return fiq->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_iqueue *fiq)
{
	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
		forget_pending(fiq);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
			       struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fiq->lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
	arg.unique = req->in.h.unique;

	spin_unlock(&fiq->lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}
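
/*
 * On the wire, the interrupt assembled above looks like this for a request
 * whose unique ID is 42 (illustrative):
 *
 *	struct fuse_in_header    { .opcode = FUSE_INTERRUPT, .unique = 43, ... }
 *	struct fuse_interrupt_in { .unique = 42 }
 *
 * The interrupt gets its own odd ID (42 | FUSE_INT_REQ_BIT), while the body
 * names the request being interrupted; the reply side masks the bit back out.
 */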

struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
					     unsigned int max,
					     unsigned int *countp)
{
	struct fuse_forget_link *head = fiq->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fiq->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fiq->forget_list_head.next == NULL)
		fiq->forget_list_tail = &fiq->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}
EXPORT_SYMBOL(fuse_dequeue_forget);

static int fuse_read_single_forget(struct fuse_iqueue *fiq,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fiq->lock)
{
	int err;
	struct fuse_forget_link *forget = fuse_dequeue_forget(fiq, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fiq->lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fiq->lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = fuse_dequeue_forget(fiq, max_forgets, &count);
	spin_unlock(&fiq->lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
			    struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fiq->lock)
{
	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fiq, cs, nbytes);
	else
		return fuse_read_batch_forget(fiq, cs, nbytes);
}
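
/*
 * Wire format of a batch forget produced above, for two queued forgets
 * (illustrative):
 *
 *	struct fuse_in_header       { .opcode = FUSE_BATCH_FORGET, ... }
 *	struct fuse_batch_forget_in { .count = 2 }
 *	struct fuse_forget_one      { .nodeid = ..., .nlookup = ... }  (x2)
 */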

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * fuse_request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	ssize_t err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_args *args;
	unsigned reqsize;
	unsigned int hash;

	/*
	 * Require sane minimum read buffer - that has capacity for fixed part
	 * of any request header + negotiated max_write room for data.
	 *
	 * Historically libfuse reserves 4K for fixed header room, but e.g.
	 * GlusterFS reserves only 80 bytes
	 *
	 *	= `sizeof(fuse_in_header) + sizeof(fuse_write_in)`
	 *
	 * which is the absolute minimum any sane filesystem should be using
	 * for header room.
	 */
	if (nbytes < max_t(size_t, FUSE_MIN_READ_BUFFER,
			   sizeof(struct fuse_in_header) +
			   sizeof(struct fuse_write_in) +
			   fc->max_write))
		return -EINVAL;
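	/*
	 * Worked example of the check above (illustrative): with a negotiated
	 * max_write of 1 MiB, the daemon must read with a buffer of at least
	 * 1 MiB + sizeof(fuse_in_header) + sizeof(fuse_write_in) bytes;
	 * anything smaller fails with EINVAL before a request is dequeued.
	 */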

 restart:
	for (;;) {
		spin_lock(&fiq->lock);
		if (!fiq->connected || request_pending(fiq))
			break;
		spin_unlock(&fiq->lock);

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;
		err = wait_event_interruptible_exclusive(fiq->waitq,
				!fiq->connected || request_pending(fiq));
		if (err)
			return err;
	}

	if (!fiq->connected) {
		err = fc->aborted ? -ECONNABORTED : -ENODEV;
		goto err_unlock;
	}

	if (!list_empty(&fiq->interrupts)) {
		req = list_entry(fiq->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fiq, cs, nbytes, req);
	}

	if (forget_pending(fiq)) {
		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
			return fuse_read_forget(fc, fiq, cs, nbytes);

		if (fiq->forget_batch <= -8)
			fiq->forget_batch = 16;
	}

	req = list_entry(fiq->pending.next, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	spin_unlock(&fiq->lock);

	args = req->args;
	reqsize = req->in.h.len;

	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (args->opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		fuse_request_end(req);
		goto restart;
	}
	spin_lock(&fpq->lock);
	/*
	 * Must not put request on fpq->io queue after having been shut down by
	 * fuse_abort_conn()
	 */
	if (!fpq->connected) {
		req->out.h.error = err = -ECONNABORTED;
		goto out_end;
	}
	list_add(&req->list, &fpq->io);
	spin_unlock(&fpq->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h));
	if (!err)
		err = fuse_copy_args(cs, args->in_numargs, args->in_pages,
				     (struct fuse_arg *) args->in_args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected) {
		err = fc->aborted ? -ECONNABORTED : -ENODEV;
		goto out_end;
	}
	if (err) {
		req->out.h.error = -EIO;
		goto out_end;
	}
	if (!test_bit(FR_ISREPLY, &req->flags)) {
		err = reqsize;
		goto out_end;
	}
	hash = fuse_req_hash(req->in.h.unique);
	list_move_tail(&req->list, &fpq->processing[hash]);
	__fuse_get_request(req);
	set_bit(FR_SENT, &req->flags);
	spin_unlock(&fpq->lock);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();
	if (test_bit(FR_INTERRUPTED, &req->flags))
		queue_interrupt(req);
	fuse_put_request(req);

	return reqsize;

out_end:
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);
	fuse_request_end(req);
	return err;

err_unlock:
	spin_unlock(&fiq->lock);
	return err;
}
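
/*
 * From the daemon's side, the read path above is consumed as a simple loop
 * (illustrative userspace sketch; BUFSIZE is assumed to satisfy the size
 * check at the top of fuse_dev_do_read(), error handling omitted):
 *
 *	char buf[BUFSIZE];
 *	for (;;) {
 *		ssize_t n = read(fuse_fd, buf, sizeof(buf));
 *		struct fuse_in_header *in = (struct fuse_in_header *) buf;
 *		// dispatch on in->opcode; echo in->unique back in the
 *		// reply's fuse_out_header
 *	}
 */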

static int fuse_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The fuse device's file's private_data is used to hold
	 * the fuse_conn(ection) when it is mounted, and is used to
	 * keep track of whether the file has been mounted already.
	 */
	file->private_data = NULL;
	return 0;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	if (!user_backed_iter(to))
		return -EINVAL;

	fuse_copy_init(&cs, 1, to);

	return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
}

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int total, ret;
	int page_nr = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(in);

	if (!fud)
		return -EPERM;

	bufs = kvmalloc_array(pipe->max_usage, sizeof(struct pipe_buffer),
			      GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, 1, NULL);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fud, in, &cs, len);
	if (ret < 0)
		goto out;

	if (pipe_occupancy(pipe->head, pipe->tail) + cs.nr_segs > pipe->max_usage) {
		ret = -EIO;
		goto out;
	}

	for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
		/*
		 * Need to be careful about this.  Having buf->ops in module
		 * code can Oops if the buffer persists after module unload.
		 */
		bufs[page_nr].ops = &nosteal_pipe_buf_ops;
		bufs[page_nr].flags = 0;
		ret = add_to_pipe(pipe, &bufs[page_nr++]);
		if (unlikely(ret < 0))
			break;
	}
	if (total)
		ret = total;
out:
	for (; page_nr < cs.nr_segs; page_nr++)
		put_page(bufs[page_nr].page);

	kvfree(bufs);
	return ret;
}

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = fuse_reverse_inval_inode(fc, outarg.ino,
				       outarg.off, outarg.len);
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = fuse_reverse_inval_entry(fc, outarg.parent, 0, &name, outarg.flags);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = fuse_reverse_inval_entry(fc, outarg.parent, outarg.child, &name, 0);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	inode = fuse_ilookup(fc, nodeid, NULL);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_SHIFT;
	offset = outarg.offset & ~PAGE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_attr(inode, file_size, outarg.size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 &&
		    (this_num == PAGE_SIZE || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		put_page(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}

struct fuse_retrieve_args {
	struct fuse_args_pages ap;
	struct fuse_notify_retrieve_in inarg;
};

static void fuse_retrieve_end(struct fuse_mount *fm, struct fuse_args *args,
			      int error)
{
	struct fuse_retrieve_args *ra =
		container_of(args, typeof(*ra), ap.args);

	release_pages(ra->ap.pages, ra->ap.num_pages);
	kfree(ra);
}

static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;
	unsigned int num_pages;
	struct fuse_conn *fc = fm->fc;
	struct fuse_retrieve_args *ra;
	size_t args_size = sizeof(*ra);
	struct fuse_args_pages *ap;
	struct fuse_args *args;

	offset = outarg->offset & ~PAGE_MASK;
	file_size = i_size_read(inode);

	num = min(outarg->size, fc->max_write);
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	num_pages = min(num_pages, fc->max_pages);

	args_size += num_pages * (sizeof(ap->pages[0]) + sizeof(ap->descs[0]));

	ra = kzalloc(args_size, GFP_KERNEL);
	if (!ra)
		return -ENOMEM;

	ap = &ra->ap;
	ap->pages = (void *) (ra + 1);
	ap->descs = (void *) (ap->pages + num_pages);

	args = &ap->args;
	args->nodeid = outarg->nodeid;
	args->opcode = FUSE_NOTIFY_REPLY;
	args->in_numargs = 2;
	args->in_pages = true;
	args->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_SHIFT;

	while (num && ap->num_pages < num_pages) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		ap->pages[ap->num_pages] = page;
		ap->descs[ap->num_pages].offset = offset;
		ap->descs[ap->num_pages].length = this_num;
		ap->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	ra->inarg.offset = outarg->offset;
	ra->inarg.size = total_len;
	args->in_args[0].size = sizeof(ra->inarg);
	args->in_args[0].value = &ra->inarg;
	args->in_args[1].size = total_len;

	err = fuse_simple_notify_reply(fm, args, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fm, args, err);

	return err;
}

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct fuse_mount *fm;
	struct inode *inode;
	u64 nodeid;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	nodeid = outarg.nodeid;

	inode = fuse_ilookup(fc, nodeid, &fm);
	if (inode) {
		err = fuse_retrieve(fm, inode, &outarg);
		iput(inode);
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}

/*
 * Resend all processing queue requests.
 *
 * During a FUSE daemon panic and failover, it is possible for some inflight
 * requests to be lost and never returned.  As a result, applications awaiting
 * replies would become stuck forever.  To address this, we can use
 * notification to trigger resending of these pending requests to the FUSE
 * daemon, ensuring they are properly processed again.
 *
 * Please note that this strategy is applicable only to idempotent requests or
 * if the FUSE daemon takes careful measures to avoid processing duplicated
 * non-idempotent requests.
 */
static void fuse_resend(struct fuse_conn *fc)
{
	struct fuse_dev *fud;
	struct fuse_req *req, *next;
	struct fuse_iqueue *fiq = &fc->iq;
	LIST_HEAD(to_queue);
	unsigned int i;

	spin_lock(&fc->lock);
	if (!fc->connected) {
		spin_unlock(&fc->lock);
		return;
	}

	list_for_each_entry(fud, &fc->devices, entry) {
		struct fuse_pqueue *fpq = &fud->pq;

		spin_lock(&fpq->lock);
		for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
			list_splice_tail_init(&fpq->processing[i], &to_queue);
		spin_unlock(&fpq->lock);
	}
	spin_unlock(&fc->lock);

	list_for_each_entry_safe(req, next, &to_queue, list) {
		__set_bit(FR_PENDING, &req->flags);
		/* mark the request as resend request */
		req->in.h.unique |= FUSE_UNIQUE_RESEND;
	}

	spin_lock(&fiq->lock);
	/* iq and pq requests are both oldest to newest */
	list_splice(&to_queue, &fiq->pending);
	fiq->ops->wake_pending_and_unlock(fiq);
}

static int fuse_notify_resend(struct fuse_conn *fc)
{
	fuse_resend(fc);
	return 0;
}
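
/*
 * A daemon triggers the resend path by writing an unsolicited notification
 * to the device: a bare fuse_out_header with unique == 0 and the notification
 * code in the error field (see fuse_dev_do_write() below).  A minimal
 * userspace sketch (illustrative only; fuse_fd is the /dev/fuse fd):
 *
 *	struct fuse_out_header oh = {
 *		.len    = sizeof(oh),
 *		.error  = FUSE_NOTIFY_RESEND,
 *		.unique = 0,
 *	};
 *	write(fuse_fd, &oh, sizeof(oh));
 */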

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	/* Don't try to move pages (yet) */
	cs->move_pages = 0;

	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	case FUSE_NOTIFY_RESEND:
		return fuse_notify_resend(fc);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
	unsigned int hash = fuse_req_hash(unique);
	struct fuse_req *req;

	list_for_each_entry(req, &fpq->processing[hash], list) {
		if (req->in.h.unique == unique)
			return req;
	}
	return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	reqsize += fuse_len_args(args->out_numargs, args->out_args);

	if (reqsize < nbytes || (reqsize > nbytes && !args->out_argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &args->out_args[args->out_numargs-1];
		unsigned diffsize = reqsize - nbytes;

		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, args->out_numargs, args->out_pages,
			      args->out_args, args->page_zeroing);
}
1900 | |
1901 | /* |
1902 | * Write a single reply to a request. First the header is copied from |
1903 | * the write buffer. The request is then searched on the processing |
1904 | * list by the unique ID found in the header. If found, then remove |
1905 | * it from the list and copy the rest of the buffer to the request. |
1906 | * The request is finished by calling fuse_request_end(). |
1907 | */ |
1908 | static ssize_t fuse_dev_do_write(struct fuse_dev *fud, |
1909 | struct fuse_copy_state *cs, size_t nbytes) |
1910 | { |
1911 | int err; |
1912 | struct fuse_conn *fc = fud->fc; |
1913 | struct fuse_pqueue *fpq = &fud->pq; |
1914 | struct fuse_req *req; |
1915 | struct fuse_out_header oh; |
1916 | |
1917 | err = -EINVAL; |
1918 | if (nbytes < sizeof(struct fuse_out_header)) |
1919 | goto out; |
1920 | |
1921 | err = fuse_copy_one(cs, val: &oh, size: sizeof(oh)); |
1922 | if (err) |
1923 | goto copy_finish; |
1924 | |
1925 | err = -EINVAL; |
1926 | if (oh.len != nbytes) |
1927 | goto copy_finish; |
1928 | |
1929 | /* |
1930 | * Zero oh.unique indicates unsolicited notification message |
1931 | * and error contains notification code. |
1932 | */ |
1933 | if (!oh.unique) { |
1934 | err = fuse_notify(fc, code: oh.error, size: nbytes - sizeof(oh), cs); |
1935 | goto out; |
1936 | } |
1937 | |
1938 | err = -EINVAL; |
1939 | if (oh.error <= -512 || oh.error > 0) |
1940 | goto copy_finish; |
1941 | |
1942 | spin_lock(lock: &fpq->lock); |
1943 | req = NULL; |
1944 | if (fpq->connected) |
1945 | req = request_find(fpq, unique: oh.unique & ~FUSE_INT_REQ_BIT); |
1946 | |
1947 | err = -ENOENT; |
1948 | if (!req) { |
1949 | spin_unlock(lock: &fpq->lock); |
1950 | goto copy_finish; |
1951 | } |
1952 | |
1953 | /* Is it an interrupt reply ID? */ |
1954 | if (oh.unique & FUSE_INT_REQ_BIT) { |
1955 | __fuse_get_request(req); |
1956 | spin_unlock(lock: &fpq->lock); |
1957 | |
1958 | err = 0; |
1959 | if (nbytes != sizeof(struct fuse_out_header)) |
1960 | err = -EINVAL; |
1961 | else if (oh.error == -ENOSYS) |
1962 | fc->no_interrupt = 1; |
1963 | else if (oh.error == -EAGAIN) |
1964 | err = queue_interrupt(req); |
1965 | |
1966 | fuse_put_request(req); |
1967 | |
1968 | goto copy_finish; |
1969 | } |
1970 | |
1971 | clear_bit(nr: FR_SENT, addr: &req->flags); |
1972 | list_move(list: &req->list, head: &fpq->io); |
1973 | req->out.h = oh; |
1974 | set_bit(nr: FR_LOCKED, addr: &req->flags); |
1975 | spin_unlock(lock: &fpq->lock); |
1976 | cs->req = req; |
1977 | if (!req->args->page_replace) |
1978 | cs->move_pages = 0; |
1979 | |
1980 | if (oh.error) |
1981 | err = nbytes != sizeof(oh) ? -EINVAL : 0; |
1982 | else |
1983 | err = copy_out_args(cs, args: req->args, nbytes); |
1984 | fuse_copy_finish(cs); |
1985 | |
1986 | spin_lock(lock: &fpq->lock); |
1987 | clear_bit(nr: FR_LOCKED, addr: &req->flags); |
1988 | if (!fpq->connected) |
1989 | err = -ENOENT; |
1990 | else if (err) |
1991 | req->out.h.error = -EIO; |
1992 | if (!test_bit(FR_PRIVATE, &req->flags)) |
1993 | list_del_init(entry: &req->list); |
1994 | spin_unlock(lock: &fpq->lock); |
1995 | |
1996 | fuse_request_end(req); |
1997 | out: |
1998 | return err ? err : nbytes; |
1999 | |
2000 | copy_finish: |
2001 | fuse_copy_finish(cs); |
2002 | goto out; |
2003 | } |
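
/*
 * Userspace side, for illustration only: replying to a request that was
 * previously read from /dev/fuse. A minimal sketch, assuming in_unique
 * was taken from the matching fuse_in_header and the whole reply goes
 * out in one writev() -- partial writes are rejected above, because
 * oh.len must equal the number of bytes written:
 *
 *	struct fuse_out_header oh = {
 *		.len	= sizeof(oh) + payload_len,
 *		.error	= 0,		// or a negative errno
 *		.unique	= in_unique,
 *	};
 *	struct iovec iov[2] = {
 *		{ &oh, sizeof(oh) },
 *		{ payload, payload_len },
 *	};
 *
 *	if (writev(devfd, iov, 2) != (ssize_t)oh.len)
 *		...handle error...
 */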

static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);

	if (!fud)
		return -EPERM;

	if (!user_backed_iter(from))
		return -EINVAL;

	fuse_copy_init(&cs, 0, from);

	return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
}

static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned int head, tail, mask, count;
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud;
	size_t rem;
	ssize_t ret;

	fud = fuse_get_dev(out);
	if (!fud)
		return -EPERM;

	pipe_lock(pipe);

	head = pipe->head;
	tail = pipe->tail;
	mask = pipe->ring_size - 1;
	count = head - tail;

	bufs = kvmalloc_array(count, sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs) {
		pipe_unlock(pipe);
		return -ENOMEM;
	}

	nbuf = 0;
	rem = 0;
	for (idx = tail; idx != head && rem < len; idx++)
		rem += pipe->bufs[idx & mask].len;

	ret = -EINVAL;
	if (rem < len)
		goto out_free;

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		if (WARN_ON(nbuf >= count || tail == head))
			goto out_free;

		ibuf = &pipe->bufs[tail & mask];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			tail++;
			pipe->tail = tail;
		} else {
			if (!pipe_buf_get(pipe, ibuf))
				goto out_free;

			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, 0, NULL);
	cs.pipebufs = bufs;
	cs.nr_segs = nbuf;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fud, &cs, len);

	pipe_lock(pipe);
out_free:
	for (idx = 0; idx < nbuf; idx++) {
		struct pipe_buffer *buf = &bufs[idx];

		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	pipe_unlock(pipe);

	kvfree(bufs);
	return ret;
}

static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = EPOLLOUT | EPOLLWRNORM;
	struct fuse_iqueue *fiq;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return EPOLLERR;

	fiq = &fud->fc->iq;
	poll_wait(file, &fiq->waitq, wait);

	spin_lock(&fiq->lock);
	if (!fiq->connected)
		mask = EPOLLERR;
	else if (request_pending(fiq))
		mask |= EPOLLIN | EPOLLRDNORM;
	spin_unlock(&fiq->lock);

	return mask;
}
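
/*
 * Userspace side, for illustration only: the device always reports
 * writability, so a daemon only needs to poll for readability to learn
 * when a new request is pending. A minimal sketch using poll(2):
 *
 *	struct pollfd pfd = { .fd = devfd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		...read(devfd, buf, bufsize) to fetch the next request...
 *	if (pfd.revents & POLLERR)
 *		...connection aborted or device not attached...
 */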

/* Abort all requests on the given list (pending or processing) */
static void end_requests(struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		clear_bit(FR_SENT, &req->flags);
		list_del_init(&req->list);
		fuse_request_end(req);
	}
}

static void end_polls(struct fuse_conn *fc)
{
	struct rb_node *p;

	p = rb_first(&fc->polled_files);

	while (p) {
		struct fuse_file *ff;
		ff = rb_entry(p, struct fuse_file, polled_node);
		wake_up_interruptible_all(&ff->poll_wait);

		p = rb_next(p);
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
 * filesystem.
 *
 * The same effect is usually achievable through killing the filesystem daemon
 * and all users of the filesystem. The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
 * Documentation/filesystems/fuse.rst).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests, they should be finished off immediately. Locked requests will be
 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 * requests. It is possible that some request will finish before we can. This
 * is OK, the request will in that case be removed from the list before we
 * touch it.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	spin_lock(&fc->lock);
	if (fc->connected) {
		struct fuse_dev *fud;
		struct fuse_req *req, *next;
		LIST_HEAD(to_end);
		unsigned int i;

		/* Background queuing checks fc->connected under bg_lock */
		spin_lock(&fc->bg_lock);
		fc->connected = 0;
		spin_unlock(&fc->bg_lock);

		fuse_set_initialized(fc);
		list_for_each_entry(fud, &fc->devices, entry) {
			struct fuse_pqueue *fpq = &fud->pq;

			spin_lock(&fpq->lock);
			fpq->connected = 0;
			list_for_each_entry_safe(req, next, &fpq->io, list) {
				req->out.h.error = -ECONNABORTED;
				spin_lock(&req->waitq.lock);
				set_bit(FR_ABORTED, &req->flags);
				if (!test_bit(FR_LOCKED, &req->flags)) {
					set_bit(FR_PRIVATE, &req->flags);
					__fuse_get_request(req);
					list_move(&req->list, &to_end);
				}
				spin_unlock(&req->waitq.lock);
			}
			for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
				list_splice_tail_init(&fpq->processing[i],
						      &to_end);
			spin_unlock(&fpq->lock);
		}
		spin_lock(&fc->bg_lock);
		fc->blocked = 0;
		fc->max_background = UINT_MAX;
		flush_bg_queue(fc);
		spin_unlock(&fc->bg_lock);

		spin_lock(&fiq->lock);
		fiq->connected = 0;
		list_for_each_entry(req, &fiq->pending, list)
			clear_bit(FR_PENDING, &req->flags);
		list_splice_tail_init(&fiq->pending, &to_end);
		while (forget_pending(fiq))
			kfree(fuse_dequeue_forget(fiq, 1, NULL));
		wake_up_all(&fiq->waitq);
		spin_unlock(&fiq->lock);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);

		end_requests(&to_end);
	} else {
		spin_unlock(&fc->lock);
	}
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
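
/*
 * For illustration only: with the fusectl filesystem mounted, an
 * administrator can trigger fuse_abort_conn() for a hung connection from
 * userspace; connection numbers live under /sys/fs/fuse/connections:
 *
 *	echo 1 > /sys/fs/fuse/connections/NNN/abort
 */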

void fuse_wait_aborted(struct fuse_conn *fc)
{
	/* matches implicit memory barrier in fuse_drop_waiting() */
	smp_mb();
	wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
}

int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (fud) {
		struct fuse_conn *fc = fud->fc;
		struct fuse_pqueue *fpq = &fud->pq;
		LIST_HEAD(to_end);
		unsigned int i;

		spin_lock(&fpq->lock);
		WARN_ON(!list_empty(&fpq->io));
		for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
			list_splice_init(&fpq->processing[i], &to_end);
		spin_unlock(&fpq->lock);

		end_requests(&to_end);

		/* Are we the last open device? */
		if (atomic_dec_and_test(&fc->dev_count)) {
			WARN_ON(fc->iq.fasync != NULL);
			fuse_abort_conn(fc);
		}
		fuse_dev_free(fud);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
}

static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
{
	struct fuse_dev *fud;

	if (new->private_data)
		return -EINVAL;

	fud = fuse_dev_alloc_install(fc);
	if (!fud)
		return -ENOMEM;

	new->private_data = fud;
	atomic_inc(&fc->dev_count);

	return 0;
}

static long fuse_dev_ioctl_clone(struct file *file, __u32 __user *argp)
{
	int res;
	int oldfd;
	struct fuse_dev *fud = NULL;
	struct fd f;

	if (get_user(oldfd, argp))
		return -EFAULT;

	f = fdget(oldfd);
	if (!f.file)
		return -EINVAL;

	/*
	 * Check against file->f_op because CUSE
	 * uses the same ioctl handler.
	 */
	if (f.file->f_op == file->f_op)
		fud = fuse_get_dev(f.file);

	res = -EINVAL;
	if (fud) {
		mutex_lock(&fuse_mutex);
		res = fuse_device_clone(fud->fc, file);
		mutex_unlock(&fuse_mutex);
	}

	fdput(f);
	return res;
}
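
/*
 * Userspace side, for illustration only: a multi-threaded daemon can open
 * additional /dev/fuse fds and attach them to an existing session so that
 * each worker thread services its own queue. A minimal sketch, assuming
 * session_fd is the fd of the originally mounted device:
 *
 *	int clone_fd = open("/dev/fuse", O_RDWR | O_CLOEXEC);
 *
 *	if (clone_fd >= 0 &&
 *	    ioctl(clone_fd, FUSE_DEV_IOC_CLONE, &session_fd) == 0)
 *		...clone_fd now reads requests for the same connection...
 */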

static long fuse_dev_ioctl_backing_open(struct file *file,
					struct fuse_backing_map __user *argp)
{
	struct fuse_dev *fud = fuse_get_dev(file);
	struct fuse_backing_map map;

	if (!fud)
		return -EPERM;

	if (!IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
		return -EOPNOTSUPP;

	if (copy_from_user(&map, argp, sizeof(map)))
		return -EFAULT;

	return fuse_backing_open(fud->fc, &map);
}

static long fuse_dev_ioctl_backing_close(struct file *file, __u32 __user *argp)
{
	struct fuse_dev *fud = fuse_get_dev(file);
	int backing_id;

	if (!fud)
		return -EPERM;

	if (!IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
		return -EOPNOTSUPP;

	if (get_user(backing_id, argp))
		return -EFAULT;

	return fuse_backing_close(fud->fc, backing_id);
}
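
/*
 * Userspace side, for illustration only: with CONFIG_FUSE_PASSTHROUGH, a
 * daemon registers a backing file and receives a backing id, which it can
 * later hand back in an open reply so the kernel performs I/O on the
 * backing file directly. A minimal sketch, assuming fd refers to an
 * already-opened backing file:
 *
 *	struct fuse_backing_map map = { .fd = fd };
 *	int backing_id = ioctl(devfd, FUSE_DEV_IOC_BACKING_OPEN, &map);
 *
 *	...use backing_id while replying to FUSE_OPEN...
 *
 *	ioctl(devfd, FUSE_DEV_IOC_BACKING_CLOSE, &backing_id);
 */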

static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case FUSE_DEV_IOC_CLONE:
		return fuse_dev_ioctl_clone(file, argp);

	case FUSE_DEV_IOC_BACKING_OPEN:
		return fuse_dev_ioctl_backing_open(file, argp);

	case FUSE_DEV_IOC_BACKING_CLOSE:
		return fuse_dev_ioctl_backing_close(file, argp);

	default:
		return -ENOTTY;
	}
}

const struct file_operations fuse_dev_operations = {
	.owner = THIS_MODULE,
	.open = fuse_dev_open,
	.llseek = no_llseek,
	.read_iter = fuse_dev_read,
	.splice_read = fuse_dev_splice_read,
	.write_iter = fuse_dev_write,
	.splice_write = fuse_dev_splice_write,
	.poll = fuse_dev_poll,
	.release = fuse_dev_release,
	.fasync = fuse_dev_fasync,
	.unlocked_ioctl = fuse_dev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}