// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */

#include <linux/errno.h>
#include <linux/math.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK

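/*
 * CB_GETATTR: report the attributes we hold under a write delegation
 * (size, change attribute, ctime/mtime), so the server can answer a
 * conflicting GETATTR without recalling the delegation.
 */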
__be32 nfs4_callback_getattr(void *argp, void *resp,
			     struct cb_process_state *cps)
{
	struct cb_getattrargs *args = argp;
	struct cb_getattrres *res = resp;
	struct nfs_delegation *delegation;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	res->bitmap[0] = res->bitmap[1] = 0;
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk_rcu("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res->status = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
				      -ntohl(res->status));
		goto out;
	}
	rcu_read_lock();
	delegation = nfs4_get_valid_delegation(inode);
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
	if (nfs_have_writebacks(inode))
		res->change_attr++;
	res->ctime = inode_get_ctime(inode);
	res->mtime = inode_get_mtime(inode);
	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
		args->bitmap[1];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}

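/*
 * CB_RECALL: the server wants a delegation back. Find the inode by
 * filehandle and hand the actual return off to an asynchronous helper.
 */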
__be32 nfs4_callback_recall(void *argp, void *resp,
			    struct cb_process_state *cps)
{
	struct cb_recallargs *args = argp;
	struct inode *inode;
	__be32 res;

	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk_rcu("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
				     &args->stateid, -ntohl(res));
		goto out;
	}
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
			     &args->stateid, -ntohl(res));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}

#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout inode by stateid
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
		const nfs4_stateid *stateid)
	__must_hold(RCU)
{
	struct nfs_server *server;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
			if (!pnfs_layout_is_valid(lo))
				continue;
			if (!nfs4_stateid_match_other(stateid, &lo->plh_stateid))
				continue;
			if (nfs_sb_active(server->super))
				inode = igrab(lo->plh_inode);
			else
				inode = ERR_PTR(-EAGAIN);
			rcu_read_unlock();
			if (inode)
				return inode;
			nfs_sb_deactive(server->super);
			return ERR_PTR(-EAGAIN);
		}
	}
	rcu_read_unlock();
	return ERR_PTR(-ENOENT);
}

/*
 * Lookup a layout inode by filehandle.
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
		const struct nfs_fh *fh)
{
	struct nfs_server *server;
	struct nfs_inode *nfsi;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
			nfsi = NFS_I(lo->plh_inode);
			if (nfs_compare_fh(fh, &nfsi->fh))
				continue;
			if (nfsi->layout != lo)
				continue;
			if (nfs_sb_active(server->super))
				inode = igrab(lo->plh_inode);
			else
				inode = ERR_PTR(-EAGAIN);
			rcu_read_unlock();
			if (inode)
				return inode;
			nfs_sb_deactive(server->super);
			return ERR_PTR(-EAGAIN);
		}
	}
	rcu_read_unlock();
	return ERR_PTR(-ENOENT);
}

static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
		const struct nfs_fh *fh,
		const nfs4_stateid *stateid)
{
	struct inode *inode;

	inode = nfs_layout_find_inode_by_stateid(clp, stateid);
	if (inode == ERR_PTR(-ENOENT))
		inode = nfs_layout_find_inode_by_fh(clp, fh);
	return inode;
}

/*
 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
 */
static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
				       const nfs4_stateid *new,
				       struct cb_process_state *cps)
{
	u32 oldseq, newseq;

	/* Is the stateid not initialised? */
	if (!pnfs_layout_is_valid(lo))
		return NFS4ERR_NOMATCHING_LAYOUT;

	/* Mismatched stateid? */
	if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
		return NFS4ERR_BAD_STATEID;

	newseq = be32_to_cpu(new->seqid);
	/* Are we already in a layout recall situation? */
	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
		return NFS4ERR_DELAY;

	/*
	 * Check that the stateid matches what we think it should be.
	 * Note that if the server sent us a list of referring calls,
	 * and we know that those have completed, then we trust the
	 * stateid argument is correct.
	 */
	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	if (newseq > oldseq + 1 && !cps->referring_calls)
		return NFS4ERR_DELAY;

	/* Crazy server! */
	if (newseq <= oldseq)
		return NFS4ERR_OLD_STATEID;

	return NFS_OK;
}

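/*
 * Handle CB_LAYOUTRECALL(FILE): validate the recall stateid, then mark
 * matching layout segments for return and free any that can be dropped
 * immediately.
 */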
static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args,
				  struct cb_process_state *cps)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	ino = nfs_layout_find_inode(clp, &args->cbl_fh, &args->cbl_stateid);
	if (IS_ERR(ino)) {
		if (ino == ERR_PTR(-EAGAIN))
			rv = NFS4ERR_DELAY;
		goto out_noput;
	}

	pnfs_layoutcommit_inode(ino, false);

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		goto out;
	}
	pnfs_get_layout_hdr(lo);
	rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid, cps);
	if (rv != NFS_OK)
		goto unlock;

	/*
	 * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
	 */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}

	pnfs_set_layout_stateid(lo, &args->cbl_stateid, NULL, true);
	switch (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
				&args->cbl_range,
				be32_to_cpu(args->cbl_stateid.seqid))) {
	case 0:
	case -EBUSY:
		/* There are layout segments that need to be returned */
		rv = NFS4_OK;
		break;
	case -ENOENT:
		set_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags);
		/* Embrace your forgetfulness! */
		rv = NFS4ERR_NOMATCHING_LAYOUT;

		if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
			NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
				&args->cbl_range);
		}
	}
unlock:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	/* Free all lsegs that are attached to commit buckets */
	nfs_commit_inode(ino, 0);
	pnfs_put_layout_hdr(lo);
out:
	nfs_iput_and_deactive(ino);
out_noput:
	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
					&args->cbl_stateid, -rv);
	return rv;
}

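/*
 * Handle CB_LAYOUTRECALL(FSID) and CB_LAYOUTRECALL(ALL) by dropping all
 * affected layouts, then reply NFS4ERR_NOMATCHING_LAYOUT to tell the
 * server that nothing was kept.
 */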
static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	int stat;

	if (args->cbl_recall_type == RETURN_FSID)
		stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
	else
		stat = pnfs_destroy_layouts_byclid(clp, true);
	if (stat != 0)
		return NFS4ERR_DELAY;
	return NFS4ERR_NOMATCHING_LAYOUT;
}

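/* Dispatch a CB_LAYOUTRECALL according to its recall type */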
static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args,
				    struct cb_process_state *cps)
{
	if (args->cbl_recall_type == RETURN_FILE)
		return initiate_file_draining(clp, args, cps);
	return initiate_bulk_draining(clp, args);
}

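/* Entry point for the CB_LAYOUTRECALL operation */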
__be32 nfs4_callback_layoutrecall(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_layoutrecallargs *args = argp;
	u32 res = NFS4ERR_OP_NOT_IN_SESSION;

	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args, cps);
	return cpu_to_be32(res);
}

static void pnfs_recall_all_layouts(struct nfs_client *clp,
				    struct cb_process_state *cps)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args, cps);
}

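/*
 * CB_NOTIFY_DEVICEID: purge each device ID named by the server from the
 * matching layout driver's device ID cache.
 */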
__be32 nfs4_callback_devicenotify(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_devicenotifyargs *args = argp;
	const struct pnfs_layoutdriver_type *ld = NULL;
	uint32_t i;
	__be32 res = 0;

	if (!cps->clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

		if (!ld || ld->id != dev->cbd_layout_type) {
			pnfs_put_layoutdriver(ld);
			ld = pnfs_find_layoutdriver(dev->cbd_layout_type);
			if (!ld)
				continue;
		}
		nfs4_delete_deviceid(ld, cps->clp, &dev->cbd_dev_id);
	}
	pnfs_put_layoutdriver(ld);
out:
	kfree(args->devs);
	return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound. Increments the slot's sequence.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table. The lower layer guarantees
 * a single outstanding callback request at a time.
 */
static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
		const struct cb_sequenceargs *args)
{
	__be32 ret;

	ret = cpu_to_be32(NFS4ERR_BADSLOT);
	if (args->csa_slotid > tbl->server_highest_slotid)
		goto out_err;

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		ret = cpu_to_be32(NFS4ERR_DELAY);
		if (nfs4_test_locked_slot(tbl, slot->slot_nr))
			goto out_err;

		/* Signal process_op to set this error on next op */
		ret = cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP);
		if (args->csa_cachethis == 0)
			goto out_err;

		/* Liar! We never allowed you to set csa_cachethis != 0 */
		ret = cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY);
		goto out_err;
	}

	/* Note: wraparound relies on seq_nr being of type u32 */
	/* Misordered request */
	ret = cpu_to_be32(NFS4ERR_SEQ_MISORDERED);
	if (args->csa_sequenceid != slot->seq_nr + 1)
		goto out_err;

	return cpu_to_be32(NFS4_OK);

out_err:
	trace_nfs4_cb_seqid_err(args, ret);
	return ret;
}

/*
 * For each referring call triple, check the session's slot table for
 * a match. If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static int referring_call_exists(struct nfs_client *clp,
				 uint32_t nrclists,
				 struct referring_call_list *rclists,
				 spinlock_t *lock)
	__releases(lock)
	__acquires(lock)
{
	int status = 0;
	int found = 0;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];
			spin_unlock(lock);
			status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
					ref->rc_sequenceid, HZ >> 1) < 0;
			spin_lock(lock);
			if (status)
				goto out;
			found++;
		}
	}

out:
	return status < 0 ? status : found;
}

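/*
 * CB_SEQUENCE: validate the session and slot, screen out replays and
 * misordered requests, and account for any referring calls before the
 * remaining operations in the compound are processed.
 */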
__be32 nfs4_callback_sequence(void *argp, void *resp,
			      struct cb_process_state *cps)
{
	struct cb_sequenceargs *args = argp;
	struct cb_sequenceres *res = resp;
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot;
	struct nfs_client *clp;
	int ret;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
					 &args->csa_sessionid, cps->minorversion);
	if (clp == NULL)
		goto out;

	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
		goto out;

	tbl = &clp->cl_session->bc_slot_table;

	/* Set up res before grabbing the spinlock */
	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out_unlock;
	}

	status = htonl(NFS4ERR_BADSLOT);
	slot = nfs4_lookup_slot(tbl, args->csa_slotid);
	if (IS_ERR(slot))
		goto out_unlock;

	res->csr_highestslotid = tbl->server_highest_slotid;
	res->csr_target_highestslotid = tbl->target_highest_slotid;

	status = validate_seqid(tbl, slot, args);
	if (status)
		goto out_unlock;
	if (!nfs4_try_to_lock_slot(tbl, slot)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}
	cps->slot = slot;

	/* The ca_maxresponsesize_cached is 0 with no DRC */
	if (args->csa_cachethis != 0) {
		status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
		goto out_unlock;
	}

	/*
	 * Check for pending referring calls. If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	ret = referring_call_exists(clp, args->csa_nrclists, args->csa_rclists,
				    &tbl->slot_tbl_lock);
	if (ret < 0) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}
	cps->referring_calls = ret;

	/*
	 * RFC5661 20.9.3
	 * If CB_SEQUENCE returns an error, then the state of the slot
	 * (sequence ID, cached reply) MUST NOT change.
	 */
	slot->seq_nr = args->csa_sequenceid;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);

out:
	cps->clp = clp; /* put in nfs4_callback_compound */
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	trace_nfs4_cb_sequence(args, res, status);
	return status;
}

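/* Accept only the RCA4 mask bits that we recognize */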
static bool
validate_bitmap_values(unsigned int mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

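/*
 * CB_RECALL_ANY: the server is short on resources and asks the client
 * to return some number of delegations and/or layouts of the indicated
 * types.
 */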
__be32 nfs4_callback_recallany(void *argp, void *resp,
			       struct cb_process_state *cps)
{
	struct cb_recallanyargs *args = argp;
	__be32 status;
	fmode_t flags = 0;
	bool schedule_manager = false;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_RDATA_DLG))
		flags = FMODE_READ;
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_WDATA_DLG))
		flags |= FMODE_WRITE;
	if (flags)
		nfs_expire_unused_delegation_types(cps->clp, flags);

	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
		pnfs_recall_all_layouts(cps->clp, cps);

	if (args->craa_type_mask & BIT(PNFS_FF_RCA4_TYPE_MASK_READ)) {
		set_bit(NFS4CLNT_RECALL_ANY_LAYOUT_READ, &cps->clp->cl_state);
		schedule_manager = true;
	}
	if (args->craa_type_mask & BIT(PNFS_FF_RCA4_TYPE_MASK_RW)) {
		set_bit(NFS4CLNT_RECALL_ANY_LAYOUT_RW, &cps->clp->cl_state);
		schedule_manager = true;
	}
	if (schedule_manager)
		nfs4_schedule_state_manager(cps->clp);

out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(void *argp, void *resp,
				struct cb_process_state *cps)
{
	struct cb_recallslotargs *args = argp;
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_highest_slotid);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4_OK);

	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
	nfs41_notify_server(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

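/*
 * CB_NOTIFY_LOCK: a byte-range lock we were waiting on may now be
 * available, so wake up the lock waiters for this client.
 */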
__be32 nfs4_callback_notify_lock(void *argp, void *resp,
				 struct cb_process_state *cps)
{
	struct cb_notify_lock_args *args = argp;

	if (!cps->clp) /* set in cb_sequence */
		return htonl(NFS4ERR_OP_NOT_IN_SESSION);

	dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	/* Don't wake anybody if the string looked bogus */
	if (args->cbnl_valid)
		__wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args);

	return htonl(NFS4_OK);
}
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
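/* Record the results of a CB_OFFLOAD in the matching copy state */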
static void nfs4_copy_cb_args(struct nfs4_copy_state *cp_state,
			      struct cb_offloadargs *args)
{
	cp_state->count = args->wr_count;
	cp_state->error = args->error;
	if (!args->error) {
		cp_state->verf.committed = args->wr_writeverf.committed;
		memcpy(&cp_state->verf.verifier.data[0],
		       &args->wr_writeverf.verifier.data[0],
		       NFS4_VERIFIER_SIZE);
	}
}

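/*
 * CB_OFFLOAD: an asynchronous COPY has completed. Match the stateid to
 * a pending copy and complete it, or stash the result until the
 * corresponding COPY reply arrives.
 */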
__be32 nfs4_callback_offload(void *data, void *dummy,
			     struct cb_process_state *cps)
{
	struct cb_offloadargs *args = data;
	struct nfs_server *server;
	struct nfs4_copy_state *copy, *tmp_copy;
	bool found = false;

	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL);
	if (!copy)
		return htonl(NFS4ERR_SERVERFAULT);

	spin_lock(&cps->clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &cps->clp->cl_superblocks,
				client_link) {
		list_for_each_entry(tmp_copy, &server->ss_copies, copies) {
			if (memcmp(args->coa_stateid.other,
				   tmp_copy->stateid.other,
				   sizeof(args->coa_stateid.other)))
				continue;
			nfs4_copy_cb_args(tmp_copy, args);
			complete(&tmp_copy->completion);
			found = true;
			goto out;
		}
	}
out:
	rcu_read_unlock();
	if (!found) {
		memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE);
		nfs4_copy_cb_args(copy, args);
		list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids);
	} else
		kfree(copy);
	spin_unlock(&cps->clp->cl_lock);

	trace_nfs4_cb_offload(&args->coa_fh, &args->coa_stateid,
			      args->wr_count, args->error,
			      args->wr_writeverf.committed);
	return 0;
}
#endif /* CONFIG_NFS_V4_2 */