// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK

__be32 nfs4_callback_getattr(void *argp, void *resp,
			     struct cb_process_state *cps)
{
	struct cb_getattrargs *args = argp;
	struct cb_getattrres *res = resp;
	struct nfs_delegation *delegation;
	struct nfs_inode *nfsi;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	res->bitmap[0] = res->bitmap[1] = 0;
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk_rcu("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res->status = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
				-ntohl(res->status));
		goto out;
	}
	nfsi = NFS_I(inode);
	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
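	/* Dirty pages under a write delegation mean the file has changed
	 * beyond what the server last saw, so advertise a newer change
	 * attribute.
	 */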
	if (nfs_have_writebacks(inode))
		res->change_attr++;
	res->ctime = timespec64_to_timespec(inode->i_ctime);
	res->mtime = timespec64_to_timespec(inode->i_mtime);
	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
		args->bitmap[1];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}

__be32 nfs4_callback_recall(void *argp, void *resp,
			    struct cb_process_state *cps)
{
	struct cb_recallargs *args = argp;
	struct inode *inode;
	__be32 res;

	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk_rcu("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
				&args->stateid, -ntohl(res));
		goto out;
	}
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
			&args->stateid, -ntohl(res));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}

#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout inode by stateid
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
		const nfs4_stateid *stateid)
{
	struct nfs_server *server;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			if (!pnfs_layout_is_valid(lo))
				continue;
			if (stateid != NULL &&
			    !nfs4_stateid_match_other(stateid, &lo->plh_stateid))
				continue;
			inode = igrab(lo->plh_inode);
			if (!inode)
				return ERR_PTR(-EAGAIN);
			if (!nfs_sb_active(inode->i_sb)) {
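				/* The final iput() may sleep, so drop the
				 * RCU read lock and clp->cl_lock before
				 * releasing the inode.
				 */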
				rcu_read_unlock();
				spin_unlock(&clp->cl_lock);
				iput(inode);
				spin_lock(&clp->cl_lock);
				rcu_read_lock();
				return ERR_PTR(-EAGAIN);
			}
			return inode;
		}
	}

	return ERR_PTR(-ENOENT);
}

/*
 * Lookup a layout inode by filehandle.
 *
 * Note: returns a refcount on the inode and superblock
 *
 */
static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
		const struct nfs_fh *fh)
{
	struct nfs_server *server;
	struct nfs_inode *nfsi;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			nfsi = NFS_I(lo->plh_inode);
			if (nfs_compare_fh(fh, &nfsi->fh))
				continue;
			if (nfsi->layout != lo)
				continue;
			inode = igrab(lo->plh_inode);
			if (!inode)
				return ERR_PTR(-EAGAIN);
			if (!nfs_sb_active(inode->i_sb)) {
				rcu_read_unlock();
				spin_unlock(&clp->cl_lock);
				iput(inode);
				spin_lock(&clp->cl_lock);
				rcu_read_lock();
				return ERR_PTR(-EAGAIN);
			}
			return inode;
		}
	}

	return ERR_PTR(-ENOENT);
}

static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
		const struct nfs_fh *fh,
		const nfs4_stateid *stateid)
{
	struct inode *inode;

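	/* Try the stateid match first; fall back to a filehandle lookup
	 * for layouts that cannot be matched by stateid.
	 */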
	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	inode = nfs_layout_find_inode_by_stateid(clp, stateid);
	if (inode == ERR_PTR(-ENOENT))
		inode = nfs_layout_find_inode_by_fh(clp, fh);
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	return inode;
}

/*
 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
 */
static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
					const nfs4_stateid *new)
{
	u32 oldseq, newseq;

	/* Is the stateid not initialised? */
	if (!pnfs_layout_is_valid(lo))
		return NFS4ERR_NOMATCHING_LAYOUT;

	/* Mismatched stateid? */
	if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
		return NFS4ERR_BAD_STATEID;

	newseq = be32_to_cpu(new->seqid);
	/* Are we already in a layout recall situation? */
	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
	    lo->plh_return_seq != 0) {
		if (newseq < lo->plh_return_seq)
			return NFS4ERR_OLD_STATEID;
		if (newseq > lo->plh_return_seq)
			return NFS4ERR_DELAY;
		goto out;
	}

	/* Check that the stateid matches what we think it should be. */
	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	if (newseq > oldseq + 1)
		return NFS4ERR_DELAY;
	/* Crazy server! */
	if (newseq <= oldseq)
		return NFS4ERR_OLD_STATEID;
out:
	return NFS_OK;
}

static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	ino = nfs_layout_find_inode(clp, &args->cbl_fh, &args->cbl_stateid);
	if (IS_ERR(ino)) {
		if (ino == ERR_PTR(-EAGAIN))
			rv = NFS4ERR_DELAY;
		goto out_noput;
	}

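	/* Kick off any needed LAYOUTCOMMIT (without waiting for it) before
	 * processing the recall.
	 */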
	pnfs_layoutcommit_inode(ino, false);

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		goto out;
	}
	pnfs_get_layout_hdr(lo);
	rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid);
	if (rv != NFS_OK)
		goto unlock;

	/*
	 * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
	 */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}

	pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
	switch (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
				&args->cbl_range,
				be32_to_cpu(args->cbl_stateid.seqid))) {
	case 0:
	case -EBUSY:
		/* There are layout segments that need to be returned */
		rv = NFS4_OK;
		break;
	case -ENOENT:
		/* Embrace your forgetfulness! */
		rv = NFS4ERR_NOMATCHING_LAYOUT;

		if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
			NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
				&args->cbl_range);
		}
	}
unlock:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	/* Free all lsegs that are attached to commit buckets */
	nfs_commit_inode(ino, 0);
	pnfs_put_layout_hdr(lo);
out:
	nfs_iput_and_deactive(ino);
out_noput:
	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
			&args->cbl_stateid, -rv);
	return rv;
}

static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	int stat;

	if (args->cbl_recall_type == RETURN_FSID)
		stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
	else
		stat = pnfs_destroy_layouts_byclid(clp, true);
	if (stat != 0)
		return NFS4ERR_DELAY;
	return NFS4ERR_NOMATCHING_LAYOUT;
}

static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args)
{
	if (args->cbl_recall_type == RETURN_FILE)
		return initiate_file_draining(clp, args);
	return initiate_bulk_draining(clp, args);
}

__be32 nfs4_callback_layoutrecall(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_layoutrecallargs *args = argp;
	u32 res = NFS4ERR_OP_NOT_IN_SESSION;

	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args);
	return cpu_to_be32(res);
}

static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args);
}

__be32 nfs4_callback_devicenotify(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_devicenotifyargs *args = argp;
	const struct pnfs_layoutdriver_type *ld = NULL;
	uint32_t i;
	__be32 res = 0;

	if (!cps->clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

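		/* Reuse the layout driver across iterations; only look it
		 * up again when the layout type changes.
		 */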
		if (!ld || ld->id != dev->cbd_layout_type) {
			pnfs_put_layoutdriver(ld);
			ld = pnfs_find_layoutdriver(dev->cbd_layout_type);
			if (!ld)
				continue;
		}
		nfs4_delete_deviceid(ld, cps->clp, &dev->cbd_dev_id);
	}
	pnfs_put_layoutdriver(ld);
out:
	kfree(args->devs);
	return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound. The slot's sequence number is
 * updated by the caller once the remaining CB_SEQUENCE checks succeed.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table. The lower layer guarantees
 * a single outstanding callback request at a time.
 */
static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
		const struct cb_sequenceargs *args)
{
	__be32 ret;

	ret = cpu_to_be32(NFS4ERR_BADSLOT);
	if (args->csa_slotid > tbl->server_highest_slotid)
		goto out_err;

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		ret = cpu_to_be32(NFS4ERR_DELAY);
		if (nfs4_test_locked_slot(tbl, slot->slot_nr))
			goto out_err;

		/* Signal process_op to set this error on next op */
		ret = cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP);
		if (args->csa_cachethis == 0)
			goto out_err;

		/* Liar! We never allowed you to set csa_cachethis != 0 */
		ret = cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY);
		goto out_err;
	}

	/* Note: wraparound relies on seq_nr being of type u32 */
	/* Misordered request */
	ret = cpu_to_be32(NFS4ERR_SEQ_MISORDERED);
	if (args->csa_sequenceid != slot->seq_nr + 1)
		goto out_err;

	return cpu_to_be32(NFS4_OK);

out_err:
	trace_nfs4_cb_seqid_err(args, ret);
	return ret;
}

/*
 * For each referring call triple, check the session's slot table for
 * a match. If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static int referring_call_exists(struct nfs_client *clp,
				 uint32_t nrclists,
				 struct referring_call_list *rclists,
				 spinlock_t *lock)
	__releases(lock)
	__acquires(lock)
{
	int status = 0;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];
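			/* nfs4_slot_wait_on_seqid() may sleep, so drop the
			 * caller's slot table lock across the wait.
			 */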
			spin_unlock(lock);
			status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
					ref->rc_sequenceid, HZ >> 1) < 0;
			spin_lock(lock);
			if (status)
				goto out;
		}
	}

out:
	return status;
}

__be32 nfs4_callback_sequence(void *argp, void *resp,
			      struct cb_process_state *cps)
{
	struct cb_sequenceargs *args = argp;
	struct cb_sequenceres *res = resp;
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot;
	struct nfs_client *clp;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
					 &args->csa_sessionid, cps->minorversion);
	if (clp == NULL)
		goto out;

	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
		goto out;

	tbl = &clp->cl_session->bc_slot_table;

	/* Set up res before grabbing the spinlock */
	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out_unlock;
	}

	status = htonl(NFS4ERR_BADSLOT);
	slot = nfs4_lookup_slot(tbl, args->csa_slotid);
	if (IS_ERR(slot))
		goto out_unlock;

	res->csr_highestslotid = tbl->server_highest_slotid;
	res->csr_target_highestslotid = tbl->target_highest_slotid;

	status = validate_seqid(tbl, slot, args);
	if (status)
		goto out_unlock;
	if (!nfs4_try_to_lock_slot(tbl, slot)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}
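	/* Hold on to the slot; it is released later once the compound
	 * reply has been processed.
	 */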
	cps->slot = slot;

	/* The ca_maxresponsesize_cached is 0 with no DRC */
	if (args->csa_cachethis != 0) {
		status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
		goto out_unlock;
	}

	/*
	 * Check for pending referring calls.  If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists,
				&tbl->slot_tbl_lock) < 0) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}

	/*
	 * RFC5661 20.9.3
	 * If CB_SEQUENCE returns an error, then the state of the slot
	 * (sequence ID, cached reply) MUST NOT change.
	 */
	slot->seq_nr = args->csa_sequenceid;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);

out:
	cps->clp = clp; /* put in nfs4_callback_compound */
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	trace_nfs4_cb_sequence(args, res, status);
	return status;
}

static bool
validate_bitmap_values(unsigned int mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

__be32 nfs4_callback_recallany(void *argp, void *resp,
			       struct cb_process_state *cps)
{
	struct cb_recallanyargs *args = argp;
	__be32 status;
	fmode_t flags = 0;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_RDATA_DLG))
		flags = FMODE_READ;
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_WDATA_DLG))
		flags |= FMODE_WRITE;
	if (flags)
		nfs_expire_unused_delegation_types(cps->clp, flags);

	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
		pnfs_recall_all_layouts(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(void *argp, void *resp,
				struct cb_process_state *cps)
{
	struct cb_recallslotargs *args = argp;
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_highest_slotid);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4_OK);

	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
	nfs41_notify_server(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

__be32 nfs4_callback_notify_lock(void *argp, void *resp,
				 struct cb_process_state *cps)
{
	struct cb_notify_lock_args *args = argp;

	if (!cps->clp) /* set in cb_sequence */
		return htonl(NFS4ERR_OP_NOT_IN_SESSION);

	dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	/* Don't wake anybody if the string looked bogus */
	if (args->cbnl_valid)
		__wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args);

	return htonl(NFS4_OK);
}
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
static void nfs4_copy_cb_args(struct nfs4_copy_state *cp_state,
			      struct cb_offloadargs *args)
{
	cp_state->count = args->wr_count;
	cp_state->error = args->error;
	if (!args->error) {
		cp_state->verf.committed = args->wr_writeverf.committed;
		memcpy(&cp_state->verf.verifier.data[0],
			&args->wr_writeverf.verifier.data[0],
			NFS4_VERIFIER_SIZE);
	}
}

__be32 nfs4_callback_offload(void *data, void *dummy,
			     struct cb_process_state *cps)
{
	struct cb_offloadargs *args = data;
	struct nfs_server *server;
	struct nfs4_copy_state *copy, *tmp_copy;
	bool found = false;

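	/* Allocate the stash entry before taking cl_lock, since
	 * kzalloc(GFP_NOFS) may sleep.
	 */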
	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
	if (!copy)
		return htonl(NFS4ERR_SERVERFAULT);

	spin_lock(&cps->clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &cps->clp->cl_superblocks,
				client_link) {
		list_for_each_entry(tmp_copy, &server->ss_copies, copies) {
			if (memcmp(args->coa_stateid.other,
				   tmp_copy->stateid.other,
				   sizeof(args->coa_stateid.other)))
				continue;
			nfs4_copy_cb_args(tmp_copy, args);
			complete(&tmp_copy->completion);
			found = true;
			goto out;
		}
	}
out:
	rcu_read_unlock();
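	/* No matching copy is being tracked yet: stash the callback result
	 * so it can be picked up once the copy state is created.
	 */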
	if (!found) {
		memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE);
		nfs4_copy_cb_args(copy, args);
		list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids);
	} else
		kfree(copy);
	spin_unlock(&cps->clp->cl_lock);

	return 0;
}
#endif /* CONFIG_NFS_V4_2 */