// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK

__be32 nfs4_callback_getattr(void *argp, void *resp,
			     struct cb_process_state *cps)
{
	struct cb_getattrargs *args = argp;
	struct cb_getattrres *res = resp;
	struct nfs_delegation *delegation;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	res->bitmap[0] = res->bitmap[1] = 0;
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk_rcu("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res->status = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
				-ntohl(res->status));
		goto out;
	}
	rcu_read_lock();
	delegation = nfs4_get_valid_delegation(inode);
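	/* Only a write delegation lets us answer with authoritative attributes */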
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
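	/* Advertise a newer change attribute while we hold dirty pages */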
	if (nfs_have_writebacks(inode))
		res->change_attr++;
	res->ctime = inode->i_ctime;
	res->mtime = inode->i_mtime;
	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
		args->bitmap[1];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}

__be32 nfs4_callback_recall(void *argp, void *resp,
			    struct cb_process_state *cps)
{
	struct cb_recallargs *args = argp;
	struct inode *inode;
	__be32 res;

	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk_rcu("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
				&args->stateid, -ntohl(res));
		goto out;
	}
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
			&args->stateid, -ntohl(res));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}

#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout inode by stateid
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
		const nfs4_stateid *stateid)
	__must_hold(RCU)
{
	struct nfs_server *server;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
			if (!pnfs_layout_is_valid(lo))
				continue;
			if (!nfs4_stateid_match_other(stateid, &lo->plh_stateid))
				continue;
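			/* Pin the superblock before igrab() so the mount can't go away */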
			if (nfs_sb_active(server->super))
				inode = igrab(lo->plh_inode);
			else
				inode = ERR_PTR(-EAGAIN);
			rcu_read_unlock();
			if (inode)
				return inode;
			nfs_sb_deactive(server->super);
			return ERR_PTR(-EAGAIN);
		}
	}
	rcu_read_unlock();
	return ERR_PTR(-ENOENT);
}

/*
 * Lookup a layout inode by filehandle.
 *
 * Note: returns a refcount on the inode and superblock
 *
 */
static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
		const struct nfs_fh *fh)
{
	struct nfs_server *server;
	struct nfs_inode *nfsi;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
			nfsi = NFS_I(lo->plh_inode);
			if (nfs_compare_fh(fh, &nfsi->fh))
				continue;
			if (nfsi->layout != lo)
				continue;
			if (nfs_sb_active(server->super))
				inode = igrab(lo->plh_inode);
			else
				inode = ERR_PTR(-EAGAIN);
			rcu_read_unlock();
			if (inode)
				return inode;
			nfs_sb_deactive(server->super);
			return ERR_PTR(-EAGAIN);
		}
	}
	rcu_read_unlock();
	return ERR_PTR(-ENOENT);
}

static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
		const struct nfs_fh *fh,
		const nfs4_stateid *stateid)
{
	struct inode *inode;

	inode = nfs_layout_find_inode_by_stateid(clp, stateid);
	if (inode == ERR_PTR(-ENOENT))
		inode = nfs_layout_find_inode_by_fh(clp, fh);
	return inode;
}

/*
 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
 */
static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
					const nfs4_stateid *new)
{
	u32 oldseq, newseq;

	/* Is the stateid not initialised? */
	if (!pnfs_layout_is_valid(lo))
		return NFS4ERR_NOMATCHING_LAYOUT;

	/* Mismatched stateid? */
	if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
		return NFS4ERR_BAD_STATEID;

	newseq = be32_to_cpu(new->seqid);
	/* Are we already in a layout recall situation? */
	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
	    lo->plh_return_seq != 0) {
		if (newseq < lo->plh_return_seq)
			return NFS4ERR_OLD_STATEID;
		if (newseq > lo->plh_return_seq)
			return NFS4ERR_DELAY;
		goto out;
	}

	/* Check that the stateid matches what we think it should be. */
	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	if (newseq > oldseq + 1)
		return NFS4ERR_DELAY;
	/* Crazy server! */
	if (newseq <= oldseq)
		return NFS4ERR_OLD_STATEID;
out:
	return NFS_OK;
}

static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	ino = nfs_layout_find_inode(clp, &args->cbl_fh, &args->cbl_stateid);
	if (IS_ERR(ino)) {
		if (ino == ERR_PTR(-EAGAIN))
			rv = NFS4ERR_DELAY;
		goto out_noput;
	}

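	/* Commit any outstanding layout changes before processing the recall */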
	pnfs_layoutcommit_inode(ino, false);

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		goto out;
	}
	pnfs_get_layout_hdr(lo);
	rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid);
	if (rv != NFS_OK)
		goto unlock;

	/*
	 * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
	 */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}

	pnfs_set_layout_stateid(lo, &args->cbl_stateid, NULL, true);
	switch (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
				&args->cbl_range,
				be32_to_cpu(args->cbl_stateid.seqid))) {
	case 0:
	case -EBUSY:
		/* There are layout segments that need to be returned */
		rv = NFS4_OK;
		break;
	case -ENOENT:
		/* Embrace your forgetfulness! */
		rv = NFS4ERR_NOMATCHING_LAYOUT;

		if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
			NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
				&args->cbl_range);
		}
	}
unlock:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	/* Free all lsegs that are attached to commit buckets */
	nfs_commit_inode(ino, 0);
	pnfs_put_layout_hdr(lo);
out:
	nfs_iput_and_deactive(ino);
out_noput:
	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
			&args->cbl_stateid, -rv);
	return rv;
}

static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	int stat;

	if (args->cbl_recall_type == RETURN_FSID)
		stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
	else
		stat = pnfs_destroy_layouts_byclid(clp, true);
	if (stat != 0)
		return NFS4ERR_DELAY;
	return NFS4ERR_NOMATCHING_LAYOUT;
}

static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args)
{
	if (args->cbl_recall_type == RETURN_FILE)
		return initiate_file_draining(clp, args);
	return initiate_bulk_draining(clp, args);
}

__be32 nfs4_callback_layoutrecall(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_layoutrecallargs *args = argp;
	u32 res = NFS4ERR_OP_NOT_IN_SESSION;

	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args);
	return cpu_to_be32(res);
}

static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args);
}

__be32 nfs4_callback_devicenotify(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_devicenotifyargs *args = argp;
	int i;
	__be32 res = 0;
	struct nfs_client *clp = cps->clp;
	struct nfs_server *server = NULL;

	if (!clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

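		/* Reuse the server found for the previous device if the layout type matches */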
		if (!server ||
		    server->pnfs_curr_ld->id != dev->cbd_layout_type) {
			rcu_read_lock();
			list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
				if (server->pnfs_curr_ld &&
				    server->pnfs_curr_ld->id == dev->cbd_layout_type) {
					rcu_read_unlock();
					goto found;
				}
			rcu_read_unlock();
			continue;
		}

	found:
		nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
	}

out:
	kfree(args->devs);
	return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound.  Increments the slot's sequence.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table.  The lower layer guarantees
 * a single outstanding callback request at a time.
 */
static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
		const struct cb_sequenceargs * args)
{
	__be32 ret;

	ret = cpu_to_be32(NFS4ERR_BADSLOT);
	if (args->csa_slotid > tbl->server_highest_slotid)
		goto out_err;

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		ret = cpu_to_be32(NFS4ERR_DELAY);
		if (nfs4_test_locked_slot(tbl, slot->slot_nr))
			goto out_err;

		/* Signal process_op to set this error on next op */
		ret = cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP);
		if (args->csa_cachethis == 0)
			goto out_err;

		/* Liar! We never allowed you to set csa_cachethis != 0 */
		ret = cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY);
		goto out_err;
	}

	/* Note: wraparound relies on seq_nr being of type u32 */
	/* Misordered request */
	ret = cpu_to_be32(NFS4ERR_SEQ_MISORDERED);
	if (args->csa_sequenceid != slot->seq_nr + 1)
		goto out_err;

	return cpu_to_be32(NFS4_OK);

out_err:
	trace_nfs4_cb_seqid_err(args, ret);
	return ret;
}

/*
 * For each referring call triple, check the session's slot table for
 * a match.  If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static int referring_call_exists(struct nfs_client *clp,
				  uint32_t nrclists,
				  struct referring_call_list *rclists,
				  spinlock_t *lock)
	__releases(lock)
	__acquires(lock)
{
	int status = 0;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];
			spin_unlock(lock);
			status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
					ref->rc_sequenceid, HZ >> 1) < 0;
			spin_lock(lock);
			if (status)
				goto out;
		}
	}

out:
	return status;
}

__be32 nfs4_callback_sequence(void *argp, void *resp,
			      struct cb_process_state *cps)
{
	struct cb_sequenceargs *args = argp;
	struct cb_sequenceres *res = resp;
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot;
	struct nfs_client *clp;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
					 &args->csa_sessionid, cps->minorversion);
	if (clp == NULL)
		goto out;

	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
		goto out;

	tbl = &clp->cl_session->bc_slot_table;

	/* Set up res before grabbing the spinlock */
	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out_unlock;
	}

	status = htonl(NFS4ERR_BADSLOT);
	slot = nfs4_lookup_slot(tbl, args->csa_slotid);
	if (IS_ERR(slot))
		goto out_unlock;

	res->csr_highestslotid = tbl->server_highest_slotid;
	res->csr_target_highestslotid = tbl->target_highest_slotid;

	status = validate_seqid(tbl, slot, args);
	if (status)
		goto out_unlock;
	if (!nfs4_try_to_lock_slot(tbl, slot)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}
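	/* Record the locked slot; it is released once the compound completes */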
	cps->slot = slot;

	/* The ca_maxresponsesize_cached is 0 with no DRC */
	if (args->csa_cachethis != 0) {
		status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
		goto out_unlock;
	}

	/*
	 * Check for pending referring calls.  If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists,
				&tbl->slot_tbl_lock) < 0) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}

	/*
	 * RFC5661 20.9.3
	 * If CB_SEQUENCE returns an error, then the state of the slot
	 * (sequence ID, cached reply) MUST NOT change.
	 */
	slot->seq_nr = args->csa_sequenceid;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);

out:
	cps->clp = clp; /* put in nfs4_callback_compound */
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	trace_nfs4_cb_sequence(args, res, status);
	return status;
}

static bool
validate_bitmap_values(unsigned int mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

__be32 nfs4_callback_recallany(void *argp, void *resp,
			       struct cb_process_state *cps)
{
	struct cb_recallanyargs *args = argp;
	__be32 status;
	fmode_t flags = 0;
	bool schedule_manager = false;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_RDATA_DLG))
		flags = FMODE_READ;
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_WDATA_DLG))
		flags |= FMODE_WRITE;
	if (flags)
		nfs_expire_unused_delegation_types(cps->clp, flags);

	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
		pnfs_recall_all_layouts(cps->clp);

	if (args->craa_type_mask & BIT(PNFS_FF_RCA4_TYPE_MASK_READ)) {
		set_bit(NFS4CLNT_RECALL_ANY_LAYOUT_READ, &cps->clp->cl_state);
		schedule_manager = true;
	}
	if (args->craa_type_mask & BIT(PNFS_FF_RCA4_TYPE_MASK_RW)) {
		set_bit(NFS4CLNT_RECALL_ANY_LAYOUT_RW, &cps->clp->cl_state);
		schedule_manager = true;
	}
	if (schedule_manager)
		nfs4_schedule_state_manager(cps->clp);

out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(void *argp, void *resp,
				struct cb_process_state *cps)
{
	struct cb_recallslotargs *args = argp;
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_highest_slotid);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4_OK);

	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
	nfs41_notify_server(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

__be32 nfs4_callback_notify_lock(void *argp, void *resp,
				 struct cb_process_state *cps)
{
	struct cb_notify_lock_args *args = argp;

	if (!cps->clp) /* set in cb_sequence */
		return htonl(NFS4ERR_OP_NOT_IN_SESSION);

	dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	/* Don't wake anybody if the string looked bogus */
	if (args->cbnl_valid)
		__wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args);

	return htonl(NFS4_OK);
}
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
static void nfs4_copy_cb_args(struct nfs4_copy_state *cp_state,
				struct cb_offloadargs *args)
{
	cp_state->count = args->wr_count;
	cp_state->error = args->error;
	if (!args->error) {
		cp_state->verf.committed = args->wr_writeverf.committed;
		memcpy(&cp_state->verf.verifier.data[0],
			&args->wr_writeverf.verifier.data[0],
			NFS4_VERIFIER_SIZE);
	}
}

__be32 nfs4_callback_offload(void *data, void *dummy,
			     struct cb_process_state *cps)
{
	struct cb_offloadargs *args = data;
	struct nfs_server *server;
	struct nfs4_copy_state *copy, *tmp_copy;
	bool found = false;

	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
	if (!copy)
		return htonl(NFS4ERR_SERVERFAULT);

	spin_lock(&cps->clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &cps->clp->cl_superblocks,
				client_link) {
		list_for_each_entry(tmp_copy, &server->ss_copies, copies) {
			if (memcmp(args->coa_stateid.other,
					tmp_copy->stateid.other,
					sizeof(args->coa_stateid.other)))
				continue;
			nfs4_copy_cb_args(tmp_copy, args);
			complete(&tmp_copy->completion);
			found = true;
			goto out;
		}
	}
out:
	rcu_read_unlock();
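	/* The CB_OFFLOAD arrived before the COPY reply; stash the result */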
	if (!found) {
		memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE);
		nfs4_copy_cb_args(copy, args);
		list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids);
	} else
		kfree(copy);
	spin_unlock(&cps->clp->cl_lock);

	return 0;
}
#endif /* CONFIG_NFS_V4_2 */