1 /*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/string.h>
42 #include <linux/ratelimit.h>
43 #include <linux/printk.h>
44 #include <linux/slab.h>
45 #include <linux/sunrpc/clnt.h>
46 #include <linux/nfs.h>
47 #include <linux/nfs4.h>
48 #include <linux/nfs_fs.h>
49 #include <linux/nfs_page.h>
50 #include <linux/nfs_mount.h>
51 #include <linux/namei.h>
52 #include <linux/mount.h>
53 #include <linux/module.h>
54 #include <linux/xattr.h>
55 #include <linux/utsname.h>
56 #include <linux/freezer.h>
57 #include <linux/iversion.h>
58
59 #include "nfs4_fs.h"
60 #include "delegation.h"
61 #include "internal.h"
62 #include "iostat.h"
63 #include "callback.h"
64 #include "pnfs.h"
65 #include "netns.h"
66 #include "sysfs.h"
67 #include "nfs4idmap.h"
68 #include "nfs4session.h"
69 #include "fscache.h"
70 #include "nfs42.h"
71
72 #include "nfs4trace.h"
73
74 #define NFSDBG_FACILITY NFSDBG_PROC
75
76 #define NFS4_BITMASK_SZ 3
77
78 #define NFS4_POLL_RETRY_MIN (HZ/10)
79 #define NFS4_POLL_RETRY_MAX (15*HZ)
80
81 /* file attributes which can be mapped to nfs attributes */
82 #define NFS4_VALID_ATTRS (ATTR_MODE \
83 | ATTR_UID \
84 | ATTR_GID \
85 | ATTR_SIZE \
86 | ATTR_ATIME \
87 | ATTR_MTIME \
88 | ATTR_CTIME \
89 | ATTR_ATIME_SET \
90 | ATTR_MTIME_SET)
91
92 struct nfs4_opendata;
93 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
94 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
95 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
96 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label, struct inode *inode);
97 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
98 struct nfs_fattr *fattr, struct iattr *sattr,
99 struct nfs_open_context *ctx, struct nfs4_label *ilabel,
100 struct nfs4_label *olabel);
101 #ifdef CONFIG_NFS_V4_1
102 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
103 const struct cred *cred,
104 struct nfs4_slot *slot,
105 bool is_privileged);
106 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
107 const struct cred *);
108 static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
109 const struct cred *, bool);
110 #endif
111 static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ],
112 const __u32 *src, struct inode *inode,
113 struct nfs_server *server,
114 struct nfs4_label *label);
115
116 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
117 static inline struct nfs4_label *
118 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
119 struct iattr *sattr, struct nfs4_label *label)
120 {
121 int err;
122
123 if (label == NULL)
124 return NULL;
125
126 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
127 return NULL;
128
129 label->lfs = 0;
130 label->pi = 0;
131 label->len = 0;
132 label->label = NULL;
133
134 err = security_dentry_init_security(dentry, sattr->ia_mode,
135 &dentry->d_name, (void **)&label->label, &label->len);
136 if (err == 0)
137 return label;
138
139 return NULL;
140 }
141 static inline void
142 nfs4_label_release_security(struct nfs4_label *label)
143 {
144 if (label)
145 security_release_secctx(label->label, label->len);
146 }
147 static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
148 {
149 if (label)
150 return server->attr_bitmask;
151
152 return server->attr_bitmask_nl;
153 }
154 #else
155 static inline struct nfs4_label *
156 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
157 struct iattr *sattr, struct nfs4_label *l)
158 { return NULL; }
159 static inline void
160 nfs4_label_release_security(struct nfs4_label *label)
161 { return; }
162 static inline u32 *
163 nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
164 { return server->attr_bitmask; }
165 #endif
166
167 /* Prevent leaks of NFSv4 errors into userland */
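/*
 * Errors with err >= -1000 are assumed to already be local errno values
 * and are passed through untouched; NFSv4-specific codes (e.g.
 * -NFS4ERR_RESOURCE) are translated to a reasonable POSIX equivalent,
 * and anything unrecognised falls back to -EIO.
 */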
168 static int nfs4_map_errors(int err)
169 {
170 if (err >= -1000)
171 return err;
172 switch (err) {
173 case -NFS4ERR_RESOURCE:
174 case -NFS4ERR_LAYOUTTRYLATER:
175 case -NFS4ERR_RECALLCONFLICT:
176 case -NFS4ERR_RETURNCONFLICT:
177 return -EREMOTEIO;
178 case -NFS4ERR_WRONGSEC:
179 case -NFS4ERR_WRONG_CRED:
180 return -EPERM;
181 case -NFS4ERR_BADOWNER:
182 case -NFS4ERR_BADNAME:
183 return -EINVAL;
184 case -NFS4ERR_SHARE_DENIED:
185 return -EACCES;
186 case -NFS4ERR_MINOR_VERS_MISMATCH:
187 return -EPROTONOSUPPORT;
188 case -NFS4ERR_FILE_OPEN:
189 return -EBUSY;
190 case -NFS4ERR_NOT_SAME:
191 return -ENOTSYNC;
192 default:
193 dprintk("%s could not handle NFSv4 error %d\n",
194 __func__, -err);
195 break;
196 }
197 return -EIO;
198 }
199
200 /*
201 * This is our standard bitmap for GETATTR requests.
202 */
203 const u32 nfs4_fattr_bitmap[3] = {
204 FATTR4_WORD0_TYPE
205 | FATTR4_WORD0_CHANGE
206 | FATTR4_WORD0_SIZE
207 | FATTR4_WORD0_FSID
208 | FATTR4_WORD0_FILEID,
209 FATTR4_WORD1_MODE
210 | FATTR4_WORD1_NUMLINKS
211 | FATTR4_WORD1_OWNER
212 | FATTR4_WORD1_OWNER_GROUP
213 | FATTR4_WORD1_RAWDEV
214 | FATTR4_WORD1_SPACE_USED
215 | FATTR4_WORD1_TIME_ACCESS
216 | FATTR4_WORD1_TIME_METADATA
217 | FATTR4_WORD1_TIME_MODIFY
218 | FATTR4_WORD1_MOUNTED_ON_FILEID,
219 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
220 FATTR4_WORD2_SECURITY_LABEL
221 #endif
222 };
223
224 static const u32 nfs4_pnfs_open_bitmap[3] = {
225 FATTR4_WORD0_TYPE
226 | FATTR4_WORD0_CHANGE
227 | FATTR4_WORD0_SIZE
228 | FATTR4_WORD0_FSID
229 | FATTR4_WORD0_FILEID,
230 FATTR4_WORD1_MODE
231 | FATTR4_WORD1_NUMLINKS
232 | FATTR4_WORD1_OWNER
233 | FATTR4_WORD1_OWNER_GROUP
234 | FATTR4_WORD1_RAWDEV
235 | FATTR4_WORD1_SPACE_USED
236 | FATTR4_WORD1_TIME_ACCESS
237 | FATTR4_WORD1_TIME_METADATA
238 | FATTR4_WORD1_TIME_MODIFY,
239 FATTR4_WORD2_MDSTHRESHOLD
240 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
241 | FATTR4_WORD2_SECURITY_LABEL
242 #endif
243 };
244
245 static const u32 nfs4_open_noattr_bitmap[3] = {
246 FATTR4_WORD0_TYPE
247 | FATTR4_WORD0_FILEID,
248 };
249
250 const u32 nfs4_statfs_bitmap[3] = {
251 FATTR4_WORD0_FILES_AVAIL
252 | FATTR4_WORD0_FILES_FREE
253 | FATTR4_WORD0_FILES_TOTAL,
254 FATTR4_WORD1_SPACE_AVAIL
255 | FATTR4_WORD1_SPACE_FREE
256 | FATTR4_WORD1_SPACE_TOTAL
257 };
258
259 const u32 nfs4_pathconf_bitmap[3] = {
260 FATTR4_WORD0_MAXLINK
261 | FATTR4_WORD0_MAXNAME,
262 0
263 };
264
265 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
266 | FATTR4_WORD0_MAXREAD
267 | FATTR4_WORD0_MAXWRITE
268 | FATTR4_WORD0_LEASE_TIME,
269 FATTR4_WORD1_TIME_DELTA
270 | FATTR4_WORD1_FS_LAYOUT_TYPES,
271 FATTR4_WORD2_LAYOUT_BLKSIZE
272 | FATTR4_WORD2_CLONE_BLKSIZE
273 | FATTR4_WORD2_CHANGE_ATTR_TYPE
274 | FATTR4_WORD2_XATTR_SUPPORT
275 };
276
277 const u32 nfs4_fs_locations_bitmap[3] = {
278 FATTR4_WORD0_CHANGE
279 | FATTR4_WORD0_SIZE
280 | FATTR4_WORD0_FSID
281 | FATTR4_WORD0_FILEID
282 | FATTR4_WORD0_FS_LOCATIONS,
283 FATTR4_WORD1_OWNER
284 | FATTR4_WORD1_OWNER_GROUP
285 | FATTR4_WORD1_RAWDEV
286 | FATTR4_WORD1_SPACE_USED
287 | FATTR4_WORD1_TIME_ACCESS
288 | FATTR4_WORD1_TIME_METADATA
289 | FATTR4_WORD1_TIME_MODIFY
290 | FATTR4_WORD1_MOUNTED_ON_FILEID,
291 };
292
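/*
 * Copy the GETATTR bitmask and then, if we hold a read delegation for
 * the inode, trim out the attributes the client itself controls
 * (rawdev always, plus size/change/mode/owner[_group] whenever the
 * matching cache_validity flag shows our cached copy is still
 * authoritative).
 */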
293 static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src,
294 struct inode *inode, unsigned long flags)
295 {
296 unsigned long cache_validity;
297
298 memcpy(dst, src, NFS4_BITMASK_SZ*sizeof(*dst));
299 if (!inode || !nfs4_have_delegation(inode, FMODE_READ))
300 return;
301
302 cache_validity = READ_ONCE(NFS_I(inode)->cache_validity) | flags;
303
304 /* Remove the attributes over which we have full control */
305 dst[1] &= ~FATTR4_WORD1_RAWDEV;
306 if (!(cache_validity & NFS_INO_INVALID_SIZE))
307 dst[0] &= ~FATTR4_WORD0_SIZE;
308
309 if (!(cache_validity & NFS_INO_INVALID_CHANGE))
310 dst[0] &= ~FATTR4_WORD0_CHANGE;
311
312 if (!(cache_validity & NFS_INO_INVALID_MODE))
313 dst[1] &= ~FATTR4_WORD1_MODE;
314 if (!(cache_validity & NFS_INO_INVALID_OTHER))
315 dst[1] &= ~(FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP);
316 }
317
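/*
 * Prepare the READDIR arguments. NFSv4 servers never return "." and
 * "..", so for cookies 0 and 1 the missing entries are synthesised
 * directly into the first page as raw XDR (entry-present flag, 64-bit
 * cookie, name, attribute bitmap, type and fileid), and the request
 * itself is sent with cookie 0 and a zeroed verifier.
 */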
318 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
319 struct nfs4_readdir_arg *readdir)
320 {
321 unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE;
322 __be32 *start, *p;
323
324 if (cookie > 2) {
325 readdir->cookie = cookie;
326 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
327 return;
328 }
329
330 readdir->cookie = 0;
331 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
332 if (cookie == 2)
333 return;
334
335 /*
336 * NFSv4 servers do not return entries for '.' and '..'
337 * Therefore, we fake these entries here. We let '.'
338 * have cookie 0 and '..' have cookie 1. Note that
339 * when talking to the server, we always send cookie 0
340 * instead of 1 or 2.
341 */
342 start = p = kmap_atomic(*readdir->pages);
343
344 if (cookie == 0) {
345 *p++ = xdr_one; /* next */
346 *p++ = xdr_zero; /* cookie, first word */
347 *p++ = xdr_one; /* cookie, second word */
348 *p++ = xdr_one; /* entry len */
349 memcpy(p, ".\0\0\0", 4); /* entry */
350 p++;
351 *p++ = xdr_one; /* bitmap length */
352 *p++ = htonl(attrs); /* bitmap */
353 *p++ = htonl(12); /* attribute buffer length */
354 *p++ = htonl(NF4DIR);
355 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
356 }
357
358 *p++ = xdr_one; /* next */
359 *p++ = xdr_zero; /* cookie, first word */
360 *p++ = xdr_two; /* cookie, second word */
361 *p++ = xdr_two; /* entry len */
362 memcpy(p, "..\0\0", 4); /* entry */
363 p++;
364 *p++ = xdr_one; /* bitmap length */
365 *p++ = htonl(attrs); /* bitmap */
366 *p++ = htonl(12); /* attribute buffer length */
367 *p++ = htonl(NF4DIR);
368 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
369
370 readdir->pgbase = (char *)p - (char *)start;
371 readdir->count -= readdir->pgbase;
372 kunmap_atomic(start);
373 }
374
375 static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version)
376 {
377 if (!(fattr->valid & NFS_ATTR_FATTR_PRECHANGE)) {
378 fattr->pre_change_attr = version;
379 fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
380 }
381 }
382
383 static void nfs4_test_and_free_stateid(struct nfs_server *server,
384 nfs4_stateid *stateid,
385 const struct cred *cred)
386 {
387 const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;
388
389 ops->test_and_free_expired(server, stateid, cred);
390 }
391
392 static void __nfs4_free_revoked_stateid(struct nfs_server *server,
393 nfs4_stateid *stateid,
394 const struct cred *cred)
395 {
396 stateid->type = NFS4_REVOKED_STATEID_TYPE;
397 nfs4_test_and_free_stateid(server, stateid, cred);
398 }
399
400 static void nfs4_free_revoked_stateid(struct nfs_server *server,
401 const nfs4_stateid *stateid,
402 const struct cred *cred)
403 {
404 nfs4_stateid tmp;
405
406 nfs4_stateid_copy(&tmp, stateid);
407 __nfs4_free_revoked_stateid(server, &tmp, cred);
408 }
409
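/*
 * Exponential backoff helper: returns the current retry delay, clamped
 * to the [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX] window, and doubles
 * the stored timeout for the next attempt. A NULL timeout pointer
 * simply yields the maximum delay.
 */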
410 static long nfs4_update_delay(long *timeout)
411 {
412 long ret;
413 if (!timeout)
414 return NFS4_POLL_RETRY_MAX;
415 if (*timeout <= 0)
416 *timeout = NFS4_POLL_RETRY_MIN;
417 if (*timeout > NFS4_POLL_RETRY_MAX)
418 *timeout = NFS4_POLL_RETRY_MAX;
419 ret = *timeout;
420 *timeout <<= 1;
421 return ret;
422 }
423
424 static int nfs4_delay_killable(long *timeout)
425 {
426 might_sleep();
427
428 freezable_schedule_timeout_killable_unsafe(
429 nfs4_update_delay(timeout));
430 if (!__fatal_signal_pending(current))
431 return 0;
432 return -EINTR;
433 }
434
435 static int nfs4_delay_interruptible(long *timeout)
436 {
437 might_sleep();
438
439 freezable_schedule_timeout_interruptible_unsafe(nfs4_update_delay(timeout));
440 if (!signal_pending(current))
441 return 0;
442 	return __fatal_signal_pending(current) ? -EINTR : -ERESTARTSYS;
443 }
444
445 static int nfs4_delay(long *timeout, bool interruptible)
446 {
447 if (interruptible)
448 return nfs4_delay_interruptible(timeout);
449 return nfs4_delay_killable(timeout);
450 }
451
452 static const nfs4_stateid *
453 nfs4_recoverable_stateid(const nfs4_stateid *stateid)
454 {
455 if (!stateid)
456 return NULL;
457 switch (stateid->type) {
458 case NFS4_OPEN_STATEID_TYPE:
459 case NFS4_LOCK_STATEID_TYPE:
460 case NFS4_DELEGATION_STATEID_TYPE:
461 return stateid;
462 default:
463 break;
464 }
465 return NULL;
466 }
467
468 /* This is the error handling routine for processes that are allowed
469 * to sleep.
470 */
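/*
 * Common worker: classify an NFSv4 error and record the required action
 * in @exception - delay (back off and retry), recovering (wait for
 * state/lease recovery) or retry (resend immediately). It never sleeps
 * itself; errors that cannot be handled here are mapped for return to
 * userland.
 */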
471 static int nfs4_do_handle_exception(struct nfs_server *server,
472 int errorcode, struct nfs4_exception *exception)
473 {
474 struct nfs_client *clp = server->nfs_client;
475 struct nfs4_state *state = exception->state;
476 const nfs4_stateid *stateid;
477 struct inode *inode = exception->inode;
478 int ret = errorcode;
479
480 exception->delay = 0;
481 exception->recovering = 0;
482 exception->retry = 0;
483
484 stateid = nfs4_recoverable_stateid(exception->stateid);
485 if (stateid == NULL && state != NULL)
486 stateid = nfs4_recoverable_stateid(&state->stateid);
487
488 switch(errorcode) {
489 case 0:
490 return 0;
491 case -NFS4ERR_BADHANDLE:
492 case -ESTALE:
493 if (inode != NULL && S_ISREG(inode->i_mode))
494 pnfs_destroy_layout(NFS_I(inode));
495 break;
496 case -NFS4ERR_DELEG_REVOKED:
497 case -NFS4ERR_ADMIN_REVOKED:
498 case -NFS4ERR_EXPIRED:
499 case -NFS4ERR_BAD_STATEID:
500 case -NFS4ERR_PARTNER_NO_AUTH:
501 if (inode != NULL && stateid != NULL) {
502 nfs_inode_find_state_and_recover(inode,
503 stateid);
504 goto wait_on_recovery;
505 }
506 fallthrough;
507 case -NFS4ERR_OPENMODE:
508 if (inode) {
509 int err;
510
511 err = nfs_async_inode_return_delegation(inode,
512 stateid);
513 if (err == 0)
514 goto wait_on_recovery;
515 if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
516 exception->retry = 1;
517 break;
518 }
519 }
520 if (state == NULL)
521 break;
522 ret = nfs4_schedule_stateid_recovery(server, state);
523 if (ret < 0)
524 break;
525 goto wait_on_recovery;
526 case -NFS4ERR_STALE_STATEID:
527 case -NFS4ERR_STALE_CLIENTID:
528 nfs4_schedule_lease_recovery(clp);
529 goto wait_on_recovery;
530 case -NFS4ERR_MOVED:
531 ret = nfs4_schedule_migration_recovery(server);
532 if (ret < 0)
533 break;
534 goto wait_on_recovery;
535 case -NFS4ERR_LEASE_MOVED:
536 nfs4_schedule_lease_moved_recovery(clp);
537 goto wait_on_recovery;
538 #if defined(CONFIG_NFS_V4_1)
539 case -NFS4ERR_BADSESSION:
540 case -NFS4ERR_BADSLOT:
541 case -NFS4ERR_BAD_HIGH_SLOT:
542 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
543 case -NFS4ERR_DEADSESSION:
544 case -NFS4ERR_SEQ_FALSE_RETRY:
545 case -NFS4ERR_SEQ_MISORDERED:
546 /* Handled in nfs41_sequence_process() */
547 goto wait_on_recovery;
548 #endif /* defined(CONFIG_NFS_V4_1) */
549 case -NFS4ERR_FILE_OPEN:
550 if (exception->timeout > HZ) {
551 /* We have retried a decent amount, time to
552 * fail
553 */
554 ret = -EBUSY;
555 break;
556 }
557 fallthrough;
558 case -NFS4ERR_DELAY:
559 nfs_inc_server_stats(server, NFSIOS_DELAY);
560 fallthrough;
561 case -NFS4ERR_GRACE:
562 case -NFS4ERR_LAYOUTTRYLATER:
563 case -NFS4ERR_RECALLCONFLICT:
564 case -NFS4ERR_RETURNCONFLICT:
565 exception->delay = 1;
566 return 0;
567
568 case -NFS4ERR_RETRY_UNCACHED_REP:
569 case -NFS4ERR_OLD_STATEID:
570 exception->retry = 1;
571 break;
572 case -NFS4ERR_BADOWNER:
573 /* The following works around a Linux server bug! */
574 case -NFS4ERR_BADNAME:
575 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
576 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
577 exception->retry = 1;
578 printk(KERN_WARNING "NFS: v4 server %s "
579 "does not accept raw "
580 "uid/gids. "
581 "Reenabling the idmapper.\n",
582 server->nfs_client->cl_hostname);
583 }
584 }
585 /* We failed to handle the error */
586 return nfs4_map_errors(ret);
587 wait_on_recovery:
588 exception->recovering = 1;
589 return 0;
590 }
591
592 /* This is the error handling routine for processes that are allowed
593 * to sleep.
594 */
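/*
 * Synchronous variant: performs the action requested by
 * nfs4_do_handle_exception() by sleeping in nfs4_delay() or
 * nfs4_wait_clnt_recover() before asking the caller to retry. The
 * async variant below defers to the rpc_task instead.
 */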
595 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
596 {
597 struct nfs_client *clp = server->nfs_client;
598 int ret;
599
600 ret = nfs4_do_handle_exception(server, errorcode, exception);
601 if (exception->delay) {
602 ret = nfs4_delay(&exception->timeout,
603 exception->interruptible);
604 goto out_retry;
605 }
606 if (exception->recovering) {
607 if (exception->task_is_privileged)
608 return -EDEADLOCK;
609 ret = nfs4_wait_clnt_recover(clp);
610 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
611 return -EIO;
612 goto out_retry;
613 }
614 return ret;
615 out_retry:
616 if (ret == 0)
617 exception->retry = 1;
618 return ret;
619 }
620
621 static int
622 nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
623 int errorcode, struct nfs4_exception *exception)
624 {
625 struct nfs_client *clp = server->nfs_client;
626 int ret;
627
628 ret = nfs4_do_handle_exception(server, errorcode, exception);
629 if (exception->delay) {
630 rpc_delay(task, nfs4_update_delay(&exception->timeout));
631 goto out_retry;
632 }
633 if (exception->recovering) {
634 if (exception->task_is_privileged)
635 return -EDEADLOCK;
636 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
637 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
638 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
639 goto out_retry;
640 }
641 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
642 ret = -EIO;
643 return ret;
644 out_retry:
645 if (ret == 0) {
646 exception->retry = 1;
647 /*
648 * For NFS4ERR_MOVED, the client transport will need to
649 * be recomputed after migration recovery has completed.
650 */
651 if (errorcode == -NFS4ERR_MOVED)
652 rpc_task_release_transport(task);
653 }
654 return ret;
655 }
656
657 int
658 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
659 struct nfs4_state *state, long *timeout)
660 {
661 struct nfs4_exception exception = {
662 .state = state,
663 };
664
665 if (task->tk_status >= 0)
666 return 0;
667 if (timeout)
668 exception.timeout = *timeout;
669 task->tk_status = nfs4_async_handle_exception(task, server,
670 task->tk_status,
671 &exception);
672 if (exception.delay && timeout)
673 *timeout = exception.timeout;
674 if (exception.retry)
675 return -EAGAIN;
676 return 0;
677 }
678
679 /*
680 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
681 * or 'false' otherwise.
682 */
683 static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
684 {
685 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
686 return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P);
687 }
688
689 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
690 {
691 spin_lock(&clp->cl_lock);
692 	if (time_before(clp->cl_last_renewal, timestamp))
693 clp->cl_last_renewal = timestamp;
694 spin_unlock(&clp->cl_lock);
695 }
696
697 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
698 {
699 struct nfs_client *clp = server->nfs_client;
700
701 if (!nfs4_has_session(clp))
702 do_renew_lease(clp, timestamp);
703 }
704
705 struct nfs4_call_sync_data {
706 const struct nfs_server *seq_server;
707 struct nfs4_sequence_args *seq_args;
708 struct nfs4_sequence_res *seq_res;
709 };
710
711 void nfs4_init_sequence(struct nfs4_sequence_args *args,
712 struct nfs4_sequence_res *res, int cache_reply,
713 int privileged)
714 {
715 args->sa_slot = NULL;
716 args->sa_cache_this = cache_reply;
717 args->sa_privileged = privileged;
718
719 res->sr_slot = NULL;
720 }
721
722 static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
723 {
724 struct nfs4_slot *slot = res->sr_slot;
725 struct nfs4_slot_table *tbl;
726
727 tbl = slot->table;
728 spin_lock(&tbl->slot_tbl_lock);
729 if (!nfs41_wake_and_assign_slot(tbl, slot))
730 nfs4_free_slot(tbl, slot);
731 spin_unlock(&tbl->slot_tbl_lock);
732
733 res->sr_slot = NULL;
734 }
735
736 static int nfs40_sequence_done(struct rpc_task *task,
737 struct nfs4_sequence_res *res)
738 {
739 if (res->sr_slot != NULL)
740 nfs40_sequence_free_slot(res);
741 return 1;
742 }
743
744 #if defined(CONFIG_NFS_V4_1)
745
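/*
 * Return a session slot to the table once a request has finished: bump
 * the sequence number if the previous request completed, hand the slot
 * directly to a waiting task or free it, and notify the server (via
 * nfs41_notify_server()) when a lower highest_used_slotid should be
 * advertised.
 */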
746 static void nfs41_release_slot(struct nfs4_slot *slot)
747 {
748 struct nfs4_session *session;
749 struct nfs4_slot_table *tbl;
750 bool send_new_highest_used_slotid = false;
751
752 if (!slot)
753 return;
754 tbl = slot->table;
755 session = tbl->session;
756
757 /* Bump the slot sequence number */
758 if (slot->seq_done)
759 slot->seq_nr++;
760 slot->seq_done = 0;
761
762 spin_lock(&tbl->slot_tbl_lock);
763 /* Be nice to the server: try to ensure that the last transmitted
764 	 * value for highest_used_slotid <= target_highest_slotid
765 */
766 if (tbl->highest_used_slotid > tbl->target_highest_slotid)
767 send_new_highest_used_slotid = true;
768
769 if (nfs41_wake_and_assign_slot(tbl, slot)) {
770 send_new_highest_used_slotid = false;
771 goto out_unlock;
772 }
773 nfs4_free_slot(tbl, slot);
774
775 if (tbl->highest_used_slotid != NFS4_NO_SLOT)
776 send_new_highest_used_slotid = false;
777 out_unlock:
778 spin_unlock(&tbl->slot_tbl_lock);
779 if (send_new_highest_used_slotid)
780 nfs41_notify_server(session->clp);
781 if (waitqueue_active(&tbl->slot_waitq))
782 wake_up_all(&tbl->slot_waitq);
783 }
784
785 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
786 {
787 nfs41_release_slot(res->sr_slot);
788 res->sr_slot = NULL;
789 }
790
791 static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot,
792 u32 seqnr)
793 {
794 if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0)
795 slot->seq_nr_highest_sent = seqnr;
796 }
797 static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, u32 seqnr)
798 {
799 nfs4_slot_sequence_record_sent(slot, seqnr);
800 slot->seq_nr_last_acked = seqnr;
801 }
802
803 static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred,
804 struct nfs4_slot *slot)
805 {
806 struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true);
807 if (!IS_ERR(task))
808 rpc_put_task_async(task);
809 }
810
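/*
 * Process the result of the SEQUENCE operation in a v4.1 compound: on
 * success the slot's sequence number is acked and the lease renewed;
 * NFS4ERR_DELAY causes a delayed retry; false-retry/uncached-reply
 * errors bump the sequence number and retry; NFS4ERR_SEQ_MISORDERED may
 * probe the slot with a lone SEQUENCE; and session-level errors trigger
 * session recovery.
 */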
811 static int nfs41_sequence_process(struct rpc_task *task,
812 struct nfs4_sequence_res *res)
813 {
814 struct nfs4_session *session;
815 struct nfs4_slot *slot = res->sr_slot;
816 struct nfs_client *clp;
817 int status;
818 int ret = 1;
819
820 if (slot == NULL)
821 goto out_noaction;
822 /* don't increment the sequence number if the task wasn't sent */
823 if (!RPC_WAS_SENT(task) || slot->seq_done)
824 goto out;
825
826 session = slot->table->session;
827 clp = session->clp;
828
829 trace_nfs4_sequence_done(session, res);
830
831 status = res->sr_status;
832 if (task->tk_status == -NFS4ERR_DEADSESSION)
833 status = -NFS4ERR_DEADSESSION;
834
835 /* Check the SEQUENCE operation status */
836 switch (status) {
837 case 0:
838 /* Mark this sequence number as having been acked */
839 nfs4_slot_sequence_acked(slot, slot->seq_nr);
840 /* Update the slot's sequence and clientid lease timer */
841 slot->seq_done = 1;
842 do_renew_lease(clp, res->sr_timestamp);
843 /* Check sequence flags */
844 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
845 !!slot->privileged);
846 nfs41_update_target_slotid(slot->table, slot, res);
847 break;
848 case 1:
849 /*
850 * sr_status remains 1 if an RPC level error occurred.
851 * The server may or may not have processed the sequence
852 		 * operation.
853 */
854 nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
855 slot->seq_done = 1;
856 goto out;
857 case -NFS4ERR_DELAY:
858 /* The server detected a resend of the RPC call and
859 * returned NFS4ERR_DELAY as per Section 2.10.6.2
860 * of RFC5661.
861 */
862 dprintk("%s: slot=%u seq=%u: Operation in progress\n",
863 __func__,
864 slot->slot_nr,
865 slot->seq_nr);
866 goto out_retry;
867 case -NFS4ERR_RETRY_UNCACHED_REP:
868 case -NFS4ERR_SEQ_FALSE_RETRY:
869 /*
870 * The server thinks we tried to replay a request.
871 * Retry the call after bumping the sequence ID.
872 */
873 nfs4_slot_sequence_acked(slot, slot->seq_nr);
874 goto retry_new_seq;
875 case -NFS4ERR_BADSLOT:
876 /*
877 * The slot id we used was probably retired. Try again
878 * using a different slot id.
879 */
880 if (slot->slot_nr < slot->table->target_highest_slotid)
881 goto session_recover;
882 goto retry_nowait;
883 case -NFS4ERR_SEQ_MISORDERED:
884 nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
885 /*
886 * Were one or more calls using this slot interrupted?
887 * If the server never received the request, then our
888 * transmitted slot sequence number may be too high. However,
889 * if the server did receive the request then it might
890 * accidentally give us a reply with a mismatched operation.
891 * We can sort this out by sending a lone sequence operation
892 * to the server on the same slot.
893 */
894 if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) {
895 slot->seq_nr--;
896 if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) {
897 nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot);
898 res->sr_slot = NULL;
899 }
900 goto retry_nowait;
901 }
902 /*
903 * RFC5661:
904 * A retry might be sent while the original request is
905 * still in progress on the replier. The replier SHOULD
906 * deal with the issue by returning NFS4ERR_DELAY as the
907 * reply to SEQUENCE or CB_SEQUENCE operation, but
908 * implementations MAY return NFS4ERR_SEQ_MISORDERED.
909 *
910 * Restart the search after a delay.
911 */
912 slot->seq_nr = slot->seq_nr_highest_sent;
913 goto out_retry;
914 case -NFS4ERR_BADSESSION:
915 case -NFS4ERR_DEADSESSION:
916 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
917 goto session_recover;
918 default:
919 /* Just update the slot sequence no. */
920 slot->seq_done = 1;
921 }
922 out:
923 /* The session may be reset by one of the error handlers. */
924 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
925 out_noaction:
926 return ret;
927 session_recover:
928 set_bit(NFS4_SLOT_TBL_DRAINING, &session->fc_slot_table.slot_tbl_state);
929 nfs4_schedule_session_recovery(session, status);
930 dprintk("%s ERROR: %d Reset session\n", __func__, status);
931 nfs41_sequence_free_slot(res);
932 goto out;
933 retry_new_seq:
934 ++slot->seq_nr;
935 retry_nowait:
936 if (rpc_restart_call_prepare(task)) {
937 nfs41_sequence_free_slot(res);
938 task->tk_status = 0;
939 ret = 0;
940 }
941 goto out;
942 out_retry:
943 if (!rpc_restart_call(task))
944 goto out;
945 rpc_delay(task, NFS4_POLL_RETRY_MAX);
946 return 0;
947 }
948
949 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
950 {
951 if (!nfs41_sequence_process(task, res))
952 return 0;
953 if (res->sr_slot != NULL)
954 nfs41_sequence_free_slot(res);
955 return 1;
956
957 }
958 EXPORT_SYMBOL_GPL(nfs41_sequence_done);
959
960 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
961 {
962 if (res->sr_slot == NULL)
963 return 1;
964 if (res->sr_slot->table->session != NULL)
965 return nfs41_sequence_process(task, res);
966 return nfs40_sequence_done(task, res);
967 }
968
969 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
970 {
971 if (res->sr_slot != NULL) {
972 if (res->sr_slot->table->session != NULL)
973 nfs41_sequence_free_slot(res);
974 else
975 nfs40_sequence_free_slot(res);
976 }
977 }
978
979 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
980 {
981 if (res->sr_slot == NULL)
982 return 1;
983 if (!res->sr_slot->table->session)
984 return nfs40_sequence_done(task, res);
985 return nfs41_sequence_done(task, res);
986 }
987 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
988
989 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
990 {
991 struct nfs4_call_sync_data *data = calldata;
992
993 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
994
995 nfs4_setup_sequence(data->seq_server->nfs_client,
996 data->seq_args, data->seq_res, task);
997 }
998
999 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
1000 {
1001 struct nfs4_call_sync_data *data = calldata;
1002
1003 nfs41_sequence_done(task, data->seq_res);
1004 }
1005
1006 static const struct rpc_call_ops nfs41_call_sync_ops = {
1007 .rpc_call_prepare = nfs41_call_sync_prepare,
1008 .rpc_call_done = nfs41_call_sync_done,
1009 };
1010
1011 #else /* !CONFIG_NFS_V4_1 */
1012
1013 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
1014 {
1015 return nfs40_sequence_done(task, res);
1016 }
1017
1018 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
1019 {
1020 if (res->sr_slot != NULL)
1021 nfs40_sequence_free_slot(res);
1022 }
1023
1024 int nfs4_sequence_done(struct rpc_task *task,
1025 struct nfs4_sequence_res *res)
1026 {
1027 return nfs40_sequence_done(task, res);
1028 }
1029 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
1030
1031 #endif /* !CONFIG_NFS_V4_1 */
1032
1033 static void nfs41_sequence_res_init(struct nfs4_sequence_res *res)
1034 {
1035 res->sr_timestamp = jiffies;
1036 res->sr_status_flags = 0;
1037 res->sr_status = 1;
1038 }
1039
1040 static
1041 void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
1042 struct nfs4_sequence_res *res,
1043 struct nfs4_slot *slot)
1044 {
1045 if (!slot)
1046 return;
1047 slot->privileged = args->sa_privileged ? 1 : 0;
1048 args->sa_slot = slot;
1049
1050 res->sr_slot = slot;
1051 }
1052
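/*
 * Attach a slot to the request before it is transmitted: NFSv4.1 uses
 * the session's fore-channel slot table, NFSv4.0 the client's static
 * cl_slot_tbl. Non-privileged tasks sleep while the table is draining
 * for the state manager, and slot allocation failure (-ENOMEM) is
 * retried after a quarter of a second.
 */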
1053 int nfs4_setup_sequence(struct nfs_client *client,
1054 struct nfs4_sequence_args *args,
1055 struct nfs4_sequence_res *res,
1056 struct rpc_task *task)
1057 {
1058 struct nfs4_session *session = nfs4_get_session(client);
1059 struct nfs4_slot_table *tbl = client->cl_slot_tbl;
1060 struct nfs4_slot *slot;
1061
1062 /* slot already allocated? */
1063 if (res->sr_slot != NULL)
1064 goto out_start;
1065
1066 if (session)
1067 tbl = &session->fc_slot_table;
1068
1069 spin_lock(&tbl->slot_tbl_lock);
1070 /* The state manager will wait until the slot table is empty */
1071 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
1072 goto out_sleep;
1073
1074 slot = nfs4_alloc_slot(tbl);
1075 if (IS_ERR(slot)) {
1076 if (slot == ERR_PTR(-ENOMEM))
1077 goto out_sleep_timeout;
1078 goto out_sleep;
1079 }
1080 spin_unlock(&tbl->slot_tbl_lock);
1081
1082 nfs4_sequence_attach_slot(args, res, slot);
1083
1084 trace_nfs4_setup_sequence(session, args);
1085 out_start:
1086 nfs41_sequence_res_init(res);
1087 rpc_call_start(task);
1088 return 0;
1089 out_sleep_timeout:
1090 /* Try again in 1/4 second */
1091 if (args->sa_privileged)
1092 rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task,
1093 jiffies + (HZ >> 2), RPC_PRIORITY_PRIVILEGED);
1094 else
1095 rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task,
1096 NULL, jiffies + (HZ >> 2));
1097 spin_unlock(&tbl->slot_tbl_lock);
1098 return -EAGAIN;
1099 out_sleep:
1100 if (args->sa_privileged)
1101 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
1102 RPC_PRIORITY_PRIVILEGED);
1103 else
1104 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
1105 spin_unlock(&tbl->slot_tbl_lock);
1106 return -EAGAIN;
1107 }
1108 EXPORT_SYMBOL_GPL(nfs4_setup_sequence);
1109
1110 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
1111 {
1112 struct nfs4_call_sync_data *data = calldata;
1113 nfs4_setup_sequence(data->seq_server->nfs_client,
1114 data->seq_args, data->seq_res, task);
1115 }
1116
1117 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
1118 {
1119 struct nfs4_call_sync_data *data = calldata;
1120 nfs4_sequence_done(task, data->seq_res);
1121 }
1122
1123 static const struct rpc_call_ops nfs40_call_sync_ops = {
1124 .rpc_call_prepare = nfs40_call_sync_prepare,
1125 .rpc_call_done = nfs40_call_sync_done,
1126 };
1127
1128 static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup)
1129 {
1130 int ret;
1131 struct rpc_task *task;
1132
1133 task = rpc_run_task(task_setup);
1134 if (IS_ERR(task))
1135 return PTR_ERR(task);
1136
1137 ret = task->tk_status;
1138 rpc_put_task(task);
1139 return ret;
1140 }
1141
1142 static int nfs4_do_call_sync(struct rpc_clnt *clnt,
1143 struct nfs_server *server,
1144 struct rpc_message *msg,
1145 struct nfs4_sequence_args *args,
1146 struct nfs4_sequence_res *res,
1147 unsigned short task_flags)
1148 {
1149 struct nfs_client *clp = server->nfs_client;
1150 struct nfs4_call_sync_data data = {
1151 .seq_server = server,
1152 .seq_args = args,
1153 .seq_res = res,
1154 };
1155 struct rpc_task_setup task_setup = {
1156 .rpc_client = clnt,
1157 .rpc_message = msg,
1158 .callback_ops = clp->cl_mvops->call_sync_ops,
1159 .callback_data = &data,
1160 .flags = task_flags,
1161 };
1162
1163 return nfs4_call_sync_custom(&task_setup);
1164 }
1165
1166 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
1167 struct nfs_server *server,
1168 struct rpc_message *msg,
1169 struct nfs4_sequence_args *args,
1170 struct nfs4_sequence_res *res)
1171 {
1172 unsigned short task_flags = 0;
1173
1174 if (server->caps & NFS_CAP_MOVEABLE)
1175 task_flags = RPC_TASK_MOVEABLE;
1176 return nfs4_do_call_sync(clnt, server, msg, args, res, task_flags);
1177 }
1178
1179
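/*
 * Synchronous compound helper. An illustrative caller looks roughly
 * like this (argument/result types are per-operation, each embedding
 * seq_args/seq_res):
 *
 *	struct rpc_message msg = {
 *		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
 *		.rpc_argp = &args,
 *		.rpc_resp = &res,
 *	};
 *	status = nfs4_call_sync(server->client, server, &msg,
 *				&args.seq_args, &res.seq_res, 0);
 */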
1180 int nfs4_call_sync(struct rpc_clnt *clnt,
1181 struct nfs_server *server,
1182 struct rpc_message *msg,
1183 struct nfs4_sequence_args *args,
1184 struct nfs4_sequence_res *res,
1185 int cache_reply)
1186 {
1187 nfs4_init_sequence(args, res, cache_reply, 0);
1188 return nfs4_call_sync_sequence(clnt, server, msg, args, res);
1189 }
1190
1191 static void
1192 nfs4_inc_nlink_locked(struct inode *inode)
1193 {
1194 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
1195 NFS_INO_INVALID_CTIME |
1196 NFS_INO_INVALID_NLINK);
1197 inc_nlink(inode);
1198 }
1199
1200 static void
1201 nfs4_inc_nlink(struct inode *inode)
1202 {
1203 spin_lock(&inode->i_lock);
1204 nfs4_inc_nlink_locked(inode);
1205 spin_unlock(&inode->i_lock);
1206 }
1207
1208 static void
1209 nfs4_dec_nlink_locked(struct inode *inode)
1210 {
1211 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
1212 NFS_INO_INVALID_CTIME |
1213 NFS_INO_INVALID_NLINK);
1214 drop_nlink(inode);
1215 }
1216
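/*
 * Fold the change_info4 returned by a namespace-modifying operation
 * into the directory inode. If the server did not apply the change
 * atomically, or the "before" value does not match our cached change
 * attribute, the remaining cached attributes (and cached directory
 * contents) are invalidated.
 */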
1217 static void
1218 nfs4_update_changeattr_locked(struct inode *inode,
1219 struct nfs4_change_info *cinfo,
1220 unsigned long timestamp, unsigned long cache_validity)
1221 {
1222 struct nfs_inode *nfsi = NFS_I(inode);
1223 u64 change_attr = inode_peek_iversion_raw(inode);
1224
1225 cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
1226 if (S_ISDIR(inode->i_mode))
1227 cache_validity |= NFS_INO_INVALID_DATA;
1228
1229 switch (NFS_SERVER(inode)->change_attr_type) {
1230 case NFS4_CHANGE_TYPE_IS_UNDEFINED:
1231 if (cinfo->after == change_attr)
1232 goto out;
1233 break;
1234 default:
1235 if ((s64)(change_attr - cinfo->after) >= 0)
1236 goto out;
1237 }
1238
1239 inode_set_iversion_raw(inode, cinfo->after);
1240 if (!cinfo->atomic || cinfo->before != change_attr) {
1241 if (S_ISDIR(inode->i_mode))
1242 nfs_force_lookup_revalidate(inode);
1243
1244 if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
1245 cache_validity |=
1246 NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
1247 NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
1248 NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
1249 NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR;
1250 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
1251 }
1252 nfsi->attrtimeo_timestamp = jiffies;
1253 nfsi->read_cache_jiffies = timestamp;
1254 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
1255 nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE;
1256 out:
1257 nfs_set_cache_invalid(inode, cache_validity);
1258 }
1259
1260 void
1261 nfs4_update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
1262 unsigned long timestamp, unsigned long cache_validity)
1263 {
1264 spin_lock(&dir->i_lock);
1265 nfs4_update_changeattr_locked(dir, cinfo, timestamp, cache_validity);
1266 spin_unlock(&dir->i_lock);
1267 }
1268
1269 struct nfs4_open_createattrs {
1270 struct nfs4_label *label;
1271 struct iattr *sattr;
1272 const __u32 verf[2];
1273 };
1274
1275 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
1276 int err, struct nfs4_exception *exception)
1277 {
1278 if (err != -EINVAL)
1279 return false;
1280 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1281 return false;
1282 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
1283 exception->retry = 1;
1284 return true;
1285 }
1286
1287 static fmode_t _nfs4_ctx_to_accessmode(const struct nfs_open_context *ctx)
1288 {
1289 return ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
1290 }
1291
1292 static fmode_t _nfs4_ctx_to_openmode(const struct nfs_open_context *ctx)
1293 {
1294 fmode_t ret = ctx->mode & (FMODE_READ|FMODE_WRITE);
1295
1296 return (ctx->mode & FMODE_EXEC) ? FMODE_READ | ret : ret;
1297 }
1298
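/*
 * Translate the requested fmode into NFSv4 share access bits. When the
 * server supports the v4.1 OPEN extensions, O_DIRECT opens also ask the
 * server to withhold a delegation (NFS4_SHARE_WANT_NO_DELEG).
 */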
1299 static u32
1300 nfs4_map_atomic_open_share(struct nfs_server *server,
1301 fmode_t fmode, int openflags)
1302 {
1303 u32 res = 0;
1304
1305 switch (fmode & (FMODE_READ | FMODE_WRITE)) {
1306 case FMODE_READ:
1307 res = NFS4_SHARE_ACCESS_READ;
1308 break;
1309 case FMODE_WRITE:
1310 res = NFS4_SHARE_ACCESS_WRITE;
1311 break;
1312 case FMODE_READ|FMODE_WRITE:
1313 res = NFS4_SHARE_ACCESS_BOTH;
1314 }
1315 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1316 goto out;
1317 /* Want no delegation if we're using O_DIRECT */
1318 if (openflags & O_DIRECT)
1319 res |= NFS4_SHARE_WANT_NO_DELEG;
1320 out:
1321 return res;
1322 }
1323
1324 static enum open_claim_type4
1325 nfs4_map_atomic_open_claim(struct nfs_server *server,
1326 enum open_claim_type4 claim)
1327 {
1328 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
1329 return claim;
1330 switch (claim) {
1331 default:
1332 return claim;
1333 case NFS4_OPEN_CLAIM_FH:
1334 return NFS4_OPEN_CLAIM_NULL;
1335 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1336 return NFS4_OPEN_CLAIM_DELEGATE_CUR;
1337 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1338 return NFS4_OPEN_CLAIM_DELEGATE_PREV;
1339 }
1340 }
1341
1342 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
1343 {
1344 p->o_res.f_attr = &p->f_attr;
1345 p->o_res.f_label = p->f_label;
1346 p->o_res.seqid = p->o_arg.seqid;
1347 p->c_res.seqid = p->c_arg.seqid;
1348 p->o_res.server = p->o_arg.server;
1349 p->o_res.access_request = p->o_arg.access;
1350 nfs_fattr_init(&p->f_attr);
1351 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
1352 }
1353
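/*
 * Allocate and initialise the bookkeeping for an OPEN compound: take
 * references on the dentry, its parent, the superblock and the state
 * owner, allocate the open seqid and security labels, and preload the
 * OPEN arguments (claim type, share access, create attributes and the
 * ACCESS bits to probe alongside the open).
 */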
1354 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1355 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
1356 const struct nfs4_open_createattrs *c,
1357 enum open_claim_type4 claim,
1358 gfp_t gfp_mask)
1359 {
1360 struct dentry *parent = dget_parent(dentry);
1361 struct inode *dir = d_inode(parent);
1362 struct nfs_server *server = NFS_SERVER(dir);
1363 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
1364 struct nfs4_label *label = (c != NULL) ? c->label : NULL;
1365 struct nfs4_opendata *p;
1366
1367 p = kzalloc(sizeof(*p), gfp_mask);
1368 if (p == NULL)
1369 goto err;
1370
1371 p->f_label = nfs4_label_alloc(server, gfp_mask);
1372 if (IS_ERR(p->f_label))
1373 goto err_free_p;
1374
1375 p->a_label = nfs4_label_alloc(server, gfp_mask);
1376 if (IS_ERR(p->a_label))
1377 goto err_free_f;
1378
1379 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1380 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1381 if (IS_ERR(p->o_arg.seqid))
1382 goto err_free_label;
1383 nfs_sb_active(dentry->d_sb);
1384 p->dentry = dget(dentry);
1385 p->dir = parent;
1386 p->owner = sp;
1387 atomic_inc(&sp->so_count);
1388 p->o_arg.open_flags = flags;
1389 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
1390 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1391 p->o_arg.share_access = nfs4_map_atomic_open_share(server,
1392 fmode, flags);
1393 if (flags & O_CREAT) {
1394 p->o_arg.umask = current_umask();
1395 p->o_arg.label = nfs4_label_copy(p->a_label, label);
1396 if (c->sattr != NULL && c->sattr->ia_valid != 0) {
1397 p->o_arg.u.attrs = &p->attrs;
1398 memcpy(&p->attrs, c->sattr, sizeof(p->attrs));
1399
1400 memcpy(p->o_arg.u.verifier.data, c->verf,
1401 sizeof(p->o_arg.u.verifier.data));
1402 }
1403 }
1404 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
1405 * will return permission denied for all bits until close */
1406 if (!(flags & O_EXCL)) {
1407 /* ask server to check for all possible rights as results
1408 * are cached */
1409 switch (p->o_arg.claim) {
1410 default:
1411 break;
1412 case NFS4_OPEN_CLAIM_NULL:
1413 case NFS4_OPEN_CLAIM_FH:
1414 p->o_arg.access = NFS4_ACCESS_READ |
1415 NFS4_ACCESS_MODIFY |
1416 NFS4_ACCESS_EXTEND |
1417 NFS4_ACCESS_EXECUTE;
1418 #ifdef CONFIG_NFS_V4_2
1419 if (server->caps & NFS_CAP_XATTR)
1420 p->o_arg.access |= NFS4_ACCESS_XAREAD |
1421 NFS4_ACCESS_XAWRITE |
1422 NFS4_ACCESS_XALIST;
1423 #endif
1424 }
1425 }
1426 p->o_arg.clientid = server->nfs_client->cl_clientid;
1427 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
1428 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
1429 p->o_arg.name = &dentry->d_name;
1430 p->o_arg.server = server;
1431 p->o_arg.bitmask = nfs4_bitmask(server, label);
1432 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1433 switch (p->o_arg.claim) {
1434 case NFS4_OPEN_CLAIM_NULL:
1435 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1436 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1437 p->o_arg.fh = NFS_FH(dir);
1438 break;
1439 case NFS4_OPEN_CLAIM_PREVIOUS:
1440 case NFS4_OPEN_CLAIM_FH:
1441 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1442 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1443 p->o_arg.fh = NFS_FH(d_inode(dentry));
1444 }
1445 p->c_arg.fh = &p->o_res.fh;
1446 p->c_arg.stateid = &p->o_res.stateid;
1447 p->c_arg.seqid = p->o_arg.seqid;
1448 nfs4_init_opendata_res(p);
1449 kref_init(&p->kref);
1450 return p;
1451
1452 err_free_label:
1453 nfs4_label_free(p->a_label);
1454 err_free_f:
1455 nfs4_label_free(p->f_label);
1456 err_free_p:
1457 kfree(p);
1458 err:
1459 dput(parent);
1460 return NULL;
1461 }
1462
1463 static void nfs4_opendata_free(struct kref *kref)
1464 {
1465 struct nfs4_opendata *p = container_of(kref,
1466 struct nfs4_opendata, kref);
1467 struct super_block *sb = p->dentry->d_sb;
1468
1469 nfs4_lgopen_release(p->lgp);
1470 nfs_free_seqid(p->o_arg.seqid);
1471 nfs4_sequence_free_slot(&p->o_res.seq_res);
1472 if (p->state != NULL)
1473 nfs4_put_open_state(p->state);
1474 nfs4_put_state_owner(p->owner);
1475
1476 nfs4_label_free(p->a_label);
1477 nfs4_label_free(p->f_label);
1478
1479 dput(p->dir);
1480 dput(p->dentry);
1481 nfs_sb_deactive(sb);
1482 nfs_fattr_free_names(&p->f_attr);
1483 kfree(p->f_attr.mdsthreshold);
1484 kfree(p);
1485 }
1486
1487 static void nfs4_opendata_put(struct nfs4_opendata *p)
1488 {
1489 if (p != NULL)
1490 kref_put(&p->kref, nfs4_opendata_free);
1491 }
1492
1493 static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
1494 fmode_t fmode)
1495 {
1496 switch(fmode & (FMODE_READ|FMODE_WRITE)) {
1497 case FMODE_READ|FMODE_WRITE:
1498 return state->n_rdwr != 0;
1499 case FMODE_WRITE:
1500 return state->n_wronly != 0;
1501 case FMODE_READ:
1502 return state->n_rdonly != 0;
1503 }
1504 WARN_ON_ONCE(1);
1505 return false;
1506 }
1507
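/*
 * Return non-zero if an open stateid we already hold covers the
 * requested mode, so the OPEN RPC can be skipped. Cache hits are never
 * allowed for O_EXCL/O_TRUNC opens, nor for plain CLAIM_NULL/CLAIM_FH
 * claims, which always go to the server.
 */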
1508 static int can_open_cached(struct nfs4_state *state, fmode_t mode,
1509 int open_mode, enum open_claim_type4 claim)
1510 {
1511 int ret = 0;
1512
1513 if (open_mode & (O_EXCL|O_TRUNC))
1514 goto out;
1515 switch (claim) {
1516 case NFS4_OPEN_CLAIM_NULL:
1517 case NFS4_OPEN_CLAIM_FH:
1518 goto out;
1519 default:
1520 break;
1521 }
1522 switch (mode & (FMODE_READ|FMODE_WRITE)) {
1523 case FMODE_READ:
1524 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1525 && state->n_rdonly != 0;
1526 break;
1527 case FMODE_WRITE:
1528 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
1529 && state->n_wronly != 0;
1530 break;
1531 case FMODE_READ|FMODE_WRITE:
1532 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
1533 && state->n_rdwr != 0;
1534 }
1535 out:
1536 return ret;
1537 }
1538
1539 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
1540 enum open_claim_type4 claim)
1541 {
1542 if (delegation == NULL)
1543 return 0;
1544 if ((delegation->type & fmode) != fmode)
1545 return 0;
1546 switch (claim) {
1547 case NFS4_OPEN_CLAIM_NULL:
1548 case NFS4_OPEN_CLAIM_FH:
1549 break;
1550 case NFS4_OPEN_CLAIM_PREVIOUS:
1551 if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
1552 break;
1553 fallthrough;
1554 default:
1555 return 0;
1556 }
1557 nfs_mark_delegation_referenced(delegation);
1558 return 1;
1559 }
1560
1561 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
1562 {
1563 switch (fmode) {
1564 case FMODE_WRITE:
1565 state->n_wronly++;
1566 break;
1567 case FMODE_READ:
1568 state->n_rdonly++;
1569 break;
1570 case FMODE_READ|FMODE_WRITE:
1571 state->n_rdwr++;
1572 }
1573 nfs4_state_set_mode_locked(state, state->state | fmode);
1574 }
1575
1576 #ifdef CONFIG_NFS_V4_1
1577 static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state)
1578 {
1579 if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags))
1580 return true;
1581 if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags))
1582 return true;
1583 if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags))
1584 return true;
1585 return false;
1586 }
1587 #endif /* CONFIG_NFS_V4_1 */
1588
1589 static void nfs_state_log_update_open_stateid(struct nfs4_state *state)
1590 {
1591 if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
1592 wake_up_all(&state->waitq);
1593 }
1594
1595 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
1596 {
1597 struct nfs_client *clp = state->owner->so_server->nfs_client;
1598 bool need_recover = false;
1599
1600 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
1601 need_recover = true;
1602 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
1603 need_recover = true;
1604 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
1605 need_recover = true;
1606 if (need_recover)
1607 nfs4_state_mark_reclaim_nograce(clp, state);
1608 }
1609
1610 /*
1611  * Check whether or not the caller may update the open stateid
1612 * to the value passed in by stateid.
1613 *
1614 * Note: This function relies heavily on the server implementing
1615 * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2
1616 * correctly.
1617 * i.e. The stateid seqids have to be initialised to 1, and
1618 * are then incremented on every state transition.
1619 */
1620 static bool nfs_stateid_is_sequential(struct nfs4_state *state,
1621 const nfs4_stateid *stateid)
1622 {
1623 if (test_bit(NFS_OPEN_STATE, &state->flags)) {
1624 /* The common case - we're updating to a new sequence number */
1625 if (nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1626 if (nfs4_stateid_is_next(&state->open_stateid, stateid))
1627 return true;
1628 return false;
1629 }
1630 /* The server returned a new stateid */
1631 }
1632 /* This is the first OPEN in this generation */
1633 if (stateid->seqid == cpu_to_be32(1))
1634 return true;
1635 return false;
1636 }
1637
1638 static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1639 {
1640 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
1641 return;
1642 if (state->n_wronly)
1643 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1644 if (state->n_rdonly)
1645 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1646 if (state->n_rdwr)
1647 set_bit(NFS_O_RDWR_STATE, &state->flags);
1648 set_bit(NFS_OPEN_STATE, &state->flags);
1649 }
1650
1651 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1652 nfs4_stateid *stateid, fmode_t fmode)
1653 {
1654 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1655 switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1656 case FMODE_WRITE:
1657 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1658 break;
1659 case FMODE_READ:
1660 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1661 break;
1662 case 0:
1663 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1664 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1665 clear_bit(NFS_OPEN_STATE, &state->flags);
1666 }
1667 if (stateid == NULL)
1668 return;
1669 /* Handle OPEN+OPEN_DOWNGRADE races */
1670 if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
1671 !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
1672 nfs_resync_open_stateid_locked(state);
1673 goto out;
1674 }
1675 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1676 nfs4_stateid_copy(&state->stateid, stateid);
1677 nfs4_stateid_copy(&state->open_stateid, stateid);
1678 trace_nfs4_open_stateid_update(state->inode, stateid, 0);
1679 out:
1680 nfs_state_log_update_open_stateid(state);
1681 }
1682
1683 static void nfs_clear_open_stateid(struct nfs4_state *state,
1684 nfs4_stateid *arg_stateid,
1685 nfs4_stateid *stateid, fmode_t fmode)
1686 {
1687 write_seqlock(&state->seqlock);
1688 	/* Ignore if the CLOSE argument doesn't match the current stateid */
1689 if (nfs4_state_match_open_stateid_other(state, arg_stateid))
1690 nfs_clear_open_stateid_locked(state, stateid, fmode);
1691 write_sequnlock(&state->seqlock);
1692 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1693 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1694 }
1695
1696 static void nfs_set_open_stateid_locked(struct nfs4_state *state,
1697 const nfs4_stateid *stateid, nfs4_stateid *freeme)
1698 __must_hold(&state->owner->so_lock)
1699 __must_hold(&state->seqlock)
1700 __must_hold(RCU)
1701
1702 {
1703 DEFINE_WAIT(wait);
1704 int status = 0;
1705 for (;;) {
1706
1707 if (nfs_stateid_is_sequential(state, stateid))
1708 break;
1709
1710 if (status)
1711 break;
1712 /* Rely on seqids for serialisation with NFSv4.0 */
1713 if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
1714 break;
1715
1716 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
1717 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
1718 /*
1719 * Ensure we process the state changes in the same order
1720 * in which the server processed them by delaying the
1721 * update of the stateid until we are in sequence.
1722 */
1723 write_sequnlock(&state->seqlock);
1724 spin_unlock(&state->owner->so_lock);
1725 rcu_read_unlock();
1726 trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
1727
1728 if (!fatal_signal_pending(current)) {
1729 if (schedule_timeout(5*HZ) == 0)
1730 status = -EAGAIN;
1731 else
1732 status = 0;
1733 } else
1734 status = -EINTR;
1735 finish_wait(&state->waitq, &wait);
1736 rcu_read_lock();
1737 spin_lock(&state->owner->so_lock);
1738 write_seqlock(&state->seqlock);
1739 }
1740
1741 if (test_bit(NFS_OPEN_STATE, &state->flags) &&
1742 !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1743 nfs4_stateid_copy(freeme, &state->open_stateid);
1744 nfs_test_and_clear_all_open_stateid(state);
1745 }
1746
1747 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1748 nfs4_stateid_copy(&state->stateid, stateid);
1749 nfs4_stateid_copy(&state->open_stateid, stateid);
1750 trace_nfs4_open_stateid_update(state->inode, stateid, status);
1751 nfs_state_log_update_open_stateid(state);
1752 }
1753
1754 static void nfs_state_set_open_stateid(struct nfs4_state *state,
1755 const nfs4_stateid *open_stateid,
1756 fmode_t fmode,
1757 nfs4_stateid *freeme)
1758 {
1759 /*
1760 * Protect the call to nfs4_state_set_mode_locked and
1761 * serialise the stateid update
1762 */
1763 write_seqlock(&state->seqlock);
1764 nfs_set_open_stateid_locked(state, open_stateid, freeme);
1765 switch (fmode) {
1766 case FMODE_READ:
1767 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1768 break;
1769 case FMODE_WRITE:
1770 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1771 break;
1772 case FMODE_READ|FMODE_WRITE:
1773 set_bit(NFS_O_RDWR_STATE, &state->flags);
1774 }
1775 set_bit(NFS_OPEN_STATE, &state->flags);
1776 write_sequnlock(&state->seqlock);
1777 }
1778
1779 static void nfs_state_clear_open_state_flags(struct nfs4_state *state)
1780 {
1781 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1782 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1783 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1784 clear_bit(NFS_OPEN_STATE, &state->flags);
1785 }
1786
1787 static void nfs_state_set_delegation(struct nfs4_state *state,
1788 const nfs4_stateid *deleg_stateid,
1789 fmode_t fmode)
1790 {
1791 /*
1792 * Protect the call to nfs4_state_set_mode_locked and
1793 * serialise the stateid update
1794 */
1795 write_seqlock(&state->seqlock);
1796 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1797 set_bit(NFS_DELEGATED_STATE, &state->flags);
1798 write_sequnlock(&state->seqlock);
1799 }
1800
1801 static void nfs_state_clear_delegation(struct nfs4_state *state)
1802 {
1803 write_seqlock(&state->seqlock);
1804 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1805 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1806 write_sequnlock(&state->seqlock);
1807 }
1808
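/*
 * Update the open and delegation stateids for @state following an OPEN
 * reply. Returns 1 if the state was updated (and the open mode counters
 * bumped), 0 otherwise. Any superseded open stateid is passed to
 * nfs4_test_and_free_stateid() once the locks have been dropped.
 */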
1809 int update_open_stateid(struct nfs4_state *state,
1810 const nfs4_stateid *open_stateid,
1811 const nfs4_stateid *delegation,
1812 fmode_t fmode)
1813 {
1814 struct nfs_server *server = NFS_SERVER(state->inode);
1815 struct nfs_client *clp = server->nfs_client;
1816 struct nfs_inode *nfsi = NFS_I(state->inode);
1817 struct nfs_delegation *deleg_cur;
1818 nfs4_stateid freeme = { };
1819 int ret = 0;
1820
1821 fmode &= (FMODE_READ|FMODE_WRITE);
1822
1823 rcu_read_lock();
1824 spin_lock(&state->owner->so_lock);
1825 if (open_stateid != NULL) {
1826 nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme);
1827 ret = 1;
1828 }
1829
1830 deleg_cur = nfs4_get_valid_delegation(state->inode);
1831 if (deleg_cur == NULL)
1832 goto no_delegation;
1833
1834 spin_lock(&deleg_cur->lock);
1835 if (rcu_dereference(nfsi->delegation) != deleg_cur ||
1836 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1837 (deleg_cur->type & fmode) != fmode)
1838 goto no_delegation_unlock;
1839
1840 if (delegation == NULL)
1841 delegation = &deleg_cur->stateid;
1842 else if (!nfs4_stateid_match_other(&deleg_cur->stateid, delegation))
1843 goto no_delegation_unlock;
1844
1845 nfs_mark_delegation_referenced(deleg_cur);
1846 nfs_state_set_delegation(state, &deleg_cur->stateid, fmode);
1847 ret = 1;
1848 no_delegation_unlock:
1849 spin_unlock(&deleg_cur->lock);
1850 no_delegation:
1851 if (ret)
1852 update_open_stateflags(state, fmode);
1853 spin_unlock(&state->owner->so_lock);
1854 rcu_read_unlock();
1855
1856 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1857 nfs4_schedule_state_manager(clp);
1858 if (freeme.type != 0)
1859 nfs4_test_and_free_stateid(server, &freeme,
1860 state->owner->so_cred);
1861
1862 return ret;
1863 }
1864
1865 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
1866 const nfs4_stateid *stateid)
1867 {
1868 struct nfs4_state *state = lsp->ls_state;
1869 bool ret = false;
1870
1871 spin_lock(&state->state_lock);
1872 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
1873 goto out_noupdate;
1874 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
1875 goto out_noupdate;
1876 nfs4_stateid_copy(&lsp->ls_stateid, stateid);
1877 ret = true;
1878 out_noupdate:
1879 spin_unlock(&state->state_lock);
1880 return ret;
1881 }
1882
1883 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1884 {
1885 struct nfs_delegation *delegation;
1886
1887 fmode &= FMODE_READ|FMODE_WRITE;
1888 rcu_read_lock();
1889 delegation = nfs4_get_valid_delegation(inode);
1890 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1891 rcu_read_unlock();
1892 return;
1893 }
1894 rcu_read_unlock();
1895 nfs4_inode_return_delegation(inode);
1896 }
1897
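/*
 * Attempt to satisfy an OPEN without going to the server, either by
 * reusing an already established open state or by claiming an outstanding
 * delegation. Returns a referenced nfs4_state on success, otherwise an
 * ERR_PTR() (typically -EAGAIN, meaning a real OPEN is required).
 */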
1898 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1899 {
1900 struct nfs4_state *state = opendata->state;
1901 struct nfs_delegation *delegation;
1902 int open_mode = opendata->o_arg.open_flags;
1903 fmode_t fmode = opendata->o_arg.fmode;
1904 enum open_claim_type4 claim = opendata->o_arg.claim;
1905 nfs4_stateid stateid;
1906 int ret = -EAGAIN;
1907
1908 for (;;) {
1909 spin_lock(&state->owner->so_lock);
1910 if (can_open_cached(state, fmode, open_mode, claim)) {
1911 update_open_stateflags(state, fmode);
1912 spin_unlock(&state->owner->so_lock);
1913 goto out_return_state;
1914 }
1915 spin_unlock(&state->owner->so_lock);
1916 rcu_read_lock();
1917 delegation = nfs4_get_valid_delegation(state->inode);
1918 if (!can_open_delegated(delegation, fmode, claim)) {
1919 rcu_read_unlock();
1920 break;
1921 }
1922 /* Save the delegation */
1923 nfs4_stateid_copy(&stateid, &delegation->stateid);
1924 rcu_read_unlock();
1925 nfs_release_seqid(opendata->o_arg.seqid);
1926 if (!opendata->is_recover) {
1927 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1928 if (ret != 0)
1929 goto out;
1930 }
1931 ret = -EAGAIN;
1932
1933 /* Try to update the stateid using the delegation */
1934 if (update_open_stateid(state, NULL, &stateid, fmode))
1935 goto out_return_state;
1936 }
1937 out:
1938 return ERR_PTR(ret);
1939 out_return_state:
1940 refcount_inc(&state->count);
1941 return state;
1942 }
1943
1944 static void
1945 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1946 {
1947 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1948 struct nfs_delegation *delegation;
1949 int delegation_flags = 0;
1950
1951 rcu_read_lock();
1952 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1953 if (delegation)
1954 delegation_flags = delegation->flags;
1955 rcu_read_unlock();
1956 switch (data->o_arg.claim) {
1957 default:
1958 break;
1959 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1960 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1961 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1962 "returning a delegation for "
1963 "OPEN(CLAIM_DELEGATE_CUR)\n",
1964 clp->cl_hostname);
1965 return;
1966 }
1967 if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1968 nfs_inode_set_delegation(state->inode,
1969 data->owner->so_cred,
1970 data->o_res.delegation_type,
1971 &data->o_res.delegation,
1972 data->o_res.pagemod_limit);
1973 else
1974 nfs_inode_reclaim_delegation(state->inode,
1975 data->owner->so_cred,
1976 data->o_res.delegation_type,
1977 &data->o_res.delegation,
1978 data->o_res.pagemod_limit);
1979
1980 if (data->o_res.do_recall)
1981 nfs_async_inode_return_delegation(state->inode,
1982 &data->o_res.delegation);
1983 }
1984
1985 /*
1986 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1987 * and update the nfs4_state.
1988 */
1989 static struct nfs4_state *
1990 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1991 {
1992 struct inode *inode = data->state->inode;
1993 struct nfs4_state *state = data->state;
1994 int ret;
1995
1996 if (!data->rpc_done) {
1997 if (data->rpc_status)
1998 return ERR_PTR(data->rpc_status);
1999 return nfs4_try_open_cached(data);
2000 }
2001
2002 ret = nfs_refresh_inode(inode, &data->f_attr);
2003 if (ret)
2004 return ERR_PTR(ret);
2005
2006 if (data->o_res.delegation_type != 0)
2007 nfs4_opendata_check_deleg(data, state);
2008
2009 if (!update_open_stateid(state, &data->o_res.stateid,
2010 NULL, data->o_arg.fmode))
2011 return ERR_PTR(-EAGAIN);
2012 refcount_inc(&state->count);
2013
2014 return state;
2015 }
2016
2017 static struct inode *
2018 nfs4_opendata_get_inode(struct nfs4_opendata *data)
2019 {
2020 struct inode *inode;
2021
2022 switch (data->o_arg.claim) {
2023 case NFS4_OPEN_CLAIM_NULL:
2024 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
2025 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
2026 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
2027 return ERR_PTR(-EAGAIN);
2028 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh,
2029 &data->f_attr, data->f_label);
2030 break;
2031 default:
2032 inode = d_inode(data->dentry);
2033 ihold(inode);
2034 nfs_refresh_inode(inode, &data->f_attr);
2035 }
2036 return inode;
2037 }
2038
2039 static struct nfs4_state *
2040 nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data)
2041 {
2042 struct nfs4_state *state;
2043 struct inode *inode;
2044
2045 inode = nfs4_opendata_get_inode(data);
2046 if (IS_ERR(inode))
2047 return ERR_CAST(inode);
2048 if (data->state != NULL && data->state->inode == inode) {
2049 state = data->state;
2050 refcount_inc(&state->count);
2051 } else
2052 state = nfs4_get_open_state(inode, data->owner);
2053 iput(inode);
2054 if (state == NULL)
2055 state = ERR_PTR(-ENOMEM);
2056 return state;
2057 }
2058
2059 static struct nfs4_state *
2060 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
2061 {
2062 struct nfs4_state *state;
2063
2064 if (!data->rpc_done) {
2065 state = nfs4_try_open_cached(data);
2066 trace_nfs4_cached_open(data->state);
2067 goto out;
2068 }
2069
2070 state = nfs4_opendata_find_nfs4_state(data);
2071 if (IS_ERR(state))
2072 goto out;
2073
2074 if (data->o_res.delegation_type != 0)
2075 nfs4_opendata_check_deleg(data, state);
2076 if (!update_open_stateid(state, &data->o_res.stateid,
2077 NULL, data->o_arg.fmode)) {
2078 nfs4_put_open_state(state);
2079 state = ERR_PTR(-EAGAIN);
2080 }
2081 out:
2082 nfs_release_seqid(data->o_arg.seqid);
2083 return state;
2084 }
2085
2086 static struct nfs4_state *
2087 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
2088 {
2089 struct nfs4_state *ret;
2090
2091 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
2092 ret = _nfs4_opendata_reclaim_to_nfs4_state(data);
2093 else
2094 ret = _nfs4_opendata_to_nfs4_state(data);
2095 nfs4_sequence_free_slot(&data->o_res.seq_res);
2096 return ret;
2097 }
2098
2099 static struct nfs_open_context *
2100 nfs4_state_find_open_context_mode(struct nfs4_state *state, fmode_t mode)
2101 {
2102 struct nfs_inode *nfsi = NFS_I(state->inode);
2103 struct nfs_open_context *ctx;
2104
2105 rcu_read_lock();
2106 list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
2107 if (ctx->state != state)
2108 continue;
2109 if ((ctx->mode & mode) != mode)
2110 continue;
2111 if (!get_nfs_open_context(ctx))
2112 continue;
2113 rcu_read_unlock();
2114 return ctx;
2115 }
2116 rcu_read_unlock();
2117 return ERR_PTR(-ENOENT);
2118 }
2119
2120 static struct nfs_open_context *
2121 nfs4_state_find_open_context(struct nfs4_state *state)
2122 {
2123 struct nfs_open_context *ctx;
2124
2125 ctx = nfs4_state_find_open_context_mode(state, FMODE_READ|FMODE_WRITE);
2126 if (!IS_ERR(ctx))
2127 return ctx;
2128 ctx = nfs4_state_find_open_context_mode(state, FMODE_WRITE);
2129 if (!IS_ERR(ctx))
2130 return ctx;
2131 return nfs4_state_find_open_context_mode(state, FMODE_READ);
2132 }
2133
2134 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
2135 struct nfs4_state *state, enum open_claim_type4 claim)
2136 {
2137 struct nfs4_opendata *opendata;
2138
2139 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
2140 NULL, claim, GFP_NOFS);
2141 if (opendata == NULL)
2142 return ERR_PTR(-ENOMEM);
2143 opendata->state = state;
2144 refcount_inc(&state->count);
2145 return opendata;
2146 }
2147
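/*
 * Re-open opendata->state with the given share mode as part of state
 * recovery. Does nothing if no open of that mode is outstanding. Returns
 * -ESTALE if the recovery OPEN resolved to a different nfs4_state.
 */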
2148 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
2149 fmode_t fmode)
2150 {
2151 struct nfs4_state *newstate;
2152 struct nfs_server *server = NFS_SB(opendata->dentry->d_sb);
2153 int openflags = opendata->o_arg.open_flags;
2154 int ret;
2155
2156 if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
2157 return 0;
2158 opendata->o_arg.fmode = fmode;
2159 opendata->o_arg.share_access =
2160 nfs4_map_atomic_open_share(server, fmode, openflags);
2161 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
2162 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
2163 nfs4_init_opendata_res(opendata);
2164 ret = _nfs4_recover_proc_open(opendata);
2165 if (ret != 0)
2166 return ret;
2167 newstate = nfs4_opendata_to_nfs4_state(opendata);
2168 if (IS_ERR(newstate))
2169 return PTR_ERR(newstate);
2170 if (newstate != opendata->state)
2171 ret = -ESTALE;
2172 nfs4_close_state(newstate, fmode);
2173 return ret;
2174 }
2175
2176 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
2177 {
2178 int ret;
2179
2180 /* memory barrier prior to reading state->n_* */
2181 smp_rmb();
2182 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
2183 if (ret != 0)
2184 return ret;
2185 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
2186 if (ret != 0)
2187 return ret;
2188 ret = nfs4_open_recover_helper(opendata, FMODE_READ);
2189 if (ret != 0)
2190 return ret;
2191 /*
2192 * We may have performed cached opens for all three recoveries.
2193 * Check if we need to update the current stateid.
2194 */
2195 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
2196 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
2197 write_seqlock(&state->seqlock);
2198 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
2199 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2200 write_sequnlock(&state->seqlock);
2201 }
2202 return 0;
2203 }
2204
2205 /*
2206 * OPEN_RECLAIM:
2207 * reclaim state on the server after a reboot.
2208 */
2209 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
2210 {
2211 struct nfs_delegation *delegation;
2212 struct nfs4_opendata *opendata;
2213 fmode_t delegation_type = 0;
2214 int status;
2215
2216 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2217 NFS4_OPEN_CLAIM_PREVIOUS);
2218 if (IS_ERR(opendata))
2219 return PTR_ERR(opendata);
2220 rcu_read_lock();
2221 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2222 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
2223 delegation_type = delegation->type;
2224 rcu_read_unlock();
2225 opendata->o_arg.u.delegation_type = delegation_type;
2226 status = nfs4_open_recover(opendata, state);
2227 nfs4_opendata_put(opendata);
2228 return status;
2229 }
2230
2231 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
2232 {
2233 struct nfs_server *server = NFS_SERVER(state->inode);
2234 struct nfs4_exception exception = { };
2235 int err;
2236 do {
2237 err = _nfs4_do_open_reclaim(ctx, state);
2238 trace_nfs4_open_reclaim(ctx, 0, err);
2239 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2240 continue;
2241 if (err != -NFS4ERR_DELAY)
2242 break;
2243 nfs4_handle_exception(server, err, &exception);
2244 } while (exception.retry);
2245 return err;
2246 }
2247
2248 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
2249 {
2250 struct nfs_open_context *ctx;
2251 int ret;
2252
2253 ctx = nfs4_state_find_open_context(state);
2254 if (IS_ERR(ctx))
2255 return -EAGAIN;
2256 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2257 nfs_state_clear_open_state_flags(state);
2258 ret = nfs4_do_open_reclaim(ctx, state);
2259 put_nfs_open_context(ctx);
2260 return ret;
2261 }
2262
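/*
 * Map errors hit while recalling a delegation onto the appropriate
 * recovery action (lease, migration or stateid recovery). Returns -EAGAIN
 * when the caller should retry, 0 when the error has been absorbed, or
 * the original error otherwise.
 */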
2263 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err)
2264 {
2265 switch (err) {
2266 default:
2267 printk(KERN_ERR "NFS: %s: unhandled error "
2268 "%d.\n", __func__, err);
2269 fallthrough;
2270 case 0:
2271 case -ENOENT:
2272 case -EAGAIN:
2273 case -ESTALE:
2274 case -ETIMEDOUT:
2275 break;
2276 case -NFS4ERR_BADSESSION:
2277 case -NFS4ERR_BADSLOT:
2278 case -NFS4ERR_BAD_HIGH_SLOT:
2279 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
2280 case -NFS4ERR_DEADSESSION:
2281 return -EAGAIN;
2282 case -NFS4ERR_STALE_CLIENTID:
2283 case -NFS4ERR_STALE_STATEID:
2284 /* Don't recall a delegation if it was lost */
2285 nfs4_schedule_lease_recovery(server->nfs_client);
2286 return -EAGAIN;
2287 case -NFS4ERR_MOVED:
2288 nfs4_schedule_migration_recovery(server);
2289 return -EAGAIN;
2290 case -NFS4ERR_LEASE_MOVED:
2291 nfs4_schedule_lease_moved_recovery(server->nfs_client);
2292 return -EAGAIN;
2293 case -NFS4ERR_DELEG_REVOKED:
2294 case -NFS4ERR_ADMIN_REVOKED:
2295 case -NFS4ERR_EXPIRED:
2296 case -NFS4ERR_BAD_STATEID:
2297 case -NFS4ERR_OPENMODE:
2298 nfs_inode_find_state_and_recover(state->inode,
2299 stateid);
2300 nfs4_schedule_stateid_recovery(server, state);
2301 return -EAGAIN;
2302 case -NFS4ERR_DELAY:
2303 case -NFS4ERR_GRACE:
2304 ssleep(1);
2305 return -EAGAIN;
2306 case -ENOMEM:
2307 case -NFS4ERR_DENIED:
2308 if (fl) {
2309 struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner;
2310 if (lsp)
2311 set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
2312 }
2313 return 0;
2314 }
2315 return err;
2316 }
2317
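/*
 * Re-establish open state before a delegation is returned: replay an OPEN
 * for each share mode that is not already covered by an open stateid,
 * then clear the client's record of the delegation.
 */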
2318 int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
2319 struct nfs4_state *state, const nfs4_stateid *stateid)
2320 {
2321 struct nfs_server *server = NFS_SERVER(state->inode);
2322 struct nfs4_opendata *opendata;
2323 int err = 0;
2324
2325 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2326 NFS4_OPEN_CLAIM_DELEG_CUR_FH);
2327 if (IS_ERR(opendata))
2328 return PTR_ERR(opendata);
2329 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
2330 if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) {
2331 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
2332 if (err)
2333 goto out;
2334 }
2335 if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) {
2336 err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
2337 if (err)
2338 goto out;
2339 }
2340 if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) {
2341 err = nfs4_open_recover_helper(opendata, FMODE_READ);
2342 if (err)
2343 goto out;
2344 }
2345 nfs_state_clear_delegation(state);
2346 out:
2347 nfs4_opendata_put(opendata);
2348 return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
2349 }
2350
2351 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
2352 {
2353 struct nfs4_opendata *data = calldata;
2354
2355 nfs4_setup_sequence(data->o_arg.server->nfs_client,
2356 &data->c_arg.seq_args, &data->c_res.seq_res, task);
2357 }
2358
2359 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
2360 {
2361 struct nfs4_opendata *data = calldata;
2362
2363 nfs40_sequence_done(task, &data->c_res.seq_res);
2364
2365 data->rpc_status = task->tk_status;
2366 if (data->rpc_status == 0) {
2367 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
2368 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2369 renew_lease(data->o_res.server, data->timestamp);
2370 data->rpc_done = true;
2371 }
2372 }
2373
2374 static void nfs4_open_confirm_release(void *calldata)
2375 {
2376 struct nfs4_opendata *data = calldata;
2377 struct nfs4_state *state = NULL;
2378
2379 /* If this request hasn't been cancelled, do nothing */
2380 if (!data->cancelled)
2381 goto out_free;
2382 /* In case of error, no cleanup! */
2383 if (!data->rpc_done)
2384 goto out_free;
2385 state = nfs4_opendata_to_nfs4_state(data);
2386 if (!IS_ERR(state))
2387 nfs4_close_state(state, data->o_arg.fmode);
2388 out_free:
2389 nfs4_opendata_put(data);
2390 }
2391
2392 static const struct rpc_call_ops nfs4_open_confirm_ops = {
2393 .rpc_call_prepare = nfs4_open_confirm_prepare,
2394 .rpc_call_done = nfs4_open_confirm_done,
2395 .rpc_release = nfs4_open_confirm_release,
2396 };
2397
2398 /*
2399 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
2400 */
2401 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
2402 {
2403 struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
2404 struct rpc_task *task;
2405 struct rpc_message msg = {
2406 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
2407 .rpc_argp = &data->c_arg,
2408 .rpc_resp = &data->c_res,
2409 .rpc_cred = data->owner->so_cred,
2410 };
2411 struct rpc_task_setup task_setup_data = {
2412 .rpc_client = server->client,
2413 .rpc_message = &msg,
2414 .callback_ops = &nfs4_open_confirm_ops,
2415 .callback_data = data,
2416 .workqueue = nfsiod_workqueue,
2417 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
2418 };
2419 int status;
2420
2421 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1,
2422 data->is_recover);
2423 kref_get(&data->kref);
2424 data->rpc_done = false;
2425 data->rpc_status = 0;
2426 data->timestamp = jiffies;
2427 task = rpc_run_task(&task_setup_data);
2428 if (IS_ERR(task))
2429 return PTR_ERR(task);
2430 status = rpc_wait_for_completion_task(task);
2431 if (status != 0) {
2432 data->cancelled = true;
2433 smp_wmb();
2434 } else
2435 status = data->rpc_status;
2436 rpc_put_task(task);
2437 return status;
2438 }
2439
2440 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
2441 {
2442 struct nfs4_opendata *data = calldata;
2443 struct nfs4_state_owner *sp = data->owner;
2444 struct nfs_client *clp = sp->so_server->nfs_client;
2445 enum open_claim_type4 claim = data->o_arg.claim;
2446
2447 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
2448 goto out_wait;
2449 /*
2450 * Check if we still need to send an OPEN call, or if we can use
2451 * a delegation instead.
2452 */
2453 if (data->state != NULL) {
2454 struct nfs_delegation *delegation;
2455
2456 if (can_open_cached(data->state, data->o_arg.fmode,
2457 data->o_arg.open_flags, claim))
2458 goto out_no_action;
2459 rcu_read_lock();
2460 delegation = nfs4_get_valid_delegation(data->state->inode);
2461 if (can_open_delegated(delegation, data->o_arg.fmode, claim))
2462 goto unlock_no_action;
2463 rcu_read_unlock();
2464 }
2465 /* Update client id. */
2466 data->o_arg.clientid = clp->cl_clientid;
2467 switch (claim) {
2468 default:
2469 break;
2470 case NFS4_OPEN_CLAIM_PREVIOUS:
2471 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
2472 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
2473 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
2474 fallthrough;
2475 case NFS4_OPEN_CLAIM_FH:
2476 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
2477 }
2478 data->timestamp = jiffies;
2479 if (nfs4_setup_sequence(data->o_arg.server->nfs_client,
2480 &data->o_arg.seq_args,
2481 &data->o_res.seq_res,
2482 task) != 0)
2483 nfs_release_seqid(data->o_arg.seqid);
2484
2485 /* Set the create mode (note dependency on the session type) */
2486 data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
2487 if (data->o_arg.open_flags & O_EXCL) {
2488 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
2489 if (nfs4_has_persistent_session(clp))
2490 data->o_arg.createmode = NFS4_CREATE_GUARDED;
2491 else if (clp->cl_mvops->minor_version > 0)
2492 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
2493 }
2494 return;
2495 unlock_no_action:
2496 trace_nfs4_cached_open(data->state);
2497 rcu_read_unlock();
2498 out_no_action:
2499 task->tk_action = NULL;
2500 out_wait:
2501 nfs4_sequence_done(task, &data->o_res.seq_res);
2502 }
2503
2504 static void nfs4_open_done(struct rpc_task *task, void *calldata)
2505 {
2506 struct nfs4_opendata *data = calldata;
2507
2508 data->rpc_status = task->tk_status;
2509
2510 if (!nfs4_sequence_process(task, &data->o_res.seq_res))
2511 return;
2512
2513 if (task->tk_status == 0) {
2514 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
2515 switch (data->o_res.f_attr->mode & S_IFMT) {
2516 case S_IFREG:
2517 break;
2518 case S_IFLNK:
2519 data->rpc_status = -ELOOP;
2520 break;
2521 case S_IFDIR:
2522 data->rpc_status = -EISDIR;
2523 break;
2524 default:
2525 data->rpc_status = -ENOTDIR;
2526 }
2527 }
2528 renew_lease(data->o_res.server, data->timestamp);
2529 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
2530 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2531 }
2532 data->rpc_done = true;
2533 }
2534
2535 static void nfs4_open_release(void *calldata)
2536 {
2537 struct nfs4_opendata *data = calldata;
2538 struct nfs4_state *state = NULL;
2539
2540 /* If this request hasn't been cancelled, do nothing */
2541 if (!data->cancelled)
2542 goto out_free;
2543 /* In case of error, no cleanup! */
2544 if (data->rpc_status != 0 || !data->rpc_done)
2545 goto out_free;
2546 /* In case we need an open_confirm, no cleanup! */
2547 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
2548 goto out_free;
2549 state = nfs4_opendata_to_nfs4_state(data);
2550 if (!IS_ERR(state))
2551 nfs4_close_state(state, data->o_arg.fmode);
2552 out_free:
2553 nfs4_opendata_put(data);
2554 }
2555
2556 static const struct rpc_call_ops nfs4_open_ops = {
2557 .rpc_call_prepare = nfs4_open_prepare,
2558 .rpc_call_done = nfs4_open_done,
2559 .rpc_release = nfs4_open_release,
2560 };
2561
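/*
 * Issue the OPEN RPC asynchronously and wait for it to complete. A NULL
 * @ctx indicates a state recovery open, in which case the call is made
 * privileged and bound by the usual RPC timeouts, and no LAYOUTGET is
 * piggy-backed on the OPEN.
 */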
2562 static int nfs4_run_open_task(struct nfs4_opendata *data,
2563 struct nfs_open_context *ctx)
2564 {
2565 struct inode *dir = d_inode(data->dir);
2566 struct nfs_server *server = NFS_SERVER(dir);
2567 struct nfs_openargs *o_arg = &data->o_arg;
2568 struct nfs_openres *o_res = &data->o_res;
2569 struct rpc_task *task;
2570 struct rpc_message msg = {
2571 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
2572 .rpc_argp = o_arg,
2573 .rpc_resp = o_res,
2574 .rpc_cred = data->owner->so_cred,
2575 };
2576 struct rpc_task_setup task_setup_data = {
2577 .rpc_client = server->client,
2578 .rpc_message = &msg,
2579 .callback_ops = &nfs4_open_ops,
2580 .callback_data = data,
2581 .workqueue = nfsiod_workqueue,
2582 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
2583 };
2584 int status;
2585
2586 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE))
2587 task_setup_data.flags |= RPC_TASK_MOVEABLE;
2588
2589 kref_get(&data->kref);
2590 data->rpc_done = false;
2591 data->rpc_status = 0;
2592 data->cancelled = false;
2593 data->is_recover = false;
2594 if (!ctx) {
2595 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1);
2596 data->is_recover = true;
2597 task_setup_data.flags |= RPC_TASK_TIMEOUT;
2598 } else {
2599 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0);
2600 pnfs_lgopen_prepare(data, ctx);
2601 }
2602 task = rpc_run_task(&task_setup_data);
2603 if (IS_ERR(task))
2604 return PTR_ERR(task);
2605 status = rpc_wait_for_completion_task(task);
2606 if (status != 0) {
2607 data->cancelled = true;
2608 smp_wmb();
2609 } else
2610 status = data->rpc_status;
2611 rpc_put_task(task);
2612
2613 return status;
2614 }
2615
2616 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
2617 {
2618 struct inode *dir = d_inode(data->dir);
2619 struct nfs_openres *o_res = &data->o_res;
2620 int status;
2621
2622 status = nfs4_run_open_task(data, NULL);
2623 if (status != 0 || !data->rpc_done)
2624 return status;
2625
2626 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
2627
2628 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM)
2629 status = _nfs4_proc_open_confirm(data);
2630
2631 return status;
2632 }
2633
2634 /*
2635 * Additional permission checks in order to distinguish between an
2636 * open for read, and an open for execute. This works around the
2637 * fact that NFSv4 OPEN treats read and execute permissions as being
2638 * the same.
2639 * Note that in the non-execute case, we want to turn off permission
2640 * checking if we just created a new file (POSIX open() semantics).
2641 */
2642 static int nfs4_opendata_access(const struct cred *cred,
2643 struct nfs4_opendata *opendata,
2644 struct nfs4_state *state, fmode_t fmode,
2645 int openflags)
2646 {
2647 struct nfs_access_entry cache;
2648 u32 mask, flags;
2649
2650 /* access call failed or for some reason the server doesn't
2651 * support any access modes -- defer access call until later */
2652 if (opendata->o_res.access_supported == 0)
2653 return 0;
2654
2655 mask = 0;
2656 /*
2657 * Use openflags to check for exec, because fmode won't
2658 * always have FMODE_EXEC set when file open for exec.
2659 */
2660 if (openflags & __FMODE_EXEC) {
2661 /* ONLY check for exec rights */
2662 if (S_ISDIR(state->inode->i_mode))
2663 mask = NFS4_ACCESS_LOOKUP;
2664 else
2665 mask = NFS4_ACCESS_EXECUTE;
2666 } else if ((fmode & FMODE_READ) && !opendata->file_created)
2667 mask = NFS4_ACCESS_READ;
2668
2669 cache.cred = cred;
2670 nfs_access_set_mask(&cache, opendata->o_res.access_result);
2671 nfs_access_add_cache(state->inode, &cache);
2672
2673 flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP;
2674 if ((mask & ~cache.mask & flags) == 0)
2675 return 0;
2676
2677 return -EACCES;
2678 }
2679
2680 /*
2681 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
2682 */
2683 static int _nfs4_proc_open(struct nfs4_opendata *data,
2684 struct nfs_open_context *ctx)
2685 {
2686 struct inode *dir = d_inode(data->dir);
2687 struct nfs_server *server = NFS_SERVER(dir);
2688 struct nfs_openargs *o_arg = &data->o_arg;
2689 struct nfs_openres *o_res = &data->o_res;
2690 int status;
2691
2692 status = nfs4_run_open_task(data, ctx);
2693 if (!data->rpc_done)
2694 return status;
2695 if (status != 0) {
2696 if (status == -NFS4ERR_BADNAME &&
2697 !(o_arg->open_flags & O_CREAT))
2698 return -ENOENT;
2699 return status;
2700 }
2701
2702 nfs_fattr_map_and_free_names(server, &data->f_attr);
2703
2704 if (o_arg->open_flags & O_CREAT) {
2705 if (o_arg->open_flags & O_EXCL)
2706 data->file_created = true;
2707 else if (o_res->cinfo.before != o_res->cinfo.after)
2708 data->file_created = true;
2709 if (data->file_created ||
2710 inode_peek_iversion_raw(dir) != o_res->cinfo.after)
2711 nfs4_update_changeattr(dir, &o_res->cinfo,
2712 o_res->f_attr->time_start,
2713 NFS_INO_INVALID_DATA);
2714 }
2715 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2716 server->caps &= ~NFS_CAP_POSIX_LOCK;
2717 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2718 status = _nfs4_proc_open_confirm(data);
2719 if (status != 0)
2720 return status;
2721 }
2722 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) {
2723 nfs4_sequence_free_slot(&o_res->seq_res);
2724 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr,
2725 o_res->f_label, NULL);
2726 }
2727 return 0;
2728 }
2729
2730 /*
2731 * OPEN_EXPIRED:
2732 * reclaim state on the server after a network partition.
2733 * Assumes caller holds the appropriate lock
2734 */
2735 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2736 {
2737 struct nfs4_opendata *opendata;
2738 int ret;
2739
2740 opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_FH);
2741 if (IS_ERR(opendata))
2742 return PTR_ERR(opendata);
2743 /*
2744 * We're not recovering a delegation, so ask for no delegation.
2745 * Otherwise the recovery thread could deadlock with an outstanding
2746 * delegation return.
2747 */
2748 opendata->o_arg.open_flags = O_DIRECT;
2749 ret = nfs4_open_recover(opendata, state);
2750 if (ret == -ESTALE)
2751 d_drop(ctx->dentry);
2752 nfs4_opendata_put(opendata);
2753 return ret;
2754 }
2755
2756 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2757 {
2758 struct nfs_server *server = NFS_SERVER(state->inode);
2759 struct nfs4_exception exception = { };
2760 int err;
2761
2762 do {
2763 err = _nfs4_open_expired(ctx, state);
2764 trace_nfs4_open_expired(ctx, 0, err);
2765 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2766 continue;
2767 switch (err) {
2768 default:
2769 goto out;
2770 case -NFS4ERR_GRACE:
2771 case -NFS4ERR_DELAY:
2772 nfs4_handle_exception(server, err, &exception);
2773 err = 0;
2774 }
2775 } while (exception.retry);
2776 out:
2777 return err;
2778 }
2779
2780 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2781 {
2782 struct nfs_open_context *ctx;
2783 int ret;
2784
2785 ctx = nfs4_state_find_open_context(state);
2786 if (IS_ERR(ctx))
2787 return -EAGAIN;
2788 ret = nfs4_do_open_expired(ctx, state);
2789 put_nfs_open_context(ctx);
2790 return ret;
2791 }
2792
2793 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state,
2794 const nfs4_stateid *stateid)
2795 {
2796 nfs_remove_bad_delegation(state->inode, stateid);
2797 nfs_state_clear_delegation(state);
2798 }
2799
2800 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2801 {
2802 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2803 nfs_finish_clear_delegation_stateid(state, NULL);
2804 }
2805
2806 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2807 {
2808 /* NFSv4.0 doesn't allow for delegation recovery on open expire */
2809 nfs40_clear_delegation_stateid(state);
2810 nfs_state_clear_open_state_flags(state);
2811 return nfs4_open_expired(sp, state);
2812 }
2813
2814 static int nfs40_test_and_free_expired_stateid(struct nfs_server *server,
2815 nfs4_stateid *stateid,
2816 const struct cred *cred)
2817 {
2818 return -NFS4ERR_BAD_STATEID;
2819 }
2820
2821 #if defined(CONFIG_NFS_V4_1)
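/*
 * Use TEST_STATEID to decide whether @stateid has expired or been revoked,
 * issuing FREE_STATEID to acknowledge revoked state. Returns
 * -NFS4ERR_EXPIRED if the stateid was freed, -NFS4ERR_BAD_STATEID if it
 * was never valid, or the TEST_STATEID result otherwise.
 */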
2822 static int nfs41_test_and_free_expired_stateid(struct nfs_server *server,
2823 nfs4_stateid *stateid,
2824 const struct cred *cred)
2825 {
2826 int status;
2827
2828 switch (stateid->type) {
2829 default:
2830 break;
2831 case NFS4_INVALID_STATEID_TYPE:
2832 case NFS4_SPECIAL_STATEID_TYPE:
2833 return -NFS4ERR_BAD_STATEID;
2834 case NFS4_REVOKED_STATEID_TYPE:
2835 goto out_free;
2836 }
2837
2838 status = nfs41_test_stateid(server, stateid, cred);
2839 switch (status) {
2840 case -NFS4ERR_EXPIRED:
2841 case -NFS4ERR_ADMIN_REVOKED:
2842 case -NFS4ERR_DELEG_REVOKED:
2843 break;
2844 default:
2845 return status;
2846 }
2847 out_free:
2848 /* Ack the revoked state to the server */
2849 nfs41_free_stateid(server, stateid, cred, true);
2850 return -NFS4ERR_EXPIRED;
2851 }
2852
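/*
 * Check whether the delegation stateid held for this inode has expired or
 * been revoked on the server, and if so clear the client's record of it.
 * Returns NFS_OK once the delegation state no longer needs recovery.
 */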
2853 static int nfs41_check_delegation_stateid(struct nfs4_state *state)
2854 {
2855 struct nfs_server *server = NFS_SERVER(state->inode);
2856 nfs4_stateid stateid;
2857 struct nfs_delegation *delegation;
2858 const struct cred *cred = NULL;
2859 int status, ret = NFS_OK;
2860
2861 /* Get the delegation credential for use by test/free_stateid */
2862 rcu_read_lock();
2863 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2864 if (delegation == NULL) {
2865 rcu_read_unlock();
2866 nfs_state_clear_delegation(state);
2867 return NFS_OK;
2868 }
2869
2870 spin_lock(&delegation->lock);
2871 nfs4_stateid_copy(&stateid, &delegation->stateid);
2872
2873 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
2874 &delegation->flags)) {
2875 spin_unlock(&delegation->lock);
2876 rcu_read_unlock();
2877 return NFS_OK;
2878 }
2879
2880 if (delegation->cred)
2881 cred = get_cred(delegation->cred);
2882 spin_unlock(&delegation->lock);
2883 rcu_read_unlock();
2884 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
2885 trace_nfs4_test_delegation_stateid(state, NULL, status);
2886 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
2887 nfs_finish_clear_delegation_stateid(state, &stateid);
2888 else
2889 ret = status;
2890
2891 put_cred(cred);
2892 return ret;
2893 }
2894
2895 static void nfs41_delegation_recover_stateid(struct nfs4_state *state)
2896 {
2897 nfs4_stateid tmp;
2898
2899 if (test_bit(NFS_DELEGATED_STATE, &state->flags) &&
2900 nfs4_copy_delegation_stateid(state->inode, state->state,
2901 &tmp, NULL) &&
2902 nfs4_stateid_match_other(&state->stateid, &tmp))
2903 nfs_state_set_delegation(state, &tmp, state->state);
2904 else
2905 nfs_state_clear_delegation(state);
2906 }
2907
2908 /**
2909 * nfs41_check_expired_locks - possibly free a lock stateid
2910 *
2911 * @state: NFSv4 state for an inode
2912 *
2913 * Returns NFS_OK if recovery for this stateid is now finished.
2914 * Otherwise a negative NFS4ERR value is returned.
2915 */
2916 static int nfs41_check_expired_locks(struct nfs4_state *state)
2917 {
2918 int status, ret = NFS_OK;
2919 struct nfs4_lock_state *lsp, *prev = NULL;
2920 struct nfs_server *server = NFS_SERVER(state->inode);
2921
2922 if (!test_bit(LK_STATE_IN_USE, &state->flags))
2923 goto out;
2924
2925 spin_lock(&state->state_lock);
2926 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
2927 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
2928 const struct cred *cred = lsp->ls_state->owner->so_cred;
2929
2930 refcount_inc(&lsp->ls_count);
2931 spin_unlock(&state->state_lock);
2932
2933 nfs4_put_lock_state(prev);
2934 prev = lsp;
2935
2936 status = nfs41_test_and_free_expired_stateid(server,
2937 &lsp->ls_stateid,
2938 cred);
2939 trace_nfs4_test_lock_stateid(state, lsp, status);
2940 if (status == -NFS4ERR_EXPIRED ||
2941 status == -NFS4ERR_BAD_STATEID) {
2942 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
2943 lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE;
2944 if (!recover_lost_locks)
2945 set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
2946 } else if (status != NFS_OK) {
2947 ret = status;
2948 nfs4_put_lock_state(prev);
2949 goto out;
2950 }
2951 spin_lock(&state->state_lock);
2952 }
2953 }
2954 spin_unlock(&state->state_lock);
2955 nfs4_put_lock_state(prev);
2956 out:
2957 return ret;
2958 }
2959
2960 /**
2961 * nfs41_check_open_stateid - possibly free an open stateid
2962 *
2963 * @state: NFSv4 state for an inode
2964 *
2965 * Returns NFS_OK if recovery for this stateid is now finished.
2966 * Otherwise a negative NFS4ERR value is returned.
2967 */
2968 static int nfs41_check_open_stateid(struct nfs4_state *state)
2969 {
2970 struct nfs_server *server = NFS_SERVER(state->inode);
2971 nfs4_stateid *stateid = &state->open_stateid;
2972 const struct cred *cred = state->owner->so_cred;
2973 int status;
2974
2975 if (test_bit(NFS_OPEN_STATE, &state->flags) == 0)
2976 return -NFS4ERR_BAD_STATEID;
2977 status = nfs41_test_and_free_expired_stateid(server, stateid, cred);
2978 trace_nfs4_test_open_stateid(state, NULL, status);
2979 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) {
2980 nfs_state_clear_open_state_flags(state);
2981 stateid->type = NFS4_INVALID_STATEID_TYPE;
2982 return status;
2983 }
2984 if (nfs_open_stateid_recover_openmode(state))
2985 return -NFS4ERR_OPENMODE;
2986 return NFS_OK;
2987 }
2988
2989 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2990 {
2991 int status;
2992
2993 status = nfs41_check_delegation_stateid(state);
2994 if (status != NFS_OK)
2995 return status;
2996 nfs41_delegation_recover_stateid(state);
2997
2998 status = nfs41_check_expired_locks(state);
2999 if (status != NFS_OK)
3000 return status;
3001 status = nfs41_check_open_stateid(state);
3002 if (status != NFS_OK)
3003 status = nfs4_open_expired(sp, state);
3004 return status;
3005 }
3006 #endif
3007
3008 /*
3009 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
3010 * fields corresponding to attributes that were used to store the verifier.
3011 * Make sure we clobber those fields in the later setattr call
3012 */
3013 static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
3014 struct iattr *sattr, struct nfs4_label **label)
3015 {
3016 const __u32 *bitmask = opendata->o_arg.server->exclcreat_bitmask;
3017 __u32 attrset[3];
3018 unsigned ret;
3019 unsigned i;
3020
3021 for (i = 0; i < ARRAY_SIZE(attrset); i++) {
3022 attrset[i] = opendata->o_res.attrset[i];
3023 if (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE4_1)
3024 attrset[i] &= ~bitmask[i];
3025 }
3026
3027 ret = (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE) ?
3028 sattr->ia_valid : 0;
3029
3030 if ((attrset[1] & (FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET))) {
3031 if (sattr->ia_valid & ATTR_ATIME_SET)
3032 ret |= ATTR_ATIME_SET;
3033 else
3034 ret |= ATTR_ATIME;
3035 }
3036
3037 if ((attrset[1] & (FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET))) {
3038 if (sattr->ia_valid & ATTR_MTIME_SET)
3039 ret |= ATTR_MTIME_SET;
3040 else
3041 ret |= ATTR_MTIME;
3042 }
3043
3044 if (!(attrset[2] & FATTR4_WORD2_SECURITY_LABEL))
3045 *label = NULL;
3046 return ret;
3047 }
3048
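/*
 * Send the OPEN RPC, turn the result into a referenced nfs4_state attached
 * to @ctx, splice the dentry if this OPEN instantiated it, and perform the
 * deferred ACCESS check before declaring the open successful.
 */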
3049 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
3050 int flags, struct nfs_open_context *ctx)
3051 {
3052 struct nfs4_state_owner *sp = opendata->owner;
3053 struct nfs_server *server = sp->so_server;
3054 struct dentry *dentry;
3055 struct nfs4_state *state;
3056 fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx);
3057 struct inode *dir = d_inode(opendata->dir);
3058 unsigned long dir_verifier;
3059 unsigned int seq;
3060 int ret;
3061
3062 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
3063 dir_verifier = nfs_save_change_attribute(dir);
3064
3065 ret = _nfs4_proc_open(opendata, ctx);
3066 if (ret != 0)
3067 goto out;
3068
3069 state = _nfs4_opendata_to_nfs4_state(opendata);
3070 ret = PTR_ERR(state);
3071 if (IS_ERR(state))
3072 goto out;
3073 ctx->state = state;
3074 if (server->caps & NFS_CAP_POSIX_LOCK)
3075 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
3076 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK)
3077 set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags);
3078
3079 dentry = opendata->dentry;
3080 if (d_really_is_negative(dentry)) {
3081 struct dentry *alias;
3082 d_drop(dentry);
3083 alias = d_exact_alias(dentry, state->inode);
3084 if (!alias)
3085 alias = d_splice_alias(igrab(state->inode), dentry);
3086 /* d_splice_alias() can't fail here - it's a non-directory */
3087 if (alias) {
3088 dput(ctx->dentry);
3089 ctx->dentry = dentry = alias;
3090 }
3091 }
3092
3093 switch(opendata->o_arg.claim) {
3094 default:
3095 break;
3096 case NFS4_OPEN_CLAIM_NULL:
3097 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
3098 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
3099 if (!opendata->rpc_done)
3100 break;
3101 if (opendata->o_res.delegation_type != 0)
3102 dir_verifier = nfs_save_change_attribute(dir);
3103 nfs_set_verifier(dentry, dir_verifier);
3104 }
3105
3106 /* Parse layoutget results before we check for access */
3107 pnfs_parse_lgopen(state->inode, opendata->lgp, ctx);
3108
3109 ret = nfs4_opendata_access(sp->so_cred, opendata, state,
3110 acc_mode, flags);
3111 if (ret != 0)
3112 goto out;
3113
3114 if (d_inode(dentry) == state->inode) {
3115 nfs_inode_attach_open_context(ctx);
3116 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
3117 nfs4_schedule_stateid_recovery(server, state);
3118 }
3119
3120 out:
3121 if (!opendata->cancelled) {
3122 if (opendata->lgp) {
3123 nfs4_lgopen_release(opendata->lgp);
3124 opendata->lgp = NULL;
3125 }
3126 nfs4_sequence_free_slot(&opendata->o_res.seq_res);
3127 }
3128 return ret;
3129 }
3130
3131 /*
3132 * Returns a referenced nfs4_state
3133 */
3134 static int _nfs4_do_open(struct inode *dir,
3135 struct nfs_open_context *ctx,
3136 int flags,
3137 const struct nfs4_open_createattrs *c,
3138 int *opened)
3139 {
3140 struct nfs4_state_owner *sp;
3141 struct nfs4_state *state = NULL;
3142 struct nfs_server *server = NFS_SERVER(dir);
3143 struct nfs4_opendata *opendata;
3144 struct dentry *dentry = ctx->dentry;
3145 const struct cred *cred = ctx->cred;
3146 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
3147 fmode_t fmode = _nfs4_ctx_to_openmode(ctx);
3148 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
3149 struct iattr *sattr = c->sattr;
3150 struct nfs4_label *label = c->label;
3151 struct nfs4_label *olabel = NULL;
3152 int status;
3153
3154 /* Protect against reboot recovery conflicts */
3155 status = -ENOMEM;
3156 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
3157 if (sp == NULL) {
3158 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
3159 goto out_err;
3160 }
3161 status = nfs4_client_recover_expired_lease(server->nfs_client);
3162 if (status != 0)
3163 goto err_put_state_owner;
3164 if (d_really_is_positive(dentry))
3165 nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
3166 status = -ENOMEM;
3167 if (d_really_is_positive(dentry))
3168 claim = NFS4_OPEN_CLAIM_FH;
3169 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags,
3170 c, claim, GFP_KERNEL);
3171 if (opendata == NULL)
3172 goto err_put_state_owner;
3173
3174 if (label) {
3175 olabel = nfs4_label_alloc(server, GFP_KERNEL);
3176 if (IS_ERR(olabel)) {
3177 status = PTR_ERR(olabel);
3178 goto err_opendata_put;
3179 }
3180 }
3181
3182 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
3183 if (!opendata->f_attr.mdsthreshold) {
3184 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
3185 if (!opendata->f_attr.mdsthreshold)
3186 goto err_free_label;
3187 }
3188 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
3189 }
3190 if (d_really_is_positive(dentry))
3191 opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
3192
3193 status = _nfs4_open_and_get_state(opendata, flags, ctx);
3194 if (status != 0)
3195 goto err_free_label;
3196 state = ctx->state;
3197
3198 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
3199 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
3200 unsigned attrs = nfs4_exclusive_attrset(opendata, sattr, &label);
3201 /*
3202 * send the create attributes that were not set by OPEN
3203 * via an extra SETATTR.
3204 */
3205 if (attrs || label) {
3206 unsigned ia_old = sattr->ia_valid;
3207
3208 sattr->ia_valid = attrs;
3209 nfs_fattr_init(opendata->o_res.f_attr);
3210 status = nfs4_do_setattr(state->inode, cred,
3211 opendata->o_res.f_attr, sattr,
3212 ctx, label, olabel);
3213 if (status == 0) {
3214 nfs_setattr_update_inode(state->inode, sattr,
3215 opendata->o_res.f_attr);
3216 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
3217 }
3218 sattr->ia_valid = ia_old;
3219 }
3220 }
3221 if (opened && opendata->file_created)
3222 *opened = 1;
3223
3224 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
3225 *ctx_th = opendata->f_attr.mdsthreshold;
3226 opendata->f_attr.mdsthreshold = NULL;
3227 }
3228
3229 nfs4_label_free(olabel);
3230
3231 nfs4_opendata_put(opendata);
3232 nfs4_put_state_owner(sp);
3233 return 0;
3234 err_free_label:
3235 nfs4_label_free(olabel);
3236 err_opendata_put:
3237 nfs4_opendata_put(opendata);
3238 err_put_state_owner:
3239 nfs4_put_state_owner(sp);
3240 out_err:
3241 return status;
3242 }
3243
3244
3245 static struct nfs4_state *nfs4_do_open(struct inode *dir,
3246 struct nfs_open_context *ctx,
3247 int flags,
3248 struct iattr *sattr,
3249 struct nfs4_label *label,
3250 int *opened)
3251 {
3252 struct nfs_server *server = NFS_SERVER(dir);
3253 struct nfs4_exception exception = {
3254 .interruptible = true,
3255 };
3256 struct nfs4_state *res;
3257 struct nfs4_open_createattrs c = {
3258 .label = label,
3259 .sattr = sattr,
3260 .verf = {
3261 [0] = (__u32)jiffies,
3262 [1] = (__u32)current->pid,
3263 },
3264 };
3265 int status;
3266
3267 do {
3268 status = _nfs4_do_open(dir, ctx, flags, &c, opened);
3269 res = ctx->state;
3270 trace_nfs4_open_file(ctx, flags, status);
3271 if (status == 0)
3272 break;
3273 /* NOTE: BAD_SEQID means the server and client disagree about the
3274 * book-keeping w.r.t. state-changing operations
3275 * (OPEN/CLOSE/LOCK/LOCKU...)
3276 * It is actually a sign of a bug on the client or on the server.
3277 *
3278 * If we receive a BAD_SEQID error in the particular case of
3279 * doing an OPEN, we assume that nfs_increment_open_seqid() will
3280 * have unhashed the old state_owner for us, and that we can
3281 * therefore safely retry using a new one. We should still warn
3282 * the user though...
3283 */
3284 if (status == -NFS4ERR_BAD_SEQID) {
3285 pr_warn_ratelimited("NFS: v4 server %s "
3286 " returned a bad sequence-id error!\n",
3287 NFS_SERVER(dir)->nfs_client->cl_hostname);
3288 exception.retry = 1;
3289 continue;
3290 }
3291 /*
3292 * BAD_STATEID on OPEN means that the server cancelled our
3293 * state before it received the OPEN_CONFIRM.
3294 * Recover by retrying the request as per the discussion
3295 * on Page 181 of RFC3530.
3296 */
3297 if (status == -NFS4ERR_BAD_STATEID) {
3298 exception.retry = 1;
3299 continue;
3300 }
3301 if (status == -NFS4ERR_EXPIRED) {
3302 nfs4_schedule_lease_recovery(server->nfs_client);
3303 exception.retry = 1;
3304 continue;
3305 }
3306 if (status == -EAGAIN) {
3307 /* We must have found a delegation */
3308 exception.retry = 1;
3309 continue;
3310 }
3311 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
3312 continue;
3313 res = ERR_PTR(nfs4_handle_exception(server,
3314 status, &exception));
3315 } while (exception.retry);
3316 return res;
3317 }
3318
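/*
 * Issue a single SETATTR RPC. A write delegation or open/lock stateid is
 * used only when the request changes the file size (servers should apply
 * open mode checks only to size changes); otherwise the zero stateid is
 * used.
 */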
3319 static int _nfs4_do_setattr(struct inode *inode,
3320 struct nfs_setattrargs *arg,
3321 struct nfs_setattrres *res,
3322 const struct cred *cred,
3323 struct nfs_open_context *ctx)
3324 {
3325 struct nfs_server *server = NFS_SERVER(inode);
3326 struct rpc_message msg = {
3327 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
3328 .rpc_argp = arg,
3329 .rpc_resp = res,
3330 .rpc_cred = cred,
3331 };
3332 const struct cred *delegation_cred = NULL;
3333 unsigned long timestamp = jiffies;
3334 bool truncate;
3335 int status;
3336
3337 nfs_fattr_init(res->fattr);
3338
3339 /* Servers should only apply open mode checks for file size changes */
3340 truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false;
3341 if (!truncate) {
3342 nfs4_inode_make_writeable(inode);
3343 goto zero_stateid;
3344 }
3345
3346 if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
3347 /* Use that stateid */
3348 } else if (ctx != NULL && ctx->state) {
3349 struct nfs_lock_context *l_ctx;
3350 if (!nfs4_valid_open_stateid(ctx->state))
3351 return -EBADF;
3352 l_ctx = nfs_get_lock_context(ctx);
3353 if (IS_ERR(l_ctx))
3354 return PTR_ERR(l_ctx);
3355 status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx,
3356 &arg->stateid, &delegation_cred);
3357 nfs_put_lock_context(l_ctx);
3358 if (status == -EIO)
3359 return -EBADF;
3360 else if (status == -EAGAIN)
3361 goto zero_stateid;
3362 } else {
3363 zero_stateid:
3364 nfs4_stateid_copy(&arg->stateid, &zero_stateid);
3365 }
3366 if (delegation_cred)
3367 msg.rpc_cred = delegation_cred;
3368
3369 status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1);
3370
3371 put_cred(delegation_cred);
3372 if (status == 0 && ctx != NULL)
3373 renew_lease(server, timestamp);
3374 trace_nfs4_setattr(inode, &arg->stateid, status);
3375 return status;
3376 }
3377
3378 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
3379 struct nfs_fattr *fattr, struct iattr *sattr,
3380 struct nfs_open_context *ctx, struct nfs4_label *ilabel,
3381 struct nfs4_label *olabel)
3382 {
3383 struct nfs_server *server = NFS_SERVER(inode);
3384 __u32 bitmask[NFS4_BITMASK_SZ];
3385 struct nfs4_state *state = ctx ? ctx->state : NULL;
3386 struct nfs_setattrargs arg = {
3387 .fh = NFS_FH(inode),
3388 .iap = sattr,
3389 .server = server,
3390 .bitmask = bitmask,
3391 .label = ilabel,
3392 };
3393 struct nfs_setattrres res = {
3394 .fattr = fattr,
3395 .label = olabel,
3396 .server = server,
3397 };
3398 struct nfs4_exception exception = {
3399 .state = state,
3400 .inode = inode,
3401 .stateid = &arg.stateid,
3402 };
3403 unsigned long adjust_flags = NFS_INO_INVALID_CHANGE;
3404 int err;
3405
3406 if (sattr->ia_valid & (ATTR_MODE | ATTR_KILL_SUID | ATTR_KILL_SGID))
3407 adjust_flags |= NFS_INO_INVALID_MODE;
3408 if (sattr->ia_valid & (ATTR_UID | ATTR_GID))
3409 adjust_flags |= NFS_INO_INVALID_OTHER;
3410
3411 do {
3412 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, olabel),
3413 inode, adjust_flags);
3414
3415 err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx);
3416 switch (err) {
3417 case -NFS4ERR_OPENMODE:
3418 if (!(sattr->ia_valid & ATTR_SIZE)) {
3419 pr_warn_once("NFSv4: server %s is incorrectly "
3420 "applying open mode checks to "
3421 "a SETATTR that is not "
3422 "changing file size.\n",
3423 server->nfs_client->cl_hostname);
3424 }
3425 if (state && !(state->state & FMODE_WRITE)) {
3426 err = -EBADF;
3427 if (sattr->ia_valid & ATTR_OPEN)
3428 err = -EACCES;
3429 goto out;
3430 }
3431 }
3432 err = nfs4_handle_exception(server, err, &exception);
3433 } while (exception.retry);
3434 out:
3435 return err;
3436 }
3437
3438 static bool
3439 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
3440 {
3441 if (inode == NULL || !nfs_have_layout(inode))
3442 return false;
3443
3444 return pnfs_wait_on_layoutreturn(inode, task);
3445 }
3446
3447 /*
3448 * Update the seqid of an open stateid
3449 */
3450 static void nfs4_sync_open_stateid(nfs4_stateid *dst,
3451 struct nfs4_state *state)
3452 {
3453 __be32 seqid_open;
3454 u32 dst_seqid;
3455 int seq;
3456
3457 for (;;) {
3458 if (!nfs4_valid_open_stateid(state))
3459 break;
3460 seq = read_seqbegin(&state->seqlock);
3461 if (!nfs4_state_match_open_stateid_other(state, dst)) {
3462 nfs4_stateid_copy(dst, &state->open_stateid);
3463 if (read_seqretry(&state->seqlock, seq))
3464 continue;
3465 break;
3466 }
3467 seqid_open = state->open_stateid.seqid;
3468 if (read_seqretry(&state->seqlock, seq))
3469 continue;
3470
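/*
 * The signed 32-bit difference handles seqid wraparound, e.g.
 * dst_seqid == 0xfffffffe vs seqid_open == 0x1 yields a negative
 * result, so the wrapped (newer) open_stateid seqid still wins.
 */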
3471 dst_seqid = be32_to_cpu(dst->seqid);
3472 if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0)
3473 dst->seqid = seqid_open;
3474 break;
3475 }
3476 }
3477
3478 /*
3479 * Update the seqid of an open stateid after receiving
3480 * NFS4ERR_OLD_STATEID
3481 */
3482 static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
3483 struct nfs4_state *state)
3484 {
3485 __be32 seqid_open;
3486 u32 dst_seqid;
3487 bool ret;
3488 int seq, status = -EAGAIN;
3489 DEFINE_WAIT(wait);
3490
3491 for (;;) {
3492 ret = false;
3493 if (!nfs4_valid_open_stateid(state))
3494 break;
3495 seq = read_seqbegin(&state->seqlock);
3496 if (!nfs4_state_match_open_stateid_other(state, dst)) {
3497 if (read_seqretry(&state->seqlock, seq))
3498 continue;
3499 break;
3500 }
3501
3502 write_seqlock(&state->seqlock);
3503 seqid_open = state->open_stateid.seqid;
3504
3505 dst_seqid = be32_to_cpu(dst->seqid);
3506
3507 /* Did another OPEN bump the state's seqid? try again: */
3508 if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) {
3509 dst->seqid = seqid_open;
3510 write_sequnlock(&state->seqlock);
3511 ret = true;
3512 break;
3513 }
3514
3515 /* server says we're behind but we haven't seen the update yet */
3516 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
3517 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
3518 write_sequnlock(&state->seqlock);
3519 trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
3520
3521 if (fatal_signal_pending(current))
3522 status = -EINTR;
3523 else
3524 if (schedule_timeout(5*HZ) != 0)
3525 status = 0;
3526
3527 finish_wait(&state->waitq, &wait);
3528
3529 if (!status)
3530 continue;
3531 if (status == -EINTR)
3532 break;
3533
3534 /* we slept the whole 5 seconds, we must have lost a seqid */
3535 dst->seqid = cpu_to_be32(dst_seqid + 1);
3536 ret = true;
3537 break;
3538 }
3539
3540 return ret;
3541 }
3542
3543 struct nfs4_closedata {
3544 struct inode *inode;
3545 struct nfs4_state *state;
3546 struct nfs_closeargs arg;
3547 struct nfs_closeres res;
3548 struct {
3549 struct nfs4_layoutreturn_args arg;
3550 struct nfs4_layoutreturn_res res;
3551 struct nfs4_xdr_opaque_data ld_private;
3552 u32 roc_barrier;
3553 bool roc;
3554 } lr;
3555 struct nfs_fattr fattr;
3556 unsigned long timestamp;
3557 };
3558
3559 static void nfs4_free_closedata(void *data)
3560 {
3561 struct nfs4_closedata *calldata = data;
3562 struct nfs4_state_owner *sp = calldata->state->owner;
3563 struct super_block *sb = calldata->state->inode->i_sb;
3564
3565 if (calldata->lr.roc)
3566 pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res,
3567 calldata->res.lr_ret);
3568 nfs4_put_open_state(calldata->state);
3569 nfs_free_seqid(calldata->arg.seqid);
3570 nfs4_put_state_owner(sp);
3571 nfs_sb_deactive(sb);
3572 kfree(calldata);
3573 }
3574
3575 static void nfs4_close_done(struct rpc_task *task, void *data)
3576 {
3577 struct nfs4_closedata *calldata = data;
3578 struct nfs4_state *state = calldata->state;
3579 struct nfs_server *server = NFS_SERVER(calldata->inode);
3580 nfs4_stateid *res_stateid = NULL;
3581 struct nfs4_exception exception = {
3582 .state = state,
3583 .inode = calldata->inode,
3584 .stateid = &calldata->arg.stateid,
3585 };
3586
3587 dprintk("%s: begin!\n", __func__);
3588 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
3589 return;
3590 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
3591
3592 /* Handle Layoutreturn errors */
3593 if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res,
3594 &calldata->res.lr_ret) == -EAGAIN)
3595 goto out_restart;
3596
3597 /* We are done with the inode, and in the process of freeing
3598 * the state_owner. Keep this data around to process errors.
3599 */
3600 switch (task->tk_status) {
3601 case 0:
3602 res_stateid = &calldata->res.stateid;
3603 renew_lease(server, calldata->timestamp);
3604 break;
3605 case -NFS4ERR_ACCESS:
3606 if (calldata->arg.bitmask != NULL) {
3607 calldata->arg.bitmask = NULL;
3608 calldata->res.fattr = NULL;
3609 goto out_restart;
3610
3611 }
3612 break;
3613 case -NFS4ERR_OLD_STATEID:
3614 /* Did we race with OPEN? */
3615 if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid,
3616 state))
3617 goto out_restart;
3618 goto out_release;
3619 case -NFS4ERR_ADMIN_REVOKED:
3620 case -NFS4ERR_STALE_STATEID:
3621 case -NFS4ERR_EXPIRED:
3622 nfs4_free_revoked_stateid(server,
3623 &calldata->arg.stateid,
3624 task->tk_msg.rpc_cred);
3625 fallthrough;
3626 case -NFS4ERR_BAD_STATEID:
3627 if (calldata->arg.fmode == 0)
3628 break;
3629 fallthrough;
3630 default:
3631 task->tk_status = nfs4_async_handle_exception(task,
3632 server, task->tk_status, &exception);
3633 if (exception.retry)
3634 goto out_restart;
3635 }
3636 nfs_clear_open_stateid(state, &calldata->arg.stateid,
3637 res_stateid, calldata->arg.fmode);
3638 out_release:
3639 task->tk_status = 0;
3640 nfs_release_seqid(calldata->arg.seqid);
3641 nfs_refresh_inode(calldata->inode, &calldata->fattr);
3642 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
3643 return;
3644 out_restart:
3645 task->tk_status = 0;
3646 rpc_restart_call_prepare(task);
3647 goto out_release;
3648 }
3649
3650 static void nfs4_close_prepare(struct rpc_task *task, void *data)
3651 {
3652 struct nfs4_closedata *calldata = data;
3653 struct nfs4_state *state = calldata->state;
3654 struct inode *inode = calldata->inode;
3655 struct nfs_server *server = NFS_SERVER(inode);
3656 struct pnfs_layout_hdr *lo;
3657 bool is_rdonly, is_wronly, is_rdwr;
3658 int call_close = 0;
3659
3660 dprintk("%s: begin!\n", __func__);
3661 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
3662 goto out_wait;
3663
3664 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
3665 spin_lock(&state->owner->so_lock);
3666 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
3667 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
3668 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
3669 /* Calculate the change in open mode */
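/*
 * arg.fmode collects the share modes still in use: if it stays 0 the
 * request is switched to a full CLOSE further down, otherwise the
 * OPEN_DOWNGRADE set up above is kept. call_close decides whether any
 * RPC needs to be sent at all.
 */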
3670 calldata->arg.fmode = 0;
3671 if (state->n_rdwr == 0) {
3672 if (state->n_rdonly == 0)
3673 call_close |= is_rdonly;
3674 else if (is_rdonly)
3675 calldata->arg.fmode |= FMODE_READ;
3676 if (state->n_wronly == 0)
3677 call_close |= is_wronly;
3678 else if (is_wronly)
3679 calldata->arg.fmode |= FMODE_WRITE;
3680 if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
3681 call_close |= is_rdwr;
3682 } else if (is_rdwr)
3683 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
3684
3685 nfs4_sync_open_stateid(&calldata->arg.stateid, state);
3686 if (!nfs4_valid_open_stateid(state))
3687 call_close = 0;
3688 spin_unlock(&state->owner->so_lock);
3689
3690 if (!call_close) {
3691 /* Note: exit _without_ calling nfs4_close_done */
3692 goto out_no_action;
3693 }
3694
3695 if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) {
3696 nfs_release_seqid(calldata->arg.seqid);
3697 goto out_wait;
3698 }
3699
3700 lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL;
3701 if (lo && !pnfs_layout_is_valid(lo)) {
3702 calldata->arg.lr_args = NULL;
3703 calldata->res.lr_res = NULL;
3704 }
3705
3706 if (calldata->arg.fmode == 0)
3707 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
3708
3709 if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
3710 /* Close-to-open cache consistency revalidation */
3711 if (!nfs4_have_delegation(inode, FMODE_READ)) {
3712 nfs4_bitmask_set(calldata->arg.bitmask_store,
3713 server->cache_consistency_bitmask,
3714 inode, server, NULL);
3715 calldata->arg.bitmask = calldata->arg.bitmask_store;
3716 } else
3717 calldata->arg.bitmask = NULL;
3718 }
3719
3720 calldata->arg.share_access =
3721 nfs4_map_atomic_open_share(NFS_SERVER(inode),
3722 calldata->arg.fmode, 0);
3723
3724 if (calldata->res.fattr == NULL)
3725 calldata->arg.bitmask = NULL;
3726 else if (calldata->arg.bitmask == NULL)
3727 calldata->res.fattr = NULL;
3728 calldata->timestamp = jiffies;
3729 if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client,
3730 &calldata->arg.seq_args,
3731 &calldata->res.seq_res,
3732 task) != 0)
3733 nfs_release_seqid(calldata->arg.seqid);
3734 dprintk("%s: done!\n", __func__);
3735 return;
3736 out_no_action:
3737 task->tk_action = NULL;
3738 out_wait:
3739 nfs4_sequence_done(task, &calldata->res.seq_res);
3740 }
3741
3742 static const struct rpc_call_ops nfs4_close_ops = {
3743 .rpc_call_prepare = nfs4_close_prepare,
3744 .rpc_call_done = nfs4_close_done,
3745 .rpc_release = nfs4_free_closedata,
3746 };
3747
3748 /*
3749 * It is possible for data to be read from or written to a memory-mapped
3750 * file after the sys_close call (which hits the vfs layer as a flush).
3751 * This means that we can't safely call NFSv4 close on a file until
3752 * the inode is cleared. This in turn means that we are not good
3753 * NFSv4 citizens - we do not tell the server to update the file's
3754 * share state even when we are done with one of the three share
3755 * stateids in the inode.
3756 *
3757 * NOTE: Caller must be holding the sp->so_owner semaphore!
3758 */
3759 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
3760 {
3761 struct nfs_server *server = NFS_SERVER(state->inode);
3762 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
3763 struct nfs4_closedata *calldata;
3764 struct nfs4_state_owner *sp = state->owner;
3765 struct rpc_task *task;
3766 struct rpc_message msg = {
3767 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
3768 .rpc_cred = state->owner->so_cred,
3769 };
3770 struct rpc_task_setup task_setup_data = {
3771 .rpc_client = server->client,
3772 .rpc_message = &msg,
3773 .callback_ops = &nfs4_close_ops,
3774 .workqueue = nfsiod_workqueue,
3775 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
3776 };
3777 int status = -ENOMEM;
3778
3779 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE))
3780 task_setup_data.flags |= RPC_TASK_MOVEABLE;
3781
3782 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
3783 &task_setup_data.rpc_client, &msg);
3784
3785 calldata = kzalloc(sizeof(*calldata), gfp_mask);
3786 if (calldata == NULL)
3787 goto out;
3788 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1, 0);
3789 calldata->inode = state->inode;
3790 calldata->state = state;
3791 calldata->arg.fh = NFS_FH(state->inode);
3792 if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state))
3793 goto out_free_calldata;
3794 /* Serialization for the sequence id */
3795 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
3796 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
3797 if (IS_ERR(calldata->arg.seqid))
3798 goto out_free_calldata;
3799 nfs_fattr_init(&calldata->fattr);
3800 calldata->arg.fmode = 0;
3801 calldata->lr.arg.ld_private = &calldata->lr.ld_private;
3802 calldata->res.fattr = &calldata->fattr;
3803 calldata->res.seqid = calldata->arg.seqid;
3804 calldata->res.server = server;
3805 calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
3806 calldata->lr.roc = pnfs_roc(state->inode,
3807 &calldata->lr.arg, &calldata->lr.res, msg.rpc_cred);
3808 if (calldata->lr.roc) {
3809 calldata->arg.lr_args = &calldata->lr.arg;
3810 calldata->res.lr_res = &calldata->lr.res;
3811 }
3812 nfs_sb_active(calldata->inode->i_sb);
3813
3814 msg.rpc_argp = &calldata->arg;
3815 msg.rpc_resp = &calldata->res;
3816 task_setup_data.callback_data = calldata;
3817 task = rpc_run_task(&task_setup_data);
3818 if (IS_ERR(task))
3819 return PTR_ERR(task);
3820 status = 0;
3821 if (wait)
3822 status = rpc_wait_for_completion_task(task);
3823 rpc_put_task(task);
3824 return status;
3825 out_free_calldata:
3826 kfree(calldata);
3827 out:
3828 nfs4_put_open_state(state);
3829 nfs4_put_state_owner(sp);
3830 return status;
3831 }
3832
3833 static struct inode *
3834 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
3835 int open_flags, struct iattr *attr, int *opened)
3836 {
3837 struct nfs4_state *state;
3838 struct nfs4_label l, *label;
3839
3840 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
3841
3842 /* Protect against concurrent sillydeletes */
3843 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
3844
3845 nfs4_label_release_security(label);
3846
3847 if (IS_ERR(state))
3848 return ERR_CAST(state);
3849 return state->inode;
3850 }
3851
3852 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
3853 {
3854 if (ctx->state == NULL)
3855 return;
3856 if (is_sync)
3857 nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx));
3858 else
3859 nfs4_close_state(ctx->state, _nfs4_ctx_to_openmode(ctx));
3860 }
3861
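/*
 * Masks of the attribute bits defined by each minor version: for the
 * highest-numbered attribute bit FLAG in a word, 2*FLAG - 1UL sets that
 * bit and every bit below it (e.g. 2*0x8 - 1 == 0xf).
 */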
3862 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
3863 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
3864 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_XATTR_SUPPORT - 1UL)
3865
3866 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3867 {
3868 u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion;
3869 struct nfs4_server_caps_arg args = {
3870 .fhandle = fhandle,
3871 .bitmask = bitmask,
3872 };
3873 struct nfs4_server_caps_res res = {};
3874 struct rpc_message msg = {
3875 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
3876 .rpc_argp = &args,
3877 .rpc_resp = &res,
3878 };
3879 int status;
3880 int i;
3881
3882 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
3883 FATTR4_WORD0_FH_EXPIRE_TYPE |
3884 FATTR4_WORD0_LINK_SUPPORT |
3885 FATTR4_WORD0_SYMLINK_SUPPORT |
3886 FATTR4_WORD0_ACLSUPPORT;
3887 if (minorversion)
3888 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;
3889
3890 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3891 if (status == 0) {
3892 /* Sanity check the server's answers: drop attribute bits our minor version doesn't define */
3893 switch (minorversion) {
3894 case 0:
3895 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
3896 res.attr_bitmask[2] = 0;
3897 break;
3898 case 1:
3899 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
3900 break;
3901 case 2:
3902 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
3903 }
3904 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
3905 server->caps &= ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS |
3906 NFS_CAP_SYMLINKS| NFS_CAP_SECURITY_LABEL);
3907 server->fattr_valid = NFS_ATTR_FATTR_V4;
3908 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
3909 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3910 server->caps |= NFS_CAP_ACLS;
3911 if (res.has_links != 0)
3912 server->caps |= NFS_CAP_HARDLINKS;
3913 if (res.has_symlinks != 0)
3914 server->caps |= NFS_CAP_SYMLINKS;
3915 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
3916 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
3917 server->caps |= NFS_CAP_SECURITY_LABEL;
3918 #endif
3919 if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS)
3920 server->caps |= NFS_CAP_FS_LOCATIONS;
3921 if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID))
3922 server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID;
3923 if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE))
3924 server->fattr_valid &= ~NFS_ATTR_FATTR_MODE;
3925 if (!(res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS))
3926 server->fattr_valid &= ~NFS_ATTR_FATTR_NLINK;
3927 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER))
3928 server->fattr_valid &= ~(NFS_ATTR_FATTR_OWNER |
3929 NFS_ATTR_FATTR_OWNER_NAME);
3930 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP))
3931 server->fattr_valid &= ~(NFS_ATTR_FATTR_GROUP |
3932 NFS_ATTR_FATTR_GROUP_NAME);
3933 if (!(res.attr_bitmask[1] & FATTR4_WORD1_SPACE_USED))
3934 server->fattr_valid &= ~NFS_ATTR_FATTR_SPACE_USED;
3935 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS))
3936 server->fattr_valid &= ~NFS_ATTR_FATTR_ATIME;
3937 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA))
3938 server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME;
3939 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY))
3940 server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME;
3941 memcpy(server->attr_bitmask_nl, res.attr_bitmask,
3942 sizeof(server->attr_bitmask));
3943 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
3944
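/*
 * Post-op cache consistency only needs change, size, ctime and mtime;
 * trim the bitmask accordingly.
 */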
3945 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
3946 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
3947 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
3948 server->cache_consistency_bitmask[2] = 0;
3949
3950 /* Avoid a regression due to buggy servers: only keep exclusive-create attributes that are also advertised as supported */
3951 for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++)
3952 res.exclcreat_bitmask[i] &= res.attr_bitmask[i];
3953 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
3954 sizeof(server->exclcreat_bitmask));
3955
3956 server->acl_bitmask = res.acl_bitmask;
3957 server->fh_expire_type = res.fh_expire_type;
3958 }
3959
3960 return status;
3961 }
3962
3963 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3964 {
3965 struct nfs4_exception exception = {
3966 .interruptible = true,
3967 };
3968 int err;
3969 do {
3970 err = nfs4_handle_exception(server,
3971 _nfs4_server_capabilities(server, fhandle),
3972 &exception);
3973 } while (exception.retry);
3974 return err;
3975 }
3976
3977 static int _nfs4_discover_trunking(struct nfs_server *server,
3978 struct nfs_fh *fhandle)
3979 {
3980 struct nfs4_fs_locations *locations = NULL;
3981 struct page *page;
3982 const struct cred *cred;
3983 struct nfs_client *clp = server->nfs_client;
3984 const struct nfs4_state_maintenance_ops *ops =
3985 clp->cl_mvops->state_renewal_ops;
3986 int status = -ENOMEM;
3987
3988 cred = ops->get_state_renewal_cred(clp);
3989 if (cred == NULL) {
3990 cred = nfs4_get_clid_cred(clp);
3991 if (cred == NULL)
3992 return -ENOKEY;
3993 }
3994
3995 page = alloc_page(GFP_KERNEL);
3996 if (!page)
3997 goto out_put_cred;
3998 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
3999 if (!locations)
4000 goto out_free;
4001 locations->fattr = nfs_alloc_fattr();
4002 if (!locations->fattr)
4003 goto out_free_2;
4004
4005 status = nfs4_proc_get_locations(server, fhandle, locations, page,
4006 cred);
4007
4008 kfree(locations->fattr);
4009 out_free_2:
4010 kfree(locations);
4011 out_free:
4012 __free_page(page);
4013 out_put_cred:
4014 put_cred(cred);
4015 return status;
4016 }
4017
4018 static int nfs4_discover_trunking(struct nfs_server *server,
4019 struct nfs_fh *fhandle)
4020 {
4021 struct nfs4_exception exception = {
4022 .interruptible = true,
4023 };
4024 struct nfs_client *clp = server->nfs_client;
4025 int err = 0;
4026
4027 if (!nfs4_has_session(clp))
4028 goto out;
4029 do {
4030 err = nfs4_handle_exception(server,
4031 _nfs4_discover_trunking(server, fhandle),
4032 &exception);
4033 } while (exception.retry);
4034 out:
4035 return err;
4036 }
4037
4038 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
4039 struct nfs_fsinfo *info)
4040 {
4041 u32 bitmask[3];
4042 struct nfs4_lookup_root_arg args = {
4043 .bitmask = bitmask,
4044 };
4045 struct nfs4_lookup_res res = {
4046 .server = server,
4047 .fattr = info->fattr,
4048 .fh = fhandle,
4049 };
4050 struct rpc_message msg = {
4051 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
4052 .rpc_argp = &args,
4053 .rpc_resp = &res,
4054 };
4055
4056 bitmask[0] = nfs4_fattr_bitmap[0];
4057 bitmask[1] = nfs4_fattr_bitmap[1];
4058 /*
4059 * The security label is processed by a later getattr, so don't request it here
4060 */
4061 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
4062
4063 nfs_fattr_init(info->fattr);
4064 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4065 }
4066
4067 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
4068 struct nfs_fsinfo *info)
4069 {
4070 struct nfs4_exception exception = {
4071 .interruptible = true,
4072 };
4073 int err;
4074 do {
4075 err = _nfs4_lookup_root(server, fhandle, info);
4076 trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
4077 switch (err) {
4078 case 0:
4079 case -NFS4ERR_WRONGSEC:
4080 goto out;
4081 default:
4082 err = nfs4_handle_exception(server, err, &exception);
4083 }
4084 } while (exception.retry);
4085 out:
4086 return err;
4087 }
4088
4089 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
4090 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
4091 {
4092 struct rpc_auth_create_args auth_args = {
4093 .pseudoflavor = flavor,
4094 };
4095 struct rpc_auth *auth;
4096
4097 auth = rpcauth_create(&auth_args, server->client);
4098 if (IS_ERR(auth))
4099 return -EACCES;
4100 return nfs4_lookup_root(server, fhandle, info);
4101 }
4102
4103 /*
4104 * Retry pseudoroot lookup with various security flavors. We do this when:
4105 *
4106 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
4107 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation
4108 *
4109 * Returns zero on success, or a negative NFS4ERR value, or a
4110 * negative errno value.
4111 */
4112 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
4113 struct nfs_fsinfo *info)
4114 {
4115 /* Ordering per RFC 3530bis, section 15.33.5 */
4116 static const rpc_authflavor_t flav_array[] = {
4117 RPC_AUTH_GSS_KRB5P,
4118 RPC_AUTH_GSS_KRB5I,
4119 RPC_AUTH_GSS_KRB5,
4120 RPC_AUTH_UNIX, /* courtesy */
4121 RPC_AUTH_NULL,
4122 };
4123 int status = -EPERM;
4124 size_t i;
4125
4126 if (server->auth_info.flavor_len > 0) {
4127 /* try each flavor specified by user */
4128 for (i = 0; i < server->auth_info.flavor_len; i++) {
4129 status = nfs4_lookup_root_sec(server, fhandle, info,
4130 server->auth_info.flavors[i]);
4131 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
4132 continue;
4133 break;
4134 }
4135 } else {
4136 /* no flavors specified by user, try default list */
4137 for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
4138 status = nfs4_lookup_root_sec(server, fhandle, info,
4139 flav_array[i]);
4140 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
4141 continue;
4142 break;
4143 }
4144 }
4145
4146 /*
4147 * -EACCES could mean that the user doesn't have correct permissions
4148 * to access the mount. It could also mean that we tried to mount
4149 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
4150 * existing mount programs don't handle -EACCES very well so it should
4151 * be mapped to -EPERM instead.
4152 */
4153 if (status == -EACCES)
4154 status = -EPERM;
4155 return status;
4156 }
4157
4158 /**
4159 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
4160 * @server: initialized nfs_server handle
4161 * @fhandle: we fill in the pseudo-fs root file handle
4162 * @info: we fill in an FSINFO struct
4163 * @auth_probe: probe the auth flavours
4164 *
4165 * Returns zero on success, or a negative errno.
4166 */
4167 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
4168 struct nfs_fsinfo *info,
4169 bool auth_probe)
4170 {
4171 int status = 0;
4172
4173 if (!auth_probe)
4174 status = nfs4_lookup_root(server, fhandle, info);
4175
4176 if (auth_probe || status == -NFS4ERR_WRONGSEC)
4177 status = server->nfs_client->cl_mvops->find_root_sec(server,
4178 fhandle, info);
4179
4180 if (status == 0)
4181 status = nfs4_server_capabilities(server, fhandle);
4182 if (status == 0)
4183 status = nfs4_do_fsinfo(server, fhandle, info);
4184
4185 return nfs4_map_errors(status);
4186 }
4187
4188 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
4189 struct nfs_fsinfo *info)
4190 {
4191 int error;
4192 struct nfs_fattr *fattr = info->fattr;
4193 struct nfs4_label *label = fattr->label;
4194
4195 error = nfs4_server_capabilities(server, mntfh);
4196 if (error < 0) {
4197 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
4198 return error;
4199 }
4200
4201 error = nfs4_proc_getattr(server, mntfh, fattr, label, NULL);
4202 if (error < 0) {
4203 dprintk("nfs4_get_root: getattr error = %d\n", -error);
4204 goto out;
4205 }
4206
4207 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
4208 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
4209 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
4210
4211 out:
4212 return error;
4213 }
4214
4215 /*
4216 * Get locations and (maybe) other attributes of a referral.
4217 * Note that we'll actually follow the referral later when
4218 * we detect fsid mismatch in inode revalidation
4219 */
4220 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
4221 const struct qstr *name, struct nfs_fattr *fattr,
4222 struct nfs_fh *fhandle)
4223 {
4224 int status = -ENOMEM;
4225 struct page *page = NULL;
4226 struct nfs4_fs_locations *locations = NULL;
4227
4228 page = alloc_page(GFP_KERNEL);
4229 if (page == NULL)
4230 goto out;
4231 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
4232 if (locations == NULL)
4233 goto out;
4234
4235 locations->fattr = fattr;
4236
4237 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
4238 if (status != 0)
4239 goto out;
4240
4241 /*
4242 * If the fsid didn't change, this is a migration event, not a
4243 * referral. Cause us to drop into the exception handler, which
4244 * will kick off migration recovery.
4245 */
4246 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &fattr->fsid)) {
4247 dprintk("%s: server did not return a different fsid for"
4248 " a referral at %s\n", __func__, name->name);
4249 status = -NFS4ERR_MOVED;
4250 goto out;
4251 }
4252 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
4253 nfs_fixup_referral_attributes(fattr);
4254 memset(fhandle, 0, sizeof(struct nfs_fh));
4255 out:
4256 if (page)
4257 __free_page(page);
4258 kfree(locations);
4259 return status;
4260 }
4261
4262 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
4263 struct nfs_fattr *fattr, struct nfs4_label *label,
4264 struct inode *inode)
4265 {
4266 __u32 bitmask[NFS4_BITMASK_SZ];
4267 struct nfs4_getattr_arg args = {
4268 .fh = fhandle,
4269 .bitmask = bitmask,
4270 };
4271 struct nfs4_getattr_res res = {
4272 .fattr = fattr,
4273 .label = label,
4274 .server = server,
4275 };
4276 struct rpc_message msg = {
4277 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
4278 .rpc_argp = &args,
4279 .rpc_resp = &res,
4280 };
4281 unsigned short task_flags = 0;
4282
4283 if (nfs4_has_session(server->nfs_client))
4284 task_flags = RPC_TASK_MOVEABLE;
4285
4286 /* Is this an attribute revalidation, subject to softreval? */
4287 if (inode && (server->flags & NFS_MOUNT_SOFTREVAL))
4288 task_flags |= RPC_TASK_TIMEOUT;
4289
4290 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, label), inode, 0);
4291 nfs_fattr_init(fattr);
4292 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
4293 return nfs4_do_call_sync(server->client, server, &msg,
4294 &args.seq_args, &res.seq_res, task_flags);
4295 }
4296
4297 int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
4298 struct nfs_fattr *fattr, struct nfs4_label *label,
4299 struct inode *inode)
4300 {
4301 struct nfs4_exception exception = {
4302 .interruptible = true,
4303 };
4304 int err;
4305 do {
4306 err = _nfs4_proc_getattr(server, fhandle, fattr, label, inode);
4307 trace_nfs4_getattr(server, fhandle, fattr, err);
4308 err = nfs4_handle_exception(server, err,
4309 &exception);
4310 } while (exception.retry);
4311 return err;
4312 }
4313
4314 /*
4315 * The file is not closed if it is opened due to a request to change
4316 * the size of the file. The open call will not be needed once the
4317 * VFS layer lookup-intents are implemented.
4318 *
4319 * Close is called when the inode is destroyed.
4320 * If we haven't opened the file for O_WRONLY, we
4321 * need to do so in the size_change case to obtain a stateid.
4322 *
4323 * Got race?
4324 * Because OPEN is always done by name in nfsv4, it is
4325 * possible that we opened a different file by the same
4326 * name. We can recognize this race condition, but we
4327 * can't do anything about it besides returning an error.
4328 *
4329 * This will be fixed with VFS changes (lookup-intent).
4330 */
4331 static int
4332 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
4333 struct iattr *sattr)
4334 {
4335 struct inode *inode = d_inode(dentry);
4336 const struct cred *cred = NULL;
4337 struct nfs_open_context *ctx = NULL;
4338 struct nfs4_label *label = NULL;
4339 int status;
4340
4341 if (pnfs_ld_layoutret_on_setattr(inode) &&
4342 sattr->ia_valid & ATTR_SIZE &&
4343 sattr->ia_size < i_size_read(inode))
4344 pnfs_commit_and_return_layout(inode);
4345
4346 nfs_fattr_init(fattr);
4347
4348 /* Deal with open(O_TRUNC) */
4349 if (sattr->ia_valid & ATTR_OPEN)
4350 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
4351
4352 /* Optimization: if the end result is no change, don't RPC */
4353 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
4354 return 0;
4355
4356 /* Search for an existing open(O_WRITE) file */
4357 if (sattr->ia_valid & ATTR_FILE) {
4358
4359 ctx = nfs_file_open_context(sattr->ia_file);
4360 if (ctx)
4361 cred = ctx->cred;
4362 }
4363
4364 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
4365 if (IS_ERR(label))
4366 return PTR_ERR(label);
4367
4368 /* Return any delegations if we're going to change ACLs */
4369 if ((sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
4370 nfs4_inode_make_writeable(inode);
4371
4372 status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL, label);
4373 if (status == 0) {
4374 nfs_setattr_update_inode(inode, sattr, fattr);
4375 nfs_setsecurity(inode, fattr, label);
4376 }
4377 nfs4_label_free(label);
4378 return status;
4379 }
4380
4381 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
4382 struct dentry *dentry, struct nfs_fh *fhandle,
4383 struct nfs_fattr *fattr, struct nfs4_label *label)
4384 {
4385 struct nfs_server *server = NFS_SERVER(dir);
4386 int status;
4387 struct nfs4_lookup_arg args = {
4388 .bitmask = server->attr_bitmask,
4389 .dir_fh = NFS_FH(dir),
4390 .name = &dentry->d_name,
4391 };
4392 struct nfs4_lookup_res res = {
4393 .server = server,
4394 .fattr = fattr,
4395 .label = label,
4396 .fh = fhandle,
4397 };
4398 struct rpc_message msg = {
4399 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
4400 .rpc_argp = &args,
4401 .rpc_resp = &res,
4402 };
4403 unsigned short task_flags = 0;
4404
4405 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE))
4406 task_flags = RPC_TASK_MOVEABLE;
4407
4408 /* Is this an attribute revalidation, subject to softreval? */
4409 if (nfs_lookup_is_soft_revalidate(dentry))
4410 task_flags |= RPC_TASK_TIMEOUT;
4411
4412 args.bitmask = nfs4_bitmask(server, label);
4413
4414 nfs_fattr_init(fattr);
4415
4416 dprintk("NFS call lookup %pd2\n", dentry);
4417 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
4418 status = nfs4_do_call_sync(clnt, server, &msg,
4419 &args.seq_args, &res.seq_res, task_flags);
4420 dprintk("NFS reply lookup: %d\n", status);
4421 return status;
4422 }
4423
4424 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
4425 {
4426 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
4427 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
4428 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
4429 fattr->nlink = 2;
4430 }
4431
4432 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
4433 struct dentry *dentry, struct nfs_fh *fhandle,
4434 struct nfs_fattr *fattr, struct nfs4_label *label)
4435 {
4436 struct nfs4_exception exception = {
4437 .interruptible = true,
4438 };
4439 struct rpc_clnt *client = *clnt;
4440 const struct qstr *name = &dentry->d_name;
4441 int err;
4442 do {
4443 err = _nfs4_proc_lookup(client, dir, dentry, fhandle, fattr, label);
4444 trace_nfs4_lookup(dir, name, err);
4445 switch (err) {
4446 case -NFS4ERR_BADNAME:
4447 err = -ENOENT;
4448 goto out;
4449 case -NFS4ERR_MOVED:
4450 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
4451 if (err == -NFS4ERR_MOVED)
4452 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
4453 goto out;
4454 case -NFS4ERR_WRONGSEC:
4455 err = -EPERM;
4456 if (client != *clnt)
4457 goto out;
4458 client = nfs4_negotiate_security(client, dir, name);
4459 if (IS_ERR(client))
4460 return PTR_ERR(client);
4461
4462 exception.retry = 1;
4463 break;
4464 default:
4465 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
4466 }
4467 } while (exception.retry);
4468
4469 out:
4470 if (err == 0)
4471 *clnt = client;
4472 else if (client != *clnt)
4473 rpc_shutdown_client(client);
4474
4475 return err;
4476 }
4477
4478 static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry,
4479 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
4480 struct nfs4_label *label)
4481 {
4482 int status;
4483 struct rpc_clnt *client = NFS_CLIENT(dir);
4484
4485 status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr, label);
4486 if (client != NFS_CLIENT(dir)) {
4487 rpc_shutdown_client(client);
4488 nfs_fixup_secinfo_attributes(fattr);
4489 }
4490 return status;
4491 }
4492
4493 struct rpc_clnt *
4494 nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry,
4495 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
4496 {
4497 struct rpc_clnt *client = NFS_CLIENT(dir);
4498 int status;
4499
4500 status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr, NULL);
4501 if (status < 0)
4502 return ERR_PTR(status);
4503 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
4504 }
4505
4506 static int _nfs4_proc_lookupp(struct inode *inode,
4507 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
4508 struct nfs4_label *label)
4509 {
4510 struct rpc_clnt *clnt = NFS_CLIENT(inode);
4511 struct nfs_server *server = NFS_SERVER(inode);
4512 int status;
4513 struct nfs4_lookupp_arg args = {
4514 .bitmask = server->attr_bitmask,
4515 .fh = NFS_FH(inode),
4516 };
4517 struct nfs4_lookupp_res res = {
4518 .server = server,
4519 .fattr = fattr,
4520 .label = label,
4521 .fh = fhandle,
4522 };
4523 struct rpc_message msg = {
4524 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP],
4525 .rpc_argp = &args,
4526 .rpc_resp = &res,
4527 };
4528 unsigned short task_flags = 0;
4529
4530 if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
4531 task_flags |= RPC_TASK_TIMEOUT;
4532
4533 args.bitmask = nfs4_bitmask(server, label);
4534
4535 nfs_fattr_init(fattr);
4536
4537 dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino);
4538 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
4539 &res.seq_res, task_flags);
4540 dprintk("NFS reply lookupp: %d\n", status);
4541 return status;
4542 }
4543
4544 static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle,
4545 struct nfs_fattr *fattr, struct nfs4_label *label)
4546 {
4547 struct nfs4_exception exception = {
4548 .interruptible = true,
4549 };
4550 int err;
4551 do {
4552 err = _nfs4_proc_lookupp(inode, fhandle, fattr, label);
4553 trace_nfs4_lookupp(inode, err);
4554 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4555 &exception);
4556 } while (exception.retry);
4557 return err;
4558 }
4559
4560 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
4561 {
4562 struct nfs_server *server = NFS_SERVER(inode);
4563 struct nfs4_accessargs args = {
4564 .fh = NFS_FH(inode),
4565 .access = entry->mask,
4566 };
4567 struct nfs4_accessres res = {
4568 .server = server,
4569 };
4570 struct rpc_message msg = {
4571 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
4572 .rpc_argp = &args,
4573 .rpc_resp = &res,
4574 .rpc_cred = entry->cred,
4575 };
4576 int status = 0;
4577
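/*
 * Piggyback a cache-consistency GETATTR on the ACCESS call unless a
 * delegation already guarantees the cached attributes are valid.
 */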
4578 if (!nfs4_have_delegation(inode, FMODE_READ)) {
4579 res.fattr = nfs_alloc_fattr();
4580 if (res.fattr == NULL)
4581 return -ENOMEM;
4582 args.bitmask = server->cache_consistency_bitmask;
4583 }
4584 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4585 if (!status) {
4586 nfs_access_set_mask(entry, res.access);
4587 if (res.fattr)
4588 nfs_refresh_inode(inode, res.fattr);
4589 }
4590 nfs_free_fattr(res.fattr);
4591 return status;
4592 }
4593
4594 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
4595 {
4596 struct nfs4_exception exception = {
4597 .interruptible = true,
4598 };
4599 int err;
4600 do {
4601 err = _nfs4_proc_access(inode, entry);
4602 trace_nfs4_access(inode, err);
4603 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4604 &exception);
4605 } while (exception.retry);
4606 return err;
4607 }
4608
4609 /*
4610 * TODO: For the time being, we don't try to get any attributes
4611 * along with any of the zero-copy operations READ, READDIR,
4612 * READLINK, WRITE.
4613 *
4614 * In the case of the first three, we want to put the GETATTR
4615 * after the read-type operation -- this is because it is hard
4616 * to predict the length of a GETATTR response in v4, and thus
4617 * align the READ data correctly. This means that the GETATTR
4618 * may end up partially falling into the page cache, and we should
4619 * shift it into the 'tail' of the xdr_buf before processing.
4620 * To do this efficiently, we need to know the total length
4621 * of data received, which doesn't seem to be available outside
4622 * of the RPC layer.
4623 *
4624 * In the case of WRITE, we also want to put the GETATTR after
4625 * the operation -- in this case because we want to make sure
4626 * we get the post-operation mtime and size.
4627 *
4628 * Both of these changes to the XDR layer would in fact be quite
4629 * minor, but I decided to leave them for a subsequent patch.
4630 */
4631 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
4632 unsigned int pgbase, unsigned int pglen)
4633 {
4634 struct nfs4_readlink args = {
4635 .fh = NFS_FH(inode),
4636 .pgbase = pgbase,
4637 .pglen = pglen,
4638 .pages = &page,
4639 };
4640 struct nfs4_readlink_res res;
4641 struct rpc_message msg = {
4642 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
4643 .rpc_argp = &args,
4644 .rpc_resp = &res,
4645 };
4646
4647 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
4648 }
4649
4650 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
4651 unsigned int pgbase, unsigned int pglen)
4652 {
4653 struct nfs4_exception exception = {
4654 .interruptible = true,
4655 };
4656 int err;
4657 do {
4658 err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
4659 trace_nfs4_readlink(inode, err);
4660 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4661 &exception);
4662 } while (exception.retry);
4663 return err;
4664 }
4665
4666 /*
4667 * This is just for mknod. open(O_CREAT) will always do ->open_context().
4668 */
4669 static int
4670 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
4671 int flags)
4672 {
4673 struct nfs_server *server = NFS_SERVER(dir);
4674 struct nfs4_label l, *ilabel;
4675 struct nfs_open_context *ctx;
4676 struct nfs4_state *state;
4677 int status = 0;
4678
4679 ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL);
4680 if (IS_ERR(ctx))
4681 return PTR_ERR(ctx);
4682
4683 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
4684
4685 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
4686 sattr->ia_mode &= ~current_umask();
4687 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL);
4688 if (IS_ERR(state)) {
4689 status = PTR_ERR(state);
4690 goto out;
4691 }
4692 out:
4693 nfs4_label_release_security(ilabel);
4694 put_nfs_open_context(ctx);
4695 return status;
4696 }
4697
4698 static int
4699 _nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype)
4700 {
4701 struct nfs_server *server = NFS_SERVER(dir);
4702 struct nfs_removeargs args = {
4703 .fh = NFS_FH(dir),
4704 .name = *name,
4705 };
4706 struct nfs_removeres res = {
4707 .server = server,
4708 };
4709 struct rpc_message msg = {
4710 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
4711 .rpc_argp = &args,
4712 .rpc_resp = &res,
4713 };
4714 unsigned long timestamp = jiffies;
4715 int status;
4716
4717 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
4718 if (status == 0) {
4719 spin_lock(&dir->i_lock);
4720 /* Removing a directory decrements nlink in the parent (but never below 2, the minimum for a directory) */
4721 if (ftype == NF4DIR && dir->i_nlink > 2)
4722 nfs4_dec_nlink_locked(dir);
4723 nfs4_update_changeattr_locked(dir, &res.cinfo, timestamp,
4724 NFS_INO_INVALID_DATA);
4725 spin_unlock(&dir->i_lock);
4726 }
4727 return status;
4728 }
4729
4730 static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry)
4731 {
4732 struct nfs4_exception exception = {
4733 .interruptible = true,
4734 };
4735 struct inode *inode = d_inode(dentry);
4736 int err;
4737
4738 if (inode) {
4739 if (inode->i_nlink == 1)
4740 nfs4_inode_return_delegation(inode);
4741 else
4742 nfs4_inode_make_writeable(inode);
4743 }
4744 do {
4745 err = _nfs4_proc_remove(dir, &dentry->d_name, NF4REG);
4746 trace_nfs4_remove(dir, &dentry->d_name, err);
4747 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4748 &exception);
4749 } while (exception.retry);
4750 return err;
4751 }
4752
4753 static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name)
4754 {
4755 struct nfs4_exception exception = {
4756 .interruptible = true,
4757 };
4758 int err;
4759
4760 do {
4761 err = _nfs4_proc_remove(dir, name, NF4DIR);
4762 trace_nfs4_remove(dir, name, err);
4763 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4764 &exception);
4765 } while (exception.retry);
4766 return err;
4767 }
4768
4769 static void nfs4_proc_unlink_setup(struct rpc_message *msg,
4770 struct dentry *dentry,
4771 struct inode *inode)
4772 {
4773 struct nfs_removeargs *args = msg->rpc_argp;
4774 struct nfs_removeres *res = msg->rpc_resp;
4775
4776 res->server = NFS_SB(dentry->d_sb);
4777 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
4778 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1, 0);
4779
4780 nfs_fattr_init(res->dir_attr);
4781
4782 if (inode)
4783 nfs4_inode_return_delegation(inode);
4784 }
4785
4786 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
4787 {
4788 nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client,
4789 &data->args.seq_args,
4790 &data->res.seq_res,
4791 task);
4792 }
4793
4794 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
4795 {
4796 struct nfs_unlinkdata *data = task->tk_calldata;
4797 struct nfs_removeres *res = &data->res;
4798
4799 if (!nfs4_sequence_done(task, &res->seq_res))
4800 return 0;
4801 if (nfs4_async_handle_error(task, res->server, NULL,
4802 &data->timeout) == -EAGAIN)
4803 return 0;
4804 if (task->tk_status == 0)
4805 nfs4_update_changeattr(dir, &res->cinfo,
4806 res->dir_attr->time_start,
4807 NFS_INO_INVALID_DATA);
4808 return 1;
4809 }
4810
4811 static void nfs4_proc_rename_setup(struct rpc_message *msg,
4812 struct dentry *old_dentry,
4813 struct dentry *new_dentry)
4814 {
4815 struct nfs_renameargs *arg = msg->rpc_argp;
4816 struct nfs_renameres *res = msg->rpc_resp;
4817 struct inode *old_inode = d_inode(old_dentry);
4818 struct inode *new_inode = d_inode(new_dentry);
4819
4820 if (old_inode)
4821 nfs4_inode_make_writeable(old_inode);
4822 if (new_inode)
4823 nfs4_inode_return_delegation(new_inode);
4824 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
4825 res->server = NFS_SB(old_dentry->d_sb);
4826 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1, 0);
4827 }
4828
4829 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
4830 {
4831 nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client,
4832 &data->args.seq_args,
4833 &data->res.seq_res,
4834 task);
4835 }
4836
4837 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
4838 struct inode *new_dir)
4839 {
4840 struct nfs_renamedata *data = task->tk_calldata;
4841 struct nfs_renameres *res = &data->res;
4842
4843 if (!nfs4_sequence_done(task, &res->seq_res))
4844 return 0;
4845 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
4846 return 0;
4847
4848 if (task->tk_status == 0) {
4849 if (new_dir != old_dir) {
4850 /* Note: If we moved a directory, nlink will change */
4851 nfs4_update_changeattr(old_dir, &res->old_cinfo,
4852 res->old_fattr->time_start,
4853 NFS_INO_INVALID_NLINK |
4854 NFS_INO_INVALID_DATA);
4855 nfs4_update_changeattr(new_dir, &res->new_cinfo,
4856 res->new_fattr->time_start,
4857 NFS_INO_INVALID_NLINK |
4858 NFS_INO_INVALID_DATA);
4859 } else
4860 nfs4_update_changeattr(old_dir, &res->old_cinfo,
4861 res->old_fattr->time_start,
4862 NFS_INO_INVALID_DATA);
4863 }
4864 return 1;
4865 }
4866
4867 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
4868 {
4869 struct nfs_server *server = NFS_SERVER(inode);
4870 __u32 bitmask[NFS4_BITMASK_SZ];
4871 struct nfs4_link_arg arg = {
4872 .fh = NFS_FH(inode),
4873 .dir_fh = NFS_FH(dir),
4874 .name = name,
4875 .bitmask = bitmask,
4876 };
4877 struct nfs4_link_res res = {
4878 .server = server,
4879 .label = NULL,
4880 };
4881 struct rpc_message msg = {
4882 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
4883 .rpc_argp = &arg,
4884 .rpc_resp = &res,
4885 };
4886 int status = -ENOMEM;
4887
4888 res.fattr = nfs_alloc_fattr();
4889 if (res.fattr == NULL)
4890 goto out;
4891
4892 res.label = nfs4_label_alloc(server, GFP_KERNEL);
4893 if (IS_ERR(res.label)) {
4894 status = PTR_ERR(res.label);
4895 goto out;
4896 }
4897
4898 nfs4_inode_make_writeable(inode);
4899 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, res.label), inode,
4900 NFS_INO_INVALID_CHANGE);
4901 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4902 if (!status) {
4903 nfs4_update_changeattr(dir, &res.cinfo, res.fattr->time_start,
4904 NFS_INO_INVALID_DATA);
4905 nfs4_inc_nlink(inode);
4906 status = nfs_post_op_update_inode(inode, res.fattr);
4907 if (!status)
4908 nfs_setsecurity(inode, res.fattr, res.label);
4909 }
4910
4911
4912 nfs4_label_free(res.label);
4913
4914 out:
4915 nfs_free_fattr(res.fattr);
4916 return status;
4917 }
4918
4919 static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
4920 {
4921 struct nfs4_exception exception = {
4922 .interruptible = true,
4923 };
4924 int err;
4925 do {
4926 err = nfs4_handle_exception(NFS_SERVER(inode),
4927 _nfs4_proc_link(inode, dir, name),
4928 &exception);
4929 } while (exception.retry);
4930 return err;
4931 }
4932
4933 struct nfs4_createdata {
4934 struct rpc_message msg;
4935 struct nfs4_create_arg arg;
4936 struct nfs4_create_res res;
4937 struct nfs_fh fh;
4938 struct nfs_fattr fattr;
4939 struct nfs4_label *label;
4940 };
4941
4942 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
4943 const struct qstr *name, struct iattr *sattr, u32 ftype)
4944 {
4945 struct nfs4_createdata *data;
4946
4947 data = kzalloc(sizeof(*data), GFP_KERNEL);
4948 if (data != NULL) {
4949 struct nfs_server *server = NFS_SERVER(dir);
4950
4951 data->label = nfs4_label_alloc(server, GFP_KERNEL);
4952 if (IS_ERR(data->label))
4953 goto out_free;
4954
4955 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
4956 data->msg.rpc_argp = &data->arg;
4957 data->msg.rpc_resp = &data->res;
4958 data->arg.dir_fh = NFS_FH(dir);
4959 data->arg.server = server;
4960 data->arg.name = name;
4961 data->arg.attrs = sattr;
4962 data->arg.ftype = ftype;
4963 data->arg.bitmask = nfs4_bitmask(server, data->label);
4964 data->arg.umask = current_umask();
4965 data->res.server = server;
4966 data->res.fh = &data->fh;
4967 data->res.fattr = &data->fattr;
4968 data->res.label = data->label;
4969 nfs_fattr_init(data->res.fattr);
4970 }
4971 return data;
4972 out_free:
4973 kfree(data);
4974 return NULL;
4975 }
4976
4977 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
4978 {
4979 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
4980 &data->arg.seq_args, &data->res.seq_res, 1);
4981 if (status == 0) {
4982 spin_lock(&dir->i_lock);
4983 /* Creating a directory bumps nlink in the parent */
4984 if (data->arg.ftype == NF4DIR)
4985 nfs4_inc_nlink_locked(dir);
4986 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo,
4987 data->res.fattr->time_start,
4988 NFS_INO_INVALID_DATA);
4989 spin_unlock(&dir->i_lock);
4990 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
4991 }
4992 return status;
4993 }
4994
4995 static void nfs4_free_createdata(struct nfs4_createdata *data)
4996 {
4997 nfs4_label_free(data->label);
4998 kfree(data);
4999 }
5000
5001 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
5002 struct page *page, unsigned int len, struct iattr *sattr,
5003 struct nfs4_label *label)
5004 {
5005 struct nfs4_createdata *data;
5006 int status = -ENAMETOOLONG;
5007
5008 if (len > NFS4_MAXPATHLEN)
5009 goto out;
5010
5011 status = -ENOMEM;
5012 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
5013 if (data == NULL)
5014 goto out;
5015
5016 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
5017 data->arg.u.symlink.pages = &page;
5018 data->arg.u.symlink.len = len;
5019 data->arg.label = label;
5020
5021 status = nfs4_do_create(dir, dentry, data);
5022
5023 nfs4_free_createdata(data);
5024 out:
5025 return status;
5026 }
5027
5028 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
5029 struct page *page, unsigned int len, struct iattr *sattr)
5030 {
5031 struct nfs4_exception exception = {
5032 .interruptible = true,
5033 };
5034 struct nfs4_label l, *label;
5035 int err;
5036
5037 label = nfs4_label_init_security(dir, dentry, sattr, &l);
5038
5039 do {
5040 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
5041 trace_nfs4_symlink(dir, &dentry->d_name, err);
5042 err = nfs4_handle_exception(NFS_SERVER(dir), err,
5043 &exception);
5044 } while (exception.retry);
5045
5046 nfs4_label_release_security(label);
5047 return err;
5048 }
5049
5050 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
5051 struct iattr *sattr, struct nfs4_label *label)
5052 {
5053 struct nfs4_createdata *data;
5054 int status = -ENOMEM;
5055
5056 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
5057 if (data == NULL)
5058 goto out;
5059
5060 data->arg.label = label;
5061 status = nfs4_do_create(dir, dentry, data);
5062
5063 nfs4_free_createdata(data);
5064 out:
5065 return status;
5066 }
5067
5068 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
5069 struct iattr *sattr)
5070 {
5071 struct nfs_server *server = NFS_SERVER(dir);
5072 struct nfs4_exception exception = {
5073 .interruptible = true,
5074 };
5075 struct nfs4_label l, *label;
5076 int err;
5077
5078 label = nfs4_label_init_security(dir, dentry, sattr, &l);
5079
5080 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
5081 sattr->ia_mode &= ~current_umask();
5082 do {
5083 err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
5084 trace_nfs4_mkdir(dir, &dentry->d_name, err);
5085 err = nfs4_handle_exception(NFS_SERVER(dir), err,
5086 &exception);
5087 } while (exception.retry);
5088 nfs4_label_release_security(label);
5089
5090 return err;
5091 }
5092
5093 static int _nfs4_proc_readdir(struct nfs_readdir_arg *nr_arg,
5094 struct nfs_readdir_res *nr_res)
5095 {
5096 struct inode *dir = d_inode(nr_arg->dentry);
5097 struct nfs_server *server = NFS_SERVER(dir);
5098 struct nfs4_readdir_arg args = {
5099 .fh = NFS_FH(dir),
5100 .pages = nr_arg->pages,
5101 .pgbase = 0,
5102 .count = nr_arg->page_len,
5103 .plus = nr_arg->plus,
5104 };
5105 struct nfs4_readdir_res res;
5106 struct rpc_message msg = {
5107 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
5108 .rpc_argp = &args,
5109 .rpc_resp = &res,
5110 .rpc_cred = nr_arg->cred,
5111 };
5112 int status;
5113
5114 dprintk("%s: dentry = %pd2, cookie = %llu\n", __func__,
5115 nr_arg->dentry, (unsigned long long)nr_arg->cookie);
5116 if (!(server->caps & NFS_CAP_SECURITY_LABEL))
5117 args.bitmask = server->attr_bitmask_nl;
5118 else
5119 args.bitmask = server->attr_bitmask;
5120
5121 nfs4_setup_readdir(nr_arg->cookie, nr_arg->verf, nr_arg->dentry, &args);
5122 res.pgbase = args.pgbase;
5123 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
5124 &res.seq_res, 0);
5125 if (status >= 0) {
5126 memcpy(nr_res->verf, res.verifier.data, NFS4_VERIFIER_SIZE);
5127 status += args.pgbase;
5128 }
5129
5130 nfs_invalidate_atime(dir);
5131
5132 dprintk("%s: returns %d\n", __func__, status);
5133 return status;
5134 }
5135
5136 static int nfs4_proc_readdir(struct nfs_readdir_arg *arg,
5137 struct nfs_readdir_res *res)
5138 {
5139 struct nfs4_exception exception = {
5140 .interruptible = true,
5141 };
5142 int err;
5143 do {
5144 err = _nfs4_proc_readdir(arg, res);
5145 trace_nfs4_readdir(d_inode(arg->dentry), err);
5146 err = nfs4_handle_exception(NFS_SERVER(d_inode(arg->dentry)),
5147 err, &exception);
5148 } while (exception.retry);
5149 return err;
5150 }
5151
5152 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
5153 struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
5154 {
5155 struct nfs4_createdata *data;
5156 int mode = sattr->ia_mode;
5157 int status = -ENOMEM;
5158
5159 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
5160 if (data == NULL)
5161 goto out;
5162
5163 if (S_ISFIFO(mode))
5164 data->arg.ftype = NF4FIFO;
5165 else if (S_ISBLK(mode)) {
5166 data->arg.ftype = NF4BLK;
5167 data->arg.u.device.specdata1 = MAJOR(rdev);
5168 data->arg.u.device.specdata2 = MINOR(rdev);
5169 }
5170 else if (S_ISCHR(mode)) {
5171 data->arg.ftype = NF4CHR;
5172 data->arg.u.device.specdata1 = MAJOR(rdev);
5173 data->arg.u.device.specdata2 = MINOR(rdev);
5174 } else if (!S_ISSOCK(mode)) {
5175 status = -EINVAL;
5176 goto out_free;
5177 }
5178
5179 data->arg.label = label;
5180 status = nfs4_do_create(dir, dentry, data);
5181 out_free:
5182 nfs4_free_createdata(data);
5183 out:
5184 return status;
5185 }
5186
5187 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
5188 struct iattr *sattr, dev_t rdev)
5189 {
5190 struct nfs_server *server = NFS_SERVER(dir);
5191 struct nfs4_exception exception = {
5192 .interruptible = true,
5193 };
5194 struct nfs4_label l, *label;
5195 int err;
5196
5197 label = nfs4_label_init_security(dir, dentry, sattr, &l);
5198
5199 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
5200 sattr->ia_mode &= ~current_umask();
5201 do {
5202 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
5203 trace_nfs4_mknod(dir, &dentry->d_name, err);
5204 err = nfs4_handle_exception(NFS_SERVER(dir), err,
5205 &exception);
5206 } while (exception.retry);
5207
5208 nfs4_label_release_security(label);
5209
5210 return err;
5211 }
5212
5213 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
5214 struct nfs_fsstat *fsstat)
5215 {
5216 struct nfs4_statfs_arg args = {
5217 .fh = fhandle,
5218 .bitmask = server->attr_bitmask,
5219 };
5220 struct nfs4_statfs_res res = {
5221 .fsstat = fsstat,
5222 };
5223 struct rpc_message msg = {
5224 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
5225 .rpc_argp = &args,
5226 .rpc_resp = &res,
5227 };
5228
5229 nfs_fattr_init(fsstat->fattr);
5230 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5231 }
5232
5233 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
5234 {
5235 struct nfs4_exception exception = {
5236 .interruptible = true,
5237 };
5238 int err;
5239 do {
5240 err = nfs4_handle_exception(server,
5241 _nfs4_proc_statfs(server, fhandle, fsstat),
5242 &exception);
5243 } while (exception.retry);
5244 return err;
5245 }
5246
5247 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
5248 struct nfs_fsinfo *fsinfo)
5249 {
5250 struct nfs4_fsinfo_arg args = {
5251 .fh = fhandle,
5252 .bitmask = server->attr_bitmask,
5253 };
5254 struct nfs4_fsinfo_res res = {
5255 .fsinfo = fsinfo,
5256 };
5257 struct rpc_message msg = {
5258 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
5259 .rpc_argp = &args,
5260 .rpc_resp = &res,
5261 };
5262
5263 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5264 }
5265
5266 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
5267 {
5268 struct nfs4_exception exception = {
5269 .interruptible = true,
5270 };
5271 int err;
5272
5273 do {
5274 err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
5275 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
5276 if (err == 0) {
5277 nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ);
5278 break;
5279 }
5280 err = nfs4_handle_exception(server, err, &exception);
5281 } while (exception.retry);
5282 return err;
5283 }
5284
5285 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
5286 {
5287 int error;
5288
5289 nfs_fattr_init(fsinfo->fattr);
5290 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
5291 if (error == 0) {
5292 /* block layout checks this! */
5293 server->pnfs_blksize = fsinfo->blksize;
5294 set_pnfs_layoutdriver(server, fhandle, fsinfo);
5295 }
5296
5297 return error;
5298 }
5299
5300 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
5301 struct nfs_pathconf *pathconf)
5302 {
5303 struct nfs4_pathconf_arg args = {
5304 .fh = fhandle,
5305 .bitmask = server->attr_bitmask,
5306 };
5307 struct nfs4_pathconf_res res = {
5308 .pathconf = pathconf,
5309 };
5310 struct rpc_message msg = {
5311 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
5312 .rpc_argp = &args,
5313 .rpc_resp = &res,
5314 };
5315
5316 /* None of the pathconf attributes are mandatory to implement */
5317 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
5318 memset(pathconf, 0, sizeof(*pathconf));
5319 return 0;
5320 }
5321
5322 nfs_fattr_init(pathconf->fattr);
5323 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5324 }
5325
5326 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
5327 struct nfs_pathconf *pathconf)
5328 {
5329 struct nfs4_exception exception = {
5330 .interruptible = true,
5331 };
5332 int err;
5333
5334 do {
5335 err = nfs4_handle_exception(server,
5336 _nfs4_proc_pathconf(server, fhandle, pathconf),
5337 &exception);
5338 } while (exception.retry);
5339 return err;
5340 }
5341
5342 int nfs4_set_rw_stateid(nfs4_stateid *stateid,
5343 const struct nfs_open_context *ctx,
5344 const struct nfs_lock_context *l_ctx,
5345 fmode_t fmode)
5346 {
5347 return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL);
5348 }
5349 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
5350
5351 static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
5352 const struct nfs_open_context *ctx,
5353 const struct nfs_lock_context *l_ctx,
5354 fmode_t fmode)
5355 {
5356 nfs4_stateid _current_stateid;
5357
5358 /* If the current stateid represents a lost lock, then exit */
5359 if (nfs4_set_rw_stateid(&_current_stateid, ctx, l_ctx, fmode) == -EIO)
5360 return true;
5361 return nfs4_stateid_match(stateid, &_current_stateid);
5362 }
5363
5364 static bool nfs4_error_stateid_expired(int err)
5365 {
5366 switch (err) {
5367 case -NFS4ERR_DELEG_REVOKED:
5368 case -NFS4ERR_ADMIN_REVOKED:
5369 case -NFS4ERR_BAD_STATEID:
5370 case -NFS4ERR_STALE_STATEID:
5371 case -NFS4ERR_OLD_STATEID:
5372 case -NFS4ERR_OPENMODE:
5373 case -NFS4ERR_EXPIRED:
5374 return true;
5375 }
5376 return false;
5377 }
5378
5379 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
5380 {
5381 struct nfs_server *server = NFS_SERVER(hdr->inode);
5382
5383 trace_nfs4_read(hdr, task->tk_status);
5384 if (task->tk_status < 0) {
5385 struct nfs4_exception exception = {
5386 .inode = hdr->inode,
5387 .state = hdr->args.context->state,
5388 .stateid = &hdr->args.stateid,
5389 };
5390 task->tk_status = nfs4_async_handle_exception(task,
5391 server, task->tk_status, &exception);
5392 if (exception.retry) {
5393 rpc_restart_call_prepare(task);
5394 return -EAGAIN;
5395 }
5396 }
5397
5398 if (task->tk_status > 0)
5399 renew_lease(server, hdr->timestamp);
5400 return 0;
5401 }
5402
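/*
 * If a READ failed with a stateid-expiry error, but the stateid recorded in
 * the arguments is no longer the one currently held for this open/lock
 * context, state recovery has already replaced it: re-drive the RPC with
 * the fresh stateid instead of surfacing the error.
 */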
5403 static bool nfs4_read_stateid_changed(struct rpc_task *task,
5404 struct nfs_pgio_args *args)
5405 {
5406
5407 if (!nfs4_error_stateid_expired(task->tk_status) ||
5408 nfs4_stateid_is_current(&args->stateid,
5409 args->context,
5410 args->lock_context,
5411 FMODE_READ))
5412 return false;
5413 rpc_restart_call_prepare(task);
5414 return true;
5415 }
5416
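/*
 * A server may advertise READ_PLUS and still reject it with -ENOTSUPP.
 * In that case, clear NFS_CAP_READ_PLUS for this server and replay the
 * request as a plain READ.
 */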
5417 static bool nfs4_read_plus_not_supported(struct rpc_task *task,
5418 struct nfs_pgio_header *hdr)
5419 {
5420 struct nfs_server *server = NFS_SERVER(hdr->inode);
5421 struct rpc_message *msg = &task->tk_msg;
5422
5423 if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] &&
5424 server->caps & NFS_CAP_READ_PLUS && task->tk_status == -ENOTSUPP) {
5425 server->caps &= ~NFS_CAP_READ_PLUS;
5426 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
5427 rpc_restart_call_prepare(task);
5428 return true;
5429 }
5430 return false;
5431 }
5432
5433 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
5434 {
5435 dprintk("--> %s\n", __func__);
5436
5437 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
5438 return -EAGAIN;
5439 if (nfs4_read_stateid_changed(task, &hdr->args))
5440 return -EAGAIN;
5441 if (nfs4_read_plus_not_supported(task, hdr))
5442 return -EAGAIN;
5443 if (task->tk_status > 0)
5444 nfs_invalidate_atime(hdr->inode);
5445 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
5446 nfs4_read_done_cb(task, hdr);
5447 }
5448
5449 #if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS
5450 static void nfs42_read_plus_support(struct nfs_pgio_header *hdr,
5451 struct rpc_message *msg)
5452 {
5453 /* Note: We don't use READ_PLUS with pNFS yet */
5454 if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp)
5455 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS];
5456 }
5457 #else
5458 static void nfs42_read_plus_support(struct nfs_pgio_header *hdr,
5459 struct rpc_message *msg)
5460 {
5461 }
5462 #endif /* CONFIG_NFS_V4_2 && CONFIG_NFS_V4_2_READ_PLUS */
5463
5464 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
5465 struct rpc_message *msg)
5466 {
5467 hdr->timestamp = jiffies;
5468 if (!hdr->pgio_done_cb)
5469 hdr->pgio_done_cb = nfs4_read_done_cb;
5470 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
5471 nfs42_read_plus_support(hdr, msg);
5472 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
5473 }
5474
5475 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
5476 struct nfs_pgio_header *hdr)
5477 {
5478 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client,
5479 &hdr->args.seq_args,
5480 &hdr->res.seq_res,
5481 task))
5482 return 0;
5483 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
5484 hdr->args.lock_context,
5485 hdr->rw_mode) == -EIO)
5486 return -EIO;
5487 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
5488 return -EIO;
5489 return 0;
5490 }
5491
5492 static int nfs4_write_done_cb(struct rpc_task *task,
5493 struct nfs_pgio_header *hdr)
5494 {
5495 struct inode *inode = hdr->inode;
5496
5497 trace_nfs4_write(hdr, task->tk_status);
5498 if (task->tk_status < 0) {
5499 struct nfs4_exception exception = {
5500 .inode = hdr->inode,
5501 .state = hdr->args.context->state,
5502 .stateid = &hdr->args.stateid,
5503 };
5504 task->tk_status = nfs4_async_handle_exception(task,
5505 NFS_SERVER(inode), task->tk_status,
5506 &exception);
5507 if (exception.retry) {
5508 rpc_restart_call_prepare(task);
5509 return -EAGAIN;
5510 }
5511 }
5512 if (task->tk_status >= 0) {
5513 renew_lease(NFS_SERVER(inode), hdr->timestamp);
5514 nfs_writeback_update_inode(hdr);
5515 }
5516 return 0;
5517 }
5518
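/* WRITE-side counterpart of nfs4_read_stateid_changed(). */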
5519 static bool nfs4_write_stateid_changed(struct rpc_task *task,
5520 struct nfs_pgio_args *args)
5521 {
5522
5523 if (!nfs4_error_stateid_expired(task->tk_status) ||
5524 nfs4_stateid_is_current(&args->stateid,
5525 args->context,
5526 args->lock_context,
5527 FMODE_WRITE))
5528 return false;
5529 rpc_restart_call_prepare(task);
5530 return true;
5531 }
5532
5533 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
5534 {
5535 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
5536 return -EAGAIN;
5537 if (nfs4_write_stateid_changed(task, &hdr->args))
5538 return -EAGAIN;
5539 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
5540 nfs4_write_done_cb(task, hdr);
5541 }
5542
5543 static
5544 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
5545 {
5546 /* Don't request attributes for pNFS or O_DIRECT writes */
5547 if (hdr->ds_clp != NULL || hdr->dreq != NULL)
5548 return false;
5549 /* Otherwise, request attributes if and only if we don't hold
5550 * a delegation
5551 */
5552 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
5553 }
5554
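/*
 * Build the attribute bitmask for a request: start from @src, add any
 * attributes whose cached copies are known to be stale for @inode, then
 * trim the result down to the attributes this server actually supports.
 */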
5555 static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ], const __u32 *src,
5556 struct inode *inode, struct nfs_server *server,
5557 struct nfs4_label *label)
5558 {
5559 unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
5560 unsigned int i;
5561
5562 memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ);
5563
5564 if (cache_validity & NFS_INO_INVALID_CHANGE)
5565 bitmask[0] |= FATTR4_WORD0_CHANGE;
5566 if (cache_validity & NFS_INO_INVALID_ATIME)
5567 bitmask[1] |= FATTR4_WORD1_TIME_ACCESS;
5568 if (cache_validity & NFS_INO_INVALID_MODE)
5569 bitmask[1] |= FATTR4_WORD1_MODE;
5570 if (cache_validity & NFS_INO_INVALID_OTHER)
5571 bitmask[1] |= FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP;
5572 if (cache_validity & NFS_INO_INVALID_NLINK)
5573 bitmask[1] |= FATTR4_WORD1_NUMLINKS;
5574 if (label && label->len && cache_validity & NFS_INO_INVALID_LABEL)
5575 bitmask[2] |= FATTR4_WORD2_SECURITY_LABEL;
5576 if (cache_validity & NFS_INO_INVALID_CTIME)
5577 bitmask[1] |= FATTR4_WORD1_TIME_METADATA;
5578 if (cache_validity & NFS_INO_INVALID_MTIME)
5579 bitmask[1] |= FATTR4_WORD1_TIME_MODIFY;
5580 if (cache_validity & NFS_INO_INVALID_BLOCKS)
5581 bitmask[1] |= FATTR4_WORD1_SPACE_USED;
5582
5583 if (cache_validity & NFS_INO_INVALID_SIZE)
5584 bitmask[0] |= FATTR4_WORD0_SIZE;
5585
5586 for (i = 0; i < NFS4_BITMASK_SZ; i++)
5587 bitmask[i] &= server->attr_bitmask[i];
5588 }
5589
5590 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
5591 struct rpc_message *msg,
5592 struct rpc_clnt **clnt)
5593 {
5594 struct nfs_server *server = NFS_SERVER(hdr->inode);
5595
5596 if (!nfs4_write_need_cache_consistency_data(hdr)) {
5597 hdr->args.bitmask = NULL;
5598 hdr->res.fattr = NULL;
5599 } else {
5600 nfs4_bitmask_set(hdr->args.bitmask_store,
5601 server->cache_consistency_bitmask,
5602 hdr->inode, server, NULL);
5603 hdr->args.bitmask = hdr->args.bitmask_store;
5604 }
5605
5606 if (!hdr->pgio_done_cb)
5607 hdr->pgio_done_cb = nfs4_write_done_cb;
5608 hdr->res.server = server;
5609 hdr->timestamp = jiffies;
5610
5611 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
5612 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
5613 nfs4_state_protect_write(hdr->ds_clp ? hdr->ds_clp : server->nfs_client, clnt, msg, hdr);
5614 }
5615
5616 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
5617 {
5618 nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client,
5619 &data->args.seq_args,
5620 &data->res.seq_res,
5621 task);
5622 }
5623
5624 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
5625 {
5626 struct inode *inode = data->inode;
5627
5628 trace_nfs4_commit(data, task->tk_status);
5629 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
5630 NULL, NULL) == -EAGAIN) {
5631 rpc_restart_call_prepare(task);
5632 return -EAGAIN;
5633 }
5634 return 0;
5635 }
5636
5637 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
5638 {
5639 if (!nfs4_sequence_done(task, &data->res.seq_res))
5640 return -EAGAIN;
5641 return data->commit_done_cb(task, data);
5642 }
5643
5644 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg,
5645 struct rpc_clnt **clnt)
5646 {
5647 struct nfs_server *server = NFS_SERVER(data->inode);
5648
5649 if (data->commit_done_cb == NULL)
5650 data->commit_done_cb = nfs4_commit_done_cb;
5651 data->res.server = server;
5652 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
5653 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
5654 nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client,
5655 NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
5656 }
5657
5658 static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args,
5659 struct nfs_commitres *res)
5660 {
5661 struct inode *dst_inode = file_inode(dst);
5662 struct nfs_server *server = NFS_SERVER(dst_inode);
5663 struct rpc_message msg = {
5664 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT],
5665 .rpc_argp = args,
5666 .rpc_resp = res,
5667 };
5668
5669 args->fh = NFS_FH(dst_inode);
5670 return nfs4_call_sync(server->client, server, &msg,
5671 &args->seq_args, &res->seq_res, 1);
5672 }
5673
5674 int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res)
5675 {
5676 struct nfs_commitargs args = {
5677 .offset = offset,
5678 .count = count,
5679 };
5680 struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
5681 struct nfs4_exception exception = { };
5682 int status;
5683
5684 do {
5685 status = _nfs4_proc_commit(dst, &args, res);
5686 status = nfs4_handle_exception(dst_server, status, &exception);
5687 } while (exception.retry);
5688
5689 return status;
5690 }
5691
5692 struct nfs4_renewdata {
5693 struct nfs_client *client;
5694 unsigned long timestamp;
5695 };
5696
5697 /*
5698 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
5699 * standalone procedure for queueing an asynchronous RENEW.
5700 */
5701 static void nfs4_renew_release(void *calldata)
5702 {
5703 struct nfs4_renewdata *data = calldata;
5704 struct nfs_client *clp = data->client;
5705
5706 if (refcount_read(&clp->cl_count) > 1)
5707 nfs4_schedule_state_renewal(clp);
5708 nfs_put_client(clp);
5709 kfree(data);
5710 }
5711
5712 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
5713 {
5714 struct nfs4_renewdata *data = calldata;
5715 struct nfs_client *clp = data->client;
5716 unsigned long timestamp = data->timestamp;
5717
5718 trace_nfs4_renew_async(clp, task->tk_status);
5719 switch (task->tk_status) {
5720 case 0:
5721 break;
5722 case -NFS4ERR_LEASE_MOVED:
5723 nfs4_schedule_lease_moved_recovery(clp);
5724 break;
5725 default:
5726 /* Unless we're shutting down, schedule state recovery! */
5727 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
5728 return;
5729 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
5730 nfs4_schedule_lease_recovery(clp);
5731 return;
5732 }
5733 nfs4_schedule_path_down_recovery(clp);
5734 }
5735 do_renew_lease(clp, timestamp);
5736 }
5737
5738 static const struct rpc_call_ops nfs4_renew_ops = {
5739 .rpc_call_done = nfs4_renew_done,
5740 .rpc_release = nfs4_renew_release,
5741 };
5742
5743 static int nfs4_proc_async_renew(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
5744 {
5745 struct rpc_message msg = {
5746 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
5747 .rpc_argp = clp,
5748 .rpc_cred = cred,
5749 };
5750 struct nfs4_renewdata *data;
5751
5752 if (renew_flags == 0)
5753 return 0;
5754 if (!refcount_inc_not_zero(&clp->cl_count))
5755 return -EIO;
5756 data = kmalloc(sizeof(*data), GFP_NOFS);
5757 if (data == NULL) {
5758 nfs_put_client(clp);
5759 return -ENOMEM;
5760 }
5761 data->client = clp;
5762 data->timestamp = jiffies;
5763 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
5764 &nfs4_renew_ops, data);
5765 }
5766
5767 static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred)
5768 {
5769 struct rpc_message msg = {
5770 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
5771 .rpc_argp = clp,
5772 .rpc_cred = cred,
5773 };
5774 unsigned long now = jiffies;
5775 int status;
5776
5777 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5778 if (status < 0)
5779 return status;
5780 do_renew_lease(clp, now);
5781 return 0;
5782 }
5783
5784 static inline int nfs4_server_supports_acls(struct nfs_server *server)
5785 {
5786 return server->caps & NFS_CAP_ACLS;
5787 }
5788
5789 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
5790 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
5791 * the stack.
5792 */
5793 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
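/*
 * For example (typical configuration, not guaranteed): with an
 * XATTR_SIZE_MAX of 65536 and a 4K PAGE_SIZE, NFS4ACL_MAXPAGES is 16, so
 * the on-stack pages[] array in __nfs4_proc_set_acl() holds 16 pointers
 * (128 bytes on a 64-bit build).
 */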
5794
5795 int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen,
5796 struct page **pages)
5797 {
5798 struct page *newpage, **spages;
5799 int rc = 0;
5800 size_t len;
5801 spages = pages;
5802
5803 do {
5804 len = min_t(size_t, PAGE_SIZE, buflen);
5805 newpage = alloc_page(GFP_KERNEL);
5806
5807 if (newpage == NULL)
5808 goto unwind;
5809 memcpy(page_address(newpage), buf, len);
5810 buf += len;
5811 buflen -= len;
5812 *pages++ = newpage;
5813 rc++;
5814 } while (buflen != 0);
5815
5816 return rc;
5817
5818 unwind:
5819 for(; rc > 0; rc--)
5820 __free_page(spages[rc-1]);
5821 return -ENOMEM;
5822 }
5823
5824 struct nfs4_cached_acl {
5825 int cached;
5826 size_t len;
5827 char data[];
5828 };
5829
5830 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
5831 {
5832 struct nfs_inode *nfsi = NFS_I(inode);
5833
5834 spin_lock(&inode->i_lock);
5835 kfree(nfsi->nfs4_acl);
5836 nfsi->nfs4_acl = acl;
5837 spin_unlock(&inode->i_lock);
5838 }
5839
5840 static void nfs4_zap_acl_attr(struct inode *inode)
5841 {
5842 nfs4_set_cached_acl(inode, NULL);
5843 }
5844
5845 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
5846 {
5847 struct nfs_inode *nfsi = NFS_I(inode);
5848 struct nfs4_cached_acl *acl;
5849 int ret = -ENOENT;
5850
5851 spin_lock(&inode->i_lock);
5852 acl = nfsi->nfs4_acl;
5853 if (acl == NULL)
5854 goto out;
5855 if (buf == NULL) /* user is just asking for length */
5856 goto out_len;
5857 if (acl->cached == 0)
5858 goto out;
5859 ret = -ERANGE; /* see getxattr(2) man page */
5860 if (acl->len > buflen)
5861 goto out;
5862 memcpy(buf, acl->data, acl->len);
5863 out_len:
5864 ret = acl->len;
5865 out:
5866 spin_unlock(&inode->i_lock);
5867 return ret;
5868 }
5869
5870 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
5871 {
5872 struct nfs4_cached_acl *acl;
5873 size_t buflen = sizeof(*acl) + acl_len;
5874
5875 if (buflen <= PAGE_SIZE) {
5876 acl = kmalloc(buflen, GFP_KERNEL);
5877 if (acl == NULL)
5878 goto out;
5879 acl->cached = 1;
5880 _copy_from_pages(acl->data, pages, pgbase, acl_len);
5881 } else {
5882 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
5883 if (acl == NULL)
5884 goto out;
5885 acl->cached = 0;
5886 }
5887 acl->len = acl_len;
5888 out:
5889 nfs4_set_cached_acl(inode, acl);
5890 }
5891
5892 /*
5893 * The getxattr API returns the required buffer length when called with a
5894 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
5895 * the required buf. On a NULL buf, we send a page of data to the server
5896 * guessing that the ACL request can be serviced by a page. If so, we cache
5897 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
5898 * the cache. If not so, we throw away the page, and cache the required
5899 * length. The next getxattr call will then produce another round trip to
5900 * the server, this time with the input buf of the required size.
5901 */
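/*
 * Userspace sketch of that two-call pattern (illustrative only, not part
 * of this file; error handling omitted):
 *
 *	ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	char *buf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", buf, len);
 */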
5902 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
5903 {
5904 struct page **pages;
5905 struct nfs_getaclargs args = {
5906 .fh = NFS_FH(inode),
5907 .acl_len = buflen,
5908 };
5909 struct nfs_getaclres res = {
5910 .acl_len = buflen,
5911 };
5912 struct rpc_message msg = {
5913 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
5914 .rpc_argp = &args,
5915 .rpc_resp = &res,
5916 };
5917 unsigned int npages;
5918 int ret = -ENOMEM, i;
5919 struct nfs_server *server = NFS_SERVER(inode);
5920
5921 if (buflen == 0)
5922 buflen = server->rsize;
5923
5924 npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
5925 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
5926 if (!pages)
5927 return -ENOMEM;
5928
5929 args.acl_pages = pages;
5930
5931 for (i = 0; i < npages; i++) {
5932 pages[i] = alloc_page(GFP_KERNEL);
5933 if (!pages[i])
5934 goto out_free;
5935 }
5936
5937 /* for decoding across pages */
5938 res.acl_scratch = alloc_page(GFP_KERNEL);
5939 if (!res.acl_scratch)
5940 goto out_free;
5941
5942 args.acl_len = npages * PAGE_SIZE;
5943
5944 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
5945 __func__, buf, buflen, npages, args.acl_len);
5946 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
5947 &msg, &args.seq_args, &res.seq_res, 0);
5948 if (ret)
5949 goto out_free;
5950
5951 /* Handle the case where the passed-in buffer is too short */
5952 if (res.acl_flags & NFS4_ACL_TRUNC) {
5953 /* Did the user only issue a request for the acl length? */
5954 if (buf == NULL)
5955 goto out_ok;
5956 ret = -ERANGE;
5957 goto out_free;
5958 }
5959 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
5960 if (buf) {
5961 if (res.acl_len > buflen) {
5962 ret = -ERANGE;
5963 goto out_free;
5964 }
5965 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
5966 }
5967 out_ok:
5968 ret = res.acl_len;
5969 out_free:
5970 while (--i >= 0)
5971 __free_page(pages[i]);
5972 if (res.acl_scratch)
5973 __free_page(res.acl_scratch);
5974 kfree(pages);
5975 return ret;
5976 }
5977
5978 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
5979 {
5980 struct nfs4_exception exception = {
5981 .interruptible = true,
5982 };
5983 ssize_t ret;
5984 do {
5985 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
5986 trace_nfs4_get_acl(inode, ret);
5987 if (ret >= 0)
5988 break;
5989 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
5990 } while (exception.retry);
5991 return ret;
5992 }
5993
5994 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
5995 {
5996 struct nfs_server *server = NFS_SERVER(inode);
5997 int ret;
5998
5999 if (!nfs4_server_supports_acls(server))
6000 return -EOPNOTSUPP;
6001 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
6002 if (ret < 0)
6003 return ret;
6004 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
6005 nfs_zap_acl_cache(inode);
6006 ret = nfs4_read_cached_acl(inode, buf, buflen);
6007 if (ret != -ENOENT)
6008 /* -ENOENT is returned if there is no ACL or if there is an ACL
6009 * but no cached acl data, just the acl length */
6010 return ret;
6011 return nfs4_get_acl_uncached(inode, buf, buflen);
6012 }
6013
6014 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
6015 {
6016 struct nfs_server *server = NFS_SERVER(inode);
6017 struct page *pages[NFS4ACL_MAXPAGES];
6018 struct nfs_setaclargs arg = {
6019 .fh = NFS_FH(inode),
6020 .acl_pages = pages,
6021 .acl_len = buflen,
6022 };
6023 struct nfs_setaclres res;
6024 struct rpc_message msg = {
6025 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
6026 .rpc_argp = &arg,
6027 .rpc_resp = &res,
6028 };
6029 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
6030 int ret, i;
6031
6032 /* You can't remove system.nfs4_acl: */
6033 if (buflen == 0)
6034 return -EINVAL;
6035 if (!nfs4_server_supports_acls(server))
6036 return -EOPNOTSUPP;
6037 if (npages > ARRAY_SIZE(pages))
6038 return -ERANGE;
6039 i = nfs4_buf_to_pages_noslab(buf, buflen, arg.acl_pages);
6040 if (i < 0)
6041 return i;
6042 nfs4_inode_make_writeable(inode);
6043 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
6044
6045 /*
6046 * Free each page after tx, so the only ref left is
6047 * held by the network stack
6048 */
6049 for (; i > 0; i--)
6050 put_page(pages[i-1]);
6051
6052 /*
6053 * Acl update can result in inode attribute update.
6054 * so mark the attribute cache invalid.
6055 */
6056 spin_lock(&inode->i_lock);
6057 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
6058 NFS_INO_INVALID_CTIME |
6059 NFS_INO_REVAL_FORCED);
6060 spin_unlock(&inode->i_lock);
6061 nfs_access_zap_cache(inode);
6062 nfs_zap_acl_cache(inode);
6063 return ret;
6064 }
6065
6066 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
6067 {
6068 struct nfs4_exception exception = { };
6069 int err;
6070 do {
6071 err = __nfs4_proc_set_acl(inode, buf, buflen);
6072 trace_nfs4_set_acl(inode, err);
6073 if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) {
6074 /*
6075 * no need to retry since the kernel
6076 * isn't involved in encoding the ACEs.
6077 */
6078 err = -EINVAL;
6079 break;
6080 }
6081 err = nfs4_handle_exception(NFS_SERVER(inode), err,
6082 &exception);
6083 } while (exception.retry);
6084 return err;
6085 }
6086
6087 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
6088 static int _nfs4_get_security_label(struct inode *inode, void *buf,
6089 size_t buflen)
6090 {
6091 struct nfs_server *server = NFS_SERVER(inode);
6092 struct nfs_fattr fattr;
6093 struct nfs4_label label = {0, 0, buflen, buf};
6094
6095 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
6096 struct nfs4_getattr_arg arg = {
6097 .fh = NFS_FH(inode),
6098 .bitmask = bitmask,
6099 };
6100 struct nfs4_getattr_res res = {
6101 .fattr = &fattr,
6102 .label = &label,
6103 .server = server,
6104 };
6105 struct rpc_message msg = {
6106 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
6107 .rpc_argp = &arg,
6108 .rpc_resp = &res,
6109 };
6110 int ret;
6111
6112 nfs_fattr_init(&fattr);
6113
6114 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
6115 if (ret)
6116 return ret;
6117 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
6118 return -ENOENT;
6119 return label.len;
6120 }
6121
6122 static int nfs4_get_security_label(struct inode *inode, void *buf,
6123 size_t buflen)
6124 {
6125 struct nfs4_exception exception = {
6126 .interruptible = true,
6127 };
6128 int err;
6129
6130 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
6131 return -EOPNOTSUPP;
6132
6133 do {
6134 err = _nfs4_get_security_label(inode, buf, buflen);
6135 trace_nfs4_get_security_label(inode, err);
6136 err = nfs4_handle_exception(NFS_SERVER(inode), err,
6137 &exception);
6138 } while (exception.retry);
6139 return err;
6140 }
6141
6142 static int _nfs4_do_set_security_label(struct inode *inode,
6143 struct nfs4_label *ilabel,
6144 struct nfs_fattr *fattr,
6145 struct nfs4_label *olabel)
6146 {
6147
6148 struct iattr sattr = {0};
6149 struct nfs_server *server = NFS_SERVER(inode);
6150 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
6151 struct nfs_setattrargs arg = {
6152 .fh = NFS_FH(inode),
6153 .iap = &sattr,
6154 .server = server,
6155 .bitmask = bitmask,
6156 .label = ilabel,
6157 };
6158 struct nfs_setattrres res = {
6159 .fattr = fattr,
6160 .label = olabel,
6161 .server = server,
6162 };
6163 struct rpc_message msg = {
6164 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
6165 .rpc_argp = &arg,
6166 .rpc_resp = &res,
6167 };
6168 int status;
6169
6170 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
6171
6172 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
6173 if (status)
6174 dprintk("%s failed: %d\n", __func__, status);
6175
6176 return status;
6177 }
6178
6179 static int nfs4_do_set_security_label(struct inode *inode,
6180 struct nfs4_label *ilabel,
6181 struct nfs_fattr *fattr,
6182 struct nfs4_label *olabel)
6183 {
6184 struct nfs4_exception exception = { };
6185 int err;
6186
6187 do {
6188 err = _nfs4_do_set_security_label(inode, ilabel,
6189 fattr, olabel);
6190 trace_nfs4_set_security_label(inode, err);
6191 err = nfs4_handle_exception(NFS_SERVER(inode), err,
6192 &exception);
6193 } while (exception.retry);
6194 return err;
6195 }
6196
6197 static int
6198 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
6199 {
6200 struct nfs4_label ilabel, *olabel = NULL;
6201 struct nfs_fattr fattr;
6202 int status;
6203
6204 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
6205 return -EOPNOTSUPP;
6206
6207 nfs_fattr_init(&fattr);
6208
6209 ilabel.pi = 0;
6210 ilabel.lfs = 0;
6211 ilabel.label = (char *)buf;
6212 ilabel.len = buflen;
6213
6214 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
6215 if (IS_ERR(olabel)) {
6216 status = -PTR_ERR(olabel);
6217 goto out;
6218 }
6219
6220 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel);
6221 if (status == 0)
6222 nfs_setsecurity(inode, &fattr, olabel);
6223
6224 nfs4_label_free(olabel);
6225 out:
6226 return status;
6227 }
6228 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
6229
6230
6231 static void nfs4_init_boot_verifier(const struct nfs_client *clp,
6232 nfs4_verifier *bootverf)
6233 {
6234 __be32 verf[2];
6235
6236 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
6237 /* An impossible timestamp guarantees this value
6238 * will never match a generated boot time. */
6239 verf[0] = cpu_to_be32(U32_MAX);
6240 verf[1] = cpu_to_be32(U32_MAX);
6241 } else {
6242 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
6243 u64 ns = ktime_to_ns(nn->boot_time);
6244
6245 verf[0] = cpu_to_be32(ns >> 32);
6246 verf[1] = cpu_to_be32(ns);
6247 }
6248 memcpy(bootverf->data, verf, sizeof(bootverf->data));
6249 }
6250
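/*
 * Fetch the client-ID uniquifier: prefer the per-network-namespace
 * identifier, falling back to the module-wide nfs4_client_id_uniquifier
 * string.  Returns the length of the resulting string (0 if neither is
 * set).
 */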
6251 static size_t
6252 nfs4_get_uniquifier(struct nfs_client *clp, char *buf, size_t buflen)
6253 {
6254 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
6255 struct nfs_netns_client *nn_clp = nn->nfs_client;
6256 const char *id;
6257
6258 buf[0] = '\0';
6259
6260 if (nn_clp) {
6261 rcu_read_lock();
6262 id = rcu_dereference(nn_clp->identifier);
6263 if (id)
6264 strscpy(buf, id, buflen);
6265 rcu_read_unlock();
6266 }
6267
6268 if (nfs4_client_id_uniquifier[0] != '\0' && buf[0] == '\0')
6269 strscpy(buf, nfs4_client_id_uniquifier, buflen);
6270
6271 return strlen(buf);
6272 }
6273
6274 static int
6275 nfs4_init_nonuniform_client_string(struct nfs_client *clp)
6276 {
6277 char buf[NFS4_CLIENT_ID_UNIQ_LEN];
6278 size_t buflen;
6279 size_t len;
6280 char *str;
6281
6282 if (clp->cl_owner_id != NULL)
6283 return 0;
6284
6285 rcu_read_lock();
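	/* 14 is strlen("Linux NFSv4.0 "); the two "+ 1"s below cover the
	 * '/' separator and the terminating NUL (see the scnprintf formats
	 * further down).
	 */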
6286 len = 14 +
6287 strlen(clp->cl_rpcclient->cl_nodename) +
6288 1 +
6289 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
6290 1;
6291 rcu_read_unlock();
6292
6293 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf));
6294 if (buflen)
6295 len += buflen + 1;
6296
6297 if (len > NFS4_OPAQUE_LIMIT + 1)
6298 return -EINVAL;
6299
6300 /*
6301 * Since this string is allocated at mount time, and held until the
6302 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
6303 * about a memory-reclaim deadlock.
6304 */
6305 str = kmalloc(len, GFP_KERNEL);
6306 if (!str)
6307 return -ENOMEM;
6308
6309 rcu_read_lock();
6310 if (buflen)
6311 scnprintf(str, len, "Linux NFSv4.0 %s/%s/%s",
6312 clp->cl_rpcclient->cl_nodename, buf,
6313 rpc_peeraddr2str(clp->cl_rpcclient,
6314 RPC_DISPLAY_ADDR));
6315 else
6316 scnprintf(str, len, "Linux NFSv4.0 %s/%s",
6317 clp->cl_rpcclient->cl_nodename,
6318 rpc_peeraddr2str(clp->cl_rpcclient,
6319 RPC_DISPLAY_ADDR));
6320 rcu_read_unlock();
6321
6322 clp->cl_owner_id = str;
6323 return 0;
6324 }
6325
6326 static int
6327 nfs4_init_uniform_client_string(struct nfs_client *clp)
6328 {
6329 char buf[NFS4_CLIENT_ID_UNIQ_LEN];
6330 size_t buflen;
6331 size_t len;
6332 char *str;
6333
6334 if (clp->cl_owner_id != NULL)
6335 return 0;
6336
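	/* "Linux NFSv" is 10 characters; allow up to 10 digits for each of
	 * the version and minorversion, plus the '.' and ' ' separators and
	 * a terminating NUL.
	 */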
6337 len = 10 + 10 + 1 + 10 + 1 +
6338 strlen(clp->cl_rpcclient->cl_nodename) + 1;
6339
6340 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf));
6341 if (buflen)
6342 len += buflen + 1;
6343
6344 if (len > NFS4_OPAQUE_LIMIT + 1)
6345 return -EINVAL;
6346
6347 /*
6348 * Since this string is allocated at mount time, and held until the
6349 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
6350 * about a memory-reclaim deadlock.
6351 */
6352 str = kmalloc(len, GFP_KERNEL);
6353 if (!str)
6354 return -ENOMEM;
6355
6356 if (buflen)
6357 scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
6358 clp->rpc_ops->version, clp->cl_minorversion,
6359 buf, clp->cl_rpcclient->cl_nodename);
6360 else
6361 scnprintf(str, len, "Linux NFSv%u.%u %s",
6362 clp->rpc_ops->version, clp->cl_minorversion,
6363 clp->cl_rpcclient->cl_nodename);
6364 clp->cl_owner_id = str;
6365 return 0;
6366 }
6367
6368 /*
6369 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
6370 * services. Advertise one based on the address family of the
6371 * clientaddr.
6372 */
6373 static unsigned int
6374 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
6375 {
6376 if (strchr(clp->cl_ipaddr, ':') != NULL)
6377 return scnprintf(buf, len, "tcp6");
6378 else
6379 return scnprintf(buf, len, "tcp");
6380 }
6381
6382 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
6383 {
6384 struct nfs4_setclientid *sc = calldata;
6385
6386 if (task->tk_status == 0)
6387 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
6388 }
6389
6390 static const struct rpc_call_ops nfs4_setclientid_ops = {
6391 .rpc_call_done = nfs4_setclientid_done,
6392 };
6393
6394 /**
6395 * nfs4_proc_setclientid - Negotiate client ID
6396 * @clp: state data structure
6397 * @program: RPC program for NFSv4 callback service
6398 * @port: IP port number for NFS4 callback service
6399 * @cred: credential to use for this call
6400 * @res: where to place the result
6401 *
6402 * Returns zero, a negative errno, or a negative NFS4ERR status code.
6403 */
6404 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
6405 unsigned short port, const struct cred *cred,
6406 struct nfs4_setclientid_res *res)
6407 {
6408 nfs4_verifier sc_verifier;
6409 struct nfs4_setclientid setclientid = {
6410 .sc_verifier = &sc_verifier,
6411 .sc_prog = program,
6412 .sc_clnt = clp,
6413 };
6414 struct rpc_message msg = {
6415 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
6416 .rpc_argp = &setclientid,
6417 .rpc_resp = res,
6418 .rpc_cred = cred,
6419 };
6420 struct rpc_task_setup task_setup_data = {
6421 .rpc_client = clp->cl_rpcclient,
6422 .rpc_message = &msg,
6423 .callback_ops = &nfs4_setclientid_ops,
6424 .callback_data = &setclientid,
6425 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
6426 };
6427 unsigned long now = jiffies;
6428 int status;
6429
6430 /* nfs_client_id4 */
6431 nfs4_init_boot_verifier(clp, &sc_verifier);
6432
6433 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
6434 status = nfs4_init_uniform_client_string(clp);
6435 else
6436 status = nfs4_init_nonuniform_client_string(clp);
6437
6438 if (status)
6439 goto out;
6440
6441 /* cb_client4 */
6442 setclientid.sc_netid_len =
6443 nfs4_init_callback_netid(clp,
6444 setclientid.sc_netid,
6445 sizeof(setclientid.sc_netid));
6446 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
6447 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
6448 clp->cl_ipaddr, port >> 8, port & 255);
6449
6450 dprintk("NFS call setclientid auth=%s, '%s'\n",
6451 clp->cl_rpcclient->cl_auth->au_ops->au_name,
6452 clp->cl_owner_id);
6453
6454 status = nfs4_call_sync_custom(&task_setup_data);
6455 if (setclientid.sc_cred) {
6456 kfree(clp->cl_acceptor);
6457 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
6458 put_rpccred(setclientid.sc_cred);
6459 }
6460
6461 if (status == 0)
6462 do_renew_lease(clp, now);
6463 out:
6464 trace_nfs4_setclientid(clp, status);
6465 dprintk("NFS reply setclientid: %d\n", status);
6466 return status;
6467 }
6468
6469 /**
6470 * nfs4_proc_setclientid_confirm - Confirm client ID
6471 * @clp: state data structure
6472 * @arg: result of a previous SETCLIENTID
6473 * @cred: credential to use for this call
6474 *
6475 * Returns zero, a negative errno, or a negative NFS4ERR status code.
6476 */
6477 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
6478 struct nfs4_setclientid_res *arg,
6479 const struct cred *cred)
6480 {
6481 struct rpc_message msg = {
6482 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
6483 .rpc_argp = arg,
6484 .rpc_cred = cred,
6485 };
6486 int status;
6487
6488 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
6489 clp->cl_rpcclient->cl_auth->au_ops->au_name,
6490 clp->cl_clientid);
6491 status = rpc_call_sync(clp->cl_rpcclient, &msg,
6492 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
6493 trace_nfs4_setclientid_confirm(clp, status);
6494 dprintk("NFS reply setclientid_confirm: %d\n", status);
6495 return status;
6496 }
6497
6498 struct nfs4_delegreturndata {
6499 struct nfs4_delegreturnargs args;
6500 struct nfs4_delegreturnres res;
6501 struct nfs_fh fh;
6502 nfs4_stateid stateid;
6503 unsigned long timestamp;
6504 struct {
6505 struct nfs4_layoutreturn_args arg;
6506 struct nfs4_layoutreturn_res res;
6507 struct nfs4_xdr_opaque_data ld_private;
6508 u32 roc_barrier;
6509 bool roc;
6510 } lr;
6511 struct nfs_fattr fattr;
6512 int rpc_status;
6513 struct inode *inode;
6514 };
6515
6516 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
6517 {
6518 struct nfs4_delegreturndata *data = calldata;
6519 struct nfs4_exception exception = {
6520 .inode = data->inode,
6521 .stateid = &data->stateid,
6522 .task_is_privileged = data->args.seq_args.sa_privileged,
6523 };
6524
6525 if (!nfs4_sequence_done(task, &data->res.seq_res))
6526 return;
6527
6528 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
6529
6530 /* Handle Layoutreturn errors */
6531 if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res,
6532 &data->res.lr_ret) == -EAGAIN)
6533 goto out_restart;
6534
6535 switch (task->tk_status) {
6536 case 0:
6537 renew_lease(data->res.server, data->timestamp);
6538 break;
6539 case -NFS4ERR_ADMIN_REVOKED:
6540 case -NFS4ERR_DELEG_REVOKED:
6541 case -NFS4ERR_EXPIRED:
6542 nfs4_free_revoked_stateid(data->res.server,
6543 data->args.stateid,
6544 task->tk_msg.rpc_cred);
6545 fallthrough;
6546 case -NFS4ERR_BAD_STATEID:
6547 case -NFS4ERR_STALE_STATEID:
6548 case -ETIMEDOUT:
6549 task->tk_status = 0;
6550 break;
6551 case -NFS4ERR_OLD_STATEID:
6552 if (!nfs4_refresh_delegation_stateid(&data->stateid, data->inode))
6553 nfs4_stateid_seqid_inc(&data->stateid);
6554 if (data->args.bitmask) {
6555 data->args.bitmask = NULL;
6556 data->res.fattr = NULL;
6557 }
6558 goto out_restart;
6559 case -NFS4ERR_ACCESS:
6560 if (data->args.bitmask) {
6561 data->args.bitmask = NULL;
6562 data->res.fattr = NULL;
6563 goto out_restart;
6564 }
6565 fallthrough;
6566 default:
6567 task->tk_status = nfs4_async_handle_exception(task,
6568 data->res.server, task->tk_status,
6569 &exception);
6570 if (exception.retry)
6571 goto out_restart;
6572 }
6573 nfs_delegation_mark_returned(data->inode, data->args.stateid);
6574 data->rpc_status = task->tk_status;
6575 return;
6576 out_restart:
6577 task->tk_status = 0;
6578 rpc_restart_call_prepare(task);
6579 }
6580
6581 static void nfs4_delegreturn_release(void *calldata)
6582 {
6583 struct nfs4_delegreturndata *data = calldata;
6584 struct inode *inode = data->inode;
6585
6586 if (data->lr.roc)
6587 pnfs_roc_release(&data->lr.arg, &data->lr.res,
6588 data->res.lr_ret);
6589 if (inode) {
6590 nfs4_fattr_set_prechange(&data->fattr,
6591 inode_peek_iversion_raw(inode));
6592 nfs_refresh_inode(inode, &data->fattr);
6593 nfs_iput_and_deactive(inode);
6594 }
6595 kfree(calldata);
6596 }
6597
6598 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
6599 {
6600 struct nfs4_delegreturndata *d_data;
6601 struct pnfs_layout_hdr *lo;
6602
6603 d_data = (struct nfs4_delegreturndata *)data;
6604
6605 if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) {
6606 nfs4_sequence_done(task, &d_data->res.seq_res);
6607 return;
6608 }
6609
6610 lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL;
6611 if (lo && !pnfs_layout_is_valid(lo)) {
6612 d_data->args.lr_args = NULL;
6613 d_data->res.lr_res = NULL;
6614 }
6615
6616 nfs4_setup_sequence(d_data->res.server->nfs_client,
6617 &d_data->args.seq_args,
6618 &d_data->res.seq_res,
6619 task);
6620 }
6621
6622 static const struct rpc_call_ops nfs4_delegreturn_ops = {
6623 .rpc_call_prepare = nfs4_delegreturn_prepare,
6624 .rpc_call_done = nfs4_delegreturn_done,
6625 .rpc_release = nfs4_delegreturn_release,
6626 };
6627
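/*
 * Return a delegation to the server.  When a pNFS layout is still held,
 * this also tries to return it at the same time via the return-on-close
 * machinery (pnfs_roc); @issync controls whether we wait for the RPC to
 * complete.
 */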
6628 static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync)
6629 {
6630 struct nfs4_delegreturndata *data;
6631 struct nfs_server *server = NFS_SERVER(inode);
6632 struct rpc_task *task;
6633 struct rpc_message msg = {
6634 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
6635 .rpc_cred = cred,
6636 };
6637 struct rpc_task_setup task_setup_data = {
6638 .rpc_client = server->client,
6639 .rpc_message = &msg,
6640 .callback_ops = &nfs4_delegreturn_ops,
6641 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
6642 };
6643 int status = 0;
6644
6645 if (nfs_server_capable(inode, NFS_CAP_MOVEABLE))
6646 task_setup_data.flags |= RPC_TASK_MOVEABLE;
6647
6648 data = kzalloc(sizeof(*data), GFP_KERNEL);
6649 if (data == NULL)
6650 return -ENOMEM;
6651
6652 nfs4_state_protect(server->nfs_client,
6653 NFS_SP4_MACH_CRED_CLEANUP,
6654 &task_setup_data.rpc_client, &msg);
6655
6656 data->args.fhandle = &data->fh;
6657 data->args.stateid = &data->stateid;
6658 nfs4_bitmask_set(data->args.bitmask_store,
6659 server->cache_consistency_bitmask, inode, server,
6660 NULL);
6661 data->args.bitmask = data->args.bitmask_store;
6662 nfs_copy_fh(&data->fh, NFS_FH(inode));
6663 nfs4_stateid_copy(&data->stateid, stateid);
6664 data->res.fattr = &data->fattr;
6665 data->res.server = server;
6666 data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
6667 data->lr.arg.ld_private = &data->lr.ld_private;
6668 nfs_fattr_init(data->res.fattr);
6669 data->timestamp = jiffies;
6670 data->rpc_status = 0;
6671 data->inode = nfs_igrab_and_active(inode);
6672 if (data->inode || issync) {
6673 data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res,
6674 cred);
6675 if (data->lr.roc) {
6676 data->args.lr_args = &data->lr.arg;
6677 data->res.lr_res = &data->lr.res;
6678 }
6679 }
6680
6681 if (!data->inode)
6682 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
6683 1);
6684 else
6685 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
6686 0);
6687 task_setup_data.callback_data = data;
6688 msg.rpc_argp = &data->args;
6689 msg.rpc_resp = &data->res;
6690 task = rpc_run_task(&task_setup_data);
6691 if (IS_ERR(task))
6692 return PTR_ERR(task);
6693 if (!issync)
6694 goto out;
6695 status = rpc_wait_for_completion_task(task);
6696 if (status != 0)
6697 goto out;
6698 status = data->rpc_status;
6699 out:
6700 rpc_put_task(task);
6701 return status;
6702 }
6703
6704 int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync)
6705 {
6706 struct nfs_server *server = NFS_SERVER(inode);
6707 struct nfs4_exception exception = { };
6708 int err;
6709 do {
6710 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
6711 trace_nfs4_delegreturn(inode, stateid, err);
6712 switch (err) {
6713 case -NFS4ERR_STALE_STATEID:
6714 case -NFS4ERR_EXPIRED:
6715 case 0:
6716 return 0;
6717 }
6718 err = nfs4_handle_exception(server, err, &exception);
6719 } while (exception.retry);
6720 return err;
6721 }
6722
6723 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6724 {
6725 struct inode *inode = state->inode;
6726 struct nfs_server *server = NFS_SERVER(inode);
6727 struct nfs_client *clp = server->nfs_client;
6728 struct nfs_lockt_args arg = {
6729 .fh = NFS_FH(inode),
6730 .fl = request,
6731 };
6732 struct nfs_lockt_res res = {
6733 .denied = request,
6734 };
6735 struct rpc_message msg = {
6736 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
6737 .rpc_argp = &arg,
6738 .rpc_resp = &res,
6739 .rpc_cred = state->owner->so_cred,
6740 };
6741 struct nfs4_lock_state *lsp;
6742 int status;
6743
6744 arg.lock_owner.clientid = clp->cl_clientid;
6745 status = nfs4_set_lock_state(state, request);
6746 if (status != 0)
6747 goto out;
6748 lsp = request->fl_u.nfs4_fl.owner;
6749 arg.lock_owner.id = lsp->ls_seqid.owner_id;
6750 arg.lock_owner.s_dev = server->s_dev;
6751 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
6752 switch (status) {
6753 case 0:
6754 request->fl_type = F_UNLCK;
6755 break;
6756 case -NFS4ERR_DENIED:
6757 status = 0;
6758 }
6759 request->fl_ops->fl_release_private(request);
6760 request->fl_ops = NULL;
6761 out:
6762 return status;
6763 }
6764
6765 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6766 {
6767 struct nfs4_exception exception = {
6768 .interruptible = true,
6769 };
6770 int err;
6771
6772 do {
6773 err = _nfs4_proc_getlk(state, cmd, request);
6774 trace_nfs4_get_lock(request, state, cmd, err);
6775 err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
6776 &exception);
6777 } while (exception.retry);
6778 return err;
6779 }
6780
6781 /*
6782 * Update the seqid of a lock stateid after receiving
6783 * NFS4ERR_OLD_STATEID
6784 */
6785 static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst,
6786 struct nfs4_lock_state *lsp)
6787 {
6788 struct nfs4_state *state = lsp->ls_state;
6789 bool ret = false;
6790
6791 spin_lock(&state->state_lock);
6792 if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid))
6793 goto out;
6794 if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst))
6795 nfs4_stateid_seqid_inc(dst);
6796 else
6797 dst->seqid = lsp->ls_stateid.seqid;
6798 ret = true;
6799 out:
6800 spin_unlock(&state->state_lock);
6801 return ret;
6802 }
6803
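/*
 * Resynchronize 'dst' with the current lock stateid. Returns true if 'dst'
 * referred to a different stateid, in which case the caller should retry
 * the operation with the updated copy.
 */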
6804 static bool nfs4_sync_lock_stateid(nfs4_stateid *dst,
6805 struct nfs4_lock_state *lsp)
6806 {
6807 struct nfs4_state *state = lsp->ls_state;
6808 bool ret;
6809
6810 spin_lock(&state->state_lock);
6811 ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid);
6812 nfs4_stateid_copy(dst, &lsp->ls_stateid);
6813 spin_unlock(&state->state_lock);
6814 return ret;
6815 }
6816
6817 struct nfs4_unlockdata {
6818 struct nfs_locku_args arg;
6819 struct nfs_locku_res res;
6820 struct nfs4_lock_state *lsp;
6821 struct nfs_open_context *ctx;
6822 struct nfs_lock_context *l_ctx;
6823 struct file_lock fl;
6824 struct nfs_server *server;
6825 unsigned long timestamp;
6826 };
6827
6828 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
6829 struct nfs_open_context *ctx,
6830 struct nfs4_lock_state *lsp,
6831 struct nfs_seqid *seqid)
6832 {
6833 struct nfs4_unlockdata *p;
6834 struct nfs4_state *state = lsp->ls_state;
6835 struct inode *inode = state->inode;
6836
6837 p = kzalloc(sizeof(*p), GFP_KERNEL);
6838 if (p == NULL)
6839 return NULL;
6840 p->arg.fh = NFS_FH(inode);
6841 p->arg.fl = &p->fl;
6842 p->arg.seqid = seqid;
6843 p->res.seqid = seqid;
6844 p->lsp = lsp;
6845 /* Ensure we don't close file until we're done freeing locks! */
6846 p->ctx = get_nfs_open_context(ctx);
6847 p->l_ctx = nfs_get_lock_context(ctx);
6848 locks_init_lock(&p->fl);
6849 locks_copy_lock(&p->fl, fl);
6850 p->server = NFS_SERVER(inode);
6851 spin_lock(&state->state_lock);
6852 nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid);
6853 spin_unlock(&state->state_lock);
6854 return p;
6855 }
6856
6857 static void nfs4_locku_release_calldata(void *data)
6858 {
6859 struct nfs4_unlockdata *calldata = data;
6860 nfs_free_seqid(calldata->arg.seqid);
6861 nfs4_put_lock_state(calldata->lsp);
6862 nfs_put_lock_context(calldata->l_ctx);
6863 put_nfs_open_context(calldata->ctx);
6864 kfree(calldata);
6865 }
6866
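/*
 * LOCKU completion: on success, renew the lease, apply the unlock locally
 * and update the lock stateid. Revoked or expired stateids are freed; bad,
 * stale or old stateids cause the request to be resynchronized and the RPC
 * restarted where possible.
 */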
6867 static void nfs4_locku_done(struct rpc_task *task, void *data)
6868 {
6869 struct nfs4_unlockdata *calldata = data;
6870 struct nfs4_exception exception = {
6871 .inode = calldata->lsp->ls_state->inode,
6872 .stateid = &calldata->arg.stateid,
6873 };
6874
6875 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
6876 return;
6877 switch (task->tk_status) {
6878 case 0:
6879 renew_lease(calldata->server, calldata->timestamp);
6880 locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl);
6881 if (nfs4_update_lock_stateid(calldata->lsp,
6882 &calldata->res.stateid))
6883 break;
6884 fallthrough;
6885 case -NFS4ERR_ADMIN_REVOKED:
6886 case -NFS4ERR_EXPIRED:
6887 nfs4_free_revoked_stateid(calldata->server,
6888 &calldata->arg.stateid,
6889 task->tk_msg.rpc_cred);
6890 fallthrough;
6891 case -NFS4ERR_BAD_STATEID:
6892 case -NFS4ERR_STALE_STATEID:
6893 if (nfs4_sync_lock_stateid(&calldata->arg.stateid,
6894 calldata->lsp))
6895 rpc_restart_call_prepare(task);
6896 break;
6897 case -NFS4ERR_OLD_STATEID:
6898 if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid,
6899 calldata->lsp))
6900 rpc_restart_call_prepare(task);
6901 break;
6902 default:
6903 task->tk_status = nfs4_async_handle_exception(task,
6904 calldata->server, task->tk_status,
6905 &exception);
6906 if (exception.retry)
6907 rpc_restart_call_prepare(task);
6908 }
6909 nfs_release_seqid(calldata->arg.seqid);
6910 }
6911
6912 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
6913 {
6914 struct nfs4_unlockdata *calldata = data;
6915
6916 if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) &&
6917 nfs_async_iocounter_wait(task, calldata->l_ctx))
6918 return;
6919
6920 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
6921 goto out_wait;
6922 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
6923 /* Note: exit _without_ running nfs4_locku_done */
6924 goto out_no_action;
6925 }
6926 calldata->timestamp = jiffies;
6927 if (nfs4_setup_sequence(calldata->server->nfs_client,
6928 &calldata->arg.seq_args,
6929 &calldata->res.seq_res,
6930 task) != 0)
6931 nfs_release_seqid(calldata->arg.seqid);
6932 return;
6933 out_no_action:
6934 task->tk_action = NULL;
6935 out_wait:
6936 nfs4_sequence_done(task, &calldata->res.seq_res);
6937 }
6938
6939 static const struct rpc_call_ops nfs4_locku_ops = {
6940 .rpc_call_prepare = nfs4_locku_prepare,
6941 .rpc_call_done = nfs4_locku_done,
6942 .rpc_release = nfs4_locku_release_calldata,
6943 };
6944
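/*
 * Build and launch an asynchronous LOCKU request. The lock is forced to
 * F_UNLCK first (a cancelled LOCK request may be passed in here), and the
 * caller is responsible for releasing the returned rpc_task.
 */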
6945 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
6946 struct nfs_open_context *ctx,
6947 struct nfs4_lock_state *lsp,
6948 struct nfs_seqid *seqid)
6949 {
6950 struct nfs4_unlockdata *data;
6951 struct rpc_message msg = {
6952 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
6953 .rpc_cred = ctx->cred,
6954 };
6955 struct rpc_task_setup task_setup_data = {
6956 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
6957 .rpc_message = &msg,
6958 .callback_ops = &nfs4_locku_ops,
6959 .workqueue = nfsiod_workqueue,
6960 .flags = RPC_TASK_ASYNC,
6961 };
6962
6963 if (nfs_server_capable(lsp->ls_state->inode, NFS_CAP_MOVEABLE))
6964 task_setup_data.flags |= RPC_TASK_MOVEABLE;
6965
6966 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
6967 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
6968
6969 /* Ensure this is an unlock - when canceling a lock, the
6970 * canceled lock is passed in, and it won't be an unlock.
6971 */
6972 fl->fl_type = F_UNLCK;
6973 if (fl->fl_flags & FL_CLOSE)
6974 set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags);
6975
6976 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
6977 if (data == NULL) {
6978 nfs_free_seqid(seqid);
6979 return ERR_PTR(-ENOMEM);
6980 }
6981
6982 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 0);
6983 msg.rpc_argp = &data->arg;
6984 msg.rpc_resp = &data->res;
6985 task_setup_data.callback_data = data;
6986 return rpc_run_task(&task_setup_data);
6987 }
6988
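/*
 * Release a lock: drop it locally first (taking so_delegreturn_mutex and
 * nfsi->rwsem to exclude delegation claim and open stateid reclaim), then
 * send LOCKU only if the lock state was actually established on the server.
 */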
6989 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
6990 {
6991 struct inode *inode = state->inode;
6992 struct nfs4_state_owner *sp = state->owner;
6993 struct nfs_inode *nfsi = NFS_I(inode);
6994 struct nfs_seqid *seqid;
6995 struct nfs4_lock_state *lsp;
6996 struct rpc_task *task;
6997 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
6998 int status = 0;
6999 unsigned char fl_flags = request->fl_flags;
7000
7001 status = nfs4_set_lock_state(state, request);
7002 /* Unlock _before_ we do the RPC call */
7003 request->fl_flags |= FL_EXISTS;
7004 /* Exclude nfs_delegation_claim_locks() */
7005 mutex_lock(&sp->so_delegreturn_mutex);
7006 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
7007 down_read(&nfsi->rwsem);
7008 if (locks_lock_inode_wait(inode, request) == -ENOENT) {
7009 up_read(&nfsi->rwsem);
7010 mutex_unlock(&sp->so_delegreturn_mutex);
7011 goto out;
7012 }
7013 up_read(&nfsi->rwsem);
7014 mutex_unlock(&sp->so_delegreturn_mutex);
7015 if (status != 0)
7016 goto out;
7017 /* Is this a delegated lock? */
7018 lsp = request->fl_u.nfs4_fl.owner;
7019 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
7020 goto out;
7021 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
7022 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
7023 status = -ENOMEM;
7024 if (IS_ERR(seqid))
7025 goto out;
7026 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
7027 status = PTR_ERR(task);
7028 if (IS_ERR(task))
7029 goto out;
7030 status = rpc_wait_for_completion_task(task);
7031 rpc_put_task(task);
7032 out:
7033 request->fl_flags = fl_flags;
7034 trace_nfs4_unlock(request, state, F_SETLK, status);
7035 return status;
7036 }
7037
7038 struct nfs4_lockdata {
7039 struct nfs_lock_args arg;
7040 struct nfs_lock_res res;
7041 struct nfs4_lock_state *lsp;
7042 struct nfs_open_context *ctx;
7043 struct file_lock fl;
7044 unsigned long timestamp;
7045 int rpc_status;
7046 int cancelled;
7047 struct nfs_server *server;
7048 };
7049
7050 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
7051 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
7052 gfp_t gfp_mask)
7053 {
7054 struct nfs4_lockdata *p;
7055 struct inode *inode = lsp->ls_state->inode;
7056 struct nfs_server *server = NFS_SERVER(inode);
7057 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
7058
7059 p = kzalloc(sizeof(*p), gfp_mask);
7060 if (p == NULL)
7061 return NULL;
7062
7063 p->arg.fh = NFS_FH(inode);
7064 p->arg.fl = &p->fl;
7065 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
7066 if (IS_ERR(p->arg.open_seqid))
7067 goto out_free;
7068 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
7069 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
7070 if (IS_ERR(p->arg.lock_seqid))
7071 goto out_free_seqid;
7072 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
7073 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
7074 p->arg.lock_owner.s_dev = server->s_dev;
7075 p->res.lock_seqid = p->arg.lock_seqid;
7076 p->lsp = lsp;
7077 p->server = server;
7078 p->ctx = get_nfs_open_context(ctx);
7079 locks_init_lock(&p->fl);
7080 locks_copy_lock(&p->fl, fl);
7081 return p;
7082 out_free_seqid:
7083 nfs_free_seqid(p->arg.open_seqid);
7084 out_free:
7085 kfree(p);
7086 return NULL;
7087 }
7088
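/*
 * LOCK rpc_call_prepare: waits on the lock seqid, then decides whether this
 * is a new lock owner (open_to_lock_owner, using the open stateid and open
 * seqid) or an existing lock owner (using the current lock stateid), before
 * setting up the session sequence.
 */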
7089 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
7090 {
7091 struct nfs4_lockdata *data = calldata;
7092 struct nfs4_state *state = data->lsp->ls_state;
7093
7094 dprintk("%s: begin!\n", __func__);
7095 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
7096 goto out_wait;
7097 /* Do we need to do an open_to_lock_owner? */
7098 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
7099 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
7100 goto out_release_lock_seqid;
7101 }
7102 nfs4_stateid_copy(&data->arg.open_stateid,
7103 &state->open_stateid);
7104 data->arg.new_lock_owner = 1;
7105 data->res.open_seqid = data->arg.open_seqid;
7106 } else {
7107 data->arg.new_lock_owner = 0;
7108 nfs4_stateid_copy(&data->arg.lock_stateid,
7109 &data->lsp->ls_stateid);
7110 }
7111 if (!nfs4_valid_open_stateid(state)) {
7112 data->rpc_status = -EBADF;
7113 task->tk_action = NULL;
7114 goto out_release_open_seqid;
7115 }
7116 data->timestamp = jiffies;
7117 if (nfs4_setup_sequence(data->server->nfs_client,
7118 &data->arg.seq_args,
7119 &data->res.seq_res,
7120 task) == 0)
7121 return;
7122 out_release_open_seqid:
7123 nfs_release_seqid(data->arg.open_seqid);
7124 out_release_lock_seqid:
7125 nfs_release_seqid(data->arg.lock_seqid);
7126 out_wait:
7127 nfs4_sequence_done(task, &data->res.seq_res);
7128 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
7129 }
7130
7131 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
7132 {
7133 struct nfs4_lockdata *data = calldata;
7134 struct nfs4_lock_state *lsp = data->lsp;
7135 struct nfs_server *server = NFS_SERVER(d_inode(data->ctx->dentry));
7136
7137 dprintk("%s: begin!\n", __func__);
7138
7139 if (!nfs4_sequence_done(task, &data->res.seq_res))
7140 return;
7141
7142 data->rpc_status = task->tk_status;
7143 switch (task->tk_status) {
7144 case 0:
7145 renew_lease(server, data->timestamp);
7146 if (data->arg.new_lock && !data->cancelled) {
7147 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
7148 if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0)
7149 goto out_restart;
7150 }
7151 if (data->arg.new_lock_owner != 0) {
7152 nfs_confirm_seqid(&lsp->ls_seqid, 0);
7153 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
7154 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
7155 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
7156 goto out_restart;
7157 break;
7158 case -NFS4ERR_OLD_STATEID:
7159 if (data->arg.new_lock_owner != 0 &&
7160 nfs4_refresh_open_old_stateid(&data->arg.open_stateid,
7161 lsp->ls_state))
7162 goto out_restart;
7163 if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp))
7164 goto out_restart;
7165 fallthrough;
7166 case -NFS4ERR_BAD_STATEID:
7167 case -NFS4ERR_STALE_STATEID:
7168 case -NFS4ERR_EXPIRED:
7169 if (data->arg.new_lock_owner != 0) {
7170 if (!nfs4_stateid_match(&data->arg.open_stateid,
7171 &lsp->ls_state->open_stateid))
7172 goto out_restart;
7173 else if (nfs4_async_handle_error(task, server, lsp->ls_state, NULL) == -EAGAIN)
7174 goto out_restart;
7175 } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
7176 &lsp->ls_stateid))
7177 goto out_restart;
7178 }
7179 out_done:
7180 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
7181 return;
7182 out_restart:
7183 if (!data->cancelled)
7184 rpc_restart_call_prepare(task);
7185 goto out_done;
7186 }
7187
7188 static void nfs4_lock_release(void *calldata)
7189 {
7190 struct nfs4_lockdata *data = calldata;
7191
7192 dprintk("%s: begin!\n", __func__);
7193 nfs_free_seqid(data->arg.open_seqid);
7194 if (data->cancelled && data->rpc_status == 0) {
7195 struct rpc_task *task;
7196 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
7197 data->arg.lock_seqid);
7198 if (!IS_ERR(task))
7199 rpc_put_task_async(task);
7200 dprintk("%s: cancelling lock!\n", __func__);
7201 } else
7202 nfs_free_seqid(data->arg.lock_seqid);
7203 nfs4_put_lock_state(data->lsp);
7204 put_nfs_open_context(data->ctx);
7205 kfree(data);
7206 dprintk("%s: done!\n", __func__);
7207 }
7208
7209 static const struct rpc_call_ops nfs4_lock_ops = {
7210 .rpc_call_prepare = nfs4_lock_prepare,
7211 .rpc_call_done = nfs4_lock_done,
7212 .rpc_release = nfs4_lock_release,
7213 };
7214
7215 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
7216 {
7217 switch (error) {
7218 case -NFS4ERR_ADMIN_REVOKED:
7219 case -NFS4ERR_EXPIRED:
7220 case -NFS4ERR_BAD_STATEID:
7221 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
7222 if (new_lock_owner != 0 ||
7223 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
7224 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
7225 break;
7226 case -NFS4ERR_STALE_STATEID:
7227 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
7228 nfs4_schedule_lease_recovery(server->nfs_client);
7229 }
7230 }
7231
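/*
 * Issue a LOCK request and wait for it to complete. recovery_type selects
 * between a new lock and reclaim/expired recovery. If the wait is
 * interrupted, the request is marked cancelled so that nfs4_lock_release()
 * can undo a lock that the server may still have granted.
 */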
7232 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
7233 {
7234 struct nfs4_lockdata *data;
7235 struct rpc_task *task;
7236 struct rpc_message msg = {
7237 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
7238 .rpc_cred = state->owner->so_cred,
7239 };
7240 struct rpc_task_setup task_setup_data = {
7241 .rpc_client = NFS_CLIENT(state->inode),
7242 .rpc_message = &msg,
7243 .callback_ops = &nfs4_lock_ops,
7244 .workqueue = nfsiod_workqueue,
7245 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
7246 };
7247 int ret;
7248
7249 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE))
7250 task_setup_data.flags |= RPC_TASK_MOVEABLE;
7251
7252 dprintk("%s: begin!\n", __func__);
7253 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
7254 fl->fl_u.nfs4_fl.owner, GFP_KERNEL);
7255 if (data == NULL)
7256 return -ENOMEM;
7257 if (IS_SETLKW(cmd))
7258 data->arg.block = 1;
7259 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1,
7260 recovery_type > NFS_LOCK_NEW);
7261 msg.rpc_argp = &data->arg;
7262 msg.rpc_resp = &data->res;
7263 task_setup_data.callback_data = data;
7264 if (recovery_type > NFS_LOCK_NEW) {
7265 if (recovery_type == NFS_LOCK_RECLAIM)
7266 data->arg.reclaim = NFS_LOCK_RECLAIM;
7267 } else
7268 data->arg.new_lock = 1;
7269 task = rpc_run_task(&task_setup_data);
7270 if (IS_ERR(task))
7271 return PTR_ERR(task);
7272 ret = rpc_wait_for_completion_task(task);
7273 if (ret == 0) {
7274 ret = data->rpc_status;
7275 if (ret)
7276 nfs4_handle_setlk_error(data->server, data->lsp,
7277 data->arg.new_lock_owner, ret);
7278 } else
7279 data->cancelled = true;
7280 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
7281 rpc_put_task(task);
7282 dprintk("%s: done, ret = %d!\n", __func__, ret);
7283 return ret;
7284 }
7285
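/*
 * Reclaim a lock after server reboot (NFS_LOCK_RECLAIM). The RPC is skipped
 * while the state is delegated, and NFS4ERR_DELAY is retried via the
 * exception handler.
 */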
7286 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
7287 {
7288 struct nfs_server *server = NFS_SERVER(state->inode);
7289 struct nfs4_exception exception = {
7290 .inode = state->inode,
7291 };
7292 int err;
7293
7294 do {
7295 /* Cache the lock if possible... */
7296 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
7297 return 0;
7298 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
7299 if (err != -NFS4ERR_DELAY)
7300 break;
7301 nfs4_handle_exception(server, err, &exception);
7302 } while (exception.retry);
7303 return err;
7304 }
7305
7306 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
7307 {
7308 struct nfs_server *server = NFS_SERVER(state->inode);
7309 struct nfs4_exception exception = {
7310 .inode = state->inode,
7311 };
7312 int err;
7313
7314 err = nfs4_set_lock_state(state, request);
7315 if (err != 0)
7316 return err;
7317 if (!recover_lost_locks) {
7318 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
7319 return 0;
7320 }
7321 do {
7322 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
7323 return 0;
7324 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
7325 switch (err) {
7326 default:
7327 goto out;
7328 case -NFS4ERR_GRACE:
7329 case -NFS4ERR_DELAY:
7330 nfs4_handle_exception(server, err, &exception);
7331 err = 0;
7332 }
7333 } while (exception.retry);
7334 out:
7335 return err;
7336 }
7337
7338 #if defined(CONFIG_NFS_V4_1)
7339 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
7340 {
7341 struct nfs4_lock_state *lsp;
7342 int status;
7343
7344 status = nfs4_set_lock_state(state, request);
7345 if (status != 0)
7346 return status;
7347 lsp = request->fl_u.nfs4_fl.owner;
7348 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) ||
7349 test_bit(NFS_LOCK_LOST, &lsp->ls_flags))
7350 return 0;
7351 return nfs4_lock_expired(state, request);
7352 }
7353 #endif
7354
7355 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7356 {
7357 struct nfs_inode *nfsi = NFS_I(state->inode);
7358 struct nfs4_state_owner *sp = state->owner;
7359 unsigned char fl_flags = request->fl_flags;
7360 int status;
7361
7362 request->fl_flags |= FL_ACCESS;
7363 status = locks_lock_inode_wait(state->inode, request);
7364 if (status < 0)
7365 goto out;
7366 mutex_lock(&sp->so_delegreturn_mutex);
7367 down_read(&nfsi->rwsem);
7368 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
7369 /* Yes: cache locks! */
7370 /* ...but avoid races with delegation recall... */
7371 request->fl_flags = fl_flags & ~FL_SLEEP;
7372 status = locks_lock_inode_wait(state->inode, request);
7373 up_read(&nfsi->rwsem);
7374 mutex_unlock(&sp->so_delegreturn_mutex);
7375 goto out;
7376 }
7377 up_read(&nfsi->rwsem);
7378 mutex_unlock(&sp->so_delegreturn_mutex);
7379 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
7380 out:
7381 request->fl_flags = fl_flags;
7382 return status;
7383 }
7384
7385 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7386 {
7387 struct nfs4_exception exception = {
7388 .state = state,
7389 .inode = state->inode,
7390 .interruptible = true,
7391 };
7392 int err;
7393
7394 do {
7395 err = _nfs4_proc_setlk(state, cmd, request);
7396 if (err == -NFS4ERR_DENIED)
7397 err = -EAGAIN;
7398 err = nfs4_handle_exception(NFS_SERVER(state->inode),
7399 err, &exception);
7400 } while (exception.retry);
7401 return err;
7402 }
7403
7404 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
7405 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
7406
7407 static int
7408 nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd,
7409 struct file_lock *request)
7410 {
7411 int status = -ERESTARTSYS;
7412 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
7413
7414 while (!signalled()) {
7415 status = nfs4_proc_setlk(state, cmd, request);
7416 if ((status != -EAGAIN) || IS_SETLK(cmd))
7417 break;
7418 freezable_schedule_timeout_interruptible(timeout);
7419 timeout *= 2;
7420 timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout);
7421 status = -ERESTARTSYS;
7422 }
7423 return status;
7424 }
7425
7426 #ifdef CONFIG_NFS_V4_1
7427 struct nfs4_lock_waiter {
7428 struct inode *inode;
7429 struct nfs_lowner owner;
7430 wait_queue_entry_t wait;
7431 };
7432
7433 static int
7434 nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key)
7435 {
7436 struct nfs4_lock_waiter *waiter =
7437 container_of(wait, struct nfs4_lock_waiter, wait);
7438
7439 /* NULL key means to wake up everyone */
7440 if (key) {
7441 struct cb_notify_lock_args *cbnl = key;
7442 struct nfs_lowner *lowner = &cbnl->cbnl_owner,
7443 *wowner = &waiter->owner;
7444
7445 /* Only wake if the callback was for the same owner. */
7446 if (lowner->id != wowner->id || lowner->s_dev != wowner->s_dev)
7447 return 0;
7448
7449 /* Make sure it's for the right inode */
7450 if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
7451 return 0;
7452 }
7453
7454 return woken_wake_function(wait, mode, flags, key);
7455 }
7456
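/*
 * When the server may send CB_NOTIFY_LOCK (NFS_STATE_MAY_NOTIFY_LOCK), wait
 * on the client's lock waitqueue instead of the timed polling done by
 * nfs4_retry_setlk_simple(). nfs4_wake_lock_waiter() only wakes this waiter
 * for callbacks matching our lock owner and filehandle.
 */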
7457 static int
7458 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7459 {
7460 struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
7461 struct nfs_server *server = NFS_SERVER(state->inode);
7462 struct nfs_client *clp = server->nfs_client;
7463 wait_queue_head_t *q = &clp->cl_lock_waitq;
7464 struct nfs4_lock_waiter waiter = {
7465 .inode = state->inode,
7466 .owner = { .clientid = clp->cl_clientid,
7467 .id = lsp->ls_seqid.owner_id,
7468 .s_dev = server->s_dev },
7469 };
7470 int status;
7471
7472 /* Don't bother with waitqueue if we don't expect a callback */
7473 if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags))
7474 return nfs4_retry_setlk_simple(state, cmd, request);
7475
7476 init_wait(&waiter.wait);
7477 waiter.wait.func = nfs4_wake_lock_waiter;
7478 add_wait_queue(q, &waiter.wait);
7479
7480 do {
7481 status = nfs4_proc_setlk(state, cmd, request);
7482 if (status != -EAGAIN || IS_SETLK(cmd))
7483 break;
7484
7485 status = -ERESTARTSYS;
7486 freezer_do_not_count();
7487 wait_woken(&waiter.wait, TASK_INTERRUPTIBLE,
7488 NFS4_LOCK_MAXTIMEOUT);
7489 freezer_count();
7490 } while (!signalled());
7491
7492 remove_wait_queue(q, &waiter.wait);
7493
7494 return status;
7495 }
7496 #else /* !CONFIG_NFS_V4_1 */
7497 static inline int
7498 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7499 {
7500 return nfs4_retry_setlk_simple(state, cmd, request);
7501 }
7502 #endif
7503
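/*
 * Entry point for the lock() file operation: dispatches GETLK, unlock and
 * set-lock requests, and checks the file open mode itself since the VFS
 * does not do so for flock() locks.
 */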
7504 static int
7505 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
7506 {
7507 struct nfs_open_context *ctx;
7508 struct nfs4_state *state;
7509 int status;
7510
7511 /* verify open state */
7512 ctx = nfs_file_open_context(filp);
7513 state = ctx->state;
7514
7515 if (IS_GETLK(cmd)) {
7516 if (state != NULL)
7517 return nfs4_proc_getlk(state, F_GETLK, request);
7518 return 0;
7519 }
7520
7521 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
7522 return -EINVAL;
7523
7524 if (request->fl_type == F_UNLCK) {
7525 if (state != NULL)
7526 return nfs4_proc_unlck(state, cmd, request);
7527 return 0;
7528 }
7529
7530 if (state == NULL)
7531 return -ENOLCK;
7532
7533 if ((request->fl_flags & FL_POSIX) &&
7534 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
7535 return -ENOLCK;
7536
7537 /*
7538 * Don't rely on the VFS having checked the file open mode,
7539 * since it won't do this for flock() locks.
7540 */
7541 switch (request->fl_type) {
7542 case F_RDLCK:
7543 if (!(filp->f_mode & FMODE_READ))
7544 return -EBADF;
7545 break;
7546 case F_WRLCK:
7547 if (!(filp->f_mode & FMODE_WRITE))
7548 return -EBADF;
7549 }
7550
7551 status = nfs4_set_lock_state(state, request);
7552 if (status != 0)
7553 return status;
7554
7555 return nfs4_retry_setlk(state, cmd, request);
7556 }
7557
7558 static int nfs4_delete_lease(struct file *file, void **priv)
7559 {
7560 return generic_setlease(file, F_UNLCK, NULL, priv);
7561 }
7562
7563 static int nfs4_add_lease(struct file *file, long arg, struct file_lock **lease,
7564 void **priv)
7565 {
7566 struct inode *inode = file_inode(file);
7567 fmode_t type = arg == F_RDLCK ? FMODE_READ : FMODE_WRITE;
7568 int ret;
7569
7570 /* No delegation, no lease */
7571 if (!nfs4_have_delegation(inode, type))
7572 return -EAGAIN;
7573 ret = generic_setlease(file, arg, lease, priv);
7574 if (ret || nfs4_have_delegation(inode, type))
7575 return ret;
7576 /* We raced with a delegation return */
7577 nfs4_delete_lease(file, priv);
7578 return -EAGAIN;
7579 }
7580
7581 int nfs4_proc_setlease(struct file *file, long arg, struct file_lock **lease,
7582 void **priv)
7583 {
7584 switch (arg) {
7585 case F_RDLCK:
7586 case F_WRLCK:
7587 return nfs4_add_lease(file, arg, lease, priv);
7588 case F_UNLCK:
7589 return nfs4_delete_lease(file, priv);
7590 default:
7591 return -EINVAL;
7592 }
7593 }
7594
7595 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
7596 {
7597 struct nfs_server *server = NFS_SERVER(state->inode);
7598 int err;
7599
7600 err = nfs4_set_lock_state(state, fl);
7601 if (err != 0)
7602 return err;
7603 do {
7604 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
7605 if (err != -NFS4ERR_DELAY)
7606 break;
7607 ssleep(1);
7608 } while (err == -NFS4ERR_DELAY);
7609 return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
7610 }
7611
7612 struct nfs_release_lockowner_data {
7613 struct nfs4_lock_state *lsp;
7614 struct nfs_server *server;
7615 struct nfs_release_lockowner_args args;
7616 struct nfs_release_lockowner_res res;
7617 unsigned long timestamp;
7618 };
7619
7620 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
7621 {
7622 struct nfs_release_lockowner_data *data = calldata;
7623 struct nfs_server *server = data->server;
7624 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
7625 &data->res.seq_res, task);
7626 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
7627 data->timestamp = jiffies;
7628 }
7629
7630 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
7631 {
7632 struct nfs_release_lockowner_data *data = calldata;
7633 struct nfs_server *server = data->server;
7634
7635 nfs40_sequence_done(task, &data->res.seq_res);
7636
7637 switch (task->tk_status) {
7638 case 0:
7639 renew_lease(server, data->timestamp);
7640 break;
7641 case -NFS4ERR_STALE_CLIENTID:
7642 case -NFS4ERR_EXPIRED:
7643 nfs4_schedule_lease_recovery(server->nfs_client);
7644 break;
7645 case -NFS4ERR_LEASE_MOVED:
7646 case -NFS4ERR_DELAY:
7647 if (nfs4_async_handle_error(task, server,
7648 NULL, NULL) == -EAGAIN)
7649 rpc_restart_call_prepare(task);
7650 }
7651 }
7652
7653 static void nfs4_release_lockowner_release(void *calldata)
7654 {
7655 struct nfs_release_lockowner_data *data = calldata;
7656 nfs4_free_lock_state(data->server, data->lsp);
7657 kfree(calldata);
7658 }
7659
7660 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
7661 .rpc_call_prepare = nfs4_release_lockowner_prepare,
7662 .rpc_call_done = nfs4_release_lockowner_done,
7663 .rpc_release = nfs4_release_lockowner_release,
7664 };
7665
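/*
 * Issue an asynchronous RELEASE_LOCKOWNER. The operation only exists in
 * NFSv4.0, so this is a no-op for other minor versions. When the RPC
 * completes, nfs4_release_lockowner_release() frees the lock state.
 */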
7666 static void
7667 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
7668 {
7669 struct nfs_release_lockowner_data *data;
7670 struct rpc_message msg = {
7671 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
7672 };
7673
7674 if (server->nfs_client->cl_mvops->minor_version != 0)
7675 return;
7676
7677 data = kmalloc(sizeof(*data), GFP_KERNEL);
7678 if (!data)
7679 return;
7680 data->lsp = lsp;
7681 data->server = server;
7682 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
7683 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
7684 data->args.lock_owner.s_dev = server->s_dev;
7685
7686 msg.rpc_argp = &data->args;
7687 msg.rpc_resp = &data->res;
7688 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
7689 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
7690 }
7691
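/*
 * xattr handlers for the NFSv4 ACL: the ACL is exposed through the
 * "system.nfs4_acl" extended attribute, backed by nfs4_proc_get_acl() and
 * nfs4_proc_set_acl().
 */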
7692 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
7693
7694 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
7695 struct user_namespace *mnt_userns,
7696 struct dentry *unused, struct inode *inode,
7697 const char *key, const void *buf,
7698 size_t buflen, int flags)
7699 {
7700 return nfs4_proc_set_acl(inode, buf, buflen);
7701 }
7702
7703 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
7704 struct dentry *unused, struct inode *inode,
7705 const char *key, void *buf, size_t buflen)
7706 {
7707 return nfs4_proc_get_acl(inode, buf, buflen);
7708 }
7709
7710 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry)
7711 {
7712 return nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry)));
7713 }
7714
7715 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
7716
7717 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
7718 struct user_namespace *mnt_userns,
7719 struct dentry *unused, struct inode *inode,
7720 const char *key, const void *buf,
7721 size_t buflen, int flags)
7722 {
7723 if (security_ismaclabel(key))
7724 return nfs4_set_security_label(inode, buf, buflen);
7725
7726 return -EOPNOTSUPP;
7727 }
7728
7729 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler,
7730 struct dentry *unused, struct inode *inode,
7731 const char *key, void *buf, size_t buflen)
7732 {
7733 if (security_ismaclabel(key))
7734 return nfs4_get_security_label(inode, buf, buflen);
7735 return -EOPNOTSUPP;
7736 }
7737
7738 static ssize_t
7739 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
7740 {
7741 int len = 0;
7742
7743 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) {
7744 len = security_inode_listsecurity(inode, list, list_len);
7745 if (len >= 0 && list_len && len > list_len)
7746 return -ERANGE;
7747 }
7748 return len;
7749 }
7750
7751 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
7752 .prefix = XATTR_SECURITY_PREFIX,
7753 .get = nfs4_xattr_get_nfs4_label,
7754 .set = nfs4_xattr_set_nfs4_label,
7755 };
7756
7757 #else
7758
7759 static ssize_t
7760 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
7761 {
7762 return 0;
7763 }
7764
7765 #endif
7766
7767 #ifdef CONFIG_NFS_V4_2
7768 static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler,
7769 struct user_namespace *mnt_userns,
7770 struct dentry *unused, struct inode *inode,
7771 const char *key, const void *buf,
7772 size_t buflen, int flags)
7773 {
7774 u32 mask;
7775 int ret;
7776
7777 if (!nfs_server_capable(inode, NFS_CAP_XATTR))
7778 return -EOPNOTSUPP;
7779
7780 /*
7781 * There is no mapping from the MAY_* flags to the NFS_ACCESS_XA*
7782 * flags right now. Handling of xattr operations uses the normal
7783 * file read/write permissions.
7784 *
7785 * Just in case the server has other ideas (which RFC 8276 allows),
7786 * do a cached access check for the XA* flags to possibly avoid
7787 * doing an RPC and getting EACCES back.
7788 */
7789 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
7790 if (!(mask & NFS_ACCESS_XAWRITE))
7791 return -EACCES;
7792 }
7793
7794 if (buf == NULL) {
7795 ret = nfs42_proc_removexattr(inode, key);
7796 if (!ret)
7797 nfs4_xattr_cache_remove(inode, key);
7798 } else {
7799 ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags);
7800 if (!ret)
7801 nfs4_xattr_cache_add(inode, key, buf, NULL, buflen);
7802 }
7803
7804 return ret;
7805 }
7806
7807 static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler,
7808 struct dentry *unused, struct inode *inode,
7809 const char *key, void *buf, size_t buflen)
7810 {
7811 u32 mask;
7812 ssize_t ret;
7813
7814 if (!nfs_server_capable(inode, NFS_CAP_XATTR))
7815 return -EOPNOTSUPP;
7816
7817 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
7818 if (!(mask & NFS_ACCESS_XAREAD))
7819 return -EACCES;
7820 }
7821
7822 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
7823 if (ret)
7824 return ret;
7825
7826 ret = nfs4_xattr_cache_get(inode, key, buf, buflen);
7827 if (ret >= 0 || (ret < 0 && ret != -ENOENT))
7828 return ret;
7829
7830 ret = nfs42_proc_getxattr(inode, key, buf, buflen);
7831
7832 return ret;
7833 }
7834
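/*
 * List "user." xattr names, preferring the local xattr cache and otherwise
 * paging through LISTXATTRS requests until the server reports eof. When
 * list_len is zero, only the required buffer size is computed.
 */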
7835 static ssize_t
7836 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
7837 {
7838 u64 cookie;
7839 bool eof;
7840 ssize_t ret, size;
7841 char *buf;
7842 size_t buflen;
7843 u32 mask;
7844
7845 if (!nfs_server_capable(inode, NFS_CAP_XATTR))
7846 return 0;
7847
7848 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
7849 if (!(mask & NFS_ACCESS_XALIST))
7850 return 0;
7851 }
7852
7853 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
7854 if (ret)
7855 return ret;
7856
7857 ret = nfs4_xattr_cache_list(inode, list, list_len);
7858 if (ret >= 0 || (ret < 0 && ret != -ENOENT))
7859 return ret;
7860
7861 cookie = 0;
7862 eof = false;
7863 buflen = list_len ? list_len : XATTR_LIST_MAX;
7864 buf = list_len ? list : NULL;
7865 size = 0;
7866
7867 while (!eof) {
7868 ret = nfs42_proc_listxattrs(inode, buf, buflen,
7869 &cookie, &eof);
7870 if (ret < 0)
7871 return ret;
7872
7873 if (list_len) {
7874 buf += ret;
7875 buflen -= ret;
7876 }
7877 size += ret;
7878 }
7879
7880 if (list_len)
7881 nfs4_xattr_cache_set_list(inode, list, size);
7882
7883 return size;
7884 }
7885
7886 #else
7887
7888 static ssize_t
7889 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
7890 {
7891 return 0;
7892 }
7893 #endif /* CONFIG_NFS_V4_2 */
7894
7895 /*
7896 * nfs_fhget will use either the mounted_on_fileid or the fileid
7897 */
7898 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
7899 {
7900 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
7901 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
7902 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
7903 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
7904 return;
7905
7906 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
7907 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
7908 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
7909 fattr->nlink = 2;
7910 }
7911
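/*
 * Look up the fs_locations attribute for a referral entry 'name' in 'dir'.
 * The reply is decoded into 'fs_locations', with 'page' providing the
 * buffer for the returned location data.
 */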
7912 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
7913 const struct qstr *name,
7914 struct nfs4_fs_locations *fs_locations,
7915 struct page *page)
7916 {
7917 struct nfs_server *server = NFS_SERVER(dir);
7918 u32 bitmask[3];
7919 struct nfs4_fs_locations_arg args = {
7920 .dir_fh = NFS_FH(dir),
7921 .name = name,
7922 .page = page,
7923 .bitmask = bitmask,
7924 };
7925 struct nfs4_fs_locations_res res = {
7926 .fs_locations = fs_locations,
7927 };
7928 struct rpc_message msg = {
7929 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
7930 .rpc_argp = &args,
7931 .rpc_resp = &res,
7932 };
7933 int status;
7934
7935 dprintk("%s: start\n", __func__);
7936
7937 bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS;
7938 bitmask[1] = nfs4_fattr_bitmap[1];
7939
7940 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
7941 * is not supported */
7942 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
7943 bitmask[0] &= ~FATTR4_WORD0_FILEID;
7944 else
7945 bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
7946
7947 nfs_fattr_init(fs_locations->fattr);
7948 fs_locations->server = server;
7949 fs_locations->nlocations = 0;
7950 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
7951 dprintk("%s: returned status = %d\n", __func__, status);
7952 return status;
7953 }
7954
7955 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
7956 const struct qstr *name,
7957 struct nfs4_fs_locations *fs_locations,
7958 struct page *page)
7959 {
7960 struct nfs4_exception exception = {
7961 .interruptible = true,
7962 };
7963 int err;
7964 do {
7965 err = _nfs4_proc_fs_locations(client, dir, name,
7966 fs_locations, page);
7967 trace_nfs4_get_fs_locations(dir, name, err);
7968 err = nfs4_handle_exception(NFS_SERVER(dir), err,
7969 &exception);
7970 } while (exception.retry);
7971 return err;
7972 }
7973
7974 /*
7975 * This operation also signals the server that this client is
7976 * performing migration recovery. The server can stop returning
7977 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
7978 * appended to this compound to identify the client ID which is
7979 * performing recovery.
7980 */
7981 static int _nfs40_proc_get_locations(struct nfs_server *server,
7982 struct nfs_fh *fhandle,
7983 struct nfs4_fs_locations *locations,
7984 struct page *page, const struct cred *cred)
7985 {
7986 struct rpc_clnt *clnt = server->client;
7987 u32 bitmask[2] = {
7988 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
7989 };
7990 struct nfs4_fs_locations_arg args = {
7991 .clientid = server->nfs_client->cl_clientid,
7992 .fh = fhandle,
7993 .page = page,
7994 .bitmask = bitmask,
7995 .migration = 1, /* skip LOOKUP */
7996 .renew = 1, /* append RENEW */
7997 };
7998 struct nfs4_fs_locations_res res = {
7999 .fs_locations = locations,
8000 .migration = 1,
8001 .renew = 1,
8002 };
8003 struct rpc_message msg = {
8004 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
8005 .rpc_argp = &args,
8006 .rpc_resp = &res,
8007 .rpc_cred = cred,
8008 };
8009 unsigned long now = jiffies;
8010 int status;
8011
8012 nfs_fattr_init(locations->fattr);
8013 locations->server = server;
8014 locations->nlocations = 0;
8015
8016 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8017 status = nfs4_call_sync_sequence(clnt, server, &msg,
8018 &args.seq_args, &res.seq_res);
8019 if (status)
8020 return status;
8021
8022 renew_lease(server, now);
8023 return 0;
8024 }
8025
8026 #ifdef CONFIG_NFS_V4_1
8027
8028 /*
8029 * This operation also signals the server that this client is
8030 * performing migration recovery. The server can stop asserting
8031 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
8032 * performing this operation is identified in the SEQUENCE
8033 * operation in this compound.
8034 *
8035 * When the client supports GETATTR(fs_locations_info), it can
8036 * be plumbed in here.
8037 */
8038 static int _nfs41_proc_get_locations(struct nfs_server *server,
8039 struct nfs_fh *fhandle,
8040 struct nfs4_fs_locations *locations,
8041 struct page *page, const struct cred *cred)
8042 {
8043 struct rpc_clnt *clnt = server->client;
8044 u32 bitmask[2] = {
8045 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
8046 };
8047 struct nfs4_fs_locations_arg args = {
8048 .fh = fhandle,
8049 .page = page,
8050 .bitmask = bitmask,
8051 .migration = 1, /* skip LOOKUP */
8052 };
8053 struct nfs4_fs_locations_res res = {
8054 .fs_locations = locations,
8055 .migration = 1,
8056 };
8057 struct rpc_message msg = {
8058 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
8059 .rpc_argp = &args,
8060 .rpc_resp = &res,
8061 .rpc_cred = cred,
8062 };
8063 int status;
8064
8065 nfs_fattr_init(locations->fattr);
8066 locations->server = server;
8067 locations->nlocations = 0;
8068
8069 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8070 status = nfs4_call_sync_sequence(clnt, server, &msg,
8071 &args.seq_args, &res.seq_res);
8072 if (status == NFS4_OK &&
8073 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
8074 status = -NFS4ERR_LEASE_MOVED;
8075 return status;
8076 }
8077
8078 #endif /* CONFIG_NFS_V4_1 */
8079
8080 /**
8081 * nfs4_proc_get_locations - discover locations for a migrated FSID
8082 * @inode: inode on FSID that is migrating
8083 * @locations: result of query
8084 * @page: buffer
8085 * @cred: credential to use for this operation
8086 *
8087 * Returns NFS4_OK on success, a negative NFS4ERR status code if the
8088 * operation failed, or a negative errno if a local error occurred.
8089 *
8090 * On success, "locations" is filled in, but if the server has
8091 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
8092 * asserted.
8093 *
8094 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
8095 * from this client that require migration recovery.
8096 */
8097 int nfs4_proc_get_locations(struct nfs_server *server,
8098 struct nfs_fh *fhandle,
8099 struct nfs4_fs_locations *locations,
8100 struct page *page, const struct cred *cred)
8101 {
8102 struct nfs_client *clp = server->nfs_client;
8103 const struct nfs4_mig_recovery_ops *ops =
8104 clp->cl_mvops->mig_recovery_ops;
8105 struct nfs4_exception exception = {
8106 .interruptible = true,
8107 };
8108 int status;
8109
8110 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
8111 (unsigned long long)server->fsid.major,
8112 (unsigned long long)server->fsid.minor,
8113 clp->cl_hostname);
8114 nfs_display_fhandle(fhandle, __func__);
8115
8116 do {
8117 status = ops->get_locations(server, fhandle, locations, page,
8118 cred);
8119 if (status != -NFS4ERR_DELAY)
8120 break;
8121 nfs4_handle_exception(server, status, &exception);
8122 } while (exception.retry);
8123 return status;
8124 }
8125
8126 /*
8127 * This operation also signals the server that this client is
8128 * performing "lease moved" recovery. The server can stop
8129 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
8130 * is appended to this compound to identify the client ID which is
8131 * performing recovery.
8132 */
8133 static int _nfs40_proc_fsid_present(struct inode *inode, const struct cred *cred)
8134 {
8135 struct nfs_server *server = NFS_SERVER(inode);
8136 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
8137 struct rpc_clnt *clnt = server->client;
8138 struct nfs4_fsid_present_arg args = {
8139 .fh = NFS_FH(inode),
8140 .clientid = clp->cl_clientid,
8141 .renew = 1, /* append RENEW */
8142 };
8143 struct nfs4_fsid_present_res res = {
8144 .renew = 1,
8145 };
8146 struct rpc_message msg = {
8147 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
8148 .rpc_argp = &args,
8149 .rpc_resp = &res,
8150 .rpc_cred = cred,
8151 };
8152 unsigned long now = jiffies;
8153 int status;
8154
8155 res.fh = nfs_alloc_fhandle();
8156 if (res.fh == NULL)
8157 return -ENOMEM;
8158
8159 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8160 status = nfs4_call_sync_sequence(clnt, server, &msg,
8161 &args.seq_args, &res.seq_res);
8162 nfs_free_fhandle(res.fh);
8163 if (status)
8164 return status;
8165
8166 do_renew_lease(clp, now);
8167 return 0;
8168 }
8169
8170 #ifdef CONFIG_NFS_V4_1
8171
8172 /*
8173 * This operation also signals the server that this client is
8174 * performing "lease moved" recovery. The server can stop asserting
8175 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
8176 * this operation is identified in the SEQUENCE operation in this
8177 * compound.
8178 */
8179 static int _nfs41_proc_fsid_present(struct inode *inode, const struct cred *cred)
8180 {
8181 struct nfs_server *server = NFS_SERVER(inode);
8182 struct rpc_clnt *clnt = server->client;
8183 struct nfs4_fsid_present_arg args = {
8184 .fh = NFS_FH(inode),
8185 };
8186 struct nfs4_fsid_present_res res = {
8187 };
8188 struct rpc_message msg = {
8189 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
8190 .rpc_argp = &args,
8191 .rpc_resp = &res,
8192 .rpc_cred = cred,
8193 };
8194 int status;
8195
8196 res.fh = nfs_alloc_fhandle();
8197 if (res.fh == NULL)
8198 return -ENOMEM;
8199
8200 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8201 status = nfs4_call_sync_sequence(clnt, server, &msg,
8202 &args.seq_args, &res.seq_res);
8203 nfs_free_fhandle(res.fh);
8204 if (status == NFS4_OK &&
8205 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
8206 status = -NFS4ERR_LEASE_MOVED;
8207 return status;
8208 }
8209
8210 #endif /* CONFIG_NFS_V4_1 */
8211
8212 /**
8213 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
8214 * @inode: inode on FSID to check
8215 * @cred: credential to use for this operation
8216 *
8217 * Server indicates whether the FSID is present, moved, or not
8218 * recognized. This operation is necessary to clear a LEASE_MOVED
8219 * condition for this client ID.
8220 *
8221 * Returns NFS4_OK if the FSID is present on this server,
8222 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
8223 * NFS4ERR code if some error occurred on the server, or a
8224 * negative errno if a local failure occurred.
8225 */
8226 int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred)
8227 {
8228 struct nfs_server *server = NFS_SERVER(inode);
8229 struct nfs_client *clp = server->nfs_client;
8230 const struct nfs4_mig_recovery_ops *ops =
8231 clp->cl_mvops->mig_recovery_ops;
8232 struct nfs4_exception exception = {
8233 .interruptible = true,
8234 };
8235 int status;
8236
8237 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
8238 (unsigned long long)server->fsid.major,
8239 (unsigned long long)server->fsid.minor,
8240 clp->cl_hostname);
8241 nfs_display_fhandle(NFS_FH(inode), __func__);
8242
8243 do {
8244 status = ops->fsid_present(inode, cred);
8245 if (status != -NFS4ERR_DELAY)
8246 break;
8247 nfs4_handle_exception(server, status, &exception);
8248 } while (exception.retry);
8249 return status;
8250 }
8251
8252 /*
8253 * If 'use_integrity' is true and the state management nfs_client
8254 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
8255 * and the machine credential as per RFC3530bis and RFC5661 Security
8256 * Considerations sections. Otherwise, just use the user cred with the
8257 * filesystem's rpc_client.
8258 */
8259 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
8260 {
8261 int status;
8262 struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
8263 struct nfs_client *clp = NFS_SERVER(dir)->nfs_client;
8264 struct nfs4_secinfo_arg args = {
8265 .dir_fh = NFS_FH(dir),
8266 .name = name,
8267 };
8268 struct nfs4_secinfo_res res = {
8269 .flavors = flavors,
8270 };
8271 struct rpc_message msg = {
8272 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
8273 .rpc_argp = &args,
8274 .rpc_resp = &res,
8275 };
8276 struct nfs4_call_sync_data data = {
8277 .seq_server = NFS_SERVER(dir),
8278 .seq_args = &args.seq_args,
8279 .seq_res = &res.seq_res,
8280 };
8281 struct rpc_task_setup task_setup = {
8282 .rpc_client = clnt,
8283 .rpc_message = &msg,
8284 .callback_ops = clp->cl_mvops->call_sync_ops,
8285 .callback_data = &data,
8286 .flags = RPC_TASK_NO_ROUND_ROBIN,
8287 };
8288 const struct cred *cred = NULL;
8289
8290 if (use_integrity) {
8291 clnt = clp->cl_rpcclient;
8292 task_setup.rpc_client = clnt;
8293
8294 cred = nfs4_get_clid_cred(clp);
8295 msg.rpc_cred = cred;
8296 }
8297
8298 dprintk("NFS call secinfo %s\n", name->name);
8299
8300 nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
8301 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
8302 status = nfs4_call_sync_custom(&task_setup);
8303
8304 dprintk("NFS reply secinfo: %d\n", status);
8305
8306 put_cred(cred);
8307 return status;
8308 }
8309
8310 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
8311 struct nfs4_secinfo_flavors *flavors)
8312 {
8313 struct nfs4_exception exception = {
8314 .interruptible = true,
8315 };
8316 int err;
8317 do {
8318 err = -NFS4ERR_WRONGSEC;
8319
8320 /* try to use integrity protection with machine cred */
8321 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
8322 err = _nfs4_proc_secinfo(dir, name, flavors, true);
8323
8324 /*
8325 * if unable to use integrity protection, or SECINFO with
8326 * integrity protection returns NFS4ERR_WRONGSEC (which is
8327 * disallowed by spec, but exists in deployed servers) use
8328 * the current filesystem's rpc_client and the user cred.
8329 */
8330 if (err == -NFS4ERR_WRONGSEC)
8331 err = _nfs4_proc_secinfo(dir, name, flavors, false);
8332
8333 trace_nfs4_secinfo(dir, name, err);
8334 err = nfs4_handle_exception(NFS_SERVER(dir), err,
8335 &exception);
8336 } while (exception.retry);
8337 return err;
8338 }
8339
8340 #ifdef CONFIG_NFS_V4_1
8341 /*
8342 * Check the exchange flags returned by the server for invalid flags: having
8343 * both the PNFS and NON_PNFS flags set, or having none of the NON_PNFS, PNFS,
8344 * or DS flags set.
8345 */
8346 static int nfs4_check_cl_exchange_flags(u32 flags, u32 version)
8347 {
8348 if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R))
8349 goto out_inval;
8350 else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R))
8351 goto out_inval;
8352 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
8353 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
8354 goto out_inval;
8355 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
8356 goto out_inval;
8357 return NFS_OK;
8358 out_inval:
8359 return -NFS4ERR_INVAL;
8360 }
8361
8362 static bool
8363 nfs41_same_server_scope(struct nfs41_server_scope *a,
8364 struct nfs41_server_scope *b)
8365 {
8366 if (a->server_scope_sz != b->server_scope_sz)
8367 return false;
8368 return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0;
8369 }
8370
8371 static void
8372 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
8373 {
8374 struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp;
8375 struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp;
8376 struct nfs_client *clp = args->client;
8377
8378 switch (task->tk_status) {
8379 case -NFS4ERR_BADSESSION:
8380 case -NFS4ERR_DEADSESSION:
8381 nfs4_schedule_session_recovery(clp->cl_session,
8382 task->tk_status);
8383 return;
8384 }
8385 if (args->dir == NFS4_CDFC4_FORE_OR_BOTH &&
8386 res->dir != NFS4_CDFS4_BOTH) {
8387 rpc_task_close_connection(task);
8388 if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES)
8389 rpc_restart_call(task);
8390 }
8391 }
8392
8393 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
8394 .rpc_call_done = nfs4_bind_one_conn_to_session_done,
8395 };
8396
8397 /*
8398 * nfs4_proc_bind_one_conn_to_session()
8399 *
8400 * The 4.1 client currently uses the same TCP connection for the
8401 * fore and backchannel.
8402 */
8403 static
8404 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt,
8405 struct rpc_xprt *xprt,
8406 struct nfs_client *clp,
8407 const struct cred *cred)
8408 {
8409 int status;
8410 struct nfs41_bind_conn_to_session_args args = {
8411 .client = clp,
8412 .dir = NFS4_CDFC4_FORE_OR_BOTH,
8413 .retries = 0,
8414 };
8415 struct nfs41_bind_conn_to_session_res res;
8416 struct rpc_message msg = {
8417 .rpc_proc =
8418 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
8419 .rpc_argp = &args,
8420 .rpc_resp = &res,
8421 .rpc_cred = cred,
8422 };
8423 struct rpc_task_setup task_setup_data = {
8424 .rpc_client = clnt,
8425 .rpc_xprt = xprt,
8426 .callback_ops = &nfs4_bind_one_conn_to_session_ops,
8427 .rpc_message = &msg,
8428 .flags = RPC_TASK_TIMEOUT,
8429 };
8430 struct rpc_task *task;
8431
8432 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
8433 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
8434 args.dir = NFS4_CDFC4_FORE;
8435
8436 /* Do not set the backchannel flag unless this is clnt->cl_xprt */
8437 if (xprt != rcu_access_pointer(clnt->cl_xprt))
8438 args.dir = NFS4_CDFC4_FORE;
8439
8440 task = rpc_run_task(&task_setup_data);
8441 if (!IS_ERR(task)) {
8442 status = task->tk_status;
8443 rpc_put_task(task);
8444 } else
8445 status = PTR_ERR(task);
8446 trace_nfs4_bind_conn_to_session(clp, status);
8447 if (status == 0) {
8448 if (memcmp(res.sessionid.data,
8449 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
8450 dprintk("NFS: %s: Session ID mismatch\n", __func__);
8451 return -EIO;
8452 }
8453 if ((res.dir & args.dir) != res.dir || res.dir == 0) {
8454 dprintk("NFS: %s: Unexpected direction from server\n",
8455 __func__);
8456 return -EIO;
8457 }
8458 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
8459 dprintk("NFS: %s: Server returned RDMA mode = true\n",
8460 __func__);
8461 return -EIO;
8462 }
8463 }
8464
8465 return status;
8466 }
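/*
 * Summary of the reply validation above: the returned session ID must match
 * the session we asked to bind, the direction reported by the server must be
 * a non-zero subset of what was requested, and the RDMA-mode flag must echo
 * the client's setting; any mismatch is treated as -EIO.
 */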
8467
8468 struct rpc_bind_conn_calldata {
8469 struct nfs_client *clp;
8470 const struct cred *cred;
8471 };
8472
8473 static int
8474 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt,
8475 struct rpc_xprt *xprt,
8476 void *calldata)
8477 {
8478 struct rpc_bind_conn_calldata *p = calldata;
8479
8480 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred);
8481 }
8482
8483 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, const struct cred *cred)
8484 {
8485 struct rpc_bind_conn_calldata data = {
8486 .clp = clp,
8487 .cred = cred,
8488 };
8489 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient,
8490 nfs4_proc_bind_conn_to_session_callback, &data);
8491 }
8492
8493 /*
8494 * Minimum set of SP4_MACH_CRED operations (from RFC 5661) in the enforce map,
8495 * plus the operations we'd like to see in the allow map to enable certain features
8496 */
8497 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
8498 .how = SP4_MACH_CRED,
8499 .enforce.u.words = {
8500 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
8501 1 << (OP_EXCHANGE_ID - 32) |
8502 1 << (OP_CREATE_SESSION - 32) |
8503 1 << (OP_DESTROY_SESSION - 32) |
8504 1 << (OP_DESTROY_CLIENTID - 32)
8505 },
8506 .allow.u.words = {
8507 [0] = 1 << (OP_CLOSE) |
8508 1 << (OP_OPEN_DOWNGRADE) |
8509 1 << (OP_LOCKU) |
8510 1 << (OP_DELEGRETURN) |
8511 1 << (OP_COMMIT),
8512 [1] = 1 << (OP_SECINFO - 32) |
8513 1 << (OP_SECINFO_NO_NAME - 32) |
8514 1 << (OP_LAYOUTRETURN - 32) |
8515 1 << (OP_TEST_STATEID - 32) |
8516 1 << (OP_FREE_STATEID - 32) |
8517 1 << (OP_WRITE - 32)
8518 }
8519 };
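/*
 * How the op maps above are encoded (for illustration): each NFSv4 operation
 * number selects word [op / 32] and bit (op % 32) of the bitmap, e.g.
 * OP_EXCHANGE_ID (42) is 1 << (42 - 32) in words[1], while OP_CLOSE (4) is
 * 1 << 4 in words[0]. nfs4_sp4_select_mode() below consults the same bitmap
 * in the server's reply via test_bit().
 */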
8520
8521 /*
8522 * Select the state protection mode for client `clp' given the server results
8523 * from exchange_id in `sp'.
8524 *
8525 * Returns 0 on success, negative errno otherwise.
8526 */
8527 static int nfs4_sp4_select_mode(struct nfs_client *clp,
8528 struct nfs41_state_protection *sp)
8529 {
8530 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
8531 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
8532 1 << (OP_EXCHANGE_ID - 32) |
8533 1 << (OP_CREATE_SESSION - 32) |
8534 1 << (OP_DESTROY_SESSION - 32) |
8535 1 << (OP_DESTROY_CLIENTID - 32)
8536 };
8537 unsigned long flags = 0;
8538 unsigned int i;
8539 int ret = 0;
8540
8541 if (sp->how == SP4_MACH_CRED) {
8542 /* Print state protect result */
8543 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
8544 for (i = 0; i <= LAST_NFS4_OP; i++) {
8545 if (test_bit(i, sp->enforce.u.longs))
8546 dfprintk(MOUNT, " enforce op %d\n", i);
8547 if (test_bit(i, sp->allow.u.longs))
8548 dfprintk(MOUNT, " allow op %d\n", i);
8549 }
8550
8551 /* make sure nothing is on enforce list that isn't supported */
8552 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
8553 if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
8554 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
8555 ret = -EINVAL;
8556 goto out;
8557 }
8558 }
8559
8560 /*
8561 * Minimal mode - state operations are allowed to use machine
8562 * credential. Note this already happens by default, so the
8563 * client doesn't have to do anything more than the negotiation.
8564 *
8565 * NOTE: we don't care if EXCHANGE_ID is in the list -
8566 * we're already using the machine cred for exchange_id
8567 * and will never use a different cred.
8568 */
8569 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
8570 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
8571 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
8572 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
8573 dfprintk(MOUNT, "sp4_mach_cred:\n");
8574 dfprintk(MOUNT, " minimal mode enabled\n");
8575 __set_bit(NFS_SP4_MACH_CRED_MINIMAL, &flags);
8576 } else {
8577 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
8578 ret = -EINVAL;
8579 goto out;
8580 }
8581
8582 if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
8583 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) &&
8584 test_bit(OP_DELEGRETURN, sp->allow.u.longs) &&
8585 test_bit(OP_LOCKU, sp->allow.u.longs)) {
8586 dfprintk(MOUNT, " cleanup mode enabled\n");
8587 __set_bit(NFS_SP4_MACH_CRED_CLEANUP, &flags);
8588 }
8589
8590 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) {
8591 dfprintk(MOUNT, " pnfs cleanup mode enabled\n");
8592 __set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, &flags);
8593 }
8594
8595 if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
8596 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
8597 dfprintk(MOUNT, " secinfo mode enabled\n");
8598 __set_bit(NFS_SP4_MACH_CRED_SECINFO, &flags);
8599 }
8600
8601 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
8602 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
8603 dfprintk(MOUNT, " stateid mode enabled\n");
8604 __set_bit(NFS_SP4_MACH_CRED_STATEID, &flags);
8605 }
8606
8607 if (test_bit(OP_WRITE, sp->allow.u.longs)) {
8608 dfprintk(MOUNT, " write mode enabled\n");
8609 __set_bit(NFS_SP4_MACH_CRED_WRITE, &flags);
8610 }
8611
8612 if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
8613 dfprintk(MOUNT, " commit mode enabled\n");
8614 __set_bit(NFS_SP4_MACH_CRED_COMMIT, &flags);
8615 }
8616 }
8617 out:
8618 clp->cl_sp4_flags = flags;
8619 return ret;
8620 }
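/*
 * Note: the NFS_SP4_MACH_CRED_* bits saved in clp->cl_sp4_flags here are
 * consumed later via nfs4_state_protect() (see, for example, the
 * LAYOUTRETURN and TEST_STATEID paths below), which switches the affected
 * operations to the machine credential when the corresponding mode was
 * negotiated.
 */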
8621
8622 struct nfs41_exchange_id_data {
8623 struct nfs41_exchange_id_res res;
8624 struct nfs41_exchange_id_args args;
8625 };
8626
8627 static void nfs4_exchange_id_release(void *data)
8628 {
8629 struct nfs41_exchange_id_data *cdata =
8630 (struct nfs41_exchange_id_data *)data;
8631
8632 nfs_put_client(cdata->args.client);
8633 kfree(cdata->res.impl_id);
8634 kfree(cdata->res.server_scope);
8635 kfree(cdata->res.server_owner);
8636 kfree(cdata);
8637 }
8638
8639 static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
8640 .rpc_release = nfs4_exchange_id_release,
8641 };
8642
8643 /*
8644 * nfs4_run_exchange_id()
8645 *
8646 * Set up and start the EXCHANGE_ID RPC task; callers wait on the returned task.
8647 */
8648 static struct rpc_task *
8649 nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred,
8650 u32 sp4_how, struct rpc_xprt *xprt)
8651 {
8652 struct rpc_message msg = {
8653 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
8654 .rpc_cred = cred,
8655 };
8656 struct rpc_task_setup task_setup_data = {
8657 .rpc_client = clp->cl_rpcclient,
8658 .callback_ops = &nfs4_exchange_id_call_ops,
8659 .rpc_message = &msg,
8660 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
8661 };
8662 struct nfs41_exchange_id_data *calldata;
8663 int status;
8664
8665 if (!refcount_inc_not_zero(&clp->cl_count))
8666 return ERR_PTR(-EIO);
8667
8668 status = -ENOMEM;
8669 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
8670 if (!calldata)
8671 goto out;
8672
8673 nfs4_init_boot_verifier(clp, &calldata->args.verifier);
8674
8675 status = nfs4_init_uniform_client_string(clp);
8676 if (status)
8677 goto out_calldata;
8678
8679 calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
8680 GFP_NOFS);
8681 status = -ENOMEM;
8682 if (unlikely(calldata->res.server_owner == NULL))
8683 goto out_calldata;
8684
8685 calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
8686 GFP_NOFS);
8687 if (unlikely(calldata->res.server_scope == NULL))
8688 goto out_server_owner;
8689
8690 calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
8691 if (unlikely(calldata->res.impl_id == NULL))
8692 goto out_server_scope;
8693
8694 switch (sp4_how) {
8695 case SP4_NONE:
8696 calldata->args.state_protect.how = SP4_NONE;
8697 break;
8698
8699 case SP4_MACH_CRED:
8700 calldata->args.state_protect = nfs4_sp4_mach_cred_request;
8701 break;
8702
8703 default:
8704 /* unsupported! */
8705 WARN_ON_ONCE(1);
8706 status = -EINVAL;
8707 goto out_impl_id;
8708 }
8709 if (xprt) {
8710 task_setup_data.rpc_xprt = xprt;
8711 task_setup_data.flags |= RPC_TASK_SOFTCONN;
8712 memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
8713 sizeof(calldata->args.verifier.data));
8714 }
8715 calldata->args.client = clp;
8716 calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
8717 EXCHGID4_FLAG_BIND_PRINC_STATEID;
8718 #ifdef CONFIG_NFS_V4_1_MIGRATION
8719 calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR;
8720 #endif
8721 if (test_bit(NFS_CS_DS, &clp->cl_flags))
8722 calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS;
8723 msg.rpc_argp = &calldata->args;
8724 msg.rpc_resp = &calldata->res;
8725 task_setup_data.callback_data = calldata;
8726
8727 return rpc_run_task(&task_setup_data);
8728
8729 out_impl_id:
8730 kfree(calldata->res.impl_id);
8731 out_server_scope:
8732 kfree(calldata->res.server_scope);
8733 out_server_owner:
8734 kfree(calldata->res.server_owner);
8735 out_calldata:
8736 kfree(calldata);
8737 out:
8738 nfs_put_client(clp);
8739 return ERR_PTR(status);
8740 }
8741
8742 /*
8743 * _nfs4_proc_exchange_id()
8744 *
8745 * Wrapper for EXCHANGE_ID operation.
8746 */
8747 static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred,
8748 u32 sp4_how)
8749 {
8750 struct rpc_task *task;
8751 struct nfs41_exchange_id_args *argp;
8752 struct nfs41_exchange_id_res *resp;
8753 unsigned long now = jiffies;
8754 int status;
8755
8756 task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL);
8757 if (IS_ERR(task))
8758 return PTR_ERR(task);
8759
8760 argp = task->tk_msg.rpc_argp;
8761 resp = task->tk_msg.rpc_resp;
8762 status = task->tk_status;
8763 if (status != 0)
8764 goto out;
8765
8766 status = nfs4_check_cl_exchange_flags(resp->flags,
8767 clp->cl_mvops->minor_version);
8768 if (status != 0)
8769 goto out;
8770
8771 status = nfs4_sp4_select_mode(clp, &resp->state_protect);
8772 if (status != 0)
8773 goto out;
8774
8775 do_renew_lease(clp, now);
8776
8777 clp->cl_clientid = resp->clientid;
8778 clp->cl_exchange_flags = resp->flags;
8779 clp->cl_seqid = resp->seqid;
8780 /* Client ID is not confirmed */
8781 if (!(resp->flags & EXCHGID4_FLAG_CONFIRMED_R))
8782 clear_bit(NFS4_SESSION_ESTABLISHED,
8783 &clp->cl_session->session_state);
8784
8785 if (clp->cl_serverscope != NULL &&
8786 !nfs41_same_server_scope(clp->cl_serverscope,
8787 resp->server_scope)) {
8788 dprintk("%s: server_scope mismatch detected\n",
8789 __func__);
8790 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
8791 }
8792
8793 swap(clp->cl_serverowner, resp->server_owner);
8794 swap(clp->cl_serverscope, resp->server_scope);
8795 swap(clp->cl_implid, resp->impl_id);
8796
8797 /* Save the EXCHANGE_ID verifier for use in session trunking tests */
8798 memcpy(clp->cl_confirm.data, argp->verifier.data,
8799 sizeof(clp->cl_confirm.data));
8800 out:
8801 trace_nfs4_exchange_id(clp, status);
8802 rpc_put_task(task);
8803 return status;
8804 }
8805
8806 /*
8807 * nfs4_proc_exchange_id()
8808 *
8809 * Returns zero, a negative errno, or a negative NFS4ERR status code.
8810 *
8811 * Since the clientid has expired, all compounds using sessions
8812 * associated with the stale clientid will be returning
8813 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
8814 * be in some phase of session reset.
8815 *
8816 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
8817 */
8818 int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred)
8819 {
8820 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
8821 int status;
8822
8823 /* try SP4_MACH_CRED if krb5i/p */
8824 if (authflavor == RPC_AUTH_GSS_KRB5I ||
8825 authflavor == RPC_AUTH_GSS_KRB5P) {
8826 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
8827 if (!status)
8828 return 0;
8829 }
8830
8831 /* try SP4_NONE */
8832 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
8833 }
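/*
 * Illustrative flow: with krb5i or krb5p the client first requests
 * SP4_MACH_CRED state protection; if that EXCHANGE_ID fails (or a weaker
 * auth flavor is in use) it falls back to a plain SP4_NONE exchange.
 */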
8834
8835 /**
8836 * nfs4_test_session_trunk - test an rpc_xprt for NFSv4.1 session trunking
8837 *
8838 * This is an add_xprt_test() test function called from
8839 * rpc_clnt_setup_test_and_add_xprt.
8840 *
8841 * The rpc_xprt_switch is referenced by rpc_clnt_setup_test_and_add_xprt
8842 * and is dereferenced in nfs4_exchange_id_release.
8843 *
8844 * Upon success, add the new transport to the rpc_clnt
8845 *
8846 * @clnt: struct rpc_clnt to get new transport
8847 * @xprt: the rpc_xprt to test
8848 * @data: call data for _nfs4_proc_exchange_id.
8849 */
8850 void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
8851 void *data)
8852 {
8853 struct nfs4_add_xprt_data *adata = (struct nfs4_add_xprt_data *)data;
8854 struct rpc_task *task;
8855 int status;
8856
8857 u32 sp4_how;
8858
8859 dprintk("--> %s try %s\n", __func__,
8860 xprt->address_strings[RPC_DISPLAY_ADDR]);
8861
8862 sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);
8863
8864 /* Test connection for session trunking. Async exchange_id call */
8865 task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
8866 if (IS_ERR(task))
8867 return;
8868
8869 status = task->tk_status;
8870 if (status == 0)
8871 status = nfs4_detect_session_trunking(adata->clp,
8872 task->tk_msg.rpc_resp, xprt);
8873
8874 if (status == 0)
8875 rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
8876
8877 rpc_put_task(task);
8878 }
8879 EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
8880
8881 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
8882 const struct cred *cred)
8883 {
8884 struct rpc_message msg = {
8885 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
8886 .rpc_argp = clp,
8887 .rpc_cred = cred,
8888 };
8889 int status;
8890
8891 status = rpc_call_sync(clp->cl_rpcclient, &msg,
8892 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
8893 trace_nfs4_destroy_clientid(clp, status);
8894 if (status)
8895 dprintk("NFS: Got error %d from the server %s on "
8896 "DESTROY_CLIENTID.", status, clp->cl_hostname);
8897 return status;
8898 }
8899
8900 static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
8901 const struct cred *cred)
8902 {
8903 unsigned int loop;
8904 int ret;
8905
8906 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
8907 ret = _nfs4_proc_destroy_clientid(clp, cred);
8908 switch (ret) {
8909 case -NFS4ERR_DELAY:
8910 case -NFS4ERR_CLIENTID_BUSY:
8911 ssleep(1);
8912 break;
8913 default:
8914 return ret;
8915 }
8916 }
8917 return 0;
8918 }
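/*
 * Note on the loop above: only NFS4ERR_DELAY and NFS4ERR_CLIENTID_BUSY are
 * retried (one-second sleep, up to NFS4_MAX_LOOP_ON_RECOVER attempts); any
 * other result is returned immediately, and exhausting the retries falls
 * through to a return value of 0.
 */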
8919
8920 int nfs4_destroy_clientid(struct nfs_client *clp)
8921 {
8922 const struct cred *cred;
8923 int ret = 0;
8924
8925 if (clp->cl_mvops->minor_version < 1)
8926 goto out;
8927 if (clp->cl_exchange_flags == 0)
8928 goto out;
8929 if (clp->cl_preserve_clid)
8930 goto out;
8931 cred = nfs4_get_clid_cred(clp);
8932 ret = nfs4_proc_destroy_clientid(clp, cred);
8933 put_cred(cred);
8934 switch (ret) {
8935 case 0:
8936 case -NFS4ERR_STALE_CLIENTID:
8937 clp->cl_exchange_flags = 0;
8938 }
8939 out:
8940 return ret;
8941 }
8942
8943 #endif /* CONFIG_NFS_V4_1 */
8944
8945 struct nfs4_get_lease_time_data {
8946 struct nfs4_get_lease_time_args *args;
8947 struct nfs4_get_lease_time_res *res;
8948 struct nfs_client *clp;
8949 };
8950
8951 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
8952 void *calldata)
8953 {
8954 struct nfs4_get_lease_time_data *data =
8955 (struct nfs4_get_lease_time_data *)calldata;
8956
8957 dprintk("--> %s\n", __func__);
8958 /* just set up the sequence; do not trigger session recovery
8959 since we're invoked from within one */
8960 nfs4_setup_sequence(data->clp,
8961 &data->args->la_seq_args,
8962 &data->res->lr_seq_res,
8963 task);
8964 dprintk("<-- %s\n", __func__);
8965 }
8966
8967 /*
8968 * Called from nfs4_state_manager thread for session setup, so don't recover
8969 * from sequence operation or clientid errors.
8970 */
8971 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
8972 {
8973 struct nfs4_get_lease_time_data *data =
8974 (struct nfs4_get_lease_time_data *)calldata;
8975
8976 dprintk("--> %s\n", __func__);
8977 if (!nfs4_sequence_done(task, &data->res->lr_seq_res))
8978 return;
8979 switch (task->tk_status) {
8980 case -NFS4ERR_DELAY:
8981 case -NFS4ERR_GRACE:
8982 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
8983 rpc_delay(task, NFS4_POLL_RETRY_MIN);
8984 task->tk_status = 0;
8985 fallthrough;
8986 case -NFS4ERR_RETRY_UNCACHED_REP:
8987 rpc_restart_call_prepare(task);
8988 return;
8989 }
8990 dprintk("<-- %s\n", __func__);
8991 }
8992
8993 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
8994 .rpc_call_prepare = nfs4_get_lease_time_prepare,
8995 .rpc_call_done = nfs4_get_lease_time_done,
8996 };
8997
8998 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
8999 {
9000 struct nfs4_get_lease_time_args args;
9001 struct nfs4_get_lease_time_res res = {
9002 .lr_fsinfo = fsinfo,
9003 };
9004 struct nfs4_get_lease_time_data data = {
9005 .args = &args,
9006 .res = &res,
9007 .clp = clp,
9008 };
9009 struct rpc_message msg = {
9010 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
9011 .rpc_argp = &args,
9012 .rpc_resp = &res,
9013 };
9014 struct rpc_task_setup task_setup = {
9015 .rpc_client = clp->cl_rpcclient,
9016 .rpc_message = &msg,
9017 .callback_ops = &nfs4_get_lease_time_ops,
9018 .callback_data = &data,
9019 .flags = RPC_TASK_TIMEOUT,
9020 };
9021
9022 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0, 1);
9023 return nfs4_call_sync_custom(&task_setup);
9024 }
9025
9026 #ifdef CONFIG_NFS_V4_1
9027
9028 /*
9029 * Initialize the values to be used by the client in CREATE_SESSION
9030 * Initialize the values to be used by the client in CREATE_SESSION.
9031 * use them.
9032 *
9033 * Set the back channel max_resp_sz_cached to zero to force the client to
9034 * always set csa_cachethis to FALSE because the current implementation
9035 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
9036 */
9037 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args,
9038 struct rpc_clnt *clnt)
9039 {
9040 unsigned int max_rqst_sz, max_resp_sz;
9041 unsigned int max_bc_payload = rpc_max_bc_payload(clnt);
9042 unsigned int max_bc_slots = rpc_num_bc_slots(clnt);
9043
9044 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
9045 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
9046
9047 /* Fore channel attributes */
9048 args->fc_attrs.max_rqst_sz = max_rqst_sz;
9049 args->fc_attrs.max_resp_sz = max_resp_sz;
9050 args->fc_attrs.max_ops = NFS4_MAX_OPS;
9051 args->fc_attrs.max_reqs = max_session_slots;
9052
9053 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
9054 "max_ops=%u max_reqs=%u\n",
9055 __func__,
9056 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
9057 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
9058
9059 /* Back channel attributes */
9060 args->bc_attrs.max_rqst_sz = max_bc_payload;
9061 args->bc_attrs.max_resp_sz = max_bc_payload;
9062 args->bc_attrs.max_resp_sz_cached = 0;
9063 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
9064 args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1);
9065 if (args->bc_attrs.max_reqs > max_bc_slots)
9066 args->bc_attrs.max_reqs = max_bc_slots;
9067
9068 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
9069 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
9070 __func__,
9071 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
9072 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
9073 args->bc_attrs.max_reqs);
9074 }
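/*
 * These are only the attributes the client proposes in CREATE_SESSION; the
 * values the server actually grants are checked by
 * nfs4_verify_channel_attrs() and only then committed to the session by
 * nfs4_update_session() below.
 */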
9075
9076 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
9077 struct nfs41_create_session_res *res)
9078 {
9079 struct nfs4_channel_attrs *sent = &args->fc_attrs;
9080 struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
9081
9082 if (rcvd->max_resp_sz > sent->max_resp_sz)
9083 return -EINVAL;
9084 /*
9085 * Our requested max_ops is the minimum we need; we're not
9086 * prepared to break up compounds into smaller pieces than that.
9087 * So, no point even trying to continue if the server won't
9088 * cooperate:
9089 */
9090 if (rcvd->max_ops < sent->max_ops)
9091 return -EINVAL;
9092 if (rcvd->max_reqs == 0)
9093 return -EINVAL;
9094 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
9095 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
9096 return 0;
9097 }
9098
9099 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
9100 struct nfs41_create_session_res *res)
9101 {
9102 struct nfs4_channel_attrs *sent = &args->bc_attrs;
9103 struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
9104
9105 if (!(res->flags & SESSION4_BACK_CHAN))
9106 goto out;
9107 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
9108 return -EINVAL;
9109 if (rcvd->max_resp_sz < sent->max_resp_sz)
9110 return -EINVAL;
9111 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
9112 return -EINVAL;
9113 if (rcvd->max_ops > sent->max_ops)
9114 return -EINVAL;
9115 if (rcvd->max_reqs > sent->max_reqs)
9116 return -EINVAL;
9117 out:
9118 return 0;
9119 }
9120
9121 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
9122 struct nfs41_create_session_res *res)
9123 {
9124 int ret;
9125
9126 ret = nfs4_verify_fore_channel_attrs(args, res);
9127 if (ret)
9128 return ret;
9129 return nfs4_verify_back_channel_attrs(args, res);
9130 }
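/*
 * Note the asymmetry between the two helpers above: on the fore channel the
 * client receives the replies, so a granted max_resp_sz larger than what we
 * offered is refused; on the back channel the roles reverse, so a larger
 * max_rqst_sz or a smaller max_resp_sz than we advertised is rejected.
 */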
9131
9132 static void nfs4_update_session(struct nfs4_session *session,
9133 struct nfs41_create_session_res *res)
9134 {
9135 nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
9136 /* Mark client id and session as being confirmed */
9137 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
9138 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
9139 session->flags = res->flags;
9140 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
9141 if (res->flags & SESSION4_BACK_CHAN)
9142 memcpy(&session->bc_attrs, &res->bc_attrs,
9143 sizeof(session->bc_attrs));
9144 }
9145
9146 static int _nfs4_proc_create_session(struct nfs_client *clp,
9147 const struct cred *cred)
9148 {
9149 struct nfs4_session *session = clp->cl_session;
9150 struct nfs41_create_session_args args = {
9151 .client = clp,
9152 .clientid = clp->cl_clientid,
9153 .seqid = clp->cl_seqid,
9154 .cb_program = NFS4_CALLBACK,
9155 };
9156 struct nfs41_create_session_res res;
9157
9158 struct rpc_message msg = {
9159 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
9160 .rpc_argp = &args,
9161 .rpc_resp = &res,
9162 .rpc_cred = cred,
9163 };
9164 int status;
9165
9166 nfs4_init_channel_attrs(&args, clp->cl_rpcclient);
9167 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
9168
9169 status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
9170 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
9171 trace_nfs4_create_session(clp, status);
9172
9173 switch (status) {
9174 case -NFS4ERR_STALE_CLIENTID:
9175 case -NFS4ERR_DELAY:
9176 case -ETIMEDOUT:
9177 case -EACCES:
9178 case -EAGAIN:
9179 goto out;
9180 }
9181
9182 clp->cl_seqid++;
9183 if (!status) {
9184 /* Verify the session's negotiated channel_attrs values */
9185 status = nfs4_verify_channel_attrs(&args, &res);
9186 /* Increment the clientid slot sequence id */
9187 if (status)
9188 goto out;
9189 nfs4_update_session(session, &res);
9190 }
9191 out:
9192 return status;
9193 }
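/*
 * Note: clp->cl_seqid is bumped for every CREATE_SESSION reply except the
 * transient failures filtered out by the switch above (stale clientid,
 * delays and timeouts), so that the next attempt presents a fresh clientid
 * sequence id.
 */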
9194
9195 /*
9196 * Issues a CREATE_SESSION operation to the server.
9197 * It is the responsibility of the caller to verify the session is
9198 * expired before calling this routine.
9199 */
9200 int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred)
9201 {
9202 int status;
9203 unsigned *ptr;
9204 struct nfs4_session *session = clp->cl_session;
9205
9206 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
9207
9208 status = _nfs4_proc_create_session(clp, cred);
9209 if (status)
9210 goto out;
9211
9212 /* Init or reset the session slot tables */
9213 status = nfs4_setup_session_slot_tables(session);
9214 dprintk("slot table setup returned %d\n", status);
9215 if (status)
9216 goto out;
9217
9218 ptr = (unsigned *)&session->sess_id.data[0];
9219 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
9220 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
9221 out:
9222 dprintk("<-- %s\n", __func__);
9223 return status;
9224 }
9225
9226 /*
9227 * Issue the over-the-wire RPC DESTROY_SESSION.
9228 * The caller must serialize access to this routine.
9229 */
9230 int nfs4_proc_destroy_session(struct nfs4_session *session,
9231 const struct cred *cred)
9232 {
9233 struct rpc_message msg = {
9234 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
9235 .rpc_argp = session,
9236 .rpc_cred = cred,
9237 };
9238 int status = 0;
9239
9240 dprintk("--> nfs4_proc_destroy_session\n");
9241
9242 /* session is still being set up */
9243 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
9244 return 0;
9245
9246 status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
9247 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
9248 trace_nfs4_destroy_session(session->clp, status);
9249
9250 if (status)
9251 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
9252 "Session has been destroyed regardless...\n", status);
9253
9254 dprintk("<-- nfs4_proc_destroy_session\n");
9255 return status;
9256 }
9257
9258 /*
9259 * Renew the cl_session lease.
9260 */
9261 struct nfs4_sequence_data {
9262 struct nfs_client *clp;
9263 struct nfs4_sequence_args args;
9264 struct nfs4_sequence_res res;
9265 };
9266
9267 static void nfs41_sequence_release(void *data)
9268 {
9269 struct nfs4_sequence_data *calldata = data;
9270 struct nfs_client *clp = calldata->clp;
9271
9272 if (refcount_read(&clp->cl_count) > 1)
9273 nfs4_schedule_state_renewal(clp);
9274 nfs_put_client(clp);
9275 kfree(calldata);
9276 }
9277
9278 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
9279 {
9280 switch(task->tk_status) {
9281 case -NFS4ERR_DELAY:
9282 rpc_delay(task, NFS4_POLL_RETRY_MAX);
9283 return -EAGAIN;
9284 default:
9285 nfs4_schedule_lease_recovery(clp);
9286 }
9287 return 0;
9288 }
9289
9290 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
9291 {
9292 struct nfs4_sequence_data *calldata = data;
9293 struct nfs_client *clp = calldata->clp;
9294
9295 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
9296 return;
9297
9298 trace_nfs4_sequence(clp, task->tk_status);
9299 if (task->tk_status < 0) {
9300 dprintk("%s ERROR %d\n", __func__, task->tk_status);
9301 if (refcount_read(&clp->cl_count) == 1)
9302 goto out;
9303
9304 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
9305 rpc_restart_call_prepare(task);
9306 return;
9307 }
9308 }
9309 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
9310 out:
9311 dprintk("<-- %s\n", __func__);
9312 }
9313
9314 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
9315 {
9316 struct nfs4_sequence_data *calldata = data;
9317 struct nfs_client *clp = calldata->clp;
9318 struct nfs4_sequence_args *args;
9319 struct nfs4_sequence_res *res;
9320
9321 args = task->tk_msg.rpc_argp;
9322 res = task->tk_msg.rpc_resp;
9323
9324 nfs4_setup_sequence(clp, args, res, task);
9325 }
9326
9327 static const struct rpc_call_ops nfs41_sequence_ops = {
9328 .rpc_call_done = nfs41_sequence_call_done,
9329 .rpc_call_prepare = nfs41_sequence_prepare,
9330 .rpc_release = nfs41_sequence_release,
9331 };
9332
9333 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
9334 const struct cred *cred,
9335 struct nfs4_slot *slot,
9336 bool is_privileged)
9337 {
9338 struct nfs4_sequence_data *calldata;
9339 struct rpc_message msg = {
9340 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
9341 .rpc_cred = cred,
9342 };
9343 struct rpc_task_setup task_setup_data = {
9344 .rpc_client = clp->cl_rpcclient,
9345 .rpc_message = &msg,
9346 .callback_ops = &nfs41_sequence_ops,
9347 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE,
9348 };
9349 struct rpc_task *ret;
9350
9351 ret = ERR_PTR(-EIO);
9352 if (!refcount_inc_not_zero(&clp->cl_count))
9353 goto out_err;
9354
9355 ret = ERR_PTR(-ENOMEM);
9356 calldata = kzalloc(sizeof(*calldata), GFP_KERNEL);
9357 if (calldata == NULL)
9358 goto out_put_clp;
9359 nfs4_init_sequence(&calldata->args, &calldata->res, 0, is_privileged);
9360 nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot);
9361 msg.rpc_argp = &calldata->args;
9362 msg.rpc_resp = &calldata->res;
9363 calldata->clp = clp;
9364 task_setup_data.callback_data = calldata;
9365
9366 ret = rpc_run_task(&task_setup_data);
9367 if (IS_ERR(ret))
9368 goto out_err;
9369 return ret;
9370 out_put_clp:
9371 nfs_put_client(clp);
9372 out_err:
9373 nfs41_release_slot(slot);
9374 return ret;
9375 }
9376
9377 static int nfs41_proc_async_sequence(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
9378 {
9379 struct rpc_task *task;
9380 int ret = 0;
9381
9382 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
9383 return -EAGAIN;
9384 task = _nfs41_proc_sequence(clp, cred, NULL, false);
9385 if (IS_ERR(task))
9386 ret = PTR_ERR(task);
9387 else
9388 rpc_put_task_async(task);
9389 dprintk("<-- %s status=%d\n", __func__, ret);
9390 return ret;
9391 }
9392
9393 static int nfs4_proc_sequence(struct nfs_client *clp, const struct cred *cred)
9394 {
9395 struct rpc_task *task;
9396 int ret;
9397
9398 task = _nfs41_proc_sequence(clp, cred, NULL, true);
9399 if (IS_ERR(task)) {
9400 ret = PTR_ERR(task);
9401 goto out;
9402 }
9403 ret = rpc_wait_for_completion_task(task);
9404 if (!ret)
9405 ret = task->tk_status;
9406 rpc_put_task(task);
9407 out:
9408 dprintk("<-- %s status=%d\n", __func__, ret);
9409 return ret;
9410 }
9411
9412 struct nfs4_reclaim_complete_data {
9413 struct nfs_client *clp;
9414 struct nfs41_reclaim_complete_args arg;
9415 struct nfs41_reclaim_complete_res res;
9416 };
9417
9418 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
9419 {
9420 struct nfs4_reclaim_complete_data *calldata = data;
9421
9422 nfs4_setup_sequence(calldata->clp,
9423 &calldata->arg.seq_args,
9424 &calldata->res.seq_res,
9425 task);
9426 }
9427
9428 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
9429 {
9430 switch(task->tk_status) {
9431 case 0:
9432 wake_up_all(&clp->cl_lock_waitq);
9433 fallthrough;
9434 case -NFS4ERR_COMPLETE_ALREADY:
9435 case -NFS4ERR_WRONG_CRED: /* What to do here? */
9436 break;
9437 case -NFS4ERR_DELAY:
9438 rpc_delay(task, NFS4_POLL_RETRY_MAX);
9439 fallthrough;
9440 case -NFS4ERR_RETRY_UNCACHED_REP:
9441 case -EACCES:
9442 dprintk("%s: failed to reclaim complete error %d for server %s, retrying\n",
9443 __func__, task->tk_status, clp->cl_hostname);
9444 return -EAGAIN;
9445 case -NFS4ERR_BADSESSION:
9446 case -NFS4ERR_DEADSESSION:
9447 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
9448 break;
9449 default:
9450 nfs4_schedule_lease_recovery(clp);
9451 }
9452 return 0;
9453 }
9454
9455 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
9456 {
9457 struct nfs4_reclaim_complete_data *calldata = data;
9458 struct nfs_client *clp = calldata->clp;
9459 struct nfs4_sequence_res *res = &calldata->res.seq_res;
9460
9461 dprintk("--> %s\n", __func__);
9462 if (!nfs41_sequence_done(task, res))
9463 return;
9464
9465 trace_nfs4_reclaim_complete(clp, task->tk_status);
9466 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
9467 rpc_restart_call_prepare(task);
9468 return;
9469 }
9470 dprintk("<-- %s\n", __func__);
9471 }
9472
9473 static void nfs4_free_reclaim_complete_data(void *data)
9474 {
9475 struct nfs4_reclaim_complete_data *calldata = data;
9476
9477 kfree(calldata);
9478 }
9479
9480 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
9481 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
9482 .rpc_call_done = nfs4_reclaim_complete_done,
9483 .rpc_release = nfs4_free_reclaim_complete_data,
9484 };
9485
9486 /*
9487 * Issue a global reclaim complete.
9488 */
9489 static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
9490 const struct cred *cred)
9491 {
9492 struct nfs4_reclaim_complete_data *calldata;
9493 struct rpc_message msg = {
9494 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
9495 .rpc_cred = cred,
9496 };
9497 struct rpc_task_setup task_setup_data = {
9498 .rpc_client = clp->cl_rpcclient,
9499 .rpc_message = &msg,
9500 .callback_ops = &nfs4_reclaim_complete_call_ops,
9501 .flags = RPC_TASK_NO_ROUND_ROBIN,
9502 };
9503 int status = -ENOMEM;
9504
9505 dprintk("--> %s\n", __func__);
9506 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
9507 if (calldata == NULL)
9508 goto out;
9509 calldata->clp = clp;
9510 calldata->arg.one_fs = 0;
9511
9512 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0, 1);
9513 msg.rpc_argp = &calldata->arg;
9514 msg.rpc_resp = &calldata->res;
9515 task_setup_data.callback_data = calldata;
9516 status = nfs4_call_sync_custom(&task_setup_data);
9517 out:
9518 dprintk("<-- %s status=%d\n", __func__, status);
9519 return status;
9520 }
9521
9522 static void
9523 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
9524 {
9525 struct nfs4_layoutget *lgp = calldata;
9526 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
9527
9528 dprintk("--> %s\n", __func__);
9529 nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args,
9530 &lgp->res.seq_res, task);
9531 dprintk("<-- %s\n", __func__);
9532 }
9533
9534 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
9535 {
9536 struct nfs4_layoutget *lgp = calldata;
9537
9538 dprintk("--> %s\n", __func__);
9539 nfs41_sequence_process(task, &lgp->res.seq_res);
9540 dprintk("<-- %s\n", __func__);
9541 }
9542
9543 static int
9544 nfs4_layoutget_handle_exception(struct rpc_task *task,
9545 struct nfs4_layoutget *lgp, struct nfs4_exception *exception)
9546 {
9547 struct inode *inode = lgp->args.inode;
9548 struct nfs_server *server = NFS_SERVER(inode);
9549 struct pnfs_layout_hdr *lo = lgp->lo;
9550 int nfs4err = task->tk_status;
9551 int err, status = 0;
9552 LIST_HEAD(head);
9553
9554 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
9555
9556 nfs4_sequence_free_slot(&lgp->res.seq_res);
9557
9558 switch (nfs4err) {
9559 case 0:
9560 goto out;
9561
9562 /*
9563 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs
9564 * on this file. Set tk_status to -ENODATA to tell the upper layer
9565 * to retry the I/O in-band through the MDS.
9566 */
9567 case -NFS4ERR_LAYOUTUNAVAILABLE:
9568 status = -ENODATA;
9569 goto out;
9570 /*
9571 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of at
9572 * least lgp->args.minlength bytes (see RFC5661 section 18.43.3).
9573 */
9574 case -NFS4ERR_BADLAYOUT:
9575 status = -EOVERFLOW;
9576 goto out;
9577 /*
9578 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
9579 * (or clients) writing to the same RAID stripe except when
9580 * the minlength argument is 0 (see RFC5661 section 18.43.3).
9581 *
9582 * Treat it like we would RECALLCONFLICT -- we retry for a little
9583 * while, and then eventually give up.
9584 */
9585 case -NFS4ERR_LAYOUTTRYLATER:
9586 if (lgp->args.minlength == 0) {
9587 status = -EOVERFLOW;
9588 goto out;
9589 }
9590 status = -EBUSY;
9591 break;
9592 case -NFS4ERR_RECALLCONFLICT:
9593 case -NFS4ERR_RETURNCONFLICT:
9594 status = -ERECALLCONFLICT;
9595 break;
9596 case -NFS4ERR_DELEG_REVOKED:
9597 case -NFS4ERR_ADMIN_REVOKED:
9598 case -NFS4ERR_EXPIRED:
9599 case -NFS4ERR_BAD_STATEID:
9600 exception->timeout = 0;
9601 spin_lock(&inode->i_lock);
9602 /* If the open stateid was bad, then recover it. */
9603 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
9604 !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
9605 spin_unlock(&inode->i_lock);
9606 exception->state = lgp->args.ctx->state;
9607 exception->stateid = &lgp->args.stateid;
9608 break;
9609 }
9610
9611 /*
9612 * Mark the bad layout state as invalid, then retry
9613 */
9614 pnfs_mark_layout_stateid_invalid(lo, &head);
9615 spin_unlock(&inode->i_lock);
9616 nfs_commit_inode(inode, 0);
9617 pnfs_free_lseg_list(&head);
9618 status = -EAGAIN;
9619 goto out;
9620 }
9621
9622 err = nfs4_handle_exception(server, nfs4err, exception);
9623 if (!status) {
9624 if (exception->retry)
9625 status = -EAGAIN;
9626 else
9627 status = err;
9628 }
9629 out:
9630 dprintk("<-- %s\n", __func__);
9631 return status;
9632 }
9633
9634 size_t max_response_pages(struct nfs_server *server)
9635 {
9636 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
9637 return nfs_page_array_len(0, max_resp_sz);
9638 }
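/*
 * Rough illustration (assumed values): with 4 KiB pages and a negotiated
 * fore-channel max_resp_sz of 1 MiB, this reserves on the order of 256
 * pages for the reply page array.
 */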
9639
9640 static void nfs4_layoutget_release(void *calldata)
9641 {
9642 struct nfs4_layoutget *lgp = calldata;
9643
9644 dprintk("--> %s\n", __func__);
9645 nfs4_sequence_free_slot(&lgp->res.seq_res);
9646 pnfs_layoutget_free(lgp);
9647 dprintk("<-- %s\n", __func__);
9648 }
9649
9650 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
9651 .rpc_call_prepare = nfs4_layoutget_prepare,
9652 .rpc_call_done = nfs4_layoutget_done,
9653 .rpc_release = nfs4_layoutget_release,
9654 };
9655
9656 struct pnfs_layout_segment *
9657 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
9658 {
9659 struct inode *inode = lgp->args.inode;
9660 struct nfs_server *server = NFS_SERVER(inode);
9661 struct rpc_task *task;
9662 struct rpc_message msg = {
9663 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
9664 .rpc_argp = &lgp->args,
9665 .rpc_resp = &lgp->res,
9666 .rpc_cred = lgp->cred,
9667 };
9668 struct rpc_task_setup task_setup_data = {
9669 .rpc_client = server->client,
9670 .rpc_message = &msg,
9671 .callback_ops = &nfs4_layoutget_call_ops,
9672 .callback_data = lgp,
9673 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF |
9674 RPC_TASK_MOVEABLE,
9675 };
9676 struct pnfs_layout_segment *lseg = NULL;
9677 struct nfs4_exception exception = {
9678 .inode = inode,
9679 .timeout = *timeout,
9680 };
9681 int status = 0;
9682
9683 dprintk("--> %s\n", __func__);
9684
9685 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0);
9686
9687 task = rpc_run_task(&task_setup_data);
9688
9689 status = rpc_wait_for_completion_task(task);
9690 if (status != 0)
9691 goto out;
9692
9693 if (task->tk_status < 0) {
9694 status = nfs4_layoutget_handle_exception(task, lgp, &exception);
9695 *timeout = exception.timeout;
9696 } else if (lgp->res.layoutp->len == 0) {
9697 status = -EAGAIN;
9698 *timeout = nfs4_update_delay(&exception.timeout);
9699 } else
9700 lseg = pnfs_layout_process(lgp);
9701 out:
9702 trace_nfs4_layoutget(lgp->args.ctx,
9703 &lgp->args.range,
9704 &lgp->res.range,
9705 &lgp->res.stateid,
9706 status);
9707
9708 rpc_put_task(task);
9709 dprintk("<-- %s status=%d\n", __func__, status);
9710 if (status)
9711 return ERR_PTR(status);
9712 return lseg;
9713 }
9714
9715 static void
9716 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
9717 {
9718 struct nfs4_layoutreturn *lrp = calldata;
9719
9720 dprintk("--> %s\n", __func__);
9721 nfs4_setup_sequence(lrp->clp,
9722 &lrp->args.seq_args,
9723 &lrp->res.seq_res,
9724 task);
9725 if (!pnfs_layout_is_valid(lrp->args.layout))
9726 rpc_exit(task, 0);
9727 }
9728
9729 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
9730 {
9731 struct nfs4_layoutreturn *lrp = calldata;
9732 struct nfs_server *server;
9733
9734 dprintk("--> %s\n", __func__);
9735
9736 if (!nfs41_sequence_process(task, &lrp->res.seq_res))
9737 return;
9738
9739 /*
9740 * Was there an RPC level error? Assume the call succeeded,
9741 * and that we need to release the layout
9742 */
9743 if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) {
9744 lrp->res.lrs_present = 0;
9745 return;
9746 }
9747
9748 server = NFS_SERVER(lrp->args.inode);
9749 switch (task->tk_status) {
9750 case -NFS4ERR_OLD_STATEID:
9751 if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid,
9752 &lrp->args.range,
9753 lrp->args.inode))
9754 goto out_restart;
9755 fallthrough;
9756 default:
9757 task->tk_status = 0;
9758 fallthrough;
9759 case 0:
9760 break;
9761 case -NFS4ERR_DELAY:
9762 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
9763 break;
9764 goto out_restart;
9765 }
9766 dprintk("<-- %s\n", __func__);
9767 return;
9768 out_restart:
9769 task->tk_status = 0;
9770 nfs4_sequence_free_slot(&lrp->res.seq_res);
9771 rpc_restart_call_prepare(task);
9772 }
9773
9774 static void nfs4_layoutreturn_release(void *calldata)
9775 {
9776 struct nfs4_layoutreturn *lrp = calldata;
9777 struct pnfs_layout_hdr *lo = lrp->args.layout;
9778
9779 dprintk("--> %s\n", __func__);
9780 pnfs_layoutreturn_free_lsegs(lo, &lrp->args.stateid, &lrp->args.range,
9781 lrp->res.lrs_present ? &lrp->res.stateid : NULL);
9782 nfs4_sequence_free_slot(&lrp->res.seq_res);
9783 if (lrp->ld_private.ops && lrp->ld_private.ops->free)
9784 lrp->ld_private.ops->free(&lrp->ld_private);
9785 pnfs_put_layout_hdr(lrp->args.layout);
9786 nfs_iput_and_deactive(lrp->inode);
9787 put_cred(lrp->cred);
9788 kfree(calldata);
9789 dprintk("<-- %s\n", __func__);
9790 }
9791
9792 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
9793 .rpc_call_prepare = nfs4_layoutreturn_prepare,
9794 .rpc_call_done = nfs4_layoutreturn_done,
9795 .rpc_release = nfs4_layoutreturn_release,
9796 };
9797
9798 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
9799 {
9800 struct rpc_task *task;
9801 struct rpc_message msg = {
9802 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
9803 .rpc_argp = &lrp->args,
9804 .rpc_resp = &lrp->res,
9805 .rpc_cred = lrp->cred,
9806 };
9807 struct rpc_task_setup task_setup_data = {
9808 .rpc_client = NFS_SERVER(lrp->args.inode)->client,
9809 .rpc_message = &msg,
9810 .callback_ops = &nfs4_layoutreturn_call_ops,
9811 .callback_data = lrp,
9812 .flags = RPC_TASK_MOVEABLE,
9813 };
9814 int status = 0;
9815
9816 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client,
9817 NFS_SP4_MACH_CRED_PNFS_CLEANUP,
9818 &task_setup_data.rpc_client, &msg);
9819
9820 dprintk("--> %s\n", __func__);
9821 lrp->inode = nfs_igrab_and_active(lrp->args.inode);
9822 if (!sync) {
9823 if (!lrp->inode) {
9824 nfs4_layoutreturn_release(lrp);
9825 return -EAGAIN;
9826 }
9827 task_setup_data.flags |= RPC_TASK_ASYNC;
9828 }
9829 if (!lrp->inode)
9830 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
9831 1);
9832 else
9833 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
9834 0);
9835 task = rpc_run_task(&task_setup_data);
9836 if (IS_ERR(task))
9837 return PTR_ERR(task);
9838 if (sync)
9839 status = task->tk_status;
9840 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status);
9841 dprintk("<-- %s status=%d\n", __func__, status);
9842 rpc_put_task(task);
9843 return status;
9844 }
9845
9846 static int
9847 _nfs4_proc_getdeviceinfo(struct nfs_server *server,
9848 struct pnfs_device *pdev,
9849 const struct cred *cred)
9850 {
9851 struct nfs4_getdeviceinfo_args args = {
9852 .pdev = pdev,
9853 .notify_types = NOTIFY_DEVICEID4_CHANGE |
9854 NOTIFY_DEVICEID4_DELETE,
9855 };
9856 struct nfs4_getdeviceinfo_res res = {
9857 .pdev = pdev,
9858 };
9859 struct rpc_message msg = {
9860 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
9861 .rpc_argp = &args,
9862 .rpc_resp = &res,
9863 .rpc_cred = cred,
9864 };
9865 int status;
9866
9867 dprintk("--> %s\n", __func__);
9868 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
9869 if (res.notification & ~args.notify_types)
9870 dprintk("%s: unsupported notification\n", __func__);
9871 if (res.notification != args.notify_types)
9872 pdev->nocache = 1;
9873
9874 trace_nfs4_getdeviceinfo(server, &pdev->dev_id, status);
9875
9876 dprintk("<-- %s status=%d\n", __func__, status);
9877
9878 return status;
9879 }
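/*
 * Note on the notification handling above: bits the server sets beyond the
 * requested CHANGE/DELETE types are only logged, but any difference from
 * the requested set disables caching of this device ID via pdev->nocache.
 */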
9880
9881 int nfs4_proc_getdeviceinfo(struct nfs_server *server,
9882 struct pnfs_device *pdev,
9883 const struct cred *cred)
9884 {
9885 struct nfs4_exception exception = { };
9886 int err;
9887
9888 do {
9889 err = nfs4_handle_exception(server,
9890 _nfs4_proc_getdeviceinfo(server, pdev, cred),
9891 &exception);
9892 } while (exception.retry);
9893 return err;
9894 }
9895 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
9896
9897 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
9898 {
9899 struct nfs4_layoutcommit_data *data = calldata;
9900 struct nfs_server *server = NFS_SERVER(data->args.inode);
9901
9902 nfs4_setup_sequence(server->nfs_client,
9903 &data->args.seq_args,
9904 &data->res.seq_res,
9905 task);
9906 }
9907
9908 static void
9909 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
9910 {
9911 struct nfs4_layoutcommit_data *data = calldata;
9912 struct nfs_server *server = NFS_SERVER(data->args.inode);
9913
9914 if (!nfs41_sequence_done(task, &data->res.seq_res))
9915 return;
9916
9917 switch (task->tk_status) { /* Just ignore these failures */
9918 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
9919 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
9920 case -NFS4ERR_BADLAYOUT: /* no layout */
9921 case -NFS4ERR_GRACE: /* loca_reclaim always false */
9922 task->tk_status = 0;
9923 break;
9924 case 0:
9925 break;
9926 default:
9927 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
9928 rpc_restart_call_prepare(task);
9929 return;
9930 }
9931 }
9932 }
9933
9934 static void nfs4_layoutcommit_release(void *calldata)
9935 {
9936 struct nfs4_layoutcommit_data *data = calldata;
9937
9938 pnfs_cleanup_layoutcommit(data);
9939 nfs_post_op_update_inode_force_wcc(data->args.inode,
9940 data->res.fattr);
9941 put_cred(data->cred);
9942 nfs_iput_and_deactive(data->inode);
9943 kfree(data);
9944 }
9945
9946 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
9947 .rpc_call_prepare = nfs4_layoutcommit_prepare,
9948 .rpc_call_done = nfs4_layoutcommit_done,
9949 .rpc_release = nfs4_layoutcommit_release,
9950 };
9951
9952 int
9953 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
9954 {
9955 struct rpc_message msg = {
9956 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
9957 .rpc_argp = &data->args,
9958 .rpc_resp = &data->res,
9959 .rpc_cred = data->cred,
9960 };
9961 struct rpc_task_setup task_setup_data = {
9962 .task = &data->task,
9963 .rpc_client = NFS_CLIENT(data->args.inode),
9964 .rpc_message = &msg,
9965 .callback_ops = &nfs4_layoutcommit_ops,
9966 .callback_data = data,
9967 .flags = RPC_TASK_MOVEABLE,
9968 };
9969 struct rpc_task *task;
9970 int status = 0;
9971
9972 dprintk("NFS: initiating layoutcommit call. sync %d "
9973 "lbw: %llu inode %lu\n", sync,
9974 data->args.lastbytewritten,
9975 data->args.inode->i_ino);
9976
9977 if (!sync) {
9978 data->inode = nfs_igrab_and_active(data->args.inode);
9979 if (data->inode == NULL) {
9980 nfs4_layoutcommit_release(data);
9981 return -EAGAIN;
9982 }
9983 task_setup_data.flags = RPC_TASK_ASYNC;
9984 }
9985 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
9986 task = rpc_run_task(&task_setup_data);
9987 if (IS_ERR(task))
9988 return PTR_ERR(task);
9989 if (sync)
9990 status = task->tk_status;
9991 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status);
9992 dprintk("%s: status %d\n", __func__, status);
9993 rpc_put_task(task);
9994 return status;
9995 }
9996
9997 /*
9998 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
9999 * possible) as per RFC3530bis and RFC5661 Security Considerations sections
10000 */
10001 static int
10002 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
10003 struct nfs_fsinfo *info,
10004 struct nfs4_secinfo_flavors *flavors, bool use_integrity)
10005 {
10006 struct nfs41_secinfo_no_name_args args = {
10007 .style = SECINFO_STYLE_CURRENT_FH,
10008 };
10009 struct nfs4_secinfo_res res = {
10010 .flavors = flavors,
10011 };
10012 struct rpc_message msg = {
10013 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
10014 .rpc_argp = &args,
10015 .rpc_resp = &res,
10016 };
10017 struct nfs4_call_sync_data data = {
10018 .seq_server = server,
10019 .seq_args = &args.seq_args,
10020 .seq_res = &res.seq_res,
10021 };
10022 struct rpc_task_setup task_setup = {
10023 .rpc_client = server->client,
10024 .rpc_message = &msg,
10025 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops,
10026 .callback_data = &data,
10027 .flags = RPC_TASK_NO_ROUND_ROBIN,
10028 };
10029 const struct cred *cred = NULL;
10030 int status;
10031
10032 if (use_integrity) {
10033 task_setup.rpc_client = server->nfs_client->cl_rpcclient;
10034
10035 cred = nfs4_get_clid_cred(server->nfs_client);
10036 msg.rpc_cred = cred;
10037 }
10038
10039 dprintk("--> %s\n", __func__);
10040 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
10041 status = nfs4_call_sync_custom(&task_setup);
10042 dprintk("<-- %s status=%d\n", __func__, status);
10043
10044 put_cred(cred);
10045
10046 return status;
10047 }
10048
10049 static int
10050 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
10051 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
10052 {
10053 struct nfs4_exception exception = {
10054 .interruptible = true,
10055 };
10056 int err;
10057 do {
10058 /* first try using integrity protection */
10059 err = -NFS4ERR_WRONGSEC;
10060
10061 /* try to use integrity protection with machine cred */
10062 if (_nfs4_is_integrity_protected(server->nfs_client))
10063 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
10064 flavors, true);
10065
10066 /*
10067 * if unable to use integrity protection, or SECINFO with
10068 * integrity protection returns NFS4ERR_WRONGSEC (which is
10069 * disallowed by spec, but exists in deployed servers) use
10070 * the current filesystem's rpc_client and the user cred.
10071 */
10072 if (err == -NFS4ERR_WRONGSEC)
10073 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
10074 flavors, false);
10075
10076 switch (err) {
10077 case 0:
10078 case -NFS4ERR_WRONGSEC:
10079 case -ENOTSUPP:
10080 goto out;
10081 default:
10082 err = nfs4_handle_exception(server, err, &exception);
10083 }
10084 } while (exception.retry);
10085 out:
10086 return err;
10087 }
10088
10089 static int
10090 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
10091 struct nfs_fsinfo *info)
10092 {
10093 int err;
10094 struct page *page;
10095 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
10096 struct nfs4_secinfo_flavors *flavors;
10097 struct nfs4_secinfo4 *secinfo;
10098 int i;
10099
10100 page = alloc_page(GFP_KERNEL);
10101 if (!page) {
10102 err = -ENOMEM;
10103 goto out;
10104 }
10105
10106 flavors = page_address(page);
10107 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
10108
10109 /*
10110 * Fall back on "guess and check" method if
10111 * the server doesn't support SECINFO_NO_NAME
10112 */
10113 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
10114 err = nfs4_find_root_sec(server, fhandle, info);
10115 goto out_freepage;
10116 }
10117 if (err)
10118 goto out_freepage;
10119
10120 for (i = 0; i < flavors->num_flavors; i++) {
10121 secinfo = &flavors->flavors[i];
10122
10123 switch (secinfo->flavor) {
10124 case RPC_AUTH_NULL:
10125 case RPC_AUTH_UNIX:
10126 case RPC_AUTH_GSS:
10127 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
10128 &secinfo->flavor_info);
10129 break;
10130 default:
10131 flavor = RPC_AUTH_MAXFLAVOR;
10132 break;
10133 }
10134
10135 if (!nfs_auth_info_match(&server->auth_info, flavor))
10136 flavor = RPC_AUTH_MAXFLAVOR;
10137
10138 if (flavor != RPC_AUTH_MAXFLAVOR) {
10139 err = nfs4_lookup_root_sec(server, fhandle,
10140 info, flavor);
10141 if (!err)
10142 break;
10143 }
10144 }
10145
10146 if (flavor == RPC_AUTH_MAXFLAVOR)
10147 err = -EPERM;
10148
10149 out_freepage:
10150 put_page(page);
10151 if (err == -EACCES)
10152 return -EPERM;
10153 out:
10154 return err;
10155 }
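/*
 * The loop above walks the flavor list returned by SECINFO_NO_NAME in
 * server order: AUTH_NULL, AUTH_UNIX and GSS entries are mapped to a
 * pseudoflavor via rpcauth_get_pseudoflavor(), flavors ruled out by the
 * mount's auth_info are skipped, and the first flavor for which
 * nfs4_lookup_root_sec() succeeds wins.  If nothing works the caller
 * sees -EPERM (-EACCES is likewise remapped to -EPERM above).
 */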
10156
10157 static int _nfs41_test_stateid(struct nfs_server *server,
10158 nfs4_stateid *stateid,
10159 const struct cred *cred)
10160 {
10161 int status;
10162 struct nfs41_test_stateid_args args = {
10163 .stateid = stateid,
10164 };
10165 struct nfs41_test_stateid_res res;
10166 struct rpc_message msg = {
10167 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
10168 .rpc_argp = &args,
10169 .rpc_resp = &res,
10170 .rpc_cred = cred,
10171 };
10172 struct rpc_clnt *rpc_client = server->client;
10173
10174 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
10175 &rpc_client, &msg);
10176
10177 dprintk("NFS call test_stateid %p\n", stateid);
10178 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
10179 status = nfs4_call_sync_sequence(rpc_client, server, &msg,
10180 &args.seq_args, &res.seq_res);
10181 if (status != NFS_OK) {
10182 dprintk("NFS reply test_stateid: failed, %d\n", status);
10183 return status;
10184 }
10185 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
10186 return -res.status;
10187 }
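/*
 * Result convention for _nfs41_test_stateid(): a transport or session
 * level failure is returned as-is, while a successful round trip
 * returns the negated per-stateid status from res.status, so NFS_OK
 * (0) means the server still considers the stateid valid.
 */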
10188
10189 static void nfs4_handle_delay_or_session_error(struct nfs_server *server,
10190 int err, struct nfs4_exception *exception)
10191 {
10192 exception->retry = 0;
10193 switch(err) {
10194 case -NFS4ERR_DELAY:
10195 case -NFS4ERR_RETRY_UNCACHED_REP:
10196 nfs4_handle_exception(server, err, exception);
10197 break;
10198 case -NFS4ERR_BADSESSION:
10199 case -NFS4ERR_BADSLOT:
10200 case -NFS4ERR_BAD_HIGH_SLOT:
10201 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
10202 case -NFS4ERR_DEADSESSION:
10203 nfs4_do_handle_exception(server, err, exception);
10204 }
10205 }
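/*
 * nfs4_handle_delay_or_session_error() clears exception->retry up
 * front: only NFS4ERR_DELAY/RETRY_UNCACHED_REP and the session-level
 * errors listed above can re-arm a retry.  Any other status (for
 * instance a stateid error from TEST_STATEID) is left for the caller,
 * so that state recovery can decide what to do with it.
 */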
10206
10207 /**
10208 * nfs41_test_stateid - perform a TEST_STATEID operation
10209 *
10210 * @server: server / transport on which to perform the operation
10211 * @stateid: state ID to test
10212 * @cred: credential
10213 *
10214 * Returns NFS_OK if the server recognizes that "stateid" is valid.
10215 * Otherwise a negative NFS4ERR value is returned if the operation
10216 * failed or the state ID is not currently valid.
10217 */
10218 static int nfs41_test_stateid(struct nfs_server *server,
10219 nfs4_stateid *stateid,
10220 const struct cred *cred)
10221 {
10222 struct nfs4_exception exception = {
10223 .interruptible = true,
10224 };
10225 int err;
10226 do {
10227 err = _nfs41_test_stateid(server, stateid, cred);
10228 nfs4_handle_delay_or_session_error(server, err, &exception);
10229 } while (exception.retry);
10230 return err;
10231 }
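/*
 * Illustrative (not verbatim) caller pattern for the helpers in this
 * block, roughly what the .test_and_free_expired hook
 * (nfs41_test_and_free_expired_stateid, wired into the minor version
 * ops below) is expected to do: test the stateid and release it once
 * the server reports it as no longer valid.
 *
 *	status = nfs41_test_stateid(server, stateid, cred);
 *	switch (status) {
 *	case -NFS4ERR_EXPIRED:
 *	case -NFS4ERR_ADMIN_REVOKED:
 *	case -NFS4ERR_DELEG_REVOKED:
 *		nfs41_free_stateid(server, stateid, cred, true);
 *	}
 *
 * The exact error set the real helper handles is defined elsewhere in
 * this file.
 */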
10232
10233 struct nfs_free_stateid_data {
10234 struct nfs_server *server;
10235 struct nfs41_free_stateid_args args;
10236 struct nfs41_free_stateid_res res;
10237 };
10238
10239 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
10240 {
10241 struct nfs_free_stateid_data *data = calldata;
10242 nfs4_setup_sequence(data->server->nfs_client,
10243 &data->args.seq_args,
10244 &data->res.seq_res,
10245 task);
10246 }
10247
10248 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
10249 {
10250 struct nfs_free_stateid_data *data = calldata;
10251
10252 nfs41_sequence_done(task, &data->res.seq_res);
10253
10254 switch (task->tk_status) {
10255 case -NFS4ERR_DELAY:
10256 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
10257 rpc_restart_call_prepare(task);
10258 }
10259 }
10260
10261 static void nfs41_free_stateid_release(void *calldata)
10262 {
10263 kfree(calldata);
10264 }
10265
10266 static const struct rpc_call_ops nfs41_free_stateid_ops = {
10267 .rpc_call_prepare = nfs41_free_stateid_prepare,
10268 .rpc_call_done = nfs41_free_stateid_done,
10269 .rpc_release = nfs41_free_stateid_release,
10270 };
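/*
 * The FREE_STATEID callbacks above are deliberately minimal: prepare
 * just claims a session slot, done only restarts the call on
 * NFS4ERR_DELAY, and release frees the private data.  Other errors are
 * ignored, since there is nothing useful a caller could do with them.
 */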
10271
10272 /**
10273 * nfs41_free_stateid - perform a FREE_STATEID operation
10274 *
10275 * @server: server / transport on which to perform the operation
10276 * @stateid: state ID to release
10277 * @cred: credential
10278 * @privileged: set to true if this call needs to be privileged
10279 *
10280 * Note: this function is always asynchronous.
10281 */
10282 static int nfs41_free_stateid(struct nfs_server *server,
10283 const nfs4_stateid *stateid,
10284 const struct cred *cred,
10285 bool privileged)
10286 {
10287 struct rpc_message msg = {
10288 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
10289 .rpc_cred = cred,
10290 };
10291 struct rpc_task_setup task_setup = {
10292 .rpc_client = server->client,
10293 .rpc_message = &msg,
10294 .callback_ops = &nfs41_free_stateid_ops,
10295 .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
10296 };
10297 struct nfs_free_stateid_data *data;
10298 struct rpc_task *task;
10299
10300 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
10301 &task_setup.rpc_client, &msg);
10302
10303 dprintk("NFS call free_stateid %p\n", stateid);
10304 data = kmalloc(sizeof(*data), GFP_KERNEL);
10305 if (!data)
10306 return -ENOMEM;
10307 data->server = server;
10308 nfs4_stateid_copy(&data->args.stateid, stateid);
10309
10310 task_setup.callback_data = data;
10311
10312 msg.rpc_argp = &data->args;
10313 msg.rpc_resp = &data->res;
10314 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, privileged);
10315 task = rpc_run_task(&task_setup);
10316 if (IS_ERR(task))
10317 return PTR_ERR(task);
10318 rpc_put_task(task);
10319 return 0;
10320 }
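/*
 * Note that nfs41_free_stateid() returns as soon as the task has been
 * launched: rpc_put_task() only drops this caller's reference and does
 * not wait for the reply, so a return of 0 means "queued", not "freed
 * on the server".
 */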
10321
10322 static void
10323 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
10324 {
10325 const struct cred *cred = lsp->ls_state->owner->so_cred;
10326
10327 nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
10328 nfs4_free_lock_state(server, lsp);
10329 }
10330
10331 static bool nfs41_match_stateid(const nfs4_stateid *s1,
10332 const nfs4_stateid *s2)
10333 {
10334 if (s1->type != s2->type)
10335 return false;
10336
10337 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
10338 return false;
10339
10340 if (s1->seqid == s2->seqid)
10341 return true;
10342
10343 return s1->seqid == 0 || s2->seqid == 0;
10344 }
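/*
 * Stateid comparison rule used above: the type and "other" fields must
 * match exactly, while a seqid of zero on either side acts as a
 * wildcard, matching the NFSv4.1 convention that a zero seqid tells
 * the server not to check the sequence number.
 */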
10345
10346 #endif /* CONFIG_NFS_V4_1 */
10347
10348 static bool nfs4_match_stateid(const nfs4_stateid *s1,
10349 const nfs4_stateid *s2)
10350 {
10351 return nfs4_stateid_match(s1, s2);
10352 }
10353
10354
10355 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
10356 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
10357 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
10358 .recover_open = nfs4_open_reclaim,
10359 .recover_lock = nfs4_lock_reclaim,
10360 .establish_clid = nfs4_init_clientid,
10361 .detect_trunking = nfs40_discover_server_trunking,
10362 };
10363
10364 #if defined(CONFIG_NFS_V4_1)
10365 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
10366 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
10367 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
10368 .recover_open = nfs4_open_reclaim,
10369 .recover_lock = nfs4_lock_reclaim,
10370 .establish_clid = nfs41_init_clientid,
10371 .reclaim_complete = nfs41_proc_reclaim_complete,
10372 .detect_trunking = nfs41_discover_server_trunking,
10373 };
10374 #endif /* CONFIG_NFS_V4_1 */
10375
10376 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
10377 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
10378 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
10379 .recover_open = nfs40_open_expired,
10380 .recover_lock = nfs4_lock_expired,
10381 .establish_clid = nfs4_init_clientid,
10382 };
10383
10384 #if defined(CONFIG_NFS_V4_1)
10385 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
10386 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
10387 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
10388 .recover_open = nfs41_open_expired,
10389 .recover_lock = nfs41_lock_expired,
10390 .establish_clid = nfs41_init_clientid,
10391 };
10392 #endif /* CONFIG_NFS_V4_1 */
10393
10394 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
10395 .sched_state_renewal = nfs4_proc_async_renew,
10396 .get_state_renewal_cred = nfs4_get_renew_cred,
10397 .renew_lease = nfs4_proc_renew,
10398 };
10399
10400 #if defined(CONFIG_NFS_V4_1)
10401 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
10402 .sched_state_renewal = nfs41_proc_async_sequence,
10403 .get_state_renewal_cred = nfs4_get_machine_cred,
10404 .renew_lease = nfs4_proc_sequence,
10405 };
10406 #endif
10407
10408 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
10409 .get_locations = _nfs40_proc_get_locations,
10410 .fsid_present = _nfs40_proc_fsid_present,
10411 };
10412
10413 #if defined(CONFIG_NFS_V4_1)
10414 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
10415 .get_locations = _nfs41_proc_get_locations,
10416 .fsid_present = _nfs41_proc_fsid_present,
10417 };
10418 #endif /* CONFIG_NFS_V4_1 */
10419
10420 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
10421 .minor_version = 0,
10422 .init_caps = NFS_CAP_READDIRPLUS
10423 | NFS_CAP_ATOMIC_OPEN
10424 | NFS_CAP_POSIX_LOCK,
10425 .init_client = nfs40_init_client,
10426 .shutdown_client = nfs40_shutdown_client,
10427 .match_stateid = nfs4_match_stateid,
10428 .find_root_sec = nfs4_find_root_sec,
10429 .free_lock_state = nfs4_release_lockowner,
10430 .test_and_free_expired = nfs40_test_and_free_expired_stateid,
10431 .alloc_seqid = nfs_alloc_seqid,
10432 .call_sync_ops = &nfs40_call_sync_ops,
10433 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
10434 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
10435 .state_renewal_ops = &nfs40_state_renewal_ops,
10436 .mig_recovery_ops = &nfs40_mig_recovery_ops,
10437 };
10438
10439 #if defined(CONFIG_NFS_V4_1)
10440 static struct nfs_seqid *
10441 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
10442 {
10443 return NULL;
10444 }
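/*
 * NFSv4.1 sessions provide exactly-once semantics, which makes the
 * OPEN/LOCK owner seqids of NFSv4.0 redundant; the v4.1 and v4.2 ops
 * therefore use this stub allocator, which hands back NULL instead of
 * a real nfs_seqid.
 */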
10445
10446 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
10447 .minor_version = 1,
10448 .init_caps = NFS_CAP_READDIRPLUS
10449 | NFS_CAP_ATOMIC_OPEN
10450 | NFS_CAP_POSIX_LOCK
10451 | NFS_CAP_STATEID_NFSV41
10452 | NFS_CAP_ATOMIC_OPEN_V1
10453 | NFS_CAP_LGOPEN
10454 | NFS_CAP_MOVEABLE,
10455 .init_client = nfs41_init_client,
10456 .shutdown_client = nfs41_shutdown_client,
10457 .match_stateid = nfs41_match_stateid,
10458 .find_root_sec = nfs41_find_root_sec,
10459 .free_lock_state = nfs41_free_lock_state,
10460 .test_and_free_expired = nfs41_test_and_free_expired_stateid,
10461 .alloc_seqid = nfs_alloc_no_seqid,
10462 .session_trunk = nfs4_test_session_trunk,
10463 .call_sync_ops = &nfs41_call_sync_ops,
10464 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
10465 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
10466 .state_renewal_ops = &nfs41_state_renewal_ops,
10467 .mig_recovery_ops = &nfs41_mig_recovery_ops,
10468 };
10469 #endif
10470
10471 #if defined(CONFIG_NFS_V4_2)
10472 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
10473 .minor_version = 2,
10474 .init_caps = NFS_CAP_READDIRPLUS
10475 | NFS_CAP_ATOMIC_OPEN
10476 | NFS_CAP_POSIX_LOCK
10477 | NFS_CAP_STATEID_NFSV41
10478 | NFS_CAP_ATOMIC_OPEN_V1
10479 | NFS_CAP_LGOPEN
10480 | NFS_CAP_ALLOCATE
10481 | NFS_CAP_COPY
10482 | NFS_CAP_OFFLOAD_CANCEL
10483 | NFS_CAP_COPY_NOTIFY
10484 | NFS_CAP_DEALLOCATE
10485 | NFS_CAP_SEEK
10486 | NFS_CAP_LAYOUTSTATS
10487 | NFS_CAP_CLONE
10488 | NFS_CAP_LAYOUTERROR
10489 | NFS_CAP_READ_PLUS
10490 | NFS_CAP_MOVEABLE,
10491 .init_client = nfs41_init_client,
10492 .shutdown_client = nfs41_shutdown_client,
10493 .match_stateid = nfs41_match_stateid,
10494 .find_root_sec = nfs41_find_root_sec,
10495 .free_lock_state = nfs41_free_lock_state,
10496 .call_sync_ops = &nfs41_call_sync_ops,
10497 .test_and_free_expired = nfs41_test_and_free_expired_stateid,
10498 .alloc_seqid = nfs_alloc_no_seqid,
10499 .session_trunk = nfs4_test_session_trunk,
10500 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
10501 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
10502 .state_renewal_ops = &nfs41_state_renewal_ops,
10503 .mig_recovery_ops = &nfs41_mig_recovery_ops,
10504 };
10505 #endif
10506
10507 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
10508 [0] = &nfs_v4_0_minor_ops,
10509 #if defined(CONFIG_NFS_V4_1)
10510 [1] = &nfs_v4_1_minor_ops,
10511 #endif
10512 #if defined(CONFIG_NFS_V4_2)
10513 [2] = &nfs_v4_2_minor_ops,
10514 #endif
10515 };
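/*
 * nfs_v4_minor_ops[] is indexed by the mount's minorversion, so client
 * setup can select its vtable with something like (illustrative only):
 *
 *	clp->cl_mvops = nfs_v4_minor_ops[clp->cl_minorversion];
 *
 * which is also why the v4.1 and v4.2 entries only exist when the
 * corresponding CONFIG_NFS_V4_x options are enabled.
 */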
10516
10517 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
10518 {
10519 ssize_t error, error2, error3;
10520
10521 error = generic_listxattr(dentry, list, size);
10522 if (error < 0)
10523 return error;
10524 if (list) {
10525 list += error;
10526 size -= error;
10527 }
10528
10529 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size);
10530 if (error2 < 0)
10531 return error2;
10532
10533 if (list) {
10534 list += error2;
10535 size -= error2;
10536 }
10537
10538 error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, size);
10539 if (error3 < 0)
10540 return error3;
10541
10542 return error + error2 + error3;
10543 }
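/*
 * nfs4_listxattr() concatenates three name lists into the caller's
 * buffer: the handler-provided names from generic_listxattr() (e.g.
 * system.nfs4_acl), the security-label name when
 * CONFIG_NFS_V4_SECURITY_LABEL is enabled, and any user. attributes
 * known for NFSv4.2.  With a NULL @list each helper only reports the
 * space it would need, so the sum doubles as a size probe in the usual
 * listxattr fashion.
 */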
10544
10545 static void nfs4_enable_swap(struct inode *inode)
10546 {
10547 /* The state manager thread must always be running.
10548 * It will notice the client is a swapper, and stay put.
10549 */
10550 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
10551
10552 nfs4_schedule_state_manager(clp);
10553 }
10554
10555 static void nfs4_disable_swap(struct inode *inode)
10556 {
10557 /* The state manager thread will now exit once it is
10558 * woken.
10559 */
10560 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
10561
10562 set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
10563 clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
10564 wake_up_var(&clp->cl_state);
10565 }
10566
10567 static const struct inode_operations nfs4_dir_inode_operations = {
10568 .create = nfs_create,
10569 .lookup = nfs_lookup,
10570 .atomic_open = nfs_atomic_open,
10571 .link = nfs_link,
10572 .unlink = nfs_unlink,
10573 .symlink = nfs_symlink,
10574 .mkdir = nfs_mkdir,
10575 .rmdir = nfs_rmdir,
10576 .mknod = nfs_mknod,
10577 .rename = nfs_rename,
10578 .permission = nfs_permission,
10579 .getattr = nfs_getattr,
10580 .setattr = nfs_setattr,
10581 .listxattr = nfs4_listxattr,
10582 };
10583
10584 static const struct inode_operations nfs4_file_inode_operations = {
10585 .permission = nfs_permission,
10586 .getattr = nfs_getattr,
10587 .setattr = nfs_setattr,
10588 .listxattr = nfs4_listxattr,
10589 };
10590
10591 const struct nfs_rpc_ops nfs_v4_clientops = {
10592 .version = 4, /* protocol version */
10593 .dentry_ops = &nfs4_dentry_operations,
10594 .dir_inode_ops = &nfs4_dir_inode_operations,
10595 .file_inode_ops = &nfs4_file_inode_operations,
10596 .file_ops = &nfs4_file_operations,
10597 .getroot = nfs4_proc_get_root,
10598 .submount = nfs4_submount,
10599 .try_get_tree = nfs4_try_get_tree,
10600 .getattr = nfs4_proc_getattr,
10601 .setattr = nfs4_proc_setattr,
10602 .lookup = nfs4_proc_lookup,
10603 .lookupp = nfs4_proc_lookupp,
10604 .access = nfs4_proc_access,
10605 .readlink = nfs4_proc_readlink,
10606 .create = nfs4_proc_create,
10607 .remove = nfs4_proc_remove,
10608 .unlink_setup = nfs4_proc_unlink_setup,
10609 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
10610 .unlink_done = nfs4_proc_unlink_done,
10611 .rename_setup = nfs4_proc_rename_setup,
10612 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
10613 .rename_done = nfs4_proc_rename_done,
10614 .link = nfs4_proc_link,
10615 .symlink = nfs4_proc_symlink,
10616 .mkdir = nfs4_proc_mkdir,
10617 .rmdir = nfs4_proc_rmdir,
10618 .readdir = nfs4_proc_readdir,
10619 .mknod = nfs4_proc_mknod,
10620 .statfs = nfs4_proc_statfs,
10621 .fsinfo = nfs4_proc_fsinfo,
10622 .pathconf = nfs4_proc_pathconf,
10623 .set_capabilities = nfs4_server_capabilities,
10624 .decode_dirent = nfs4_decode_dirent,
10625 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
10626 .read_setup = nfs4_proc_read_setup,
10627 .read_done = nfs4_read_done,
10628 .write_setup = nfs4_proc_write_setup,
10629 .write_done = nfs4_write_done,
10630 .commit_setup = nfs4_proc_commit_setup,
10631 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
10632 .commit_done = nfs4_commit_done,
10633 .lock = nfs4_proc_lock,
10634 .clear_acl_cache = nfs4_zap_acl_attr,
10635 .close_context = nfs4_close_context,
10636 .open_context = nfs4_atomic_open,
10637 .have_delegation = nfs4_have_delegation,
10638 .alloc_client = nfs4_alloc_client,
10639 .init_client = nfs4_init_client,
10640 .free_client = nfs4_free_client,
10641 .create_server = nfs4_create_server,
10642 .clone_server = nfs_clone_server,
10643 .discover_trunking = nfs4_discover_trunking,
10644 .enable_swap = nfs4_enable_swap,
10645 .disable_swap = nfs4_disable_swap,
10646 };
10647
10648 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
10649 .name = XATTR_NAME_NFSV4_ACL,
10650 .list = nfs4_xattr_list_nfs4_acl,
10651 .get = nfs4_xattr_get_nfs4_acl,
10652 .set = nfs4_xattr_set_nfs4_acl,
10653 };
10654
10655 #ifdef CONFIG_NFS_V4_2
10656 static const struct xattr_handler nfs4_xattr_nfs4_user_handler = {
10657 .prefix = XATTR_USER_PREFIX,
10658 .get = nfs4_xattr_get_nfs4_user,
10659 .set = nfs4_xattr_set_nfs4_user,
10660 };
10661 #endif
10662
10663 const struct xattr_handler *nfs4_xattr_handlers[] = {
10664 &nfs4_xattr_nfs4_acl_handler,
10665 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
10666 &nfs4_xattr_nfs4_label_handler,
10667 #endif
10668 #ifdef CONFIG_NFS_V4_2
10669 &nfs4_xattr_nfs4_user_handler,
10670 #endif
10671 NULL
10672 };
10673