1 /*
2 * Copyright (c) 2001 The Regents of the University of Michigan.
3 * All rights reserved.
4 *
5 * Kendrick Smith <kmsmith@umich.edu>
6 * Andy Adamson <kandros@umich.edu>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the name of the University nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35 #include <linux/file.h>
36 #include <linux/fs.h>
37 #include <linux/slab.h>
38 #include <linux/namei.h>
39 #include <linux/swap.h>
40 #include <linux/pagemap.h>
41 #include <linux/ratelimit.h>
42 #include <linux/sunrpc/svcauth_gss.h>
43 #include <linux/sunrpc/addr.h>
44 #include <linux/jhash.h>
45 #include <linux/string_helpers.h>
46 #include "xdr4.h"
47 #include "xdr4cb.h"
48 #include "vfs.h"
49 #include "current_stateid.h"
50
51 #include "netns.h"
52 #include "pnfs.h"
53 #include "filecache.h"
54
55 #define NFSDDBG_FACILITY NFSDDBG_PROC
56
57 #define all_ones {{~0,~0},~0}
58 static const stateid_t one_stateid = {
59 .si_generation = ~0,
60 .si_opaque = all_ones,
61 };
62 static const stateid_t zero_stateid = {
63 /* all fields zero */
64 };
65 static const stateid_t currentstateid = {
66 .si_generation = 1,
67 };
68 static const stateid_t close_stateid = {
69 .si_generation = 0xffffffffU,
70 };
71
72 static u64 current_sessionid = 1;
73
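/*
 * The all-zeros and all-ones stateids defined above are the special
 * stateids from the NFSv4 protocol; currentstateid and close_stateid are
 * server-internal sentinels used by the current-stateid machinery and by
 * CLOSE processing. The macros below identify an incoming stateid as one
 * of these by byte comparison.
 */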
74 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
75 #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
76 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
77 #define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
78
79 /* forward declarations */
80 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
81 static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
82 void nfsd4_end_grace(struct nfsd_net *nn);
83
84 /* Locking: */
85
86 /*
87 * Currently used for the del_recall_lru and file hash table. In an
88 * effort to decrease the scope of the client_mutex, this spinlock may
89 * eventually cover more:
90 */
91 static DEFINE_SPINLOCK(state_lock);
92
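/*
 * Lockdep nesting subclasses for a stateid's st_mutex, so that taking a
 * lock stateid's mutex while the corresponding open stateid's mutex is
 * already held is not reported as recursive locking.
 */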
93 enum nfsd4_st_mutex_lock_subclass {
94 OPEN_STATEID_MUTEX = 0,
95 LOCK_STATEID_MUTEX = 1,
96 };
97
98 /*
99 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
100 * the refcount on the open stateid to drop.
101 */
102 static DECLARE_WAIT_QUEUE_HEAD(close_wq);
103
104 /*
105 * A waitqueue where a writer to clients/#/ctl destroying a client can
106 * wait for cl_rpc_users to drop to 0 and then for the client to be
107 * unhashed.
108 */
109 static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);
110
111 static struct kmem_cache *client_slab;
112 static struct kmem_cache *openowner_slab;
113 static struct kmem_cache *lockowner_slab;
114 static struct kmem_cache *file_slab;
115 static struct kmem_cache *stateid_slab;
116 static struct kmem_cache *deleg_slab;
117 static struct kmem_cache *odstate_slab;
118
119 static void free_session(struct nfsd4_session *);
120
121 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
122 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
123
124 static bool is_session_dead(struct nfsd4_session *ses)
125 {
126 return ses->se_flags & NFS4_SESSION_DEAD;
127 }
128
129 static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
130 {
131 if (atomic_read(&ses->se_ref) > ref_held_by_me)
132 return nfserr_jukebox;
133 ses->se_flags |= NFS4_SESSION_DEAD;
134 return nfs_ok;
135 }
136
137 static bool is_client_expired(struct nfs4_client *clp)
138 {
139 return clp->cl_time == 0;
140 }
141
142 static __be32 get_client_locked(struct nfs4_client *clp)
143 {
144 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
145
146 lockdep_assert_held(&nn->client_lock);
147
148 if (is_client_expired(clp))
149 return nfserr_expired;
150 atomic_inc(&clp->cl_rpc_users);
151 return nfs_ok;
152 }
153
154 /* must be called under the client_lock */
155 static inline void
156 renew_client_locked(struct nfs4_client *clp)
157 {
158 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
159
160 if (is_client_expired(clp)) {
161 WARN_ON(1);
162 printk("%s: client (clientid %08x/%08x) already expired\n",
163 __func__,
164 clp->cl_clientid.cl_boot,
165 clp->cl_clientid.cl_id);
166 return;
167 }
168
169 dprintk("renewing client (clientid %08x/%08x)\n",
170 clp->cl_clientid.cl_boot,
171 clp->cl_clientid.cl_id);
172 list_move_tail(&clp->cl_lru, &nn->client_lru);
173 clp->cl_time = get_seconds();
174 }
175
176 static void put_client_renew_locked(struct nfs4_client *clp)
177 {
178 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
179
180 lockdep_assert_held(&nn->client_lock);
181
182 if (!atomic_dec_and_test(&clp->cl_rpc_users))
183 return;
184 if (!is_client_expired(clp))
185 renew_client_locked(clp);
186 else
187 wake_up_all(&expiry_wq);
188 }
189
190 static void put_client_renew(struct nfs4_client *clp)
191 {
192 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
193
194 if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
195 return;
196 if (!is_client_expired(clp))
197 renew_client_locked(clp);
198 else
199 wake_up_all(&expiry_wq);
200 spin_unlock(&nn->client_lock);
201 }
202
203 static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
204 {
205 __be32 status;
206
207 if (is_session_dead(ses))
208 return nfserr_badsession;
209 status = get_client_locked(ses->se_client);
210 if (status)
211 return status;
212 atomic_inc(&ses->se_ref);
213 return nfs_ok;
214 }
215
216 static void nfsd4_put_session_locked(struct nfsd4_session *ses)
217 {
218 struct nfs4_client *clp = ses->se_client;
219 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
220
221 lockdep_assert_held(&nn->client_lock);
222
223 if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
224 free_session(ses);
225 put_client_renew_locked(clp);
226 }
227
228 static void nfsd4_put_session(struct nfsd4_session *ses)
229 {
230 struct nfs4_client *clp = ses->se_client;
231 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
232
233 spin_lock(&nn->client_lock);
234 nfsd4_put_session_locked(ses);
235 spin_unlock(&nn->client_lock);
236 }
237
238 static struct nfsd4_blocked_lock *
239 find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
240 struct nfsd_net *nn)
241 {
242 struct nfsd4_blocked_lock *cur, *found = NULL;
243
244 spin_lock(&nn->blocked_locks_lock);
245 list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
246 if (fh_match(fh, &cur->nbl_fh)) {
247 list_del_init(&cur->nbl_list);
248 list_del_init(&cur->nbl_lru);
249 found = cur;
250 break;
251 }
252 }
253 spin_unlock(&nn->blocked_locks_lock);
254 if (found)
255 locks_delete_block(&found->nbl_lock);
256 return found;
257 }
258
259 static struct nfsd4_blocked_lock *
260 find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
261 struct nfsd_net *nn)
262 {
263 struct nfsd4_blocked_lock *nbl;
264
265 nbl = find_blocked_lock(lo, fh, nn);
266 if (!nbl) {
267 nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
268 if (nbl) {
269 INIT_LIST_HEAD(&nbl->nbl_list);
270 INIT_LIST_HEAD(&nbl->nbl_lru);
271 fh_copy_shallow(&nbl->nbl_fh, fh);
272 locks_init_lock(&nbl->nbl_lock);
273 nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
274 &nfsd4_cb_notify_lock_ops,
275 NFSPROC4_CLNT_CB_NOTIFY_LOCK);
276 }
277 }
278 return nbl;
279 }
280
281 static void
282 free_blocked_lock(struct nfsd4_blocked_lock *nbl)
283 {
284 locks_delete_block(&nbl->nbl_lock);
285 locks_release_private(&nbl->nbl_lock);
286 kfree(nbl);
287 }
288
289 static void
290 remove_blocked_locks(struct nfs4_lockowner *lo)
291 {
292 struct nfs4_client *clp = lo->lo_owner.so_client;
293 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
294 struct nfsd4_blocked_lock *nbl;
295 LIST_HEAD(reaplist);
296
297 /* Dequeue all blocked locks */
298 spin_lock(&nn->blocked_locks_lock);
299 while (!list_empty(&lo->lo_blocked)) {
300 nbl = list_first_entry(&lo->lo_blocked,
301 struct nfsd4_blocked_lock,
302 nbl_list);
303 list_del_init(&nbl->nbl_list);
304 list_move(&nbl->nbl_lru, &reaplist);
305 }
306 spin_unlock(&nn->blocked_locks_lock);
307
308 /* Now free them */
309 while (!list_empty(&reaplist)) {
310 nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
311 nbl_lru);
312 list_del_init(&nbl->nbl_lru);
313 free_blocked_lock(nbl);
314 }
315 }
316
317 static void
318 nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
319 {
320 struct nfsd4_blocked_lock *nbl = container_of(cb,
321 struct nfsd4_blocked_lock, nbl_cb);
322 locks_delete_block(&nbl->nbl_lock);
323 }
324
325 static int
326 nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
327 {
328 /*
329 * Since this is just an optimization, we don't try very hard if it
330 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
331 * just quit trying on anything else.
332 */
333 switch (task->tk_status) {
334 case -NFS4ERR_DELAY:
335 rpc_delay(task, 1 * HZ);
336 return 0;
337 default:
338 return 1;
339 }
340 }
341
342 static void
343 nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
344 {
345 struct nfsd4_blocked_lock *nbl = container_of(cb,
346 struct nfsd4_blocked_lock, nbl_cb);
347
348 free_blocked_lock(nbl);
349 }
350
351 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
352 .prepare = nfsd4_cb_notify_lock_prepare,
353 .done = nfsd4_cb_notify_lock_done,
354 .release = nfsd4_cb_notify_lock_release,
355 };
356
357 static inline struct nfs4_stateowner *
358 nfs4_get_stateowner(struct nfs4_stateowner *sop)
359 {
360 atomic_inc(&sop->so_count);
361 return sop;
362 }
363
364 static int
365 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
366 {
367 return (sop->so_owner.len == owner->len) &&
368 0 == memcmp(sop->so_owner.data, owner->data, owner->len);
369 }
370
371 static struct nfs4_openowner *
372 find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
373 struct nfs4_client *clp)
374 {
375 struct nfs4_stateowner *so;
376
377 lockdep_assert_held(&clp->cl_lock);
378
379 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
380 so_strhash) {
381 if (!so->so_is_open_owner)
382 continue;
383 if (same_owner_str(so, &open->op_owner))
384 return openowner(nfs4_get_stateowner(so));
385 }
386 return NULL;
387 }
388
389 static struct nfs4_openowner *
390 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
391 struct nfs4_client *clp)
392 {
393 struct nfs4_openowner *oo;
394
395 spin_lock(&clp->cl_lock);
396 oo = find_openstateowner_str_locked(hashval, open, clp);
397 spin_unlock(&clp->cl_lock);
398 return oo;
399 }
400
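/*
 * Simple multiply-by-37 rolling hash over an opaque byte string (owner
 * names, client identifiers); callers mask the result down to the size of
 * their own hash table.
 */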
401 static inline u32
402 opaque_hashval(const void *ptr, int nbytes)
403 {
404 unsigned char *cptr = (unsigned char *) ptr;
405
406 u32 x = 0;
407 while (nbytes--) {
408 x *= 37;
409 x += *cptr++;
410 }
411 return x;
412 }
413
414 static void nfsd4_free_file_rcu(struct rcu_head *rcu)
415 {
416 struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
417
418 kmem_cache_free(file_slab, fp);
419 }
420
421 void
422 put_nfs4_file(struct nfs4_file *fi)
423 {
424 might_lock(&state_lock);
425
426 if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
427 hlist_del_rcu(&fi->fi_hash);
428 spin_unlock(&state_lock);
429 WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
430 WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
431 call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
432 }
433 }
434
435 static struct nfsd_file *
436 __nfs4_get_fd(struct nfs4_file *f, int oflag)
437 {
438 if (f->fi_fds[oflag])
439 return nfsd_file_get(f->fi_fds[oflag]);
440 return NULL;
441 }
442
443 static struct nfsd_file *
444 find_writeable_file_locked(struct nfs4_file *f)
445 {
446 struct nfsd_file *ret;
447
448 lockdep_assert_held(&f->fi_lock);
449
450 ret = __nfs4_get_fd(f, O_WRONLY);
451 if (!ret)
452 ret = __nfs4_get_fd(f, O_RDWR);
453 return ret;
454 }
455
456 static struct nfsd_file *
457 find_writeable_file(struct nfs4_file *f)
458 {
459 struct nfsd_file *ret;
460
461 spin_lock(&f->fi_lock);
462 ret = find_writeable_file_locked(f);
463 spin_unlock(&f->fi_lock);
464
465 return ret;
466 }
467
468 static struct nfsd_file *
469 find_readable_file_locked(struct nfs4_file *f)
470 {
471 struct nfsd_file *ret;
472
473 lockdep_assert_held(&f->fi_lock);
474
475 ret = __nfs4_get_fd(f, O_RDONLY);
476 if (!ret)
477 ret = __nfs4_get_fd(f, O_RDWR);
478 return ret;
479 }
480
481 static struct nfsd_file *
482 find_readable_file(struct nfs4_file *f)
483 {
484 struct nfsd_file *ret;
485
486 spin_lock(&f->fi_lock);
487 ret = find_readable_file_locked(f);
488 spin_unlock(&f->fi_lock);
489
490 return ret;
491 }
492
493 struct nfsd_file *
494 find_any_file(struct nfs4_file *f)
495 {
496 struct nfsd_file *ret;
497
498 if (!f)
499 return NULL;
500 spin_lock(&f->fi_lock);
501 ret = __nfs4_get_fd(f, O_RDWR);
502 if (!ret) {
503 ret = __nfs4_get_fd(f, O_WRONLY);
504 if (!ret)
505 ret = __nfs4_get_fd(f, O_RDONLY);
506 }
507 spin_unlock(&f->fi_lock);
508 return ret;
509 }
510
511 static struct nfsd_file *find_any_file_locked(struct nfs4_file *f)
512 {
513 lockdep_assert_held(&f->fi_lock);
514
515 if (f->fi_fds[O_RDWR])
516 return f->fi_fds[O_RDWR];
517 if (f->fi_fds[O_WRONLY])
518 return f->fi_fds[O_WRONLY];
519 if (f->fi_fds[O_RDONLY])
520 return f->fi_fds[O_RDONLY];
521 return NULL;
522 }
523
524 static struct nfsd_file *find_deleg_file_locked(struct nfs4_file *f)
525 {
526 lockdep_assert_held(&f->fi_lock);
527
528 if (f->fi_deleg_file)
529 return f->fi_deleg_file;
530 return NULL;
531 }
532
533 static atomic_long_t num_delegations;
534 unsigned long max_delegations;
535
536 /*
537 * Open owner state (share locks)
538 */
539
540 /* hash tables for lock and open owners */
541 #define OWNER_HASH_BITS 8
542 #define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
543 #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
544
545 static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
546 {
547 unsigned int ret;
548
549 ret = opaque_hashval(ownername->data, ownername->len);
550 return ret & OWNER_HASH_MASK;
551 }
552
553 /* hash table for nfs4_file */
554 #define FILE_HASH_BITS 8
555 #define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
556
557 static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
558 {
559 return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
560 }
561
562 static unsigned int file_hashval(struct knfsd_fh *fh)
563 {
564 return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
565 }
566
567 static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
568
569 static void
570 __nfs4_file_get_access(struct nfs4_file *fp, u32 access)
571 {
572 lockdep_assert_held(&fp->fi_lock);
573
574 if (access & NFS4_SHARE_ACCESS_WRITE)
575 atomic_inc(&fp->fi_access[O_WRONLY]);
576 if (access & NFS4_SHARE_ACCESS_READ)
577 atomic_inc(&fp->fi_access[O_RDONLY]);
578 }
579
580 static __be32
581 nfs4_file_get_access(struct nfs4_file *fp, u32 access)
582 {
583 lockdep_assert_held(&fp->fi_lock);
584
585 /* Does this access mode make sense? */
586 if (access & ~NFS4_SHARE_ACCESS_BOTH)
587 return nfserr_inval;
588
589 /* Does it conflict with a deny mode already set? */
590 if ((access & fp->fi_share_deny) != 0)
591 return nfserr_share_denied;
592
593 __nfs4_file_get_access(fp, access);
594 return nfs_ok;
595 }
596
597 static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
598 {
599 /* Common case is that there is no deny mode. */
600 if (deny) {
601 /* Does this deny mode make sense? */
602 if (deny & ~NFS4_SHARE_DENY_BOTH)
603 return nfserr_inval;
604
605 if ((deny & NFS4_SHARE_DENY_READ) &&
606 atomic_read(&fp->fi_access[O_RDONLY]))
607 return nfserr_share_denied;
608
609 if ((deny & NFS4_SHARE_DENY_WRITE) &&
610 atomic_read(&fp->fi_access[O_WRONLY]))
611 return nfserr_share_denied;
612 }
613 return nfs_ok;
614 }
615
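/*
 * Drop one reference on the given access mode. When the last reference to
 * that mode goes away, detach the cached nfsd_file for it (and the O_RDWR
 * file too, if the other mode is also unused) under fi_lock, and put the
 * files only after the lock is dropped, since nfsd_file_put() may sleep.
 */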
616 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
617 {
618 might_lock(&fp->fi_lock);
619
620 if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
621 struct nfsd_file *f1 = NULL;
622 struct nfsd_file *f2 = NULL;
623
624 swap(f1, fp->fi_fds[oflag]);
625 if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
626 swap(f2, fp->fi_fds[O_RDWR]);
627 spin_unlock(&fp->fi_lock);
628 if (f1)
629 nfsd_file_put(f1);
630 if (f2)
631 nfsd_file_put(f2);
632 }
633 }
634
635 static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
636 {
637 WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);
638
639 if (access & NFS4_SHARE_ACCESS_WRITE)
640 __nfs4_file_put_access(fp, O_WRONLY);
641 if (access & NFS4_SHARE_ACCESS_READ)
642 __nfs4_file_put_access(fp, O_RDONLY);
643 }
644
645 /*
646 * Allocate a new open/delegation state counter. This is needed for
647 * pNFS for proper return on close semantics.
648 *
649 * Note that we only allocate it for pNFS-enabled exports, otherwise
650 * all pointers to struct nfs4_clnt_odstate are always NULL.
651 */
652 static struct nfs4_clnt_odstate *
653 alloc_clnt_odstate(struct nfs4_client *clp)
654 {
655 struct nfs4_clnt_odstate *co;
656
657 co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
658 if (co) {
659 co->co_client = clp;
660 refcount_set(&co->co_odcount, 1);
661 }
662 return co;
663 }
664
665 static void
666 hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
667 {
668 struct nfs4_file *fp = co->co_file;
669
670 lockdep_assert_held(&fp->fi_lock);
671 list_add(&co->co_perfile, &fp->fi_clnt_odstate);
672 }
673
674 static inline void
675 get_clnt_odstate(struct nfs4_clnt_odstate *co)
676 {
677 if (co)
678 refcount_inc(&co->co_odcount);
679 }
680
681 static void
682 put_clnt_odstate(struct nfs4_clnt_odstate *co)
683 {
684 struct nfs4_file *fp;
685
686 if (!co)
687 return;
688
689 fp = co->co_file;
690 if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
691 list_del(&co->co_perfile);
692 spin_unlock(&fp->fi_lock);
693
694 nfsd4_return_all_file_layouts(co->co_client, fp);
695 kmem_cache_free(odstate_slab, co);
696 }
697 }
698
699 static struct nfs4_clnt_odstate *
700 find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
701 {
702 struct nfs4_clnt_odstate *co;
703 struct nfs4_client *cl;
704
705 if (!new)
706 return NULL;
707
708 cl = new->co_client;
709
710 spin_lock(&fp->fi_lock);
711 list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
712 if (co->co_client == cl) {
713 get_clnt_odstate(co);
714 goto out;
715 }
716 }
717 co = new;
718 co->co_file = fp;
719 hash_clnt_odstate_locked(new);
720 out:
721 spin_unlock(&fp->fi_lock);
722 return co;
723 }
724
725 struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
726 void (*sc_free)(struct nfs4_stid *))
727 {
728 struct nfs4_stid *stid;
729 int new_id;
730
731 stid = kmem_cache_zalloc(slab, GFP_KERNEL);
732 if (!stid)
733 return NULL;
734
735 idr_preload(GFP_KERNEL);
736 spin_lock(&cl->cl_lock);
737 /* Reserving 0 for start of file in nfsdfs "states" file: */
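	/*
	 * idr_alloc_cyclic() hands out ids in increasing order, wrapping at
	 * INT_MAX; see the comment before the return below on why reuse of
	 * stateid values is deliberately delayed this way.
	 */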
738 new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
739 spin_unlock(&cl->cl_lock);
740 idr_preload_end();
741 if (new_id < 0)
742 goto out_free;
743
744 stid->sc_free = sc_free;
745 stid->sc_client = cl;
746 stid->sc_stateid.si_opaque.so_id = new_id;
747 stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
748 /* Will be incremented before return to client: */
749 refcount_set(&stid->sc_count, 1);
750 spin_lock_init(&stid->sc_lock);
751
752 /*
753 * It shouldn't be a problem to reuse an opaque stateid value.
754 * I don't think it is for 4.1. But with 4.0 I worry that, for
755 * example, a stray write retransmission could be accepted by
756 * the server when it should have been rejected. Therefore,
757 * adopt a trick from the sctp code to attempt to maximize the
758 * amount of time until an id is reused, by ensuring they always
759 * "increase" (mod INT_MAX):
760 */
761 return stid;
762 out_free:
763 kmem_cache_free(slab, stid);
764 return NULL;
765 }
766
767 /*
768 * Create a unique stateid_t to represent each COPY.
769 */
770 int nfs4_init_cp_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
771 {
772 int new_id;
773
774 idr_preload(GFP_KERNEL);
775 spin_lock(&nn->s2s_cp_lock);
776 new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, copy, 0, 0, GFP_NOWAIT);
777 spin_unlock(&nn->s2s_cp_lock);
778 idr_preload_end();
779 if (new_id < 0)
780 return 0;
781 copy->cp_stateid.si_opaque.so_id = new_id;
782 copy->cp_stateid.si_opaque.so_clid.cl_boot = nn->boot_time;
783 copy->cp_stateid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
784 return 1;
785 }
786
787 void nfs4_free_cp_state(struct nfsd4_copy *copy)
788 {
789 struct nfsd_net *nn;
790
791 nn = net_generic(copy->cp_clp->net, nfsd_net_id);
792 spin_lock(&nn->s2s_cp_lock);
793 idr_remove(&nn->s2s_cp_stateids, copy->cp_stateid.si_opaque.so_id);
794 spin_unlock(&nn->s2s_cp_lock);
795 }
796
797 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
798 {
799 struct nfs4_stid *stid;
800
801 stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
802 if (!stid)
803 return NULL;
804
805 return openlockstateid(stid);
806 }
807
808 static void nfs4_free_deleg(struct nfs4_stid *stid)
809 {
810 kmem_cache_free(deleg_slab, stid);
811 atomic_long_dec(&num_delegations);
812 }
813
814 /*
815 * When we recall a delegation, we should be careful not to hand it
816 * out again straight away.
817 * To ensure this we keep a pair of bloom filters ('new' and 'old')
818 * in which the filehandles of recalled delegations are "stored".
819 * If a filehandle appears in either filter, a delegation is blocked.
820 * When a delegation is recalled, the filehandle is stored in the "new"
821 * filter.
822 * Every 30 seconds we swap the filters and clear the "new" one,
823 * unless both are empty of course.
824 *
825 * Each filter is 256 bits. We hash the filehandle to a 32-bit value and use the
826 * low 3 bytes as hash-table indices.
827 *
828 * 'blocked_delegations_lock', which is always taken in block_delegations(),
829 * is used to manage concurrent access. Testing does not need the lock
830 * except when swapping the two filters.
831 */
832 static DEFINE_SPINLOCK(blocked_delegations_lock);
833 static struct bloom_pair {
834 int entries, old_entries;
835 time_t swap_time;
836 int new; /* index into 'set' */
837 DECLARE_BITMAP(set[2], 256);
838 } blocked_delegations;
839
840 static int delegation_blocked(struct knfsd_fh *fh)
841 {
842 u32 hash;
843 struct bloom_pair *bd = &blocked_delegations;
844
845 if (bd->entries == 0)
846 return 0;
847 if (seconds_since_boot() - bd->swap_time > 30) {
848 spin_lock(&blocked_delegations_lock);
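		/* Recheck under the lock: another task may already have swapped the filters. */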
849 if (seconds_since_boot() - bd->swap_time > 30) {
850 bd->entries -= bd->old_entries;
851 bd->old_entries = bd->entries;
852 memset(bd->set[bd->new], 0,
853 sizeof(bd->set[0]));
854 bd->new = 1-bd->new;
855 bd->swap_time = seconds_since_boot();
856 }
857 spin_unlock(&blocked_delegations_lock);
858 }
859 hash = jhash(&fh->fh_base, fh->fh_size, 0);
860 if (test_bit(hash&255, bd->set[0]) &&
861 test_bit((hash>>8)&255, bd->set[0]) &&
862 test_bit((hash>>16)&255, bd->set[0]))
863 return 1;
864
865 if (test_bit(hash&255, bd->set[1]) &&
866 test_bit((hash>>8)&255, bd->set[1]) &&
867 test_bit((hash>>16)&255, bd->set[1]))
868 return 1;
869
870 return 0;
871 }
872
873 static void block_delegations(struct knfsd_fh *fh)
874 {
875 u32 hash;
876 struct bloom_pair *bd = &blocked_delegations;
877
878 hash = jhash(&fh->fh_base, fh->fh_size, 0);
879
880 spin_lock(&blocked_delegations_lock);
881 __set_bit(hash&255, bd->set[bd->new]);
882 __set_bit((hash>>8)&255, bd->set[bd->new]);
883 __set_bit((hash>>16)&255, bd->set[bd->new]);
884 if (bd->entries == 0)
885 bd->swap_time = seconds_since_boot();
886 bd->entries += 1;
887 spin_unlock(&blocked_delegations_lock);
888 }
889
890 static struct nfs4_delegation *
891 alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
892 struct svc_fh *current_fh,
893 struct nfs4_clnt_odstate *odstate)
894 {
895 struct nfs4_delegation *dp;
896 long n;
897
898 dprintk("NFSD alloc_init_deleg\n");
899 n = atomic_long_inc_return(&num_delegations);
900 if (n < 0 || n > max_delegations)
901 goto out_dec;
902 if (delegation_blocked(&current_fh->fh_handle))
903 goto out_dec;
904 dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
905 if (dp == NULL)
906 goto out_dec;
907
908 /*
909 * delegation seqids are never incremented. The NFSv4.1 special
910 * meaning of seqid 0 isn't relevant here, but avoid 0 anyway just
911 * for consistency and use 1:
912 */
913 dp->dl_stid.sc_stateid.si_generation = 1;
914 INIT_LIST_HEAD(&dp->dl_perfile);
915 INIT_LIST_HEAD(&dp->dl_perclnt);
916 INIT_LIST_HEAD(&dp->dl_recall_lru);
917 dp->dl_clnt_odstate = odstate;
918 get_clnt_odstate(odstate);
919 dp->dl_type = NFS4_OPEN_DELEGATE_READ;
920 dp->dl_retries = 1;
921 nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
922 &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
923 get_nfs4_file(fp);
924 dp->dl_stid.sc_file = fp;
925 return dp;
926 out_dec:
927 atomic_long_dec(&num_delegations);
928 return NULL;
929 }
930
931 void
932 nfs4_put_stid(struct nfs4_stid *s)
933 {
934 struct nfs4_file *fp = s->sc_file;
935 struct nfs4_client *clp = s->sc_client;
936
937 might_lock(&clp->cl_lock);
938
939 if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
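		/*
		 * Still referenced; just note the drop so that an NFSv4.0
		 * CLOSE waiting on close_wq (see its declaration above) can
		 * re-check the refcount.
		 */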
940 wake_up_all(&close_wq);
941 return;
942 }
943 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
944 spin_unlock(&clp->cl_lock);
945 s->sc_free(s);
946 if (fp)
947 put_nfs4_file(fp);
948 }
949
950 void
951 nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
952 {
953 stateid_t *src = &stid->sc_stateid;
954
955 spin_lock(&stid->sc_lock);
956 if (unlikely(++src->si_generation == 0))
957 src->si_generation = 1;
958 memcpy(dst, src, sizeof(*dst));
959 spin_unlock(&stid->sc_lock);
960 }
961
962 static void put_deleg_file(struct nfs4_file *fp)
963 {
964 struct nfsd_file *nf = NULL;
965
966 spin_lock(&fp->fi_lock);
967 if (--fp->fi_delegees == 0)
968 swap(nf, fp->fi_deleg_file);
969 spin_unlock(&fp->fi_lock);
970
971 if (nf)
972 nfsd_file_put(nf);
973 }
974
975 static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
976 {
977 struct nfs4_file *fp = dp->dl_stid.sc_file;
978 struct nfsd_file *nf = fp->fi_deleg_file;
979
980 WARN_ON_ONCE(!fp->fi_delegees);
981
982 vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
983 put_deleg_file(fp);
984 }
985
986 static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
987 {
988 put_clnt_odstate(dp->dl_clnt_odstate);
989 nfs4_unlock_deleg_lease(dp);
990 nfs4_put_stid(&dp->dl_stid);
991 }
992
993 void nfs4_unhash_stid(struct nfs4_stid *s)
994 {
995 s->sc_type = 0;
996 }
997
998 /**
999 * nfs4_delegation_exists - Discover if this delegation already exists
1000 * @clp: a pointer to the nfs4_client we're granting a delegation to
1001 * @fp: a pointer to the nfs4_file we're granting a delegation on
1002 *
1003 * Return:
1004 * On success: true iff an existing delegation is found
1005 */
1006
1007 static bool
1008 nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
1009 {
1010 struct nfs4_delegation *searchdp = NULL;
1011 struct nfs4_client *searchclp = NULL;
1012
1013 lockdep_assert_held(&state_lock);
1014 lockdep_assert_held(&fp->fi_lock);
1015
1016 list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
1017 searchclp = searchdp->dl_stid.sc_client;
1018 if (clp == searchclp) {
1019 return true;
1020 }
1021 }
1022 return false;
1023 }
1024
1025 /**
1026 * hash_delegation_locked - Add a delegation to the appropriate lists
1027 * @dp: a pointer to the nfs4_delegation we are adding.
1028 * @fp: a pointer to the nfs4_file we're granting a delegation on
1029 *
1030 * Return:
1031 * On success: NULL if the delegation was successfully hashed.
1032 *
1033 * On error: -EAGAIN if one was previously granted to this
1034 * nfs4_client for this nfs4_file. Delegation is not hashed.
1035 *
1036 */
1037
1038 static int
1039 hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
1040 {
1041 struct nfs4_client *clp = dp->dl_stid.sc_client;
1042
1043 lockdep_assert_held(&state_lock);
1044 lockdep_assert_held(&fp->fi_lock);
1045
1046 if (nfs4_delegation_exists(clp, fp))
1047 return -EAGAIN;
1048 refcount_inc(&dp->dl_stid.sc_count);
1049 dp->dl_stid.sc_type = NFS4_DELEG_STID;
1050 list_add(&dp->dl_perfile, &fp->fi_delegations);
1051 list_add(&dp->dl_perclnt, &clp->cl_delegations);
1052 return 0;
1053 }
1054
1055 static bool delegation_hashed(struct nfs4_delegation *dp)
1056 {
1057 return !(list_empty(&dp->dl_perfile));
1058 }
1059
1060 static bool
1061 unhash_delegation_locked(struct nfs4_delegation *dp)
1062 {
1063 struct nfs4_file *fp = dp->dl_stid.sc_file;
1064
1065 lockdep_assert_held(&state_lock);
1066
1067 if (!delegation_hashed(dp))
1068 return false;
1069
1070 dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
1071 /* Ensure that deleg break won't try to requeue it */
1072 ++dp->dl_time;
1073 spin_lock(&fp->fi_lock);
1074 list_del_init(&dp->dl_perclnt);
1075 list_del_init(&dp->dl_recall_lru);
1076 list_del_init(&dp->dl_perfile);
1077 spin_unlock(&fp->fi_lock);
1078 return true;
1079 }
1080
1081 static void destroy_delegation(struct nfs4_delegation *dp)
1082 {
1083 bool unhashed;
1084
1085 spin_lock(&state_lock);
1086 unhashed = unhash_delegation_locked(dp);
1087 spin_unlock(&state_lock);
1088 if (unhashed)
1089 destroy_unhashed_deleg(dp);
1090 }
1091
1092 static void revoke_delegation(struct nfs4_delegation *dp)
1093 {
1094 struct nfs4_client *clp = dp->dl_stid.sc_client;
1095
1096 WARN_ON(!list_empty(&dp->dl_recall_lru));
1097
1098 if (clp->cl_minorversion) {
1099 spin_lock(&clp->cl_lock);
1100 dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
1101 refcount_inc(&dp->dl_stid.sc_count);
1102 list_add(&dp->dl_recall_lru, &clp->cl_revoked);
1103 spin_unlock(&clp->cl_lock);
1104 }
1105 destroy_unhashed_deleg(dp);
1106 }
1107
1108 /*
1109 * SETCLIENTID state
1110 */
1111
1112 static unsigned int clientid_hashval(u32 id)
1113 {
1114 return id & CLIENT_HASH_MASK;
1115 }
1116
1117 static unsigned int clientstr_hashval(struct xdr_netobj name)
1118 {
1119 return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
1120 }
1121
1122 /*
1123 * We store the NONE, READ, WRITE, and BOTH bits separately in the
1124 * st_{access,deny}_bmap field of the stateid, in order to track not
1125 * only what share bits are currently in force, but also what
1126 * combinations of share bits previous opens have used. This allows us
1127 * to enforce the recommendation of rfc 3530 14.2.19 that the server
1128 * return an error if the client attempts to downgrade to a combination
1129 * of share bits not explicable by closing some of its previous opens.
1130 *
1131 * XXX: This enforcement is actually incomplete, since we don't keep
1132 * track of access/deny bit combinations; so, e.g., we allow:
1133 *
1134 * OPEN allow read, deny write
1135 * OPEN allow both, deny none
1136 * DOWNGRADE allow read, deny none
1137 *
1138 * which we should reject.
1139 */
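/*
 * Example: NFS4_SHARE_ACCESS_READ is 1, WRITE is 2 and BOTH is 3, so a
 * bmap with bits 1 and 2 set (previous opens for read and for write)
 * collapses to the share mode ACCESS_BOTH below.
 */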
1140 static unsigned int
1141 bmap_to_share_mode(unsigned long bmap) {
1142 int i;
1143 unsigned int access = 0;
1144
1145 for (i = 1; i < 4; i++) {
1146 if (test_bit(i, &bmap))
1147 access |= i;
1148 }
1149 return access;
1150 }
1151
1152 /* set share access for a given stateid */
1153 static inline void
1154 set_access(u32 access, struct nfs4_ol_stateid *stp)
1155 {
1156 unsigned char mask = 1 << access;
1157
1158 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
1159 stp->st_access_bmap |= mask;
1160 }
1161
1162 /* clear share access for a given stateid */
1163 static inline void
1164 clear_access(u32 access, struct nfs4_ol_stateid *stp)
1165 {
1166 unsigned char mask = 1 << access;
1167
1168 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
1169 stp->st_access_bmap &= ~mask;
1170 }
1171
1172 /* test whether a given stateid has access */
1173 static inline bool
1174 test_access(u32 access, struct nfs4_ol_stateid *stp)
1175 {
1176 unsigned char mask = 1 << access;
1177
1178 return (bool)(stp->st_access_bmap & mask);
1179 }
1180
1181 /* set share deny for a given stateid */
1182 static inline void
1183 set_deny(u32 deny, struct nfs4_ol_stateid *stp)
1184 {
1185 unsigned char mask = 1 << deny;
1186
1187 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
1188 stp->st_deny_bmap |= mask;
1189 }
1190
1191 /* clear share deny for a given stateid */
1192 static inline void
1193 clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
1194 {
1195 unsigned char mask = 1 << deny;
1196
1197 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
1198 stp->st_deny_bmap &= ~mask;
1199 }
1200
1201 /* test whether a given stateid is denying specific access */
1202 static inline bool
1203 test_deny(u32 deny, struct nfs4_ol_stateid *stp)
1204 {
1205 unsigned char mask = 1 << deny;
1206
1207 return (bool)(stp->st_deny_bmap & mask);
1208 }
1209
1210 static int nfs4_access_to_omode(u32 access)
1211 {
1212 switch (access & NFS4_SHARE_ACCESS_BOTH) {
1213 case NFS4_SHARE_ACCESS_READ:
1214 return O_RDONLY;
1215 case NFS4_SHARE_ACCESS_WRITE:
1216 return O_WRONLY;
1217 case NFS4_SHARE_ACCESS_BOTH:
1218 return O_RDWR;
1219 }
1220 WARN_ON_ONCE(1);
1221 return O_RDONLY;
1222 }
1223
1224 /*
1225 * A stateid that had a deny mode associated with it is being released
1226 * or downgraded. Recalculate the deny mode on the file.
1227 */
1228 static void
1229 recalculate_deny_mode(struct nfs4_file *fp)
1230 {
1231 struct nfs4_ol_stateid *stp;
1232
1233 spin_lock(&fp->fi_lock);
1234 fp->fi_share_deny = 0;
1235 list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
1236 fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
1237 spin_unlock(&fp->fi_lock);
1238 }
1239
1240 static void
1241 reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
1242 {
1243 int i;
1244 bool change = false;
1245
1246 for (i = 1; i < 4; i++) {
1247 if ((i & deny) != i) {
1248 change = true;
1249 clear_deny(i, stp);
1250 }
1251 }
1252
1253 /* Recalculate per-file deny mode if there was a change */
1254 if (change)
1255 recalculate_deny_mode(stp->st_stid.sc_file);
1256 }
1257
1258 /* release all access and file references for a given stateid */
1259 static void
1260 release_all_access(struct nfs4_ol_stateid *stp)
1261 {
1262 int i;
1263 struct nfs4_file *fp = stp->st_stid.sc_file;
1264
1265 if (fp && stp->st_deny_bmap != 0)
1266 recalculate_deny_mode(fp);
1267
1268 for (i = 1; i < 4; i++) {
1269 if (test_access(i, stp))
1270 nfs4_file_put_access(stp->st_stid.sc_file, i);
1271 clear_access(i, stp);
1272 }
1273 }
1274
1275 static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
1276 {
1277 kfree(sop->so_owner.data);
1278 sop->so_ops->so_free(sop);
1279 }
1280
1281 static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
1282 {
1283 struct nfs4_client *clp = sop->so_client;
1284
1285 might_lock(&clp->cl_lock);
1286
1287 if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
1288 return;
1289 sop->so_ops->so_unhash(sop);
1290 spin_unlock(&clp->cl_lock);
1291 nfs4_free_stateowner(sop);
1292 }
1293
1294 static bool
1295 nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
1296 {
1297 return list_empty(&stp->st_perfile);
1298 }
1299
1300 static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
1301 {
1302 struct nfs4_file *fp = stp->st_stid.sc_file;
1303
1304 lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
1305
1306 if (list_empty(&stp->st_perfile))
1307 return false;
1308
1309 spin_lock(&fp->fi_lock);
1310 list_del_init(&stp->st_perfile);
1311 spin_unlock(&fp->fi_lock);
1312 list_del(&stp->st_perstateowner);
1313 return true;
1314 }
1315
1316 static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
1317 {
1318 struct nfs4_ol_stateid *stp = openlockstateid(stid);
1319
1320 put_clnt_odstate(stp->st_clnt_odstate);
1321 release_all_access(stp);
1322 if (stp->st_stateowner)
1323 nfs4_put_stateowner(stp->st_stateowner);
1324 kmem_cache_free(stateid_slab, stid);
1325 }
1326
1327 static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
1328 {
1329 struct nfs4_ol_stateid *stp = openlockstateid(stid);
1330 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
1331 struct nfsd_file *nf;
1332
1333 nf = find_any_file(stp->st_stid.sc_file);
1334 if (nf) {
1335 get_file(nf->nf_file);
1336 filp_close(nf->nf_file, (fl_owner_t)lo);
1337 nfsd_file_put(nf);
1338 }
1339 nfs4_free_ol_stateid(stid);
1340 }
1341
1342 /*
1343 * Put the persistent reference to an already unhashed generic stateid, while
1344 * holding the cl_lock. If it's the last reference, then put it onto the
1345 * reaplist for later destruction.
1346 */
1347 static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
1348 struct list_head *reaplist)
1349 {
1350 struct nfs4_stid *s = &stp->st_stid;
1351 struct nfs4_client *clp = s->sc_client;
1352
1353 lockdep_assert_held(&clp->cl_lock);
1354
1355 WARN_ON_ONCE(!list_empty(&stp->st_locks));
1356
1357 if (!refcount_dec_and_test(&s->sc_count)) {
1358 wake_up_all(&close_wq);
1359 return;
1360 }
1361
1362 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1363 list_add(&stp->st_locks, reaplist);
1364 }
1365
1366 static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
1367 {
1368 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1369
1370 if (!unhash_ol_stateid(stp))
1371 return false;
1372 list_del_init(&stp->st_locks);
1373 nfs4_unhash_stid(&stp->st_stid);
1374 return true;
1375 }
1376
1377 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
1378 {
1379 struct nfs4_client *clp = stp->st_stid.sc_client;
1380 bool unhashed;
1381
1382 spin_lock(&clp->cl_lock);
1383 unhashed = unhash_lock_stateid(stp);
1384 spin_unlock(&clp->cl_lock);
1385 if (unhashed)
1386 nfs4_put_stid(&stp->st_stid);
1387 }
1388
1389 static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
1390 {
1391 struct nfs4_client *clp = lo->lo_owner.so_client;
1392
1393 lockdep_assert_held(&clp->cl_lock);
1394
1395 list_del_init(&lo->lo_owner.so_strhash);
1396 }
1397
1398 /*
1399 * Free a list of generic stateids that were collected earlier after being
1400 * fully unhashed.
1401 */
1402 static void
1403 free_ol_stateid_reaplist(struct list_head *reaplist)
1404 {
1405 struct nfs4_ol_stateid *stp;
1406 struct nfs4_file *fp;
1407
1408 might_sleep();
1409
1410 while (!list_empty(reaplist)) {
1411 stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
1412 st_locks);
1413 list_del(&stp->st_locks);
1414 fp = stp->st_stid.sc_file;
1415 stp->st_stid.sc_free(&stp->st_stid);
1416 if (fp)
1417 put_nfs4_file(fp);
1418 }
1419 }
1420
1421 static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
1422 struct list_head *reaplist)
1423 {
1424 struct nfs4_ol_stateid *stp;
1425
1426 lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
1427
1428 while (!list_empty(&open_stp->st_locks)) {
1429 stp = list_entry(open_stp->st_locks.next,
1430 struct nfs4_ol_stateid, st_locks);
1431 WARN_ON(!unhash_lock_stateid(stp));
1432 put_ol_stateid_locked(stp, reaplist);
1433 }
1434 }
1435
1436 static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
1437 struct list_head *reaplist)
1438 {
1439 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1440
1441 if (!unhash_ol_stateid(stp))
1442 return false;
1443 release_open_stateid_locks(stp, reaplist);
1444 return true;
1445 }
1446
1447 static void release_open_stateid(struct nfs4_ol_stateid *stp)
1448 {
1449 LIST_HEAD(reaplist);
1450
1451 spin_lock(&stp->st_stid.sc_client->cl_lock);
1452 if (unhash_open_stateid(stp, &reaplist))
1453 put_ol_stateid_locked(stp, &reaplist);
1454 spin_unlock(&stp->st_stid.sc_client->cl_lock);
1455 free_ol_stateid_reaplist(&reaplist);
1456 }
1457
1458 static void unhash_openowner_locked(struct nfs4_openowner *oo)
1459 {
1460 struct nfs4_client *clp = oo->oo_owner.so_client;
1461
1462 lockdep_assert_held(&clp->cl_lock);
1463
1464 list_del_init(&oo->oo_owner.so_strhash);
1465 list_del_init(&oo->oo_perclient);
1466 }
1467
1468 static void release_last_closed_stateid(struct nfs4_openowner *oo)
1469 {
1470 struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
1471 nfsd_net_id);
1472 struct nfs4_ol_stateid *s;
1473
1474 spin_lock(&nn->client_lock);
1475 s = oo->oo_last_closed_stid;
1476 if (s) {
1477 list_del_init(&oo->oo_close_lru);
1478 oo->oo_last_closed_stid = NULL;
1479 }
1480 spin_unlock(&nn->client_lock);
1481 if (s)
1482 nfs4_put_stid(&s->st_stid);
1483 }
1484
1485 static void release_openowner(struct nfs4_openowner *oo)
1486 {
1487 struct nfs4_ol_stateid *stp;
1488 struct nfs4_client *clp = oo->oo_owner.so_client;
1489 struct list_head reaplist;
1490
1491 INIT_LIST_HEAD(&reaplist);
1492
1493 spin_lock(&clp->cl_lock);
1494 unhash_openowner_locked(oo);
1495 while (!list_empty(&oo->oo_owner.so_stateids)) {
1496 stp = list_first_entry(&oo->oo_owner.so_stateids,
1497 struct nfs4_ol_stateid, st_perstateowner);
1498 if (unhash_open_stateid(stp, &reaplist))
1499 put_ol_stateid_locked(stp, &reaplist);
1500 }
1501 spin_unlock(&clp->cl_lock);
1502 free_ol_stateid_reaplist(&reaplist);
1503 release_last_closed_stateid(oo);
1504 nfs4_put_stateowner(&oo->oo_owner);
1505 }
1506
1507 static inline int
1508 hash_sessionid(struct nfs4_sessionid *sessionid)
1509 {
1510 struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
1511
1512 return sid->sequence % SESSION_HASH_SIZE;
1513 }
1514
1515 #ifdef CONFIG_SUNRPC_DEBUG
1516 static inline void
1517 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1518 {
1519 u32 *ptr = (u32 *)(&sessionid->data[0]);
1520 dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
1521 }
1522 #else
1523 static inline void
1524 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1525 {
1526 }
1527 #endif
1528
1529 /*
1530 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
1531 * won't be used for replay.
1532 */
1533 void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
1534 {
1535 struct nfs4_stateowner *so = cstate->replay_owner;
1536
1537 if (nfserr == nfserr_replay_me)
1538 return;
1539
1540 if (!seqid_mutating_err(ntohl(nfserr))) {
1541 nfsd4_cstate_clear_replay(cstate);
1542 return;
1543 }
1544 if (!so)
1545 return;
1546 if (so->so_is_open_owner)
1547 release_last_closed_stateid(openowner(so));
1548 so->so_seqid++;
1549 return;
1550 }
1551
1552 static void
1553 gen_sessionid(struct nfsd4_session *ses)
1554 {
1555 struct nfs4_client *clp = ses->se_client;
1556 struct nfsd4_sessionid *sid;
1557
1558 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
1559 sid->clientid = clp->cl_clientid;
1560 sid->sequence = current_sessionid++;
1561 sid->reserved = 0;
1562 }
1563
1564 /*
1565 * The protocol defines ca_maxresponsesize_cached to include the size of
1566 * the rpc header, but all we need to cache is the data starting after
1567 * the end of the initial SEQUENCE operation--the rest we regenerate
1568 * each time. Therefore we can advertise a ca_maxresponsesize_cached
1569 * value that is the number of bytes in our cache plus a few additional
1570 * bytes. In order to stay on the safe side, and not promise more than
1571 * we can cache, those additional bytes must be the minimum possible: 24
1572 * bytes of rpc header (xid through accept state, with AUTH_NULL
1573 * verifier), 12 for the compound header (with zero-length tag), and 44
1574 * for the SEQUENCE op response:
1575 */
1576 #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
1577
1578 static void
1579 free_session_slots(struct nfsd4_session *ses)
1580 {
1581 int i;
1582
1583 for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
1584 free_svc_cred(&ses->se_slots[i]->sl_cred);
1585 kfree(ses->se_slots[i]);
1586 }
1587 }
1588
1589 /*
1590 * We don't actually need to cache the rpc and session headers, so we
1591 * can allocate a little less for each slot:
1592 */
1593 static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1594 {
1595 u32 size;
1596
1597 if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
1598 size = 0;
1599 else
1600 size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
1601 return size + sizeof(struct nfsd4_slot);
1602 }
1603
1604 /*
1605 * XXX: If we run out of reserved DRC memory we could (up to a point)
1606 * re-negotiate active sessions and reduce their slot usage to make
1607 * room for new connections. For now we just fail the create session.
1608 */
1609 static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
1610 {
1611 u32 slotsize = slot_bytes(ca);
1612 u32 num = ca->maxreqs;
1613 unsigned long avail, total_avail;
1614 unsigned int scale_factor;
1615
1616 spin_lock(&nfsd_drc_lock);
1617 if (nfsd_drc_max_mem > nfsd_drc_mem_used)
1618 total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
1619 else
1620 /* We have handed out more space than we chose in
1621 * set_max_drc() to allow. That isn't really a
1622 * problem as long as that doesn't make us think we
1623 * have lots more due to integer overflow.
1624 */
1625 total_avail = 0;
1626 avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
1627 /*
1628 * Never use more than a fraction of the remaining memory,
1629 * unless it's the only way to give this client a slot.
1630 * The chosen fraction is either 1/8 or 1/number of threads,
1631 * whichever is smaller. This ensures there are adequate
1632 * slots to support multiple clients per thread.
1633 * Give the client one slot even if that would require
1634 * over-allocation--it is better than failure.
1635 */
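	/*
	 * Example with illustrative numbers: with 16 nfsd threads,
	 * scale_factor below is 16, so this session is limited to at most
	 * 1/16 of the remaining DRC memory (but never less than one
	 * slot's worth).
	 */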
1636 scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);
1637
1638 avail = clamp_t(unsigned long, avail, slotsize,
1639 total_avail/scale_factor);
1640 num = min_t(int, num, avail / slotsize);
1641 num = max_t(int, num, 1);
1642 nfsd_drc_mem_used += num * slotsize;
1643 spin_unlock(&nfsd_drc_lock);
1644
1645 return num;
1646 }
1647
1648 static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1649 {
1650 int slotsize = slot_bytes(ca);
1651
1652 spin_lock(&nfsd_drc_lock);
1653 nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1654 spin_unlock(&nfsd_drc_lock);
1655 }
1656
1657 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
1658 struct nfsd4_channel_attrs *battrs)
1659 {
1660 int numslots = fattrs->maxreqs;
1661 int slotsize = slot_bytes(fattrs);
1662 struct nfsd4_session *new;
1663 int mem, i;
1664
1665 BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
1666 + sizeof(struct nfsd4_session) > PAGE_SIZE);
1667 mem = numslots * sizeof(struct nfsd4_slot *);
1668
1669 new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
1670 if (!new)
1671 return NULL;
1672 /* allocate each struct nfsd4_slot and data cache in one piece */
1673 for (i = 0; i < numslots; i++) {
1674 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
1675 if (!new->se_slots[i])
1676 goto out_free;
1677 }
1678
1679 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
1680 memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
1681
1682 return new;
1683 out_free:
1684 while (i--)
1685 kfree(new->se_slots[i]);
1686 kfree(new);
1687 return NULL;
1688 }
1689
1690 static void free_conn(struct nfsd4_conn *c)
1691 {
1692 svc_xprt_put(c->cn_xprt);
1693 kfree(c);
1694 }
1695
1696 static void nfsd4_conn_lost(struct svc_xpt_user *u)
1697 {
1698 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
1699 struct nfs4_client *clp = c->cn_session->se_client;
1700
1701 spin_lock(&clp->cl_lock);
1702 if (!list_empty(&c->cn_persession)) {
1703 list_del(&c->cn_persession);
1704 free_conn(c);
1705 }
1706 nfsd4_probe_callback(clp);
1707 spin_unlock(&clp->cl_lock);
1708 }
1709
1710 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1711 {
1712 struct nfsd4_conn *conn;
1713
1714 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1715 if (!conn)
1716 return NULL;
1717 svc_xprt_get(rqstp->rq_xprt);
1718 conn->cn_xprt = rqstp->rq_xprt;
1719 conn->cn_flags = flags;
1720 INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1721 return conn;
1722 }
1723
1724 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1725 {
1726 conn->cn_session = ses;
1727 list_add(&conn->cn_persession, &ses->se_conns);
1728 }
1729
1730 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1731 {
1732 struct nfs4_client *clp = ses->se_client;
1733
1734 spin_lock(&clp->cl_lock);
1735 __nfsd4_hash_conn(conn, ses);
1736 spin_unlock(&clp->cl_lock);
1737 }
1738
1739 static int nfsd4_register_conn(struct nfsd4_conn *conn)
1740 {
1741 conn->cn_xpt_user.callback = nfsd4_conn_lost;
1742 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
1743 }
1744
1745 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
1746 {
1747 int ret;
1748
1749 nfsd4_hash_conn(conn, ses);
1750 ret = nfsd4_register_conn(conn);
1751 if (ret)
1752 /* oops; xprt is already down: */
1753 nfsd4_conn_lost(&conn->cn_xpt_user);
1754 /* We may have gained or lost a callback channel: */
1755 nfsd4_probe_callback_sync(ses->se_client);
1756 }
1757
1758 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1759 {
1760 u32 dir = NFS4_CDFC4_FORE;
1761
1762 if (cses->flags & SESSION4_BACK_CHAN)
1763 dir |= NFS4_CDFC4_BACK;
1764 return alloc_conn(rqstp, dir);
1765 }
1766
1767 /* must be called under client_lock */
1768 static void nfsd4_del_conns(struct nfsd4_session *s)
1769 {
1770 struct nfs4_client *clp = s->se_client;
1771 struct nfsd4_conn *c;
1772
1773 spin_lock(&clp->cl_lock);
1774 while (!list_empty(&s->se_conns)) {
1775 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
1776 list_del_init(&c->cn_persession);
1777 spin_unlock(&clp->cl_lock);
1778
1779 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
1780 free_conn(c);
1781
1782 spin_lock(&clp->cl_lock);
1783 }
1784 spin_unlock(&clp->cl_lock);
1785 }
1786
1787 static void __free_session(struct nfsd4_session *ses)
1788 {
1789 free_session_slots(ses);
1790 kfree(ses);
1791 }
1792
1793 static void free_session(struct nfsd4_session *ses)
1794 {
1795 nfsd4_del_conns(ses);
1796 nfsd4_put_drc_mem(&ses->se_fchannel);
1797 __free_session(ses);
1798 }
1799
1800 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1801 {
1802 int idx;
1803 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1804
1805 new->se_client = clp;
1806 gen_sessionid(new);
1807
1808 INIT_LIST_HEAD(&new->se_conns);
1809
1810 new->se_cb_seq_nr = 1;
1811 new->se_flags = cses->flags;
1812 new->se_cb_prog = cses->callback_prog;
1813 new->se_cb_sec = cses->cb_sec;
1814 atomic_set(&new->se_ref, 0);
1815 idx = hash_sessionid(&new->se_sessionid);
1816 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1817 spin_lock(&clp->cl_lock);
1818 list_add(&new->se_perclnt, &clp->cl_sessions);
1819 spin_unlock(&clp->cl_lock);
1820
1821 {
1822 struct sockaddr *sa = svc_addr(rqstp);
1823 /*
1824 * This is a little silly; with sessions there's no real
1825 * use for the callback address. Use the peer address
1826 * as a reasonable default for now, but consider fixing
1827 * the rpc client not to require an address in the
1828 * future:
1829 */
1830 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
1831 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
1832 }
1833 }
1834
1835 /* caller must hold client_lock */
1836 static struct nfsd4_session *
1837 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
1838 {
1839 struct nfsd4_session *elem;
1840 int idx;
1841 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1842
1843 lockdep_assert_held(&nn->client_lock);
1844
1845 dump_sessionid(__func__, sessionid);
1846 idx = hash_sessionid(sessionid);
1847 /* Search in the appropriate list */
1848 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
1849 if (!memcmp(elem->se_sessionid.data, sessionid->data,
1850 NFS4_MAX_SESSIONID_LEN)) {
1851 return elem;
1852 }
1853 }
1854
1855 dprintk("%s: session not found\n", __func__);
1856 return NULL;
1857 }
1858
1859 static struct nfsd4_session *
1860 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
1861 __be32 *ret)
1862 {
1863 struct nfsd4_session *session;
1864 __be32 status = nfserr_badsession;
1865
1866 session = __find_in_sessionid_hashtbl(sessionid, net);
1867 if (!session)
1868 goto out;
1869 status = nfsd4_get_session_locked(session);
1870 if (status)
1871 session = NULL;
1872 out:
1873 *ret = status;
1874 return session;
1875 }
1876
1877 /* caller must hold client_lock */
1878 static void
1879 unhash_session(struct nfsd4_session *ses)
1880 {
1881 struct nfs4_client *clp = ses->se_client;
1882 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1883
1884 lockdep_assert_held(&nn->client_lock);
1885
1886 list_del(&ses->se_hash);
1887 spin_lock(&ses->se_client->cl_lock);
1888 list_del(&ses->se_perclnt);
1889 spin_unlock(&ses->se_client->cl_lock);
1890 }
1891
1892 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1893 static int
1894 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1895 {
1896 /*
1897 * We're assuming the clid was not given out from a boot
1898 * precisely 2^32 (about 136 years) before this one. That seems
1899 * a safe assumption:
1900 */
1901 if (clid->cl_boot == (u32)nn->boot_time)
1902 return 0;
1903 dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
1904 clid->cl_boot, clid->cl_id, nn->boot_time);
1905 return 1;
1906 }
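
/*
 * Example (illustrative): a clientid handed out before a server restart
 * carries the old boot time in cl_boot, so the comparison above fails and
 * callers typically turn that into nfserr_stale_clientid.
 */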
1907
1908 /*
1909 * XXX Should we use a slab cache?
1910 * This type of memory management is somewhat inefficient, but we use it
1911 * anyway since SETCLIENTID is not a common operation.
1912 */
1913 static struct nfs4_client *alloc_client(struct xdr_netobj name)
1914 {
1915 struct nfs4_client *clp;
1916 int i;
1917
1918 clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
1919 if (clp == NULL)
1920 return NULL;
1921 xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
1922 if (clp->cl_name.data == NULL)
1923 goto err_no_name;
1924 clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
1925 sizeof(struct list_head),
1926 GFP_KERNEL);
1927 if (!clp->cl_ownerstr_hashtbl)
1928 goto err_no_hashtbl;
1929 for (i = 0; i < OWNER_HASH_SIZE; i++)
1930 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
1931 INIT_LIST_HEAD(&clp->cl_sessions);
1932 idr_init(&clp->cl_stateids);
1933 atomic_set(&clp->cl_rpc_users, 0);
1934 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1935 INIT_LIST_HEAD(&clp->cl_idhash);
1936 INIT_LIST_HEAD(&clp->cl_openowners);
1937 INIT_LIST_HEAD(&clp->cl_delegations);
1938 INIT_LIST_HEAD(&clp->cl_lru);
1939 INIT_LIST_HEAD(&clp->cl_revoked);
1940 #ifdef CONFIG_NFSD_PNFS
1941 INIT_LIST_HEAD(&clp->cl_lo_states);
1942 #endif
1943 INIT_LIST_HEAD(&clp->async_copies);
1944 spin_lock_init(&clp->async_lock);
1945 spin_lock_init(&clp->cl_lock);
1946 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1947 return clp;
1948 err_no_hashtbl:
1949 kfree(clp->cl_name.data);
1950 err_no_name:
1951 kmem_cache_free(client_slab, clp);
1952 return NULL;
1953 }
1954
1955 static void __free_client(struct kref *k)
1956 {
1957 struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
1958 struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);
1959
1960 free_svc_cred(&clp->cl_cred);
1961 kfree(clp->cl_ownerstr_hashtbl);
1962 kfree(clp->cl_name.data);
1963 kfree(clp->cl_nii_domain.data);
1964 kfree(clp->cl_nii_name.data);
1965 idr_destroy(&clp->cl_stateids);
1966 kmem_cache_free(client_slab, clp);
1967 }
1968
1969 static void drop_client(struct nfs4_client *clp)
1970 {
1971 kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
1972 }
1973
1974 static void
1975 free_client(struct nfs4_client *clp)
1976 {
1977 while (!list_empty(&clp->cl_sessions)) {
1978 struct nfsd4_session *ses;
1979 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
1980 se_perclnt);
1981 list_del(&ses->se_perclnt);
1982 WARN_ON_ONCE(atomic_read(&ses->se_ref));
1983 free_session(ses);
1984 }
1985 rpc_destroy_wait_queue(&clp->cl_cb_waitq);
1986 if (clp->cl_nfsd_dentry) {
1987 nfsd_client_rmdir(clp->cl_nfsd_dentry);
1988 clp->cl_nfsd_dentry = NULL;
1989 wake_up_all(&expiry_wq);
1990 }
1991 drop_client(clp);
1992 }
1993
1994 /* must be called under the client_lock */
1995 static void
1996 unhash_client_locked(struct nfs4_client *clp)
1997 {
1998 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1999 struct nfsd4_session *ses;
2000
2001 lockdep_assert_held(&nn->client_lock);
2002
2003 /* Mark the client as expired! */
2004 clp->cl_time = 0;
2005 /* Make it invisible */
2006 if (!list_empty(&clp->cl_idhash)) {
2007 list_del_init(&clp->cl_idhash);
2008 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2009 rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
2010 else
2011 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2012 }
2013 list_del_init(&clp->cl_lru);
2014 spin_lock(&clp->cl_lock);
2015 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
2016 list_del_init(&ses->se_hash);
2017 spin_unlock(&clp->cl_lock);
2018 }
2019
2020 static void
2021 unhash_client(struct nfs4_client *clp)
2022 {
2023 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2024
2025 spin_lock(&nn->client_lock);
2026 unhash_client_locked(clp);
2027 spin_unlock(&nn->client_lock);
2028 }
2029
2030 static __be32 mark_client_expired_locked(struct nfs4_client *clp)
2031 {
2032 if (atomic_read(&clp->cl_rpc_users))
2033 return nfserr_jukebox;
2034 unhash_client_locked(clp);
2035 return nfs_ok;
2036 }
2037
2038 static void
2039 __destroy_client(struct nfs4_client *clp)
2040 {
2041 int i;
2042 struct nfs4_openowner *oo;
2043 struct nfs4_delegation *dp;
2044 struct list_head reaplist;
2045
2046 INIT_LIST_HEAD(&reaplist);
2047 spin_lock(&state_lock);
2048 while (!list_empty(&clp->cl_delegations)) {
2049 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
2050 WARN_ON(!unhash_delegation_locked(dp));
2051 list_add(&dp->dl_recall_lru, &reaplist);
2052 }
2053 spin_unlock(&state_lock);
2054 while (!list_empty(&reaplist)) {
2055 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
2056 list_del_init(&dp->dl_recall_lru);
2057 destroy_unhashed_deleg(dp);
2058 }
2059 while (!list_empty(&clp->cl_revoked)) {
2060 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
2061 list_del_init(&dp->dl_recall_lru);
2062 nfs4_put_stid(&dp->dl_stid);
2063 }
2064 while (!list_empty(&clp->cl_openowners)) {
2065 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
2066 nfs4_get_stateowner(&oo->oo_owner);
2067 release_openowner(oo);
2068 }
2069 for (i = 0; i < OWNER_HASH_SIZE; i++) {
2070 struct nfs4_stateowner *so, *tmp;
2071
2072 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
2073 so_strhash) {
2074 /* Should be no openowners at this point */
2075 WARN_ON_ONCE(so->so_is_open_owner);
2076 remove_blocked_locks(lockowner(so));
2077 }
2078 }
2079 nfsd4_return_all_client_layouts(clp);
2080 nfsd4_shutdown_copy(clp);
2081 nfsd4_shutdown_callback(clp);
2082 if (clp->cl_cb_conn.cb_xprt)
2083 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
2084 free_client(clp);
2085 wake_up_all(&expiry_wq);
2086 }
2087
2088 static void
2089 destroy_client(struct nfs4_client *clp)
2090 {
2091 unhash_client(clp);
2092 __destroy_client(clp);
2093 }
2094
2095 static void inc_reclaim_complete(struct nfs4_client *clp)
2096 {
2097 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2098
2099 if (!nn->track_reclaim_completes)
2100 return;
2101 if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
2102 return;
2103 if (atomic_inc_return(&nn->nr_reclaim_complete) ==
2104 nn->reclaim_str_hashtbl_size) {
2105 printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
2106 clp->net->ns.inum);
2107 nfsd4_end_grace(nn);
2108 }
2109 }
2110
2111 static void expire_client(struct nfs4_client *clp)
2112 {
2113 unhash_client(clp);
2114 nfsd4_client_record_remove(clp);
2115 __destroy_client(clp);
2116 }
2117
2118 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
2119 {
2120 memcpy(target->cl_verifier.data, source->data,
2121 sizeof(target->cl_verifier.data));
2122 }
2123
2124 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
2125 {
2126 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
2127 target->cl_clientid.cl_id = source->cl_clientid.cl_id;
2128 }
2129
2130 static int copy_cred(struct svc_cred *target, struct svc_cred *source)
2131 {
2132 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
2133 target->cr_raw_principal = kstrdup(source->cr_raw_principal,
2134 GFP_KERNEL);
2135 target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
2136 if ((source->cr_principal && !target->cr_principal) ||
2137 (source->cr_raw_principal && !target->cr_raw_principal) ||
2138 (source->cr_targ_princ && !target->cr_targ_princ))
2139 return -ENOMEM;
2140
2141 target->cr_flavor = source->cr_flavor;
2142 target->cr_uid = source->cr_uid;
2143 target->cr_gid = source->cr_gid;
2144 target->cr_group_info = source->cr_group_info;
2145 get_group_info(target->cr_group_info);
2146 target->cr_gss_mech = source->cr_gss_mech;
2147 if (source->cr_gss_mech)
2148 gss_mech_get(source->cr_gss_mech);
2149 return 0;
2150 }
2151
2152 static int
2153 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
2154 {
2155 if (o1->len < o2->len)
2156 return -1;
2157 if (o1->len > o2->len)
2158 return 1;
2159 return memcmp(o1->data, o2->data, o1->len);
2160 }
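
/*
 * compare_blob() orders netobjs first by length and then bytewise with
 * memcmp(); this total ordering is what keys the conf_name_tree and
 * unconf_name_tree rbtrees used by add_clp_to_name_tree() and
 * find_clp_in_name_tree() below.
 */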
2161
2162 static int
2163 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
2164 {
2165 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
2166 }
2167
2168 static int
2169 same_clid(clientid_t *cl1, clientid_t *cl2)
2170 {
2171 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
2172 }
2173
2174 static bool groups_equal(struct group_info *g1, struct group_info *g2)
2175 {
2176 int i;
2177
2178 if (g1->ngroups != g2->ngroups)
2179 return false;
2180 for (i=0; i<g1->ngroups; i++)
2181 if (!gid_eq(g1->gid[i], g2->gid[i]))
2182 return false;
2183 return true;
2184 }
2185
2186 /*
2187 * RFC 3530 language requires clid_inuse be returned when the
2188 * "principal" associated with a requests differs from that previously
2189 * used. We use uid, gid's, and gss principal string as our best
2190 * approximation. We also don't want to allow non-gss use of a client
2191 * established using gss: in theory cr_principal should catch that
2192 * change, but in practice cr_principal can be null even in the gss case
2193 * since gssd doesn't always pass down a principal string.
2194 */
2195 static bool is_gss_cred(struct svc_cred *cr)
2196 {
2197 /* Is cr_flavor one of the gss "pseudoflavors"?: */
2198 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
2199 }
2200
2201
2202 static bool
2203 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
2204 {
2205 if ((is_gss_cred(cr1) != is_gss_cred(cr2))
2206 || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
2207 || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
2208 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
2209 return false;
2210 /* XXX: check that cr_targ_princ fields match ? */
2211 if (cr1->cr_principal == cr2->cr_principal)
2212 return true;
2213 if (!cr1->cr_principal || !cr2->cr_principal)
2214 return false;
2215 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
2216 }
2217
2218 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
2219 {
2220 struct svc_cred *cr = &rqstp->rq_cred;
2221 u32 service;
2222
2223 if (!cr->cr_gss_mech)
2224 return false;
2225 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
2226 return service == RPC_GSS_SVC_INTEGRITY ||
2227 service == RPC_GSS_SVC_PRIVACY;
2228 }
2229
2230 bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
2231 {
2232 struct svc_cred *cr = &rqstp->rq_cred;
2233
2234 if (!cl->cl_mach_cred)
2235 return true;
2236 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
2237 return false;
2238 if (!svc_rqst_integrity_protected(rqstp))
2239 return false;
2240 if (cl->cl_cred.cr_raw_principal)
2241 return 0 == strcmp(cl->cl_cred.cr_raw_principal,
2242 cr->cr_raw_principal);
2243 if (!cr->cr_principal)
2244 return false;
2245 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
2246 }
2247
2248 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
2249 {
2250 __be32 verf[2];
2251
2252 /*
2253 * This is opaque to the client, so no need to byte-swap. Use
2254 * __force to keep sparse happy
2255 */
2256 verf[0] = (__force __be32)get_seconds();
2257 verf[1] = (__force __be32)nn->clverifier_counter++;
2258 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
2259 }
2260
2261 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2262 {
2263 clp->cl_clientid.cl_boot = nn->boot_time;
2264 clp->cl_clientid.cl_id = nn->clientid_counter++;
2265 gen_confirm(clp, nn);
2266 }
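
/*
 * Taken together, a v4.0 clientid is { boot_time, per-net counter }, so ids
 * handed out by an earlier server instance are detectable (see
 * STALE_CLIENTID above), while the confirm verifier mixes the current time
 * with a per-net counter; both values are opaque to the client.
 */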
2267
2268 static struct nfs4_stid *
2269 find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
2270 {
2271 struct nfs4_stid *ret;
2272
2273 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
2274 if (!ret || !ret->sc_type)
2275 return NULL;
2276 return ret;
2277 }
2278
2279 static struct nfs4_stid *
2280 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
2281 {
2282 struct nfs4_stid *s;
2283
2284 spin_lock(&cl->cl_lock);
2285 s = find_stateid_locked(cl, t);
2286 if (s != NULL) {
2287 if (typemask & s->sc_type)
2288 refcount_inc(&s->sc_count);
2289 else
2290 s = NULL;
2291 }
2292 spin_unlock(&cl->cl_lock);
2293 return s;
2294 }
2295
2296 static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
2297 {
2298 struct nfsdfs_client *nc;
2299 nc = get_nfsdfs_client(inode);
2300 if (!nc)
2301 return NULL;
2302 return container_of(nc, struct nfs4_client, cl_nfsdfs);
2303 }
2304
2305 static void seq_quote_mem(struct seq_file *m, char *data, int len)
2306 {
2307 seq_printf(m, "\"");
2308 seq_escape_mem_ascii(m, data, len);
2309 seq_printf(m, "\"");
2310 }
2311
2312 static int client_info_show(struct seq_file *m, void *v)
2313 {
2314 struct inode *inode = m->private;
2315 struct nfs4_client *clp;
2316 u64 clid;
2317
2318 clp = get_nfsdfs_clp(inode);
2319 if (!clp)
2320 return -ENXIO;
2321 memcpy(&clid, &clp->cl_clientid, sizeof(clid));
2322 seq_printf(m, "clientid: 0x%llx\n", clid);
2323 seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
2324 seq_printf(m, "name: ");
2325 seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
2326 seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
2327 if (clp->cl_nii_domain.data) {
2328 seq_printf(m, "Implementation domain: ");
2329 seq_quote_mem(m, clp->cl_nii_domain.data,
2330 clp->cl_nii_domain.len);
2331 seq_printf(m, "\nImplementation name: ");
2332 seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
2333 seq_printf(m, "\nImplementation time: [%ld, %ld]\n",
2334 clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
2335 }
2336 drop_client(clp);
2337
2338 return 0;
2339 }
2340
2341 static int client_info_open(struct inode *inode, struct file *file)
2342 {
2343 return single_open(file, client_info_show, inode);
2344 }
2345
2346 static const struct file_operations client_info_fops = {
2347 .open = client_info_open,
2348 .read = seq_read,
2349 .llseek = seq_lseek,
2350 .release = single_release,
2351 };
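
/*
 * Example 'info' output from client_info_show(), with illustrative values:
 *
 *	clientid: 0x5d3c61f200000001
 *	address: "192.0.2.10:740"
 *	name: "Linux NFSv4.1 client.example.net"
 *	minor version: 1
 *	Implementation domain: "kernel.org"
 *	Implementation name: "Linux"
 *	Implementation time: [0, 0]
 */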
2352
2353 static void *states_start(struct seq_file *s, loff_t *pos)
2354 __acquires(&clp->cl_lock)
2355 {
2356 struct nfs4_client *clp = s->private;
2357 unsigned long id = *pos;
2358 void *ret;
2359
2360 spin_lock(&clp->cl_lock);
2361 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2362 *pos = id;
2363 return ret;
2364 }
2365
2366 static void *states_next(struct seq_file *s, void *v, loff_t *pos)
2367 {
2368 struct nfs4_client *clp = s->private;
2369 unsigned long id = *pos;
2370 void *ret;
2371
2372 id = *pos;
2373 id++;
2374 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2375 *pos = id;
2376 return ret;
2377 }
2378
2379 static void states_stop(struct seq_file *s, void *v)
2380 __releases(&clp->cl_lock)
2381 {
2382 struct nfs4_client *clp = s->private;
2383
2384 spin_unlock(&clp->cl_lock);
2385 }
2386
2387 static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
2388 {
2389 struct inode *inode = f->nf_inode;
2390
2391 seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
2392 MAJOR(inode->i_sb->s_dev),
2393 MINOR(inode->i_sb->s_dev),
2394 inode->i_ino);
2395 }
2396
2397 static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
2398 {
2399 seq_printf(s, "owner: ");
2400 seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
2401 }
2402
2403 static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
2404 {
2405 struct nfs4_ol_stateid *ols;
2406 struct nfs4_file *nf;
2407 struct nfsd_file *file;
2408 struct nfs4_stateowner *oo;
2409 unsigned int access, deny;
2410
2411 if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID)
2412 return 0; /* XXX: or SEQ_SKIP? */
2413 ols = openlockstateid(st);
2414 oo = ols->st_stateowner;
2415 nf = st->sc_file;
2416
2417 spin_lock(&nf->fi_lock);
2418 file = find_any_file_locked(nf);
2419 if (!file)
2420 goto out;
2421
2422 seq_printf(s, "- 0x%16phN: { type: open, ", &st->sc_stateid);
2423
2424 access = bmap_to_share_mode(ols->st_access_bmap);
2425 deny = bmap_to_share_mode(ols->st_deny_bmap);
2426
2427 seq_printf(s, "access: \%s\%s, ",
2428 access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2429 access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2430 seq_printf(s, "deny: \%s\%s, ",
2431 deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2432 deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2433
2434 nfs4_show_superblock(s, file);
2435 seq_printf(s, ", ");
2436 nfs4_show_owner(s, oo);
2437 seq_printf(s, " }\n");
2438 out:
2439 spin_unlock(&nf->fi_lock);
2440 return 0;
2441 }
2442
2443 static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
2444 {
2445 struct nfs4_ol_stateid *ols;
2446 struct nfs4_file *nf;
2447 struct nfsd_file *file;
2448 struct nfs4_stateowner *oo;
2449
2450 ols = openlockstateid(st);
2451 oo = ols->st_stateowner;
2452 nf = st->sc_file;
2453 spin_lock(&nf->fi_lock);
2454 file = find_any_file_locked(nf);
2455 if (!file)
2456 goto out;
2457
2458 seq_printf(s, "- 0x%16phN: { type: lock, ", &st->sc_stateid);
2459
2460 /*
2461 * Note: a lock stateid isn't really the same thing as a lock,
2462 * it's the locking state held by one owner on a file, and there
2463 * may be multiple (or no) lock ranges associated with it.
2464 * (The same is true of open stateids.)
2465 */
2466
2467 nfs4_show_superblock(s, file);
2468 /* XXX: open stateid? */
2469 seq_printf(s, ", ");
2470 nfs4_show_owner(s, oo);
2471 seq_printf(s, " }\n");
2472 out:
2473 spin_unlock(&nf->fi_lock);
2474 return 0;
2475 }
2476
2477 static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
2478 {
2479 struct nfs4_delegation *ds;
2480 struct nfs4_file *nf;
2481 struct nfsd_file *file;
2482
2483 ds = delegstateid(st);
2484 nf = st->sc_file;
2485 spin_lock(&nf->fi_lock);
2486 file = find_deleg_file_locked(nf);
2487 if (!file)
2488 goto out;
2489
2490 seq_printf(s, "- 0x%16phN: { type: deleg, ", &st->sc_stateid);
2491
2492 /* Kinda dead code as long as we only support read delegs: */
2493 seq_printf(s, "access: %s, ",
2494 ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");
2495
2496 /* XXX: lease time, whether it's being recalled. */
2497
2498 nfs4_show_superblock(s, file);
2499 seq_printf(s, " }\n");
2500 out:
2501 spin_unlock(&nf->fi_lock);
2502 return 0;
2503 }
2504
2505 static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
2506 {
2507 struct nfs4_layout_stateid *ls;
2508 struct nfsd_file *file;
2509
2510 ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
2511 file = ls->ls_file;
2512
2513 seq_printf(s, "- 0x%16phN: { type: layout, ", &st->sc_stateid);
2514
2515 /* XXX: What else would be useful? */
2516
2517 nfs4_show_superblock(s, file);
2518 seq_printf(s, " }\n");
2519
2520 return 0;
2521 }
2522
2523 static int states_show(struct seq_file *s, void *v)
2524 {
2525 struct nfs4_stid *st = v;
2526
2527 switch (st->sc_type) {
2528 case NFS4_OPEN_STID:
2529 return nfs4_show_open(s, st);
2530 case NFS4_LOCK_STID:
2531 return nfs4_show_lock(s, st);
2532 case NFS4_DELEG_STID:
2533 return nfs4_show_deleg(s, st);
2534 case NFS4_LAYOUT_STID:
2535 return nfs4_show_layout(s, st);
2536 default:
2537 return 0; /* XXX: or SEQ_SKIP? */
2538 }
2539 /* XXX: copy stateids? */
2540 }
2541
2542 static struct seq_operations states_seq_ops = {
2543 .start = states_start,
2544 .next = states_next,
2545 .stop = states_stop,
2546 .show = states_show
2547 };
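
/*
 * Example 'states' entries, one line per stateid, with illustrative values
 * (formats taken from the show functions above):
 *
 *	- 0x...: { type: open, access: rw, deny: --, superblock: "08:02:131090", owner: "..." }
 *	- 0x...: { type: deleg, access: r, superblock: "08:02:131090" }
 */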
2548
2549 static int client_states_open(struct inode *inode, struct file *file)
2550 {
2551 struct seq_file *s;
2552 struct nfs4_client *clp;
2553 int ret;
2554
2555 clp = get_nfsdfs_clp(inode);
2556 if (!clp)
2557 return -ENXIO;
2558
2559 ret = seq_open(file, &states_seq_ops);
2560 if (ret)
2561 return ret;
2562 s = file->private_data;
2563 s->private = clp;
2564 return 0;
2565 }
2566
2567 static int client_opens_release(struct inode *inode, struct file *file)
2568 {
2569 struct seq_file *m = file->private_data;
2570 struct nfs4_client *clp = m->private;
2571
2572 /* XXX: alternatively, we could get/drop in seq start/stop */
2573 drop_client(clp);
2574 return seq_release(inode, file);
2575 }
2576
2577 static const struct file_operations client_states_fops = {
2578 .open = client_states_open,
2579 .read = seq_read,
2580 .llseek = seq_lseek,
2581 .release = client_opens_release,
2582 };
2583
2584 /*
2585 * Normally we refuse to destroy clients that are in use, but here the
2586 * administrator is telling us to just do it. We also want to wait
2587 * so the caller has a guarantee that the client's locks are gone by
2588 * the time the write returns:
2589 */
2590 static void force_expire_client(struct nfs4_client *clp)
2591 {
2592 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2593 bool already_expired;
2594
2595 spin_lock(&nn->client_lock);
2596 clp->cl_time = 0;
2597 spin_unlock(&nn->client_lock);
2598
2599 wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
2600 spin_lock(&nn->client_lock);
2601 already_expired = list_empty(&clp->cl_lru);
2602 if (!already_expired)
2603 unhash_client_locked(clp);
2604 spin_unlock(&nn->client_lock);
2605
2606 if (!already_expired)
2607 expire_client(clp);
2608 else
2609 wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
2610 }
2611
2612 static ssize_t client_ctl_write(struct file *file, const char __user *buf,
2613 size_t size, loff_t *pos)
2614 {
2615 char *data;
2616 struct nfs4_client *clp;
2617
2618 data = simple_transaction_get(file, buf, size);
2619 if (IS_ERR(data))
2620 return PTR_ERR(data);
2621 if (size != 7 || 0 != memcmp(data, "expire\n", 7))
2622 return -EINVAL;
2623 clp = get_nfsdfs_clp(file_inode(file));
2624 if (!clp)
2625 return -ENXIO;
2626 force_expire_client(clp);
2627 drop_client(clp);
2628 return 7;
2629 }
2630
2631 static const struct file_operations client_ctl_fops = {
2632 .write = client_ctl_write,
2633 .release = simple_transaction_release,
2634 };
2635
2636 static const struct tree_descr client_files[] = {
2637 [0] = {"info", &client_info_fops, S_IRUSR},
2638 [1] = {"states", &client_states_fops, S_IRUSR},
2639 [2] = {"ctl", &client_ctl_fops, S_IRUSR|S_IWUSR},
2640 [3] = {""},
2641 };
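
/*
 * These files live in the per-client nfsdfs directory that create_client()
 * makes below, e.g. /proc/fs/nfsd/clients/<id>/{info,states,ctl} with
 * nfsdfs mounted in its usual place.  A forced expiry can then be requested
 * from userspace with something like:
 *
 *	echo expire > /proc/fs/nfsd/clients/4/ctl
 *
 * (client_ctl_write() only accepts the exact string "expire\n".)
 */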
2642
2643 static struct nfs4_client *create_client(struct xdr_netobj name,
2644 struct svc_rqst *rqstp, nfs4_verifier *verf)
2645 {
2646 struct nfs4_client *clp;
2647 struct sockaddr *sa = svc_addr(rqstp);
2648 int ret;
2649 struct net *net = SVC_NET(rqstp);
2650 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2651
2652 clp = alloc_client(name);
2653 if (clp == NULL)
2654 return NULL;
2655
2656 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
2657 if (ret) {
2658 free_client(clp);
2659 return NULL;
2660 }
2661 gen_clid(clp, nn);
2662 kref_init(&clp->cl_nfsdfs.cl_ref);
2663 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
2664 clp->cl_time = get_seconds();
2665 clear_bit(0, &clp->cl_cb_slot_busy);
2666 copy_verf(clp, verf);
2667 memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
2668 clp->cl_cb_session = NULL;
2669 clp->net = net;
2670 clp->cl_nfsd_dentry = nfsd_client_mkdir(nn, &clp->cl_nfsdfs,
2671 clp->cl_clientid.cl_id - nn->clientid_base,
2672 client_files);
2673 if (!clp->cl_nfsd_dentry) {
2674 free_client(clp);
2675 return NULL;
2676 }
2677 return clp;
2678 }
2679
2680 static void
2681 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
2682 {
2683 struct rb_node **new = &(root->rb_node), *parent = NULL;
2684 struct nfs4_client *clp;
2685
2686 while (*new) {
2687 clp = rb_entry(*new, struct nfs4_client, cl_namenode);
2688 parent = *new;
2689
2690 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
2691 new = &((*new)->rb_left);
2692 else
2693 new = &((*new)->rb_right);
2694 }
2695
2696 rb_link_node(&new_clp->cl_namenode, parent, new);
2697 rb_insert_color(&new_clp->cl_namenode, root);
2698 }
2699
2700 static struct nfs4_client *
2701 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
2702 {
2703 int cmp;
2704 struct rb_node *node = root->rb_node;
2705 struct nfs4_client *clp;
2706
2707 while (node) {
2708 clp = rb_entry(node, struct nfs4_client, cl_namenode);
2709 cmp = compare_blob(&clp->cl_name, name);
2710 if (cmp > 0)
2711 node = node->rb_left;
2712 else if (cmp < 0)
2713 node = node->rb_right;
2714 else
2715 return clp;
2716 }
2717 return NULL;
2718 }
2719
2720 static void
2721 add_to_unconfirmed(struct nfs4_client *clp)
2722 {
2723 unsigned int idhashval;
2724 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2725
2726 lockdep_assert_held(&nn->client_lock);
2727
2728 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2729 add_clp_to_name_tree(clp, &nn->unconf_name_tree);
2730 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2731 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
2732 renew_client_locked(clp);
2733 }
2734
2735 static void
2736 move_to_confirmed(struct nfs4_client *clp)
2737 {
2738 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2739 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2740
2741 lockdep_assert_held(&nn->client_lock);
2742
2743 dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
2744 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
2745 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2746 add_clp_to_name_tree(clp, &nn->conf_name_tree);
2747 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2748 renew_client_locked(clp);
2749 }
2750
2751 static struct nfs4_client *
2752 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
2753 {
2754 struct nfs4_client *clp;
2755 unsigned int idhashval = clientid_hashval(clid->cl_id);
2756
2757 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
2758 if (same_clid(&clp->cl_clientid, clid)) {
2759 if ((bool)clp->cl_minorversion != sessions)
2760 return NULL;
2761 renew_client_locked(clp);
2762 return clp;
2763 }
2764 }
2765 return NULL;
2766 }
2767
2768 static struct nfs4_client *
2769 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2770 {
2771 struct list_head *tbl = nn->conf_id_hashtbl;
2772
2773 lockdep_assert_held(&nn->client_lock);
2774 return find_client_in_id_table(tbl, clid, sessions);
2775 }
2776
2777 static struct nfs4_client *
2778 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2779 {
2780 struct list_head *tbl = nn->unconf_id_hashtbl;
2781
2782 lockdep_assert_held(&nn->client_lock);
2783 return find_client_in_id_table(tbl, clid, sessions);
2784 }
2785
2786 static bool clp_used_exchangeid(struct nfs4_client *clp)
2787 {
2788 return clp->cl_exchange_flags != 0;
2789 }
2790
2791 static struct nfs4_client *
2792 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2793 {
2794 lockdep_assert_held(&nn->client_lock);
2795 return find_clp_in_name_tree(name, &nn->conf_name_tree);
2796 }
2797
2798 static struct nfs4_client *
2799 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2800 {
2801 lockdep_assert_held(&nn->client_lock);
2802 return find_clp_in_name_tree(name, &nn->unconf_name_tree);
2803 }
2804
2805 static void
2806 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
2807 {
2808 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
2809 struct sockaddr *sa = svc_addr(rqstp);
2810 u32 scopeid = rpc_get_scope_id(sa);
2811 unsigned short expected_family;
2812
2813 /* Currently, we only support tcp and tcp6 for the callback channel */
2814 if (se->se_callback_netid_len == 3 &&
2815 !memcmp(se->se_callback_netid_val, "tcp", 3))
2816 expected_family = AF_INET;
2817 else if (se->se_callback_netid_len == 4 &&
2818 !memcmp(se->se_callback_netid_val, "tcp6", 4))
2819 expected_family = AF_INET6;
2820 else
2821 goto out_err;
2822
2823 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
2824 se->se_callback_addr_len,
2825 (struct sockaddr *)&conn->cb_addr,
2826 sizeof(conn->cb_addr));
2827
2828 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
2829 goto out_err;
2830
2831 if (conn->cb_addr.ss_family == AF_INET6)
2832 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
2833
2834 conn->cb_prog = se->se_callback_prog;
2835 conn->cb_ident = se->se_callback_ident;
2836 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
2837 return;
2838 out_err:
2839 conn->cb_addr.ss_family = AF_UNSPEC;
2840 conn->cb_addrlen = 0;
2841 dprintk("NFSD: this client (clientid %08x/%08x) "
2842 "will not receive delegations\n",
2843 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
2844
2845 return;
2846 }
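
/*
 * Illustrative example: a callback spec with netid "tcp" and universal
 * address "192.0.2.53.8.1" parses to an AF_INET sockaddr for 192.0.2.53
 * with port 8 * 256 + 1 = 2049.
 */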
2847
2848 /*
2849 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
2850 */
2851 static void
2852 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
2853 {
2854 struct xdr_buf *buf = resp->xdr.buf;
2855 struct nfsd4_slot *slot = resp->cstate.slot;
2856 unsigned int base;
2857
2858 dprintk("--> %s slot %p\n", __func__, slot);
2859
2860 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
2861 slot->sl_opcnt = resp->opcnt;
2862 slot->sl_status = resp->cstate.status;
2863 free_svc_cred(&slot->sl_cred);
2864 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
2865
2866 if (!nfsd4_cache_this(resp)) {
2867 slot->sl_flags &= ~NFSD4_SLOT_CACHED;
2868 return;
2869 }
2870 slot->sl_flags |= NFSD4_SLOT_CACHED;
2871
2872 base = resp->cstate.data_offset;
2873 slot->sl_datalen = buf->len - base;
2874 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
2875 WARN(1, "%s: sessions DRC could not cache compound\n",
2876 __func__);
2877 return;
2878 }
2879
2880 /*
2881 * Encode the replay sequence operation from the slot values.
2882 * If cachethis is FALSE, encode the uncached rep error on the next
2883 * operation, which sets resp->p and increments resp->opcnt for
2884 * nfs4svc_encode_compoundres.
2885 *
2886 */
2887 static __be32
2888 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
2889 struct nfsd4_compoundres *resp)
2890 {
2891 struct nfsd4_op *op;
2892 struct nfsd4_slot *slot = resp->cstate.slot;
2893
2894 /* Encode the replayed sequence operation */
2895 op = &args->ops[resp->opcnt - 1];
2896 nfsd4_encode_operation(resp, op);
2897
2898 if (slot->sl_flags & NFSD4_SLOT_CACHED)
2899 return op->status;
2900 if (args->opcnt == 1) {
2901 /*
2902 * The original operation wasn't a solo sequence--we
2903 * always cache those--so this retry must not match the
2904 * original:
2905 */
2906 op->status = nfserr_seq_false_retry;
2907 } else {
2908 op = &args->ops[resp->opcnt++];
2909 op->status = nfserr_retry_uncached_rep;
2910 nfsd4_encode_operation(resp, op);
2911 }
2912 return op->status;
2913 }
2914
2915 /*
2916 * The sequence operation is not cached because we can use the slot and
2917 * session values.
2918 */
2919 static __be32
2920 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
2921 struct nfsd4_sequence *seq)
2922 {
2923 struct nfsd4_slot *slot = resp->cstate.slot;
2924 struct xdr_stream *xdr = &resp->xdr;
2925 __be32 *p;
2926 __be32 status;
2927
2928 dprintk("--> %s slot %p\n", __func__, slot);
2929
2930 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
2931 if (status)
2932 return status;
2933
2934 p = xdr_reserve_space(xdr, slot->sl_datalen);
2935 if (!p) {
2936 WARN_ON_ONCE(1);
2937 return nfserr_serverfault;
2938 }
2939 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
2940 xdr_commit_encode(xdr);
2941
2942 resp->opcnt = slot->sl_opcnt;
2943 return slot->sl_status;
2944 }
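
/*
 * Summary of the replay path above: once a sessions request is recognized
 * as a replay, the SEQUENCE op is re-encoded and, if the compound was
 * cached (NFSD4_SLOT_CACHED), the saved reply bytes in slot->sl_data are
 * spliced back into the XDR stream; otherwise the client gets
 * nfserr_retry_uncached_rep (or nfserr_seq_false_retry for a solo
 * SEQUENCE retry).
 */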
2945
2946 /*
2947 * Set the exchange_id flags returned by the server.
2948 */
2949 static void
2950 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
2951 {
2952 #ifdef CONFIG_NFSD_PNFS
2953 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
2954 #else
2955 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
2956 #endif
2957
2958 /* Referrals are supported, Migration is not. */
2959 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
2960
2961 /* set the wire flags to return to client. */
2962 clid->flags = new->cl_exchange_flags;
2963 }
2964
2965 static bool client_has_openowners(struct nfs4_client *clp)
2966 {
2967 struct nfs4_openowner *oo;
2968
2969 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
2970 if (!list_empty(&oo->oo_owner.so_stateids))
2971 return true;
2972 }
2973 return false;
2974 }
2975
2976 static bool client_has_state(struct nfs4_client *clp)
2977 {
2978 return client_has_openowners(clp)
2979 #ifdef CONFIG_NFSD_PNFS
2980 || !list_empty(&clp->cl_lo_states)
2981 #endif
2982 || !list_empty(&clp->cl_delegations)
2983 || !list_empty(&clp->cl_sessions)
2984 || !list_empty(&clp->async_copies);
2985 }
2986
2987 static __be32 copy_impl_id(struct nfs4_client *clp,
2988 struct nfsd4_exchange_id *exid)
2989 {
2990 if (!exid->nii_domain.data)
2991 return 0;
2992 xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
2993 if (!clp->cl_nii_domain.data)
2994 return nfserr_jukebox;
2995 xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
2996 if (!clp->cl_nii_name.data)
2997 return nfserr_jukebox;
2998 clp->cl_nii_time.tv_sec = exid->nii_time.tv_sec;
2999 clp->cl_nii_time.tv_nsec = exid->nii_time.tv_nsec;
3000 return 0;
3001 }
3002
3003 __be32
3004 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3005 union nfsd4_op_u *u)
3006 {
3007 struct nfsd4_exchange_id *exid = &u->exchange_id;
3008 struct nfs4_client *conf, *new;
3009 struct nfs4_client *unconf = NULL;
3010 __be32 status;
3011 char addr_str[INET6_ADDRSTRLEN];
3012 nfs4_verifier verf = exid->verifier;
3013 struct sockaddr *sa = svc_addr(rqstp);
3014 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
3015 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3016
3017 rpc_ntop(sa, addr_str, sizeof(addr_str));
3018 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
3019 "ip_addr=%s flags %x, spa_how %d\n",
3020 __func__, rqstp, exid, exid->clname.len, exid->clname.data,
3021 addr_str, exid->flags, exid->spa_how);
3022
3023 if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
3024 return nfserr_inval;
3025
3026 new = create_client(exid->clname, rqstp, &verf);
3027 if (new == NULL)
3028 return nfserr_jukebox;
3029 status = copy_impl_id(new, exid);
3030 if (status)
3031 goto out_nolock;
3032
3033 switch (exid->spa_how) {
3034 case SP4_MACH_CRED:
3035 exid->spo_must_enforce[0] = 0;
3036 exid->spo_must_enforce[1] = (
3037 1 << (OP_BIND_CONN_TO_SESSION - 32) |
3038 1 << (OP_EXCHANGE_ID - 32) |
3039 1 << (OP_CREATE_SESSION - 32) |
3040 1 << (OP_DESTROY_SESSION - 32) |
3041 1 << (OP_DESTROY_CLIENTID - 32));
3042
3043 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
3044 1 << (OP_OPEN_DOWNGRADE) |
3045 1 << (OP_LOCKU) |
3046 1 << (OP_DELEGRETURN));
3047
3048 exid->spo_must_allow[1] &= (
3049 1 << (OP_TEST_STATEID - 32) |
3050 1 << (OP_FREE_STATEID - 32));
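		/*
		 * spo_must_enforce/spo_must_allow are two-word bitmaps:
		 * word[0] covers op numbers 0-31 and word[1] covers 32-63,
		 * so e.g. OP_EXCHANGE_ID (42) is bit 10 of word[1].
		 */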
3051 if (!svc_rqst_integrity_protected(rqstp)) {
3052 status = nfserr_inval;
3053 goto out_nolock;
3054 }
3055 /*
3056 * Sometimes userspace doesn't give us a principal.
3057 * Which is a bug, really. Anyway, we can't enforce
3058 * MACH_CRED in that case, better to give up now:
3059 */
3060 if (!new->cl_cred.cr_principal &&
3061 !new->cl_cred.cr_raw_principal) {
3062 status = nfserr_serverfault;
3063 goto out_nolock;
3064 }
3065 new->cl_mach_cred = true;
3066 case SP4_NONE:
3067 break;
3068 default: /* checked by xdr code */
3069 WARN_ON_ONCE(1);
3070 /* fall through */
3071 case SP4_SSV:
3072 status = nfserr_encr_alg_unsupp;
3073 goto out_nolock;
3074 }
3075
3076 /* Cases below refer to rfc 5661 section 18.35.4: */
3077 spin_lock(&nn->client_lock);
3078 conf = find_confirmed_client_by_name(&exid->clname, nn);
3079 if (conf) {
3080 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
3081 bool verfs_match = same_verf(&verf, &conf->cl_verifier);
3082
3083 if (update) {
3084 if (!clp_used_exchangeid(conf)) { /* buggy client */
3085 status = nfserr_inval;
3086 goto out;
3087 }
3088 if (!nfsd4_mach_creds_match(conf, rqstp)) {
3089 status = nfserr_wrong_cred;
3090 goto out;
3091 }
3092 if (!creds_match) { /* case 9 */
3093 status = nfserr_perm;
3094 goto out;
3095 }
3096 if (!verfs_match) { /* case 8 */
3097 status = nfserr_not_same;
3098 goto out;
3099 }
3100 /* case 6 */
3101 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
3102 goto out_copy;
3103 }
3104 if (!creds_match) { /* case 3 */
3105 if (client_has_state(conf)) {
3106 status = nfserr_clid_inuse;
3107 goto out;
3108 }
3109 goto out_new;
3110 }
3111 if (verfs_match) { /* case 2 */
3112 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
3113 goto out_copy;
3114 }
3115 /* case 5, client reboot */
3116 conf = NULL;
3117 goto out_new;
3118 }
3119
3120 if (update) { /* case 7 */
3121 status = nfserr_noent;
3122 goto out;
3123 }
3124
3125 unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
3126 if (unconf) /* case 4, possible retry or client restart */
3127 unhash_client_locked(unconf);
3128
3129 /* case 1 (normal case) */
3130 out_new:
3131 if (conf) {
3132 status = mark_client_expired_locked(conf);
3133 if (status)
3134 goto out;
3135 }
3136 new->cl_minorversion = cstate->minorversion;
3137 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
3138 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
3139
3140 add_to_unconfirmed(new);
3141 swap(new, conf);
3142 out_copy:
3143 exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
3144 exid->clientid.cl_id = conf->cl_clientid.cl_id;
3145
3146 exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
3147 nfsd4_set_ex_flags(conf, exid);
3148
3149 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
3150 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
3151 status = nfs_ok;
3152
3153 out:
3154 spin_unlock(&nn->client_lock);
3155 out_nolock:
3156 if (new)
3157 expire_client(new);
3158 if (unconf)
3159 expire_client(unconf);
3160 return status;
3161 }
3162
3163 static __be32
3164 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
3165 {
3166 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
3167 slot_seqid);
3168
3169 /* The slot is in use, and no response has been sent. */
3170 if (slot_inuse) {
3171 if (seqid == slot_seqid)
3172 return nfserr_jukebox;
3173 else
3174 return nfserr_seq_misordered;
3175 }
3176 /* Note unsigned 32-bit arithmetic handles wraparound: */
3177 if (likely(seqid == slot_seqid + 1))
3178 return nfs_ok;
3179 if (seqid == slot_seqid)
3180 return nfserr_replay_cache;
3181 return nfserr_seq_misordered;
3182 }
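
/*
 * Outcomes, as implemented above:
 *
 *	slot in use, seqid == slot_seqid	-> nfserr_jukebox (retry later)
 *	slot in use, anything else		-> nfserr_seq_misordered
 *	seqid == slot_seqid + 1			-> nfs_ok (new request)
 *	seqid == slot_seqid			-> nfserr_replay_cache (replay)
 *	otherwise				-> nfserr_seq_misordered
 */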
3183
3184 /*
3185 * Cache the create session result into the create session single DRC
3186 * slot cache by saving the xdr structure. sl_seqid has been set.
3187 * Do this for solo or embedded create session operations.
3188 */
3189 static void
3190 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
3191 struct nfsd4_clid_slot *slot, __be32 nfserr)
3192 {
3193 slot->sl_status = nfserr;
3194 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
3195 }
3196
3197 static __be32
3198 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
3199 struct nfsd4_clid_slot *slot)
3200 {
3201 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
3202 return slot->sl_status;
3203 }
3204
3205 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
3206 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
3207 1 + /* MIN tag is length with zero, only length */ \
3208 3 + /* version, opcount, opcode */ \
3209 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3210 /* seqid, slotID, slotID, cache */ \
3211 4 ) * sizeof(__be32))
3212
3213 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
3214 2 + /* verifier: AUTH_NULL, length 0 */\
3215 1 + /* status */ \
3216 1 + /* MIN tag is length with zero, only length */ \
3217 3 + /* opcount, opcode, opstatus*/ \
3218 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3219 /* seqid, slotID, slotID, slotID, status */ \
3220 5 ) * sizeof(__be32))
3221
3222 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
3223 {
3224 u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
3225
3226 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
3227 return nfserr_toosmall;
3228 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
3229 return nfserr_toosmall;
3230 ca->headerpadsz = 0;
3231 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
3232 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
3233 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
3234 ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
3235 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
3236 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
3237 /*
3238 * Note that decreasing the slot size below the client's request may
3239 * make it difficult for the client to function correctly, whereas
3240 * decreasing the number of slots will (just?) affect
3241 * performance. When short on memory we therefore prefer to
3242 * decrease number of slots instead of their size. Clients that
3243 * request larger slots than they need will get poor results:
3244 * Note that we always allow at least one slot, because our
3245 * accounting is soft and provides no guarantees either way.
3246 */
3247 ca->maxreqs = nfsd4_get_drc_mem(ca, nn);
3248
3249 return nfs_ok;
3250 }
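
/*
 * Illustrative negotiation: a client asking for more slots than
 * NFSD_MAX_SLOTS_PER_SESSION is clamped to that limit, and
 * nfsd4_get_drc_mem() may lower the count further depending on available
 * DRC memory, but the result is never less than one slot.
 */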
3251
3252 /*
3253 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
3254 * These are based on similar macros in linux/sunrpc/msg_prot.h .
3255 */
3256 #define RPC_MAX_HEADER_WITH_AUTH_SYS \
3257 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
3258
3259 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
3260 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
3261
3262 #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
3263 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
3264 #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
3265 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
3266 sizeof(__be32))
3267
3268 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
3269 {
3270 ca->headerpadsz = 0;
3271
3272 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
3273 return nfserr_toosmall;
3274 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
3275 return nfserr_toosmall;
3276 ca->maxresp_cached = 0;
3277 if (ca->maxops < 2)
3278 return nfserr_toosmall;
3279
3280 return nfs_ok;
3281 }
3282
3283 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
3284 {
3285 switch (cbs->flavor) {
3286 case RPC_AUTH_NULL:
3287 case RPC_AUTH_UNIX:
3288 return nfs_ok;
3289 default:
3290 /*
3291 * GSS case: the spec doesn't allow us to return this
3292 * error. But it also doesn't allow us not to support
3293 * GSS.
3294 * I'd rather this fail hard than return some error the
3295 * client might think it can already handle:
3296 */
3297 return nfserr_encr_alg_unsupp;
3298 }
3299 }
3300
3301 __be32
3302 nfsd4_create_session(struct svc_rqst *rqstp,
3303 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3304 {
3305 struct nfsd4_create_session *cr_ses = &u->create_session;
3306 struct sockaddr *sa = svc_addr(rqstp);
3307 struct nfs4_client *conf, *unconf;
3308 struct nfs4_client *old = NULL;
3309 struct nfsd4_session *new;
3310 struct nfsd4_conn *conn;
3311 struct nfsd4_clid_slot *cs_slot = NULL;
3312 __be32 status = 0;
3313 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3314
3315 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
3316 return nfserr_inval;
3317 status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
3318 if (status)
3319 return status;
3320 status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
3321 if (status)
3322 return status;
3323 status = check_backchannel_attrs(&cr_ses->back_channel);
3324 if (status)
3325 goto out_release_drc_mem;
3326 status = nfserr_jukebox;
3327 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
3328 if (!new)
3329 goto out_release_drc_mem;
3330 conn = alloc_conn_from_crses(rqstp, cr_ses);
3331 if (!conn)
3332 goto out_free_session;
3333
3334 spin_lock(&nn->client_lock);
3335 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
3336 conf = find_confirmed_client(&cr_ses->clientid, true, nn);
3337 WARN_ON_ONCE(conf && unconf);
3338
3339 if (conf) {
3340 status = nfserr_wrong_cred;
3341 if (!nfsd4_mach_creds_match(conf, rqstp))
3342 goto out_free_conn;
3343 cs_slot = &conf->cl_cs_slot;
3344 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3345 if (status) {
3346 if (status == nfserr_replay_cache)
3347 status = nfsd4_replay_create_session(cr_ses, cs_slot);
3348 goto out_free_conn;
3349 }
3350 } else if (unconf) {
3351 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
3352 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
3353 status = nfserr_clid_inuse;
3354 goto out_free_conn;
3355 }
3356 status = nfserr_wrong_cred;
3357 if (!nfsd4_mach_creds_match(unconf, rqstp))
3358 goto out_free_conn;
3359 cs_slot = &unconf->cl_cs_slot;
3360 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3361 if (status) {
3362 /* an unconfirmed replay returns misordered */
3363 status = nfserr_seq_misordered;
3364 goto out_free_conn;
3365 }
3366 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3367 if (old) {
3368 status = mark_client_expired_locked(old);
3369 if (status) {
3370 old = NULL;
3371 goto out_free_conn;
3372 }
3373 }
3374 move_to_confirmed(unconf);
3375 conf = unconf;
3376 } else {
3377 status = nfserr_stale_clientid;
3378 goto out_free_conn;
3379 }
3380 status = nfs_ok;
3381 /* Persistent sessions are not supported */
3382 cr_ses->flags &= ~SESSION4_PERSIST;
3383 /* Upshifting from TCP to RDMA is not supported */
3384 cr_ses->flags &= ~SESSION4_RDMA;
3385
3386 init_session(rqstp, new, conf, cr_ses);
3387 nfsd4_get_session_locked(new);
3388
3389 memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
3390 NFS4_MAX_SESSIONID_LEN);
3391 cs_slot->sl_seqid++;
3392 cr_ses->seqid = cs_slot->sl_seqid;
3393
3394 /* cache solo and embedded create sessions under the client_lock */
3395 nfsd4_cache_create_session(cr_ses, cs_slot, status);
3396 spin_unlock(&nn->client_lock);
3397 /* init connection and backchannel */
3398 nfsd4_init_conn(rqstp, conn, new);
3399 nfsd4_put_session(new);
3400 if (old)
3401 expire_client(old);
3402 return status;
3403 out_free_conn:
3404 spin_unlock(&nn->client_lock);
3405 free_conn(conn);
3406 if (old)
3407 expire_client(old);
3408 out_free_session:
3409 __free_session(new);
3410 out_release_drc_mem:
3411 nfsd4_put_drc_mem(&cr_ses->fore_channel);
3412 return status;
3413 }
3414
3415 static __be32 nfsd4_map_bcts_dir(u32 *dir)
3416 {
3417 switch (*dir) {
3418 case NFS4_CDFC4_FORE:
3419 case NFS4_CDFC4_BACK:
3420 return nfs_ok;
3421 case NFS4_CDFC4_FORE_OR_BOTH:
3422 case NFS4_CDFC4_BACK_OR_BOTH:
3423 *dir = NFS4_CDFC4_BOTH;
3424 return nfs_ok;
3425 	}
3426 return nfserr_inval;
3427 }
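
/*
 * Mapping summary: FORE and BACK are accepted unchanged, FORE_OR_BOTH and
 * BACK_OR_BOTH are resolved to BOTH, and anything else is rejected with
 * nfserr_inval.
 */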
3428
3429 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
3430 struct nfsd4_compound_state *cstate,
3431 union nfsd4_op_u *u)
3432 {
3433 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
3434 struct nfsd4_session *session = cstate->session;
3435 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3436 __be32 status;
3437
3438 status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
3439 if (status)
3440 return status;
3441 spin_lock(&nn->client_lock);
3442 session->se_cb_prog = bc->bc_cb_program;
3443 session->se_cb_sec = bc->bc_cb_sec;
3444 spin_unlock(&nn->client_lock);
3445
3446 nfsd4_probe_callback(session->se_client);
3447
3448 return nfs_ok;
3449 }
3450
3451 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
3452 struct nfsd4_compound_state *cstate,
3453 union nfsd4_op_u *u)
3454 {
3455 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
3456 __be32 status;
3457 struct nfsd4_conn *conn;
3458 struct nfsd4_session *session;
3459 struct net *net = SVC_NET(rqstp);
3460 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3461
3462 if (!nfsd4_last_compound_op(rqstp))
3463 return nfserr_not_only_op;
3464 spin_lock(&nn->client_lock);
3465 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
3466 spin_unlock(&nn->client_lock);
3467 if (!session)
3468 goto out_no_session;
3469 status = nfserr_wrong_cred;
3470 if (!nfsd4_mach_creds_match(session->se_client, rqstp))
3471 goto out;
3472 status = nfsd4_map_bcts_dir(&bcts->dir);
3473 if (status)
3474 goto out;
3475 conn = alloc_conn(rqstp, bcts->dir);
3476 status = nfserr_jukebox;
3477 if (!conn)
3478 goto out;
3479 nfsd4_init_conn(rqstp, conn, session);
3480 status = nfs_ok;
3481 out:
3482 nfsd4_put_session(session);
3483 out_no_session:
3484 return status;
3485 }
3486
3487 static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
3488 {
3489 if (!cstate->session)
3490 return false;
3491 return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
3492 }
3493
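/*
 * DESTROY_SESSION.  If the compound is running over the very session it
 * asks us to destroy, it must be the last op in the compound, and we
 * already hold one session reference taken in nfsd4_sequence();
 * ref_held_by_me accounts for that extra reference when deciding whether
 * the session can be marked dead.
 */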
3494 __be32
3495 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
3496 union nfsd4_op_u *u)
3497 {
3498 struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
3499 struct nfsd4_session *ses;
3500 __be32 status;
3501 int ref_held_by_me = 0;
3502 struct net *net = SVC_NET(r);
3503 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3504
3505 status = nfserr_not_only_op;
3506 if (nfsd4_compound_in_session(cstate, sessionid)) {
3507 if (!nfsd4_last_compound_op(r))
3508 goto out;
3509 ref_held_by_me++;
3510 }
3511 dump_sessionid(__func__, sessionid);
3512 spin_lock(&nn->client_lock);
3513 ses = find_in_sessionid_hashtbl(sessionid, net, &status);
3514 if (!ses)
3515 goto out_client_lock;
3516 status = nfserr_wrong_cred;
3517 if (!nfsd4_mach_creds_match(ses->se_client, r))
3518 goto out_put_session;
3519 status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
3520 if (status)
3521 goto out_put_session;
3522 unhash_session(ses);
3523 spin_unlock(&nn->client_lock);
3524
3525 nfsd4_probe_callback_sync(ses->se_client);
3526
3527 spin_lock(&nn->client_lock);
3528 status = nfs_ok;
3529 out_put_session:
3530 nfsd4_put_session_locked(ses);
3531 out_client_lock:
3532 spin_unlock(&nn->client_lock);
3533 out:
3534 return status;
3535 }
3536
3537 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
3538 {
3539 struct nfsd4_conn *c;
3540
3541 list_for_each_entry(c, &s->se_conns, cn_persession) {
3542 if (c->cn_xprt == xpt) {
3543 return c;
3544 }
3545 }
3546 return NULL;
3547 }
3548
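/*
 * Implicitly bind the connection a SEQUENCE arrived on to its session.
 * If the connection is already bound, the new conn is simply freed; if the
 * client asked for machine-credential protection (cl_mach_cred), implicit
 * binding is refused with nfserr_conn_not_bound_to_session, and the client
 * presumably has to use an explicit BIND_CONN_TO_SESSION instead.
 */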
3549 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
3550 {
3551 struct nfs4_client *clp = ses->se_client;
3552 struct nfsd4_conn *c;
3553 __be32 status = nfs_ok;
3554 int ret;
3555
3556 spin_lock(&clp->cl_lock);
3557 c = __nfsd4_find_conn(new->cn_xprt, ses);
3558 if (c)
3559 goto out_free;
3560 status = nfserr_conn_not_bound_to_session;
3561 if (clp->cl_mach_cred)
3562 goto out_free;
3563 __nfsd4_hash_conn(new, ses);
3564 spin_unlock(&clp->cl_lock);
3565 ret = nfsd4_register_conn(new);
3566 if (ret)
3567 /* oops; xprt is already down: */
3568 nfsd4_conn_lost(&new->cn_xpt_user);
3569 return nfs_ok;
3570 out_free:
3571 spin_unlock(&clp->cl_lock);
3572 free_conn(new);
3573 return status;
3574 }
3575
3576 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
3577 {
3578 struct nfsd4_compoundargs *args = rqstp->rq_argp;
3579
3580 return args->opcnt > session->se_fchannel.maxops;
3581 }
3582
3583 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
3584 struct nfsd4_session *session)
3585 {
3586 struct xdr_buf *xb = &rqstp->rq_arg;
3587
3588 return xb->len > session->se_fchannel.maxreq_sz;
3589 }
3590
3591 static bool replay_matches_cache(struct svc_rqst *rqstp,
3592 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
3593 {
3594 struct nfsd4_compoundargs *argp = rqstp->rq_argp;
3595
3596 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
3597 (bool)seq->cachethis)
3598 return false;
3599 /*
3600 * If there's an error then the reply can have fewer ops than
3601 * the call.
3602 */
3603 if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
3604 return false;
3605 /*
3606 * But if we cached a reply with *more* ops than the call you're
3607 * sending us now, then this new call is clearly not really a
3608 * replay of the old one:
3609 */
3610 if (slot->sl_opcnt > argp->opcnt)
3611 return false;
3612 /* This is the only check explicitly called by spec: */
3613 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
3614 return false;
3615 /*
3616 * There may be more comparisons we could actually do, but the
3617 * spec doesn't require us to catch every case where the calls
3618 * don't match (that would require caching the call as well as
3619 * the reply), so we don't bother.
3620 */
3621 return true;
3622 }
3623
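/*
 * SEQUENCE: enforce the session's fore channel limits, then use the slot
 * table to detect replays.  Judging by how check_slot_seqid()'s result is
 * handled below, a seqid equal to the slot's sl_seqid is a replay served
 * from the reply cache, the next seqid starts a new request, and anything
 * else is misordered.  For example, with sl_seqid == 7: seqid 7 replays
 * the cached reply, seqid 8 is a new request, and seqid 6 or 9 gets
 * NFS4ERR_SEQ_MISORDERED.
 */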
3624 __be32
3625 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3626 union nfsd4_op_u *u)
3627 {
3628 struct nfsd4_sequence *seq = &u->sequence;
3629 struct nfsd4_compoundres *resp = rqstp->rq_resp;
3630 struct xdr_stream *xdr = &resp->xdr;
3631 struct nfsd4_session *session;
3632 struct nfs4_client *clp;
3633 struct nfsd4_slot *slot;
3634 struct nfsd4_conn *conn;
3635 __be32 status;
3636 int buflen;
3637 struct net *net = SVC_NET(rqstp);
3638 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3639
3640 if (resp->opcnt != 1)
3641 return nfserr_sequence_pos;
3642
3643 /*
3644 * Will be either used or freed by nfsd4_sequence_check_conn
3645 * below.
3646 */
3647 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
3648 if (!conn)
3649 return nfserr_jukebox;
3650
3651 spin_lock(&nn->client_lock);
3652 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
3653 if (!session)
3654 goto out_no_session;
3655 clp = session->se_client;
3656
3657 status = nfserr_too_many_ops;
3658 if (nfsd4_session_too_many_ops(rqstp, session))
3659 goto out_put_session;
3660
3661 status = nfserr_req_too_big;
3662 if (nfsd4_request_too_big(rqstp, session))
3663 goto out_put_session;
3664
3665 status = nfserr_badslot;
3666 if (seq->slotid >= session->se_fchannel.maxreqs)
3667 goto out_put_session;
3668
3669 slot = session->se_slots[seq->slotid];
3670 dprintk("%s: slotid %d\n", __func__, seq->slotid);
3671
3672 /* We do not negotiate the number of slots yet, so set the
3673 * maxslots to the session maxreqs which is used to encode
3674 * sr_highest_slotid and the sr_target_slotid to maxslots */
3675 seq->maxslots = session->se_fchannel.maxreqs;
3676
3677 status = check_slot_seqid(seq->seqid, slot->sl_seqid,
3678 slot->sl_flags & NFSD4_SLOT_INUSE);
3679 if (status == nfserr_replay_cache) {
3680 status = nfserr_seq_misordered;
3681 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
3682 goto out_put_session;
3683 status = nfserr_seq_false_retry;
3684 if (!replay_matches_cache(rqstp, seq, slot))
3685 goto out_put_session;
3686 cstate->slot = slot;
3687 cstate->session = session;
3688 cstate->clp = clp;
3689 /* Return the cached reply status and set cstate->status
3690 * for nfsd4_proc_compound processing */
3691 status = nfsd4_replay_cache_entry(resp, seq);
3692 cstate->status = nfserr_replay_cache;
3693 goto out;
3694 }
3695 if (status)
3696 goto out_put_session;
3697
3698 status = nfsd4_sequence_check_conn(conn, session);
3699 conn = NULL;
3700 if (status)
3701 goto out_put_session;
3702
3703 buflen = (seq->cachethis) ?
3704 session->se_fchannel.maxresp_cached :
3705 session->se_fchannel.maxresp_sz;
3706 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
3707 nfserr_rep_too_big;
3708 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
3709 goto out_put_session;
3710 svc_reserve(rqstp, buflen);
3711
3712 status = nfs_ok;
3713 /* Success! bump slot seqid */
3714 slot->sl_seqid = seq->seqid;
3715 slot->sl_flags |= NFSD4_SLOT_INUSE;
3716 if (seq->cachethis)
3717 slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
3718 else
3719 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
3720
3721 cstate->slot = slot;
3722 cstate->session = session;
3723 cstate->clp = clp;
3724
3725 out:
3726 switch (clp->cl_cb_state) {
3727 case NFSD4_CB_DOWN:
3728 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
3729 break;
3730 case NFSD4_CB_FAULT:
3731 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
3732 break;
3733 default:
3734 seq->status_flags = 0;
3735 }
3736 if (!list_empty(&clp->cl_revoked))
3737 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
3738 out_no_session:
3739 if (conn)
3740 free_conn(conn);
3741 spin_unlock(&nn->client_lock);
3742 return status;
3743 out_put_session:
3744 nfsd4_put_session_locked(session);
3745 goto out_no_session;
3746 }
3747
3748 void
3749 nfsd4_sequence_done(struct nfsd4_compoundres *resp)
3750 {
3751 struct nfsd4_compound_state *cs = &resp->cstate;
3752
3753 if (nfsd4_has_session(cs)) {
3754 if (cs->status != nfserr_replay_cache) {
3755 nfsd4_store_cache_entry(resp);
3756 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
3757 }
3758 /* Drop session reference that was taken in nfsd4_sequence() */
3759 nfsd4_put_session(cs->session);
3760 } else if (cs->clp)
3761 put_client_renew(cs->clp);
3762 }
3763
3764 __be32
3765 nfsd4_destroy_clientid(struct svc_rqst *rqstp,
3766 struct nfsd4_compound_state *cstate,
3767 union nfsd4_op_u *u)
3768 {
3769 struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
3770 struct nfs4_client *conf, *unconf;
3771 struct nfs4_client *clp = NULL;
3772 __be32 status = 0;
3773 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3774
3775 spin_lock(&nn->client_lock);
3776 unconf = find_unconfirmed_client(&dc->clientid, true, nn);
3777 conf = find_confirmed_client(&dc->clientid, true, nn);
3778 WARN_ON_ONCE(conf && unconf);
3779
3780 if (conf) {
3781 if (client_has_state(conf)) {
3782 status = nfserr_clientid_busy;
3783 goto out;
3784 }
3785 status = mark_client_expired_locked(conf);
3786 if (status)
3787 goto out;
3788 clp = conf;
3789 } else if (unconf)
3790 clp = unconf;
3791 else {
3792 status = nfserr_stale_clientid;
3793 goto out;
3794 }
3795 if (!nfsd4_mach_creds_match(clp, rqstp)) {
3796 clp = NULL;
3797 status = nfserr_wrong_cred;
3798 goto out;
3799 }
3800 unhash_client_locked(clp);
3801 out:
3802 spin_unlock(&nn->client_lock);
3803 if (clp)
3804 expire_client(clp);
3805 return status;
3806 }
3807
3808 __be32
3809 nfsd4_reclaim_complete(struct svc_rqst *rqstp,
3810 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3811 {
3812 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
3813 __be32 status = 0;
3814
3815 if (rc->rca_one_fs) {
3816 if (!cstate->current_fh.fh_dentry)
3817 return nfserr_nofilehandle;
3818 /*
3819 * We don't take advantage of the rca_one_fs case.
3820 * That's OK, it's optional, we can safely ignore it.
3821 */
3822 return nfs_ok;
3823 }
3824
3825 status = nfserr_complete_already;
3826 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
3827 &cstate->session->se_client->cl_flags))
3828 goto out;
3829
3830 status = nfserr_stale_clientid;
3831 if (is_client_expired(cstate->session->se_client))
3832 /*
3833 * The following error isn't really legal.
3834 * But we only get here if the client has just explicitly
3835 * destroyed itself. Surely it no longer cares what
3836 * error it gets back on an operation for the dead
3837 * client.
3838 */
3839 goto out;
3840
3841 status = nfs_ok;
3842 nfsd4_client_record_create(cstate->session->se_client);
3843 inc_reclaim_complete(cstate->session->se_client);
3844 out:
3845 return status;
3846 }
3847
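/*
 * SETCLIENTID (NFSv4.0 only).  The case numbers in the comments below are
 * those of RFC 3530 section 14.2.33: a confirmed client with state that
 * used EXCHANGE_ID, or whose principal differs from the caller's, gets
 * CLID_INUSE (case 0); a matching name and verifier is treated as a
 * probable callback update (case 1); otherwise we just create a fresh
 * unconfirmed record and drop any older unconfirmed one of the same name.
 */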
3848 __be32
3849 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3850 union nfsd4_op_u *u)
3851 {
3852 struct nfsd4_setclientid *setclid = &u->setclientid;
3853 struct xdr_netobj clname = setclid->se_name;
3854 nfs4_verifier clverifier = setclid->se_verf;
3855 struct nfs4_client *conf, *new;
3856 struct nfs4_client *unconf = NULL;
3857 __be32 status;
3858 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3859
3860 new = create_client(clname, rqstp, &clverifier);
3861 if (new == NULL)
3862 return nfserr_jukebox;
3863 /* Cases below refer to rfc 3530 section 14.2.33: */
3864 spin_lock(&nn->client_lock);
3865 conf = find_confirmed_client_by_name(&clname, nn);
3866 if (conf && client_has_state(conf)) {
3867 /* case 0: */
3868 status = nfserr_clid_inuse;
3869 if (clp_used_exchangeid(conf))
3870 goto out;
3871 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
3872 char addr_str[INET6_ADDRSTRLEN];
3873 rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
3874 sizeof(addr_str));
3875 dprintk("NFSD: setclientid: string in use by client "
3876 "at %s\n", addr_str);
3877 goto out;
3878 }
3879 }
3880 unconf = find_unconfirmed_client_by_name(&clname, nn);
3881 if (unconf)
3882 unhash_client_locked(unconf);
3883 if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
3884 /* case 1: probable callback update */
3885 copy_clid(new, conf);
3886 gen_confirm(new, nn);
3887 } else /* case 4 (new client) or cases 2, 3 (client reboot): */
3888 ;
3889 new->cl_minorversion = 0;
3890 gen_callback(new, setclid, rqstp);
3891 add_to_unconfirmed(new);
3892 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
3893 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
3894 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
3895 new = NULL;
3896 status = nfs_ok;
3897 out:
3898 spin_unlock(&nn->client_lock);
3899 if (new)
3900 free_client(new);
3901 if (unconf)
3902 expire_client(unconf);
3903 return status;
3904 }
3905
3906
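/*
 * SETCLIENTID_CONFIRM.  The cases below are those of RFC 3530 section
 * 14.2.34: a confirm verifier that does not match the unconfirmed record
 * is either a probable retransmit (case 2) or a stale clientid (case 4);
 * a match on the unconfirmed record is either a callback update of an
 * existing confirmed client (case 1) or the normal confirmation of a new
 * or rebooted client (case 3).
 */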
3907 __be32
3908 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
3909 struct nfsd4_compound_state *cstate,
3910 union nfsd4_op_u *u)
3911 {
3912 struct nfsd4_setclientid_confirm *setclientid_confirm =
3913 &u->setclientid_confirm;
3914 struct nfs4_client *conf, *unconf;
3915 struct nfs4_client *old = NULL;
3916 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
3917 clientid_t * clid = &setclientid_confirm->sc_clientid;
3918 __be32 status;
3919 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3920
3921 if (STALE_CLIENTID(clid, nn))
3922 return nfserr_stale_clientid;
3923
3924 spin_lock(&nn->client_lock);
3925 conf = find_confirmed_client(clid, false, nn);
3926 unconf = find_unconfirmed_client(clid, false, nn);
3927 /*
3928 * We try hard to give out unique clientid's, so if we get an
3929 * attempt to confirm the same clientid with a different cred,
3930 * the client may be buggy; this should never happen.
3931 *
3932 * Nevertheless, RFC 7530 recommends INUSE for this case:
3933 */
3934 status = nfserr_clid_inuse;
3935 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
3936 goto out;
3937 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
3938 goto out;
3939 /* cases below refer to rfc 3530 section 14.2.34: */
3940 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
3941 if (conf && same_verf(&confirm, &conf->cl_confirm)) {
3942 /* case 2: probable retransmit */
3943 status = nfs_ok;
3944 } else /* case 4: client hasn't noticed we rebooted yet? */
3945 status = nfserr_stale_clientid;
3946 goto out;
3947 }
3948 status = nfs_ok;
3949 if (conf) { /* case 1: callback update */
3950 old = unconf;
3951 unhash_client_locked(old);
3952 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
3953 } else { /* case 3: normal case; new or rebooted client */
3954 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3955 if (old) {
3956 status = nfserr_clid_inuse;
3957 if (client_has_state(old)
3958 && !same_creds(&unconf->cl_cred,
3959 &old->cl_cred)) {
3960 old = NULL;
3961 goto out;
3962 }
3963 status = mark_client_expired_locked(old);
3964 if (status) {
3965 old = NULL;
3966 goto out;
3967 }
3968 }
3969 move_to_confirmed(unconf);
3970 conf = unconf;
3971 }
3972 get_client_locked(conf);
3973 spin_unlock(&nn->client_lock);
3974 nfsd4_probe_callback(conf);
3975 spin_lock(&nn->client_lock);
3976 put_client_renew_locked(conf);
3977 out:
3978 spin_unlock(&nn->client_lock);
3979 if (old)
3980 expire_client(old);
3981 return status;
3982 }
3983
3984 static struct nfs4_file *nfsd4_alloc_file(void)
3985 {
3986 return kmem_cache_alloc(file_slab, GFP_KERNEL);
3987 }
3988
3989 /* OPEN Share state helper functions */
3990 static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
3991 struct nfs4_file *fp)
3992 {
3993 lockdep_assert_held(&state_lock);
3994
3995 refcount_set(&fp->fi_ref, 1);
3996 spin_lock_init(&fp->fi_lock);
3997 INIT_LIST_HEAD(&fp->fi_stateids);
3998 INIT_LIST_HEAD(&fp->fi_delegations);
3999 INIT_LIST_HEAD(&fp->fi_clnt_odstate);
4000 fh_copy_shallow(&fp->fi_fhandle, fh);
4001 fp->fi_deleg_file = NULL;
4002 fp->fi_had_conflict = false;
4003 fp->fi_share_deny = 0;
4004 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
4005 memset(fp->fi_access, 0, sizeof(fp->fi_access));
4006 #ifdef CONFIG_NFSD_PNFS
4007 INIT_LIST_HEAD(&fp->fi_lo_states);
4008 atomic_set(&fp->fi_lo_recalls, 0);
4009 #endif
4010 hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
4011 }
4012
4013 void
4014 nfsd4_free_slabs(void)
4015 {
4016 kmem_cache_destroy(client_slab);
4017 kmem_cache_destroy(openowner_slab);
4018 kmem_cache_destroy(lockowner_slab);
4019 kmem_cache_destroy(file_slab);
4020 kmem_cache_destroy(stateid_slab);
4021 kmem_cache_destroy(deleg_slab);
4022 kmem_cache_destroy(odstate_slab);
4023 }
4024
4025 int
4026 nfsd4_init_slabs(void)
4027 {
4028 client_slab = kmem_cache_create("nfsd4_clients",
4029 sizeof(struct nfs4_client), 0, 0, NULL);
4030 if (client_slab == NULL)
4031 goto out;
4032 openowner_slab = kmem_cache_create("nfsd4_openowners",
4033 sizeof(struct nfs4_openowner), 0, 0, NULL);
4034 if (openowner_slab == NULL)
4035 goto out_free_client_slab;
4036 lockowner_slab = kmem_cache_create("nfsd4_lockowners",
4037 sizeof(struct nfs4_lockowner), 0, 0, NULL);
4038 if (lockowner_slab == NULL)
4039 goto out_free_openowner_slab;
4040 file_slab = kmem_cache_create("nfsd4_files",
4041 sizeof(struct nfs4_file), 0, 0, NULL);
4042 if (file_slab == NULL)
4043 goto out_free_lockowner_slab;
4044 stateid_slab = kmem_cache_create("nfsd4_stateids",
4045 sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
4046 if (stateid_slab == NULL)
4047 goto out_free_file_slab;
4048 deleg_slab = kmem_cache_create("nfsd4_delegations",
4049 sizeof(struct nfs4_delegation), 0, 0, NULL);
4050 if (deleg_slab == NULL)
4051 goto out_free_stateid_slab;
4052 odstate_slab = kmem_cache_create("nfsd4_odstate",
4053 sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
4054 if (odstate_slab == NULL)
4055 goto out_free_deleg_slab;
4056 return 0;
4057
4058 out_free_deleg_slab:
4059 kmem_cache_destroy(deleg_slab);
4060 out_free_stateid_slab:
4061 kmem_cache_destroy(stateid_slab);
4062 out_free_file_slab:
4063 kmem_cache_destroy(file_slab);
4064 out_free_lockowner_slab:
4065 kmem_cache_destroy(lockowner_slab);
4066 out_free_openowner_slab:
4067 kmem_cache_destroy(openowner_slab);
4068 out_free_client_slab:
4069 kmem_cache_destroy(client_slab);
4070 out:
4071 dprintk("nfsd4: out of memory while initializing nfsv4\n");
4072 return -ENOMEM;
4073 }
4074
4075 static void init_nfs4_replay(struct nfs4_replay *rp)
4076 {
4077 rp->rp_status = nfserr_serverfault;
4078 rp->rp_buflen = 0;
4079 rp->rp_buf = rp->rp_ibuf;
4080 mutex_init(&rp->rp_mutex);
4081 }
4082
4083 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
4084 struct nfs4_stateowner *so)
4085 {
4086 if (!nfsd4_has_session(cstate)) {
4087 mutex_lock(&so->so_replay.rp_mutex);
4088 cstate->replay_owner = nfs4_get_stateowner(so);
4089 }
4090 }
4091
4092 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
4093 {
4094 struct nfs4_stateowner *so = cstate->replay_owner;
4095
4096 if (so != NULL) {
4097 cstate->replay_owner = NULL;
4098 mutex_unlock(&so->so_replay.rp_mutex);
4099 nfs4_put_stateowner(so);
4100 }
4101 }
4102
4103 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
4104 {
4105 struct nfs4_stateowner *sop;
4106
4107 sop = kmem_cache_alloc(slab, GFP_KERNEL);
4108 if (!sop)
4109 return NULL;
4110
4111 xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
4112 if (!sop->so_owner.data) {
4113 kmem_cache_free(slab, sop);
4114 return NULL;
4115 }
4116
4117 INIT_LIST_HEAD(&sop->so_stateids);
4118 sop->so_client = clp;
4119 init_nfs4_replay(&sop->so_replay);
4120 atomic_set(&sop->so_count, 1);
4121 return sop;
4122 }
4123
4124 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
4125 {
4126 lockdep_assert_held(&clp->cl_lock);
4127
4128 list_add(&oo->oo_owner.so_strhash,
4129 &clp->cl_ownerstr_hashtbl[strhashval]);
4130 list_add(&oo->oo_perclient, &clp->cl_openowners);
4131 }
4132
4133 static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
4134 {
4135 unhash_openowner_locked(openowner(so));
4136 }
4137
4138 static void nfs4_free_openowner(struct nfs4_stateowner *so)
4139 {
4140 struct nfs4_openowner *oo = openowner(so);
4141
4142 kmem_cache_free(openowner_slab, oo);
4143 }
4144
4145 static const struct nfs4_stateowner_operations openowner_ops = {
4146 .so_unhash = nfs4_unhash_openowner,
4147 .so_free = nfs4_free_openowner,
4148 };
4149
4150 static struct nfs4_ol_stateid *
4151 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4152 {
4153 struct nfs4_ol_stateid *local, *ret = NULL;
4154 struct nfs4_openowner *oo = open->op_openowner;
4155
4156 lockdep_assert_held(&fp->fi_lock);
4157
4158 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
4159 /* ignore lock owners */
4160 if (local->st_stateowner->so_is_open_owner == 0)
4161 continue;
4162 if (local->st_stateowner != &oo->oo_owner)
4163 continue;
4164 if (local->st_stid.sc_type == NFS4_OPEN_STID) {
4165 ret = local;
4166 refcount_inc(&ret->st_stid.sc_count);
4167 break;
4168 }
4169 }
4170 return ret;
4171 }
4172
4173 static __be32
4174 nfsd4_verify_open_stid(struct nfs4_stid *s)
4175 {
4176 __be32 ret = nfs_ok;
4177
4178 switch (s->sc_type) {
4179 default:
4180 break;
4181 case 0:
4182 case NFS4_CLOSED_STID:
4183 case NFS4_CLOSED_DELEG_STID:
4184 ret = nfserr_bad_stateid;
4185 break;
4186 case NFS4_REVOKED_DELEG_STID:
4187 ret = nfserr_deleg_revoked;
4188 }
4189 return ret;
4190 }
4191
4192 /* Lock the stateid st_mutex, and deal with races with CLOSE */
4193 static __be32
4194 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
4195 {
4196 __be32 ret;
4197
4198 mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
4199 ret = nfsd4_verify_open_stid(&stp->st_stid);
4200 if (ret != nfs_ok)
4201 mutex_unlock(&stp->st_mutex);
4202 return ret;
4203 }
4204
4205 static struct nfs4_ol_stateid *
4206 nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4207 {
4208 struct nfs4_ol_stateid *stp;
4209 for (;;) {
4210 spin_lock(&fp->fi_lock);
4211 stp = nfsd4_find_existing_open(fp, open);
4212 spin_unlock(&fp->fi_lock);
4213 if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
4214 break;
4215 nfs4_put_stid(&stp->st_stid);
4216 }
4217 return stp;
4218 }
4219
4220 static struct nfs4_openowner *
4221 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
4222 struct nfsd4_compound_state *cstate)
4223 {
4224 struct nfs4_client *clp = cstate->clp;
4225 struct nfs4_openowner *oo, *ret;
4226
4227 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
4228 if (!oo)
4229 return NULL;
4230 oo->oo_owner.so_ops = &openowner_ops;
4231 oo->oo_owner.so_is_open_owner = 1;
4232 oo->oo_owner.so_seqid = open->op_seqid;
4233 oo->oo_flags = 0;
4234 if (nfsd4_has_session(cstate))
4235 oo->oo_flags |= NFS4_OO_CONFIRMED;
4236 oo->oo_time = 0;
4237 oo->oo_last_closed_stid = NULL;
4238 INIT_LIST_HEAD(&oo->oo_close_lru);
4239 spin_lock(&clp->cl_lock);
4240 ret = find_openstateowner_str_locked(strhashval, open, clp);
4241 if (ret == NULL) {
4242 hash_openowner(oo, clp, strhashval);
4243 ret = oo;
4244 } else
4245 nfs4_free_stateowner(&oo->oo_owner);
4246
4247 spin_unlock(&clp->cl_lock);
4248 return ret;
4249 }
4250
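/*
 * Hash the preallocated open stateid for this (openowner, file) pair, or
 * return an existing one if another thread got there first.  The new
 * stateid's st_mutex is taken before it becomes visible; if we instead
 * find an existing stateid that turns out to have been CLOSEd by the time
 * we can lock it, we drop it and retry the lookup.
 */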
4251 static struct nfs4_ol_stateid *
4252 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
4253 {
4254
4255 struct nfs4_openowner *oo = open->op_openowner;
4256 struct nfs4_ol_stateid *retstp = NULL;
4257 struct nfs4_ol_stateid *stp;
4258
4259 stp = open->op_stp;
4260 /* We are moving these outside of the spinlocks to avoid the warnings */
4261 mutex_init(&stp->st_mutex);
4262 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
4263
4264 retry:
4265 spin_lock(&oo->oo_owner.so_client->cl_lock);
4266 spin_lock(&fp->fi_lock);
4267
4268 retstp = nfsd4_find_existing_open(fp, open);
4269 if (retstp)
4270 goto out_unlock;
4271
4272 open->op_stp = NULL;
4273 refcount_inc(&stp->st_stid.sc_count);
4274 stp->st_stid.sc_type = NFS4_OPEN_STID;
4275 INIT_LIST_HEAD(&stp->st_locks);
4276 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
4277 get_nfs4_file(fp);
4278 stp->st_stid.sc_file = fp;
4279 stp->st_access_bmap = 0;
4280 stp->st_deny_bmap = 0;
4281 stp->st_openstp = NULL;
4282 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
4283 list_add(&stp->st_perfile, &fp->fi_stateids);
4284
4285 out_unlock:
4286 spin_unlock(&fp->fi_lock);
4287 spin_unlock(&oo->oo_owner.so_client->cl_lock);
4288 if (retstp) {
4289 /* Handle races with CLOSE */
4290 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
4291 nfs4_put_stid(&retstp->st_stid);
4292 goto retry;
4293 }
4294 /* To keep mutex tracking happy */
4295 mutex_unlock(&stp->st_mutex);
4296 stp = retstp;
4297 }
4298 return stp;
4299 }
4300
4301 /*
4302 * In the 4.0 case we need to keep the owners around a little while to handle
4303 * CLOSE replay. We still do need to release any file access that is held by
4304 * them before returning however.
4305 */
4306 static void
4307 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
4308 {
4309 struct nfs4_ol_stateid *last;
4310 struct nfs4_openowner *oo = openowner(s->st_stateowner);
4311 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
4312 nfsd_net_id);
4313
4314 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
4315
4316 /*
4317 * We know that we hold one reference via nfsd4_close, and another
4318 * "persistent" reference for the client. If the refcount is higher
4319 * than 2, then there are still calls in progress that are using this
4320 * stateid. We can't put the sc_file reference until they are finished.
4321 * Wait for the refcount to drop to 2. Since it has been unhashed,
4322 * there should be no danger of the refcount going back up again at
4323 * this point.
4324 */
4325 wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
4326
4327 release_all_access(s);
4328 if (s->st_stid.sc_file) {
4329 put_nfs4_file(s->st_stid.sc_file);
4330 s->st_stid.sc_file = NULL;
4331 }
4332
4333 spin_lock(&nn->client_lock);
4334 last = oo->oo_last_closed_stid;
4335 oo->oo_last_closed_stid = s;
4336 list_move_tail(&oo->oo_close_lru, &nn->close_lru);
4337 oo->oo_time = get_seconds();
4338 spin_unlock(&nn->client_lock);
4339 if (last)
4340 nfs4_put_stid(&last->st_stid);
4341 }
4342
4343 /* search file_hashtbl[] for file */
4344 static struct nfs4_file *
4345 find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
4346 {
4347 struct nfs4_file *fp;
4348
4349 hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
4350 if (fh_match(&fp->fi_fhandle, fh)) {
4351 if (refcount_inc_not_zero(&fp->fi_ref))
4352 return fp;
4353 }
4354 }
4355 return NULL;
4356 }
4357
4358 struct nfs4_file *
4359 find_file(struct knfsd_fh *fh)
4360 {
4361 struct nfs4_file *fp;
4362 unsigned int hashval = file_hashval(fh);
4363
4364 rcu_read_lock();
4365 fp = find_file_locked(fh, hashval);
4366 rcu_read_unlock();
4367 return fp;
4368 }
4369
4370 static struct nfs4_file *
4371 find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
4372 {
4373 struct nfs4_file *fp;
4374 unsigned int hashval = file_hashval(fh);
4375
4376 rcu_read_lock();
4377 fp = find_file_locked(fh, hashval);
4378 rcu_read_unlock();
4379 if (fp)
4380 return fp;
4381
4382 spin_lock(&state_lock);
4383 fp = find_file_locked(fh, hashval);
4384 if (likely(fp == NULL)) {
4385 nfsd4_init_file(fh, hashval, new);
4386 fp = new;
4387 }
4388 spin_unlock(&state_lock);
4389
4390 return fp;
4391 }
4392
4393 /*
4394 * Called to check deny when READ with all zero stateid or
4395 * WRITE with all zero or all one stateid
4396 */
4397 static __be32
4398 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
4399 {
4400 struct nfs4_file *fp;
4401 __be32 ret = nfs_ok;
4402
4403 	fp = find_file(&current_fh->fh_handle);
4404 if (!fp)
4405 return ret;
4406 /* Check for conflicting share reservations */
4407 spin_lock(&fp->fi_lock);
4408 if (fp->fi_share_deny & deny_type)
4409 ret = nfserr_locked;
4410 spin_unlock(&fp->fi_lock);
4411 put_nfs4_file(fp);
4412 return ret;
4413 }
4414
4415 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
4416 {
4417 struct nfs4_delegation *dp = cb_to_delegation(cb);
4418 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
4419 nfsd_net_id);
4420
4421 block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
4422
4423 /*
4424 * We can't do this in nfsd_break_deleg_cb because it is
4425 * already holding inode->i_lock.
4426 *
4427 * If the dl_time != 0, then we know that it has already been
4428 * queued for a lease break. Don't queue it again.
4429 */
4430 spin_lock(&state_lock);
4431 if (delegation_hashed(dp) && dp->dl_time == 0) {
4432 dp->dl_time = get_seconds();
4433 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
4434 }
4435 spin_unlock(&state_lock);
4436 }
4437
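/*
 * Completion callback for CB_RECALL.  Returning 1 means we are done with
 * the call; returning 0 after rpc_delay() presumably lets the callback
 * machinery retry it.  NFS4ERR_DELAY is always retried; EBADHANDLE and
 * NFS4ERR_BAD_STATEID may just mean the client saw the recall before the
 * OPEN reply that granted the delegation, so those are retried until
 * dl_retries is exhausted.
 */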
4438 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
4439 struct rpc_task *task)
4440 {
4441 struct nfs4_delegation *dp = cb_to_delegation(cb);
4442
4443 if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID)
4444 return 1;
4445
4446 switch (task->tk_status) {
4447 case 0:
4448 return 1;
4449 case -NFS4ERR_DELAY:
4450 rpc_delay(task, 2 * HZ);
4451 return 0;
4452 case -EBADHANDLE:
4453 case -NFS4ERR_BAD_STATEID:
4454 /*
4455 * Race: client probably got cb_recall before open reply
4456 * granting delegation.
4457 */
4458 if (dp->dl_retries--) {
4459 rpc_delay(task, 2 * HZ);
4460 return 0;
4461 }
4462 /*FALLTHRU*/
4463 default:
4464 return 1;
4465 }
4466 }
4467
4468 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
4469 {
4470 struct nfs4_delegation *dp = cb_to_delegation(cb);
4471
4472 nfs4_put_stid(&dp->dl_stid);
4473 }
4474
4475 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
4476 .prepare = nfsd4_cb_recall_prepare,
4477 .done = nfsd4_cb_recall_done,
4478 .release = nfsd4_cb_recall_release,
4479 };
4480
4481 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
4482 {
4483 /*
4484 * We're assuming the state code never drops its reference
4485 * without first removing the lease. Since we're in this lease
4486 * callback (and since the lease code is serialized by the
4487 * i_lock) we know the server hasn't removed the lease yet, and
4488 * we know it's safe to take a reference.
4489 */
4490 refcount_inc(&dp->dl_stid.sc_count);
4491 nfsd4_run_cb(&dp->dl_recall);
4492 }
4493
4494 /* Called from break_lease() with i_lock held. */
4495 static bool
4496 nfsd_break_deleg_cb(struct file_lock *fl)
4497 {
4498 bool ret = false;
4499 struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
4500 struct nfs4_file *fp = dp->dl_stid.sc_file;
4501
4502 /*
4503 * We don't want the locks code to timeout the lease for us;
4504 * we'll remove it ourself if a delegation isn't returned
4505 * in time:
4506 */
4507 fl->fl_break_time = 0;
4508
4509 spin_lock(&fp->fi_lock);
4510 fp->fi_had_conflict = true;
4511 nfsd_break_one_deleg(dp);
4512 spin_unlock(&fp->fi_lock);
4513 return ret;
4514 }
4515
4516 static int
4517 nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
4518 struct list_head *dispose)
4519 {
4520 if (arg & F_UNLCK)
4521 return lease_modify(onlist, arg, dispose);
4522 else
4523 return -EAGAIN;
4524 }
4525
4526 static const struct lock_manager_operations nfsd_lease_mng_ops = {
4527 .lm_break = nfsd_break_deleg_cb,
4528 .lm_change = nfsd_change_deleg_cb,
4529 };
4530
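/*
 * NFSv4.0 stateowner seqid check (a no-op when a session is in use): a
 * seqid one behind the owner's current value is a replay of the previous
 * request, the current value is the expected next request, and anything
 * else is NFS4ERR_BAD_SEQID.  E.g. with so_seqid == 5: seqid 4 replays,
 * seqid 5 proceeds, seqid 3 or 6 fails.
 */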
4531 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
4532 {
4533 if (nfsd4_has_session(cstate))
4534 return nfs_ok;
4535 if (seqid == so->so_seqid - 1)
4536 return nfserr_replay_me;
4537 if (seqid == so->so_seqid)
4538 return nfs_ok;
4539 return nfserr_bad_seqid;
4540 }
4541
4542 static __be32 lookup_clientid(clientid_t *clid,
4543 struct nfsd4_compound_state *cstate,
4544 struct nfsd_net *nn)
4545 {
4546 struct nfs4_client *found;
4547
4548 if (cstate->clp) {
4549 found = cstate->clp;
4550 if (!same_clid(&found->cl_clientid, clid))
4551 return nfserr_stale_clientid;
4552 return nfs_ok;
4553 }
4554
4555 if (STALE_CLIENTID(clid, nn))
4556 return nfserr_stale_clientid;
4557
4558 /*
4559 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
4560 * cached already then we know this is for v4.0 and "sessions"
4561 * will be false.
4562 */
4563 WARN_ON_ONCE(cstate->session);
4564 spin_lock(&nn->client_lock);
4565 found = find_confirmed_client(clid, false, nn);
4566 if (!found) {
4567 spin_unlock(&nn->client_lock);
4568 return nfserr_expired;
4569 }
4570 atomic_inc(&found->cl_rpc_users);
4571 spin_unlock(&nn->client_lock);
4572
4573 /* Cache the nfs4_client in cstate! */
4574 cstate->clp = found;
4575 return nfs_ok;
4576 }
4577
4578 __be32
4579 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
4580 struct nfsd4_open *open, struct nfsd_net *nn)
4581 {
4582 clientid_t *clientid = &open->op_clientid;
4583 struct nfs4_client *clp = NULL;
4584 unsigned int strhashval;
4585 struct nfs4_openowner *oo = NULL;
4586 __be32 status;
4587
4588 if (STALE_CLIENTID(&open->op_clientid, nn))
4589 return nfserr_stale_clientid;
4590 /*
4591 * In case we need it later, after we've already created the
4592 * file and don't want to risk a further failure:
4593 */
4594 open->op_file = nfsd4_alloc_file();
4595 if (open->op_file == NULL)
4596 return nfserr_jukebox;
4597
4598 status = lookup_clientid(clientid, cstate, nn);
4599 if (status)
4600 return status;
4601 clp = cstate->clp;
4602
4603 strhashval = ownerstr_hashval(&open->op_owner);
4604 oo = find_openstateowner_str(strhashval, open, clp);
4605 open->op_openowner = oo;
4606 if (!oo) {
4607 goto new_owner;
4608 }
4609 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
4610 /* Replace unconfirmed owners without checking for replay. */
4611 release_openowner(oo);
4612 open->op_openowner = NULL;
4613 goto new_owner;
4614 }
4615 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
4616 if (status)
4617 return status;
4618 goto alloc_stateid;
4619 new_owner:
4620 oo = alloc_init_open_stateowner(strhashval, open, cstate);
4621 if (oo == NULL)
4622 return nfserr_jukebox;
4623 open->op_openowner = oo;
4624 alloc_stateid:
4625 open->op_stp = nfs4_alloc_open_stateid(clp);
4626 if (!open->op_stp)
4627 return nfserr_jukebox;
4628
4629 if (nfsd4_has_session(cstate) &&
4630 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
4631 open->op_odstate = alloc_clnt_odstate(clp);
4632 if (!open->op_odstate)
4633 return nfserr_jukebox;
4634 }
4635
4636 return nfs_ok;
4637 }
4638
4639 static inline __be32
4640 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
4641 {
4642 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
4643 return nfserr_openmode;
4644 else
4645 return nfs_ok;
4646 }
4647
4648 static int share_access_to_flags(u32 share_access)
4649 {
4650 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
4651 }
4652
4653 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
4654 {
4655 struct nfs4_stid *ret;
4656
4657 ret = find_stateid_by_type(cl, s,
4658 NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
4659 if (!ret)
4660 return NULL;
4661 return delegstateid(ret);
4662 }
4663
4664 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
4665 {
4666 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
4667 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
4668 }
4669
4670 static __be32
4671 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
4672 struct nfs4_delegation **dp)
4673 {
4674 int flags;
4675 __be32 status = nfserr_bad_stateid;
4676 struct nfs4_delegation *deleg;
4677
4678 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
4679 if (deleg == NULL)
4680 goto out;
4681 if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
4682 nfs4_put_stid(&deleg->dl_stid);
4683 if (cl->cl_minorversion)
4684 status = nfserr_deleg_revoked;
4685 goto out;
4686 }
4687 flags = share_access_to_flags(open->op_share_access);
4688 status = nfs4_check_delegmode(deleg, flags);
4689 if (status) {
4690 nfs4_put_stid(&deleg->dl_stid);
4691 goto out;
4692 }
4693 *dp = deleg;
4694 out:
4695 if (!nfsd4_is_deleg_cur(open))
4696 return nfs_ok;
4697 if (status)
4698 return status;
4699 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
4700 return nfs_ok;
4701 }
4702
4703 static inline int nfs4_access_to_access(u32 nfs4_access)
4704 {
4705 int flags = 0;
4706
4707 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
4708 flags |= NFSD_MAY_READ;
4709 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
4710 flags |= NFSD_MAY_WRITE;
4711 return flags;
4712 }
4713
4714 static inline __be32
4715 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
4716 struct nfsd4_open *open)
4717 {
4718 struct iattr iattr = {
4719 .ia_valid = ATTR_SIZE,
4720 .ia_size = 0,
4721 };
4722 if (!open->op_truncate)
4723 return 0;
4724 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
4725 return nfserr_inval;
4726 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
4727 }
4728
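/*
 * Record the OPEN's share access and deny bits on the stateid and file,
 * and make sure a struct nfsd_file opened with the matching access mode is
 * cached in fp->fi_fds[].  fi_lock is dropped around the potentially
 * blocking nfsd_file_acquire() call, so the fi_fds slot is re-checked
 * afterwards and a racing winner's file is kept.  On failure the access
 * and deny bitmaps are rolled back to their previous values.
 */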
4729 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
4730 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
4731 struct nfsd4_open *open)
4732 {
4733 struct nfsd_file *nf = NULL;
4734 __be32 status;
4735 int oflag = nfs4_access_to_omode(open->op_share_access);
4736 int access = nfs4_access_to_access(open->op_share_access);
4737 unsigned char old_access_bmap, old_deny_bmap;
4738
4739 spin_lock(&fp->fi_lock);
4740
4741 /*
4742 * Are we trying to set a deny mode that would conflict with
4743 * current access?
4744 */
4745 status = nfs4_file_check_deny(fp, open->op_share_deny);
4746 if (status != nfs_ok) {
4747 spin_unlock(&fp->fi_lock);
4748 goto out;
4749 }
4750
4751 /* set access to the file */
4752 status = nfs4_file_get_access(fp, open->op_share_access);
4753 if (status != nfs_ok) {
4754 spin_unlock(&fp->fi_lock);
4755 goto out;
4756 }
4757
4758 /* Set access bits in stateid */
4759 old_access_bmap = stp->st_access_bmap;
4760 set_access(open->op_share_access, stp);
4761
4762 /* Set new deny mask */
4763 old_deny_bmap = stp->st_deny_bmap;
4764 set_deny(open->op_share_deny, stp);
4765 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4766
4767 if (!fp->fi_fds[oflag]) {
4768 spin_unlock(&fp->fi_lock);
4769 status = nfsd_file_acquire(rqstp, cur_fh, access, &nf);
4770 if (status)
4771 goto out_put_access;
4772 spin_lock(&fp->fi_lock);
4773 if (!fp->fi_fds[oflag]) {
4774 fp->fi_fds[oflag] = nf;
4775 nf = NULL;
4776 }
4777 }
4778 spin_unlock(&fp->fi_lock);
4779 if (nf)
4780 nfsd_file_put(nf);
4781
4782 status = nfsd4_truncate(rqstp, cur_fh, open);
4783 if (status)
4784 goto out_put_access;
4785 out:
4786 return status;
4787 out_put_access:
4788 stp->st_access_bmap = old_access_bmap;
4789 nfs4_file_put_access(fp, open->op_share_access);
4790 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
4791 goto out;
4792 }
4793
4794 static __be32
4795 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
4796 {
4797 __be32 status;
4798 unsigned char old_deny_bmap = stp->st_deny_bmap;
4799
4800 if (!test_access(open->op_share_access, stp))
4801 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
4802
4803 /* test and set deny mode */
4804 spin_lock(&fp->fi_lock);
4805 status = nfs4_file_check_deny(fp, open->op_share_deny);
4806 if (status == nfs_ok) {
4807 set_deny(open->op_share_deny, stp);
4808 fp->fi_share_deny |=
4809 (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4810 }
4811 spin_unlock(&fp->fi_lock);
4812
4813 if (status != nfs_ok)
4814 return status;
4815
4816 status = nfsd4_truncate(rqstp, cur_fh, open);
4817 if (status != nfs_ok)
4818 reset_union_bmap_deny(old_deny_bmap, stp);
4819 return status;
4820 }
4821
4822 /* Should we give out recallable state?: */
4823 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
4824 {
4825 if (clp->cl_cb_state == NFSD4_CB_UP)
4826 return true;
4827 /*
4828 * In the sessions case, since we don't have to establish a
4829 * separate connection for callbacks, we assume it's OK
4830 * until we hear otherwise:
4831 */
4832 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
4833 }
4834
4835 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
4836 int flag)
4837 {
4838 struct file_lock *fl;
4839
4840 fl = locks_alloc_lock();
4841 if (!fl)
4842 return NULL;
4843 fl->fl_lmops = &nfsd_lease_mng_ops;
4844 fl->fl_flags = FL_DELEG;
4845 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
4846 fl->fl_end = OFFSET_MAX;
4847 fl->fl_owner = (fl_owner_t)dp;
4848 fl->fl_pid = current->tgid;
4849 fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
4850 return fl;
4851 }
4852
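/*
 * Grant a read delegation: pin a readable nfsd_file as fi_deleg_file (or
 * just bump fi_delegees if one is already pinned), install an FL_DELEG
 * lease via vfs_setlease(), and then, back under the state and fi_lock,
 * re-check fi_had_conflict before hashing the delegation.  Failures after
 * the lease has been set are unwound with an F_UNLCK vfs_setlease().
 */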
4853 static struct nfs4_delegation *
4854 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
4855 struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
4856 {
4857 int status = 0;
4858 struct nfs4_delegation *dp;
4859 struct nfsd_file *nf;
4860 struct file_lock *fl;
4861
4862 /*
4863 * The fi_had_conflict and nfs_get_existing_delegation checks
4864 * here are just optimizations; we'll need to recheck them at
4865 * the end:
4866 */
4867 if (fp->fi_had_conflict)
4868 return ERR_PTR(-EAGAIN);
4869
4870 nf = find_readable_file(fp);
4871 if (!nf) {
4872 /* We should always have a readable file here */
4873 WARN_ON_ONCE(1);
4874 return ERR_PTR(-EBADF);
4875 }
4876 spin_lock(&state_lock);
4877 spin_lock(&fp->fi_lock);
4878 if (nfs4_delegation_exists(clp, fp))
4879 status = -EAGAIN;
4880 else if (!fp->fi_deleg_file) {
4881 fp->fi_deleg_file = nf;
4882 /* increment early to prevent fi_deleg_file from being
4883 * cleared */
4884 fp->fi_delegees = 1;
4885 nf = NULL;
4886 } else
4887 fp->fi_delegees++;
4888 spin_unlock(&fp->fi_lock);
4889 spin_unlock(&state_lock);
4890 if (nf)
4891 nfsd_file_put(nf);
4892 if (status)
4893 return ERR_PTR(status);
4894
4895 status = -ENOMEM;
4896 dp = alloc_init_deleg(clp, fp, fh, odstate);
4897 if (!dp)
4898 goto out_delegees;
4899
4900 fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
4901 if (!fl)
4902 goto out_clnt_odstate;
4903
4904 status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
4905 if (fl)
4906 locks_free_lock(fl);
4907 if (status)
4908 goto out_clnt_odstate;
4909
4910 spin_lock(&state_lock);
4911 spin_lock(&fp->fi_lock);
4912 if (fp->fi_had_conflict)
4913 status = -EAGAIN;
4914 else
4915 status = hash_delegation_locked(dp, fp);
4916 spin_unlock(&fp->fi_lock);
4917 spin_unlock(&state_lock);
4918
4919 if (status)
4920 goto out_unlock;
4921
4922 return dp;
4923 out_unlock:
4924 vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
4925 out_clnt_odstate:
4926 put_clnt_odstate(dp->dl_clnt_odstate);
4927 nfs4_put_stid(&dp->dl_stid);
4928 out_delegees:
4929 put_deleg_file(fp);
4930 return ERR_PTR(status);
4931 }
4932
4933 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
4934 {
4935 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4936 if (status == -EAGAIN)
4937 open->op_why_no_deleg = WND4_CONTENTION;
4938 else {
4939 open->op_why_no_deleg = WND4_RESOURCE;
4940 switch (open->op_deleg_want) {
4941 case NFS4_SHARE_WANT_READ_DELEG:
4942 case NFS4_SHARE_WANT_WRITE_DELEG:
4943 case NFS4_SHARE_WANT_ANY_DELEG:
4944 break;
4945 case NFS4_SHARE_WANT_CANCEL:
4946 open->op_why_no_deleg = WND4_CANCELLED;
4947 break;
4948 case NFS4_SHARE_WANT_NO_DELEG:
4949 WARN_ON_ONCE(1);
4950 }
4951 }
4952 }
4953
4954 /*
4955 * Attempt to hand out a delegation.
4956 *
4957 * Note we don't support write delegations, and won't until the vfs has
4958 * proper support for them.
4959 */
4960 static void
4961 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
4962 struct nfs4_ol_stateid *stp)
4963 {
4964 struct nfs4_delegation *dp;
4965 struct nfs4_openowner *oo = openowner(stp->st_stateowner);
4966 struct nfs4_client *clp = stp->st_stid.sc_client;
4967 int cb_up;
4968 int status = 0;
4969
4970 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
4971 open->op_recall = 0;
4972 switch (open->op_claim_type) {
4973 case NFS4_OPEN_CLAIM_PREVIOUS:
4974 if (!cb_up)
4975 open->op_recall = 1;
4976 if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
4977 goto out_no_deleg;
4978 break;
4979 case NFS4_OPEN_CLAIM_NULL:
4980 case NFS4_OPEN_CLAIM_FH:
4981 /*
4982 * Let's not give out any delegations till everyone's
4983 * had the chance to reclaim theirs, *and* until
4984 * NLM locks have all been reclaimed:
4985 */
4986 if (locks_in_grace(clp->net))
4987 goto out_no_deleg;
4988 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
4989 goto out_no_deleg;
4990 /*
4991 * Also, if the file was opened for write or
4992 * create, there's a good chance the client's
4993 * about to write to it, resulting in an
4994 * immediate recall (since we don't support
4995 * write delegations):
4996 */
4997 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
4998 goto out_no_deleg;
4999 if (open->op_create == NFS4_OPEN_CREATE)
5000 goto out_no_deleg;
5001 break;
5002 default:
5003 goto out_no_deleg;
5004 }
5005 dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
5006 if (IS_ERR(dp))
5007 goto out_no_deleg;
5008
5009 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
5010
5011 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
5012 STATEID_VAL(&dp->dl_stid.sc_stateid));
5013 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
5014 nfs4_put_stid(&dp->dl_stid);
5015 return;
5016 out_no_deleg:
5017 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
5018 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
5019 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
5020 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
5021 open->op_recall = 1;
5022 }
5023
5024 /* 4.1 client asking for a delegation? */
5025 if (open->op_deleg_want)
5026 nfsd4_open_deleg_none_ext(open, status);
5027 return;
5028 }
5029
5030 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
5031 struct nfs4_delegation *dp)
5032 {
5033 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
5034 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5035 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5036 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
5037 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
5038 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5039 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5040 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
5041 }
5042 /* Otherwise the client must be confused wanting a delegation
5043 * it already has, therefore we don't return
5044 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
5045 */
5046 }
5047
5048 __be32
5049 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
5050 {
5051 struct nfsd4_compoundres *resp = rqstp->rq_resp;
5052 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
5053 struct nfs4_file *fp = NULL;
5054 struct nfs4_ol_stateid *stp = NULL;
5055 struct nfs4_delegation *dp = NULL;
5056 __be32 status;
5057 bool new_stp = false;
5058
5059 /*
5060 * Lookup file; if found, lookup stateid and check open request,
5061 * and check for delegations in the process of being recalled.
5062 * If not found, create the nfs4_file struct
5063 */
5064 	fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
5065 if (fp != open->op_file) {
5066 status = nfs4_check_deleg(cl, open, &dp);
5067 if (status)
5068 goto out;
5069 stp = nfsd4_find_and_lock_existing_open(fp, open);
5070 } else {
5071 open->op_file = NULL;
5072 status = nfserr_bad_stateid;
5073 if (nfsd4_is_deleg_cur(open))
5074 goto out;
5075 }
5076
5077 if (!stp) {
5078 stp = init_open_stateid(fp, open);
5079 if (!open->op_stp)
5080 new_stp = true;
5081 }
5082
5083 /*
5084 * OPEN the file, or upgrade an existing OPEN.
5085 * If truncate fails, the OPEN fails.
5086 *
5087 * stp is already locked.
5088 */
5089 if (!new_stp) {
5090 /* Stateid was found, this is an OPEN upgrade */
5091 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
5092 if (status) {
5093 mutex_unlock(&stp->st_mutex);
5094 goto out;
5095 }
5096 } else {
5097 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
5098 if (status) {
5099 stp->st_stid.sc_type = NFS4_CLOSED_STID;
5100 release_open_stateid(stp);
5101 mutex_unlock(&stp->st_mutex);
5102 goto out;
5103 }
5104
5105 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
5106 open->op_odstate);
5107 if (stp->st_clnt_odstate == open->op_odstate)
5108 open->op_odstate = NULL;
5109 }
5110
5111 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
5112 mutex_unlock(&stp->st_mutex);
5113
5114 if (nfsd4_has_session(&resp->cstate)) {
5115 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
5116 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5117 open->op_why_no_deleg = WND4_NOT_WANTED;
5118 goto nodeleg;
5119 }
5120 }
5121
5122 /*
5123 * Attempt to hand out a delegation. No error return, because the
5124 * OPEN succeeds even if we fail.
5125 */
5126 nfs4_open_delegation(current_fh, open, stp);
5127 nodeleg:
5128 status = nfs_ok;
5129
5130 dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
5131 STATEID_VAL(&stp->st_stid.sc_stateid));
5132 out:
5133 /* 4.1 client trying to upgrade/downgrade delegation? */
5134 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
5135 open->op_deleg_want)
5136 nfsd4_deleg_xgrade_none_ext(open, dp);
5137
5138 if (fp)
5139 put_nfs4_file(fp);
5140 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
5141 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
5142 /*
5143 * To finish the open response, we just need to set the rflags.
5144 */
5145 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
5146 if (nfsd4_has_session(&resp->cstate))
5147 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
5148 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
5149 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
5150
5151 if (dp)
5152 nfs4_put_stid(&dp->dl_stid);
5153 if (stp)
5154 nfs4_put_stid(&stp->st_stid);
5155
5156 return status;
5157 }
5158
5159 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
5160 struct nfsd4_open *open)
5161 {
5162 if (open->op_openowner) {
5163 struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
5164
5165 nfsd4_cstate_assign_replay(cstate, so);
5166 nfs4_put_stateowner(so);
5167 }
5168 if (open->op_file)
5169 kmem_cache_free(file_slab, open->op_file);
5170 if (open->op_stp)
5171 nfs4_put_stid(&open->op_stp->st_stid);
5172 if (open->op_odstate)
5173 kmem_cache_free(odstate_slab, open->op_odstate);
5174 }
5175
5176 __be32
5177 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5178 union nfsd4_op_u *u)
5179 {
5180 clientid_t *clid = &u->renew;
5181 struct nfs4_client *clp;
5182 __be32 status;
5183 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5184
5185 dprintk("process_renew(%08x/%08x): starting\n",
5186 clid->cl_boot, clid->cl_id);
5187 status = lookup_clientid(clid, cstate, nn);
5188 if (status)
5189 goto out;
5190 clp = cstate->clp;
5191 status = nfserr_cb_path_down;
5192 if (!list_empty(&clp->cl_delegations)
5193 && clp->cl_cb_state != NFSD4_CB_UP)
5194 goto out;
5195 status = nfs_ok;
5196 out:
5197 return status;
5198 }
5199
5200 void
5201 nfsd4_end_grace(struct nfsd_net *nn)
5202 {
5203 /* do nothing if grace period already ended */
5204 if (nn->grace_ended)
5205 return;
5206
5207 nn->grace_ended = true;
5208 /*
5209 * If the server goes down again right now, an NFSv4
5210 * client will still be allowed to reclaim after it comes back up,
5211 * even if it hasn't yet had a chance to reclaim state this time.
5212 *
5213 */
5214 nfsd4_record_grace_done(nn);
5215 /*
5216 * At this point, NFSv4 clients can still reclaim. But if the
5217 * server crashes, any that have not yet reclaimed will be out
5218 * of luck on the next boot.
5219 *
5220 * (NFSv4.1+ clients are considered to have reclaimed once they
5221 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
5222 * have reclaimed after their first OPEN.)
5223 */
5224 locks_end_grace(&nn->nfsd4_manager);
5225 /*
5226 * At this point, and once lockd and/or any other containers
5227 * exit their grace period, further reclaims will fail and
5228 * regular locking can resume.
5229 */
5230 }
5231
5232 /*
5233 * If we've waited a lease period but there are still clients trying to
5234 * reclaim, wait a little longer to give them a chance to finish.
5235 */
5236 static bool clients_still_reclaiming(struct nfsd_net *nn)
5237 {
5238 unsigned long now = get_seconds();
5239 unsigned long double_grace_period_end = nn->boot_time +
5240 2 * nn->nfsd4_lease;
5241
5242 if (nn->track_reclaim_completes &&
5243 atomic_read(&nn->nr_reclaim_complete) ==
5244 nn->reclaim_str_hashtbl_size)
5245 return false;
5246 if (!nn->somebody_reclaimed)
5247 return false;
5248 nn->somebody_reclaimed = false;
5249 /*
5250 * If we've given them *two* lease times to reclaim, and they're
5251 * still not done, give up:
5252 */
5253 if (time_after(now, double_grace_period_end))
5254 return false;
5255 return true;
5256 }
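/*
 * Example (assuming the default 90-second lease): reclaim activity can
 * keep the grace period open only until boot_time + 180s; after that
 * the check above gives up and lets the laundromat end grace.
 */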
5257
5258 static time_t
5259 nfs4_laundromat(struct nfsd_net *nn)
5260 {
5261 struct nfs4_client *clp;
5262 struct nfs4_openowner *oo;
5263 struct nfs4_delegation *dp;
5264 struct nfs4_ol_stateid *stp;
5265 struct nfsd4_blocked_lock *nbl;
5266 struct list_head *pos, *next, reaplist;
5267 time_t cutoff = get_seconds() - nn->nfsd4_lease;
5268 time_t t, new_timeo = nn->nfsd4_lease;
5269
5270 dprintk("NFSD: laundromat service - starting\n");
5271
5272 if (clients_still_reclaiming(nn)) {
5273 new_timeo = 0;
5274 goto out;
5275 }
5276 dprintk("NFSD: end of grace period\n");
5277 nfsd4_end_grace(nn);
5278 INIT_LIST_HEAD(&reaplist);
5279 spin_lock(&nn->client_lock);
5280 list_for_each_safe(pos, next, &nn->client_lru) {
5281 clp = list_entry(pos, struct nfs4_client, cl_lru);
5282 if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
5283 t = clp->cl_time - cutoff;
5284 new_timeo = min(new_timeo, t);
5285 break;
5286 }
5287 if (mark_client_expired_locked(clp)) {
5288 dprintk("NFSD: client in use (clientid %08x)\n",
5289 clp->cl_clientid.cl_id);
5290 continue;
5291 }
5292 list_add(&clp->cl_lru, &reaplist);
5293 }
5294 spin_unlock(&nn->client_lock);
5295 list_for_each_safe(pos, next, &reaplist) {
5296 clp = list_entry(pos, struct nfs4_client, cl_lru);
5297 dprintk("NFSD: purging unused client (clientid %08x)\n",
5298 clp->cl_clientid.cl_id);
5299 list_del_init(&clp->cl_lru);
5300 expire_client(clp);
5301 }
5302 spin_lock(&state_lock);
5303 list_for_each_safe(pos, next, &nn->del_recall_lru) {
5304 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
5305 if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
5306 t = dp->dl_time - cutoff;
5307 new_timeo = min(new_timeo, t);
5308 break;
5309 }
5310 WARN_ON(!unhash_delegation_locked(dp));
5311 list_add(&dp->dl_recall_lru, &reaplist);
5312 }
5313 spin_unlock(&state_lock);
5314 while (!list_empty(&reaplist)) {
5315 dp = list_first_entry(&reaplist, struct nfs4_delegation,
5316 dl_recall_lru);
5317 list_del_init(&dp->dl_recall_lru);
5318 revoke_delegation(dp);
5319 }
5320
5321 spin_lock(&nn->client_lock);
5322 while (!list_empty(&nn->close_lru)) {
5323 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
5324 oo_close_lru);
5325 if (time_after((unsigned long)oo->oo_time,
5326 (unsigned long)cutoff)) {
5327 t = oo->oo_time - cutoff;
5328 new_timeo = min(new_timeo, t);
5329 break;
5330 }
5331 list_del_init(&oo->oo_close_lru);
5332 stp = oo->oo_last_closed_stid;
5333 oo->oo_last_closed_stid = NULL;
5334 spin_unlock(&nn->client_lock);
5335 nfs4_put_stid(&stp->st_stid);
5336 spin_lock(&nn->client_lock);
5337 }
5338 spin_unlock(&nn->client_lock);
5339
5340 /*
5341 * It's possible for a client to try to acquire an already held lock
5342 * that is being held for a long time, and then lose interest in it.
5343 * So, we clean out any un-revisited request after a lease period
5344 * under the assumption that the client is no longer interested.
5345 *
5346 * RFC5661, sec. 9.6 states that the client must not rely on getting
5347 * notifications and must continue to poll for locks, even when the
5348 * server supports them. Thus this shouldn't lead to clients blocking
5349 * indefinitely once the lock does become free.
5350 */
5351 BUG_ON(!list_empty(&reaplist));
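/*
 * (The same reaplist is reused below for blocked locks, so the
 * delegation pass above must have drained it completely.)
 */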
5352 spin_lock(&nn->blocked_locks_lock);
5353 while (!list_empty(&nn->blocked_locks_lru)) {
5354 nbl = list_first_entry(&nn->blocked_locks_lru,
5355 struct nfsd4_blocked_lock, nbl_lru);
5356 if (time_after((unsigned long)nbl->nbl_time,
5357 (unsigned long)cutoff)) {
5358 t = nbl->nbl_time - cutoff;
5359 new_timeo = min(new_timeo, t);
5360 break;
5361 }
5362 list_move(&nbl->nbl_lru, &reaplist);
5363 list_del_init(&nbl->nbl_list);
5364 }
5365 spin_unlock(&nn->blocked_locks_lock);
5366
5367 while (!list_empty(&reaplist)) {
5368 nbl = list_first_entry(&reaplist,
5369 struct nfsd4_blocked_lock, nbl_lru);
5370 list_del_init(&nbl->nbl_lru);
5371 free_blocked_lock(nbl);
5372 }
5373 out:
5374 new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
5375 return new_timeo;
5376 }
5377
5378 static struct workqueue_struct *laundry_wq;
5379 static void laundromat_main(struct work_struct *);
5380
5381 static void
5382 laundromat_main(struct work_struct *laundry)
5383 {
5384 time_t t;
5385 struct delayed_work *dwork = to_delayed_work(laundry);
5386 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
5387 laundromat_work);
5388
5389 t = nfs4_laundromat(nn);
5390 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
5391 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
5392 }
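/*
 * A small sketch of the rearm above: nfs4_laundromat() returns a delay
 * in seconds, so multiplying by HZ converts it to jiffies before
 * queue_delayed_work() re-schedules the laundromat on laundry_wq.
 */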
5393
5394 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
5395 {
5396 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
5397 return nfserr_bad_stateid;
5398 return nfs_ok;
5399 }
5400
5401 static inline int
5402 access_permit_read(struct nfs4_ol_stateid *stp)
5403 {
5404 return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
5405 test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
5406 test_access(NFS4_SHARE_ACCESS_WRITE, stp);
5407 }
5408
5409 static inline int
5410 access_permit_write(struct nfs4_ol_stateid *stp)
5411 {
5412 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
5413 test_access(NFS4_SHARE_ACCESS_BOTH, stp);
5414 }
5415
5416 static
5417 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
5418 {
5419 __be32 status = nfserr_openmode;
5420
5421 /* For lock stateids, we test the parent open, not the lock: */
5422 if (stp->st_openstp)
5423 stp = stp->st_openstp;
5424 if ((flags & WR_STATE) && !access_permit_write(stp))
5425 goto out;
5426 if ((flags & RD_STATE) && !access_permit_read(stp))
5427 goto out;
5428 status = nfs_ok;
5429 out:
5430 return status;
5431 }
5432
5433 static inline __be32
5434 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
5435 {
5436 if (ONE_STATEID(stateid) && (flags & RD_STATE))
5437 return nfs_ok;
5438 else if (opens_in_grace(net)) {
5439 /* Answer in remaining cases depends on existence of
5440 * conflicting state; so we must wait out the grace period. */
5441 return nfserr_grace;
5442 } else if (flags & WR_STATE)
5443 return nfs4_share_conflict(current_fh,
5444 NFS4_SHARE_DENY_WRITE);
5445 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
5446 return nfs4_share_conflict(current_fh,
5447 NFS4_SHARE_DENY_READ);
5448 }
5449
5450 /*
5451 * Allow READ/WRITE during grace period on recovered state only for files
5452 * that are not able to provide mandatory locking.
5453 */
5454 static inline int
5455 grace_disallows_io(struct net *net, struct inode *inode)
5456 {
5457 return opens_in_grace(net) && mandatory_lock(inode);
5458 }
5459
5460 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
5461 {
5462 /*
5463 * When sessions are used the stateid generation number is ignored
5464 * when it is zero.
5465 */
5466 if (has_session && in->si_generation == 0)
5467 return nfs_ok;
5468
5469 if (in->si_generation == ref->si_generation)
5470 return nfs_ok;
5471
5472 /* If the client sends us a stateid from the future, it's buggy: */
5473 if (nfsd4_stateid_generation_after(in, ref))
5474 return nfserr_bad_stateid;
5475 /*
5476 * However, we could see a stateid from the past, even from a
5477 * non-buggy client. For example, if the client sends a lock
5478 * while some IO is outstanding, the lock may bump si_generation
5479 * while the IO is still in flight. The client could avoid that
5480 * situation by waiting for responses on all the IO requests,
5481 * but better performance may result in retrying IO that
5482 * receives an old_stateid error if requests are rarely
5483 * reordered in flight:
5484 */
5485 return nfserr_old_stateid;
5486 }
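/*
 * Worked example (hypothetical values): if our copy of the stateid is at
 * si_generation == 4, an incoming generation of 6 is "from the future"
 * and yields nfserr_bad_stateid, generation 2 yields nfserr_old_stateid,
 * generation 4 matches, and generation 0 is accepted only when a
 * session is in use.
 */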
5487
5488 static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
5489 {
5490 __be32 ret;
5491
5492 spin_lock(&s->sc_lock);
5493 ret = nfsd4_verify_open_stid(s);
5494 if (ret == nfs_ok)
5495 ret = check_stateid_generation(in, &s->sc_stateid, has_session);
5496 spin_unlock(&s->sc_lock);
5497 return ret;
5498 }
5499
5500 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
5501 {
5502 if (ols->st_stateowner->so_is_open_owner &&
5503 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
5504 return nfserr_bad_stateid;
5505 return nfs_ok;
5506 }
5507
5508 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
5509 {
5510 struct nfs4_stid *s;
5511 __be32 status = nfserr_bad_stateid;
5512
5513 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5514 CLOSE_STATEID(stateid))
5515 return status;
5516 spin_lock(&cl->cl_lock);
5517 s = find_stateid_locked(cl, stateid);
5518 if (!s)
5519 goto out_unlock;
5520 status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
5521 if (status)
5522 goto out_unlock;
5523 switch (s->sc_type) {
5524 case NFS4_DELEG_STID:
5525 status = nfs_ok;
5526 break;
5527 case NFS4_REVOKED_DELEG_STID:
5528 status = nfserr_deleg_revoked;
5529 break;
5530 case NFS4_OPEN_STID:
5531 case NFS4_LOCK_STID:
5532 status = nfsd4_check_openowner_confirmed(openlockstateid(s));
5533 break;
5534 default:
5535 printk("unknown stateid type %x\n", s->sc_type);
5536 /* Fallthrough */
5537 case NFS4_CLOSED_STID:
5538 case NFS4_CLOSED_DELEG_STID:
5539 status = nfserr_bad_stateid;
5540 }
5541 out_unlock:
5542 spin_unlock(&cl->cl_lock);
5543 return status;
5544 }
5545
5546 __be32
5547 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
5548 stateid_t *stateid, unsigned char typemask,
5549 struct nfs4_stid **s, struct nfsd_net *nn)
5550 {
5551 __be32 status;
5552 bool return_revoked = false;
5553
5554 /*
5555 * only return revoked delegations if explicitly asked.
5556 * otherwise we report revoked or bad_stateid status.
5557 */
5558 if (typemask & NFS4_REVOKED_DELEG_STID)
5559 return_revoked = true;
5560 else if (typemask & NFS4_DELEG_STID)
5561 typemask |= NFS4_REVOKED_DELEG_STID;
5562
5563 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5564 CLOSE_STATEID(stateid))
5565 return nfserr_bad_stateid;
5566 status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
5567 if (status == nfserr_stale_clientid) {
5568 if (cstate->session)
5569 return nfserr_bad_stateid;
5570 return nfserr_stale_stateid;
5571 }
5572 if (status)
5573 return status;
5574 *s = find_stateid_by_type(cstate->clp, stateid, typemask);
5575 if (!*s)
5576 return nfserr_bad_stateid;
5577 if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
5578 nfs4_put_stid(*s);
5579 if (cstate->minorversion)
5580 return nfserr_deleg_revoked;
5581 return nfserr_bad_stateid;
5582 }
5583 return nfs_ok;
5584 }
5585
5586 static struct nfsd_file *
5587 nfs4_find_file(struct nfs4_stid *s, int flags)
5588 {
5589 if (!s)
5590 return NULL;
5591
5592 switch (s->sc_type) {
5593 case NFS4_DELEG_STID:
5594 if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
5595 return NULL;
5596 return nfsd_file_get(s->sc_file->fi_deleg_file);
5597 case NFS4_OPEN_STID:
5598 case NFS4_LOCK_STID:
5599 if (flags & RD_STATE)
5600 return find_readable_file(s->sc_file);
5601 else
5602 return find_writeable_file(s->sc_file);
5603 break;
5604 }
5605
5606 return NULL;
5607 }
5608
5609 static __be32
5610 nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
5611 {
5612 __be32 status;
5613
5614 status = nfsd4_check_openowner_confirmed(ols);
5615 if (status)
5616 return status;
5617 return nfs4_check_openmode(ols, flags);
5618 }
5619
5620 static __be32
5621 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
5622 struct nfsd_file **nfp, int flags)
5623 {
5624 int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
5625 struct nfsd_file *nf;
5626 __be32 status;
5627
5628 nf = nfs4_find_file(s, flags);
5629 if (nf) {
5630 status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
5631 acc | NFSD_MAY_OWNER_OVERRIDE);
5632 if (status) {
5633 nfsd_file_put(nf);
5634 goto out;
5635 }
5636 } else {
5637 status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
5638 if (status)
5639 return status;
5640 }
5641 *nfp = nf;
5642 out:
5643 return status;
5644 }
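/*
 * Note: when the caller used a special (all-zeros or all-ones) stateid,
 * s is NULL, nfs4_find_file() above returns NULL, and we fall back to
 * nfsd_file_acquire() to open the file by handle rather than via state
 * held by the client.
 */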
5645
5646 /*
5647 * Checks for stateid operations
5648 */
5649 __be32
5650 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
5651 struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
5652 stateid_t *stateid, int flags, struct nfsd_file **nfp)
5653 {
5654 struct inode *ino = d_inode(fhp->fh_dentry);
5655 struct net *net = SVC_NET(rqstp);
5656 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5657 struct nfs4_stid *s = NULL;
5658 __be32 status;
5659
5660 if (nfp)
5661 *nfp = NULL;
5662
5663 if (grace_disallows_io(net, ino))
5664 return nfserr_grace;
5665
5666 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
5667 status = check_special_stateids(net, fhp, stateid, flags);
5668 goto done;
5669 }
5670
5671 status = nfsd4_lookup_stateid(cstate, stateid,
5672 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
5673 &s, nn);
5674 if (status)
5675 return status;
5676 status = nfsd4_stid_check_stateid_generation(stateid, s,
5677 nfsd4_has_session(cstate));
5678 if (status)
5679 goto out;
5680
5681 switch (s->sc_type) {
5682 case NFS4_DELEG_STID:
5683 status = nfs4_check_delegmode(delegstateid(s), flags);
5684 break;
5685 case NFS4_OPEN_STID:
5686 case NFS4_LOCK_STID:
5687 status = nfs4_check_olstateid(openlockstateid(s), flags);
5688 break;
5689 default:
5690 status = nfserr_bad_stateid;
5691 break;
5692 }
5693 if (status)
5694 goto out;
5695 status = nfs4_check_fh(fhp, s);
5696
5697 done:
5698 if (status == nfs_ok && nfp)
5699 status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
5700 out:
5701 if (s)
5702 nfs4_put_stid(s);
5703 return status;
5704 }
5705
5706 /*
5707 * Test if the stateid is valid
5708 */
5709 __be32
5710 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5711 union nfsd4_op_u *u)
5712 {
5713 struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
5714 struct nfsd4_test_stateid_id *stateid;
5715 struct nfs4_client *cl = cstate->session->se_client;
5716
5717 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
5718 stateid->ts_id_status =
5719 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
5720
5721 return nfs_ok;
5722 }
5723
5724 static __be32
5725 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
5726 {
5727 struct nfs4_ol_stateid *stp = openlockstateid(s);
5728 __be32 ret;
5729
5730 ret = nfsd4_lock_ol_stateid(stp);
5731 if (ret)
5732 goto out_put_stid;
5733
5734 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5735 if (ret)
5736 goto out;
5737
5738 ret = nfserr_locks_held;
5739 if (check_for_locks(stp->st_stid.sc_file,
5740 lockowner(stp->st_stateowner)))
5741 goto out;
5742
5743 release_lock_stateid(stp);
5744 ret = nfs_ok;
5745
5746 out:
5747 mutex_unlock(&stp->st_mutex);
5748 out_put_stid:
5749 nfs4_put_stid(s);
5750 return ret;
5751 }
5752
5753 __be32
5754 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5755 union nfsd4_op_u *u)
5756 {
5757 struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
5758 stateid_t *stateid = &free_stateid->fr_stateid;
5759 struct nfs4_stid *s;
5760 struct nfs4_delegation *dp;
5761 struct nfs4_client *cl = cstate->session->se_client;
5762 __be32 ret = nfserr_bad_stateid;
5763
5764 spin_lock(&cl->cl_lock);
5765 s = find_stateid_locked(cl, stateid);
5766 if (!s)
5767 goto out_unlock;
5768 spin_lock(&s->sc_lock);
5769 switch (s->sc_type) {
5770 case NFS4_DELEG_STID:
5771 ret = nfserr_locks_held;
5772 break;
5773 case NFS4_OPEN_STID:
5774 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5775 if (ret)
5776 break;
5777 ret = nfserr_locks_held;
5778 break;
5779 case NFS4_LOCK_STID:
5780 spin_unlock(&s->sc_lock);
5781 refcount_inc(&s->sc_count);
5782 spin_unlock(&cl->cl_lock);
5783 ret = nfsd4_free_lock_stateid(stateid, s);
5784 goto out;
5785 case NFS4_REVOKED_DELEG_STID:
5786 spin_unlock(&s->sc_lock);
5787 dp = delegstateid(s);
5788 list_del_init(&dp->dl_recall_lru);
5789 spin_unlock(&cl->cl_lock);
5790 nfs4_put_stid(s);
5791 ret = nfs_ok;
5792 goto out;
5793 /* Default falls through and returns nfserr_bad_stateid */
5794 }
5795 spin_unlock(&s->sc_lock);
5796 out_unlock:
5797 spin_unlock(&cl->cl_lock);
5798 out:
5799 return ret;
5800 }
5801
5802 static inline int
5803 setlkflg (int type)
5804 {
5805 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
5806 RD_STATE : WR_STATE;
5807 }
5808
5809 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
5810 {
5811 struct svc_fh *current_fh = &cstate->current_fh;
5812 struct nfs4_stateowner *sop = stp->st_stateowner;
5813 __be32 status;
5814
5815 status = nfsd4_check_seqid(cstate, sop, seqid);
5816 if (status)
5817 return status;
5818 status = nfsd4_lock_ol_stateid(stp);
5819 if (status != nfs_ok)
5820 return status;
5821 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
5822 if (status == nfs_ok)
5823 status = nfs4_check_fh(current_fh, &stp->st_stid);
5824 if (status != nfs_ok)
5825 mutex_unlock(&stp->st_mutex);
5826 return status;
5827 }
5828
5829 /*
5830 * Checks for sequence id mutating operations.
5831 */
5832 static __be32
5833 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
5834 stateid_t *stateid, char typemask,
5835 struct nfs4_ol_stateid **stpp,
5836 struct nfsd_net *nn)
5837 {
5838 __be32 status;
5839 struct nfs4_stid *s;
5840 struct nfs4_ol_stateid *stp = NULL;
5841
5842 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
5843 seqid, STATEID_VAL(stateid));
5844
5845 *stpp = NULL;
5846 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
5847 if (status)
5848 return status;
5849 stp = openlockstateid(s);
5850 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
5851
5852 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
5853 if (!status)
5854 *stpp = stp;
5855 else
5856 nfs4_put_stid(&stp->st_stid);
5857 return status;
5858 }
5859
5860 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
5861 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
5862 {
5863 __be32 status;
5864 struct nfs4_openowner *oo;
5865 struct nfs4_ol_stateid *stp;
5866
5867 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
5868 NFS4_OPEN_STID, &stp, nn);
5869 if (status)
5870 return status;
5871 oo = openowner(stp->st_stateowner);
5872 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
5873 mutex_unlock(&stp->st_mutex);
5874 nfs4_put_stid(&stp->st_stid);
5875 return nfserr_bad_stateid;
5876 }
5877 *stpp = stp;
5878 return nfs_ok;
5879 }
5880
5881 __be32
5882 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5883 union nfsd4_op_u *u)
5884 {
5885 struct nfsd4_open_confirm *oc = &u->open_confirm;
5886 __be32 status;
5887 struct nfs4_openowner *oo;
5888 struct nfs4_ol_stateid *stp;
5889 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5890
5891 dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
5892 cstate->current_fh.fh_dentry);
5893
5894 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
5895 if (status)
5896 return status;
5897
5898 status = nfs4_preprocess_seqid_op(cstate,
5899 oc->oc_seqid, &oc->oc_req_stateid,
5900 NFS4_OPEN_STID, &stp, nn);
5901 if (status)
5902 goto out;
5903 oo = openowner(stp->st_stateowner);
5904 status = nfserr_bad_stateid;
5905 if (oo->oo_flags & NFS4_OO_CONFIRMED) {
5906 mutex_unlock(&stp->st_mutex);
5907 goto put_stateid;
5908 }
5909 oo->oo_flags |= NFS4_OO_CONFIRMED;
5910 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
5911 mutex_unlock(&stp->st_mutex);
5912 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
5913 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
5914
5915 nfsd4_client_record_create(oo->oo_owner.so_client);
5916 status = nfs_ok;
5917 put_stateid:
5918 nfs4_put_stid(&stp->st_stid);
5919 out:
5920 nfsd4_bump_seqid(cstate, status);
5921 return status;
5922 }
5923
5924 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
5925 {
5926 if (!test_access(access, stp))
5927 return;
5928 nfs4_file_put_access(stp->st_stid.sc_file, access);
5929 clear_access(access, stp);
5930 }
5931
5932 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
5933 {
5934 switch (to_access) {
5935 case NFS4_SHARE_ACCESS_READ:
5936 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
5937 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
5938 break;
5939 case NFS4_SHARE_ACCESS_WRITE:
5940 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
5941 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
5942 break;
5943 case NFS4_SHARE_ACCESS_BOTH:
5944 break;
5945 default:
5946 WARN_ON_ONCE(1);
5947 }
5948 }
5949
5950 __be32
5951 nfsd4_open_downgrade(struct svc_rqst *rqstp,
5952 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
5953 {
5954 struct nfsd4_open_downgrade *od = &u->open_downgrade;
5955 __be32 status;
5956 struct nfs4_ol_stateid *stp;
5957 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5958
5959 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
5960 cstate->current_fh.fh_dentry);
5961
5962 /* We don't yet support WANT bits: */
5963 if (od->od_deleg_want)
5964 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
5965 od->od_deleg_want);
5966
5967 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
5968 &od->od_stateid, &stp, nn);
5969 if (status)
5970 goto out;
5971 status = nfserr_inval;
5972 if (!test_access(od->od_share_access, stp)) {
5973 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
5974 stp->st_access_bmap, od->od_share_access);
5975 goto put_stateid;
5976 }
5977 if (!test_deny(od->od_share_deny, stp)) {
5978 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
5979 stp->st_deny_bmap, od->od_share_deny);
5980 goto put_stateid;
5981 }
5982 nfs4_stateid_downgrade(stp, od->od_share_access);
5983 reset_union_bmap_deny(od->od_share_deny, stp);
5984 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
5985 status = nfs_ok;
5986 put_stateid:
5987 mutex_unlock(&stp->st_mutex);
5988 nfs4_put_stid(&stp->st_stid);
5989 out:
5990 nfsd4_bump_seqid(cstate, status);
5991 return status;
5992 }
5993
5994 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
5995 {
5996 struct nfs4_client *clp = s->st_stid.sc_client;
5997 bool unhashed;
5998 LIST_HEAD(reaplist);
5999
6000 spin_lock(&clp->cl_lock);
6001 unhashed = unhash_open_stateid(s, &reaplist);
6002
6003 if (clp->cl_minorversion) {
6004 if (unhashed)
6005 put_ol_stateid_locked(s, &reaplist);
6006 spin_unlock(&clp->cl_lock);
6007 free_ol_stateid_reaplist(&reaplist);
6008 } else {
6009 spin_unlock(&clp->cl_lock);
6010 free_ol_stateid_reaplist(&reaplist);
6011 if (unhashed)
6012 move_to_close_lru(s, clp->net);
6013 }
6014 }
6015
6016 /*
6017 * nfs4_unlock_state() called after encode
6018 */
6019 __be32
6020 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6021 union nfsd4_op_u *u)
6022 {
6023 struct nfsd4_close *close = &u->close;
6024 __be32 status;
6025 struct nfs4_ol_stateid *stp;
6026 struct net *net = SVC_NET(rqstp);
6027 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6028
6029 dprintk("NFSD: nfsd4_close on file %pd\n",
6030 cstate->current_fh.fh_dentry);
6031
6032 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
6033 &close->cl_stateid,
6034 NFS4_OPEN_STID|NFS4_CLOSED_STID,
6035 &stp, nn);
6036 nfsd4_bump_seqid(cstate, status);
6037 if (status)
6038 goto out;
6039
6040 stp->st_stid.sc_type = NFS4_CLOSED_STID;
6041
6042 /*
6043 * Technically we don't _really_ have to increment or copy it, since
6044 * it should just be gone after this operation and we clobber the
6045 * copied value below, but we continue to do so here just to ensure
6046 * that racing ops see that there was a state change.
6047 */
6048 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
6049
6050 nfsd4_close_open_stateid(stp);
6051 mutex_unlock(&stp->st_mutex);
6052
6053 /* v4.1+ suggests that we send a special stateid in here, since the
6054 * clients should just ignore this anyway. Since this is not useful
6055 * for v4.0 clients either, we set it to the special close_stateid
6056 * universally.
6057 *
6058 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
6059 */
6060 memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));
6061
6062 /* put reference from nfs4_preprocess_seqid_op */
6063 nfs4_put_stid(&stp->st_stid);
6064 out:
6065 return status;
6066 }
6067
6068 __be32
6069 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6070 union nfsd4_op_u *u)
6071 {
6072 struct nfsd4_delegreturn *dr = &u->delegreturn;
6073 struct nfs4_delegation *dp;
6074 stateid_t *stateid = &dr->dr_stateid;
6075 struct nfs4_stid *s;
6076 __be32 status;
6077 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6078
6079 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
6080 return status;
6081
6082 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
6083 if (status)
6084 goto out;
6085 dp = delegstateid(s);
6086 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
6087 if (status)
6088 goto put_stateid;
6089
6090 destroy_delegation(dp);
6091 put_stateid:
6092 nfs4_put_stid(&dp->dl_stid);
6093 out:
6094 return status;
6095 }
6096
6097 static inline u64
6098 end_offset(u64 start, u64 len)
6099 {
6100 u64 end;
6101
6102 end = start + len;
6103 return end >= start ? end: NFS4_MAX_UINT64;
6104 }
6105
6106 /* last octet in a range */
6107 static inline u64
6108 last_byte_offset(u64 start, u64 len)
6109 {
6110 u64 end;
6111
6112 WARN_ON_ONCE(!len);
6113 end = start + len;
6114 return end > start ? end - 1: NFS4_MAX_UINT64;
6115 }
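/*
 * Examples: end_offset(100, 10) == 110 and last_byte_offset(100, 10)
 * == 109, while a length that would wrap past 2^64 - 1 (such as the
 * NFS4_MAX_UINT64 "whole file" length) makes both helpers return
 * NFS4_MAX_UINT64.
 */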
6116
6117 /*
6118 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
6119 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
6120 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
6121 * locking, this prevents us from being completely protocol-compliant. The
6122 * real solution to this problem is to start using unsigned file offsets in
6123 * the VFS, but this is a very deep change!
6124 */
6125 static inline void
6126 nfs4_transform_lock_offset(struct file_lock *lock)
6127 {
6128 if (lock->fl_start < 0)
6129 lock->fl_start = OFFSET_MAX;
6130 if (lock->fl_end < 0)
6131 lock->fl_end = OFFSET_MAX;
6132 }
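/*
 * Example of the clamping above: a lock starting at offset 2^63 becomes
 * negative when stored in the signed loff_t fields, so fl_start (and
 * likewise fl_end) is capped at OFFSET_MAX, the largest offset the VFS
 * can represent.
 */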
6133
6134 static fl_owner_t
6135 nfsd4_fl_get_owner(fl_owner_t owner)
6136 {
6137 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
6138
6139 nfs4_get_stateowner(&lo->lo_owner);
6140 return owner;
6141 }
6142
6143 static void
6144 nfsd4_fl_put_owner(fl_owner_t owner)
6145 {
6146 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
6147
6148 if (lo)
6149 nfs4_put_stateowner(&lo->lo_owner);
6150 }
6151
6152 static void
6153 nfsd4_lm_notify(struct file_lock *fl)
6154 {
6155 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
6156 struct net *net = lo->lo_owner.so_client->net;
6157 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6158 struct nfsd4_blocked_lock *nbl = container_of(fl,
6159 struct nfsd4_blocked_lock, nbl_lock);
6160 bool queue = false;
6161
6162 /* An empty list means that something else is going to be using it */
6163 spin_lock(&nn->blocked_locks_lock);
6164 if (!list_empty(&nbl->nbl_list)) {
6165 list_del_init(&nbl->nbl_list);
6166 list_del_init(&nbl->nbl_lru);
6167 queue = true;
6168 }
6169 spin_unlock(&nn->blocked_locks_lock);
6170
6171 if (queue)
6172 nfsd4_run_cb(&nbl->nbl_cb);
6173 }
6174
6175 static const struct lock_manager_operations nfsd_posix_mng_ops = {
6176 .lm_notify = nfsd4_lm_notify,
6177 .lm_get_owner = nfsd4_fl_get_owner,
6178 .lm_put_owner = nfsd4_fl_put_owner,
6179 };
6180
6181 static inline void
6182 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
6183 {
6184 struct nfs4_lockowner *lo;
6185
6186 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
6187 lo = (struct nfs4_lockowner *) fl->fl_owner;
6188 xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
6189 GFP_KERNEL);
6190 if (!deny->ld_owner.data)
6191 /* We just don't care that much */
6192 goto nevermind;
6193 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
6194 } else {
6195 nevermind:
6196 deny->ld_owner.len = 0;
6197 deny->ld_owner.data = NULL;
6198 deny->ld_clientid.cl_boot = 0;
6199 deny->ld_clientid.cl_id = 0;
6200 }
6201 deny->ld_start = fl->fl_start;
6202 deny->ld_length = NFS4_MAX_UINT64;
6203 if (fl->fl_end != NFS4_MAX_UINT64)
6204 deny->ld_length = fl->fl_end - fl->fl_start + 1;
6205 deny->ld_type = NFS4_READ_LT;
6206 if (fl->fl_type != F_RDLCK)
6207 deny->ld_type = NFS4_WRITE_LT;
6208 }
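/*
 * Example: a conflicting lock covering bytes 100-109 is reported back
 * as ld_start = 100, ld_length = 109 - 100 + 1 = 10.
 */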
6209
6210 static struct nfs4_lockowner *
6211 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
6212 {
6213 unsigned int strhashval = ownerstr_hashval(owner);
6214 struct nfs4_stateowner *so;
6215
6216 lockdep_assert_held(&clp->cl_lock);
6217
6218 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
6219 so_strhash) {
6220 if (so->so_is_open_owner)
6221 continue;
6222 if (same_owner_str(so, owner))
6223 return lockowner(nfs4_get_stateowner(so));
6224 }
6225 return NULL;
6226 }
6227
6228 static struct nfs4_lockowner *
6229 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
6230 {
6231 struct nfs4_lockowner *lo;
6232
6233 spin_lock(&clp->cl_lock);
6234 lo = find_lockowner_str_locked(clp, owner);
6235 spin_unlock(&clp->cl_lock);
6236 return lo;
6237 }
6238
6239 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
6240 {
6241 unhash_lockowner_locked(lockowner(sop));
6242 }
6243
6244 static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
6245 {
6246 struct nfs4_lockowner *lo = lockowner(sop);
6247
6248 kmem_cache_free(lockowner_slab, lo);
6249 }
6250
6251 static const struct nfs4_stateowner_operations lockowner_ops = {
6252 .so_unhash = nfs4_unhash_lockowner,
6253 .so_free = nfs4_free_lockowner,
6254 };
6255
6256 /*
6257 * Alloc a lock owner structure.
6258 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
6259 * occurred.
6260 *
6261 * strhashval = ownerstr_hashval
6262 */
6263 static struct nfs4_lockowner *
6264 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
6265 struct nfs4_ol_stateid *open_stp,
6266 struct nfsd4_lock *lock)
6267 {
6268 struct nfs4_lockowner *lo, *ret;
6269
6270 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
6271 if (!lo)
6272 return NULL;
6273 INIT_LIST_HEAD(&lo->lo_blocked);
6274 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
6275 lo->lo_owner.so_is_open_owner = 0;
6276 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
6277 lo->lo_owner.so_ops = &lockowner_ops;
6278 spin_lock(&clp->cl_lock);
6279 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
6280 if (ret == NULL) {
6281 list_add(&lo->lo_owner.so_strhash,
6282 &clp->cl_ownerstr_hashtbl[strhashval]);
6283 ret = lo;
6284 } else
6285 nfs4_free_stateowner(&lo->lo_owner);
6286
6287 spin_unlock(&clp->cl_lock);
6288 return ret;
6289 }
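/*
 * Note: the re-lookup under cl_lock above is what makes two concurrent
 * LOCK requests creating the same lockowner safe; the loser frees its
 * freshly allocated owner and returns the one already hashed.
 */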
6290
6291 static struct nfs4_ol_stateid *
6292 find_lock_stateid(const struct nfs4_lockowner *lo,
6293 const struct nfs4_ol_stateid *ost)
6294 {
6295 struct nfs4_ol_stateid *lst;
6296
6297 lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
6298
6299 /* If ost is not hashed, ost->st_locks will not be valid */
6300 if (!nfs4_ol_stateid_unhashed(ost))
6301 list_for_each_entry(lst, &ost->st_locks, st_locks) {
6302 if (lst->st_stateowner == &lo->lo_owner) {
6303 refcount_inc(&lst->st_stid.sc_count);
6304 return lst;
6305 }
6306 }
6307 return NULL;
6308 }
6309
6310 static struct nfs4_ol_stateid *
6311 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
6312 struct nfs4_file *fp, struct inode *inode,
6313 struct nfs4_ol_stateid *open_stp)
6314 {
6315 struct nfs4_client *clp = lo->lo_owner.so_client;
6316 struct nfs4_ol_stateid *retstp;
6317
6318 mutex_init(&stp->st_mutex);
6319 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
6320 retry:
6321 spin_lock(&clp->cl_lock);
6322 if (nfs4_ol_stateid_unhashed(open_stp))
6323 goto out_close;
6324 retstp = find_lock_stateid(lo, open_stp);
6325 if (retstp)
6326 goto out_found;
6327 refcount_inc(&stp->st_stid.sc_count);
6328 stp->st_stid.sc_type = NFS4_LOCK_STID;
6329 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
6330 get_nfs4_file(fp);
6331 stp->st_stid.sc_file = fp;
6332 stp->st_access_bmap = 0;
6333 stp->st_deny_bmap = open_stp->st_deny_bmap;
6334 stp->st_openstp = open_stp;
6335 spin_lock(&fp->fi_lock);
6336 list_add(&stp->st_locks, &open_stp->st_locks);
6337 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
6338 list_add(&stp->st_perfile, &fp->fi_stateids);
6339 spin_unlock(&fp->fi_lock);
6340 spin_unlock(&clp->cl_lock);
6341 return stp;
6342 out_found:
6343 spin_unlock(&clp->cl_lock);
6344 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
6345 nfs4_put_stid(&retstp->st_stid);
6346 goto retry;
6347 }
6348 /* To keep mutex tracking happy */
6349 mutex_unlock(&stp->st_mutex);
6350 return retstp;
6351 out_close:
6352 spin_unlock(&clp->cl_lock);
6353 mutex_unlock(&stp->st_mutex);
6354 return NULL;
6355 }
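/*
 * Note on the retry above: a lock stateid found on the open stateid's
 * st_locks list may be in the middle of being closed; if
 * nfsd4_lock_ol_stateid() reports it is no longer usable, we drop our
 * reference and search again rather than return a dying stateid.
 */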
6356
6357 static struct nfs4_ol_stateid *
6358 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
6359 struct inode *inode, struct nfs4_ol_stateid *ost,
6360 bool *new)
6361 {
6362 struct nfs4_stid *ns = NULL;
6363 struct nfs4_ol_stateid *lst;
6364 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
6365 struct nfs4_client *clp = oo->oo_owner.so_client;
6366
6367 *new = false;
6368 spin_lock(&clp->cl_lock);
6369 lst = find_lock_stateid(lo, ost);
6370 spin_unlock(&clp->cl_lock);
6371 if (lst != NULL) {
6372 if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
6373 goto out;
6374 nfs4_put_stid(&lst->st_stid);
6375 }
6376 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
6377 if (ns == NULL)
6378 return NULL;
6379
6380 lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
6381 if (lst == openlockstateid(ns))
6382 *new = true;
6383 else
6384 nfs4_put_stid(ns);
6385 out:
6386 return lst;
6387 }
6388
6389 static int
6390 check_lock_length(u64 offset, u64 length)
6391 {
6392 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
6393 (length > ~offset)));
6394 }
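/*
 * Example: a LOCK request is rejected if its length is zero, or if
 * offset + length would exceed the maximum 64-bit offset (tested above
 * as length > ~offset); the special length NFS4_MAX_UINT64 means
 * "to end of file" and is always accepted.
 */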
6395
6396 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
6397 {
6398 struct nfs4_file *fp = lock_stp->st_stid.sc_file;
6399
6400 lockdep_assert_held(&fp->fi_lock);
6401
6402 if (test_access(access, lock_stp))
6403 return;
6404 __nfs4_file_get_access(fp, access);
6405 set_access(access, lock_stp);
6406 }
6407
6408 static __be32
6409 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
6410 struct nfs4_ol_stateid *ost,
6411 struct nfsd4_lock *lock,
6412 struct nfs4_ol_stateid **plst, bool *new)
6413 {
6414 __be32 status;
6415 struct nfs4_file *fi = ost->st_stid.sc_file;
6416 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
6417 struct nfs4_client *cl = oo->oo_owner.so_client;
6418 struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
6419 struct nfs4_lockowner *lo;
6420 struct nfs4_ol_stateid *lst;
6421 unsigned int strhashval;
6422
6423 lo = find_lockowner_str(cl, &lock->lk_new_owner);
6424 if (!lo) {
6425 strhashval = ownerstr_hashval(&lock->lk_new_owner);
6426 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
6427 if (lo == NULL)
6428 return nfserr_jukebox;
6429 } else {
6430 /* with an existing lockowner, seqids must be the same */
6431 status = nfserr_bad_seqid;
6432 if (!cstate->minorversion &&
6433 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
6434 goto out;
6435 }
6436
6437 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
6438 if (lst == NULL) {
6439 status = nfserr_jukebox;
6440 goto out;
6441 }
6442
6443 status = nfs_ok;
6444 *plst = lst;
6445 out:
6446 nfs4_put_stateowner(&lo->lo_owner);
6447 return status;
6448 }
6449
6450 /*
6451 * LOCK operation
6452 */
6453 __be32
6454 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6455 union nfsd4_op_u *u)
6456 {
6457 struct nfsd4_lock *lock = &u->lock;
6458 struct nfs4_openowner *open_sop = NULL;
6459 struct nfs4_lockowner *lock_sop = NULL;
6460 struct nfs4_ol_stateid *lock_stp = NULL;
6461 struct nfs4_ol_stateid *open_stp = NULL;
6462 struct nfs4_file *fp;
6463 struct nfsd_file *nf = NULL;
6464 struct nfsd4_blocked_lock *nbl = NULL;
6465 struct file_lock *file_lock = NULL;
6466 struct file_lock *conflock = NULL;
6467 __be32 status = 0;
6468 int lkflg;
6469 int err;
6470 bool new = false;
6471 unsigned char fl_type;
6472 unsigned int fl_flags = FL_POSIX;
6473 struct net *net = SVC_NET(rqstp);
6474 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6475
6476 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
6477 (long long) lock->lk_offset,
6478 (long long) lock->lk_length);
6479
6480 if (check_lock_length(lock->lk_offset, lock->lk_length))
6481 return nfserr_inval;
6482
6483 if ((status = fh_verify(rqstp, &cstate->current_fh,
6484 S_IFREG, NFSD_MAY_LOCK))) {
6485 dprintk("NFSD: nfsd4_lock: permission denied!\n");
6486 return status;
6487 }
6488
6489 if (lock->lk_is_new) {
6490 if (nfsd4_has_session(cstate))
6491 /* See rfc 5661 18.10.3: given clientid is ignored: */
6492 memcpy(&lock->lk_new_clientid,
6493 &cstate->session->se_client->cl_clientid,
6494 sizeof(clientid_t));
6495
6496 status = nfserr_stale_clientid;
6497 if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
6498 goto out;
6499
6500 /* validate and update open stateid and open seqid */
6501 status = nfs4_preprocess_confirmed_seqid_op(cstate,
6502 lock->lk_new_open_seqid,
6503 &lock->lk_new_open_stateid,
6504 &open_stp, nn);
6505 if (status)
6506 goto out;
6507 mutex_unlock(&open_stp->st_mutex);
6508 open_sop = openowner(open_stp->st_stateowner);
6509 status = nfserr_bad_stateid;
6510 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
6511 &lock->lk_new_clientid))
6512 goto out;
6513 status = lookup_or_create_lock_state(cstate, open_stp, lock,
6514 &lock_stp, &new);
6515 } else {
6516 status = nfs4_preprocess_seqid_op(cstate,
6517 lock->lk_old_lock_seqid,
6518 &lock->lk_old_lock_stateid,
6519 NFS4_LOCK_STID, &lock_stp, nn);
6520 }
6521 if (status)
6522 goto out;
6523 lock_sop = lockowner(lock_stp->st_stateowner);
6524
6525 lkflg = setlkflg(lock->lk_type);
6526 status = nfs4_check_openmode(lock_stp, lkflg);
6527 if (status)
6528 goto out;
6529
6530 status = nfserr_grace;
6531 if (locks_in_grace(net) && !lock->lk_reclaim)
6532 goto out;
6533 status = nfserr_no_grace;
6534 if (!locks_in_grace(net) && lock->lk_reclaim)
6535 goto out;
6536
6537 fp = lock_stp->st_stid.sc_file;
6538 switch (lock->lk_type) {
6539 case NFS4_READW_LT:
6540 if (nfsd4_has_session(cstate))
6541 fl_flags |= FL_SLEEP;
6542 /* Fallthrough */
6543 case NFS4_READ_LT:
6544 spin_lock(&fp->fi_lock);
6545 nf = find_readable_file_locked(fp);
6546 if (nf)
6547 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
6548 spin_unlock(&fp->fi_lock);
6549 fl_type = F_RDLCK;
6550 break;
6551 case NFS4_WRITEW_LT:
6552 if (nfsd4_has_session(cstate))
6553 fl_flags |= FL_SLEEP;
6554 /* Fallthrough */
6555 case NFS4_WRITE_LT:
6556 spin_lock(&fp->fi_lock);
6557 nf = find_writeable_file_locked(fp);
6558 if (nf)
6559 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
6560 spin_unlock(&fp->fi_lock);
6561 fl_type = F_WRLCK;
6562 break;
6563 default:
6564 status = nfserr_inval;
6565 goto out;
6566 }
6567
6568 if (!nf) {
6569 status = nfserr_openmode;
6570 goto out;
6571 }
6572
6573 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
6574 if (!nbl) {
6575 dprintk("NFSD: %s: unable to allocate block!\n", __func__);
6576 status = nfserr_jukebox;
6577 goto out;
6578 }
6579
6580 file_lock = &nbl->nbl_lock;
6581 file_lock->fl_type = fl_type;
6582 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
6583 file_lock->fl_pid = current->tgid;
6584 file_lock->fl_file = nf->nf_file;
6585 file_lock->fl_flags = fl_flags;
6586 file_lock->fl_lmops = &nfsd_posix_mng_ops;
6587 file_lock->fl_start = lock->lk_offset;
6588 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
6589 nfs4_transform_lock_offset(file_lock);
6590
6591 conflock = locks_alloc_lock();
6592 if (!conflock) {
6593 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6594 status = nfserr_jukebox;
6595 goto out;
6596 }
6597
6598 if (fl_flags & FL_SLEEP) {
6599 nbl->nbl_time = get_seconds();
6600 spin_lock(&nn->blocked_locks_lock);
6601 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
6602 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
6603 spin_unlock(&nn->blocked_locks_lock);
6604 }
6605
6606 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
6607 switch (err) {
6608 case 0: /* success! */
6609 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
6610 status = 0;
6611 if (lock->lk_reclaim)
6612 nn->somebody_reclaimed = true;
6613 break;
6614 case FILE_LOCK_DEFERRED:
6615 nbl = NULL;
6616 /* Fallthrough */
6617 case -EAGAIN: /* conflock holds conflicting lock */
6618 status = nfserr_denied;
6619 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
6620 nfs4_set_lock_denied(conflock, &lock->lk_denied);
6621 break;
6622 case -EDEADLK:
6623 status = nfserr_deadlock;
6624 break;
6625 default:
6626 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
6627 status = nfserrno(err);
6628 break;
6629 }
6630 out:
6631 if (nbl) {
6632 /* dequeue it if we queued it before */
6633 if (fl_flags & FL_SLEEP) {
6634 spin_lock(&nn->blocked_locks_lock);
6635 list_del_init(&nbl->nbl_list);
6636 list_del_init(&nbl->nbl_lru);
6637 spin_unlock(&nn->blocked_locks_lock);
6638 }
6639 free_blocked_lock(nbl);
6640 }
6641 if (nf)
6642 nfsd_file_put(nf);
6643 if (lock_stp) {
6644 /* Bump seqid manually if the 4.0 replay owner is openowner */
6645 if (cstate->replay_owner &&
6646 cstate->replay_owner != &lock_sop->lo_owner &&
6647 seqid_mutating_err(ntohl(status)))
6648 lock_sop->lo_owner.so_seqid++;
6649
6650 /*
6651 * If this is a new, never-before-used stateid, and we are
6652 * returning an error, then just go ahead and release it.
6653 */
6654 if (status && new)
6655 release_lock_stateid(lock_stp);
6656
6657 mutex_unlock(&lock_stp->st_mutex);
6658
6659 nfs4_put_stid(&lock_stp->st_stid);
6660 }
6661 if (open_stp)
6662 nfs4_put_stid(&open_stp->st_stid);
6663 nfsd4_bump_seqid(cstate, status);
6664 if (conflock)
6665 locks_free_lock(conflock);
6666 return status;
6667 }
6668
6669 /*
6670 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
6671 * so we do a temporary open here just to get an open file to pass to
6672 * vfs_test_lock. (Arguably perhaps test_lock should be done with an
6673 * inode operation.)
6674 */
6675 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
6676 {
6677 struct nfsd_file *nf;
6678 __be32 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
6679 if (!err) {
6680 err = nfserrno(vfs_test_lock(nf->nf_file, lock));
6681 nfsd_file_put(nf);
6682 }
6683 return err;
6684 }
6685
6686 /*
6687 * LOCKT operation
6688 */
6689 __be32
6690 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6691 union nfsd4_op_u *u)
6692 {
6693 struct nfsd4_lockt *lockt = &u->lockt;
6694 struct file_lock *file_lock = NULL;
6695 struct nfs4_lockowner *lo = NULL;
6696 __be32 status;
6697 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6698
6699 if (locks_in_grace(SVC_NET(rqstp)))
6700 return nfserr_grace;
6701
6702 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
6703 return nfserr_inval;
6704
6705 if (!nfsd4_has_session(cstate)) {
6706 status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
6707 if (status)
6708 goto out;
6709 }
6710
6711 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
6712 goto out;
6713
6714 file_lock = locks_alloc_lock();
6715 if (!file_lock) {
6716 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6717 status = nfserr_jukebox;
6718 goto out;
6719 }
6720
6721 switch (lockt->lt_type) {
6722 case NFS4_READ_LT:
6723 case NFS4_READW_LT:
6724 file_lock->fl_type = F_RDLCK;
6725 break;
6726 case NFS4_WRITE_LT:
6727 case NFS4_WRITEW_LT:
6728 file_lock->fl_type = F_WRLCK;
6729 break;
6730 default:
6731 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
6732 status = nfserr_inval;
6733 goto out;
6734 }
6735
6736 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
6737 if (lo)
6738 file_lock->fl_owner = (fl_owner_t)lo;
6739 file_lock->fl_pid = current->tgid;
6740 file_lock->fl_flags = FL_POSIX;
6741
6742 file_lock->fl_start = lockt->lt_offset;
6743 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
6744
6745 nfs4_transform_lock_offset(file_lock);
6746
6747 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
6748 if (status)
6749 goto out;
6750
6751 if (file_lock->fl_type != F_UNLCK) {
6752 status = nfserr_denied;
6753 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
6754 }
6755 out:
6756 if (lo)
6757 nfs4_put_stateowner(&lo->lo_owner);
6758 if (file_lock)
6759 locks_free_lock(file_lock);
6760 return status;
6761 }
6762
6763 __be32
6764 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6765 union nfsd4_op_u *u)
6766 {
6767 struct nfsd4_locku *locku = &u->locku;
6768 struct nfs4_ol_stateid *stp;
6769 struct nfsd_file *nf = NULL;
6770 struct file_lock *file_lock = NULL;
6771 __be32 status;
6772 int err;
6773 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6774
6775 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
6776 (long long) locku->lu_offset,
6777 (long long) locku->lu_length);
6778
6779 if (check_lock_length(locku->lu_offset, locku->lu_length))
6780 return nfserr_inval;
6781
6782 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
6783 &locku->lu_stateid, NFS4_LOCK_STID,
6784 &stp, nn);
6785 if (status)
6786 goto out;
6787 nf = find_any_file(stp->st_stid.sc_file);
6788 if (!nf) {
6789 status = nfserr_lock_range;
6790 goto put_stateid;
6791 }
6792 file_lock = locks_alloc_lock();
6793 if (!file_lock) {
6794 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6795 status = nfserr_jukebox;
6796 goto put_file;
6797 }
6798
6799 file_lock->fl_type = F_UNLCK;
6800 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
6801 file_lock->fl_pid = current->tgid;
6802 file_lock->fl_file = nf->nf_file;
6803 file_lock->fl_flags = FL_POSIX;
6804 file_lock->fl_lmops = &nfsd_posix_mng_ops;
6805 file_lock->fl_start = locku->lu_offset;
6806
6807 file_lock->fl_end = last_byte_offset(locku->lu_offset,
6808 locku->lu_length);
6809 nfs4_transform_lock_offset(file_lock);
6810
6811 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
6812 if (err) {
6813 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
6814 goto out_nfserr;
6815 }
6816 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
6817 put_file:
6818 nfsd_file_put(nf);
6819 put_stateid:
6820 mutex_unlock(&stp->st_mutex);
6821 nfs4_put_stid(&stp->st_stid);
6822 out:
6823 nfsd4_bump_seqid(cstate, status);
6824 if (file_lock)
6825 locks_free_lock(file_lock);
6826 return status;
6827
6828 out_nfserr:
6829 status = nfserrno(err);
6830 goto put_file;
6831 }
6832
6833 /*
6834 * returns
6835 * true: locks held by lockowner
6836 * false: no locks held by lockowner
6837 */
6838 static bool
6839 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
6840 {
6841 struct file_lock *fl;
6842 int status = false;
6843 struct nfsd_file *nf = find_any_file(fp);
6844 struct inode *inode;
6845 struct file_lock_context *flctx;
6846
6847 if (!nf) {
6848 /* Any valid lock stateid should have some sort of access */
6849 WARN_ON_ONCE(1);
6850 return status;
6851 }
6852
6853 inode = locks_inode(nf->nf_file);
6854 flctx = inode->i_flctx;
6855
6856 if (flctx && !list_empty_careful(&flctx->flc_posix)) {
6857 spin_lock(&flctx->flc_lock);
6858 list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
6859 if (fl->fl_owner == (fl_owner_t)lowner) {
6860 status = true;
6861 break;
6862 }
6863 }
6864 spin_unlock(&flctx->flc_lock);
6865 }
6866 nfsd_file_put(nf);
6867 return status;
6868 }
6869
6870 __be32
6871 nfsd4_release_lockowner(struct svc_rqst *rqstp,
6872 struct nfsd4_compound_state *cstate,
6873 union nfsd4_op_u *u)
6874 {
6875 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
6876 clientid_t *clid = &rlockowner->rl_clientid;
6877 struct nfs4_stateowner *sop;
6878 struct nfs4_lockowner *lo = NULL;
6879 struct nfs4_ol_stateid *stp;
6880 struct xdr_netobj *owner = &rlockowner->rl_owner;
6881 unsigned int hashval = ownerstr_hashval(owner);
6882 __be32 status;
6883 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6884 struct nfs4_client *clp;
6885 LIST_HEAD (reaplist);
6886
6887 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
6888 clid->cl_boot, clid->cl_id);
6889
6890 status = lookup_clientid(clid, cstate, nn);
6891 if (status)
6892 return status;
6893
6894 clp = cstate->clp;
6895 /* Find the matching lock stateowner */
6896 spin_lock(&clp->cl_lock);
6897 list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
6898 so_strhash) {
6899
6900 if (sop->so_is_open_owner || !same_owner_str(sop, owner))
6901 continue;
6902
6903 if (atomic_read(&sop->so_count) != 1) {
6904 spin_unlock(&clp->cl_lock);
6905 return nfserr_locks_held;
6906 }
6907
6908 lo = lockowner(sop);
6909 nfs4_get_stateowner(sop);
6910 break;
6911 }
6912 if (!lo) {
6913 spin_unlock(&clp->cl_lock);
6914 return status;
6915 }
6916
6917 unhash_lockowner_locked(lo);
6918 while (!list_empty(&lo->lo_owner.so_stateids)) {
6919 stp = list_first_entry(&lo->lo_owner.so_stateids,
6920 struct nfs4_ol_stateid,
6921 st_perstateowner);
6922 WARN_ON(!unhash_lock_stateid(stp));
6923 put_ol_stateid_locked(stp, &reaplist);
6924 }
6925 spin_unlock(&clp->cl_lock);
6926 free_ol_stateid_reaplist(&reaplist);
6927 remove_blocked_locks(lo);
6928 nfs4_put_stateowner(&lo->lo_owner);
6929
6930 return status;
6931 }
6932
6933 static inline struct nfs4_client_reclaim *
6934 alloc_reclaim(void)
6935 {
6936 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
6937 }
6938
6939 bool
6940 nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
6941 {
6942 struct nfs4_client_reclaim *crp;
6943
6944 crp = nfsd4_find_reclaim_client(name, nn);
6945 return (crp && crp->cr_clp);
6946 }
6947
6948 /*
6949 * failure => all reset bets are off, nfserr_no_grace...
6950 *
6951 * The caller is responsible for freeing name.data if NULL is returned (it
6952 * will be freed in nfs4_remove_reclaim_record in the normal case).
6953 */
6954 struct nfs4_client_reclaim *
6955 nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
6956 struct nfsd_net *nn)
6957 {
6958 unsigned int strhashval;
6959 struct nfs4_client_reclaim *crp;
6960
6961 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", name.len, name.data);
6962 crp = alloc_reclaim();
6963 if (crp) {
6964 strhashval = clientstr_hashval(name);
6965 INIT_LIST_HEAD(&crp->cr_strhash);
6966 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
6967 crp->cr_name.data = name.data;
6968 crp->cr_name.len = name.len;
6969 crp->cr_princhash.data = princhash.data;
6970 crp->cr_princhash.len = princhash.len;
6971 crp->cr_clp = NULL;
6972 nn->reclaim_str_hashtbl_size++;
6973 }
6974 return crp;
6975 }
6976
6977 void
6978 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
6979 {
6980 list_del(&crp->cr_strhash);
6981 kfree(crp->cr_name.data);
6982 kfree(crp->cr_princhash.data);
6983 kfree(crp);
6984 nn->reclaim_str_hashtbl_size--;
6985 }
6986
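/*
 * Free every remaining reclaim record in the per-net hash table; the
 * table is expected to be empty when this returns.
 */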
6987 void
6988 nfs4_release_reclaim(struct nfsd_net *nn)
6989 {
6990 struct nfs4_client_reclaim *crp = NULL;
6991 int i;
6992
6993 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6994 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
6995 crp = list_entry(nn->reclaim_str_hashtbl[i].next,
6996 struct nfs4_client_reclaim, cr_strhash);
6997 nfs4_remove_reclaim_record(crp, nn);
6998 }
6999 }
7000 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
7001 }
7002
7003 /* called from OPEN, CLAIM_PREVIOUS with a new clientid. */
7005 struct nfs4_client_reclaim *
7006 nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
7007 {
7008 unsigned int strhashval;
7009 struct nfs4_client_reclaim *crp = NULL;
7010
7011 dprintk("NFSD: nfs4_find_reclaim_client for name %.*s\n", name.len, name.data);
7012
7013 strhashval = clientstr_hashval(name);
7014 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
7015 if (compare_blob(&crp->cr_name, &name) == 0) {
7016 return crp;
7017 }
7018 }
7019 return NULL;
7020 }
7021
7022 /*
7023 * Called from OPEN. Look for clientid in reclaim list.
7024 */
7025 __be32
7026 nfs4_check_open_reclaim(clientid_t *clid,
7027 struct nfsd4_compound_state *cstate,
7028 struct nfsd_net *nn)
7029 {
7030 __be32 status;
7031
7032 /* find clientid in conf_id_hashtbl */
7033 status = lookup_clientid(clid, cstate, nn);
7034 if (status)
7035 return nfserr_reclaim_bad;
7036
7037 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
7038 return nfserr_no_grace;
7039
7040 if (nfsd4_client_record_check(cstate->clp))
7041 return nfserr_reclaim_bad;
7042
7043 return nfs_ok;
7044 }
7045
7046 #ifdef CONFIG_NFSD_FAULT_INJECTION
7047 static inline void
7048 put_client(struct nfs4_client *clp)
7049 {
7050 atomic_dec(&clp->cl_rpc_users);
7051 }
7052
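/*
 * Linear search of the per-net client LRU for a client with a matching
 * address.  Callers in this file hold nn->client_lock across the call.
 */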
7053 static struct nfs4_client *
7054 nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
7055 {
7056 struct nfs4_client *clp;
7057 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7058 nfsd_net_id);
7059
7060 if (!nfsd_netns_ready(nn))
7061 return NULL;
7062
7063 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
7064 if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
7065 return clp;
7066 }
7067 return NULL;
7068 }
7069
7070 u64
7071 nfsd_inject_print_clients(void)
7072 {
7073 struct nfs4_client *clp;
7074 u64 count = 0;
7075 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7076 nfsd_net_id);
7077 char buf[INET6_ADDRSTRLEN];
7078
7079 if (!nfsd_netns_ready(nn))
7080 return 0;
7081
7082 spin_lock(&nn->client_lock);
7083 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
7084 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
7085 pr_info("NFS Client: %s\n", buf);
7086 ++count;
7087 }
7088 spin_unlock(&nn->client_lock);
7089
7090 return count;
7091 }
7092
7093 u64
7094 nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
7095 {
7096 u64 count = 0;
7097 struct nfs4_client *clp;
7098 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7099 nfsd_net_id);
7100
7101 if (!nfsd_netns_ready(nn))
7102 return count;
7103
7104 spin_lock(&nn->client_lock);
7105 clp = nfsd_find_client(addr, addr_size);
7106 if (clp) {
7107 if (mark_client_expired_locked(clp) == nfs_ok)
7108 ++count;
7109 else
7110 clp = NULL;
7111 }
7112 spin_unlock(&nn->client_lock);
7113
7114 if (clp)
7115 expire_client(clp);
7116
7117 return count;
7118 }
7119
7120 u64
7121 nfsd_inject_forget_clients(u64 max)
7122 {
7123 u64 count = 0;
7124 struct nfs4_client *clp, *next;
7125 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7126 nfsd_net_id);
7127 LIST_HEAD(reaplist);
7128
7129 if (!nfsd_netns_ready(nn))
7130 return count;
7131
7132 spin_lock(&nn->client_lock);
7133 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
7134 if (mark_client_expired_locked(clp) == nfs_ok) {
7135 list_add(&clp->cl_lru, &reaplist);
7136 if (max != 0 && ++count >= max)
7137 break;
7138 }
7139 }
7140 spin_unlock(&nn->client_lock);
7141
7142 list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
7143 expire_client(clp);
7144
7145 return count;
7146 }
7147
7148 static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
7149 const char *type)
7150 {
7151 char buf[INET6_ADDRSTRLEN];
7152 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
7153 printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
7154 }
7155
7156 static void
7157 nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
7158 struct list_head *collect)
7159 {
7160 struct nfs4_client *clp = lst->st_stid.sc_client;
7161 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7162 nfsd_net_id);
7163
7164 if (!collect)
7165 return;
7166
7167 lockdep_assert_held(&nn->client_lock);
7168 atomic_inc(&clp->cl_rpc_users);
7169 list_add(&lst->st_locks, collect);
7170 }
7171
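/*
 * Walk every lock stateid of a client (openowner -> open stateid ->
 * lock stateid).  If @func is given it is applied to each lock stateid,
 * and matches are added to @collect; iteration stops after @max entries
 * (0 means no limit).
 */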
7172 static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
7173 struct list_head *collect,
7174 bool (*func)(struct nfs4_ol_stateid *))
7175 {
7176 struct nfs4_openowner *oop;
7177 struct nfs4_ol_stateid *stp, *st_next;
7178 struct nfs4_ol_stateid *lst, *lst_next;
7179 u64 count = 0;
7180
7181 spin_lock(&clp->cl_lock);
7182 list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
7183 list_for_each_entry_safe(stp, st_next,
7184 &oop->oo_owner.so_stateids, st_perstateowner) {
7185 list_for_each_entry_safe(lst, lst_next,
7186 &stp->st_locks, st_locks) {
7187 if (func) {
7188 if (func(lst))
7189 nfsd_inject_add_lock_to_list(lst,
7190 collect);
7191 }
7192 ++count;
7193 /*
7194 * Despite the fact that these functions deal
7195 * with 64-bit integers for "count", we must
7196 * ensure that it doesn't blow up the
7197 * clp->cl_rpc_users. Throw a warning if we
7198 * start to approach INT_MAX here.
7199 */
7200 WARN_ON_ONCE(count == (INT_MAX / 2));
7201 if (count == max)
7202 goto out;
7203 }
7204 }
7205 }
7206 out:
7207 spin_unlock(&clp->cl_lock);
7208
7209 return count;
7210 }
7211
7212 static u64
7213 nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
7214 u64 max)
7215 {
7216 return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
7217 }
7218
7219 static u64
7220 nfsd_print_client_locks(struct nfs4_client *clp)
7221 {
7222 u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
7223 nfsd_print_count(clp, count, "locked files");
7224 return count;
7225 }
7226
7227 u64
7228 nfsd_inject_print_locks(void)
7229 {
7230 struct nfs4_client *clp;
7231 u64 count = 0;
7232 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7233 nfsd_net_id);
7234
7235 if (!nfsd_netns_ready(nn))
7236 return 0;
7237
7238 spin_lock(&nn->client_lock);
7239 list_for_each_entry(clp, &nn->client_lru, cl_lru)
7240 count += nfsd_print_client_locks(clp);
7241 spin_unlock(&nn->client_lock);
7242
7243 return count;
7244 }
7245
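/*
 * Release each collected lock stateid and drop the cl_rpc_users
 * reference taken on its client while it was being collected.
 */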
7246 static void
7247 nfsd_reap_locks(struct list_head *reaplist)
7248 {
7249 struct nfs4_client *clp;
7250 struct nfs4_ol_stateid *stp, *next;
7251
7252 list_for_each_entry_safe(stp, next, reaplist, st_locks) {
7253 list_del_init(&stp->st_locks);
7254 clp = stp->st_stid.sc_client;
7255 nfs4_put_stid(&stp->st_stid);
7256 put_client(clp);
7257 }
7258 }
7259
7260 u64
7261 nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
7262 {
7263 unsigned int count = 0;
7264 struct nfs4_client *clp;
7265 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7266 nfsd_net_id);
7267 LIST_HEAD(reaplist);
7268
7269 if (!nfsd_netns_ready(nn))
7270 return count;
7271
7272 spin_lock(&nn->client_lock);
7273 clp = nfsd_find_client(addr, addr_size);
7274 if (clp)
7275 count = nfsd_collect_client_locks(clp, &reaplist, 0);
7276 spin_unlock(&nn->client_lock);
7277 nfsd_reap_locks(&reaplist);
7278 return count;
7279 }
7280
7281 u64
7282 nfsd_inject_forget_locks(u64 max)
7283 {
7284 u64 count = 0;
7285 struct nfs4_client *clp;
7286 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7287 nfsd_net_id);
7288 LIST_HEAD(reaplist);
7289
7290 if (!nfsd_netns_ready(nn))
7291 return count;
7292
7293 spin_lock(&nn->client_lock);
7294 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
7295 count += nfsd_collect_client_locks(clp, &reaplist, max - count);
7296 if (max != 0 && count >= max)
7297 break;
7298 }
7299 spin_unlock(&nn->client_lock);
7300 nfsd_reap_locks(&reaplist);
7301 return count;
7302 }
7303
7304 static u64
7305 nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
7306 struct list_head *collect,
7307 void (*func)(struct nfs4_openowner *))
7308 {
7309 struct nfs4_openowner *oop, *next;
7310 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7311 nfsd_net_id);
7312 u64 count = 0;
7313
7314 lockdep_assert_held(&nn->client_lock);
7315
7316 spin_lock(&clp->cl_lock);
7317 list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
7318 if (func) {
7319 func(oop);
7320 if (collect) {
7321 atomic_inc(&clp->cl_rpc_users);
7322 list_add(&oop->oo_perclient, collect);
7323 }
7324 }
7325 ++count;
7326 /*
7327 * Despite the fact that these functions deal with
7328 * 64-bit integers for "count", we must ensure that
7329 * it doesn't blow up the clp->cl_rpc_users. Throw a
7330 * warning if we start to approach INT_MAX here.
7331 */
7332 WARN_ON_ONCE(count == (INT_MAX / 2));
7333 if (count == max)
7334 break;
7335 }
7336 spin_unlock(&clp->cl_lock);
7337
7338 return count;
7339 }
7340
7341 static u64
7342 nfsd_print_client_openowners(struct nfs4_client *clp)
7343 {
7344 u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);
7345
7346 nfsd_print_count(clp, count, "openowners");
7347 return count;
7348 }
7349
7350 static u64
7351 nfsd_collect_client_openowners(struct nfs4_client *clp,
7352 struct list_head *collect, u64 max)
7353 {
7354 return nfsd_foreach_client_openowner(clp, max, collect,
7355 unhash_openowner_locked);
7356 }
7357
7358 u64
7359 nfsd_inject_print_openowners(void)
7360 {
7361 struct nfs4_client *clp;
7362 u64 count = 0;
7363 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7364 nfsd_net_id);
7365
7366 if (!nfsd_netns_ready(nn))
7367 return 0;
7368
7369 spin_lock(&nn->client_lock);
7370 list_for_each_entry(clp, &nn->client_lru, cl_lru)
7371 count += nfsd_print_client_openowners(clp);
7372 spin_unlock(&nn->client_lock);
7373
7374 return count;
7375 }
7376
7377 static void
7378 nfsd_reap_openowners(struct list_head *reaplist)
7379 {
7380 struct nfs4_client *clp;
7381 struct nfs4_openowner *oop, *next;
7382
7383 list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
7384 list_del_init(&oop->oo_perclient);
7385 clp = oop->oo_owner.so_client;
7386 release_openowner(oop);
7387 put_client(clp);
7388 }
7389 }
7390
7391 u64
7392 nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
7393 size_t addr_size)
7394 {
7395 unsigned int count = 0;
7396 struct nfs4_client *clp;
7397 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7398 nfsd_net_id);
7399 LIST_HEAD(reaplist);
7400
7401 if (!nfsd_netns_ready(nn))
7402 return count;
7403
7404 spin_lock(&nn->client_lock);
7405 clp = nfsd_find_client(addr, addr_size);
7406 if (clp)
7407 count = nfsd_collect_client_openowners(clp, &reaplist, 0);
7408 spin_unlock(&nn->client_lock);
7409 nfsd_reap_openowners(&reaplist);
7410 return count;
7411 }
7412
7413 u64
7414 nfsd_inject_forget_openowners(u64 max)
7415 {
7416 u64 count = 0;
7417 struct nfs4_client *clp;
7418 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7419 nfsd_net_id);
7420 LIST_HEAD(reaplist);
7421
7422 if (!nfsd_netns_ready(nn))
7423 return count;
7424
7425 spin_lock(&nn->client_lock);
7426 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
7427 count += nfsd_collect_client_openowners(clp, &reaplist,
7428 max - count);
7429 if (max != 0 && count >= max)
7430 break;
7431 }
7432 spin_unlock(&nn->client_lock);
7433 nfsd_reap_openowners(&reaplist);
7434 return count;
7435 }
7436
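/*
 * Walk the client's delegations under the state_lock, counting up to
 * @max of them (0 means no limit).  When @victims is given, undisturbed
 * delegations (dl_time == 0) are unhashed and moved onto that list.
 */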
7437 static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
7438 struct list_head *victims)
7439 {
7440 struct nfs4_delegation *dp, *next;
7441 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7442 nfsd_net_id);
7443 u64 count = 0;
7444
7445 lockdep_assert_held(&nn->client_lock);
7446
7447 spin_lock(&state_lock);
7448 list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
7449 if (victims) {
7450 /*
7451 * It's not safe to mess with delegations that have a
7452 * non-zero dl_time. They might have already been broken
7453 * and could be processed by the laundromat outside of
7454 * the state_lock. Just leave them be.
7455 */
7456 if (dp->dl_time != 0)
7457 continue;
7458
7459 atomic_inc(&clp->cl_rpc_users);
7460 WARN_ON(!unhash_delegation_locked(dp));
7461 list_add(&dp->dl_recall_lru, victims);
7462 }
7463 ++count;
7464 /*
7465 * Despite the fact that these functions deal with
7466 * 64-bit integers for "count", we must ensure that
7467 * it doesn't blow up the clp->cl_rpc_users. Throw a
7468 * warning if we start to approach INT_MAX here.
7469 */
7470 WARN_ON_ONCE(count == (INT_MAX / 2));
7471 if (count == max)
7472 break;
7473 }
7474 spin_unlock(&state_lock);
7475 return count;
7476 }
7477
7478 static u64
7479 nfsd_print_client_delegations(struct nfs4_client *clp)
7480 {
7481 u64 count = nfsd_find_all_delegations(clp, 0, NULL);
7482
7483 nfsd_print_count(clp, count, "delegations");
7484 return count;
7485 }
7486
7487 u64
7488 nfsd_inject_print_delegations(void)
7489 {
7490 struct nfs4_client *clp;
7491 u64 count = 0;
7492 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7493 nfsd_net_id);
7494
7495 if (!nfsd_netns_ready(nn))
7496 return 0;
7497
7498 spin_lock(&nn->client_lock);
7499 list_for_each_entry(clp, &nn->client_lru, cl_lru)
7500 count += nfsd_print_client_delegations(clp);
7501 spin_unlock(&nn->client_lock);
7502
7503 return count;
7504 }
7505
7506 static void
7507 nfsd_forget_delegations(struct list_head *reaplist)
7508 {
7509 struct nfs4_client *clp;
7510 struct nfs4_delegation *dp, *next;
7511
7512 list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
7513 list_del_init(&dp->dl_recall_lru);
7514 clp = dp->dl_stid.sc_client;
7515 revoke_delegation(dp);
7516 put_client(clp);
7517 }
7518 }
7519
7520 u64
7521 nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
7522 size_t addr_size)
7523 {
7524 u64 count = 0;
7525 struct nfs4_client *clp;
7526 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7527 nfsd_net_id);
7528 LIST_HEAD(reaplist);
7529
7530 if (!nfsd_netns_ready(nn))
7531 return count;
7532
7533 spin_lock(&nn->client_lock);
7534 clp = nfsd_find_client(addr, addr_size);
7535 if (clp)
7536 count = nfsd_find_all_delegations(clp, 0, &reaplist);
7537 spin_unlock(&nn->client_lock);
7538
7539 nfsd_forget_delegations(&reaplist);
7540 return count;
7541 }
7542
7543 u64
7544 nfsd_inject_forget_delegations(u64 max)
7545 {
7546 u64 count = 0;
7547 struct nfs4_client *clp;
7548 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7549 nfsd_net_id);
7550 LIST_HEAD(reaplist);
7551
7552 if (!nfsd_netns_ready(nn))
7553 return count;
7554
7555 spin_lock(&nn->client_lock);
7556 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
7557 count += nfsd_find_all_delegations(clp, max - count, &reaplist);
7558 if (max != 0 && count >= max)
7559 break;
7560 }
7561 spin_unlock(&nn->client_lock);
7562 nfsd_forget_delegations(&reaplist);
7563 return count;
7564 }
7565
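/*
 * For each delegation collected above, clear dl_time, issue a recall
 * callback via nfsd_break_one_deleg() and drop the client reference
 * taken during collection.
 */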
7566 static void
7567 nfsd_recall_delegations(struct list_head *reaplist)
7568 {
7569 struct nfs4_client *clp;
7570 struct nfs4_delegation *dp, *next;
7571
7572 list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
7573 list_del_init(&dp->dl_recall_lru);
7574 clp = dp->dl_stid.sc_client;
7575 /*
7576 * We skipped all entries that had a zero dl_time before,
7577 * so we can now reset the dl_time back to 0. If a delegation
7578 * break comes in now, then it won't make any difference since
7579 * we're recalling it either way.
7580 */
7581 spin_lock(&state_lock);
7582 dp->dl_time = 0;
7583 spin_unlock(&state_lock);
7584 nfsd_break_one_deleg(dp);
7585 put_client(clp);
7586 }
7587 }
7588
7589 u64
7590 nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
7591 size_t addr_size)
7592 {
7593 u64 count = 0;
7594 struct nfs4_client *clp;
7595 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7596 nfsd_net_id);
7597 LIST_HEAD(reaplist);
7598
7599 if (!nfsd_netns_ready(nn))
7600 return count;
7601
7602 spin_lock(&nn->client_lock);
7603 clp = nfsd_find_client(addr, addr_size);
7604 if (clp)
7605 count = nfsd_find_all_delegations(clp, 0, &reaplist);
7606 spin_unlock(&nn->client_lock);
7607
7608 nfsd_recall_delegations(&reaplist);
7609 return count;
7610 }
7611
7612 u64
7613 nfsd_inject_recall_delegations(u64 max)
7614 {
7615 u64 count = 0;
7616 struct nfs4_client *clp, *next;
7617 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7618 nfsd_net_id);
7619 LIST_HEAD(reaplist);
7620
7621 if (!nfsd_netns_ready(nn))
7622 return count;
7623
7624 spin_lock(&nn->client_lock);
7625 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
7626 count += nfsd_find_all_delegations(clp, max - count, &reaplist);
7627 if (max != 0 && ++count >= max)
7628 break;
7629 }
7630 spin_unlock(&nn->client_lock);
7631 nfsd_recall_delegations(&reaplist);
7632 return count;
7633 }
7634 #endif /* CONFIG_NFSD_FAULT_INJECTION */
7635
7636 /*
7637 * Since the lifetime of a delegation isn't limited to that of an open, a
7638 * client may quite reasonably hang on to a delegation as long as it has
7639 * the inode cached. This becomes an obvious problem the first time a
7640 * client's inode cache approaches the size of the server's total memory.
7641 *
7642 * For now we avoid this problem by imposing a hard limit on the number
7643 * of delegations, which varies according to the server's memory size.
7644 */
7645 static void
7646 set_max_delegations(void)
7647 {
7648 /*
7649 * Allow at most 4 delegations per megabyte of RAM. Quick
7650 * estimates suggest that in the worst case (where every delegation
7651 * is for a different inode), a delegation could take about 1.5K,
7652 * giving a worst case usage of about 6% of memory.
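	 * For example, with 4K pages (PAGE_SHIFT == 12) the shift below is
	 * 20 - 2 - 12 = 6, so the limit is nr_free_buffer_pages() / 64,
	 * i.e. 4 delegations per MiB of RAM (256 pages per MiB / 64).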
7653 */
7654 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
7655 }
7656
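/*
 * Allocate and initialize the per-network-namespace state: client and
 * session hash tables, the confirmed/unconfirmed name trees, LRU lists,
 * locks and the laundromat work item.  Returns 0 or -ENOMEM.
 */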
7657 static int nfs4_state_create_net(struct net *net)
7658 {
7659 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7660 int i;
7661
7662 nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
7663 sizeof(struct list_head),
7664 GFP_KERNEL);
7665 if (!nn->conf_id_hashtbl)
7666 goto err;
7667 nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
7668 sizeof(struct list_head),
7669 GFP_KERNEL);
7670 if (!nn->unconf_id_hashtbl)
7671 goto err_unconf_id;
7672 nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
7673 sizeof(struct list_head),
7674 GFP_KERNEL);
7675 if (!nn->sessionid_hashtbl)
7676 goto err_sessionid;
7677
7678 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7679 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
7680 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
7681 }
7682 for (i = 0; i < SESSION_HASH_SIZE; i++)
7683 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
7684 nn->conf_name_tree = RB_ROOT;
7685 nn->unconf_name_tree = RB_ROOT;
7686 nn->boot_time = get_seconds();
7687 nn->grace_ended = false;
7688 nn->nfsd4_manager.block_opens = true;
7689 INIT_LIST_HEAD(&nn->nfsd4_manager.list);
7690 INIT_LIST_HEAD(&nn->client_lru);
7691 INIT_LIST_HEAD(&nn->close_lru);
7692 INIT_LIST_HEAD(&nn->del_recall_lru);
7693 spin_lock_init(&nn->client_lock);
7694 spin_lock_init(&nn->s2s_cp_lock);
7695 idr_init(&nn->s2s_cp_stateids);
7696
7697 spin_lock_init(&nn->blocked_locks_lock);
7698 INIT_LIST_HEAD(&nn->blocked_locks_lru);
7699
7700 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
7701 get_net(net);
7702
7703 return 0;
7704
7705 err_sessionid:
7706 kfree(nn->unconf_id_hashtbl);
7707 err_unconf_id:
7708 kfree(nn->conf_id_hashtbl);
7709 err:
7710 return -ENOMEM;
7711 }
7712
7713 static void
7714 nfs4_state_destroy_net(struct net *net)
7715 {
7716 int i;
7717 struct nfs4_client *clp = NULL;
7718 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7719
7720 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7721 while (!list_empty(&nn->conf_id_hashtbl[i])) {
7722 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7723 destroy_client(clp);
7724 }
7725 }
7726
7727 WARN_ON(!list_empty(&nn->blocked_locks_lru));
7728
7729 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7730 while (!list_empty(&nn->unconf_id_hashtbl[i])) {
7731 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7732 destroy_client(clp);
7733 }
7734 }
7735
7736 kfree(nn->sessionid_hashtbl);
7737 kfree(nn->unconf_id_hashtbl);
7738 kfree(nn->conf_id_hashtbl);
7739 put_net(net);
7740 }
7741
7742 int
7743 nfs4_state_start_net(struct net *net)
7744 {
7745 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7746 int ret;
7747
7748 ret = nfs4_state_create_net(net);
7749 if (ret)
7750 return ret;
7751 locks_start_grace(net, &nn->nfsd4_manager);
7752 nfsd4_client_tracking_init(net);
7753 if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
7754 goto skip_grace;
7755 printk(KERN_INFO "NFSD: starting %ld-second grace period (net %x)\n",
7756 nn->nfsd4_grace, net->ns.inum);
7757 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
7758 return 0;
7759
7760 skip_grace:
7761 printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
7762 net->ns.inum);
7763 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
7764 nfsd4_end_grace(nn);
7765 return 0;
7766 }
7767
7768 /* initialization to perform when the nfsd service is started: */
7769
7770 int
7771 nfs4_state_start(void)
7772 {
7773 int ret;
7774
7775 laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
7776 if (laundry_wq == NULL) {
7777 ret = -ENOMEM;
7778 goto out;
7779 }
7780 ret = nfsd4_create_callback_queue();
7781 if (ret)
7782 goto out_free_laundry;
7783
7784 set_max_delegations();
7785 return 0;
7786
7787 out_free_laundry:
7788 destroy_workqueue(laundry_wq);
7789 out:
7790 return ret;
7791 }
7792
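/*
 * Per-net shutdown: stop the laundromat, end the grace period, unhash
 * and destroy any delegations still on the recall LRU, then tear down
 * client tracking and the per-net state tables.
 */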
7793 void
7794 nfs4_state_shutdown_net(struct net *net)
7795 {
7796 struct nfs4_delegation *dp = NULL;
7797 struct list_head *pos, *next, reaplist;
7798 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7799
7800 cancel_delayed_work_sync(&nn->laundromat_work);
7801 locks_end_grace(&nn->nfsd4_manager);
7802
7803 INIT_LIST_HEAD(&reaplist);
7804 spin_lock(&state_lock);
7805 list_for_each_safe(pos, next, &nn->del_recall_lru) {
7806 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
7807 WARN_ON(!unhash_delegation_locked(dp));
7808 list_add(&dp->dl_recall_lru, &reaplist);
7809 }
7810 spin_unlock(&state_lock);
7811 list_for_each_safe(pos, next, &reaplist) {
7812 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
7813 list_del_init(&dp->dl_recall_lru);
7814 destroy_unhashed_deleg(dp);
7815 }
7816
7817 nfsd4_client_tracking_exit(net);
7818 nfs4_state_destroy_net(net);
7819 }
7820
7821 void
7822 nfs4_state_shutdown(void)
7823 {
7824 destroy_workqueue(laundry_wq);
7825 nfsd4_destroy_callback_queue();
7826 }
7827
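/*
 * Helpers for the "current stateid" stashed in the compound state:
 * get_stateid() substitutes the saved stateid when the operation passed
 * the special current-stateid value; put_stateid() saves an operation's
 * result stateid as the new current one (minorversion != 0 only).
 */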
7828 static void
7829 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7830 {
7831 if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
7832 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
7833 }
7834
7835 static void
7836 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7837 {
7838 if (cstate->minorversion) {
7839 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
7840 SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
7841 }
7842 }
7843
7844 void
7845 clear_current_stateid(struct nfsd4_compound_state *cstate)
7846 {
7847 CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
7848 }
7849
7850 /*
7851 * functions to set current state id
7852 */
7853 void
7854 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
7855 union nfsd4_op_u *u)
7856 {
7857 put_stateid(cstate, &u->open_downgrade.od_stateid);
7858 }
7859
7860 void
7861 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
7862 union nfsd4_op_u *u)
7863 {
7864 put_stateid(cstate, &u->open.op_stateid);
7865 }
7866
7867 void
7868 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
7869 union nfsd4_op_u *u)
7870 {
7871 put_stateid(cstate, &u->close.cl_stateid);
7872 }
7873
7874 void
7875 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
7876 union nfsd4_op_u *u)
7877 {
7878 put_stateid(cstate, &u->lock.lk_resp_stateid);
7879 }
7880
7881 /*
7882 * functions to consume current state id
7883 */
7884
7885 void
7886 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
7887 union nfsd4_op_u *u)
7888 {
7889 get_stateid(cstate, &u->open_downgrade.od_stateid);
7890 }
7891
7892 void
7893 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
7894 union nfsd4_op_u *u)
7895 {
7896 get_stateid(cstate, &u->delegreturn.dr_stateid);
7897 }
7898
7899 void
7900 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
7901 union nfsd4_op_u *u)
7902 {
7903 get_stateid(cstate, &u->free_stateid.fr_stateid);
7904 }
7905
7906 void
7907 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
7908 union nfsd4_op_u *u)
7909 {
7910 get_stateid(cstate, &u->setattr.sa_stateid);
7911 }
7912
7913 void
7914 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
7915 union nfsd4_op_u *u)
7916 {
7917 get_stateid(cstate, &u->close.cl_stateid);
7918 }
7919
7920 void
7921 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
7922 union nfsd4_op_u *u)
7923 {
7924 get_stateid(cstate, &u->locku.lu_stateid);
7925 }
7926
7927 void
7928 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
7929 union nfsd4_op_u *u)
7930 {
7931 get_stateid(cstate, &u->read.rd_stateid);
7932 }
7933
7934 void
7935 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
7936 union nfsd4_op_u *u)
7937 {
7938 get_stateid(cstate, &u->write.wr_stateid);
7939 }
7940