1 /*
2 * Copyright (c) 2001 The Regents of the University of Michigan.
3 * All rights reserved.
4 *
5 * Kendrick Smith <kmsmith@umich.edu>
6 * Andy Adamson <kandros@umich.edu>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the name of the University nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35 #include <linux/file.h>
36 #include <linux/fs.h>
37 #include <linux/slab.h>
38 #include <linux/namei.h>
39 #include <linux/swap.h>
40 #include <linux/pagemap.h>
41 #include <linux/ratelimit.h>
42 #include <linux/sunrpc/svcauth_gss.h>
43 #include <linux/sunrpc/addr.h>
44 #include <linux/jhash.h>
45 #include "xdr4.h"
46 #include "xdr4cb.h"
47 #include "vfs.h"
48 #include "current_stateid.h"
49
50 #include "netns.h"
51 #include "pnfs.h"
52
53 #define NFSDDBG_FACILITY NFSDDBG_PROC
54
55 #define all_ones {{~0,~0},~0}
56 static const stateid_t one_stateid = {
57 .si_generation = ~0,
58 .si_opaque = all_ones,
59 };
60 static const stateid_t zero_stateid = {
61 /* all fields zero */
62 };
63 static const stateid_t currentstateid = {
64 .si_generation = 1,
65 };
66 static const stateid_t close_stateid = {
67 .si_generation = 0xffffffffU,
68 };
69
70 static u64 current_sessionid = 1;
71
72 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
73 #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
74 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
75 #define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
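/*
 * Illustrative sketch (not part of this file): a stateid-checking path
 * might use the predicates above roughly as follows, e.g. to filter the
 * special stateids before an idr lookup. The caller and return value
 * shown here are assumptions for illustration only:
 *
 *	if (ZERO_STATEID(&stateid) || ONE_STATEID(&stateid) ||
 *	    CLOSE_STATEID(&stateid))
 *		return nfserr_bad_stateid;
 */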
76
77 /* forward declarations */
78 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
79 static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
80
81 /* Locking: */
82
83 /*
84 * Currently used for the del_recall_lru and file hash table. In an
85 * effort to decrease the scope of the client_mutex, this spinlock may
86 * eventually cover more:
87 */
88 static DEFINE_SPINLOCK(state_lock);
89
90 enum nfsd4_st_mutex_lock_subclass {
91 OPEN_STATEID_MUTEX = 0,
92 LOCK_STATEID_MUTEX = 1,
93 };
94
95 /*
96 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
97 * the refcount on the open stateid to drop.
98 */
99 static DECLARE_WAIT_QUEUE_HEAD(close_wq);
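/*
 * Sketch of the intended wait/wake pairing, assuming a waiter like the
 * one CLOSE uses later in this file (names are illustrative):
 *
 *	// waiter: wait until only our reference and the hash remain
 *	wait_event(close_wq, refcount_read(&stp->st_stid.sc_count) == 2);
 *
 * The wake-up side is nfs4_put_stid()/put_ol_stateid_locked() below,
 * which call wake_up_all(&close_wq) when a reference is dropped.
 */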
100
101 static struct kmem_cache *client_slab;
102 static struct kmem_cache *openowner_slab;
103 static struct kmem_cache *lockowner_slab;
104 static struct kmem_cache *file_slab;
105 static struct kmem_cache *stateid_slab;
106 static struct kmem_cache *deleg_slab;
107 static struct kmem_cache *odstate_slab;
108
109 static void free_session(struct nfsd4_session *);
110
111 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
112 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
113
114 static bool is_session_dead(struct nfsd4_session *ses)
115 {
116 return ses->se_flags & NFS4_SESSION_DEAD;
117 }
118
119 static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
120 {
121 if (atomic_read(&ses->se_ref) > ref_held_by_me)
122 return nfserr_jukebox;
123 ses->se_flags |= NFS4_SESSION_DEAD;
124 return nfs_ok;
125 }
126
127 static bool is_client_expired(struct nfs4_client *clp)
128 {
129 return clp->cl_time == 0;
130 }
131
132 static __be32 get_client_locked(struct nfs4_client *clp)
133 {
134 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
135
136 lockdep_assert_held(&nn->client_lock);
137
138 if (is_client_expired(clp))
139 return nfserr_expired;
140 atomic_inc(&clp->cl_refcount);
141 return nfs_ok;
142 }
143
144 /* must be called under the client_lock */
145 static inline void
146 renew_client_locked(struct nfs4_client *clp)
147 {
148 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
149
150 if (is_client_expired(clp)) {
151 WARN_ON(1);
152 printk("%s: client (clientid %08x/%08x) already expired\n",
153 __func__,
154 clp->cl_clientid.cl_boot,
155 clp->cl_clientid.cl_id);
156 return;
157 }
158
159 dprintk("renewing client (clientid %08x/%08x)\n",
160 clp->cl_clientid.cl_boot,
161 clp->cl_clientid.cl_id);
162 list_move_tail(&clp->cl_lru, &nn->client_lru);
163 clp->cl_time = get_seconds();
164 }
165
166 static void put_client_renew_locked(struct nfs4_client *clp)
167 {
168 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
169
170 lockdep_assert_held(&nn->client_lock);
171
172 if (!atomic_dec_and_test(&clp->cl_refcount))
173 return;
174 if (!is_client_expired(clp))
175 renew_client_locked(clp);
176 }
177
178 static void put_client_renew(struct nfs4_client *clp)
179 {
180 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
181
182 if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
183 return;
184 if (!is_client_expired(clp))
185 renew_client_locked(clp);
186 spin_unlock(&nn->client_lock);
187 }
188
189 static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
190 {
191 __be32 status;
192
193 if (is_session_dead(ses))
194 return nfserr_badsession;
195 status = get_client_locked(ses->se_client);
196 if (status)
197 return status;
198 atomic_inc(&ses->se_ref);
199 return nfs_ok;
200 }
201
202 static void nfsd4_put_session_locked(struct nfsd4_session *ses)
203 {
204 struct nfs4_client *clp = ses->se_client;
205 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
206
207 lockdep_assert_held(&nn->client_lock);
208
209 if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
210 free_session(ses);
211 put_client_renew_locked(clp);
212 }
213
214 static void nfsd4_put_session(struct nfsd4_session *ses)
215 {
216 struct nfs4_client *clp = ses->se_client;
217 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
218
219 spin_lock(&nn->client_lock);
220 nfsd4_put_session_locked(ses);
221 spin_unlock(&nn->client_lock);
222 }
223
224 static struct nfsd4_blocked_lock *
225 find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
226 struct nfsd_net *nn)
227 {
228 struct nfsd4_blocked_lock *cur, *found = NULL;
229
230 spin_lock(&nn->blocked_locks_lock);
231 list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
232 if (fh_match(fh, &cur->nbl_fh)) {
233 list_del_init(&cur->nbl_list);
234 list_del_init(&cur->nbl_lru);
235 found = cur;
236 break;
237 }
238 }
239 spin_unlock(&nn->blocked_locks_lock);
240 if (found)
241 posix_unblock_lock(&found->nbl_lock);
242 return found;
243 }
244
245 static struct nfsd4_blocked_lock *
246 find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
247 struct nfsd_net *nn)
248 {
249 struct nfsd4_blocked_lock *nbl;
250
251 nbl = find_blocked_lock(lo, fh, nn);
252 if (!nbl) {
253 nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
254 if (nbl) {
255 INIT_LIST_HEAD(&nbl->nbl_list);
256 INIT_LIST_HEAD(&nbl->nbl_lru);
257 fh_copy_shallow(&nbl->nbl_fh, fh);
258 locks_init_lock(&nbl->nbl_lock);
259 nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
260 &nfsd4_cb_notify_lock_ops,
261 NFSPROC4_CLNT_CB_NOTIFY_LOCK);
262 }
263 }
264 return nbl;
265 }
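/*
 * Hedged usage sketch: a blocking LOCK request might pair these helpers
 * roughly as follows (the caller shown here is hypothetical):
 *
 *	struct nfsd4_blocked_lock *nbl;
 *
 *	nbl = find_or_allocate_block(lo, &fp->fi_fhandle, nn);
 *	if (!nbl)
 *		return nfserr_jukebox;	// allocation failed
 *	// ... arm nbl->nbl_lock and queue nbl on lo->lo_blocked ...
 *
 * Note that find_blocked_lock() both finds and dequeues, so a found
 * entry is already off the lists when it is reused or freed.
 */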
266
267 static void
268 free_blocked_lock(struct nfsd4_blocked_lock *nbl)
269 {
270 locks_release_private(&nbl->nbl_lock);
271 kfree(nbl);
272 }
273
274 static void
275 remove_blocked_locks(struct nfs4_lockowner *lo)
276 {
277 struct nfs4_client *clp = lo->lo_owner.so_client;
278 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
279 struct nfsd4_blocked_lock *nbl;
280 LIST_HEAD(reaplist);
281
282 /* Dequeue all blocked locks */
283 spin_lock(&nn->blocked_locks_lock);
284 while (!list_empty(&lo->lo_blocked)) {
285 nbl = list_first_entry(&lo->lo_blocked,
286 struct nfsd4_blocked_lock,
287 nbl_list);
288 list_del_init(&nbl->nbl_list);
289 list_move(&nbl->nbl_lru, &reaplist);
290 }
291 spin_unlock(&nn->blocked_locks_lock);
292
293 /* Now free them */
294 while (!list_empty(&reaplist)) {
295 nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
296 nbl_lru);
297 list_del_init(&nbl->nbl_lru);
298 posix_unblock_lock(&nbl->nbl_lock);
299 free_blocked_lock(nbl);
300 }
301 }
302
303 static int
304 nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
305 {
306 /*
307 * Since this is just an optimization, we don't try very hard if it
308 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
309 * just quit trying on anything else.
310 */
311 switch (task->tk_status) {
312 case -NFS4ERR_DELAY:
313 rpc_delay(task, 1 * HZ);
314 return 0;
315 default:
316 return 1;
317 }
318 }
319
320 static void
321 nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
322 {
323 struct nfsd4_blocked_lock *nbl = container_of(cb,
324 struct nfsd4_blocked_lock, nbl_cb);
325
326 free_blocked_lock(nbl);
327 }
328
329 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
330 .done = nfsd4_cb_notify_lock_done,
331 .release = nfsd4_cb_notify_lock_release,
332 };
333
334 static inline struct nfs4_stateowner *
335 nfs4_get_stateowner(struct nfs4_stateowner *sop)
336 {
337 atomic_inc(&sop->so_count);
338 return sop;
339 }
340
341 static int
342 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
343 {
344 return (sop->so_owner.len == owner->len) &&
345 0 == memcmp(sop->so_owner.data, owner->data, owner->len);
346 }
347
348 static struct nfs4_openowner *
349 find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
350 struct nfs4_client *clp)
351 {
352 struct nfs4_stateowner *so;
353
354 lockdep_assert_held(&clp->cl_lock);
355
356 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
357 so_strhash) {
358 if (!so->so_is_open_owner)
359 continue;
360 if (same_owner_str(so, &open->op_owner))
361 return openowner(nfs4_get_stateowner(so));
362 }
363 return NULL;
364 }
365
366 static struct nfs4_openowner *
367 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
368 struct nfs4_client *clp)
369 {
370 struct nfs4_openowner *oo;
371
372 spin_lock(&clp->cl_lock);
373 oo = find_openstateowner_str_locked(hashval, open, clp);
374 spin_unlock(&clp->cl_lock);
375 return oo;
376 }
377
378 static inline u32
379 opaque_hashval(const void *ptr, int nbytes)
380 {
381 unsigned char *cptr = (unsigned char *) ptr;
382
383 u32 x = 0;
384 while (nbytes--) {
385 x *= 37;
386 x += *cptr++;
387 }
388 return x;
389 }
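/*
 * Worked example (illustration only): opaque_hashval() is a simple
 * multiply-by-37 byte hash, x = x*37 + byte. Hashing the two bytes
 * "ab" gives x = 0*37 + 'a' = 97, then x = 97*37 + 'b' = 3687.
 */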
390
391 static void nfsd4_free_file_rcu(struct rcu_head *rcu)
392 {
393 struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
394
395 kmem_cache_free(file_slab, fp);
396 }
397
398 void
399 put_nfs4_file(struct nfs4_file *fi)
400 {
401 might_lock(&state_lock);
402
403 if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
404 hlist_del_rcu(&fi->fi_hash);
405 spin_unlock(&state_lock);
406 WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
407 WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
408 call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
409 }
410 }
411
412 static struct file *
413 __nfs4_get_fd(struct nfs4_file *f, int oflag)
414 {
415 if (f->fi_fds[oflag])
416 return get_file(f->fi_fds[oflag]);
417 return NULL;
418 }
419
420 static struct file *
421 find_writeable_file_locked(struct nfs4_file *f)
422 {
423 struct file *ret;
424
425 lockdep_assert_held(&f->fi_lock);
426
427 ret = __nfs4_get_fd(f, O_WRONLY);
428 if (!ret)
429 ret = __nfs4_get_fd(f, O_RDWR);
430 return ret;
431 }
432
433 static struct file *
434 find_writeable_file(struct nfs4_file *f)
435 {
436 struct file *ret;
437
438 spin_lock(&f->fi_lock);
439 ret = find_writeable_file_locked(f);
440 spin_unlock(&f->fi_lock);
441
442 return ret;
443 }
444
445 static struct file *find_readable_file_locked(struct nfs4_file *f)
446 {
447 struct file *ret;
448
449 lockdep_assert_held(&f->fi_lock);
450
451 ret = __nfs4_get_fd(f, O_RDONLY);
452 if (!ret)
453 ret = __nfs4_get_fd(f, O_RDWR);
454 return ret;
455 }
456
457 static struct file *
458 find_readable_file(struct nfs4_file *f)
459 {
460 struct file *ret;
461
462 spin_lock(&f->fi_lock);
463 ret = find_readable_file_locked(f);
464 spin_unlock(&f->fi_lock);
465
466 return ret;
467 }
468
469 struct file *
470 find_any_file(struct nfs4_file *f)
471 {
472 struct file *ret;
473
474 if (!f)
475 return NULL;
476 spin_lock(&f->fi_lock);
477 ret = __nfs4_get_fd(f, O_RDWR);
478 if (!ret) {
479 ret = __nfs4_get_fd(f, O_WRONLY);
480 if (!ret)
481 ret = __nfs4_get_fd(f, O_RDONLY);
482 }
483 spin_unlock(&f->fi_lock);
484 return ret;
485 }
486
487 static atomic_long_t num_delegations;
488 unsigned long max_delegations;
489
490 /*
491 * Open owner state (share locks)
492 */
493
494 /* hash tables for lock and open owners */
495 #define OWNER_HASH_BITS 8
496 #define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
497 #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
498
499 static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
500 {
501 unsigned int ret;
502
503 ret = opaque_hashval(ownername->data, ownername->len);
504 return ret & OWNER_HASH_MASK;
505 }
506
507 /* hash table for nfs4_file */
508 #define FILE_HASH_BITS 8
509 #define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
510
511 static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
512 {
513 return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
514 }
515
516 static unsigned int file_hashval(struct knfsd_fh *fh)
517 {
518 return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
519 }
520
521 static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
522
523 static void
524 __nfs4_file_get_access(struct nfs4_file *fp, u32 access)
525 {
526 lockdep_assert_held(&fp->fi_lock);
527
528 if (access & NFS4_SHARE_ACCESS_WRITE)
529 atomic_inc(&fp->fi_access[O_WRONLY]);
530 if (access & NFS4_SHARE_ACCESS_READ)
531 atomic_inc(&fp->fi_access[O_RDONLY]);
532 }
533
534 static __be32
535 nfs4_file_get_access(struct nfs4_file *fp, u32 access)
536 {
537 lockdep_assert_held(&fp->fi_lock);
538
539 /* Does this access mode make sense? */
540 if (access & ~NFS4_SHARE_ACCESS_BOTH)
541 return nfserr_inval;
542
543 /* Does it conflict with a deny mode already set? */
544 if ((access & fp->fi_share_deny) != 0)
545 return nfserr_share_denied;
546
547 __nfs4_file_get_access(fp, access);
548 return nfs_ok;
549 }
550
551 static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
552 {
553 /* Common case is that there is no deny mode. */
554 if (deny) {
555 /* Does this deny mode make sense? */
556 if (deny & ~NFS4_SHARE_DENY_BOTH)
557 return nfserr_inval;
558
559 if ((deny & NFS4_SHARE_DENY_READ) &&
560 atomic_read(&fp->fi_access[O_RDONLY]))
561 return nfserr_share_denied;
562
563 if ((deny & NFS4_SHARE_DENY_WRITE) &&
564 atomic_read(&fp->fi_access[O_WRONLY]))
565 return nfserr_share_denied;
566 }
567 return nfs_ok;
568 }
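/*
 * Sketch of how the access and deny checks combine on the OPEN path,
 * assuming a caller that holds fi_lock (the variable names below are
 * illustrative, not this file's API):
 *
 *	spin_lock(&fp->fi_lock);
 *	status = nfs4_file_check_deny(fp, share_deny);
 *	if (status == nfs_ok)
 *		status = nfs4_file_get_access(fp, share_access);
 *	spin_unlock(&fp->fi_lock);
 *
 * i.e. first verify the requested deny mode against existing access,
 * then take the access references only if no conflict was found.
 */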
569
570 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
571 {
572 might_lock(&fp->fi_lock);
573
574 if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
575 struct file *f1 = NULL;
576 struct file *f2 = NULL;
577
578 swap(f1, fp->fi_fds[oflag]);
579 if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
580 swap(f2, fp->fi_fds[O_RDWR]);
581 spin_unlock(&fp->fi_lock);
582 if (f1)
583 fput(f1);
584 if (f2)
585 fput(f2);
586 }
587 }
588
589 static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
590 {
591 WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);
592
593 if (access & NFS4_SHARE_ACCESS_WRITE)
594 __nfs4_file_put_access(fp, O_WRONLY);
595 if (access & NFS4_SHARE_ACCESS_READ)
596 __nfs4_file_put_access(fp, O_RDONLY);
597 }
598
599 /*
600 * Allocate a new open/delegation state counter. This is needed for
601 * pNFS for proper return on close semantics.
602 *
603 * Note that we only allocate it for pNFS-enabled exports, otherwise
604 * all pointers to struct nfs4_clnt_odstate are always NULL.
605 */
606 static struct nfs4_clnt_odstate *
607 alloc_clnt_odstate(struct nfs4_client *clp)
608 {
609 struct nfs4_clnt_odstate *co;
610
611 co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
612 if (co) {
613 co->co_client = clp;
614 refcount_set(&co->co_odcount, 1);
615 }
616 return co;
617 }
618
619 static void
620 hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
621 {
622 struct nfs4_file *fp = co->co_file;
623
624 lockdep_assert_held(&fp->fi_lock);
625 list_add(&co->co_perfile, &fp->fi_clnt_odstate);
626 }
627
628 static inline void
629 get_clnt_odstate(struct nfs4_clnt_odstate *co)
630 {
631 if (co)
632 refcount_inc(&co->co_odcount);
633 }
634
635 static void
636 put_clnt_odstate(struct nfs4_clnt_odstate *co)
637 {
638 struct nfs4_file *fp;
639
640 if (!co)
641 return;
642
643 fp = co->co_file;
644 if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
645 list_del(&co->co_perfile);
646 spin_unlock(&fp->fi_lock);
647
648 nfsd4_return_all_file_layouts(co->co_client, fp);
649 kmem_cache_free(odstate_slab, co);
650 }
651 }
652
653 static struct nfs4_clnt_odstate *
654 find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
655 {
656 struct nfs4_clnt_odstate *co;
657 struct nfs4_client *cl;
658
659 if (!new)
660 return NULL;
661
662 cl = new->co_client;
663
664 spin_lock(&fp->fi_lock);
665 list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
666 if (co->co_client == cl) {
667 get_clnt_odstate(co);
668 goto out;
669 }
670 }
671 co = new;
672 co->co_file = fp;
673 hash_clnt_odstate_locked(new);
674 out:
675 spin_unlock(&fp->fi_lock);
676 return co;
677 }
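/*
 * Usage sketch, with hedged assumptions about the caller: the odstate
 * is preallocated outside fi_lock, then either consumed or discarded:
 *
 *	new = alloc_clnt_odstate(clp);		// may return NULL
 *	...
 *	co = find_or_hash_clnt_odstate(fp, new);
 *	if (co != new)
 *		kmem_cache_free(odstate_slab, new);	// never hashed
 *
 * A never-hashed odstate must not go through put_clnt_odstate(),
 * since its co_file is still NULL at that point.
 */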
678
679 struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
680 void (*sc_free)(struct nfs4_stid *))
681 {
682 struct nfs4_stid *stid;
683 int new_id;
684
685 stid = kmem_cache_zalloc(slab, GFP_KERNEL);
686 if (!stid)
687 return NULL;
688
689 idr_preload(GFP_KERNEL);
690 spin_lock(&cl->cl_lock);
691 new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
692 spin_unlock(&cl->cl_lock);
693 idr_preload_end();
694 if (new_id < 0)
695 goto out_free;
696
697 stid->sc_free = sc_free;
698 stid->sc_client = cl;
699 stid->sc_stateid.si_opaque.so_id = new_id;
700 stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
701 /* Will be incremented before return to client: */
702 refcount_set(&stid->sc_count, 1);
703 spin_lock_init(&stid->sc_lock);
704
705 /*
706 * It shouldn't be a problem to reuse an opaque stateid value.
707 * I don't think it is for 4.1. But with 4.0 I worry that, for
708 * example, a stray write retransmission could be accepted by
709 * the server when it should have been rejected. Therefore,
710 * adopt a trick from the sctp code to attempt to maximize the
711 * amount of time until an id is reused, by ensuring they always
712 * "increase" (mod INT_MAX):
713 */
714 return stid;
715 out_free:
716 kmem_cache_free(slab, stid);
717 return NULL;
718 }
719
720 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
721 {
722 struct nfs4_stid *stid;
723
724 stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
725 if (!stid)
726 return NULL;
727
728 return openlockstateid(stid);
729 }
730
731 static void nfs4_free_deleg(struct nfs4_stid *stid)
732 {
733 kmem_cache_free(deleg_slab, stid);
734 atomic_long_dec(&num_delegations);
735 }
736
737 /*
738 * When we recall a delegation, we should be careful not to hand it
739 * out again straight away.
740 * To ensure this we keep a pair of bloom filters ('new' and 'old')
741 * in which the filehandles of recalled delegations are "stored".
742 * If a filehandle appears in either filter, a delegation is blocked.
743 * When a delegation is recalled, the filehandle is stored in the "new"
744 * filter.
745 * Every 30 seconds we swap the filters and clear the "new" one,
746 * unless both are empty of course.
747 *
748 * Each filter is 256 bits. We hash the filehandle to a 32-bit value
749 * and use each of the low 3 bytes as a bit index into the filter.
750 *
751 * 'blocked_delegations_lock', which is always taken in block_delegations(),
752 * is used to manage concurrent access. Testing does not need the lock
753 * except when swapping the two filters.
754 */
755 static DEFINE_SPINLOCK(blocked_delegations_lock);
756 static struct bloom_pair {
757 int entries, old_entries;
758 time_t swap_time;
759 int new; /* index into 'set' */
760 DECLARE_BITMAP(set[2], 256);
761 } blocked_delegations;
762
763 static int delegation_blocked(struct knfsd_fh *fh)
764 {
765 u32 hash;
766 struct bloom_pair *bd = &blocked_delegations;
767
768 if (bd->entries == 0)
769 return 0;
770 if (seconds_since_boot() - bd->swap_time > 30) {
771 spin_lock(&blocked_delegations_lock);
772 if (seconds_since_boot() - bd->swap_time > 30) {
773 bd->entries -= bd->old_entries;
774 bd->old_entries = bd->entries;
775 memset(bd->set[bd->new], 0,
776 sizeof(bd->set[0]));
777 bd->new = 1-bd->new;
778 bd->swap_time = seconds_since_boot();
779 }
780 spin_unlock(&blocked_delegations_lock);
781 }
782 hash = jhash(&fh->fh_base, fh->fh_size, 0);
783 if (test_bit(hash&255, bd->set[0]) &&
784 test_bit((hash>>8)&255, bd->set[0]) &&
785 test_bit((hash>>16)&255, bd->set[0]))
786 return 1;
787
788 if (test_bit(hash&255, bd->set[1]) &&
789 test_bit((hash>>8)&255, bd->set[1]) &&
790 test_bit((hash>>16)&255, bd->set[1]))
791 return 1;
792
793 return 0;
794 }
795
796 static void block_delegations(struct knfsd_fh *fh)
797 {
798 u32 hash;
799 struct bloom_pair *bd = &blocked_delegations;
800
801 hash = jhash(&fh->fh_base, fh->fh_size, 0);
802
803 spin_lock(&blocked_delegations_lock);
804 __set_bit(hash&255, bd->set[bd->new]);
805 __set_bit((hash>>8)&255, bd->set[bd->new]);
806 __set_bit((hash>>16)&255, bd->set[bd->new]);
807 if (bd->entries == 0)
808 bd->swap_time = seconds_since_boot();
809 bd->entries += 1;
810 spin_unlock(&blocked_delegations_lock);
811 }
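/*
 * Hedged sketch of how the two halves of the bloom-filter pair are
 * meant to be used (the callers shown are illustrative):
 *
 *	// on recall: remember this filehandle
 *	block_delegations(&fp->fi_fhandle);
 *
 *	// on grant: refuse if it may have been recalled recently
 *	if (delegation_blocked(&current_fh->fh_handle))
 *		goto out_no_deleg;
 *
 * False positives only delay a delegation grant; a stored filehandle
 * cannot be missed until its filter ages out, which is the point of
 * the 30-second double-buffering described above.
 */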
812
813 static struct nfs4_delegation *
814 alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
815 struct svc_fh *current_fh,
816 struct nfs4_clnt_odstate *odstate)
817 {
818 struct nfs4_delegation *dp;
819 long n;
820
821 dprintk("NFSD alloc_init_deleg\n");
822 n = atomic_long_inc_return(&num_delegations);
823 if (n < 0 || n > max_delegations)
824 goto out_dec;
825 if (delegation_blocked(&current_fh->fh_handle))
826 goto out_dec;
827 dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
828 if (dp == NULL)
829 goto out_dec;
830
831 /*
832 * delegation seqid's are never incremented. The 4.1 special
833 * meaning of seqid 0 isn't meaningful, really, but let's avoid
834 * 0 anyway just for consistency and use 1:
835 */
836 dp->dl_stid.sc_stateid.si_generation = 1;
837 INIT_LIST_HEAD(&dp->dl_perfile);
838 INIT_LIST_HEAD(&dp->dl_perclnt);
839 INIT_LIST_HEAD(&dp->dl_recall_lru);
840 dp->dl_clnt_odstate = odstate;
841 get_clnt_odstate(odstate);
842 dp->dl_type = NFS4_OPEN_DELEGATE_READ;
843 dp->dl_retries = 1;
844 nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
845 &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
846 get_nfs4_file(fp);
847 dp->dl_stid.sc_file = fp;
848 return dp;
849 out_dec:
850 atomic_long_dec(&num_delegations);
851 return NULL;
852 }
853
854 void
855 nfs4_put_stid(struct nfs4_stid *s)
856 {
857 struct nfs4_file *fp = s->sc_file;
858 struct nfs4_client *clp = s->sc_client;
859
860 might_lock(&clp->cl_lock);
861
862 if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
863 wake_up_all(&close_wq);
864 return;
865 }
866 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
867 spin_unlock(&clp->cl_lock);
868 s->sc_free(s);
869 if (fp)
870 put_nfs4_file(fp);
871 }
872
873 void
874 nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
875 {
876 stateid_t *src = &stid->sc_stateid;
877
878 spin_lock(&stid->sc_lock);
879 if (unlikely(++src->si_generation == 0))
880 src->si_generation = 1;
881 memcpy(dst, src, sizeof(*dst));
882 spin_unlock(&stid->sc_lock);
883 }
884
885 static void put_deleg_file(struct nfs4_file *fp)
886 {
887 struct file *filp = NULL;
888
889 spin_lock(&fp->fi_lock);
890 if (--fp->fi_delegees == 0)
891 swap(filp, fp->fi_deleg_file);
892 spin_unlock(&fp->fi_lock);
893
894 if (filp)
895 fput(filp);
896 }
897
898 static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
899 {
900 struct nfs4_file *fp = dp->dl_stid.sc_file;
901 struct file *filp = fp->fi_deleg_file;
902
903 WARN_ON_ONCE(!fp->fi_delegees);
904
905 vfs_setlease(filp, F_UNLCK, NULL, (void **)&dp);
906 put_deleg_file(fp);
907 }
908
909 static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
910 {
911 put_clnt_odstate(dp->dl_clnt_odstate);
912 nfs4_unlock_deleg_lease(dp);
913 nfs4_put_stid(&dp->dl_stid);
914 }
915
916 void nfs4_unhash_stid(struct nfs4_stid *s)
917 {
918 s->sc_type = 0;
919 }
920
921 /**
922 * nfs4_delegation_exists - Discover if this delegation already exists
923 * @clp: a pointer to the nfs4_client we're granting a delegation to
924 * @fp: a pointer to the nfs4_file we're granting a delegation on
925 *
926 * Return:
927 * %true if an existing delegation is found, otherwise %false
928 */
929
930 static bool
931 nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
932 {
933 struct nfs4_delegation *searchdp = NULL;
934 struct nfs4_client *searchclp = NULL;
935
936 lockdep_assert_held(&state_lock);
937 lockdep_assert_held(&fp->fi_lock);
938
939 list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
940 searchclp = searchdp->dl_stid.sc_client;
941 if (clp == searchclp) {
942 return true;
943 }
944 }
945 return false;
946 }
947
948 /**
949 * hash_delegation_locked - Add a delegation to the appropriate lists
950 * @dp: a pointer to the nfs4_delegation we are adding.
951 * @fp: a pointer to the nfs4_file we're granting a delegation on
952 *
953 * Return:
954 * On success: 0 if the delegation was successfully hashed.
955 *
956 * On error: -EAGAIN if one was previously granted to this
957 * nfs4_client for this nfs4_file. Delegation is not hashed.
958 *
959 */
960
961 static int
962 hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
963 {
964 struct nfs4_client *clp = dp->dl_stid.sc_client;
965
966 lockdep_assert_held(&state_lock);
967 lockdep_assert_held(&fp->fi_lock);
968
969 if (nfs4_delegation_exists(clp, fp))
970 return -EAGAIN;
971 refcount_inc(&dp->dl_stid.sc_count);
972 dp->dl_stid.sc_type = NFS4_DELEG_STID;
973 list_add(&dp->dl_perfile, &fp->fi_delegations);
974 list_add(&dp->dl_perclnt, &clp->cl_delegations);
975 return 0;
976 }
977
978 static bool
979 unhash_delegation_locked(struct nfs4_delegation *dp)
980 {
981 struct nfs4_file *fp = dp->dl_stid.sc_file;
982
983 lockdep_assert_held(&state_lock);
984
985 if (list_empty(&dp->dl_perfile))
986 return false;
987
988 dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
989 /* Ensure that deleg break won't try to requeue it */
990 ++dp->dl_time;
991 spin_lock(&fp->fi_lock);
992 list_del_init(&dp->dl_perclnt);
993 list_del_init(&dp->dl_recall_lru);
994 list_del_init(&dp->dl_perfile);
995 spin_unlock(&fp->fi_lock);
996 return true;
997 }
998
999 static void destroy_delegation(struct nfs4_delegation *dp)
1000 {
1001 bool unhashed;
1002
1003 spin_lock(&state_lock);
1004 unhashed = unhash_delegation_locked(dp);
1005 spin_unlock(&state_lock);
1006 if (unhashed)
1007 destroy_unhashed_deleg(dp);
1008 }
1009
1010 static void revoke_delegation(struct nfs4_delegation *dp)
1011 {
1012 struct nfs4_client *clp = dp->dl_stid.sc_client;
1013
1014 WARN_ON(!list_empty(&dp->dl_recall_lru));
1015
1016 if (clp->cl_minorversion) {
1017 dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
1018 refcount_inc(&dp->dl_stid.sc_count);
1019 spin_lock(&clp->cl_lock);
1020 list_add(&dp->dl_recall_lru, &clp->cl_revoked);
1021 spin_unlock(&clp->cl_lock);
1022 }
1023 destroy_unhashed_deleg(dp);
1024 }
1025
1026 /*
1027 * SETCLIENTID state
1028 */
1029
1030 static unsigned int clientid_hashval(u32 id)
1031 {
1032 return id & CLIENT_HASH_MASK;
1033 }
1034
1035 static unsigned int clientstr_hashval(const char *name)
1036 {
1037 return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
1038 }
1039
1040 /*
1041 * We store the NONE, READ, WRITE, and BOTH bits separately in the
1042 * st_{access,deny}_bmap field of the stateid, in order to track not
1043 * only what share bits are currently in force, but also what
1044 * combinations of share bits previous opens have used. This allows us
1045 * to enforce the recommendation of rfc 3530 14.2.19 that the server
1046 * return an error if the client attempts to downgrade to a combination
1047 * of share bits not explicable by closing some of its previous opens.
1048 *
1049 * XXX: This enforcement is actually incomplete, since we don't keep
1050 * track of access/deny bit combinations; so, e.g., we allow:
1051 *
1052 * OPEN allow read, deny write
1053 * OPEN allow both, deny none
1054 * DOWNGRADE allow read, deny none
1055 *
1056 * which we should reject.
1057 */
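/*
 * Worked example of the bitmap encoding (illustration): after
 *
 *	OPEN  allow read  (NFS4_SHARE_ACCESS_READ == 1)
 *	OPEN  allow both  (NFS4_SHARE_ACCESS_BOTH == 3)
 *
 * st_access_bmap has bits 1 and 3 set (0b1010), and
 * bmap_to_share_mode() below folds that back to 1|3 == 3 (BOTH),
 * while the bitmap still records that a plain READ open occurred.
 */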
1058 static unsigned int
1059 bmap_to_share_mode(unsigned long bmap) {
1060 int i;
1061 unsigned int access = 0;
1062
1063 for (i = 1; i < 4; i++) {
1064 if (test_bit(i, &bmap))
1065 access |= i;
1066 }
1067 return access;
1068 }
1069
1070 /* set share access for a given stateid */
1071 static inline void
1072 set_access(u32 access, struct nfs4_ol_stateid *stp)
1073 {
1074 unsigned char mask = 1 << access;
1075
1076 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
1077 stp->st_access_bmap |= mask;
1078 }
1079
1080 /* clear share access for a given stateid */
1081 static inline void
1082 clear_access(u32 access, struct nfs4_ol_stateid *stp)
1083 {
1084 unsigned char mask = 1 << access;
1085
1086 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
1087 stp->st_access_bmap &= ~mask;
1088 }
1089
1090 /* test whether a given stateid has access */
1091 static inline bool
1092 test_access(u32 access, struct nfs4_ol_stateid *stp)
1093 {
1094 unsigned char mask = 1 << access;
1095
1096 return (bool)(stp->st_access_bmap & mask);
1097 }
1098
1099 /* set share deny for a given stateid */
1100 static inline void
1101 set_deny(u32 deny, struct nfs4_ol_stateid *stp)
1102 {
1103 unsigned char mask = 1 << deny;
1104
1105 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
1106 stp->st_deny_bmap |= mask;
1107 }
1108
1109 /* clear share deny for a given stateid */
1110 static inline void
1111 clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
1112 {
1113 unsigned char mask = 1 << deny;
1114
1115 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
1116 stp->st_deny_bmap &= ~mask;
1117 }
1118
1119 /* test whether a given stateid is denying specific access */
1120 static inline bool
1121 test_deny(u32 deny, struct nfs4_ol_stateid *stp)
1122 {
1123 unsigned char mask = 1 << deny;
1124
1125 return (bool)(stp->st_deny_bmap & mask);
1126 }
1127
1128 static int nfs4_access_to_omode(u32 access)
1129 {
1130 switch (access & NFS4_SHARE_ACCESS_BOTH) {
1131 case NFS4_SHARE_ACCESS_READ:
1132 return O_RDONLY;
1133 case NFS4_SHARE_ACCESS_WRITE:
1134 return O_WRONLY;
1135 case NFS4_SHARE_ACCESS_BOTH:
1136 return O_RDWR;
1137 }
1138 WARN_ON_ONCE(1);
1139 return O_RDONLY;
1140 }
1141
1142 /*
1143 * A stateid that had a deny mode associated with it is being released
1144 * or downgraded. Recalculate the deny mode on the file.
1145 */
1146 static void
1147 recalculate_deny_mode(struct nfs4_file *fp)
1148 {
1149 struct nfs4_ol_stateid *stp;
1150
1151 spin_lock(&fp->fi_lock);
1152 fp->fi_share_deny = 0;
1153 list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
1154 fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
1155 spin_unlock(&fp->fi_lock);
1156 }
1157
1158 static void
1159 reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
1160 {
1161 int i;
1162 bool change = false;
1163
1164 for (i = 1; i < 4; i++) {
1165 if ((i & deny) != i) {
1166 change = true;
1167 clear_deny(i, stp);
1168 }
1169 }
1170
1171 /* Recalculate per-file deny mode if there was a change */
1172 if (change)
1173 recalculate_deny_mode(stp->st_stid.sc_file);
1174 }
1175
1176 /* release all access and file references for a given stateid */
1177 static void
1178 release_all_access(struct nfs4_ol_stateid *stp)
1179 {
1180 int i;
1181 struct nfs4_file *fp = stp->st_stid.sc_file;
1182
1183 if (fp && stp->st_deny_bmap != 0)
1184 recalculate_deny_mode(fp);
1185
1186 for (i = 1; i < 4; i++) {
1187 if (test_access(i, stp))
1188 nfs4_file_put_access(stp->st_stid.sc_file, i);
1189 clear_access(i, stp);
1190 }
1191 }
1192
1193 static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
1194 {
1195 kfree(sop->so_owner.data);
1196 sop->so_ops->so_free(sop);
1197 }
1198
1199 static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
1200 {
1201 struct nfs4_client *clp = sop->so_client;
1202
1203 might_lock(&clp->cl_lock);
1204
1205 if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
1206 return;
1207 sop->so_ops->so_unhash(sop);
1208 spin_unlock(&clp->cl_lock);
1209 nfs4_free_stateowner(sop);
1210 }
1211
1212 static bool
1213 nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
1214 {
1215 return list_empty(&stp->st_perfile);
1216 }
1217
1218 static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
1219 {
1220 struct nfs4_file *fp = stp->st_stid.sc_file;
1221
1222 lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
1223
1224 if (list_empty(&stp->st_perfile))
1225 return false;
1226
1227 spin_lock(&fp->fi_lock);
1228 list_del_init(&stp->st_perfile);
1229 spin_unlock(&fp->fi_lock);
1230 list_del(&stp->st_perstateowner);
1231 return true;
1232 }
1233
1234 static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
1235 {
1236 struct nfs4_ol_stateid *stp = openlockstateid(stid);
1237
1238 put_clnt_odstate(stp->st_clnt_odstate);
1239 release_all_access(stp);
1240 if (stp->st_stateowner)
1241 nfs4_put_stateowner(stp->st_stateowner);
1242 kmem_cache_free(stateid_slab, stid);
1243 }
1244
1245 static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
1246 {
1247 struct nfs4_ol_stateid *stp = openlockstateid(stid);
1248 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
1249 struct file *file;
1250
1251 file = find_any_file(stp->st_stid.sc_file);
1252 if (file)
1253 filp_close(file, (fl_owner_t)lo);
1254 nfs4_free_ol_stateid(stid);
1255 }
1256
1257 /*
1258 * Put the persistent reference to an already unhashed generic stateid, while
1259 * holding the cl_lock. If it's the last reference, then put it onto the
1260 * reaplist for later destruction.
1261 */
1262 static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
1263 struct list_head *reaplist)
1264 {
1265 struct nfs4_stid *s = &stp->st_stid;
1266 struct nfs4_client *clp = s->sc_client;
1267
1268 lockdep_assert_held(&clp->cl_lock);
1269
1270 WARN_ON_ONCE(!list_empty(&stp->st_locks));
1271
1272 if (!refcount_dec_and_test(&s->sc_count)) {
1273 wake_up_all(&close_wq);
1274 return;
1275 }
1276
1277 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1278 list_add(&stp->st_locks, reaplist);
1279 }
1280
1281 static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
1282 {
1283 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1284
1285 if (!unhash_ol_stateid(stp))
1286 return false;
1287 list_del_init(&stp->st_locks);
1288 nfs4_unhash_stid(&stp->st_stid);
1289 return true;
1290 }
1291
1292 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
1293 {
1294 struct nfs4_client *clp = stp->st_stid.sc_client;
1295 bool unhashed;
1296
1297 spin_lock(&clp->cl_lock);
1298 unhashed = unhash_lock_stateid(stp);
1299 spin_unlock(&clp->cl_lock);
1300 if (unhashed)
1301 nfs4_put_stid(&stp->st_stid);
1302 }
1303
1304 static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
1305 {
1306 struct nfs4_client *clp = lo->lo_owner.so_client;
1307
1308 lockdep_assert_held(&clp->cl_lock);
1309
1310 list_del_init(&lo->lo_owner.so_strhash);
1311 }
1312
1313 /*
1314 * Free a list of generic stateids that were collected earlier after being
1315 * fully unhashed.
1316 */
1317 static void
1318 free_ol_stateid_reaplist(struct list_head *reaplist)
1319 {
1320 struct nfs4_ol_stateid *stp;
1321 struct nfs4_file *fp;
1322
1323 might_sleep();
1324
1325 while (!list_empty(reaplist)) {
1326 stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
1327 st_locks);
1328 list_del(&stp->st_locks);
1329 fp = stp->st_stid.sc_file;
1330 stp->st_stid.sc_free(&stp->st_stid);
1331 if (fp)
1332 put_nfs4_file(fp);
1333 }
1334 }
1335
1336 static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
1337 struct list_head *reaplist)
1338 {
1339 struct nfs4_ol_stateid *stp;
1340
1341 lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
1342
1343 while (!list_empty(&open_stp->st_locks)) {
1344 stp = list_entry(open_stp->st_locks.next,
1345 struct nfs4_ol_stateid, st_locks);
1346 WARN_ON(!unhash_lock_stateid(stp));
1347 put_ol_stateid_locked(stp, reaplist);
1348 }
1349 }
1350
1351 static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
1352 struct list_head *reaplist)
1353 {
1354 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1355
1356 if (!unhash_ol_stateid(stp))
1357 return false;
1358 release_open_stateid_locks(stp, reaplist);
1359 return true;
1360 }
1361
1362 static void release_open_stateid(struct nfs4_ol_stateid *stp)
1363 {
1364 LIST_HEAD(reaplist);
1365
1366 spin_lock(&stp->st_stid.sc_client->cl_lock);
1367 if (unhash_open_stateid(stp, &reaplist))
1368 put_ol_stateid_locked(stp, &reaplist);
1369 spin_unlock(&stp->st_stid.sc_client->cl_lock);
1370 free_ol_stateid_reaplist(&reaplist);
1371 }
1372
1373 static void unhash_openowner_locked(struct nfs4_openowner *oo)
1374 {
1375 struct nfs4_client *clp = oo->oo_owner.so_client;
1376
1377 lockdep_assert_held(&clp->cl_lock);
1378
1379 list_del_init(&oo->oo_owner.so_strhash);
1380 list_del_init(&oo->oo_perclient);
1381 }
1382
1383 static void release_last_closed_stateid(struct nfs4_openowner *oo)
1384 {
1385 struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
1386 nfsd_net_id);
1387 struct nfs4_ol_stateid *s;
1388
1389 spin_lock(&nn->client_lock);
1390 s = oo->oo_last_closed_stid;
1391 if (s) {
1392 list_del_init(&oo->oo_close_lru);
1393 oo->oo_last_closed_stid = NULL;
1394 }
1395 spin_unlock(&nn->client_lock);
1396 if (s)
1397 nfs4_put_stid(&s->st_stid);
1398 }
1399
1400 static void release_openowner(struct nfs4_openowner *oo)
1401 {
1402 struct nfs4_ol_stateid *stp;
1403 struct nfs4_client *clp = oo->oo_owner.so_client;
1404 struct list_head reaplist;
1405
1406 INIT_LIST_HEAD(&reaplist);
1407
1408 spin_lock(&clp->cl_lock);
1409 unhash_openowner_locked(oo);
1410 while (!list_empty(&oo->oo_owner.so_stateids)) {
1411 stp = list_first_entry(&oo->oo_owner.so_stateids,
1412 struct nfs4_ol_stateid, st_perstateowner);
1413 if (unhash_open_stateid(stp, &reaplist))
1414 put_ol_stateid_locked(stp, &reaplist);
1415 }
1416 spin_unlock(&clp->cl_lock);
1417 free_ol_stateid_reaplist(&reaplist);
1418 release_last_closed_stateid(oo);
1419 nfs4_put_stateowner(&oo->oo_owner);
1420 }
1421
1422 static inline int
1423 hash_sessionid(struct nfs4_sessionid *sessionid)
1424 {
1425 struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
1426
1427 return sid->sequence % SESSION_HASH_SIZE;
1428 }
1429
1430 #ifdef CONFIG_SUNRPC_DEBUG
1431 static inline void
1432 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1433 {
1434 u32 *ptr = (u32 *)(&sessionid->data[0]);
1435 dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
1436 }
1437 #else
1438 static inline void
1439 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1440 {
1441 }
1442 #endif
1443
1444 /*
1445 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
1446 * won't be used for replay.
1447 */
1448 void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
1449 {
1450 struct nfs4_stateowner *so = cstate->replay_owner;
1451
1452 if (nfserr == nfserr_replay_me)
1453 return;
1454
1455 if (!seqid_mutating_err(ntohl(nfserr))) {
1456 nfsd4_cstate_clear_replay(cstate);
1457 return;
1458 }
1459 if (!so)
1460 return;
1461 if (so->so_is_open_owner)
1462 release_last_closed_stateid(openowner(so));
1463 so->so_seqid++;
1464 return;
1465 }
1466
1467 static void
1468 gen_sessionid(struct nfsd4_session *ses)
1469 {
1470 struct nfs4_client *clp = ses->se_client;
1471 struct nfsd4_sessionid *sid;
1472
1473 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
1474 sid->clientid = clp->cl_clientid;
1475 sid->sequence = current_sessionid++;
1476 sid->reserved = 0;
1477 }
1478
1479 /*
1480 * The protocol defines ca_maxresponsesize_cached to include the size of
1481 * the rpc header, but all we need to cache is the data starting after
1482 * the end of the initial SEQUENCE operation--the rest we regenerate
1483 * each time. Therefore we can advertise a ca_maxresponsesize_cached
1484 * value that is the number of bytes in our cache plus a few additional
1485 * bytes. In order to stay on the safe side, and not promise more than
1486 * we can cache, those additional bytes must be the minimum possible: 24
1487 * bytes of rpc header (xid through accept state, with AUTH_NULL
1488 * verifier), 12 for the compound header (with zero-length tag), and 44
1489 * for the SEQUENCE op response:
1490 */
1491 #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
1492
1493 static void
1494 free_session_slots(struct nfsd4_session *ses)
1495 {
1496 int i;
1497
1498 for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
1499 free_svc_cred(&ses->se_slots[i]->sl_cred);
1500 kfree(ses->se_slots[i]);
1501 }
1502 }
1503
1504 /*
1505 * We don't actually need to cache the rpc and session headers, so we
1506 * can allocate a little less for each slot:
1507 */
1508 static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1509 {
1510 u32 size;
1511
1512 if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
1513 size = 0;
1514 else
1515 size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
1516 return size + sizeof(struct nfsd4_slot);
1517 }
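/*
 * Worked example (numbers are illustrative): with the 80-byte
 * NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44) and a negotiated
 * ca_maxresponsesize_cached of 1024, each slot needs
 * 1024 - 80 = 944 bytes of reply cache plus sizeof(struct nfsd4_slot).
 */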
1518
1519 /*
1520 * XXX: If we run out of reserved DRC memory we could (up to a point)
1521 * re-negotiate active sessions and reduce their slot usage to make
1522 * room for new connections. For now we just fail the create session.
1523 */
1524 static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
1525 {
1526 u32 slotsize = slot_bytes(ca);
1527 u32 num = ca->maxreqs;
1528 unsigned long avail, total_avail;
1529
1530 spin_lock(&nfsd_drc_lock);
1531 total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
1532 avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
1533 /*
1534 * Never use more than a third of the remaining memory,
1535 * unless it's the only way to give this client a slot:
1536 */
1537 avail = clamp_t(unsigned long, avail, slotsize, total_avail/3);
1538 num = min_t(int, num, avail / slotsize);
1539 nfsd_drc_mem_used += num * slotsize;
1540 spin_unlock(&nfsd_drc_lock);
1541
1542 return num;
1543 }
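/*
 * Hedged numeric sketch of the admission logic above: if slotsize is
 * 1k, the client asked for maxreqs = 32, 12k of DRC memory remains,
 * and NFSD_MAX_MEM_PER_SESSION is larger than that (an assumption for
 * this example), then avail is clamped to [1k, 12k/3] = 4k, so the
 * session is granted min(32, 4k/1k) = 4 slots.
 */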
1544
1545 static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1546 {
1547 int slotsize = slot_bytes(ca);
1548
1549 spin_lock(&nfsd_drc_lock);
1550 nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1551 spin_unlock(&nfsd_drc_lock);
1552 }
1553
1554 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
1555 struct nfsd4_channel_attrs *battrs)
1556 {
1557 int numslots = fattrs->maxreqs;
1558 int slotsize = slot_bytes(fattrs);
1559 struct nfsd4_session *new;
1560 int mem, i;
1561
1562 BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
1563 + sizeof(struct nfsd4_session) > PAGE_SIZE);
1564 mem = numslots * sizeof(struct nfsd4_slot *);
1565
1566 new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
1567 if (!new)
1568 return NULL;
1569 /* allocate each struct nfsd4_slot and data cache in one piece */
1570 for (i = 0; i < numslots; i++) {
1571 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
1572 if (!new->se_slots[i])
1573 goto out_free;
1574 }
1575
1576 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
1577 memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
1578
1579 return new;
1580 out_free:
1581 while (i--)
1582 kfree(new->se_slots[i]);
1583 kfree(new);
1584 return NULL;
1585 }
1586
1587 static void free_conn(struct nfsd4_conn *c)
1588 {
1589 svc_xprt_put(c->cn_xprt);
1590 kfree(c);
1591 }
1592
1593 static void nfsd4_conn_lost(struct svc_xpt_user *u)
1594 {
1595 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
1596 struct nfs4_client *clp = c->cn_session->se_client;
1597
1598 spin_lock(&clp->cl_lock);
1599 if (!list_empty(&c->cn_persession)) {
1600 list_del(&c->cn_persession);
1601 free_conn(c);
1602 }
1603 nfsd4_probe_callback(clp);
1604 spin_unlock(&clp->cl_lock);
1605 }
1606
1607 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1608 {
1609 struct nfsd4_conn *conn;
1610
1611 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1612 if (!conn)
1613 return NULL;
1614 svc_xprt_get(rqstp->rq_xprt);
1615 conn->cn_xprt = rqstp->rq_xprt;
1616 conn->cn_flags = flags;
1617 INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1618 return conn;
1619 }
1620
1621 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1622 {
1623 conn->cn_session = ses;
1624 list_add(&conn->cn_persession, &ses->se_conns);
1625 }
1626
1627 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1628 {
1629 struct nfs4_client *clp = ses->se_client;
1630
1631 spin_lock(&clp->cl_lock);
1632 __nfsd4_hash_conn(conn, ses);
1633 spin_unlock(&clp->cl_lock);
1634 }
1635
1636 static int nfsd4_register_conn(struct nfsd4_conn *conn)
1637 {
1638 conn->cn_xpt_user.callback = nfsd4_conn_lost;
1639 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
1640 }
1641
1642 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
1643 {
1644 int ret;
1645
1646 nfsd4_hash_conn(conn, ses);
1647 ret = nfsd4_register_conn(conn);
1648 if (ret)
1649 /* oops; xprt is already down: */
1650 nfsd4_conn_lost(&conn->cn_xpt_user);
1651 /* We may have gained or lost a callback channel: */
1652 nfsd4_probe_callback_sync(ses->se_client);
1653 }
1654
1655 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1656 {
1657 u32 dir = NFS4_CDFC4_FORE;
1658
1659 if (cses->flags & SESSION4_BACK_CHAN)
1660 dir |= NFS4_CDFC4_BACK;
1661 return alloc_conn(rqstp, dir);
1662 }
1663
1664 /* must be called under client_lock */
1665 static void nfsd4_del_conns(struct nfsd4_session *s)
1666 {
1667 struct nfs4_client *clp = s->se_client;
1668 struct nfsd4_conn *c;
1669
1670 spin_lock(&clp->cl_lock);
1671 while (!list_empty(&s->se_conns)) {
1672 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
1673 list_del_init(&c->cn_persession);
1674 spin_unlock(&clp->cl_lock);
1675
1676 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
1677 free_conn(c);
1678
1679 spin_lock(&clp->cl_lock);
1680 }
1681 spin_unlock(&clp->cl_lock);
1682 }
1683
1684 static void __free_session(struct nfsd4_session *ses)
1685 {
1686 free_session_slots(ses);
1687 kfree(ses);
1688 }
1689
1690 static void free_session(struct nfsd4_session *ses)
1691 {
1692 nfsd4_del_conns(ses);
1693 nfsd4_put_drc_mem(&ses->se_fchannel);
1694 __free_session(ses);
1695 }
1696
1697 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1698 {
1699 int idx;
1700 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1701
1702 new->se_client = clp;
1703 gen_sessionid(new);
1704
1705 INIT_LIST_HEAD(&new->se_conns);
1706
1707 new->se_cb_seq_nr = 1;
1708 new->se_flags = cses->flags;
1709 new->se_cb_prog = cses->callback_prog;
1710 new->se_cb_sec = cses->cb_sec;
1711 atomic_set(&new->se_ref, 0);
1712 idx = hash_sessionid(&new->se_sessionid);
1713 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1714 spin_lock(&clp->cl_lock);
1715 list_add(&new->se_perclnt, &clp->cl_sessions);
1716 spin_unlock(&clp->cl_lock);
1717
1718 {
1719 struct sockaddr *sa = svc_addr(rqstp);
1720 /*
1721 * This is a little silly; with sessions there's no real
1722 * use for the callback address. Use the peer address
1723 * as a reasonable default for now, but consider fixing
1724 * the rpc client not to require an address in the
1725 * future:
1726 */
1727 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
1728 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
1729 }
1730 }
1731
1732 /* caller must hold client_lock */
1733 static struct nfsd4_session *
1734 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
1735 {
1736 struct nfsd4_session *elem;
1737 int idx;
1738 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1739
1740 lockdep_assert_held(&nn->client_lock);
1741
1742 dump_sessionid(__func__, sessionid);
1743 idx = hash_sessionid(sessionid);
1744 /* Search in the appropriate list */
1745 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
1746 if (!memcmp(elem->se_sessionid.data, sessionid->data,
1747 NFS4_MAX_SESSIONID_LEN)) {
1748 return elem;
1749 }
1750 }
1751
1752 dprintk("%s: session not found\n", __func__);
1753 return NULL;
1754 }
1755
1756 static struct nfsd4_session *
1757 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
1758 __be32 *ret)
1759 {
1760 struct nfsd4_session *session;
1761 __be32 status = nfserr_badsession;
1762
1763 session = __find_in_sessionid_hashtbl(sessionid, net);
1764 if (!session)
1765 goto out;
1766 status = nfsd4_get_session_locked(session);
1767 if (status)
1768 session = NULL;
1769 out:
1770 *ret = status;
1771 return session;
1772 }
1773
1774 /* caller must hold client_lock */
1775 static void
1776 unhash_session(struct nfsd4_session *ses)
1777 {
1778 struct nfs4_client *clp = ses->se_client;
1779 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1780
1781 lockdep_assert_held(&nn->client_lock);
1782
1783 list_del(&ses->se_hash);
1784 spin_lock(&ses->se_client->cl_lock);
1785 list_del(&ses->se_perclnt);
1786 spin_unlock(&ses->se_client->cl_lock);
1787 }
1788
1789 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1790 static int
STALE_CLIENTID(clientid_t * clid,struct nfsd_net * nn)1791 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1792 {
1793 /*
1794 * We're assuming the clid was not given out from a boot
1795 * precisely 2^32 (about 136 years) before this one. That seems
1796 * a safe assumption:
1797 */
1798 if (clid->cl_boot == (u32)nn->boot_time)
1799 return 0;
1800 dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
1801 clid->cl_boot, clid->cl_id, nn->boot_time);
1802 return 1;
1803 }
1804
/*
 * This type of memory management (a slab allocation plus separate
 * allocations for the name blob and owner-string hash table) is somewhat
 * inefficient, but we use it anyway since SETCLIENTID is not a common
 * operation.
 */
static struct nfs4_client *alloc_client(struct xdr_netobj name)
{
	struct nfs4_client *clp;
	int i;

	clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
	if (clp == NULL)
		return NULL;
	clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
	if (clp->cl_name.data == NULL)
		goto err_no_name;
	clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
						 sizeof(struct list_head),
						 GFP_KERNEL);
	if (!clp->cl_ownerstr_hashtbl)
		goto err_no_hashtbl;
	for (i = 0; i < OWNER_HASH_SIZE; i++)
		INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
	clp->cl_name.len = name.len;
	INIT_LIST_HEAD(&clp->cl_sessions);
	idr_init(&clp->cl_stateids);
	atomic_set(&clp->cl_refcount, 0);
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	INIT_LIST_HEAD(&clp->cl_idhash);
	INIT_LIST_HEAD(&clp->cl_openowners);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_lru);
	INIT_LIST_HEAD(&clp->cl_revoked);
#ifdef CONFIG_NFSD_PNFS
	INIT_LIST_HEAD(&clp->cl_lo_states);
#endif
	spin_lock_init(&clp->cl_lock);
	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
	return clp;
err_no_hashtbl:
	kfree(clp->cl_name.data);
err_no_name:
	kmem_cache_free(client_slab, clp);
	return NULL;
}

static void
free_client(struct nfs4_client *clp)
{
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;
		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				se_perclnt);
		list_del(&ses->se_perclnt);
		WARN_ON_ONCE(atomic_read(&ses->se_ref));
		free_session(ses);
	}
	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
	free_svc_cred(&clp->cl_cred);
	kfree(clp->cl_ownerstr_hashtbl);
	kfree(clp->cl_name.data);
	idr_destroy(&clp->cl_stateids);
	kmem_cache_free(client_slab, clp);
}

/* must be called under the client_lock */
static void
unhash_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_session *ses;

	lockdep_assert_held(&nn->client_lock);

	/* Mark the client as expired! */
	clp->cl_time = 0;
	/* Make it invisible */
	if (!list_empty(&clp->cl_idhash)) {
		list_del_init(&clp->cl_idhash);
		if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
			rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
		else
			rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	}
	list_del_init(&clp->cl_lru);
	spin_lock(&clp->cl_lock);
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);
}

static void
unhash_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	unhash_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
	if (atomic_read(&clp->cl_refcount))
		return nfserr_jukebox;
	unhash_client_locked(clp);
	return nfs_ok;
}

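/*
 * Tear down all state held by an already-unhashed client: delegations,
 * revoked delegations, openowners, blocked locks, layouts, and the
 * callback channel, then free the client itself.
 */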
static void
__destroy_client(struct nfs4_client *clp)
{
	int i;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		destroy_unhashed_deleg(dp);
	}
	while (!list_empty(&clp->cl_revoked)) {
		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		nfs4_put_stid(&dp->dl_stid);
	}
	while (!list_empty(&clp->cl_openowners)) {
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
		nfs4_get_stateowner(&oo->oo_owner);
		release_openowner(oo);
	}
	for (i = 0; i < OWNER_HASH_SIZE; i++) {
		struct nfs4_stateowner *so, *tmp;

		list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
					 so_strhash) {
			/* Should be no openowners at this point */
			WARN_ON_ONCE(so->so_is_open_owner);
			remove_blocked_locks(lockowner(so));
		}
	}
	nfsd4_return_all_client_layouts(clp);
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	free_client(clp);
}

static void
destroy_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	__destroy_client(clp);
}

static void expire_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	nfsd4_client_record_remove(clp);
	__destroy_client(clp);
}

static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
	memcpy(target->cl_verifier.data, source->data,
			sizeof(target->cl_verifier.data));
}

static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}

static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
	target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
	target->cr_raw_principal = kstrdup(source->cr_raw_principal,
								GFP_KERNEL);
	target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
	if ((source->cr_principal && !target->cr_principal) ||
	    (source->cr_raw_principal && !target->cr_raw_principal) ||
	    (source->cr_targ_princ && !target->cr_targ_princ))
		return -ENOMEM;

	target->cr_flavor = source->cr_flavor;
	target->cr_uid = source->cr_uid;
	target->cr_gid = source->cr_gid;
	target->cr_group_info = source->cr_group_info;
	get_group_info(target->cr_group_info);
	target->cr_gss_mech = source->cr_gss_mech;
	if (source->cr_gss_mech)
		gss_mech_get(source->cr_gss_mech);
	return 0;
}

static int
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
{
	if (o1->len < o2->len)
		return -1;
	if (o1->len > o2->len)
		return 1;
	return memcmp(o1->data, o2->data, o1->len);
}

static int same_name(const char *n1, const char *n2)
{
	return 0 == memcmp(n1, n2, HEXDIR_LEN);
}

static int
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{
	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
}

static int
same_clid(clientid_t *cl1, clientid_t *cl2)
{
	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
}

static bool groups_equal(struct group_info *g1, struct group_info *g2)
{
	int i;

	if (g1->ngroups != g2->ngroups)
		return false;
	for (i = 0; i < g1->ngroups; i++)
		if (!gid_eq(g1->gid[i], g2->gid[i]))
			return false;
	return true;
}

/*
 * RFC 3530 language requires clid_inuse be returned when the
 * "principal" associated with a request differs from that previously
 * used.  We use uids, gids, and the gss principal string as our best
 * approximation.  We also don't want to allow non-gss use of a client
 * established using gss: in theory cr_principal should catch that
 * change, but in practice cr_principal can be null even in the gss case
 * since gssd doesn't always pass down a principal string.
 */
static bool is_gss_cred(struct svc_cred *cr)
{
	/* Is cr_flavor one of the gss "pseudoflavors"?: */
	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
}


static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
		return false;
	/* XXX: check that cr_targ_princ fields match ? */
	if (cr1->cr_principal == cr2->cr_principal)
		return true;
	if (!cr1->cr_principal || !cr2->cr_principal)
		return false;
	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
}

static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;
	u32 service;

	if (!cr->cr_gss_mech)
		return false;
	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
	return service == RPC_GSS_SVC_INTEGRITY ||
	       service == RPC_GSS_SVC_PRIVACY;
}

bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;

	if (!cl->cl_mach_cred)
		return true;
	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
		return false;
	if (!svc_rqst_integrity_protected(rqstp))
		return false;
	if (cl->cl_cred.cr_raw_principal)
		return 0 == strcmp(cl->cl_cred.cr_raw_principal,
						cr->cr_raw_principal);
	if (!cr->cr_principal)
		return false;
	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
}

static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
{
	__be32 verf[2];

	/*
	 * This is opaque to client, so no need to byte-swap. Use
	 * __force to keep sparse happy
	 */
	verf[0] = (__force __be32)get_seconds();
	verf[1] = (__force __be32)nn->clverifier_counter++;
	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
}

static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
	clp->cl_clientid.cl_boot = nn->boot_time;
	clp->cl_clientid.cl_id = nn->clientid_counter++;
	gen_confirm(clp, nn);
}

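/*
 * Look up a stateid in the client's idr by its opaque id.  The _locked
 * variant requires cl_lock and takes no reference; find_stateid_by_type()
 * takes cl_lock itself and returns a referenced stateid only if its type
 * matches the given mask.
 */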
static struct nfs4_stid *
find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
{
	struct nfs4_stid *ret;

	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
	if (!ret || !ret->sc_type)
		return NULL;
	return ret;
}

static struct nfs4_stid *
find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
{
	struct nfs4_stid *s;

	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, t);
	if (s != NULL) {
		if (typemask & s->sc_type)
			refcount_inc(&s->sc_count);
		else
			s = NULL;
	}
	spin_unlock(&cl->cl_lock);
	return s;
}

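/*
 * Allocate a client record and initialize it from the incoming request:
 * copy the credential (failing cleanly on -ENOMEM), record the verifier,
 * peer address, and creation time, and set up the null callback.
 */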
static struct nfs4_client *create_client(struct xdr_netobj name,
		struct svc_rqst *rqstp, nfs4_verifier *verf)
{
	struct nfs4_client *clp;
	struct sockaddr *sa = svc_addr(rqstp);
	int ret;
	struct net *net = SVC_NET(rqstp);

	clp = alloc_client(name);
	if (clp == NULL)
		return NULL;

	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
	if (ret) {
		free_client(clp);
		return NULL;
	}
	nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
	clp->cl_time = get_seconds();
	clear_bit(0, &clp->cl_cb_slot_busy);
	copy_verf(clp, verf);
	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
	clp->cl_cb_session = NULL;
	clp->net = net;
	return clp;
}

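/*
 * Clients are also indexed by name (the client-supplied id blob) in
 * per-net rbtrees ordered by compare_blob().  Insertion does not handle
 * duplicates; callers are expected to search and unhash any same-named
 * client first, under client_lock.
 */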
static void
add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct nfs4_client *clp;

	while (*new) {
		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
		parent = *new;

		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&new_clp->cl_namenode, parent, new);
	rb_insert_color(&new_clp->cl_namenode, root);
}

static struct nfs4_client *
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
{
	int cmp;
	struct rb_node *node = root->rb_node;
	struct nfs4_client *clp;

	while (node) {
		clp = rb_entry(node, struct nfs4_client, cl_namenode);
		cmp = compare_blob(&clp->cl_name, name);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			return clp;
	}
	return NULL;
}

static void
add_to_unconfirmed(struct nfs4_client *clp)
{
	unsigned int idhashval;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
	renew_client_locked(clp);
}

static void
move_to_confirmed(struct nfs4_client *clp)
{
	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	add_clp_to_name_tree(clp, &nn->conf_name_tree);
	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	renew_client_locked(clp);
}

static struct nfs4_client *
find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
{
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
		if (same_clid(&clp->cl_clientid, clid)) {
			if ((bool)clp->cl_minorversion != sessions)
				return NULL;
			renew_client_locked(clp);
			return clp;
		}
	}
	return NULL;
}

static struct nfs4_client *
find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->conf_id_hashtbl;

	lockdep_assert_held(&nn->client_lock);
	return find_client_in_id_table(tbl, clid, sessions);
}

static struct nfs4_client *
find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->unconf_id_hashtbl;

	lockdep_assert_held(&nn->client_lock);
	return find_client_in_id_table(tbl, clid, sessions);
}

static bool clp_used_exchangeid(struct nfs4_client *clp)
{
	return clp->cl_exchange_flags != 0;
}

static struct nfs4_client *
find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	lockdep_assert_held(&nn->client_lock);
	return find_clp_in_name_tree(name, &nn->conf_name_tree);
}

static struct nfs4_client *
find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	lockdep_assert_held(&nn->client_lock);
	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
}

static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
	struct sockaddr *sa = svc_addr(rqstp);
	u32 scopeid = rpc_get_scope_id(sa);
	unsigned short expected_family;

	/* Currently, we only support tcp and tcp6 for the callback channel */
	if (se->se_callback_netid_len == 3 &&
	    !memcmp(se->se_callback_netid_val, "tcp", 3))
		expected_family = AF_INET;
	else if (se->se_callback_netid_len == 4 &&
		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
		expected_family = AF_INET6;
	else
		goto out_err;

	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
					      se->se_callback_addr_len,
					      (struct sockaddr *)&conn->cb_addr,
					      sizeof(conn->cb_addr));

	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
		goto out_err;

	if (conn->cb_addr.ss_family == AF_INET6)
		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;

	conn->cb_prog = se->se_callback_prog;
	conn->cb_ident = se->se_callback_ident;
	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
	return;
out_err:
	conn->cb_addr.ss_family = AF_UNSPEC;
	conn->cb_addrlen = 0;
	dprintk("NFSD: this client (clientid %08x/%08x) "
		"will not receive delegations\n",
		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);

	return;
}

/*
 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
 */
static void
nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
{
	struct xdr_buf *buf = resp->xdr.buf;
	struct nfsd4_slot *slot = resp->cstate.slot;
	unsigned int base;

	dprintk("--> %s slot %p\n", __func__, slot);

	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
	slot->sl_opcnt = resp->opcnt;
	slot->sl_status = resp->cstate.status;
	free_svc_cred(&slot->sl_cred);
	copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);

	if (!nfsd4_cache_this(resp)) {
		slot->sl_flags &= ~NFSD4_SLOT_CACHED;
		return;
	}
	slot->sl_flags |= NFSD4_SLOT_CACHED;

	base = resp->cstate.data_offset;
	slot->sl_datalen = buf->len - base;
	if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
		WARN(1, "%s: sessions DRC could not cache compound\n",
		     __func__);
	return;
}

/*
 * Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE, encode the uncached rep error on the next
 * operation, which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
 */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
{
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;

	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);

	if (slot->sl_flags & NFSD4_SLOT_CACHED)
		return op->status;
	if (args->opcnt == 1) {
		/*
		 * The original operation wasn't a solo sequence--we
		 * always cache those--so this retry must not match the
		 * original:
		 */
		op->status = nfserr_seq_false_retry;
	} else {
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);
	}
	return op->status;
}

/*
 * The sequence operation is not cached because we can use the slot and
 * session values.
 */
static __be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	struct xdr_stream *xdr = &resp->xdr;
	__be32 *p;
	__be32 status;

	dprintk("--> %s slot %p\n", __func__, slot);

	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status)
		return status;

	p = xdr_reserve_space(xdr, slot->sl_datalen);
	if (!p) {
		WARN_ON_ONCE(1);
		return nfserr_serverfault;
	}
	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
	xdr_commit_encode(xdr);

	resp->opcnt = slot->sl_opcnt;
	return slot->sl_status;
}

/*
 * Set the exchange_id flags returned by the server.
 */
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{
#ifdef CONFIG_NFSD_PNFS
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
#else
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
#endif

	/* Referrals are supported, Migration is not. */
	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;

	/* set the wire flags to return to client. */
	clid->flags = new->cl_exchange_flags;
}

static bool client_has_openowners(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
		if (!list_empty(&oo->oo_owner.so_stateids))
			return true;
	}
	return false;
}

static bool client_has_state(struct nfs4_client *clp)
{
	return client_has_openowners(clp)
#ifdef CONFIG_NFSD_PNFS
		|| !list_empty(&clp->cl_lo_states)
#endif
		|| !list_empty(&clp->cl_delegations)
		|| !list_empty(&clp->cl_sessions);
}

__be32
nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_exchange_id *exid = &u->exchange_id;
	struct nfs4_client *conf, *new;
	struct nfs4_client *unconf = NULL;
	__be32 status;
	char addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier verf = exid->verifier;
	struct sockaddr *sa = svc_addr(rqstp);
	bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %d\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
		return nfserr_inval;

	new = create_client(exid->clname, rqstp, &verf);
	if (new == NULL)
		return nfserr_jukebox;

	switch (exid->spa_how) {
	case SP4_MACH_CRED:
		exid->spo_must_enforce[0] = 0;
		exid->spo_must_enforce[1] = (
			1 << (OP_BIND_CONN_TO_SESSION - 32) |
			1 << (OP_EXCHANGE_ID - 32) |
			1 << (OP_CREATE_SESSION - 32) |
			1 << (OP_DESTROY_SESSION - 32) |
			1 << (OP_DESTROY_CLIENTID - 32));

		exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
					1 << (OP_OPEN_DOWNGRADE) |
					1 << (OP_LOCKU) |
					1 << (OP_DELEGRETURN));

		exid->spo_must_allow[1] &= (
					1 << (OP_TEST_STATEID - 32) |
					1 << (OP_FREE_STATEID - 32));
		if (!svc_rqst_integrity_protected(rqstp)) {
			status = nfserr_inval;
			goto out_nolock;
		}
		/*
		 * Sometimes userspace doesn't give us a principal.
		 * Which is a bug, really.  Anyway, we can't enforce
		 * MACH_CRED in that case, better to give up now:
		 */
		if (!new->cl_cred.cr_principal &&
					!new->cl_cred.cr_raw_principal) {
			status = nfserr_serverfault;
			goto out_nolock;
		}
		new->cl_mach_cred = true;
		/* fall through */
	case SP4_NONE:
		break;
	default:				/* checked by xdr code */
		WARN_ON_ONCE(1);
		/* fall through */
	case SP4_SSV:
		status = nfserr_encr_alg_unsupp;
		goto out_nolock;
	}

	/* Cases below refer to rfc 5661 section 18.35.4: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&exid->clname, nn);
	if (conf) {
		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
		bool verfs_match = same_verf(&verf, &conf->cl_verifier);

		if (update) {
			if (!clp_used_exchangeid(conf)) { /* buggy client */
				status = nfserr_inval;
				goto out;
			}
			if (!nfsd4_mach_creds_match(conf, rqstp)) {
				status = nfserr_wrong_cred;
				goto out;
			}
			if (!creds_match) { /* case 9 */
				status = nfserr_perm;
				goto out;
			}
			if (!verfs_match) { /* case 8 */
				status = nfserr_not_same;
				goto out;
			}
			/* case 6 */
			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
			goto out_copy;
		}
		if (!creds_match) { /* case 3 */
			if (client_has_state(conf)) {
				status = nfserr_clid_inuse;
				goto out;
			}
			goto out_new;
		}
		if (verfs_match) { /* case 2 */
			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
			goto out_copy;
		}
		/* case 5, client reboot */
		conf = NULL;
		goto out_new;
	}

	if (update) { /* case 7 */
		status = nfserr_noent;
		goto out;
	}

	unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
	if (unconf) /* case 4, possible retry or client restart */
		unhash_client_locked(unconf);

	/* case 1 (normal case) */
out_new:
	if (conf) {
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
	}
	new->cl_minorversion = cstate->minorversion;
	new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
	new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];

	gen_clid(new, nn);
	add_to_unconfirmed(new);
	swap(new, conf);
out_copy:
	exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
	exid->clientid.cl_id = conf->cl_clientid.cl_id;

	exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
	nfsd4_set_ex_flags(conf, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
	status = nfs_ok;

out:
	spin_unlock(&nn->client_lock);
out_nolock:
	if (new)
		expire_client(new);
	if (unconf)
		expire_client(unconf);
	return status;
}

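/*
 * RFC 5661's slot/seqid rules: a seqid one past the slot's is a new
 * request; an equal seqid on an idle slot is a replay; an equal seqid on a
 * busy slot means the original request is still in progress (jukebox);
 * anything else is misordered.
 */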
static __be32
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
{
	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
		slot_seqid);

	/* The slot is in use, and no response has been sent. */
	if (slot_inuse) {
		if (seqid == slot_seqid)
			return nfserr_jukebox;
		else
			return nfserr_seq_misordered;
	}
	/* Note unsigned 32-bit arithmetic handles wraparound: */
	if (likely(seqid == slot_seqid + 1))
		return nfs_ok;
	if (seqid == slot_seqid)
		return nfserr_replay_cache;
	return nfserr_seq_misordered;
}

/*
 * Cache the create session result into the create session single DRC
 * slot cache by saving the xdr structure. sl_seqid has been set.
 * Do this for solo or embedded create session operations.
 */
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
			   struct nfsd4_clid_slot *slot, __be32 nfserr)
{
	slot->sl_status = nfserr;
	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
}

static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
{
	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
	return slot->sl_status;
}

#define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
			2 * 2 +	/* credential,verifier: AUTH_NULL, length 0 */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* version, opcount, opcode */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, cache */ \
			4 ) * sizeof(__be32))

#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
			2 +	/* verifier: AUTH_NULL, length 0 */ \
			1 +	/* status */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* opcount, opcode, opstatus */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, slotID, status */ \
			5 ) * sizeof(__be32))

static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;

	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
		return nfserr_toosmall;
	ca->headerpadsz = 0;
	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
	/*
	 * Note that decreasing the slot size below the client's request
	 * may make it difficult for the client to function correctly,
	 * whereas decreasing the number of slots will (just?) affect
	 * performance.  When short on memory we therefore prefer to
	 * decrease the number of slots instead of their size.  Clients
	 * that request larger slots than they need will get poor results:
	 */
	ca->maxreqs = nfsd4_get_drc_mem(ca);
	if (!ca->maxreqs)
		return nfserr_jukebox;

	return nfs_ok;
}

/*
 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
 * These are based on similar macros in linux/sunrpc/msg_prot.h .
 */
#define RPC_MAX_HEADER_WITH_AUTH_SYS \
	(RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))

#define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
	(RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))

#define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
				 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
#define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
				 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
				 sizeof(__be32))

static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
{
	ca->headerpadsz = 0;

	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
		return nfserr_toosmall;
	ca->maxresp_cached = 0;
	if (ca->maxops < 2)
		return nfserr_toosmall;

	return nfs_ok;
}

static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
{
	switch (cbs->flavor) {
	case RPC_AUTH_NULL:
	case RPC_AUTH_UNIX:
		return nfs_ok;
	default:
		/*
		 * GSS case: the spec doesn't allow us to return this
		 * error.  But it also doesn't allow us not to support
		 * GSS.
		 * I'd rather this fail hard than return some error the
		 * client might think it can already handle:
		 */
		return nfserr_encr_alg_unsupp;
	}
}

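/*
 * CREATE_SESSION: validate the channel attributes, then either replay a
 * cached response (confirmed client), confirm an unconfirmed client, or
 * fail with stale clientid.  The session and its first connection are
 * allocated up front so nothing is allocated under client_lock.
 */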
__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
	struct nfsd4_create_session *cr_ses = &u->create_session;
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	struct nfsd4_session *new;
	struct nfsd4_conn *conn;
	struct nfsd4_clid_slot *cs_slot = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;
	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
	if (status)
		return status;
	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
	if (status)
		return status;
	status = check_backchannel_attrs(&cr_ses->back_channel);
	if (status)
		goto out_release_drc_mem;
	status = nfserr_jukebox;
	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
	if (!new)
		goto out_release_drc_mem;
	conn = alloc_conn_from_crses(rqstp, cr_ses);
	if (!conn)
		goto out_free_session;

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		status = nfserr_wrong_cred;
		if (!nfsd4_mach_creds_match(conf, rqstp))
			goto out_free_conn;
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			if (status == nfserr_replay_cache)
				status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out_free_conn;
		}
	} else if (unconf) {
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			status = nfserr_clid_inuse;
			goto out_free_conn;
		}
		status = nfserr_wrong_cred;
		if (!nfsd4_mach_creds_match(unconf, rqstp))
			goto out_free_conn;
		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out_free_conn;
			}
		}
		move_to_confirmed(unconf);
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out_free_conn;
	}
	status = nfs_ok;
	/* Persistent sessions are not supported */
	cr_ses->flags &= ~SESSION4_PERSIST;
	/* Upshifting from TCP to RDMA is not supported */
	cr_ses->flags &= ~SESSION4_RDMA;

	init_session(rqstp, new, conf, cr_ses);
	nfsd4_get_session_locked(new);

	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the client_lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
	spin_unlock(&nn->client_lock);
	/* init connection and backchannel */
	nfsd4_init_conn(rqstp, conn, new);
	nfsd4_put_session(new);
	if (old)
		expire_client(old);
	return status;
out_free_conn:
	spin_unlock(&nn->client_lock);
	free_conn(conn);
	if (old)
		expire_client(old);
out_free_session:
	__free_session(new);
out_release_drc_mem:
	nfsd4_put_drc_mem(&cr_ses->fore_channel);
	return status;
}

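/*
 * Normalize the BIND_CONN_TO_SESSION direction argument: the "OR_BOTH"
 * values are collapsed to NFS4_CDFC4_BOTH, plain FORE/BACK pass through,
 * and anything else is rejected as invalid.
 */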
static __be32 nfsd4_map_bcts_dir(u32 *dir)
{
	switch (*dir) {
	case NFS4_CDFC4_FORE:
	case NFS4_CDFC4_BACK:
		return nfs_ok;
	case NFS4_CDFC4_FORE_OR_BOTH:
	case NFS4_CDFC4_BACK_OR_BOTH:
		*dir = NFS4_CDFC4_BOTH;
		return nfs_ok;
	}
	return nfserr_inval;
}

__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
	struct nfsd4_session *session = cstate->session;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	__be32 status;

	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
	if (status)
		return status;
	spin_lock(&nn->client_lock);
	session->se_cb_prog = bc->bc_cb_program;
	session->se_cb_sec = bc->bc_cb_sec;
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback(session->se_client);

	return nfs_ok;
}

__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
	__be32 status;
	struct nfsd4_conn *conn;
	struct nfsd4_session *session;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
	spin_unlock(&nn->client_lock);
	if (!session)
		goto out_no_session;
	status = nfserr_wrong_cred;
	if (!nfsd4_mach_creds_match(session->se_client, rqstp))
		goto out;
	status = nfsd4_map_bcts_dir(&bcts->dir);
	if (status)
		goto out;
	conn = alloc_conn(rqstp, bcts->dir);
	status = nfserr_jukebox;
	if (!conn)
		goto out;
	nfsd4_init_conn(rqstp, conn, session);
	status = nfs_ok;
out:
	nfsd4_put_session(session);
out_no_session:
	return status;
}

static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
{
	if (!cstate->session)
		return false;
	return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
}

__be32
nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
	struct nfsd4_session *ses;
	__be32 status;
	int ref_held_by_me = 0;
	struct net *net = SVC_NET(r);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	status = nfserr_not_only_op;
	if (nfsd4_compound_in_session(cstate, sessionid)) {
		if (!nfsd4_last_compound_op(r))
			goto out;
		ref_held_by_me++;
	}
	dump_sessionid(__func__, sessionid);
	spin_lock(&nn->client_lock);
	ses = find_in_sessionid_hashtbl(sessionid, net, &status);
	if (!ses)
		goto out_client_lock;
	status = nfserr_wrong_cred;
	if (!nfsd4_mach_creds_match(ses->se_client, r))
		goto out_put_session;
	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
	if (status)
		goto out_put_session;
	unhash_session(ses);
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback_sync(ses->se_client);

	spin_lock(&nn->client_lock);
	status = nfs_ok;
out_put_session:
	nfsd4_put_session_locked(ses);
out_client_lock:
	spin_unlock(&nn->client_lock);
out:
	return status;
}

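/* Find the connection for the given transport in a session's conn list. */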
static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
{
	struct nfsd4_conn *c;

	list_for_each_entry(c, &s->se_conns, cn_persession) {
		if (c->cn_xprt == xpt) {
			return c;
		}
	}
	return NULL;
}

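/*
 * Bind the connection carrying this SEQUENCE to the session if it isn't
 * already bound.  Either consumes "new" (hashes it and registers for xprt
 * close notification) or frees it; on a machine-credential client an
 * unknown connection is refused rather than implicitly bound.
 */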
static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_conn *c;
	__be32 status = nfs_ok;
	int ret;

	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(new->cn_xprt, ses);
	if (c)
		goto out_free;
	status = nfserr_conn_not_bound_to_session;
	if (clp->cl_mach_cred)
		goto out_free;
	__nfsd4_hash_conn(new, ses);
	spin_unlock(&clp->cl_lock);
	ret = nfsd4_register_conn(new);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&new->cn_xpt_user);
	return nfs_ok;
out_free:
	spin_unlock(&clp->cl_lock);
	free_conn(new);
	return status;
}

static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
{
	struct nfsd4_compoundargs *args = rqstp->rq_argp;

	return args->opcnt > session->se_fchannel.maxops;
}

static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
				  struct nfsd4_session *session)
{
	struct xdr_buf *xb = &rqstp->rq_arg;

	return xb->len > session->se_fchannel.maxreq_sz;
}

static bool replay_matches_cache(struct svc_rqst *rqstp,
		 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
{
	struct nfsd4_compoundargs *argp = rqstp->rq_argp;

	if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
	    (bool)seq->cachethis)
		return false;
	/*
	 * If there's an error then the reply can have fewer ops than
	 * the call.
	 */
	if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
		return false;
	/*
	 * But if we cached a reply with *more* ops than the call you're
	 * sending us now, then this new call is clearly not really a
	 * replay of the old one:
	 */
	if (slot->sl_opcnt > argp->opcnt)
		return false;
	/* This is the only check explicitly called by spec: */
	if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
		return false;
	/*
	 * There may be more comparisons we could actually do, but the
	 * spec doesn't require us to catch every case where the calls
	 * don't match (that would require caching the call as well as
	 * the reply), so we don't bother.
	 */
	return true;
}

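/*
 * SEQUENCE: look up the session and slot, detect replays via the slot
 * seqid, and either replay the cached response or claim the slot for this
 * compound, bounding the reply size by the channel attributes.
 */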
__be32
nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_sequence *seq = &u->sequence;
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct xdr_stream *xdr = &resp->xdr;
	struct nfsd4_session *session;
	struct nfs4_client *clp;
	struct nfsd4_slot *slot;
	struct nfsd4_conn *conn;
	__be32 status;
	int buflen;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (resp->opcnt != 1)
		return nfserr_sequence_pos;

	/*
	 * Will be either used or freed by nfsd4_sequence_check_conn
	 * below.
	 */
	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
	if (!conn)
		return nfserr_jukebox;

	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
	if (!session)
		goto out_no_session;
	clp = session->se_client;

	status = nfserr_too_many_ops;
	if (nfsd4_session_too_many_ops(rqstp, session))
		goto out_put_session;

	status = nfserr_req_too_big;
	if (nfsd4_request_too_big(rqstp, session))
		goto out_put_session;

	status = nfserr_badslot;
	if (seq->slotid >= session->se_fchannel.maxreqs)
		goto out_put_session;

	slot = session->se_slots[seq->slotid];
	dprintk("%s: slotid %d\n", __func__, seq->slotid);

	/*
	 * We do not negotiate the number of slots yet, so set maxslots
	 * to the session maxreqs, which is used to encode both
	 * sr_highest_slotid and sr_target_highest_slotid.
	 */
	seq->maxslots = session->se_fchannel.maxreqs;

	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
					slot->sl_flags & NFSD4_SLOT_INUSE);
	if (status == nfserr_replay_cache) {
		status = nfserr_seq_misordered;
		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
			goto out_put_session;
		status = nfserr_seq_false_retry;
		if (!replay_matches_cache(rqstp, seq, slot))
			goto out_put_session;
		cstate->slot = slot;
		cstate->session = session;
		cstate->clp = clp;
		/* Return the cached reply status and set cstate->status
		 * for nfsd4_proc_compound processing */
		status = nfsd4_replay_cache_entry(resp, seq);
		cstate->status = nfserr_replay_cache;
		goto out;
	}
	if (status)
		goto out_put_session;

	status = nfsd4_sequence_check_conn(conn, session);
	conn = NULL;
	if (status)
		goto out_put_session;

	buflen = (seq->cachethis) ?
			session->se_fchannel.maxresp_cached :
			session->se_fchannel.maxresp_sz;
	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
				    nfserr_rep_too_big;
	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
		goto out_put_session;
	svc_reserve(rqstp, buflen);

	status = nfs_ok;
	/* Success! bump slot seqid */
	slot->sl_seqid = seq->seqid;
	slot->sl_flags |= NFSD4_SLOT_INUSE;
	if (seq->cachethis)
		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
	else
		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;

	cstate->slot = slot;
	cstate->session = session;
	cstate->clp = clp;

out:
	switch (clp->cl_cb_state) {
	case NFSD4_CB_DOWN:
		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
		break;
	case NFSD4_CB_FAULT:
		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
		break;
	default:
		seq->status_flags = 0;
	}
	if (!list_empty(&clp->cl_revoked))
		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
out_no_session:
	if (conn)
		free_conn(conn);
	spin_unlock(&nn->client_lock);
	return status;
out_put_session:
	nfsd4_put_session_locked(session);
	goto out_no_session;
}

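/*
 * Called at the end of compound processing: cache the reply in the slot
 * (unless this compound was itself a replay), release the slot, and drop
 * the session or client reference taken earlier.
 */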
void
nfsd4_sequence_done(struct nfsd4_compoundres *resp)
{
	struct nfsd4_compound_state *cs = &resp->cstate;

	if (nfsd4_has_session(cs)) {
		if (cs->status != nfserr_replay_cache) {
			nfsd4_store_cache_entry(resp);
			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
		}
		/* Drop session reference that was taken in nfsd4_sequence() */
		nfsd4_put_session(cs->session);
	} else if (cs->clp)
		put_client_renew(cs->clp);
}

__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *clp = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
	conf = find_confirmed_client(&dc->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		if (client_has_state(conf)) {
			status = nfserr_clientid_busy;
			goto out;
		}
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
		clp = conf;
	} else if (unconf)
		clp = unconf;
	else {
		status = nfserr_stale_clientid;
		goto out;
	}
	if (!nfsd4_mach_creds_match(clp, rqstp)) {
		clp = NULL;
		status = nfserr_wrong_cred;
		goto out;
	}
	unhash_client_locked(clp);
out:
	spin_unlock(&nn->client_lock);
	if (clp)
		expire_client(clp);
	return status;
}

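/*
 * RECLAIM_COMPLETE: record that the client has finished reclaiming state
 * after a reboot.  A second call gets nfserr_complete_already; the
 * rca_one_fs variant is accepted but intentionally ignored.
 */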
__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
	struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
	__be32 status = 0;

	if (rc->rca_one_fs) {
		if (!cstate->current_fh.fh_dentry)
			return nfserr_nofilehandle;
		/*
		 * We don't take advantage of the rca_one_fs case.
		 * That's OK, it's optional, we can safely ignore it.
		 */
		return nfs_ok;
	}

	status = nfserr_complete_already;
	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
			     &cstate->session->se_client->cl_flags))
		goto out;

	status = nfserr_stale_clientid;
	if (is_client_expired(cstate->session->se_client))
		/*
		 * The following error isn't really legal.
		 * But we only get here if the client just explicitly
		 * destroyed its own clientid.  Surely it no longer cares
		 * what error it gets back on an operation for the dead
		 * client.
		 */
		goto out;

	status = nfs_ok;
	nfsd4_client_record_create(cstate->session->se_client);
out:
	return status;
}

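/*
 * SETCLIENTID (NFSv4.0): allocate a candidate client up front, then under
 * client_lock apply the cases of rfc 3530 section 14.2.33: reject a name
 * in use by a differently-authenticated confirmed client, replace any
 * unconfirmed client of the same name, and reuse the clientid for a
 * probable callback update.
 */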
__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_setclientid *setclid = &u->setclientid;
	struct xdr_netobj clname = setclid->se_name;
	nfs4_verifier clverifier = setclid->se_verf;
	struct nfs4_client *conf, *new;
	struct nfs4_client *unconf = NULL;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new = create_client(clname, rqstp, &clverifier);
	if (new == NULL)
		return nfserr_jukebox;
	/* Cases below refer to rfc 3530 section 14.2.33: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&clname, nn);
	if (conf && client_has_state(conf)) {
		/* case 0: */
		status = nfserr_clid_inuse;
		if (clp_used_exchangeid(conf))
			goto out;
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			char addr_str[INET6_ADDRSTRLEN];
			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
				 sizeof(addr_str));
			dprintk("NFSD: setclientid: string in use by client "
				"at %s\n", addr_str);
			goto out;
		}
	}
	unconf = find_unconfirmed_client_by_name(&clname, nn);
	if (unconf)
		unhash_client_locked(unconf);
	if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
		/* case 1: probable callback update */
		copy_clid(new, conf);
		gen_confirm(new, nn);
	} else /* case 4 (new client) or cases 2, 3 (client reboot): */
		gen_clid(new, nn);
	new->cl_minorversion = 0;
	gen_callback(new, setclid, rqstp);
	add_to_unconfirmed(new);
	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
	new = NULL;
	status = nfs_ok;
out:
	spin_unlock(&nn->client_lock);
	if (new)
		free_client(new);
	if (unconf)
		expire_client(unconf);
	return status;
}


__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			union nfsd4_op_u *u)
{
	struct nfsd4_setclientid_confirm *setclientid_confirm =
			&u->setclientid_confirm;
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
	clientid_t *clid = &setclientid_confirm->sc_clientid;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	spin_lock(&nn->client_lock);
	conf = find_confirmed_client(clid, false, nn);
	unconf = find_unconfirmed_client(clid, false, nn);
	/*
	 * We try hard to give out unique clientids, so if we get an
	 * attempt to confirm the same clientid with a different cred,
	 * the client may be buggy; this should never happen.
	 *
	 * Nevertheless, RFC 7530 recommends INUSE for this case:
	 */
3419 status = nfserr_clid_inuse;
3420 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
3421 goto out;
3422 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
3423 goto out;
3424 /* cases below refer to rfc 3530 section 14.2.34: */
3425 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
3426 if (conf && same_verf(&confirm, &conf->cl_confirm)) {
3427 /* case 2: probable retransmit */
3428 status = nfs_ok;
3429 } else /* case 4: client hasn't noticed we rebooted yet? */
3430 status = nfserr_stale_clientid;
3431 goto out;
3432 }
3433 status = nfs_ok;
3434 if (conf) { /* case 1: callback update */
3435 old = unconf;
3436 unhash_client_locked(old);
3437 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
3438 } else { /* case 3: normal case; new or rebooted client */
3439 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3440 if (old) {
3441 status = nfserr_clid_inuse;
3442 if (client_has_state(old)
3443 && !same_creds(&unconf->cl_cred,
3444 &old->cl_cred))
3445 goto out;
3446 status = mark_client_expired_locked(old);
3447 if (status) {
3448 old = NULL;
3449 goto out;
3450 }
3451 }
3452 move_to_confirmed(unconf);
3453 conf = unconf;
3454 }
3455 get_client_locked(conf);
3456 spin_unlock(&nn->client_lock);
3457 nfsd4_probe_callback(conf);
3458 spin_lock(&nn->client_lock);
3459 put_client_renew_locked(conf);
3460 out:
3461 spin_unlock(&nn->client_lock);
3462 if (old)
3463 expire_client(old);
3464 return status;
3465 }
3466
3467 static struct nfs4_file *nfsd4_alloc_file(void)
3468 {
3469 return kmem_cache_alloc(file_slab, GFP_KERNEL);
3470 }
3471
3472 /* OPEN Share state helper functions */
3473 static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
3474 struct nfs4_file *fp)
3475 {
3476 lockdep_assert_held(&state_lock);
3477
3478 refcount_set(&fp->fi_ref, 1);
3479 spin_lock_init(&fp->fi_lock);
3480 INIT_LIST_HEAD(&fp->fi_stateids);
3481 INIT_LIST_HEAD(&fp->fi_delegations);
3482 INIT_LIST_HEAD(&fp->fi_clnt_odstate);
3483 fh_copy_shallow(&fp->fi_fhandle, fh);
3484 fp->fi_deleg_file = NULL;
3485 fp->fi_had_conflict = false;
3486 fp->fi_share_deny = 0;
3487 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
3488 memset(fp->fi_access, 0, sizeof(fp->fi_access));
3489 #ifdef CONFIG_NFSD_PNFS
3490 INIT_LIST_HEAD(&fp->fi_lo_states);
3491 atomic_set(&fp->fi_lo_recalls, 0);
3492 #endif
3493 hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
3494 }
3495
3496 void
3497 nfsd4_free_slabs(void)
3498 {
3499 kmem_cache_destroy(client_slab);
3500 kmem_cache_destroy(openowner_slab);
3501 kmem_cache_destroy(lockowner_slab);
3502 kmem_cache_destroy(file_slab);
3503 kmem_cache_destroy(stateid_slab);
3504 kmem_cache_destroy(deleg_slab);
3505 kmem_cache_destroy(odstate_slab);
3506 }
3507
3508 int
3509 nfsd4_init_slabs(void)
3510 {
3511 client_slab = kmem_cache_create("nfsd4_clients",
3512 sizeof(struct nfs4_client), 0, 0, NULL);
3513 if (client_slab == NULL)
3514 goto out;
3515 openowner_slab = kmem_cache_create("nfsd4_openowners",
3516 sizeof(struct nfs4_openowner), 0, 0, NULL);
3517 if (openowner_slab == NULL)
3518 goto out_free_client_slab;
3519 lockowner_slab = kmem_cache_create("nfsd4_lockowners",
3520 sizeof(struct nfs4_lockowner), 0, 0, NULL);
3521 if (lockowner_slab == NULL)
3522 goto out_free_openowner_slab;
3523 file_slab = kmem_cache_create("nfsd4_files",
3524 sizeof(struct nfs4_file), 0, 0, NULL);
3525 if (file_slab == NULL)
3526 goto out_free_lockowner_slab;
3527 stateid_slab = kmem_cache_create("nfsd4_stateids",
3528 sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
3529 if (stateid_slab == NULL)
3530 goto out_free_file_slab;
3531 deleg_slab = kmem_cache_create("nfsd4_delegations",
3532 sizeof(struct nfs4_delegation), 0, 0, NULL);
3533 if (deleg_slab == NULL)
3534 goto out_free_stateid_slab;
3535 odstate_slab = kmem_cache_create("nfsd4_odstate",
3536 sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
3537 if (odstate_slab == NULL)
3538 goto out_free_deleg_slab;
3539 return 0;
3540
3541 out_free_deleg_slab:
3542 kmem_cache_destroy(deleg_slab);
3543 out_free_stateid_slab:
3544 kmem_cache_destroy(stateid_slab);
3545 out_free_file_slab:
3546 kmem_cache_destroy(file_slab);
3547 out_free_lockowner_slab:
3548 kmem_cache_destroy(lockowner_slab);
3549 out_free_openowner_slab:
3550 kmem_cache_destroy(openowner_slab);
3551 out_free_client_slab:
3552 kmem_cache_destroy(client_slab);
3553 out:
3554 dprintk("nfsd4: out of memory while initializing nfsv4\n");
3555 return -ENOMEM;
3556 }
3557
3558 static void init_nfs4_replay(struct nfs4_replay *rp)
3559 {
3560 rp->rp_status = nfserr_serverfault;
3561 rp->rp_buflen = 0;
3562 rp->rp_buf = rp->rp_ibuf;
3563 mutex_init(&rp->rp_mutex);
3564 }
3565
3566 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
3567 struct nfs4_stateowner *so)
3568 {
3569 if (!nfsd4_has_session(cstate)) {
3570 mutex_lock(&so->so_replay.rp_mutex);
3571 cstate->replay_owner = nfs4_get_stateowner(so);
3572 }
3573 }
3574
3575 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
3576 {
3577 struct nfs4_stateowner *so = cstate->replay_owner;
3578
3579 if (so != NULL) {
3580 cstate->replay_owner = NULL;
3581 mutex_unlock(&so->so_replay.rp_mutex);
3582 nfs4_put_stateowner(so);
3583 }
3584 }
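/*
 * Editor's note: the assign/clear pair above is assumed to bracket
 * v4.0 seqid-bearing operations: rp_mutex is held from the point the
 * stateowner is looked up until the reply has been cached in
 * so_replay, so a retransmission serializes behind the original
 * request and can be answered from the replay cache. Sessions carry
 * their own reply cache, hence the nfsd4_has_session() bypass.
 */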
3585
3586 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
3587 {
3588 struct nfs4_stateowner *sop;
3589
3590 sop = kmem_cache_alloc(slab, GFP_KERNEL);
3591 if (!sop)
3592 return NULL;
3593
3594 sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
3595 if (!sop->so_owner.data) {
3596 kmem_cache_free(slab, sop);
3597 return NULL;
3598 }
3599 sop->so_owner.len = owner->len;
3600
3601 INIT_LIST_HEAD(&sop->so_stateids);
3602 sop->so_client = clp;
3603 init_nfs4_replay(&sop->so_replay);
3604 atomic_set(&sop->so_count, 1);
3605 return sop;
3606 }
3607
3608 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
3609 {
3610 lockdep_assert_held(&clp->cl_lock);
3611
3612 list_add(&oo->oo_owner.so_strhash,
3613 &clp->cl_ownerstr_hashtbl[strhashval]);
3614 list_add(&oo->oo_perclient, &clp->cl_openowners);
3615 }
3616
3617 static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
3618 {
3619 unhash_openowner_locked(openowner(so));
3620 }
3621
3622 static void nfs4_free_openowner(struct nfs4_stateowner *so)
3623 {
3624 struct nfs4_openowner *oo = openowner(so);
3625
3626 kmem_cache_free(openowner_slab, oo);
3627 }
3628
3629 static const struct nfs4_stateowner_operations openowner_ops = {
3630 .so_unhash = nfs4_unhash_openowner,
3631 .so_free = nfs4_free_openowner,
3632 };
3633
3634 static struct nfs4_ol_stateid *
3635 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
3636 {
3637 struct nfs4_ol_stateid *local, *ret = NULL;
3638 struct nfs4_openowner *oo = open->op_openowner;
3639
3640 lockdep_assert_held(&fp->fi_lock);
3641
3642 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
3643 /* ignore lock owners */
3644 if (local->st_stateowner->so_is_open_owner == 0)
3645 continue;
3646 if (local->st_stateowner != &oo->oo_owner)
3647 continue;
3648 if (local->st_stid.sc_type == NFS4_OPEN_STID) {
3649 ret = local;
3650 refcount_inc(&ret->st_stid.sc_count);
3651 break;
3652 }
3653 }
3654 return ret;
3655 }
3656
3657 static __be32
3658 nfsd4_verify_open_stid(struct nfs4_stid *s)
3659 {
3660 __be32 ret = nfs_ok;
3661
3662 switch (s->sc_type) {
3663 default:
3664 break;
3665 case 0:
3666 case NFS4_CLOSED_STID:
3667 case NFS4_CLOSED_DELEG_STID:
3668 ret = nfserr_bad_stateid;
3669 break;
3670 case NFS4_REVOKED_DELEG_STID:
3671 ret = nfserr_deleg_revoked;
3672 }
3673 return ret;
3674 }
3675
3676 /* Lock the stateid st_mutex, and deal with races with CLOSE */
3677 static __be32
3678 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
3679 {
3680 __be32 ret;
3681
3682 mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
3683 ret = nfsd4_verify_open_stid(&stp->st_stid);
3684 if (ret != nfs_ok)
3685 mutex_unlock(&stp->st_mutex);
3686 return ret;
3687 }
3688
3689 static struct nfs4_ol_stateid *
3690 nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
3691 {
3692 struct nfs4_ol_stateid *stp;
3693 for (;;) {
3694 spin_lock(&fp->fi_lock);
3695 stp = nfsd4_find_existing_open(fp, open);
3696 spin_unlock(&fp->fi_lock);
3697 if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
3698 break;
3699 nfs4_put_stid(&stp->st_stid);
3700 }
3701 return stp;
3702 }
3703
3704 static struct nfs4_openowner *
3705 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
3706 struct nfsd4_compound_state *cstate)
3707 {
3708 struct nfs4_client *clp = cstate->clp;
3709 struct nfs4_openowner *oo, *ret;
3710
3711 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
3712 if (!oo)
3713 return NULL;
3714 oo->oo_owner.so_ops = &openowner_ops;
3715 oo->oo_owner.so_is_open_owner = 1;
3716 oo->oo_owner.so_seqid = open->op_seqid;
3717 oo->oo_flags = 0;
3718 if (nfsd4_has_session(cstate))
3719 oo->oo_flags |= NFS4_OO_CONFIRMED;
3720 oo->oo_time = 0;
3721 oo->oo_last_closed_stid = NULL;
3722 INIT_LIST_HEAD(&oo->oo_close_lru);
3723 spin_lock(&clp->cl_lock);
3724 ret = find_openstateowner_str_locked(strhashval, open, clp);
3725 if (ret == NULL) {
3726 hash_openowner(oo, clp, strhashval);
3727 ret = oo;
3728 } else
3729 nfs4_free_stateowner(&oo->oo_owner);
3730
3731 spin_unlock(&clp->cl_lock);
3732 return ret;
3733 }
3734
3735 static struct nfs4_ol_stateid *
3736 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
3737 {
3738
3739 struct nfs4_openowner *oo = open->op_openowner;
3740 struct nfs4_ol_stateid *retstp = NULL;
3741 struct nfs4_ol_stateid *stp;
3742
3743 stp = open->op_stp;
3744 	/* Init and take st_mutex outside the spinlocks: mutex ops can sleep and must not nest under cl_lock/fi_lock */
3745 mutex_init(&stp->st_mutex);
3746 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
3747
3748 retry:
3749 spin_lock(&oo->oo_owner.so_client->cl_lock);
3750 spin_lock(&fp->fi_lock);
3751
3752 retstp = nfsd4_find_existing_open(fp, open);
3753 if (retstp)
3754 goto out_unlock;
3755
3756 open->op_stp = NULL;
3757 refcount_inc(&stp->st_stid.sc_count);
3758 stp->st_stid.sc_type = NFS4_OPEN_STID;
3759 INIT_LIST_HEAD(&stp->st_locks);
3760 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
3761 get_nfs4_file(fp);
3762 stp->st_stid.sc_file = fp;
3763 stp->st_access_bmap = 0;
3764 stp->st_deny_bmap = 0;
3765 stp->st_openstp = NULL;
3766 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
3767 list_add(&stp->st_perfile, &fp->fi_stateids);
3768
3769 out_unlock:
3770 spin_unlock(&fp->fi_lock);
3771 spin_unlock(&oo->oo_owner.so_client->cl_lock);
3772 if (retstp) {
3773 /* Handle races with CLOSE */
3774 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
3775 nfs4_put_stid(&retstp->st_stid);
3776 goto retry;
3777 }
3778 /* To keep mutex tracking happy */
3779 mutex_unlock(&stp->st_mutex);
3780 stp = retstp;
3781 }
3782 return stp;
3783 }
3784
3785 /*
3786 * In the 4.0 case we need to keep the owners around a little while to handle
3787  * CLOSE replay. We still need to release any file access they hold
3788  * before returning, however.
3789 */
3790 static void
3791 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
3792 {
3793 struct nfs4_ol_stateid *last;
3794 struct nfs4_openowner *oo = openowner(s->st_stateowner);
3795 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
3796 nfsd_net_id);
3797
3798 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
3799
3800 /*
3801 * We know that we hold one reference via nfsd4_close, and another
3802 * "persistent" reference for the client. If the refcount is higher
3803 * than 2, then there are still calls in progress that are using this
3804 * stateid. We can't put the sc_file reference until they are finished.
3805 * Wait for the refcount to drop to 2. Since it has been unhashed,
3806 * there should be no danger of the refcount going back up again at
3807 * this point.
3808 */
3809 wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
3810
3811 release_all_access(s);
3812 if (s->st_stid.sc_file) {
3813 put_nfs4_file(s->st_stid.sc_file);
3814 s->st_stid.sc_file = NULL;
3815 }
3816
3817 spin_lock(&nn->client_lock);
3818 last = oo->oo_last_closed_stid;
3819 oo->oo_last_closed_stid = s;
3820 list_move_tail(&oo->oo_close_lru, &nn->close_lru);
3821 oo->oo_time = get_seconds();
3822 spin_unlock(&nn->client_lock);
3823 if (last)
3824 nfs4_put_stid(&last->st_stid);
3825 }
3826
3827 /* search file_hashtbl[] for file */
3828 static struct nfs4_file *
3829 find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
3830 {
3831 struct nfs4_file *fp;
3832
3833 hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
3834 if (fh_match(&fp->fi_fhandle, fh)) {
3835 if (refcount_inc_not_zero(&fp->fi_ref))
3836 return fp;
3837 }
3838 }
3839 return NULL;
3840 }
3841
3842 struct nfs4_file *
3843 find_file(struct knfsd_fh *fh)
3844 {
3845 struct nfs4_file *fp;
3846 unsigned int hashval = file_hashval(fh);
3847
3848 rcu_read_lock();
3849 fp = find_file_locked(fh, hashval);
3850 rcu_read_unlock();
3851 return fp;
3852 }
3853
3854 static struct nfs4_file *
3855 find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
3856 {
3857 struct nfs4_file *fp;
3858 unsigned int hashval = file_hashval(fh);
3859
3860 rcu_read_lock();
3861 fp = find_file_locked(fh, hashval);
3862 rcu_read_unlock();
3863 if (fp)
3864 return fp;
3865
3866 spin_lock(&state_lock);
3867 fp = find_file_locked(fh, hashval);
3868 if (likely(fp == NULL)) {
3869 nfsd4_init_file(fh, hashval, new);
3870 fp = new;
3871 }
3872 spin_unlock(&state_lock);
3873
3874 return fp;
3875 }
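/*
 * Editor's note: find_or_add_file() above is an instance of the usual
 * "optimistic RCU lookup, then re-check under the writer lock" pattern.
 * A minimal generic sketch of the same idea; the struct and lookup()
 * helper here are hypothetical, and the block is illustration only:
 */
#if 0
static struct obj *find_or_add(struct obj *new, struct hlist_head *head,
			       spinlock_t *lock)
{
	struct obj *o;

	rcu_read_lock();		/* fast path: lock-free readers */
	o = lookup(head);
	rcu_read_unlock();
	if (o)
		return o;

	spin_lock(lock);		/* slow path: serialize inserters */
	o = lookup(head);		/* re-check: an inserter may have raced us */
	if (!o) {
		hlist_add_head_rcu(&new->hash, head);
		o = new;
	}
	spin_unlock(lock);
	return o;
}
#endif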
3876
3877 /*
3878  * Called to check for share-deny conflicts on a READ with the all-zero
3879  * stateid, or a WRITE with the all-zero or all-ones stateid
3880 */
3881 static __be32
3882 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
3883 {
3884 struct nfs4_file *fp;
3885 __be32 ret = nfs_ok;
3886
3887 	fp = find_file(&current_fh->fh_handle);
3888 if (!fp)
3889 return ret;
3890 /* Check for conflicting share reservations */
3891 spin_lock(&fp->fi_lock);
3892 if (fp->fi_share_deny & deny_type)
3893 ret = nfserr_locked;
3894 spin_unlock(&fp->fi_lock);
3895 put_nfs4_file(fp);
3896 return ret;
3897 }
3898
3899 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
3900 {
3901 struct nfs4_delegation *dp = cb_to_delegation(cb);
3902 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
3903 nfsd_net_id);
3904
3905 block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
3906
3907 /*
3908 * We can't do this in nfsd_break_deleg_cb because it is
3909 * already holding inode->i_lock.
3910 *
3911 * If the dl_time != 0, then we know that it has already been
3912 * queued for a lease break. Don't queue it again.
3913 */
3914 spin_lock(&state_lock);
3915 if (dp->dl_time == 0) {
3916 dp->dl_time = get_seconds();
3917 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
3918 }
3919 spin_unlock(&state_lock);
3920 }
3921
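/*
 * Editor's note on the ->done return convention assumed below (as
 * consumed by nfsd4_cb_done() in nfs4callback.c): return 1 when the
 * callback is finished, 0 to restart the RPC (used with rpc_delay()
 * for the retry case), and -1 to give up and treat the callback
 * channel as down.
 */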
3922 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
3923 struct rpc_task *task)
3924 {
3925 struct nfs4_delegation *dp = cb_to_delegation(cb);
3926
3927 if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID)
3928 return 1;
3929
3930 switch (task->tk_status) {
3931 case 0:
3932 return 1;
3933 case -EBADHANDLE:
3934 case -NFS4ERR_BAD_STATEID:
3935 /*
3936 * Race: client probably got cb_recall before open reply
3937 * granting delegation.
3938 */
3939 if (dp->dl_retries--) {
3940 rpc_delay(task, 2 * HZ);
3941 return 0;
3942 }
3943 /*FALLTHRU*/
3944 default:
3945 return -1;
3946 }
3947 }
3948
3949 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
3950 {
3951 struct nfs4_delegation *dp = cb_to_delegation(cb);
3952
3953 nfs4_put_stid(&dp->dl_stid);
3954 }
3955
3956 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
3957 .prepare = nfsd4_cb_recall_prepare,
3958 .done = nfsd4_cb_recall_done,
3959 .release = nfsd4_cb_recall_release,
3960 };
3961
3962 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
3963 {
3964 /*
3965 * We're assuming the state code never drops its reference
3966 * without first removing the lease. Since we're in this lease
3967 * callback (and since the lease code is serialized by the
3968 * i_lock) we know the server hasn't removed the lease yet, and
3969 * we know it's safe to take a reference.
3970 */
3971 refcount_inc(&dp->dl_stid.sc_count);
3972 nfsd4_run_cb(&dp->dl_recall);
3973 }
3974
3975 /* Called from break_lease() with i_lock held. */
3976 static bool
3977 nfsd_break_deleg_cb(struct file_lock *fl)
3978 {
3979 bool ret = false;
3980 struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
3981 struct nfs4_file *fp = dp->dl_stid.sc_file;
3982
3983 /*
3984 * We don't want the locks code to timeout the lease for us;
3985 * we'll remove it ourself if a delegation isn't returned
3986 * in time:
3987 */
3988 fl->fl_break_time = 0;
3989
3990 spin_lock(&fp->fi_lock);
3991 fp->fi_had_conflict = true;
3992 nfsd_break_one_deleg(dp);
3993 spin_unlock(&fp->fi_lock);
3994 return ret;
3995 }
3996
3997 static int
3998 nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
3999 struct list_head *dispose)
4000 {
4001 if (arg & F_UNLCK)
4002 return lease_modify(onlist, arg, dispose);
4003 else
4004 return -EAGAIN;
4005 }
4006
4007 static const struct lock_manager_operations nfsd_lease_mng_ops = {
4008 .lm_break = nfsd_break_deleg_cb,
4009 .lm_change = nfsd_change_deleg_cb,
4010 };
4011
4012 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
4013 {
4014 if (nfsd4_has_session(cstate))
4015 return nfs_ok;
4016 if (seqid == so->so_seqid - 1)
4017 return nfserr_replay_me;
4018 if (seqid == so->so_seqid)
4019 return nfs_ok;
4020 return nfserr_bad_seqid;
4021 }
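/*
 * Editor's note: a worked example of the v4.0 seqid check above. If
 * so_seqid is currently 8, an incoming seqid of 7 is a retransmission
 * of the previous request (nfserr_replay_me, answered from the replay
 * cache), 8 is the expected next request (nfs_ok), and anything else
 * is nfserr_bad_seqid. Sessions provide their own replay protection,
 * so the check is skipped for v4.1+.
 */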
4022
4023 static __be32 lookup_clientid(clientid_t *clid,
4024 struct nfsd4_compound_state *cstate,
4025 struct nfsd_net *nn)
4026 {
4027 struct nfs4_client *found;
4028
4029 if (cstate->clp) {
4030 found = cstate->clp;
4031 if (!same_clid(&found->cl_clientid, clid))
4032 return nfserr_stale_clientid;
4033 return nfs_ok;
4034 }
4035
4036 if (STALE_CLIENTID(clid, nn))
4037 return nfserr_stale_clientid;
4038
4039 /*
4040 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
4041 	 * cached already then we know this is for v4.0 and "sessions"
4042 * will be false.
4043 */
4044 WARN_ON_ONCE(cstate->session);
4045 spin_lock(&nn->client_lock);
4046 found = find_confirmed_client(clid, false, nn);
4047 if (!found) {
4048 spin_unlock(&nn->client_lock);
4049 return nfserr_expired;
4050 }
4051 atomic_inc(&found->cl_refcount);
4052 spin_unlock(&nn->client_lock);
4053
4054 /* Cache the nfs4_client in cstate! */
4055 cstate->clp = found;
4056 return nfs_ok;
4057 }
4058
4059 __be32
4060 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
4061 struct nfsd4_open *open, struct nfsd_net *nn)
4062 {
4063 clientid_t *clientid = &open->op_clientid;
4064 struct nfs4_client *clp = NULL;
4065 unsigned int strhashval;
4066 struct nfs4_openowner *oo = NULL;
4067 __be32 status;
4068
4069 if (STALE_CLIENTID(&open->op_clientid, nn))
4070 return nfserr_stale_clientid;
4071 /*
4072 * In case we need it later, after we've already created the
4073 * file and don't want to risk a further failure:
4074 */
4075 open->op_file = nfsd4_alloc_file();
4076 if (open->op_file == NULL)
4077 return nfserr_jukebox;
4078
4079 status = lookup_clientid(clientid, cstate, nn);
4080 if (status)
4081 return status;
4082 clp = cstate->clp;
4083
4084 strhashval = ownerstr_hashval(&open->op_owner);
4085 oo = find_openstateowner_str(strhashval, open, clp);
4086 open->op_openowner = oo;
4087 if (!oo) {
4088 goto new_owner;
4089 }
4090 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
4091 /* Replace unconfirmed owners without checking for replay. */
4092 release_openowner(oo);
4093 open->op_openowner = NULL;
4094 goto new_owner;
4095 }
4096 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
4097 if (status)
4098 return status;
4099 goto alloc_stateid;
4100 new_owner:
4101 oo = alloc_init_open_stateowner(strhashval, open, cstate);
4102 if (oo == NULL)
4103 return nfserr_jukebox;
4104 open->op_openowner = oo;
4105 alloc_stateid:
4106 open->op_stp = nfs4_alloc_open_stateid(clp);
4107 if (!open->op_stp)
4108 return nfserr_jukebox;
4109
4110 if (nfsd4_has_session(cstate) &&
4111 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
4112 open->op_odstate = alloc_clnt_odstate(clp);
4113 if (!open->op_odstate)
4114 return nfserr_jukebox;
4115 }
4116
4117 return nfs_ok;
4118 }
4119
4120 static inline __be32
4121 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
4122 {
4123 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
4124 return nfserr_openmode;
4125 else
4126 return nfs_ok;
4127 }
4128
4129 static int share_access_to_flags(u32 share_access)
4130 {
4131 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
4132 }
4133
4134 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
4135 {
4136 struct nfs4_stid *ret;
4137
4138 ret = find_stateid_by_type(cl, s,
4139 NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
4140 if (!ret)
4141 return NULL;
4142 return delegstateid(ret);
4143 }
4144
4145 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
4146 {
4147 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
4148 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
4149 }
4150
4151 static __be32
4152 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
4153 struct nfs4_delegation **dp)
4154 {
4155 int flags;
4156 __be32 status = nfserr_bad_stateid;
4157 struct nfs4_delegation *deleg;
4158
4159 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
4160 if (deleg == NULL)
4161 goto out;
4162 if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
4163 nfs4_put_stid(&deleg->dl_stid);
4164 if (cl->cl_minorversion)
4165 status = nfserr_deleg_revoked;
4166 goto out;
4167 }
4168 flags = share_access_to_flags(open->op_share_access);
4169 status = nfs4_check_delegmode(deleg, flags);
4170 if (status) {
4171 nfs4_put_stid(&deleg->dl_stid);
4172 goto out;
4173 }
4174 *dp = deleg;
4175 out:
4176 if (!nfsd4_is_deleg_cur(open))
4177 return nfs_ok;
4178 if (status)
4179 return status;
4180 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
4181 return nfs_ok;
4182 }
4183
4184 static inline int nfs4_access_to_access(u32 nfs4_access)
4185 {
4186 int flags = 0;
4187
4188 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
4189 flags |= NFSD_MAY_READ;
4190 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
4191 flags |= NFSD_MAY_WRITE;
4192 return flags;
4193 }
4194
4195 static inline __be32
4196 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
4197 struct nfsd4_open *open)
4198 {
4199 struct iattr iattr = {
4200 .ia_valid = ATTR_SIZE,
4201 .ia_size = 0,
4202 };
4203 if (!open->op_truncate)
4204 return 0;
4205 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
4206 return nfserr_inval;
4207 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
4208 }
4209
4210 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
4211 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
4212 struct nfsd4_open *open)
4213 {
4214 struct file *filp = NULL;
4215 __be32 status;
4216 int oflag = nfs4_access_to_omode(open->op_share_access);
4217 int access = nfs4_access_to_access(open->op_share_access);
4218 unsigned char old_access_bmap, old_deny_bmap;
4219
4220 spin_lock(&fp->fi_lock);
4221
4222 /*
4223 * Are we trying to set a deny mode that would conflict with
4224 * current access?
4225 */
4226 status = nfs4_file_check_deny(fp, open->op_share_deny);
4227 if (status != nfs_ok) {
4228 spin_unlock(&fp->fi_lock);
4229 goto out;
4230 }
4231
4232 /* set access to the file */
4233 status = nfs4_file_get_access(fp, open->op_share_access);
4234 if (status != nfs_ok) {
4235 spin_unlock(&fp->fi_lock);
4236 goto out;
4237 }
4238
4239 /* Set access bits in stateid */
4240 old_access_bmap = stp->st_access_bmap;
4241 set_access(open->op_share_access, stp);
4242
4243 /* Set new deny mask */
4244 old_deny_bmap = stp->st_deny_bmap;
4245 set_deny(open->op_share_deny, stp);
4246 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4247
4248 if (!fp->fi_fds[oflag]) {
4249 spin_unlock(&fp->fi_lock);
4250 status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
4251 if (status)
4252 goto out_put_access;
4253 spin_lock(&fp->fi_lock);
4254 if (!fp->fi_fds[oflag]) {
4255 fp->fi_fds[oflag] = filp;
4256 filp = NULL;
4257 }
4258 }
4259 spin_unlock(&fp->fi_lock);
4260 if (filp)
4261 fput(filp);
4262
4263 status = nfsd4_truncate(rqstp, cur_fh, open);
4264 if (status)
4265 goto out_put_access;
4266 out:
4267 return status;
4268 out_put_access:
4269 stp->st_access_bmap = old_access_bmap;
4270 nfs4_file_put_access(fp, open->op_share_access);
4271 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
4272 goto out;
4273 }
4274
4275 static __be32
4276 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
4277 {
4278 __be32 status;
4279 unsigned char old_deny_bmap = stp->st_deny_bmap;
4280
4281 if (!test_access(open->op_share_access, stp))
4282 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
4283
4284 /* test and set deny mode */
4285 spin_lock(&fp->fi_lock);
4286 status = nfs4_file_check_deny(fp, open->op_share_deny);
4287 if (status == nfs_ok) {
4288 set_deny(open->op_share_deny, stp);
4289 fp->fi_share_deny |=
4290 (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4291 }
4292 spin_unlock(&fp->fi_lock);
4293
4294 if (status != nfs_ok)
4295 return status;
4296
4297 status = nfsd4_truncate(rqstp, cur_fh, open);
4298 if (status != nfs_ok)
4299 reset_union_bmap_deny(old_deny_bmap, stp);
4300 return status;
4301 }
4302
4303 /* Should we give out recallable state?: */
4304 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
4305 {
4306 if (clp->cl_cb_state == NFSD4_CB_UP)
4307 return true;
4308 /*
4309 * In the sessions case, since we don't have to establish a
4310 * separate connection for callbacks, we assume it's OK
4311 * until we hear otherwise:
4312 */
4313 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
4314 }
4315
4316 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
4317 int flag)
4318 {
4319 struct file_lock *fl;
4320
4321 fl = locks_alloc_lock();
4322 if (!fl)
4323 return NULL;
4324 fl->fl_lmops = &nfsd_lease_mng_ops;
4325 fl->fl_flags = FL_DELEG;
4326 	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
4327 fl->fl_end = OFFSET_MAX;
4328 fl->fl_owner = (fl_owner_t)dp;
4329 fl->fl_pid = current->tgid;
4330 fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file;
4331 return fl;
4332 }
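/*
 * Editor's note: locks_alloc_lock() returns a zeroed lock, so with
 * fl_end = OFFSET_MAX the lease spans the whole file (fl_start stays
 * 0), and fl_owner pointing back at the delegation lets the lm_break
 * callback recover the nfs4_delegation from the file_lock. A minimal
 * usage sketch, mirroring nfs4_set_delegation() below (illustration
 * only, not compiled):
 */
#if 0
	fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
	if (!fl)
		return -ENOMEM;
	/* vfs_setlease() may consume fl and NULL the pointer; free it otherwise */
	status = vfs_setlease(fp->fi_deleg_file, fl->fl_type, &fl, NULL);
	if (fl)
		locks_free_lock(fl);
#endif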
4333
4334 static struct nfs4_delegation *
4335 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
4336 struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
4337 {
4338 int status = 0;
4339 struct nfs4_delegation *dp;
4340 struct file *filp;
4341 struct file_lock *fl;
4342
4343 /*
4344 * The fi_had_conflict and nfs_get_existing_delegation checks
4345 * here are just optimizations; we'll need to recheck them at
4346 * the end:
4347 */
4348 if (fp->fi_had_conflict)
4349 return ERR_PTR(-EAGAIN);
4350
4351 filp = find_readable_file(fp);
4352 if (!filp) {
4353 /* We should always have a readable file here */
4354 WARN_ON_ONCE(1);
4355 return ERR_PTR(-EBADF);
4356 }
4357 spin_lock(&state_lock);
4358 spin_lock(&fp->fi_lock);
4359 if (nfs4_delegation_exists(clp, fp))
4360 status = -EAGAIN;
4361 else if (!fp->fi_deleg_file) {
4362 fp->fi_deleg_file = filp;
4363 /* increment early to prevent fi_deleg_file from being
4364 * cleared */
4365 fp->fi_delegees = 1;
4366 filp = NULL;
4367 } else
4368 fp->fi_delegees++;
4369 spin_unlock(&fp->fi_lock);
4370 spin_unlock(&state_lock);
4371 if (filp)
4372 fput(filp);
4373 if (status)
4374 return ERR_PTR(status);
4375
4376 status = -ENOMEM;
4377 dp = alloc_init_deleg(clp, fp, fh, odstate);
4378 if (!dp)
4379 goto out_delegees;
4380
4381 fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
4382 if (!fl)
4383 goto out_clnt_odstate;
4384
4385 status = vfs_setlease(fp->fi_deleg_file, fl->fl_type, &fl, NULL);
4386 if (fl)
4387 locks_free_lock(fl);
4388 if (status)
4389 goto out_clnt_odstate;
4390
4391 spin_lock(&state_lock);
4392 spin_lock(&fp->fi_lock);
4393 if (fp->fi_had_conflict)
4394 status = -EAGAIN;
4395 else
4396 status = hash_delegation_locked(dp, fp);
4397 spin_unlock(&fp->fi_lock);
4398 spin_unlock(&state_lock);
4399
4400 if (status)
4401 goto out_unlock;
4402
4403 return dp;
4404 out_unlock:
4405 vfs_setlease(fp->fi_deleg_file, F_UNLCK, NULL, (void **)&dp);
4406 out_clnt_odstate:
4407 put_clnt_odstate(dp->dl_clnt_odstate);
4408 nfs4_put_stid(&dp->dl_stid);
4409 out_delegees:
4410 put_deleg_file(fp);
4411 return ERR_PTR(status);
4412 }
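/*
 * Editor's note: nfs4_set_delegation() above is effectively a
 * three-phase protocol: (1) early fi_had_conflict / existing-
 * delegation checks that are, per the function's own comment, only
 * optimizations; (2) vfs_setlease(), which can sleep and therefore
 * must run outside state_lock and fi_lock; (3) the authoritative
 * re-check of fi_had_conflict under both locks before
 * hash_delegation_locked(), since a conflicting open may have raced
 * in while the lease was being set up.
 */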
4413
4414 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
4415 {
4416 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4417 if (status == -EAGAIN)
4418 open->op_why_no_deleg = WND4_CONTENTION;
4419 else {
4420 open->op_why_no_deleg = WND4_RESOURCE;
4421 switch (open->op_deleg_want) {
4422 case NFS4_SHARE_WANT_READ_DELEG:
4423 case NFS4_SHARE_WANT_WRITE_DELEG:
4424 case NFS4_SHARE_WANT_ANY_DELEG:
4425 break;
4426 case NFS4_SHARE_WANT_CANCEL:
4427 open->op_why_no_deleg = WND4_CANCELLED;
4428 break;
4429 case NFS4_SHARE_WANT_NO_DELEG:
4430 WARN_ON_ONCE(1);
4431 }
4432 }
4433 }
4434
4435 /*
4436 * Attempt to hand out a delegation.
4437 *
4438 * Note we don't support write delegations, and won't until the vfs has
4439 * proper support for them.
4440 */
4441 static void
4442 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
4443 struct nfs4_ol_stateid *stp)
4444 {
4445 struct nfs4_delegation *dp;
4446 struct nfs4_openowner *oo = openowner(stp->st_stateowner);
4447 struct nfs4_client *clp = stp->st_stid.sc_client;
4448 int cb_up;
4449 int status = 0;
4450
4451 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
4452 open->op_recall = 0;
4453 switch (open->op_claim_type) {
4454 case NFS4_OPEN_CLAIM_PREVIOUS:
4455 if (!cb_up)
4456 open->op_recall = 1;
4457 if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
4458 goto out_no_deleg;
4459 break;
4460 case NFS4_OPEN_CLAIM_NULL:
4461 case NFS4_OPEN_CLAIM_FH:
4462 /*
4463 * Let's not give out any delegations till everyone's
4464 * had the chance to reclaim theirs, *and* until
4465 * NLM locks have all been reclaimed:
4466 */
4467 if (locks_in_grace(clp->net))
4468 goto out_no_deleg;
4469 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
4470 goto out_no_deleg;
4471 /*
4472 * Also, if the file was opened for write or
4473 * create, there's a good chance the client's
4474 * about to write to it, resulting in an
4475 * immediate recall (since we don't support
4476 * write delegations):
4477 */
4478 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
4479 goto out_no_deleg;
4480 if (open->op_create == NFS4_OPEN_CREATE)
4481 goto out_no_deleg;
4482 break;
4483 default:
4484 goto out_no_deleg;
4485 }
4486 dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
4487 if (IS_ERR(dp))
4488 goto out_no_deleg;
4489
4490 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
4491
4492 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
4493 STATEID_VAL(&dp->dl_stid.sc_stateid));
4494 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
4495 nfs4_put_stid(&dp->dl_stid);
4496 return;
4497 out_no_deleg:
4498 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
4499 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
4500 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
4501 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
4502 open->op_recall = 1;
4503 }
4504
4505 /* 4.1 client asking for a delegation? */
4506 if (open->op_deleg_want)
4507 nfsd4_open_deleg_none_ext(open, status);
4508 return;
4509 }
4510
4511 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
4512 struct nfs4_delegation *dp)
4513 {
4514 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
4515 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4516 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4517 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
4518 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
4519 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4520 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4521 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
4522 }
4523 	/* Otherwise the client must be confused, wanting a delegation
4524 	 * it already has; in that case we return neither
4525 	 * NFS4_OPEN_DELEGATE_NONE_EXT nor a why_no_deleg reason.
4526 */
4527 }
4528
4529 __be32
4530 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
4531 {
4532 struct nfsd4_compoundres *resp = rqstp->rq_resp;
4533 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
4534 struct nfs4_file *fp = NULL;
4535 struct nfs4_ol_stateid *stp = NULL;
4536 struct nfs4_delegation *dp = NULL;
4537 __be32 status;
4538 bool new_stp = false;
4539
4540 /*
4541 * Lookup file; if found, lookup stateid and check open request,
4542 * and check for delegations in the process of being recalled.
4543 * If not found, create the nfs4_file struct
4544 */
4545 	fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
4546 if (fp != open->op_file) {
4547 status = nfs4_check_deleg(cl, open, &dp);
4548 if (status)
4549 goto out;
4550 stp = nfsd4_find_and_lock_existing_open(fp, open);
4551 } else {
4552 open->op_file = NULL;
4553 status = nfserr_bad_stateid;
4554 if (nfsd4_is_deleg_cur(open))
4555 goto out;
4556 }
4557
4558 if (!stp) {
4559 stp = init_open_stateid(fp, open);
4560 if (!open->op_stp)
4561 new_stp = true;
4562 }
4563
4564 /*
4565 * OPEN the file, or upgrade an existing OPEN.
4566 * If truncate fails, the OPEN fails.
4567 *
4568 * stp is already locked.
4569 */
4570 if (!new_stp) {
4571 /* Stateid was found, this is an OPEN upgrade */
4572 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
4573 if (status) {
4574 mutex_unlock(&stp->st_mutex);
4575 goto out;
4576 }
4577 } else {
4578 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
4579 if (status) {
4580 stp->st_stid.sc_type = NFS4_CLOSED_STID;
4581 release_open_stateid(stp);
4582 mutex_unlock(&stp->st_mutex);
4583 goto out;
4584 }
4585
4586 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
4587 open->op_odstate);
4588 if (stp->st_clnt_odstate == open->op_odstate)
4589 open->op_odstate = NULL;
4590 }
4591
4592 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
4593 mutex_unlock(&stp->st_mutex);
4594
4595 if (nfsd4_has_session(&resp->cstate)) {
4596 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
4597 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4598 open->op_why_no_deleg = WND4_NOT_WANTED;
4599 goto nodeleg;
4600 }
4601 }
4602
4603 /*
4604 * Attempt to hand out a delegation. No error return, because the
4605 * OPEN succeeds even if we fail.
4606 */
4607 nfs4_open_delegation(current_fh, open, stp);
4608 nodeleg:
4609 status = nfs_ok;
4610
4611 dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
4612 STATEID_VAL(&stp->st_stid.sc_stateid));
4613 out:
4614 /* 4.1 client trying to upgrade/downgrade delegation? */
4615 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
4616 open->op_deleg_want)
4617 nfsd4_deleg_xgrade_none_ext(open, dp);
4618
4619 if (fp)
4620 put_nfs4_file(fp);
4621 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
4622 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
4623 /*
4624 * To finish the open response, we just need to set the rflags.
4625 */
4626 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
4627 if (nfsd4_has_session(&resp->cstate))
4628 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
4629 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
4630 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
4631
4632 if (dp)
4633 nfs4_put_stid(&dp->dl_stid);
4634 if (stp)
4635 nfs4_put_stid(&stp->st_stid);
4636
4637 return status;
4638 }
4639
4640 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
4641 struct nfsd4_open *open)
4642 {
4643 if (open->op_openowner) {
4644 struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
4645
4646 nfsd4_cstate_assign_replay(cstate, so);
4647 nfs4_put_stateowner(so);
4648 }
4649 if (open->op_file)
4650 kmem_cache_free(file_slab, open->op_file);
4651 if (open->op_stp)
4652 nfs4_put_stid(&open->op_stp->st_stid);
4653 if (open->op_odstate)
4654 kmem_cache_free(odstate_slab, open->op_odstate);
4655 }
4656
4657 __be32
4658 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4659 union nfsd4_op_u *u)
4660 {
4661 clientid_t *clid = &u->renew;
4662 struct nfs4_client *clp;
4663 __be32 status;
4664 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4665
4666 dprintk("process_renew(%08x/%08x): starting\n",
4667 clid->cl_boot, clid->cl_id);
4668 status = lookup_clientid(clid, cstate, nn);
4669 if (status)
4670 goto out;
4671 clp = cstate->clp;
4672 status = nfserr_cb_path_down;
4673 if (!list_empty(&clp->cl_delegations)
4674 && clp->cl_cb_state != NFSD4_CB_UP)
4675 goto out;
4676 status = nfs_ok;
4677 out:
4678 return status;
4679 }
4680
4681 void
4682 nfsd4_end_grace(struct nfsd_net *nn)
4683 {
4684 /* do nothing if grace period already ended */
4685 if (nn->grace_ended)
4686 return;
4687
4688 dprintk("NFSD: end of grace period\n");
4689 nn->grace_ended = true;
4690 /*
4691 * If the server goes down again right now, an NFSv4
4692 * client will still be allowed to reclaim after it comes back up,
4693 * even if it hasn't yet had a chance to reclaim state this time.
4694 *
4695 */
4696 nfsd4_record_grace_done(nn);
4697 /*
4698 * At this point, NFSv4 clients can still reclaim. But if the
4699 * server crashes, any that have not yet reclaimed will be out
4700 * of luck on the next boot.
4701 *
4702 * (NFSv4.1+ clients are considered to have reclaimed once they
4703 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
4704 * have reclaimed after their first OPEN.)
4705 */
4706 locks_end_grace(&nn->nfsd4_manager);
4707 /*
4708 * At this point, and once lockd and/or any other containers
4709 * exit their grace period, further reclaims will fail and
4710 * regular locking can resume.
4711 */
4712 }
4713
4714 /*
4715 * If we've waited a lease period but there are still clients trying to
4716 * reclaim, wait a little longer to give them a chance to finish.
4717 */
4718 static bool clients_still_reclaiming(struct nfsd_net *nn)
4719 {
4720 unsigned long now = get_seconds();
4721 unsigned long double_grace_period_end = nn->boot_time +
4722 2 * nn->nfsd4_lease;
4723
4724 if (!nn->somebody_reclaimed)
4725 return false;
4726 nn->somebody_reclaimed = false;
4727 /*
4728 * If we've given them *two* lease times to reclaim, and they're
4729 * still not done, give up:
4730 */
4731 if (time_after(now, double_grace_period_end))
4732 return false;
4733 return true;
4734 }
4735
4736 static time_t
4737 nfs4_laundromat(struct nfsd_net *nn)
4738 {
4739 struct nfs4_client *clp;
4740 struct nfs4_openowner *oo;
4741 struct nfs4_delegation *dp;
4742 struct nfs4_ol_stateid *stp;
4743 struct nfsd4_blocked_lock *nbl;
4744 struct list_head *pos, *next, reaplist;
4745 time_t cutoff = get_seconds() - nn->nfsd4_lease;
4746 time_t t, new_timeo = nn->nfsd4_lease;
4747
4748 dprintk("NFSD: laundromat service - starting\n");
4749
4750 if (clients_still_reclaiming(nn)) {
4751 new_timeo = 0;
4752 goto out;
4753 }
4754 nfsd4_end_grace(nn);
4755 INIT_LIST_HEAD(&reaplist);
4756 spin_lock(&nn->client_lock);
4757 list_for_each_safe(pos, next, &nn->client_lru) {
4758 clp = list_entry(pos, struct nfs4_client, cl_lru);
4759 if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
4760 t = clp->cl_time - cutoff;
4761 new_timeo = min(new_timeo, t);
4762 break;
4763 }
4764 if (mark_client_expired_locked(clp)) {
4765 dprintk("NFSD: client in use (clientid %08x)\n",
4766 clp->cl_clientid.cl_id);
4767 continue;
4768 }
4769 list_add(&clp->cl_lru, &reaplist);
4770 }
4771 spin_unlock(&nn->client_lock);
4772 list_for_each_safe(pos, next, &reaplist) {
4773 clp = list_entry(pos, struct nfs4_client, cl_lru);
4774 dprintk("NFSD: purging unused client (clientid %08x)\n",
4775 clp->cl_clientid.cl_id);
4776 list_del_init(&clp->cl_lru);
4777 expire_client(clp);
4778 }
4779 spin_lock(&state_lock);
4780 list_for_each_safe(pos, next, &nn->del_recall_lru) {
4781 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
4782 if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
4783 t = dp->dl_time - cutoff;
4784 new_timeo = min(new_timeo, t);
4785 break;
4786 }
4787 WARN_ON(!unhash_delegation_locked(dp));
4788 list_add(&dp->dl_recall_lru, &reaplist);
4789 }
4790 spin_unlock(&state_lock);
4791 while (!list_empty(&reaplist)) {
4792 dp = list_first_entry(&reaplist, struct nfs4_delegation,
4793 dl_recall_lru);
4794 list_del_init(&dp->dl_recall_lru);
4795 revoke_delegation(dp);
4796 }
4797
4798 spin_lock(&nn->client_lock);
4799 while (!list_empty(&nn->close_lru)) {
4800 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
4801 oo_close_lru);
4802 if (time_after((unsigned long)oo->oo_time,
4803 (unsigned long)cutoff)) {
4804 t = oo->oo_time - cutoff;
4805 new_timeo = min(new_timeo, t);
4806 break;
4807 }
4808 list_del_init(&oo->oo_close_lru);
4809 stp = oo->oo_last_closed_stid;
4810 oo->oo_last_closed_stid = NULL;
4811 spin_unlock(&nn->client_lock);
4812 nfs4_put_stid(&stp->st_stid);
4813 spin_lock(&nn->client_lock);
4814 }
4815 spin_unlock(&nn->client_lock);
4816
4817 /*
4818 	 * It's possible for a client to try to acquire an already held lock
4819 * that is being held for a long time, and then lose interest in it.
4820 * So, we clean out any un-revisited request after a lease period
4821 * under the assumption that the client is no longer interested.
4822 *
4823 * RFC5661, sec. 9.6 states that the client must not rely on getting
4824 * notifications and must continue to poll for locks, even when the
4825 * server supports them. Thus this shouldn't lead to clients blocking
4826 * indefinitely once the lock does become free.
4827 */
4828 BUG_ON(!list_empty(&reaplist));
4829 spin_lock(&nn->blocked_locks_lock);
4830 while (!list_empty(&nn->blocked_locks_lru)) {
4831 nbl = list_first_entry(&nn->blocked_locks_lru,
4832 struct nfsd4_blocked_lock, nbl_lru);
4833 if (time_after((unsigned long)nbl->nbl_time,
4834 (unsigned long)cutoff)) {
4835 t = nbl->nbl_time - cutoff;
4836 new_timeo = min(new_timeo, t);
4837 break;
4838 }
4839 list_move(&nbl->nbl_lru, &reaplist);
4840 list_del_init(&nbl->nbl_list);
4841 }
4842 spin_unlock(&nn->blocked_locks_lock);
4843
4844 while (!list_empty(&reaplist)) {
4845 nbl = list_first_entry(&reaplist,
4846 struct nfsd4_blocked_lock, nbl_lru);
4847 list_del_init(&nbl->nbl_lru);
4848 posix_unblock_lock(&nbl->nbl_lock);
4849 free_blocked_lock(nbl);
4850 }
4851 out:
4852 new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
4853 return new_timeo;
4854 }
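/*
 * Editor's note: a worked example of the timeout computation above,
 * assuming nfsd4_lease = 90s, so cutoff = now - 90. A client whose
 * cl_time is 60 seconds old is not yet expirable and contributes
 * t = cl_time - cutoff = 30s; the laundromat therefore re-runs in 30
 * seconds, just as that client becomes eligible for expiry.
 * NFSD_LAUNDROMAT_MINTIMEOUT bounds the result from below so the
 * worker never busy-loops.
 */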
4855
4856 static struct workqueue_struct *laundry_wq;
4857 static void laundromat_main(struct work_struct *);
4858
4859 static void
4860 laundromat_main(struct work_struct *laundry)
4861 {
4862 time_t t;
4863 struct delayed_work *dwork = to_delayed_work(laundry);
4864 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
4865 laundromat_work);
4866
4867 t = nfs4_laundromat(nn);
4868 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
4869 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
4870 }
4871
4872 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
4873 {
4874 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
4875 return nfserr_bad_stateid;
4876 return nfs_ok;
4877 }
4878
4879 static inline int
4880 access_permit_read(struct nfs4_ol_stateid *stp)
4881 {
4882 return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
4883 test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
4884 test_access(NFS4_SHARE_ACCESS_WRITE, stp);
4885 }
4886
4887 static inline int
4888 access_permit_write(struct nfs4_ol_stateid *stp)
4889 {
4890 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
4891 test_access(NFS4_SHARE_ACCESS_BOTH, stp);
4892 }
4893
4894 static
4895 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
4896 {
4897 __be32 status = nfserr_openmode;
4898
4899 	/* For lock stateids, we test the parent open, not the lock: */
4900 if (stp->st_openstp)
4901 stp = stp->st_openstp;
4902 if ((flags & WR_STATE) && !access_permit_write(stp))
4903 goto out;
4904 if ((flags & RD_STATE) && !access_permit_read(stp))
4905 goto out;
4906 status = nfs_ok;
4907 out:
4908 return status;
4909 }
4910
4911 static inline __be32
4912 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
4913 {
4914 if (ONE_STATEID(stateid) && (flags & RD_STATE))
4915 return nfs_ok;
4916 else if (opens_in_grace(net)) {
4917 /* Answer in remaining cases depends on existence of
4918 * conflicting state; so we must wait out the grace period. */
4919 return nfserr_grace;
4920 } else if (flags & WR_STATE)
4921 return nfs4_share_conflict(current_fh,
4922 NFS4_SHARE_DENY_WRITE);
4923 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
4924 return nfs4_share_conflict(current_fh,
4925 NFS4_SHARE_DENY_READ);
4926 }
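/*
 * Editor's note: the two special stateids handled above are the
 * all-zero "anonymous" stateid and the all-ones "READ bypass" stateid
 * of RFC 7530 section 9.1.4.3. The READ-bypass stateid exempts a READ
 * from share deny checking, which is why ONE_STATEID with RD_STATE
 * succeeds unconditionally, while the anonymous cases must consult
 * nfs4_share_conflict().
 */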
4927
4928 /*
4929 * Allow READ/WRITE during grace period on recovered state only for files
4930 * that are not able to provide mandatory locking.
4931 */
4932 static inline int
4933 grace_disallows_io(struct net *net, struct inode *inode)
4934 {
4935 return opens_in_grace(net) && mandatory_lock(inode);
4936 }
4937
4938 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
4939 {
4940 /*
4941 * When sessions are used the stateid generation number is ignored
4942 * when it is zero.
4943 */
4944 if (has_session && in->si_generation == 0)
4945 return nfs_ok;
4946
4947 if (in->si_generation == ref->si_generation)
4948 return nfs_ok;
4949
4950 /* If the client sends us a stateid from the future, it's buggy: */
4951 if (nfsd4_stateid_generation_after(in, ref))
4952 return nfserr_bad_stateid;
4953 /*
4954 * However, we could see a stateid from the past, even from a
4955 * non-buggy client. For example, if the client sends a lock
4956 * while some IO is outstanding, the lock may bump si_generation
4957 * while the IO is still in flight. The client could avoid that
4958 * situation by waiting for responses on all the IO requests,
4959 * but better performance may result in retrying IO that
4960 * receives an old_stateid error if requests are rarely
4961 * reordered in flight:
4962 */
4963 return nfserr_old_stateid;
4964 }
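/*
 * Editor's note: a worked example of the generation rules above. If
 * the server-side copy has si_generation == 3: an incoming 3 is
 * current (nfs_ok); 4 or more is from the future, so the client is
 * buggy (nfserr_bad_stateid); 1 or 2 is merely stale, e.g. an OPEN
 * upgrade bumped the generation while this request was in flight
 * (nfserr_old_stateid). Under sessions an incoming 0 means "don't
 * check the generation" and is accepted.
 */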
4965
4966 static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
4967 {
4968 __be32 ret;
4969
4970 spin_lock(&s->sc_lock);
4971 ret = nfsd4_verify_open_stid(s);
4972 if (ret == nfs_ok)
4973 ret = check_stateid_generation(in, &s->sc_stateid, has_session);
4974 spin_unlock(&s->sc_lock);
4975 return ret;
4976 }
4977
4978 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
4979 {
4980 if (ols->st_stateowner->so_is_open_owner &&
4981 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
4982 return nfserr_bad_stateid;
4983 return nfs_ok;
4984 }
4985
4986 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
4987 {
4988 struct nfs4_stid *s;
4989 __be32 status = nfserr_bad_stateid;
4990
4991 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
4992 CLOSE_STATEID(stateid))
4993 return status;
4994 /* Client debugging aid. */
4995 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
4996 char addr_str[INET6_ADDRSTRLEN];
4997 rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
4998 sizeof(addr_str));
4999 pr_warn_ratelimited("NFSD: client %s testing state ID "
5000 "with incorrect client ID\n", addr_str);
5001 return status;
5002 }
5003 spin_lock(&cl->cl_lock);
5004 s = find_stateid_locked(cl, stateid);
5005 if (!s)
5006 goto out_unlock;
5007 status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
5008 if (status)
5009 goto out_unlock;
5010 switch (s->sc_type) {
5011 case NFS4_DELEG_STID:
5012 status = nfs_ok;
5013 break;
5014 case NFS4_REVOKED_DELEG_STID:
5015 status = nfserr_deleg_revoked;
5016 break;
5017 case NFS4_OPEN_STID:
5018 case NFS4_LOCK_STID:
5019 status = nfsd4_check_openowner_confirmed(openlockstateid(s));
5020 break;
5021 default:
5022 printk("unknown stateid type %x\n", s->sc_type);
5023 /* Fallthrough */
5024 case NFS4_CLOSED_STID:
5025 case NFS4_CLOSED_DELEG_STID:
5026 status = nfserr_bad_stateid;
5027 }
5028 out_unlock:
5029 spin_unlock(&cl->cl_lock);
5030 return status;
5031 }
5032
5033 __be32
5034 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
5035 stateid_t *stateid, unsigned char typemask,
5036 struct nfs4_stid **s, struct nfsd_net *nn)
5037 {
5038 __be32 status;
5039 bool return_revoked = false;
5040
5041 /*
5042 	 * Only return revoked delegations if explicitly asked;
5043 	 * otherwise we report revoked or bad_stateid status.
5044 */
5045 if (typemask & NFS4_REVOKED_DELEG_STID)
5046 return_revoked = true;
5047 else if (typemask & NFS4_DELEG_STID)
5048 typemask |= NFS4_REVOKED_DELEG_STID;
5049
5050 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5051 CLOSE_STATEID(stateid))
5052 return nfserr_bad_stateid;
5053 status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
5054 if (status == nfserr_stale_clientid) {
5055 if (cstate->session)
5056 return nfserr_bad_stateid;
5057 return nfserr_stale_stateid;
5058 }
5059 if (status)
5060 return status;
5061 *s = find_stateid_by_type(cstate->clp, stateid, typemask);
5062 if (!*s)
5063 return nfserr_bad_stateid;
5064 if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
5065 nfs4_put_stid(*s);
5066 if (cstate->minorversion)
5067 return nfserr_deleg_revoked;
5068 return nfserr_bad_stateid;
5069 }
5070 return nfs_ok;
5071 }
5072
5073 static struct file *
5074 nfs4_find_file(struct nfs4_stid *s, int flags)
5075 {
5076 if (!s)
5077 return NULL;
5078
5079 switch (s->sc_type) {
5080 case NFS4_DELEG_STID:
5081 if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
5082 return NULL;
5083 return get_file(s->sc_file->fi_deleg_file);
5084 case NFS4_OPEN_STID:
5085 case NFS4_LOCK_STID:
5086 if (flags & RD_STATE)
5087 return find_readable_file(s->sc_file);
5088 else
5089 return find_writeable_file(s->sc_file);
5090 break;
5091 }
5092
5093 return NULL;
5094 }
5095
5096 static __be32
5097 nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
5098 {
5099 __be32 status;
5100
5101 status = nfsd4_check_openowner_confirmed(ols);
5102 if (status)
5103 return status;
5104 return nfs4_check_openmode(ols, flags);
5105 }
5106
5107 static __be32
5108 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
5109 struct file **filpp, bool *tmp_file, int flags)
5110 {
5111 int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
5112 struct file *file;
5113 __be32 status;
5114
5115 file = nfs4_find_file(s, flags);
5116 if (file) {
5117 status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
5118 acc | NFSD_MAY_OWNER_OVERRIDE);
5119 if (status) {
5120 fput(file);
5121 return status;
5122 }
5123
5124 *filpp = file;
5125 } else {
5126 status = nfsd_open(rqstp, fhp, S_IFREG, acc, filpp);
5127 if (status)
5128 return status;
5129
5130 if (tmp_file)
5131 *tmp_file = true;
5132 }
5133
5134 return 0;
5135 }
5136
5137 /*
5138 * Checks for stateid operations
5139 */
5140 __be32
5141 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
5142 struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
5143 stateid_t *stateid, int flags, struct file **filpp, bool *tmp_file)
5144 {
5145 struct inode *ino = d_inode(fhp->fh_dentry);
5146 struct net *net = SVC_NET(rqstp);
5147 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5148 struct nfs4_stid *s = NULL;
5149 __be32 status;
5150
5151 if (filpp)
5152 *filpp = NULL;
5153 if (tmp_file)
5154 *tmp_file = false;
5155
5156 if (grace_disallows_io(net, ino))
5157 return nfserr_grace;
5158
5159 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
5160 status = check_special_stateids(net, fhp, stateid, flags);
5161 goto done;
5162 }
5163
5164 status = nfsd4_lookup_stateid(cstate, stateid,
5165 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
5166 &s, nn);
5167 if (status)
5168 return status;
5169 status = nfsd4_stid_check_stateid_generation(stateid, s,
5170 nfsd4_has_session(cstate));
5171 if (status)
5172 goto out;
5173
5174 switch (s->sc_type) {
5175 case NFS4_DELEG_STID:
5176 status = nfs4_check_delegmode(delegstateid(s), flags);
5177 break;
5178 case NFS4_OPEN_STID:
5179 case NFS4_LOCK_STID:
5180 status = nfs4_check_olstateid(fhp, openlockstateid(s), flags);
5181 break;
5182 default:
5183 status = nfserr_bad_stateid;
5184 break;
5185 }
5186 if (status)
5187 goto out;
5188 status = nfs4_check_fh(fhp, s);
5189
5190 done:
5191 if (!status && filpp)
5192 status = nfs4_check_file(rqstp, fhp, s, filpp, tmp_file, flags);
5193 out:
5194 if (s)
5195 nfs4_put_stid(s);
5196 return status;
5197 }
5198
5199 /*
5200 * Test if the stateid is valid
5201 */
5202 __be32
5203 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5204 union nfsd4_op_u *u)
5205 {
5206 struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
5207 struct nfsd4_test_stateid_id *stateid;
5208 struct nfs4_client *cl = cstate->session->se_client;
5209
5210 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
5211 stateid->ts_id_status =
5212 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
5213
5214 return nfs_ok;
5215 }
5216
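/*
* Helper for FREE_STATEID: a lock stateid may only be freed once its
* generation checks out and the lockowner no longer holds any locks on
* the file; otherwise fail with nfserr_locks_held.
*/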
5217 static __be32
5218 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
5219 {
5220 struct nfs4_ol_stateid *stp = openlockstateid(s);
5221 __be32 ret;
5222
5223 ret = nfsd4_lock_ol_stateid(stp);
5224 if (ret)
5225 goto out_put_stid;
5226
5227 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5228 if (ret)
5229 goto out;
5230
5231 ret = nfserr_locks_held;
5232 if (check_for_locks(stp->st_stid.sc_file,
5233 lockowner(stp->st_stateowner)))
5234 goto out;
5235
5236 release_lock_stateid(stp);
5237 ret = nfs_ok;
5238
5239 out:
5240 mutex_unlock(&stp->st_mutex);
5241 out_put_stid:
5242 nfs4_put_stid(s);
5243 return ret;
5244 }
5245
5246 __be32
5247 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5248 union nfsd4_op_u *u)
5249 {
5250 struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
5251 stateid_t *stateid = &free_stateid->fr_stateid;
5252 struct nfs4_stid *s;
5253 struct nfs4_delegation *dp;
5254 struct nfs4_client *cl = cstate->session->se_client;
5255 __be32 ret = nfserr_bad_stateid;
5256
5257 spin_lock(&cl->cl_lock);
5258 s = find_stateid_locked(cl, stateid);
5259 if (!s)
5260 goto out_unlock;
5261 spin_lock(&s->sc_lock);
5262 switch (s->sc_type) {
5263 case NFS4_DELEG_STID:
5264 ret = nfserr_locks_held;
5265 break;
5266 case NFS4_OPEN_STID:
5267 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5268 if (ret)
5269 break;
5270 ret = nfserr_locks_held;
5271 break;
5272 case NFS4_LOCK_STID:
5273 spin_unlock(&s->sc_lock);
5274 refcount_inc(&s->sc_count);
5275 spin_unlock(&cl->cl_lock);
5276 ret = nfsd4_free_lock_stateid(stateid, s);
5277 goto out;
5278 case NFS4_REVOKED_DELEG_STID:
5279 spin_unlock(&s->sc_lock);
5280 dp = delegstateid(s);
5281 list_del_init(&dp->dl_recall_lru);
5282 spin_unlock(&cl->cl_lock);
5283 nfs4_put_stid(s);
5284 ret = nfs_ok;
5285 goto out;
5286 /* Default falls through and returns nfserr_bad_stateid */
5287 }
5288 spin_unlock(&s->sc_lock);
5289 out_unlock:
5290 spin_unlock(&cl->cl_lock);
5291 out:
5292 return ret;
5293 }
5294
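/* Map an NFSv4 lock type to the open access mode needed to set it */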
5295 static inline int
5296 setlkflg (int type)
5297 {
5298 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
5299 RD_STATE : WR_STATE;
5300 }
5301
5302 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
5303 {
5304 struct svc_fh *current_fh = &cstate->current_fh;
5305 struct nfs4_stateowner *sop = stp->st_stateowner;
5306 __be32 status;
5307
5308 status = nfsd4_check_seqid(cstate, sop, seqid);
5309 if (status)
5310 return status;
5311 status = nfsd4_lock_ol_stateid(stp);
5312 if (status != nfs_ok)
5313 return status;
5314 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
5315 if (status == nfs_ok)
5316 status = nfs4_check_fh(current_fh, &stp->st_stid);
5317 if (status != nfs_ok)
5318 mutex_unlock(&stp->st_mutex);
5319 return status;
5320 }
5321
5322 /*
5323 * Checks for sequence-id-mutating operations: verify the seqid and the
* stateid, and return the stateid with its st_mutex held and a reference taken.
5324 */
5325 static __be32
5326 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
5327 stateid_t *stateid, char typemask,
5328 struct nfs4_ol_stateid **stpp,
5329 struct nfsd_net *nn)
5330 {
5331 __be32 status;
5332 struct nfs4_stid *s;
5333 struct nfs4_ol_stateid *stp = NULL;
5334
5335 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
5336 seqid, STATEID_VAL(stateid));
5337
5338 *stpp = NULL;
5339 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
5340 if (status)
5341 return status;
5342 stp = openlockstateid(s);
5343 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
5344
5345 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
5346 if (!status)
5347 *stpp = stp;
5348 else
5349 nfs4_put_stid(&stp->st_stid);
5350 return status;
5351 }
5352
5353 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
5354 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
5355 {
5356 __be32 status;
5357 struct nfs4_openowner *oo;
5358 struct nfs4_ol_stateid *stp;
5359
5360 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
5361 NFS4_OPEN_STID, &stp, nn);
5362 if (status)
5363 return status;
5364 oo = openowner(stp->st_stateowner);
5365 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
5366 mutex_unlock(&stp->st_mutex);
5367 nfs4_put_stid(&stp->st_stid);
5368 return nfserr_bad_stateid;
5369 }
5370 *stpp = stp;
5371 return nfs_ok;
5372 }
5373
5374 __be32
5375 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5376 union nfsd4_op_u *u)
5377 {
5378 struct nfsd4_open_confirm *oc = &u->open_confirm;
5379 __be32 status;
5380 struct nfs4_openowner *oo;
5381 struct nfs4_ol_stateid *stp;
5382 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5383
5384 dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
5385 cstate->current_fh.fh_dentry);
5386
5387 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
5388 if (status)
5389 return status;
5390
5391 status = nfs4_preprocess_seqid_op(cstate,
5392 oc->oc_seqid, &oc->oc_req_stateid,
5393 NFS4_OPEN_STID, &stp, nn);
5394 if (status)
5395 goto out;
5396 oo = openowner(stp->st_stateowner);
5397 status = nfserr_bad_stateid;
5398 if (oo->oo_flags & NFS4_OO_CONFIRMED) {
5399 mutex_unlock(&stp->st_mutex);
5400 goto put_stateid;
5401 }
5402 oo->oo_flags |= NFS4_OO_CONFIRMED;
5403 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
5404 mutex_unlock(&stp->st_mutex);
5405 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
5406 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
5407
5408 nfsd4_client_record_create(oo->oo_owner.so_client);
5409 status = nfs_ok;
5410 put_stateid:
5411 nfs4_put_stid(&stp->st_stid);
5412 out:
5413 nfsd4_bump_seqid(cstate, status);
5414 return status;
5415 }
5416
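/*
* Helpers for OPEN_DOWNGRADE: drop any share access the stateid holds
* beyond what the client is downgrading to.  Downgrading to READ drops
* WRITE and BOTH access; downgrading to WRITE drops READ and BOTH;
* downgrading to BOTH drops nothing.
*/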
5417 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
5418 {
5419 if (!test_access(access, stp))
5420 return;
5421 nfs4_file_put_access(stp->st_stid.sc_file, access);
5422 clear_access(access, stp);
5423 }
5424
5425 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
5426 {
5427 switch (to_access) {
5428 case NFS4_SHARE_ACCESS_READ:
5429 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
5430 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
5431 break;
5432 case NFS4_SHARE_ACCESS_WRITE:
5433 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
5434 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
5435 break;
5436 case NFS4_SHARE_ACCESS_BOTH:
5437 break;
5438 default:
5439 WARN_ON_ONCE(1);
5440 }
5441 }
5442
5443 __be32
5444 nfsd4_open_downgrade(struct svc_rqst *rqstp,
5445 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
5446 {
5447 struct nfsd4_open_downgrade *od = &u->open_downgrade;
5448 __be32 status;
5449 struct nfs4_ol_stateid *stp;
5450 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5451
5452 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
5453 cstate->current_fh.fh_dentry);
5454
5455 /* We don't yet support WANT bits: */
5456 if (od->od_deleg_want)
5457 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
5458 od->od_deleg_want);
5459
5460 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
5461 &od->od_stateid, &stp, nn);
5462 if (status)
5463 goto out;
5464 status = nfserr_inval;
5465 if (!test_access(od->od_share_access, stp)) {
5466 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
5467 stp->st_access_bmap, od->od_share_access);
5468 goto put_stateid;
5469 }
5470 if (!test_deny(od->od_share_deny, stp)) {
5471 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
5472 stp->st_deny_bmap, od->od_share_deny);
5473 goto put_stateid;
5474 }
5475 nfs4_stateid_downgrade(stp, od->od_share_access);
5476 reset_union_bmap_deny(od->od_share_deny, stp);
5477 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
5478 status = nfs_ok;
5479 put_stateid:
5480 mutex_unlock(&stp->st_mutex);
5481 nfs4_put_stid(&stp->st_stid);
5482 out:
5483 nfsd4_bump_seqid(cstate, status);
5484 return status;
5485 }
5486
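/*
* Unhash and release a stateid being closed.  For NFSv4.1+ clients the
* stateid can be freed immediately; for v4.0, an unhashed stateid is
* moved to the close LRU instead so that a CLOSE replay can still be
* handled.
*/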
5487 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
5488 {
5489 struct nfs4_client *clp = s->st_stid.sc_client;
5490 bool unhashed;
5491 LIST_HEAD(reaplist);
5492
5493 spin_lock(&clp->cl_lock);
5494 unhashed = unhash_open_stateid(s, &reaplist);
5495
5496 if (clp->cl_minorversion) {
5497 if (unhashed)
5498 put_ol_stateid_locked(s, &reaplist);
5499 spin_unlock(&clp->cl_lock);
5500 free_ol_stateid_reaplist(&reaplist);
5501 } else {
5502 spin_unlock(&clp->cl_lock);
5503 free_ol_stateid_reaplist(&reaplist);
5504 if (unhashed)
5505 move_to_close_lru(s, clp->net);
5506 }
5507 }
5508
5509 /*
5510 * nfs4_unlock_state() called after encode
5511 */
5512 __be32
5513 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5514 union nfsd4_op_u *u)
5515 {
5516 struct nfsd4_close *close = &u->close;
5517 __be32 status;
5518 struct nfs4_ol_stateid *stp;
5519 struct net *net = SVC_NET(rqstp);
5520 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5521
5522 dprintk("NFSD: nfsd4_close on file %pd\n",
5523 cstate->current_fh.fh_dentry);
5524
5525 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
5526 &close->cl_stateid,
5527 NFS4_OPEN_STID|NFS4_CLOSED_STID,
5528 &stp, nn);
5529 nfsd4_bump_seqid(cstate, status);
5530 if (status)
5531 goto out;
5532
5533 stp->st_stid.sc_type = NFS4_CLOSED_STID;
5534
5535 /*
5536 * Technically we don't _really_ have to increment or copy it, since
5537 * it should just be gone after this operation and we clobber the
5538 * copied value below, but we continue to do so here just to ensure
5539 * that racing ops see that there was a state change.
5540 */
5541 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
5542
5543 nfsd4_close_open_stateid(stp);
5544 mutex_unlock(&stp->st_mutex);
5545
5546 /* v4.1+ suggests that we send a special stateid in here, since the
5547 * clients should just ignore this anyway. Since this is not useful
5548 * for v4.0 clients either, we set it to the special close_stateid
5549 * universally.
5550 *
5551 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
5552 */
5553 memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));
5554
5555 /* put reference from nfs4_preprocess_seqid_op */
5556 nfs4_put_stid(&stp->st_stid);
5557 out:
5558 return status;
5559 }
5560
5561 __be32
5562 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5563 union nfsd4_op_u *u)
5564 {
5565 struct nfsd4_delegreturn *dr = &u->delegreturn;
5566 struct nfs4_delegation *dp;
5567 stateid_t *stateid = &dr->dr_stateid;
5568 struct nfs4_stid *s;
5569 __be32 status;
5570 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5571
5572 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
5573 return status;
5574
5575 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
5576 if (status)
5577 goto out;
5578 dp = delegstateid(s);
5579 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
5580 if (status)
5581 goto put_stateid;
5582
5583 destroy_delegation(dp);
5584 put_stateid:
5585 nfs4_put_stid(&dp->dl_stid);
5586 out:
5587 return status;
5588 }
5589
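/*
* First offset past the end of a lock range; clamps to NFS4_MAX_UINT64
* if start + len would wrap the 64-bit offset space.
*/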
5590 static inline u64
5591 end_offset(u64 start, u64 len)
5592 {
5593 u64 end;
5594
5595 end = start + len;
5596 return end >= start ? end: NFS4_MAX_UINT64;
5597 }
5598
5599 /* last octet in a range */
5600 static inline u64
5601 last_byte_offset(u64 start, u64 len)
5602 {
5603 u64 end;
5604
5605 WARN_ON_ONCE(!len);
5606 end = start + len;
5607 return end > start ? end - 1: NFS4_MAX_UINT64;
5608 }
5609
5610 /*
5611 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
5612 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
5613 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
5614 * locking, this prevents us from being completely protocol-compliant. The
5615 * real solution to this problem is to start using unsigned file offsets in
5616 * the VFS, but this is a very deep change!
5617 */
5618 static inline void
5619 nfs4_transform_lock_offset(struct file_lock *lock)
5620 {
5621 if (lock->fl_start < 0)
5622 lock->fl_start = OFFSET_MAX;
5623 if (lock->fl_end < 0)
5624 lock->fl_end = OFFSET_MAX;
5625 }
5626
5627 static fl_owner_t
5628 nfsd4_fl_get_owner(fl_owner_t owner)
5629 {
5630 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
5631
5632 nfs4_get_stateowner(&lo->lo_owner);
5633 return owner;
5634 }
5635
5636 static void
5637 nfsd4_fl_put_owner(fl_owner_t owner)
5638 {
5639 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
5640
5641 if (lo)
5642 nfs4_put_stateowner(&lo->lo_owner);
5643 }
5644
5645 static void
5646 nfsd4_lm_notify(struct file_lock *fl)
5647 {
5648 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
5649 struct net *net = lo->lo_owner.so_client->net;
5650 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5651 struct nfsd4_blocked_lock *nbl = container_of(fl,
5652 struct nfsd4_blocked_lock, nbl_lock);
5653 bool queue = false;
5654
5655 /* An empty list means that something else is going to be using it */
5656 spin_lock(&nn->blocked_locks_lock);
5657 if (!list_empty(&nbl->nbl_list)) {
5658 list_del_init(&nbl->nbl_list);
5659 list_del_init(&nbl->nbl_lru);
5660 queue = true;
5661 }
5662 spin_unlock(&nn->blocked_locks_lock);
5663
5664 if (queue)
5665 nfsd4_run_cb(&nbl->nbl_cb);
5666 }
5667
5668 static const struct lock_manager_operations nfsd_posix_mng_ops = {
5669 .lm_notify = nfsd4_lm_notify,
5670 .lm_get_owner = nfsd4_fl_get_owner,
5671 .lm_put_owner = nfsd4_fl_put_owner,
5672 };
5673
5674 static inline void
5675 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
5676 {
5677 struct nfs4_lockowner *lo;
5678
5679 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
5680 lo = (struct nfs4_lockowner *) fl->fl_owner;
5681 deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
5682 lo->lo_owner.so_owner.len, GFP_KERNEL);
5683 if (!deny->ld_owner.data)
5684 /* We just don't care that much */
5685 goto nevermind;
5686 deny->ld_owner.len = lo->lo_owner.so_owner.len;
5687 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
5688 } else {
5689 nevermind:
5690 deny->ld_owner.len = 0;
5691 deny->ld_owner.data = NULL;
5692 deny->ld_clientid.cl_boot = 0;
5693 deny->ld_clientid.cl_id = 0;
5694 }
5695 deny->ld_start = fl->fl_start;
5696 deny->ld_length = NFS4_MAX_UINT64;
5697 if (fl->fl_end != NFS4_MAX_UINT64)
5698 deny->ld_length = fl->fl_end - fl->fl_start + 1;
5699 deny->ld_type = NFS4_READ_LT;
5700 if (fl->fl_type != F_RDLCK)
5701 deny->ld_type = NFS4_WRITE_LT;
5702 }
5703
5704 static struct nfs4_lockowner *
5705 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
5706 {
5707 unsigned int strhashval = ownerstr_hashval(owner);
5708 struct nfs4_stateowner *so;
5709
5710 lockdep_assert_held(&clp->cl_lock);
5711
5712 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
5713 so_strhash) {
5714 if (so->so_is_open_owner)
5715 continue;
5716 if (same_owner_str(so, owner))
5717 return lockowner(nfs4_get_stateowner(so));
5718 }
5719 return NULL;
5720 }
5721
5722 static struct nfs4_lockowner *
5723 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
5724 {
5725 struct nfs4_lockowner *lo;
5726
5727 spin_lock(&clp->cl_lock);
5728 lo = find_lockowner_str_locked(clp, owner);
5729 spin_unlock(&clp->cl_lock);
5730 return lo;
5731 }
5732
5733 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
5734 {
5735 unhash_lockowner_locked(lockowner(sop));
5736 }
5737
5738 static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
5739 {
5740 struct nfs4_lockowner *lo = lockowner(sop);
5741
5742 kmem_cache_free(lockowner_slab, lo);
5743 }
5744
5745 static const struct nfs4_stateowner_operations lockowner_ops = {
5746 .so_unhash = nfs4_unhash_lockowner,
5747 .so_free = nfs4_free_lockowner,
5748 };
5749
5750 /*
5751 * Alloc a lock owner structure.
5752 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
5753 * occurred.
5754 *
5755 * strhashval = ownerstr_hashval
5756 */
5757 static struct nfs4_lockowner *
5758 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
5759 struct nfs4_ol_stateid *open_stp,
5760 struct nfsd4_lock *lock)
5761 {
5762 struct nfs4_lockowner *lo, *ret;
5763
5764 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
5765 if (!lo)
5766 return NULL;
5767 INIT_LIST_HEAD(&lo->lo_blocked);
5768 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
5769 lo->lo_owner.so_is_open_owner = 0;
5770 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
5771 lo->lo_owner.so_ops = &lockowner_ops;
5772 spin_lock(&clp->cl_lock);
5773 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
5774 if (ret == NULL) {
5775 list_add(&lo->lo_owner.so_strhash,
5776 &clp->cl_ownerstr_hashtbl[strhashval]);
5777 ret = lo;
5778 } else
5779 nfs4_free_stateowner(&lo->lo_owner);
5780
5781 spin_unlock(&clp->cl_lock);
5782 return ret;
5783 }
5784
5785 static struct nfs4_ol_stateid *
5786 find_lock_stateid(const struct nfs4_lockowner *lo,
5787 const struct nfs4_ol_stateid *ost)
5788 {
5789 struct nfs4_ol_stateid *lst;
5790
5791 lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
5792
5793 /* If ost is not hashed, ost->st_locks will not be valid */
5794 if (!nfs4_ol_stateid_unhashed(ost))
5795 list_for_each_entry(lst, &ost->st_locks, st_locks) {
5796 if (lst->st_stateowner == &lo->lo_owner) {
5797 refcount_inc(&lst->st_stid.sc_count);
5798 return lst;
5799 }
5800 }
5801 return NULL;
5802 }
5803
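/*
* Attach a new lock stateid to the open stateid, guarding against a
* racing thread that may already have inserted one for the same
* lockowner: if one is found it is reused (retrying if it is being
* torn down); if the open stateid was unhashed meanwhile, give up.
*/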
5804 static struct nfs4_ol_stateid *
5805 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
5806 struct nfs4_file *fp, struct inode *inode,
5807 struct nfs4_ol_stateid *open_stp)
5808 {
5809 struct nfs4_client *clp = lo->lo_owner.so_client;
5810 struct nfs4_ol_stateid *retstp;
5811
5812 mutex_init(&stp->st_mutex);
5813 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
5814 retry:
5815 spin_lock(&clp->cl_lock);
5816 if (nfs4_ol_stateid_unhashed(open_stp))
5817 goto out_close;
5818 retstp = find_lock_stateid(lo, open_stp);
5819 if (retstp)
5820 goto out_found;
5821 refcount_inc(&stp->st_stid.sc_count);
5822 stp->st_stid.sc_type = NFS4_LOCK_STID;
5823 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
5824 get_nfs4_file(fp);
5825 stp->st_stid.sc_file = fp;
5826 stp->st_access_bmap = 0;
5827 stp->st_deny_bmap = open_stp->st_deny_bmap;
5828 stp->st_openstp = open_stp;
5829 spin_lock(&fp->fi_lock);
5830 list_add(&stp->st_locks, &open_stp->st_locks);
5831 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
5832 list_add(&stp->st_perfile, &fp->fi_stateids);
5833 spin_unlock(&fp->fi_lock);
5834 spin_unlock(&clp->cl_lock);
5835 return stp;
5836 out_found:
5837 spin_unlock(&clp->cl_lock);
5838 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
5839 nfs4_put_stid(&retstp->st_stid);
5840 goto retry;
5841 }
5842 /* To keep mutex tracking happy */
5843 mutex_unlock(&stp->st_mutex);
5844 return retstp;
5845 out_close:
5846 spin_unlock(&clp->cl_lock);
5847 mutex_unlock(&stp->st_mutex);
5848 return NULL;
5849 }
5850
5851 static struct nfs4_ol_stateid *
5852 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
5853 struct inode *inode, struct nfs4_ol_stateid *ost,
5854 bool *new)
5855 {
5856 struct nfs4_stid *ns = NULL;
5857 struct nfs4_ol_stateid *lst;
5858 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5859 struct nfs4_client *clp = oo->oo_owner.so_client;
5860
5861 *new = false;
5862 spin_lock(&clp->cl_lock);
5863 lst = find_lock_stateid(lo, ost);
5864 spin_unlock(&clp->cl_lock);
5865 if (lst != NULL) {
5866 if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
5867 goto out;
5868 nfs4_put_stid(&lst->st_stid);
5869 }
5870 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
5871 if (ns == NULL)
5872 return NULL;
5873
5874 lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
5875 if (lst == openlockstateid(ns))
5876 *new = true;
5877 else
5878 nfs4_put_stid(ns);
5879 out:
5880 return lst;
5881 }
5882
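/*
* A lock range is invalid if its length is zero, or if the range is
* bounded (length != NFS4_MAX_UINT64) but offset + length would wrap
* past the top of the 64-bit offset space (i.e. length > ~offset).
*/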
5883 static int
5884 check_lock_length(u64 offset, u64 length)
5885 {
5886 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
5887 (length > ~offset)));
5888 }
5889
5890 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
5891 {
5892 struct nfs4_file *fp = lock_stp->st_stid.sc_file;
5893
5894 lockdep_assert_held(&fp->fi_lock);
5895
5896 if (test_access(access, lock_stp))
5897 return;
5898 __nfs4_file_get_access(fp, access);
5899 set_access(access, lock_stp);
5900 }
5901
5902 static __be32
5903 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
5904 struct nfs4_ol_stateid *ost,
5905 struct nfsd4_lock *lock,
5906 struct nfs4_ol_stateid **plst, bool *new)
5907 {
5908 __be32 status;
5909 struct nfs4_file *fi = ost->st_stid.sc_file;
5910 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5911 struct nfs4_client *cl = oo->oo_owner.so_client;
5912 struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
5913 struct nfs4_lockowner *lo;
5914 struct nfs4_ol_stateid *lst;
5915 unsigned int strhashval;
5916
5917 lo = find_lockowner_str(cl, &lock->lk_new_owner);
5918 if (!lo) {
5919 strhashval = ownerstr_hashval(&lock->lk_new_owner);
5920 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
5921 if (lo == NULL)
5922 return nfserr_jukebox;
5923 } else {
5924 /* with an existing lockowner, seqids must be the same */
5925 status = nfserr_bad_seqid;
5926 if (!cstate->minorversion &&
5927 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
5928 goto out;
5929 }
5930
5931 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
5932 if (lst == NULL) {
5933 status = nfserr_jukebox;
5934 goto out;
5935 }
5936
5937 status = nfs_ok;
5938 *plst = lst;
5939 out:
5940 nfs4_put_stateowner(&lo->lo_owner);
5941 return status;
5942 }
5943
5944 /*
5945 * LOCK operation
5946 */
5947 __be32
5948 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5949 union nfsd4_op_u *u)
5950 {
5951 struct nfsd4_lock *lock = &u->lock;
5952 struct nfs4_openowner *open_sop = NULL;
5953 struct nfs4_lockowner *lock_sop = NULL;
5954 struct nfs4_ol_stateid *lock_stp = NULL;
5955 struct nfs4_ol_stateid *open_stp = NULL;
5956 struct nfs4_file *fp;
5957 struct file *filp = NULL;
5958 struct nfsd4_blocked_lock *nbl = NULL;
5959 struct file_lock *file_lock = NULL;
5960 struct file_lock *conflock = NULL;
5961 __be32 status = 0;
5962 int lkflg;
5963 int err;
5964 bool new = false;
5965 unsigned char fl_type;
5966 unsigned int fl_flags = FL_POSIX;
5967 struct net *net = SVC_NET(rqstp);
5968 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5969
5970 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
5971 (long long) lock->lk_offset,
5972 (long long) lock->lk_length);
5973
5974 if (check_lock_length(lock->lk_offset, lock->lk_length))
5975 return nfserr_inval;
5976
5977 if ((status = fh_verify(rqstp, &cstate->current_fh,
5978 S_IFREG, NFSD_MAY_LOCK))) {
5979 dprintk("NFSD: nfsd4_lock: permission denied!\n");
5980 return status;
5981 }
5982
5983 if (lock->lk_is_new) {
5984 if (nfsd4_has_session(cstate))
5985 /* See rfc 5661 18.10.3: given clientid is ignored: */
5986 memcpy(&lock->lk_new_clientid,
5987 &cstate->session->se_client->cl_clientid,
5988 sizeof(clientid_t));
5989
5990 status = nfserr_stale_clientid;
5991 if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
5992 goto out;
5993
5994 /* validate and update open stateid and open seqid */
5995 status = nfs4_preprocess_confirmed_seqid_op(cstate,
5996 lock->lk_new_open_seqid,
5997 &lock->lk_new_open_stateid,
5998 &open_stp, nn);
5999 if (status)
6000 goto out;
6001 mutex_unlock(&open_stp->st_mutex);
6002 open_sop = openowner(open_stp->st_stateowner);
6003 status = nfserr_bad_stateid;
6004 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
6005 &lock->lk_new_clientid))
6006 goto out;
6007 status = lookup_or_create_lock_state(cstate, open_stp, lock,
6008 &lock_stp, &new);
6009 } else {
6010 status = nfs4_preprocess_seqid_op(cstate,
6011 lock->lk_old_lock_seqid,
6012 &lock->lk_old_lock_stateid,
6013 NFS4_LOCK_STID, &lock_stp, nn);
6014 }
6015 if (status)
6016 goto out;
6017 lock_sop = lockowner(lock_stp->st_stateowner);
6018
6019 lkflg = setlkflg(lock->lk_type);
6020 status = nfs4_check_openmode(lock_stp, lkflg);
6021 if (status)
6022 goto out;
6023
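/*
* During the grace period only reclaims may be granted; once it has
* ended, reclaims must be refused.
*/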
6024 status = nfserr_grace;
6025 if (locks_in_grace(net) && !lock->lk_reclaim)
6026 goto out;
6027 status = nfserr_no_grace;
6028 if (!locks_in_grace(net) && lock->lk_reclaim)
6029 goto out;
6030
6031 fp = lock_stp->st_stid.sc_file;
6032 switch (lock->lk_type) {
6033 case NFS4_READW_LT:
6034 if (nfsd4_has_session(cstate))
6035 fl_flags |= FL_SLEEP;
6036 /* Fallthrough */
6037 case NFS4_READ_LT:
6038 spin_lock(&fp->fi_lock);
6039 filp = find_readable_file_locked(fp);
6040 if (filp)
6041 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
6042 spin_unlock(&fp->fi_lock);
6043 fl_type = F_RDLCK;
6044 break;
6045 case NFS4_WRITEW_LT:
6046 if (nfsd4_has_session(cstate))
6047 fl_flags |= FL_SLEEP;
6048 /* Fallthrough */
6049 case NFS4_WRITE_LT:
6050 spin_lock(&fp->fi_lock);
6051 filp = find_writeable_file_locked(fp);
6052 if (filp)
6053 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
6054 spin_unlock(&fp->fi_lock);
6055 fl_type = F_WRLCK;
6056 break;
6057 default:
6058 status = nfserr_inval;
6059 goto out;
6060 }
6061
6062 if (!filp) {
6063 status = nfserr_openmode;
6064 goto out;
6065 }
6066
6067 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
6068 if (!nbl) {
6069 dprintk("NFSD: %s: unable to allocate block!\n", __func__);
6070 status = nfserr_jukebox;
6071 goto out;
6072 }
6073
6074 file_lock = &nbl->nbl_lock;
6075 file_lock->fl_type = fl_type;
6076 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
6077 file_lock->fl_pid = current->tgid;
6078 file_lock->fl_file = filp;
6079 file_lock->fl_flags = fl_flags;
6080 file_lock->fl_lmops = &nfsd_posix_mng_ops;
6081 file_lock->fl_start = lock->lk_offset;
6082 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
6083 nfs4_transform_lock_offset(file_lock);
6084
6085 conflock = locks_alloc_lock();
6086 if (!conflock) {
6087 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6088 status = nfserr_jukebox;
6089 goto out;
6090 }
6091
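/*
* For a blocking (READW/WRITEW) request, queue the blocked lock before
* calling vfs_lock_file(): if the lock is deferred, nfsd4_lm_notify()
* must be able to find it on these lists as soon as the conflicting
* lock goes away.
*/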
6092 if (fl_flags & FL_SLEEP) {
6093 nbl->nbl_time = get_seconds();
6094 spin_lock(&nn->blocked_locks_lock);
6095 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
6096 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
6097 spin_unlock(&nn->blocked_locks_lock);
6098 }
6099
6100 err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
6101 switch (err) {
6102 case 0: /* success! */
6103 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
6104 status = 0;
6105 if (lock->lk_reclaim)
6106 nn->somebody_reclaimed = true;
6107 break;
6108 case FILE_LOCK_DEFERRED:
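/* the blocked-lock lists now own nbl; don't free it at out: */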
6109 nbl = NULL;
6110 /* Fallthrough */
6111 case -EAGAIN: /* conflock holds conflicting lock */
6112 status = nfserr_denied;
6113 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
6114 nfs4_set_lock_denied(conflock, &lock->lk_denied);
6115 break;
6116 case -EDEADLK:
6117 status = nfserr_deadlock;
6118 break;
6119 default:
6120 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n", err);
6121 status = nfserrno(err);
6122 break;
6123 }
6124 out:
6125 if (nbl) {
6126 /* dequeue it if we queued it before */
6127 if (fl_flags & FL_SLEEP) {
6128 spin_lock(&nn->blocked_locks_lock);
6129 list_del_init(&nbl->nbl_list);
6130 list_del_init(&nbl->nbl_lru);
6131 spin_unlock(&nn->blocked_locks_lock);
6132 }
6133 free_blocked_lock(nbl);
6134 }
6135 if (filp)
6136 fput(filp);
6137 if (lock_stp) {
6138 /* Bump seqid manually if the 4.0 replay owner is openowner */
6139 if (cstate->replay_owner &&
6140 cstate->replay_owner != &lock_sop->lo_owner &&
6141 seqid_mutating_err(ntohl(status)))
6142 lock_sop->lo_owner.so_seqid++;
6143
6144 /*
6145 * If this is a new, never-before-used stateid, and we are
6146 * returning an error, then just go ahead and release it.
6147 */
6148 if (status && new)
6149 release_lock_stateid(lock_stp);
6150
6151 mutex_unlock(&lock_stp->st_mutex);
6152
6153 nfs4_put_stid(&lock_stp->st_stid);
6154 }
6155 if (open_stp)
6156 nfs4_put_stid(&open_stp->st_stid);
6157 nfsd4_bump_seqid(cstate, status);
6158 if (conflock)
6159 locks_free_lock(conflock);
6160 return status;
6161 }
6162
6163 /*
6164 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
6165 * so we do a temporary open here just to get an open file to pass to
6166 * vfs_test_lock. (Arguably perhaps test_lock should be done with an
6167 * inode operation.)
6168 */
6169 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
6170 {
6171 struct file *file;
6172 __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
6173 if (!err) {
6174 err = nfserrno(vfs_test_lock(file, lock));
6175 fput(file);
6176 }
6177 return err;
6178 }
6179
6180 /*
6181 * LOCKT operation
6182 */
6183 __be32
6184 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6185 union nfsd4_op_u *u)
6186 {
6187 struct nfsd4_lockt *lockt = &u->lockt;
6188 struct file_lock *file_lock = NULL;
6189 struct nfs4_lockowner *lo = NULL;
6190 __be32 status;
6191 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6192
6193 if (locks_in_grace(SVC_NET(rqstp)))
6194 return nfserr_grace;
6195
6196 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
6197 return nfserr_inval;
6198
6199 if (!nfsd4_has_session(cstate)) {
6200 status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
6201 if (status)
6202 goto out;
6203 }
6204
6205 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
6206 goto out;
6207
6208 file_lock = locks_alloc_lock();
6209 if (!file_lock) {
6210 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6211 status = nfserr_jukebox;
6212 goto out;
6213 }
6214
6215 switch (lockt->lt_type) {
6216 case NFS4_READ_LT:
6217 case NFS4_READW_LT:
6218 file_lock->fl_type = F_RDLCK;
6219 break;
6220 case NFS4_WRITE_LT:
6221 case NFS4_WRITEW_LT:
6222 file_lock->fl_type = F_WRLCK;
6223 break;
6224 default:
6225 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
6226 status = nfserr_inval;
6227 goto out;
6228 }
6229
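/*
* If the client already has a lockowner, run the test on its behalf so
* that its own locks are not reported as conflicts.
*/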
6230 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
6231 if (lo)
6232 file_lock->fl_owner = (fl_owner_t)lo;
6233 file_lock->fl_pid = current->tgid;
6234 file_lock->fl_flags = FL_POSIX;
6235
6236 file_lock->fl_start = lockt->lt_offset;
6237 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
6238
6239 nfs4_transform_lock_offset(file_lock);
6240
6241 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
6242 if (status)
6243 goto out;
6244
6245 if (file_lock->fl_type != F_UNLCK) {
6246 status = nfserr_denied;
6247 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
6248 }
6249 out:
6250 if (lo)
6251 nfs4_put_stateowner(&lo->lo_owner);
6252 if (file_lock)
6253 locks_free_lock(file_lock);
6254 return status;
6255 }
6256
6257 __be32
6258 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6259 union nfsd4_op_u *u)
6260 {
6261 struct nfsd4_locku *locku = &u->locku;
6262 struct nfs4_ol_stateid *stp;
6263 struct file *filp = NULL;
6264 struct file_lock *file_lock = NULL;
6265 __be32 status;
6266 int err;
6267 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6268
6269 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
6270 (long long) locku->lu_offset,
6271 (long long) locku->lu_length);
6272
6273 if (check_lock_length(locku->lu_offset, locku->lu_length))
6274 return nfserr_inval;
6275
6276 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
6277 &locku->lu_stateid, NFS4_LOCK_STID,
6278 &stp, nn);
6279 if (status)
6280 goto out;
6281 filp = find_any_file(stp->st_stid.sc_file);
6282 if (!filp) {
6283 status = nfserr_lock_range;
6284 goto put_stateid;
6285 }
6286 file_lock = locks_alloc_lock();
6287 if (!file_lock) {
6288 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6289 status = nfserr_jukebox;
6290 goto fput;
6291 }
6292
6293 file_lock->fl_type = F_UNLCK;
6294 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
6295 file_lock->fl_pid = current->tgid;
6296 file_lock->fl_file = filp;
6297 file_lock->fl_flags = FL_POSIX;
6298 file_lock->fl_lmops = &nfsd_posix_mng_ops;
6299 file_lock->fl_start = locku->lu_offset;
6300
6301 file_lock->fl_end = last_byte_offset(locku->lu_offset,
6302 locku->lu_length);
6303 nfs4_transform_lock_offset(file_lock);
6304
6305 err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
6306 if (err) {
6307 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
6308 goto out_nfserr;
6309 }
6310 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
6311 fput:
6312 fput(filp);
6313 put_stateid:
6314 mutex_unlock(&stp->st_mutex);
6315 nfs4_put_stid(&stp->st_stid);
6316 out:
6317 nfsd4_bump_seqid(cstate, status);
6318 if (file_lock)
6319 locks_free_lock(file_lock);
6320 return status;
6321
6322 out_nfserr:
6323 status = nfserrno(err);
6324 goto fput;
6325 }
6326
6327 /*
6328 * returns
6329 * true: locks held by lockowner
6330 * false: no locks held by lockowner
6331 */
6332 static bool
6333 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
6334 {
6335 struct file_lock *fl;
6336 int status = false;
6337 struct file *filp = find_any_file(fp);
6338 struct inode *inode;
6339 struct file_lock_context *flctx;
6340
6341 if (!filp) {
6342 /* Any valid lock stateid should have some sort of access */
6343 WARN_ON_ONCE(1);
6344 return status;
6345 }
6346
6347 inode = locks_inode(filp);
6348 flctx = inode->i_flctx;
6349
6350 if (flctx && !list_empty_careful(&flctx->flc_posix)) {
6351 spin_lock(&flctx->flc_lock);
6352 list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
6353 if (fl->fl_owner == (fl_owner_t)lowner) {
6354 status = true;
6355 break;
6356 }
6357 }
6358 spin_unlock(&flctx->flc_lock);
6359 }
6360 fput(filp);
6361 return status;
6362 }
6363
6364 __be32
6365 nfsd4_release_lockowner(struct svc_rqst *rqstp,
6366 struct nfsd4_compound_state *cstate,
6367 union nfsd4_op_u *u)
6368 {
6369 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
6370 clientid_t *clid = &rlockowner->rl_clientid;
6371 struct nfs4_stateowner *sop;
6372 struct nfs4_lockowner *lo = NULL;
6373 struct nfs4_ol_stateid *stp;
6374 struct xdr_netobj *owner = &rlockowner->rl_owner;
6375 unsigned int hashval = ownerstr_hashval(owner);
6376 __be32 status;
6377 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6378 struct nfs4_client *clp;
6379 LIST_HEAD(reaplist);
6380
6381 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
6382 clid->cl_boot, clid->cl_id);
6383
6384 status = lookup_clientid(clid, cstate, nn);
6385 if (status)
6386 return status;
6387
6388 clp = cstate->clp;
6389 /* Find the matching lock stateowner */
6390 spin_lock(&clp->cl_lock);
6391 list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
6392 so_strhash) {
6393
6394 if (sop->so_is_open_owner || !same_owner_str(sop, owner))
6395 continue;
6396
6397 /* see if there are still any locks associated with it */
6398 lo = lockowner(sop);
6399 list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
6400 if (check_for_locks(stp->st_stid.sc_file, lo)) {
6401 status = nfserr_locks_held;
6402 spin_unlock(&clp->cl_lock);
6403 return status;
6404 }
6405 }
6406
6407 nfs4_get_stateowner(sop);
6408 break;
6409 }
6410 if (!lo) {
6411 spin_unlock(&clp->cl_lock);
6412 return status;
6413 }
6414
6415 unhash_lockowner_locked(lo);
6416 while (!list_empty(&lo->lo_owner.so_stateids)) {
6417 stp = list_first_entry(&lo->lo_owner.so_stateids,
6418 struct nfs4_ol_stateid,
6419 st_perstateowner);
6420 WARN_ON(!unhash_lock_stateid(stp));
6421 put_ol_stateid_locked(stp, &reaplist);
6422 }
6423 spin_unlock(&clp->cl_lock);
6424 free_ol_stateid_reaplist(&reaplist);
6425 remove_blocked_locks(lo);
6426 nfs4_put_stateowner(&lo->lo_owner);
6427
6428 return status;
6429 }
6430
6431 static inline struct nfs4_client_reclaim *
6432 alloc_reclaim(void)
6433 {
6434 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
6435 }
6436
6437 bool
6438 nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
6439 {
6440 struct nfs4_client_reclaim *crp;
6441
6442 crp = nfsd4_find_reclaim_client(name, nn);
6443 return (crp && crp->cr_clp);
6444 }
6445
6446 /*
6447 * failure => all reclaim bets are off; the client will get nfserr_no_grace...
6448 */
6449 struct nfs4_client_reclaim *
6450 nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
6451 {
6452 unsigned int strhashval;
6453 struct nfs4_client_reclaim *crp;
6454
6455 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
6456 crp = alloc_reclaim();
6457 if (crp) {
6458 strhashval = clientstr_hashval(name);
6459 INIT_LIST_HEAD(&crp->cr_strhash);
6460 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
6461 memcpy(crp->cr_recdir, name, HEXDIR_LEN);
6462 crp->cr_clp = NULL;
6463 nn->reclaim_str_hashtbl_size++;
6464 }
6465 return crp;
6466 }
6467
6468 void
6469 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
6470 {
6471 list_del(&crp->cr_strhash);
6472 kfree(crp);
6473 nn->reclaim_str_hashtbl_size--;
6474 }
6475
6476 void
6477 nfs4_release_reclaim(struct nfsd_net *nn)
6478 {
6479 struct nfs4_client_reclaim *crp = NULL;
6480 int i;
6481
6482 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6483 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
6484 crp = list_entry(nn->reclaim_str_hashtbl[i].next,
6485 struct nfs4_client_reclaim, cr_strhash);
6486 nfs4_remove_reclaim_record(crp, nn);
6487 }
6488 }
6489 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
6490 }
6491
6492 /*
6493 * called from OPEN, CLAIM_PREVIOUS with a new clientid.
*/
6494 struct nfs4_client_reclaim *
6495 nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
6496 {
6497 unsigned int strhashval;
6498 struct nfs4_client_reclaim *crp = NULL;
6499
6500 dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir);
6501
6502 strhashval = clientstr_hashval(recdir);
6503 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
6504 if (same_name(crp->cr_recdir, recdir)) {
6505 return crp;
6506 }
6507 }
6508 return NULL;
6509 }
6510
6511 /*
6512 * Called from OPEN. Look for clientid in reclaim list.
6513 */
6514 __be32
6515 nfs4_check_open_reclaim(clientid_t *clid,
6516 struct nfsd4_compound_state *cstate,
6517 struct nfsd_net *nn)
6518 {
6519 __be32 status;
6520
6521 /* find clientid in conf_id_hashtbl */
6522 status = lookup_clientid(clid, cstate, nn);
6523 if (status)
6524 return nfserr_reclaim_bad;
6525
6526 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
6527 return nfserr_no_grace;
6528
6529 if (nfsd4_client_record_check(cstate->clp))
6530 return nfserr_reclaim_bad;
6531
6532 return nfs_ok;
6533 }
6534
6535 #ifdef CONFIG_NFSD_FAULT_INJECTION
6536 static inline void
6537 put_client(struct nfs4_client *clp)
6538 {
6539 atomic_dec(&clp->cl_refcount);
6540 }
6541
6542 static struct nfs4_client *
6543 nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
6544 {
6545 struct nfs4_client *clp;
6546 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6547 nfsd_net_id);
6548
6549 if (!nfsd_netns_ready(nn))
6550 return NULL;
6551
6552 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6553 if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
6554 return clp;
6555 }
6556 return NULL;
6557 }
6558
6559 u64
6560 nfsd_inject_print_clients(void)
6561 {
6562 struct nfs4_client *clp;
6563 u64 count = 0;
6564 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6565 nfsd_net_id);
6566 char buf[INET6_ADDRSTRLEN];
6567
6568 if (!nfsd_netns_ready(nn))
6569 return 0;
6570
6571 spin_lock(&nn->client_lock);
6572 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6573 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
6574 pr_info("NFS Client: %s\n", buf);
6575 ++count;
6576 }
6577 spin_unlock(&nn->client_lock);
6578
6579 return count;
6580 }
6581
6582 u64
6583 nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
6584 {
6585 u64 count = 0;
6586 struct nfs4_client *clp;
6587 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6588 nfsd_net_id);
6589
6590 if (!nfsd_netns_ready(nn))
6591 return count;
6592
6593 spin_lock(&nn->client_lock);
6594 clp = nfsd_find_client(addr, addr_size);
6595 if (clp) {
6596 if (mark_client_expired_locked(clp) == nfs_ok)
6597 ++count;
6598 else
6599 clp = NULL;
6600 }
6601 spin_unlock(&nn->client_lock);
6602
6603 if (clp)
6604 expire_client(clp);
6605
6606 return count;
6607 }
6608
6609 u64
6610 nfsd_inject_forget_clients(u64 max)
6611 {
6612 u64 count = 0;
6613 struct nfs4_client *clp, *next;
6614 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6615 nfsd_net_id);
6616 LIST_HEAD(reaplist);
6617
6618 if (!nfsd_netns_ready(nn))
6619 return count;
6620
6621 spin_lock(&nn->client_lock);
6622 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
6623 if (mark_client_expired_locked(clp) == nfs_ok) {
6624 list_add(&clp->cl_lru, &reaplist);
6625 if (max != 0 && ++count >= max)
6626 break;
6627 }
6628 }
6629 spin_unlock(&nn->client_lock);
6630
6631 list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
6632 expire_client(clp);
6633
6634 return count;
6635 }
6636
6637 static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
6638 const char *type)
6639 {
6640 char buf[INET6_ADDRSTRLEN];
6641 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
6642 printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
6643 }
6644
6645 static void
6646 nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
6647 struct list_head *collect)
6648 {
6649 struct nfs4_client *clp = lst->st_stid.sc_client;
6650 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6651 nfsd_net_id);
6652
6653 if (!collect)
6654 return;
6655
6656 lockdep_assert_held(&nn->client_lock);
6657 atomic_inc(&clp->cl_refcount);
6658 list_add(&lst->st_locks, collect);
6659 }
6660
6661 static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
6662 struct list_head *collect,
6663 bool (*func)(struct nfs4_ol_stateid *))
6664 {
6665 struct nfs4_openowner *oop;
6666 struct nfs4_ol_stateid *stp, *st_next;
6667 struct nfs4_ol_stateid *lst, *lst_next;
6668 u64 count = 0;
6669
6670 spin_lock(&clp->cl_lock);
6671 list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
6672 list_for_each_entry_safe(stp, st_next,
6673 &oop->oo_owner.so_stateids, st_perstateowner) {
6674 list_for_each_entry_safe(lst, lst_next,
6675 &stp->st_locks, st_locks) {
6676 if (func) {
6677 if (func(lst))
6678 nfsd_inject_add_lock_to_list(lst,
6679 collect);
6680 }
6681 ++count;
6682 /*
6683 * Despite the fact that these functions deal
6684 * with 64-bit integers for "count", we must
6685 * ensure that it doesn't blow up the
6686 * clp->cl_refcount. Throw a warning if we
6687 * start to approach INT_MAX here.
6688 */
6689 WARN_ON_ONCE(count == (INT_MAX / 2));
6690 if (count == max)
6691 goto out;
6692 }
6693 }
6694 }
6695 out:
6696 spin_unlock(&clp->cl_lock);
6697
6698 return count;
6699 }
6700
6701 static u64
6702 nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
6703 u64 max)
6704 {
6705 return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
6706 }
6707
6708 static u64
6709 nfsd_print_client_locks(struct nfs4_client *clp)
6710 {
6711 u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
6712 nfsd_print_count(clp, count, "locked files");
6713 return count;
6714 }
6715
6716 u64
6717 nfsd_inject_print_locks(void)
6718 {
6719 struct nfs4_client *clp;
6720 u64 count = 0;
6721 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6722 nfsd_net_id);
6723
6724 if (!nfsd_netns_ready(nn))
6725 return 0;
6726
6727 spin_lock(&nn->client_lock);
6728 list_for_each_entry(clp, &nn->client_lru, cl_lru)
6729 count += nfsd_print_client_locks(clp);
6730 spin_unlock(&nn->client_lock);
6731
6732 return count;
6733 }
6734
6735 static void
6736 nfsd_reap_locks(struct list_head *reaplist)
6737 {
6738 struct nfs4_client *clp;
6739 struct nfs4_ol_stateid *stp, *next;
6740
6741 list_for_each_entry_safe(stp, next, reaplist, st_locks) {
6742 list_del_init(&stp->st_locks);
6743 clp = stp->st_stid.sc_client;
6744 nfs4_put_stid(&stp->st_stid);
6745 put_client(clp);
6746 }
6747 }
6748
6749 u64
6750 nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
6751 {
6752 unsigned int count = 0;
6753 struct nfs4_client *clp;
6754 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6755 nfsd_net_id);
6756 LIST_HEAD(reaplist);
6757
6758 if (!nfsd_netns_ready(nn))
6759 return count;
6760
6761 spin_lock(&nn->client_lock);
6762 clp = nfsd_find_client(addr, addr_size);
6763 if (clp)
6764 count = nfsd_collect_client_locks(clp, &reaplist, 0);
6765 spin_unlock(&nn->client_lock);
6766 nfsd_reap_locks(&reaplist);
6767 return count;
6768 }
6769
6770 u64
6771 nfsd_inject_forget_locks(u64 max)
6772 {
6773 u64 count = 0;
6774 struct nfs4_client *clp;
6775 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6776 nfsd_net_id);
6777 LIST_HEAD(reaplist);
6778
6779 if (!nfsd_netns_ready(nn))
6780 return count;
6781
6782 spin_lock(&nn->client_lock);
6783 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6784 count += nfsd_collect_client_locks(clp, &reaplist, max - count);
6785 if (max != 0 && count >= max)
6786 break;
6787 }
6788 spin_unlock(&nn->client_lock);
6789 nfsd_reap_locks(&reaplist);
6790 return count;
6791 }
6792
6793 static u64
6794 nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
6795 struct list_head *collect,
6796 void (*func)(struct nfs4_openowner *))
6797 {
6798 struct nfs4_openowner *oop, *next;
6799 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6800 nfsd_net_id);
6801 u64 count = 0;
6802
6803 lockdep_assert_held(&nn->client_lock);
6804
6805 spin_lock(&clp->cl_lock);
6806 list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
6807 if (func) {
6808 func(oop);
6809 if (collect) {
6810 atomic_inc(&clp->cl_refcount);
6811 list_add(&oop->oo_perclient, collect);
6812 }
6813 }
6814 ++count;
6815 /*
6816 * Despite the fact that these functions deal with
6817 * 64-bit integers for "count", we must ensure that
6818 * it doesn't blow up the clp->cl_refcount. Throw a
6819 * warning if we start to approach INT_MAX here.
6820 */
6821 WARN_ON_ONCE(count == (INT_MAX / 2));
6822 if (count == max)
6823 break;
6824 }
6825 spin_unlock(&clp->cl_lock);
6826
6827 return count;
6828 }
6829
6830 static u64
6831 nfsd_print_client_openowners(struct nfs4_client *clp)
6832 {
6833 u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);
6834
6835 nfsd_print_count(clp, count, "openowners");
6836 return count;
6837 }
6838
6839 static u64
6840 nfsd_collect_client_openowners(struct nfs4_client *clp,
6841 struct list_head *collect, u64 max)
6842 {
6843 return nfsd_foreach_client_openowner(clp, max, collect,
6844 unhash_openowner_locked);
6845 }
6846
6847 u64
6848 nfsd_inject_print_openowners(void)
6849 {
6850 struct nfs4_client *clp;
6851 u64 count = 0;
6852 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6853 nfsd_net_id);
6854
6855 if (!nfsd_netns_ready(nn))
6856 return 0;
6857
6858 spin_lock(&nn->client_lock);
6859 list_for_each_entry(clp, &nn->client_lru, cl_lru)
6860 count += nfsd_print_client_openowners(clp);
6861 spin_unlock(&nn->client_lock);
6862
6863 return count;
6864 }
6865
6866 static void
6867 nfsd_reap_openowners(struct list_head *reaplist)
6868 {
6869 struct nfs4_client *clp;
6870 struct nfs4_openowner *oop, *next;
6871
6872 list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
6873 list_del_init(&oop->oo_perclient);
6874 clp = oop->oo_owner.so_client;
6875 release_openowner(oop);
6876 put_client(clp);
6877 }
6878 }
6879
6880 u64
6881 nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
6882 size_t addr_size)
6883 {
6884 unsigned int count = 0;
6885 struct nfs4_client *clp;
6886 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6887 nfsd_net_id);
6888 LIST_HEAD(reaplist);
6889
6890 if (!nfsd_netns_ready(nn))
6891 return count;
6892
6893 spin_lock(&nn->client_lock);
6894 clp = nfsd_find_client(addr, addr_size);
6895 if (clp)
6896 count = nfsd_collect_client_openowners(clp, &reaplist, 0);
6897 spin_unlock(&nn->client_lock);
6898 nfsd_reap_openowners(&reaplist);
6899 return count;
6900 }
6901
6902 u64
6903 nfsd_inject_forget_openowners(u64 max)
6904 {
6905 u64 count = 0;
6906 struct nfs4_client *clp;
6907 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6908 nfsd_net_id);
6909 LIST_HEAD(reaplist);
6910
6911 if (!nfsd_netns_ready(nn))
6912 return count;
6913
6914 spin_lock(&nn->client_lock);
6915 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6916 count += nfsd_collect_client_openowners(clp, &reaplist,
6917 max - count);
6918 if (max != 0 && count >= max)
6919 break;
6920 }
6921 spin_unlock(&nn->client_lock);
6922 nfsd_reap_openowners(&reaplist);
6923 return count;
6924 }
6925
6926 static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
6927 struct list_head *victims)
6928 {
6929 struct nfs4_delegation *dp, *next;
6930 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6931 nfsd_net_id);
6932 u64 count = 0;
6933
6934 lockdep_assert_held(&nn->client_lock);
6935
6936 spin_lock(&state_lock);
6937 list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
6938 if (victims) {
6939 /*
6940 * It's not safe to mess with delegations that have a
6941 * non-zero dl_time. They might have already been broken
6942 * and could be processed by the laundromat outside of
6943 * the state_lock. Just leave them be.
6944 */
6945 if (dp->dl_time != 0)
6946 continue;
6947
6948 atomic_inc(&clp->cl_refcount);
6949 WARN_ON(!unhash_delegation_locked(dp));
6950 list_add(&dp->dl_recall_lru, victims);
6951 }
6952 ++count;
6953 /*
6954 * Despite the fact that these functions deal with
6955 * 64-bit integers for "count", we must ensure that
6956 * it doesn't blow up the clp->cl_refcount. Throw a
6957 * warning if we start to approach INT_MAX here.
6958 */
6959 WARN_ON_ONCE(count == (INT_MAX / 2));
6960 if (count == max)
6961 break;
6962 }
6963 spin_unlock(&state_lock);
6964 return count;
6965 }
6966
6967 static u64
6968 nfsd_print_client_delegations(struct nfs4_client *clp)
6969 {
6970 u64 count = nfsd_find_all_delegations(clp, 0, NULL);
6971
6972 nfsd_print_count(clp, count, "delegations");
6973 return count;
6974 }
6975
6976 u64
6977 nfsd_inject_print_delegations(void)
6978 {
6979 struct nfs4_client *clp;
6980 u64 count = 0;
6981 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6982 nfsd_net_id);
6983
6984 if (!nfsd_netns_ready(nn))
6985 return 0;
6986
6987 spin_lock(&nn->client_lock);
6988 list_for_each_entry(clp, &nn->client_lru, cl_lru)
6989 count += nfsd_print_client_delegations(clp);
6990 spin_unlock(&nn->client_lock);
6991
6992 return count;
6993 }
6994
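/*
 * Revoke every delegation previously collected onto @reaplist, dropping
 * the client reference taken during collection.
 */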
static void
nfsd_forget_delegations(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_delegation *dp, *next;

	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
		list_del_init(&dp->dl_recall_lru);
		clp = dp->dl_stid.sc_client;
		revoke_delegation(dp);
		put_client(clp);
	}
}

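/*
 * Fault injection: revoke all delegations held by the client at @addr.
 */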
u64
nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
				      size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_find_all_delegations(clp, 0, &reaplist);
	spin_unlock(&nn->client_lock);

	nfsd_forget_delegations(&reaplist);
	return count;
}

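/*
 * Fault injection: revoke up to @max delegations across all clients on
 * the LRU (all of them when @max is 0).
 */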
u64
nfsd_inject_forget_delegations(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_forget_delegations(&reaplist);
	return count;
}

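/*
 * Send a CB_RECALL for every delegation previously collected onto
 * @reaplist, dropping the client reference taken during collection.
 */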
static void
nfsd_recall_delegations(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_delegation *dp, *next;

	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
		list_del_init(&dp->dl_recall_lru);
		clp = dp->dl_stid.sc_client;
		/*
		 * Collection skipped all entries that had a non-zero
		 * dl_time, so we can safely reset dl_time back to 0 here.
		 * If a delegation break comes in now, it won't make any
		 * difference, since we're recalling it either way.
		 */
		spin_lock(&state_lock);
		dp->dl_time = 0;
		spin_unlock(&state_lock);
		nfsd_break_one_deleg(dp);
		put_client(clp);
	}
}

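/*
 * Fault injection: recall all delegations held by the client at @addr.
 */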
u64
nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
				      size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_find_all_delegations(clp, 0, &reaplist);
	spin_unlock(&nn->client_lock);

	nfsd_recall_delegations(&reaplist);
	return count;
}

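/*
 * Fault injection: recall up to @max delegations across all clients on
 * the LRU (all of them when @max is 0). These injection entry points
 * are typically driven from nfsd's fault-injection files in debugfs
 * when CONFIG_NFSD_FAULT_INJECTION is enabled.
 */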
u64
nfsd_inject_recall_delegations(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_recall_delegations(&reaplist);
	return count;
}
#endif /* CONFIG_NFSD_FAULT_INJECTION */

/*
 * Since the lifetime of a delegation isn't limited to that of an open, a
 * client may quite reasonably hang on to a delegation as long as it has
 * the inode cached. This becomes an obvious problem the first time a
 * client's inode cache approaches the size of the server's total memory.
 *
 * For now we avoid this problem by imposing a hard limit on the number
 * of delegations, which varies according to the server's memory size.
 */
static void
set_max_delegations(void)
{
	/*
	 * Allow at most 4 delegations per megabyte of RAM. Quick
	 * estimates suggest that in the worst case (where every delegation
	 * is for a different inode), a delegation could take about 1.5K,
	 * giving a worst case usage of about 6% of memory.
	 */
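	/*
	 * For example, with 4KB pages (PAGE_SHIFT == 12) the shift below
	 * is 6, i.e. one delegation per 64 pages (4 per megabyte).
	 */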
	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}

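/*
 * Allocate and initialize the per-network-namespace nfsd state: client
 * and session hash tables, name trees, LRU lists, and the laundromat
 * work item. On allocation failure, anything already allocated is
 * freed and -ENOMEM is returned.
 */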
static int nfs4_state_create_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int i;

	nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					    sizeof(struct list_head),
					    GFP_KERNEL);
	if (!nn->conf_id_hashtbl)
		goto err;
	nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
	if (!nn->unconf_id_hashtbl)
		goto err_unconf_id;
	nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
	if (!nn->sessionid_hashtbl)
		goto err_sessionid;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
	}
	for (i = 0; i < SESSION_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
	nn->conf_name_tree = RB_ROOT;
	nn->unconf_name_tree = RB_ROOT;
	nn->boot_time = get_seconds();
	nn->grace_ended = false;
	nn->nfsd4_manager.block_opens = true;
	INIT_LIST_HEAD(&nn->nfsd4_manager.list);
	INIT_LIST_HEAD(&nn->client_lru);
	INIT_LIST_HEAD(&nn->close_lru);
	INIT_LIST_HEAD(&nn->del_recall_lru);
	spin_lock_init(&nn->client_lock);

	spin_lock_init(&nn->blocked_locks_lock);
	INIT_LIST_HEAD(&nn->blocked_locks_lru);

	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
	get_net(net);

	return 0;

err_sessionid:
	kfree(nn->unconf_id_hashtbl);
err_unconf_id:
	kfree(nn->conf_id_hashtbl);
err:
	return -ENOMEM;
}

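/*
 * Destroy every confirmed and unconfirmed client still hashed, free the
 * hash tables, and drop the net reference taken in
 * nfs4_state_create_net().
 */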
static void
nfs4_state_destroy_net(struct net *net)
{
	int i;
	struct nfs4_client *clp = NULL;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->conf_id_hashtbl[i])) {
			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	WARN_ON(!list_empty(&nn->blocked_locks_lru));

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	kfree(nn->sessionid_hashtbl);
	kfree(nn->unconf_id_hashtbl);
	kfree(nn->conf_id_hashtbl);
	put_net(net);
}

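/*
 * Per-net startup: create the state tables, enter the grace period, and
 * schedule the first laundromat run for when the grace period should
 * end.
 */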
int
nfs4_state_start_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	ret = nfs4_state_create_net(net);
	if (ret)
		return ret;
	locks_start_grace(net, &nn->nfsd4_manager);
	nfsd4_client_tracking_init(net);
	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %x)\n",
	       nn->nfsd4_grace, net->ns.inum);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
	return 0;
}

/* initialization to perform when the nfsd service is started: */

int
nfs4_state_start(void)
{
	int ret;

	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
	if (laundry_wq == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = nfsd4_create_callback_queue();
	if (ret)
		goto out_free_laundry;

	set_max_delegations();
	return 0;

out_free_laundry:
	destroy_workqueue(laundry_wq);
out:
	return ret;
}

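/*
 * Per-net shutdown: stop the laundromat, end the grace period, and
 * unhash and destroy any delegations still on the recall LRU before
 * tearing down the remaining state.
 */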
void
nfs4_state_shutdown_net(struct net *net)
{
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	cancel_delayed_work_sync(&nn->laundromat_work);
	locks_end_grace(&nn->nfsd4_manager);

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		destroy_unhashed_deleg(dp);
	}

	nfsd4_client_tracking_exit(net);
	nfs4_state_destroy_net(net);
}

void
nfs4_state_shutdown(void)
{
	destroy_workqueue(laundry_wq);
	nfsd4_destroy_callback_queue();
}

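/*
 * Helpers for the NFSv4.1+ "current stateid": put_stateid() saves an
 * operation's result stateid in the compound state, and get_stateid()
 * substitutes it back in when a later op in the compound passes the
 * special current-stateid value.
 */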
static void
get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
}

static void
put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (cstate->minorversion) {
		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
		SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
	}
}

void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
	CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
}

/*
 * functions to set current state id
 */
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open_downgrade.od_stateid);
}

void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open.op_stateid);
}

void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->close.cl_stateid);
}

void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->lock.lk_resp_stateid);
}

/*
 * functions to consume current state id
 */

void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->open_downgrade.od_stateid);
}

void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->delegreturn.dr_stateid);
}

void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->free_stateid.fr_stateid);
}

void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->setattr.sa_stateid);
}

void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->close.cl_stateid);
}

void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->locku.lu_stateid);
}

void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->read.rd_stateid);
}

void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->write.wr_stateid);
}