1 /*
2 * Copyright (c) 2001 The Regents of the University of Michigan.
3 * All rights reserved.
4 *
5 * Kendrick Smith <kmsmith@umich.edu>
6 * Andy Adamson <kandros@umich.edu>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the name of the University nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35 #include <linux/file.h>
36 #include <linux/fs.h>
37 #include <linux/slab.h>
38 #include <linux/namei.h>
39 #include <linux/swap.h>
40 #include <linux/pagemap.h>
41 #include <linux/ratelimit.h>
42 #include <linux/sunrpc/svcauth_gss.h>
43 #include <linux/sunrpc/addr.h>
44 #include <linux/jhash.h>
45 #include <linux/string_helpers.h>
46 #include <linux/fsnotify.h>
47 #include <linux/nfs_ssc.h>
48 #include "xdr4.h"
49 #include "xdr4cb.h"
50 #include "vfs.h"
51 #include "current_stateid.h"
52
53 #include "netns.h"
54 #include "pnfs.h"
55 #include "filecache.h"
56 #include "trace.h"
57
58 #define NFSDDBG_FACILITY NFSDDBG_PROC
59
60 #define all_ones {{~0,~0},~0}
61 static const stateid_t one_stateid = {
62 .si_generation = ~0,
63 .si_opaque = all_ones,
64 };
65 static const stateid_t zero_stateid = {
66 /* all fields zero */
67 };
68 static const stateid_t currentstateid = {
69 .si_generation = 1,
70 };
71 static const stateid_t close_stateid = {
72 .si_generation = 0xffffffffU,
73 };
74
75 static u64 current_sessionid = 1;
76
77 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
78 #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
79 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
80 #define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
81
82 /* forward declarations */
83 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
84 static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
85 void nfsd4_end_grace(struct nfsd_net *nn);
86 static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
87
88 /* Locking: */
89
90 /*
91 * Currently used for the del_recall_lru and file hash table. In an
92 * effort to decrease the scope of the client_mutex, this spinlock may
93 * eventually cover more:
94 */
95 static DEFINE_SPINLOCK(state_lock);
96
97 enum nfsd4_st_mutex_lock_subclass {
98 OPEN_STATEID_MUTEX = 0,
99 LOCK_STATEID_MUTEX = 1,
100 };
101
102 /*
103 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
104 * the refcount on the open stateid to drop.
105 */
106 static DECLARE_WAIT_QUEUE_HEAD(close_wq);
107
108 /*
109 * A waitqueue where a writer to clients/#/ctl destroying a client can
110 * wait for cl_rpc_users to drop to 0 and then for the client to be
111 * unhashed.
112 */
113 static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);
114
115 static struct kmem_cache *client_slab;
116 static struct kmem_cache *openowner_slab;
117 static struct kmem_cache *lockowner_slab;
118 static struct kmem_cache *file_slab;
119 static struct kmem_cache *stateid_slab;
120 static struct kmem_cache *deleg_slab;
121 static struct kmem_cache *odstate_slab;
122
123 static void free_session(struct nfsd4_session *);
124
125 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
126 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
127
128 static struct workqueue_struct *laundry_wq;
129
130 int nfsd4_create_laundry_wq(void)
131 {
132 int rc = 0;
133
134 laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
135 if (laundry_wq == NULL)
136 rc = -ENOMEM;
137 return rc;
138 }
139
140 void nfsd4_destroy_laundry_wq(void)
141 {
142 destroy_workqueue(laundry_wq);
143 }
144
145 static bool is_session_dead(struct nfsd4_session *ses)
146 {
147 return ses->se_flags & NFS4_SESSION_DEAD;
148 }
149
150 static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
151 {
152 if (atomic_read(&ses->se_ref) > ref_held_by_me)
153 return nfserr_jukebox;
154 ses->se_flags |= NFS4_SESSION_DEAD;
155 return nfs_ok;
156 }
157
158 static bool is_client_expired(struct nfs4_client *clp)
159 {
160 return clp->cl_time == 0;
161 }
162
163 static void nfsd4_dec_courtesy_client_count(struct nfsd_net *nn,
164 struct nfs4_client *clp)
165 {
166 if (clp->cl_state != NFSD4_ACTIVE)
167 atomic_add_unless(&nn->nfsd_courtesy_clients, -1, 0);
168 }
169
170 static __be32 get_client_locked(struct nfs4_client *clp)
171 {
172 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
173
174 lockdep_assert_held(&nn->client_lock);
175
176 if (is_client_expired(clp))
177 return nfserr_expired;
178 atomic_inc(&clp->cl_rpc_users);
179 nfsd4_dec_courtesy_client_count(nn, clp);
180 clp->cl_state = NFSD4_ACTIVE;
181 return nfs_ok;
182 }
183
184 /* must be called under the client_lock */
185 static inline void
186 renew_client_locked(struct nfs4_client *clp)
187 {
188 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
189
190 if (is_client_expired(clp)) {
191 WARN_ON(1);
192 printk("%s: client (clientid %08x/%08x) already expired\n",
193 __func__,
194 clp->cl_clientid.cl_boot,
195 clp->cl_clientid.cl_id);
196 return;
197 }
198
199 list_move_tail(&clp->cl_lru, &nn->client_lru);
200 clp->cl_time = ktime_get_boottime_seconds();
201 nfsd4_dec_courtesy_client_count(nn, clp);
202 clp->cl_state = NFSD4_ACTIVE;
203 }
204
205 static void put_client_renew_locked(struct nfs4_client *clp)
206 {
207 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
208
209 lockdep_assert_held(&nn->client_lock);
210
211 if (!atomic_dec_and_test(&clp->cl_rpc_users))
212 return;
213 if (!is_client_expired(clp))
214 renew_client_locked(clp);
215 else
216 wake_up_all(&expiry_wq);
217 }
218
219 static void put_client_renew(struct nfs4_client *clp)
220 {
221 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
222
223 if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
224 return;
225 if (!is_client_expired(clp))
226 renew_client_locked(clp);
227 else
228 wake_up_all(&expiry_wq);
229 spin_unlock(&nn->client_lock);
230 }
231
232 static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
233 {
234 __be32 status;
235
236 if (is_session_dead(ses))
237 return nfserr_badsession;
238 status = get_client_locked(ses->se_client);
239 if (status)
240 return status;
241 atomic_inc(&ses->se_ref);
242 return nfs_ok;
243 }
244
245 static void nfsd4_put_session_locked(struct nfsd4_session *ses)
246 {
247 struct nfs4_client *clp = ses->se_client;
248 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
249
250 lockdep_assert_held(&nn->client_lock);
251
252 if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
253 free_session(ses);
254 put_client_renew_locked(clp);
255 }
256
257 static void nfsd4_put_session(struct nfsd4_session *ses)
258 {
259 struct nfs4_client *clp = ses->se_client;
260 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
261
262 spin_lock(&nn->client_lock);
263 nfsd4_put_session_locked(ses);
264 spin_unlock(&nn->client_lock);
265 }
266
267 static struct nfsd4_blocked_lock *
268 find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
269 struct nfsd_net *nn)
270 {
271 struct nfsd4_blocked_lock *cur, *found = NULL;
272
273 spin_lock(&nn->blocked_locks_lock);
274 list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
275 if (fh_match(fh, &cur->nbl_fh)) {
276 list_del_init(&cur->nbl_list);
277 WARN_ON(list_empty(&cur->nbl_lru));
278 list_del_init(&cur->nbl_lru);
279 found = cur;
280 break;
281 }
282 }
283 spin_unlock(&nn->blocked_locks_lock);
284 if (found)
285 locks_delete_block(&found->nbl_lock);
286 return found;
287 }
288
289 static struct nfsd4_blocked_lock *
290 find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
291 struct nfsd_net *nn)
292 {
293 struct nfsd4_blocked_lock *nbl;
294
295 nbl = find_blocked_lock(lo, fh, nn);
296 if (!nbl) {
297 nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
298 if (nbl) {
299 INIT_LIST_HEAD(&nbl->nbl_list);
300 INIT_LIST_HEAD(&nbl->nbl_lru);
301 fh_copy_shallow(&nbl->nbl_fh, fh);
302 locks_init_lock(&nbl->nbl_lock);
303 kref_init(&nbl->nbl_kref);
304 nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
305 &nfsd4_cb_notify_lock_ops,
306 NFSPROC4_CLNT_CB_NOTIFY_LOCK);
307 }
308 }
309 return nbl;
310 }
311
312 static void
313 free_nbl(struct kref *kref)
314 {
315 struct nfsd4_blocked_lock *nbl;
316
317 nbl = container_of(kref, struct nfsd4_blocked_lock, nbl_kref);
318 kfree(nbl);
319 }
320
321 static void
322 free_blocked_lock(struct nfsd4_blocked_lock *nbl)
323 {
324 locks_delete_block(&nbl->nbl_lock);
325 locks_release_private(&nbl->nbl_lock);
326 kref_put(&nbl->nbl_kref, free_nbl);
327 }
328
329 static void
330 remove_blocked_locks(struct nfs4_lockowner *lo)
331 {
332 struct nfs4_client *clp = lo->lo_owner.so_client;
333 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
334 struct nfsd4_blocked_lock *nbl;
335 LIST_HEAD(reaplist);
336
337 /* Dequeue all blocked locks */
338 spin_lock(&nn->blocked_locks_lock);
339 while (!list_empty(&lo->lo_blocked)) {
340 nbl = list_first_entry(&lo->lo_blocked,
341 struct nfsd4_blocked_lock,
342 nbl_list);
343 list_del_init(&nbl->nbl_list);
344 WARN_ON(list_empty(&nbl->nbl_lru));
345 list_move(&nbl->nbl_lru, &reaplist);
346 }
347 spin_unlock(&nn->blocked_locks_lock);
348
349 /* Now free them */
350 while (!list_empty(&reaplist)) {
351 nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
352 nbl_lru);
353 list_del_init(&nbl->nbl_lru);
354 free_blocked_lock(nbl);
355 }
356 }
357
358 static void
359 nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
360 {
361 struct nfsd4_blocked_lock *nbl = container_of(cb,
362 struct nfsd4_blocked_lock, nbl_cb);
363 locks_delete_block(&nbl->nbl_lock);
364 }
365
366 static int
367 nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
368 {
369 trace_nfsd_cb_notify_lock_done(&zero_stateid, task);
370
371 /*
372 * Since this is just an optimization, we don't try very hard if it
373 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
374 * just quit trying on anything else.
375 */
376 switch (task->tk_status) {
377 case -NFS4ERR_DELAY:
378 rpc_delay(task, 1 * HZ);
379 return 0;
380 default:
381 return 1;
382 }
383 }
384
385 static void
386 nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
387 {
388 struct nfsd4_blocked_lock *nbl = container_of(cb,
389 struct nfsd4_blocked_lock, nbl_cb);
390
391 free_blocked_lock(nbl);
392 }
393
394 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
395 .prepare = nfsd4_cb_notify_lock_prepare,
396 .done = nfsd4_cb_notify_lock_done,
397 .release = nfsd4_cb_notify_lock_release,
398 };
399
400 /*
401 * We store the NONE, READ, WRITE, and BOTH bits separately in the
402 * st_{access,deny}_bmap field of the stateid, in order to track not
403 * only what share bits are currently in force, but also what
404 * combinations of share bits previous opens have used. This allows us
405 * to enforce the recommendation in
406 * https://datatracker.ietf.org/doc/html/rfc7530#section-16.19.4 that
407 * the server return an error if the client attempts to downgrade to a
408 * combination of share bits not explicable by closing some of its
409 * previous opens.
410 *
411 * This enforcement is arguably incomplete, since we don't keep
412 * track of access/deny bit combinations; so, e.g., we allow:
413 *
414 * OPEN allow read, deny write
415 * OPEN allow both, deny none
416 * DOWNGRADE allow read, deny none
417 *
418 * which we should reject.
419 *
420 * But you could also argue that our current code is already overkill,
421 * since it only exists to return NFS4ERR_INVAL on incorrect client
422 * behavior.
423 */
424 static unsigned int
425 bmap_to_share_mode(unsigned long bmap)
426 {
427 int i;
428 unsigned int access = 0;
429
430 for (i = 1; i < 4; i++) {
431 if (test_bit(i, &bmap))
432 access |= i;
433 }
434 return access;
435 }
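/*
 * Illustrative example (not part of the original source): if previous
 * opens have set bit 1 (READ) and bit 2 (WRITE) in st_access_bmap, then
 * bmap_to_share_mode() returns 1 | 2 == NFS4_SHARE_ACCESS_BOTH, i.e. the
 * union of every access mode ever granted on this stateid.
 */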
436
437 /* set share access for a given stateid */
438 static inline void
439 set_access(u32 access, struct nfs4_ol_stateid *stp)
440 {
441 unsigned char mask = 1 << access;
442
443 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
444 stp->st_access_bmap |= mask;
445 }
446
447 /* clear share access for a given stateid */
448 static inline void
449 clear_access(u32 access, struct nfs4_ol_stateid *stp)
450 {
451 unsigned char mask = 1 << access;
452
453 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
454 stp->st_access_bmap &= ~mask;
455 }
456
457 /* test whether a given stateid has access */
458 static inline bool
459 test_access(u32 access, struct nfs4_ol_stateid *stp)
460 {
461 unsigned char mask = 1 << access;
462
463 return (bool)(stp->st_access_bmap & mask);
464 }
465
466 /* set share deny for a given stateid */
467 static inline void
468 set_deny(u32 deny, struct nfs4_ol_stateid *stp)
469 {
470 unsigned char mask = 1 << deny;
471
472 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
473 stp->st_deny_bmap |= mask;
474 }
475
476 /* clear share deny for a given stateid */
477 static inline void
478 clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
479 {
480 unsigned char mask = 1 << deny;
481
482 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
483 stp->st_deny_bmap &= ~mask;
484 }
485
486 /* test whether a given stateid is denying specific access */
487 static inline bool
488 test_deny(u32 deny, struct nfs4_ol_stateid *stp)
489 {
490 unsigned char mask = 1 << deny;
491
492 return (bool)(stp->st_deny_bmap & mask);
493 }
494
495 static int nfs4_access_to_omode(u32 access)
496 {
497 switch (access & NFS4_SHARE_ACCESS_BOTH) {
498 case NFS4_SHARE_ACCESS_READ:
499 return O_RDONLY;
500 case NFS4_SHARE_ACCESS_WRITE:
501 return O_WRONLY;
502 case NFS4_SHARE_ACCESS_BOTH:
503 return O_RDWR;
504 }
505 WARN_ON_ONCE(1);
506 return O_RDONLY;
507 }
508
509 static inline int
510 access_permit_read(struct nfs4_ol_stateid *stp)
511 {
512 return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
513 test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
514 test_access(NFS4_SHARE_ACCESS_WRITE, stp);
515 }
516
517 static inline int
518 access_permit_write(struct nfs4_ol_stateid *stp)
519 {
520 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
521 test_access(NFS4_SHARE_ACCESS_BOTH, stp);
522 }
523
524 static inline struct nfs4_stateowner *
525 nfs4_get_stateowner(struct nfs4_stateowner *sop)
526 {
527 atomic_inc(&sop->so_count);
528 return sop;
529 }
530
531 static int
532 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
533 {
534 return (sop->so_owner.len == owner->len) &&
535 0 == memcmp(sop->so_owner.data, owner->data, owner->len);
536 }
537
538 static struct nfs4_openowner *
539 find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
540 struct nfs4_client *clp)
541 {
542 struct nfs4_stateowner *so;
543
544 lockdep_assert_held(&clp->cl_lock);
545
546 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
547 so_strhash) {
548 if (!so->so_is_open_owner)
549 continue;
550 if (same_owner_str(so, &open->op_owner))
551 return openowner(nfs4_get_stateowner(so));
552 }
553 return NULL;
554 }
555
556 static struct nfs4_openowner *
557 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
558 struct nfs4_client *clp)
559 {
560 struct nfs4_openowner *oo;
561
562 spin_lock(&clp->cl_lock);
563 oo = find_openstateowner_str_locked(hashval, open, clp);
564 spin_unlock(&clp->cl_lock);
565 return oo;
566 }
567
568 static inline u32
569 opaque_hashval(const void *ptr, int nbytes)
570 {
571 unsigned char *cptr = (unsigned char *) ptr;
572
573 u32 x = 0;
574 while (nbytes--) {
575 x *= 37;
576 x += *cptr++;
577 }
578 return x;
579 }
580
581 static void nfsd4_free_file_rcu(struct rcu_head *rcu)
582 {
583 struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
584
585 kmem_cache_free(file_slab, fp);
586 }
587
588 void
589 put_nfs4_file(struct nfs4_file *fi)
590 {
591 might_lock(&state_lock);
592
593 if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
594 hlist_del_rcu(&fi->fi_hash);
595 spin_unlock(&state_lock);
596 WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
597 WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
598 call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
599 }
600 }
601
602 static struct nfsd_file *
603 __nfs4_get_fd(struct nfs4_file *f, int oflag)
604 {
605 if (f->fi_fds[oflag])
606 return nfsd_file_get(f->fi_fds[oflag]);
607 return NULL;
608 }
609
610 static struct nfsd_file *
611 find_writeable_file_locked(struct nfs4_file *f)
612 {
613 struct nfsd_file *ret;
614
615 lockdep_assert_held(&f->fi_lock);
616
617 ret = __nfs4_get_fd(f, O_WRONLY);
618 if (!ret)
619 ret = __nfs4_get_fd(f, O_RDWR);
620 return ret;
621 }
622
623 static struct nfsd_file *
624 find_writeable_file(struct nfs4_file *f)
625 {
626 struct nfsd_file *ret;
627
628 spin_lock(&f->fi_lock);
629 ret = find_writeable_file_locked(f);
630 spin_unlock(&f->fi_lock);
631
632 return ret;
633 }
634
635 static struct nfsd_file *
636 find_readable_file_locked(struct nfs4_file *f)
637 {
638 struct nfsd_file *ret;
639
640 lockdep_assert_held(&f->fi_lock);
641
642 ret = __nfs4_get_fd(f, O_RDONLY);
643 if (!ret)
644 ret = __nfs4_get_fd(f, O_RDWR);
645 return ret;
646 }
647
648 static struct nfsd_file *
649 find_readable_file(struct nfs4_file *f)
650 {
651 struct nfsd_file *ret;
652
653 spin_lock(&f->fi_lock);
654 ret = find_readable_file_locked(f);
655 spin_unlock(&f->fi_lock);
656
657 return ret;
658 }
659
660 struct nfsd_file *
661 find_any_file(struct nfs4_file *f)
662 {
663 struct nfsd_file *ret;
664
665 if (!f)
666 return NULL;
667 spin_lock(&f->fi_lock);
668 ret = __nfs4_get_fd(f, O_RDWR);
669 if (!ret) {
670 ret = __nfs4_get_fd(f, O_WRONLY);
671 if (!ret)
672 ret = __nfs4_get_fd(f, O_RDONLY);
673 }
674 spin_unlock(&f->fi_lock);
675 return ret;
676 }
677
678 static struct nfsd_file *find_any_file_locked(struct nfs4_file *f)
679 {
680 lockdep_assert_held(&f->fi_lock);
681
682 if (f->fi_fds[O_RDWR])
683 return f->fi_fds[O_RDWR];
684 if (f->fi_fds[O_WRONLY])
685 return f->fi_fds[O_WRONLY];
686 if (f->fi_fds[O_RDONLY])
687 return f->fi_fds[O_RDONLY];
688 return NULL;
689 }
690
691 static struct nfsd_file *find_deleg_file_locked(struct nfs4_file *f)
692 {
693 lockdep_assert_held(&f->fi_lock);
694
695 if (f->fi_deleg_file)
696 return f->fi_deleg_file;
697 return NULL;
698 }
699
700 static atomic_long_t num_delegations;
701 unsigned long max_delegations;
702
703 /*
704 * Open owner state (share locks)
705 */
706
707 /* hash tables for lock and open owners */
708 #define OWNER_HASH_BITS 8
709 #define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
710 #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
711
712 static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
713 {
714 unsigned int ret;
715
716 ret = opaque_hashval(ownername->data, ownername->len);
717 return ret & OWNER_HASH_MASK;
718 }
719
720 /* hash table for nfs4_file */
721 #define FILE_HASH_BITS 8
722 #define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
723
724 static unsigned int file_hashval(struct svc_fh *fh)
725 {
726 struct inode *inode = d_inode(fh->fh_dentry);
727
728 /* XXX: why not (here & in file cache) use inode? */
729 return (unsigned int)hash_long(inode->i_ino, FILE_HASH_BITS);
730 }
731
732 static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
733
734 /*
735 * Check if courtesy clients have conflicting access and resolve it if possible
736 *
737 * access: is op_share_access if share_access is true.
738 * Check if access mode, op_share_access, would conflict with
739 * the current deny mode of the file 'fp'.
740 * access: is op_share_deny if share_access is false.
741 * Check if the deny mode, op_share_deny, would conflict with
742 * current access of the file 'fp'.
743 * stp: skip checking this entry.
744 * new_stp: normal open, not open upgrade.
745 *
746 * Function returns:
747 * false - access/deny mode conflict with normal client.
748 * true - no conflict or conflict with courtesy client(s) is resolved.
749 */
750 static bool
751 nfs4_resolve_deny_conflicts_locked(struct nfs4_file *fp, bool new_stp,
752 struct nfs4_ol_stateid *stp, u32 access, bool share_access)
753 {
754 struct nfs4_ol_stateid *st;
755 bool resolvable = true;
756 unsigned char bmap;
757 struct nfsd_net *nn;
758 struct nfs4_client *clp;
759
760 lockdep_assert_held(&fp->fi_lock);
761 list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
762 /* ignore lock stateid */
763 if (st->st_openstp)
764 continue;
765 if (st == stp && new_stp)
766 continue;
767 /* check file access against deny mode or vice versa */
768 bmap = share_access ? st->st_deny_bmap : st->st_access_bmap;
769 if (!(access & bmap_to_share_mode(bmap)))
770 continue;
771 clp = st->st_stid.sc_client;
772 if (try_to_expire_client(clp))
773 continue;
774 resolvable = false;
775 break;
776 }
777 if (resolvable) {
778 clp = stp->st_stid.sc_client;
779 nn = net_generic(clp->net, nfsd_net_id);
780 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
781 }
782 return resolvable;
783 }
784
785 static void
786 __nfs4_file_get_access(struct nfs4_file *fp, u32 access)
787 {
788 lockdep_assert_held(&fp->fi_lock);
789
790 if (access & NFS4_SHARE_ACCESS_WRITE)
791 atomic_inc(&fp->fi_access[O_WRONLY]);
792 if (access & NFS4_SHARE_ACCESS_READ)
793 atomic_inc(&fp->fi_access[O_RDONLY]);
794 }
795
796 static __be32
797 nfs4_file_get_access(struct nfs4_file *fp, u32 access)
798 {
799 lockdep_assert_held(&fp->fi_lock);
800
801 /* Does this access mode make sense? */
802 if (access & ~NFS4_SHARE_ACCESS_BOTH)
803 return nfserr_inval;
804
805 /* Does it conflict with a deny mode already set? */
806 if ((access & fp->fi_share_deny) != 0)
807 return nfserr_share_denied;
808
809 __nfs4_file_get_access(fp, access);
810 return nfs_ok;
811 }
812
813 static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
814 {
815 /* Common case is that there is no deny mode. */
816 if (deny) {
817 /* Does this deny mode make sense? */
818 if (deny & ~NFS4_SHARE_DENY_BOTH)
819 return nfserr_inval;
820
821 if ((deny & NFS4_SHARE_DENY_READ) &&
822 atomic_read(&fp->fi_access[O_RDONLY]))
823 return nfserr_share_denied;
824
825 if ((deny & NFS4_SHARE_DENY_WRITE) &&
826 atomic_read(&fp->fi_access[O_WRONLY]))
827 return nfserr_share_denied;
828 }
829 return nfs_ok;
830 }
831
832 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
833 {
834 might_lock(&fp->fi_lock);
835
836 if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
837 struct nfsd_file *f1 = NULL;
838 struct nfsd_file *f2 = NULL;
839
840 swap(f1, fp->fi_fds[oflag]);
841 if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
842 swap(f2, fp->fi_fds[O_RDWR]);
843 spin_unlock(&fp->fi_lock);
844 if (f1)
845 nfsd_file_put(f1);
846 if (f2)
847 nfsd_file_put(f2);
848 }
849 }
850
851 static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
852 {
853 WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);
854
855 if (access & NFS4_SHARE_ACCESS_WRITE)
856 __nfs4_file_put_access(fp, O_WRONLY);
857 if (access & NFS4_SHARE_ACCESS_READ)
858 __nfs4_file_put_access(fp, O_RDONLY);
859 }
860
861 /*
862 * Allocate a new open/delegation state counter. This is needed for
863 * pNFS for proper return on close semantics.
864 *
865 * Note that we only allocate it for pNFS-enabled exports, otherwise
866 * all pointers to struct nfs4_clnt_odstate are always NULL.
867 */
868 static struct nfs4_clnt_odstate *
869 alloc_clnt_odstate(struct nfs4_client *clp)
870 {
871 struct nfs4_clnt_odstate *co;
872
873 co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
874 if (co) {
875 co->co_client = clp;
876 refcount_set(&co->co_odcount, 1);
877 }
878 return co;
879 }
880
881 static void
882 hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
883 {
884 struct nfs4_file *fp = co->co_file;
885
886 lockdep_assert_held(&fp->fi_lock);
887 list_add(&co->co_perfile, &fp->fi_clnt_odstate);
888 }
889
890 static inline void
891 get_clnt_odstate(struct nfs4_clnt_odstate *co)
892 {
893 if (co)
894 refcount_inc(&co->co_odcount);
895 }
896
897 static void
898 put_clnt_odstate(struct nfs4_clnt_odstate *co)
899 {
900 struct nfs4_file *fp;
901
902 if (!co)
903 return;
904
905 fp = co->co_file;
906 if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
907 list_del(&co->co_perfile);
908 spin_unlock(&fp->fi_lock);
909
910 nfsd4_return_all_file_layouts(co->co_client, fp);
911 kmem_cache_free(odstate_slab, co);
912 }
913 }
914
915 static struct nfs4_clnt_odstate *
916 find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
917 {
918 struct nfs4_clnt_odstate *co;
919 struct nfs4_client *cl;
920
921 if (!new)
922 return NULL;
923
924 cl = new->co_client;
925
926 spin_lock(&fp->fi_lock);
927 list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
928 if (co->co_client == cl) {
929 get_clnt_odstate(co);
930 goto out;
931 }
932 }
933 co = new;
934 co->co_file = fp;
935 hash_clnt_odstate_locked(new);
936 out:
937 spin_unlock(&fp->fi_lock);
938 return co;
939 }
940
941 struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
942 void (*sc_free)(struct nfs4_stid *))
943 {
944 struct nfs4_stid *stid;
945 int new_id;
946
947 stid = kmem_cache_zalloc(slab, GFP_KERNEL);
948 if (!stid)
949 return NULL;
950
951 idr_preload(GFP_KERNEL);
952 spin_lock(&cl->cl_lock);
953 /* Reserving 0 for start of file in nfsdfs "states" file: */
954 new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
955 spin_unlock(&cl->cl_lock);
956 idr_preload_end();
957 if (new_id < 0)
958 goto out_free;
959
960 stid->sc_free = sc_free;
961 stid->sc_client = cl;
962 stid->sc_stateid.si_opaque.so_id = new_id;
963 stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
964 /* Will be incremented before return to client: */
965 refcount_set(&stid->sc_count, 1);
966 spin_lock_init(&stid->sc_lock);
967 INIT_LIST_HEAD(&stid->sc_cp_list);
968
969 /*
970 * It shouldn't be a problem to reuse an opaque stateid value.
971 * I don't think it is for 4.1. But with 4.0 I worry that, for
972 * example, a stray write retransmission could be accepted by
973 * the server when it should have been rejected. Therefore,
974 * adopt a trick from the sctp code to attempt to maximize the
975 * amount of time until an id is reused, by ensuring they always
976 * "increase" (mod INT_MAX):
977 */
978 return stid;
979 out_free:
980 kmem_cache_free(slab, stid);
981 return NULL;
982 }
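/*
 * Usage sketch (illustrative, not part of the original source): callers
 * supply the slab and destructor, e.g. nfs4_alloc_open_stateid() below does
 *
 *	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
 *
 * idr_alloc_cyclic() hands out so_id values 1, 2, 3, ... and only wraps
 * back to 1 after reaching INT_MAX, which is what delays reuse of an
 * opaque stateid value as described in the comment above.
 */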
983
984 /*
985 * Create a unique stateid_t to represent each COPY.
986 */
987 static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
988 unsigned char cs_type)
989 {
990 int new_id;
991
992 stid->cs_stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
993 stid->cs_stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
994
995 idr_preload(GFP_KERNEL);
996 spin_lock(&nn->s2s_cp_lock);
997 new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
998 stid->cs_stid.si_opaque.so_id = new_id;
999 stid->cs_stid.si_generation = 1;
1000 spin_unlock(&nn->s2s_cp_lock);
1001 idr_preload_end();
1002 if (new_id < 0)
1003 return 0;
1004 stid->cs_type = cs_type;
1005 return 1;
1006 }
1007
1008 int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
1009 {
1010 return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
1011 }
1012
1013 struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
1014 struct nfs4_stid *p_stid)
1015 {
1016 struct nfs4_cpntf_state *cps;
1017
1018 cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
1019 if (!cps)
1020 return NULL;
1021 cps->cpntf_time = ktime_get_boottime_seconds();
1022 refcount_set(&cps->cp_stateid.cs_count, 1);
1023 if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
1024 goto out_free;
1025 spin_lock(&nn->s2s_cp_lock);
1026 list_add(&cps->cp_list, &p_stid->sc_cp_list);
1027 spin_unlock(&nn->s2s_cp_lock);
1028 return cps;
1029 out_free:
1030 kfree(cps);
1031 return NULL;
1032 }
1033
1034 void nfs4_free_copy_state(struct nfsd4_copy *copy)
1035 {
1036 struct nfsd_net *nn;
1037
1038 if (copy->cp_stateid.cs_type != NFS4_COPY_STID)
1039 return;
1040 nn = net_generic(copy->cp_clp->net, nfsd_net_id);
1041 spin_lock(&nn->s2s_cp_lock);
1042 idr_remove(&nn->s2s_cp_stateids,
1043 copy->cp_stateid.cs_stid.si_opaque.so_id);
1044 spin_unlock(&nn->s2s_cp_lock);
1045 }
1046
1047 static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
1048 {
1049 struct nfs4_cpntf_state *cps;
1050 struct nfsd_net *nn;
1051
1052 nn = net_generic(net, nfsd_net_id);
1053 spin_lock(&nn->s2s_cp_lock);
1054 while (!list_empty(&stid->sc_cp_list)) {
1055 cps = list_first_entry(&stid->sc_cp_list,
1056 struct nfs4_cpntf_state, cp_list);
1057 _free_cpntf_state_locked(nn, cps);
1058 }
1059 spin_unlock(&nn->s2s_cp_lock);
1060 }
1061
1062 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
1063 {
1064 struct nfs4_stid *stid;
1065
1066 stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
1067 if (!stid)
1068 return NULL;
1069
1070 return openlockstateid(stid);
1071 }
1072
1073 static void nfs4_free_deleg(struct nfs4_stid *stid)
1074 {
1075 struct nfs4_delegation *dp = delegstateid(stid);
1076
1077 WARN_ON_ONCE(!list_empty(&stid->sc_cp_list));
1078 WARN_ON_ONCE(!list_empty(&dp->dl_perfile));
1079 WARN_ON_ONCE(!list_empty(&dp->dl_perclnt));
1080 WARN_ON_ONCE(!list_empty(&dp->dl_recall_lru));
1081 kmem_cache_free(deleg_slab, stid);
1082 atomic_long_dec(&num_delegations);
1083 }
1084
1085 /*
1086 * When we recall a delegation, we should be careful not to hand it
1087 * out again straight away.
1088 * To ensure this we keep a pair of bloom filters ('new' and 'old')
1089 * in which the filehandles of recalled delegations are "stored".
1090 * If a filehandle appears in either filter, a delegation is blocked.
1091 * When a delegation is recalled, the filehandle is stored in the "new"
1092 * filter.
1093 * Every 30 seconds we swap the filters and clear the "new" one,
1094 * unless both are empty of course.
1095 *
1096 * Each filter is 256 bits. We hash the filehandle to 32bit and use the
1097 * low 3 bytes as hash-table indices.
1098 *
1099 * 'blocked_delegations_lock', which is always taken in block_delegations(),
1100 * is used to manage concurrent access. Testing does not need the lock
1101 * except when swapping the two filters.
1102 */
1103 static DEFINE_SPINLOCK(blocked_delegations_lock);
1104 static struct bloom_pair {
1105 int entries, old_entries;
1106 time64_t swap_time;
1107 int new; /* index into 'set' */
1108 DECLARE_BITMAP(set[2], 256);
1109 } blocked_delegations;
1110
1111 static int delegation_blocked(struct knfsd_fh *fh)
1112 {
1113 u32 hash;
1114 struct bloom_pair *bd = &blocked_delegations;
1115
1116 if (bd->entries == 0)
1117 return 0;
1118 if (ktime_get_seconds() - bd->swap_time > 30) {
1119 spin_lock(&blocked_delegations_lock);
1120 if (ktime_get_seconds() - bd->swap_time > 30) {
1121 bd->entries -= bd->old_entries;
1122 bd->old_entries = bd->entries;
1123 memset(bd->set[bd->new], 0,
1124 sizeof(bd->set[0]));
1125 bd->new = 1-bd->new;
1126 bd->swap_time = ktime_get_seconds();
1127 }
1128 spin_unlock(&blocked_delegations_lock);
1129 }
1130 hash = jhash(&fh->fh_raw, fh->fh_size, 0);
1131 if (test_bit(hash&255, bd->set[0]) &&
1132 test_bit((hash>>8)&255, bd->set[0]) &&
1133 test_bit((hash>>16)&255, bd->set[0]))
1134 return 1;
1135
1136 if (test_bit(hash&255, bd->set[1]) &&
1137 test_bit((hash>>8)&255, bd->set[1]) &&
1138 test_bit((hash>>16)&255, bd->set[1]))
1139 return 1;
1140
1141 return 0;
1142 }
1143
1144 static void block_delegations(struct knfsd_fh *fh)
1145 {
1146 u32 hash;
1147 struct bloom_pair *bd = &blocked_delegations;
1148
1149 hash = jhash(&fh->fh_raw, fh->fh_size, 0);
1150
1151 spin_lock(&blocked_delegations_lock);
1152 __set_bit(hash&255, bd->set[bd->new]);
1153 __set_bit((hash>>8)&255, bd->set[bd->new]);
1154 __set_bit((hash>>16)&255, bd->set[bd->new]);
1155 if (bd->entries == 0)
1156 bd->swap_time = ktime_get_seconds();
1157 bd->entries += 1;
1158 spin_unlock(&blocked_delegations_lock);
1159 }
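/*
 * Usage note (illustrative, not part of the original source): when a
 * delegation is recalled, the recall path records the filehandle via
 * block_delegations(), and alloc_init_deleg() below checks
 * delegation_blocked() before handing out a new delegation on that file.
 * Because this is a bloom filter, a false positive merely delays a new
 * delegation; it never affects correctness.
 */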
1160
1161 static struct nfs4_delegation *
1162 alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
1163 struct nfs4_clnt_odstate *odstate)
1164 {
1165 struct nfs4_delegation *dp;
1166 long n;
1167
1168 dprintk("NFSD alloc_init_deleg\n");
1169 n = atomic_long_inc_return(&num_delegations);
1170 if (n < 0 || n > max_delegations)
1171 goto out_dec;
1172 if (delegation_blocked(&fp->fi_fhandle))
1173 goto out_dec;
1174 dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
1175 if (dp == NULL)
1176 goto out_dec;
1177
1178 /*
1179 * delegation seqid's are never incremented. The 4.1 special
1180 * meaning of seqid 0 isn't meaningful, really, but let's avoid
1181 * 0 anyway just for consistency and use 1:
1182 */
1183 dp->dl_stid.sc_stateid.si_generation = 1;
1184 INIT_LIST_HEAD(&dp->dl_perfile);
1185 INIT_LIST_HEAD(&dp->dl_perclnt);
1186 INIT_LIST_HEAD(&dp->dl_recall_lru);
1187 dp->dl_clnt_odstate = odstate;
1188 get_clnt_odstate(odstate);
1189 dp->dl_type = NFS4_OPEN_DELEGATE_READ;
1190 dp->dl_retries = 1;
1191 dp->dl_recalled = false;
1192 nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
1193 &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
1194 get_nfs4_file(fp);
1195 dp->dl_stid.sc_file = fp;
1196 return dp;
1197 out_dec:
1198 atomic_long_dec(&num_delegations);
1199 return NULL;
1200 }
1201
1202 void
1203 nfs4_put_stid(struct nfs4_stid *s)
1204 {
1205 struct nfs4_file *fp = s->sc_file;
1206 struct nfs4_client *clp = s->sc_client;
1207
1208 might_lock(&clp->cl_lock);
1209
1210 if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
1211 wake_up_all(&close_wq);
1212 return;
1213 }
1214 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1215 nfs4_free_cpntf_statelist(clp->net, s);
1216 spin_unlock(&clp->cl_lock);
1217 s->sc_free(s);
1218 if (fp)
1219 put_nfs4_file(fp);
1220 }
1221
1222 void
1223 nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
1224 {
1225 stateid_t *src = &stid->sc_stateid;
1226
1227 spin_lock(&stid->sc_lock);
1228 if (unlikely(++src->si_generation == 0))
1229 src->si_generation = 1;
1230 memcpy(dst, src, sizeof(*dst));
1231 spin_unlock(&stid->sc_lock);
1232 }
1233
1234 static void put_deleg_file(struct nfs4_file *fp)
1235 {
1236 struct nfsd_file *nf = NULL;
1237
1238 spin_lock(&fp->fi_lock);
1239 if (--fp->fi_delegees == 0)
1240 swap(nf, fp->fi_deleg_file);
1241 spin_unlock(&fp->fi_lock);
1242
1243 if (nf)
1244 nfsd_file_put(nf);
1245 }
1246
1247 static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
1248 {
1249 struct nfs4_file *fp = dp->dl_stid.sc_file;
1250 struct nfsd_file *nf = fp->fi_deleg_file;
1251
1252 WARN_ON_ONCE(!fp->fi_delegees);
1253
1254 vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
1255 put_deleg_file(fp);
1256 }
1257
1258 static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
1259 {
1260 put_clnt_odstate(dp->dl_clnt_odstate);
1261 nfs4_unlock_deleg_lease(dp);
1262 nfs4_put_stid(&dp->dl_stid);
1263 }
1264
1265 void nfs4_unhash_stid(struct nfs4_stid *s)
1266 {
1267 s->sc_type = 0;
1268 }
1269
1270 /**
1271 * nfs4_delegation_exists - Discover if this delegation already exists
1272 * @clp: a pointer to the nfs4_client we're granting a delegation to
1273 * @fp: a pointer to the nfs4_file we're granting a delegation on
1274 *
1275 * Return:
1276 * On success: true iff an existing delegation is found
1277 */
1278
1279 static bool
1280 nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
1281 {
1282 struct nfs4_delegation *searchdp = NULL;
1283 struct nfs4_client *searchclp = NULL;
1284
1285 lockdep_assert_held(&state_lock);
1286 lockdep_assert_held(&fp->fi_lock);
1287
1288 list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
1289 searchclp = searchdp->dl_stid.sc_client;
1290 if (clp == searchclp) {
1291 return true;
1292 }
1293 }
1294 return false;
1295 }
1296
1297 /**
1298 * hash_delegation_locked - Add a delegation to the appropriate lists
1299 * @dp: a pointer to the nfs4_delegation we are adding.
1300 * @fp: a pointer to the nfs4_file we're granting a delegation on
1301 *
1302 * Return:
1303 * On success: NULL if the delegation was successfully hashed.
1304 *
1305 * On error: -EAGAIN if one was previously granted to this
1306 * nfs4_client for this nfs4_file. Delegation is not hashed.
1307 *
1308 */
1309
1310 static int
1311 hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
1312 {
1313 struct nfs4_client *clp = dp->dl_stid.sc_client;
1314
1315 lockdep_assert_held(&state_lock);
1316 lockdep_assert_held(&fp->fi_lock);
1317
1318 if (nfs4_delegation_exists(clp, fp))
1319 return -EAGAIN;
1320 refcount_inc(&dp->dl_stid.sc_count);
1321 dp->dl_stid.sc_type = NFS4_DELEG_STID;
1322 list_add(&dp->dl_perfile, &fp->fi_delegations);
1323 list_add(&dp->dl_perclnt, &clp->cl_delegations);
1324 return 0;
1325 }
1326
1327 static bool delegation_hashed(struct nfs4_delegation *dp)
1328 {
1329 return !(list_empty(&dp->dl_perfile));
1330 }
1331
1332 static bool
1333 unhash_delegation_locked(struct nfs4_delegation *dp)
1334 {
1335 struct nfs4_file *fp = dp->dl_stid.sc_file;
1336
1337 lockdep_assert_held(&state_lock);
1338
1339 if (!delegation_hashed(dp))
1340 return false;
1341
1342 dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
1343 /* Ensure that deleg break won't try to requeue it */
1344 ++dp->dl_time;
1345 spin_lock(&fp->fi_lock);
1346 list_del_init(&dp->dl_perclnt);
1347 list_del_init(&dp->dl_recall_lru);
1348 list_del_init(&dp->dl_perfile);
1349 spin_unlock(&fp->fi_lock);
1350 return true;
1351 }
1352
1353 static void destroy_delegation(struct nfs4_delegation *dp)
1354 {
1355 bool unhashed;
1356
1357 spin_lock(&state_lock);
1358 unhashed = unhash_delegation_locked(dp);
1359 spin_unlock(&state_lock);
1360 if (unhashed)
1361 destroy_unhashed_deleg(dp);
1362 }
1363
1364 static void revoke_delegation(struct nfs4_delegation *dp)
1365 {
1366 struct nfs4_client *clp = dp->dl_stid.sc_client;
1367
1368 WARN_ON(!list_empty(&dp->dl_recall_lru));
1369
1370 if (clp->cl_minorversion) {
1371 spin_lock(&clp->cl_lock);
1372 dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
1373 refcount_inc(&dp->dl_stid.sc_count);
1374 list_add(&dp->dl_recall_lru, &clp->cl_revoked);
1375 spin_unlock(&clp->cl_lock);
1376 }
1377 destroy_unhashed_deleg(dp);
1378 }
1379
1380 /*
1381 * SETCLIENTID state
1382 */
1383
1384 static unsigned int clientid_hashval(u32 id)
1385 {
1386 return id & CLIENT_HASH_MASK;
1387 }
1388
1389 static unsigned int clientstr_hashval(struct xdr_netobj name)
1390 {
1391 return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
1392 }
1393
1394 /*
1395 * A stateid that had a deny mode associated with it is being released
1396 * or downgraded. Recalculate the deny mode on the file.
1397 */
1398 static void
1399 recalculate_deny_mode(struct nfs4_file *fp)
1400 {
1401 struct nfs4_ol_stateid *stp;
1402
1403 spin_lock(&fp->fi_lock);
1404 fp->fi_share_deny = 0;
1405 list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
1406 fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
1407 spin_unlock(&fp->fi_lock);
1408 }
1409
1410 static void
1411 reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
1412 {
1413 int i;
1414 bool change = false;
1415
1416 for (i = 1; i < 4; i++) {
1417 if ((i & deny) != i) {
1418 change = true;
1419 clear_deny(i, stp);
1420 }
1421 }
1422
1423 /* Recalculate per-file deny mode if there was a change */
1424 if (change)
1425 recalculate_deny_mode(stp->st_stid.sc_file);
1426 }
1427
1428 /* release all access and file references for a given stateid */
1429 static void
1430 release_all_access(struct nfs4_ol_stateid *stp)
1431 {
1432 int i;
1433 struct nfs4_file *fp = stp->st_stid.sc_file;
1434
1435 if (fp && stp->st_deny_bmap != 0)
1436 recalculate_deny_mode(fp);
1437
1438 for (i = 1; i < 4; i++) {
1439 if (test_access(i, stp))
1440 nfs4_file_put_access(stp->st_stid.sc_file, i);
1441 clear_access(i, stp);
1442 }
1443 }
1444
1445 static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
1446 {
1447 kfree(sop->so_owner.data);
1448 sop->so_ops->so_free(sop);
1449 }
1450
1451 static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
1452 {
1453 struct nfs4_client *clp = sop->so_client;
1454
1455 might_lock(&clp->cl_lock);
1456
1457 if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
1458 return;
1459 sop->so_ops->so_unhash(sop);
1460 spin_unlock(&clp->cl_lock);
1461 nfs4_free_stateowner(sop);
1462 }
1463
1464 static bool
1465 nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
1466 {
1467 return list_empty(&stp->st_perfile);
1468 }
1469
1470 static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
1471 {
1472 struct nfs4_file *fp = stp->st_stid.sc_file;
1473
1474 lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
1475
1476 if (list_empty(&stp->st_perfile))
1477 return false;
1478
1479 spin_lock(&fp->fi_lock);
1480 list_del_init(&stp->st_perfile);
1481 spin_unlock(&fp->fi_lock);
1482 list_del(&stp->st_perstateowner);
1483 return true;
1484 }
1485
1486 static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
1487 {
1488 struct nfs4_ol_stateid *stp = openlockstateid(stid);
1489
1490 put_clnt_odstate(stp->st_clnt_odstate);
1491 release_all_access(stp);
1492 if (stp->st_stateowner)
1493 nfs4_put_stateowner(stp->st_stateowner);
1494 WARN_ON(!list_empty(&stid->sc_cp_list));
1495 kmem_cache_free(stateid_slab, stid);
1496 }
1497
1498 static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
1499 {
1500 struct nfs4_ol_stateid *stp = openlockstateid(stid);
1501 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
1502 struct nfsd_file *nf;
1503
1504 nf = find_any_file(stp->st_stid.sc_file);
1505 if (nf) {
1506 get_file(nf->nf_file);
1507 filp_close(nf->nf_file, (fl_owner_t)lo);
1508 nfsd_file_put(nf);
1509 }
1510 nfs4_free_ol_stateid(stid);
1511 }
1512
1513 /*
1514 * Put the persistent reference to an already unhashed generic stateid, while
1515 * holding the cl_lock. If it's the last reference, then put it onto the
1516 * reaplist for later destruction.
1517 */
1518 static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
1519 struct list_head *reaplist)
1520 {
1521 struct nfs4_stid *s = &stp->st_stid;
1522 struct nfs4_client *clp = s->sc_client;
1523
1524 lockdep_assert_held(&clp->cl_lock);
1525
1526 WARN_ON_ONCE(!list_empty(&stp->st_locks));
1527
1528 if (!refcount_dec_and_test(&s->sc_count)) {
1529 wake_up_all(&close_wq);
1530 return;
1531 }
1532
1533 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1534 list_add(&stp->st_locks, reaplist);
1535 }
1536
1537 static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
1538 {
1539 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1540
1541 if (!unhash_ol_stateid(stp))
1542 return false;
1543 list_del_init(&stp->st_locks);
1544 nfs4_unhash_stid(&stp->st_stid);
1545 return true;
1546 }
1547
1548 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
1549 {
1550 struct nfs4_client *clp = stp->st_stid.sc_client;
1551 bool unhashed;
1552
1553 spin_lock(&clp->cl_lock);
1554 unhashed = unhash_lock_stateid(stp);
1555 spin_unlock(&clp->cl_lock);
1556 if (unhashed)
1557 nfs4_put_stid(&stp->st_stid);
1558 }
1559
1560 static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
1561 {
1562 struct nfs4_client *clp = lo->lo_owner.so_client;
1563
1564 lockdep_assert_held(&clp->cl_lock);
1565
1566 list_del_init(&lo->lo_owner.so_strhash);
1567 }
1568
1569 /*
1570 * Free a list of generic stateids that were collected earlier after being
1571 * fully unhashed.
1572 */
1573 static void
1574 free_ol_stateid_reaplist(struct list_head *reaplist)
1575 {
1576 struct nfs4_ol_stateid *stp;
1577 struct nfs4_file *fp;
1578
1579 might_sleep();
1580
1581 while (!list_empty(reaplist)) {
1582 stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
1583 st_locks);
1584 list_del(&stp->st_locks);
1585 fp = stp->st_stid.sc_file;
1586 stp->st_stid.sc_free(&stp->st_stid);
1587 if (fp)
1588 put_nfs4_file(fp);
1589 }
1590 }
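/*
 * Usage sketch (illustrative, not part of the original source; see
 * release_open_stateid() below for the real thing):
 *
 *	spin_lock(&clp->cl_lock);
 *	if (unhash_open_stateid(stp, &reaplist))
 *		put_ol_stateid_locked(stp, &reaplist);
 *	spin_unlock(&clp->cl_lock);
 *	free_ol_stateid_reaplist(&reaplist);
 *
 * i.e. stateids are unhashed and collected on the reaplist while holding
 * cl_lock, and only freed here after the lock is dropped, because the
 * sc_free() destructors may sleep.
 */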
1591
1592 static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
1593 struct list_head *reaplist)
1594 {
1595 struct nfs4_ol_stateid *stp;
1596
1597 lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
1598
1599 while (!list_empty(&open_stp->st_locks)) {
1600 stp = list_entry(open_stp->st_locks.next,
1601 struct nfs4_ol_stateid, st_locks);
1602 WARN_ON(!unhash_lock_stateid(stp));
1603 put_ol_stateid_locked(stp, reaplist);
1604 }
1605 }
1606
1607 static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
1608 struct list_head *reaplist)
1609 {
1610 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1611
1612 if (!unhash_ol_stateid(stp))
1613 return false;
1614 release_open_stateid_locks(stp, reaplist);
1615 return true;
1616 }
1617
1618 static void release_open_stateid(struct nfs4_ol_stateid *stp)
1619 {
1620 LIST_HEAD(reaplist);
1621
1622 spin_lock(&stp->st_stid.sc_client->cl_lock);
1623 if (unhash_open_stateid(stp, &reaplist))
1624 put_ol_stateid_locked(stp, &reaplist);
1625 spin_unlock(&stp->st_stid.sc_client->cl_lock);
1626 free_ol_stateid_reaplist(&reaplist);
1627 }
1628
1629 static void unhash_openowner_locked(struct nfs4_openowner *oo)
1630 {
1631 struct nfs4_client *clp = oo->oo_owner.so_client;
1632
1633 lockdep_assert_held(&clp->cl_lock);
1634
1635 list_del_init(&oo->oo_owner.so_strhash);
1636 list_del_init(&oo->oo_perclient);
1637 }
1638
1639 static void release_last_closed_stateid(struct nfs4_openowner *oo)
1640 {
1641 struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
1642 nfsd_net_id);
1643 struct nfs4_ol_stateid *s;
1644
1645 spin_lock(&nn->client_lock);
1646 s = oo->oo_last_closed_stid;
1647 if (s) {
1648 list_del_init(&oo->oo_close_lru);
1649 oo->oo_last_closed_stid = NULL;
1650 }
1651 spin_unlock(&nn->client_lock);
1652 if (s)
1653 nfs4_put_stid(&s->st_stid);
1654 }
1655
1656 static void release_openowner(struct nfs4_openowner *oo)
1657 {
1658 struct nfs4_ol_stateid *stp;
1659 struct nfs4_client *clp = oo->oo_owner.so_client;
1660 struct list_head reaplist;
1661
1662 INIT_LIST_HEAD(&reaplist);
1663
1664 spin_lock(&clp->cl_lock);
1665 unhash_openowner_locked(oo);
1666 while (!list_empty(&oo->oo_owner.so_stateids)) {
1667 stp = list_first_entry(&oo->oo_owner.so_stateids,
1668 struct nfs4_ol_stateid, st_perstateowner);
1669 if (unhash_open_stateid(stp, &reaplist))
1670 put_ol_stateid_locked(stp, &reaplist);
1671 }
1672 spin_unlock(&clp->cl_lock);
1673 free_ol_stateid_reaplist(&reaplist);
1674 release_last_closed_stateid(oo);
1675 nfs4_put_stateowner(&oo->oo_owner);
1676 }
1677
1678 static inline int
1679 hash_sessionid(struct nfs4_sessionid *sessionid)
1680 {
1681 struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
1682
1683 return sid->sequence % SESSION_HASH_SIZE;
1684 }
1685
1686 #ifdef CONFIG_SUNRPC_DEBUG
1687 static inline void
1688 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1689 {
1690 u32 *ptr = (u32 *)(&sessionid->data[0]);
1691 dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
1692 }
1693 #else
1694 static inline void
1695 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1696 {
1697 }
1698 #endif
1699
1700 /*
1701 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
1702 * won't be used for replay.
1703 */
1704 void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
1705 {
1706 struct nfs4_stateowner *so = cstate->replay_owner;
1707
1708 if (nfserr == nfserr_replay_me)
1709 return;
1710
1711 if (!seqid_mutating_err(ntohl(nfserr))) {
1712 nfsd4_cstate_clear_replay(cstate);
1713 return;
1714 }
1715 if (!so)
1716 return;
1717 if (so->so_is_open_owner)
1718 release_last_closed_stateid(openowner(so));
1719 so->so_seqid++;
1720 return;
1721 }
1722
1723 static void
1724 gen_sessionid(struct nfsd4_session *ses)
1725 {
1726 struct nfs4_client *clp = ses->se_client;
1727 struct nfsd4_sessionid *sid;
1728
1729 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
1730 sid->clientid = clp->cl_clientid;
1731 sid->sequence = current_sessionid++;
1732 sid->reserved = 0;
1733 }
1734
1735 /*
1736 * The protocol defines ca_maxresponsesize_cached to include the size of
1737 * the rpc header, but all we need to cache is the data starting after
1738 * the end of the initial SEQUENCE operation--the rest we regenerate
1739 * each time. Therefore we can advertise a ca_maxresponsesize_cached
1740 * value that is the number of bytes in our cache plus a few additional
1741 * bytes. In order to stay on the safe side, and not promise more than
1742 * we can cache, those additional bytes must be the minimum possible: 24
1743 * bytes of rpc header (xid through accept state, with AUTH_NULL
1744 * verifier), 12 for the compound header (with zero-length tag), and 44
1745 * for the SEQUENCE op response:
1746 */
1747 #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
1748
1749 static void
1750 free_session_slots(struct nfsd4_session *ses)
1751 {
1752 int i;
1753
1754 for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
1755 free_svc_cred(&ses->se_slots[i]->sl_cred);
1756 kfree(ses->se_slots[i]);
1757 }
1758 }
1759
1760 /*
1761 * We don't actually need to cache the rpc and session headers, so we
1762 * can allocate a little less for each slot:
1763 */
1764 static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1765 {
1766 u32 size;
1767
1768 if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
1769 size = 0;
1770 else
1771 size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
1772 return size + sizeof(struct nfsd4_slot);
1773 }
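/*
 * Worked example (illustrative, not part of the original source):
 * NFSD_MIN_HDR_SEQ_SZ is 24 + 12 + 44 = 80 bytes, so a session whose
 * maxresp_cached is 2128 bytes gets
 * slot_bytes() = (2128 - 80) + sizeof(struct nfsd4_slot), i.e. room for
 * 2048 bytes of cached reply data plus the slot bookkeeping itself.
 */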
1774
1775 /*
1776 * XXX: If we run out of reserved DRC memory we could (up to a point)
1777 * re-negotiate active sessions and reduce their slot usage to make
1778 * room for new connections. For now we just fail the create session.
1779 */
1780 static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
1781 {
1782 u32 slotsize = slot_bytes(ca);
1783 u32 num = ca->maxreqs;
1784 unsigned long avail, total_avail;
1785 unsigned int scale_factor;
1786
1787 spin_lock(&nfsd_drc_lock);
1788 if (nfsd_drc_max_mem > nfsd_drc_mem_used)
1789 total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
1790 else
1791 /* We have handed out more space than we chose in
1792 * set_max_drc() to allow. That isn't really a
1793 * problem as long as that doesn't make us think we
1794 * have lots more due to integer overflow.
1795 */
1796 total_avail = 0;
1797 avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
1798 /*
1799 * Never use more than a fraction of the remaining memory,
1800 * unless it's the only way to give this client a slot.
1801 * The chosen fraction is either 1/8 or 1/number of threads,
1802 * whichever is smaller. This ensures there are adequate
1803 * slots to support multiple clients per thread.
1804 * Give the client one slot even if that would require
1805 * over-allocation--it is better than failure.
1806 */
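/*
 * Illustrative walk-through (numbers purely hypothetical): with 16 nfsd
 * threads, scale_factor is 16, so a single session is capped at
 * total_avail/16 bytes of DRC memory, but never less than one slot.
 * The client's requested slot count is then trimmed to whatever fits in
 * that budget, with a floor of one slot.
 */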
1807 scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);
1808
1809 avail = clamp_t(unsigned long, avail, slotsize,
1810 total_avail/scale_factor);
1811 num = min_t(int, num, avail / slotsize);
1812 num = max_t(int, num, 1);
1813 nfsd_drc_mem_used += num * slotsize;
1814 spin_unlock(&nfsd_drc_lock);
1815
1816 return num;
1817 }
1818
1819 static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1820 {
1821 int slotsize = slot_bytes(ca);
1822
1823 spin_lock(&nfsd_drc_lock);
1824 nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1825 spin_unlock(&nfsd_drc_lock);
1826 }
1827
1828 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
1829 struct nfsd4_channel_attrs *battrs)
1830 {
1831 int numslots = fattrs->maxreqs;
1832 int slotsize = slot_bytes(fattrs);
1833 struct nfsd4_session *new;
1834 int mem, i;
1835
1836 BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
1837 + sizeof(struct nfsd4_session) > PAGE_SIZE);
1838 mem = numslots * sizeof(struct nfsd4_slot *);
1839
1840 new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
1841 if (!new)
1842 return NULL;
1843 /* allocate each struct nfsd4_slot and data cache in one piece */
1844 for (i = 0; i < numslots; i++) {
1845 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
1846 if (!new->se_slots[i])
1847 goto out_free;
1848 }
1849
1850 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
1851 memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
1852
1853 return new;
1854 out_free:
1855 while (i--)
1856 kfree(new->se_slots[i]);
1857 kfree(new);
1858 return NULL;
1859 }
1860
1861 static void free_conn(struct nfsd4_conn *c)
1862 {
1863 svc_xprt_put(c->cn_xprt);
1864 kfree(c);
1865 }
1866
1867 static void nfsd4_conn_lost(struct svc_xpt_user *u)
1868 {
1869 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
1870 struct nfs4_client *clp = c->cn_session->se_client;
1871
1872 trace_nfsd_cb_lost(clp);
1873
1874 spin_lock(&clp->cl_lock);
1875 if (!list_empty(&c->cn_persession)) {
1876 list_del(&c->cn_persession);
1877 free_conn(c);
1878 }
1879 nfsd4_probe_callback(clp);
1880 spin_unlock(&clp->cl_lock);
1881 }
1882
1883 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1884 {
1885 struct nfsd4_conn *conn;
1886
1887 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1888 if (!conn)
1889 return NULL;
1890 svc_xprt_get(rqstp->rq_xprt);
1891 conn->cn_xprt = rqstp->rq_xprt;
1892 conn->cn_flags = flags;
1893 INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1894 return conn;
1895 }
1896
1897 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1898 {
1899 conn->cn_session = ses;
1900 list_add(&conn->cn_persession, &ses->se_conns);
1901 }
1902
1903 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1904 {
1905 struct nfs4_client *clp = ses->se_client;
1906
1907 spin_lock(&clp->cl_lock);
1908 __nfsd4_hash_conn(conn, ses);
1909 spin_unlock(&clp->cl_lock);
1910 }
1911
1912 static int nfsd4_register_conn(struct nfsd4_conn *conn)
1913 {
1914 conn->cn_xpt_user.callback = nfsd4_conn_lost;
1915 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
1916 }
1917
1918 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
1919 {
1920 int ret;
1921
1922 nfsd4_hash_conn(conn, ses);
1923 ret = nfsd4_register_conn(conn);
1924 if (ret)
1925 /* oops; xprt is already down: */
1926 nfsd4_conn_lost(&conn->cn_xpt_user);
1927 /* We may have gained or lost a callback channel: */
1928 nfsd4_probe_callback_sync(ses->se_client);
1929 }
1930
1931 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1932 {
1933 u32 dir = NFS4_CDFC4_FORE;
1934
1935 if (cses->flags & SESSION4_BACK_CHAN)
1936 dir |= NFS4_CDFC4_BACK;
1937 return alloc_conn(rqstp, dir);
1938 }
1939
1940 /* must be called under client_lock */
1941 static void nfsd4_del_conns(struct nfsd4_session *s)
1942 {
1943 struct nfs4_client *clp = s->se_client;
1944 struct nfsd4_conn *c;
1945
1946 spin_lock(&clp->cl_lock);
1947 while (!list_empty(&s->se_conns)) {
1948 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
1949 list_del_init(&c->cn_persession);
1950 spin_unlock(&clp->cl_lock);
1951
1952 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
1953 free_conn(c);
1954
1955 spin_lock(&clp->cl_lock);
1956 }
1957 spin_unlock(&clp->cl_lock);
1958 }
1959
1960 static void __free_session(struct nfsd4_session *ses)
1961 {
1962 free_session_slots(ses);
1963 kfree(ses);
1964 }
1965
1966 static void free_session(struct nfsd4_session *ses)
1967 {
1968 nfsd4_del_conns(ses);
1969 nfsd4_put_drc_mem(&ses->se_fchannel);
1970 __free_session(ses);
1971 }
1972
1973 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1974 {
1975 int idx;
1976 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1977
1978 new->se_client = clp;
1979 gen_sessionid(new);
1980
1981 INIT_LIST_HEAD(&new->se_conns);
1982
1983 new->se_cb_seq_nr = 1;
1984 new->se_flags = cses->flags;
1985 new->se_cb_prog = cses->callback_prog;
1986 new->se_cb_sec = cses->cb_sec;
1987 atomic_set(&new->se_ref, 0);
1988 idx = hash_sessionid(&new->se_sessionid);
1989 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1990 spin_lock(&clp->cl_lock);
1991 list_add(&new->se_perclnt, &clp->cl_sessions);
1992 spin_unlock(&clp->cl_lock);
1993
1994 {
1995 struct sockaddr *sa = svc_addr(rqstp);
1996 /*
1997 * This is a little silly; with sessions there's no real
1998 * use for the callback address. Use the peer address
1999 * as a reasonable default for now, but consider fixing
2000 * the rpc client not to require an address in the
2001 * future:
2002 */
2003 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
2004 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
2005 }
2006 }
2007
2008 /* caller must hold client_lock */
2009 static struct nfsd4_session *
2010 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
2011 {
2012 struct nfsd4_session *elem;
2013 int idx;
2014 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2015
2016 lockdep_assert_held(&nn->client_lock);
2017
2018 dump_sessionid(__func__, sessionid);
2019 idx = hash_sessionid(sessionid);
2020 /* Search in the appropriate list */
2021 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
2022 if (!memcmp(elem->se_sessionid.data, sessionid->data,
2023 NFS4_MAX_SESSIONID_LEN)) {
2024 return elem;
2025 }
2026 }
2027
2028 dprintk("%s: session not found\n", __func__);
2029 return NULL;
2030 }
2031
2032 static struct nfsd4_session *
2033 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
2034 __be32 *ret)
2035 {
2036 struct nfsd4_session *session;
2037 __be32 status = nfserr_badsession;
2038
2039 session = __find_in_sessionid_hashtbl(sessionid, net);
2040 if (!session)
2041 goto out;
2042 status = nfsd4_get_session_locked(session);
2043 if (status)
2044 session = NULL;
2045 out:
2046 *ret = status;
2047 return session;
2048 }
2049
2050 /* caller must hold client_lock */
2051 static void
2052 unhash_session(struct nfsd4_session *ses)
2053 {
2054 struct nfs4_client *clp = ses->se_client;
2055 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2056
2057 lockdep_assert_held(&nn->client_lock);
2058
2059 list_del(&ses->se_hash);
2060 spin_lock(&ses->se_client->cl_lock);
2061 list_del(&ses->se_perclnt);
2062 spin_unlock(&ses->se_client->cl_lock);
2063 }
2064
2065 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
2066 static int
2067 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
2068 {
2069 /*
2070 * We're assuming the clid was not given out from a boot
2071 * precisely 2^32 (about 136 years) before this one. That seems
2072 * a safe assumption:
2073 */
2074 if (clid->cl_boot == (u32)nn->boot_time)
2075 return 0;
2076 trace_nfsd_clid_stale(clid);
2077 return 1;
2078 }
2079
2080 /*
2081 * XXX Should we use a slab cache?
2082 * This type of memory management is somewhat inefficient, but we use it
2083 * anyway since SETCLIENTID is not a common operation.
2084 */
2085 static struct nfs4_client *alloc_client(struct xdr_netobj name,
2086 struct nfsd_net *nn)
2087 {
2088 struct nfs4_client *clp;
2089 int i;
2090
2091 if (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) {
2092 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
2093 return NULL;
2094 }
2095 clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
2096 if (clp == NULL)
2097 return NULL;
2098 xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
2099 if (clp->cl_name.data == NULL)
2100 goto err_no_name;
2101 clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
2102 sizeof(struct list_head),
2103 GFP_KERNEL);
2104 if (!clp->cl_ownerstr_hashtbl)
2105 goto err_no_hashtbl;
2106 for (i = 0; i < OWNER_HASH_SIZE; i++)
2107 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
2108 INIT_LIST_HEAD(&clp->cl_sessions);
2109 idr_init(&clp->cl_stateids);
2110 atomic_set(&clp->cl_rpc_users, 0);
2111 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
2112 clp->cl_state = NFSD4_ACTIVE;
2113 atomic_inc(&nn->nfs4_client_count);
2114 atomic_set(&clp->cl_delegs_in_recall, 0);
2115 INIT_LIST_HEAD(&clp->cl_idhash);
2116 INIT_LIST_HEAD(&clp->cl_openowners);
2117 INIT_LIST_HEAD(&clp->cl_delegations);
2118 INIT_LIST_HEAD(&clp->cl_lru);
2119 INIT_LIST_HEAD(&clp->cl_revoked);
2120 #ifdef CONFIG_NFSD_PNFS
2121 INIT_LIST_HEAD(&clp->cl_lo_states);
2122 #endif
2123 INIT_LIST_HEAD(&clp->async_copies);
2124 spin_lock_init(&clp->async_lock);
2125 spin_lock_init(&clp->cl_lock);
2126 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
2127 return clp;
2128 err_no_hashtbl:
2129 kfree(clp->cl_name.data);
2130 err_no_name:
2131 kmem_cache_free(client_slab, clp);
2132 return NULL;
2133 }
2134
2135 static void __free_client(struct kref *k)
2136 {
2137 struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
2138 struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);
2139
2140 free_svc_cred(&clp->cl_cred);
2141 kfree(clp->cl_ownerstr_hashtbl);
2142 kfree(clp->cl_name.data);
2143 kfree(clp->cl_nii_domain.data);
2144 kfree(clp->cl_nii_name.data);
2145 idr_destroy(&clp->cl_stateids);
2146 kmem_cache_free(client_slab, clp);
2147 }
2148
2149 static void drop_client(struct nfs4_client *clp)
2150 {
2151 kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
2152 }
2153
2154 static void
2155 free_client(struct nfs4_client *clp)
2156 {
2157 while (!list_empty(&clp->cl_sessions)) {
2158 struct nfsd4_session *ses;
2159 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
2160 se_perclnt);
2161 list_del(&ses->se_perclnt);
2162 WARN_ON_ONCE(atomic_read(&ses->se_ref));
2163 free_session(ses);
2164 }
2165 rpc_destroy_wait_queue(&clp->cl_cb_waitq);
2166 if (clp->cl_nfsd_dentry) {
2167 nfsd_client_rmdir(clp->cl_nfsd_dentry);
2168 clp->cl_nfsd_dentry = NULL;
2169 wake_up_all(&expiry_wq);
2170 }
2171 drop_client(clp);
2172 }
2173
2174 /* must be called under the client_lock */
2175 static void
2176 unhash_client_locked(struct nfs4_client *clp)
2177 {
2178 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2179 struct nfsd4_session *ses;
2180
2181 lockdep_assert_held(&nn->client_lock);
2182
2183 /* Mark the client as expired! */
2184 clp->cl_time = 0;
2185 /* Make it invisible */
2186 if (!list_empty(&clp->cl_idhash)) {
2187 list_del_init(&clp->cl_idhash);
2188 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2189 rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
2190 else
2191 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2192 }
2193 list_del_init(&clp->cl_lru);
2194 spin_lock(&clp->cl_lock);
2195 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
2196 list_del_init(&ses->se_hash);
2197 spin_unlock(&clp->cl_lock);
2198 }
2199
2200 static void
2201 unhash_client(struct nfs4_client *clp)
2202 {
2203 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2204
2205 spin_lock(&nn->client_lock);
2206 unhash_client_locked(clp);
2207 spin_unlock(&nn->client_lock);
2208 }
2209
2210 static __be32 mark_client_expired_locked(struct nfs4_client *clp)
2211 {
2212 if (atomic_read(&clp->cl_rpc_users))
2213 return nfserr_jukebox;
2214 unhash_client_locked(clp);
2215 return nfs_ok;
2216 }
2217
2218 static void
2219 __destroy_client(struct nfs4_client *clp)
2220 {
2221 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2222 int i;
2223 struct nfs4_openowner *oo;
2224 struct nfs4_delegation *dp;
2225 struct list_head reaplist;
2226
2227 INIT_LIST_HEAD(&reaplist);
2228 spin_lock(&state_lock);
2229 while (!list_empty(&clp->cl_delegations)) {
2230 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
2231 WARN_ON(!unhash_delegation_locked(dp));
2232 list_add(&dp->dl_recall_lru, &reaplist);
2233 }
2234 spin_unlock(&state_lock);
2235 while (!list_empty(&reaplist)) {
2236 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
2237 list_del_init(&dp->dl_recall_lru);
2238 destroy_unhashed_deleg(dp);
2239 }
2240 while (!list_empty(&clp->cl_revoked)) {
2241 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
2242 list_del_init(&dp->dl_recall_lru);
2243 nfs4_put_stid(&dp->dl_stid);
2244 }
2245 while (!list_empty(&clp->cl_openowners)) {
2246 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
2247 nfs4_get_stateowner(&oo->oo_owner);
2248 release_openowner(oo);
2249 }
2250 for (i = 0; i < OWNER_HASH_SIZE; i++) {
2251 struct nfs4_stateowner *so, *tmp;
2252
2253 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
2254 so_strhash) {
2255 /* Should be no openowners at this point */
2256 WARN_ON_ONCE(so->so_is_open_owner);
2257 remove_blocked_locks(lockowner(so));
2258 }
2259 }
2260 nfsd4_return_all_client_layouts(clp);
2261 nfsd4_shutdown_copy(clp);
2262 nfsd4_shutdown_callback(clp);
2263 if (clp->cl_cb_conn.cb_xprt)
2264 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
2265 atomic_add_unless(&nn->nfs4_client_count, -1, 0);
2266 nfsd4_dec_courtesy_client_count(nn, clp);
2267 free_client(clp);
2268 wake_up_all(&expiry_wq);
2269 }
2270
2271 static void
2272 destroy_client(struct nfs4_client *clp)
2273 {
2274 unhash_client(clp);
2275 __destroy_client(clp);
2276 }
2277
2278 static void inc_reclaim_complete(struct nfs4_client *clp)
2279 {
2280 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2281
2282 if (!nn->track_reclaim_completes)
2283 return;
2284 if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
2285 return;
2286 if (atomic_inc_return(&nn->nr_reclaim_complete) ==
2287 nn->reclaim_str_hashtbl_size) {
2288 printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
2289 clp->net->ns.inum);
2290 nfsd4_end_grace(nn);
2291 }
2292 }
2293
2294 static void expire_client(struct nfs4_client *clp)
2295 {
2296 unhash_client(clp);
2297 nfsd4_client_record_remove(clp);
2298 __destroy_client(clp);
2299 }
2300
2301 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
2302 {
2303 memcpy(target->cl_verifier.data, source->data,
2304 sizeof(target->cl_verifier.data));
2305 }
2306
2307 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
2308 {
2309 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
2310 target->cl_clientid.cl_id = source->cl_clientid.cl_id;
2311 }
2312
2313 static int copy_cred(struct svc_cred *target, struct svc_cred *source)
2314 {
2315 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
2316 target->cr_raw_principal = kstrdup(source->cr_raw_principal,
2317 GFP_KERNEL);
2318 target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
2319 if ((source->cr_principal && !target->cr_principal) ||
2320 (source->cr_raw_principal && !target->cr_raw_principal) ||
2321 (source->cr_targ_princ && !target->cr_targ_princ))
2322 return -ENOMEM;
2323
2324 target->cr_flavor = source->cr_flavor;
2325 target->cr_uid = source->cr_uid;
2326 target->cr_gid = source->cr_gid;
2327 target->cr_group_info = source->cr_group_info;
2328 get_group_info(target->cr_group_info);
2329 target->cr_gss_mech = source->cr_gss_mech;
2330 if (source->cr_gss_mech)
2331 gss_mech_get(source->cr_gss_mech);
2332 return 0;
2333 }
2334
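/*
 * Total ordering on opaque client-name blobs, used by the conf/unconf
 * name rbtrees below: a shorter blob always sorts before a longer one,
 * and equal-length blobs are ordered by memcmp().
 */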
2335 static int
2336 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
2337 {
2338 if (o1->len < o2->len)
2339 return -1;
2340 if (o1->len > o2->len)
2341 return 1;
2342 return memcmp(o1->data, o2->data, o1->len);
2343 }
2344
2345 static int
2346 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
2347 {
2348 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
2349 }
2350
2351 static int
2352 same_clid(clientid_t *cl1, clientid_t *cl2)
2353 {
2354 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
2355 }
2356
2357 static bool groups_equal(struct group_info *g1, struct group_info *g2)
2358 {
2359 int i;
2360
2361 if (g1->ngroups != g2->ngroups)
2362 return false;
2363 for (i=0; i<g1->ngroups; i++)
2364 if (!gid_eq(g1->gid[i], g2->gid[i]))
2365 return false;
2366 return true;
2367 }
2368
2369 /*
2370 * RFC 3530 language requires clid_inuse be returned when the
2371 * "principal" associated with a request differs from that previously
2372 * used. We use the uid, gids, and gss principal string as our best
2373 * approximation. We also don't want to allow non-gss use of a client
2374 * established using gss: in theory cr_principal should catch that
2375 * change, but in practice cr_principal can be null even in the gss case
2376 * since gssd doesn't always pass down a principal string.
2377 */
2378 static bool is_gss_cred(struct svc_cred *cr)
2379 {
2380 /* Is cr_flavor one of the gss "pseudoflavors"?: */
2381 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
2382 }
2383
2384
2385 static bool
2386 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
2387 {
2388 if ((is_gss_cred(cr1) != is_gss_cred(cr2))
2389 || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
2390 || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
2391 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
2392 return false;
2393 /* XXX: check that cr_targ_princ fields match ? */
2394 if (cr1->cr_principal == cr2->cr_principal)
2395 return true;
2396 if (!cr1->cr_principal || !cr2->cr_principal)
2397 return false;
2398 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
2399 }
2400
2401 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
2402 {
2403 struct svc_cred *cr = &rqstp->rq_cred;
2404 u32 service;
2405
2406 if (!cr->cr_gss_mech)
2407 return false;
2408 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
2409 return service == RPC_GSS_SVC_INTEGRITY ||
2410 service == RPC_GSS_SVC_PRIVACY;
2411 }
2412
2413 bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
2414 {
2415 struct svc_cred *cr = &rqstp->rq_cred;
2416
2417 if (!cl->cl_mach_cred)
2418 return true;
2419 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
2420 return false;
2421 if (!svc_rqst_integrity_protected(rqstp))
2422 return false;
2423 if (cl->cl_cred.cr_raw_principal)
2424 return 0 == strcmp(cl->cl_cred.cr_raw_principal,
2425 cr->cr_raw_principal);
2426 if (!cr->cr_principal)
2427 return false;
2428 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
2429 }
2430
2431 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
2432 {
2433 __be32 verf[2];
2434
2435 /*
2436 * This is opaque to the client, so no need to byte-swap. Use
2437 * __force to keep sparse happy
2438 */
2439 verf[0] = (__force __be32)(u32)ktime_get_real_seconds();
2440 verf[1] = (__force __be32)nn->clverifier_counter++;
2441 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
2442 }
2443
2444 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2445 {
2446 clp->cl_clientid.cl_boot = (u32)nn->boot_time;
2447 clp->cl_clientid.cl_id = nn->clientid_counter++;
2448 gen_confirm(clp, nn);
2449 }
2450
2451 static struct nfs4_stid *
2452 find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
2453 {
2454 struct nfs4_stid *ret;
2455
2456 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
2457 if (!ret || !ret->sc_type)
2458 return NULL;
2459 return ret;
2460 }
2461
2462 static struct nfs4_stid *
2463 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
2464 {
2465 struct nfs4_stid *s;
2466
2467 spin_lock(&cl->cl_lock);
2468 s = find_stateid_locked(cl, t);
2469 if (s != NULL) {
2470 if (typemask & s->sc_type)
2471 refcount_inc(&s->sc_count);
2472 else
2473 s = NULL;
2474 }
2475 spin_unlock(&cl->cl_lock);
2476 return s;
2477 }
2478
2479 static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
2480 {
2481 struct nfsdfs_client *nc;
2482 nc = get_nfsdfs_client(inode);
2483 if (!nc)
2484 return NULL;
2485 return container_of(nc, struct nfs4_client, cl_nfsdfs);
2486 }
2487
2488 static void seq_quote_mem(struct seq_file *m, char *data, int len)
2489 {
2490 seq_printf(m, "\"");
2491 seq_escape_mem(m, data, len, ESCAPE_HEX | ESCAPE_NAP | ESCAPE_APPEND, "\"\\");
2492 seq_printf(m, "\"");
2493 }
2494
2495 static const char *cb_state2str(int state)
2496 {
2497 switch (state) {
2498 case NFSD4_CB_UP:
2499 return "UP";
2500 case NFSD4_CB_UNKNOWN:
2501 return "UNKNOWN";
2502 case NFSD4_CB_DOWN:
2503 return "DOWN";
2504 case NFSD4_CB_FAULT:
2505 return "FAULT";
2506 }
2507 return "UNDEFINED";
2508 }
2509
2510 static int client_info_show(struct seq_file *m, void *v)
2511 {
2512 struct inode *inode = file_inode(m->file);
2513 struct nfs4_client *clp;
2514 u64 clid;
2515
2516 clp = get_nfsdfs_clp(inode);
2517 if (!clp)
2518 return -ENXIO;
2519 memcpy(&clid, &clp->cl_clientid, sizeof(clid));
2520 seq_printf(m, "clientid: 0x%llx\n", clid);
2521 seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
2522
2523 if (clp->cl_state == NFSD4_COURTESY)
2524 seq_puts(m, "status: courtesy\n");
2525 else if (clp->cl_state == NFSD4_EXPIRABLE)
2526 seq_puts(m, "status: expirable\n");
2527 else if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2528 seq_puts(m, "status: confirmed\n");
2529 else
2530 seq_puts(m, "status: unconfirmed\n");
2531 seq_printf(m, "seconds from last renew: %lld\n",
2532 ktime_get_boottime_seconds() - clp->cl_time);
2533 seq_printf(m, "name: ");
2534 seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
2535 seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
2536 if (clp->cl_nii_domain.data) {
2537 seq_printf(m, "Implementation domain: ");
2538 seq_quote_mem(m, clp->cl_nii_domain.data,
2539 clp->cl_nii_domain.len);
2540 seq_printf(m, "\nImplementation name: ");
2541 seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
2542 seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
2543 clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
2544 }
2545 seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state));
2546 seq_printf(m, "callback address: %pISpc\n", &clp->cl_cb_conn.cb_addr);
2547 drop_client(clp);
2548
2549 return 0;
2550 }
2551
2552 DEFINE_SHOW_ATTRIBUTE(client_info);
2553
2554 static void *states_start(struct seq_file *s, loff_t *pos)
2555 __acquires(&clp->cl_lock)
2556 {
2557 struct nfs4_client *clp = s->private;
2558 unsigned long id = *pos;
2559 void *ret;
2560
2561 spin_lock(&clp->cl_lock);
2562 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2563 *pos = id;
2564 return ret;
2565 }
2566
2567 static void *states_next(struct seq_file *s, void *v, loff_t *pos)
2568 {
2569 struct nfs4_client *clp = s->private;
2570 unsigned long id = *pos;
2571 void *ret;
2572
2573 id = *pos;
2574 id++;
2575 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2576 *pos = id;
2577 return ret;
2578 }
2579
2580 static void states_stop(struct seq_file *s, void *v)
2581 __releases(&clp->cl_lock)
2582 {
2583 struct nfs4_client *clp = s->private;
2584
2585 spin_unlock(&clp->cl_lock);
2586 }
2587
2588 static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
2589 {
2590 seq_printf(s, "filename: \"%pD2\"", f->nf_file);
2591 }
2592
2593 static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
2594 {
2595 struct inode *inode = file_inode(f->nf_file);
2596
2597 seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
2598 MAJOR(inode->i_sb->s_dev),
2599 MINOR(inode->i_sb->s_dev),
2600 inode->i_ino);
2601 }
2602
2603 static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
2604 {
2605 seq_printf(s, "owner: ");
2606 seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
2607 }
2608
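/*
 * A stateid is rendered as its 32-bit generation in hex immediately
 * followed by the 12 opaque bytes in hex; this is the format that appears
 * on each line of the per-client "states" file below.
 */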
2609 static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
2610 {
2611 seq_printf(s, "0x%.8x", stid->si_generation);
2612 seq_printf(s, "%12phN", &stid->si_opaque);
2613 }
2614
2615 static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
2616 {
2617 struct nfs4_ol_stateid *ols;
2618 struct nfs4_file *nf;
2619 struct nfsd_file *file;
2620 struct nfs4_stateowner *oo;
2621 unsigned int access, deny;
2622
2623 if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID)
2624 return 0; /* XXX: or SEQ_SKIP? */
2625 ols = openlockstateid(st);
2626 oo = ols->st_stateowner;
2627 nf = st->sc_file;
2628
2629 spin_lock(&nf->fi_lock);
2630 file = find_any_file_locked(nf);
2631 if (!file)
2632 goto out;
2633
2634 seq_printf(s, "- ");
2635 nfs4_show_stateid(s, &st->sc_stateid);
2636 seq_printf(s, ": { type: open, ");
2637
2638 access = bmap_to_share_mode(ols->st_access_bmap);
2639 deny = bmap_to_share_mode(ols->st_deny_bmap);
2640
2641 seq_printf(s, "access: %s%s, ",
2642 access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2643 access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2644 seq_printf(s, "deny: %s%s, ",
2645 deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2646 deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2647
2648 nfs4_show_superblock(s, file);
2649 seq_printf(s, ", ");
2650 nfs4_show_fname(s, file);
2651 seq_printf(s, ", ");
2652 nfs4_show_owner(s, oo);
2653 seq_printf(s, " }\n");
2654 out:
2655 spin_unlock(&nf->fi_lock);
2656 return 0;
2657 }
2658
2659 static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
2660 {
2661 struct nfs4_ol_stateid *ols;
2662 struct nfs4_file *nf;
2663 struct nfsd_file *file;
2664 struct nfs4_stateowner *oo;
2665
2666 ols = openlockstateid(st);
2667 oo = ols->st_stateowner;
2668 nf = st->sc_file;
2669 spin_lock(&nf->fi_lock);
2670 file = find_any_file_locked(nf);
2671 if (!file)
2672 goto out;
2673
2674 seq_printf(s, "- ");
2675 nfs4_show_stateid(s, &st->sc_stateid);
2676 seq_printf(s, ": { type: lock, ");
2677
2678 /*
2679 * Note: a lock stateid isn't really the same thing as a lock,
2680 * it's the locking state held by one owner on a file, and there
2681 * may be multiple (or no) lock ranges associated with it.
2682 * (For that matter, the same is true of open stateids.)
2683 */
2684
2685 nfs4_show_superblock(s, file);
2686 /* XXX: open stateid? */
2687 seq_printf(s, ", ");
2688 nfs4_show_fname(s, file);
2689 seq_printf(s, ", ");
2690 nfs4_show_owner(s, oo);
2691 seq_printf(s, " }\n");
2692 out:
2693 spin_unlock(&nf->fi_lock);
2694 return 0;
2695 }
2696
2697 static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
2698 {
2699 struct nfs4_delegation *ds;
2700 struct nfs4_file *nf;
2701 struct nfsd_file *file;
2702
2703 ds = delegstateid(st);
2704 nf = st->sc_file;
2705 spin_lock(&nf->fi_lock);
2706 file = find_deleg_file_locked(nf);
2707 if (!file)
2708 goto out;
2709
2710 seq_printf(s, "- ");
2711 nfs4_show_stateid(s, &st->sc_stateid);
2712 seq_printf(s, ": { type: deleg, ");
2713
2714 /* Kinda dead code as long as we only support read delegs: */
2715 seq_printf(s, "access: %s, ",
2716 ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");
2717
2718 /* XXX: lease time, whether it's being recalled. */
2719
2720 nfs4_show_superblock(s, file);
2721 seq_printf(s, ", ");
2722 nfs4_show_fname(s, file);
2723 seq_printf(s, " }\n");
2724 out:
2725 spin_unlock(&nf->fi_lock);
2726 return 0;
2727 }
2728
2729 static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
2730 {
2731 struct nfs4_layout_stateid *ls;
2732 struct nfsd_file *file;
2733
2734 ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
2735 file = ls->ls_file;
2736
2737 seq_printf(s, "- ");
2738 nfs4_show_stateid(s, &st->sc_stateid);
2739 seq_printf(s, ": { type: layout, ");
2740
2741 /* XXX: What else would be useful? */
2742
2743 nfs4_show_superblock(s, file);
2744 seq_printf(s, ", ");
2745 nfs4_show_fname(s, file);
2746 seq_printf(s, " }\n");
2747
2748 return 0;
2749 }
2750
2751 static int states_show(struct seq_file *s, void *v)
2752 {
2753 struct nfs4_stid *st = v;
2754
2755 switch (st->sc_type) {
2756 case NFS4_OPEN_STID:
2757 return nfs4_show_open(s, st);
2758 case NFS4_LOCK_STID:
2759 return nfs4_show_lock(s, st);
2760 case NFS4_DELEG_STID:
2761 return nfs4_show_deleg(s, st);
2762 case NFS4_LAYOUT_STID:
2763 return nfs4_show_layout(s, st);
2764 default:
2765 return 0; /* XXX: or SEQ_SKIP? */
2766 }
2767 /* XXX: copy stateids? */
2768 }
2769
2770 static struct seq_operations states_seq_ops = {
2771 .start = states_start,
2772 .next = states_next,
2773 .stop = states_stop,
2774 .show = states_show
2775 };
2776
2777 static int client_states_open(struct inode *inode, struct file *file)
2778 {
2779 struct seq_file *s;
2780 struct nfs4_client *clp;
2781 int ret;
2782
2783 clp = get_nfsdfs_clp(inode);
2784 if (!clp)
2785 return -ENXIO;
2786
2787 ret = seq_open(file, &states_seq_ops);
2788 if (ret)
2789 return ret;
2790 s = file->private_data;
2791 s->private = clp;
2792 return 0;
2793 }
2794
2795 static int client_opens_release(struct inode *inode, struct file *file)
2796 {
2797 struct seq_file *m = file->private_data;
2798 struct nfs4_client *clp = m->private;
2799
2800 /* XXX: alternatively, we could get/drop in seq start/stop */
2801 drop_client(clp);
2802 return seq_release(inode, file);
2803 }
2804
2805 static const struct file_operations client_states_fops = {
2806 .open = client_states_open,
2807 .read = seq_read,
2808 .llseek = seq_lseek,
2809 .release = client_opens_release,
2810 };
2811
2812 /*
2813 * Normally we refuse to destroy clients that are in use, but here the
2814 * administrator is telling us to just do it. We also want to wait
2815 * so the caller has a guarantee that the client's locks are gone by
2816 * the time the write returns:
2817 */
2818 static void force_expire_client(struct nfs4_client *clp)
2819 {
2820 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2821 bool already_expired;
2822
2823 trace_nfsd_clid_admin_expired(&clp->cl_clientid);
2824
2825 spin_lock(&nn->client_lock);
2826 clp->cl_time = 0;
2827 spin_unlock(&nn->client_lock);
2828
2829 wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
2830 spin_lock(&nn->client_lock);
2831 already_expired = list_empty(&clp->cl_lru);
2832 if (!already_expired)
2833 unhash_client_locked(clp);
2834 spin_unlock(&nn->client_lock);
2835
2836 if (!already_expired)
2837 expire_client(clp);
2838 else
2839 wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
2840 }
2841
2842 static ssize_t client_ctl_write(struct file *file, const char __user *buf,
2843 size_t size, loff_t *pos)
2844 {
2845 char *data;
2846 struct nfs4_client *clp;
2847
2848 data = simple_transaction_get(file, buf, size);
2849 if (IS_ERR(data))
2850 return PTR_ERR(data);
2851 if (size != 7 || 0 != memcmp(data, "expire\n", 7))
2852 return -EINVAL;
2853 clp = get_nfsdfs_clp(file_inode(file));
2854 if (!clp)
2855 return -ENXIO;
2856 force_expire_client(clp);
2857 drop_client(clp);
2858 return 7;
2859 }
2860
2861 static const struct file_operations client_ctl_fops = {
2862 .write = client_ctl_write,
2863 .release = simple_transaction_release,
2864 };
2865
2866 static const struct tree_descr client_files[] = {
2867 [0] = {"info", &client_info_fops, S_IRUSR},
2868 [1] = {"states", &client_states_fops, S_IRUSR},
2869 [2] = {"ctl", &client_ctl_fops, S_IWUSR},
2870 [3] = {""},
2871 };
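/*
 * These three files make up each per-client directory in the nfsd
 * filesystem (conventionally mounted at /proc/fs/nfsd, so e.g.
 * /proc/fs/nfsd/clients/<id>/ctl -- path given only for illustration).
 * "info" and "states" are read-only views for the administrator; writing
 * the literal string "expire\n" to "ctl" forcibly expires the client via
 * client_ctl_write() above.
 */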
2872
2873 static struct nfs4_client *create_client(struct xdr_netobj name,
2874 struct svc_rqst *rqstp, nfs4_verifier *verf)
2875 {
2876 struct nfs4_client *clp;
2877 struct sockaddr *sa = svc_addr(rqstp);
2878 int ret;
2879 struct net *net = SVC_NET(rqstp);
2880 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2881 struct dentry *dentries[ARRAY_SIZE(client_files)];
2882
2883 clp = alloc_client(name, nn);
2884 if (clp == NULL)
2885 return NULL;
2886
2887 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
2888 if (ret) {
2889 free_client(clp);
2890 return NULL;
2891 }
2892 gen_clid(clp, nn);
2893 kref_init(&clp->cl_nfsdfs.cl_ref);
2894 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
2895 clp->cl_time = ktime_get_boottime_seconds();
2896 clear_bit(0, &clp->cl_cb_slot_busy);
2897 copy_verf(clp, verf);
2898 memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
2899 clp->cl_cb_session = NULL;
2900 clp->net = net;
2901 clp->cl_nfsd_dentry = nfsd_client_mkdir(
2902 nn, &clp->cl_nfsdfs,
2903 clp->cl_clientid.cl_id - nn->clientid_base,
2904 client_files, dentries);
2905 clp->cl_nfsd_info_dentry = dentries[0];
2906 if (!clp->cl_nfsd_dentry) {
2907 free_client(clp);
2908 return NULL;
2909 }
2910 return clp;
2911 }
2912
2913 static void
2914 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
2915 {
2916 struct rb_node **new = &(root->rb_node), *parent = NULL;
2917 struct nfs4_client *clp;
2918
2919 while (*new) {
2920 clp = rb_entry(*new, struct nfs4_client, cl_namenode);
2921 parent = *new;
2922
2923 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
2924 new = &((*new)->rb_left);
2925 else
2926 new = &((*new)->rb_right);
2927 }
2928
2929 rb_link_node(&new_clp->cl_namenode, parent, new);
2930 rb_insert_color(&new_clp->cl_namenode, root);
2931 }
2932
2933 static struct nfs4_client *
2934 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
2935 {
2936 int cmp;
2937 struct rb_node *node = root->rb_node;
2938 struct nfs4_client *clp;
2939
2940 while (node) {
2941 clp = rb_entry(node, struct nfs4_client, cl_namenode);
2942 cmp = compare_blob(&clp->cl_name, name);
2943 if (cmp > 0)
2944 node = node->rb_left;
2945 else if (cmp < 0)
2946 node = node->rb_right;
2947 else
2948 return clp;
2949 }
2950 return NULL;
2951 }
2952
2953 static void
2954 add_to_unconfirmed(struct nfs4_client *clp)
2955 {
2956 unsigned int idhashval;
2957 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2958
2959 lockdep_assert_held(&nn->client_lock);
2960
2961 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2962 add_clp_to_name_tree(clp, &nn->unconf_name_tree);
2963 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2964 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
2965 renew_client_locked(clp);
2966 }
2967
2968 static void
2969 move_to_confirmed(struct nfs4_client *clp)
2970 {
2971 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2972 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2973
2974 lockdep_assert_held(&nn->client_lock);
2975
2976 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
2977 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2978 add_clp_to_name_tree(clp, &nn->conf_name_tree);
2979 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2980 trace_nfsd_clid_confirmed(&clp->cl_clientid);
2981 renew_client_locked(clp);
2982 }
2983
2984 static struct nfs4_client *
2985 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
2986 {
2987 struct nfs4_client *clp;
2988 unsigned int idhashval = clientid_hashval(clid->cl_id);
2989
2990 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
2991 if (same_clid(&clp->cl_clientid, clid)) {
2992 if ((bool)clp->cl_minorversion != sessions)
2993 return NULL;
2994 renew_client_locked(clp);
2995 return clp;
2996 }
2997 }
2998 return NULL;
2999 }
3000
3001 static struct nfs4_client *
3002 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
3003 {
3004 struct list_head *tbl = nn->conf_id_hashtbl;
3005
3006 lockdep_assert_held(&nn->client_lock);
3007 return find_client_in_id_table(tbl, clid, sessions);
3008 }
3009
3010 static struct nfs4_client *
3011 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
3012 {
3013 struct list_head *tbl = nn->unconf_id_hashtbl;
3014
3015 lockdep_assert_held(&nn->client_lock);
3016 return find_client_in_id_table(tbl, clid, sessions);
3017 }
3018
3019 static bool clp_used_exchangeid(struct nfs4_client *clp)
3020 {
3021 return clp->cl_exchange_flags != 0;
3022 }
3023
3024 static struct nfs4_client *
3025 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
3026 {
3027 lockdep_assert_held(&nn->client_lock);
3028 return find_clp_in_name_tree(name, &nn->conf_name_tree);
3029 }
3030
3031 static struct nfs4_client *
3032 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
3033 {
3034 lockdep_assert_held(&nn->client_lock);
3035 return find_clp_in_name_tree(name, &nn->unconf_name_tree);
3036 }
3037
3038 static void
3039 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
3040 {
3041 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
3042 struct sockaddr *sa = svc_addr(rqstp);
3043 u32 scopeid = rpc_get_scope_id(sa);
3044 unsigned short expected_family;
3045
3046 /* Currently, we only support tcp and tcp6 for the callback channel */
3047 if (se->se_callback_netid_len == 3 &&
3048 !memcmp(se->se_callback_netid_val, "tcp", 3))
3049 expected_family = AF_INET;
3050 else if (se->se_callback_netid_len == 4 &&
3051 !memcmp(se->se_callback_netid_val, "tcp6", 4))
3052 expected_family = AF_INET6;
3053 else
3054 goto out_err;
3055
3056 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
3057 se->se_callback_addr_len,
3058 (struct sockaddr *)&conn->cb_addr,
3059 sizeof(conn->cb_addr));
3060
3061 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
3062 goto out_err;
3063
3064 if (conn->cb_addr.ss_family == AF_INET6)
3065 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
3066
3067 conn->cb_prog = se->se_callback_prog;
3068 conn->cb_ident = se->se_callback_ident;
3069 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
3070 trace_nfsd_cb_args(clp, conn);
3071 return;
3072 out_err:
3073 conn->cb_addr.ss_family = AF_UNSPEC;
3074 conn->cb_addrlen = 0;
3075 trace_nfsd_cb_nodelegs(clp);
3076 return;
3077 }
3078
3079 /*
3080 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
3081 */
3082 static void
3083 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
3084 {
3085 struct xdr_buf *buf = resp->xdr->buf;
3086 struct nfsd4_slot *slot = resp->cstate.slot;
3087 unsigned int base;
3088
3089 dprintk("--> %s slot %p\n", __func__, slot);
3090
3091 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
3092 slot->sl_opcnt = resp->opcnt;
3093 slot->sl_status = resp->cstate.status;
3094 free_svc_cred(&slot->sl_cred);
3095 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
3096
3097 if (!nfsd4_cache_this(resp)) {
3098 slot->sl_flags &= ~NFSD4_SLOT_CACHED;
3099 return;
3100 }
3101 slot->sl_flags |= NFSD4_SLOT_CACHED;
3102
3103 base = resp->cstate.data_offset;
3104 slot->sl_datalen = buf->len - base;
3105 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
3106 WARN(1, "%s: sessions DRC could not cache compound\n",
3107 __func__);
3108 return;
3109 }
3110
3111 /*
3112 * Encode the replay sequence operation from the slot values.
3113 * If cachethis is FALSE encode the uncached rep error on the next
3114 * operation which sets resp->p and increments resp->opcnt for
3115 * nfs4svc_encode_compoundres.
3116 *
3117 */
3118 static __be32
3119 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
3120 struct nfsd4_compoundres *resp)
3121 {
3122 struct nfsd4_op *op;
3123 struct nfsd4_slot *slot = resp->cstate.slot;
3124
3125 /* Encode the replayed sequence operation */
3126 op = &args->ops[resp->opcnt - 1];
3127 nfsd4_encode_operation(resp, op);
3128
3129 if (slot->sl_flags & NFSD4_SLOT_CACHED)
3130 return op->status;
3131 if (args->opcnt == 1) {
3132 /*
3133 * The original operation wasn't a solo sequence--we
3134 * always cache those--so this retry must not match the
3135 * original:
3136 */
3137 op->status = nfserr_seq_false_retry;
3138 } else {
3139 op = &args->ops[resp->opcnt++];
3140 op->status = nfserr_retry_uncached_rep;
3141 nfsd4_encode_operation(resp, op);
3142 }
3143 return op->status;
3144 }
3145
3146 /*
3147 * The sequence operation is not cached because we can use the slot and
3148 * session values.
3149 */
3150 static __be32
3151 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
3152 struct nfsd4_sequence *seq)
3153 {
3154 struct nfsd4_slot *slot = resp->cstate.slot;
3155 struct xdr_stream *xdr = resp->xdr;
3156 __be32 *p;
3157 __be32 status;
3158
3159 dprintk("--> %s slot %p\n", __func__, slot);
3160
3161 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
3162 if (status)
3163 return status;
3164
3165 p = xdr_reserve_space(xdr, slot->sl_datalen);
3166 if (!p) {
3167 WARN_ON_ONCE(1);
3168 return nfserr_serverfault;
3169 }
3170 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
3171 xdr_commit_encode(xdr);
3172
3173 resp->opcnt = slot->sl_opcnt;
3174 return slot->sl_status;
3175 }
3176
3177 /*
3178 * Set the exchange_id flags returned by the server.
3179 */
3180 static void
3181 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
3182 {
3183 #ifdef CONFIG_NFSD_PNFS
3184 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
3185 #else
3186 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
3187 #endif
3188
3189 /* Referrals are supported, Migration is not. */
3190 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
3191
3192 /* set the wire flags to return to client. */
3193 clid->flags = new->cl_exchange_flags;
3194 }
3195
3196 static bool client_has_openowners(struct nfs4_client *clp)
3197 {
3198 struct nfs4_openowner *oo;
3199
3200 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
3201 if (!list_empty(&oo->oo_owner.so_stateids))
3202 return true;
3203 }
3204 return false;
3205 }
3206
3207 static bool client_has_state(struct nfs4_client *clp)
3208 {
3209 return client_has_openowners(clp)
3210 #ifdef CONFIG_NFSD_PNFS
3211 || !list_empty(&clp->cl_lo_states)
3212 #endif
3213 || !list_empty(&clp->cl_delegations)
3214 || !list_empty(&clp->cl_sessions)
3215 || !list_empty(&clp->async_copies);
3216 }
3217
3218 static __be32 copy_impl_id(struct nfs4_client *clp,
3219 struct nfsd4_exchange_id *exid)
3220 {
3221 if (!exid->nii_domain.data)
3222 return 0;
3223 xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
3224 if (!clp->cl_nii_domain.data)
3225 return nfserr_jukebox;
3226 xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
3227 if (!clp->cl_nii_name.data)
3228 return nfserr_jukebox;
3229 clp->cl_nii_time = exid->nii_time;
3230 return 0;
3231 }
3232
3233 __be32
3234 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3235 union nfsd4_op_u *u)
3236 {
3237 struct nfsd4_exchange_id *exid = &u->exchange_id;
3238 struct nfs4_client *conf, *new;
3239 struct nfs4_client *unconf = NULL;
3240 __be32 status;
3241 char addr_str[INET6_ADDRSTRLEN];
3242 nfs4_verifier verf = exid->verifier;
3243 struct sockaddr *sa = svc_addr(rqstp);
3244 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
3245 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3246
3247 rpc_ntop(sa, addr_str, sizeof(addr_str));
3248 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
3249 "ip_addr=%s flags %x, spa_how %u\n",
3250 __func__, rqstp, exid, exid->clname.len, exid->clname.data,
3251 addr_str, exid->flags, exid->spa_how);
3252
3253 if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
3254 return nfserr_inval;
3255
3256 new = create_client(exid->clname, rqstp, &verf);
3257 if (new == NULL)
3258 return nfserr_jukebox;
3259 status = copy_impl_id(new, exid);
3260 if (status)
3261 goto out_nolock;
3262
3263 switch (exid->spa_how) {
3264 case SP4_MACH_CRED:
3265 exid->spo_must_enforce[0] = 0;
3266 exid->spo_must_enforce[1] = (
3267 1 << (OP_BIND_CONN_TO_SESSION - 32) |
3268 1 << (OP_EXCHANGE_ID - 32) |
3269 1 << (OP_CREATE_SESSION - 32) |
3270 1 << (OP_DESTROY_SESSION - 32) |
3271 1 << (OP_DESTROY_CLIENTID - 32));
3272
3273 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
3274 1 << (OP_OPEN_DOWNGRADE) |
3275 1 << (OP_LOCKU) |
3276 1 << (OP_DELEGRETURN));
3277
3278 exid->spo_must_allow[1] &= (
3279 1 << (OP_TEST_STATEID - 32) |
3280 1 << (OP_FREE_STATEID - 32));
3281 if (!svc_rqst_integrity_protected(rqstp)) {
3282 status = nfserr_inval;
3283 goto out_nolock;
3284 }
3285 /*
3286 * Sometimes userspace doesn't give us a principal.
3287 * Which is a bug, really. Anyway, we can't enforce
3288 * MACH_CRED in that case, better to give up now:
3289 */
3290 if (!new->cl_cred.cr_principal &&
3291 !new->cl_cred.cr_raw_principal) {
3292 status = nfserr_serverfault;
3293 goto out_nolock;
3294 }
3295 new->cl_mach_cred = true;
3296 break;
3297 case SP4_NONE:
3298 break;
3299 default: /* checked by xdr code */
3300 WARN_ON_ONCE(1);
3301 fallthrough;
3302 case SP4_SSV:
3303 status = nfserr_encr_alg_unsupp;
3304 goto out_nolock;
3305 }
3306
3307 /* Cases below refer to rfc 5661 section 18.35.4: */
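/*
 * Summary of the case labels used below: cases 2 and 6 are a matching
 * confirmed client (plain vs. UPD_CONFIRMED_REC_A update); case 3 is a
 * credential mismatch, case 5 a verifier mismatch (client reboot); cases
 * 8 and 9 reject an update whose verifier or credential differs; case 7
 * is an update with no confirmed client; case 4 replaces an unconfirmed
 * client; case 1 is a brand-new owner ID.
 */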
3308 spin_lock(&nn->client_lock);
3309 conf = find_confirmed_client_by_name(&exid->clname, nn);
3310 if (conf) {
3311 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
3312 bool verfs_match = same_verf(&verf, &conf->cl_verifier);
3313
3314 if (update) {
3315 if (!clp_used_exchangeid(conf)) { /* buggy client */
3316 status = nfserr_inval;
3317 goto out;
3318 }
3319 if (!nfsd4_mach_creds_match(conf, rqstp)) {
3320 status = nfserr_wrong_cred;
3321 goto out;
3322 }
3323 if (!creds_match) { /* case 9 */
3324 status = nfserr_perm;
3325 goto out;
3326 }
3327 if (!verfs_match) { /* case 8 */
3328 status = nfserr_not_same;
3329 goto out;
3330 }
3331 /* case 6 */
3332 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
3333 trace_nfsd_clid_confirmed_r(conf);
3334 goto out_copy;
3335 }
3336 if (!creds_match) { /* case 3 */
3337 if (client_has_state(conf)) {
3338 status = nfserr_clid_inuse;
3339 trace_nfsd_clid_cred_mismatch(conf, rqstp);
3340 goto out;
3341 }
3342 goto out_new;
3343 }
3344 if (verfs_match) { /* case 2 */
3345 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
3346 trace_nfsd_clid_confirmed_r(conf);
3347 goto out_copy;
3348 }
3349 /* case 5, client reboot */
3350 trace_nfsd_clid_verf_mismatch(conf, rqstp, &verf);
3351 conf = NULL;
3352 goto out_new;
3353 }
3354
3355 if (update) { /* case 7 */
3356 status = nfserr_noent;
3357 goto out;
3358 }
3359
3360 unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
3361 if (unconf) /* case 4, possible retry or client restart */
3362 unhash_client_locked(unconf);
3363
3364 /* case 1, new owner ID */
3365 trace_nfsd_clid_fresh(new);
3366
3367 out_new:
3368 if (conf) {
3369 status = mark_client_expired_locked(conf);
3370 if (status)
3371 goto out;
3372 trace_nfsd_clid_replaced(&conf->cl_clientid);
3373 }
3374 new->cl_minorversion = cstate->minorversion;
3375 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
3376 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
3377
3378 add_to_unconfirmed(new);
3379 swap(new, conf);
3380 out_copy:
3381 exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
3382 exid->clientid.cl_id = conf->cl_clientid.cl_id;
3383
3384 exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
3385 nfsd4_set_ex_flags(conf, exid);
3386
3387 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
3388 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
3389 status = nfs_ok;
3390
3391 out:
3392 spin_unlock(&nn->client_lock);
3393 out_nolock:
3394 if (new)
3395 expire_client(new);
3396 if (unconf) {
3397 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
3398 expire_client(unconf);
3399 }
3400 return status;
3401 }
3402
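/*
 * Slot seqid rules, by example: if the slot last completed seqid 5, then
 * an incoming seqid 6 is the expected next request, a 5 is a replay to be
 * answered from the cache, and anything else is misordered.  If the slot
 * is still busy with seqid 5, a duplicate 5 gets NFS4ERR_DELAY
 * (nfserr_jukebox) and anything else is misordered.
 */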
3403 static __be32
3404 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
3405 {
3406 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
3407 slot_seqid);
3408
3409 /* The slot is in use, and no response has been sent. */
3410 if (slot_inuse) {
3411 if (seqid == slot_seqid)
3412 return nfserr_jukebox;
3413 else
3414 return nfserr_seq_misordered;
3415 }
3416 /* Note unsigned 32-bit arithmetic handles wraparound: */
3417 if (likely(seqid == slot_seqid + 1))
3418 return nfs_ok;
3419 if (seqid == slot_seqid)
3420 return nfserr_replay_cache;
3421 return nfserr_seq_misordered;
3422 }
3423
3424 /*
3425 * Cache the create session result into the create session single DRC
3426 * slot cache by saving the xdr structure. sl_seqid has been set.
3427 * Do this for solo or embedded create session operations.
3428 */
3429 static void
3430 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
3431 struct nfsd4_clid_slot *slot, __be32 nfserr)
3432 {
3433 slot->sl_status = nfserr;
3434 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
3435 }
3436
3437 static __be32
3438 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
3439 struct nfsd4_clid_slot *slot)
3440 {
3441 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
3442 return slot->sl_status;
3443 }
3444
3445 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
3446 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
3447 1 + /* MIN tag is length with zero, only length */ \
3448 3 + /* version, opcount, opcode */ \
3449 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3450 /* seqid, slotID, slotID, cache */ \
3451 4 ) * sizeof(__be32))
3452
3453 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
3454 2 + /* verifier: AUTH_NULL, length 0 */\
3455 1 + /* status */ \
3456 1 + /* MIN tag is length with zero, only length */ \
3457 3 + /* opcount, opcode, opstatus*/ \
3458 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3459 /* seqid, slotID, slotID, slotID, status */ \
3460 5 ) * sizeof(__be32))
3461
3462 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
3463 {
3464 u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
3465
3466 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
3467 return nfserr_toosmall;
3468 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
3469 return nfserr_toosmall;
3470 ca->headerpadsz = 0;
3471 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
3472 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
3473 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
3474 ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
3475 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
3476 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
3477 /*
3478 * Note decreasing slot size below client's request may make it
3479 * difficult for client to function correctly, whereas
3480 * decreasing the number of slots will (just?) affect
3481 * performance. When short on memory we therefore prefer to
3482 * decrease number of slots instead of their size. Clients that
3483 * request larger slots than they need will get poor results:
3484 * Note that we always allow at least one slot, because our
3485 * accounting is soft and provides no guarantees either way.
3486 */
3487 ca->maxreqs = nfsd4_get_drc_mem(ca, nn);
3488
3489 return nfs_ok;
3490 }
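/*
 * Rough example of the negotiation above (illustrative numbers only): a
 * client asking for maxreqs = 64 slots first has maxresp_cached clamped
 * to NFSD_SLOT_CACHE_SIZE plus the minimal header, and then
 * nfsd4_get_drc_mem() may hand back fewer slots (say 16) if the
 * server-wide DRC memory pool is under pressure.  The session is created
 * with that smaller slot count, which the client learns from the
 * negotiated fore channel attributes in the CREATE_SESSION reply.
 */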
3491
3492 /*
3493 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
3494 * These are based on similar macros in linux/sunrpc/msg_prot.h .
3495 */
3496 #define RPC_MAX_HEADER_WITH_AUTH_SYS \
3497 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
3498
3499 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
3500 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
3501
3502 #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
3503 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
3504 #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
3505 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
3506 sizeof(__be32))
3507
3508 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
3509 {
3510 ca->headerpadsz = 0;
3511
3512 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
3513 return nfserr_toosmall;
3514 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
3515 return nfserr_toosmall;
3516 ca->maxresp_cached = 0;
3517 if (ca->maxops < 2)
3518 return nfserr_toosmall;
3519
3520 return nfs_ok;
3521 }
3522
3523 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
3524 {
3525 switch (cbs->flavor) {
3526 case RPC_AUTH_NULL:
3527 case RPC_AUTH_UNIX:
3528 return nfs_ok;
3529 default:
3530 /*
3531 * GSS case: the spec doesn't allow us to return this
3532 * error. But it also doesn't allow us not to support
3533 * GSS.
3534 * I'd rather this fail hard than return some error the
3535 * client might think it can already handle:
3536 */
3537 return nfserr_encr_alg_unsupp;
3538 }
3539 }
3540
3541 __be32
3542 nfsd4_create_session(struct svc_rqst *rqstp,
3543 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3544 {
3545 struct nfsd4_create_session *cr_ses = &u->create_session;
3546 struct sockaddr *sa = svc_addr(rqstp);
3547 struct nfs4_client *conf, *unconf;
3548 struct nfs4_client *old = NULL;
3549 struct nfsd4_session *new;
3550 struct nfsd4_conn *conn;
3551 struct nfsd4_clid_slot *cs_slot = NULL;
3552 __be32 status = 0;
3553 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3554
3555 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
3556 return nfserr_inval;
3557 status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
3558 if (status)
3559 return status;
3560 status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
3561 if (status)
3562 return status;
3563 status = check_backchannel_attrs(&cr_ses->back_channel);
3564 if (status)
3565 goto out_release_drc_mem;
3566 status = nfserr_jukebox;
3567 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
3568 if (!new)
3569 goto out_release_drc_mem;
3570 conn = alloc_conn_from_crses(rqstp, cr_ses);
3571 if (!conn)
3572 goto out_free_session;
3573
3574 spin_lock(&nn->client_lock);
3575 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
3576 conf = find_confirmed_client(&cr_ses->clientid, true, nn);
3577 WARN_ON_ONCE(conf && unconf);
3578
3579 if (conf) {
3580 status = nfserr_wrong_cred;
3581 if (!nfsd4_mach_creds_match(conf, rqstp))
3582 goto out_free_conn;
3583 cs_slot = &conf->cl_cs_slot;
3584 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3585 if (status) {
3586 if (status == nfserr_replay_cache)
3587 status = nfsd4_replay_create_session(cr_ses, cs_slot);
3588 goto out_free_conn;
3589 }
3590 } else if (unconf) {
3591 status = nfserr_clid_inuse;
3592 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
3593 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
3594 trace_nfsd_clid_cred_mismatch(unconf, rqstp);
3595 goto out_free_conn;
3596 }
3597 status = nfserr_wrong_cred;
3598 if (!nfsd4_mach_creds_match(unconf, rqstp))
3599 goto out_free_conn;
3600 cs_slot = &unconf->cl_cs_slot;
3601 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3602 if (status) {
3603 /* an unconfirmed replay returns misordered */
3604 status = nfserr_seq_misordered;
3605 goto out_free_conn;
3606 }
3607 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3608 if (old) {
3609 status = mark_client_expired_locked(old);
3610 if (status) {
3611 old = NULL;
3612 goto out_free_conn;
3613 }
3614 trace_nfsd_clid_replaced(&old->cl_clientid);
3615 }
3616 move_to_confirmed(unconf);
3617 conf = unconf;
3618 } else {
3619 status = nfserr_stale_clientid;
3620 goto out_free_conn;
3621 }
3622 status = nfs_ok;
3623 /* Persistent sessions are not supported */
3624 cr_ses->flags &= ~SESSION4_PERSIST;
3625 /* Upshifting from TCP to RDMA is not supported */
3626 cr_ses->flags &= ~SESSION4_RDMA;
3627
3628 init_session(rqstp, new, conf, cr_ses);
3629 nfsd4_get_session_locked(new);
3630
3631 memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
3632 NFS4_MAX_SESSIONID_LEN);
3633 cs_slot->sl_seqid++;
3634 cr_ses->seqid = cs_slot->sl_seqid;
3635
3636 /* cache solo and embedded create sessions under the client_lock */
3637 nfsd4_cache_create_session(cr_ses, cs_slot, status);
3638 spin_unlock(&nn->client_lock);
3639 if (conf == unconf)
3640 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
3641 /* init connection and backchannel */
3642 nfsd4_init_conn(rqstp, conn, new);
3643 nfsd4_put_session(new);
3644 if (old)
3645 expire_client(old);
3646 return status;
3647 out_free_conn:
3648 spin_unlock(&nn->client_lock);
3649 free_conn(conn);
3650 if (old)
3651 expire_client(old);
3652 out_free_session:
3653 __free_session(new);
3654 out_release_drc_mem:
3655 nfsd4_put_drc_mem(&cr_ses->fore_channel);
3656 return status;
3657 }
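/*
 * Note on the two success paths above: for an already-confirmed client
 * the per-client cl_cs_slot acts as a one-entry reply cache, so a
 * retransmitted CREATE_SESSION with the same seqid is answered from
 * nfsd4_replay_create_session() rather than creating a second session.
 * For an unconfirmed client, a successful CREATE_SESSION is what
 * confirms it (move_to_confirmed), replacing any older confirmed client
 * of the same name; a retransmission in that window is simply reported
 * as misordered, since nothing was cached for the unconfirmed client.
 */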
3658
3659 static __be32 nfsd4_map_bcts_dir(u32 *dir)
3660 {
3661 switch (*dir) {
3662 case NFS4_CDFC4_FORE:
3663 case NFS4_CDFC4_BACK:
3664 return nfs_ok;
3665 case NFS4_CDFC4_FORE_OR_BOTH:
3666 case NFS4_CDFC4_BACK_OR_BOTH:
3667 *dir = NFS4_CDFC4_BOTH;
3668 return nfs_ok;
3669 }
3670 return nfserr_inval;
3671 }
3672
3673 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
3674 struct nfsd4_compound_state *cstate,
3675 union nfsd4_op_u *u)
3676 {
3677 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
3678 struct nfsd4_session *session = cstate->session;
3679 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3680 __be32 status;
3681
3682 status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
3683 if (status)
3684 return status;
3685 spin_lock(&nn->client_lock);
3686 session->se_cb_prog = bc->bc_cb_program;
3687 session->se_cb_sec = bc->bc_cb_sec;
3688 spin_unlock(&nn->client_lock);
3689
3690 nfsd4_probe_callback(session->se_client);
3691
3692 return nfs_ok;
3693 }
3694
3695 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
3696 {
3697 struct nfsd4_conn *c;
3698
3699 list_for_each_entry(c, &s->se_conns, cn_persession) {
3700 if (c->cn_xprt == xpt) {
3701 return c;
3702 }
3703 }
3704 return NULL;
3705 }
3706
3707 static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
3708 struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
3709 {
3710 struct nfs4_client *clp = session->se_client;
3711 struct svc_xprt *xpt = rqst->rq_xprt;
3712 struct nfsd4_conn *c;
3713 __be32 status;
3714
3715 /* Following the last paragraph of RFC 5661 Section 18.34.3: */
3716 spin_lock(&clp->cl_lock);
3717 c = __nfsd4_find_conn(xpt, session);
3718 if (!c)
3719 status = nfserr_noent;
3720 else if (req == c->cn_flags)
3721 status = nfs_ok;
3722 else if (req == NFS4_CDFC4_FORE_OR_BOTH &&
3723 c->cn_flags != NFS4_CDFC4_BACK)
3724 status = nfs_ok;
3725 else if (req == NFS4_CDFC4_BACK_OR_BOTH &&
3726 c->cn_flags != NFS4_CDFC4_FORE)
3727 status = nfs_ok;
3728 else
3729 status = nfserr_inval;
3730 spin_unlock(&clp->cl_lock);
3731 if (status == nfs_ok && conn)
3732 *conn = c;
3733 return status;
3734 }
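/*
 * In other words: when the connection the request arrived on is already
 * bound to this session, BIND_CONN_TO_SESSION succeeds if the requested
 * direction is compatible with the existing binding (for example,
 * FORE_OR_BOTH is fine unless the connection is bound back-channel
 * only), and nfserr_inval is returned for an incompatible request.
 * nfserr_noent tells the caller there is no existing binding, so a new
 * connection structure should be set up instead.
 */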
3735
3736 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
3737 struct nfsd4_compound_state *cstate,
3738 union nfsd4_op_u *u)
3739 {
3740 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
3741 __be32 status;
3742 struct nfsd4_conn *conn;
3743 struct nfsd4_session *session;
3744 struct net *net = SVC_NET(rqstp);
3745 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3746
3747 if (!nfsd4_last_compound_op(rqstp))
3748 return nfserr_not_only_op;
3749 spin_lock(&nn->client_lock);
3750 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
3751 spin_unlock(&nn->client_lock);
3752 if (!session)
3753 goto out_no_session;
3754 status = nfserr_wrong_cred;
3755 if (!nfsd4_mach_creds_match(session->se_client, rqstp))
3756 goto out;
3757 status = nfsd4_match_existing_connection(rqstp, session,
3758 bcts->dir, &conn);
3759 if (status == nfs_ok) {
3760 if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||
3761 bcts->dir == NFS4_CDFC4_BACK)
3762 conn->cn_flags |= NFS4_CDFC4_BACK;
3763 nfsd4_probe_callback(session->se_client);
3764 goto out;
3765 }
3766 if (status == nfserr_inval)
3767 goto out;
3768 status = nfsd4_map_bcts_dir(&bcts->dir);
3769 if (status)
3770 goto out;
3771 conn = alloc_conn(rqstp, bcts->dir);
3772 status = nfserr_jukebox;
3773 if (!conn)
3774 goto out;
3775 nfsd4_init_conn(rqstp, conn, session);
3776 status = nfs_ok;
3777 out:
3778 nfsd4_put_session(session);
3779 out_no_session:
3780 return status;
3781 }
3782
3783 static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
3784 {
3785 if (!cstate->session)
3786 return false;
3787 return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
3788 }
3789
3790 __be32
3791 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
3792 union nfsd4_op_u *u)
3793 {
3794 struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
3795 struct nfsd4_session *ses;
3796 __be32 status;
3797 int ref_held_by_me = 0;
3798 struct net *net = SVC_NET(r);
3799 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3800
3801 status = nfserr_not_only_op;
3802 if (nfsd4_compound_in_session(cstate, sessionid)) {
3803 if (!nfsd4_last_compound_op(r))
3804 goto out;
3805 ref_held_by_me++;
3806 }
3807 dump_sessionid(__func__, sessionid);
3808 spin_lock(&nn->client_lock);
3809 ses = find_in_sessionid_hashtbl(sessionid, net, &status);
3810 if (!ses)
3811 goto out_client_lock;
3812 status = nfserr_wrong_cred;
3813 if (!nfsd4_mach_creds_match(ses->se_client, r))
3814 goto out_put_session;
3815 status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
3816 if (status)
3817 goto out_put_session;
3818 unhash_session(ses);
3819 spin_unlock(&nn->client_lock);
3820
3821 nfsd4_probe_callback_sync(ses->se_client);
3822
3823 spin_lock(&nn->client_lock);
3824 status = nfs_ok;
3825 out_put_session:
3826 nfsd4_put_session_locked(ses);
3827 out_client_lock:
3828 spin_unlock(&nn->client_lock);
3829 out:
3830 return status;
3831 }
3832
3833 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
3834 {
3835 struct nfs4_client *clp = ses->se_client;
3836 struct nfsd4_conn *c;
3837 __be32 status = nfs_ok;
3838 int ret;
3839
3840 spin_lock(&clp->cl_lock);
3841 c = __nfsd4_find_conn(new->cn_xprt, ses);
3842 if (c)
3843 goto out_free;
3844 status = nfserr_conn_not_bound_to_session;
3845 if (clp->cl_mach_cred)
3846 goto out_free;
3847 __nfsd4_hash_conn(new, ses);
3848 spin_unlock(&clp->cl_lock);
3849 ret = nfsd4_register_conn(new);
3850 if (ret)
3851 /* oops; xprt is already down: */
3852 nfsd4_conn_lost(&new->cn_xpt_user);
3853 return nfs_ok;
3854 out_free:
3855 spin_unlock(&clp->cl_lock);
3856 free_conn(new);
3857 return status;
3858 }
3859
3860 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
3861 {
3862 struct nfsd4_compoundargs *args = rqstp->rq_argp;
3863
3864 return args->opcnt > session->se_fchannel.maxops;
3865 }
3866
3867 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
3868 struct nfsd4_session *session)
3869 {
3870 struct xdr_buf *xb = &rqstp->rq_arg;
3871
3872 return xb->len > session->se_fchannel.maxreq_sz;
3873 }
3874
3875 static bool replay_matches_cache(struct svc_rqst *rqstp,
3876 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
3877 {
3878 struct nfsd4_compoundargs *argp = rqstp->rq_argp;
3879
3880 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
3881 (bool)seq->cachethis)
3882 return false;
3883 /*
3884 * If there's an error then the reply can have fewer ops than
3885 * the call.
3886 */
3887 if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
3888 return false;
3889 /*
3890 * But if we cached a reply with *more* ops than the call you're
3891 * sending us now, then this new call is clearly not really a
3892 * replay of the old one:
3893 */
3894 if (slot->sl_opcnt > argp->opcnt)
3895 return false;
3896 /* This is the only check explicitly called by spec: */
3897 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
3898 return false;
3899 /*
3900 * There may be more comparisons we could actually do, but the
3901 * spec doesn't require us to catch every case where the calls
3902 * don't match (that would require caching the call as well as
3903 * the reply), so we don't bother.
3904 */
3905 return true;
3906 }
3907
3908 __be32
3909 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3910 union nfsd4_op_u *u)
3911 {
3912 struct nfsd4_sequence *seq = &u->sequence;
3913 struct nfsd4_compoundres *resp = rqstp->rq_resp;
3914 struct xdr_stream *xdr = resp->xdr;
3915 struct nfsd4_session *session;
3916 struct nfs4_client *clp;
3917 struct nfsd4_slot *slot;
3918 struct nfsd4_conn *conn;
3919 __be32 status;
3920 int buflen;
3921 struct net *net = SVC_NET(rqstp);
3922 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3923
3924 if (resp->opcnt != 1)
3925 return nfserr_sequence_pos;
3926
3927 /*
3928 * Will be either used or freed by nfsd4_sequence_check_conn
3929 * below.
3930 */
3931 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
3932 if (!conn)
3933 return nfserr_jukebox;
3934
3935 spin_lock(&nn->client_lock);
3936 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
3937 if (!session)
3938 goto out_no_session;
3939 clp = session->se_client;
3940
3941 status = nfserr_too_many_ops;
3942 if (nfsd4_session_too_many_ops(rqstp, session))
3943 goto out_put_session;
3944
3945 status = nfserr_req_too_big;
3946 if (nfsd4_request_too_big(rqstp, session))
3947 goto out_put_session;
3948
3949 status = nfserr_badslot;
3950 if (seq->slotid >= session->se_fchannel.maxreqs)
3951 goto out_put_session;
3952
3953 slot = session->se_slots[seq->slotid];
3954 dprintk("%s: slotid %d\n", __func__, seq->slotid);
3955
3956 	/* We do not negotiate the number of slots yet, so set the
3957 	 * maxslots to the session maxreqs, which is used to encode
3958 	 * sr_highest_slotid and sr_target_highest_slotid to maxslots */
3959 seq->maxslots = session->se_fchannel.maxreqs;
3960
3961 status = check_slot_seqid(seq->seqid, slot->sl_seqid,
3962 slot->sl_flags & NFSD4_SLOT_INUSE);
3963 if (status == nfserr_replay_cache) {
3964 status = nfserr_seq_misordered;
3965 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
3966 goto out_put_session;
3967 status = nfserr_seq_false_retry;
3968 if (!replay_matches_cache(rqstp, seq, slot))
3969 goto out_put_session;
3970 cstate->slot = slot;
3971 cstate->session = session;
3972 cstate->clp = clp;
3973 /* Return the cached reply status and set cstate->status
3974 * for nfsd4_proc_compound processing */
3975 status = nfsd4_replay_cache_entry(resp, seq);
3976 cstate->status = nfserr_replay_cache;
3977 goto out;
3978 }
3979 if (status)
3980 goto out_put_session;
3981
3982 status = nfsd4_sequence_check_conn(conn, session);
3983 conn = NULL;
3984 if (status)
3985 goto out_put_session;
3986
3987 buflen = (seq->cachethis) ?
3988 session->se_fchannel.maxresp_cached :
3989 session->se_fchannel.maxresp_sz;
3990 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
3991 nfserr_rep_too_big;
3992 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
3993 goto out_put_session;
3994 svc_reserve(rqstp, buflen);
3995
3996 status = nfs_ok;
3997 /* Success! bump slot seqid */
3998 slot->sl_seqid = seq->seqid;
3999 slot->sl_flags |= NFSD4_SLOT_INUSE;
4000 if (seq->cachethis)
4001 slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
4002 else
4003 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
4004
4005 cstate->slot = slot;
4006 cstate->session = session;
4007 cstate->clp = clp;
4008
4009 out:
4010 switch (clp->cl_cb_state) {
4011 case NFSD4_CB_DOWN:
4012 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
4013 break;
4014 case NFSD4_CB_FAULT:
4015 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
4016 break;
4017 default:
4018 seq->status_flags = 0;
4019 }
4020 if (!list_empty(&clp->cl_revoked))
4021 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
4022 out_no_session:
4023 if (conn)
4024 free_conn(conn);
4025 spin_unlock(&nn->client_lock);
4026 return status;
4027 out_put_session:
4028 nfsd4_put_session_locked(session);
4029 goto out_no_session;
4030 }
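/*
 * The slot claimed above stays marked NFSD4_SLOT_INUSE for the rest of
 * the compound; nfsd4_sequence_done() below clears that flag and stores
 * the reply via nfsd4_store_cache_entry(), so a retransmission carrying
 * the same slot and seqid can be answered from the cache through the
 * nfserr_replay_cache path near the top of nfsd4_sequence().
 */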
4031
4032 void
4033 nfsd4_sequence_done(struct nfsd4_compoundres *resp)
4034 {
4035 struct nfsd4_compound_state *cs = &resp->cstate;
4036
4037 if (nfsd4_has_session(cs)) {
4038 if (cs->status != nfserr_replay_cache) {
4039 nfsd4_store_cache_entry(resp);
4040 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
4041 }
4042 /* Drop session reference that was taken in nfsd4_sequence() */
4043 nfsd4_put_session(cs->session);
4044 } else if (cs->clp)
4045 put_client_renew(cs->clp);
4046 }
4047
4048 __be32
4049 nfsd4_destroy_clientid(struct svc_rqst *rqstp,
4050 struct nfsd4_compound_state *cstate,
4051 union nfsd4_op_u *u)
4052 {
4053 struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
4054 struct nfs4_client *conf, *unconf;
4055 struct nfs4_client *clp = NULL;
4056 __be32 status = 0;
4057 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4058
4059 spin_lock(&nn->client_lock);
4060 unconf = find_unconfirmed_client(&dc->clientid, true, nn);
4061 conf = find_confirmed_client(&dc->clientid, true, nn);
4062 WARN_ON_ONCE(conf && unconf);
4063
4064 if (conf) {
4065 if (client_has_state(conf)) {
4066 status = nfserr_clientid_busy;
4067 goto out;
4068 }
4069 status = mark_client_expired_locked(conf);
4070 if (status)
4071 goto out;
4072 clp = conf;
4073 } else if (unconf)
4074 clp = unconf;
4075 else {
4076 status = nfserr_stale_clientid;
4077 goto out;
4078 }
4079 if (!nfsd4_mach_creds_match(clp, rqstp)) {
4080 clp = NULL;
4081 status = nfserr_wrong_cred;
4082 goto out;
4083 }
4084 trace_nfsd_clid_destroyed(&clp->cl_clientid);
4085 unhash_client_locked(clp);
4086 out:
4087 spin_unlock(&nn->client_lock);
4088 if (clp)
4089 expire_client(clp);
4090 return status;
4091 }
4092
4093 __be32
4094 nfsd4_reclaim_complete(struct svc_rqst *rqstp,
4095 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
4096 {
4097 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
4098 struct nfs4_client *clp = cstate->clp;
4099 __be32 status = 0;
4100
4101 if (rc->rca_one_fs) {
4102 if (!cstate->current_fh.fh_dentry)
4103 return nfserr_nofilehandle;
4104 /*
4105 * We don't take advantage of the rca_one_fs case.
4106 * That's OK, it's optional, we can safely ignore it.
4107 */
4108 return nfs_ok;
4109 }
4110
4111 status = nfserr_complete_already;
4112 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
4113 goto out;
4114
4115 status = nfserr_stale_clientid;
4116 if (is_client_expired(clp))
4117 /*
4118 * The following error isn't really legal.
4119 	 * But we only get here if the client has just explicitly
4120 	 * destroyed the clientid. Surely it no longer cares what
4121 * error it gets back on an operation for the dead
4122 * client.
4123 */
4124 goto out;
4125
4126 status = nfs_ok;
4127 trace_nfsd_clid_reclaim_complete(&clp->cl_clientid);
4128 nfsd4_client_record_create(clp);
4129 inc_reclaim_complete(clp);
4130 out:
4131 return status;
4132 }
4133
4134 __be32
4135 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4136 union nfsd4_op_u *u)
4137 {
4138 struct nfsd4_setclientid *setclid = &u->setclientid;
4139 struct xdr_netobj clname = setclid->se_name;
4140 nfs4_verifier clverifier = setclid->se_verf;
4141 struct nfs4_client *conf, *new;
4142 struct nfs4_client *unconf = NULL;
4143 __be32 status;
4144 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4145
4146 new = create_client(clname, rqstp, &clverifier);
4147 if (new == NULL)
4148 return nfserr_jukebox;
4149 spin_lock(&nn->client_lock);
4150 conf = find_confirmed_client_by_name(&clname, nn);
4151 if (conf && client_has_state(conf)) {
4152 status = nfserr_clid_inuse;
4153 if (clp_used_exchangeid(conf))
4154 goto out;
4155 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
4156 trace_nfsd_clid_cred_mismatch(conf, rqstp);
4157 goto out;
4158 }
4159 }
4160 unconf = find_unconfirmed_client_by_name(&clname, nn);
4161 if (unconf)
4162 unhash_client_locked(unconf);
4163 if (conf) {
4164 if (same_verf(&conf->cl_verifier, &clverifier)) {
4165 copy_clid(new, conf);
4166 gen_confirm(new, nn);
4167 } else
4168 trace_nfsd_clid_verf_mismatch(conf, rqstp,
4169 &clverifier);
4170 } else
4171 trace_nfsd_clid_fresh(new);
4172 new->cl_minorversion = 0;
4173 gen_callback(new, setclid, rqstp);
4174 add_to_unconfirmed(new);
4175 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
4176 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
4177 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
4178 new = NULL;
4179 status = nfs_ok;
4180 out:
4181 spin_unlock(&nn->client_lock);
4182 if (new)
4183 free_client(new);
4184 if (unconf) {
4185 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
4186 expire_client(unconf);
4187 }
4188 return status;
4189 }
4190
4191 __be32
4192 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
4193 struct nfsd4_compound_state *cstate,
4194 union nfsd4_op_u *u)
4195 {
4196 struct nfsd4_setclientid_confirm *setclientid_confirm =
4197 &u->setclientid_confirm;
4198 struct nfs4_client *conf, *unconf;
4199 struct nfs4_client *old = NULL;
4200 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
4201 clientid_t * clid = &setclientid_confirm->sc_clientid;
4202 __be32 status;
4203 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4204
4205 if (STALE_CLIENTID(clid, nn))
4206 return nfserr_stale_clientid;
4207
4208 spin_lock(&nn->client_lock);
4209 conf = find_confirmed_client(clid, false, nn);
4210 unconf = find_unconfirmed_client(clid, false, nn);
4211 /*
4212 	 * We try hard to give out unique clientids, so if we get an
4213 * attempt to confirm the same clientid with a different cred,
4214 * the client may be buggy; this should never happen.
4215 *
4216 * Nevertheless, RFC 7530 recommends INUSE for this case:
4217 */
4218 status = nfserr_clid_inuse;
4219 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
4220 trace_nfsd_clid_cred_mismatch(unconf, rqstp);
4221 goto out;
4222 }
4223 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
4224 trace_nfsd_clid_cred_mismatch(conf, rqstp);
4225 goto out;
4226 }
4227 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
4228 if (conf && same_verf(&confirm, &conf->cl_confirm)) {
4229 status = nfs_ok;
4230 } else
4231 status = nfserr_stale_clientid;
4232 goto out;
4233 }
4234 status = nfs_ok;
4235 if (conf) {
4236 old = unconf;
4237 unhash_client_locked(old);
4238 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
4239 } else {
4240 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
4241 if (old) {
4242 status = nfserr_clid_inuse;
4243 if (client_has_state(old)
4244 && !same_creds(&unconf->cl_cred,
4245 &old->cl_cred)) {
4246 old = NULL;
4247 goto out;
4248 }
4249 status = mark_client_expired_locked(old);
4250 if (status) {
4251 old = NULL;
4252 goto out;
4253 }
4254 trace_nfsd_clid_replaced(&old->cl_clientid);
4255 }
4256 move_to_confirmed(unconf);
4257 conf = unconf;
4258 }
4259 get_client_locked(conf);
4260 spin_unlock(&nn->client_lock);
4261 if (conf == unconf)
4262 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
4263 nfsd4_probe_callback(conf);
4264 spin_lock(&nn->client_lock);
4265 put_client_renew_locked(conf);
4266 out:
4267 spin_unlock(&nn->client_lock);
4268 if (old)
4269 expire_client(old);
4270 return status;
4271 }
4272
4273 static struct nfs4_file *nfsd4_alloc_file(void)
4274 {
4275 return kmem_cache_alloc(file_slab, GFP_KERNEL);
4276 }
4277
4278 /* OPEN Share state helper functions */
4279 static void nfsd4_init_file(struct svc_fh *fh, unsigned int hashval,
4280 struct nfs4_file *fp)
4281 {
4282 lockdep_assert_held(&state_lock);
4283
4284 refcount_set(&fp->fi_ref, 1);
4285 spin_lock_init(&fp->fi_lock);
4286 INIT_LIST_HEAD(&fp->fi_stateids);
4287 INIT_LIST_HEAD(&fp->fi_delegations);
4288 INIT_LIST_HEAD(&fp->fi_clnt_odstate);
4289 fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle);
4290 fp->fi_deleg_file = NULL;
4291 fp->fi_had_conflict = false;
4292 fp->fi_share_deny = 0;
4293 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
4294 memset(fp->fi_access, 0, sizeof(fp->fi_access));
4295 fp->fi_aliased = false;
4296 fp->fi_inode = d_inode(fh->fh_dentry);
4297 #ifdef CONFIG_NFSD_PNFS
4298 INIT_LIST_HEAD(&fp->fi_lo_states);
4299 atomic_set(&fp->fi_lo_recalls, 0);
4300 #endif
4301 hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
4302 }
4303
4304 void
4305 nfsd4_free_slabs(void)
4306 {
4307 kmem_cache_destroy(client_slab);
4308 kmem_cache_destroy(openowner_slab);
4309 kmem_cache_destroy(lockowner_slab);
4310 kmem_cache_destroy(file_slab);
4311 kmem_cache_destroy(stateid_slab);
4312 kmem_cache_destroy(deleg_slab);
4313 kmem_cache_destroy(odstate_slab);
4314 }
4315
4316 int
4317 nfsd4_init_slabs(void)
4318 {
4319 client_slab = kmem_cache_create("nfsd4_clients",
4320 sizeof(struct nfs4_client), 0, 0, NULL);
4321 if (client_slab == NULL)
4322 goto out;
4323 openowner_slab = kmem_cache_create("nfsd4_openowners",
4324 sizeof(struct nfs4_openowner), 0, 0, NULL);
4325 if (openowner_slab == NULL)
4326 goto out_free_client_slab;
4327 lockowner_slab = kmem_cache_create("nfsd4_lockowners",
4328 sizeof(struct nfs4_lockowner), 0, 0, NULL);
4329 if (lockowner_slab == NULL)
4330 goto out_free_openowner_slab;
4331 file_slab = kmem_cache_create("nfsd4_files",
4332 sizeof(struct nfs4_file), 0, 0, NULL);
4333 if (file_slab == NULL)
4334 goto out_free_lockowner_slab;
4335 stateid_slab = kmem_cache_create("nfsd4_stateids",
4336 sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
4337 if (stateid_slab == NULL)
4338 goto out_free_file_slab;
4339 deleg_slab = kmem_cache_create("nfsd4_delegations",
4340 sizeof(struct nfs4_delegation), 0, 0, NULL);
4341 if (deleg_slab == NULL)
4342 goto out_free_stateid_slab;
4343 odstate_slab = kmem_cache_create("nfsd4_odstate",
4344 sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
4345 if (odstate_slab == NULL)
4346 goto out_free_deleg_slab;
4347 return 0;
4348
4349 out_free_deleg_slab:
4350 kmem_cache_destroy(deleg_slab);
4351 out_free_stateid_slab:
4352 kmem_cache_destroy(stateid_slab);
4353 out_free_file_slab:
4354 kmem_cache_destroy(file_slab);
4355 out_free_lockowner_slab:
4356 kmem_cache_destroy(lockowner_slab);
4357 out_free_openowner_slab:
4358 kmem_cache_destroy(openowner_slab);
4359 out_free_client_slab:
4360 kmem_cache_destroy(client_slab);
4361 out:
4362 return -ENOMEM;
4363 }
4364
4365 static unsigned long
4366 nfsd_courtesy_client_count(struct shrinker *shrink, struct shrink_control *sc)
4367 {
4368 int cnt;
4369 struct nfsd_net *nn = container_of(shrink,
4370 struct nfsd_net, nfsd_client_shrinker);
4371
4372 cnt = atomic_read(&nn->nfsd_courtesy_clients);
4373 if (cnt > 0)
4374 mod_delayed_work(laundry_wq, &nn->nfsd_shrinker_work, 0);
4375 return (unsigned long)cnt;
4376 }
4377
4378 static unsigned long
4379 nfsd_courtesy_client_scan(struct shrinker *shrink, struct shrink_control *sc)
4380 {
4381 return SHRINK_STOP;
4382 }
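/*
 * The shrinker never reclaims clients from the ->scan_objects callback
 * itself: ->count_objects above kicks nfsd_shrinker_work instead, and
 * the laundromat workqueue expires courtesy clients asynchronously.
 * That is why the scan callback simply returns SHRINK_STOP.
 */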
4383
4384 int
4385 nfsd4_init_leases_net(struct nfsd_net *nn)
4386 {
4387 struct sysinfo si;
4388 u64 max_clients;
4389
4390 nn->nfsd4_lease = 90; /* default lease time */
4391 nn->nfsd4_grace = 90;
4392 nn->somebody_reclaimed = false;
4393 nn->track_reclaim_completes = false;
4394 nn->clverifier_counter = get_random_u32();
4395 nn->clientid_base = get_random_u32();
4396 nn->clientid_counter = nn->clientid_base + 1;
4397 nn->s2s_cp_cl_id = nn->clientid_counter++;
4398
4399 atomic_set(&nn->nfs4_client_count, 0);
4400 si_meminfo(&si);
4401 max_clients = (u64)si.totalram * si.mem_unit / (1024 * 1024 * 1024);
4402 max_clients *= NFS4_CLIENTS_PER_GB;
4403 nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB);
4404
4405 atomic_set(&nn->nfsd_courtesy_clients, 0);
4406 nn->nfsd_client_shrinker.scan_objects = nfsd_courtesy_client_scan;
4407 nn->nfsd_client_shrinker.count_objects = nfsd_courtesy_client_count;
4408 nn->nfsd_client_shrinker.seeks = DEFAULT_SEEKS;
4409 return register_shrinker(&nn->nfsd_client_shrinker, "nfsd-client");
4410 }
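/*
 * Worked example of the sizing above (illustrative numbers): with
 * 16 GiB of RAM, max_clients becomes 16 * NFS4_CLIENTS_PER_GB, and the
 * max_t() ensures a machine with less than 1 GiB still admits at least
 * NFS4_CLIENTS_PER_GB clients.  nfs4_client_count tracks the live total
 * and is consulted elsewhere when deciding whether new clients may be
 * admitted or courtesy clients should be trimmed.
 */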
4411
4412 void
4413 nfsd4_leases_net_shutdown(struct nfsd_net *nn)
4414 {
4415 unregister_shrinker(&nn->nfsd_client_shrinker);
4416 }
4417
4418 static void init_nfs4_replay(struct nfs4_replay *rp)
4419 {
4420 rp->rp_status = nfserr_serverfault;
4421 rp->rp_buflen = 0;
4422 rp->rp_buf = rp->rp_ibuf;
4423 mutex_init(&rp->rp_mutex);
4424 }
4425
4426 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
4427 struct nfs4_stateowner *so)
4428 {
4429 if (!nfsd4_has_session(cstate)) {
4430 mutex_lock(&so->so_replay.rp_mutex);
4431 cstate->replay_owner = nfs4_get_stateowner(so);
4432 }
4433 }
4434
4435 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
4436 {
4437 struct nfs4_stateowner *so = cstate->replay_owner;
4438
4439 if (so != NULL) {
4440 cstate->replay_owner = NULL;
4441 mutex_unlock(&so->so_replay.rp_mutex);
4442 nfs4_put_stateowner(so);
4443 }
4444 }
4445
4446 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
4447 {
4448 struct nfs4_stateowner *sop;
4449
4450 sop = kmem_cache_alloc(slab, GFP_KERNEL);
4451 if (!sop)
4452 return NULL;
4453
4454 xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
4455 if (!sop->so_owner.data) {
4456 kmem_cache_free(slab, sop);
4457 return NULL;
4458 }
4459
4460 INIT_LIST_HEAD(&sop->so_stateids);
4461 sop->so_client = clp;
4462 init_nfs4_replay(&sop->so_replay);
4463 atomic_set(&sop->so_count, 1);
4464 return sop;
4465 }
4466
4467 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
4468 {
4469 lockdep_assert_held(&clp->cl_lock);
4470
4471 list_add(&oo->oo_owner.so_strhash,
4472 &clp->cl_ownerstr_hashtbl[strhashval]);
4473 list_add(&oo->oo_perclient, &clp->cl_openowners);
4474 }
4475
4476 static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
4477 {
4478 unhash_openowner_locked(openowner(so));
4479 }
4480
4481 static void nfs4_free_openowner(struct nfs4_stateowner *so)
4482 {
4483 struct nfs4_openowner *oo = openowner(so);
4484
4485 kmem_cache_free(openowner_slab, oo);
4486 }
4487
4488 static const struct nfs4_stateowner_operations openowner_ops = {
4489 .so_unhash = nfs4_unhash_openowner,
4490 .so_free = nfs4_free_openowner,
4491 };
4492
4493 static struct nfs4_ol_stateid *
4494 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4495 {
4496 struct nfs4_ol_stateid *local, *ret = NULL;
4497 struct nfs4_openowner *oo = open->op_openowner;
4498
4499 lockdep_assert_held(&fp->fi_lock);
4500
4501 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
4502 /* ignore lock owners */
4503 if (local->st_stateowner->so_is_open_owner == 0)
4504 continue;
4505 if (local->st_stateowner != &oo->oo_owner)
4506 continue;
4507 if (local->st_stid.sc_type == NFS4_OPEN_STID) {
4508 ret = local;
4509 refcount_inc(&ret->st_stid.sc_count);
4510 break;
4511 }
4512 }
4513 return ret;
4514 }
4515
4516 static __be32
4517 nfsd4_verify_open_stid(struct nfs4_stid *s)
4518 {
4519 __be32 ret = nfs_ok;
4520
4521 switch (s->sc_type) {
4522 default:
4523 break;
4524 case 0:
4525 case NFS4_CLOSED_STID:
4526 case NFS4_CLOSED_DELEG_STID:
4527 ret = nfserr_bad_stateid;
4528 break;
4529 case NFS4_REVOKED_DELEG_STID:
4530 ret = nfserr_deleg_revoked;
4531 }
4532 return ret;
4533 }
4534
4535 /* Lock the stateid st_mutex, and deal with races with CLOSE */
4536 static __be32
4537 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
4538 {
4539 __be32 ret;
4540
4541 mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
4542 ret = nfsd4_verify_open_stid(&stp->st_stid);
4543 if (ret != nfs_ok)
4544 mutex_unlock(&stp->st_mutex);
4545 return ret;
4546 }
4547
4548 static struct nfs4_ol_stateid *
4549 nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4550 {
4551 struct nfs4_ol_stateid *stp;
4552 for (;;) {
4553 spin_lock(&fp->fi_lock);
4554 stp = nfsd4_find_existing_open(fp, open);
4555 spin_unlock(&fp->fi_lock);
4556 if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
4557 break;
4558 nfs4_put_stid(&stp->st_stid);
4559 }
4560 return stp;
4561 }
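/*
 * The loop handles a race with CLOSE: the stateid found under fi_lock
 * can be closed before we manage to take its st_mutex, in which case
 * nfsd4_lock_ol_stateid() reports it as bad or closed, so we drop our
 * reference and look the open up again.
 */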
4562
4563 static struct nfs4_openowner *
4564 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
4565 struct nfsd4_compound_state *cstate)
4566 {
4567 struct nfs4_client *clp = cstate->clp;
4568 struct nfs4_openowner *oo, *ret;
4569
4570 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
4571 if (!oo)
4572 return NULL;
4573 oo->oo_owner.so_ops = &openowner_ops;
4574 oo->oo_owner.so_is_open_owner = 1;
4575 oo->oo_owner.so_seqid = open->op_seqid;
4576 oo->oo_flags = 0;
4577 if (nfsd4_has_session(cstate))
4578 oo->oo_flags |= NFS4_OO_CONFIRMED;
4579 oo->oo_time = 0;
4580 oo->oo_last_closed_stid = NULL;
4581 INIT_LIST_HEAD(&oo->oo_close_lru);
4582 spin_lock(&clp->cl_lock);
4583 ret = find_openstateowner_str_locked(strhashval, open, clp);
4584 if (ret == NULL) {
4585 hash_openowner(oo, clp, strhashval);
4586 ret = oo;
4587 } else
4588 nfs4_free_stateowner(&oo->oo_owner);
4589
4590 spin_unlock(&clp->cl_lock);
4591 return ret;
4592 }
4593
4594 static struct nfs4_ol_stateid *
4595 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
4596 {
4597
4598 struct nfs4_openowner *oo = open->op_openowner;
4599 struct nfs4_ol_stateid *retstp = NULL;
4600 struct nfs4_ol_stateid *stp;
4601
4602 stp = open->op_stp;
4603 	/* Initialize and take st_mutex before the spinlocks below: mutex_lock() may sleep and must not be called with a spinlock held */
4604 mutex_init(&stp->st_mutex);
4605 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
4606
4607 retry:
4608 spin_lock(&oo->oo_owner.so_client->cl_lock);
4609 spin_lock(&fp->fi_lock);
4610
4611 retstp = nfsd4_find_existing_open(fp, open);
4612 if (retstp)
4613 goto out_unlock;
4614
4615 open->op_stp = NULL;
4616 refcount_inc(&stp->st_stid.sc_count);
4617 stp->st_stid.sc_type = NFS4_OPEN_STID;
4618 INIT_LIST_HEAD(&stp->st_locks);
4619 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
4620 get_nfs4_file(fp);
4621 stp->st_stid.sc_file = fp;
4622 stp->st_access_bmap = 0;
4623 stp->st_deny_bmap = 0;
4624 stp->st_openstp = NULL;
4625 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
4626 list_add(&stp->st_perfile, &fp->fi_stateids);
4627
4628 out_unlock:
4629 spin_unlock(&fp->fi_lock);
4630 spin_unlock(&oo->oo_owner.so_client->cl_lock);
4631 if (retstp) {
4632 /* Handle races with CLOSE */
4633 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
4634 nfs4_put_stid(&retstp->st_stid);
4635 goto retry;
4636 }
4637 /* To keep mutex tracking happy */
4638 mutex_unlock(&stp->st_mutex);
4639 stp = retstp;
4640 }
4641 return stp;
4642 }
4643
4644 /*
4645 * In the 4.0 case we need to keep the owners around a little while to handle
4646 * CLOSE replay. We still do need to release any file access that is held by
4647 * them before returning however.
4648 */
4649 static void
4650 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
4651 {
4652 struct nfs4_ol_stateid *last;
4653 struct nfs4_openowner *oo = openowner(s->st_stateowner);
4654 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
4655 nfsd_net_id);
4656
4657 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
4658
4659 /*
4660 * We know that we hold one reference via nfsd4_close, and another
4661 * "persistent" reference for the client. If the refcount is higher
4662 * than 2, then there are still calls in progress that are using this
4663 * stateid. We can't put the sc_file reference until they are finished.
4664 * Wait for the refcount to drop to 2. Since it has been unhashed,
4665 * there should be no danger of the refcount going back up again at
4666 * this point.
4667 */
4668 wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
4669
4670 release_all_access(s);
4671 if (s->st_stid.sc_file) {
4672 put_nfs4_file(s->st_stid.sc_file);
4673 s->st_stid.sc_file = NULL;
4674 }
4675
4676 spin_lock(&nn->client_lock);
4677 last = oo->oo_last_closed_stid;
4678 oo->oo_last_closed_stid = s;
4679 list_move_tail(&oo->oo_close_lru, &nn->close_lru);
4680 oo->oo_time = ktime_get_boottime_seconds();
4681 spin_unlock(&nn->client_lock);
4682 if (last)
4683 nfs4_put_stid(&last->st_stid);
4684 }
4685
4686 /* search file_hashtbl[] for file */
4687 static struct nfs4_file *
4688 find_file_locked(struct svc_fh *fh, unsigned int hashval)
4689 {
4690 struct nfs4_file *fp;
4691
4692 hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
4693 lockdep_is_held(&state_lock)) {
4694 if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) {
4695 if (refcount_inc_not_zero(&fp->fi_ref))
4696 return fp;
4697 }
4698 }
4699 return NULL;
4700 }
4701
4702 static struct nfs4_file *insert_file(struct nfs4_file *new, struct svc_fh *fh,
4703 unsigned int hashval)
4704 {
4705 struct nfs4_file *fp;
4706 struct nfs4_file *ret = NULL;
4707 bool alias_found = false;
4708
4709 spin_lock(&state_lock);
4710 hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
4711 lockdep_is_held(&state_lock)) {
4712 if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) {
4713 if (refcount_inc_not_zero(&fp->fi_ref))
4714 ret = fp;
4715 } else if (d_inode(fh->fh_dentry) == fp->fi_inode)
4716 fp->fi_aliased = alias_found = true;
4717 }
4718 if (likely(ret == NULL)) {
4719 nfsd4_init_file(fh, hashval, new);
4720 new->fi_aliased = alias_found;
4721 ret = new;
4722 }
4723 spin_unlock(&state_lock);
4724 return ret;
4725 }
4726
4727 static struct nfs4_file * find_file(struct svc_fh *fh)
4728 {
4729 struct nfs4_file *fp;
4730 unsigned int hashval = file_hashval(fh);
4731
4732 rcu_read_lock();
4733 fp = find_file_locked(fh, hashval);
4734 rcu_read_unlock();
4735 return fp;
4736 }
4737
4738 static struct nfs4_file *
4739 find_or_add_file(struct nfs4_file *new, struct svc_fh *fh)
4740 {
4741 struct nfs4_file *fp;
4742 unsigned int hashval = file_hashval(fh);
4743
4744 rcu_read_lock();
4745 fp = find_file_locked(fh, hashval);
4746 rcu_read_unlock();
4747 if (fp)
4748 return fp;
4749
4750 return insert_file(new, fh, hashval);
4751 }
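/*
 * Lookup is lock-free (RCU) in the common case; only when no matching
 * nfs4_file exists do we fall back to insert_file(), which re-checks
 * under state_lock before hashing the caller-supplied 'new' entry, so
 * two racing OPENs of the same file still end up sharing a single
 * nfs4_file.
 */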
4752
4753 /*
4754 * Called to check deny when READ with all zero stateid or
4755 * WRITE with all zero or all one stateid
4756 */
4757 static __be32
4758 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
4759 {
4760 struct nfs4_file *fp;
4761 __be32 ret = nfs_ok;
4762
4763 fp = find_file(current_fh);
4764 if (!fp)
4765 return ret;
4766 /* Check for conflicting share reservations */
4767 spin_lock(&fp->fi_lock);
4768 if (fp->fi_share_deny & deny_type)
4769 ret = nfserr_locked;
4770 spin_unlock(&fp->fi_lock);
4771 put_nfs4_file(fp);
4772 return ret;
4773 }
4774
4775 static bool nfsd4_deleg_present(const struct inode *inode)
4776 {
4777 struct file_lock_context *ctx = smp_load_acquire(&inode->i_flctx);
4778
4779 return ctx && !list_empty_careful(&ctx->flc_lease);
4780 }
4781
4782 /**
4783 * nfsd_wait_for_delegreturn - wait for delegations to be returned
4784 * @rqstp: the RPC transaction being executed
4785 * @inode: in-core inode of the file being waited for
4786 *
4787 * The timeout prevents deadlock if all nfsd threads happen to be
4788 * tied up waiting for returning delegations.
4789 *
4790 * Return values:
4791 * %true: delegation was returned
4792 * %false: timed out waiting for delegreturn
4793 */
4794 bool nfsd_wait_for_delegreturn(struct svc_rqst *rqstp, struct inode *inode)
4795 {
4796 long __maybe_unused timeo;
4797
4798 timeo = wait_var_event_timeout(inode, !nfsd4_deleg_present(inode),
4799 NFSD_DELEGRETURN_TIMEOUT);
4800 trace_nfsd_delegret_wakeup(rqstp, inode, timeo);
4801 return timeo > 0;
4802 }
4803
4804 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
4805 {
4806 struct nfs4_delegation *dp = cb_to_delegation(cb);
4807 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
4808 nfsd_net_id);
4809
4810 block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
4811
4812 /*
4813 * We can't do this in nfsd_break_deleg_cb because it is
4814 * already holding inode->i_lock.
4815 *
4816 * If the dl_time != 0, then we know that it has already been
4817 * queued for a lease break. Don't queue it again.
4818 */
4819 spin_lock(&state_lock);
4820 if (delegation_hashed(dp) && dp->dl_time == 0) {
4821 dp->dl_time = ktime_get_boottime_seconds();
4822 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
4823 }
4824 spin_unlock(&state_lock);
4825 }
4826
4827 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
4828 struct rpc_task *task)
4829 {
4830 struct nfs4_delegation *dp = cb_to_delegation(cb);
4831
4832 trace_nfsd_cb_recall_done(&dp->dl_stid.sc_stateid, task);
4833
4834 if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID ||
4835 dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID)
4836 return 1;
4837
4838 switch (task->tk_status) {
4839 case 0:
4840 return 1;
4841 case -NFS4ERR_DELAY:
4842 rpc_delay(task, 2 * HZ);
4843 return 0;
4844 case -EBADHANDLE:
4845 case -NFS4ERR_BAD_STATEID:
4846 /*
4847 * Race: client probably got cb_recall before open reply
4848 * granting delegation.
4849 */
4850 if (dp->dl_retries--) {
4851 rpc_delay(task, 2 * HZ);
4852 return 0;
4853 }
4854 fallthrough;
4855 default:
4856 return 1;
4857 }
4858 }
4859
4860 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
4861 {
4862 struct nfs4_delegation *dp = cb_to_delegation(cb);
4863
4864 nfs4_put_stid(&dp->dl_stid);
4865 }
4866
4867 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
4868 .prepare = nfsd4_cb_recall_prepare,
4869 .done = nfsd4_cb_recall_done,
4870 .release = nfsd4_cb_recall_release,
4871 };
4872
4873 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
4874 {
4875 /*
4876 * We're assuming the state code never drops its reference
4877 * without first removing the lease. Since we're in this lease
4878 * callback (and since the lease code is serialized by the
4879 * flc_lock) we know the server hasn't removed the lease yet, and
4880 * we know it's safe to take a reference.
4881 */
4882 refcount_inc(&dp->dl_stid.sc_count);
4883 WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall));
4884 }
4885
4886 /* Called from break_lease() with flc_lock held. */
4887 static bool
4888 nfsd_break_deleg_cb(struct file_lock *fl)
4889 {
4890 struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
4891 struct nfs4_file *fp = dp->dl_stid.sc_file;
4892 struct nfs4_client *clp = dp->dl_stid.sc_client;
4893 struct nfsd_net *nn;
4894
4895 trace_nfsd_cb_recall(&dp->dl_stid);
4896
4897 dp->dl_recalled = true;
4898 atomic_inc(&clp->cl_delegs_in_recall);
4899 if (try_to_expire_client(clp)) {
4900 nn = net_generic(clp->net, nfsd_net_id);
4901 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
4902 }
4903
4904 /*
4905 * We don't want the locks code to timeout the lease for us;
4906 * we'll remove it ourself if a delegation isn't returned
4907 * in time:
4908 */
4909 fl->fl_break_time = 0;
4910
4911 fp->fi_had_conflict = true;
4912 nfsd_break_one_deleg(dp);
4913 return false;
4914 }
4915
4916 /**
4917 * nfsd_breaker_owns_lease - Check if lease conflict was resolved
4918 * @fl: Lock state to check
4919 *
4920 * Return values:
4921 * %true: Lease conflict was resolved
4922 * %false: Lease conflict was not resolved.
4923 */
4924 static bool nfsd_breaker_owns_lease(struct file_lock *fl)
4925 {
4926 struct nfs4_delegation *dl = fl->fl_owner;
4927 struct svc_rqst *rqst;
4928 struct nfs4_client *clp;
4929
4930 if (!i_am_nfsd())
4931 return false;
4932 rqst = kthread_data(current);
4933 /* Note rq_prog == NFS_ACL_PROGRAM is also possible: */
4934 if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4)
4935 return false;
4936 clp = *(rqst->rq_lease_breaker);
4937 return dl->dl_stid.sc_client == clp;
4938 }
4939
4940 static int
4941 nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
4942 struct list_head *dispose)
4943 {
4944 struct nfs4_delegation *dp = (struct nfs4_delegation *)onlist->fl_owner;
4945 struct nfs4_client *clp = dp->dl_stid.sc_client;
4946
4947 if (arg & F_UNLCK) {
4948 if (dp->dl_recalled)
4949 atomic_dec(&clp->cl_delegs_in_recall);
4950 return lease_modify(onlist, arg, dispose);
4951 } else
4952 return -EAGAIN;
4953 }
4954
4955 static const struct lock_manager_operations nfsd_lease_mng_ops = {
4956 .lm_breaker_owns_lease = nfsd_breaker_owns_lease,
4957 .lm_break = nfsd_break_deleg_cb,
4958 .lm_change = nfsd_change_deleg_cb,
4959 };
4960
4961 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
4962 {
4963 if (nfsd4_has_session(cstate))
4964 return nfs_ok;
4965 if (seqid == so->so_seqid - 1)
4966 return nfserr_replay_me;
4967 if (seqid == so->so_seqid)
4968 return nfs_ok;
4969 return nfserr_bad_seqid;
4970 }
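/*
 * NFSv4.0 owner seqid rules in short: so_seqid holds the next expected
 * value, so a request carrying exactly that value is new work, one
 * carrying the previous value is treated as a retransmission and is
 * answered from the owner's saved reply (the nfserr_replay_me path),
 * and anything else gets NFS4ERR_BAD_SEQID.  Sessions (v4.1+) have
 * their own slot-based replay cache, so the check is skipped there.
 */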
4971
4972 static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions,
4973 struct nfsd_net *nn)
4974 {
4975 struct nfs4_client *found;
4976
4977 spin_lock(&nn->client_lock);
4978 found = find_confirmed_client(clid, sessions, nn);
4979 if (found)
4980 atomic_inc(&found->cl_rpc_users);
4981 spin_unlock(&nn->client_lock);
4982 return found;
4983 }
4984
4985 static __be32 set_client(clientid_t *clid,
4986 struct nfsd4_compound_state *cstate,
4987 struct nfsd_net *nn)
4988 {
4989 if (cstate->clp) {
4990 if (!same_clid(&cstate->clp->cl_clientid, clid))
4991 return nfserr_stale_clientid;
4992 return nfs_ok;
4993 }
4994 if (STALE_CLIENTID(clid, nn))
4995 return nfserr_stale_clientid;
4996 /*
4997 * We're in the 4.0 case (otherwise the SEQUENCE op would have
4998 * set cstate->clp), so session = false:
4999 */
5000 cstate->clp = lookup_clientid(clid, false, nn);
5001 if (!cstate->clp)
5002 return nfserr_expired;
5003 return nfs_ok;
5004 }
5005
5006 __be32
5007 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
5008 struct nfsd4_open *open, struct nfsd_net *nn)
5009 {
5010 clientid_t *clientid = &open->op_clientid;
5011 struct nfs4_client *clp = NULL;
5012 unsigned int strhashval;
5013 struct nfs4_openowner *oo = NULL;
5014 __be32 status;
5015
5016 /*
5017 * In case we need it later, after we've already created the
5018 * file and don't want to risk a further failure:
5019 */
5020 open->op_file = nfsd4_alloc_file();
5021 if (open->op_file == NULL)
5022 return nfserr_jukebox;
5023
5024 status = set_client(clientid, cstate, nn);
5025 if (status)
5026 return status;
5027 clp = cstate->clp;
5028
5029 strhashval = ownerstr_hashval(&open->op_owner);
5030 oo = find_openstateowner_str(strhashval, open, clp);
5031 open->op_openowner = oo;
5032 if (!oo) {
5033 goto new_owner;
5034 }
5035 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
5036 /* Replace unconfirmed owners without checking for replay. */
5037 release_openowner(oo);
5038 open->op_openowner = NULL;
5039 goto new_owner;
5040 }
5041 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
5042 if (status)
5043 return status;
5044 goto alloc_stateid;
5045 new_owner:
5046 oo = alloc_init_open_stateowner(strhashval, open, cstate);
5047 if (oo == NULL)
5048 return nfserr_jukebox;
5049 open->op_openowner = oo;
5050 alloc_stateid:
5051 open->op_stp = nfs4_alloc_open_stateid(clp);
5052 if (!open->op_stp)
5053 return nfserr_jukebox;
5054
5055 if (nfsd4_has_session(cstate) &&
5056 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
5057 open->op_odstate = alloc_clnt_odstate(clp);
5058 if (!open->op_odstate)
5059 return nfserr_jukebox;
5060 }
5061
5062 return nfs_ok;
5063 }
5064
5065 static inline __be32
5066 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
5067 {
5068 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
5069 return nfserr_openmode;
5070 else
5071 return nfs_ok;
5072 }
5073
5074 static int share_access_to_flags(u32 share_access)
5075 {
5076 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
5077 }
5078
5079 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
5080 {
5081 struct nfs4_stid *ret;
5082
5083 ret = find_stateid_by_type(cl, s,
5084 NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
5085 if (!ret)
5086 return NULL;
5087 return delegstateid(ret);
5088 }
5089
5090 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
5091 {
5092 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
5093 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
5094 }
5095
5096 static __be32
5097 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
5098 struct nfs4_delegation **dp)
5099 {
5100 int flags;
5101 __be32 status = nfserr_bad_stateid;
5102 struct nfs4_delegation *deleg;
5103
5104 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
5105 if (deleg == NULL)
5106 goto out;
5107 if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
5108 nfs4_put_stid(&deleg->dl_stid);
5109 if (cl->cl_minorversion)
5110 status = nfserr_deleg_revoked;
5111 goto out;
5112 }
5113 flags = share_access_to_flags(open->op_share_access);
5114 status = nfs4_check_delegmode(deleg, flags);
5115 if (status) {
5116 nfs4_put_stid(&deleg->dl_stid);
5117 goto out;
5118 }
5119 *dp = deleg;
5120 out:
5121 if (!nfsd4_is_deleg_cur(open))
5122 return nfs_ok;
5123 if (status)
5124 return status;
5125 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
5126 return nfs_ok;
5127 }
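/*
 * For CLAIM_DELEGATE_CUR / CLAIM_DELEG_CUR_FH opens the delegation
 * stateid supplied by the client must check out (a wrong delegation
 * mode or a revoked delegation fails the OPEN), while for other claim
 * types any problem with the delegation stateid is simply ignored here
 * and *dp is left unset.
 */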
5128
5129 static inline int nfs4_access_to_access(u32 nfs4_access)
5130 {
5131 int flags = 0;
5132
5133 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
5134 flags |= NFSD_MAY_READ;
5135 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
5136 flags |= NFSD_MAY_WRITE;
5137 return flags;
5138 }
5139
5140 static inline __be32
5141 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
5142 struct nfsd4_open *open)
5143 {
5144 struct iattr iattr = {
5145 .ia_valid = ATTR_SIZE,
5146 .ia_size = 0,
5147 };
5148 struct nfsd_attrs attrs = {
5149 .na_iattr = &iattr,
5150 };
5151 if (!open->op_truncate)
5152 return 0;
5153 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
5154 return nfserr_inval;
5155 return nfsd_setattr(rqstp, fh, &attrs, 0, (time64_t)0);
5156 }
5157
5158 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
5159 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
5160 struct nfsd4_open *open, bool new_stp)
5161 {
5162 struct nfsd_file *nf = NULL;
5163 __be32 status;
5164 int oflag = nfs4_access_to_omode(open->op_share_access);
5165 int access = nfs4_access_to_access(open->op_share_access);
5166 unsigned char old_access_bmap, old_deny_bmap;
5167
5168 spin_lock(&fp->fi_lock);
5169
5170 /*
5171 * Are we trying to set a deny mode that would conflict with
5172 * current access?
5173 */
5174 status = nfs4_file_check_deny(fp, open->op_share_deny);
5175 if (status != nfs_ok) {
5176 if (status != nfserr_share_denied) {
5177 spin_unlock(&fp->fi_lock);
5178 goto out;
5179 }
5180 if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
5181 stp, open->op_share_deny, false))
5182 status = nfserr_jukebox;
5183 spin_unlock(&fp->fi_lock);
5184 goto out;
5185 }
5186
5187 /* set access to the file */
5188 status = nfs4_file_get_access(fp, open->op_share_access);
5189 if (status != nfs_ok) {
5190 if (status != nfserr_share_denied) {
5191 spin_unlock(&fp->fi_lock);
5192 goto out;
5193 }
5194 if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
5195 stp, open->op_share_access, true))
5196 status = nfserr_jukebox;
5197 spin_unlock(&fp->fi_lock);
5198 goto out;
5199 }
5200
5201 /* Set access bits in stateid */
5202 old_access_bmap = stp->st_access_bmap;
5203 set_access(open->op_share_access, stp);
5204
5205 /* Set new deny mask */
5206 old_deny_bmap = stp->st_deny_bmap;
5207 set_deny(open->op_share_deny, stp);
5208 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
5209
5210 if (!fp->fi_fds[oflag]) {
5211 spin_unlock(&fp->fi_lock);
5212
5213 status = nfsd_file_acquire_opened(rqstp, cur_fh, access,
5214 open->op_filp, &nf);
5215 if (status != nfs_ok)
5216 goto out_put_access;
5217
5218 spin_lock(&fp->fi_lock);
5219 if (!fp->fi_fds[oflag]) {
5220 fp->fi_fds[oflag] = nf;
5221 nf = NULL;
5222 }
5223 }
5224 spin_unlock(&fp->fi_lock);
5225 if (nf)
5226 nfsd_file_put(nf);
5227
5228 status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
5229 access));
5230 if (status)
5231 goto out_put_access;
5232
5233 status = nfsd4_truncate(rqstp, cur_fh, open);
5234 if (status)
5235 goto out_put_access;
5236 out:
5237 return status;
5238 out_put_access:
5239 stp->st_access_bmap = old_access_bmap;
5240 nfs4_file_put_access(fp, open->op_share_access);
5241 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
5242 goto out;
5243 }
5244
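/*
 * Handle an OPEN that matched an existing stateid.  If the requested
 * access mode is not yet set on the stateid, fall back to the full
 * nfs4_get_vfs_file() path; otherwise only the new deny mode needs to be
 * tested and recorded before the optional truncate.
 */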
5245 static __be32
5246 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp,
5247 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
5248 struct nfsd4_open *open)
5249 {
5250 __be32 status;
5251 unsigned char old_deny_bmap = stp->st_deny_bmap;
5252
5253 if (!test_access(open->op_share_access, stp))
5254 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open, false);
5255
5256 /* test and set deny mode */
5257 spin_lock(&fp->fi_lock);
5258 status = nfs4_file_check_deny(fp, open->op_share_deny);
5259 switch (status) {
5260 case nfs_ok:
5261 set_deny(open->op_share_deny, stp);
5262 fp->fi_share_deny |=
5263 (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
5264 break;
5265 case nfserr_share_denied:
5266 if (nfs4_resolve_deny_conflicts_locked(fp, false,
5267 stp, open->op_share_deny, false))
5268 status = nfserr_jukebox;
5269 break;
5270 }
5271 spin_unlock(&fp->fi_lock);
5272
5273 if (status != nfs_ok)
5274 return status;
5275
5276 status = nfsd4_truncate(rqstp, cur_fh, open);
5277 if (status != nfs_ok)
5278 reset_union_bmap_deny(old_deny_bmap, stp);
5279 return status;
5280 }
5281
5282 /* Should we give out recallable state?: */
5283 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
5284 {
5285 if (clp->cl_cb_state == NFSD4_CB_UP)
5286 return true;
5287 /*
5288 * In the sessions case, since we don't have to establish a
5289 * separate connection for callbacks, we assume it's OK
5290 * until we hear otherwise:
5291 */
5292 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
5293 }
5294
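/*
 * Allocate and fill in a file_lock describing the delegation lease: an
 * FL_DELEG lock covering the whole file, owned by the delegation and
 * backed by the file's fi_deleg_file.
 */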
5295 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
5296 int flag)
5297 {
5298 struct file_lock *fl;
5299
5300 fl = locks_alloc_lock();
5301 if (!fl)
5302 return NULL;
5303 fl->fl_lmops = &nfsd_lease_mng_ops;
5304 fl->fl_flags = FL_DELEG;
5305 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
5306 fl->fl_end = OFFSET_MAX;
5307 fl->fl_owner = (fl_owner_t)dp;
5308 fl->fl_pid = current->tgid;
5309 fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
5310 return fl;
5311 }
5312
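/*
 * Decide whether it is safe to hand out a read delegation: returns
 * -EAGAIN if the inode's i_writecount shows writers other than our own
 * cached nfsd opens, or if another client already holds an NFSv4 open
 * for write on this file.
 */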
5313 static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
5314 struct nfs4_file *fp)
5315 {
5316 struct nfs4_ol_stateid *st;
5317 struct file *f = fp->fi_deleg_file->nf_file;
5318 struct inode *ino = locks_inode(f);
5319 int writes;
5320
5321 writes = atomic_read(&ino->i_writecount);
5322 if (!writes)
5323 return 0;
5324 /*
5325 * There could be multiple filehandles (hence multiple
5326 * nfs4_files) referencing this file, but that's not too
5327 * common; let's just give up in that case rather than
5328 * trying to go look up all the clients using that other
5329 * nfs4_file as well:
5330 */
5331 if (fp->fi_aliased)
5332 return -EAGAIN;
5333 /*
5334 * If there's a close in progress, make sure that we see it
5335 * clear any fi_fds[] entries before we see it decrement
5336 * i_writecount:
5337 */
5338 smp_mb__after_atomic();
5339
5340 if (fp->fi_fds[O_WRONLY])
5341 writes--;
5342 if (fp->fi_fds[O_RDWR])
5343 writes--;
5344 if (writes > 0)
5345 return -EAGAIN; /* There may be non-NFSv4 writers */
5346 /*
5347 * It's possible there are non-NFSv4 write opens in progress,
5348 * but if they haven't incremented i_writecount yet then they
5349 * also haven't called break lease yet; so, they'll break this
5350 * lease soon enough. So, all that's left to check for is NFSv4
5351 * opens:
5352 */
5353 spin_lock(&fp->fi_lock);
5354 list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
5355 if (st->st_openstp == NULL /* it's an open */ &&
5356 access_permit_write(st) &&
5357 st->st_stid.sc_client != clp) {
5358 spin_unlock(&fp->fi_lock);
5359 return -EAGAIN;
5360 }
5361 }
5362 spin_unlock(&fp->fi_lock);
5363 /*
5364 * There's a small chance that we could be racing with another
5365 * NFSv4 open. However, any open that hasn't added itself to
5366 * the fi_stateids list also hasn't called break_lease yet; so,
5367 * they'll break this lease soon enough.
5368 */
5369 return 0;
5370 }
5371
5372 /*
5373 * It's possible that between opening the dentry and setting the delegation,
5374 * that it has been renamed or unlinked. Redo the lookup to verify that this
5375 * hasn't happened.
5376 */
5377 static int
5378 nfsd4_verify_deleg_dentry(struct nfsd4_open *open, struct nfs4_file *fp,
5379 struct svc_fh *parent)
5380 {
5381 struct svc_export *exp;
5382 struct dentry *child;
5383 __be32 err;
5384
5385 err = nfsd_lookup_dentry(open->op_rqstp, parent,
5386 open->op_fname, open->op_fnamelen,
5387 &exp, &child);
5388
5389 if (err)
5390 return -EAGAIN;
5391
5392 exp_put(exp);
5393 dput(child);
5394 if (child != file_dentry(fp->fi_deleg_file->nf_file))
5395 return -EAGAIN;
5396
5397 return 0;
5398 }
5399
5400 /*
5401 * We avoid breaking delegations held by a client due to its own activity, but
5402 * clearing setuid/setgid bits on a write is an implicit activity and the client
5403 * may not notice and continue using the old mode. Avoid giving out a delegation
5404 * on setuid/setgid files when the client is requesting an open for write.
5405 */
5406 static int
5407 nfsd4_verify_setuid_write(struct nfsd4_open *open, struct nfsd_file *nf)
5408 {
5409 struct inode *inode = file_inode(nf->nf_file);
5410
5411 if ((open->op_share_access & NFS4_SHARE_ACCESS_WRITE) &&
5412 (inode->i_mode & (S_ISUID|S_ISGID)))
5413 return -EAGAIN;
5414 return 0;
5415 }
5416
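/*
 * Try to set up a read delegation: take a reference on a readable
 * nfsd_file for the file, install an FL_DELEG lease, then re-check the
 * dentry, conflicting opens, setuid/setgid bits and fp->fi_had_conflict
 * before hashing the delegation.  Any failure unwinds the lease and the
 * references taken so far.
 */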
5417 static struct nfs4_delegation *
5418 nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
5419 struct svc_fh *parent)
5420 {
5421 int status = 0;
5422 struct nfs4_client *clp = stp->st_stid.sc_client;
5423 struct nfs4_file *fp = stp->st_stid.sc_file;
5424 struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate;
5425 struct nfs4_delegation *dp;
5426 struct nfsd_file *nf;
5427 struct file_lock *fl;
5428
5429 /*
5430 * The fi_had_conflict and nfs_get_existing_delegation checks
5431 * here are just optimizations; we'll need to recheck them at
5432 * the end:
5433 */
5434 if (fp->fi_had_conflict)
5435 return ERR_PTR(-EAGAIN);
5436
5437 nf = find_readable_file(fp);
5438 if (!nf) {
5439 /*
5440 * We probably could attempt another open and get a read
5441 * delegation, but for now, don't bother until the
5442 * client actually sends us one.
5443 */
5444 return ERR_PTR(-EAGAIN);
5445 }
5446 spin_lock(&state_lock);
5447 spin_lock(&fp->fi_lock);
5448 if (nfs4_delegation_exists(clp, fp))
5449 status = -EAGAIN;
5450 else if (nfsd4_verify_setuid_write(open, nf))
5451 status = -EAGAIN;
5452 else if (!fp->fi_deleg_file) {
5453 fp->fi_deleg_file = nf;
5454 /* increment early to prevent fi_deleg_file from being
5455 * cleared */
5456 fp->fi_delegees = 1;
5457 nf = NULL;
5458 } else
5459 fp->fi_delegees++;
5460 spin_unlock(&fp->fi_lock);
5461 spin_unlock(&state_lock);
5462 if (nf)
5463 nfsd_file_put(nf);
5464 if (status)
5465 return ERR_PTR(status);
5466
5467 status = -ENOMEM;
5468 dp = alloc_init_deleg(clp, fp, odstate);
5469 if (!dp)
5470 goto out_delegees;
5471
5472 fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
5473 if (!fl)
5474 goto out_clnt_odstate;
5475
5476 status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
5477 if (fl)
5478 locks_free_lock(fl);
5479 if (status)
5480 goto out_clnt_odstate;
5481
5482 if (parent) {
5483 status = nfsd4_verify_deleg_dentry(open, fp, parent);
5484 if (status)
5485 goto out_unlock;
5486 }
5487
5488 status = nfsd4_check_conflicting_opens(clp, fp);
5489 if (status)
5490 goto out_unlock;
5491
5492 /*
5493 * Now that the deleg is set, check again to ensure that nothing
5494 * raced in and changed the mode while we weren't looking.
5495 */
5496 status = nfsd4_verify_setuid_write(open, fp->fi_deleg_file);
5497 if (status)
5498 goto out_unlock;
5499
5500 status = -EAGAIN;
5501 if (fp->fi_had_conflict)
5502 goto out_unlock;
5503
5504 spin_lock(&state_lock);
5505 spin_lock(&fp->fi_lock);
5506 status = hash_delegation_locked(dp, fp);
5507 spin_unlock(&fp->fi_lock);
5508 spin_unlock(&state_lock);
5509
5510 if (status)
5511 goto out_unlock;
5512
5513 return dp;
5514 out_unlock:
5515 vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
5516 out_clnt_odstate:
5517 put_clnt_odstate(dp->dl_clnt_odstate);
5518 nfs4_put_stid(&dp->dl_stid);
5519 out_delegees:
5520 put_deleg_file(fp);
5521 return ERR_PTR(status);
5522 }
5523
5524 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
5525 {
5526 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5527 if (status == -EAGAIN)
5528 open->op_why_no_deleg = WND4_CONTENTION;
5529 else {
5530 open->op_why_no_deleg = WND4_RESOURCE;
5531 switch (open->op_deleg_want) {
5532 case NFS4_SHARE_WANT_READ_DELEG:
5533 case NFS4_SHARE_WANT_WRITE_DELEG:
5534 case NFS4_SHARE_WANT_ANY_DELEG:
5535 break;
5536 case NFS4_SHARE_WANT_CANCEL:
5537 open->op_why_no_deleg = WND4_CANCELLED;
5538 break;
5539 case NFS4_SHARE_WANT_NO_DELEG:
5540 WARN_ON_ONCE(1);
5541 }
5542 }
5543 }
5544
5545 /*
5546 * Attempt to hand out a delegation.
5547 *
5548 * Note we don't support write delegations, and won't until the vfs has
5549 * proper support for them.
5550 */
5551 static void
5552 nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
5553 struct svc_fh *currentfh)
5554 {
5555 struct nfs4_delegation *dp;
5556 struct nfs4_openowner *oo = openowner(stp->st_stateowner);
5557 struct nfs4_client *clp = stp->st_stid.sc_client;
5558 struct svc_fh *parent = NULL;
5559 int cb_up;
5560 int status = 0;
5561
5562 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
5563 open->op_recall = 0;
5564 switch (open->op_claim_type) {
5565 case NFS4_OPEN_CLAIM_PREVIOUS:
5566 if (!cb_up)
5567 open->op_recall = 1;
5568 if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
5569 goto out_no_deleg;
5570 break;
5571 case NFS4_OPEN_CLAIM_NULL:
5572 parent = currentfh;
5573 fallthrough;
5574 case NFS4_OPEN_CLAIM_FH:
5575 /*
5576 * Let's not give out any delegations till everyone's
5577 * had the chance to reclaim theirs, *and* until
5578 * NLM locks have all been reclaimed:
5579 */
5580 if (locks_in_grace(clp->net))
5581 goto out_no_deleg;
5582 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
5583 goto out_no_deleg;
5584 break;
5585 default:
5586 goto out_no_deleg;
5587 }
5588 dp = nfs4_set_delegation(open, stp, parent);
5589 if (IS_ERR(dp))
5590 goto out_no_deleg;
5591
5592 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
5593
5594 trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
5595 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
5596 nfs4_put_stid(&dp->dl_stid);
5597 return;
5598 out_no_deleg:
5599 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
5600 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
5601 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
5602 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
5603 open->op_recall = 1;
5604 }
5605
5606 /* 4.1 client asking for a delegation? */
5607 if (open->op_deleg_want)
5608 nfsd4_open_deleg_none_ext(open, status);
5609 return;
5610 }
5611
5612 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
5613 struct nfs4_delegation *dp)
5614 {
5615 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
5616 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5617 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5618 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
5619 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
5620 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5621 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5622 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
5623 }
5624 /* Otherwise the client must be confused wanting a delegation
5625 * it already has, therefore we don't return
5626 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
5627 */
5628 }
5629
5630 /**
5631 * nfsd4_process_open2 - finish open processing
5632 * @rqstp: the RPC transaction being executed
5633 * @current_fh: NFSv4 COMPOUND's current filehandle
5634 * @open: OPEN arguments
5635 *
5636 * If successful, (1) truncate the file if open->op_truncate was
5637 * set, (2) set open->op_stateid, (3) set open->op_delegation.
5638 *
5639 * Returns %nfs_ok on success; otherwise an nfs4stat value in
5640 * network byte order is returned.
5641 */
5642 __be32
5643 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
5644 {
5645 struct nfsd4_compoundres *resp = rqstp->rq_resp;
5646 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
5647 struct nfs4_file *fp = NULL;
5648 struct nfs4_ol_stateid *stp = NULL;
5649 struct nfs4_delegation *dp = NULL;
5650 __be32 status;
5651 bool new_stp = false;
5652
5653 /*
5654 * Lookup file; if found, lookup stateid and check open request,
5655 * and check for delegations in the process of being recalled.
5656 * If not found, create the nfs4_file struct
5657 */
5658 fp = find_or_add_file(open->op_file, current_fh);
5659 if (fp != open->op_file) {
5660 status = nfs4_check_deleg(cl, open, &dp);
5661 if (status)
5662 goto out;
5663 stp = nfsd4_find_and_lock_existing_open(fp, open);
5664 } else {
5665 open->op_file = NULL;
5666 status = nfserr_bad_stateid;
5667 if (nfsd4_is_deleg_cur(open))
5668 goto out;
5669 }
5670
5671 if (!stp) {
5672 stp = init_open_stateid(fp, open);
5673 if (!open->op_stp)
5674 new_stp = true;
5675 }
5676
5677 /*
5678 * OPEN the file, or upgrade an existing OPEN.
5679 * If truncate fails, the OPEN fails.
5680 *
5681 * stp is already locked.
5682 */
5683 if (!new_stp) {
5684 /* Stateid was found, this is an OPEN upgrade */
5685 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
5686 if (status) {
5687 mutex_unlock(&stp->st_mutex);
5688 goto out;
5689 }
5690 } else {
5691 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open, true);
5692 if (status) {
5693 stp->st_stid.sc_type = NFS4_CLOSED_STID;
5694 release_open_stateid(stp);
5695 mutex_unlock(&stp->st_mutex);
5696 goto out;
5697 }
5698
5699 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
5700 open->op_odstate);
5701 if (stp->st_clnt_odstate == open->op_odstate)
5702 open->op_odstate = NULL;
5703 }
5704
5705 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
5706 mutex_unlock(&stp->st_mutex);
5707
5708 if (nfsd4_has_session(&resp->cstate)) {
5709 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
5710 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5711 open->op_why_no_deleg = WND4_NOT_WANTED;
5712 goto nodeleg;
5713 }
5714 }
5715
5716 /*
5717 * Attempt to hand out a delegation. No error return, because the
5718 * OPEN succeeds even if we fail.
5719 */
5720 nfs4_open_delegation(open, stp, &resp->cstate.current_fh);
5721 nodeleg:
5722 status = nfs_ok;
5723 trace_nfsd_open(&stp->st_stid.sc_stateid);
5724 out:
5725 /* 4.1 client trying to upgrade/downgrade delegation? */
5726 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
5727 open->op_deleg_want)
5728 nfsd4_deleg_xgrade_none_ext(open, dp);
5729
5730 if (fp)
5731 put_nfs4_file(fp);
5732 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
5733 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
5734 /*
5735 * To finish the open response, we just need to set the rflags.
5736 */
5737 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
5738 if (nfsd4_has_session(&resp->cstate))
5739 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
5740 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
5741 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
5742
5743 if (dp)
5744 nfs4_put_stid(&dp->dl_stid);
5745 if (stp)
5746 nfs4_put_stid(&stp->st_stid);
5747
5748 return status;
5749 }
5750
5751 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
5752 struct nfsd4_open *open)
5753 {
5754 if (open->op_openowner) {
5755 struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
5756
5757 nfsd4_cstate_assign_replay(cstate, so);
5758 nfs4_put_stateowner(so);
5759 }
5760 if (open->op_file)
5761 kmem_cache_free(file_slab, open->op_file);
5762 if (open->op_stp)
5763 nfs4_put_stid(&open->op_stp->st_stid);
5764 if (open->op_odstate)
5765 kmem_cache_free(odstate_slab, open->op_odstate);
5766 }
5767
5768 __be32
5769 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5770 union nfsd4_op_u *u)
5771 {
5772 clientid_t *clid = &u->renew;
5773 struct nfs4_client *clp;
5774 __be32 status;
5775 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5776
5777 trace_nfsd_clid_renew(clid);
5778 status = set_client(clid, cstate, nn);
5779 if (status)
5780 return status;
5781 clp = cstate->clp;
5782 if (!list_empty(&clp->cl_delegations)
5783 && clp->cl_cb_state != NFSD4_CB_UP)
5784 return nfserr_cb_path_down;
5785 return nfs_ok;
5786 }
5787
5788 void
5789 nfsd4_end_grace(struct nfsd_net *nn)
5790 {
5791 /* do nothing if grace period already ended */
5792 if (nn->grace_ended)
5793 return;
5794
5795 trace_nfsd_grace_complete(nn);
5796 nn->grace_ended = true;
5797 /*
5798 * If the server goes down again right now, an NFSv4
5799 * client will still be allowed to reclaim after it comes back up,
5800 * even if it hasn't yet had a chance to reclaim state this time.
5801 *
5802 */
5803 nfsd4_record_grace_done(nn);
5804 /*
5805 * At this point, NFSv4 clients can still reclaim. But if the
5806 * server crashes, any that have not yet reclaimed will be out
5807 * of luck on the next boot.
5808 *
5809 * (NFSv4.1+ clients are considered to have reclaimed once they
5810 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
5811 * have reclaimed after their first OPEN.)
5812 */
5813 locks_end_grace(&nn->nfsd4_manager);
5814 /*
5815 * At this point, and once lockd and/or any other containers
5816 * exit their grace period, further reclaims will fail and
5817 * regular locking can resume.
5818 */
5819 }
5820
5821 /*
5822 * If we've waited a lease period but there are still clients trying to
5823 * reclaim, wait a little longer to give them a chance to finish.
5824 */
5825 static bool clients_still_reclaiming(struct nfsd_net *nn)
5826 {
5827 time64_t double_grace_period_end = nn->boot_time +
5828 2 * nn->nfsd4_lease;
5829
5830 if (nn->track_reclaim_completes &&
5831 atomic_read(&nn->nr_reclaim_complete) ==
5832 nn->reclaim_str_hashtbl_size)
5833 return false;
5834 if (!nn->somebody_reclaimed)
5835 return false;
5836 nn->somebody_reclaimed = false;
5837 /*
5838 * If we've given them *two* lease times to reclaim, and they're
5839 * still not done, give up:
5840 */
5841 if (ktime_get_boottime_seconds() > double_grace_period_end)
5842 return false;
5843 return true;
5844 }
5845
5846 struct laundry_time {
5847 time64_t cutoff;
5848 time64_t new_timeo;
5849 };
5850
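/*
 * Returns true if the state was last refreshed before the laundromat's
 * cutoff time.  Otherwise, shrink lt->new_timeo so that the next
 * laundromat run happens no later than when this state would expire.
 */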
5851 static bool state_expired(struct laundry_time *lt, time64_t last_refresh)
5852 {
5853 time64_t time_remaining;
5854
5855 if (last_refresh < lt->cutoff)
5856 return true;
5857 time_remaining = last_refresh - lt->cutoff;
5858 lt->new_timeo = min(lt->new_timeo, time_remaining);
5859 return false;
5860 }
5861
5862 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
5863 void nfsd4_ssc_init_umount_work(struct nfsd_net *nn)
5864 {
5865 spin_lock_init(&nn->nfsd_ssc_lock);
5866 INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list);
5867 init_waitqueue_head(&nn->nfsd_ssc_waitq);
5868 }
5869 EXPORT_SYMBOL_GPL(nfsd4_ssc_init_umount_work);
5870
5871 /*
5872 * This is called when nfsd is being shut down, after all inter_ssc
5873 * cleanup is done, to destroy the ssc delayed unmount list.
5874 */
5875 static void nfsd4_ssc_shutdown_umount(struct nfsd_net *nn)
5876 {
5877 struct nfsd4_ssc_umount_item *ni = NULL;
5878 struct nfsd4_ssc_umount_item *tmp;
5879
5880 spin_lock(&nn->nfsd_ssc_lock);
5881 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
5882 list_del(&ni->nsui_list);
5883 spin_unlock(&nn->nfsd_ssc_lock);
5884 mntput(ni->nsui_vfsmount);
5885 kfree(ni);
5886 spin_lock(&nn->nfsd_ssc_lock);
5887 }
5888 spin_unlock(&nn->nfsd_ssc_lock);
5889 }
5890
5891 static void nfsd4_ssc_expire_umount(struct nfsd_net *nn)
5892 {
5893 bool do_wakeup = false;
5894 struct nfsd4_ssc_umount_item *ni = NULL;
5895 struct nfsd4_ssc_umount_item *tmp;
5896
5897 spin_lock(&nn->nfsd_ssc_lock);
5898 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
5899 if (time_after(jiffies, ni->nsui_expire)) {
5900 if (refcount_read(&ni->nsui_refcnt) > 1)
5901 continue;
5902
5903 /* mark as being unmounted */
5904 ni->nsui_busy = true;
5905 spin_unlock(&nn->nfsd_ssc_lock);
5906 mntput(ni->nsui_vfsmount);
5907 spin_lock(&nn->nfsd_ssc_lock);
5908
5909 /* waiters need to start from the beginning of the list */
5910 list_del(&ni->nsui_list);
5911 kfree(ni);
5912
5913 /* wakeup ssc_connect waiters */
5914 do_wakeup = true;
5915 continue;
5916 }
5917 break;
5918 }
5919 if (do_wakeup)
5920 wake_up_all(&nn->nfsd_ssc_waitq);
5921 spin_unlock(&nn->nfsd_ssc_lock);
5922 }
5923 #endif
5924
5925 /* Check if any lock belonging to this lockowner has any blockers */
5926 static bool
5927 nfs4_lockowner_has_blockers(struct nfs4_lockowner *lo)
5928 {
5929 struct file_lock_context *ctx;
5930 struct nfs4_ol_stateid *stp;
5931 struct nfs4_file *nf;
5932
5933 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
5934 nf = stp->st_stid.sc_file;
5935 ctx = nf->fi_inode->i_flctx;
5936 if (!ctx)
5937 continue;
5938 if (locks_owner_has_blockers(ctx, lo))
5939 return true;
5940 }
5941 return false;
5942 }
5943
5944 static bool
5945 nfs4_anylock_blockers(struct nfs4_client *clp)
5946 {
5947 int i;
5948 struct nfs4_stateowner *so;
5949 struct nfs4_lockowner *lo;
5950
5951 if (atomic_read(&clp->cl_delegs_in_recall))
5952 return true;
5953 spin_lock(&clp->cl_lock);
5954 for (i = 0; i < OWNER_HASH_SIZE; i++) {
5955 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[i],
5956 so_strhash) {
5957 if (so->so_is_open_owner)
5958 continue;
5959 lo = lockowner(so);
5960 if (nfs4_lockowner_has_blockers(lo)) {
5961 spin_unlock(&clp->cl_lock);
5962 return true;
5963 }
5964 }
5965 }
5966 spin_unlock(&clp->cl_lock);
5967 return false;
5968 }
5969
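/*
 * Scan the client LRU.  Clients whose lease has run out and that have no
 * RPCs in flight are downgraded to courtesy clients; those that are
 * already expirable, hold no state, or hold state that blocks other
 * lockers are moved to @reaplist for destruction.  If the server is over
 * nfs4_max_clients, up to NFSD_CLIENT_MAX_TRIM_PER_RUN non-blocking
 * courtesy clients are reaped as well.
 */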
5970 static void
5971 nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist,
5972 struct laundry_time *lt)
5973 {
5974 unsigned int maxreap, reapcnt = 0;
5975 struct list_head *pos, *next;
5976 struct nfs4_client *clp;
5977
5978 maxreap = (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) ?
5979 NFSD_CLIENT_MAX_TRIM_PER_RUN : 0;
5980 INIT_LIST_HEAD(reaplist);
5981 spin_lock(&nn->client_lock);
5982 list_for_each_safe(pos, next, &nn->client_lru) {
5983 clp = list_entry(pos, struct nfs4_client, cl_lru);
5984 if (clp->cl_state == NFSD4_EXPIRABLE)
5985 goto exp_client;
5986 if (!state_expired(lt, clp->cl_time))
5987 break;
5988 if (!atomic_read(&clp->cl_rpc_users)) {
5989 if (clp->cl_state == NFSD4_ACTIVE)
5990 atomic_inc(&nn->nfsd_courtesy_clients);
5991 clp->cl_state = NFSD4_COURTESY;
5992 }
5993 if (!client_has_state(clp))
5994 goto exp_client;
5995 if (!nfs4_anylock_blockers(clp))
5996 if (reapcnt >= maxreap)
5997 continue;
5998 exp_client:
5999 if (!mark_client_expired_locked(clp)) {
6000 list_add(&clp->cl_lru, reaplist);
6001 reapcnt++;
6002 }
6003 }
6004 spin_unlock(&nn->client_lock);
6005 }
6006
6007 static void
6008 nfs4_get_courtesy_client_reaplist(struct nfsd_net *nn,
6009 struct list_head *reaplist)
6010 {
6011 unsigned int maxreap = 0, reapcnt = 0;
6012 struct list_head *pos, *next;
6013 struct nfs4_client *clp;
6014
6015 maxreap = NFSD_CLIENT_MAX_TRIM_PER_RUN;
6016 INIT_LIST_HEAD(reaplist);
6017
6018 spin_lock(&nn->client_lock);
6019 list_for_each_safe(pos, next, &nn->client_lru) {
6020 clp = list_entry(pos, struct nfs4_client, cl_lru);
6021 if (clp->cl_state == NFSD4_ACTIVE)
6022 break;
6023 if (reapcnt >= maxreap)
6024 break;
6025 if (!mark_client_expired_locked(clp)) {
6026 list_add(&clp->cl_lru, reaplist);
6027 reapcnt++;
6028 }
6029 }
6030 spin_unlock(&nn->client_lock);
6031 }
6032
6033 static void
6034 nfs4_process_client_reaplist(struct list_head *reaplist)
6035 {
6036 struct list_head *pos, *next;
6037 struct nfs4_client *clp;
6038
6039 list_for_each_safe(pos, next, reaplist) {
6040 clp = list_entry(pos, struct nfs4_client, cl_lru);
6041 trace_nfsd_clid_purged(&clp->cl_clientid);
6042 list_del_init(&clp->cl_lru);
6043 expire_client(clp);
6044 }
6045 }
6046
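/*
 * The laundromat proper: end the grace period if possible, then expire
 * copy-notify stateids, stale clients, delegations awaiting recall,
 * openowners on the close LRU, and timed-out blocked locks.  Returns the
 * number of seconds until the next run is needed.
 */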
6047 static time64_t
6048 nfs4_laundromat(struct nfsd_net *nn)
6049 {
6050 struct nfs4_openowner *oo;
6051 struct nfs4_delegation *dp;
6052 struct nfs4_ol_stateid *stp;
6053 struct nfsd4_blocked_lock *nbl;
6054 struct list_head *pos, *next, reaplist;
6055 struct laundry_time lt = {
6056 .cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease,
6057 .new_timeo = nn->nfsd4_lease
6058 };
6059 struct nfs4_cpntf_state *cps;
6060 copy_stateid_t *cps_t;
6061 int i;
6062
6063 if (clients_still_reclaiming(nn)) {
6064 lt.new_timeo = 0;
6065 goto out;
6066 }
6067 nfsd4_end_grace(nn);
6068
6069 spin_lock(&nn->s2s_cp_lock);
6070 idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
6071 cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
6072 if (cps->cp_stateid.cs_type == NFS4_COPYNOTIFY_STID &&
6073 state_expired(<, cps->cpntf_time))
6074 _free_cpntf_state_locked(nn, cps);
6075 }
6076 spin_unlock(&nn->s2s_cp_lock);
6077 nfs4_get_client_reaplist(nn, &reaplist, <);
6078 nfs4_process_client_reaplist(&reaplist);
6079
6080 spin_lock(&state_lock);
6081 list_for_each_safe(pos, next, &nn->del_recall_lru) {
6082 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
6083 if (!state_expired(<, dp->dl_time))
6084 break;
6085 WARN_ON(!unhash_delegation_locked(dp));
6086 list_add(&dp->dl_recall_lru, &reaplist);
6087 }
6088 spin_unlock(&state_lock);
6089 while (!list_empty(&reaplist)) {
6090 dp = list_first_entry(&reaplist, struct nfs4_delegation,
6091 dl_recall_lru);
6092 list_del_init(&dp->dl_recall_lru);
6093 revoke_delegation(dp);
6094 }
6095
6096 spin_lock(&nn->client_lock);
6097 while (!list_empty(&nn->close_lru)) {
6098 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
6099 oo_close_lru);
6100 if (!state_expired(<, oo->oo_time))
6101 break;
6102 list_del_init(&oo->oo_close_lru);
6103 stp = oo->oo_last_closed_stid;
6104 oo->oo_last_closed_stid = NULL;
6105 spin_unlock(&nn->client_lock);
6106 nfs4_put_stid(&stp->st_stid);
6107 spin_lock(&nn->client_lock);
6108 }
6109 spin_unlock(&nn->client_lock);
6110
6111 /*
6112 * It's possible for a client to try and acquire an already held lock
6113 * that is being held for a long time, and then lose interest in it.
6114 * So, we clean out any un-revisited request after a lease period
6115 * under the assumption that the client is no longer interested.
6116 *
6117 * RFC5661, sec. 9.6 states that the client must not rely on getting
6118 * notifications and must continue to poll for locks, even when the
6119 * server supports them. Thus this shouldn't lead to clients blocking
6120 * indefinitely once the lock does become free.
6121 */
6122 BUG_ON(!list_empty(&reaplist));
6123 spin_lock(&nn->blocked_locks_lock);
6124 while (!list_empty(&nn->blocked_locks_lru)) {
6125 nbl = list_first_entry(&nn->blocked_locks_lru,
6126 struct nfsd4_blocked_lock, nbl_lru);
6127 if (!state_expired(<, nbl->nbl_time))
6128 break;
6129 list_move(&nbl->nbl_lru, &reaplist);
6130 list_del_init(&nbl->nbl_list);
6131 }
6132 spin_unlock(&nn->blocked_locks_lock);
6133
6134 while (!list_empty(&reaplist)) {
6135 nbl = list_first_entry(&reaplist,
6136 struct nfsd4_blocked_lock, nbl_lru);
6137 list_del_init(&nbl->nbl_lru);
6138 free_blocked_lock(nbl);
6139 }
6140 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
6141 /* service the server-to-server copy delayed unmount list */
6142 nfsd4_ssc_expire_umount(nn);
6143 #endif
6144 out:
6145 return max_t(time64_t, lt.new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
6146 }
6147
6148 static void laundromat_main(struct work_struct *);
6149
6150 static void
6151 laundromat_main(struct work_struct *laundry)
6152 {
6153 time64_t t;
6154 struct delayed_work *dwork = to_delayed_work(laundry);
6155 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
6156 laundromat_work);
6157
6158 t = nfs4_laundromat(nn);
6159 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
6160 }
6161
6162 static void
6163 courtesy_client_reaper(struct work_struct *reaper)
6164 {
6165 struct list_head reaplist;
6166 struct delayed_work *dwork = to_delayed_work(reaper);
6167 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
6168 nfsd_shrinker_work);
6169
6170 nfs4_get_courtesy_client_reaplist(nn, &reaplist);
6171 nfs4_process_client_reaplist(&reaplist);
6172 }
6173
6174 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
6175 {
6176 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
6177 return nfserr_bad_stateid;
6178 return nfs_ok;
6179 }
6180
6181 static
6182 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
6183 {
6184 __be32 status = nfserr_openmode;
6185
6186 /* For lock stateid's, we test the parent open, not the lock: */
6187 if (stp->st_openstp)
6188 stp = stp->st_openstp;
6189 if ((flags & WR_STATE) && !access_permit_write(stp))
6190 goto out;
6191 if ((flags & RD_STATE) && !access_permit_read(stp))
6192 goto out;
6193 status = nfs_ok;
6194 out:
6195 return status;
6196 }
6197
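/*
 * Handle the special all-zeros and all-ones stateids: a READ with the
 * all-ones stateid is always permitted; other uses are checked against
 * any conflicting share deny on the file (or rejected with nfserr_grace
 * while the grace period is still running).
 */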
6198 static inline __be32
6199 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
6200 {
6201 if (ONE_STATEID(stateid) && (flags & RD_STATE))
6202 return nfs_ok;
6203 else if (opens_in_grace(net)) {
6204 /* Answer in remaining cases depends on existence of
6205 * conflicting state; so we must wait out the grace period. */
6206 return nfserr_grace;
6207 } else if (flags & WR_STATE)
6208 return nfs4_share_conflict(current_fh,
6209 NFS4_SHARE_DENY_WRITE);
6210 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
6211 return nfs4_share_conflict(current_fh,
6212 NFS4_SHARE_DENY_READ);
6213 }
6214
6215 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
6216 {
6217 /*
6218 * When sessions are used the stateid generation number is ignored
6219 * when it is zero.
6220 */
6221 if (has_session && in->si_generation == 0)
6222 return nfs_ok;
6223
6224 if (in->si_generation == ref->si_generation)
6225 return nfs_ok;
6226
6227 /* If the client sends us a stateid from the future, it's buggy: */
6228 if (nfsd4_stateid_generation_after(in, ref))
6229 return nfserr_bad_stateid;
6230 /*
6231 * However, we could see a stateid from the past, even from a
6232 * non-buggy client. For example, if the client sends a lock
6233 * while some IO is outstanding, the lock may bump si_generation
6234 * while the IO is still in flight. The client could avoid that
6235 * situation by waiting for responses on all the IO requests,
6236 * but better performance may result in retrying IO that
6237 * receives an old_stateid error if requests are rarely
6238 * reordered in flight:
6239 */
6240 return nfserr_old_stateid;
6241 }
6242
6243 static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
6244 {
6245 __be32 ret;
6246
6247 spin_lock(&s->sc_lock);
6248 ret = nfsd4_verify_open_stid(s);
6249 if (ret == nfs_ok)
6250 ret = check_stateid_generation(in, &s->sc_stateid, has_session);
6251 spin_unlock(&s->sc_lock);
6252 return ret;
6253 }
6254
6255 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
6256 {
6257 if (ols->st_stateowner->so_is_open_owner &&
6258 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
6259 return nfserr_bad_stateid;
6260 return nfs_ok;
6261 }
6262
6263 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
6264 {
6265 struct nfs4_stid *s;
6266 __be32 status = nfserr_bad_stateid;
6267
6268 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
6269 CLOSE_STATEID(stateid))
6270 return status;
6271 spin_lock(&cl->cl_lock);
6272 s = find_stateid_locked(cl, stateid);
6273 if (!s)
6274 goto out_unlock;
6275 status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
6276 if (status)
6277 goto out_unlock;
6278 switch (s->sc_type) {
6279 case NFS4_DELEG_STID:
6280 status = nfs_ok;
6281 break;
6282 case NFS4_REVOKED_DELEG_STID:
6283 status = nfserr_deleg_revoked;
6284 break;
6285 case NFS4_OPEN_STID:
6286 case NFS4_LOCK_STID:
6287 status = nfsd4_check_openowner_confirmed(openlockstateid(s));
6288 break;
6289 default:
6290 printk("unknown stateid type %x\n", s->sc_type);
6291 fallthrough;
6292 case NFS4_CLOSED_STID:
6293 case NFS4_CLOSED_DELEG_STID:
6294 status = nfserr_bad_stateid;
6295 }
6296 out_unlock:
6297 spin_unlock(&cl->cl_lock);
6298 return status;
6299 }
6300
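/*
 * Look up the client named in @stateid and then find a stateid of one of
 * the types in @typemask.  A revoked delegation stateid is returned only
 * if the caller explicitly asked for NFS4_REVOKED_DELEG_STID; otherwise
 * it is reported as nfserr_deleg_revoked (or nfserr_bad_stateid for
 * v4.0 clients).
 */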
6301 __be32
6302 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
6303 stateid_t *stateid, unsigned char typemask,
6304 struct nfs4_stid **s, struct nfsd_net *nn)
6305 {
6306 __be32 status;
6307 struct nfs4_stid *stid;
6308 bool return_revoked = false;
6309
6310 /*
6311 * only return revoked delegations if explicitly asked for;
6312 * otherwise we report revoked or bad_stateid status.
6313 */
6314 if (typemask & NFS4_REVOKED_DELEG_STID)
6315 return_revoked = true;
6316 else if (typemask & NFS4_DELEG_STID)
6317 typemask |= NFS4_REVOKED_DELEG_STID;
6318
6319 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
6320 CLOSE_STATEID(stateid))
6321 return nfserr_bad_stateid;
6322 status = set_client(&stateid->si_opaque.so_clid, cstate, nn);
6323 if (status == nfserr_stale_clientid) {
6324 if (cstate->session)
6325 return nfserr_bad_stateid;
6326 return nfserr_stale_stateid;
6327 }
6328 if (status)
6329 return status;
6330 stid = find_stateid_by_type(cstate->clp, stateid, typemask);
6331 if (!stid)
6332 return nfserr_bad_stateid;
6333 if ((stid->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
6334 nfs4_put_stid(stid);
6335 if (cstate->minorversion)
6336 return nfserr_deleg_revoked;
6337 return nfserr_bad_stateid;
6338 }
6339 *s = stid;
6340 return nfs_ok;
6341 }
6342
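/*
 * Return a referenced nfsd_file for @s: the delegation file for
 * delegation stateids, otherwise a readable or writeable file from the
 * nfs4_file depending on @flags.
 */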
6343 static struct nfsd_file *
6344 nfs4_find_file(struct nfs4_stid *s, int flags)
6345 {
6346 if (!s)
6347 return NULL;
6348
6349 switch (s->sc_type) {
6350 case NFS4_DELEG_STID:
6351 if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
6352 return NULL;
6353 return nfsd_file_get(s->sc_file->fi_deleg_file);
6354 case NFS4_OPEN_STID:
6355 case NFS4_LOCK_STID:
6356 if (flags & RD_STATE)
6357 return find_readable_file(s->sc_file);
6358 else
6359 return find_writeable_file(s->sc_file);
6360 }
6361
6362 return NULL;
6363 }
6364
6365 static __be32
6366 nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
6367 {
6368 __be32 status;
6369
6370 status = nfsd4_check_openowner_confirmed(ols);
6371 if (status)
6372 return status;
6373 return nfs4_check_openmode(ols, flags);
6374 }
6375
6376 static __be32
6377 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
6378 struct nfsd_file **nfp, int flags)
6379 {
6380 int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
6381 struct nfsd_file *nf;
6382 __be32 status;
6383
6384 nf = nfs4_find_file(s, flags);
6385 if (nf) {
6386 status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
6387 acc | NFSD_MAY_OWNER_OVERRIDE);
6388 if (status) {
6389 nfsd_file_put(nf);
6390 goto out;
6391 }
6392 } else {
6393 status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
6394 if (status)
6395 return status;
6396 }
6397 *nfp = nf;
6398 out:
6399 return status;
6400 }
6401 static void
6402 _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
6403 {
6404 WARN_ON_ONCE(cps->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID);
6405 if (!refcount_dec_and_test(&cps->cp_stateid.cs_count))
6406 return;
6407 list_del(&cps->cp_list);
6408 idr_remove(&nn->s2s_cp_stateids,
6409 cps->cp_stateid.cs_stid.si_opaque.so_id);
6410 kfree(cps);
6411 }
6412 /*
6413 * A READ from an inter server to server COPY will have a
6414 * copy stateid. Look up the copy notify stateid from the
6415 * idr structure and take a reference on it.
6416 */
6417 __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
6418 struct nfs4_client *clp,
6419 struct nfs4_cpntf_state **cps)
6420 {
6421 copy_stateid_t *cps_t;
6422 struct nfs4_cpntf_state *state = NULL;
6423
6424 if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id)
6425 return nfserr_bad_stateid;
6426 spin_lock(&nn->s2s_cp_lock);
6427 cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id);
6428 if (cps_t) {
6429 state = container_of(cps_t, struct nfs4_cpntf_state,
6430 cp_stateid);
6431 if (state->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID) {
6432 state = NULL;
6433 goto unlock;
6434 }
6435 if (!clp)
6436 refcount_inc(&state->cp_stateid.cs_count);
6437 else
6438 _free_cpntf_state_locked(nn, state);
6439 }
6440 unlock:
6441 spin_unlock(&nn->s2s_cp_lock);
6442 if (!state)
6443 return nfserr_bad_stateid;
6444 if (!clp && state)
6445 *cps = state;
6446 return 0;
6447 }
6448
6449 static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
6450 struct nfs4_stid **stid)
6451 {
6452 __be32 status;
6453 struct nfs4_cpntf_state *cps = NULL;
6454 struct nfs4_client *found;
6455
6456 status = manage_cpntf_state(nn, st, NULL, &cps);
6457 if (status)
6458 return status;
6459
6460 cps->cpntf_time = ktime_get_boottime_seconds();
6461
6462 status = nfserr_expired;
6463 found = lookup_clientid(&cps->cp_p_clid, true, nn);
6464 if (!found)
6465 goto out;
6466
6467 *stid = find_stateid_by_type(found, &cps->cp_p_stateid,
6468 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID);
6469 if (*stid)
6470 status = nfs_ok;
6471 else
6472 status = nfserr_bad_stateid;
6473
6474 put_client_renew(found);
6475 out:
6476 nfs4_put_cpntf_state(nn, cps);
6477 return status;
6478 }
6479
6480 void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
6481 {
6482 spin_lock(&nn->s2s_cp_lock);
6483 _free_cpntf_state_locked(nn, cps);
6484 spin_unlock(&nn->s2s_cp_lock);
6485 }
6486
6487 /*
6488 * Checks for stateid operations
6489 */
6490 __be32
6491 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
6492 struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
6493 stateid_t *stateid, int flags, struct nfsd_file **nfp,
6494 struct nfs4_stid **cstid)
6495 {
6496 struct net *net = SVC_NET(rqstp);
6497 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6498 struct nfs4_stid *s = NULL;
6499 __be32 status;
6500
6501 if (nfp)
6502 *nfp = NULL;
6503
6504 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
6505 if (cstid)
6506 status = nfserr_bad_stateid;
6507 else
6508 status = check_special_stateids(net, fhp, stateid,
6509 flags);
6510 goto done;
6511 }
6512
6513 status = nfsd4_lookup_stateid(cstate, stateid,
6514 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
6515 &s, nn);
6516 if (status == nfserr_bad_stateid)
6517 status = find_cpntf_state(nn, stateid, &s);
6518 if (status)
6519 return status;
6520 status = nfsd4_stid_check_stateid_generation(stateid, s,
6521 nfsd4_has_session(cstate));
6522 if (status)
6523 goto out;
6524
6525 switch (s->sc_type) {
6526 case NFS4_DELEG_STID:
6527 status = nfs4_check_delegmode(delegstateid(s), flags);
6528 break;
6529 case NFS4_OPEN_STID:
6530 case NFS4_LOCK_STID:
6531 status = nfs4_check_olstateid(openlockstateid(s), flags);
6532 break;
6533 default:
6534 status = nfserr_bad_stateid;
6535 break;
6536 }
6537 if (status)
6538 goto out;
6539 status = nfs4_check_fh(fhp, s);
6540
6541 done:
6542 if (status == nfs_ok && nfp)
6543 status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
6544 out:
6545 if (s) {
6546 if (!status && cstid)
6547 *cstid = s;
6548 else
6549 nfs4_put_stid(s);
6550 }
6551 return status;
6552 }
6553
6554 /*
6555 * Test if the stateid is valid
6556 */
6557 __be32
6558 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6559 union nfsd4_op_u *u)
6560 {
6561 struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
6562 struct nfsd4_test_stateid_id *stateid;
6563 struct nfs4_client *cl = cstate->clp;
6564
6565 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
6566 stateid->ts_id_status =
6567 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
6568
6569 return nfs_ok;
6570 }
6571
6572 static __be32
6573 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
6574 {
6575 struct nfs4_ol_stateid *stp = openlockstateid(s);
6576 __be32 ret;
6577
6578 ret = nfsd4_lock_ol_stateid(stp);
6579 if (ret)
6580 goto out_put_stid;
6581
6582 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
6583 if (ret)
6584 goto out;
6585
6586 ret = nfserr_locks_held;
6587 if (check_for_locks(stp->st_stid.sc_file,
6588 lockowner(stp->st_stateowner)))
6589 goto out;
6590
6591 release_lock_stateid(stp);
6592 ret = nfs_ok;
6593
6594 out:
6595 mutex_unlock(&stp->st_mutex);
6596 out_put_stid:
6597 nfs4_put_stid(s);
6598 return ret;
6599 }
6600
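/*
 * FREE_STATEID: only lock stateids with no locks still held and revoked
 * delegation stateids may be freed; open and (unrevoked) delegation
 * stateids fail with nfserr_locks_held.
 */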
6601 __be32
6602 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6603 union nfsd4_op_u *u)
6604 {
6605 struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
6606 stateid_t *stateid = &free_stateid->fr_stateid;
6607 struct nfs4_stid *s;
6608 struct nfs4_delegation *dp;
6609 struct nfs4_client *cl = cstate->clp;
6610 __be32 ret = nfserr_bad_stateid;
6611
6612 spin_lock(&cl->cl_lock);
6613 s = find_stateid_locked(cl, stateid);
6614 if (!s)
6615 goto out_unlock;
6616 spin_lock(&s->sc_lock);
6617 switch (s->sc_type) {
6618 case NFS4_DELEG_STID:
6619 ret = nfserr_locks_held;
6620 break;
6621 case NFS4_OPEN_STID:
6622 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
6623 if (ret)
6624 break;
6625 ret = nfserr_locks_held;
6626 break;
6627 case NFS4_LOCK_STID:
6628 spin_unlock(&s->sc_lock);
6629 refcount_inc(&s->sc_count);
6630 spin_unlock(&cl->cl_lock);
6631 ret = nfsd4_free_lock_stateid(stateid, s);
6632 goto out;
6633 case NFS4_REVOKED_DELEG_STID:
6634 spin_unlock(&s->sc_lock);
6635 dp = delegstateid(s);
6636 list_del_init(&dp->dl_recall_lru);
6637 spin_unlock(&cl->cl_lock);
6638 nfs4_put_stid(s);
6639 ret = nfs_ok;
6640 goto out;
6641 /* Default falls through and returns nfserr_bad_stateid */
6642 }
6643 spin_unlock(&s->sc_lock);
6644 out_unlock:
6645 spin_unlock(&cl->cl_lock);
6646 out:
6647 return ret;
6648 }
6649
6650 static inline int
6651 setlkflg (int type)
6652 {
6653 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
6654 RD_STATE : WR_STATE;
6655 }
6656
6657 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
6658 {
6659 struct svc_fh *current_fh = &cstate->current_fh;
6660 struct nfs4_stateowner *sop = stp->st_stateowner;
6661 __be32 status;
6662
6663 status = nfsd4_check_seqid(cstate, sop, seqid);
6664 if (status)
6665 return status;
6666 status = nfsd4_lock_ol_stateid(stp);
6667 if (status != nfs_ok)
6668 return status;
6669 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
6670 if (status == nfs_ok)
6671 status = nfs4_check_fh(current_fh, &stp->st_stid);
6672 if (status != nfs_ok)
6673 mutex_unlock(&stp->st_mutex);
6674 return status;
6675 }
6676
6677 /*
6678 * Checks for sequence id mutating operations.
6679 */
6680 static __be32
6681 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6682 stateid_t *stateid, char typemask,
6683 struct nfs4_ol_stateid **stpp,
6684 struct nfsd_net *nn)
6685 {
6686 __be32 status;
6687 struct nfs4_stid *s;
6688 struct nfs4_ol_stateid *stp = NULL;
6689
6690 trace_nfsd_preprocess(seqid, stateid);
6691
6692 *stpp = NULL;
6693 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
6694 if (status)
6695 return status;
6696 stp = openlockstateid(s);
6697 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
6698
6699 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
6700 if (!status)
6701 *stpp = stp;
6702 else
6703 nfs4_put_stid(&stp->st_stid);
6704 return status;
6705 }
6706
6707 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6708 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
6709 {
6710 __be32 status;
6711 struct nfs4_openowner *oo;
6712 struct nfs4_ol_stateid *stp;
6713
6714 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
6715 NFS4_OPEN_STID, &stp, nn);
6716 if (status)
6717 return status;
6718 oo = openowner(stp->st_stateowner);
6719 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
6720 mutex_unlock(&stp->st_mutex);
6721 nfs4_put_stid(&stp->st_stid);
6722 return nfserr_bad_stateid;
6723 }
6724 *stpp = stp;
6725 return nfs_ok;
6726 }
6727
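/*
 * OPEN_CONFIRM (NFSv4.0 only): mark the openowner confirmed, bump the
 * open stateid, and create the client's stable-storage record so it can
 * reclaim after a server reboot.
 */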
6728 __be32
6729 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6730 union nfsd4_op_u *u)
6731 {
6732 struct nfsd4_open_confirm *oc = &u->open_confirm;
6733 __be32 status;
6734 struct nfs4_openowner *oo;
6735 struct nfs4_ol_stateid *stp;
6736 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6737
6738 dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
6739 cstate->current_fh.fh_dentry);
6740
6741 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
6742 if (status)
6743 return status;
6744
6745 status = nfs4_preprocess_seqid_op(cstate,
6746 oc->oc_seqid, &oc->oc_req_stateid,
6747 NFS4_OPEN_STID, &stp, nn);
6748 if (status)
6749 goto out;
6750 oo = openowner(stp->st_stateowner);
6751 status = nfserr_bad_stateid;
6752 if (oo->oo_flags & NFS4_OO_CONFIRMED) {
6753 mutex_unlock(&stp->st_mutex);
6754 goto put_stateid;
6755 }
6756 oo->oo_flags |= NFS4_OO_CONFIRMED;
6757 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
6758 mutex_unlock(&stp->st_mutex);
6759 trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid);
6760 nfsd4_client_record_create(oo->oo_owner.so_client);
6761 status = nfs_ok;
6762 put_stateid:
6763 nfs4_put_stid(&stp->st_stid);
6764 out:
6765 nfsd4_bump_seqid(cstate, status);
6766 return status;
6767 }
6768
6769 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
6770 {
6771 if (!test_access(access, stp))
6772 return;
6773 nfs4_file_put_access(stp->st_stid.sc_file, access);
6774 clear_access(access, stp);
6775 }
6776
6777 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
6778 {
6779 switch (to_access) {
6780 case NFS4_SHARE_ACCESS_READ:
6781 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
6782 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6783 break;
6784 case NFS4_SHARE_ACCESS_WRITE:
6785 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
6786 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6787 break;
6788 case NFS4_SHARE_ACCESS_BOTH:
6789 break;
6790 default:
6791 WARN_ON_ONCE(1);
6792 }
6793 }
6794
6795 __be32
6796 nfsd4_open_downgrade(struct svc_rqst *rqstp,
6797 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
6798 {
6799 struct nfsd4_open_downgrade *od = &u->open_downgrade;
6800 __be32 status;
6801 struct nfs4_ol_stateid *stp;
6802 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6803
6804 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
6805 cstate->current_fh.fh_dentry);
6806
6807 /* We don't yet support WANT bits: */
6808 if (od->od_deleg_want)
6809 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
6810 od->od_deleg_want);
6811
6812 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
6813 &od->od_stateid, &stp, nn);
6814 if (status)
6815 goto out;
6816 status = nfserr_inval;
6817 if (!test_access(od->od_share_access, stp)) {
6818 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
6819 stp->st_access_bmap, od->od_share_access);
6820 goto put_stateid;
6821 }
6822 if (!test_deny(od->od_share_deny, stp)) {
6823 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
6824 stp->st_deny_bmap, od->od_share_deny);
6825 goto put_stateid;
6826 }
6827 nfs4_stateid_downgrade(stp, od->od_share_access);
6828 reset_union_bmap_deny(od->od_share_deny, stp);
6829 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
6830 status = nfs_ok;
6831 put_stateid:
6832 mutex_unlock(&stp->st_mutex);
6833 nfs4_put_stid(&stp->st_stid);
6834 out:
6835 nfsd4_bump_seqid(cstate, status);
6836 return status;
6837 }
6838
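/*
 * Unhash an open stateid that is being closed.  For v4.1+ the stateid
 * (and any lock stateids hanging off it) can be released immediately;
 * for v4.0 it is parked on the close LRU so a replayed CLOSE can still
 * be answered until the laundromat discards it.
 */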
6839 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
6840 {
6841 struct nfs4_client *clp = s->st_stid.sc_client;
6842 bool unhashed;
6843 LIST_HEAD(reaplist);
6844 struct nfs4_ol_stateid *stp;
6845
6846 spin_lock(&clp->cl_lock);
6847 unhashed = unhash_open_stateid(s, &reaplist);
6848
6849 if (clp->cl_minorversion) {
6850 if (unhashed)
6851 put_ol_stateid_locked(s, &reaplist);
6852 spin_unlock(&clp->cl_lock);
6853 list_for_each_entry(stp, &reaplist, st_locks)
6854 nfs4_free_cpntf_statelist(clp->net, &stp->st_stid);
6855 free_ol_stateid_reaplist(&reaplist);
6856 } else {
6857 spin_unlock(&clp->cl_lock);
6858 free_ol_stateid_reaplist(&reaplist);
6859 if (unhashed)
6860 move_to_close_lru(s, clp->net);
6861 }
6862 }
6863
6864 /*
6865 * nfs4_unlock_state() called after encode
6866 */
6867 __be32
6868 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6869 union nfsd4_op_u *u)
6870 {
6871 struct nfsd4_close *close = &u->close;
6872 __be32 status;
6873 struct nfs4_ol_stateid *stp;
6874 struct net *net = SVC_NET(rqstp);
6875 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6876
6877 dprintk("NFSD: nfsd4_close on file %pd\n",
6878 cstate->current_fh.fh_dentry);
6879
6880 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
6881 &close->cl_stateid,
6882 NFS4_OPEN_STID|NFS4_CLOSED_STID,
6883 &stp, nn);
6884 nfsd4_bump_seqid(cstate, status);
6885 if (status)
6886 goto out;
6887
6888 stp->st_stid.sc_type = NFS4_CLOSED_STID;
6889
6890 /*
6891 * Technically we don't _really_ have to increment or copy it, since
6892 * it should just be gone after this operation and we clobber the
6893 * copied value below, but we continue to do so here just to ensure
6894 * that racing ops see that there was a state change.
6895 */
6896 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
6897
6898 nfsd4_close_open_stateid(stp);
6899 mutex_unlock(&stp->st_mutex);
6900
6901 /* v4.1+ suggests that we send a special stateid in here, since the
6902 * clients should just ignore this anyway. Since this is not useful
6903 * for v4.0 clients either, we set it to the special close_stateid
6904 * universally.
6905 *
6906 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
6907 */
6908 memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));
6909
6910 /* put reference from nfs4_preprocess_seqid_op */
6911 nfs4_put_stid(&stp->st_stid);
6912 out:
6913 return status;
6914 }
6915
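/*
 * DELEGRETURN: look up the delegation stateid, check its generation,
 * wake up anyone waiting on the inode for the delegation to be returned,
 * and destroy the delegation.
 */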
6916 __be32
6917 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6918 union nfsd4_op_u *u)
6919 {
6920 struct nfsd4_delegreturn *dr = &u->delegreturn;
6921 struct nfs4_delegation *dp;
6922 stateid_t *stateid = &dr->dr_stateid;
6923 struct nfs4_stid *s;
6924 __be32 status;
6925 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6926
6927 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
6928 return status;
6929
6930 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
6931 if (status)
6932 goto out;
6933 dp = delegstateid(s);
6934 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
6935 if (status)
6936 goto put_stateid;
6937
6938 wake_up_var(d_inode(cstate->current_fh.fh_dentry));
6939 destroy_delegation(dp);
6940 put_stateid:
6941 nfs4_put_stid(&dp->dl_stid);
6942 out:
6943 return status;
6944 }
6945
6946 /* last octet in a range */
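/*
 * For example, start == 5 and len == 10 cover bytes 5..14, so 14 is
 * returned.  If start + len wraps past the top of the u64 range, the
 * range is treated as running to the end of the file and
 * NFS4_MAX_UINT64 is returned instead.
 */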
6947 static inline u64
6948 last_byte_offset(u64 start, u64 len)
6949 {
6950 u64 end;
6951
6952 WARN_ON_ONCE(!len);
6953 end = start + len;
6954 return end > start ? end - 1: NFS4_MAX_UINT64;
6955 }
6956
6957 /*
6958 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
6959 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
6960 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
6961 * locking, this prevents us from being completely protocol-compliant. The
6962 * real solution to this problem is to start using unsigned file offsets in
6963 * the VFS, but this is a very deep change!
6964 */
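/*
 * A client offset at or beyond 2^63 becomes negative once stored in the
 * signed loff_t fields, so it is clamped to OFFSET_MAX here and the lock
 * simply extends to the largest offset the VFS can represent.
 */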
6965 static inline void
6966 nfs4_transform_lock_offset(struct file_lock *lock)
6967 {
6968 if (lock->fl_start < 0)
6969 lock->fl_start = OFFSET_MAX;
6970 if (lock->fl_end < 0)
6971 lock->fl_end = OFFSET_MAX;
6972 }
6973
6974 static fl_owner_t
6975 nfsd4_lm_get_owner(fl_owner_t owner)
6976 {
6977 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
6978
6979 nfs4_get_stateowner(&lo->lo_owner);
6980 return owner;
6981 }
6982
6983 static void
6984 nfsd4_lm_put_owner(fl_owner_t owner)
6985 {
6986 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
6987
6988 if (lo)
6989 nfs4_put_stateowner(&lo->lo_owner);
6990 }
6991
6992 /* return pointer to struct nfs4_client if client is expirable */
6993 static bool
6994 nfsd4_lm_lock_expirable(struct file_lock *cfl)
6995 {
6996 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)cfl->fl_owner;
6997 struct nfs4_client *clp = lo->lo_owner.so_client;
6998 struct nfsd_net *nn;
6999
7000 if (try_to_expire_client(clp)) {
7001 nn = net_generic(clp->net, nfsd_net_id);
7002 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
7003 return true;
7004 }
7005 return false;
7006 }
7007
7008 /* schedule laundromat to run immediately and wait for it to complete */
7009 static void
7010 nfsd4_lm_expire_lock(void)
7011 {
7012 flush_workqueue(laundry_wq);
7013 }
7014
7015 static void
7016 nfsd4_lm_notify(struct file_lock *fl)
7017 {
7018 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
7019 struct net *net = lo->lo_owner.so_client->net;
7020 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7021 struct nfsd4_blocked_lock *nbl = container_of(fl,
7022 struct nfsd4_blocked_lock, nbl_lock);
7023 bool queue = false;
7024
7025 /* An empty list means that something else is going to be using it */
7026 spin_lock(&nn->blocked_locks_lock);
7027 if (!list_empty(&nbl->nbl_list)) {
7028 list_del_init(&nbl->nbl_list);
7029 list_del_init(&nbl->nbl_lru);
7030 queue = true;
7031 }
7032 spin_unlock(&nn->blocked_locks_lock);
7033
7034 if (queue) {
7035 trace_nfsd_cb_notify_lock(lo, nbl);
7036 nfsd4_run_cb(&nbl->nbl_cb);
7037 }
7038 }
7039
7040 static const struct lock_manager_operations nfsd_posix_mng_ops = {
7041 .lm_mod_owner = THIS_MODULE,
7042 .lm_notify = nfsd4_lm_notify,
7043 .lm_get_owner = nfsd4_lm_get_owner,
7044 .lm_put_owner = nfsd4_lm_put_owner,
7045 .lm_lock_expirable = nfsd4_lm_lock_expirable,
7046 .lm_expire_lock = nfsd4_lm_expire_lock,
7047 };
7048
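/*
 * Fill in the denied-lock response (nfsd4_lock_denied) from the
 * conflicting file_lock: the owner and clientid when the conflict comes
 * from one of our own lockowners, plus the conflicting range and lock
 * type.  For example, a conflicting write lock over bytes 100..199 is
 * reported as ld_start 100, ld_length 100, ld_type NFS4_WRITE_LT.
 */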
7049 static inline void
7050 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
7051 {
7052 struct nfs4_lockowner *lo;
7053
7054 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
7055 lo = (struct nfs4_lockowner *) fl->fl_owner;
7056 xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
7057 GFP_KERNEL);
7058 if (!deny->ld_owner.data)
7059 /* We just don't care that much */
7060 goto nevermind;
7061 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
7062 } else {
7063 nevermind:
7064 deny->ld_owner.len = 0;
7065 deny->ld_owner.data = NULL;
7066 deny->ld_clientid.cl_boot = 0;
7067 deny->ld_clientid.cl_id = 0;
7068 }
7069 deny->ld_start = fl->fl_start;
7070 deny->ld_length = NFS4_MAX_UINT64;
7071 if (fl->fl_end != NFS4_MAX_UINT64)
7072 deny->ld_length = fl->fl_end - fl->fl_start + 1;
7073 deny->ld_type = NFS4_READ_LT;
7074 if (fl->fl_type != F_RDLCK)
7075 deny->ld_type = NFS4_WRITE_LT;
7076 }
7077
7078 static struct nfs4_lockowner *
7079 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
7080 {
7081 unsigned int strhashval = ownerstr_hashval(owner);
7082 struct nfs4_stateowner *so;
7083
7084 lockdep_assert_held(&clp->cl_lock);
7085
7086 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
7087 so_strhash) {
7088 if (so->so_is_open_owner)
7089 continue;
7090 if (same_owner_str(so, owner))
7091 return lockowner(nfs4_get_stateowner(so));
7092 }
7093 return NULL;
7094 }
7095
7096 static struct nfs4_lockowner *
7097 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
7098 {
7099 struct nfs4_lockowner *lo;
7100
7101 spin_lock(&clp->cl_lock);
7102 lo = find_lockowner_str_locked(clp, owner);
7103 spin_unlock(&clp->cl_lock);
7104 return lo;
7105 }
7106
7107 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
7108 {
7109 unhash_lockowner_locked(lockowner(sop));
7110 }
7111
7112 static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
7113 {
7114 struct nfs4_lockowner *lo = lockowner(sop);
7115
7116 kmem_cache_free(lockowner_slab, lo);
7117 }
7118
7119 static const struct nfs4_stateowner_operations lockowner_ops = {
7120 .so_unhash = nfs4_unhash_lockowner,
7121 .so_free = nfs4_free_lockowner,
7122 };
7123
7124 /*
7125 * Alloc a lock owner structure.
7126 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
7127 * occurred.
7128 *
7129 * strhashval = ownerstr_hashval
7130 */
7131 static struct nfs4_lockowner *
7132 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
7133 struct nfs4_ol_stateid *open_stp,
7134 struct nfsd4_lock *lock)
7135 {
7136 struct nfs4_lockowner *lo, *ret;
7137
7138 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
7139 if (!lo)
7140 return NULL;
7141 INIT_LIST_HEAD(&lo->lo_blocked);
7142 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
7143 lo->lo_owner.so_is_open_owner = 0;
7144 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
7145 lo->lo_owner.so_ops = &lockowner_ops;
7146 spin_lock(&clp->cl_lock);
7147 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
7148 if (ret == NULL) {
7149 list_add(&lo->lo_owner.so_strhash,
7150 &clp->cl_ownerstr_hashtbl[strhashval]);
7151 ret = lo;
7152 } else
7153 nfs4_free_stateowner(&lo->lo_owner);
7154
7155 spin_unlock(&clp->cl_lock);
7156 return ret;
7157 }
7158
7159 static struct nfs4_ol_stateid *
7160 find_lock_stateid(const struct nfs4_lockowner *lo,
7161 const struct nfs4_ol_stateid *ost)
7162 {
7163 struct nfs4_ol_stateid *lst;
7164
7165 lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
7166
7167 /* If ost is not hashed, ost->st_locks will not be valid */
7168 if (!nfs4_ol_stateid_unhashed(ost))
7169 list_for_each_entry(lst, &ost->st_locks, st_locks) {
7170 if (lst->st_stateowner == &lo->lo_owner) {
7171 refcount_inc(&lst->st_stid.sc_count);
7172 return lst;
7173 }
7174 }
7175 return NULL;
7176 }
7177
7178 static struct nfs4_ol_stateid *
7179 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
7180 struct nfs4_file *fp, struct inode *inode,
7181 struct nfs4_ol_stateid *open_stp)
7182 {
7183 struct nfs4_client *clp = lo->lo_owner.so_client;
7184 struct nfs4_ol_stateid *retstp;
7185
7186 mutex_init(&stp->st_mutex);
7187 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
7188 retry:
7189 spin_lock(&clp->cl_lock);
7190 if (nfs4_ol_stateid_unhashed(open_stp))
7191 goto out_close;
7192 retstp = find_lock_stateid(lo, open_stp);
7193 if (retstp)
7194 goto out_found;
7195 refcount_inc(&stp->st_stid.sc_count);
7196 stp->st_stid.sc_type = NFS4_LOCK_STID;
7197 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
7198 get_nfs4_file(fp);
7199 stp->st_stid.sc_file = fp;
7200 stp->st_access_bmap = 0;
7201 stp->st_deny_bmap = open_stp->st_deny_bmap;
7202 stp->st_openstp = open_stp;
7203 spin_lock(&fp->fi_lock);
7204 list_add(&stp->st_locks, &open_stp->st_locks);
7205 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
7206 list_add(&stp->st_perfile, &fp->fi_stateids);
7207 spin_unlock(&fp->fi_lock);
7208 spin_unlock(&clp->cl_lock);
7209 return stp;
7210 out_found:
7211 spin_unlock(&clp->cl_lock);
7212 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
7213 nfs4_put_stid(&retstp->st_stid);
7214 goto retry;
7215 }
7216 /* To keep mutex tracking happy */
7217 mutex_unlock(&stp->st_mutex);
7218 return retstp;
7219 out_close:
7220 spin_unlock(&clp->cl_lock);
7221 mutex_unlock(&stp->st_mutex);
7222 return NULL;
7223 }
7224
7225 static struct nfs4_ol_stateid *
7226 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
7227 struct inode *inode, struct nfs4_ol_stateid *ost,
7228 bool *new)
7229 {
7230 struct nfs4_stid *ns = NULL;
7231 struct nfs4_ol_stateid *lst;
7232 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
7233 struct nfs4_client *clp = oo->oo_owner.so_client;
7234
7235 *new = false;
7236 spin_lock(&clp->cl_lock);
7237 lst = find_lock_stateid(lo, ost);
7238 spin_unlock(&clp->cl_lock);
7239 if (lst != NULL) {
7240 if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
7241 goto out;
7242 nfs4_put_stid(&lst->st_stid);
7243 }
7244 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
7245 if (ns == NULL)
7246 return NULL;
7247
7248 lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
7249 if (lst == openlockstateid(ns))
7250 *new = true;
7251 else
7252 nfs4_put_stid(ns);
7253 out:
7254 return lst;
7255 }
7256
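/*
 * A LOCK/LOCKT/LOCKU range is invalid if its length is zero, or if the
 * length is not the special "to end of file" value NFS4_MAX_UINT64 and
 * offset + length would overflow 64 bits (i.e. length > ~offset, which is
 * NFS4_MAX_UINT64 - offset).  Returns non-zero for an invalid range.
 */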
7257 static int
7258 check_lock_length(u64 offset, u64 length)
7259 {
7260 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
7261 (length > ~offset)));
7262 }
7263
7264 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
7265 {
7266 struct nfs4_file *fp = lock_stp->st_stid.sc_file;
7267
7268 lockdep_assert_held(&fp->fi_lock);
7269
7270 if (test_access(access, lock_stp))
7271 return;
7272 __nfs4_file_get_access(fp, access);
7273 set_access(access, lock_stp);
7274 }
7275
7276 static __be32
7277 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
7278 struct nfs4_ol_stateid *ost,
7279 struct nfsd4_lock *lock,
7280 struct nfs4_ol_stateid **plst, bool *new)
7281 {
7282 __be32 status;
7283 struct nfs4_file *fi = ost->st_stid.sc_file;
7284 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
7285 struct nfs4_client *cl = oo->oo_owner.so_client;
7286 struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
7287 struct nfs4_lockowner *lo;
7288 struct nfs4_ol_stateid *lst;
7289 unsigned int strhashval;
7290
7291 lo = find_lockowner_str(cl, &lock->lk_new_owner);
7292 if (!lo) {
7293 strhashval = ownerstr_hashval(&lock->lk_new_owner);
7294 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
7295 if (lo == NULL)
7296 return nfserr_jukebox;
7297 } else {
7298 /* with an existing lockowner, seqids must be the same */
7299 status = nfserr_bad_seqid;
7300 if (!cstate->minorversion &&
7301 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
7302 goto out;
7303 }
7304
7305 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
7306 if (lst == NULL) {
7307 status = nfserr_jukebox;
7308 goto out;
7309 }
7310
7311 status = nfs_ok;
7312 *plst = lst;
7313 out:
7314 nfs4_put_stateowner(&lo->lo_owner);
7315 return status;
7316 }
7317
7318 /*
7319 * LOCK operation
7320 */
7321 __be32
7322 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7323 union nfsd4_op_u *u)
7324 {
7325 struct nfsd4_lock *lock = &u->lock;
7326 struct nfs4_openowner *open_sop = NULL;
7327 struct nfs4_lockowner *lock_sop = NULL;
7328 struct nfs4_ol_stateid *lock_stp = NULL;
7329 struct nfs4_ol_stateid *open_stp = NULL;
7330 struct nfs4_file *fp;
7331 struct nfsd_file *nf = NULL;
7332 struct nfsd4_blocked_lock *nbl = NULL;
7333 struct file_lock *file_lock = NULL;
7334 struct file_lock *conflock = NULL;
7335 __be32 status = 0;
7336 int lkflg;
7337 int err;
7338 bool new = false;
7339 unsigned char fl_type;
7340 unsigned int fl_flags = FL_POSIX;
7341 struct net *net = SVC_NET(rqstp);
7342 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7343
7344 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
7345 (long long) lock->lk_offset,
7346 (long long) lock->lk_length);
7347
7348 if (check_lock_length(lock->lk_offset, lock->lk_length))
7349 return nfserr_inval;
7350
7351 if ((status = fh_verify(rqstp, &cstate->current_fh,
7352 S_IFREG, NFSD_MAY_LOCK))) {
7353 dprintk("NFSD: nfsd4_lock: permission denied!\n");
7354 return status;
7355 }
7356
7357 if (lock->lk_is_new) {
7358 if (nfsd4_has_session(cstate))
7359 /* See rfc 5661 18.10.3: given clientid is ignored: */
7360 memcpy(&lock->lk_new_clientid,
7361 &cstate->clp->cl_clientid,
7362 sizeof(clientid_t));
7363
7364 /* validate and update open stateid and open seqid */
7365 status = nfs4_preprocess_confirmed_seqid_op(cstate,
7366 lock->lk_new_open_seqid,
7367 &lock->lk_new_open_stateid,
7368 &open_stp, nn);
7369 if (status)
7370 goto out;
7371 mutex_unlock(&open_stp->st_mutex);
7372 open_sop = openowner(open_stp->st_stateowner);
7373 status = nfserr_bad_stateid;
7374 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
7375 &lock->lk_new_clientid))
7376 goto out;
7377 status = lookup_or_create_lock_state(cstate, open_stp, lock,
7378 &lock_stp, &new);
7379 } else {
7380 status = nfs4_preprocess_seqid_op(cstate,
7381 lock->lk_old_lock_seqid,
7382 &lock->lk_old_lock_stateid,
7383 NFS4_LOCK_STID, &lock_stp, nn);
7384 }
7385 if (status)
7386 goto out;
7387 lock_sop = lockowner(lock_stp->st_stateowner);
7388
7389 lkflg = setlkflg(lock->lk_type);
7390 status = nfs4_check_openmode(lock_stp, lkflg);
7391 if (status)
7392 goto out;
7393
7394 status = nfserr_grace;
7395 if (locks_in_grace(net) && !lock->lk_reclaim)
7396 goto out;
7397 status = nfserr_no_grace;
7398 if (!locks_in_grace(net) && lock->lk_reclaim)
7399 goto out;
7400
7401 if (lock->lk_reclaim)
7402 fl_flags |= FL_RECLAIM;
7403
7404 fp = lock_stp->st_stid.sc_file;
7405 switch (lock->lk_type) {
7406 case NFS4_READW_LT:
7407 if (nfsd4_has_session(cstate))
7408 fl_flags |= FL_SLEEP;
7409 fallthrough;
7410 case NFS4_READ_LT:
7411 spin_lock(&fp->fi_lock);
7412 nf = find_readable_file_locked(fp);
7413 if (nf)
7414 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
7415 spin_unlock(&fp->fi_lock);
7416 fl_type = F_RDLCK;
7417 break;
7418 case NFS4_WRITEW_LT:
7419 if (nfsd4_has_session(cstate))
7420 fl_flags |= FL_SLEEP;
7421 fallthrough;
7422 case NFS4_WRITE_LT:
7423 spin_lock(&fp->fi_lock);
7424 nf = find_writeable_file_locked(fp);
7425 if (nf)
7426 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
7427 spin_unlock(&fp->fi_lock);
7428 fl_type = F_WRLCK;
7429 break;
7430 default:
7431 status = nfserr_inval;
7432 goto out;
7433 }
7434
7435 if (!nf) {
7436 status = nfserr_openmode;
7437 goto out;
7438 }
7439
7440 /*
7441 * Most filesystems with their own ->lock operations will block
7442 * the nfsd thread waiting to acquire the lock. That leads to
7443 * deadlocks (we don't want every nfsd thread tied up waiting
7444 * for file locks), so don't attempt blocking lock notifications
7445 * on those filesystems:
7446 */
7447 if (nf->nf_file->f_op->lock)
7448 fl_flags &= ~FL_SLEEP;
7449
7450 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
7451 if (!nbl) {
7452 dprintk("NFSD: %s: unable to allocate block!\n", __func__);
7453 status = nfserr_jukebox;
7454 goto out;
7455 }
7456
7457 file_lock = &nbl->nbl_lock;
7458 file_lock->fl_type = fl_type;
7459 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
7460 file_lock->fl_pid = current->tgid;
7461 file_lock->fl_file = nf->nf_file;
7462 file_lock->fl_flags = fl_flags;
7463 file_lock->fl_lmops = &nfsd_posix_mng_ops;
7464 file_lock->fl_start = lock->lk_offset;
7465 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
7466 nfs4_transform_lock_offset(file_lock);
7467
7468 conflock = locks_alloc_lock();
7469 if (!conflock) {
7470 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
7471 status = nfserr_jukebox;
7472 goto out;
7473 }
7474
7475 if (fl_flags & FL_SLEEP) {
7476 nbl->nbl_time = ktime_get_boottime_seconds();
7477 spin_lock(&nn->blocked_locks_lock);
7478 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
7479 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
7480 kref_get(&nbl->nbl_kref);
7481 spin_unlock(&nn->blocked_locks_lock);
7482 }
7483
7484 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
7485 switch (err) {
7486 case 0: /* success! */
7487 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
7488 status = 0;
7489 if (lock->lk_reclaim)
7490 nn->somebody_reclaimed = true;
7491 break;
7492 case FILE_LOCK_DEFERRED:
7493 kref_put(&nbl->nbl_kref, free_nbl);
7494 nbl = NULL;
7495 fallthrough;
7496 case -EAGAIN: /* conflock holds conflicting lock */
7497 status = nfserr_denied;
7498 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
7499 nfs4_set_lock_denied(conflock, &lock->lk_denied);
7500 break;
7501 case -EDEADLK:
7502 status = nfserr_deadlock;
7503 break;
7504 default:
7505 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
7506 status = nfserrno(err);
7507 break;
7508 }
7509 out:
7510 if (nbl) {
7511 /* dequeue it if we queued it before */
7512 if (fl_flags & FL_SLEEP) {
7513 spin_lock(&nn->blocked_locks_lock);
7514 if (!list_empty(&nbl->nbl_list) &&
7515 !list_empty(&nbl->nbl_lru)) {
7516 list_del_init(&nbl->nbl_list);
7517 list_del_init(&nbl->nbl_lru);
7518 kref_put(&nbl->nbl_kref, free_nbl);
7519 }
7520 /* nbl can use one of its lists to be linked to the reaplist */
7521 spin_unlock(&nn->blocked_locks_lock);
7522 }
7523 free_blocked_lock(nbl);
7524 }
7525 if (nf)
7526 nfsd_file_put(nf);
7527 if (lock_stp) {
7528 /* Bump seqid manually if the 4.0 replay owner is the openowner */
7529 if (cstate->replay_owner &&
7530 cstate->replay_owner != &lock_sop->lo_owner &&
7531 seqid_mutating_err(ntohl(status)))
7532 lock_sop->lo_owner.so_seqid++;
7533
7534 /*
7535 * If this is a new, never-before-used stateid, and we are
7536 * returning an error, then just go ahead and release it.
7537 */
7538 if (status && new)
7539 release_lock_stateid(lock_stp);
7540
7541 mutex_unlock(&lock_stp->st_mutex);
7542
7543 nfs4_put_stid(&lock_stp->st_stid);
7544 }
7545 if (open_stp)
7546 nfs4_put_stid(&open_stp->st_stid);
7547 nfsd4_bump_seqid(cstate, status);
7548 if (conflock)
7549 locks_free_lock(conflock);
7550 return status;
7551 }
7552
7553 /*
7554 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
7555 * so we do a temporary open here just to get an open file to pass to
7556 * vfs_test_lock.
7557 */
7558 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
7559 {
7560 struct nfsd_file *nf;
7561 struct inode *inode;
7562 __be32 err;
7563
7564 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
7565 if (err)
7566 return err;
7567 inode = fhp->fh_dentry->d_inode;
7568 inode_lock(inode); /* to block new leases till after test_lock: */
7569 err = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
7570 if (err)
7571 goto out;
7572 lock->fl_file = nf->nf_file;
7573 err = nfserrno(vfs_test_lock(nf->nf_file, lock));
7574 lock->fl_file = NULL;
7575 out:
7576 inode_unlock(inode);
7577 nfsd_file_put(nf);
7578 return err;
7579 }
7580
7581 /*
7582 * LOCKT operation
7583 */
7584 __be32
7585 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7586 union nfsd4_op_u *u)
7587 {
7588 struct nfsd4_lockt *lockt = &u->lockt;
7589 struct file_lock *file_lock = NULL;
7590 struct nfs4_lockowner *lo = NULL;
7591 __be32 status;
7592 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7593
7594 if (locks_in_grace(SVC_NET(rqstp)))
7595 return nfserr_grace;
7596
7597 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
7598 return nfserr_inval;
7599
7600 if (!nfsd4_has_session(cstate)) {
7601 status = set_client(&lockt->lt_clientid, cstate, nn);
7602 if (status)
7603 goto out;
7604 }
7605
7606 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
7607 goto out;
7608
7609 file_lock = locks_alloc_lock();
7610 if (!file_lock) {
7611 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
7612 status = nfserr_jukebox;
7613 goto out;
7614 }
7615
7616 switch (lockt->lt_type) {
7617 case NFS4_READ_LT:
7618 case NFS4_READW_LT:
7619 file_lock->fl_type = F_RDLCK;
7620 break;
7621 case NFS4_WRITE_LT:
7622 case NFS4_WRITEW_LT:
7623 file_lock->fl_type = F_WRLCK;
7624 break;
7625 default:
7626 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
7627 status = nfserr_inval;
7628 goto out;
7629 }
7630
7631 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
7632 if (lo)
7633 file_lock->fl_owner = (fl_owner_t)lo;
7634 file_lock->fl_pid = current->tgid;
7635 file_lock->fl_flags = FL_POSIX;
7636
7637 file_lock->fl_start = lockt->lt_offset;
7638 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
7639
7640 nfs4_transform_lock_offset(file_lock);
7641
7642 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
7643 if (status)
7644 goto out;
7645
7646 if (file_lock->fl_type != F_UNLCK) {
7647 status = nfserr_denied;
7648 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
7649 }
7650 out:
7651 if (lo)
7652 nfs4_put_stateowner(&lo->lo_owner);
7653 if (file_lock)
7654 locks_free_lock(file_lock);
7655 return status;
7656 }
7657
7658 __be32
7659 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7660 union nfsd4_op_u *u)
7661 {
7662 struct nfsd4_locku *locku = &u->locku;
7663 struct nfs4_ol_stateid *stp;
7664 struct nfsd_file *nf = NULL;
7665 struct file_lock *file_lock = NULL;
7666 __be32 status;
7667 int err;
7668 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7669
7670 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
7671 (long long) locku->lu_offset,
7672 (long long) locku->lu_length);
7673
7674 if (check_lock_length(locku->lu_offset, locku->lu_length))
7675 return nfserr_inval;
7676
7677 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
7678 &locku->lu_stateid, NFS4_LOCK_STID,
7679 &stp, nn);
7680 if (status)
7681 goto out;
7682 nf = find_any_file(stp->st_stid.sc_file);
7683 if (!nf) {
7684 status = nfserr_lock_range;
7685 goto put_stateid;
7686 }
7687 file_lock = locks_alloc_lock();
7688 if (!file_lock) {
7689 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
7690 status = nfserr_jukebox;
7691 goto put_file;
7692 }
7693
7694 file_lock->fl_type = F_UNLCK;
7695 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
7696 file_lock->fl_pid = current->tgid;
7697 file_lock->fl_file = nf->nf_file;
7698 file_lock->fl_flags = FL_POSIX;
7699 file_lock->fl_lmops = &nfsd_posix_mng_ops;
7700 file_lock->fl_start = locku->lu_offset;
7701
7702 file_lock->fl_end = last_byte_offset(locku->lu_offset,
7703 locku->lu_length);
7704 nfs4_transform_lock_offset(file_lock);
7705
7706 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
7707 if (err) {
7708 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
7709 goto out_nfserr;
7710 }
7711 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
7712 put_file:
7713 nfsd_file_put(nf);
7714 put_stateid:
7715 mutex_unlock(&stp->st_mutex);
7716 nfs4_put_stid(&stp->st_stid);
7717 out:
7718 nfsd4_bump_seqid(cstate, status);
7719 if (file_lock)
7720 locks_free_lock(file_lock);
7721 return status;
7722
7723 out_nfserr:
7724 status = nfserrno(err);
7725 goto put_file;
7726 }
7727
7728 /*
7729 * returns
7730 * true: locks held by lockowner
7731 * false: no locks held by lockowner
7732 */
7733 static bool
7734 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
7735 {
7736 struct file_lock *fl;
7737 int status = false;
7738 struct nfsd_file *nf;
7739 struct inode *inode;
7740 struct file_lock_context *flctx;
7741
7742 spin_lock(&fp->fi_lock);
7743 nf = find_any_file_locked(fp);
7744 if (!nf) {
7745 /* Any valid lock stateid should have some sort of access */
7746 WARN_ON_ONCE(1);
7747 goto out;
7748 }
7749
7750 inode = locks_inode(nf->nf_file);
7751 flctx = inode->i_flctx;
7752
7753 if (flctx && !list_empty_careful(&flctx->flc_posix)) {
7754 spin_lock(&flctx->flc_lock);
7755 list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
7756 if (fl->fl_owner == (fl_owner_t)lowner) {
7757 status = true;
7758 break;
7759 }
7760 }
7761 spin_unlock(&flctx->flc_lock);
7762 }
7763 out:
7764 spin_unlock(&fp->fi_lock);
7765 return status;
7766 }
7767
7768 /**
7769 * nfsd4_release_lockowner - process NFSv4.0 RELEASE_LOCKOWNER operations
7770 * @rqstp: RPC transaction
7771 * @cstate: NFSv4 COMPOUND state
7772 * @u: RELEASE_LOCKOWNER arguments
7773 *
7774  * Check if there are any locks still held and if not, free the lockowner
7775 * and any lock state that is owned.
7776 *
7777 * Return values:
7778 * %nfs_ok: lockowner released or not found
7779 * %nfserr_locks_held: lockowner still in use
7780 * %nfserr_stale_clientid: clientid no longer active
7781 * %nfserr_expired: clientid not recognized
7782 */
7783 __be32
7784 nfsd4_release_lockowner(struct svc_rqst *rqstp,
7785 struct nfsd4_compound_state *cstate,
7786 union nfsd4_op_u *u)
7787 {
7788 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
7789 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7790 clientid_t *clid = &rlockowner->rl_clientid;
7791 struct nfs4_ol_stateid *stp;
7792 struct nfs4_lockowner *lo;
7793 struct nfs4_client *clp;
7794 LIST_HEAD(reaplist);
7795 __be32 status;
7796
7797 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
7798 clid->cl_boot, clid->cl_id);
7799
7800 status = set_client(clid, cstate, nn);
7801 if (status)
7802 return status;
7803 clp = cstate->clp;
7804
7805 spin_lock(&clp->cl_lock);
7806 lo = find_lockowner_str_locked(clp, &rlockowner->rl_owner);
7807 if (!lo) {
7808 spin_unlock(&clp->cl_lock);
7809 return nfs_ok;
7810 }
7811
7812 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
7813 if (check_for_locks(stp->st_stid.sc_file, lo)) {
7814 spin_unlock(&clp->cl_lock);
7815 nfs4_put_stateowner(&lo->lo_owner);
7816 return nfserr_locks_held;
7817 }
7818 }
7819 unhash_lockowner_locked(lo);
7820 while (!list_empty(&lo->lo_owner.so_stateids)) {
7821 stp = list_first_entry(&lo->lo_owner.so_stateids,
7822 struct nfs4_ol_stateid,
7823 st_perstateowner);
7824 WARN_ON(!unhash_lock_stateid(stp));
7825 put_ol_stateid_locked(stp, &reaplist);
7826 }
7827 spin_unlock(&clp->cl_lock);
7828
7829 free_ol_stateid_reaplist(&reaplist);
7830 remove_blocked_locks(lo);
7831 nfs4_put_stateowner(&lo->lo_owner);
7832 return nfs_ok;
7833 }
7834
7835 static inline struct nfs4_client_reclaim *
7836 alloc_reclaim(void)
7837 {
7838 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
7839 }
7840
7841 bool
7842 nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
7843 {
7844 struct nfs4_client_reclaim *crp;
7845
7846 crp = nfsd4_find_reclaim_client(name, nn);
7847 return (crp && crp->cr_clp);
7848 }
7849
7850 /*
7851 * failure => all reset bets are off, nfserr_no_grace...
7852 *
7853 * The caller is responsible for freeing name.data if NULL is returned (it
7854 * will be freed in nfs4_remove_reclaim_record in the normal case).
7855 */
7856 struct nfs4_client_reclaim *
7857 nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
7858 struct nfsd_net *nn)
7859 {
7860 unsigned int strhashval;
7861 struct nfs4_client_reclaim *crp;
7862
7863 crp = alloc_reclaim();
7864 if (crp) {
7865 strhashval = clientstr_hashval(name);
7866 INIT_LIST_HEAD(&crp->cr_strhash);
7867 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
7868 crp->cr_name.data = name.data;
7869 crp->cr_name.len = name.len;
7870 crp->cr_princhash.data = princhash.data;
7871 crp->cr_princhash.len = princhash.len;
7872 crp->cr_clp = NULL;
7873 nn->reclaim_str_hashtbl_size++;
7874 }
7875 return crp;
7876 }
7877
7878 void
7879 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
7880 {
7881 list_del(&crp->cr_strhash);
7882 kfree(crp->cr_name.data);
7883 kfree(crp->cr_princhash.data);
7884 kfree(crp);
7885 nn->reclaim_str_hashtbl_size--;
7886 }
7887
7888 void
7889 nfs4_release_reclaim(struct nfsd_net *nn)
7890 {
7891 struct nfs4_client_reclaim *crp = NULL;
7892 int i;
7893
7894 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7895 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
7896 crp = list_entry(nn->reclaim_str_hashtbl[i].next,
7897 struct nfs4_client_reclaim, cr_strhash);
7898 nfs4_remove_reclaim_record(crp, nn);
7899 }
7900 }
7901 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
7902 }
7903
7904 /*
7905 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
7906 struct nfs4_client_reclaim *
7907 nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
7908 {
7909 unsigned int strhashval;
7910 struct nfs4_client_reclaim *crp = NULL;
7911
7912 strhashval = clientstr_hashval(name);
7913 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
7914 if (compare_blob(&crp->cr_name, &name) == 0) {
7915 return crp;
7916 }
7917 }
7918 return NULL;
7919 }
7920
7921 __be32
7922 nfs4_check_open_reclaim(struct nfs4_client *clp)
7923 {
7924 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
7925 return nfserr_no_grace;
7926
7927 if (nfsd4_client_record_check(clp))
7928 return nfserr_reclaim_bad;
7929
7930 return nfs_ok;
7931 }
7932
7933 /*
7934 * Since the lifetime of a delegation isn't limited to that of an open, a
7935 * client may quite reasonably hang on to a delegation as long as it has
7936 * the inode cached. This becomes an obvious problem the first time a
7937 * client's inode cache approaches the size of the server's total memory.
7938 *
7939 * For now we avoid this problem by imposing a hard limit on the number
7940 * of delegations, which varies according to the server's memory size.
7941 */
7942 static void
7943 set_max_delegations(void)
7944 {
7945 /*
7946 * Allow at most 4 delegations per megabyte of RAM. Quick
7947 * estimates suggest that in the worst case (where every delegation
7948 * is for a different inode), a delegation could take about 1.5K,
7949 * giving a worst case usage of about 6% of memory.
7950 */
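	/*
	 * Worked example: with 4 KiB pages (PAGE_SHIFT == 12) the shift
	 * below is 20 - 2 - 12 = 6, so 262144 free buffer pages (1 GiB)
	 * allow 262144 >> 6 = 4096 delegations, i.e. 4 per megabyte.
	 */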
7951 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
7952 }
7953
7954 static int nfs4_state_create_net(struct net *net)
7955 {
7956 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7957 int i;
7958
7959 nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
7960 sizeof(struct list_head),
7961 GFP_KERNEL);
7962 if (!nn->conf_id_hashtbl)
7963 goto err;
7964 nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
7965 sizeof(struct list_head),
7966 GFP_KERNEL);
7967 if (!nn->unconf_id_hashtbl)
7968 goto err_unconf_id;
7969 nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
7970 sizeof(struct list_head),
7971 GFP_KERNEL);
7972 if (!nn->sessionid_hashtbl)
7973 goto err_sessionid;
7974
7975 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7976 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
7977 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
7978 }
7979 for (i = 0; i < SESSION_HASH_SIZE; i++)
7980 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
7981 nn->conf_name_tree = RB_ROOT;
7982 nn->unconf_name_tree = RB_ROOT;
7983 nn->boot_time = ktime_get_real_seconds();
7984 nn->grace_ended = false;
7985 nn->nfsd4_manager.block_opens = true;
7986 INIT_LIST_HEAD(&nn->nfsd4_manager.list);
7987 INIT_LIST_HEAD(&nn->client_lru);
7988 INIT_LIST_HEAD(&nn->close_lru);
7989 INIT_LIST_HEAD(&nn->del_recall_lru);
7990 spin_lock_init(&nn->client_lock);
7991 spin_lock_init(&nn->s2s_cp_lock);
7992 idr_init(&nn->s2s_cp_stateids);
7993
7994 spin_lock_init(&nn->blocked_locks_lock);
7995 INIT_LIST_HEAD(&nn->blocked_locks_lru);
7996
7997 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
7998 INIT_DELAYED_WORK(&nn->nfsd_shrinker_work, courtesy_client_reaper);
7999 get_net(net);
8000
8001 return 0;
8002
8003 err_sessionid:
8004 kfree(nn->unconf_id_hashtbl);
8005 err_unconf_id:
8006 kfree(nn->conf_id_hashtbl);
8007 err:
8008 return -ENOMEM;
8009 }
8010
8011 static void
8012 nfs4_state_destroy_net(struct net *net)
8013 {
8014 int i;
8015 struct nfs4_client *clp = NULL;
8016 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8017
8018 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8019 while (!list_empty(&nn->conf_id_hashtbl[i])) {
8020 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
8021 destroy_client(clp);
8022 }
8023 }
8024
8025 WARN_ON(!list_empty(&nn->blocked_locks_lru));
8026
8027 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8028 while (!list_empty(&nn->unconf_id_hashtbl[i])) {
8029 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
8030 destroy_client(clp);
8031 }
8032 }
8033
8034 kfree(nn->sessionid_hashtbl);
8035 kfree(nn->unconf_id_hashtbl);
8036 kfree(nn->conf_id_hashtbl);
8037 put_net(net);
8038 }
8039
8040 int
8041 nfs4_state_start_net(struct net *net)
8042 {
8043 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8044 int ret;
8045
8046 ret = nfs4_state_create_net(net);
8047 if (ret)
8048 return ret;
8049 locks_start_grace(net, &nn->nfsd4_manager);
8050 nfsd4_client_tracking_init(net);
8051 if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
8052 goto skip_grace;
8053 printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
8054 nn->nfsd4_grace, net->ns.inum);
8055 trace_nfsd_grace_start(nn);
8056 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
8057 return 0;
8058
8059 skip_grace:
8060 printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
8061 net->ns.inum);
8062 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
8063 nfsd4_end_grace(nn);
8064 return 0;
8065 }
8066
8067 /* initialization to perform when the nfsd service is started: */
8068
8069 int
8070 nfs4_state_start(void)
8071 {
8072 int ret;
8073
8074 ret = nfsd4_create_callback_queue();
8075 if (ret)
8076 return ret;
8077
8078 set_max_delegations();
8079 return 0;
8080 }
8081
8082 void
8083 nfs4_state_shutdown_net(struct net *net)
8084 {
8085 struct nfs4_delegation *dp = NULL;
8086 struct list_head *pos, *next, reaplist;
8087 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8088
8089 cancel_delayed_work_sync(&nn->laundromat_work);
8090 locks_end_grace(&nn->nfsd4_manager);
8091
8092 INIT_LIST_HEAD(&reaplist);
8093 spin_lock(&state_lock);
8094 list_for_each_safe(pos, next, &nn->del_recall_lru) {
8095 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
8096 WARN_ON(!unhash_delegation_locked(dp));
8097 list_add(&dp->dl_recall_lru, &reaplist);
8098 }
8099 spin_unlock(&state_lock);
8100 list_for_each_safe(pos, next, &reaplist) {
8101 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
8102 list_del_init(&dp->dl_recall_lru);
8103 destroy_unhashed_deleg(dp);
8104 }
8105
8106 nfsd4_client_tracking_exit(net);
8107 nfs4_state_destroy_net(net);
8108 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
8109 nfsd4_ssc_shutdown_umount(nn);
8110 #endif
8111 }
8112
8113 void
8114 nfs4_state_shutdown(void)
8115 {
8116 nfsd4_destroy_callback_queue();
8117 }
8118
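/*
 * The two helpers below implement the "current stateid" convenience:
 * put_stateid() records the stateid an operation just produced in the
 * compound state, and get_stateid() substitutes it back when a later
 * operation in the same compound passes the special current-stateid
 * value on the wire.
 */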
8119 static void
8120 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
8121 {
8122 if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
8123 CURRENT_STATEID(stateid))
8124 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
8125 }
8126
8127 static void
8128 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
8129 {
8130 if (cstate->minorversion) {
8131 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
8132 SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
8133 }
8134 }
8135
8136 void
8137 clear_current_stateid(struct nfsd4_compound_state *cstate)
8138 {
8139 CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
8140 }
8141
8142 /*
8143 * functions to set current state id
8144 */
8145 void
8146 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
8147 union nfsd4_op_u *u)
8148 {
8149 put_stateid(cstate, &u->open_downgrade.od_stateid);
8150 }
8151
8152 void
8153 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
8154 union nfsd4_op_u *u)
8155 {
8156 put_stateid(cstate, &u->open.op_stateid);
8157 }
8158
8159 void
8160 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
8161 union nfsd4_op_u *u)
8162 {
8163 put_stateid(cstate, &u->close.cl_stateid);
8164 }
8165
8166 void
8167 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
8168 union nfsd4_op_u *u)
8169 {
8170 put_stateid(cstate, &u->lock.lk_resp_stateid);
8171 }
8172
8173 /*
8174 * functions to consume current state id
8175 */
8176
8177 void
8178 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
8179 union nfsd4_op_u *u)
8180 {
8181 get_stateid(cstate, &u->open_downgrade.od_stateid);
8182 }
8183
8184 void
8185 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
8186 union nfsd4_op_u *u)
8187 {
8188 get_stateid(cstate, &u->delegreturn.dr_stateid);
8189 }
8190
8191 void
8192 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
8193 union nfsd4_op_u *u)
8194 {
8195 get_stateid(cstate, &u->free_stateid.fr_stateid);
8196 }
8197
8198 void
8199 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
8200 union nfsd4_op_u *u)
8201 {
8202 get_stateid(cstate, &u->setattr.sa_stateid);
8203 }
8204
8205 void
8206 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
8207 union nfsd4_op_u *u)
8208 {
8209 get_stateid(cstate, &u->close.cl_stateid);
8210 }
8211
8212 void
8213 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
8214 union nfsd4_op_u *u)
8215 {
8216 get_stateid(cstate, &u->locku.lu_stateid);
8217 }
8218
8219 void
8220 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
8221 union nfsd4_op_u *u)
8222 {
8223 get_stateid(cstate, &u->read.rd_stateid);
8224 }
8225
8226 void
8227 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
8228 union nfsd4_op_u *u)
8229 {
8230 get_stateid(cstate, &u->write.wr_stateid);
8231 }
8232