/*
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <kandros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include <linux/string_helpers.h>
#include <linux/fsnotify.h>
#include <linux/rhashtable.h>
#include <linux/nfs_ssc.h>

#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"
#include "pnfs.h"
#include "filecache.h"
#include "trace.h"

#define NFSDDBG_FACILITY NFSDDBG_PROC

#define all_ones {{ ~0, ~0}, ~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zero */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};
static const stateid_t close_stateid = {
	.si_generation = 0xffffffffU,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
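
/*
 * The all-zeros and all-ones stateids above are the NFSv4 "special
 * stateids" (see RFC 8881, section 8.2.3) that clients may present in
 * place of a real stateid. currentstateid and close_stateid are local
 * sentinels: the former backs the NFSv4.1 current-stateid feature (see
 * current_stateid.h), and the latter is handed back once a stateid has
 * been closed.
 */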

/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
void nfsd4_end_grace(struct nfsd_net *nn);
static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
static void nfsd4_file_hash_remove(struct nfs4_file *fi);
static void deleg_reaper(struct nfsd_net *nn);

/* Locking: */

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

enum nfsd4_st_mutex_lock_subclass {
	OPEN_STATEID_MUTEX = 0,
	LOCK_STATEID_MUTEX = 1,
};
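
/*
 * The two subclasses let a lock stateid's st_mutex nest inside an open
 * stateid's st_mutex (both belong to one lock class), which can happen
 * when LOCK processing creates a new lock stateid while the associated
 * open stateid's mutex is held; mutex_lock_nested() takes these values
 * so lockdep does not report that nesting as a self-deadlock.
 */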

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

/*
 * A waitqueue where a writer to clients/#/ctl destroying a client can
 * wait for cl_rpc_users to drop to 0 and then for the client to be
 * unhashed.
 */
static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);

static struct kmem_cache *client_slab;
static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;

static void free_session(struct nfsd4_session *);

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops;

static struct workqueue_struct *laundry_wq;

int nfsd4_create_laundry_wq(void)
{
	int rc = 0;

	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
	if (laundry_wq == NULL)
		rc = -ENOMEM;
	return rc;
}

void nfsd4_destroy_laundry_wq(void)
{
	destroy_workqueue(laundry_wq);
}

static bool is_session_dead(struct nfsd4_session *ses)
{
	return ses->se_flags & NFS4_SESSION_DEAD;
}

static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}

static bool is_client_expired(struct nfs4_client *clp)
{
	return clp->cl_time == 0;
}

static void nfsd4_dec_courtesy_client_count(struct nfsd_net *nn,
					    struct nfs4_client *clp)
{
	if (clp->cl_state != NFSD4_ACTIVE)
		atomic_add_unless(&nn->nfsd_courtesy_clients, -1, 0);
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_rpc_users);
	nfsd4_dec_courtesy_client_count(nn, clp);
	clp->cl_state = NFSD4_ACTIVE;
	return nfs_ok;
}

/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		WARN_ON(1);
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = ktime_get_boottime_seconds();
	nfsd4_dec_courtesy_client_count(nn, clp);
	clp->cl_state = NFSD4_ACTIVE;
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (!atomic_dec_and_test(&clp->cl_rpc_users))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
}

static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
	spin_unlock(&nn->client_lock);
}

static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}

static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *cur, *found = NULL;

	spin_lock(&nn->blocked_locks_lock);
	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
		if (fh_match(fh, &cur->nbl_fh)) {
			list_del_init(&cur->nbl_list);
			WARN_ON(list_empty(&cur->nbl_lru));
			list_del_init(&cur->nbl_lru);
			found = cur;
			break;
		}
	}
	spin_unlock(&nn->blocked_locks_lock);
	if (found)
		locks_delete_block(&found->nbl_lock);
	return found;
}

static struct nfsd4_blocked_lock *
find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = find_blocked_lock(lo, fh, nn);
	if (!nbl) {
		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
		if (nbl) {
			INIT_LIST_HEAD(&nbl->nbl_list);
			INIT_LIST_HEAD(&nbl->nbl_lru);
			fh_copy_shallow(&nbl->nbl_fh, fh);
			locks_init_lock(&nbl->nbl_lock);
			kref_init(&nbl->nbl_kref);
			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
					&nfsd4_cb_notify_lock_ops,
					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
		}
	}
	return nbl;
}

static void
free_nbl(struct kref *kref)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = container_of(kref, struct nfsd4_blocked_lock, nbl_kref);
	locks_release_private(&nbl->nbl_lock);
	kfree(nbl);
}

static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
	locks_delete_block(&nbl->nbl_lock);
	kref_put(&nbl->nbl_kref, free_nbl);
}

static void
remove_blocked_locks(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_blocked_lock *nbl;
	LIST_HEAD(reaplist);

	/* Dequeue all blocked locks */
	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&lo->lo_blocked)) {
		nbl = list_first_entry(&lo->lo_blocked,
					struct nfsd4_blocked_lock,
					nbl_list);
		list_del_init(&nbl->nbl_list);
		WARN_ON(list_empty(&nbl->nbl_lru));
		list_move(&nbl->nbl_lru, &reaplist);
	}
	spin_unlock(&nn->blocked_locks_lock);

	/* Now free them */
	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
					nbl_lru);
		list_del_init(&nbl->nbl_lru);
		free_blocked_lock(nbl);
	}
}

static void
nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
					struct nfsd4_blocked_lock, nbl_cb);
	locks_delete_block(&nbl->nbl_lock);
}

static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	trace_nfsd_cb_notify_lock_done(&zero_stateid, task);

	/*
	 * Since this is just an optimization, we don't try very hard if it
	 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
	 * just quit trying on anything else.
	 */
	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
		rpc_delay(task, 1 * HZ);
		return 0;
	default:
		return 1;
	}
}

static void
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
					struct nfsd4_blocked_lock, nbl_cb);

	free_blocked_lock(nbl);
}

static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
	.prepare	= nfsd4_cb_notify_lock_prepare,
	.done		= nfsd4_cb_notify_lock_done,
	.release	= nfsd4_cb_notify_lock_release,
	.opcode		= OP_CB_NOTIFY_LOCK,
};

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation in
 * https://datatracker.ietf.org/doc/html/rfc7530#section-16.19.4 that
 * the server return an error if the client attempts to downgrade to a
 * combination of share bits not explicable by closing some of its
 * previous opens.
 *
 * This enforcement is arguably incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 *
 * But you could also argue that our current code is already overkill,
 * since it only exists to return NFS4ERR_INVAL on incorrect client
 * behavior.
 */
static unsigned int
bmap_to_share_mode(unsigned long bmap)
{
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}
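
/*
 * Example: an OPEN for READ (bit 1) followed by an upgrade to BOTH
 * (bit 3) leaves st_access_bmap == 0b1010; bmap_to_share_mode() then
 * ORs the indices of the set bits together, 1 | 3 == 3, i.e.
 * NFS4_SHARE_ACCESS_BOTH.
 */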

/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap |= mask;
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap &= ~mask;
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	return (bool)(stp->st_access_bmap & mask);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap |= mask;
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap &= ~mask;
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	return (bool)(stp->st_deny_bmap & mask);
}

static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}

static inline int
access_permit_read(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
}

static inline int
access_permit_write(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
}

static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
	atomic_inc(&sop->so_count);
	return sop;
}

static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
	return (sop->so_owner.len == owner->len) &&
		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner))
			return openowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	const unsigned char *cptr = ptr;

	u32 x = 0;
	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}

static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);

	kmem_cache_free(file_slab, fp);
}

void
put_nfs4_file(struct nfs4_file *fi)
{
	if (refcount_dec_and_test(&fi->fi_ref)) {
		nfsd4_file_hash_remove(fi);
		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
	}
}

static struct nfsd_file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = nfsd_file_get(f->fi_fds[O_WRONLY]);
	if (!ret)
		ret = nfsd_file_get(f->fi_fds[O_RDWR]);
	return ret;
}

static struct nfsd_file *
find_writeable_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct nfsd_file *
find_readable_file_locked(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = nfsd_file_get(f->fi_fds[O_RDONLY]);
	if (!ret)
		ret = nfsd_file_get(f->fi_fds[O_RDWR]);
	return ret;
}

static struct nfsd_file *
find_readable_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct nfsd_file *
find_rw_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = nfsd_file_get(f->fi_fds[O_RDWR]);
	spin_unlock(&f->fi_lock);

	return ret;
}

struct nfsd_file *
find_any_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	if (!f)
		return NULL;
	spin_lock(&f->fi_lock);
	ret = nfsd_file_get(f->fi_fds[O_RDWR]);
	if (!ret) {
		ret = nfsd_file_get(f->fi_fds[O_WRONLY]);
		if (!ret)
			ret = nfsd_file_get(f->fi_fds[O_RDONLY]);
	}
	spin_unlock(&f->fi_lock);
	return ret;
}

static struct nfsd_file *find_any_file_locked(struct nfs4_file *f)
{
	lockdep_assert_held(&f->fi_lock);

	if (f->fi_fds[O_RDWR])
		return f->fi_fds[O_RDWR];
	if (f->fi_fds[O_WRONLY])
		return f->fi_fds[O_WRONLY];
	if (f->fi_fds[O_RDONLY])
		return f->fi_fds[O_RDONLY];
	return NULL;
}

static atomic_long_t num_delegations;
unsigned long max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS 8
#define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	return ret & OWNER_HASH_MASK;
}

static struct rhltable nfs4_file_rhltable ____cacheline_aligned_in_smp;

static const struct rhashtable_params nfs4_file_rhash_params = {
	.key_len		= sizeof_field(struct nfs4_file, fi_inode),
	.key_offset		= offsetof(struct nfs4_file, fi_inode),
	.head_offset		= offsetof(struct nfs4_file, fi_rlist),

	/*
	 * Start with a single page hash table to reduce resizing churn
	 * on light workloads.
	 */
	.min_size		= 256,
	.automatic_shrinking	= true,
};
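
/*
 * Note that the table is keyed on the inode but is an rhltable (a hash
 * list), not a plain rhashtable: lookups compare the full filehandle,
 * and filehandle aliasing means several distinct nfs4_files can exist
 * for the same inode, all hanging off one bucket list.
 */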

/*
 * Check if courtesy clients have conflicting access and resolve it if possible
 *
 * access:  is op_share_access if share_access is true.
 *	    Check if access mode, op_share_access, would conflict with
 *	    the current deny mode of the file 'fp'.
 * access:  is op_share_deny if share_access is false.
 *	    Check if the deny mode, op_share_deny, would conflict with
 *	    current access of the file 'fp'.
 * stp:     skip checking this entry.
 * new_stp: normal open, not open upgrade.
 *
 * Function returns:
 *	false - access/deny mode conflict with normal client.
 *	true  - no conflict or conflict with courtesy client(s) is resolved.
 */
static bool
nfs4_resolve_deny_conflicts_locked(struct nfs4_file *fp, bool new_stp,
		struct nfs4_ol_stateid *stp, u32 access, bool share_access)
{
	struct nfs4_ol_stateid *st;
	bool resolvable = true;
	unsigned char bmap;
	struct nfsd_net *nn;
	struct nfs4_client *clp;

	lockdep_assert_held(&fp->fi_lock);
	list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
		/* ignore lock stateid */
		if (st->st_openstp)
			continue;
		if (st == stp && new_stp)
			continue;
		/* check file access against deny mode or vice versa */
		bmap = share_access ? st->st_deny_bmap : st->st_access_bmap;
		if (!(access & bmap_to_share_mode(bmap)))
			continue;
		clp = st->st_stid.sc_client;
		if (try_to_expire_client(clp))
			continue;
		resolvable = false;
		break;
	}
	if (resolvable) {
		clp = stp->st_stid.sc_client;
		nn = net_generic(clp->net, nfsd_net_id);
		mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
	}
	return resolvable;
}

static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	/* Does it conflict with a deny mode already set? */
	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}

static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
	/* Common case is that there is no deny mode. */
	if (deny) {
		/* Does this deny mode make sense? */
		if (deny & ~NFS4_SHARE_DENY_BOTH)
			return nfserr_inval;

		if ((deny & NFS4_SHARE_DENY_READ) &&
		    atomic_read(&fp->fi_access[O_RDONLY]))
			return nfserr_share_denied;

		if ((deny & NFS4_SHARE_DENY_WRITE) &&
		    atomic_read(&fp->fi_access[O_WRONLY]))
			return nfserr_share_denied;
	}
	return nfs_ok;
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct nfsd_file *f1 = NULL;
		struct nfsd_file *f2 = NULL;

		swap(f1, fp->fi_fds[oflag]);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			nfsd_file_put(f1);
		if (f2)
			nfsd_file_put(f2);
	}
}
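
/*
 * Above, dropping the last reference for an access mode closes the
 * nfsd_file opened for that mode; if the opposite mode also has no
 * users, the O_RDWR nfsd_file (which can serve either mode) is closed
 * too. The swap()s move the pointers out from under fi_lock so the
 * nfsd_file_put() calls happen after the spinlock is released.
 */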

static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}

/*
 * Allocate a new open/delegation state counter. This is needed for
 * pNFS for proper return on close semantics.
 *
 * Note that we only allocate it for pNFS-enabled exports, otherwise
 * all pointers to struct nfs4_clnt_odstate are always NULL.
 */
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
	struct nfs4_clnt_odstate *co;

	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
	if (co) {
		co->co_client = clp;
		refcount_set(&co->co_odcount, 1);
	}
	return co;
}

static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp = co->co_file;

	lockdep_assert_held(&fp->fi_lock);
	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}

static inline void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	if (co)
		refcount_inc(&co->co_odcount);
}

static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp;

	if (!co)
		return;

	fp = co->co_file;
	if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
		list_del(&co->co_perfile);
		spin_unlock(&fp->fi_lock);

		nfsd4_return_all_file_layouts(co->co_client, fp);
		kmem_cache_free(odstate_slab, co);
	}
}

static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
	struct nfs4_clnt_odstate *co;
	struct nfs4_client *cl;

	if (!new)
		return NULL;

	cl = new->co_client;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
		if (co->co_client == cl) {
			get_clnt_odstate(co);
			goto out;
		}
	}
	co = new;
	co->co_file = fp;
	hash_clnt_odstate_locked(new);
out:
	spin_unlock(&fp->fi_lock);
	return co;
}

struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
				  void (*sc_free)(struct nfs4_stid *))
{
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);
	/* Reserving 0 for start of file in nfsdfs "states" file: */
	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
	if (new_id < 0)
		goto out_free;

	stid->sc_free = sc_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	refcount_set(&stid->sc_count, 1);
	spin_lock_init(&stid->sc_lock);
	INIT_LIST_HEAD(&stid->sc_cp_list);

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}

/*
 * Create a unique stateid_t to represent each COPY.
 */
static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
			      unsigned char cs_type)
{
	int new_id;

	stid->cs_stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
	stid->cs_stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;

	idr_preload(GFP_KERNEL);
	spin_lock(&nn->s2s_cp_lock);
	new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
	stid->cs_stid.si_opaque.so_id = new_id;
	stid->cs_stid.si_generation = 1;
	spin_unlock(&nn->s2s_cp_lock);
	idr_preload_end();
	if (new_id < 0)
		return 0;
	stid->cs_type = cs_type;
	return 1;
}

int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
{
	return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
}

struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
						     struct nfs4_stid *p_stid)
{
	struct nfs4_cpntf_state *cps;

	cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
	if (!cps)
		return NULL;
	cps->cpntf_time = ktime_get_boottime_seconds();
	refcount_set(&cps->cp_stateid.cs_count, 1);
	if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
		goto out_free;
	spin_lock(&nn->s2s_cp_lock);
	list_add(&cps->cp_list, &p_stid->sc_cp_list);
	spin_unlock(&nn->s2s_cp_lock);
	return cps;
out_free:
	kfree(cps);
	return NULL;
}

void nfs4_free_copy_state(struct nfsd4_copy *copy)
{
	struct nfsd_net *nn;

	if (copy->cp_stateid.cs_type != NFS4_COPY_STID)
		return;
	nn = net_generic(copy->cp_clp->net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	idr_remove(&nn->s2s_cp_stateids,
		   copy->cp_stateid.cs_stid.si_opaque.so_id);
	spin_unlock(&nn->s2s_cp_lock);
}

static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
{
	struct nfs4_cpntf_state *cps;
	struct nfsd_net *nn;

	nn = net_generic(net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	while (!list_empty(&stid->sc_cp_list)) {
		cps = list_first_entry(&stid->sc_cp_list,
				       struct nfs4_cpntf_state, cp_list);
		_free_cpntf_state_locked(nn, cps);
	}
	spin_unlock(&nn->s2s_cp_lock);
}

static struct nfs4_ol_stateid *nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
	struct nfs4_stid *stid;

	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
	if (!stid)
		return NULL;

	return openlockstateid(stid);
}

/*
 * As the sc_free callback of deleg, this may be called by nfs4_put_stid
 * in nfsd_break_one_deleg.
 * Considering nfsd_break_one_deleg is called with the flc->flc_lock held,
 * this function mustn't ever sleep.
 */
static void nfs4_free_deleg(struct nfs4_stid *stid)
{
	struct nfs4_delegation *dp = delegstateid(stid);

	WARN_ON_ONCE(!list_empty(&stid->sc_cp_list));
	WARN_ON_ONCE(!list_empty(&dp->dl_perfile));
	WARN_ON_ONCE(!list_empty(&dp->dl_perclnt));
	WARN_ON_ONCE(!list_empty(&dp->dl_recall_lru));
	kmem_cache_free(deleg_slab, stid);
	atomic_long_dec(&num_delegations);
}

/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appears in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.  This results in delegations for a
 * given filehandle being blocked for between 30 and 60 seconds.
 *
 * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
 * low 3 bytes as hash-table indices.
 *
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 * is used to manage concurrent access.  Testing does not need the lock
 * except when swapping the two filters.
 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
	int	entries, old_entries;
	time64_t swap_time;
	int	new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);
} blocked_delegations;

static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (ktime_get_seconds() - bd->swap_time > 30) {
		spin_lock(&blocked_delegations_lock);
		if (ktime_get_seconds() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			bd->new = 1-bd->new;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->swap_time = ktime_get_seconds();
		}
		spin_unlock(&blocked_delegations_lock);
	}
	hash = jhash(&fh->fh_raw, fh->fh_size, 0);
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;

	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}
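
/*
 * For example, a filehandle hashing to 0x00CCBBAA is tested against
 * (and, in block_delegations() below, set in) bits 0xAA, 0xBB and 0xCC
 * of a filter; a delegation is blocked only when all three bits are set
 * in the same filter, so a false positive needs a triple collision.
 */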
1137
block_delegations(struct knfsd_fh * fh)1138 static void block_delegations(struct knfsd_fh *fh)
1139 {
1140 u32 hash;
1141 struct bloom_pair *bd = &blocked_delegations;
1142
1143 hash = jhash(&fh->fh_raw, fh->fh_size, 0);
1144
1145 spin_lock(&blocked_delegations_lock);
1146 __set_bit(hash&255, bd->set[bd->new]);
1147 __set_bit((hash>>8)&255, bd->set[bd->new]);
1148 __set_bit((hash>>16)&255, bd->set[bd->new]);
1149 if (bd->entries == 0)
1150 bd->swap_time = ktime_get_seconds();
1151 bd->entries += 1;
1152 spin_unlock(&blocked_delegations_lock);
1153 }
1154
1155 static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client * clp,struct nfs4_file * fp,struct nfs4_clnt_odstate * odstate,u32 dl_type)1156 alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
1157 struct nfs4_clnt_odstate *odstate, u32 dl_type)
1158 {
1159 struct nfs4_delegation *dp;
1160 struct nfs4_stid *stid;
1161 long n;
1162
1163 dprintk("NFSD alloc_init_deleg\n");
1164 n = atomic_long_inc_return(&num_delegations);
1165 if (n < 0 || n > max_delegations)
1166 goto out_dec;
1167 if (delegation_blocked(&fp->fi_fhandle))
1168 goto out_dec;
1169 stid = nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg);
1170 if (stid == NULL)
1171 goto out_dec;
1172 dp = delegstateid(stid);
1173
1174 /*
1175 * delegation seqid's are never incremented. The 4.1 special
1176 * meaning of seqid 0 isn't meaningful, really, but let's avoid
1177 * 0 anyway just for consistency and use 1:
1178 */
1179 dp->dl_stid.sc_stateid.si_generation = 1;
1180 INIT_LIST_HEAD(&dp->dl_perfile);
1181 INIT_LIST_HEAD(&dp->dl_perclnt);
1182 INIT_LIST_HEAD(&dp->dl_recall_lru);
1183 dp->dl_clnt_odstate = odstate;
1184 get_clnt_odstate(odstate);
1185 dp->dl_type = dl_type;
1186 dp->dl_retries = 1;
1187 dp->dl_recalled = false;
1188 nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
1189 &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
1190 nfsd4_init_cb(&dp->dl_cb_fattr.ncf_getattr, dp->dl_stid.sc_client,
1191 &nfsd4_cb_getattr_ops, NFSPROC4_CLNT_CB_GETATTR);
1192 dp->dl_cb_fattr.ncf_file_modified = false;
1193 dp->dl_cb_fattr.ncf_cb_bmap[0] = FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE;
1194 get_nfs4_file(fp);
1195 dp->dl_stid.sc_file = fp;
1196 return dp;
1197 out_dec:
1198 atomic_long_dec(&num_delegations);
1199 return NULL;
1200 }
1201
1202 void
nfs4_put_stid(struct nfs4_stid * s)1203 nfs4_put_stid(struct nfs4_stid *s)
1204 {
1205 struct nfs4_file *fp = s->sc_file;
1206 struct nfs4_client *clp = s->sc_client;
1207
1208 might_lock(&clp->cl_lock);
1209
1210 if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
1211 wake_up_all(&close_wq);
1212 return;
1213 }
1214 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1215 if (s->sc_status & SC_STATUS_ADMIN_REVOKED)
1216 atomic_dec(&s->sc_client->cl_admin_revoked);
1217 nfs4_free_cpntf_statelist(clp->net, s);
1218 spin_unlock(&clp->cl_lock);
1219 s->sc_free(s);
1220 if (fp)
1221 put_nfs4_file(fp);
1222 }
1223
1224 void
nfs4_inc_and_copy_stateid(stateid_t * dst,struct nfs4_stid * stid)1225 nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
1226 {
1227 stateid_t *src = &stid->sc_stateid;
1228
1229 spin_lock(&stid->sc_lock);
1230 if (unlikely(++src->si_generation == 0))
1231 src->si_generation = 1;
1232 memcpy(dst, src, sizeof(*dst));
1233 spin_unlock(&stid->sc_lock);
1234 }
1235
put_deleg_file(struct nfs4_file * fp)1236 static void put_deleg_file(struct nfs4_file *fp)
1237 {
1238 struct nfsd_file *nf = NULL;
1239
1240 spin_lock(&fp->fi_lock);
1241 if (--fp->fi_delegees == 0)
1242 swap(nf, fp->fi_deleg_file);
1243 spin_unlock(&fp->fi_lock);
1244
1245 if (nf)
1246 nfsd_file_put(nf);
1247 }
1248
nfs4_unlock_deleg_lease(struct nfs4_delegation * dp)1249 static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
1250 {
1251 struct nfs4_file *fp = dp->dl_stid.sc_file;
1252 struct nfsd_file *nf = fp->fi_deleg_file;
1253
1254 WARN_ON_ONCE(!fp->fi_delegees);
1255
1256 kernel_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
1257 put_deleg_file(fp);
1258 }
1259
destroy_unhashed_deleg(struct nfs4_delegation * dp)1260 static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
1261 {
1262 put_clnt_odstate(dp->dl_clnt_odstate);
1263 nfs4_unlock_deleg_lease(dp);
1264 nfs4_put_stid(&dp->dl_stid);
1265 }
1266
1267 /**
1268 * nfs4_delegation_exists - Discover if this delegation already exists
1269 * @clp: a pointer to the nfs4_client we're granting a delegation to
1270 * @fp: a pointer to the nfs4_file we're granting a delegation on
1271 *
1272 * Return:
1273 * On success: true iff an existing delegation is found
1274 */
1275
1276 static bool
nfs4_delegation_exists(struct nfs4_client * clp,struct nfs4_file * fp)1277 nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
1278 {
1279 struct nfs4_delegation *searchdp = NULL;
1280 struct nfs4_client *searchclp = NULL;
1281
1282 lockdep_assert_held(&state_lock);
1283 lockdep_assert_held(&fp->fi_lock);
1284
1285 list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
1286 searchclp = searchdp->dl_stid.sc_client;
1287 if (clp == searchclp) {
1288 return true;
1289 }
1290 }
1291 return false;
1292 }
1293
1294 /**
1295 * hash_delegation_locked - Add a delegation to the appropriate lists
1296 * @dp: a pointer to the nfs4_delegation we are adding.
1297 * @fp: a pointer to the nfs4_file we're granting a delegation on
1298 *
1299 * Return:
1300 * On success: NULL if the delegation was successfully hashed.
1301 *
1302 * On error: -EAGAIN if one was previously granted to this
1303 * nfs4_client for this nfs4_file. Delegation is not hashed.
1304 *
1305 */
1306
1307 static int
hash_delegation_locked(struct nfs4_delegation * dp,struct nfs4_file * fp)1308 hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
1309 {
1310 struct nfs4_client *clp = dp->dl_stid.sc_client;
1311
1312 lockdep_assert_held(&state_lock);
1313 lockdep_assert_held(&fp->fi_lock);
1314 lockdep_assert_held(&clp->cl_lock);
1315
1316 if (nfs4_delegation_exists(clp, fp))
1317 return -EAGAIN;
1318 refcount_inc(&dp->dl_stid.sc_count);
1319 dp->dl_stid.sc_type = SC_TYPE_DELEG;
1320 list_add(&dp->dl_perfile, &fp->fi_delegations);
1321 list_add(&dp->dl_perclnt, &clp->cl_delegations);
1322 return 0;
1323 }
1324
delegation_hashed(struct nfs4_delegation * dp)1325 static bool delegation_hashed(struct nfs4_delegation *dp)
1326 {
1327 return !(list_empty(&dp->dl_perfile));
1328 }
1329
1330 static bool
unhash_delegation_locked(struct nfs4_delegation * dp,unsigned short statusmask)1331 unhash_delegation_locked(struct nfs4_delegation *dp, unsigned short statusmask)
1332 {
1333 struct nfs4_file *fp = dp->dl_stid.sc_file;
1334
1335 lockdep_assert_held(&state_lock);
1336
1337 if (!delegation_hashed(dp))
1338 return false;
1339
1340 if (statusmask == SC_STATUS_REVOKED &&
1341 dp->dl_stid.sc_client->cl_minorversion == 0)
1342 statusmask = SC_STATUS_CLOSED;
1343 dp->dl_stid.sc_status |= statusmask;
1344 if (statusmask & SC_STATUS_ADMIN_REVOKED)
1345 atomic_inc(&dp->dl_stid.sc_client->cl_admin_revoked);
1346
1347 /* Ensure that deleg break won't try to requeue it */
1348 ++dp->dl_time;
1349 spin_lock(&fp->fi_lock);
1350 list_del_init(&dp->dl_perclnt);
1351 list_del_init(&dp->dl_recall_lru);
1352 list_del_init(&dp->dl_perfile);
1353 spin_unlock(&fp->fi_lock);
1354 return true;
1355 }
1356
destroy_delegation(struct nfs4_delegation * dp)1357 static void destroy_delegation(struct nfs4_delegation *dp)
1358 {
1359 bool unhashed;
1360
1361 spin_lock(&state_lock);
1362 unhashed = unhash_delegation_locked(dp, SC_STATUS_CLOSED);
1363 spin_unlock(&state_lock);
1364 if (unhashed)
1365 destroy_unhashed_deleg(dp);
1366 }
1367
1368 /**
1369 * revoke_delegation - perform nfs4 delegation structure cleanup
1370 * @dp: pointer to the delegation
1371 *
1372 * This function assumes that it's called either from the administrative
1373 * interface (nfsd4_revoke_states()) that's revoking a specific delegation
1374 * stateid or it's called from a laundromat thread (nfsd4_landromat()) that
1375 * determined that this specific state has expired and needs to be revoked
1376 * (both mark state with the appropriate stid sc_status mode). It is also
1377 * assumed that a reference was taken on the @dp state.
1378 *
1379 * If this function finds that the @dp state is SC_STATUS_FREED it means
1380 * that a FREE_STATEID operation for this stateid has been processed and
1381 * we can proceed to removing it from recalled list. However, if @dp state
1382 * isn't marked SC_STATUS_FREED, it means we need place it on the cl_revoked
1383 * list and wait for the FREE_STATEID to arrive from the client. At the same
1384 * time, we need to mark it as SC_STATUS_FREEABLE to indicate to the
1385 * nfsd4_free_stateid() function that this stateid has already been added
1386 * to the cl_revoked list and that nfsd4_free_stateid() is now responsible
1387 * for removing it from the list. Inspection of where the delegation state
1388 * in the revocation process is protected by the clp->cl_lock.
1389 */
revoke_delegation(struct nfs4_delegation * dp)1390 static void revoke_delegation(struct nfs4_delegation *dp)
1391 {
1392 struct nfs4_client *clp = dp->dl_stid.sc_client;
1393
1394 WARN_ON(!list_empty(&dp->dl_recall_lru));
1395 WARN_ON_ONCE(!(dp->dl_stid.sc_status &
1396 (SC_STATUS_REVOKED | SC_STATUS_ADMIN_REVOKED)));
1397
1398 trace_nfsd_stid_revoke(&dp->dl_stid);
1399
1400 spin_lock(&clp->cl_lock);
1401 if (dp->dl_stid.sc_status & SC_STATUS_FREED) {
1402 list_del_init(&dp->dl_recall_lru);
1403 goto out;
1404 }
1405 list_add(&dp->dl_recall_lru, &clp->cl_revoked);
1406 dp->dl_stid.sc_status |= SC_STATUS_FREEABLE;
1407 out:
1408 spin_unlock(&clp->cl_lock);
1409 destroy_unhashed_deleg(dp);
1410 }
1411
1412 /*
1413 * SETCLIENTID state
1414 */
1415
clientid_hashval(u32 id)1416 static unsigned int clientid_hashval(u32 id)
1417 {
1418 return id & CLIENT_HASH_MASK;
1419 }
1420
clientstr_hashval(struct xdr_netobj name)1421 static unsigned int clientstr_hashval(struct xdr_netobj name)
1422 {
1423 return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
1424 }
1425
1426 /*
1427 * A stateid that had a deny mode associated with it is being released
1428 * or downgraded. Recalculate the deny mode on the file.
1429 */
1430 static void
recalculate_deny_mode(struct nfs4_file * fp)1431 recalculate_deny_mode(struct nfs4_file *fp)
1432 {
1433 struct nfs4_ol_stateid *stp;
1434 u32 old_deny;
1435
1436 spin_lock(&fp->fi_lock);
1437 old_deny = fp->fi_share_deny;
1438 fp->fi_share_deny = 0;
1439 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
1440 fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
1441 if (fp->fi_share_deny == old_deny)
1442 break;
1443 }
1444 spin_unlock(&fp->fi_lock);
1445 }
1446
1447 static void
reset_union_bmap_deny(u32 deny,struct nfs4_ol_stateid * stp)1448 reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
1449 {
1450 int i;
1451 bool change = false;
1452
1453 for (i = 1; i < 4; i++) {
1454 if ((i & deny) != i) {
1455 change = true;
1456 clear_deny(i, stp);
1457 }
1458 }
1459
1460 /* Recalculate per-file deny mode if there was a change */
1461 if (change)
1462 recalculate_deny_mode(stp->st_stid.sc_file);
1463 }
1464
1465 /* release all access and file references for a given stateid */
1466 static void
release_all_access(struct nfs4_ol_stateid * stp)1467 release_all_access(struct nfs4_ol_stateid *stp)
1468 {
1469 int i;
1470 struct nfs4_file *fp = stp->st_stid.sc_file;
1471
1472 if (fp && stp->st_deny_bmap != 0)
1473 recalculate_deny_mode(fp);
1474
1475 for (i = 1; i < 4; i++) {
1476 if (test_access(i, stp))
1477 nfs4_file_put_access(stp->st_stid.sc_file, i);
1478 clear_access(i, stp);
1479 }
1480 }
1481
nfs4_free_stateowner(struct nfs4_stateowner * sop)1482 static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
1483 {
1484 kfree(sop->so_owner.data);
1485 sop->so_ops->so_free(sop);
1486 }
1487
nfs4_put_stateowner(struct nfs4_stateowner * sop)1488 static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
1489 {
1490 struct nfs4_client *clp = sop->so_client;
1491
1492 might_lock(&clp->cl_lock);
1493
1494 if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
1495 return;
1496 sop->so_ops->so_unhash(sop);
1497 spin_unlock(&clp->cl_lock);
1498 nfs4_free_stateowner(sop);
1499 }
1500
1501 static bool
nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid * stp)1502 nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
1503 {
1504 return list_empty(&stp->st_perfile);
1505 }
1506
unhash_ol_stateid(struct nfs4_ol_stateid * stp)1507 static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
1508 {
1509 struct nfs4_file *fp = stp->st_stid.sc_file;
1510
1511 lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
1512
1513 if (list_empty(&stp->st_perfile))
1514 return false;
1515
1516 spin_lock(&fp->fi_lock);
1517 list_del_init(&stp->st_perfile);
1518 spin_unlock(&fp->fi_lock);
1519 list_del(&stp->st_perstateowner);
1520 return true;
1521 }
1522
nfs4_free_ol_stateid(struct nfs4_stid * stid)1523 static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
1524 {
1525 struct nfs4_ol_stateid *stp = openlockstateid(stid);
1526
1527 put_clnt_odstate(stp->st_clnt_odstate);
1528 release_all_access(stp);
1529 if (stp->st_stateowner)
1530 nfs4_put_stateowner(stp->st_stateowner);
1531 WARN_ON(!list_empty(&stid->sc_cp_list));
1532 kmem_cache_free(stateid_slab, stid);
1533 }
1534
nfs4_free_lock_stateid(struct nfs4_stid * stid)1535 static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
1536 {
1537 struct nfs4_ol_stateid *stp = openlockstateid(stid);
1538 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
1539 struct nfsd_file *nf;
1540
1541 nf = find_any_file(stp->st_stid.sc_file);
1542 if (nf) {
1543 get_file(nf->nf_file);
1544 filp_close(nf->nf_file, (fl_owner_t)lo);
1545 nfsd_file_put(nf);
1546 }
1547 nfs4_free_ol_stateid(stid);
1548 }
1549
1550 /*
1551 * Put the persistent reference to an already unhashed generic stateid, while
1552 * holding the cl_lock. If it's the last reference, then put it onto the
1553 * reaplist for later destruction.
1554 */
put_ol_stateid_locked(struct nfs4_ol_stateid * stp,struct list_head * reaplist)1555 static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
1556 struct list_head *reaplist)
1557 {
1558 struct nfs4_stid *s = &stp->st_stid;
1559 struct nfs4_client *clp = s->sc_client;
1560
1561 lockdep_assert_held(&clp->cl_lock);
1562
1563 WARN_ON_ONCE(!list_empty(&stp->st_locks));
1564
1565 if (!refcount_dec_and_test(&s->sc_count)) {
1566 wake_up_all(&close_wq);
1567 return;
1568 }
1569
1570 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1571 if (s->sc_status & SC_STATUS_ADMIN_REVOKED)
1572 atomic_dec(&s->sc_client->cl_admin_revoked);
1573 list_add(&stp->st_locks, reaplist);
1574 }
1575
unhash_lock_stateid(struct nfs4_ol_stateid * stp)1576 static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
1577 {
1578 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1579
1580 if (!unhash_ol_stateid(stp))
1581 return false;
1582 list_del_init(&stp->st_locks);
1583 stp->st_stid.sc_status |= SC_STATUS_CLOSED;
1584 return true;
1585 }
1586
release_lock_stateid(struct nfs4_ol_stateid * stp)1587 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
1588 {
1589 struct nfs4_client *clp = stp->st_stid.sc_client;
1590 bool unhashed;
1591
1592 spin_lock(&clp->cl_lock);
1593 unhashed = unhash_lock_stateid(stp);
1594 spin_unlock(&clp->cl_lock);
1595 if (unhashed)
1596 nfs4_put_stid(&stp->st_stid);
1597 }
1598
unhash_lockowner_locked(struct nfs4_lockowner * lo)1599 static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
1600 {
1601 struct nfs4_client *clp = lo->lo_owner.so_client;
1602
1603 lockdep_assert_held(&clp->cl_lock);
1604
1605 list_del_init(&lo->lo_owner.so_strhash);
1606 }
1607
1608 /*
1609 * Free a list of generic stateids that were collected earlier after being
1610 * fully unhashed.
1611 */
1612 static void
free_ol_stateid_reaplist(struct list_head * reaplist)1613 free_ol_stateid_reaplist(struct list_head *reaplist)
1614 {
1615 struct nfs4_ol_stateid *stp;
1616 struct nfs4_file *fp;
1617
1618 might_sleep();
1619
1620 while (!list_empty(reaplist)) {
1621 stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
1622 st_locks);
1623 list_del(&stp->st_locks);
1624 fp = stp->st_stid.sc_file;
1625 stp->st_stid.sc_free(&stp->st_stid);
1626 if (fp)
1627 put_nfs4_file(fp);
1628 }
1629 }
1630
release_open_stateid_locks(struct nfs4_ol_stateid * open_stp,struct list_head * reaplist)1631 static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
1632 struct list_head *reaplist)
1633 {
1634 struct nfs4_ol_stateid *stp;
1635
1636 lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
1637
1638 while (!list_empty(&open_stp->st_locks)) {
1639 stp = list_entry(open_stp->st_locks.next,
1640 struct nfs4_ol_stateid, st_locks);
1641 unhash_lock_stateid(stp);
1642 put_ol_stateid_locked(stp, reaplist);
1643 }
1644 }
1645
unhash_open_stateid(struct nfs4_ol_stateid * stp,struct list_head * reaplist)1646 static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
1647 struct list_head *reaplist)
1648 {
1649 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1650
1651 if (!unhash_ol_stateid(stp))
1652 return false;
1653 release_open_stateid_locks(stp, reaplist);
1654 return true;
1655 }
1656
release_open_stateid(struct nfs4_ol_stateid * stp)1657 static void release_open_stateid(struct nfs4_ol_stateid *stp)
1658 {
1659 LIST_HEAD(reaplist);
1660
1661 spin_lock(&stp->st_stid.sc_client->cl_lock);
1662 stp->st_stid.sc_status |= SC_STATUS_CLOSED;
1663 if (unhash_open_stateid(stp, &reaplist))
1664 put_ol_stateid_locked(stp, &reaplist);
1665 spin_unlock(&stp->st_stid.sc_client->cl_lock);
1666 free_ol_stateid_reaplist(&reaplist);
1667 }
1668
nfs4_openowner_unhashed(struct nfs4_openowner * oo)1669 static bool nfs4_openowner_unhashed(struct nfs4_openowner *oo)
1670 {
1671 lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
1672
1673 return list_empty(&oo->oo_owner.so_strhash) &&
1674 list_empty(&oo->oo_perclient);
1675 }
1676
unhash_openowner_locked(struct nfs4_openowner * oo)1677 static void unhash_openowner_locked(struct nfs4_openowner *oo)
1678 {
1679 struct nfs4_client *clp = oo->oo_owner.so_client;
1680
1681 lockdep_assert_held(&clp->cl_lock);
1682
1683 list_del_init(&oo->oo_owner.so_strhash);
1684 list_del_init(&oo->oo_perclient);
1685 }
1686
release_last_closed_stateid(struct nfs4_openowner * oo)1687 static void release_last_closed_stateid(struct nfs4_openowner *oo)
1688 {
1689 struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
1690 nfsd_net_id);
1691 struct nfs4_ol_stateid *s;
1692
1693 spin_lock(&nn->client_lock);
1694 s = oo->oo_last_closed_stid;
1695 if (s) {
1696 list_del_init(&oo->oo_close_lru);
1697 oo->oo_last_closed_stid = NULL;
1698 }
1699 spin_unlock(&nn->client_lock);
1700 if (s)
1701 nfs4_put_stid(&s->st_stid);
1702 }
1703
release_openowner(struct nfs4_openowner * oo)1704 static void release_openowner(struct nfs4_openowner *oo)
1705 {
1706 struct nfs4_ol_stateid *stp;
1707 struct nfs4_client *clp = oo->oo_owner.so_client;
1708 LIST_HEAD(reaplist);
1709
1710 spin_lock(&clp->cl_lock);
1711 unhash_openowner_locked(oo);
1712 while (!list_empty(&oo->oo_owner.so_stateids)) {
1713 stp = list_first_entry(&oo->oo_owner.so_stateids,
1714 struct nfs4_ol_stateid, st_perstateowner);
1715 if (unhash_open_stateid(stp, &reaplist))
1716 put_ol_stateid_locked(stp, &reaplist);
1717 }
1718 spin_unlock(&clp->cl_lock);
1719 free_ol_stateid_reaplist(&reaplist);
1720 release_last_closed_stateid(oo);
1721 nfs4_put_stateowner(&oo->oo_owner);
1722 }
1723
find_one_sb_stid(struct nfs4_client * clp,struct super_block * sb,unsigned int sc_types)1724 static struct nfs4_stid *find_one_sb_stid(struct nfs4_client *clp,
1725 struct super_block *sb,
1726 unsigned int sc_types)
1727 {
1728 unsigned long id, tmp;
1729 struct nfs4_stid *stid;
1730
1731 spin_lock(&clp->cl_lock);
1732 idr_for_each_entry_ul(&clp->cl_stateids, stid, tmp, id)
1733 if ((stid->sc_type & sc_types) &&
1734 stid->sc_status == 0 &&
1735 stid->sc_file->fi_inode->i_sb == sb) {
1736 refcount_inc(&stid->sc_count);
1737 break;
1738 }
1739 spin_unlock(&clp->cl_lock);
1740 return stid;
1741 }
1742
1743 /**
1744 * nfsd4_revoke_states - revoke all nfsv4 states associated with given filesystem
1745 * @net: used to identify instance of nfsd (there is one per net namespace)
1746 * @sb: super_block used to identify target filesystem
1747 *
1748 * All nfs4 states (open, lock, delegation, layout) held by the server instance
1749 * and associated with a file on the given filesystem will be revoked, closing
1750 * any affected files and releasing every reference from nfsd to the
1751 * filesystem. Thus nfsd will no longer prevent the filesystem from being
1752 * unmounted.
1753 *
1754 * The clients which own the states will subsequently be notified that the
1755 * states have been "admin-revoked".
1756 */
1757 void nfsd4_revoke_states(struct net *net, struct super_block *sb)
1758 {
1759 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1760 unsigned int idhashval;
1761 unsigned int sc_types;
1762
1763 sc_types = SC_TYPE_OPEN | SC_TYPE_LOCK | SC_TYPE_DELEG | SC_TYPE_LAYOUT;
1764
1765 spin_lock(&nn->client_lock);
1766 for (idhashval = 0; idhashval < CLIENT_HASH_SIZE; idhashval++) {
1767 struct list_head *head = &nn->conf_id_hashtbl[idhashval];
1768 struct nfs4_client *clp;
1769 retry:
1770 list_for_each_entry(clp, head, cl_idhash) {
1771 struct nfs4_stid *stid = find_one_sb_stid(clp, sb,
1772 sc_types);
1773 if (stid) {
1774 struct nfs4_ol_stateid *stp;
1775 struct nfs4_delegation *dp;
1776 struct nfs4_layout_stateid *ls;
1777
1778 spin_unlock(&nn->client_lock);
1779 switch (stid->sc_type) {
1780 case SC_TYPE_OPEN:
1781 stp = openlockstateid(stid);
1782 mutex_lock_nested(&stp->st_mutex,
1783 OPEN_STATEID_MUTEX);
1784
1785 spin_lock(&clp->cl_lock);
1786 if (stid->sc_status == 0) {
1787 stid->sc_status |=
1788 SC_STATUS_ADMIN_REVOKED;
1789 atomic_inc(&clp->cl_admin_revoked);
1790 spin_unlock(&clp->cl_lock);
1791 release_all_access(stp);
1792 } else
1793 spin_unlock(&clp->cl_lock);
1794 mutex_unlock(&stp->st_mutex);
1795 break;
1796 case SC_TYPE_LOCK:
1797 stp = openlockstateid(stid);
1798 mutex_lock_nested(&stp->st_mutex,
1799 LOCK_STATEID_MUTEX);
1800 spin_lock(&clp->cl_lock);
1801 if (stid->sc_status == 0) {
1802 struct nfs4_lockowner *lo =
1803 lockowner(stp->st_stateowner);
1804 struct nfsd_file *nf;
1805
1806 stid->sc_status |=
1807 SC_STATUS_ADMIN_REVOKED;
1808 atomic_inc(&clp->cl_admin_revoked);
1809 spin_unlock(&clp->cl_lock);
1810 nf = find_any_file(stp->st_stid.sc_file);
1811 if (nf) {
1812 get_file(nf->nf_file);
1813 filp_close(nf->nf_file,
1814 (fl_owner_t)lo);
1815 nfsd_file_put(nf);
1816 }
1817 release_all_access(stp);
1818 } else
1819 spin_unlock(&clp->cl_lock);
1820 mutex_unlock(&stp->st_mutex);
1821 break;
1822 case SC_TYPE_DELEG:
1823 refcount_inc(&stid->sc_count);
1824 dp = delegstateid(stid);
1825 spin_lock(&state_lock);
1826 if (!unhash_delegation_locked(
1827 dp, SC_STATUS_ADMIN_REVOKED))
1828 dp = NULL;
1829 spin_unlock(&state_lock);
1830 if (dp)
1831 revoke_delegation(dp);
1832 break;
1833 case SC_TYPE_LAYOUT:
1834 ls = layoutstateid(stid);
1835 nfsd4_close_layout(ls);
1836 break;
1837 }
1838 nfs4_put_stid(stid);
1839 spin_lock(&nn->client_lock);
1840 if (clp->cl_minorversion == 0)
1841 /* Allow cleanup after a lease period.
1842 * store_release ensures cleanup will
1843 * see any newly revoked states if it
1844 * sees the time updated.
1845 */
1846 nn->nfs40_last_revoke =
1847 ktime_get_boottime_seconds();
1848 goto retry;
1849 }
1850 }
1851 }
1852 spin_unlock(&nn->client_lock);
1853 }
1854
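/*
 * A sessionid is laid out as the 8-byte clientid, a 4-byte sequence
 * number, and 4 reserved bytes (see gen_sessionid() below); only the
 * monotonically increasing sequence number participates in the hash.
 */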
1855 static inline int
1856 hash_sessionid(struct nfs4_sessionid *sessionid)
1857 {
1858 struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
1859
1860 return sid->sequence % SESSION_HASH_SIZE;
1861 }
1862
1863 #ifdef CONFIG_SUNRPC_DEBUG
1864 static inline void
1865 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1866 {
1867 u32 *ptr = (u32 *)(&sessionid->data[0]);
1868 dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
1869 }
1870 #else
1871 static inline void
1872 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1873 {
1874 }
1875 #endif
1876
1877 /*
1878 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
1879 * won't be used for replay.
1880 */
1881 void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
1882 {
1883 struct nfs4_stateowner *so = cstate->replay_owner;
1884
1885 if (nfserr == nfserr_replay_me)
1886 return;
1887
1888 if (!seqid_mutating_err(ntohl(nfserr))) {
1889 nfsd4_cstate_clear_replay(cstate);
1890 return;
1891 }
1892 if (!so)
1893 return;
1894 if (so->so_is_open_owner)
1895 release_last_closed_stateid(openowner(so));
1896 so->so_seqid++;
1897 return;
1898 }
1899
1900 static void
1901 gen_sessionid(struct nfsd4_session *ses)
1902 {
1903 struct nfs4_client *clp = ses->se_client;
1904 struct nfsd4_sessionid *sid;
1905
1906 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
1907 sid->clientid = clp->cl_clientid;
1908 sid->sequence = current_sessionid++;
1909 sid->reserved = 0;
1910 }
1911
1912 /*
1913 * The protocol defines ca_maxresponsesize_cached to include the size of
1914 * the rpc header, but all we need to cache is the data starting after
1915 * the end of the initial SEQUENCE operation--the rest we regenerate
1916 * each time. Therefore we can advertise a ca_maxresponsesize_cached
1917 * value that is the number of bytes in our cache plus a few additional
1918 * bytes. In order to stay on the safe side, and not promise more than
1919 * we can cache, those additional bytes must be the minimum possible: 24
1920 * bytes of rpc header (xid through accept state, with AUTH_NULL
1921 * verifier), 12 for the compound header (with zero-length tag), and 44
1922 * for the SEQUENCE op response:
1923 */
1924 #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
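/*
 * For example, a slot whose data cache holds 1024 bytes of
 * post-SEQUENCE reply data can safely advertise a
 * ca_maxresponsesize_cached of 1024 + NFSD_MIN_HDR_SEQ_SZ = 1104 bytes.
 */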
1925
1926 static void
1927 free_session_slots(struct nfsd4_session *ses)
1928 {
1929 int i;
1930
1931 for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
1932 free_svc_cred(&ses->se_slots[i]->sl_cred);
1933 kfree(ses->se_slots[i]);
1934 }
1935 }
1936
1937 /*
1938 * We don't actually need to cache the rpc and session headers, so we
1939 * can allocate a little less for each slot:
1940 */
1941 static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1942 {
1943 u32 size;
1944
1945 if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
1946 size = 0;
1947 else
1948 size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
1949 return size + sizeof(struct nfsd4_slot);
1950 }
1951
1952 /*
1953 * XXX: If we run out of reserved DRC memory we could (up to a point)
1954 * re-negotiate active sessions and reduce their slot usage to make
1955 * room for new connections. For now we just fail the create session.
1956 */
1957 static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
1958 {
1959 u32 slotsize = slot_bytes(ca);
1960 u32 num = ca->maxreqs;
1961 unsigned long avail, total_avail;
1962 unsigned int scale_factor;
1963
1964 spin_lock(&nfsd_drc_lock);
1965 if (nfsd_drc_max_mem > nfsd_drc_mem_used)
1966 total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
1967 else
1968 /* We have handed out more space than we chose in
1969 * set_max_drc() to allow. That isn't really a
1970 * problem as long as it doesn't make us think we
1971 * have lots more due to integer overflow.
1972 */
1973 total_avail = 0;
1974 avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
1975 /*
1976 * Never use more than a fraction of the remaining memory,
1977 * unless it's the only way to give this client a slot.
1978 * The chosen fraction is either 1/8 or 1/number of threads,
1979 * whichever is smaller. This ensures there are adequate
1980 * slots to support multiple clients per thread.
1981 * Give the client one slot even if that would require
1982 * over-allocation--it is better than failure.
1983 */
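	/*
	 * Worked example with illustrative numbers: 64 nfsd threads give
	 * a scale_factor of 64; if 8MB of DRC memory remain, this session
	 * may claim at most 8MB / 64 = 128KB (further capped by
	 * NFSD_MAX_MEM_PER_SESSION), so a 2KB slot size yields at most
	 * 64 slots -- and never fewer than one.
	 */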
1984 scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);
1985
1986 avail = clamp_t(unsigned long, avail, slotsize,
1987 total_avail/scale_factor);
1988 num = min_t(int, num, avail / slotsize);
1989 num = max_t(int, num, 1);
1990 nfsd_drc_mem_used += num * slotsize;
1991 spin_unlock(&nfsd_drc_lock);
1992
1993 return num;
1994 }
1995
1996 static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1997 {
1998 int slotsize = slot_bytes(ca);
1999
2000 spin_lock(&nfsd_drc_lock);
2001 nfsd_drc_mem_used -= slotsize * ca->maxreqs;
2002 spin_unlock(&nfsd_drc_lock);
2003 }
2004
2005 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
2006 struct nfsd4_channel_attrs *battrs)
2007 {
2008 int numslots = fattrs->maxreqs;
2009 int slotsize = slot_bytes(fattrs);
2010 struct nfsd4_session *new;
2011 int i;
2012
2013 BUILD_BUG_ON(struct_size(new, se_slots, NFSD_MAX_SLOTS_PER_SESSION)
2014 > PAGE_SIZE);
2015
2016 new = kzalloc(struct_size(new, se_slots, numslots), GFP_KERNEL);
2017 if (!new)
2018 return NULL;
2019 /* allocate each struct nfsd4_slot and data cache in one piece */
2020 for (i = 0; i < numslots; i++) {
2021 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
2022 if (!new->se_slots[i])
2023 goto out_free;
2024 }
2025
2026 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
2027 memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
2028
2029 return new;
2030 out_free:
2031 while (i--)
2032 kfree(new->se_slots[i]);
2033 kfree(new);
2034 return NULL;
2035 }
2036
2037 static void free_conn(struct nfsd4_conn *c)
2038 {
2039 svc_xprt_put(c->cn_xprt);
2040 kfree(c);
2041 }
2042
2043 static void nfsd4_conn_lost(struct svc_xpt_user *u)
2044 {
2045 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
2046 struct nfs4_client *clp = c->cn_session->se_client;
2047
2048 trace_nfsd_cb_lost(clp);
2049
2050 spin_lock(&clp->cl_lock);
2051 if (!list_empty(&c->cn_persession)) {
2052 list_del(&c->cn_persession);
2053 free_conn(c);
2054 }
2055 nfsd4_probe_callback(clp);
2056 spin_unlock(&clp->cl_lock);
2057 }
2058
2059 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
2060 {
2061 struct nfsd4_conn *conn;
2062
2063 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
2064 if (!conn)
2065 return NULL;
2066 svc_xprt_get(rqstp->rq_xprt);
2067 conn->cn_xprt = rqstp->rq_xprt;
2068 conn->cn_flags = flags;
2069 INIT_LIST_HEAD(&conn->cn_xpt_user.list);
2070 return conn;
2071 }
2072
2073 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
2074 {
2075 conn->cn_session = ses;
2076 list_add(&conn->cn_persession, &ses->se_conns);
2077 }
2078
2079 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
2080 {
2081 struct nfs4_client *clp = ses->se_client;
2082
2083 spin_lock(&clp->cl_lock);
2084 __nfsd4_hash_conn(conn, ses);
2085 spin_unlock(&clp->cl_lock);
2086 }
2087
2088 static int nfsd4_register_conn(struct nfsd4_conn *conn)
2089 {
2090 conn->cn_xpt_user.callback = nfsd4_conn_lost;
2091 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
2092 }
2093
2094 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
2095 {
2096 int ret;
2097
2098 nfsd4_hash_conn(conn, ses);
2099 ret = nfsd4_register_conn(conn);
2100 if (ret)
2101 /* oops; xprt is already down: */
2102 nfsd4_conn_lost(&conn->cn_xpt_user);
2103 /* We may have gained or lost a callback channel: */
2104 nfsd4_probe_callback_sync(ses->se_client);
2105 }
2106
2107 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
2108 {
2109 u32 dir = NFS4_CDFC4_FORE;
2110
2111 if (cses->flags & SESSION4_BACK_CHAN)
2112 dir |= NFS4_CDFC4_BACK;
2113 return alloc_conn(rqstp, dir);
2114 }
2115
2116 /* must be called under client_lock */
2117 static void nfsd4_del_conns(struct nfsd4_session *s)
2118 {
2119 struct nfs4_client *clp = s->se_client;
2120 struct nfsd4_conn *c;
2121
2122 spin_lock(&clp->cl_lock);
2123 while (!list_empty(&s->se_conns)) {
2124 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
2125 list_del_init(&c->cn_persession);
2126 spin_unlock(&clp->cl_lock);
2127
2128 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
2129 free_conn(c);
2130
2131 spin_lock(&clp->cl_lock);
2132 }
2133 spin_unlock(&clp->cl_lock);
2134 }
2135
2136 static void __free_session(struct nfsd4_session *ses)
2137 {
2138 free_session_slots(ses);
2139 kfree(ses);
2140 }
2141
2142 static void free_session(struct nfsd4_session *ses)
2143 {
2144 nfsd4_del_conns(ses);
2145 nfsd4_put_drc_mem(&ses->se_fchannel);
2146 __free_session(ses);
2147 }
2148
2149 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
2150 {
2151 int idx;
2152 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2153
2154 new->se_client = clp;
2155 gen_sessionid(new);
2156
2157 INIT_LIST_HEAD(&new->se_conns);
2158
2159 new->se_cb_seq_nr = 1;
2160 new->se_flags = cses->flags;
2161 new->se_cb_prog = cses->callback_prog;
2162 new->se_cb_sec = cses->cb_sec;
2163 atomic_set(&new->se_ref, 0);
2164 idx = hash_sessionid(&new->se_sessionid);
2165 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
2166 spin_lock(&clp->cl_lock);
2167 list_add(&new->se_perclnt, &clp->cl_sessions);
2168 spin_unlock(&clp->cl_lock);
2169
2170 {
2171 struct sockaddr *sa = svc_addr(rqstp);
2172 /*
2173 * This is a little silly; with sessions there's no real
2174 * use for the callback address. Use the peer address
2175 * as a reasonable default for now, but consider fixing
2176 * the rpc client not to require an address in the
2177 * future:
2178 */
2179 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
2180 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
2181 }
2182 }
2183
2184 /* caller must hold client_lock */
2185 static struct nfsd4_session *
2186 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
2187 {
2188 struct nfsd4_session *elem;
2189 int idx;
2190 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2191
2192 lockdep_assert_held(&nn->client_lock);
2193
2194 dump_sessionid(__func__, sessionid);
2195 idx = hash_sessionid(sessionid);
2196 /* Search in the appropriate list */
2197 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
2198 if (!memcmp(elem->se_sessionid.data, sessionid->data,
2199 NFS4_MAX_SESSIONID_LEN)) {
2200 return elem;
2201 }
2202 }
2203
2204 dprintk("%s: session not found\n", __func__);
2205 return NULL;
2206 }
2207
2208 static struct nfsd4_session *
2209 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
2210 __be32 *ret)
2211 {
2212 struct nfsd4_session *session;
2213 __be32 status = nfserr_badsession;
2214
2215 session = __find_in_sessionid_hashtbl(sessionid, net);
2216 if (!session)
2217 goto out;
2218 status = nfsd4_get_session_locked(session);
2219 if (status)
2220 session = NULL;
2221 out:
2222 *ret = status;
2223 return session;
2224 }
2225
2226 /* caller must hold client_lock */
2227 static void
2228 unhash_session(struct nfsd4_session *ses)
2229 {
2230 struct nfs4_client *clp = ses->se_client;
2231 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2232
2233 lockdep_assert_held(&nn->client_lock);
2234
2235 list_del(&ses->se_hash);
2236 spin_lock(&ses->se_client->cl_lock);
2237 list_del(&ses->se_perclnt);
2238 spin_unlock(&ses->se_client->cl_lock);
2239 }
2240
2241 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
2242 static int
2243 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
2244 {
2245 /*
2246 * We're assuming the clid was not given out from a boot
2247 * precisely 2^32 (about 136 years) before this one. That seems
2248 * a safe assumption:
2249 */
2250 if (clid->cl_boot == (u32)nn->boot_time)
2251 return 0;
2252 trace_nfsd_clid_stale(clid);
2253 return 1;
2254 }
2255
2256 /*
2257 * XXX Should we use a slab cache ?
2258 * This type of memory management is somewhat inefficient, but we use it
2259 * anyway since SETCLIENTID is not a common operation.
2260 */
2261 static struct nfs4_client *alloc_client(struct xdr_netobj name,
2262 struct nfsd_net *nn)
2263 {
2264 struct nfs4_client *clp;
2265 int i;
2266
2267 if (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) {
2268 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
2269 return NULL;
2270 }
2271 clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
2272 if (clp == NULL)
2273 return NULL;
2274 xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
2275 if (clp->cl_name.data == NULL)
2276 goto err_no_name;
2277 clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
2278 sizeof(struct list_head),
2279 GFP_KERNEL);
2280 if (!clp->cl_ownerstr_hashtbl)
2281 goto err_no_hashtbl;
2282 clp->cl_callback_wq = alloc_ordered_workqueue("nfsd4_callbacks", 0);
2283 if (!clp->cl_callback_wq)
2284 goto err_no_callback_wq;
2285
2286 for (i = 0; i < OWNER_HASH_SIZE; i++)
2287 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
2288 INIT_LIST_HEAD(&clp->cl_sessions);
2289 idr_init(&clp->cl_stateids);
2290 atomic_set(&clp->cl_rpc_users, 0);
2291 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
2292 clp->cl_state = NFSD4_ACTIVE;
2293 atomic_inc(&nn->nfs4_client_count);
2294 atomic_set(&clp->cl_delegs_in_recall, 0);
2295 INIT_LIST_HEAD(&clp->cl_idhash);
2296 INIT_LIST_HEAD(&clp->cl_openowners);
2297 INIT_LIST_HEAD(&clp->cl_delegations);
2298 INIT_LIST_HEAD(&clp->cl_lru);
2299 INIT_LIST_HEAD(&clp->cl_revoked);
2300 #ifdef CONFIG_NFSD_PNFS
2301 INIT_LIST_HEAD(&clp->cl_lo_states);
2302 #endif
2303 INIT_LIST_HEAD(&clp->async_copies);
2304 spin_lock_init(&clp->async_lock);
2305 spin_lock_init(&clp->cl_lock);
2306 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
2307 return clp;
2308 err_no_callback_wq:
2309 kfree(clp->cl_ownerstr_hashtbl);
2310 err_no_hashtbl:
2311 kfree(clp->cl_name.data);
2312 err_no_name:
2313 kmem_cache_free(client_slab, clp);
2314 return NULL;
2315 }
2316
2317 static void __free_client(struct kref *k)
2318 {
2319 struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
2320 struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);
2321
2322 free_svc_cred(&clp->cl_cred);
2323 destroy_workqueue(clp->cl_callback_wq);
2324 kfree(clp->cl_ownerstr_hashtbl);
2325 kfree(clp->cl_name.data);
2326 kfree(clp->cl_nii_domain.data);
2327 kfree(clp->cl_nii_name.data);
2328 idr_destroy(&clp->cl_stateids);
2329 kfree(clp->cl_ra);
2330 kmem_cache_free(client_slab, clp);
2331 }
2332
2333 static void drop_client(struct nfs4_client *clp)
2334 {
2335 kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
2336 }
2337
2338 static void
2339 free_client(struct nfs4_client *clp)
2340 {
2341 while (!list_empty(&clp->cl_sessions)) {
2342 struct nfsd4_session *ses;
2343 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
2344 se_perclnt);
2345 list_del(&ses->se_perclnt);
2346 WARN_ON_ONCE(atomic_read(&ses->se_ref));
2347 free_session(ses);
2348 }
2349 rpc_destroy_wait_queue(&clp->cl_cb_waitq);
2350 if (clp->cl_nfsd_dentry) {
2351 nfsd_client_rmdir(clp->cl_nfsd_dentry);
2352 clp->cl_nfsd_dentry = NULL;
2353 wake_up_all(&expiry_wq);
2354 }
2355 drop_client(clp);
2356 }
2357
2358 /* must be called under the client_lock */
2359 static void
2360 unhash_client_locked(struct nfs4_client *clp)
2361 {
2362 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2363 struct nfsd4_session *ses;
2364
2365 lockdep_assert_held(&nn->client_lock);
2366
2367 /* Mark the client as expired! */
2368 clp->cl_time = 0;
2369 /* Make it invisible */
2370 if (!list_empty(&clp->cl_idhash)) {
2371 list_del_init(&clp->cl_idhash);
2372 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2373 rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
2374 else
2375 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2376 }
2377 list_del_init(&clp->cl_lru);
2378 spin_lock(&clp->cl_lock);
2379 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
2380 list_del_init(&ses->se_hash);
2381 spin_unlock(&clp->cl_lock);
2382 }
2383
2384 static void
2385 unhash_client(struct nfs4_client *clp)
2386 {
2387 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2388
2389 spin_lock(&nn->client_lock);
2390 unhash_client_locked(clp);
2391 spin_unlock(&nn->client_lock);
2392 }
2393
2394 static __be32 mark_client_expired_locked(struct nfs4_client *clp)
2395 {
2396 int users = atomic_read(&clp->cl_rpc_users);
2397
2398 trace_nfsd_mark_client_expired(clp, users);
2399
2400 if (users)
2401 return nfserr_jukebox;
2402 unhash_client_locked(clp);
2403 return nfs_ok;
2404 }
2405
2406 static void
2407 __destroy_client(struct nfs4_client *clp)
2408 {
2409 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2410 int i;
2411 struct nfs4_openowner *oo;
2412 struct nfs4_delegation *dp;
2413 LIST_HEAD(reaplist);
2414
2415 spin_lock(&state_lock);
2416 while (!list_empty(&clp->cl_delegations)) {
2417 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
2418 unhash_delegation_locked(dp, SC_STATUS_CLOSED);
2419 list_add(&dp->dl_recall_lru, &reaplist);
2420 }
2421 spin_unlock(&state_lock);
2422 while (!list_empty(&reaplist)) {
2423 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
2424 list_del_init(&dp->dl_recall_lru);
2425 destroy_unhashed_deleg(dp);
2426 }
2427 while (!list_empty(&clp->cl_revoked)) {
2428 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
2429 list_del_init(&dp->dl_recall_lru);
2430 nfs4_put_stid(&dp->dl_stid);
2431 }
2432 while (!list_empty(&clp->cl_openowners)) {
2433 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
2434 nfs4_get_stateowner(&oo->oo_owner);
2435 release_openowner(oo);
2436 }
2437 for (i = 0; i < OWNER_HASH_SIZE; i++) {
2438 struct nfs4_stateowner *so, *tmp;
2439
2440 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
2441 so_strhash) {
2442 /* Should be no openowners at this point */
2443 WARN_ON_ONCE(so->so_is_open_owner);
2444 remove_blocked_locks(lockowner(so));
2445 }
2446 }
2447 nfsd4_return_all_client_layouts(clp);
2448 nfsd4_shutdown_copy(clp);
2449 nfsd4_shutdown_callback(clp);
2450 if (clp->cl_cb_conn.cb_xprt)
2451 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
2452 atomic_add_unless(&nn->nfs4_client_count, -1, 0);
2453 nfsd4_dec_courtesy_client_count(nn, clp);
2454 free_client(clp);
2455 wake_up_all(&expiry_wq);
2456 }
2457
2458 static void
2459 destroy_client(struct nfs4_client *clp)
2460 {
2461 unhash_client(clp);
2462 __destroy_client(clp);
2463 }
2464
2465 static void inc_reclaim_complete(struct nfs4_client *clp)
2466 {
2467 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2468
2469 if (!nn->track_reclaim_completes)
2470 return;
2471 if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
2472 return;
2473 if (atomic_inc_return(&nn->nr_reclaim_complete) ==
2474 nn->reclaim_str_hashtbl_size) {
2475 printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
2476 clp->net->ns.inum);
2477 nfsd4_end_grace(nn);
2478 }
2479 }
2480
2481 static void expire_client(struct nfs4_client *clp)
2482 {
2483 unhash_client(clp);
2484 nfsd4_client_record_remove(clp);
2485 __destroy_client(clp);
2486 }
2487
2488 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
2489 {
2490 memcpy(target->cl_verifier.data, source->data,
2491 sizeof(target->cl_verifier.data));
2492 }
2493
2494 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
2495 {
2496 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
2497 target->cl_clientid.cl_id = source->cl_clientid.cl_id;
2498 }
2499
2500 static int copy_cred(struct svc_cred *target, struct svc_cred *source)
2501 {
2502 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
2503 target->cr_raw_principal = kstrdup(source->cr_raw_principal,
2504 GFP_KERNEL);
2505 target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
2506 if ((source->cr_principal && !target->cr_principal) ||
2507 (source->cr_raw_principal && !target->cr_raw_principal) ||
2508 (source->cr_targ_princ && !target->cr_targ_princ))
2509 return -ENOMEM;
2510
2511 target->cr_flavor = source->cr_flavor;
2512 target->cr_uid = source->cr_uid;
2513 target->cr_gid = source->cr_gid;
2514 target->cr_group_info = source->cr_group_info;
2515 get_group_info(target->cr_group_info);
2516 target->cr_gss_mech = source->cr_gss_mech;
2517 if (source->cr_gss_mech)
2518 gss_mech_get(source->cr_gss_mech);
2519 return 0;
2520 }
2521
2522 static int
2523 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
2524 {
2525 if (o1->len < o2->len)
2526 return -1;
2527 if (o1->len > o2->len)
2528 return 1;
2529 return memcmp(o1->data, o2->data, o1->len);
2530 }
2531
2532 static int
2533 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
2534 {
2535 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
2536 }
2537
2538 static int
2539 same_clid(clientid_t *cl1, clientid_t *cl2)
2540 {
2541 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
2542 }
2543
2544 static bool groups_equal(struct group_info *g1, struct group_info *g2)
2545 {
2546 int i;
2547
2548 if (g1->ngroups != g2->ngroups)
2549 return false;
2550 for (i=0; i<g1->ngroups; i++)
2551 if (!gid_eq(g1->gid[i], g2->gid[i]))
2552 return false;
2553 return true;
2554 }
2555
2556 /*
2557 * RFC 3530 language requires clid_inuse be returned when the
2558 * "principal" associated with a requests differs from that previously
2559 * used. We use uid, gid's, and gss principal string as our best
2560 * approximation. We also don't want to allow non-gss use of a client
2561 * established using gss: in theory cr_principal should catch that
2562 * change, but in practice cr_principal can be null even in the gss case
2563 * since gssd doesn't always pass down a principal string.
2564 */
2565 static bool is_gss_cred(struct svc_cred *cr)
2566 {
2567 /* Is cr_flavor one of the gss "pseudoflavors"?: */
2568 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
2569 }
2570
2571
2572 static bool
2573 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
2574 {
2575 if ((is_gss_cred(cr1) != is_gss_cred(cr2))
2576 || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
2577 || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
2578 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
2579 return false;
2580 /* XXX: check that cr_targ_princ fields match ? */
2581 if (cr1->cr_principal == cr2->cr_principal)
2582 return true;
2583 if (!cr1->cr_principal || !cr2->cr_principal)
2584 return false;
2585 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
2586 }
2587
2588 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
2589 {
2590 struct svc_cred *cr = &rqstp->rq_cred;
2591 u32 service;
2592
2593 if (!cr->cr_gss_mech)
2594 return false;
2595 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
2596 return service == RPC_GSS_SVC_INTEGRITY ||
2597 service == RPC_GSS_SVC_PRIVACY;
2598 }
2599
2600 bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
2601 {
2602 struct svc_cred *cr = &rqstp->rq_cred;
2603
2604 if (!cl->cl_mach_cred)
2605 return true;
2606 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
2607 return false;
2608 if (!svc_rqst_integrity_protected(rqstp))
2609 return false;
2610 if (cl->cl_cred.cr_raw_principal)
2611 return 0 == strcmp(cl->cl_cred.cr_raw_principal,
2612 cr->cr_raw_principal);
2613 if (!cr->cr_principal)
2614 return false;
2615 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
2616 }
2617
2618 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
2619 {
2620 __be32 verf[2];
2621
2622 /*
2623 * This is opaque to the client, so no need to byte-swap. Use
2624 * __force to keep sparse happy
2625 */
2626 verf[0] = (__force __be32)(u32)ktime_get_real_seconds();
2627 verf[1] = (__force __be32)nn->clverifier_counter++;
2628 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
2629 }
2630
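/*
 * A clientid pairs the server boot time with a per-net counter, so a
 * clientid issued by a previous server instance fails the
 * STALE_CLIENTID() check above after a reboot.
 */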
2631 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2632 {
2633 clp->cl_clientid.cl_boot = (u32)nn->boot_time;
2634 clp->cl_clientid.cl_id = nn->clientid_counter++;
2635 gen_confirm(clp, nn);
2636 }
2637
2638 static struct nfs4_stid *
2639 find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
2640 {
2641 struct nfs4_stid *ret;
2642
2643 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
2644 if (!ret || !ret->sc_type)
2645 return NULL;
2646 return ret;
2647 }
2648
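/*
 * Look up a stateid by id, accepting it only if its type is among
 * @typemask and every status bit it carries is permitted by
 * @ok_states. A successful lookup takes an sc_count reference that
 * the caller must release with nfs4_put_stid().
 */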
2649 static struct nfs4_stid *
2650 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t,
2651 unsigned short typemask, unsigned short ok_states)
2652 {
2653 struct nfs4_stid *s;
2654
2655 spin_lock(&cl->cl_lock);
2656 s = find_stateid_locked(cl, t);
2657 if (s != NULL) {
2658 if ((s->sc_status & ~ok_states) == 0 &&
2659 (typemask & s->sc_type))
2660 refcount_inc(&s->sc_count);
2661 else
2662 s = NULL;
2663 }
2664 spin_unlock(&cl->cl_lock);
2665 return s;
2666 }
2667
2668 static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
2669 {
2670 struct nfsdfs_client *nc;
2671 nc = get_nfsdfs_client(inode);
2672 if (!nc)
2673 return NULL;
2674 return container_of(nc, struct nfs4_client, cl_nfsdfs);
2675 }
2676
2677 static void seq_quote_mem(struct seq_file *m, char *data, int len)
2678 {
2679 seq_puts(m, "\"");
2680 seq_escape_mem(m, data, len, ESCAPE_HEX | ESCAPE_NAP | ESCAPE_APPEND, "\"\\");
2681 seq_puts(m, "\"");
2682 }
2683
2684 static const char *cb_state2str(int state)
2685 {
2686 switch (state) {
2687 case NFSD4_CB_UP:
2688 return "UP";
2689 case NFSD4_CB_UNKNOWN:
2690 return "UNKNOWN";
2691 case NFSD4_CB_DOWN:
2692 return "DOWN";
2693 case NFSD4_CB_FAULT:
2694 return "FAULT";
2695 }
2696 return "UNDEFINED";
2697 }
2698
2699 static int client_info_show(struct seq_file *m, void *v)
2700 {
2701 struct inode *inode = file_inode(m->file);
2702 struct nfs4_client *clp;
2703 u64 clid;
2704
2705 clp = get_nfsdfs_clp(inode);
2706 if (!clp)
2707 return -ENXIO;
2708 memcpy(&clid, &clp->cl_clientid, sizeof(clid));
2709 seq_printf(m, "clientid: 0x%llx\n", clid);
2710 seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
2711
2712 if (clp->cl_state == NFSD4_COURTESY)
2713 seq_puts(m, "status: courtesy\n");
2714 else if (clp->cl_state == NFSD4_EXPIRABLE)
2715 seq_puts(m, "status: expirable\n");
2716 else if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2717 seq_puts(m, "status: confirmed\n");
2718 else
2719 seq_puts(m, "status: unconfirmed\n");
2720 seq_printf(m, "seconds from last renew: %lld\n",
2721 ktime_get_boottime_seconds() - clp->cl_time);
2722 seq_puts(m, "name: ");
2723 seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
2724 seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
2725 if (clp->cl_nii_domain.data) {
2726 seq_puts(m, "Implementation domain: ");
2727 seq_quote_mem(m, clp->cl_nii_domain.data,
2728 clp->cl_nii_domain.len);
2729 seq_puts(m, "\nImplementation name: ");
2730 seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
2731 seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
2732 clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
2733 }
2734 seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state));
2735 seq_printf(m, "callback address: \"%pISpc\"\n", &clp->cl_cb_conn.cb_addr);
2736 seq_printf(m, "admin-revoked states: %d\n",
2737 atomic_read(&clp->cl_admin_revoked));
2738 drop_client(clp);
2739
2740 return 0;
2741 }
2742
2743 DEFINE_SHOW_ATTRIBUTE(client_info);
2744
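/*
 * The "states" seq_file iterates the client's stateid idr directly,
 * using *pos as the next stateid id to look up; idr_get_next_ul()
 * skips over ids freed between reads, so the walk stays safe while
 * stateids come and go under cl_lock.
 */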
2745 static void *states_start(struct seq_file *s, loff_t *pos)
2746 __acquires(&clp->cl_lock)
2747 {
2748 struct nfs4_client *clp = s->private;
2749 unsigned long id = *pos;
2750 void *ret;
2751
2752 spin_lock(&clp->cl_lock);
2753 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2754 *pos = id;
2755 return ret;
2756 }
2757
2758 static void *states_next(struct seq_file *s, void *v, loff_t *pos)
2759 {
2760 struct nfs4_client *clp = s->private;
2761 unsigned long id = *pos;
2762 void *ret;
2763
2765 id++;
2766 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2767 *pos = id;
2768 return ret;
2769 }
2770
2771 static void states_stop(struct seq_file *s, void *v)
2772 __releases(&clp->cl_lock)
2773 {
2774 struct nfs4_client *clp = s->private;
2775
2776 spin_unlock(&clp->cl_lock);
2777 }
2778
2779 static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
2780 {
2781 seq_printf(s, "filename: \"%pD2\"", f->nf_file);
2782 }
2783
2784 static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
2785 {
2786 struct inode *inode = file_inode(f->nf_file);
2787
2788 seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
2789 MAJOR(inode->i_sb->s_dev),
2790 MINOR(inode->i_sb->s_dev),
2791 inode->i_ino);
2792 }
2793
2794 static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
2795 {
2796 seq_puts(s, "owner: ");
2797 seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
2798 }
2799
2800 static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
2801 {
2802 seq_printf(s, "0x%.8x", stid->si_generation);
2803 seq_printf(s, "%12phN", &stid->si_opaque);
2804 }
2805
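/*
 * A line emitted for an open stateid looks roughly like this
 * (illustrative values):
 *
 *   - 0x00000001aabbccddeeff001122334455: { type: open, access: rw,
 *     deny: --, superblock: "08:02:1234", filename: "/export/file",
 *     owner: "openowner" }
 */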
2806 static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
2807 {
2808 struct nfs4_ol_stateid *ols;
2809 struct nfs4_file *nf;
2810 struct nfsd_file *file;
2811 struct nfs4_stateowner *oo;
2812 unsigned int access, deny;
2813
2814 ols = openlockstateid(st);
2815 oo = ols->st_stateowner;
2816 nf = st->sc_file;
2817
2818 seq_puts(s, "- ");
2819 nfs4_show_stateid(s, &st->sc_stateid);
2820 seq_puts(s, ": { type: open, ");
2821
2822 access = bmap_to_share_mode(ols->st_access_bmap);
2823 deny = bmap_to_share_mode(ols->st_deny_bmap);
2824
2825 seq_printf(s, "access: %s%s, ",
2826 access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2827 access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2828 seq_printf(s, "deny: %s%s, ",
2829 deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2830 deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2831
2832 if (nf) {
2833 spin_lock(&nf->fi_lock);
2834 file = find_any_file_locked(nf);
2835 if (file) {
2836 nfs4_show_superblock(s, file);
2837 seq_puts(s, ", ");
2838 nfs4_show_fname(s, file);
2839 seq_puts(s, ", ");
2840 }
2841 spin_unlock(&nf->fi_lock);
2842 } else
2843 seq_puts(s, "closed, ");
2844 nfs4_show_owner(s, oo);
2845 if (st->sc_status & SC_STATUS_ADMIN_REVOKED)
2846 seq_puts(s, ", admin-revoked");
2847 seq_puts(s, " }\n");
2848 return 0;
2849 }
2850
2851 static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
2852 {
2853 struct nfs4_ol_stateid *ols;
2854 struct nfs4_file *nf;
2855 struct nfsd_file *file;
2856 struct nfs4_stateowner *oo;
2857
2858 ols = openlockstateid(st);
2859 oo = ols->st_stateowner;
2860 nf = st->sc_file;
2861
2862 seq_puts(s, "- ");
2863 nfs4_show_stateid(s, &st->sc_stateid);
2864 seq_puts(s, ": { type: lock, ");
2865
2866 spin_lock(&nf->fi_lock);
2867 file = find_any_file_locked(nf);
2868 if (file) {
2869 /*
2870 * Note: a lock stateid isn't really the same thing as a lock,
2871 * it's the locking state held by one owner on a file, and there
2872 * may be multiple (or no) lock ranges associated with it.
2873 * (The same is true of open stateids.)
2874 */
2875
2876 nfs4_show_superblock(s, file);
2877 /* XXX: open stateid? */
2878 seq_puts(s, ", ");
2879 nfs4_show_fname(s, file);
2880 seq_puts(s, ", ");
2881 }
2882 nfs4_show_owner(s, oo);
2883 if (st->sc_status & SC_STATUS_ADMIN_REVOKED)
2884 seq_puts(s, ", admin-revoked");
2885 seq_puts(s, " }\n");
2886 spin_unlock(&nf->fi_lock);
2887 return 0;
2888 }
2889
2890 static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
2891 {
2892 struct nfs4_delegation *ds;
2893 struct nfs4_file *nf;
2894 struct nfsd_file *file;
2895
2896 ds = delegstateid(st);
2897 nf = st->sc_file;
2898
2899 seq_puts(s, "- ");
2900 nfs4_show_stateid(s, &st->sc_stateid);
2901 seq_puts(s, ": { type: deleg, ");
2902
2903 seq_printf(s, "access: %s",
2904 ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");
2905
2906 /* XXX: lease time, whether it's being recalled. */
2907
2908 spin_lock(&nf->fi_lock);
2909 file = nf->fi_deleg_file;
2910 if (file) {
2911 seq_puts(s, ", ");
2912 nfs4_show_superblock(s, file);
2913 seq_puts(s, ", ");
2914 nfs4_show_fname(s, file);
2915 }
2916 spin_unlock(&nf->fi_lock);
2917 if (st->sc_status & SC_STATUS_ADMIN_REVOKED)
2918 seq_puts(s, ", admin-revoked");
2919 seq_puts(s, " }\n");
2920 return 0;
2921 }
2922
2923 static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
2924 {
2925 struct nfs4_layout_stateid *ls;
2926 struct nfsd_file *file;
2927
2928 ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
2929
2930 seq_puts(s, "- ");
2931 nfs4_show_stateid(s, &st->sc_stateid);
2932 seq_puts(s, ": { type: layout");
2933
2934 /* XXX: What else would be useful? */
2935
2936 spin_lock(&ls->ls_stid.sc_file->fi_lock);
2937 file = ls->ls_file;
2938 if (file) {
2939 seq_puts(s, ", ");
2940 nfs4_show_superblock(s, file);
2941 seq_puts(s, ", ");
2942 nfs4_show_fname(s, file);
2943 }
2944 spin_unlock(&ls->ls_stid.sc_file->fi_lock);
2945 if (st->sc_status & SC_STATUS_ADMIN_REVOKED)
2946 seq_puts(s, ", admin-revoked");
2947 seq_puts(s, " }\n");
2948
2949 return 0;
2950 }
2951
2952 static int states_show(struct seq_file *s, void *v)
2953 {
2954 struct nfs4_stid *st = v;
2955
2956 switch (st->sc_type) {
2957 case SC_TYPE_OPEN:
2958 return nfs4_show_open(s, st);
2959 case SC_TYPE_LOCK:
2960 return nfs4_show_lock(s, st);
2961 case SC_TYPE_DELEG:
2962 return nfs4_show_deleg(s, st);
2963 case SC_TYPE_LAYOUT:
2964 return nfs4_show_layout(s, st);
2965 default:
2966 return 0; /* XXX: or SEQ_SKIP? */
2967 }
2968 /* XXX: copy stateids? */
2969 }
2970
2971 static struct seq_operations states_seq_ops = {
2972 .start = states_start,
2973 .next = states_next,
2974 .stop = states_stop,
2975 .show = states_show
2976 };
2977
2978 static int client_states_open(struct inode *inode, struct file *file)
2979 {
2980 struct seq_file *s;
2981 struct nfs4_client *clp;
2982 int ret;
2983
2984 clp = get_nfsdfs_clp(inode);
2985 if (!clp)
2986 return -ENXIO;
2987
2988 ret = seq_open(file, &states_seq_ops);
2989 if (ret)
2990 return ret;
2991 s = file->private_data;
2992 s->private = clp;
2993 return 0;
2994 }
2995
2996 static int client_opens_release(struct inode *inode, struct file *file)
2997 {
2998 struct seq_file *m = file->private_data;
2999 struct nfs4_client *clp = m->private;
3000
3001 /* XXX: alternatively, we could get/drop in seq start/stop */
3002 drop_client(clp);
3003 return seq_release(inode, file);
3004 }
3005
3006 static const struct file_operations client_states_fops = {
3007 .open = client_states_open,
3008 .read = seq_read,
3009 .llseek = seq_lseek,
3010 .release = client_opens_release,
3011 };
3012
3013 /*
3014 * Normally we refuse to destroy clients that are in use, but here the
3015 * administrator is telling us to just do it. We also want to wait
3016 * so the caller has a guarantee that the client's locks are gone by
3017 * the time the write returns:
3018 */
3019 static void force_expire_client(struct nfs4_client *clp)
3020 {
3021 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
3022 bool already_expired;
3023
3024 trace_nfsd_clid_admin_expired(&clp->cl_clientid);
3025
3026 spin_lock(&nn->client_lock);
3027 clp->cl_time = 0;
3028 spin_unlock(&nn->client_lock);
3029
3030 wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
3031 spin_lock(&nn->client_lock);
3032 already_expired = list_empty(&clp->cl_lru);
3033 if (!already_expired)
3034 unhash_client_locked(clp);
3035 spin_unlock(&nn->client_lock);
3036
3037 if (!already_expired)
3038 expire_client(clp);
3039 else
3040 wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
3041 }
3042
3043 static ssize_t client_ctl_write(struct file *file, const char __user *buf,
3044 size_t size, loff_t *pos)
3045 {
3046 char *data;
3047 struct nfs4_client *clp;
3048
3049 data = simple_transaction_get(file, buf, size);
3050 if (IS_ERR(data))
3051 return PTR_ERR(data);
3052 if (size != 7 || 0 != memcmp(data, "expire\n", 7))
3053 return -EINVAL;
3054 clp = get_nfsdfs_clp(file_inode(file));
3055 if (!clp)
3056 return -ENXIO;
3057 force_expire_client(clp);
3058 drop_client(clp);
3059 return 7;
3060 }
3061
3062 static const struct file_operations client_ctl_fops = {
3063 .write = client_ctl_write,
3064 .release = simple_transaction_release,
3065 };
3066
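/*
 * These files populate each per-client directory that nfsd creates
 * under the nfsdfs mount, conventionally /proc/fs/nfsd/clients/<id>/.
 * For example, an administrator can force-expire client 42 with:
 *
 *	echo expire > /proc/fs/nfsd/clients/42/ctl
 *
 * which returns only once the client's state is gone (see
 * force_expire_client() above).
 */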
3067 static const struct tree_descr client_files[] = {
3068 [0] = {"info", &client_info_fops, S_IRUSR},
3069 [1] = {"states", &client_states_fops, S_IRUSR},
3070 [2] = {"ctl", &client_ctl_fops, S_IWUSR},
3071 [3] = {""},
3072 };
3073
3074 static int
3075 nfsd4_cb_recall_any_done(struct nfsd4_callback *cb,
3076 struct rpc_task *task)
3077 {
3078 trace_nfsd_cb_recall_any_done(cb, task);
3079 switch (task->tk_status) {
3080 case -NFS4ERR_DELAY:
3081 rpc_delay(task, 2 * HZ);
3082 return 0;
3083 default:
3084 return 1;
3085 }
3086 }
3087
3088 static void
3089 nfsd4_cb_recall_any_release(struct nfsd4_callback *cb)
3090 {
3091 struct nfs4_client *clp = cb->cb_clp;
3092
3093 clear_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
3094 drop_client(clp);
3095 }
3096
3097 static int
3098 nfsd4_cb_getattr_done(struct nfsd4_callback *cb, struct rpc_task *task)
3099 {
3100 struct nfs4_cb_fattr *ncf =
3101 container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
3102 struct nfs4_delegation *dp =
3103 container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
3104
3105 trace_nfsd_cb_getattr_done(&dp->dl_stid.sc_stateid, task);
3106 ncf->ncf_cb_status = task->tk_status;
3107 switch (task->tk_status) {
3108 case -NFS4ERR_DELAY:
3109 rpc_delay(task, 2 * HZ);
3110 return 0;
3111 default:
3112 return 1;
3113 }
3114 }
3115
3116 static void
3117 nfsd4_cb_getattr_release(struct nfsd4_callback *cb)
3118 {
3119 struct nfs4_cb_fattr *ncf =
3120 container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
3121 struct nfs4_delegation *dp =
3122 container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
3123
3124 clear_and_wake_up_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags);
3125 nfs4_put_stid(&dp->dl_stid);
3126 }
3127
3128 static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = {
3129 .done = nfsd4_cb_recall_any_done,
3130 .release = nfsd4_cb_recall_any_release,
3131 .opcode = OP_CB_RECALL_ANY,
3132 };
3133
3134 static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops = {
3135 .done = nfsd4_cb_getattr_done,
3136 .release = nfsd4_cb_getattr_release,
3137 .opcode = OP_CB_GETATTR,
3138 };
3139
3140 static void nfs4_cb_getattr(struct nfs4_cb_fattr *ncf)
3141 {
3142 struct nfs4_delegation *dp =
3143 container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
3144
3145 if (test_and_set_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags))
3146 return;
3147 /* set to proper status when nfsd4_cb_getattr_done runs */
3148 ncf->ncf_cb_status = NFS4ERR_IO;
3149
3150 refcount_inc(&dp->dl_stid.sc_count);
3151 nfsd4_run_cb(&ncf->ncf_getattr);
3152 }
3153
3154 static struct nfs4_client *create_client(struct xdr_netobj name,
3155 struct svc_rqst *rqstp, nfs4_verifier *verf)
3156 {
3157 struct nfs4_client *clp;
3158 struct sockaddr *sa = svc_addr(rqstp);
3159 int ret;
3160 struct net *net = SVC_NET(rqstp);
3161 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3162 struct dentry *dentries[ARRAY_SIZE(client_files)];
3163
3164 clp = alloc_client(name, nn);
3165 if (clp == NULL)
3166 return NULL;
3167
3168 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
3169 if (ret) {
3170 free_client(clp);
3171 return NULL;
3172 }
3173 gen_clid(clp, nn);
3174 kref_init(&clp->cl_nfsdfs.cl_ref);
3175 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
3176 clp->cl_time = ktime_get_boottime_seconds();
3177 clear_bit(0, &clp->cl_cb_slot_busy);
3178 copy_verf(clp, verf);
3179 memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
3180 clp->cl_cb_session = NULL;
3181 clp->net = net;
3182 clp->cl_nfsd_dentry = nfsd_client_mkdir(
3183 nn, &clp->cl_nfsdfs,
3184 clp->cl_clientid.cl_id - nn->clientid_base,
3185 client_files, dentries);
3186 clp->cl_nfsd_info_dentry = dentries[0];
3187 if (!clp->cl_nfsd_dentry) {
3188 free_client(clp);
3189 return NULL;
3190 }
3191 clp->cl_ra = kzalloc(sizeof(*clp->cl_ra), GFP_KERNEL);
3192 if (!clp->cl_ra) {
3193 free_client(clp);
3194 return NULL;
3195 }
3196 clp->cl_ra_time = 0;
3197 nfsd4_init_cb(&clp->cl_ra->ra_cb, clp, &nfsd4_cb_recall_any_ops,
3198 NFSPROC4_CLNT_CB_RECALL_ANY);
3199 return clp;
3200 }
3201
3202 static void
3203 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
3204 {
3205 struct rb_node **new = &(root->rb_node), *parent = NULL;
3206 struct nfs4_client *clp;
3207
3208 while (*new) {
3209 clp = rb_entry(*new, struct nfs4_client, cl_namenode);
3210 parent = *new;
3211
3212 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
3213 new = &((*new)->rb_left);
3214 else
3215 new = &((*new)->rb_right);
3216 }
3217
3218 rb_link_node(&new_clp->cl_namenode, parent, new);
3219 rb_insert_color(&new_clp->cl_namenode, root);
3220 }
3221
3222 static struct nfs4_client *
3223 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
3224 {
3225 int cmp;
3226 struct rb_node *node = root->rb_node;
3227 struct nfs4_client *clp;
3228
3229 while (node) {
3230 clp = rb_entry(node, struct nfs4_client, cl_namenode);
3231 cmp = compare_blob(&clp->cl_name, name);
3232 if (cmp > 0)
3233 node = node->rb_left;
3234 else if (cmp < 0)
3235 node = node->rb_right;
3236 else
3237 return clp;
3238 }
3239 return NULL;
3240 }
3241
3242 static void
3243 add_to_unconfirmed(struct nfs4_client *clp)
3244 {
3245 unsigned int idhashval;
3246 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
3247
3248 lockdep_assert_held(&nn->client_lock);
3249
3250 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
3251 add_clp_to_name_tree(clp, &nn->unconf_name_tree);
3252 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
3253 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
3254 renew_client_locked(clp);
3255 }
3256
3257 static void
3258 move_to_confirmed(struct nfs4_client *clp)
3259 {
3260 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
3261 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
3262
3263 lockdep_assert_held(&nn->client_lock);
3264
3265 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
3266 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
3267 add_clp_to_name_tree(clp, &nn->conf_name_tree);
3268 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
3269 trace_nfsd_clid_confirmed(&clp->cl_clientid);
3270 renew_client_locked(clp);
3271 }
3272
3273 static struct nfs4_client *
3274 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
3275 {
3276 struct nfs4_client *clp;
3277 unsigned int idhashval = clientid_hashval(clid->cl_id);
3278
3279 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
3280 if (same_clid(&clp->cl_clientid, clid)) {
3281 if ((bool)clp->cl_minorversion != sessions)
3282 return NULL;
3283 renew_client_locked(clp);
3284 return clp;
3285 }
3286 }
3287 return NULL;
3288 }
3289
3290 static struct nfs4_client *
3291 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
3292 {
3293 struct list_head *tbl = nn->conf_id_hashtbl;
3294
3295 lockdep_assert_held(&nn->client_lock);
3296 return find_client_in_id_table(tbl, clid, sessions);
3297 }
3298
3299 static struct nfs4_client *
3300 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
3301 {
3302 struct list_head *tbl = nn->unconf_id_hashtbl;
3303
3304 lockdep_assert_held(&nn->client_lock);
3305 return find_client_in_id_table(tbl, clid, sessions);
3306 }
3307
3308 static bool clp_used_exchangeid(struct nfs4_client *clp)
3309 {
3310 return clp->cl_exchange_flags != 0;
3311 }
3312
3313 static struct nfs4_client *
3314 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
3315 {
3316 lockdep_assert_held(&nn->client_lock);
3317 return find_clp_in_name_tree(name, &nn->conf_name_tree);
3318 }
3319
3320 static struct nfs4_client *
3321 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
3322 {
3323 lockdep_assert_held(&nn->client_lock);
3324 return find_clp_in_name_tree(name, &nn->unconf_name_tree);
3325 }
3326
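/*
 * The callback address arrives as a netid ("tcp" or "tcp6") plus a
 * universal address string (RFC 5665), e.g. "192.0.2.1.8.1" encodes
 * 192.0.2.1 port 8 * 256 + 1 = 2049. On any parse failure the address
 * family is left AF_UNSPEC, which simply disables the callback channel.
 */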
3327 static void
3328 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
3329 {
3330 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
3331 struct sockaddr *sa = svc_addr(rqstp);
3332 u32 scopeid = rpc_get_scope_id(sa);
3333 unsigned short expected_family;
3334
3335 /* Currently, we only support tcp and tcp6 for the callback channel */
3336 if (se->se_callback_netid_len == 3 &&
3337 !memcmp(se->se_callback_netid_val, "tcp", 3))
3338 expected_family = AF_INET;
3339 else if (se->se_callback_netid_len == 4 &&
3340 !memcmp(se->se_callback_netid_val, "tcp6", 4))
3341 expected_family = AF_INET6;
3342 else
3343 goto out_err;
3344
3345 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
3346 se->se_callback_addr_len,
3347 (struct sockaddr *)&conn->cb_addr,
3348 sizeof(conn->cb_addr));
3349
3350 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
3351 goto out_err;
3352
3353 if (conn->cb_addr.ss_family == AF_INET6)
3354 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
3355
3356 conn->cb_prog = se->se_callback_prog;
3357 conn->cb_ident = se->se_callback_ident;
3358 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
3359 trace_nfsd_cb_args(clp, conn);
3360 return;
3361 out_err:
3362 conn->cb_addr.ss_family = AF_UNSPEC;
3363 conn->cb_addrlen = 0;
3364 trace_nfsd_cb_nodelegs(clp);
3365 return;
3366 }
3367
3368 /*
3369 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
3370 */
3371 static void
3372 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
3373 {
3374 struct xdr_buf *buf = resp->xdr->buf;
3375 struct nfsd4_slot *slot = resp->cstate.slot;
3376 unsigned int base;
3377
3378 dprintk("--> %s slot %p\n", __func__, slot);
3379
3380 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
3381 slot->sl_opcnt = resp->opcnt;
3382 slot->sl_status = resp->cstate.status;
3383 free_svc_cred(&slot->sl_cred);
3384 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
3385
3386 if (!nfsd4_cache_this(resp)) {
3387 slot->sl_flags &= ~NFSD4_SLOT_CACHED;
3388 return;
3389 }
3390 slot->sl_flags |= NFSD4_SLOT_CACHED;
3391
3392 base = resp->cstate.data_offset;
3393 slot->sl_datalen = buf->len - base;
3394 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
3395 WARN(1, "%s: sessions DRC could not cache compound\n",
3396 __func__);
3397 return;
3398 }
3399
3400 /*
3401 * Encode the replay sequence operation from the slot values.
3402 * If cachethis is FALSE, encode the uncached-reply error on the next
3403 * operation, which sets resp->p and increments resp->opcnt for
3404 * nfs4svc_encode_compoundres.
3405 *
3406 */
3407 static __be32
3408 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
3409 struct nfsd4_compoundres *resp)
3410 {
3411 struct nfsd4_op *op;
3412 struct nfsd4_slot *slot = resp->cstate.slot;
3413
3414 /* Encode the replayed sequence operation */
3415 op = &args->ops[resp->opcnt - 1];
3416 nfsd4_encode_operation(resp, op);
3417
3418 if (slot->sl_flags & NFSD4_SLOT_CACHED)
3419 return op->status;
3420 if (args->opcnt == 1) {
3421 /*
3422 * The original operation wasn't a solo sequence--we
3423 * always cache those--so this retry must not match the
3424 * original:
3425 */
3426 op->status = nfserr_seq_false_retry;
3427 } else {
3428 op = &args->ops[resp->opcnt++];
3429 op->status = nfserr_retry_uncached_rep;
3430 nfsd4_encode_operation(resp, op);
3431 }
3432 return op->status;
3433 }
3434
3435 /*
3436 * The sequence operation is not cached because we can use the slot and
3437 * session values.
3438 */
3439 static __be32
3440 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
3441 struct nfsd4_sequence *seq)
3442 {
3443 struct nfsd4_slot *slot = resp->cstate.slot;
3444 struct xdr_stream *xdr = resp->xdr;
3445 __be32 *p;
3446 __be32 status;
3447
3448 dprintk("--> %s slot %p\n", __func__, slot);
3449
3450 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
3451 if (status)
3452 return status;
3453
3454 p = xdr_reserve_space(xdr, slot->sl_datalen);
3455 if (!p) {
3456 WARN_ON_ONCE(1);
3457 return nfserr_serverfault;
3458 }
3459 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
3460 xdr_commit_encode(xdr);
3461
3462 resp->opcnt = slot->sl_opcnt;
3463 return slot->sl_status;
3464 }
3465
3466 /*
3467 * Set the exchange_id flags returned by the server.
3468 */
3469 static void
3470 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
3471 {
3472 #ifdef CONFIG_NFSD_PNFS
3473 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
3474 #else
3475 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
3476 #endif
3477
3478 /* Referrals are supported, Migration is not. */
3479 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
3480
3481 /* set the wire flags to return to the client. */
3482 clid->flags = new->cl_exchange_flags;
3483 }
3484
3485 static bool client_has_openowners(struct nfs4_client *clp)
3486 {
3487 struct nfs4_openowner *oo;
3488
3489 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
3490 if (!list_empty(&oo->oo_owner.so_stateids))
3491 return true;
3492 }
3493 return false;
3494 }
3495
3496 static bool client_has_state(struct nfs4_client *clp)
3497 {
3498 return client_has_openowners(clp)
3499 #ifdef CONFIG_NFSD_PNFS
3500 || !list_empty(&clp->cl_lo_states)
3501 #endif
3502 || !list_empty(&clp->cl_delegations)
3503 || !list_empty(&clp->cl_sessions)
3504 || !list_empty(&clp->async_copies);
3505 }
3506
3507 static __be32 copy_impl_id(struct nfs4_client *clp,
3508 struct nfsd4_exchange_id *exid)
3509 {
3510 if (!exid->nii_domain.data)
3511 return 0;
3512 xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
3513 if (!clp->cl_nii_domain.data)
3514 return nfserr_jukebox;
3515 xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
3516 if (!clp->cl_nii_name.data)
3517 return nfserr_jukebox;
3518 clp->cl_nii_time = exid->nii_time;
3519 return 0;
3520 }
3521
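/*
 * nfsd4_exchange_id - process an NFSv4.1+ EXCHANGE_ID operation
 *
 * Resolves the presented client name against any confirmed or
 * unconfirmed record, following the cases of RFC 5661, Section
 * 18.35.4 (update, credential mismatch, verifier mismatch on client
 * reboot, retry, and fresh client ID).
 */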
3522 __be32
3523 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3524 union nfsd4_op_u *u)
3525 {
3526 struct nfsd4_exchange_id *exid = &u->exchange_id;
3527 struct nfs4_client *conf, *new;
3528 struct nfs4_client *unconf = NULL;
3529 __be32 status;
3530 char addr_str[INET6_ADDRSTRLEN];
3531 nfs4_verifier verf = exid->verifier;
3532 struct sockaddr *sa = svc_addr(rqstp);
3533 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
3534 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3535
3536 rpc_ntop(sa, addr_str, sizeof(addr_str));
3537 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
3538 "ip_addr=%s flags %x, spa_how %u\n",
3539 __func__, rqstp, exid, exid->clname.len, exid->clname.data,
3540 addr_str, exid->flags, exid->spa_how);
3541
3542 if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
3543 return nfserr_inval;
3544
3545 new = create_client(exid->clname, rqstp, &verf);
3546 if (new == NULL)
3547 return nfserr_jukebox;
3548 status = copy_impl_id(new, exid);
3549 if (status)
3550 goto out_nolock;
3551
3552 switch (exid->spa_how) {
3553 case SP4_MACH_CRED:
3554 exid->spo_must_enforce[0] = 0;
3555 exid->spo_must_enforce[1] = (
3556 1 << (OP_BIND_CONN_TO_SESSION - 32) |
3557 1 << (OP_EXCHANGE_ID - 32) |
3558 1 << (OP_CREATE_SESSION - 32) |
3559 1 << (OP_DESTROY_SESSION - 32) |
3560 1 << (OP_DESTROY_CLIENTID - 32));
3561
3562 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
3563 1 << (OP_OPEN_DOWNGRADE) |
3564 1 << (OP_LOCKU) |
3565 1 << (OP_DELEGRETURN));
3566
3567 exid->spo_must_allow[1] &= (
3568 1 << (OP_TEST_STATEID - 32) |
3569 1 << (OP_FREE_STATEID - 32));
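/*
 * Note: these operation bitmaps are split into 32-bit words; bits
 * for ops 0-31 live in word [0] and bits for ops 32-63 in word [1],
 * hence the "- 32" for the opcodes numbered 32 and above.
 */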
3570 if (!svc_rqst_integrity_protected(rqstp)) {
3571 status = nfserr_inval;
3572 goto out_nolock;
3573 }
3574 /*
3575 * Sometimes userspace doesn't give us a principal.
3576 * Which is a bug, really. Anyway, we can't enforce
3577 * MACH_CRED in that case, better to give up now:
3578 */
3579 if (!new->cl_cred.cr_principal &&
3580 !new->cl_cred.cr_raw_principal) {
3581 status = nfserr_serverfault;
3582 goto out_nolock;
3583 }
3584 new->cl_mach_cred = true;
3585 break;
3586 case SP4_NONE:
3587 break;
3588 default: /* checked by xdr code */
3589 WARN_ON_ONCE(1);
3590 fallthrough;
3591 case SP4_SSV:
3592 status = nfserr_encr_alg_unsupp;
3593 goto out_nolock;
3594 }
3595
3596 /* Cases below refer to RFC 5661, Section 18.35.4: */
3597 spin_lock(&nn->client_lock);
3598 conf = find_confirmed_client_by_name(&exid->clname, nn);
3599 if (conf) {
3600 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
3601 bool verfs_match = same_verf(&verf, &conf->cl_verifier);
3602
3603 if (update) {
3604 if (!clp_used_exchangeid(conf)) { /* buggy client */
3605 status = nfserr_inval;
3606 goto out;
3607 }
3608 if (!nfsd4_mach_creds_match(conf, rqstp)) {
3609 status = nfserr_wrong_cred;
3610 goto out;
3611 }
3612 if (!creds_match) { /* case 9 */
3613 status = nfserr_perm;
3614 goto out;
3615 }
3616 if (!verfs_match) { /* case 8 */
3617 status = nfserr_not_same;
3618 goto out;
3619 }
3620 /* case 6 */
3621 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
3622 trace_nfsd_clid_confirmed_r(conf);
3623 goto out_copy;
3624 }
3625 if (!creds_match) { /* case 3 */
3626 if (client_has_state(conf)) {
3627 status = nfserr_clid_inuse;
3628 trace_nfsd_clid_cred_mismatch(conf, rqstp);
3629 goto out;
3630 }
3631 goto out_new;
3632 }
3633 if (verfs_match) { /* case 2 */
3634 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
3635 trace_nfsd_clid_confirmed_r(conf);
3636 goto out_copy;
3637 }
3638 /* case 5, client reboot */
3639 trace_nfsd_clid_verf_mismatch(conf, rqstp, &verf);
3640 conf = NULL;
3641 goto out_new;
3642 }
3643
3644 if (update) { /* case 7 */
3645 status = nfserr_noent;
3646 goto out;
3647 }
3648
3649 unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
3650 if (unconf) /* case 4, possible retry or client restart */
3651 unhash_client_locked(unconf);
3652
3653 /* case 1, new owner ID */
3654 trace_nfsd_clid_fresh(new);
3655
3656 out_new:
3657 if (conf) {
3658 status = mark_client_expired_locked(conf);
3659 if (status)
3660 goto out;
3661 trace_nfsd_clid_replaced(&conf->cl_clientid);
3662 }
3663 new->cl_minorversion = cstate->minorversion;
3664 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
3665 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
3666
3667 /* Contrived initial CREATE_SESSION response */
3668 new->cl_cs_slot.sl_status = nfserr_seq_misordered;
3669
3670 add_to_unconfirmed(new);
3671 swap(new, conf);
3672 out_copy:
3673 exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
3674 exid->clientid.cl_id = conf->cl_clientid.cl_id;
3675
3676 exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
3677 nfsd4_set_ex_flags(conf, exid);
3678
3679 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
3680 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
3681 status = nfs_ok;
3682
3683 out:
3684 spin_unlock(&nn->client_lock);
3685 out_nolock:
3686 if (new)
3687 expire_client(new);
3688 if (unconf) {
3689 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
3690 expire_client(unconf);
3691 }
3692 return status;
3693 }
3694
3695 static __be32 check_slot_seqid(u32 seqid, u32 slot_seqid, bool slot_inuse)
3696 {
3697 /* The slot is in use, and no response has been sent. */
3698 if (slot_inuse) {
3699 if (seqid == slot_seqid)
3700 return nfserr_jukebox;
3701 else
3702 return nfserr_seq_misordered;
3703 }
3704 /* Note unsigned 32-bit arithmetic handles wraparound: */
3705 if (likely(seqid == slot_seqid + 1))
3706 return nfs_ok;
3707 if (seqid == slot_seqid)
3708 return nfserr_replay_cache;
3709 return nfserr_seq_misordered;
3710 }
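/*
 * Example: with slot_seqid == 5 and the slot idle, an incoming seqid
 * of 6 starts new processing (nfs_ok), 5 is a replay served from the
 * reply cache, and anything else is SEQ_MISORDERED. The unsigned
 * arithmetic above makes 0 the legitimate successor of 0xffffffff.
 */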
3711
3712 /*
3713 * Cache the create session result into the create session single DRC
3714 * slot cache by saving the xdr structure. sl_seqid has been set.
3715 * Do this for solo or embedded create session operations.
3716 */
3717 static void
3718 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
3719 struct nfsd4_clid_slot *slot, __be32 nfserr)
3720 {
3721 slot->sl_status = nfserr;
3722 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
3723 }
3724
3725 static __be32
3726 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
3727 struct nfsd4_clid_slot *slot)
3728 {
3729 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
3730 return slot->sl_status;
3731 }
3732
3733 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
3734 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
3735 1 + /* minimal tag: a zero-length string, so only the length word */ \
3736 3 + /* version, opcount, opcode */ \
3737 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3738 /* seqid, slotID, slotID, cache */ \
3739 4 ) * sizeof(__be32))
3740
3741 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
3742 2 + /* verifier: AUTH_NULL, length 0 */\
3743 1 + /* status */ \
3744 1 + /* minimal tag: a zero-length string, so only the length word */ \
3745 3 + /* opcount, opcode, opstatus*/ \
3746 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3747 /* seqid, slotID, slotID, slotID, status */ \
3748 5 ) * sizeof(__be32))
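/*
 * For reference: NFS4_MAX_SESSIONID_LEN is 16 bytes, so
 * XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) is 4 and each of the two
 * minima above works out to 16 XDR words, i.e. 64 bytes.
 */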
3749
3750 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
3751 {
3752 u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
3753
3754 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
3755 return nfserr_toosmall;
3756 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
3757 return nfserr_toosmall;
3758 ca->headerpadsz = 0;
3759 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
3760 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
3761 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
3762 ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
3763 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
3764 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
3765 /*
3766 * Note that decreasing the slot size below the client's request
3767 * may make it difficult for the client to function correctly,
3768 * whereas decreasing the number of slots should only affect
3769 * performance. When short on memory we therefore prefer to
3770 * decrease the number of slots rather than their size. Clients
3771 * that request larger slots than they need will get poor results.
3772 * Note that we always allow at least one slot, because our
3773 * accounting is soft and provides no guarantees either way.
3774 */
3775 ca->maxreqs = nfsd4_get_drc_mem(ca, nn);
3776
3777 return nfs_ok;
3778 }
3779
3780 /*
3781 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
3782 * These are based on similar macros in linux/sunrpc/msg_prot.h .
3783 */
3784 #define RPC_MAX_HEADER_WITH_AUTH_SYS \
3785 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
3786
3787 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
3788 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
3789
3790 #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
3791 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
3792 #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
3793 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
3794 sizeof(__be32))
3795
3796 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
3797 {
3798 ca->headerpadsz = 0;
3799
3800 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
3801 return nfserr_toosmall;
3802 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
3803 return nfserr_toosmall;
3804 ca->maxresp_cached = 0;
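/* A CB_COMPOUND needs room for CB_SEQUENCE plus at least one more op */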
3805 if (ca->maxops < 2)
3806 return nfserr_toosmall;
3807
3808 return nfs_ok;
3809 }
3810
3811 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
3812 {
3813 switch (cbs->flavor) {
3814 case RPC_AUTH_NULL:
3815 case RPC_AUTH_UNIX:
3816 return nfs_ok;
3817 default:
3818 /*
3819 * GSS case: the spec doesn't allow us to return this
3820 * error. But it also doesn't allow us not to support
3821 * GSS.
3822 * I'd rather this fail hard than return some error the
3823 * client might think it can already handle:
3824 */
3825 return nfserr_encr_alg_unsupp;
3826 }
3827 }
3828
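/*
 * nfsd4_create_session - process an NFSv4.1 CREATE_SESSION operation
 *
 * Validates the requested channel attributes, then walks the four
 * phases of RFC 8881, Section 18.36.4: client record lookup, slot
 * sequence-ID processing, client ID confirmation, and session
 * creation proper.
 */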
3829 __be32
3830 nfsd4_create_session(struct svc_rqst *rqstp,
3831 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3832 {
3833 struct nfsd4_create_session *cr_ses = &u->create_session;
3834 struct sockaddr *sa = svc_addr(rqstp);
3835 struct nfs4_client *conf, *unconf;
3836 struct nfsd4_clid_slot *cs_slot;
3837 struct nfs4_client *old = NULL;
3838 struct nfsd4_session *new;
3839 struct nfsd4_conn *conn;
3840 __be32 status = 0;
3841 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3842
3843 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
3844 return nfserr_inval;
3845 status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
3846 if (status)
3847 return status;
3848 status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
3849 if (status)
3850 return status;
3851 status = check_backchannel_attrs(&cr_ses->back_channel);
3852 if (status)
3853 goto out_release_drc_mem;
3854 status = nfserr_jukebox;
3855 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
3856 if (!new)
3857 goto out_release_drc_mem;
3858 conn = alloc_conn_from_crses(rqstp, cr_ses);
3859 if (!conn)
3860 goto out_free_session;
3861
3862 spin_lock(&nn->client_lock);
3863
3864 /* RFC 8881 Section 18.36.4 Phase 1: Client record look-up. */
3865 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
3866 conf = find_confirmed_client(&cr_ses->clientid, true, nn);
3867 if (!conf && !unconf) {
3868 status = nfserr_stale_clientid;
3869 goto out_free_conn;
3870 }
3871
3872 /* RFC 8881 Section 18.36.4 Phase 2: Sequence ID processing. */
3873 if (conf) {
3874 cs_slot = &conf->cl_cs_slot;
3875 trace_nfsd_slot_seqid_conf(conf, cr_ses);
3876 } else {
3877 cs_slot = &unconf->cl_cs_slot;
3878 trace_nfsd_slot_seqid_unconf(unconf, cr_ses);
3879 }
3880 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3881 switch (status) {
3882 case nfs_ok:
3883 cs_slot->sl_seqid++;
3884 cr_ses->seqid = cs_slot->sl_seqid;
3885 break;
3886 case nfserr_replay_cache:
3887 status = nfsd4_replay_create_session(cr_ses, cs_slot);
3888 fallthrough;
3889 case nfserr_jukebox:
3890 /* The server MUST NOT cache NFS4ERR_DELAY */
3891 goto out_free_conn;
3892 default:
3893 goto out_cache_error;
3894 }
3895
3896 /* RFC 8881 Section 18.36.4 Phase 3: Client ID confirmation. */
3897 if (conf) {
3898 status = nfserr_wrong_cred;
3899 if (!nfsd4_mach_creds_match(conf, rqstp))
3900 goto out_cache_error;
3901 } else {
3902 status = nfserr_clid_inuse;
3903 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
3904 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
3905 trace_nfsd_clid_cred_mismatch(unconf, rqstp);
3906 goto out_cache_error;
3907 }
3908 status = nfserr_wrong_cred;
3909 if (!nfsd4_mach_creds_match(unconf, rqstp))
3910 goto out_cache_error;
3911 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3912 if (old) {
3913 status = mark_client_expired_locked(old);
3914 if (status)
3915 goto out_expired_error;
3916 trace_nfsd_clid_replaced(&old->cl_clientid);
3917 }
3918 move_to_confirmed(unconf);
3919 conf = unconf;
3920 }
3921
3922 /* RFC 8881 Section 18.36.4 Phase 4: Session creation. */
3923 status = nfs_ok;
3924 /* Persistent sessions are not supported */
3925 cr_ses->flags &= ~SESSION4_PERSIST;
3926 /* Upshifting from TCP to RDMA is not supported */
3927 cr_ses->flags &= ~SESSION4_RDMA;
3928
3929 init_session(rqstp, new, conf, cr_ses);
3930 nfsd4_get_session_locked(new);
3931
3932 memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
3933 NFS4_MAX_SESSIONID_LEN);
3934
3935 /* cache solo and embedded create sessions under the client_lock */
3936 nfsd4_cache_create_session(cr_ses, cs_slot, status);
3937 spin_unlock(&nn->client_lock);
3938 if (conf == unconf)
3939 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
3940 /* init connection and backchannel */
3941 nfsd4_init_conn(rqstp, conn, new);
3942 nfsd4_put_session(new);
3943 if (old)
3944 expire_client(old);
3945 return status;
3946
3947 out_expired_error:
3948 old = NULL;
3949 /*
3950 * Revert the slot seq_nr change so the server will process
3951 * the client's resend instead of returning a cached response.
3952 */
3953 if (status == nfserr_jukebox) {
3954 cs_slot->sl_seqid--;
3955 cr_ses->seqid = cs_slot->sl_seqid;
3956 goto out_free_conn;
3957 }
3958 out_cache_error:
3959 nfsd4_cache_create_session(cr_ses, cs_slot, status);
3960 out_free_conn:
3961 spin_unlock(&nn->client_lock);
3962 free_conn(conn);
3963 if (old)
3964 expire_client(old);
3965 out_free_session:
3966 __free_session(new);
3967 out_release_drc_mem:
3968 nfsd4_put_drc_mem(&cr_ses->fore_channel);
3969 return status;
3970 }
3971
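/*
 * BIND_CONN_TO_SESSION lets the client request the fore direction,
 * the back direction, or "either or both"; we take the liberty of
 * upgrading the OR_BOTH variants to both channels.
 */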
3972 static __be32 nfsd4_map_bcts_dir(u32 *dir)
3973 {
3974 switch (*dir) {
3975 case NFS4_CDFC4_FORE:
3976 case NFS4_CDFC4_BACK:
3977 return nfs_ok;
3978 case NFS4_CDFC4_FORE_OR_BOTH:
3979 case NFS4_CDFC4_BACK_OR_BOTH:
3980 *dir = NFS4_CDFC4_BOTH;
3981 return nfs_ok;
3982 }
3983 return nfserr_inval;
3984 }
3985
3986 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
3987 struct nfsd4_compound_state *cstate,
3988 union nfsd4_op_u *u)
3989 {
3990 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
3991 struct nfsd4_session *session = cstate->session;
3992 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3993 __be32 status;
3994
3995 status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
3996 if (status)
3997 return status;
3998 spin_lock(&nn->client_lock);
3999 session->se_cb_prog = bc->bc_cb_program;
4000 session->se_cb_sec = bc->bc_cb_sec;
4001 spin_unlock(&nn->client_lock);
4002
4003 nfsd4_probe_callback(session->se_client);
4004
4005 return nfs_ok;
4006 }
4007
4008 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
4009 {
4010 struct nfsd4_conn *c;
4011
4012 list_for_each_entry(c, &s->se_conns, cn_persession) {
4013 if (c->cn_xprt == xpt) {
4014 return c;
4015 }
4016 }
4017 return NULL;
4018 }
4019
4020 static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
4021 struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
4022 {
4023 struct nfs4_client *clp = session->se_client;
4024 struct svc_xprt *xpt = rqst->rq_xprt;
4025 struct nfsd4_conn *c;
4026 __be32 status;
4027
4028 /* Following the last paragraph of RFC 5661 Section 18.34.3: */
4029 spin_lock(&clp->cl_lock);
4030 c = __nfsd4_find_conn(xpt, session);
4031 if (!c)
4032 status = nfserr_noent;
4033 else if (req == c->cn_flags)
4034 status = nfs_ok;
4035 else if (req == NFS4_CDFC4_FORE_OR_BOTH &&
4036 c->cn_flags != NFS4_CDFC4_BACK)
4037 status = nfs_ok;
4038 else if (req == NFS4_CDFC4_BACK_OR_BOTH &&
4039 c->cn_flags != NFS4_CDFC4_FORE)
4040 status = nfs_ok;
4041 else
4042 status = nfserr_inval;
4043 spin_unlock(&clp->cl_lock);
4044 if (status == nfs_ok && conn)
4045 *conn = c;
4046 return status;
4047 }
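/*
 * In other words: an exact match always succeeds, FORE_OR_BOTH is
 * satisfied by a connection already bound fore or both ways, and
 * BACK_OR_BOTH by one already bound back or both ways.
 */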
4048
4049 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
4050 struct nfsd4_compound_state *cstate,
4051 union nfsd4_op_u *u)
4052 {
4053 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
4054 __be32 status;
4055 struct nfsd4_conn *conn;
4056 struct nfsd4_session *session;
4057 struct net *net = SVC_NET(rqstp);
4058 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4059
4060 if (!nfsd4_last_compound_op(rqstp))
4061 return nfserr_not_only_op;
4062 spin_lock(&nn->client_lock);
4063 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
4064 spin_unlock(&nn->client_lock);
4065 if (!session)
4066 goto out_no_session;
4067 status = nfserr_wrong_cred;
4068 if (!nfsd4_mach_creds_match(session->se_client, rqstp))
4069 goto out;
4070 status = nfsd4_match_existing_connection(rqstp, session,
4071 bcts->dir, &conn);
4072 if (status == nfs_ok) {
4073 if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||
4074 bcts->dir == NFS4_CDFC4_BACK)
4075 conn->cn_flags |= NFS4_CDFC4_BACK;
4076 nfsd4_probe_callback(session->se_client);
4077 goto out;
4078 }
4079 if (status == nfserr_inval)
4080 goto out;
4081 status = nfsd4_map_bcts_dir(&bcts->dir);
4082 if (status)
4083 goto out;
4084 conn = alloc_conn(rqstp, bcts->dir);
4085 status = nfserr_jukebox;
4086 if (!conn)
4087 goto out;
4088 nfsd4_init_conn(rqstp, conn, session);
4089 status = nfs_ok;
4090 out:
4091 nfsd4_put_session(session);
4092 out_no_session:
4093 return status;
4094 }
4095
4096 static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
4097 {
4098 if (!cstate->session)
4099 return false;
4100 return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
4101 }
4102
4103 __be32
4104 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
4105 union nfsd4_op_u *u)
4106 {
4107 struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
4108 struct nfsd4_session *ses;
4109 __be32 status;
4110 int ref_held_by_me = 0;
4111 struct net *net = SVC_NET(r);
4112 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4113
4114 status = nfserr_not_only_op;
4115 if (nfsd4_compound_in_session(cstate, sessionid)) {
4116 if (!nfsd4_last_compound_op(r))
4117 goto out;
4118 ref_held_by_me++;
4119 }
4120 dump_sessionid(__func__, sessionid);
4121 spin_lock(&nn->client_lock);
4122 ses = find_in_sessionid_hashtbl(sessionid, net, &status);
4123 if (!ses)
4124 goto out_client_lock;
4125 status = nfserr_wrong_cred;
4126 if (!nfsd4_mach_creds_match(ses->se_client, r))
4127 goto out_put_session;
4128 status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
4129 if (status)
4130 goto out_put_session;
4131 unhash_session(ses);
4132 spin_unlock(&nn->client_lock);
4133
4134 nfsd4_probe_callback_sync(ses->se_client);
4135
4136 spin_lock(&nn->client_lock);
4137 status = nfs_ok;
4138 out_put_session:
4139 nfsd4_put_session_locked(ses);
4140 out_client_lock:
4141 spin_unlock(&nn->client_lock);
4142 out:
4143 return status;
4144 }
4145
4146 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
4147 {
4148 struct nfs4_client *clp = ses->se_client;
4149 struct nfsd4_conn *c;
4150 __be32 status = nfs_ok;
4151 int ret;
4152
4153 spin_lock(&clp->cl_lock);
4154 c = __nfsd4_find_conn(new->cn_xprt, ses);
4155 if (c)
4156 goto out_free;
4157 status = nfserr_conn_not_bound_to_session;
4158 if (clp->cl_mach_cred)
4159 goto out_free;
4160 __nfsd4_hash_conn(new, ses);
4161 spin_unlock(&clp->cl_lock);
4162 ret = nfsd4_register_conn(new);
4163 if (ret)
4164 /* oops; xprt is already down: */
4165 nfsd4_conn_lost(&new->cn_xpt_user);
4166 return nfs_ok;
4167 out_free:
4168 spin_unlock(&clp->cl_lock);
4169 free_conn(new);
4170 return status;
4171 }
4172
4173 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
4174 {
4175 struct nfsd4_compoundargs *args = rqstp->rq_argp;
4176
4177 return args->opcnt > session->se_fchannel.maxops;
4178 }
4179
4180 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
4181 struct nfsd4_session *session)
4182 {
4183 struct xdr_buf *xb = &rqstp->rq_arg;
4184
4185 return xb->len > session->se_fchannel.maxreq_sz;
4186 }
4187
4188 static bool replay_matches_cache(struct svc_rqst *rqstp,
4189 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
4190 {
4191 struct nfsd4_compoundargs *argp = rqstp->rq_argp;
4192
4193 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
4194 (bool)seq->cachethis)
4195 return false;
4196 /*
4197 * If there's an error then the reply can have fewer ops than
4198 * the call.
4199 */
4200 if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
4201 return false;
4202 /*
4203 * But if we cached a reply with *more* ops than the call you're
4204 * sending us now, then this new call is clearly not really a
4205 * replay of the old one:
4206 */
4207 if (slot->sl_opcnt > argp->opcnt)
4208 return false;
4209 /* This is the only check explicitly called by spec: */
4210 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
4211 return false;
4212 /*
4213 * There may be more comparisons we could actually do, but the
4214 * spec doesn't require us to catch every case where the calls
4215 * don't match (that would require caching the call as well as
4216 * the reply), so we don't bother.
4217 */
4218 return true;
4219 }
4220
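/*
 * nfsd4_sequence - process an NFSv4.1 SEQUENCE operation
 *
 * Validates the session, slot and sequence ID, binds the connection
 * to the session if necessary, and either begins processing a new
 * compound or replays the cached reply for a retransmitted one.
 */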
4221 __be32
4222 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4223 union nfsd4_op_u *u)
4224 {
4225 struct nfsd4_sequence *seq = &u->sequence;
4226 struct nfsd4_compoundres *resp = rqstp->rq_resp;
4227 struct xdr_stream *xdr = resp->xdr;
4228 struct nfsd4_session *session;
4229 struct nfs4_client *clp;
4230 struct nfsd4_slot *slot;
4231 struct nfsd4_conn *conn;
4232 __be32 status;
4233 int buflen;
4234 struct net *net = SVC_NET(rqstp);
4235 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4236
4237 if (resp->opcnt != 1)
4238 return nfserr_sequence_pos;
4239
4240 /*
4241 * Will be either used or freed by nfsd4_sequence_check_conn
4242 * below.
4243 */
4244 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
4245 if (!conn)
4246 return nfserr_jukebox;
4247
4248 spin_lock(&nn->client_lock);
4249 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
4250 if (!session)
4251 goto out_no_session;
4252 clp = session->se_client;
4253
4254 status = nfserr_too_many_ops;
4255 if (nfsd4_session_too_many_ops(rqstp, session))
4256 goto out_put_session;
4257
4258 status = nfserr_req_too_big;
4259 if (nfsd4_request_too_big(rqstp, session))
4260 goto out_put_session;
4261
4262 status = nfserr_badslot;
4263 if (seq->slotid >= session->se_fchannel.maxreqs)
4264 goto out_put_session;
4265
4266 slot = session->se_slots[seq->slotid];
4267 dprintk("%s: slotid %d\n", __func__, seq->slotid);
4268
4269 /* We do not negotiate the number of slots yet, so set the
4270 * maxslots to the session maxreqs, which is used to encode
4271 * sr_highest_slotid and sr_target_highest_slotid to maxslots */
4272 seq->maxslots = session->se_fchannel.maxreqs;
4273
4274 trace_nfsd_slot_seqid_sequence(clp, seq, slot);
4275 status = check_slot_seqid(seq->seqid, slot->sl_seqid,
4276 slot->sl_flags & NFSD4_SLOT_INUSE);
4277 if (status == nfserr_replay_cache) {
4278 status = nfserr_seq_misordered;
4279 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
4280 goto out_put_session;
4281 status = nfserr_seq_false_retry;
4282 if (!replay_matches_cache(rqstp, seq, slot))
4283 goto out_put_session;
4284 cstate->slot = slot;
4285 cstate->session = session;
4286 cstate->clp = clp;
4287 /* Return the cached reply status and set cstate->status
4288 * for nfsd4_proc_compound processing */
4289 status = nfsd4_replay_cache_entry(resp, seq);
4290 cstate->status = nfserr_replay_cache;
4291 goto out;
4292 }
4293 if (status)
4294 goto out_put_session;
4295
4296 status = nfsd4_sequence_check_conn(conn, session);
4297 conn = NULL;
4298 if (status)
4299 goto out_put_session;
4300
4301 buflen = (seq->cachethis) ?
4302 session->se_fchannel.maxresp_cached :
4303 session->se_fchannel.maxresp_sz;
4304 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
4305 nfserr_rep_too_big;
4306 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
4307 goto out_put_session;
4308 svc_reserve(rqstp, buflen);
4309
4310 status = nfs_ok;
4311 /* Success! bump slot seqid */
4312 slot->sl_seqid = seq->seqid;
4313 slot->sl_flags |= NFSD4_SLOT_INUSE;
4314 if (seq->cachethis)
4315 slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
4316 else
4317 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
4318
4319 cstate->slot = slot;
4320 cstate->session = session;
4321 cstate->clp = clp;
4322
4323 out:
4324 switch (clp->cl_cb_state) {
4325 case NFSD4_CB_DOWN:
4326 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
4327 break;
4328 case NFSD4_CB_FAULT:
4329 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
4330 break;
4331 default:
4332 seq->status_flags = 0;
4333 }
4334 if (!list_empty(&clp->cl_revoked))
4335 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
4336 if (atomic_read(&clp->cl_admin_revoked))
4337 seq->status_flags |= SEQ4_STATUS_ADMIN_STATE_REVOKED;
4338 trace_nfsd_seq4_status(rqstp, seq);
4339 out_no_session:
4340 if (conn)
4341 free_conn(conn);
4342 spin_unlock(&nn->client_lock);
4343 return status;
4344 out_put_session:
4345 nfsd4_put_session_locked(session);
4346 goto out_no_session;
4347 }
4348
4349 void
4350 nfsd4_sequence_done(struct nfsd4_compoundres *resp)
4351 {
4352 struct nfsd4_compound_state *cs = &resp->cstate;
4353
4354 if (nfsd4_has_session(cs)) {
4355 if (cs->status != nfserr_replay_cache) {
4356 nfsd4_store_cache_entry(resp);
4357 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
4358 }
4359 /* Drop session reference that was taken in nfsd4_sequence() */
4360 nfsd4_put_session(cs->session);
4361 } else if (cs->clp)
4362 put_client_renew(cs->clp);
4363 }
4364
4365 __be32
4366 nfsd4_destroy_clientid(struct svc_rqst *rqstp,
4367 struct nfsd4_compound_state *cstate,
4368 union nfsd4_op_u *u)
4369 {
4370 struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
4371 struct nfs4_client *conf, *unconf;
4372 struct nfs4_client *clp = NULL;
4373 __be32 status = 0;
4374 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4375
4376 spin_lock(&nn->client_lock);
4377 unconf = find_unconfirmed_client(&dc->clientid, true, nn);
4378 conf = find_confirmed_client(&dc->clientid, true, nn);
4379 WARN_ON_ONCE(conf && unconf);
4380
4381 if (conf) {
4382 if (client_has_state(conf)) {
4383 status = nfserr_clientid_busy;
4384 goto out;
4385 }
4386 status = mark_client_expired_locked(conf);
4387 if (status)
4388 goto out;
4389 clp = conf;
4390 } else if (unconf)
4391 clp = unconf;
4392 else {
4393 status = nfserr_stale_clientid;
4394 goto out;
4395 }
4396 if (!nfsd4_mach_creds_match(clp, rqstp)) {
4397 clp = NULL;
4398 status = nfserr_wrong_cred;
4399 goto out;
4400 }
4401 trace_nfsd_clid_destroyed(&clp->cl_clientid);
4402 unhash_client_locked(clp);
4403 out:
4404 spin_unlock(&nn->client_lock);
4405 if (clp)
4406 expire_client(clp);
4407 return status;
4408 }
4409
4410 __be32
4411 nfsd4_reclaim_complete(struct svc_rqst *rqstp,
4412 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
4413 {
4414 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
4415 struct nfs4_client *clp = cstate->clp;
4416 __be32 status = 0;
4417
4418 if (rc->rca_one_fs) {
4419 if (!cstate->current_fh.fh_dentry)
4420 return nfserr_nofilehandle;
4421 /*
4422 * We don't take advantage of the rca_one_fs case.
4423 * That's OK, it's optional, we can safely ignore it.
4424 */
4425 return nfs_ok;
4426 }
4427
4428 status = nfserr_complete_already;
4429 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
4430 goto out;
4431
4432 status = nfserr_stale_clientid;
4433 if (is_client_expired(clp))
4434 /*
4435 * The following error isn't really legal.
4436 * But we only get here if the client just explicitly
4437 * destroyed itself. Surely it no longer cares what
4438 * error it gets back on an operation for the dead
4439 * client.
4440 */
4441 goto out;
4442
4443 status = nfs_ok;
4444 trace_nfsd_clid_reclaim_complete(&clp->cl_clientid);
4445 nfsd4_client_record_create(clp);
4446 inc_reclaim_complete(clp);
4447 out:
4448 return status;
4449 }
4450
4451 __be32
4452 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4453 union nfsd4_op_u *u)
4454 {
4455 struct nfsd4_setclientid *setclid = &u->setclientid;
4456 struct xdr_netobj clname = setclid->se_name;
4457 nfs4_verifier clverifier = setclid->se_verf;
4458 struct nfs4_client *conf, *new;
4459 struct nfs4_client *unconf = NULL;
4460 __be32 status;
4461 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4462
4463 new = create_client(clname, rqstp, &clverifier);
4464 if (new == NULL)
4465 return nfserr_jukebox;
4466 spin_lock(&nn->client_lock);
4467 conf = find_confirmed_client_by_name(&clname, nn);
4468 if (conf && client_has_state(conf)) {
4469 status = nfserr_clid_inuse;
4470 if (clp_used_exchangeid(conf))
4471 goto out;
4472 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
4473 trace_nfsd_clid_cred_mismatch(conf, rqstp);
4474 goto out;
4475 }
4476 }
4477 unconf = find_unconfirmed_client_by_name(&clname, nn);
4478 if (unconf)
4479 unhash_client_locked(unconf);
4480 if (conf) {
4481 if (same_verf(&conf->cl_verifier, &clverifier)) {
4482 copy_clid(new, conf);
4483 gen_confirm(new, nn);
4484 } else
4485 trace_nfsd_clid_verf_mismatch(conf, rqstp,
4486 &clverifier);
4487 } else
4488 trace_nfsd_clid_fresh(new);
4489 new->cl_minorversion = 0;
4490 gen_callback(new, setclid, rqstp);
4491 add_to_unconfirmed(new);
4492 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
4493 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
4494 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
4495 new = NULL;
4496 status = nfs_ok;
4497 out:
4498 spin_unlock(&nn->client_lock);
4499 if (new)
4500 free_client(new);
4501 if (unconf) {
4502 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
4503 expire_client(unconf);
4504 }
4505 return status;
4506 }
4507
4508 __be32
4509 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
4510 struct nfsd4_compound_state *cstate,
4511 union nfsd4_op_u *u)
4512 {
4513 struct nfsd4_setclientid_confirm *setclientid_confirm =
4514 &u->setclientid_confirm;
4515 struct nfs4_client *conf, *unconf;
4516 struct nfs4_client *old = NULL;
4517 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
4518 clientid_t * clid = &setclientid_confirm->sc_clientid;
4519 __be32 status;
4520 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4521
4522 if (STALE_CLIENTID(clid, nn))
4523 return nfserr_stale_clientid;
4524
4525 spin_lock(&nn->client_lock);
4526 conf = find_confirmed_client(clid, false, nn);
4527 unconf = find_unconfirmed_client(clid, false, nn);
4528 /*
4529 * We try hard to give out unique clientids, so if we get an
4530 * attempt to confirm the same clientid with a different cred,
4531 * the client may be buggy; this should never happen.
4532 *
4533 * Nevertheless, RFC 7530 recommends INUSE for this case:
4534 */
4535 status = nfserr_clid_inuse;
4536 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
4537 trace_nfsd_clid_cred_mismatch(unconf, rqstp);
4538 goto out;
4539 }
4540 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
4541 trace_nfsd_clid_cred_mismatch(conf, rqstp);
4542 goto out;
4543 }
4544 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
4545 if (conf && same_verf(&confirm, &conf->cl_confirm)) {
4546 status = nfs_ok;
4547 } else
4548 status = nfserr_stale_clientid;
4549 goto out;
4550 }
4551 status = nfs_ok;
4552 if (conf) {
4553 if (get_client_locked(conf) == nfs_ok) {
4554 old = unconf;
4555 unhash_client_locked(old);
4556 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
4557 } else {
4558 conf = NULL;
4559 }
4560 }
4561
4562 if (!conf) {
4563 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
4564 if (old) {
4565 status = nfserr_clid_inuse;
4566 if (client_has_state(old)
4567 && !same_creds(&unconf->cl_cred,
4568 &old->cl_cred)) {
4569 old = NULL;
4570 goto out;
4571 }
4572 status = mark_client_expired_locked(old);
4573 if (status) {
4574 old = NULL;
4575 goto out;
4576 }
4577 trace_nfsd_clid_replaced(&old->cl_clientid);
4578 }
4579 status = get_client_locked(unconf);
4580 if (status != nfs_ok) {
4581 old = NULL;
4582 goto out;
4583 }
4584 move_to_confirmed(unconf);
4585 conf = unconf;
4586 }
4587 spin_unlock(&nn->client_lock);
4588 if (conf == unconf)
4589 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
4590 nfsd4_probe_callback(conf);
4591 spin_lock(&nn->client_lock);
4592 put_client_renew_locked(conf);
4593 out:
4594 spin_unlock(&nn->client_lock);
4595 if (old)
4596 expire_client(old);
4597 return status;
4598 }
4599
4600 static struct nfs4_file *nfsd4_alloc_file(void)
4601 {
4602 return kmem_cache_alloc(file_slab, GFP_KERNEL);
4603 }
4604
4605 /* OPEN Share state helper functions */
4606
4607 static void nfsd4_file_init(const struct svc_fh *fh, struct nfs4_file *fp)
4608 {
4609 refcount_set(&fp->fi_ref, 1);
4610 spin_lock_init(&fp->fi_lock);
4611 INIT_LIST_HEAD(&fp->fi_stateids);
4612 INIT_LIST_HEAD(&fp->fi_delegations);
4613 INIT_LIST_HEAD(&fp->fi_clnt_odstate);
4614 fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle);
4615 fp->fi_deleg_file = NULL;
4616 fp->fi_had_conflict = false;
4617 fp->fi_share_deny = 0;
4618 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
4619 memset(fp->fi_access, 0, sizeof(fp->fi_access));
4620 fp->fi_aliased = false;
4621 fp->fi_inode = d_inode(fh->fh_dentry);
4622 #ifdef CONFIG_NFSD_PNFS
4623 INIT_LIST_HEAD(&fp->fi_lo_states);
4624 atomic_set(&fp->fi_lo_recalls, 0);
4625 #endif
4626 }
4627
4628 void
4629 nfsd4_free_slabs(void)
4630 {
4631 kmem_cache_destroy(client_slab);
4632 kmem_cache_destroy(openowner_slab);
4633 kmem_cache_destroy(lockowner_slab);
4634 kmem_cache_destroy(file_slab);
4635 kmem_cache_destroy(stateid_slab);
4636 kmem_cache_destroy(deleg_slab);
4637 kmem_cache_destroy(odstate_slab);
4638 }
4639
4640 int
4641 nfsd4_init_slabs(void)
4642 {
4643 client_slab = KMEM_CACHE(nfs4_client, 0);
4644 if (client_slab == NULL)
4645 goto out;
4646 openowner_slab = KMEM_CACHE(nfs4_openowner, 0);
4647 if (openowner_slab == NULL)
4648 goto out_free_client_slab;
4649 lockowner_slab = KMEM_CACHE(nfs4_lockowner, 0);
4650 if (lockowner_slab == NULL)
4651 goto out_free_openowner_slab;
4652 file_slab = KMEM_CACHE(nfs4_file, 0);
4653 if (file_slab == NULL)
4654 goto out_free_lockowner_slab;
4655 stateid_slab = KMEM_CACHE(nfs4_ol_stateid, 0);
4656 if (stateid_slab == NULL)
4657 goto out_free_file_slab;
4658 deleg_slab = KMEM_CACHE(nfs4_delegation, 0);
4659 if (deleg_slab == NULL)
4660 goto out_free_stateid_slab;
4661 odstate_slab = KMEM_CACHE(nfs4_clnt_odstate, 0);
4662 if (odstate_slab == NULL)
4663 goto out_free_deleg_slab;
4664 return 0;
4665
4666 out_free_deleg_slab:
4667 kmem_cache_destroy(deleg_slab);
4668 out_free_stateid_slab:
4669 kmem_cache_destroy(stateid_slab);
4670 out_free_file_slab:
4671 kmem_cache_destroy(file_slab);
4672 out_free_lockowner_slab:
4673 kmem_cache_destroy(lockowner_slab);
4674 out_free_openowner_slab:
4675 kmem_cache_destroy(openowner_slab);
4676 out_free_client_slab:
4677 kmem_cache_destroy(client_slab);
4678 out:
4679 return -ENOMEM;
4680 }
4681
4682 static unsigned long
4683 nfsd4_state_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
4684 {
4685 int count;
4686 struct nfsd_net *nn = shrink->private_data;
4687
4688 count = atomic_read(&nn->nfsd_courtesy_clients);
4689 if (!count)
4690 count = atomic_long_read(&num_delegations);
4691 if (count)
4692 queue_work(laundry_wq, &nn->nfsd_shrinker_work);
4693 return (unsigned long)count;
4694 }
4695
4696 static unsigned long
4697 nfsd4_state_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
4698 {
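/*
 * Reaping is done asynchronously by nfsd_shrinker_work, queued
 * from the ->count_objects callback above, so there is nothing
 * for the shrinker core to reclaim synchronously here.
 */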
4699 return SHRINK_STOP;
4700 }
4701
4702 void
4703 nfsd4_init_leases_net(struct nfsd_net *nn)
4704 {
4705 struct sysinfo si;
4706 u64 max_clients;
4707
4708 nn->nfsd4_lease = 90; /* default lease time */
4709 nn->nfsd4_grace = 90;
4710 nn->somebody_reclaimed = false;
4711 nn->track_reclaim_completes = false;
4712 nn->clverifier_counter = get_random_u32();
4713 nn->clientid_base = get_random_u32();
4714 nn->clientid_counter = nn->clientid_base + 1;
4715 nn->s2s_cp_cl_id = nn->clientid_counter++;
4716
4717 atomic_set(&nn->nfs4_client_count, 0);
4718 si_meminfo(&si);
4719 max_clients = (u64)si.totalram * si.mem_unit / (1024 * 1024 * 1024);
4720 max_clients *= NFS4_CLIENTS_PER_GB;
4721 nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB);
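/*
 * Example, assuming the current NFS4_CLIENTS_PER_GB of 1024: a host
 * with 16GB of RAM admits up to 16384 NFSv4 clients, while hosts
 * with less than 1GB of RAM still get the 1024-client floor.
 */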
4722
4723 atomic_set(&nn->nfsd_courtesy_clients, 0);
4724 }
4725
4726 enum rp_lock {
4727 RP_UNLOCKED,
4728 RP_LOCKED,
4729 RP_UNHASHED,
4730 };
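/*
 * rp_locked serializes use of the nfs4_replay buffer for NFSv4.0
 * seqid replay: RP_UNLOCKED means the buffer is free, RP_LOCKED that
 * a compound currently owns it, and RP_UNHASHED that the stateowner
 * is being torn down and waiters should give up (see
 * nfsd4_cstate_assign_replay() and move_to_close_lru()).
 */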
4731
4732 static void init_nfs4_replay(struct nfs4_replay *rp)
4733 {
4734 rp->rp_status = nfserr_serverfault;
4735 rp->rp_buflen = 0;
4736 rp->rp_buf = rp->rp_ibuf;
4737 atomic_set(&rp->rp_locked, RP_UNLOCKED);
4738 }
4739
4740 static int nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
4741 struct nfs4_stateowner *so)
4742 {
4743 if (!nfsd4_has_session(cstate)) {
4744 wait_var_event(&so->so_replay.rp_locked,
4745 atomic_cmpxchg(&so->so_replay.rp_locked,
4746 RP_UNLOCKED, RP_LOCKED) != RP_LOCKED);
4747 if (atomic_read(&so->so_replay.rp_locked) == RP_UNHASHED)
4748 return -EAGAIN;
4749 cstate->replay_owner = nfs4_get_stateowner(so);
4750 }
4751 return 0;
4752 }
4753
4754 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
4755 {
4756 struct nfs4_stateowner *so = cstate->replay_owner;
4757
4758 if (so != NULL) {
4759 cstate->replay_owner = NULL;
4760 atomic_set(&so->so_replay.rp_locked, RP_UNLOCKED);
4761 smp_mb__after_atomic();
4762 wake_up_var(&so->so_replay.rp_locked);
4763 nfs4_put_stateowner(so);
4764 }
4765 }
4766
4767 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
4768 {
4769 struct nfs4_stateowner *sop;
4770
4771 sop = kmem_cache_alloc(slab, GFP_KERNEL);
4772 if (!sop)
4773 return NULL;
4774
4775 xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
4776 if (!sop->so_owner.data) {
4777 kmem_cache_free(slab, sop);
4778 return NULL;
4779 }
4780
4781 INIT_LIST_HEAD(&sop->so_stateids);
4782 sop->so_client = clp;
4783 init_nfs4_replay(&sop->so_replay);
4784 atomic_set(&sop->so_count, 1);
4785 return sop;
4786 }
4787
4788 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
4789 {
4790 lockdep_assert_held(&clp->cl_lock);
4791
4792 list_add(&oo->oo_owner.so_strhash,
4793 &clp->cl_ownerstr_hashtbl[strhashval]);
4794 list_add(&oo->oo_perclient, &clp->cl_openowners);
4795 }
4796
4797 static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
4798 {
4799 unhash_openowner_locked(openowner(so));
4800 }
4801
4802 static void nfs4_free_openowner(struct nfs4_stateowner *so)
4803 {
4804 struct nfs4_openowner *oo = openowner(so);
4805
4806 kmem_cache_free(openowner_slab, oo);
4807 }
4808
4809 static const struct nfs4_stateowner_operations openowner_ops = {
4810 .so_unhash = nfs4_unhash_openowner,
4811 .so_free = nfs4_free_openowner,
4812 };
4813
4814 static struct nfs4_ol_stateid *
4815 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4816 {
4817 struct nfs4_ol_stateid *local, *ret = NULL;
4818 struct nfs4_openowner *oo = open->op_openowner;
4819
4820 lockdep_assert_held(&fp->fi_lock);
4821
4822 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
4823 /* ignore lock owners */
4824 if (local->st_stateowner->so_is_open_owner == 0)
4825 continue;
4826 if (local->st_stateowner != &oo->oo_owner)
4827 continue;
4828 if (local->st_stid.sc_type == SC_TYPE_OPEN &&
4829 !local->st_stid.sc_status) {
4830 ret = local;
4831 refcount_inc(&ret->st_stid.sc_count);
4832 break;
4833 }
4834 }
4835 return ret;
4836 }
4837
4838 static void nfsd4_drop_revoked_stid(struct nfs4_stid *s)
4839 __releases(&s->sc_client->cl_lock)
4840 {
4841 struct nfs4_client *cl = s->sc_client;
4842 LIST_HEAD(reaplist);
4843 struct nfs4_ol_stateid *stp;
4844 struct nfs4_delegation *dp;
4845 bool unhashed;
4846
4847 switch (s->sc_type) {
4848 case SC_TYPE_OPEN:
4849 stp = openlockstateid(s);
4850 if (unhash_open_stateid(stp, &reaplist))
4851 put_ol_stateid_locked(stp, &reaplist);
4852 spin_unlock(&cl->cl_lock);
4853 free_ol_stateid_reaplist(&reaplist);
4854 break;
4855 case SC_TYPE_LOCK:
4856 stp = openlockstateid(s);
4857 unhashed = unhash_lock_stateid(stp);
4858 spin_unlock(&cl->cl_lock);
4859 if (unhashed)
4860 nfs4_put_stid(s);
4861 break;
4862 case SC_TYPE_DELEG:
4863 dp = delegstateid(s);
4864 list_del_init(&dp->dl_recall_lru);
4865 spin_unlock(&cl->cl_lock);
4866 nfs4_put_stid(s);
4867 break;
4868 default:
4869 spin_unlock(&cl->cl_lock);
4870 }
4871 }
4872
4873 static void nfsd40_drop_revoked_stid(struct nfs4_client *cl,
4874 stateid_t *stid)
4875 {
4876 /* NFSv4.0 has no way for the client to tell the server
4877 * that it can forget an admin-revoked stateid.
4878 * So we keep it around until the first time that the
4879 * client uses it, and drop it the first time
4880 * nfserr_admin_revoked is returned.
4881 * For v4.1 and later we wait until explicitly told
4882 * to free the stateid.
4883 */
4884 if (cl->cl_minorversion == 0) {
4885 struct nfs4_stid *st;
4886
4887 spin_lock(&cl->cl_lock);
4888 st = find_stateid_locked(cl, stid);
4889 if (st)
4890 nfsd4_drop_revoked_stid(st);
4891 else
4892 spin_unlock(&cl->cl_lock);
4893 }
4894 }
4895
4896 static __be32
4897 nfsd4_verify_open_stid(struct nfs4_stid *s)
4898 {
4899 __be32 ret = nfs_ok;
4900
4901 if (s->sc_status & SC_STATUS_ADMIN_REVOKED)
4902 ret = nfserr_admin_revoked;
4903 else if (s->sc_status & SC_STATUS_REVOKED)
4904 ret = nfserr_deleg_revoked;
4905 else if (s->sc_status & SC_STATUS_CLOSED)
4906 ret = nfserr_bad_stateid;
4907 return ret;
4908 }
4909
4910 /* Lock the stateid st_mutex, and deal with races with CLOSE */
4911 static __be32
4912 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
4913 {
4914 __be32 ret;
4915
4916 mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
4917 ret = nfsd4_verify_open_stid(&stp->st_stid);
4918 if (ret == nfserr_admin_revoked)
4919 nfsd40_drop_revoked_stid(stp->st_stid.sc_client,
4920 &stp->st_stid.sc_stateid);
4921
4922 if (ret != nfs_ok)
4923 mutex_unlock(&stp->st_mutex);
4924 return ret;
4925 }
4926
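/*
 * Find an existing open stateid for this open owner on @fp and take
 * its st_mutex, retrying if a concurrent CLOSE invalidates the
 * stateid before the mutex can be acquired.
 */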
4927 static struct nfs4_ol_stateid *
4928 nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4929 {
4930 struct nfs4_ol_stateid *stp;
4931 for (;;) {
4932 spin_lock(&fp->fi_lock);
4933 stp = nfsd4_find_existing_open(fp, open);
4934 spin_unlock(&fp->fi_lock);
4935 if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
4936 break;
4937 nfs4_put_stid(&stp->st_stid);
4938 }
4939 return stp;
4940 }
4941
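/*
 * Look up an open stateowner by owner string under cl_lock; if none
 * is found, allocate one outside the lock and retry, installing the
 * new owner only if no other thread raced in ahead of us. An
 * existing but unconfirmed owner is discarded and replaced.
 */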
4942 static struct nfs4_openowner *
4943 find_or_alloc_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
4944 struct nfsd4_compound_state *cstate)
4945 {
4946 struct nfs4_client *clp = cstate->clp;
4947 struct nfs4_openowner *oo, *new = NULL;
4948
4949 retry:
4950 spin_lock(&clp->cl_lock);
4951 oo = find_openstateowner_str(strhashval, open, clp);
4952 if (!oo && new) {
4953 hash_openowner(new, clp, strhashval);
4954 spin_unlock(&clp->cl_lock);
4955 return new;
4956 }
4957 spin_unlock(&clp->cl_lock);
4958
4959 if (oo && !(oo->oo_flags & NFS4_OO_CONFIRMED)) {
4960 /* Replace unconfirmed owners without checking for replay. */
4961 release_openowner(oo);
4962 oo = NULL;
4963 }
4964 if (oo) {
4965 if (new)
4966 nfs4_free_stateowner(&new->oo_owner);
4967 return oo;
4968 }
4969
4970 new = alloc_stateowner(openowner_slab, &open->op_owner, clp);
4971 if (!new)
4972 return NULL;
4973 new->oo_owner.so_ops = &openowner_ops;
4974 new->oo_owner.so_is_open_owner = 1;
4975 new->oo_owner.so_seqid = open->op_seqid;
4976 new->oo_flags = 0;
4977 if (nfsd4_has_session(cstate))
4978 new->oo_flags |= NFS4_OO_CONFIRMED;
4979 new->oo_time = 0;
4980 new->oo_last_closed_stid = NULL;
4981 INIT_LIST_HEAD(&new->oo_close_lru);
4982 goto retry;
4983 }
4984
4985 static struct nfs4_ol_stateid *
4986 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
4987 {
4988
4989 struct nfs4_openowner *oo = open->op_openowner;
4990 struct nfs4_ol_stateid *retstp = NULL;
4991 struct nfs4_ol_stateid *stp;
4992
4993 stp = open->op_stp;
4994 /* Initialize and take st_mutex outside the spinlocks: a mutex may sleep and must not be acquired under a spinlock */
4995 mutex_init(&stp->st_mutex);
4996 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
4997
4998 retry:
4999 spin_lock(&oo->oo_owner.so_client->cl_lock);
5000 spin_lock(&fp->fi_lock);
5001
5002 if (nfs4_openowner_unhashed(oo)) {
5003 mutex_unlock(&stp->st_mutex);
5004 stp = NULL;
5005 goto out_unlock;
5006 }
5007
5008 retstp = nfsd4_find_existing_open(fp, open);
5009 if (retstp)
5010 goto out_unlock;
5011
5012 open->op_stp = NULL;
5013 refcount_inc(&stp->st_stid.sc_count);
5014 stp->st_stid.sc_type = SC_TYPE_OPEN;
5015 INIT_LIST_HEAD(&stp->st_locks);
5016 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
5017 get_nfs4_file(fp);
5018 stp->st_stid.sc_file = fp;
5019 stp->st_access_bmap = 0;
5020 stp->st_deny_bmap = 0;
5021 stp->st_openstp = NULL;
5022 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
5023 list_add(&stp->st_perfile, &fp->fi_stateids);
5024
5025 out_unlock:
5026 spin_unlock(&fp->fi_lock);
5027 spin_unlock(&oo->oo_owner.so_client->cl_lock);
5028 if (retstp) {
5029 /* Handle races with CLOSE */
5030 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
5031 nfs4_put_stid(&retstp->st_stid);
5032 goto retry;
5033 }
5034 /* To keep mutex tracking happy */
5035 mutex_unlock(&stp->st_mutex);
5036 stp = retstp;
5037 }
5038 return stp;
5039 }
5040
5041 /*
5042 * In the 4.0 case we need to keep the owners around a little while to handle
5043 * CLOSE replay. We still do need to release any file access that is held by
5044 * them before returning however.
5045 */
5046 static void
5047 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
5048 {
5049 struct nfs4_ol_stateid *last;
5050 struct nfs4_openowner *oo = openowner(s->st_stateowner);
5051 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
5052 nfsd_net_id);
5053
5054 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
5055
5056 /*
5057 * We know that we hold one reference via nfsd4_close, and another
5058 * "persistent" reference for the client. If the refcount is higher
5059 * than 2, then there are still calls in progress that are using this
5060 * stateid. We can't put the sc_file reference until they are finished.
5061 * Wait for the refcount to drop to 2. Since it has been unhashed,
5062 * there should be no danger of the refcount going back up again at
5063 * this point.
5064 * Some threads with a reference might be waiting for rp_locked,
5065 * so tell them to stop waiting.
5066 */
5067 atomic_set(&oo->oo_owner.so_replay.rp_locked, RP_UNHASHED);
5068 smp_mb__after_atomic();
5069 wake_up_var(&oo->oo_owner.so_replay.rp_locked);
5070 wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
5071
5072 release_all_access(s);
5073 if (s->st_stid.sc_file) {
5074 put_nfs4_file(s->st_stid.sc_file);
5075 s->st_stid.sc_file = NULL;
5076 }
5077
5078 spin_lock(&nn->client_lock);
5079 last = oo->oo_last_closed_stid;
5080 oo->oo_last_closed_stid = s;
5081 list_move_tail(&oo->oo_close_lru, &nn->close_lru);
5082 oo->oo_time = ktime_get_boottime_seconds();
5083 spin_unlock(&nn->client_lock);
5084 if (last)
5085 nfs4_put_stid(&last->st_stid);
5086 }
5087
5088 static noinline_for_stack struct nfs4_file *
5089 nfsd4_file_hash_lookup(const struct svc_fh *fhp)
5090 {
5091 struct inode *inode = d_inode(fhp->fh_dentry);
5092 struct rhlist_head *tmp, *list;
5093 struct nfs4_file *fi;
5094
5095 rcu_read_lock();
5096 list = rhltable_lookup(&nfs4_file_rhltable, &inode,
5097 nfs4_file_rhash_params);
5098 rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) {
5099 if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
5100 if (refcount_inc_not_zero(&fi->fi_ref)) {
5101 rcu_read_unlock();
5102 return fi;
5103 }
5104 }
5105 }
5106 rcu_read_unlock();
5107 return NULL;
5108 }
5109
5110 /*
5111 * On hash insertion, identify entries with the same inode but
5112 * distinct filehandles. They will all be on the list returned
5113 * by rhltable_lookup().
5114 *
5115 * inode->i_lock prevents racing insertions from adding an entry
5116 * for the same inode/fhp pair twice.
5117 */
5118 static noinline_for_stack struct nfs4_file *
5119 nfsd4_file_hash_insert(struct nfs4_file *new, const struct svc_fh *fhp)
5120 {
5121 struct inode *inode = d_inode(fhp->fh_dentry);
5122 struct rhlist_head *tmp, *list;
5123 struct nfs4_file *ret = NULL;
5124 bool alias_found = false;
5125 struct nfs4_file *fi;
5126 int err;
5127
5128 rcu_read_lock();
5129 spin_lock(&inode->i_lock);
5130
5131 list = rhltable_lookup(&nfs4_file_rhltable, &inode,
5132 nfs4_file_rhash_params);
5133 rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) {
5134 if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
5135 if (refcount_inc_not_zero(&fi->fi_ref))
5136 ret = fi;
5137 } else
5138 fi->fi_aliased = alias_found = true;
5139 }
5140 if (ret)
5141 goto out_unlock;
5142
5143 nfsd4_file_init(fhp, new);
5144 err = rhltable_insert(&nfs4_file_rhltable, &new->fi_rlist,
5145 nfs4_file_rhash_params);
5146 if (err)
5147 goto out_unlock;
5148
5149 new->fi_aliased = alias_found;
5150 ret = new;
5151
5152 out_unlock:
5153 spin_unlock(&inode->i_lock);
5154 rcu_read_unlock();
5155 return ret;
5156 }
5157
5158 static noinline_for_stack void nfsd4_file_hash_remove(struct nfs4_file *fi)
5159 {
5160 rhltable_remove(&nfs4_file_rhltable, &fi->fi_rlist,
5161 nfs4_file_rhash_params);
5162 }
5163
5164 /*
5165 * Called to check deny when READ with all zero stateid or
5166 * WRITE with all zero or all one stateid
5167 */
5168 static __be32
5169 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
5170 {
5171 struct nfs4_file *fp;
5172 __be32 ret = nfs_ok;
5173
5174 fp = nfsd4_file_hash_lookup(current_fh);
5175 if (!fp)
5176 return ret;
5177
5178 /* Check for conflicting share reservations */
5179 spin_lock(&fp->fi_lock);
5180 if (fp->fi_share_deny & deny_type)
5181 ret = nfserr_locked;
5182 spin_unlock(&fp->fi_lock);
5183 put_nfs4_file(fp);
5184 return ret;
5185 }
5186
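/*
 * Returns true if at least one lease is set on the inode; for an
 * nfsd-exported file, that indicates an outstanding delegation.
 */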
5187 static bool nfsd4_deleg_present(const struct inode *inode)
5188 {
5189 struct file_lock_context *ctx = locks_inode_context(inode);
5190
5191 return ctx && !list_empty_careful(&ctx->flc_lease);
5192 }
5193
5194 /**
5195 * nfsd_wait_for_delegreturn - wait for delegations to be returned
5196 * @rqstp: the RPC transaction being executed
5197 * @inode: in-core inode of the file being waited for
5198 *
5199 * The timeout prevents deadlock if all nfsd threads happen to be
5200 * tied up waiting for delegations to be returned.
5201 *
5202 * Return values:
5203 * %true: delegation was returned
5204 * %false: timed out waiting for delegreturn
5205 */
5206 bool nfsd_wait_for_delegreturn(struct svc_rqst *rqstp, struct inode *inode)
5207 {
5208 long __maybe_unused timeo;
5209
5210 timeo = wait_var_event_timeout(inode, !nfsd4_deleg_present(inode),
5211 NFSD_DELEGRETURN_TIMEOUT);
5212 trace_nfsd_delegret_wakeup(rqstp, inode, timeo);
5213 return timeo > 0;
5214 }
5215
5216 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
5217 {
5218 struct nfs4_delegation *dp = cb_to_delegation(cb);
5219 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
5220 nfsd_net_id);
5221
5222 block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
5223
5224 /*
5225 * We can't do this in nfsd_break_deleg_cb because it is
5226 * already holding inode->i_lock.
5227 *
5228 * If the dl_time != 0, then we know that it has already been
5229 * queued for a lease break. Don't queue it again.
5230 */
5231 spin_lock(&state_lock);
5232 if (delegation_hashed(dp) && dp->dl_time == 0) {
5233 dp->dl_time = ktime_get_boottime_seconds();
5234 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
5235 }
5236 spin_unlock(&state_lock);
5237 }
5238
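/*
 * Handle completion of a CB_RECALL RPC. Returning 1 completes the
 * callback; returning 0 after an rpc_delay() causes it to be retried.
 */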
5239 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
5240 struct rpc_task *task)
5241 {
5242 struct nfs4_delegation *dp = cb_to_delegation(cb);
5243
5244 trace_nfsd_cb_recall_done(&dp->dl_stid.sc_stateid, task);
5245
5246 if (dp->dl_stid.sc_status)
5247 /* CLOSED or REVOKED */
5248 return 1;
5249
5250 switch (task->tk_status) {
5251 case 0:
5252 return 1;
5253 case -NFS4ERR_DELAY:
5254 rpc_delay(task, 2 * HZ);
5255 return 0;
5256 case -EBADHANDLE:
5257 case -NFS4ERR_BAD_STATEID:
5258 /*
5259 * Race: client probably got cb_recall before open reply
5260 * granting delegation.
5261 */
5262 if (dp->dl_retries--) {
5263 rpc_delay(task, 2 * HZ);
5264 return 0;
5265 }
5266 fallthrough;
5267 default:
5268 return 1;
5269 }
5270 }
5271
5272 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
5273 {
5274 struct nfs4_delegation *dp = cb_to_delegation(cb);
5275
5276 nfs4_put_stid(&dp->dl_stid);
5277 }
5278
5279 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
5280 .prepare = nfsd4_cb_recall_prepare,
5281 .done = nfsd4_cb_recall_done,
5282 .release = nfsd4_cb_recall_release,
5283 .opcode = OP_CB_RECALL,
5284 };
5285
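/*
 * Take a reference on the delegation stateid and queue the CB_RECALL
 * callback. The reference is normally dropped by
 * nfsd4_cb_recall_release() when the callback completes.
 */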
5286 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
5287 {
5288 bool queued;
5289 /*
5290 * We're assuming the state code never drops its reference
5291 * without first removing the lease. Since we're in this lease
5292 * callback (and since the lease code is serialized by the
5293 * flc_lock) we know the server hasn't removed the lease yet, and
5294 * we know it's safe to take a reference.
5295 */
5296 refcount_inc(&dp->dl_stid.sc_count);
5297 queued = nfsd4_run_cb(&dp->dl_recall);
5298 WARN_ON_ONCE(!queued);
5299 if (!queued)
5300 refcount_dec(&dp->dl_stid.sc_count);
5301 }
5302
5303 /* Called from break_lease() with flc_lock held. */
5304 static bool
5305 nfsd_break_deleg_cb(struct file_lease *fl)
5306 {
5307 struct nfs4_delegation *dp = (struct nfs4_delegation *) fl->c.flc_owner;
5308 struct nfs4_file *fp = dp->dl_stid.sc_file;
5309 struct nfs4_client *clp = dp->dl_stid.sc_client;
5310 struct nfsd_net *nn;
5311
5312 trace_nfsd_cb_recall(&dp->dl_stid);
5313
5314 dp->dl_recalled = true;
5315 atomic_inc(&clp->cl_delegs_in_recall);
5316 if (try_to_expire_client(clp)) {
5317 nn = net_generic(clp->net, nfsd_net_id);
5318 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
5319 }
5320
5321 /*
5322 * We don't want the locks code to timeout the lease for us;
5323 * we'll remove it ourselves if a delegation isn't returned
5324 * in time:
5325 */
5326 fl->fl_break_time = 0;
5327
5328 fp->fi_had_conflict = true;
5329 nfsd_break_one_deleg(dp);
5330 return false;
5331 }
5332
5333 /**
5334 * nfsd_breaker_owns_lease - Check if lease conflict was resolved
5335 * @fl: Lock state to check
5336 *
5337 * Return values:
5338 * %true: Lease conflict was resolved
5339 * %false: Lease conflict was not resolved.
5340 */
5341 static bool nfsd_breaker_owns_lease(struct file_lease *fl)
5342 {
5343 struct nfs4_delegation *dl = fl->c.flc_owner;
5344 struct svc_rqst *rqst;
5345 struct nfs4_client *clp;
5346
5347 rqst = nfsd_current_rqst();
5348 if (!nfsd_v4client(rqst))
5349 return false;
5350 clp = *(rqst->rq_lease_breaker);
5351 return dl->dl_stid.sc_client == clp;
5352 }
5353
5354 static int
5355 nfsd_change_deleg_cb(struct file_lease *onlist, int arg,
5356 struct list_head *dispose)
5357 {
5358 struct nfs4_delegation *dp = (struct nfs4_delegation *) onlist->c.flc_owner;
5359 struct nfs4_client *clp = dp->dl_stid.sc_client;
5360
5361 if (arg & F_UNLCK) {
5362 if (dp->dl_recalled)
5363 atomic_dec(&clp->cl_delegs_in_recall);
5364 return lease_modify(onlist, arg, dispose);
5365 } else
5366 return -EAGAIN;
5367 }
5368
5369 static const struct lease_manager_operations nfsd_lease_mng_ops = {
5370 .lm_breaker_owns_lease = nfsd_breaker_owns_lease,
5371 .lm_break = nfsd_break_deleg_cb,
5372 .lm_change = nfsd_change_deleg_cb,
5373 };
5374
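/*
 * NFSv4.0 serializes seqid-bearing operations per state owner. A seqid
 * one less than expected is a retransmit and is answered from the
 * replay cache (nfserr_replay_me); any other mismatch is a protocol
 * error. Sessions (NFSv4.1+) make this check unnecessary.
 */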
5375 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
5376 {
5377 if (nfsd4_has_session(cstate))
5378 return nfs_ok;
5379 if (seqid == so->so_seqid - 1)
5380 return nfserr_replay_me;
5381 if (seqid == so->so_seqid)
5382 return nfs_ok;
5383 return nfserr_bad_seqid;
5384 }
5385
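/*
 * Find a confirmed client by clientid and take a cl_rpc_users
 * reference, which keeps the client from being expired while this
 * RPC is still using it.
 */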
5386 static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions,
5387 struct nfsd_net *nn)
5388 {
5389 struct nfs4_client *found;
5390
5391 spin_lock(&nn->client_lock);
5392 found = find_confirmed_client(clid, sessions, nn);
5393 if (found)
5394 atomic_inc(&found->cl_rpc_users);
5395 spin_unlock(&nn->client_lock);
5396 return found;
5397 }
5398
5399 static __be32 set_client(clientid_t *clid,
5400 struct nfsd4_compound_state *cstate,
5401 struct nfsd_net *nn)
5402 {
5403 if (cstate->clp) {
5404 if (!same_clid(&cstate->clp->cl_clientid, clid))
5405 return nfserr_stale_clientid;
5406 return nfs_ok;
5407 }
5408 if (STALE_CLIENTID(clid, nn))
5409 return nfserr_stale_clientid;
5410 /*
5411 * We're in the 4.0 case (otherwise the SEQUENCE op would have
5412 * set cstate->clp), so sessions = false:
5413 */
5414 cstate->clp = lookup_clientid(clid, false, nn);
5415 if (!cstate->clp)
5416 return nfserr_expired;
5417 return nfs_ok;
5418 }
5419
5420 __be32
5421 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
5422 struct nfsd4_open *open, struct nfsd_net *nn)
5423 {
5424 clientid_t *clientid = &open->op_clientid;
5425 struct nfs4_client *clp = NULL;
5426 unsigned int strhashval;
5427 struct nfs4_openowner *oo = NULL;
5428 __be32 status;
5429
5430 /*
5431 * In case we need it later, after we've already created the
5432 * file and don't want to risk a further failure:
5433 */
5434 open->op_file = nfsd4_alloc_file();
5435 if (open->op_file == NULL)
5436 return nfserr_jukebox;
5437
5438 status = set_client(clientid, cstate, nn);
5439 if (status)
5440 return status;
5441 clp = cstate->clp;
5442
5443 strhashval = ownerstr_hashval(&open->op_owner);
5444 retry:
5445 oo = find_or_alloc_open_stateowner(strhashval, open, cstate);
5446 open->op_openowner = oo;
5447 if (!oo)
5448 return nfserr_jukebox;
5449 if (nfsd4_cstate_assign_replay(cstate, &oo->oo_owner) == -EAGAIN) {
5450 nfs4_put_stateowner(&oo->oo_owner);
5451 goto retry;
5452 }
5453 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
5454 if (status)
5455 return status;
5456
5457 open->op_stp = nfs4_alloc_open_stateid(clp);
5458 if (!open->op_stp)
5459 return nfserr_jukebox;
5460
5461 if (nfsd4_has_session(cstate) &&
5462 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
5463 open->op_odstate = alloc_clnt_odstate(clp);
5464 if (!open->op_odstate)
5465 return nfserr_jukebox;
5466 }
5467
5468 return nfs_ok;
5469 }
5470
5471 static inline __be32
5472 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
5473 {
5474 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
5475 return nfserr_openmode;
5476 else
5477 return nfs_ok;
5478 }
5479
5480 static int share_access_to_flags(u32 share_access)
5481 {
5482 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
5483 }
5484
5485 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl,
5486 stateid_t *s)
5487 {
5488 struct nfs4_stid *ret;
5489
5490 ret = find_stateid_by_type(cl, s, SC_TYPE_DELEG, SC_STATUS_REVOKED);
5491 if (!ret)
5492 return NULL;
5493 return delegstateid(ret);
5494 }
5495
5496 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
5497 {
5498 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
5499 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
5500 }
5501
5502 static __be32
5503 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
5504 struct nfs4_delegation **dp)
5505 {
5506 int flags;
5507 __be32 status = nfserr_bad_stateid;
5508 struct nfs4_delegation *deleg;
5509
5510 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
5511 if (deleg == NULL)
5512 goto out;
5513 if (deleg->dl_stid.sc_status & SC_STATUS_ADMIN_REVOKED) {
5514 nfs4_put_stid(&deleg->dl_stid);
5515 status = nfserr_admin_revoked;
5516 goto out;
5517 }
5518 if (deleg->dl_stid.sc_status & SC_STATUS_REVOKED) {
5519 nfs4_put_stid(&deleg->dl_stid);
5520 nfsd40_drop_revoked_stid(cl, &open->op_delegate_stateid);
5521 status = nfserr_deleg_revoked;
5522 goto out;
5523 }
5524 flags = share_access_to_flags(open->op_share_access);
5525 status = nfs4_check_delegmode(deleg, flags);
5526 if (status) {
5527 nfs4_put_stid(&deleg->dl_stid);
5528 goto out;
5529 }
5530 *dp = deleg;
5531 out:
5532 if (!nfsd4_is_deleg_cur(open))
5533 return nfs_ok;
5534 if (status)
5535 return status;
5536 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
5537 return nfs_ok;
5538 }
5539
5540 static inline int nfs4_access_to_access(u32 nfs4_access)
5541 {
5542 int flags = 0;
5543
5544 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
5545 flags |= NFSD_MAY_READ;
5546 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
5547 flags |= NFSD_MAY_WRITE;
5548 return flags;
5549 }
5550
5551 static inline __be32
5552 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
5553 struct nfsd4_open *open)
5554 {
5555 struct iattr iattr = {
5556 .ia_valid = ATTR_SIZE,
5557 .ia_size = 0,
5558 };
5559 struct nfsd_attrs attrs = {
5560 .na_iattr = &iattr,
5561 };
5562 if (!open->op_truncate)
5563 return 0;
5564 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
5565 return nfserr_inval;
5566 return nfsd_setattr(rqstp, fh, &attrs, NULL);
5567 }
5568
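/*
 * Record the access and deny modes of this OPEN in the stateid and the
 * nfs4_file, acquiring a struct nfsd_file for the requested access mode
 * if one is not already cached in fp->fi_fds[]. Any bits set here are
 * rolled back on failure.
 */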
5569 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
5570 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
5571 struct nfsd4_open *open, bool new_stp)
5572 {
5573 struct nfsd_file *nf = NULL;
5574 __be32 status;
5575 int oflag = nfs4_access_to_omode(open->op_share_access);
5576 int access = nfs4_access_to_access(open->op_share_access);
5577 unsigned char old_access_bmap, old_deny_bmap;
5578
5579 spin_lock(&fp->fi_lock);
5580
5581 /*
5582 * Are we trying to set a deny mode that would conflict with
5583 * current access?
5584 */
5585 status = nfs4_file_check_deny(fp, open->op_share_deny);
5586 if (status != nfs_ok) {
5587 if (status != nfserr_share_denied) {
5588 spin_unlock(&fp->fi_lock);
5589 goto out;
5590 }
5591 if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
5592 stp, open->op_share_deny, false))
5593 status = nfserr_jukebox;
5594 spin_unlock(&fp->fi_lock);
5595 goto out;
5596 }
5597
5598 /* set access to the file */
5599 status = nfs4_file_get_access(fp, open->op_share_access);
5600 if (status != nfs_ok) {
5601 if (status != nfserr_share_denied) {
5602 spin_unlock(&fp->fi_lock);
5603 goto out;
5604 }
5605 if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
5606 stp, open->op_share_access, true))
5607 status = nfserr_jukebox;
5608 spin_unlock(&fp->fi_lock);
5609 goto out;
5610 }
5611
5612 /* Set access bits in stateid */
5613 old_access_bmap = stp->st_access_bmap;
5614 set_access(open->op_share_access, stp);
5615
5616 /* Set new deny mask */
5617 old_deny_bmap = stp->st_deny_bmap;
5618 set_deny(open->op_share_deny, stp);
5619 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
5620
5621 if (!fp->fi_fds[oflag]) {
5622 spin_unlock(&fp->fi_lock);
5623
5624 status = nfsd_file_acquire_opened(rqstp, cur_fh, access,
5625 open->op_filp, &nf);
5626 if (status != nfs_ok)
5627 goto out_put_access;
5628
5629 spin_lock(&fp->fi_lock);
5630 if (!fp->fi_fds[oflag]) {
5631 fp->fi_fds[oflag] = nf;
5632 nf = NULL;
5633 }
5634 }
5635 spin_unlock(&fp->fi_lock);
5636 if (nf)
5637 nfsd_file_put(nf);
5638
5639 status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
5640 access));
5641 if (status)
5642 goto out_put_access;
5643
5644 status = nfsd4_truncate(rqstp, cur_fh, open);
5645 if (status)
5646 goto out_put_access;
5647 out:
5648 return status;
5649 out_put_access:
5650 stp->st_access_bmap = old_access_bmap;
5651 nfs4_file_put_access(fp, open->op_share_access);
5652 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
5653 goto out;
5654 }
5655
5656 static __be32
5657 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp,
5658 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
5659 struct nfsd4_open *open)
5660 {
5661 __be32 status;
5662 unsigned char old_deny_bmap = stp->st_deny_bmap;
5663
5664 if (!test_access(open->op_share_access, stp))
5665 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open, false);
5666
5667 /* test and set deny mode */
5668 spin_lock(&fp->fi_lock);
5669 status = nfs4_file_check_deny(fp, open->op_share_deny);
5670 switch (status) {
5671 case nfs_ok:
5672 set_deny(open->op_share_deny, stp);
5673 fp->fi_share_deny |=
5674 (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
5675 break;
5676 case nfserr_share_denied:
5677 if (nfs4_resolve_deny_conflicts_locked(fp, false,
5678 stp, open->op_share_deny, false))
5679 status = nfserr_jukebox;
5680 break;
5681 }
5682 spin_unlock(&fp->fi_lock);
5683
5684 if (status != nfs_ok)
5685 return status;
5686
5687 status = nfsd4_truncate(rqstp, cur_fh, open);
5688 if (status != nfs_ok)
5689 reset_union_bmap_deny(old_deny_bmap, stp);
5690 return status;
5691 }
5692
5693 /* Should we give out recallable state?: */
5694 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
5695 {
5696 if (clp->cl_cb_state == NFSD4_CB_UP)
5697 return true;
5698 /*
5699 * In the sessions case, since we don't have to establish a
5700 * separate connection for callbacks, we assume it's OK
5701 * until we hear otherwise:
5702 */
5703 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
5704 }
5705
5706 static struct file_lease *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
5707 int flag)
5708 {
5709 struct file_lease *fl;
5710
5711 fl = locks_alloc_lease();
5712 if (!fl)
5713 return NULL;
5714 fl->fl_lmops = &nfsd_lease_mng_ops;
5715 fl->c.flc_flags = FL_DELEG;
5716 fl->c.flc_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
5717 fl->c.flc_owner = (fl_owner_t)dp;
5718 fl->c.flc_pid = current->tgid;
5719 fl->c.flc_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
5720 return fl;
5721 }
5722
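/*
 * Check for write opens that would conflict with handing out a
 * delegation: local (non-NFSv4) writers are detected via i_writecount,
 * and NFSv4 writers by walking fi_stateids. Returns 0 if it appears
 * safe to proceed, or -EAGAIN on a possible conflict.
 */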
5723 static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
5724 struct nfs4_file *fp)
5725 {
5726 struct nfs4_ol_stateid *st;
5727 struct file *f = fp->fi_deleg_file->nf_file;
5728 struct inode *ino = file_inode(f);
5729 int writes;
5730
5731 writes = atomic_read(&ino->i_writecount);
5732 if (!writes)
5733 return 0;
5734 /*
5735 * There could be multiple filehandles (hence multiple
5736 * nfs4_files) referencing this file, but that's not too
5737 * common; let's just give up in that case rather than
5738 * trying to go look up all the clients using that other
5739 * nfs4_file as well:
5740 */
5741 if (fp->fi_aliased)
5742 return -EAGAIN;
5743 /*
5744 * If there's a close in progress, make sure that we see it
5745 * clear any fi_fds[] entries before we see it decrement
5746 * i_writecount:
5747 */
5748 smp_mb__after_atomic();
5749
5750 if (fp->fi_fds[O_WRONLY])
5751 writes--;
5752 if (fp->fi_fds[O_RDWR])
5753 writes--;
5754 if (writes > 0)
5755 return -EAGAIN; /* There may be non-NFSv4 writers */
5756 /*
5757 * It's possible there are non-NFSv4 write opens in progress,
5758 * but if they haven't incremented i_writecount yet then they
5759 * also haven't called break lease yet; so, they'll break this
5760 * lease soon enough. So, all that's left to check for is NFSv4
5761 * opens:
5762 */
5763 spin_lock(&fp->fi_lock);
5764 list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
5765 if (st->st_openstp == NULL /* it's an open */ &&
5766 access_permit_write(st) &&
5767 st->st_stid.sc_client != clp) {
5768 spin_unlock(&fp->fi_lock);
5769 return -EAGAIN;
5770 }
5771 }
5772 spin_unlock(&fp->fi_lock);
5773 /*
5774 * There's a small chance that we could be racing with another
5775 * NFSv4 open. However, any open that hasn't added itself to
5776 * the fi_stateids list also hasn't called break_lease yet; so,
5777 * they'll break this lease soon enough.
5778 */
5779 return 0;
5780 }
5781
5782 /*
5783 * It's possible that the file has been renamed or unlinked between
5784 * opening the dentry and setting the delegation. Redo the lookup to
5785 * verify that this hasn't happened.
5786 */
5787 static int
5788 nfsd4_verify_deleg_dentry(struct nfsd4_open *open, struct nfs4_file *fp,
5789 struct svc_fh *parent)
5790 {
5791 struct svc_export *exp;
5792 struct dentry *child;
5793 __be32 err;
5794
5795 err = nfsd_lookup_dentry(open->op_rqstp, parent,
5796 open->op_fname, open->op_fnamelen,
5797 &exp, &child);
5798
5799 if (err)
5800 return -EAGAIN;
5801
5802 exp_put(exp);
5803 dput(child);
5804 if (child != file_dentry(fp->fi_deleg_file->nf_file))
5805 return -EAGAIN;
5806
5807 return 0;
5808 }
5809
5810 /*
5811 * We avoid breaking delegations held by a client due to its own activity, but
5812 * clearing setuid/setgid bits on a write is an implicit activity and the client
5813 * may not notice and continue using the old mode. Avoid giving out a delegation
5814 * on setuid/setgid files when the client is requesting an open for write.
5815 */
5816 static int
5817 nfsd4_verify_setuid_write(struct nfsd4_open *open, struct nfsd_file *nf)
5818 {
5819 struct inode *inode = file_inode(nf->nf_file);
5820
5821 if ((open->op_share_access & NFS4_SHARE_ACCESS_WRITE) &&
5822 (inode->i_mode & (S_ISUID|S_ISGID)))
5823 return -EAGAIN;
5824 return 0;
5825 }
5826
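/*
 * Attempt to set up a delegation: choose a cached O_RDWR or read-only
 * nfsd_file, set an FL_DELEG lease on it, and hash the new
 * nfs4_delegation. Conflicts are re-checked after the lease is set,
 * since they can race with this setup.
 */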
5827 static struct nfs4_delegation *
5828 nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
5829 struct svc_fh *parent)
5830 {
5831 int status = 0;
5832 struct nfs4_client *clp = stp->st_stid.sc_client;
5833 struct nfs4_file *fp = stp->st_stid.sc_file;
5834 struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate;
5835 struct nfs4_delegation *dp;
5836 struct nfsd_file *nf = NULL;
5837 struct file_lease *fl;
5838 u32 dl_type;
5839
5840 /*
5841 * The fi_had_conflict and nfs_get_existing_delegation checks
5842 * here are just optimizations; we'll need to recheck them at
5843 * the end:
5844 */
5845 if (fp->fi_had_conflict)
5846 return ERR_PTR(-EAGAIN);
5847
5848 /*
5849 * Try for a write delegation first. RFC8881 section 10.4 says:
5850 *
5851 * "An OPEN_DELEGATE_WRITE delegation allows the client to handle,
5852 * on its own, all opens."
5853 *
5854 * Furthermore the client can use a write delegation for most READ
5855 * operations as well, so we require an O_RDWR file here.
5856 *
5857 * Offer a write delegation in the case of a BOTH open, and ensure
5858 * we get the O_RDWR descriptor.
5859 */
5860 if ((open->op_share_access & NFS4_SHARE_ACCESS_BOTH) == NFS4_SHARE_ACCESS_BOTH) {
5861 nf = find_rw_file(fp);
5862 dl_type = NFS4_OPEN_DELEGATE_WRITE;
5863 }
5864
5865 /*
5866 * If the file is being opened O_RDONLY or we couldn't get an O_RDWR
5867 * file for some reason, then try for a read delegation instead.
5868 */
5869 if (!nf && (open->op_share_access & NFS4_SHARE_ACCESS_READ)) {
5870 nf = find_readable_file(fp);
5871 dl_type = NFS4_OPEN_DELEGATE_READ;
5872 }
5873
5874 if (!nf)
5875 return ERR_PTR(-EAGAIN);
5876
5877 spin_lock(&state_lock);
5878 spin_lock(&fp->fi_lock);
5879 if (nfs4_delegation_exists(clp, fp))
5880 status = -EAGAIN;
5881 else if (nfsd4_verify_setuid_write(open, nf))
5882 status = -EAGAIN;
5883 else if (!fp->fi_deleg_file) {
5884 fp->fi_deleg_file = nf;
5885 /* increment early to prevent fi_deleg_file from being
5886 * cleared */
5887 fp->fi_delegees = 1;
5888 nf = NULL;
5889 } else
5890 fp->fi_delegees++;
5891 spin_unlock(&fp->fi_lock);
5892 spin_unlock(&state_lock);
5893 if (nf)
5894 nfsd_file_put(nf);
5895 if (status)
5896 return ERR_PTR(status);
5897
5898 status = -ENOMEM;
5899 dp = alloc_init_deleg(clp, fp, odstate, dl_type);
5900 if (!dp)
5901 goto out_delegees;
5902
5903 fl = nfs4_alloc_init_lease(dp, dl_type);
5904 if (!fl)
5905 goto out_clnt_odstate;
5906
5907 status = kernel_setlease(fp->fi_deleg_file->nf_file,
5908 fl->c.flc_type, &fl, NULL);
5909 if (fl)
5910 locks_free_lease(fl);
5911 if (status)
5912 goto out_clnt_odstate;
5913
5914 if (parent) {
5915 status = nfsd4_verify_deleg_dentry(open, fp, parent);
5916 if (status)
5917 goto out_unlock;
5918 }
5919
5920 status = nfsd4_check_conflicting_opens(clp, fp);
5921 if (status)
5922 goto out_unlock;
5923
5924 /*
5925 * Now that the deleg is set, check again to ensure that nothing
5926 * raced in and changed the mode while we weren't looking.
5927 */
5928 status = nfsd4_verify_setuid_write(open, fp->fi_deleg_file);
5929 if (status)
5930 goto out_unlock;
5931
5932 status = -EAGAIN;
5933 if (fp->fi_had_conflict)
5934 goto out_unlock;
5935
5936 spin_lock(&state_lock);
5937 spin_lock(&clp->cl_lock);
5938 spin_lock(&fp->fi_lock);
5939 status = hash_delegation_locked(dp, fp);
5940 spin_unlock(&fp->fi_lock);
5941 spin_unlock(&clp->cl_lock);
5942 spin_unlock(&state_lock);
5943
5944 if (status)
5945 goto out_unlock;
5946
5947 return dp;
5948 out_unlock:
5949 kernel_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
5950 out_clnt_odstate:
5951 put_clnt_odstate(dp->dl_clnt_odstate);
5952 nfs4_put_stid(&dp->dl_stid);
5953 out_delegees:
5954 put_deleg_file(fp);
5955 return ERR_PTR(status);
5956 }
5957
5958 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
5959 {
5960 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5961 if (status == -EAGAIN)
5962 open->op_why_no_deleg = WND4_CONTENTION;
5963 else {
5964 open->op_why_no_deleg = WND4_RESOURCE;
5965 switch (open->op_deleg_want) {
5966 case NFS4_SHARE_WANT_READ_DELEG:
5967 case NFS4_SHARE_WANT_WRITE_DELEG:
5968 case NFS4_SHARE_WANT_ANY_DELEG:
5969 break;
5970 case NFS4_SHARE_WANT_CANCEL:
5971 open->op_why_no_deleg = WND4_CANCELLED;
5972 break;
5973 case NFS4_SHARE_WANT_NO_DELEG:
5974 WARN_ON_ONCE(1);
5975 }
5976 }
5977 }
5978
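/*
 * Fetch the attributes (size and change attribute) needed to seed the
 * CB_GETATTR bookkeeping for a write delegation. Returns false if no
 * O_RDWR file is available or the getattr fails.
 */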
5979 static bool
5980 nfs4_delegation_stat(struct nfs4_delegation *dp, struct svc_fh *currentfh,
5981 struct kstat *stat)
5982 {
5983 struct nfsd_file *nf = find_rw_file(dp->dl_stid.sc_file);
5984 struct path path;
5985 int rc;
5986
5987 if (!nf)
5988 return false;
5989
5990 path.mnt = currentfh->fh_export->ex_path.mnt;
5991 path.dentry = file_dentry(nf->nf_file);
5992
5993 rc = vfs_getattr(&path, stat,
5994 (STATX_MODE | STATX_SIZE | STATX_CTIME | STATX_CHANGE_COOKIE),
5995 AT_STATX_SYNC_AS_STAT);
5996
5997 nfsd_file_put(nf);
5998 return rc == 0;
5999 }
6000
6001 /*
6002 * The Linux NFS server does not offer write delegations to NFSv4.0
6003 * clients in order to avoid conflicts between write delegations and
6004 * GETATTRs requesting CHANGE or SIZE attributes.
6005 *
6006 * With NFSv4.1 and later minorversions, the SEQUENCE operation that
6007 * begins each COMPOUND contains a client ID. Delegation recall can
6008 * be avoided when the server recognizes that the client sending a
6009 * GETATTR also holds the write delegation it conflicts with.
6010 *
6011 * However, the NFSv4.0 protocol does not enable a server to
6012 * determine that a GETATTR originated from the client holding the
6013 * conflicting delegation versus coming from some other client. Per
6014 * RFC 7530 Section 16.7.5, the server must recall or send a
6015 * CB_GETATTR even when the GETATTR originates from the client that
6016 * holds the conflicting delegation.
6017 *
6018 * An NFSv4.0 client can trigger a pathological situation if it
6019 * always sends a DELEGRETURN preceded by a conflicting GETATTR in
6020 * the same COMPOUND. COMPOUND execution will always stop at the
6021 * GETATTR and the DELEGRETURN will never get executed. The server
6022 * eventually revokes the delegation, which can result in loss of
6023 * open or lock state.
6024 */
6025 static void
6026 nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
6027 struct svc_fh *currentfh)
6028 {
6029 struct nfs4_delegation *dp;
6030 struct nfs4_openowner *oo = openowner(stp->st_stateowner);
6031 struct nfs4_client *clp = stp->st_stid.sc_client;
6032 struct svc_fh *parent = NULL;
6033 int cb_up;
6034 int status = 0;
6035 struct kstat stat;
6036
6037 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
6038 open->op_recall = false;
6039 switch (open->op_claim_type) {
6040 case NFS4_OPEN_CLAIM_PREVIOUS:
6041 if (!cb_up)
6042 open->op_recall = true;
6043 break;
6044 case NFS4_OPEN_CLAIM_NULL:
6045 parent = currentfh;
6046 fallthrough;
6047 case NFS4_OPEN_CLAIM_FH:
6048 /*
6049 * Let's not give out any delegations till everyone's
6050 * had the chance to reclaim theirs, *and* until
6051 * NLM locks have all been reclaimed:
6052 */
6053 if (locks_in_grace(clp->net))
6054 goto out_no_deleg;
6055 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
6056 goto out_no_deleg;
6057 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE &&
6058 !clp->cl_minorversion)
6059 goto out_no_deleg;
6060 break;
6061 default:
6062 goto out_no_deleg;
6063 }
6064 dp = nfs4_set_delegation(open, stp, parent);
6065 if (IS_ERR(dp))
6066 goto out_no_deleg;
6067
6068 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
6069
6070 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) {
6071 if (!nfs4_delegation_stat(dp, currentfh, &stat)) {
6072 nfs4_put_stid(&dp->dl_stid);
6073 destroy_delegation(dp);
6074 goto out_no_deleg;
6075 }
6076 open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE;
6077 dp->dl_cb_fattr.ncf_cur_fsize = stat.size;
6078 dp->dl_cb_fattr.ncf_initial_cinfo = nfsd4_change_attribute(&stat);
6079 trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid);
6080 } else {
6081 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
6082 trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
6083 }
6084 nfs4_put_stid(&dp->dl_stid);
6085 return;
6086 out_no_deleg:
6087 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
6088 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
6089 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
6090 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
6091 open->op_recall = true;
6092 }
6093
6094 /* 4.1 client asking for a delegation? */
6095 if (open->op_deleg_want)
6096 nfsd4_open_deleg_none_ext(open, status);
6097 return;
6098 }
6099
6100 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
6101 struct nfs4_delegation *dp)
6102 {
6103 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
6104 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
6105 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
6106 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
6107 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
6108 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
6109 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
6110 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
6111 }
6112 /* Otherwise the client must be confused, wanting a delegation
6113 * it already has; therefore we don't return
6114 * NFS4_OPEN_DELEGATE_NONE_EXT and a reason.
6115 */
6116 }
6117
6118 /**
6119 * nfsd4_process_open2 - finish open processing
6120 * @rqstp: the RPC transaction being executed
6121 * @current_fh: NFSv4 COMPOUND's current filehandle
6122 * @open: OPEN arguments
6123 *
6124 * If successful, (1) truncate the file if open->op_truncate was
6125 * set, (2) set open->op_stateid, (3) set open->op_delegation.
6126 *
6127 * Returns %nfs_ok on success; otherwise an nfs4stat value in
6128 * network byte order is returned.
6129 */
6130 __be32
6131 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
6132 {
6133 struct nfsd4_compoundres *resp = rqstp->rq_resp;
6134 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
6135 struct nfs4_file *fp = NULL;
6136 struct nfs4_ol_stateid *stp = NULL;
6137 struct nfs4_delegation *dp = NULL;
6138 __be32 status;
6139 bool new_stp = false;
6140
6141 /*
6142 * Look up the file; if found, look up the stateid, check the open
6143 * request, and check for delegations in the process of being recalled.
6144 * If not found, create the nfs4_file struct.
6145 */
6146 fp = nfsd4_file_hash_insert(open->op_file, current_fh);
6147 if (unlikely(!fp))
6148 return nfserr_jukebox;
6149 if (fp != open->op_file) {
6150 status = nfs4_check_deleg(cl, open, &dp);
6151 if (status)
6152 goto out;
6153 if (dp && nfsd4_is_deleg_cur(open) &&
6154 (dp->dl_stid.sc_file != fp)) {
6155 /*
6156 * RFC8881 section 8.2.4 mandates the server to return
6157 * NFS4ERR_BAD_STATEID if the selected table entry does
6158 * not match the current filehandle. However returning
6159 * NFS4ERR_BAD_STATEID in the OPEN can cause the client
6160 * to repeatedly retry the operation with the same
6161 * stateid, since the stateid itself is valid. To avoid
6162 * this situation NFSD returns NFS4ERR_INVAL instead.
6163 */
6164 status = nfserr_inval;
6165 goto out;
6166 }
6167 stp = nfsd4_find_and_lock_existing_open(fp, open);
6168 } else {
6169 open->op_file = NULL;
6170 status = nfserr_bad_stateid;
6171 if (nfsd4_is_deleg_cur(open))
6172 goto out;
6173 }
6174
6175 if (!stp) {
6176 stp = init_open_stateid(fp, open);
6177 if (!stp) {
6178 status = nfserr_jukebox;
6179 goto out;
6180 }
6181
6182 if (!open->op_stp)
6183 new_stp = true;
6184 }
6185
6186 /*
6187 * OPEN the file, or upgrade an existing OPEN.
6188 * If truncate fails, the OPEN fails.
6189 *
6190 * stp is already locked.
6191 */
6192 if (!new_stp) {
6193 /* Stateid was found, this is an OPEN upgrade */
6194 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
6195 if (status) {
6196 mutex_unlock(&stp->st_mutex);
6197 goto out;
6198 }
6199 } else {
6200 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open, true);
6201 if (status) {
6202 release_open_stateid(stp);
6203 mutex_unlock(&stp->st_mutex);
6204 goto out;
6205 }
6206
6207 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
6208 open->op_odstate);
6209 if (stp->st_clnt_odstate == open->op_odstate)
6210 open->op_odstate = NULL;
6211 }
6212
6213 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
6214 mutex_unlock(&stp->st_mutex);
6215
6216 if (nfsd4_has_session(&resp->cstate)) {
6217 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
6218 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
6219 open->op_why_no_deleg = WND4_NOT_WANTED;
6220 goto nodeleg;
6221 }
6222 }
6223
6224 /*
6225 * Attempt to hand out a delegation. No error return, because the
6226 * OPEN succeeds even if we fail.
6227 */
6228 nfs4_open_delegation(open, stp, &resp->cstate.current_fh);
6229 nodeleg:
6230 status = nfs_ok;
6231 trace_nfsd_open(&stp->st_stid.sc_stateid);
6232 out:
6233 /* 4.1 client trying to upgrade/downgrade delegation? */
6234 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
6235 open->op_deleg_want)
6236 nfsd4_deleg_xgrade_none_ext(open, dp);
6237
6238 if (fp)
6239 put_nfs4_file(fp);
6240 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
6241 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
6242 /*
6243 * To finish the open response, we just need to set the rflags.
6244 */
6245 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
6246 if (nfsd4_has_session(&resp->cstate))
6247 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
6248 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
6249 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
6250
6251 if (dp)
6252 nfs4_put_stid(&dp->dl_stid);
6253 if (stp)
6254 nfs4_put_stid(&stp->st_stid);
6255
6256 return status;
6257 }
6258
6259 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
6260 struct nfsd4_open *open)
6261 {
6262 if (open->op_openowner)
6263 nfs4_put_stateowner(&open->op_openowner->oo_owner);
6264 if (open->op_file)
6265 kmem_cache_free(file_slab, open->op_file);
6266 if (open->op_stp)
6267 nfs4_put_stid(&open->op_stp->st_stid);
6268 if (open->op_odstate)
6269 kmem_cache_free(odstate_slab, open->op_odstate);
6270 }
6271
6272 __be32
6273 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6274 union nfsd4_op_u *u)
6275 {
6276 clientid_t *clid = &u->renew;
6277 struct nfs4_client *clp;
6278 __be32 status;
6279 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6280
6281 trace_nfsd_clid_renew(clid);
6282 status = set_client(clid, cstate, nn);
6283 if (status)
6284 return status;
6285 clp = cstate->clp;
6286 if (!list_empty(&clp->cl_delegations)
6287 && clp->cl_cb_state != NFSD4_CB_UP)
6288 return nfserr_cb_path_down;
6289 return nfs_ok;
6290 }
6291
6292 void
6293 nfsd4_end_grace(struct nfsd_net *nn)
6294 {
6295 /* do nothing if grace period already ended */
6296 if (nn->grace_ended)
6297 return;
6298
6299 trace_nfsd_grace_complete(nn);
6300 nn->grace_ended = true;
6301 /*
6302 * If the server goes down again right now, an NFSv4
6303 * client will still be allowed to reclaim after it comes back up,
6304 * even if it hasn't yet had a chance to reclaim state this time.
6306 */
6307 nfsd4_record_grace_done(nn);
6308 /*
6309 * At this point, NFSv4 clients can still reclaim. But if the
6310 * server crashes, any that have not yet reclaimed will be out
6311 * of luck on the next boot.
6312 *
6313 * (NFSv4.1+ clients are considered to have reclaimed once they
6314 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
6315 * have reclaimed after their first OPEN.)
6316 */
6317 locks_end_grace(&nn->nfsd4_manager);
6318 /*
6319 * At this point, and once lockd and/or any other containers
6320 * exit their grace period, further reclaims will fail and
6321 * regular locking can resume.
6322 */
6323 }
6324
6325 /*
6326 * If we've waited a lease period but there are still clients trying to
6327 * reclaim, wait a little longer to give them a chance to finish.
6328 */
6329 static bool clients_still_reclaiming(struct nfsd_net *nn)
6330 {
6331 time64_t double_grace_period_end = nn->boot_time +
6332 2 * nn->nfsd4_lease;
6333
6334 if (nn->track_reclaim_completes &&
6335 atomic_read(&nn->nr_reclaim_complete) ==
6336 nn->reclaim_str_hashtbl_size)
6337 return false;
6338 if (!nn->somebody_reclaimed)
6339 return false;
6340 nn->somebody_reclaimed = false;
6341 /*
6342 * If we've given them *two* lease times to reclaim, and they're
6343 * still not done, give up:
6344 */
6345 if (ktime_get_boottime_seconds() > double_grace_period_end)
6346 return false;
6347 return true;
6348 }
6349
6350 struct laundry_time {
6351 time64_t cutoff;
6352 time64_t new_timeo;
6353 };
6354
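/*
 * Returns true if state with the given last-refresh time has expired.
 * Otherwise, shrink lt->new_timeo so that the laundromat runs again
 * when this (so far the oldest unexpired) item is due.
 */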
6355 static bool state_expired(struct laundry_time *lt, time64_t last_refresh)
6356 {
6357 time64_t time_remaining;
6358
6359 if (last_refresh < lt->cutoff)
6360 return true;
6361 time_remaining = last_refresh - lt->cutoff;
6362 lt->new_timeo = min(lt->new_timeo, time_remaining);
6363 return false;
6364 }
6365
6366 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
6367 void nfsd4_ssc_init_umount_work(struct nfsd_net *nn)
6368 {
6369 spin_lock_init(&nn->nfsd_ssc_lock);
6370 INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list);
6371 init_waitqueue_head(&nn->nfsd_ssc_waitq);
6372 }
6373
6374 /*
6375 * This is called when nfsd is being shut down, after all inter_ssc
6376 * cleanup is done, to destroy the ssc delayed unmount list.
6377 */
6378 static void nfsd4_ssc_shutdown_umount(struct nfsd_net *nn)
6379 {
6380 struct nfsd4_ssc_umount_item *ni = NULL;
6381 struct nfsd4_ssc_umount_item *tmp;
6382
6383 spin_lock(&nn->nfsd_ssc_lock);
6384 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
6385 list_del(&ni->nsui_list);
6386 spin_unlock(&nn->nfsd_ssc_lock);
6387 mntput(ni->nsui_vfsmount);
6388 kfree(ni);
6389 spin_lock(&nn->nfsd_ssc_lock);
6390 }
6391 spin_unlock(&nn->nfsd_ssc_lock);
6392 }
6393
6394 static void nfsd4_ssc_expire_umount(struct nfsd_net *nn)
6395 {
6396 bool do_wakeup = false;
6397 struct nfsd4_ssc_umount_item *ni = NULL;
6398 struct nfsd4_ssc_umount_item *tmp;
6399
6400 spin_lock(&nn->nfsd_ssc_lock);
6401 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
6402 if (time_after(jiffies, ni->nsui_expire)) {
6403 if (refcount_read(&ni->nsui_refcnt) > 1)
6404 continue;
6405
6406 /* mark as being unmounted */
6407 ni->nsui_busy = true;
6408 spin_unlock(&nn->nfsd_ssc_lock);
6409 mntput(ni->nsui_vfsmount);
6410 spin_lock(&nn->nfsd_ssc_lock);
6411
6412 /* waiters need to restart from the beginning of the list */
6413 list_del(&ni->nsui_list);
6414 kfree(ni);
6415
6416 /* wakeup ssc_connect waiters */
6417 do_wakeup = true;
6418 continue;
6419 }
6420 break;
6421 }
6422 if (do_wakeup)
6423 wake_up_all(&nn->nfsd_ssc_waitq);
6424 spin_unlock(&nn->nfsd_ssc_lock);
6425 }
6426 #endif
6427
6428 /* Check if any lock belonging to this lockowner has any blockers */
6429 static bool
6430 nfs4_lockowner_has_blockers(struct nfs4_lockowner *lo)
6431 {
6432 struct file_lock_context *ctx;
6433 struct nfs4_ol_stateid *stp;
6434 struct nfs4_file *nf;
6435
6436 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
6437 nf = stp->st_stid.sc_file;
6438 ctx = locks_inode_context(nf->fi_inode);
6439 if (!ctx)
6440 continue;
6441 if (locks_owner_has_blockers(ctx, lo))
6442 return true;
6443 }
6444 return false;
6445 }
6446
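/*
 * Check whether any other process is blocked waiting on a lock held by
 * one of this client's lockowners; such a client is expired rather
 * than given courtesy treatment.
 */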
6447 static bool
6448 nfs4_anylock_blockers(struct nfs4_client *clp)
6449 {
6450 int i;
6451 struct nfs4_stateowner *so;
6452 struct nfs4_lockowner *lo;
6453
6454 if (atomic_read(&clp->cl_delegs_in_recall))
6455 return true;
6456 spin_lock(&clp->cl_lock);
6457 for (i = 0; i < OWNER_HASH_SIZE; i++) {
6458 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[i],
6459 so_strhash) {
6460 if (so->so_is_open_owner)
6461 continue;
6462 lo = lockowner(so);
6463 if (nfs4_lockowner_has_blockers(lo)) {
6464 spin_unlock(&clp->cl_lock);
6465 return true;
6466 }
6467 }
6468 }
6469 spin_unlock(&clp->cl_lock);
6470 return false;
6471 }
6472
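/*
 * Scan the client LRU for clients to expire. A client whose lease has
 * run out is expired immediately if it is EXPIRABLE, has no state, or
 * holds locks that block other processes; otherwise it becomes a
 * courtesy client, of which at most NFSD_CLIENT_MAX_TRIM_PER_RUN are
 * trimmed per run, and only once the client count has reached
 * nn->nfs4_max_clients.
 */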
6473 static void
6474 nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist,
6475 struct laundry_time *lt)
6476 {
6477 unsigned int maxreap, reapcnt = 0;
6478 struct list_head *pos, *next;
6479 struct nfs4_client *clp;
6480
6481 maxreap = (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) ?
6482 NFSD_CLIENT_MAX_TRIM_PER_RUN : 0;
6483 INIT_LIST_HEAD(reaplist);
6484 spin_lock(&nn->client_lock);
6485 list_for_each_safe(pos, next, &nn->client_lru) {
6486 clp = list_entry(pos, struct nfs4_client, cl_lru);
6487 if (clp->cl_state == NFSD4_EXPIRABLE)
6488 goto exp_client;
6489 if (!state_expired(lt, clp->cl_time))
6490 break;
6491 if (!atomic_read(&clp->cl_rpc_users)) {
6492 if (clp->cl_state == NFSD4_ACTIVE)
6493 atomic_inc(&nn->nfsd_courtesy_clients);
6494 clp->cl_state = NFSD4_COURTESY;
6495 }
6496 if (!client_has_state(clp))
6497 goto exp_client;
6498 if (!nfs4_anylock_blockers(clp))
6499 if (reapcnt >= maxreap)
6500 continue;
6501 exp_client:
6502 if (!mark_client_expired_locked(clp)) {
6503 list_add(&clp->cl_lru, reaplist);
6504 reapcnt++;
6505 }
6506 }
6507 spin_unlock(&nn->client_lock);
6508 }
6509
6510 static void
6511 nfs4_get_courtesy_client_reaplist(struct nfsd_net *nn,
6512 struct list_head *reaplist)
6513 {
6514 unsigned int maxreap = 0, reapcnt = 0;
6515 struct list_head *pos, *next;
6516 struct nfs4_client *clp;
6517
6518 maxreap = NFSD_CLIENT_MAX_TRIM_PER_RUN;
6519 INIT_LIST_HEAD(reaplist);
6520
6521 spin_lock(&nn->client_lock);
6522 list_for_each_safe(pos, next, &nn->client_lru) {
6523 clp = list_entry(pos, struct nfs4_client, cl_lru);
6524 if (clp->cl_state == NFSD4_ACTIVE)
6525 break;
6526 if (reapcnt >= maxreap)
6527 break;
6528 if (!mark_client_expired_locked(clp)) {
6529 list_add(&clp->cl_lru, reaplist);
6530 reapcnt++;
6531 }
6532 }
6533 spin_unlock(&nn->client_lock);
6534 }
6535
6536 static void
6537 nfs4_process_client_reaplist(struct list_head *reaplist)
6538 {
6539 struct list_head *pos, *next;
6540 struct nfs4_client *clp;
6541
6542 list_for_each_safe(pos, next, reaplist) {
6543 clp = list_entry(pos, struct nfs4_client, cl_lru);
6544 trace_nfsd_clid_purged(&clp->cl_clientid);
6545 list_del_init(&clp->cl_lru);
6546 expire_client(clp);
6547 }
6548 }
6549
6550 static void nfs40_clean_admin_revoked(struct nfsd_net *nn,
6551 struct laundry_time *lt)
6552 {
6553 struct nfs4_client *clp;
6554
6555 spin_lock(&nn->client_lock);
6556 if (nn->nfs40_last_revoke == 0 ||
6557 nn->nfs40_last_revoke > lt->cutoff) {
6558 spin_unlock(&nn->client_lock);
6559 return;
6560 }
6561 nn->nfs40_last_revoke = 0;
6562
6563 retry:
6564 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6565 unsigned long id, tmp;
6566 struct nfs4_stid *stid;
6567
6568 if (atomic_read(&clp->cl_admin_revoked) == 0)
6569 continue;
6570
6571 spin_lock(&clp->cl_lock);
6572 idr_for_each_entry_ul(&clp->cl_stateids, stid, tmp, id)
6573 if (stid->sc_status & SC_STATUS_ADMIN_REVOKED) {
6574 refcount_inc(&stid->sc_count);
6575 spin_unlock(&nn->client_lock);
6576 /* this function drops ->cl_lock */
6577 nfsd4_drop_revoked_stid(stid);
6578 nfs4_put_stid(stid);
6579 spin_lock(&nn->client_lock);
6580 goto retry;
6581 }
6582 spin_unlock(&clp->cl_lock);
6583 }
6584 spin_unlock(&nn->client_lock);
6585 }
6586
6587 static time64_t
6588 nfs4_laundromat(struct nfsd_net *nn)
6589 {
6590 struct nfs4_openowner *oo;
6591 struct nfs4_delegation *dp;
6592 struct nfs4_ol_stateid *stp;
6593 struct nfsd4_blocked_lock *nbl;
6594 struct list_head *pos, *next, reaplist;
6595 struct laundry_time lt = {
6596 .cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease,
6597 .new_timeo = nn->nfsd4_lease
6598 };
6599 struct nfs4_cpntf_state *cps;
6600 copy_stateid_t *cps_t;
6601 int i;
6602
6603 if (clients_still_reclaiming(nn)) {
6604 lt.new_timeo = 0;
6605 goto out;
6606 }
6607 nfsd4_end_grace(nn);
6608
6609 spin_lock(&nn->s2s_cp_lock);
6610 idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
6611 cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
6612 if (cps->cp_stateid.cs_type == NFS4_COPYNOTIFY_STID &&
6613 state_expired(<, cps->cpntf_time))
6614 _free_cpntf_state_locked(nn, cps);
6615 }
6616 spin_unlock(&nn->s2s_cp_lock);
6617 nfs4_get_client_reaplist(nn, &reaplist, <);
6618 nfs4_process_client_reaplist(&reaplist);
6619
6620 nfs40_clean_admin_revoked(nn, <);
6621
6622 spin_lock(&state_lock);
6623 list_for_each_safe(pos, next, &nn->del_recall_lru) {
6624 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
6625 if (!state_expired(<, dp->dl_time))
6626 break;
6627 refcount_inc(&dp->dl_stid.sc_count);
6628 unhash_delegation_locked(dp, SC_STATUS_REVOKED);
6629 list_add(&dp->dl_recall_lru, &reaplist);
6630 }
6631 spin_unlock(&state_lock);
6632 while (!list_empty(&reaplist)) {
6633 dp = list_first_entry(&reaplist, struct nfs4_delegation,
6634 dl_recall_lru);
6635 list_del_init(&dp->dl_recall_lru);
6636 revoke_delegation(dp);
6637 }
6638
6639 spin_lock(&nn->client_lock);
6640 while (!list_empty(&nn->close_lru)) {
6641 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
6642 oo_close_lru);
6643 if (!state_expired(<, oo->oo_time))
6644 break;
6645 list_del_init(&oo->oo_close_lru);
6646 stp = oo->oo_last_closed_stid;
6647 oo->oo_last_closed_stid = NULL;
6648 spin_unlock(&nn->client_lock);
6649 nfs4_put_stid(&stp->st_stid);
6650 spin_lock(&nn->client_lock);
6651 }
6652 spin_unlock(&nn->client_lock);
6653
6654 /*
6655 * It's possible for a client to try and acquire an already held lock
6656 * that is being held for a long time, and then lose interest in it.
6657 * So, we clean out any un-revisited request after a lease period
6658 * under the assumption that the client is no longer interested.
6659 *
6660 * RFC5661, sec. 9.6 states that the client must not rely on getting
6661 * notifications and must continue to poll for locks, even when the
6662 * server supports them. Thus this shouldn't lead to clients blocking
6663 * indefinitely once the lock does become free.
6664 */
6665 BUG_ON(!list_empty(&reaplist));
6666 spin_lock(&nn->blocked_locks_lock);
6667 while (!list_empty(&nn->blocked_locks_lru)) {
6668 nbl = list_first_entry(&nn->blocked_locks_lru,
6669 struct nfsd4_blocked_lock, nbl_lru);
6670 if (!state_expired(<, nbl->nbl_time))
6671 break;
6672 list_move(&nbl->nbl_lru, &reaplist);
6673 list_del_init(&nbl->nbl_list);
6674 }
6675 spin_unlock(&nn->blocked_locks_lock);
6676
6677 while (!list_empty(&reaplist)) {
6678 nbl = list_first_entry(&reaplist,
6679 struct nfsd4_blocked_lock, nbl_lru);
6680 list_del_init(&nbl->nbl_lru);
6681 free_blocked_lock(nbl);
6682 }
6683 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
6684 /* service the server-to-server copy delayed unmount list */
6685 nfsd4_ssc_expire_umount(nn);
6686 #endif
6687 if (atomic_long_read(&num_delegations) >= max_delegations)
6688 deleg_reaper(nn);
6689 out:
6690 return max_t(time64_t, lt.new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
6691 }
6692
6693 static void laundromat_main(struct work_struct *);
6694
6695 static void
6696 laundromat_main(struct work_struct *laundry)
6697 {
6698 time64_t t;
6699 struct delayed_work *dwork = to_delayed_work(laundry);
6700 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
6701 laundromat_work);
6702
6703 t = nfs4_laundromat(nn);
6704 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
6705 }
6706
6707 static void
6708 courtesy_client_reaper(struct nfsd_net *nn)
6709 {
6710 struct list_head reaplist;
6711
6712 nfs4_get_courtesy_client_reaplist(nn, &reaplist);
6713 nfs4_process_client_reaplist(&reaplist);
6714 }
6715
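/*
 * Ask clients to voluntarily return delegations by sending
 * CB_RECALL_ANY for read and write data delegations. Clients that are
 * not ACTIVE, hold no delegations, already have a recall in flight, or
 * were asked less than five seconds ago are skipped.
 */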
6716 static void
6717 deleg_reaper(struct nfsd_net *nn)
6718 {
6719 struct list_head *pos, *next;
6720 struct nfs4_client *clp;
6721 LIST_HEAD(cblist);
6722
6723 spin_lock(&nn->client_lock);
6724 list_for_each_safe(pos, next, &nn->client_lru) {
6725 clp = list_entry(pos, struct nfs4_client, cl_lru);
6726
6727 if (clp->cl_state != NFSD4_ACTIVE)
6728 continue;
6729 if (list_empty(&clp->cl_delegations))
6730 continue;
6731 if (atomic_read(&clp->cl_delegs_in_recall))
6732 continue;
6733 if (test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags))
6734 continue;
6735 if (ktime_get_boottime_seconds() - clp->cl_ra_time < 5)
6736 continue;
6737 if (clp->cl_cb_state != NFSD4_CB_UP)
6738 continue;
6739 list_add(&clp->cl_ra_cblist, &cblist);
6740
6741 /* release in nfsd4_cb_recall_any_release */
6742 kref_get(&clp->cl_nfsdfs.cl_ref);
6743 set_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
6744 clp->cl_ra_time = ktime_get_boottime_seconds();
6745 }
6746 spin_unlock(&nn->client_lock);
6747
6748 while (!list_empty(&cblist)) {
6749 clp = list_first_entry(&cblist, struct nfs4_client,
6750 cl_ra_cblist);
6751 list_del_init(&clp->cl_ra_cblist);
6752 clp->cl_ra->ra_keep = 0;
6753 clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG) |
6754 BIT(RCA4_TYPE_MASK_WDATA_DLG);
6755 trace_nfsd_cb_recall_any(clp->cl_ra);
6756 nfsd4_run_cb(&clp->cl_ra->ra_cb);
6757 }
6758 }
6759
6760 static void
6761 nfsd4_state_shrinker_worker(struct work_struct *work)
6762 {
6763 struct nfsd_net *nn = container_of(work, struct nfsd_net,
6764 nfsd_shrinker_work);
6765
6766 courtesy_client_reaper(nn);
6767 deleg_reaper(nn);
6768 }
6769
6770 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
6771 {
6772 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
6773 return nfserr_bad_stateid;
6774 return nfs_ok;
6775 }
6776
6777 static
6778 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
6779 {
6780 __be32 status = nfserr_openmode;
6781
6782 /* For lock stateid's, we test the parent open, not the lock: */
6783 if (stp->st_openstp)
6784 stp = stp->st_openstp;
6785 if ((flags & WR_STATE) && !access_permit_write(stp))
6786 goto out;
6787 if ((flags & RD_STATE) && !access_permit_read(stp))
6788 goto out;
6789 status = nfs_ok;
6790 out:
6791 return status;
6792 }
6793
6794 static inline __be32
6795 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
6796 {
6797 if (ONE_STATEID(stateid) && (flags & RD_STATE))
6798 return nfs_ok;
6799 else if (opens_in_grace(net)) {
6800 /* The answer in the remaining cases depends on the existence
6801 * of conflicting state, so we must wait out the grace period. */
6802 return nfserr_grace;
6803 } else if (flags & WR_STATE)
6804 return nfs4_share_conflict(current_fh,
6805 NFS4_SHARE_DENY_WRITE);
6806 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
6807 return nfs4_share_conflict(current_fh,
6808 NFS4_SHARE_DENY_READ);
6809 }
6810
6811 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
6812 {
6813 /*
6814 * When sessions are used the stateid generation number is ignored
6815 * when it is zero.
6816 */
6817 if (has_session && in->si_generation == 0)
6818 return nfs_ok;
6819
6820 if (in->si_generation == ref->si_generation)
6821 return nfs_ok;
6822
6823 /* If the client sends us a stateid from the future, it's buggy: */
6824 if (nfsd4_stateid_generation_after(in, ref))
6825 return nfserr_bad_stateid;
6826 /*
6827 * However, we could see a stateid from the past, even from a
6828 * non-buggy client. For example, if the client sends a lock
6829 * while some IO is outstanding, the lock may bump si_generation
6830 * while the IO is still in flight. The client could avoid that
6831 * situation by waiting for responses on all the IO requests,
6832 * but better performance may result in retrying IO that
6833 * receives an old_stateid error if requests are rarely
6834 * reordered in flight:
6835 */
6836 return nfserr_old_stateid;
6837 }
6838
6839 static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
6840 {
6841 __be32 ret;
6842
6843 spin_lock(&s->sc_lock);
6844 ret = nfsd4_verify_open_stid(s);
6845 if (ret == nfs_ok)
6846 ret = check_stateid_generation(in, &s->sc_stateid, has_session);
6847 spin_unlock(&s->sc_lock);
6848 if (ret == nfserr_admin_revoked)
6849 nfsd40_drop_revoked_stid(s->sc_client,
6850 &s->sc_stateid);
6851 return ret;
6852 }
6853
6854 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
6855 {
6856 if (ols->st_stateowner->so_is_open_owner &&
6857 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
6858 return nfserr_bad_stateid;
6859 return nfs_ok;
6860 }
6861
6862 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
6863 {
6864 struct nfs4_stid *s;
6865 __be32 status = nfserr_bad_stateid;
6866
6867 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
6868 CLOSE_STATEID(stateid))
6869 return status;
6870 spin_lock(&cl->cl_lock);
6871 s = find_stateid_locked(cl, stateid);
6872 if (!s)
6873 goto out_unlock;
6874 status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
6875 if (status)
6876 goto out_unlock;
6877 status = nfsd4_verify_open_stid(s);
6878 if (status)
6879 goto out_unlock;
6880
6881 switch (s->sc_type) {
6882 case SC_TYPE_DELEG:
6883 status = nfs_ok;
6884 break;
6885 case SC_TYPE_OPEN:
6886 case SC_TYPE_LOCK:
6887 status = nfsd4_check_openowner_confirmed(openlockstateid(s));
6888 break;
6889 default:
6890 printk("unknown stateid type %x\n", s->sc_type);
6891 status = nfserr_bad_stateid;
6892 }
6893 out_unlock:
6894 spin_unlock(&cl->cl_lock);
6895 if (status == nfserr_admin_revoked)
6896 nfsd40_drop_revoked_stid(cl, stateid);
6897 return status;
6898 }
6899
6900 __be32
6901 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
6902 stateid_t *stateid,
6903 unsigned short typemask, unsigned short statusmask,
6904 struct nfs4_stid **s, struct nfsd_net *nn)
6905 {
6906 __be32 status;
6907 struct nfs4_stid *stid;
6908 bool return_revoked = false;
6909
6910 /*
6911 * Only return revoked delegations if explicitly asked;
6912 * otherwise we report revoked or bad_stateid status.
6913 */
6914 if (statusmask & SC_STATUS_REVOKED)
6915 return_revoked = true;
6916 if (typemask & SC_TYPE_DELEG)
6917 /* Always allow REVOKED for DELEG so we can
6918 * return the appropriate error.
6919 */
6920 statusmask |= SC_STATUS_REVOKED;
6921
6922 statusmask |= SC_STATUS_ADMIN_REVOKED | SC_STATUS_FREEABLE;
6923
6924 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
6925 CLOSE_STATEID(stateid))
6926 return nfserr_bad_stateid;
6927 status = set_client(&stateid->si_opaque.so_clid, cstate, nn);
6928 if (status == nfserr_stale_clientid) {
6929 if (cstate->session)
6930 return nfserr_bad_stateid;
6931 return nfserr_stale_stateid;
6932 }
6933 if (status)
6934 return status;
6935 stid = find_stateid_by_type(cstate->clp, stateid, typemask, statusmask);
6936 if (!stid)
6937 return nfserr_bad_stateid;
6938 if ((stid->sc_status & SC_STATUS_REVOKED) && !return_revoked) {
6939 nfs4_put_stid(stid);
6940 return nfserr_deleg_revoked;
6941 }
6942 if (stid->sc_status & SC_STATUS_ADMIN_REVOKED) {
6943 nfsd40_drop_revoked_stid(cstate->clp, stateid);
6944 nfs4_put_stid(stid);
6945 return nfserr_admin_revoked;
6946 }
6947 *s = stid;
6948 return nfs_ok;
6949 }
6950
6951 static struct nfsd_file *
6952 nfs4_find_file(struct nfs4_stid *s, int flags)
6953 {
6954 struct nfsd_file *ret = NULL;
6955
6956 if (!s || s->sc_status)
6957 return NULL;
6958
6959 switch (s->sc_type) {
6960 case SC_TYPE_DELEG:
6961 spin_lock(&s->sc_file->fi_lock);
6962 ret = nfsd_file_get(s->sc_file->fi_deleg_file);
6963 spin_unlock(&s->sc_file->fi_lock);
6964 break;
6965 case SC_TYPE_OPEN:
6966 case SC_TYPE_LOCK:
6967 if (flags & RD_STATE)
6968 ret = find_readable_file(s->sc_file);
6969 else
6970 ret = find_writeable_file(s->sc_file);
6971 }
6972
6973 return ret;
6974 }
6975
6976 static __be32
6977 nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
6978 {
6979 __be32 status;
6980
6981 status = nfsd4_check_openowner_confirmed(ols);
6982 if (status)
6983 return status;
6984 return nfs4_check_openmode(ols, flags);
6985 }
6986
6987 static __be32
6988 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
6989 struct nfsd_file **nfp, int flags)
6990 {
6991 int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
6992 struct nfsd_file *nf;
6993 __be32 status;
6994
6995 nf = nfs4_find_file(s, flags);
6996 if (nf) {
6997 status = nfsd_permission(&rqstp->rq_cred,
6998 fhp->fh_export, fhp->fh_dentry,
6999 acc | NFSD_MAY_OWNER_OVERRIDE);
7000 if (status) {
7001 nfsd_file_put(nf);
7002 goto out;
7003 }
7004 } else {
7005 status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
7006 if (status)
7007 return status;
7008 }
7009 *nfp = nf;
7010 out:
7011 return status;
7012 }
7013 static void
7014 _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
7015 {
7016 WARN_ON_ONCE(cps->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID);
7017 if (!refcount_dec_and_test(&cps->cp_stateid.cs_count))
7018 return;
7019 list_del(&cps->cp_list);
7020 idr_remove(&nn->s2s_cp_stateids,
7021 cps->cp_stateid.cs_stid.si_opaque.so_id);
7022 kfree(cps);
7023 }
7024 /*
7025 * A READ from an inter server to server COPY will have a
7026 * copy stateid. Look up the copy notify stateid from the
7027 * idr structure and take a reference on it.
7028 */
7029 __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
7030 struct nfs4_client *clp,
7031 struct nfs4_cpntf_state **cps)
7032 {
7033 copy_stateid_t *cps_t;
7034 struct nfs4_cpntf_state *state = NULL;
7035
7036 if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id)
7037 return nfserr_bad_stateid;
7038 spin_lock(&nn->s2s_cp_lock);
7039 cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id);
7040 if (cps_t) {
7041 state = container_of(cps_t, struct nfs4_cpntf_state,
7042 cp_stateid);
7043 if (state->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID) {
7044 state = NULL;
7045 goto unlock;
7046 }
7047 if (!clp)
7048 refcount_inc(&state->cp_stateid.cs_count);
7049 else
7050 _free_cpntf_state_locked(nn, state);
7051 }
7052 unlock:
7053 spin_unlock(&nn->s2s_cp_lock);
7054 if (!state)
7055 return nfserr_bad_stateid;
7056 if (!clp)
7057 *cps = state;
7058 return 0;
7059 }
7060
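/*
 * Note that manage_cpntf_state() is dual-purpose: called with @clp == NULL
 * it looks up the copy-notify stateid and takes a reference on it
 * (returned via @cps); called with @clp != NULL it instead drops the
 * table's reference, freeing the state once the count reaches zero.
 */
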
7061 static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
7062 struct nfs4_stid **stid)
7063 {
7064 __be32 status;
7065 struct nfs4_cpntf_state *cps = NULL;
7066 struct nfs4_client *found;
7067
7068 status = manage_cpntf_state(nn, st, NULL, &cps);
7069 if (status)
7070 return status;
7071
7072 cps->cpntf_time = ktime_get_boottime_seconds();
7073
7074 status = nfserr_expired;
7075 found = lookup_clientid(&cps->cp_p_clid, true, nn);
7076 if (!found)
7077 goto out;
7078
7079 *stid = find_stateid_by_type(found, &cps->cp_p_stateid,
7080 SC_TYPE_DELEG|SC_TYPE_OPEN|SC_TYPE_LOCK,
7081 0);
7082 if (*stid)
7083 status = nfs_ok;
7084 else
7085 status = nfserr_bad_stateid;
7086
7087 put_client_renew(found);
7088 out:
7089 nfs4_put_cpntf_state(nn, cps);
7090 return status;
7091 }
7092
7093 void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
7094 {
7095 spin_lock(&nn->s2s_cp_lock);
7096 _free_cpntf_state_locked(nn, cps);
7097 spin_unlock(&nn->s2s_cp_lock);
7098 }
7099
7100 /**
7101 * nfs4_preprocess_stateid_op - find and prep stateid for an operation
7102 * @rqstp: incoming request from client
7103 * @cstate: current compound state
7104 * @fhp: filehandle associated with requested stateid
7105 * @stateid: stateid (provided by client)
7106 * @flags: flags describing type of operation to be done
7107 * @nfp: optional nfsd_file return pointer (may be NULL)
7108 * @cstid: optional returned nfs4_stid pointer (may be NULL)
7109 *
7110 * Given info from the client, look up an nfs4_stid for the operation. On
7111 * success, it returns a reference to the nfs4_stid and/or the nfsd_file
7112 * associated with it.
7113 */
7114 __be32
7115 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
7116 struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
7117 stateid_t *stateid, int flags, struct nfsd_file **nfp,
7118 struct nfs4_stid **cstid)
7119 {
7120 struct net *net = SVC_NET(rqstp);
7121 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7122 struct nfs4_stid *s = NULL;
7123 __be32 status;
7124
7125 if (nfp)
7126 *nfp = NULL;
7127
7128 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
7129 status = check_special_stateids(net, fhp, stateid, flags);
7130 goto done;
7131 }
7132
7133 status = nfsd4_lookup_stateid(cstate, stateid,
7134 SC_TYPE_DELEG|SC_TYPE_OPEN|SC_TYPE_LOCK,
7135 0, &s, nn);
7136 if (status == nfserr_bad_stateid)
7137 status = find_cpntf_state(nn, stateid, &s);
7138 if (status)
7139 return status;
7140 status = nfsd4_stid_check_stateid_generation(stateid, s,
7141 nfsd4_has_session(cstate));
7142 if (status)
7143 goto out;
7144
7145 switch (s->sc_type) {
7146 case SC_TYPE_DELEG:
7147 status = nfs4_check_delegmode(delegstateid(s), flags);
7148 break;
7149 case SC_TYPE_OPEN:
7150 case SC_TYPE_LOCK:
7151 status = nfs4_check_olstateid(openlockstateid(s), flags);
7152 break;
7153 }
7154 if (status)
7155 goto out;
7156 status = nfs4_check_fh(fhp, s);
7157
7158 done:
7159 if (status == nfs_ok && nfp)
7160 status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
7161 out:
7162 if (s) {
7163 if (!status && cstid)
7164 *cstid = s;
7165 else
7166 nfs4_put_stid(s);
7167 }
7168 return status;
7169 }
7170
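/*
 * Note that the all-zeroes and all-ones special stateids never map to an
 * nfs4_stid: for those, @cstid (if supplied) is left untouched, and only
 * the nfsd_file, if requested, is returned.
 */
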
7171 /*
7172 * Test if the stateid is valid
7173 */
7174 __be32
7175 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7176 union nfsd4_op_u *u)
7177 {
7178 struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
7179 struct nfsd4_test_stateid_id *stateid;
7180 struct nfs4_client *cl = cstate->clp;
7181
7182 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
7183 stateid->ts_id_status =
7184 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
7185
7186 return nfs_ok;
7187 }
7188
7189 static __be32
7190 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
7191 {
7192 struct nfs4_ol_stateid *stp = openlockstateid(s);
7193 __be32 ret;
7194
7195 ret = nfsd4_lock_ol_stateid(stp);
7196 if (ret)
7197 goto out_put_stid;
7198
7199 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
7200 if (ret)
7201 goto out;
7202
7203 ret = nfserr_locks_held;
7204 if (check_for_locks(stp->st_stid.sc_file,
7205 lockowner(stp->st_stateowner)))
7206 goto out;
7207
7208 release_lock_stateid(stp);
7209 ret = nfs_ok;
7210
7211 out:
7212 mutex_unlock(&stp->st_mutex);
7213 out_put_stid:
7214 nfs4_put_stid(s);
7215 return ret;
7216 }
7217
7218 __be32
7219 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7220 union nfsd4_op_u *u)
7221 {
7222 struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
7223 stateid_t *stateid = &free_stateid->fr_stateid;
7224 struct nfs4_stid *s;
7225 struct nfs4_delegation *dp;
7226 struct nfs4_client *cl = cstate->clp;
7227 __be32 ret = nfserr_bad_stateid;
7228
7229 spin_lock(&cl->cl_lock);
7230 s = find_stateid_locked(cl, stateid);
7231 if (!s || s->sc_status & SC_STATUS_CLOSED)
7232 goto out_unlock;
7233 if (s->sc_status & SC_STATUS_ADMIN_REVOKED) {
7234 nfsd4_drop_revoked_stid(s);
7235 ret = nfs_ok;
7236 goto out;
7237 }
7238 spin_lock(&s->sc_lock);
7239 switch (s->sc_type) {
7240 case SC_TYPE_DELEG:
7241 if (s->sc_status & SC_STATUS_REVOKED) {
7242 s->sc_status |= SC_STATUS_CLOSED;
7243 spin_unlock(&s->sc_lock);
7244 dp = delegstateid(s);
7245 if (s->sc_status & SC_STATUS_FREEABLE)
7246 list_del_init(&dp->dl_recall_lru);
7247 s->sc_status |= SC_STATUS_FREED;
7248 spin_unlock(&cl->cl_lock);
7249 nfs4_put_stid(s);
7250 ret = nfs_ok;
7251 goto out;
7252 }
7253 ret = nfserr_locks_held;
7254 break;
7255 case SC_TYPE_OPEN:
7256 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
7257 if (ret)
7258 break;
7259 ret = nfserr_locks_held;
7260 break;
7261 case SC_TYPE_LOCK:
7262 spin_unlock(&s->sc_lock);
7263 refcount_inc(&s->sc_count);
7264 spin_unlock(&cl->cl_lock);
7265 ret = nfsd4_free_lock_stateid(stateid, s);
7266 goto out;
7267 }
7268 spin_unlock(&s->sc_lock);
7269 out_unlock:
7270 spin_unlock(&cl->cl_lock);
7271 out:
7272 return ret;
7273 }
7274
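/*
 * In short, FREE_STATEID succeeds only for revoked or admin-revoked
 * delegations and for lock stateids whose lockowner no longer holds any
 * locks; open stateids and still-valid delegations get nfserr_locks_held
 * and must be dealt with via CLOSE or DELEGRETURN instead.
 */
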
7275 static inline int
7276 setlkflg (int type)
7277 {
7278 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
7279 RD_STATE : WR_STATE;
7280 }
7281
7282 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
7283 {
7284 struct svc_fh *current_fh = &cstate->current_fh;
7285 struct nfs4_stateowner *sop = stp->st_stateowner;
7286 __be32 status;
7287
7288 status = nfsd4_check_seqid(cstate, sop, seqid);
7289 if (status)
7290 return status;
7291 status = nfsd4_lock_ol_stateid(stp);
7292 if (status != nfs_ok)
7293 return status;
7294 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
7295 if (status == nfs_ok)
7296 status = nfs4_check_fh(current_fh, &stp->st_stid);
7297 if (status != nfs_ok)
7298 mutex_unlock(&stp->st_mutex);
7299 return status;
7300 }
7301
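/*
 * On an nfs_ok return from nfs4_seqid_op_checks() the stateid's st_mutex
 * is held; every failure after nfsd4_lock_ol_stateid() drops it again,
 * so callers only ever see the mutex held together with success.
 */
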
7302 /**
7303 * nfs4_preprocess_seqid_op - find and prep an ol_stateid for a seqid-morphing op
7304 * @cstate: compound state
7305 * @seqid: seqid (provided by client)
7306 * @stateid: stateid (provided by client)
7307 * @typemask: mask of allowable types for this operation
7308 * @statusmask: mask of allowed states: 0 or SC_STATUS_CLOSED
7309 * @stpp: return pointer for the stateid found
7310 * @nn: net namespace for request
7311 *
7312 * Given a stateid+seqid from a client, look up an nfs4_ol_stateid and
7313 * return it in @stpp. On an nfs_ok return, the returned stateid will
7314 * have its st_mutex locked.
7315 */
7316 static __be32
7317 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
7318 stateid_t *stateid,
7319 unsigned short typemask, unsigned short statusmask,
7320 struct nfs4_ol_stateid **stpp,
7321 struct nfsd_net *nn)
7322 {
7323 __be32 status;
7324 struct nfs4_stid *s;
7325 struct nfs4_ol_stateid *stp = NULL;
7326
7327 trace_nfsd_preprocess(seqid, stateid);
7328
7329 *stpp = NULL;
7330 retry:
7331 status = nfsd4_lookup_stateid(cstate, stateid,
7332 typemask, statusmask, &s, nn);
7333 if (status)
7334 return status;
7335 stp = openlockstateid(s);
7336 if (nfsd4_cstate_assign_replay(cstate, stp->st_stateowner) == -EAGAIN) {
7337 nfs4_put_stateowner(stp->st_stateowner);
7338 goto retry;
7339 }
7340
7341 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
7342 if (!status)
7343 *stpp = stp;
7344 else
7345 nfs4_put_stid(&stp->st_stid);
7346 return status;
7347 }
7348
7349 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
7350 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
7351 {
7352 __be32 status;
7353 struct nfs4_openowner *oo;
7354 struct nfs4_ol_stateid *stp;
7355
7356 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
7357 SC_TYPE_OPEN, 0, &stp, nn);
7358 if (status)
7359 return status;
7360 oo = openowner(stp->st_stateowner);
7361 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
7362 mutex_unlock(&stp->st_mutex);
7363 nfs4_put_stid(&stp->st_stid);
7364 return nfserr_bad_stateid;
7365 }
7366 *stpp = stp;
7367 return nfs_ok;
7368 }
7369
7370 __be32
7371 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7372 union nfsd4_op_u *u)
7373 {
7374 struct nfsd4_open_confirm *oc = &u->open_confirm;
7375 __be32 status;
7376 struct nfs4_openowner *oo;
7377 struct nfs4_ol_stateid *stp;
7378 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7379
7380 dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
7381 cstate->current_fh.fh_dentry);
7382
7383 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
7384 if (status)
7385 return status;
7386
7387 status = nfs4_preprocess_seqid_op(cstate,
7388 oc->oc_seqid, &oc->oc_req_stateid,
7389 SC_TYPE_OPEN, 0, &stp, nn);
7390 if (status)
7391 goto out;
7392 oo = openowner(stp->st_stateowner);
7393 status = nfserr_bad_stateid;
7394 if (oo->oo_flags & NFS4_OO_CONFIRMED) {
7395 mutex_unlock(&stp->st_mutex);
7396 goto put_stateid;
7397 }
7398 oo->oo_flags |= NFS4_OO_CONFIRMED;
7399 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
7400 mutex_unlock(&stp->st_mutex);
7401 trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid);
7402 nfsd4_client_record_create(oo->oo_owner.so_client);
7403 status = nfs_ok;
7404 put_stateid:
7405 nfs4_put_stid(&stp->st_stid);
7406 out:
7407 nfsd4_bump_seqid(cstate, status);
7408 return status;
7409 }
7410
7411 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
7412 {
7413 if (!test_access(access, stp))
7414 return;
7415 nfs4_file_put_access(stp->st_stid.sc_file, access);
7416 clear_access(access, stp);
7417 }
7418
7419 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
7420 {
7421 switch (to_access) {
7422 case NFS4_SHARE_ACCESS_READ:
7423 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
7424 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
7425 break;
7426 case NFS4_SHARE_ACCESS_WRITE:
7427 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
7428 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
7429 break;
7430 case NFS4_SHARE_ACCESS_BOTH:
7431 break;
7432 default:
7433 WARN_ON_ONCE(1);
7434 }
7435 }
7436
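/*
 * For example, an OPEN_DOWNGRADE to NFS4_SHARE_ACCESS_READ drops any
 * WRITE and BOTH access bits (and their nfs4_file references), leaving
 * the previously-recorded READ access as the only one set.
 */
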
7437 __be32
7438 nfsd4_open_downgrade(struct svc_rqst *rqstp,
7439 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
7440 {
7441 struct nfsd4_open_downgrade *od = &u->open_downgrade;
7442 __be32 status;
7443 struct nfs4_ol_stateid *stp;
7444 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7445
7446 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
7447 cstate->current_fh.fh_dentry);
7448
7449 /* We don't yet support WANT bits: */
7450 if (od->od_deleg_want)
7451 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
7452 od->od_deleg_want);
7453
7454 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
7455 &od->od_stateid, &stp, nn);
7456 if (status)
7457 goto out;
7458 status = nfserr_inval;
7459 if (!test_access(od->od_share_access, stp)) {
7460 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
7461 stp->st_access_bmap, od->od_share_access);
7462 goto put_stateid;
7463 }
7464 if (!test_deny(od->od_share_deny, stp)) {
7465 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
7466 stp->st_deny_bmap, od->od_share_deny);
7467 goto put_stateid;
7468 }
7469 nfs4_stateid_downgrade(stp, od->od_share_access);
7470 reset_union_bmap_deny(od->od_share_deny, stp);
7471 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
7472 status = nfs_ok;
7473 put_stateid:
7474 mutex_unlock(&stp->st_mutex);
7475 nfs4_put_stid(&stp->st_stid);
7476 out:
7477 nfsd4_bump_seqid(cstate, status);
7478 return status;
7479 }
7480
7481 static bool nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
7482 {
7483 struct nfs4_client *clp = s->st_stid.sc_client;
7484 bool unhashed;
7485 LIST_HEAD(reaplist);
7486 struct nfs4_ol_stateid *stp;
7487
7488 spin_lock(&clp->cl_lock);
7489 unhashed = unhash_open_stateid(s, &reaplist);
7490
7491 if (clp->cl_minorversion) {
7492 if (unhashed)
7493 put_ol_stateid_locked(s, &reaplist);
7494 spin_unlock(&clp->cl_lock);
7495 list_for_each_entry(stp, &reaplist, st_locks)
7496 nfs4_free_cpntf_statelist(clp->net, &stp->st_stid);
7497 free_ol_stateid_reaplist(&reaplist);
7498 return false;
7499 } else {
7500 spin_unlock(&clp->cl_lock);
7501 free_ol_stateid_reaplist(&reaplist);
7502 return unhashed;
7503 }
7504 }
7505
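/*
 * Only NFSv4.0 needs the close-LRU dance: with no sessions to catch
 * retransmissions, a replayed CLOSE must still be able to find state for
 * the just-closed stateid, so nfsd4_close() parks it via
 * move_to_close_lru() instead of freeing it immediately as the
 * minorversion != 0 branch above does.
 */
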
7506 /*
7507 * nfs4_unlock_state() called after encode
7508 */
7509 __be32
7510 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7511 union nfsd4_op_u *u)
7512 {
7513 struct nfsd4_close *close = &u->close;
7514 __be32 status;
7515 struct nfs4_ol_stateid *stp;
7516 struct net *net = SVC_NET(rqstp);
7517 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7518 bool need_move_to_close_list;
7519
7520 dprintk("NFSD: nfsd4_close on file %pd\n",
7521 cstate->current_fh.fh_dentry);
7522
7523 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
7524 &close->cl_stateid,
7525 SC_TYPE_OPEN, SC_STATUS_CLOSED,
7526 &stp, nn);
7527 nfsd4_bump_seqid(cstate, status);
7528 if (status)
7529 goto out;
7530
7531 spin_lock(&stp->st_stid.sc_client->cl_lock);
7532 stp->st_stid.sc_status |= SC_STATUS_CLOSED;
7533 spin_unlock(&stp->st_stid.sc_client->cl_lock);
7534
7535 /*
7536 * Technically we don't _really_ have to increment or copy it, since
7537 * it should just be gone after this operation and we clobber the
7538 * copied value below, but we continue to do so here just to ensure
7539 * that racing ops see that there was a state change.
7540 */
7541 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
7542
7543 need_move_to_close_list = nfsd4_close_open_stateid(stp);
7544 mutex_unlock(&stp->st_mutex);
7545 if (need_move_to_close_list)
7546 move_to_close_lru(stp, net);
7547
7548 /* v4.1+ suggests that we send a special stateid in here, since the
7549 * clients should just ignore this anyway. Since this is not useful
7550 * for v4.0 clients either, we set it to the special close_stateid
7551 * universally.
7552 *
7553 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
7554 */
7555 memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));
7556
7557 /* put reference from nfs4_preprocess_seqid_op */
7558 nfs4_put_stid(&stp->st_stid);
7559 out:
7560 return status;
7561 }
7562
7563 __be32
7564 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7565 union nfsd4_op_u *u)
7566 {
7567 struct nfsd4_delegreturn *dr = &u->delegreturn;
7568 struct nfs4_delegation *dp;
7569 stateid_t *stateid = &dr->dr_stateid;
7570 struct nfs4_stid *s;
7571 __be32 status;
7572 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7573
7574 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
7575 return status;
7576
7577 status = nfsd4_lookup_stateid(cstate, stateid, SC_TYPE_DELEG, SC_STATUS_REVOKED, &s, nn);
7578 if (status)
7579 goto out;
7580 dp = delegstateid(s);
7581 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
7582 if (status)
7583 goto put_stateid;
7584
7585 trace_nfsd_deleg_return(stateid);
7586 destroy_delegation(dp);
7587 smp_mb__after_atomic();
7588 wake_up_var(d_inode(cstate->current_fh.fh_dentry));
7589 put_stateid:
7590 nfs4_put_stid(&dp->dl_stid);
7591 out:
7592 return status;
7593 }
7594
7595 /* last octet in a range */
7596 static inline u64
7597 last_byte_offset(u64 start, u64 len)
7598 {
7599 u64 end;
7600
7601 WARN_ON_ONCE(!len);
7602 end = start + len;
7603 return end > start ? end - 1: NFS4_MAX_UINT64;
7604 }
7605
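/*
 * For example, start = 100, len = 10 gives 109, the offset of the last
 * byte locked; if start + len wraps past 2^64 - 1 (e.g. len =
 * NFS4_MAX_UINT64), the result is NFS4_MAX_UINT64, i.e. "lock to the
 * end of file".
 */
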
7606 /*
7607 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
7608 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
7609 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
7610 * locking, this prevents us from being completely protocol-compliant. The
7611 * real solution to this problem is to start using unsigned file offsets in
7612 * the VFS, but this is a very deep change!
7613 */
7614 static inline void
7615 nfs4_transform_lock_offset(struct file_lock *lock)
7616 {
7617 if (lock->fl_start < 0)
7618 lock->fl_start = OFFSET_MAX;
7619 if (lock->fl_end < 0)
7620 lock->fl_end = OFFSET_MAX;
7621 }
7622
7623 static fl_owner_t
7624 nfsd4_lm_get_owner(fl_owner_t owner)
7625 {
7626 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
7627
7628 nfs4_get_stateowner(&lo->lo_owner);
7629 return owner;
7630 }
7631
7632 static void
7633 nfsd4_lm_put_owner(fl_owner_t owner)
7634 {
7635 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
7636
7637 if (lo)
7638 nfs4_put_stateowner(&lo->lo_owner);
7639 }
7640
7641 /* return true if the lock's owning client is expirable; kicks the laundromat */
7642 static bool
7643 nfsd4_lm_lock_expirable(struct file_lock *cfl)
7644 {
7645 struct nfs4_lockowner *lo = (struct nfs4_lockowner *) cfl->c.flc_owner;
7646 struct nfs4_client *clp = lo->lo_owner.so_client;
7647 struct nfsd_net *nn;
7648
7649 if (try_to_expire_client(clp)) {
7650 nn = net_generic(clp->net, nfsd_net_id);
7651 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
7652 return true;
7653 }
7654 return false;
7655 }
7656
7657 /* schedule laundromat to run immediately and wait for it to complete */
7658 static void
7659 nfsd4_lm_expire_lock(void)
7660 {
7661 flush_workqueue(laundry_wq);
7662 }
7663
7664 static void
7665 nfsd4_lm_notify(struct file_lock *fl)
7666 {
7667 struct nfs4_lockowner *lo = (struct nfs4_lockowner *) fl->c.flc_owner;
7668 struct net *net = lo->lo_owner.so_client->net;
7669 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7670 struct nfsd4_blocked_lock *nbl = container_of(fl,
7671 struct nfsd4_blocked_lock, nbl_lock);
7672 bool queue = false;
7673
7674 /* An empty list means that something else is going to be using it */
7675 spin_lock(&nn->blocked_locks_lock);
7676 if (!list_empty(&nbl->nbl_list)) {
7677 list_del_init(&nbl->nbl_list);
7678 list_del_init(&nbl->nbl_lru);
7679 queue = true;
7680 }
7681 spin_unlock(&nn->blocked_locks_lock);
7682
7683 if (queue) {
7684 trace_nfsd_cb_notify_lock(lo, nbl);
7685 nfsd4_run_cb(&nbl->nbl_cb);
7686 }
7687 }
7688
7689 static const struct lock_manager_operations nfsd_posix_mng_ops = {
7690 .lm_mod_owner = THIS_MODULE,
7691 .lm_notify = nfsd4_lm_notify,
7692 .lm_get_owner = nfsd4_lm_get_owner,
7693 .lm_put_owner = nfsd4_lm_put_owner,
7694 .lm_lock_expirable = nfsd4_lm_lock_expirable,
7695 .lm_expire_lock = nfsd4_lm_expire_lock,
7696 };
7697
7698 static inline void
7699 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
7700 {
7701 struct nfs4_lockowner *lo;
7702
7703 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
7704 lo = (struct nfs4_lockowner *) fl->c.flc_owner;
7705 xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
7706 GFP_KERNEL);
7707 if (!deny->ld_owner.data)
7708 /* We just don't care that much */
7709 goto nevermind;
7710 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
7711 } else {
7712 nevermind:
7713 deny->ld_owner.len = 0;
7714 deny->ld_owner.data = NULL;
7715 deny->ld_clientid.cl_boot = 0;
7716 deny->ld_clientid.cl_id = 0;
7717 }
7718 deny->ld_start = fl->fl_start;
7719 deny->ld_length = NFS4_MAX_UINT64;
7720 if (fl->fl_end != NFS4_MAX_UINT64)
7721 deny->ld_length = fl->fl_end - fl->fl_start + 1;
7722 deny->ld_type = NFS4_READ_LT;
7723 if (fl->c.flc_type != F_RDLCK)
7724 deny->ld_type = NFS4_WRITE_LT;
7725 }
7726
7727 static struct nfs4_lockowner *
7728 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
7729 {
7730 unsigned int strhashval = ownerstr_hashval(owner);
7731 struct nfs4_stateowner *so;
7732
7733 lockdep_assert_held(&clp->cl_lock);
7734
7735 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
7736 so_strhash) {
7737 if (so->so_is_open_owner)
7738 continue;
7739 if (same_owner_str(so, owner))
7740 return lockowner(nfs4_get_stateowner(so));
7741 }
7742 return NULL;
7743 }
7744
7745 static struct nfs4_lockowner *
7746 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
7747 {
7748 struct nfs4_lockowner *lo;
7749
7750 spin_lock(&clp->cl_lock);
7751 lo = find_lockowner_str_locked(clp, owner);
7752 spin_unlock(&clp->cl_lock);
7753 return lo;
7754 }
7755
7756 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
7757 {
7758 unhash_lockowner_locked(lockowner(sop));
7759 }
7760
7761 static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
7762 {
7763 struct nfs4_lockowner *lo = lockowner(sop);
7764
7765 kmem_cache_free(lockowner_slab, lo);
7766 }
7767
7768 static const struct nfs4_stateowner_operations lockowner_ops = {
7769 .so_unhash = nfs4_unhash_lockowner,
7770 .so_free = nfs4_free_lockowner,
7771 };
7772
7773 /*
7774 * Alloc a lock owner structure.
7775 * Called from nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
7776 * occurred.
7777 *
7778 * strhashval = ownerstr_hashval
7779 */
7780 static struct nfs4_lockowner *
7781 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
7782 struct nfs4_ol_stateid *open_stp,
7783 struct nfsd4_lock *lock)
7784 {
7785 struct nfs4_lockowner *lo, *ret;
7786
7787 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
7788 if (!lo)
7789 return NULL;
7790 INIT_LIST_HEAD(&lo->lo_blocked);
7791 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
7792 lo->lo_owner.so_is_open_owner = 0;
7793 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
7794 lo->lo_owner.so_ops = &lockowner_ops;
7795 spin_lock(&clp->cl_lock);
7796 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
7797 if (ret == NULL) {
7798 list_add(&lo->lo_owner.so_strhash,
7799 &clp->cl_ownerstr_hashtbl[strhashval]);
7800 ret = lo;
7801 } else
7802 nfs4_free_stateowner(&lo->lo_owner);
7803
7804 spin_unlock(&clp->cl_lock);
7805 return ret;
7806 }
7807
7808 static struct nfs4_ol_stateid *
7809 find_lock_stateid(const struct nfs4_lockowner *lo,
7810 const struct nfs4_ol_stateid *ost)
7811 {
7812 struct nfs4_ol_stateid *lst;
7813
7814 lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
7815
7816 /* If ost is not hashed, ost->st_locks will not be valid */
7817 if (!nfs4_ol_stateid_unhashed(ost))
7818 list_for_each_entry(lst, &ost->st_locks, st_locks) {
7819 if (lst->st_stateowner == &lo->lo_owner) {
7820 refcount_inc(&lst->st_stid.sc_count);
7821 return lst;
7822 }
7823 }
7824 return NULL;
7825 }
7826
7827 static struct nfs4_ol_stateid *
7828 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
7829 struct nfs4_file *fp, struct inode *inode,
7830 struct nfs4_ol_stateid *open_stp)
7831 {
7832 struct nfs4_client *clp = lo->lo_owner.so_client;
7833 struct nfs4_ol_stateid *retstp;
7834
7835 mutex_init(&stp->st_mutex);
7836 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
7837 retry:
7838 spin_lock(&clp->cl_lock);
7839 if (nfs4_ol_stateid_unhashed(open_stp))
7840 goto out_close;
7841 retstp = find_lock_stateid(lo, open_stp);
7842 if (retstp)
7843 goto out_found;
7844 refcount_inc(&stp->st_stid.sc_count);
7845 stp->st_stid.sc_type = SC_TYPE_LOCK;
7846 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
7847 get_nfs4_file(fp);
7848 stp->st_stid.sc_file = fp;
7849 stp->st_access_bmap = 0;
7850 stp->st_deny_bmap = open_stp->st_deny_bmap;
7851 stp->st_openstp = open_stp;
7852 spin_lock(&fp->fi_lock);
7853 list_add(&stp->st_locks, &open_stp->st_locks);
7854 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
7855 list_add(&stp->st_perfile, &fp->fi_stateids);
7856 spin_unlock(&fp->fi_lock);
7857 spin_unlock(&clp->cl_lock);
7858 return stp;
7859 out_found:
7860 spin_unlock(&clp->cl_lock);
7861 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
7862 nfs4_put_stid(&retstp->st_stid);
7863 goto retry;
7864 }
7865 /* To keep mutex tracking happy */
7866 mutex_unlock(&stp->st_mutex);
7867 return retstp;
7868 out_close:
7869 spin_unlock(&clp->cl_lock);
7870 mutex_unlock(&stp->st_mutex);
7871 return NULL;
7872 }
7873
7874 static struct nfs4_ol_stateid *
7875 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
7876 struct inode *inode, struct nfs4_ol_stateid *ost,
7877 bool *new)
7878 {
7879 struct nfs4_stid *ns = NULL;
7880 struct nfs4_ol_stateid *lst;
7881 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
7882 struct nfs4_client *clp = oo->oo_owner.so_client;
7883
7884 *new = false;
7885 spin_lock(&clp->cl_lock);
7886 lst = find_lock_stateid(lo, ost);
7887 spin_unlock(&clp->cl_lock);
7888 if (lst != NULL) {
7889 if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
7890 goto out;
7891 nfs4_put_stid(&lst->st_stid);
7892 }
7893 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
7894 if (ns == NULL)
7895 return NULL;
7896
7897 lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
7898 if (lst == openlockstateid(ns))
7899 *new = true;
7900 else
7901 nfs4_put_stid(ns);
7902 out:
7903 return lst;
7904 }
7905
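/*
 * Note the racy lookup above: another thread may insert a lock stateid
 * for the same lockowner between the locked find and init_lock_stateid().
 * init_lock_stateid() re-checks under cl_lock and hands back the existing
 * stateid in that case, so *new is only set when our freshly allocated
 * stid actually won the race.
 */
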
7906 static int
7907 check_lock_length(u64 offset, u64 length)
7908 {
7909 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
7910 (length > ~offset)));
7911 }
7912
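/*
 * check_lock_length() returns nonzero for an invalid range: a zero
 * length, or one whose last byte would wrap past 2^64 - 1. The special
 * length NFS4_MAX_UINT64 means "lock to end of file" and always passes.
 */
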
7913 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
7914 {
7915 struct nfs4_file *fp = lock_stp->st_stid.sc_file;
7916
7917 lockdep_assert_held(&fp->fi_lock);
7918
7919 if (test_access(access, lock_stp))
7920 return;
7921 __nfs4_file_get_access(fp, access);
7922 set_access(access, lock_stp);
7923 }
7924
7925 static __be32
7926 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
7927 struct nfs4_ol_stateid *ost,
7928 struct nfsd4_lock *lock,
7929 struct nfs4_ol_stateid **plst, bool *new)
7930 {
7931 __be32 status;
7932 struct nfs4_file *fi = ost->st_stid.sc_file;
7933 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
7934 struct nfs4_client *cl = oo->oo_owner.so_client;
7935 struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
7936 struct nfs4_lockowner *lo;
7937 struct nfs4_ol_stateid *lst;
7938 unsigned int strhashval;
7939
7940 lo = find_lockowner_str(cl, &lock->lk_new_owner);
7941 if (!lo) {
7942 strhashval = ownerstr_hashval(&lock->lk_new_owner);
7943 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
7944 if (lo == NULL)
7945 return nfserr_jukebox;
7946 } else {
7947 /* with an existing lockowner, seqids must be the same */
7948 status = nfserr_bad_seqid;
7949 if (!cstate->minorversion &&
7950 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
7951 goto out;
7952 }
7953
7954 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
7955 if (lst == NULL) {
7956 status = nfserr_jukebox;
7957 goto out;
7958 }
7959
7960 status = nfs_ok;
7961 *plst = lst;
7962 out:
7963 nfs4_put_stateowner(&lo->lo_owner);
7964 return status;
7965 }
7966
7967 /*
7968 * LOCK operation
7969 */
7970 __be32
7971 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7972 union nfsd4_op_u *u)
7973 {
7974 struct nfsd4_lock *lock = &u->lock;
7975 struct nfs4_openowner *open_sop = NULL;
7976 struct nfs4_lockowner *lock_sop = NULL;
7977 struct nfs4_ol_stateid *lock_stp = NULL;
7978 struct nfs4_ol_stateid *open_stp = NULL;
7979 struct nfs4_file *fp;
7980 struct nfsd_file *nf = NULL;
7981 struct nfsd4_blocked_lock *nbl = NULL;
7982 struct file_lock *file_lock = NULL;
7983 struct file_lock *conflock = NULL;
7984 struct super_block *sb;
7985 __be32 status = 0;
7986 int lkflg;
7987 int err;
7988 bool new = false;
7989 unsigned char type;
7990 unsigned int flags = FL_POSIX;
7991 struct net *net = SVC_NET(rqstp);
7992 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7993
7994 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
7995 (long long) lock->lk_offset,
7996 (long long) lock->lk_length);
7997
7998 if (check_lock_length(lock->lk_offset, lock->lk_length))
7999 return nfserr_inval;
8000
8001 if ((status = fh_verify(rqstp, &cstate->current_fh,
8002 S_IFREG, NFSD_MAY_LOCK))) {
8003 dprintk("NFSD: nfsd4_lock: permission denied!\n");
8004 return status;
8005 }
8006 sb = cstate->current_fh.fh_dentry->d_sb;
8007
8008 if (lock->lk_is_new) {
8009 if (nfsd4_has_session(cstate))
8010 /* See RFC 5661, section 18.10.3: the given clientid is ignored: */
8011 memcpy(&lock->lk_new_clientid,
8012 &cstate->clp->cl_clientid,
8013 sizeof(clientid_t));
8014
8015 /* validate and update open stateid and open seqid */
8016 status = nfs4_preprocess_confirmed_seqid_op(cstate,
8017 lock->lk_new_open_seqid,
8018 &lock->lk_new_open_stateid,
8019 &open_stp, nn);
8020 if (status)
8021 goto out;
8022 mutex_unlock(&open_stp->st_mutex);
8023 open_sop = openowner(open_stp->st_stateowner);
8024 status = nfserr_bad_stateid;
8025 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
8026 &lock->lk_new_clientid))
8027 goto out;
8028 status = lookup_or_create_lock_state(cstate, open_stp, lock,
8029 &lock_stp, &new);
8030 } else {
8031 status = nfs4_preprocess_seqid_op(cstate,
8032 lock->lk_old_lock_seqid,
8033 &lock->lk_old_lock_stateid,
8034 SC_TYPE_LOCK, 0, &lock_stp,
8035 nn);
8036 }
8037 if (status)
8038 goto out;
8039 lock_sop = lockowner(lock_stp->st_stateowner);
8040
8041 lkflg = setlkflg(lock->lk_type);
8042 status = nfs4_check_openmode(lock_stp, lkflg);
8043 if (status)
8044 goto out;
8045
8046 status = nfserr_grace;
8047 if (locks_in_grace(net) && !lock->lk_reclaim)
8048 goto out;
8049 status = nfserr_no_grace;
8050 if (!locks_in_grace(net) && lock->lk_reclaim)
8051 goto out;
8052
8053 if (lock->lk_reclaim)
8054 flags |= FL_RECLAIM;
8055
8056 fp = lock_stp->st_stid.sc_file;
8057 switch (lock->lk_type) {
8058 case NFS4_READW_LT:
8059 if (nfsd4_has_session(cstate) ||
8060 exportfs_lock_op_is_async(sb->s_export_op))
8061 flags |= FL_SLEEP;
8062 fallthrough;
8063 case NFS4_READ_LT:
8064 spin_lock(&fp->fi_lock);
8065 nf = find_readable_file_locked(fp);
8066 if (nf)
8067 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
8068 spin_unlock(&fp->fi_lock);
8069 type = F_RDLCK;
8070 break;
8071 case NFS4_WRITEW_LT:
8072 if (nfsd4_has_session(cstate) ||
8073 exportfs_lock_op_is_async(sb->s_export_op))
8074 flags |= FL_SLEEP;
8075 fallthrough;
8076 case NFS4_WRITE_LT:
8077 spin_lock(&fp->fi_lock);
8078 nf = find_writeable_file_locked(fp);
8079 if (nf)
8080 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
8081 spin_unlock(&fp->fi_lock);
8082 type = F_WRLCK;
8083 break;
8084 default:
8085 status = nfserr_inval;
8086 goto out;
8087 }
8088
8089 if (!nf) {
8090 status = nfserr_openmode;
8091 goto out;
8092 }
8093
8094 /*
8095 * Most filesystems with their own ->lock operations will block
8096 * the nfsd thread waiting to acquire the lock. That leads to
8097 * deadlocks (we don't want every nfsd thread tied up waiting
8098 * for file locks), so don't attempt blocking lock notifications
8099 * on those filesystems:
8100 */
8101 if (!exportfs_lock_op_is_async(sb->s_export_op))
8102 flags &= ~FL_SLEEP;
8103
8104 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
8105 if (!nbl) {
8106 dprintk("NFSD: %s: unable to allocate block!\n", __func__);
8107 status = nfserr_jukebox;
8108 goto out;
8109 }
8110
8111 file_lock = &nbl->nbl_lock;
8112 file_lock->c.flc_type = type;
8113 file_lock->c.flc_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
8114 file_lock->c.flc_pid = current->tgid;
8115 file_lock->c.flc_file = nf->nf_file;
8116 file_lock->c.flc_flags = flags;
8117 file_lock->fl_lmops = &nfsd_posix_mng_ops;
8118 file_lock->fl_start = lock->lk_offset;
8119 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
8120 nfs4_transform_lock_offset(file_lock);
8121
8122 conflock = locks_alloc_lock();
8123 if (!conflock) {
8124 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
8125 status = nfserr_jukebox;
8126 goto out;
8127 }
8128
8129 if (flags & FL_SLEEP) {
8130 nbl->nbl_time = ktime_get_boottime_seconds();
8131 spin_lock(&nn->blocked_locks_lock);
8132 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
8133 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
8134 kref_get(&nbl->nbl_kref);
8135 spin_unlock(&nn->blocked_locks_lock);
8136 }
8137
8138 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
8139 switch (err) {
8140 case 0: /* success! */
8141 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
8142 status = 0;
8143 if (lock->lk_reclaim)
8144 nn->somebody_reclaimed = true;
8145 break;
8146 case FILE_LOCK_DEFERRED:
8147 kref_put(&nbl->nbl_kref, free_nbl);
8148 nbl = NULL;
8149 fallthrough;
8150 case -EAGAIN: /* conflock holds conflicting lock */
8151 status = nfserr_denied;
8152 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
8153 nfs4_set_lock_denied(conflock, &lock->lk_denied);
8154 break;
8155 case -EDEADLK:
8156 status = nfserr_deadlock;
8157 break;
8158 default:
8159 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
8160 status = nfserrno(err);
8161 break;
8162 }
8163 out:
8164 if (nbl) {
8165 /* dequeue it if we queued it before */
8166 if (flags & FL_SLEEP) {
8167 spin_lock(&nn->blocked_locks_lock);
8168 if (!list_empty(&nbl->nbl_list) &&
8169 !list_empty(&nbl->nbl_lru)) {
8170 list_del_init(&nbl->nbl_list);
8171 list_del_init(&nbl->nbl_lru);
8172 kref_put(&nbl->nbl_kref, free_nbl);
8173 }
8174 /* nbl can use one of its lists to be linked to the reaplist */
8175 spin_unlock(&nn->blocked_locks_lock);
8176 }
8177 free_blocked_lock(nbl);
8178 }
8179 if (nf)
8180 nfsd_file_put(nf);
8181 if (lock_stp) {
8182 /* Bump seqid manually if the 4.0 replay owner is openowner */
8183 if (cstate->replay_owner &&
8184 cstate->replay_owner != &lock_sop->lo_owner &&
8185 seqid_mutating_err(ntohl(status)))
8186 lock_sop->lo_owner.so_seqid++;
8187
8188 /*
8189 * If this is a new, never-before-used stateid, and we are
8190 * returning an error, then just go ahead and release it.
8191 */
8192 if (status && new)
8193 release_lock_stateid(lock_stp);
8194
8195 mutex_unlock(&lock_stp->st_mutex);
8196
8197 nfs4_put_stid(&lock_stp->st_stid);
8198 }
8199 if (open_stp)
8200 nfs4_put_stid(&open_stp->st_stid);
8201 nfsd4_bump_seqid(cstate, status);
8202 if (conflock)
8203 locks_free_lock(conflock);
8204 return status;
8205 }
8206
8207 void nfsd4_lock_release(union nfsd4_op_u *u)
8208 {
8209 struct nfsd4_lock *lock = &u->lock;
8210 struct nfsd4_lock_denied *deny = &lock->lk_denied;
8211
8212 kfree(deny->ld_owner.data);
8213 }
8214
8215 /*
8216 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
8217 * so we do a temporary open here just to get an open file to pass to
8218 * vfs_test_lock.
8219 */
8220 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
8221 {
8222 struct nfsd_file *nf;
8223 struct inode *inode;
8224 __be32 err;
8225
8226 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
8227 if (err)
8228 return err;
8229 inode = fhp->fh_dentry->d_inode;
8230 inode_lock(inode); /* to block new leases till after test_lock: */
8231 err = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
8232 if (err)
8233 goto out;
8234 lock->c.flc_file = nf->nf_file;
8235 err = nfserrno(vfs_test_lock(nf->nf_file, lock));
8236 lock->c.flc_file = NULL;
8237 out:
8238 inode_unlock(inode);
8239 nfsd_file_put(nf);
8240 return err;
8241 }
8242
8243 /*
8244 * LOCKT operation
8245 */
8246 __be32
8247 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
8248 union nfsd4_op_u *u)
8249 {
8250 struct nfsd4_lockt *lockt = &u->lockt;
8251 struct file_lock *file_lock = NULL;
8252 struct nfs4_lockowner *lo = NULL;
8253 __be32 status;
8254 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
8255
8256 if (locks_in_grace(SVC_NET(rqstp)))
8257 return nfserr_grace;
8258
8259 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
8260 return nfserr_inval;
8261
8262 if (!nfsd4_has_session(cstate)) {
8263 status = set_client(&lockt->lt_clientid, cstate, nn);
8264 if (status)
8265 goto out;
8266 }
8267
8268 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
8269 goto out;
8270
8271 file_lock = locks_alloc_lock();
8272 if (!file_lock) {
8273 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
8274 status = nfserr_jukebox;
8275 goto out;
8276 }
8277
8278 switch (lockt->lt_type) {
8279 case NFS4_READ_LT:
8280 case NFS4_READW_LT:
8281 file_lock->c.flc_type = F_RDLCK;
8282 break;
8283 case NFS4_WRITE_LT:
8284 case NFS4_WRITEW_LT:
8285 file_lock->c.flc_type = F_WRLCK;
8286 break;
8287 default:
8288 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
8289 status = nfserr_inval;
8290 goto out;
8291 }
8292
8293 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
8294 if (lo)
8295 file_lock->c.flc_owner = (fl_owner_t)lo;
8296 file_lock->c.flc_pid = current->tgid;
8297 file_lock->c.flc_flags = FL_POSIX;
8298
8299 file_lock->fl_start = lockt->lt_offset;
8300 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
8301
8302 nfs4_transform_lock_offset(file_lock);
8303
8304 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
8305 if (status)
8306 goto out;
8307
8308 if (file_lock->c.flc_type != F_UNLCK) {
8309 status = nfserr_denied;
8310 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
8311 }
8312 out:
8313 if (lo)
8314 nfs4_put_stateowner(&lo->lo_owner);
8315 if (file_lock)
8316 locks_free_lock(file_lock);
8317 return status;
8318 }
8319
8320 void nfsd4_lockt_release(union nfsd4_op_u *u)
8321 {
8322 struct nfsd4_lockt *lockt = &u->lockt;
8323 struct nfsd4_lock_denied *deny = &lockt->lt_denied;
8324
8325 kfree(deny->ld_owner.data);
8326 }
8327
8328 __be32
8329 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
8330 union nfsd4_op_u *u)
8331 {
8332 struct nfsd4_locku *locku = &u->locku;
8333 struct nfs4_ol_stateid *stp;
8334 struct nfsd_file *nf = NULL;
8335 struct file_lock *file_lock = NULL;
8336 __be32 status;
8337 int err;
8338 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
8339
8340 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
8341 (long long) locku->lu_offset,
8342 (long long) locku->lu_length);
8343
8344 if (check_lock_length(locku->lu_offset, locku->lu_length))
8345 return nfserr_inval;
8346
8347 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
8348 &locku->lu_stateid, SC_TYPE_LOCK, 0,
8349 &stp, nn);
8350 if (status)
8351 goto out;
8352 nf = find_any_file(stp->st_stid.sc_file);
8353 if (!nf) {
8354 status = nfserr_lock_range;
8355 goto put_stateid;
8356 }
8357 file_lock = locks_alloc_lock();
8358 if (!file_lock) {
8359 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
8360 status = nfserr_jukebox;
8361 goto put_file;
8362 }
8363
8364 file_lock->c.flc_type = F_UNLCK;
8365 file_lock->c.flc_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
8366 file_lock->c.flc_pid = current->tgid;
8367 file_lock->c.flc_file = nf->nf_file;
8368 file_lock->c.flc_flags = FL_POSIX;
8369 file_lock->fl_lmops = &nfsd_posix_mng_ops;
8370 file_lock->fl_start = locku->lu_offset;
8371
8372 file_lock->fl_end = last_byte_offset(locku->lu_offset,
8373 locku->lu_length);
8374 nfs4_transform_lock_offset(file_lock);
8375
8376 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
8377 if (err) {
8378 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
8379 goto out_nfserr;
8380 }
8381 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
8382 put_file:
8383 nfsd_file_put(nf);
8384 put_stateid:
8385 mutex_unlock(&stp->st_mutex);
8386 nfs4_put_stid(&stp->st_stid);
8387 out:
8388 nfsd4_bump_seqid(cstate, status);
8389 if (file_lock)
8390 locks_free_lock(file_lock);
8391 return status;
8392
8393 out_nfserr:
8394 status = nfserrno(err);
8395 goto put_file;
8396 }
8397
8398 /*
8399 * returns
8400 * true: locks held by lockowner
8401 * false: no locks held by lockowner
8402 */
8403 static bool
8404 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
8405 {
8406 struct file_lock *fl;
8407 int status = false;
8408 struct nfsd_file *nf;
8409 struct inode *inode;
8410 struct file_lock_context *flctx;
8411
8412 spin_lock(&fp->fi_lock);
8413 nf = find_any_file_locked(fp);
8414 if (!nf) {
8415 /* Any valid lock stateid should have some sort of access */
8416 WARN_ON_ONCE(1);
8417 goto out;
8418 }
8419
8420 inode = file_inode(nf->nf_file);
8421 flctx = locks_inode_context(inode);
8422
8423 if (flctx && !list_empty_careful(&flctx->flc_posix)) {
8424 spin_lock(&flctx->flc_lock);
8425 for_each_file_lock(fl, &flctx->flc_posix) {
8426 if (fl->c.flc_owner == (fl_owner_t)lowner) {
8427 status = true;
8428 break;
8429 }
8430 }
8431 spin_unlock(&flctx->flc_lock);
8432 }
8433 out:
8434 spin_unlock(&fp->fi_lock);
8435 return status;
8436 }
8437
8438 /**
8439 * nfsd4_release_lockowner - process NFSv4.0 RELEASE_LOCKOWNER operations
8440 * @rqstp: RPC transaction
8441 * @cstate: NFSv4 COMPOUND state
8442 * @u: RELEASE_LOCKOWNER arguments
8443 *
8444 * Check if there are any locks still held and if not, free the lockowner
8445 * and any lock state that is owned.
8446 *
8447 * Return values:
8448 * %nfs_ok: lockowner released or not found
8449 * %nfserr_locks_held: lockowner still in use
8450 * %nfserr_stale_clientid: clientid no longer active
8451 * %nfserr_expired: clientid not recognized
8452 */
8453 __be32
8454 nfsd4_release_lockowner(struct svc_rqst *rqstp,
8455 struct nfsd4_compound_state *cstate,
8456 union nfsd4_op_u *u)
8457 {
8458 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
8459 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
8460 clientid_t *clid = &rlockowner->rl_clientid;
8461 struct nfs4_ol_stateid *stp;
8462 struct nfs4_lockowner *lo;
8463 struct nfs4_client *clp;
8464 LIST_HEAD(reaplist);
8465 __be32 status;
8466
8467 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
8468 clid->cl_boot, clid->cl_id);
8469
8470 status = set_client(clid, cstate, nn);
8471 if (status)
8472 return status;
8473 clp = cstate->clp;
8474
8475 spin_lock(&clp->cl_lock);
8476 lo = find_lockowner_str_locked(clp, &rlockowner->rl_owner);
8477 if (!lo) {
8478 spin_unlock(&clp->cl_lock);
8479 return nfs_ok;
8480 }
8481
8482 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
8483 if (check_for_locks(stp->st_stid.sc_file, lo)) {
8484 spin_unlock(&clp->cl_lock);
8485 nfs4_put_stateowner(&lo->lo_owner);
8486 return nfserr_locks_held;
8487 }
8488 }
8489 unhash_lockowner_locked(lo);
8490 while (!list_empty(&lo->lo_owner.so_stateids)) {
8491 stp = list_first_entry(&lo->lo_owner.so_stateids,
8492 struct nfs4_ol_stateid,
8493 st_perstateowner);
8494 unhash_lock_stateid(stp);
8495 put_ol_stateid_locked(stp, &reaplist);
8496 }
8497 spin_unlock(&clp->cl_lock);
8498
8499 free_ol_stateid_reaplist(&reaplist);
8500 remove_blocked_locks(lo);
8501 nfs4_put_stateowner(&lo->lo_owner);
8502 return nfs_ok;
8503 }
8504
8505 static inline struct nfs4_client_reclaim *
8506 alloc_reclaim(void)
8507 {
8508 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
8509 }
8510
8511 bool
8512 nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
8513 {
8514 struct nfs4_client_reclaim *crp;
8515
8516 crp = nfsd4_find_reclaim_client(name, nn);
8517 return (crp && crp->cr_clp);
8518 }
8519
8520 /*
8521 * failure => all reset bets are off, nfserr_no_grace...
8522 *
8523 * The caller is responsible for freeing name.data if NULL is returned (it
8524 * will be freed in nfs4_remove_reclaim_record in the normal case).
8525 */
8526 struct nfs4_client_reclaim *
8527 nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
8528 struct nfsd_net *nn)
8529 {
8530 unsigned int strhashval;
8531 struct nfs4_client_reclaim *crp;
8532
8533 crp = alloc_reclaim();
8534 if (crp) {
8535 strhashval = clientstr_hashval(name);
8536 INIT_LIST_HEAD(&crp->cr_strhash);
8537 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
8538 crp->cr_name.data = name.data;
8539 crp->cr_name.len = name.len;
8540 crp->cr_princhash.data = princhash.data;
8541 crp->cr_princhash.len = princhash.len;
8542 crp->cr_clp = NULL;
8543 nn->reclaim_str_hashtbl_size++;
8544 }
8545 return crp;
8546 }
8547
8548 void
8549 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
8550 {
8551 list_del(&crp->cr_strhash);
8552 kfree(crp->cr_name.data);
8553 kfree(crp->cr_princhash.data);
8554 kfree(crp);
8555 nn->reclaim_str_hashtbl_size--;
8556 }
8557
8558 void
8559 nfs4_release_reclaim(struct nfsd_net *nn)
8560 {
8561 struct nfs4_client_reclaim *crp = NULL;
8562 int i;
8563
8564 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8565 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
8566 crp = list_entry(nn->reclaim_str_hashtbl[i].next,
8567 struct nfs4_client_reclaim, cr_strhash);
8568 nfs4_remove_reclaim_record(crp, nn);
8569 }
8570 }
8571 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
8572 }
8573
8574 /*
8575 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
8576 struct nfs4_client_reclaim *
8577 nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
8578 {
8579 unsigned int strhashval;
8580 struct nfs4_client_reclaim *crp = NULL;
8581
8582 strhashval = clientstr_hashval(name);
8583 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
8584 if (compare_blob(&crp->cr_name, &name) == 0) {
8585 return crp;
8586 }
8587 }
8588 return NULL;
8589 }
8590
8591 __be32
8592 nfs4_check_open_reclaim(struct nfs4_client *clp)
8593 {
8594 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
8595 return nfserr_no_grace;
8596
8597 if (nfsd4_client_record_check(clp))
8598 return nfserr_reclaim_bad;
8599
8600 return nfs_ok;
8601 }
8602
8603 /*
8604 * Since the lifetime of a delegation isn't limited to that of an open, a
8605 * client may quite reasonably hang on to a delegation as long as it has
8606 * the inode cached. This becomes an obvious problem the first time a
8607 * client's inode cache approaches the size of the server's total memory.
8608 *
8609 * For now we avoid this problem by imposing a hard limit on the number
8610 * of delegations, which varies according to the server's memory size.
8611 */
8612 static void
8613 set_max_delegations(void)
8614 {
8615 /*
8616 * Allow at most 4 delegations per megabyte of RAM. Quick
8617 * estimates suggest that in the worst case (where every delegation
8618 * is for a different inode), a delegation could take about 1.5K,
8619 * giving a worst case usage of about 6% of memory.
8620 */
8621 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
8622 }
8623
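/*
 * Worked example: with 4K pages (PAGE_SHIFT == 12) the shift above is
 * 20 - 2 - 12 = 6, so one delegation is allowed per 2^6 = 64 pages,
 * i.e. 256KB of buffer memory, which is exactly the 4 delegations per
 * megabyte described above.
 */
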
8624 static int nfs4_state_create_net(struct net *net)
8625 {
8626 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8627 int i;
8628
8629 nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
8630 sizeof(struct list_head),
8631 GFP_KERNEL);
8632 if (!nn->conf_id_hashtbl)
8633 goto err;
8634 nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
8635 sizeof(struct list_head),
8636 GFP_KERNEL);
8637 if (!nn->unconf_id_hashtbl)
8638 goto err_unconf_id;
8639 nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
8640 sizeof(struct list_head),
8641 GFP_KERNEL);
8642 if (!nn->sessionid_hashtbl)
8643 goto err_sessionid;
8644
8645 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8646 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
8647 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
8648 }
8649 for (i = 0; i < SESSION_HASH_SIZE; i++)
8650 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
8651 nn->conf_name_tree = RB_ROOT;
8652 nn->unconf_name_tree = RB_ROOT;
8653 nn->boot_time = ktime_get_real_seconds();
8654 nn->grace_ended = false;
8655 nn->nfsd4_manager.block_opens = true;
8656 INIT_LIST_HEAD(&nn->nfsd4_manager.list);
8657 INIT_LIST_HEAD(&nn->client_lru);
8658 INIT_LIST_HEAD(&nn->close_lru);
8659 INIT_LIST_HEAD(&nn->del_recall_lru);
8660 spin_lock_init(&nn->client_lock);
8661 spin_lock_init(&nn->s2s_cp_lock);
8662 idr_init(&nn->s2s_cp_stateids);
8663 atomic_set(&nn->pending_async_copies, 0);
8664
8665 spin_lock_init(&nn->blocked_locks_lock);
8666 INIT_LIST_HEAD(&nn->blocked_locks_lru);
8667
8668 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
8669 INIT_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker);
8670 get_net(net);
8671
8672 nn->nfsd_client_shrinker = shrinker_alloc(0, "nfsd-client");
8673 if (!nn->nfsd_client_shrinker)
8674 goto err_shrinker;
8675
8676 nn->nfsd_client_shrinker->scan_objects = nfsd4_state_shrinker_scan;
8677 nn->nfsd_client_shrinker->count_objects = nfsd4_state_shrinker_count;
8678 nn->nfsd_client_shrinker->private_data = nn;
8679
8680 shrinker_register(nn->nfsd_client_shrinker);
8681
8682 return 0;
8683
8684 err_shrinker:
8685 put_net(net);
8686 kfree(nn->sessionid_hashtbl);
8687 err_sessionid:
8688 kfree(nn->unconf_id_hashtbl);
8689 err_unconf_id:
8690 kfree(nn->conf_id_hashtbl);
8691 err:
8692 return -ENOMEM;
8693 }
8694
static void
nfs4_state_destroy_net(struct net *net)
{
	int i;
	struct nfs4_client *clp = NULL;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->conf_id_hashtbl[i])) {
			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	WARN_ON(!list_empty(&nn->blocked_locks_lru));

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	kfree(nn->sessionid_hashtbl);
	kfree(nn->unconf_id_hashtbl);
	kfree(nn->conf_id_hashtbl);
	put_net(net);
}

int
nfs4_state_start_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	ret = nfs4_state_create_net(net);
	if (ret)
		return ret;
	locks_start_grace(net, &nn->nfsd4_manager);
	nfsd4_client_tracking_init(net);
	/* Tracking says no client holds reclaimable state: no grace needed */
	if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
		goto skip_grace;
	printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
	       nn->nfsd4_grace, net->ns.inum);
	trace_nfsd_grace_start(nn);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
	return 0;

skip_grace:
	printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
	       net->ns.inum);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
	nfsd4_end_grace(nn);
	return 0;
}

/* initialization to perform when the nfsd service is started: */

int
nfs4_state_start(void)
{
	int ret;

	ret = rhltable_init(&nfs4_file_rhltable, &nfs4_file_rhash_params);
	if (ret)
		return ret;

	set_max_delegations();
	return 0;
}

void
nfs4_state_shutdown_net(struct net *net)
{
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	shrinker_free(nn->nfsd_client_shrinker);
	cancel_work_sync(&nn->nfsd_shrinker_work);
	cancel_delayed_work_sync(&nn->laundromat_work);
	locks_end_grace(&nn->nfsd4_manager);

	/* Unhash delegations under the lock; destroy them after dropping it */
	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		unhash_delegation_locked(dp, SC_STATUS_CLOSED);
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		destroy_unhashed_deleg(dp);
	}

	nfsd4_client_tracking_exit(net);
	nfs4_state_destroy_net(net);
#ifdef CONFIG_NFSD_V4_2_INTER_SSC
	nfsd4_ssc_shutdown_umount(nn);
#endif
}

void
nfs4_state_shutdown(void)
{
	rhltable_destroy(&nfs4_file_rhltable);
}

static void
get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
	    CURRENT_STATEID(stateid))
		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
}

static void
put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (cstate->minorversion) {
		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
		SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
	}
}
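
/*
 * Example flow (sketch): in an NFSv4.1+ COMPOUND such as
 * OPEN -> READ -> CLOSE, nfsd4_set_openstateid() below saves OPEN's
 * result stateid via put_stateid(); if the READ then carries the
 * special "current stateid" value, nfsd4_get_readstateid() substitutes
 * the saved stateid via get_stateid() before the stateid is looked up.
 */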

void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
	CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
}

/*
 * functions to set current state id
 */
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open_downgrade.od_stateid);
}

void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open.op_stateid);
}

void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->close.cl_stateid);
}

void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->lock.lk_resp_stateid);
}

/*
 * functions to consume current state id
 */

void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->open_downgrade.od_stateid);
}

void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->delegreturn.dr_stateid);
}

void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->free_stateid.fr_stateid);
}

void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->setattr.sa_stateid);
}

void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->close.cl_stateid);
}

void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->locku.lu_stateid);
}

void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->read.rd_stateid);
}

void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->write.wr_stateid);
}

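/*
 * Overview of the conflict handling below (sketch): a GETATTR for
 * change/size attributes that arrives while another client holds a
 * write delegation cannot simply be answered from the server's own
 * metadata, because the delegation holder may have writes the server
 * has not seen. The server either queries the holder with CB_GETATTR,
 * or, for write leases not managed by nfsd, breaks the lease and waits
 * for it to be returned.
 */
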
/**
 * nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict
 * @rqstp: RPC transaction context
 * @dentry: dentry of inode to be checked for a conflict
 * @modified: return true if file was modified
 * @size: new size of file if modified is true
 *
 * This function is called when there is a conflict between a write
 * delegation and a change/size GETATTR from another client. The server
 * must either use the CB_GETATTR to get the current values of the
 * attributes from the client that holds the delegation or recall the
 * delegation before replying to the GETATTR. See RFC 8881 section
 * 18.7.4.
 *
 * Returns 0 if there is no conflict; otherwise an nfs_stat
 * code is returned.
 */
__be32
nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct dentry *dentry,
		bool *modified, u64 *size)
{
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct file_lock_context *ctx;
	struct nfs4_delegation *dp = NULL;
	struct file_lease *fl;
	struct iattr attrs;
	struct nfs4_cb_fattr *ncf;
	struct inode *inode = d_inode(dentry);

	*modified = false;
	ctx = locks_inode_context(inode);
	if (!ctx)
		return 0;

/* Sentinel (never dereferenced) marking a write lease not managed by nfsd */
#define NON_NFSD_LEASE ((void *)1)

	spin_lock(&ctx->flc_lock);
	for_each_file_lock(fl, &ctx->flc_lease) {
		if (fl->c.flc_flags == FL_LAYOUT)
			continue;
		if (fl->c.flc_type == F_WRLCK) {
			if (fl->fl_lmops == &nfsd_lease_mng_ops)
				dp = fl->c.flc_owner;
			else
				dp = NON_NFSD_LEASE;
		}
		break;
	}
	if (dp == NULL || dp == NON_NFSD_LEASE ||
	    dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) {
		spin_unlock(&ctx->flc_lock);
		if (dp == NON_NFSD_LEASE) {
			status = nfserrno(nfsd_open_break_lease(inode,
						NFSD_MAY_READ));
			if (status != nfserr_jukebox ||
			    !nfsd_wait_for_delegreturn(rqstp, inode))
				return status;
		}
		return 0;
	}

	nfsd_stats_wdeleg_getattr_inc(nn);
	refcount_inc(&dp->dl_stid.sc_count);
	ncf = &dp->dl_cb_fattr;
	nfs4_cb_getattr(&dp->dl_cb_fattr);
	spin_unlock(&ctx->flc_lock);

	wait_on_bit_timeout(&ncf->ncf_cb_flags, CB_GETATTR_BUSY,
			    TASK_INTERRUPTIBLE, NFSD_CB_GETATTR_TIMEOUT);
	if (ncf->ncf_cb_status) {
		/* Recall delegation only if client didn't respond */
		status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
		if (status != nfserr_jukebox ||
		    !nfsd_wait_for_delegreturn(rqstp, inode))
			goto out_status;
	}
	if (!ncf->ncf_file_modified &&
	    (ncf->ncf_initial_cinfo != ncf->ncf_cb_change ||
	     ncf->ncf_cur_fsize != ncf->ncf_cb_fsize))
		ncf->ncf_file_modified = true;
	if (ncf->ncf_file_modified) {
		int err;

		/*
		 * Per section 10.4.3 of RFC 8881, the server would
		 * not update the file's metadata with the client's
		 * modified size
		 */
		attrs.ia_mtime = attrs.ia_ctime = current_time(inode);
		attrs.ia_valid = ATTR_MTIME | ATTR_CTIME | ATTR_DELEG;
		inode_lock(inode);
		err = notify_change(&nop_mnt_idmap, dentry, &attrs, NULL);
		inode_unlock(inode);
		if (err) {
			status = nfserrno(err);
			goto out_status;
		}
		ncf->ncf_cur_fsize = ncf->ncf_cb_fsize;
		*size = ncf->ncf_cur_fsize;
		*modified = true;
	}
	status = 0;
out_status:
	nfs4_put_stid(&dp->dl_stid);
	return status;
}
