// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/lockd/svclock.c
 *
 * Handling of server-side locks, mostly of the blocked variety.
 * This is the ugliest part of lockd because we tread on very thin ice.
 * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
 * IMNSHO introducing the grant callback into the NLM protocol was one
 * of the worst ideas Sun ever had. Except maybe for the idea of doing
 * NFS file locking at all.
 *
 * I'm trying hard to avoid race conditions by protecting most accesses
 * to a file's list of blocked locks through a semaphore. The global
 * list of blocked locks is not protected in this fashion however.
 * Therefore, some functions (such as the RPC callback for the async grant
 * call) move blocked locks towards the head of the list *while some other
 * process might be traversing it*. This should not be a problem in
 * practice, because this will only cause functions traversing the list
 * to visit some blocks twice.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/nlm.h>
#include <linux/lockd/lockd.h>
#include <linux/kthread.h>

#define NLMDBG_FACILITY		NLMDBG_SVCLOCK

#ifdef CONFIG_LOCKD_V4
#define nlm_deadlock	nlm4_deadlock
#else
#define nlm_deadlock	nlm_lck_denied
#endif

static void nlmsvc_release_block(struct nlm_block *block);
static void nlmsvc_insert_block(struct nlm_block *block, unsigned long);
static void nlmsvc_remove_block(struct nlm_block *block);

static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
static void nlmsvc_freegrantargs(struct nlm_rqst *call);
static const struct rpc_call_ops nlmsvc_grant_ops;

/*
 * The list of blocked locks to retry
 */
static LIST_HEAD(nlm_blocked);
static DEFINE_SPINLOCK(nlm_blocked_lock);
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
{
	/*
	 * We can get away with a static buffer because this is only called
	 * from lockd, which is single-threaded.
	 */
	static char buf[2*NLM_MAXCOOKIELEN+1];
	unsigned int i, len = sizeof(buf);
	char *p = buf;

	len--;	/* allow for trailing \0 */
	if (len < 3)
		return "???";
	for (i = 0 ; i < cookie->len ; i++) {
		if (len < 2) {
			strcpy(p-3, "...");
			break;
		}
		sprintf(p, "%02x", cookie->data[i]);
		p += 2;
		len -= 2;
	}
	*p = '\0';

	return buf;
}
#endif

/*
 * Insert a blocked lock into the global list
 */
static void
nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when)
{
	struct nlm_block *b;
	struct list_head *pos;

	dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
	if (list_empty(&block->b_list)) {
		kref_get(&block->b_count);
	} else {
		list_del_init(&block->b_list);
	}

	pos = &nlm_blocked;
	if (when != NLM_NEVER) {
		if ((when += jiffies) == NLM_NEVER)
			when ++;
		list_for_each(pos, &nlm_blocked) {
			b = list_entry(pos, struct nlm_block, b_list);
			if (time_after(b->b_when, when) || b->b_when == NLM_NEVER)
				break;
		}
		/* On normal exit from the loop, pos == &nlm_blocked,
		 * so we will be adding to the end of the list - good
		 */
	}

	list_add_tail(&block->b_list, pos);
	block->b_when = when;
}

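/*
 * Insert a block into the global list, taking nlm_blocked_lock around
 * the ordered insertion done by nlmsvc_insert_block_locked().
 */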
static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
{
	spin_lock(&nlm_blocked_lock);
	nlmsvc_insert_block_locked(block, when);
	spin_unlock(&nlm_blocked_lock);
}

/*
 * Remove a block from the global list
 */
static inline void
nlmsvc_remove_block(struct nlm_block *block)
{
	if (!list_empty(&block->b_list)) {
		spin_lock(&nlm_blocked_lock);
		list_del_init(&block->b_list);
		spin_unlock(&nlm_blocked_lock);
		nlmsvc_release_block(block);
	}
}

/*
 * Find a block for a given lock
 */
static struct nlm_block *
nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block *block;
	struct file_lock *fl;

	dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
				file, lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end, lock->fl.fl_type);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		fl = &block->b_call->a_args.lock.fl;
		dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
				block->b_file, fl->fl_pid,
				(long long)fl->fl_start,
				(long long)fl->fl_end, fl->fl_type,
				nlmdbg_cookie2a(&block->b_call->a_args.cookie));
		if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
			kref_get(&block->b_count);
			return block;
		}
	}

	return NULL;
}

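/* Compare two NLM cookies for byte-wise equality */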
static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
{
	if (a->len != b->len)
		return 0;
	if (memcmp(a->data, b->data, a->len))
		return 0;
	return 1;
}

/*
 * Find a block with a given NLM cookie.
 */
static inline struct nlm_block *
nlmsvc_find_block(struct nlm_cookie *cookie)
{
	struct nlm_block *block;

	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_cookie_match(&block->b_call->a_args.cookie, cookie))
			goto found;
	}

	return NULL;

found:
	dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
	kref_get(&block->b_count);
	return block;
}

/*
 * Create a block and initialize it.
 *
 * Note: we explicitly set the cookie of the grant reply to that of
 * the blocked lock request. The spec explicitly mentions that the client
 * should _not_ rely on the callback containing the same cookie as the
 * request, but (as I found out later) that's because some implementations
 * do just this. Never mind the standards committees, they support our
 * logging industries.
 *
 * 10 years later: I hope we can safely ignore these old and broken
 * clients by now. Let's fix this so we can uniquely identify an incoming
 * GRANTED_RES message by cookie, without having to rely on the client's IP
 * address. --okir
 */
static struct nlm_block *
nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
		    struct nlm_file *file, struct nlm_lock *lock,
		    struct nlm_cookie *cookie)
{
	struct nlm_block *block;
	struct nlm_rqst *call = NULL;

	call = nlm_alloc_call(host);
	if (call == NULL)
		return NULL;

	/* Allocate memory for block, and initialize arguments */
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		goto failed;
	kref_init(&block->b_count);
	INIT_LIST_HEAD(&block->b_list);
	INIT_LIST_HEAD(&block->b_flist);

	if (!nlmsvc_setgrantargs(call, lock))
		goto failed_free;

	/* Set notifier function for VFS, and init args */
	call->a_args.lock.fl.fl_flags |= FL_SLEEP;
	call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
	nlmclnt_next_cookie(&call->a_args.cookie);

	dprintk("lockd: created block %p...\n", block);

	/* Create and initialize the block */
	block->b_daemon = rqstp->rq_server;
	block->b_host = host;
	block->b_file = file;
	file->f_count++;

	/* Add to file's list of blocks */
	list_add(&block->b_flist, &file->f_blocks);

	/* Set up RPC arguments for callback */
	block->b_call = call;
	call->a_flags = RPC_TASK_ASYNC;
	call->a_block = block;

	return block;

failed_free:
	kfree(block);
failed:
	nlmsvc_release_call(call);
	return NULL;
}

/*
 * Delete a block.
 * It is the caller's responsibility to check whether the file
 * can be closed hereafter.
 */
static int nlmsvc_unlink_block(struct nlm_block *block)
{
	int status;
	dprintk("lockd: unlinking block %p...\n", block);

	/* Remove block from list */
	status = locks_delete_block(&block->b_call->a_args.lock.fl);
	nlmsvc_remove_block(block);
	return status;
}

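/*
 * kref release callback: unlink the block from its file's list and free it.
 * Called with the file's f_mutex held (taken by kref_put_mutex) and drops
 * it before releasing the call and the file.
 */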
static void nlmsvc_free_block(struct kref *kref)
{
	struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
	struct nlm_file *file = block->b_file;

	dprintk("lockd: freeing block %p...\n", block);

	/* Remove block from file's list of blocks */
	list_del_init(&block->b_flist);
	mutex_unlock(&file->f_mutex);

	nlmsvc_freegrantargs(block->b_call);
	nlmsvc_release_call(block->b_call);
	nlm_release_file(block->b_file);
	kfree(block);
}

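/* Drop a reference to a block, freeing it when the last reference goes away */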
static void nlmsvc_release_block(struct nlm_block *block)
{
	if (block != NULL)
		kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex);
}

/*
 * Loop over all blocks and delete blocks held by
 * a matching host.
 */
void nlmsvc_traverse_blocks(struct nlm_host *host,
			struct nlm_file *file,
			nlm_host_match_fn_t match)
{
	struct nlm_block *block, *next;

restart:
	mutex_lock(&file->f_mutex);
	list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
		if (!match(block->b_host, host))
			continue;
		/* Do not destroy blocks that are not on
		 * the global retry list - why? */
		if (list_empty(&block->b_list))
			continue;
		kref_get(&block->b_count);
		mutex_unlock(&file->f_mutex);
		nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
		goto restart;
	}
	mutex_unlock(&file->f_mutex);
}

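/* Take a reference on a lockowner */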
static struct nlm_lockowner *
nlmsvc_get_lockowner(struct nlm_lockowner *lockowner)
{
	refcount_inc(&lockowner->count);
	return lockowner;
}

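/*
 * Release a reference on a lockowner. On the final put, unhash it from
 * the host's h_lockowners list, drop the host reference and free it.
 */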
static void nlmsvc_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlmsvc_release_host(lockowner->host);
	kfree(lockowner);
}

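/*
 * Look up a lockowner by pid on the given host and take a reference.
 * The caller must hold host->h_lock.
 */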
static struct nlm_lockowner *__nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid != pid)
			continue;
		return nlmsvc_get_lockowner(lockowner);
	}
	return NULL;
}

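/*
 * Find the lockowner for the given host and pid, allocating and hashing
 * a new one if none exists yet.
 */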
static struct nlm_lockowner *nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlmsvc_find_lockowner(host, pid);

	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*res), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlmsvc_find_lockowner(host, pid);
		if (res == NULL && new != NULL) {
			res = new;
			/* fs/locks.c will manage the refcount through lock_ops */
			refcount_set(&new->count, 1);
			new->pid = pid;
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}

	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}

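/* Release the lockowner reference attached to an nlm_lock's fl_owner */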
void
nlmsvc_release_lockowner(struct nlm_lock *lock)
{
	if (lock->fl.fl_owner)
		nlmsvc_put_lockowner(lock->fl.fl_owner);
}

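/* Attach the host's lockowner for this pid to the file_lock as fl_owner */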
void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host,
			       pid_t pid)
{
	fl->fl_owner = nlmsvc_find_lockowner(host, pid);
}

/*
 * Initialize arguments for GRANTED call. The nlm_rqst structure
 * has been cleared already.
 */
static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
	call->a_args.lock.caller = utsname()->nodename;
	call->a_args.lock.oh.len = lock->oh.len;

	/* set default data area */
	call->a_args.lock.oh.data = call->a_owner;
	call->a_args.lock.svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid;

	if (lock->oh.len > NLMCLNT_OHSIZE) {
		void *data = kmalloc(lock->oh.len, GFP_KERNEL);
		if (!data)
			return 0;
		call->a_args.lock.oh.data = (u8 *) data;
	}

	memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
	return 1;
}

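/* Free the owner data and private lock state set up by nlmsvc_setgrantargs() */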
static void nlmsvc_freegrantargs(struct nlm_rqst *call)
{
	if (call->a_args.lock.oh.data != call->a_owner)
		kfree(call->a_args.lock.oh.data);

	locks_release_private(&call->a_args.lock.fl);
}

/*
 * Deferred lock request handling for non-blocking lock
 */
static __be32
nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
{
	__be32 status = nlm_lck_denied_nolocks;

	block->b_flags |= B_QUEUED;

	nlmsvc_insert_block(block, NLM_TIMEOUT);

	block->b_cache_req = &rqstp->rq_chandle;
	if (rqstp->rq_chandle.defer) {
		block->b_deferred_req =
			rqstp->rq_chandle.defer(block->b_cache_req);
		if (block->b_deferred_req != NULL)
			status = nlm_drop_reply;
	}
	dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
		block, block->b_flags, ntohl(status));

	return status;
}

/*
 * Attempt to establish a lock, and if it can't be granted, block it
 * if required.
 */
__be32
nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
	    struct nlm_host *host, struct nlm_lock *lock, int wait,
	    struct nlm_cookie *cookie, int reclaim)
{
	struct nlm_block *block = NULL;
	int error;
	__be32 ret;

	dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
				locks_inode(file->f_file)->i_sb->s_id,
				locks_inode(file->f_file)->i_ino,
				lock->fl.fl_type, lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end,
				wait);

	/* Lock file against concurrent access */
	mutex_lock(&file->f_mutex);
	/* Get existing block (in case client is busy-waiting)
	 * or create new block
	 */
	block = nlmsvc_lookup_block(file, lock);
	if (block == NULL) {
		block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
		ret = nlm_lck_denied_nolocks;
		if (block == NULL)
			goto out;
		lock = &block->b_call->a_args.lock;
	} else
		lock->fl.fl_flags &= ~FL_SLEEP;

	if (block->b_flags & B_QUEUED) {
		dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n",
				block, block->b_flags);
		if (block->b_granted) {
			nlmsvc_unlink_block(block);
			ret = nlm_granted;
			goto out;
		}
		if (block->b_flags & B_TIMED_OUT) {
			nlmsvc_unlink_block(block);
			ret = nlm_lck_denied;
			goto out;
		}
		ret = nlm_drop_reply;
		goto out;
	}

	if (locks_in_grace(SVC_NET(rqstp)) && !reclaim) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}
	if (reclaim && !locks_in_grace(SVC_NET(rqstp))) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}

	if (!wait)
		lock->fl.fl_flags &= ~FL_SLEEP;
	error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
	lock->fl.fl_flags &= ~FL_SLEEP;

	dprintk("lockd: vfs_lock_file returned %d\n", error);
	switch (error) {
	case 0:
		ret = nlm_granted;
		goto out;
	case -EAGAIN:
		/*
		 * If this is a blocking request for an
		 * already pending lock request then we need
		 * to put it back on lockd's block list
		 */
		if (wait)
			break;
		ret = nlm_lck_denied;
		goto out;
	case FILE_LOCK_DEFERRED:
		if (wait)
			break;
		/* Filesystem lock operation is in progress
		   Add it to the queue waiting for callback */
		ret = nlmsvc_defer_lock_rqst(rqstp, block);
		goto out;
	case -EDEADLK:
		ret = nlm_deadlock;
		goto out;
	default:			/* includes ENOLCK */
		ret = nlm_lck_denied_nolocks;
		goto out;
	}

	ret = nlm_lck_blocked;

	/* Append to list of blocked */
	nlmsvc_insert_block(block, NLM_NEVER);
out:
	mutex_unlock(&file->f_mutex);
	nlmsvc_release_block(block);
	dprintk("lockd: nlmsvc_lock returned %u\n", ret);
	return ret;
}

/*
 * Test for presence of a conflicting lock.
 */
__be32
nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
		struct nlm_host *host, struct nlm_lock *lock,
		struct nlm_lock *conflock, struct nlm_cookie *cookie)
{
	int error;
	__be32 ret;
	struct nlm_lockowner *test_owner;

	dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
				locks_inode(file->f_file)->i_sb->s_id,
				locks_inode(file->f_file)->i_ino,
				lock->fl.fl_type,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	if (locks_in_grace(SVC_NET(rqstp))) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}

	/* If there's a conflicting lock, remember to clean up the test lock */
	test_owner = (struct nlm_lockowner *)lock->fl.fl_owner;

	error = vfs_test_lock(file->f_file, &lock->fl);
	if (error) {
		/* We can't currently deal with deferred test requests */
		if (error == FILE_LOCK_DEFERRED)
			WARN_ON_ONCE(1);

		ret = nlm_lck_denied_nolocks;
		goto out;
	}

	if (lock->fl.fl_type == F_UNLCK) {
		ret = nlm_granted;
		goto out;
	}

	dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
		lock->fl.fl_type, (long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end);
	conflock->caller = "somehost";	/* FIXME */
	conflock->len = strlen(conflock->caller);
	conflock->oh.len = 0;		/* don't return OH info */
	conflock->svid = lock->fl.fl_pid;
	conflock->fl.fl_type = lock->fl.fl_type;
	conflock->fl.fl_start = lock->fl.fl_start;
	conflock->fl.fl_end = lock->fl.fl_end;
	locks_release_private(&lock->fl);

	/* Clean up the test lock */
	lock->fl.fl_owner = NULL;
	nlmsvc_put_lockowner(test_owner);

	ret = nlm_lck_denied;
out:
	return ret;
}

/*
 * Remove a lock.
 * This implies a CANCEL call: We send a GRANT_MSG, the client replies
 * with a GRANT_RES call which gets lost, and calls UNLOCK immediately
 * afterwards. In this case the block will still be there, and hence
 * must be removed.
 */
__be32
nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
	int error;

	dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
				locks_inode(file->f_file)->i_sb->s_id,
				locks_inode(file->f_file)->i_ino,
				lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	/* First, cancel any lock that might be there */
	nlmsvc_cancel_blocked(net, file, lock);

	lock->fl.fl_type = F_UNLCK;
	error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);

	return (error < 0)? nlm_lck_denied_nolocks : nlm_granted;
}

/*
 * Cancel a previously blocked request.
 *
 * A cancel request always overrides any grant that may currently
 * be in progress.
 * The calling procedure must check whether the file can be closed.
 */
__be32
nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block *block;
	int status = 0;

	dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
				locks_inode(file->f_file)->i_sb->s_id,
				locks_inode(file->f_file)->i_ino,
				lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	if (locks_in_grace(net))
		return nlm_lck_denied_grace_period;

	mutex_lock(&file->f_mutex);
	block = nlmsvc_lookup_block(file, lock);
	mutex_unlock(&file->f_mutex);
	if (block != NULL) {
		vfs_cancel_lock(block->b_file->f_file,
				&block->b_call->a_args.lock.fl);
		status = nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
	}
	return status ? nlm_lck_denied : nlm_granted;
}

/*
 * This is a callback from the filesystem for VFS file lock requests.
 * It will be used if lm_grant is defined and the filesystem can not
 * respond to the request immediately.
 * For SETLK or SETLKW request it will get the local posix lock.
 * In all cases it will move the block to the head of nlm_blocked q where
 * nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit the
 * deferred rpc for GETLK and SETLK.
 */
static void
nlmsvc_update_deferred_block(struct nlm_block *block, int result)
{
	block->b_flags |= B_GOT_CALLBACK;
	if (result == 0)
		block->b_granted = 1;
	else
		block->b_flags |= B_TIMED_OUT;
}

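/*
 * lm_grant callback: the filesystem has resolved a deferred lock request.
 * Find the matching block on nlm_blocked, record the result and move the
 * block to the head of the list so lockd picks it up promptly.
 */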
static int nlmsvc_grant_deferred(struct file_lock *fl, int result)
{
	struct nlm_block *block;
	int rc = -ENOENT;

	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n",
					block, block->b_flags);
			if (block->b_flags & B_QUEUED) {
				if (block->b_flags & B_TIMED_OUT) {
					rc = -ENOLCK;
					break;
				}
				nlmsvc_update_deferred_block(block, result);
			} else if (result == 0)
				block->b_granted = 1;

			nlmsvc_insert_block_locked(block, 0);
			svc_wake_up(block->b_daemon);
			rc = 0;
			break;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	if (rc == -ENOENT)
		printk(KERN_WARNING "lockd: grant for unknown block\n");
	return rc;
}

/*
 * Unblock a blocked lock request. This is a callback invoked from the
 * VFS layer when a lock on which we blocked is removed.
 *
 * This function doesn't grant the blocked lock instantly, but rather moves
 * the block to the head of nlm_blocked where it can be picked up by lockd.
 */
static void
nlmsvc_notify_blocked(struct file_lock *fl)
{
	struct nlm_block *block;

	dprintk("lockd: VFS unblock notification for block %p\n", fl);
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			nlmsvc_insert_block_locked(block, 0);
			spin_unlock(&nlm_blocked_lock);
			svc_wake_up(block->b_daemon);
			return;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	printk(KERN_WARNING "lockd: notification for unknown block!\n");
}

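/* lm_get_owner / lm_put_owner hooks: let fs/locks.c manage lockowner references */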
static fl_owner_t nlmsvc_get_owner(fl_owner_t owner)
{
	return nlmsvc_get_lockowner(owner);
}

static void nlmsvc_put_owner(fl_owner_t owner)
{
	nlmsvc_put_lockowner(owner);
}

const struct lock_manager_operations nlmsvc_lock_operations = {
	.lm_notify = nlmsvc_notify_blocked,
	.lm_grant = nlmsvc_grant_deferred,
	.lm_get_owner = nlmsvc_get_owner,
	.lm_put_owner = nlmsvc_put_owner,
};

/*
 * Try to claim a lock that was previously blocked.
 *
 * Note that we use both the RPC_GRANTED_MSG call _and_ an async
 * RPC thread when notifying the client. This seems like overkill...
 * Here's why:
 *  - we don't want to use a synchronous RPC thread, otherwise
 *    we might find ourselves hanging on a dead portmapper.
 *  - Some lockd implementations (e.g. HP) don't react to
 *    RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
 */
static void
nlmsvc_grant_blocked(struct nlm_block *block)
{
	struct nlm_file *file = block->b_file;
	struct nlm_lock *lock = &block->b_call->a_args.lock;
	int error;
	loff_t fl_start, fl_end;

	dprintk("lockd: grant blocked lock %p\n", block);

	kref_get(&block->b_count);

	/* Unlink block request from list */
	nlmsvc_unlink_block(block);

	/* If b_granted is true this means we've been here before.
	 * Just retry the grant callback, possibly refreshing the RPC
	 * binding */
	if (block->b_granted) {
		nlm_rebind_host(block->b_host);
		goto callback;
	}

	/* Try the lock operation again */
	/* vfs_lock_file() can mangle fl_start and fl_end, but we need
	 * them unchanged for the GRANT_MSG
	 */
	lock->fl.fl_flags |= FL_SLEEP;
	fl_start = lock->fl.fl_start;
	fl_end = lock->fl.fl_end;
	error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
	lock->fl.fl_flags &= ~FL_SLEEP;
	lock->fl.fl_start = fl_start;
	lock->fl.fl_end = fl_end;

	switch (error) {
	case 0:
		break;
	case FILE_LOCK_DEFERRED:
		dprintk("lockd: lock still blocked error %d\n", error);
		nlmsvc_insert_block(block, NLM_NEVER);
		nlmsvc_release_block(block);
		return;
	default:
		printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
				-error, __func__);
		nlmsvc_insert_block(block, 10 * HZ);
		nlmsvc_release_block(block);
		return;
	}

callback:
	/* Lock was granted by VFS. */
	dprintk("lockd: GRANTing blocked lock.\n");
	block->b_granted = 1;

	/* keep block on the list, but don't reattempt until the RPC
	 * completes or the submission fails
	 */
	nlmsvc_insert_block(block, NLM_NEVER);

	/* Call the client -- use a soft RPC task since nlmsvc_retry_blocked
	 * will queue up a new one if this one times out
	 */
	error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
				&nlmsvc_grant_ops);

	/* RPC submission failed, wait a bit and retry */
	if (error < 0)
		nlmsvc_insert_block(block, 10 * HZ);
}

/*
 * This is the callback from the RPC layer when the NLM_GRANTED_MSG
 * RPC call has succeeded or timed out.
 * Like all RPC callbacks, it is invoked by the rpciod process, so it
 * better not sleep. Therefore, we put the blocked lock on the nlm_blocked
 * chain once more in order to have it removed by lockd itself (which can
 * then sleep on the file semaphore without disrupting e.g. the nfs client).
 */
static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst *call = data;
	struct nlm_block *block = call->a_block;
	unsigned long timeout;

	dprintk("lockd: GRANT_MSG RPC callback\n");

	spin_lock(&nlm_blocked_lock);
	/* if the block is not on a list at this point then it has
	 * been invalidated. Don't try to requeue it.
	 *
	 * FIXME: it's possible that the block is removed from the list
	 * after this check but before the nlmsvc_insert_block. In that
	 * case it will be added back. Perhaps we need better locking
	 * for nlm_blocked?
	 */
	if (list_empty(&block->b_list))
		goto out;

	/* Technically, we should down the file semaphore here. Since we
	 * move the block towards the head of the queue only, no harm
	 * can be done, though. */
	if (task->tk_status < 0) {
		/* RPC error: Re-insert for retransmission */
		timeout = 10 * HZ;
	} else {
		/* Call was successful, now wait for client callback */
		timeout = 60 * HZ;
	}
	nlmsvc_insert_block_locked(block, timeout);
	svc_wake_up(block->b_daemon);
out:
	spin_unlock(&nlm_blocked_lock);
}

/*
 * FIXME: nlmsvc_release_block() grabs a mutex. This is not allowed for an
 * .rpc_release rpc_call_op
 */
static void nlmsvc_grant_release(void *data)
{
	struct nlm_rqst *call = data;
	nlmsvc_release_block(call->a_block);
}

static const struct rpc_call_ops nlmsvc_grant_ops = {
	.rpc_call_done = nlmsvc_grant_callback,
	.rpc_release = nlmsvc_grant_release,
};

/*
 * We received a GRANT_RES callback. Try to find the corresponding
 * block.
 */
void
nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
{
	struct nlm_block *block;

	dprintk("grant_reply: looking for cookie %x, s=%d \n",
		*(unsigned int *)(cookie->data), status);
	if (!(block = nlmsvc_find_block(cookie)))
		return;

	if (status == nlm_lck_denied_grace_period) {
		/* Try again in a couple of seconds */
		nlmsvc_insert_block(block, 10 * HZ);
	} else {
		/*
		 * Lock is now held by client, or has been rejected.
		 * In both cases, the block should be removed.
		 */
		nlmsvc_unlink_block(block);
	}
	nlmsvc_release_block(block);
}

/* Helper function to handle retry of a deferred block.
 * If it is a blocking lock, call grant_blocked.
 * For a non-blocking lock or test lock, revisit the request.
 */
static void
retry_deferred_block(struct nlm_block *block)
{
	if (!(block->b_flags & B_GOT_CALLBACK))
		block->b_flags |= B_TIMED_OUT;
	nlmsvc_insert_block(block, NLM_TIMEOUT);
	dprintk("revisit block %p flags %d\n", block, block->b_flags);
	if (block->b_deferred_req) {
		block->b_deferred_req->revisit(block->b_deferred_req, 0);
		block->b_deferred_req = NULL;
	}
}

/*
 * Retry all blocked locks that have been notified. This is where lockd
 * picks up locks that can be granted, or grant notifications that must
 * be retransmitted.
 */
unsigned long
nlmsvc_retry_blocked(void)
{
	unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
	struct nlm_block *block;

	spin_lock(&nlm_blocked_lock);
	while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
		block = list_entry(nlm_blocked.next, struct nlm_block, b_list);

		if (block->b_when == NLM_NEVER)
			break;
		if (time_after(block->b_when, jiffies)) {
			timeout = block->b_when - jiffies;
			break;
		}
		spin_unlock(&nlm_blocked_lock);

		dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
			block, block->b_when);
		if (block->b_flags & B_QUEUED) {
			dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n",
				block, block->b_granted, block->b_flags);
			retry_deferred_block(block);
		} else
			nlmsvc_grant_blocked(block);
		spin_lock(&nlm_blocked_lock);
	}
	spin_unlock(&nlm_blocked_lock);

	return timeout;
}