// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/lockd/clntproc.c
 *
 * RPC procedures for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>

#define NLMDBG_FACILITY		NLMDBG_CLIENT
/* How long to back off between retries while the server is in its grace period */
#define NLMCLNT_GRACE_WAIT	(5*HZ)
/* How long to wait for a callback on a blocked lock before polling the server again */
#define NLMCLNT_POLL_TIMEOUT	(30*HZ)
/* Maximum number of times an asynchronous CANCEL request is retried */
#define NLMCLNT_MAX_RETRIES	3

static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static int	nlm_stat_to_errno(__be32 stat);
static void	nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
static int	nlmclnt_cancel(struct nlm_host *, int, struct file_lock *);

static const struct rpc_call_ops nlmclnt_unlock_ops;
static const struct rpc_call_ops nlmclnt_cancel_ops;

/*
 * Cookie counter for NLM requests
 */
static atomic_t	nlm_cookie = ATOMIC_INIT(0x1234);

void nlmclnt_next_cookie(struct nlm_cookie *c)
{
	u32	cookie = atomic_inc_return(&nlm_cookie);

	memcpy(c->data, &cookie, 4);
	c->len = 4;
}

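/*
 * Lock owner handling.  Each (host, fl_owner_t) pair maps to an
 * nlm_lockowner carrying a unique 32-bit pid allocated from the host;
 * that pid is what identifies the owner (the "svid") on the wire.
 */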
static struct nlm_lockowner *
nlmclnt_get_lockowner(struct nlm_lockowner *lockowner)
{
	refcount_inc(&lockowner->count);
	return lockowner;
}

static void nlmclnt_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlmclnt_release_host(lockowner->host);
	kfree(lockowner);
}

static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
{
	struct nlm_lockowner *lockowner;

	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid == pid)
			return -EBUSY;
	}
	return 0;
}

static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
{
	uint32_t res;

	do {
		res = host->h_pidcount++;
	} while (nlm_pidbusy(host, res) < 0);
	return res;
}

static struct nlm_lockowner *__nlmclnt_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *lockowner;

	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->owner != owner)
			continue;
		return nlmclnt_get_lockowner(lockowner);
	}
	return NULL;
}

static struct nlm_lockowner *nlmclnt_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlmclnt_find_lockowner(host, owner);
	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlmclnt_find_lockowner(host, owner);
		if (res == NULL && new != NULL) {
			res = new;
			refcount_set(&new->count, 1);
			new->owner = owner;
			new->pid = __nlm_alloc_pid(host);
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}
	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}

/*
 * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
 */
static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_args	*argp = &req->a_args;
	struct nlm_lock	*lock = &argp->lock;
	char *nodename = req->a_host->h_rpcclnt->cl_nodename;

	nlmclnt_next_cookie(&argp->cookie);
	memcpy(&lock->fh, NFS_FH(locks_inode(fl->fl_file)), sizeof(struct nfs_fh));
	lock->caller  = nodename;
	lock->oh.data = req->a_owner;
	lock->oh.len  = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
				(unsigned int)fl->fl_u.nfs_fl.owner->pid,
				nodename);
	lock->svid = fl->fl_u.nfs_fl.owner->pid;
	lock->fl.fl_start = fl->fl_start;
	lock->fl.fl_end = fl->fl_end;
	lock->fl.fl_type = fl->fl_type;
}

static void nlmclnt_release_lockargs(struct nlm_rqst *req)
{
	WARN_ON_ONCE(req->a_args.lock.fl.fl_ops != NULL);
}

/**
 * nlmclnt_proc - Perform a single client-side lock request
 * @host: address of a valid nlm_host context representing the NLM server
 * @cmd: fcntl-style file lock operation to perform
 * @fl: address of arguments for the lock operation
 * @data: address of data to be sent to callback operations
 *
 */
int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data)
{
	struct nlm_rqst		*call;
	int			status;
	const struct nlmclnt_operations *nlmclnt_ops = host->h_nlmclnt_ops;

	call = nlm_alloc_call(host);
	if (call == NULL)
		return -ENOMEM;

	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_alloc_call)
		nlmclnt_ops->nlmclnt_alloc_call(data);

	nlmclnt_locks_init_private(fl, host);
	if (!fl->fl_u.nfs_fl.owner) {
		/* lockowner allocation has failed */
		nlmclnt_release_call(call);
		return -ENOMEM;
	}
	/* Set up the argument struct */
	nlmclnt_setlockargs(call, fl);
	call->a_callback_data = data;

	if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
		if (fl->fl_type != F_UNLCK) {
			call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
			status = nlmclnt_lock(call, fl);
		} else
			status = nlmclnt_unlock(call, fl);
	} else if (IS_GETLK(cmd))
		status = nlmclnt_test(call, fl);
	else
		status = -EINVAL;
	fl->fl_ops->fl_release_private(fl);
	fl->fl_ops = NULL;

	dprintk("lockd: clnt proc returns %d\n", status);
	return status;
}
EXPORT_SYMBOL_GPL(nlmclnt_proc);

/*
 * Allocate an NLM RPC call struct
 */
struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
{
	struct nlm_rqst	*call;

	for(;;) {
		call = kzalloc(sizeof(*call), GFP_KERNEL);
		if (call != NULL) {
			refcount_set(&call->a_count, 1);
			locks_init_lock(&call->a_args.lock.fl);
			locks_init_lock(&call->a_res.lock.fl);
			call->a_host = nlm_get_host(host);
			return call;
		}
		if (signalled())
			break;
		printk("nlm_alloc_call: failed, waiting for memory\n");
		schedule_timeout_interruptible(5*HZ);
	}
	return NULL;
}

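/*
 * Drop a reference to an NLM RPC call.  The final put invokes the
 * filesystem's nlmclnt_release_call callback (if any), releases the
 * nlm_host and frees the request.
 */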
void nlmclnt_release_call(struct nlm_rqst *call)
{
	const struct nlmclnt_operations *nlmclnt_ops = call->a_host->h_nlmclnt_ops;

	if (!refcount_dec_and_test(&call->a_count))
		return;
	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_release_call)
		nlmclnt_ops->nlmclnt_release_call(call->a_callback_data);
	nlmclnt_release_host(call->a_host);
	nlmclnt_release_lockargs(call);
	kfree(call);
}

static void nlmclnt_rpc_release(void *data)
{
	nlmclnt_release_call(data);
}

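/*
 * Back off for up to NLMCLNT_GRACE_WAIT while the server is in its
 * grace period.  Returns 0 if the caller should retry the request,
 * or -EINTR if a signal was received while waiting.
 */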
static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
	DEFINE_WAIT(wait);
	int status = -EINTR;

	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
	if (!signalled()) {
		schedule_timeout(NLMCLNT_GRACE_WAIT);
		try_to_freeze();
		if (!signalled())
			status = 0;
	}
	finish_wait(queue, &wait);
	return status;
}

/*
 * Generic NLM call
 */
static int
nlmclnt_call(const struct cred *cred, struct nlm_rqst *req, u32 proc)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct nlm_args	*argp = &req->a_args;
	struct nlm_res	*resp = &req->a_res;
	struct rpc_message msg = {
		.rpc_argp	= argp,
		.rpc_resp	= resp,
		.rpc_cred	= cred,
	};
	int		status;

	dprintk("lockd: call procedure %d on %s\n",
			(int)proc, host->h_name);

	do {
		if (host->h_reclaiming && !argp->reclaim)
			goto in_grace_period;

		/* If we have no RPC client yet, create one. */
		if ((clnt = nlm_bind_host(host)) == NULL)
			return -ENOLCK;
		msg.rpc_proc = &clnt->cl_procinfo[proc];

		/* Perform the RPC call. If an error occurs, try again */
		if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
			dprintk("lockd: rpc_call returned error %d\n", -status);
			switch (status) {
			case -EPROTONOSUPPORT:
				status = -EINVAL;
				break;
			case -ECONNREFUSED:
			case -ETIMEDOUT:
			case -ENOTCONN:
				nlm_rebind_host(host);
				status = -EAGAIN;
				break;
			case -ERESTARTSYS:
				return signalled() ? -EINTR : status;
			default:
				break;
			}
			break;
		} else if (resp->status == nlm_lck_denied_grace_period) {
			dprintk("lockd: server in grace period\n");
			if (argp->reclaim) {
				printk(KERN_WARNING
					"lockd: spurious grace period reject?!\n");
				return -ENOLCK;
			}
		} else {
			if (!argp->reclaim) {
				/* We appear to be out of the grace period */
				wake_up_all(&host->h_gracewait);
			}
			dprintk("lockd: server returns status %d\n",
				ntohl(resp->status));
			return 0;	/* Okay, call complete */
		}

in_grace_period:
		/*
		 * The server has rebooted and appears to be in the grace
		 * period during which locks are only allowed to be
		 * reclaimed.
		 * We can only back off and try again later.
		 */
		status = nlm_wait_on_grace(&host->h_gracewait);
	} while (status == 0);

	return status;
}

/*
 * Generic NLM call, async version.
 */
static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct rpc_task_setup task_setup_data = {
		.rpc_message = msg,
		.callback_ops = tk_ops,
		.callback_data = req,
		.flags = RPC_TASK_ASYNC,
	};

	dprintk("lockd: call procedure %d on %s (async)\n",
			(int)proc, host->h_name);

	/* If we have no RPC client yet, create one. */
	clnt = nlm_bind_host(host);
	if (clnt == NULL)
		goto out_err;
	msg->rpc_proc = &clnt->cl_procinfo[proc];
	task_setup_data.rpc_client = clnt;

	/* bootstrap and kick off the async RPC call */
	return rpc_run_task(&task_setup_data);
out_err:
	tk_ops->rpc_release(req);
	return ERR_PTR(-ENOLCK);
}

static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
	struct rpc_task *task;

	task = __nlm_async_call(req, proc, msg, tk_ops);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

/*
 * NLM asynchronous call.
 */
int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_args,
		.rpc_resp	= &req->a_res,
	};
	return nlm_do_async_call(req, proc, &msg, tk_ops);
}

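/*
 * As nlm_async_call(), but only the NLM result structure is passed as
 * the RPC argument, matching the *_RES reply procedures which carry an
 * nlm_res rather than an nlm_args.
 */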
int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_res,
	};
	return nlm_do_async_call(req, proc, &msg, tk_ops);
}

/*
 * NLM client asynchronous call.
 *
 * Note that although the calls are asynchronous, and are therefore
 * guaranteed to complete, we still always attempt to wait for
 * completion in order to be able to correctly track the lock
 * state.
 */
static int nlmclnt_async_call(const struct cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_args,
		.rpc_resp	= &req->a_res,
		.rpc_cred	= cred,
	};
	struct rpc_task *task;
	int err;

	task = __nlm_async_call(req, proc, &msg, tk_ops);
	if (IS_ERR(task))
		return PTR_ERR(task);
	err = rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return err;
}

/*
 * TEST for the presence of a conflicting lock
 */
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
	int	status;

	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST);
	if (status < 0)
		goto out;

	switch (req->a_res.status) {
	case nlm_granted:
		fl->fl_type = F_UNLCK;
		break;
	case nlm_lck_denied:
		/*
		 * Report the conflicting lock back to the application.
		 */
		fl->fl_start = req->a_res.lock.fl.fl_start;
		fl->fl_end = req->a_res.lock.fl.fl_end;
		fl->fl_type = req->a_res.lock.fl.fl_type;
		fl->fl_pid = -req->a_res.lock.fl.fl_pid;
		break;
	default:
		status = nlm_stat_to_errno(req->a_res.status);
	}
out:
	nlmclnt_release_call(req);
	return status;
}

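/*
 * file_lock_operations glue: keep the lockowner reference count and the
 * per-host list of granted locks in sync when the VFS copies or releases
 * a struct file_lock that we own.
 */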
static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state;
	new->fl_u.nfs_fl.owner = nlmclnt_get_lockowner(fl->fl_u.nfs_fl.owner);
	list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted);
	spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
}

static void nlmclnt_locks_release_private(struct file_lock *fl)
{
	spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	list_del(&fl->fl_u.nfs_fl.list);
	spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	nlmclnt_put_lockowner(fl->fl_u.nfs_fl.owner);
}

static const struct file_lock_operations nlmclnt_lock_ops = {
	.fl_copy_lock = nlmclnt_locks_copy_lock,
	.fl_release_private = nlmclnt_locks_release_private,
};

static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
	fl->fl_u.nfs_fl.state = 0;
	fl->fl_u.nfs_fl.owner = nlmclnt_find_lockowner(host, fl->fl_owner);
	INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list);
	fl->fl_ops = &nlmclnt_lock_ops;
}

static int do_vfs_lock(struct file_lock *fl)
{
	return locks_lock_file_wait(fl->fl_file, fl);
}

/*
 * LOCK: Try to create a lock
 *
 *			Programmer Harassment Alert
 *
 * When given a blocking lock request in a sync RPC call, the HPUX lockd
 * will faithfully return LCK_BLOCKED but never cares to notify us when
 * the lock could be granted. This way, our local process could hang
 * around forever waiting for the callback.
 *
 *  Solution A: Implement busy-waiting
 *  Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
 *
 * For now I am implementing solution A, because I hate the idea of
 * re-implementing lockd for a third time in two months. The async
 * calls shouldn't be too hard to do, however.
 *
 * This is one of the lovely things about standards in the NFS area:
 * they're so soft and squishy you can't really blame HP for doing this.
 */
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
	const struct cred *cred = nfs_file_cred(fl->fl_file);
	struct nlm_host	*host = req->a_host;
	struct nlm_res	*resp = &req->a_res;
	struct nlm_wait *block = NULL;
	unsigned char fl_flags = fl->fl_flags;
	unsigned char fl_type;
	int status = -ENOLCK;

	if (nsm_monitor(host) < 0)
		goto out;
	req->a_args.state = nsm_local_state;

	fl->fl_flags |= FL_ACCESS;
	status = do_vfs_lock(fl);
	fl->fl_flags = fl_flags;
	if (status < 0)
		goto out;

	block = nlmclnt_prepare_block(host, fl);
again:
	/*
	 * Initialise resp->status to a valid non-zero value,
	 * since 0 == nlm_lck_granted
	 */
	resp->status = nlm_lck_blocked;
	for(;;) {
		/* Reboot protection */
		fl->fl_u.nfs_fl.state = host->h_state;
		status = nlmclnt_call(cred, req, NLMPROC_LOCK);
		if (status < 0)
			break;
		/* Did a reclaimer thread notify us of a server reboot? */
		if (resp->status == nlm_lck_denied_grace_period)
			continue;
		if (resp->status != nlm_lck_blocked)
			break;
		/* Wait on an NLM blocking lock */
		status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
		if (status < 0)
			break;
		if (resp->status != nlm_lck_blocked)
			break;
	}

	/* if we were interrupted while blocking, then cancel the lock request
	 * and exit
	 */
	if (resp->status == nlm_lck_blocked) {
		if (!req->a_args.block)
			goto out_unlock;
		if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
			goto out_unblock;
	}

	if (resp->status == nlm_granted) {
		down_read(&host->h_rwsem);
		/* Check whether or not the server has rebooted */
		if (fl->fl_u.nfs_fl.state != host->h_state) {
			up_read(&host->h_rwsem);
			goto again;
		}
		/* Ensure the resulting lock will get added to granted list */
		fl->fl_flags |= FL_SLEEP;
		if (do_vfs_lock(fl) < 0)
			printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
		up_read(&host->h_rwsem);
		fl->fl_flags = fl_flags;
		status = 0;
	}
	if (status < 0)
		goto out_unlock;
	/*
	 * EAGAIN doesn't make sense for sleeping locks, and in some
	 * cases NLM_LCK_DENIED is returned for a permanent error.  So
	 * turn it into an ENOLCK.
	 */
	if (resp->status == nlm_lck_denied && (fl_flags & FL_SLEEP))
		status = -ENOLCK;
	else
		status = nlm_stat_to_errno(resp->status);
out_unblock:
	nlmclnt_finish_block(block);
out:
	nlmclnt_release_call(req);
	return status;
out_unlock:
	/* Fatal error: ensure that we remove the lock altogether */
	dprintk("lockd: lock attempt ended in fatal error.\n"
		"       Attempting to unlock.\n");
	nlmclnt_finish_block(block);
	fl_type = fl->fl_type;
	fl->fl_type = F_UNLCK;
	down_read(&host->h_rwsem);
	do_vfs_lock(fl);
	up_read(&host->h_rwsem);
	fl->fl_type = fl_type;
	fl->fl_flags = fl_flags;
	nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
	return status;
}

/*
 * RECLAIM: Try to reclaim a lock
 */
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl,
		struct nlm_rqst *req)
{
	int	status;

	memset(req, 0, sizeof(*req));
	locks_init_lock(&req->a_args.lock.fl);
	locks_init_lock(&req->a_res.lock.fl);
	req->a_host = host;

	/* Set up the argument struct */
	nlmclnt_setlockargs(req, fl);
	req->a_args.reclaim = 1;

	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK);
	if (status >= 0 && req->a_res.status == nlm_granted)
		return 0;

	printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
		"(errno %d, status %d)\n", fl->fl_pid,
		status, ntohl(req->a_res.status));

	/*
	 * FIXME: This is a serious failure. We can
	 *
	 *  a. Ignore the problem
	 *  b. Send the owning process some signal (Linux doesn't have
	 *     SIGLOST, though...)
	 *  c. Retry the operation
	 *
	 * Until someone comes up with a simple implementation
	 * for b or c, I'll choose option a.
	 */

	return -ENOLCK;
}

/*
 * UNLOCK: remove an existing lock
 */
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_host	*host = req->a_host;
	struct nlm_res	*resp = &req->a_res;
	int status;
	unsigned char fl_flags = fl->fl_flags;

	/*
	 * Note: the server is supposed to either grant us the unlock
	 * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
	 * case, we want to unlock.
	 */
	fl->fl_flags |= FL_EXISTS;
	down_read(&host->h_rwsem);
	status = do_vfs_lock(fl);
	up_read(&host->h_rwsem);
	fl->fl_flags = fl_flags;
	if (status == -ENOENT) {
		status = 0;
		goto out;
	}

	refcount_inc(&req->a_count);
	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
			NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
	if (status < 0)
		goto out;

	if (resp->status == nlm_granted)
		goto out;

	if (resp->status != nlm_lck_denied_nolocks)
		printk("lockd: unexpected unlock status: %d\n",
			ntohl(resp->status));
	/* What to do now? I'm out of my depth... */
	status = -ENOLCK;
out:
	nlmclnt_release_call(req);
	return status;
}

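/*
 * Completion handling for the asynchronous UNLOCK call.  The prepare
 * callback lets the filesystem defer the RPC via nlmclnt_operations;
 * the done callback retries after a server grace period or a transient
 * RPC error, and bails out on a signal or a hard error.
 */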
static void nlmclnt_unlock_prepare(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = data;
	const struct nlmclnt_operations *nlmclnt_ops = req->a_host->h_nlmclnt_ops;
	bool defer_call = false;

	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_unlock_prepare)
		defer_call = nlmclnt_ops->nlmclnt_unlock_prepare(task, req->a_callback_data);

	if (!defer_call)
		rpc_call_start(task);
}

static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = data;
	u32 status = ntohl(req->a_res.status);

	if (RPC_SIGNALLED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
		switch (task->tk_status) {
		case -EACCES:
		case -EIO:
			goto die;
		default:
			goto retry_rebind;
		}
	}
	if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
		rpc_delay(task, NLMCLNT_GRACE_WAIT);
		goto retry_unlock;
	}
	if (status != NLM_LCK_GRANTED)
		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
	return;
retry_rebind:
	nlm_rebind_host(req->a_host);
retry_unlock:
	rpc_restart_call(task);
}

static const struct rpc_call_ops nlmclnt_unlock_ops = {
	.rpc_call_prepare = nlmclnt_unlock_prepare,
	.rpc_call_done = nlmclnt_unlock_callback,
	.rpc_release = nlmclnt_rpc_release,
};

/*
 * Cancel a blocked lock request.
 * We always use an async RPC call for this in order not to hang a
 * process that has been Ctrl-C'ed.
 */
static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl)
{
	struct nlm_rqst	*req;
	int status;

	dprintk("lockd: blocking lock attempt was interrupted by a signal.\n"
		"       Attempting to cancel lock.\n");

	req = nlm_alloc_call(host);
	if (!req)
		return -ENOMEM;
	req->a_flags = RPC_TASK_ASYNC;

	nlmclnt_setlockargs(req, fl);
	req->a_args.block = block;

	refcount_inc(&req->a_count);
	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
			NLMPROC_CANCEL, &nlmclnt_cancel_ops);
	if (status == 0 && req->a_res.status == nlm_lck_denied)
		status = -ENOLCK;
	nlmclnt_release_call(req);
	return status;
}

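/*
 * Completion handling for the asynchronous CANCEL call.  RPC errors and
 * an NLM_LCK_DENIED_NOLOCKS reply are retried (after rebinding to the
 * server) up to NLMCLNT_MAX_RETRIES times; other replies are final.
 */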
static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = data;
	u32 status = ntohl(req->a_res.status);

	if (RPC_SIGNALLED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: CANCEL call error %d, retrying.\n",
			task->tk_status);
		goto retry_cancel;
	}

	dprintk("lockd: cancel status %u (task %u)\n",
			status, task->tk_pid);

	switch (status) {
	case NLM_LCK_GRANTED:
	case NLM_LCK_DENIED_GRACE_PERIOD:
	case NLM_LCK_DENIED:
		/* Everything's good */
		break;
	case NLM_LCK_DENIED_NOLOCKS:
		dprintk("lockd: CANCEL failed (server has no locks)\n");
		goto retry_cancel;
	default:
		printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
			status);
	}

die:
	return;

retry_cancel:
	/* Don't ever retry more than 3 times */
	if (req->a_retries++ >= NLMCLNT_MAX_RETRIES)
		goto die;
	nlm_rebind_host(req->a_host);
	rpc_restart_call(task);
	rpc_delay(task, 30 * HZ);
}

static const struct rpc_call_ops nlmclnt_cancel_ops = {
	.rpc_call_done = nlmclnt_cancel_callback,
	.rpc_release = nlmclnt_rpc_release,
};

/*
 * Convert an NLM status code to a generic kernel errno
 */
static int
nlm_stat_to_errno(__be32 status)
{
	switch(ntohl(status)) {
	case NLM_LCK_GRANTED:
		return 0;
	case NLM_LCK_DENIED:
		return -EAGAIN;
	case NLM_LCK_DENIED_NOLOCKS:
	case NLM_LCK_DENIED_GRACE_PERIOD:
		return -ENOLCK;
	case NLM_LCK_BLOCKED:
		printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
		return -ENOLCK;
#ifdef CONFIG_LOCKD_V4
	case NLM_DEADLCK:
		return -EDEADLK;
	case NLM_ROFS:
		return -EROFS;
	case NLM_STALE_FH:
		return -ESTALE;
	case NLM_FBIG:
		return -EOVERFLOW;
	case NLM_FAILED:
		return -ENOLCK;
#endif
	}
	printk(KERN_NOTICE "lockd: unexpected server status %d\n",
		ntohl(status));
	return -ENOLCK;
}