/*
 * linux/fs/lockd/clntproc.c
 *
 * RPC procedures for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>

#define NLMDBG_FACILITY		NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT	(5*HZ)
#define NLMCLNT_POLL_TIMEOUT	(30*HZ)
#define NLMCLNT_MAX_RETRIES	3

static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static int	nlm_stat_to_errno(__be32 stat);
static void	nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
static int	nlmclnt_cancel(struct nlm_host *, int , struct file_lock *);

static const struct rpc_call_ops nlmclnt_unlock_ops;
static const struct rpc_call_ops nlmclnt_cancel_ops;

/*
 * Cookie counter for NLM requests
 */
static atomic_t	nlm_cookie = ATOMIC_INIT(0x1234);

void nlmclnt_next_cookie(struct nlm_cookie *c)
{
	u32	cookie = atomic_inc_return(&nlm_cookie);

	memcpy(c->data, &cookie, 4);
	c->len=4;
}

static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner)
{
	atomic_inc(&lockowner->count);
	return lockowner;
}

static void nlm_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!atomic_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlm_release_host(lockowner->host);
	kfree(lockowner);
}

static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid == pid)
			return -EBUSY;
	}
	return 0;
}

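/*
 * Pick the next per-host NLM svid: keep advancing h_pidcount until we hit
 * a value that nlm_pidbusy() does not find on the host's lockowner list.
 * Called with host->h_lock held.
 */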
static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
{
	uint32_t res;
	do {
		res = host->h_pidcount++;
	} while (nlm_pidbusy(host, res) < 0);
	return res;
}

static struct nlm_lockowner *__nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->owner != owner)
			continue;
		return nlm_get_lockowner(lockowner);
	}
	return NULL;
}

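/*
 * Look up (or create) the nlm_lockowner for this fl_owner_t. Since we
 * cannot allocate with GFP_KERNEL while holding h_lock, drop the lock,
 * allocate, then re-take the lock and re-check that nobody raced us
 * before inserting the new entry.
 */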
static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlm_find_lockowner(host, owner);
	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlm_find_lockowner(host, owner);
		if (res == NULL && new != NULL) {
			res = new;
			atomic_set(&new->count, 1);
			new->owner = owner;
			new->pid = __nlm_alloc_pid(host);
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}
	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}

/*
 * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
 */
static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_args	*argp = &req->a_args;
	struct nlm_lock	*lock = &argp->lock;

	nlmclnt_next_cookie(&argp->cookie);
	argp->state   = nsm_local_state;
	memcpy(&lock->fh, NFS_FH(fl->fl_file->f_path.dentry->d_inode), sizeof(struct nfs_fh));
	lock->caller  = utsname()->nodename;
	lock->oh.data = req->a_owner;
	lock->oh.len  = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
				(unsigned int)fl->fl_u.nfs_fl.owner->pid,
				utsname()->nodename);
	lock->svid = fl->fl_u.nfs_fl.owner->pid;
	lock->fl.fl_start = fl->fl_start;
	lock->fl.fl_end = fl->fl_end;
	lock->fl.fl_type = fl->fl_type;
}

static void nlmclnt_release_lockargs(struct nlm_rqst *req)
{
	BUG_ON(req->a_args.lock.fl.fl_ops != NULL);
}

/**
 * nlmclnt_proc - Perform a single client-side lock request
 * @host: address of a valid nlm_host context representing the NLM server
 * @cmd: fcntl-style file lock operation to perform
 * @fl: address of arguments for the lock operation
 *
 */
int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl)
{
	struct nlm_rqst		*call;
	int			status;

	nlm_get_host(host);
	call = nlm_alloc_call(host);
	if (call == NULL)
		return -ENOMEM;

	nlmclnt_locks_init_private(fl, host);
	/* Set up the argument struct */
	nlmclnt_setlockargs(call, fl);

	if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
		if (fl->fl_type != F_UNLCK) {
			call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
			status = nlmclnt_lock(call, fl);
		} else
			status = nlmclnt_unlock(call, fl);
	} else if (IS_GETLK(cmd))
		status = nlmclnt_test(call, fl);
	else
		status = -EINVAL;

	fl->fl_ops->fl_release_private(fl);
	fl->fl_ops = NULL;

	dprintk("lockd: clnt proc returns %d\n", status);
	return status;
}
EXPORT_SYMBOL_GPL(nlmclnt_proc);
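
/*
 * Illustrative call sequence (a sketch only, not part of this file's API):
 * a caller such as the NFS client passes the nlm_host it set up for the
 * server, together with the fcntl command and the file_lock, e.g.
 *
 *	status = nlmclnt_proc(host, F_SETLKW, fl);
 *	if (status < 0)
 *		return status;	 (e.g. -ENOLCK, -ENOMEM, -EINTR)
 */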

/*
 * Allocate an NLM RPC call struct
 *
 * Note: the caller must hold a reference to host. In case of failure,
 * this reference will be released.
 */
struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
{
	struct nlm_rqst	*call;

	for(;;) {
		call = kzalloc(sizeof(*call), GFP_KERNEL);
		if (call != NULL) {
			atomic_set(&call->a_count, 1);
			locks_init_lock(&call->a_args.lock.fl);
			locks_init_lock(&call->a_res.lock.fl);
			call->a_host = host;
			return call;
		}
		if (signalled())
			break;
		printk("nlm_alloc_call: failed, waiting for memory\n");
		schedule_timeout_interruptible(5*HZ);
	}
	nlm_release_host(host);
	return NULL;
}

void nlm_release_call(struct nlm_rqst *call)
{
	if (!atomic_dec_and_test(&call->a_count))
		return;
	nlm_release_host(call->a_host);
	nlmclnt_release_lockargs(call);
	kfree(call);
}

static void nlmclnt_rpc_release(void *data)
{
	lock_kernel();
	nlm_release_call(data);
	unlock_kernel();
}

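/*
 * Wait for up to NLMCLNT_GRACE_WAIT before retrying a request that was
 * rejected during the server's grace period. Returns -EINTR if a signal
 * arrived while we were sleeping, 0 otherwise.
 */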
static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
	DEFINE_WAIT(wait);
	int status = -EINTR;

	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
	if (!signalled ()) {
		schedule_timeout(NLMCLNT_GRACE_WAIT);
		try_to_freeze();
		if (!signalled ())
			status = 0;
	}
	finish_wait(queue, &wait);
	return status;
}

/*
 * Generic NLM call
 */
static int
nlmclnt_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct nlm_args	*argp = &req->a_args;
	struct nlm_res	*resp = &req->a_res;
	struct rpc_message msg = {
		.rpc_argp	= argp,
		.rpc_resp	= resp,
		.rpc_cred	= cred,
	};
	int		status;

	dprintk("lockd: call procedure %d on %s\n",
			(int)proc, host->h_name);

	do {
		if (host->h_reclaiming && !argp->reclaim)
			goto in_grace_period;

		/* If we have no RPC client yet, create one. */
		if ((clnt = nlm_bind_host(host)) == NULL)
			return -ENOLCK;
		msg.rpc_proc = &clnt->cl_procinfo[proc];

		/* Perform the RPC call. If an error occurs, try again */
		if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
			dprintk("lockd: rpc_call returned error %d\n", -status);
			switch (status) {
			case -EPROTONOSUPPORT:
				status = -EINVAL;
				break;
			case -ECONNREFUSED:
			case -ETIMEDOUT:
			case -ENOTCONN:
				nlm_rebind_host(host);
				status = -EAGAIN;
				break;
			case -ERESTARTSYS:
				return signalled () ? -EINTR : status;
			default:
				break;
			}
			break;
		} else
		if (resp->status == nlm_lck_denied_grace_period) {
			dprintk("lockd: server in grace period\n");
			if (argp->reclaim) {
				printk(KERN_WARNING
				     "lockd: spurious grace period reject?!\n");
				return -ENOLCK;
			}
		} else {
			if (!argp->reclaim) {
				/* We appear to be out of the grace period */
				wake_up_all(&host->h_gracewait);
			}
			dprintk("lockd: server returns status %d\n", resp->status);
			return 0;	/* Okay, call complete */
		}

in_grace_period:
		/*
		 * The server has rebooted and appears to be in the grace
		 * period during which locks are only allowed to be
		 * reclaimed.
		 * We can only back off and try again later.
		 */
		status = nlm_wait_on_grace(&host->h_gracewait);
	} while (status == 0);

	return status;
}

/*
 * Generic NLM call, async version.
 */
static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct rpc_task_setup task_setup_data = {
		.rpc_message = msg,
		.callback_ops = tk_ops,
		.callback_data = req,
		.flags = RPC_TASK_ASYNC,
	};

	dprintk("lockd: call procedure %d on %s (async)\n",
			(int)proc, host->h_name);

	/* If we have no RPC client yet, create one. */
	clnt = nlm_bind_host(host);
	if (clnt == NULL)
		goto out_err;
	msg->rpc_proc = &clnt->cl_procinfo[proc];
	task_setup_data.rpc_client = clnt;

	/* bootstrap and kick off the async RPC call */
	return rpc_run_task(&task_setup_data);
out_err:
	tk_ops->rpc_release(req);
	return ERR_PTR(-ENOLCK);
}

static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
	struct rpc_task *task;

	task = __nlm_async_call(req, proc, msg, tk_ops);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

/*
 * NLM asynchronous call.
 */
int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_args,
		.rpc_resp	= &req->a_res,
	};
	return nlm_do_async_call(req, proc, &msg, tk_ops);
}

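/*
 * Like nlm_async_call(), except that the nlm_res is sent as the RPC
 * argument and no reply body is decoded (as when sending a *_RES reply
 * to an NLM *_MSG procedure).
 */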
int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_res,
	};
	return nlm_do_async_call(req, proc, &msg, tk_ops);
}

/*
 * NLM client asynchronous call.
 *
 * Note that although the calls are asynchronous, and are therefore
 *      guaranteed to complete, we still always attempt to wait for
 *      completion in order to be able to correctly track the lock
 *      state.
 */
static int nlmclnt_async_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_args,
		.rpc_resp	= &req->a_res,
		.rpc_cred	= cred,
	};
	struct rpc_task *task;
	int err;

	task = __nlm_async_call(req, proc, &msg, tk_ops);
	if (IS_ERR(task))
		return PTR_ERR(task);
	err = rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return err;
}

/*
 * TEST for the presence of a conflicting lock
 */
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
	int	status;

	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST);
	if (status < 0)
		goto out;

	switch (req->a_res.status) {
		case nlm_granted:
			fl->fl_type = F_UNLCK;
			break;
		case nlm_lck_denied:
			/*
			 * Report the conflicting lock back to the application.
			 */
			fl->fl_start = req->a_res.lock.fl.fl_start;
			fl->fl_end = req->a_res.lock.fl.fl_end;
			fl->fl_type = req->a_res.lock.fl.fl_type;
			fl->fl_pid = 0;
			break;
		default:
			status = nlm_stat_to_errno(req->a_res.status);
	}
out:
	nlm_release_call(req);
	return status;
}

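/*
 * file_lock_operations for locks obtained through lockd: copying a lock
 * takes a reference on the nlm_lockowner and links the copy onto the
 * host's h_granted list; releasing the private data undoes both.
 */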
static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state;
	new->fl_u.nfs_fl.owner = nlm_get_lockowner(fl->fl_u.nfs_fl.owner);
	list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted);
}

static void nlmclnt_locks_release_private(struct file_lock *fl)
{
	list_del(&fl->fl_u.nfs_fl.list);
	nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
}

static struct file_lock_operations nlmclnt_lock_ops = {
	.fl_copy_lock = nlmclnt_locks_copy_lock,
	.fl_release_private = nlmclnt_locks_release_private,
};

static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
	BUG_ON(fl->fl_ops != NULL);
	fl->fl_u.nfs_fl.state = 0;
	fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner);
	INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list);
	fl->fl_ops = &nlmclnt_lock_ops;
}

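/*
 * Apply the lock to the local VFS lock tables, dispatching on whether
 * this is a POSIX (fcntl) or BSD-style (flock) lock.
 */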
static int do_vfs_lock(struct file_lock *fl)
{
	int res = 0;
	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
		case FL_POSIX:
			res = posix_lock_file_wait(fl->fl_file, fl);
			break;
		case FL_FLOCK:
			res = flock_lock_file_wait(fl->fl_file, fl);
			break;
		default:
			BUG();
	}
	return res;
}

/*
 * LOCK: Try to create a lock
 *
 *			Programmer Harassment Alert
 *
 * When given a blocking lock request in a sync RPC call, the HPUX lockd
 * will faithfully return LCK_BLOCKED but never cares to notify us when
 * the lock could be granted. This way, our local process could hang
 * around forever waiting for the callback.
 *
 *  Solution A:	Implement busy-waiting
 *  Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
 *
 * For now I am implementing solution A, because I hate the idea of
 * re-implementing lockd for a third time in two months. The async
 * calls shouldn't be too hard to do, however.
 *
 * This is one of the lovely things about standards in the NFS area:
 * they're so soft and squishy you can't really blame HP for doing this.
 */
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct rpc_cred *cred = nfs_file_cred(fl->fl_file);
	struct nlm_host	*host = req->a_host;
	struct nlm_res	*resp = &req->a_res;
	struct nlm_wait *block = NULL;
	unsigned char fl_flags = fl->fl_flags;
	unsigned char fl_type;
	int status = -ENOLCK;

	if (nsm_monitor(host) < 0)
		goto out;

	fl->fl_flags |= FL_ACCESS;
	status = do_vfs_lock(fl);
	fl->fl_flags = fl_flags;
	if (status < 0)
		goto out;

	block = nlmclnt_prepare_block(host, fl);
again:
	/*
	 * Initialise resp->status to a valid non-zero value,
	 * since 0 == nlm_lck_granted
	 */
	resp->status = nlm_lck_blocked;
	for(;;) {
		/* Reboot protection */
		fl->fl_u.nfs_fl.state = host->h_state;
		status = nlmclnt_call(cred, req, NLMPROC_LOCK);
		if (status < 0)
			break;
		/* Did a reclaimer thread notify us of a server reboot? */
		if (resp->status ==  nlm_lck_denied_grace_period)
			continue;
		if (resp->status != nlm_lck_blocked)
			break;
		/* Wait on an NLM blocking lock */
		status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
		if (status < 0)
			break;
		if (resp->status != nlm_lck_blocked)
			break;
	}

	/* if we were interrupted while blocking, then cancel the lock request
	 * and exit
	 */
	if (resp->status == nlm_lck_blocked) {
		if (!req->a_args.block)
			goto out_unlock;
		if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
			goto out_unblock;
	}

	if (resp->status == nlm_granted) {
		down_read(&host->h_rwsem);
		/* Check whether or not the server has rebooted */
		if (fl->fl_u.nfs_fl.state != host->h_state) {
			up_read(&host->h_rwsem);
			goto again;
		}
		/* Ensure the resulting lock will get added to granted list */
		fl->fl_flags |= FL_SLEEP;
		if (do_vfs_lock(fl) < 0)
			printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
		up_read(&host->h_rwsem);
		fl->fl_flags = fl_flags;
		status = 0;
	}
	if (status < 0)
		goto out_unlock;
	/*
	 * EAGAIN doesn't make sense for sleeping locks, and in some
	 * cases NLM_LCK_DENIED is returned for a permanent error.  So
	 * turn it into an ENOLCK.
	 */
	if (resp->status == nlm_lck_denied && (fl_flags & FL_SLEEP))
		status = -ENOLCK;
	else
		status = nlm_stat_to_errno(resp->status);
out_unblock:
	nlmclnt_finish_block(block);
out:
	nlm_release_call(req);
	return status;
out_unlock:
	/* Fatal error: ensure that we remove the lock altogether */
	dprintk("lockd: lock attempt ended in fatal error.\n"
		"       Attempting to unlock.\n");
	nlmclnt_finish_block(block);
	fl_type = fl->fl_type;
	fl->fl_type = F_UNLCK;
	down_read(&host->h_rwsem);
	do_vfs_lock(fl);
	up_read(&host->h_rwsem);
	fl->fl_type = fl_type;
	fl->fl_flags = fl_flags;
	nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
	return status;
}

/*
 * RECLAIM: Try to reclaim a lock
 */
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
{
	struct nlm_rqst reqst, *req;
	int		status;

	req = &reqst;
	memset(req, 0, sizeof(*req));
	locks_init_lock(&req->a_args.lock.fl);
	locks_init_lock(&req->a_res.lock.fl);
	req->a_host  = host;
	req->a_flags = 0;

	/* Set up the argument struct */
	nlmclnt_setlockargs(req, fl);
	req->a_args.reclaim = 1;

	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK);
	if (status >= 0 && req->a_res.status == nlm_granted)
		return 0;

	printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
				"(errno %d, status %d)\n", fl->fl_pid,
				status, ntohl(req->a_res.status));

	/*
	 * FIXME: This is a serious failure. We can
	 *
	 *  a.	Ignore the problem
	 *  b.	Send the owning process some signal (Linux doesn't have
	 *	SIGLOST, though...)
	 *  c.	Retry the operation
	 *
	 * Until someone comes up with a simple implementation
	 * for b or c, I'll choose option a.
	 */

	return -ENOLCK;
}

/*
 * UNLOCK: remove an existing lock
 */
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_host	*host = req->a_host;
	struct nlm_res	*resp = &req->a_res;
	int status;
	unsigned char fl_flags = fl->fl_flags;

	/*
	 * Note: the server is supposed to either grant us the unlock
	 * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
	 * case, we want to unlock.
	 */
	fl->fl_flags |= FL_EXISTS;
	down_read(&host->h_rwsem);
	status = do_vfs_lock(fl);
	up_read(&host->h_rwsem);
	fl->fl_flags = fl_flags;
	if (status == -ENOENT) {
		status = 0;
		goto out;
	}

	atomic_inc(&req->a_count);
	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
			NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
	if (status < 0)
		goto out;

	if (resp->status == nlm_granted)
		goto out;

	if (resp->status != nlm_lck_denied_nolocks)
		printk("lockd: unexpected unlock status: %d\n", resp->status);
	/* What to do now? I'm out of my depth... */
	status = -ENOLCK;
out:
	nlm_release_call(req);
	return status;
}

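/*
 * RPC completion callback for the async UNLOCK call: rebind and retry on
 * transport errors, back off and retry while the server is still in its
 * grace period, and only warn on any other unexpected status.
 */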
static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = data;
	u32 status = ntohl(req->a_res.status);

	if (RPC_ASSASSINATED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
		goto retry_rebind;
	}
	if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
		rpc_delay(task, NLMCLNT_GRACE_WAIT);
		goto retry_unlock;
	}
	if (status != NLM_LCK_GRANTED)
		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
	return;
 retry_rebind:
	lock_kernel();
	nlm_rebind_host(req->a_host);
	unlock_kernel();
 retry_unlock:
	rpc_restart_call(task);
}

static const struct rpc_call_ops nlmclnt_unlock_ops = {
	.rpc_call_done = nlmclnt_unlock_callback,
	.rpc_release = nlmclnt_rpc_release,
};

/*
 * Cancel a blocked lock request.
 * We always use an async RPC call for this in order not to hang a
 * process that has been Ctrl-C'ed.
 */
static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl)
{
	struct nlm_rqst	*req;
	int status;

	dprintk("lockd: blocking lock attempt was interrupted by a signal.\n"
		"       Attempting to cancel lock.\n");

	req = nlm_alloc_call(nlm_get_host(host));
	if (!req)
		return -ENOMEM;
	req->a_flags = RPC_TASK_ASYNC;

	nlmclnt_setlockargs(req, fl);
	req->a_args.block = block;

	atomic_inc(&req->a_count);
	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
			NLMPROC_CANCEL, &nlmclnt_cancel_ops);
	if (status == 0 && req->a_res.status == nlm_lck_denied)
		status = -ENOLCK;
	nlm_release_call(req);
	return status;
}

static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = data;
	u32 status = ntohl(req->a_res.status);

	if (RPC_ASSASSINATED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: CANCEL call error %d, retrying.\n",
					task->tk_status);
		goto retry_cancel;
	}

	dprintk("lockd: cancel status %u (task %u)\n",
			status, task->tk_pid);

	switch (status) {
	case NLM_LCK_GRANTED:
	case NLM_LCK_DENIED_GRACE_PERIOD:
	case NLM_LCK_DENIED:
		/* Everything's good */
		break;
	case NLM_LCK_DENIED_NOLOCKS:
		dprintk("lockd: CANCEL failed (server has no locks)\n");
		goto retry_cancel;
	default:
		printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
			status);
	}

die:
	return;

retry_cancel:
	/* Don't ever retry more than 3 times */
	if (req->a_retries++ >= NLMCLNT_MAX_RETRIES)
		goto die;
	lock_kernel();
	nlm_rebind_host(req->a_host);
	unlock_kernel();
	rpc_restart_call(task);
	rpc_delay(task, 30 * HZ);
}

static const struct rpc_call_ops nlmclnt_cancel_ops = {
	.rpc_call_done = nlmclnt_cancel_callback,
	.rpc_release = nlmclnt_rpc_release,
};

/*
 * Convert an NLM status code to a generic kernel errno
 */
static int
nlm_stat_to_errno(__be32 status)
{
	switch(ntohl(status)) {
	case NLM_LCK_GRANTED:
		return 0;
	case NLM_LCK_DENIED:
		return -EAGAIN;
	case NLM_LCK_DENIED_NOLOCKS:
	case NLM_LCK_DENIED_GRACE_PERIOD:
		return -ENOLCK;
	case NLM_LCK_BLOCKED:
		printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
		return -ENOLCK;
#ifdef CONFIG_LOCKD_V4
	case NLM_DEADLCK:
		return -EDEADLK;
	case NLM_ROFS:
		return -EROFS;
	case NLM_STALE_FH:
		return -ESTALE;
	case NLM_FBIG:
		return -EOVERFLOW;
	case NLM_FAILED:
		return -ENOLCK;
#endif
	}
	printk(KERN_NOTICE "lockd: unexpected server status %d\n", status);
	return -ENOLCK;
}