/*
 * Copyright (C) 2006-2010 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/miscdevice.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/signal.h>
#include <linux/spinlock.h>
#include <linux/dlm.h>
#include <linux/dlm_device.h>
#include <linux/slab.h>

#include "dlm_internal.h"
#include "lockspace.h"
#include "lock.h"
#include "lvb_table.h"
#include "user.h"
#include "ast.h"

static const char name_prefix[] = "dlm";
static const struct file_operations device_fops;
static atomic_t dlm_monitor_opened;
static int dlm_monitor_unused = 1;

#ifdef CONFIG_COMPAT

struct dlm_lock_params32 {
	__u8 mode;
	__u8 namelen;
	__u16 unused;
	__u32 flags;
	__u32 lkid;
	__u32 parent;
	__u64 xid;
	__u64 timeout;
	__u32 castparam;
	__u32 castaddr;
	__u32 bastparam;
	__u32 bastaddr;
	__u32 lksb;
	char lvb[DLM_USER_LVB_LEN];
	char name[0];
};

struct dlm_write_request32 {
	__u32 version[3];
	__u8 cmd;
	__u8 is64bit;
	__u8 unused[2];

	union  {
		struct dlm_lock_params32 lock;
		struct dlm_lspace_params lspace;
		struct dlm_purge_params purge;
	} i;
};

struct dlm_lksb32 {
	__u32 sb_status;
	__u32 sb_lkid;
	__u8 sb_flags;
	__u32 sb_lvbptr;
};

struct dlm_lock_result32 {
	__u32 version[3];
	__u32 length;
	__u32 user_astaddr;
	__u32 user_astparam;
	__u32 user_lksb;
	struct dlm_lksb32 lksb;
	__u8 bast_mode;
	__u8 unused[3];
	/* Offsets may be zero if no data is present */
	__u32 lvb_offset;
};

static void compat_input(struct dlm_write_request *kb,
			 struct dlm_write_request32 *kb32,
			 int namelen)
{
	kb->version[0] = kb32->version[0];
	kb->version[1] = kb32->version[1];
	kb->version[2] = kb32->version[2];

	kb->cmd = kb32->cmd;
	kb->is64bit = kb32->is64bit;
	if (kb->cmd == DLM_USER_CREATE_LOCKSPACE ||
	    kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
		kb->i.lspace.flags = kb32->i.lspace.flags;
		kb->i.lspace.minor = kb32->i.lspace.minor;
		memcpy(kb->i.lspace.name, kb32->i.lspace.name, namelen);
	} else if (kb->cmd == DLM_USER_PURGE) {
		kb->i.purge.nodeid = kb32->i.purge.nodeid;
		kb->i.purge.pid = kb32->i.purge.pid;
	} else {
		kb->i.lock.mode = kb32->i.lock.mode;
		kb->i.lock.namelen = kb32->i.lock.namelen;
		kb->i.lock.flags = kb32->i.lock.flags;
		kb->i.lock.lkid = kb32->i.lock.lkid;
		kb->i.lock.parent = kb32->i.lock.parent;
		kb->i.lock.xid = kb32->i.lock.xid;
		kb->i.lock.timeout = kb32->i.lock.timeout;
		kb->i.lock.castparam = (void *)(long)kb32->i.lock.castparam;
		kb->i.lock.castaddr = (void *)(long)kb32->i.lock.castaddr;
		kb->i.lock.bastparam = (void *)(long)kb32->i.lock.bastparam;
		kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr;
		kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb;
		memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
		memcpy(kb->i.lock.name, kb32->i.lock.name, namelen);
	}
}

static void compat_output(struct dlm_lock_result *res,
			  struct dlm_lock_result32 *res32)
{
	res32->version[0] = res->version[0];
	res32->version[1] = res->version[1];
	res32->version[2] = res->version[2];

	res32->user_astaddr = (__u32)(long)res->user_astaddr;
	res32->user_astparam = (__u32)(long)res->user_astparam;
	res32->user_lksb = (__u32)(long)res->user_lksb;
	res32->bast_mode = res->bast_mode;

	res32->lvb_offset = res->lvb_offset;
	res32->length = res->length;

	res32->lksb.sb_status = res->lksb.sb_status;
	res32->lksb.sb_flags = res->lksb.sb_flags;
	res32->lksb.sb_lkid = res->lksb.sb_lkid;
	res32->lksb.sb_lvbptr = (__u32)(long)res->lksb.sb_lvbptr;
}
#endif

/* Figure out if this lock is at the end of its life and no longer
   available for the application to use.  The lkb still exists until
   the final ast is read.  A lock becomes EOL in three situations:
     1. a noqueue request fails with EAGAIN
     2. an unlock completes with EUNLOCK
     3. a cancel of a waiting request completes with ECANCEL/EDEADLK
   An EOL lock needs to be removed from the process's list of locks.
   And we can't allow any new operation on an EOL lock.  This is
   not related to the lifetime of the lkb struct which is managed
   entirely by refcount. */

static int lkb_is_endoflife(int mode, int status)
{
	switch (status) {
	case -DLM_EUNLOCK:
		return 1;
	case -DLM_ECANCEL:
	case -ETIMEDOUT:
	case -EDEADLK:
	case -EAGAIN:
		if (mode == DLM_LOCK_IV)
			return 1;
		break;
	}
	return 0;
}

/* we could possibly check if the cancel of an orphan has resulted in the lkb
   being removed and then remove that lkb from the orphans list and free it */

void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
		      int status, uint32_t sbflags, uint64_t seq)
{
	struct dlm_ls *ls;
	struct dlm_user_args *ua;
	struct dlm_user_proc *proc;
	int rv;

	if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
		return;

	ls = lkb->lkb_resource->res_ls;
	mutex_lock(&ls->ls_clear_proc_locks);

	/* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
	   can't be delivered.  For ORPHAN's, dlm_clear_proc_locks() freed
	   lkb->ua so we can't try to use it.  This second check is necessary
	   for cases where a completion ast is received for an operation that
	   began before clear_proc_locks did its cancel/unlock. */

	if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
		goto out;

	DLM_ASSERT(lkb->lkb_ua, dlm_print_lkb(lkb););
	ua = lkb->lkb_ua;
	proc = ua->proc;

	if ((flags & DLM_CB_BAST) && ua->bastaddr == NULL)
		goto out;

	if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status))
		lkb->lkb_flags |= DLM_IFL_ENDOFLIFE;

	spin_lock(&proc->asts_spin);

	rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, seq);
	if (rv < 0) {
		spin_unlock(&proc->asts_spin);
		goto out;
	}

	if (list_empty(&lkb->lkb_cb_list)) {
		kref_get(&lkb->lkb_ref);
		list_add_tail(&lkb->lkb_cb_list, &proc->asts);
		wake_up_interruptible(&proc->wait);
	}
	spin_unlock(&proc->asts_spin);

	if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
		/* N.B. spin_lock locks_spin, not asts_spin */
		spin_lock(&proc->locks_spin);
		if (!list_empty(&lkb->lkb_ownqueue)) {
			list_del_init(&lkb->lkb_ownqueue);
			dlm_put_lkb(lkb);
		}
		spin_unlock(&proc->locks_spin);
	}
 out:
	mutex_unlock(&ls->ls_clear_proc_locks);
}

static int device_user_lock(struct dlm_user_proc *proc,
			    struct dlm_lock_params *params)
{
	struct dlm_ls *ls;
	struct dlm_user_args *ua;
	int error = -ENOMEM;

	ls = dlm_find_lockspace_local(proc->lockspace);
	if (!ls)
		return -ENOENT;

	if (!params->castaddr || !params->lksb) {
		error = -EINVAL;
		goto out;
	}

	ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
	if (!ua)
		goto out;
	ua->proc = proc;
	ua->user_lksb = params->lksb;
	ua->castparam = params->castparam;
	ua->castaddr = params->castaddr;
	ua->bastparam = params->bastparam;
	ua->bastaddr = params->bastaddr;
	ua->xid = params->xid;

	if (params->flags & DLM_LKF_CONVERT)
		error = dlm_user_convert(ls, ua,
				         params->mode, params->flags,
				         params->lkid, params->lvb,
					 (unsigned long) params->timeout);
	else {
		error = dlm_user_request(ls, ua,
					 params->mode, params->flags,
					 params->name, params->namelen,
					 (unsigned long) params->timeout);
		if (!error)
			error = ua->lksb.sb_lkid;
	}
 out:
	dlm_put_lockspace(ls);
	return error;
}

static int device_user_unlock(struct dlm_user_proc *proc,
			      struct dlm_lock_params *params)
{
	struct dlm_ls *ls;
	struct dlm_user_args *ua;
	int error = -ENOMEM;

	ls = dlm_find_lockspace_local(proc->lockspace);
	if (!ls)
		return -ENOENT;

	ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
	if (!ua)
		goto out;
	ua->proc = proc;
	ua->user_lksb = params->lksb;
	ua->castparam = params->castparam;
	ua->castaddr = params->castaddr;

	if (params->flags & DLM_LKF_CANCEL)
		error = dlm_user_cancel(ls, ua, params->flags, params->lkid);
	else
		error = dlm_user_unlock(ls, ua, params->flags, params->lkid,
					params->lvb);
 out:
	dlm_put_lockspace(ls);
	return error;
}

static int device_user_deadlock(struct dlm_user_proc *proc,
				struct dlm_lock_params *params)
{
	struct dlm_ls *ls;
	int error;

	ls = dlm_find_lockspace_local(proc->lockspace);
	if (!ls)
		return -ENOENT;

	error = dlm_user_deadlock(ls, params->flags, params->lkid);

	dlm_put_lockspace(ls);
	return error;
}

static int dlm_device_register(struct dlm_ls *ls, char *name)
{
	int error, len;

	/* The device is already registered.  This happens when the
	   lockspace is created multiple times from userspace. */
	if (ls->ls_device.name)
		return 0;

	error = -ENOMEM;
	len = strlen(name) + strlen(name_prefix) + 2;
	ls->ls_device.name = kzalloc(len, GFP_NOFS);
	if (!ls->ls_device.name)
		goto fail;

	snprintf((char *)ls->ls_device.name, len, "%s_%s", name_prefix,
		 name);
	ls->ls_device.fops = &device_fops;
	ls->ls_device.minor = MISC_DYNAMIC_MINOR;

	error = misc_register(&ls->ls_device);
	if (error) {
		kfree(ls->ls_device.name);
		/* this has to be set to NULL
		 * to avoid a double-free in dlm_device_deregister
		 */
		ls->ls_device.name = NULL;
	}
fail:
	return error;
}

int dlm_device_deregister(struct dlm_ls *ls)
{
	int error;

	/* The device is not registered.  This happens when the lockspace
	   was never used from userspace, or when device_create_lockspace()
	   calls dlm_release_lockspace() after the register fails. */
	if (!ls->ls_device.name)
		return 0;

	error = misc_deregister(&ls->ls_device);
	if (!error)
		kfree(ls->ls_device.name);
	return error;
}

static int device_user_purge(struct dlm_user_proc *proc,
			     struct dlm_purge_params *params)
{
	struct dlm_ls *ls;
	int error;

	ls = dlm_find_lockspace_local(proc->lockspace);
	if (!ls)
		return -ENOENT;

	error = dlm_user_purge(ls, proc, params->nodeid, params->pid);

	dlm_put_lockspace(ls);
	return error;
}

static int device_create_lockspace(struct dlm_lspace_params *params)
{
	dlm_lockspace_t *lockspace;
	struct dlm_ls *ls;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = dlm_new_lockspace(params->name, NULL, params->flags,
				  DLM_USER_LVB_LEN, NULL, NULL, NULL,
				  &lockspace);
	if (error)
		return error;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -ENOENT;

	error = dlm_device_register(ls, params->name);
	dlm_put_lockspace(ls);

	if (error)
		dlm_release_lockspace(lockspace, 0);
	else
		error = ls->ls_device.minor;

	return error;
}

static int device_remove_lockspace(struct dlm_lspace_params *params)
{
	dlm_lockspace_t *lockspace;
	struct dlm_ls *ls;
	int error, force = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ls = dlm_find_lockspace_device(params->minor);
	if (!ls)
		return -ENOENT;

	if (params->flags & DLM_USER_LSFLG_FORCEFREE)
		force = 2;

	lockspace = ls->ls_local_handle;
	dlm_put_lockspace(ls);

	/* The final dlm_release_lockspace waits for references to go to
	   zero, so all processes will need to close their device for the
	   ls before the release will proceed.  release also calls the
	   device_deregister above.  Converting a positive return value
	   from release to zero means that userspace won't know when its
	   release was the final one, but it shouldn't need to know. */

	error = dlm_release_lockspace(lockspace, force);
	if (error > 0)
		error = 0;
	return error;
}

/* Check the user's version matches ours */
static int check_version(struct dlm_write_request *req)
{
	if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
	    (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
	     req->version[1] > DLM_DEVICE_VERSION_MINOR)) {

		printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
		       "user (%d.%d.%d) kernel (%d.%d.%d)\n",
		       current->comm,
		       task_pid_nr(current),
		       req->version[0],
		       req->version[1],
		       req->version[2],
		       DLM_DEVICE_VERSION_MAJOR,
		       DLM_DEVICE_VERSION_MINOR,
		       DLM_DEVICE_VERSION_PATCH);
		return -EINVAL;
	}
	return 0;
}

/*
 * device_write
 *
 *   device_user_lock
 *     dlm_user_request -> request_lock
 *     dlm_user_convert -> convert_lock
 *
 *   device_user_unlock
 *     dlm_user_unlock -> unlock_lock
 *     dlm_user_cancel -> cancel_lock
 *
 *   device_create_lockspace
 *     dlm_new_lockspace
 *
 *   device_remove_lockspace
 *     dlm_release_lockspace
 */

/* a write to a lockspace device is a lock or unlock request, a write
   to the control device is to create/remove a lockspace */
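
/*
 * Editor's illustration (hedged, not part of the original file): a userspace
 * sketch of the write-side protocol, submitting a new lock request on an
 * already open lockspace device fd.  The device node path and the AST
 * calling convention are userspace details (handled by libdlm) assumed here,
 * not defined by this file.  Guarded out; never built with this file.
 */
#if 0	/* userspace-side sketch */
#include <string.h>
#include <unistd.h>
#include <linux/dlm.h>
#include <linux/dlm_device.h>

static int request_lock(int ls_fd, const char *name, int mode,
			struct dlm_lksb *lksb,
			void (*ast)(void *), void *astarg)
{
	char buf[sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN];
	struct dlm_write_request *req = (struct dlm_write_request *)buf;
	size_t namelen = strlen(name);

	if (namelen > DLM_RESNAME_MAXLEN)
		return -1;

	memset(buf, 0, sizeof(buf));
	req->version[0] = DLM_DEVICE_VERSION_MAJOR;	/* see check_version() */
	req->version[1] = DLM_DEVICE_VERSION_MINOR;
	req->version[2] = DLM_DEVICE_VERSION_PATCH;
	req->cmd = DLM_USER_LOCK;
	req->is64bit = (sizeof(long) == 8);	/* tells device_write() which layout was used */
	req->i.lock.mode = mode;		/* e.g. DLM_LOCK_EX */
	req->i.lock.namelen = namelen;
	req->i.lock.castaddr = (void *)ast;	/* must be non-NULL, see device_user_lock() */
	req->i.lock.castparam = astarg;		/* echoed back by device_read() */
	req->i.lock.lksb = lksb;		/* must be non-NULL as well */
	memcpy(req->i.lock.name, name, namelen);

	/* for a new (non-convert) request, a successful write returns the
	   new lock id (ua->lksb.sb_lkid) */
	return write(ls_fd, req, sizeof(struct dlm_write_request) + namelen);
}
#endif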

static ssize_t device_write(struct file *file, const char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct dlm_user_proc *proc = file->private_data;
	struct dlm_write_request *kbuf;
	int error;

#ifdef CONFIG_COMPAT
	if (count < sizeof(struct dlm_write_request32))
#else
	if (count < sizeof(struct dlm_write_request))
#endif
		return -EINVAL;

	/*
	 * can't compare against COMPAT/dlm_write_request32 because
	 * we don't yet know if is64bit is zero
	 */
	if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN)
		return -EINVAL;

	kbuf = kzalloc(count + 1, GFP_NOFS);
	if (!kbuf)
		return -ENOMEM;

	if (copy_from_user(kbuf, buf, count)) {
		error = -EFAULT;
		goto out_free;
	}

	if (check_version(kbuf)) {
		error = -EBADE;
		goto out_free;
	}

#ifdef CONFIG_COMPAT
	if (!kbuf->is64bit) {
		struct dlm_write_request32 *k32buf;
		int namelen = 0;

		if (count > sizeof(struct dlm_write_request32))
			namelen = count - sizeof(struct dlm_write_request32);

		k32buf = (struct dlm_write_request32 *)kbuf;

		/* add 1 after namelen so that the name string is terminated */
		kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1,
			       GFP_NOFS);
		if (!kbuf) {
			kfree(k32buf);
			return -ENOMEM;
		}

		if (proc)
			set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);

		compat_input(kbuf, k32buf, namelen);
		kfree(k32buf);
	}
#endif

	/* do we really need this? can a write happen after a close? */
	if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
	    (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) {
		error = -EINVAL;
		goto out_free;
	}

	error = -EINVAL;

	switch (kbuf->cmd)
	{
	case DLM_USER_LOCK:
		if (!proc) {
			log_print("no locking on control device");
			goto out_free;
		}
		error = device_user_lock(proc, &kbuf->i.lock);
		break;

	case DLM_USER_UNLOCK:
		if (!proc) {
			log_print("no locking on control device");
			goto out_free;
		}
		error = device_user_unlock(proc, &kbuf->i.lock);
		break;

	case DLM_USER_DEADLOCK:
		if (!proc) {
			log_print("no locking on control device");
			goto out_free;
		}
		error = device_user_deadlock(proc, &kbuf->i.lock);
		break;

	case DLM_USER_CREATE_LOCKSPACE:
		if (proc) {
			log_print("create/remove only on control device");
			goto out_free;
		}
		error = device_create_lockspace(&kbuf->i.lspace);
		break;

	case DLM_USER_REMOVE_LOCKSPACE:
		if (proc) {
			log_print("create/remove only on control device");
			goto out_free;
		}
		error = device_remove_lockspace(&kbuf->i.lspace);
		break;

	case DLM_USER_PURGE:
		if (!proc) {
			log_print("no locking on control device");
			goto out_free;
		}
		error = device_user_purge(proc, &kbuf->i.purge);
		break;

	default:
		log_print("Unknown command passed to DLM device : %d\n",
			  kbuf->cmd);
	}

 out_free:
	kfree(kbuf);
	return error;
}

/* Every process that opens the lockspace device has its own "proc" structure
   hanging off the open file that's used to keep track of locks owned by the
   process and asts that need to be delivered to the process. */
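
/*
 * Editor's illustration (hedged, not part of the original file): a userspace
 * sketch of creating a lockspace through the control device and then opening
 * the per-lockspace node served by device_open() below.  The /dev paths are
 * assumptions (they depend on udev rules); the create write itself requires
 * CAP_SYS_ADMIN, see device_create_lockspace().  Guarded out; never built.
 */
#if 0	/* userspace-side sketch */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/dlm.h>
#include <linux/dlm_device.h>

static int open_lockspace(const char *name)
{
	char buf[sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN + 1];
	struct dlm_write_request *req = (struct dlm_write_request *)buf;
	char path[128];
	int ctl_fd, minor;

	if (strlen(name) >= DLM_LOCKSPACE_LEN)
		return -1;

	ctl_fd = open("/dev/dlm-control", O_RDWR);	/* assumed udev name */
	if (ctl_fd < 0)
		return -1;

	memset(buf, 0, sizeof(buf));
	req->version[0] = DLM_DEVICE_VERSION_MAJOR;
	req->version[1] = DLM_DEVICE_VERSION_MINOR;
	req->version[2] = DLM_DEVICE_VERSION_PATCH;
	req->cmd = DLM_USER_CREATE_LOCKSPACE;
	req->is64bit = (sizeof(long) == 8);
	strcpy(req->i.lspace.name, name);

	/* on success the write returns the minor of the new misc device,
	   which dlm_device_register() named "dlm_<name>" */
	minor = write(ctl_fd, req, sizeof(*req) + strlen(name));
	close(ctl_fd);
	if (minor < 0)
		return -1;

	snprintf(path, sizeof(path), "/dev/dlm_%s", name);	/* assumed udev name */
	return open(path, O_RDWR);	/* each open gets its own dlm_user_proc */
}
#endif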

static int device_open(struct inode *inode, struct file *file)
{
	struct dlm_user_proc *proc;
	struct dlm_ls *ls;

	ls = dlm_find_lockspace_device(iminor(inode));
	if (!ls)
		return -ENOENT;

	proc = kzalloc(sizeof(struct dlm_user_proc), GFP_NOFS);
	if (!proc) {
		dlm_put_lockspace(ls);
		return -ENOMEM;
	}

	proc->lockspace = ls->ls_local_handle;
	INIT_LIST_HEAD(&proc->asts);
	INIT_LIST_HEAD(&proc->locks);
	INIT_LIST_HEAD(&proc->unlocking);
	spin_lock_init(&proc->asts_spin);
	spin_lock_init(&proc->locks_spin);
	init_waitqueue_head(&proc->wait);
	file->private_data = proc;

	return 0;
}

static int device_close(struct inode *inode, struct file *file)
{
	struct dlm_user_proc *proc = file->private_data;
	struct dlm_ls *ls;

	ls = dlm_find_lockspace_local(proc->lockspace);
	if (!ls)
		return -ENOENT;

	set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags);

	dlm_clear_proc_locks(ls, proc);

	/* at this point no more lkb's should exist for this lockspace,
	   so there's no chance of dlm_user_add_ast() being called and
	   looking for lkb->ua->proc */

	kfree(proc);
	file->private_data = NULL;

	dlm_put_lockspace(ls);
	dlm_put_lockspace(ls);  /* for the find in device_open() */

	/* FIXME: AUTOFREE: if this ls is no longer used do
	   device_remove_lockspace() */

	return 0;
}

static int copy_result_to_user(struct dlm_user_args *ua, int compat,
			       uint32_t flags, int mode, int copy_lvb,
			       char __user *buf, size_t count)
{
#ifdef CONFIG_COMPAT
	struct dlm_lock_result32 result32;
#endif
	struct dlm_lock_result result;
	void *resultptr;
	int error=0;
	int len;
	int struct_len;

	memset(&result, 0, sizeof(struct dlm_lock_result));
	result.version[0] = DLM_DEVICE_VERSION_MAJOR;
	result.version[1] = DLM_DEVICE_VERSION_MINOR;
	result.version[2] = DLM_DEVICE_VERSION_PATCH;
	memcpy(&result.lksb, &ua->lksb, sizeof(struct dlm_lksb));
	result.user_lksb = ua->user_lksb;

	/* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
	   in a conversion unless the conversion is successful.  See code
	   in dlm_user_convert() for updating ua from ua_tmp.  OpenVMS, though,
	   notes that a new blocking AST address and parameter are set even if
	   the conversion fails, so maybe we should just do that. */

	if (flags & DLM_CB_BAST) {
		result.user_astaddr = ua->bastaddr;
		result.user_astparam = ua->bastparam;
		result.bast_mode = mode;
	} else {
		result.user_astaddr = ua->castaddr;
		result.user_astparam = ua->castparam;
	}

#ifdef CONFIG_COMPAT
	if (compat)
		len = sizeof(struct dlm_lock_result32);
	else
#endif
		len = sizeof(struct dlm_lock_result);
	struct_len = len;

	/* copy lvb to userspace if there is one, it's been updated, and
	   the user buffer has space for it */

	if (copy_lvb && ua->lksb.sb_lvbptr && count >= len + DLM_USER_LVB_LEN) {
		if (copy_to_user(buf+len, ua->lksb.sb_lvbptr,
				 DLM_USER_LVB_LEN)) {
			error = -EFAULT;
			goto out;
		}

		result.lvb_offset = len;
		len += DLM_USER_LVB_LEN;
	}

	result.length = len;
	resultptr = &result;
#ifdef CONFIG_COMPAT
	if (compat) {
		compat_output(&result, &result32);
		resultptr = &result32;
	}
#endif

	if (copy_to_user(buf, resultptr, struct_len))
		error = -EFAULT;
	else
		error = len;
 out:
	return error;
}

static int copy_version_to_user(char __user *buf, size_t count)
{
	struct dlm_device_version ver;

	memset(&ver, 0, sizeof(struct dlm_device_version));
	ver.version[0] = DLM_DEVICE_VERSION_MAJOR;
	ver.version[1] = DLM_DEVICE_VERSION_MINOR;
	ver.version[2] = DLM_DEVICE_VERSION_PATCH;

	if (copy_to_user(buf, &ver, sizeof(struct dlm_device_version)))
		return -EFAULT;
	return sizeof(struct dlm_device_version);
}

/* a read returns a single ast described in a struct dlm_lock_result */
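
/*
 * Editor's illustration (hedged, not part of the original file): a userspace
 * sketch of consuming one callback from a lockspace device.  The completion
 * and blocking AST function signature is a userspace convention (libdlm-style)
 * assumed here, not defined by this file.  Guarded out; never built.
 */
#if 0	/* userspace-side sketch */
#include <poll.h>
#include <unistd.h>
#include <linux/dlm.h>
#include <linux/dlm_device.h>

static void dispatch_one_ast(int ls_fd)
{
	char buf[sizeof(struct dlm_lock_result) + DLM_USER_LVB_LEN];
	struct dlm_lock_result *res = (struct dlm_lock_result *)buf;
	struct pollfd pfd = { .fd = ls_fd, .events = POLLIN };
	void (*astfn)(void *);

	/* device_poll() reports POLLIN once proc->asts is non-empty */
	poll(&pfd, 1, -1);

	/* must ask for at least sizeof(struct dlm_lock_result); the extra
	   room lets the kernel append the LVB at res->lvb_offset */
	if (read(ls_fd, buf, sizeof(buf)) < (ssize_t)sizeof(*res))
		return;

	/* the kernel echoes back the pointers stored at lock time by
	   device_user_lock(); bast_mode is set for blocking callbacks */
	astfn = (void (*)(void *))res->user_astaddr;
	if (astfn)
		astfn(res->user_astparam);
}
#endif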

static ssize_t device_read(struct file *file, char __user *buf, size_t count,
			   loff_t *ppos)
{
	struct dlm_user_proc *proc = file->private_data;
	struct dlm_lkb *lkb;
	DECLARE_WAITQUEUE(wait, current);
	struct dlm_callback cb;
	int rv, resid, copy_lvb = 0;

	if (count == sizeof(struct dlm_device_version)) {
		rv = copy_version_to_user(buf, count);
		return rv;
	}

	if (!proc) {
		log_print("non-version read from control device %zu", count);
		return -EINVAL;
	}

#ifdef CONFIG_COMPAT
	if (count < sizeof(struct dlm_lock_result32))
#else
	if (count < sizeof(struct dlm_lock_result))
#endif
		return -EINVAL;

 try_another:

	/* do we really need this? can a read happen after a close? */
	if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
		return -EINVAL;

	spin_lock(&proc->asts_spin);
	if (list_empty(&proc->asts)) {
		if (file->f_flags & O_NONBLOCK) {
			spin_unlock(&proc->asts_spin);
			return -EAGAIN;
		}

		add_wait_queue(&proc->wait, &wait);

	repeat:
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&proc->asts) && !signal_pending(current)) {
			spin_unlock(&proc->asts_spin);
			schedule();
			spin_lock(&proc->asts_spin);
			goto repeat;
		}
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&proc->wait, &wait);

		if (signal_pending(current)) {
			spin_unlock(&proc->asts_spin);
			return -ERESTARTSYS;
		}
	}

	/* if we empty lkb_callbacks, we don't want to unlock the spinlock
	   without removing lkb_cb_list; so empty lkb_cb_list is always
	   consistent with empty lkb_callbacks */

	lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_cb_list);

	rv = dlm_rem_lkb_callback(lkb->lkb_resource->res_ls, lkb, &cb, &resid);
	if (rv < 0) {
		/* this shouldn't happen; lkb should have been removed from
		   list when resid was zero */
		log_print("dlm_rem_lkb_callback empty %x", lkb->lkb_id);
		list_del_init(&lkb->lkb_cb_list);
		spin_unlock(&proc->asts_spin);
		/* removes ref for proc->asts, may cause lkb to be freed */
		dlm_put_lkb(lkb);
		goto try_another;
	}
	if (!resid)
		list_del_init(&lkb->lkb_cb_list);
	spin_unlock(&proc->asts_spin);

	if (cb.flags & DLM_CB_SKIP) {
		/* removes ref for proc->asts, may cause lkb to be freed */
		if (!resid)
			dlm_put_lkb(lkb);
		goto try_another;
	}

	if (cb.flags & DLM_CB_CAST) {
		int old_mode, new_mode;

		old_mode = lkb->lkb_last_cast.mode;
		new_mode = cb.mode;

		if (!cb.sb_status && lkb->lkb_lksb->sb_lvbptr &&
		    dlm_lvb_operations[old_mode + 1][new_mode + 1])
			copy_lvb = 1;

		lkb->lkb_lksb->sb_status = cb.sb_status;
		lkb->lkb_lksb->sb_flags = cb.sb_flags;
	}

	rv = copy_result_to_user(lkb->lkb_ua,
				 test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
				 cb.flags, cb.mode, copy_lvb, buf, count);

	/* removes ref for proc->asts, may cause lkb to be freed */
	if (!resid)
		dlm_put_lkb(lkb);

	return rv;
}

static unsigned int device_poll(struct file *file, poll_table *wait)
{
	struct dlm_user_proc *proc = file->private_data;

	poll_wait(file, &proc->wait, wait);

	spin_lock(&proc->asts_spin);
	if (!list_empty(&proc->asts)) {
		spin_unlock(&proc->asts_spin);
		return POLLIN | POLLRDNORM;
	}
	spin_unlock(&proc->asts_spin);
	return 0;
}

int dlm_user_daemon_available(void)
{
	/* dlm_controld hasn't started (or, has started, but not
	   properly populated configfs) */

	if (!dlm_our_nodeid())
		return 0;

	/* This is to deal with versions of dlm_controld that don't
	   know about the monitor device.  We assume that if the
	   dlm_controld was started (above), but the monitor device
	   was never opened, that it's an old version.  dlm_controld
	   should open the monitor device before populating configfs. */

	if (dlm_monitor_unused)
		return 1;

	return atomic_read(&dlm_monitor_opened) ? 1 : 0;
}

static int ctl_device_open(struct inode *inode, struct file *file)
{
	file->private_data = NULL;
	return 0;
}

static int ctl_device_close(struct inode *inode, struct file *file)
{
	return 0;
}

static int monitor_device_open(struct inode *inode, struct file *file)
{
	atomic_inc(&dlm_monitor_opened);
	dlm_monitor_unused = 0;
	return 0;
}

static int monitor_device_close(struct inode *inode, struct file *file)
{
	if (atomic_dec_and_test(&dlm_monitor_opened))
		dlm_stop_lockspaces();
	return 0;
}

static const struct file_operations device_fops = {
	.open    = device_open,
	.release = device_close,
	.read    = device_read,
	.write   = device_write,
	.poll    = device_poll,
	.owner   = THIS_MODULE,
	.llseek  = noop_llseek,
};

static const struct file_operations ctl_device_fops = {
	.open    = ctl_device_open,
	.release = ctl_device_close,
	.read    = device_read,
	.write   = device_write,
	.owner   = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice ctl_device = {
	.name  = "dlm-control",
	.fops  = &ctl_device_fops,
	.minor = MISC_DYNAMIC_MINOR,
};

static const struct file_operations monitor_device_fops = {
	.open    = monitor_device_open,
	.release = monitor_device_close,
	.owner   = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice monitor_device = {
	.name  = "dlm-monitor",
	.fops  = &monitor_device_fops,
	.minor = MISC_DYNAMIC_MINOR,
};

int __init dlm_user_init(void)
{
	int error;

	atomic_set(&dlm_monitor_opened, 0);

	error = misc_register(&ctl_device);
	if (error) {
		log_print("misc_register failed for control device");
		goto out;
	}

	error = misc_register(&monitor_device);
	if (error) {
		log_print("misc_register failed for monitor device");
		misc_deregister(&ctl_device);
	}
 out:
	return error;
}

void dlm_user_exit(void)
{
	misc_deregister(&ctl_device);
	misc_deregister(&monitor_device);
}