1 /*
2  * Copyright (C) 2006-2008 Red Hat, Inc.  All rights reserved.
3  *
4  * This copyrighted material is made available to anyone wishing to use,
5  * modify, copy, or redistribute it subject to the terms and conditions
6  * of the GNU General Public License v.2.
7  */
8 
9 #include <linux/miscdevice.h>
10 #include <linux/init.h>
11 #include <linux/wait.h>
12 #include <linux/module.h>
13 #include <linux/file.h>
14 #include <linux/fs.h>
15 #include <linux/poll.h>
16 #include <linux/signal.h>
17 #include <linux/spinlock.h>
18 #include <linux/dlm.h>
19 #include <linux/dlm_device.h>
20 
21 #include "dlm_internal.h"
22 #include "lockspace.h"
23 #include "lock.h"
24 #include "lvb_table.h"
25 #include "user.h"
26 
27 static const char name_prefix[] = "dlm";
28 static const struct file_operations device_fops;
29 static atomic_t dlm_monitor_opened;
30 static int dlm_monitor_unused = 1;
31 
32 #ifdef CONFIG_COMPAT
33 
34 struct dlm_lock_params32 {
35 	__u8 mode;
36 	__u8 namelen;
37 	__u16 unused;
38 	__u32 flags;
39 	__u32 lkid;
40 	__u32 parent;
41 	__u64 xid;
42 	__u64 timeout;
43 	__u32 castparam;
44 	__u32 castaddr;
45 	__u32 bastparam;
46 	__u32 bastaddr;
47 	__u32 lksb;
48 	char lvb[DLM_USER_LVB_LEN];
49 	char name[0];
50 };
51 
52 struct dlm_write_request32 {
53 	__u32 version[3];
54 	__u8 cmd;
55 	__u8 is64bit;
56 	__u8 unused[2];
57 
58 	union  {
59 		struct dlm_lock_params32 lock;
60 		struct dlm_lspace_params lspace;
61 		struct dlm_purge_params purge;
62 	} i;
63 };
64 
65 struct dlm_lksb32 {
66 	__u32 sb_status;
67 	__u32 sb_lkid;
68 	__u8 sb_flags;
69 	__u32 sb_lvbptr;
70 };
71 
72 struct dlm_lock_result32 {
73 	__u32 version[3];
74 	__u32 length;
75 	__u32 user_astaddr;
76 	__u32 user_astparam;
77 	__u32 user_lksb;
78 	struct dlm_lksb32 lksb;
79 	__u8 bast_mode;
80 	__u8 unused[3];
81 	/* Offsets may be zero if no data is present */
82 	__u32 lvb_offset;
83 };
84 
85 static void compat_input(struct dlm_write_request *kb,
86 			 struct dlm_write_request32 *kb32,
87 			 size_t count)
88 {
89 	kb->version[0] = kb32->version[0];
90 	kb->version[1] = kb32->version[1];
91 	kb->version[2] = kb32->version[2];
92 
93 	kb->cmd = kb32->cmd;
94 	kb->is64bit = kb32->is64bit;
95 	if (kb->cmd == DLM_USER_CREATE_LOCKSPACE ||
96 	    kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
97 		kb->i.lspace.flags = kb32->i.lspace.flags;
98 		kb->i.lspace.minor = kb32->i.lspace.minor;
99 		memcpy(kb->i.lspace.name, kb32->i.lspace.name, count -
100 			offsetof(struct dlm_write_request32, i.lspace.name));
101 	} else if (kb->cmd == DLM_USER_PURGE) {
102 		kb->i.purge.nodeid = kb32->i.purge.nodeid;
103 		kb->i.purge.pid = kb32->i.purge.pid;
104 	} else {
105 		kb->i.lock.mode = kb32->i.lock.mode;
106 		kb->i.lock.namelen = kb32->i.lock.namelen;
107 		kb->i.lock.flags = kb32->i.lock.flags;
108 		kb->i.lock.lkid = kb32->i.lock.lkid;
109 		kb->i.lock.parent = kb32->i.lock.parent;
110 		kb->i.lock.xid = kb32->i.lock.xid;
111 		kb->i.lock.timeout = kb32->i.lock.timeout;
112 		kb->i.lock.castparam = (void *)(long)kb32->i.lock.castparam;
113 		kb->i.lock.castaddr = (void *)(long)kb32->i.lock.castaddr;
114 		kb->i.lock.bastparam = (void *)(long)kb32->i.lock.bastparam;
115 		kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr;
116 		kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb;
117 		memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
118 		memcpy(kb->i.lock.name, kb32->i.lock.name, count -
119 			offsetof(struct dlm_write_request32, i.lock.name));
120 	}
121 }
122 
123 static void compat_output(struct dlm_lock_result *res,
124 			  struct dlm_lock_result32 *res32)
125 {
126 	res32->version[0] = res->version[0];
127 	res32->version[1] = res->version[1];
128 	res32->version[2] = res->version[2];
129 
130 	res32->user_astaddr = (__u32)(long)res->user_astaddr;
131 	res32->user_astparam = (__u32)(long)res->user_astparam;
132 	res32->user_lksb = (__u32)(long)res->user_lksb;
133 	res32->bast_mode = res->bast_mode;
134 
135 	res32->lvb_offset = res->lvb_offset;
136 	res32->length = res->length;
137 
138 	res32->lksb.sb_status = res->lksb.sb_status;
139 	res32->lksb.sb_flags = res->lksb.sb_flags;
140 	res32->lksb.sb_lkid = res->lksb.sb_lkid;
141 	res32->lksb.sb_lvbptr = (__u32)(long)res->lksb.sb_lvbptr;
142 }
143 #endif
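/*
 * Illustrative sketch (standalone, not part of this file): the compat
 * helpers above widen 32-bit userspace "pointer" fields with
 * (void *)(long) and narrow them back with (__u32)(long) when results are
 * returned.  For any value that fits in 32 bits the round trip is
 * lossless, which is what a 32-bit caller relies on when its
 * castparam/castaddr cookies come back in a dlm_lock_result32.
 */
#include <assert.h>
#include <stdint.h>

static void *widen_user32(uint32_t v)
{
	return (void *)(long)v;		/* as compat_input() does */
}

static uint32_t narrow_user32(void *p)
{
	return (uint32_t)(long)p;	/* as compat_output() does */
}

int main(void)
{
	uint32_t castparam = 0x12345678;	/* arbitrary 32-bit cookie */

	assert(narrow_user32(widen_user32(castparam)) == castparam);
	return 0;
}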
144 
145 /* Figure out if this lock is at the end of its life and no longer
146    available for the application to use.  The lkb still exists until
147    the final ast is read.  A lock becomes EOL in three situations:
148      1. a noqueue request fails with EAGAIN
149      2. an unlock completes with EUNLOCK
150      3. a cancel of a waiting request completes with ECANCEL/EDEADLK
151    An EOL lock needs to be removed from the process's list of locks.
152    And we can't allow any new operation on an EOL lock.  This is
153    not related to the lifetime of the lkb struct which is managed
154    entirely by refcount. */
155 
156 static int lkb_is_endoflife(struct dlm_lkb *lkb, int sb_status, int type)
157 {
158 	switch (sb_status) {
159 	case -DLM_EUNLOCK:
160 		return 1;
161 	case -DLM_ECANCEL:
162 	case -ETIMEDOUT:
163 	case -EDEADLK:
164 		if (lkb->lkb_grmode == DLM_LOCK_IV)
165 			return 1;
166 		break;
167 	case -EAGAIN:
168 		if (type == AST_COMP && lkb->lkb_grmode == DLM_LOCK_IV)
169 			return 1;
170 		break;
171 	}
172 	return 0;
173 }
174 
175 /* we could possibly check if the cancel of an orphan has resulted in the lkb
176    being removed and then remove that lkb from the orphans list and free it */
177 
178 void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int bastmode)
179 {
180 	struct dlm_ls *ls;
181 	struct dlm_user_args *ua;
182 	struct dlm_user_proc *proc;
183 	int eol = 0, ast_type;
184 
185 	if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
186 		return;
187 
188 	ls = lkb->lkb_resource->res_ls;
189 	mutex_lock(&ls->ls_clear_proc_locks);
190 
191 	/* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
192 	   can't be delivered.  For ORPHANs, dlm_clear_proc_locks() freed
193 	   lkb->ua so we can't try to use it.  This second check is necessary
194 	   for cases where a completion ast is received for an operation that
195 	   began before clear_proc_locks did its cancel/unlock. */
196 
197 	if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
198 		goto out;
199 
200 	DLM_ASSERT(lkb->lkb_ua, dlm_print_lkb(lkb););
201 	ua = lkb->lkb_ua;
202 	proc = ua->proc;
203 
204 	if (type == AST_BAST && ua->bastaddr == NULL)
205 		goto out;
206 
207 	spin_lock(&proc->asts_spin);
208 
209 	ast_type = lkb->lkb_ast_type;
210 	lkb->lkb_ast_type |= type;
211 	if (bastmode)
212 		lkb->lkb_bastmode = bastmode;
213 
214 	if (!ast_type) {
215 		kref_get(&lkb->lkb_ref);
216 		list_add_tail(&lkb->lkb_astqueue, &proc->asts);
217 		wake_up_interruptible(&proc->wait);
218 	}
219 	if (type == AST_COMP && (ast_type & AST_COMP))
220 		log_debug(ls, "ast overlap %x status %x %x",
221 			  lkb->lkb_id, ua->lksb.sb_status, lkb->lkb_flags);
222 
223 	eol = lkb_is_endoflife(lkb, ua->lksb.sb_status, type);
224 	if (eol) {
225 		lkb->lkb_ast_type &= ~AST_BAST;
226 		lkb->lkb_flags |= DLM_IFL_ENDOFLIFE;
227 	}
228 
229 	/* We want to copy the lvb to userspace when the completion
230 	   ast is read if the status is 0, the lock has an lvb and
231 	   lvb_ops says we should.  We could probably have set_lvb_lock()
232 	   set update_user_lvb instead and not need old_mode */
233 
234 	if ((lkb->lkb_ast_type & AST_COMP) &&
235 	    (lkb->lkb_lksb->sb_status == 0) &&
236 	    lkb->lkb_lksb->sb_lvbptr &&
237 	    dlm_lvb_operations[ua->old_mode + 1][lkb->lkb_grmode + 1])
238 		ua->update_user_lvb = 1;
239 	else
240 		ua->update_user_lvb = 0;
241 
242 	spin_unlock(&proc->asts_spin);
243 
244 	if (eol) {
245 		spin_lock(&proc->locks_spin);
246 		if (!list_empty(&lkb->lkb_ownqueue)) {
247 			list_del_init(&lkb->lkb_ownqueue);
248 			dlm_put_lkb(lkb);
249 		}
250 		spin_unlock(&proc->locks_spin);
251 	}
252  out:
253 	mutex_unlock(&ls->ls_clear_proc_locks);
254 }
255 
256 static int device_user_lock(struct dlm_user_proc *proc,
257 			    struct dlm_lock_params *params)
258 {
259 	struct dlm_ls *ls;
260 	struct dlm_user_args *ua;
261 	int error = -ENOMEM;
262 
263 	ls = dlm_find_lockspace_local(proc->lockspace);
264 	if (!ls)
265 		return -ENOENT;
266 
267 	if (!params->castaddr || !params->lksb) {
268 		error = -EINVAL;
269 		goto out;
270 	}
271 
272 	ua = kzalloc(sizeof(struct dlm_user_args), GFP_KERNEL);
273 	if (!ua)
274 		goto out;
275 	ua->proc = proc;
276 	ua->user_lksb = params->lksb;
277 	ua->castparam = params->castparam;
278 	ua->castaddr = params->castaddr;
279 	ua->bastparam = params->bastparam;
280 	ua->bastaddr = params->bastaddr;
281 	ua->xid = params->xid;
282 
283 	if (params->flags & DLM_LKF_CONVERT)
284 		error = dlm_user_convert(ls, ua,
285 				         params->mode, params->flags,
286 				         params->lkid, params->lvb,
287 					 (unsigned long) params->timeout);
288 	else {
289 		error = dlm_user_request(ls, ua,
290 					 params->mode, params->flags,
291 					 params->name, params->namelen,
292 					 (unsigned long) params->timeout);
293 		if (!error)
294 			error = ua->lksb.sb_lkid;
295 	}
296  out:
297 	dlm_put_lockspace(ls);
298 	return error;
299 }
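/*
 * Illustrative userspace sketch (standalone, not part of this file): a
 * lock request is a plain write() of a struct dlm_write_request with cmd
 * DLM_USER_LOCK to an open lockspace device.  For a new (non-convert)
 * request the return value of write() is the lkid (see device_user_lock()
 * above); completion arrives later as an AST via read().  Layouts are
 * assumed to match the exported <linux/dlm.h> and <linux/dlm_device.h>;
 * real applications would normally go through libdlm instead.
 */
#include <string.h>
#include <unistd.h>
#include <linux/dlm.h>
#include <linux/dlm_device.h>

static int request_lock(int ls_fd, struct dlm_lksb *lksb, const char *name,
			int mode, void *astaddr, void *astparam)
{
	union {
		struct dlm_write_request req;
		char buf[sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN];
	} u;
	size_t namelen = strlen(name);
	ssize_t ret;

	if (namelen > DLM_RESNAME_MAXLEN)
		return -1;

	memset(&u, 0, sizeof(u));
	u.req.version[0] = DLM_DEVICE_VERSION_MAJOR;
	u.req.version[1] = DLM_DEVICE_VERSION_MINOR;
	u.req.version[2] = DLM_DEVICE_VERSION_PATCH;
	u.req.cmd = DLM_USER_LOCK;
	u.req.is64bit = (sizeof(void *) == 8);

	u.req.i.lock.mode = mode;		/* e.g. DLM_LOCK_EX */
	u.req.i.lock.namelen = namelen;		/* DLM_LKF_* flags would go in i.lock.flags */
	u.req.i.lock.lksb = lksb;		/* must be non-NULL, see above */
	u.req.i.lock.castaddr = astaddr;	/* must be non-NULL, see above */
	u.req.i.lock.castparam = astparam;	/* cookie handed back with the AST */
	memcpy(u.req.i.lock.name, name, namelen);

	ret = write(ls_fd, &u.req, sizeof(u.req) + namelen);
	return ret < 0 ? -1 : (int)ret;		/* lkid on success */
}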
300 
301 static int device_user_unlock(struct dlm_user_proc *proc,
302 			      struct dlm_lock_params *params)
303 {
304 	struct dlm_ls *ls;
305 	struct dlm_user_args *ua;
306 	int error = -ENOMEM;
307 
308 	ls = dlm_find_lockspace_local(proc->lockspace);
309 	if (!ls)
310 		return -ENOENT;
311 
312 	ua = kzalloc(sizeof(struct dlm_user_args), GFP_KERNEL);
313 	if (!ua)
314 		goto out;
315 	ua->proc = proc;
316 	ua->user_lksb = params->lksb;
317 	ua->castparam = params->castparam;
318 	ua->castaddr = params->castaddr;
319 
320 	if (params->flags & DLM_LKF_CANCEL)
321 		error = dlm_user_cancel(ls, ua, params->flags, params->lkid);
322 	else
323 		error = dlm_user_unlock(ls, ua, params->flags, params->lkid,
324 					params->lvb);
325  out:
326 	dlm_put_lockspace(ls);
327 	return error;
328 }
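/*
 * Illustrative userspace sketch (standalone, not part of this file),
 * continuing the lock sketch above with the same includes: unlock and
 * cancel share the DLM_USER_UNLOCK command, and DLM_LKF_CANCEL selects
 * the cancel path in device_user_unlock().  The completion still arrives
 * asynchronously through read().
 */
static int unlock_lock(int ls_fd, __u32 lkid, __u32 flags, void *astparam)
{
	struct dlm_write_request req;

	memset(&req, 0, sizeof(req));
	req.version[0] = DLM_DEVICE_VERSION_MAJOR;
	req.version[1] = DLM_DEVICE_VERSION_MINOR;
	req.version[2] = DLM_DEVICE_VERSION_PATCH;
	req.cmd = DLM_USER_UNLOCK;
	req.is64bit = (sizeof(void *) == 8);

	req.i.lock.lkid = lkid;
	req.i.lock.flags = flags;	/* 0 to unlock, DLM_LKF_CANCEL to cancel */
	req.i.lock.castparam = astparam;

	return write(ls_fd, &req, sizeof(req)) < 0 ? -1 : 0;
}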
329 
330 static int device_user_deadlock(struct dlm_user_proc *proc,
331 				struct dlm_lock_params *params)
332 {
333 	struct dlm_ls *ls;
334 	int error;
335 
336 	ls = dlm_find_lockspace_local(proc->lockspace);
337 	if (!ls)
338 		return -ENOENT;
339 
340 	error = dlm_user_deadlock(ls, params->flags, params->lkid);
341 
342 	dlm_put_lockspace(ls);
343 	return error;
344 }
345 
346 static int dlm_device_register(struct dlm_ls *ls, char *name)
347 {
348 	int error, len;
349 
350 	/* The device is already registered.  This happens when the
351 	   lockspace is created multiple times from userspace. */
352 	if (ls->ls_device.name)
353 		return 0;
354 
355 	error = -ENOMEM;
356 	len = strlen(name) + strlen(name_prefix) + 2;
357 	ls->ls_device.name = kzalloc(len, GFP_KERNEL);
358 	if (!ls->ls_device.name)
359 		goto fail;
360 
361 	snprintf((char *)ls->ls_device.name, len, "%s_%s", name_prefix,
362 		 name);
363 	ls->ls_device.fops = &device_fops;
364 	ls->ls_device.minor = MISC_DYNAMIC_MINOR;
365 
366 	error = misc_register(&ls->ls_device);
367 	if (error) {
368 		kfree(ls->ls_device.name);
369 	}
370 fail:
371 	return error;
372 }
373 
374 int dlm_device_deregister(struct dlm_ls *ls)
375 {
376 	int error;
377 
378 	/* The device is not registered.  This happens when the lockspace
379 	   was never used from userspace, or when device_create_lockspace()
380 	   calls dlm_release_lockspace() after the register fails. */
381 	if (!ls->ls_device.name)
382 		return 0;
383 
384 	error = misc_deregister(&ls->ls_device);
385 	if (!error)
386 		kfree(ls->ls_device.name);
387 	return error;
388 }
389 
390 static int device_user_purge(struct dlm_user_proc *proc,
391 			     struct dlm_purge_params *params)
392 {
393 	struct dlm_ls *ls;
394 	int error;
395 
396 	ls = dlm_find_lockspace_local(proc->lockspace);
397 	if (!ls)
398 		return -ENOENT;
399 
400 	error = dlm_user_purge(ls, proc, params->nodeid, params->pid);
401 
402 	dlm_put_lockspace(ls);
403 	return error;
404 }
405 
406 static int device_create_lockspace(struct dlm_lspace_params *params)
407 {
408 	dlm_lockspace_t *lockspace;
409 	struct dlm_ls *ls;
410 	int error;
411 
412 	if (!capable(CAP_SYS_ADMIN))
413 		return -EPERM;
414 
415 	error = dlm_new_lockspace(params->name, strlen(params->name),
416 				  &lockspace, params->flags, DLM_USER_LVB_LEN);
417 	if (error)
418 		return error;
419 
420 	ls = dlm_find_lockspace_local(lockspace);
421 	if (!ls)
422 		return -ENOENT;
423 
424 	error = dlm_device_register(ls, params->name);
425 	dlm_put_lockspace(ls);
426 
427 	if (error)
428 		dlm_release_lockspace(lockspace, 0);
429 	else
430 		error = ls->ls_device.minor;
431 
432 	return error;
433 }
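/*
 * Illustrative userspace sketch (standalone, not part of this file):
 * lockspaces are created by a privileged write to the control device; a
 * successful write() returns the misc minor of the new "dlm_<name>"
 * device (see device_create_lockspace() above).  The device path below is
 * an assumption, it depends on how udev names misc devices.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <linux/dlm.h>
#include <linux/dlm_device.h>

static int create_lockspace(const char *name)
{
	union {
		struct dlm_write_request req;
		char buf[sizeof(struct dlm_write_request) + DLM_LOCKSPACE_LEN];
	} u;
	size_t namelen = strlen(name);
	int fd, minor;

	if (namelen > DLM_LOCKSPACE_LEN)
		return -1;

	fd = open("/dev/misc/dlm-control", O_RDWR);	/* path may vary */
	if (fd < 0)
		return -1;

	memset(&u, 0, sizeof(u));
	u.req.version[0] = DLM_DEVICE_VERSION_MAJOR;
	u.req.version[1] = DLM_DEVICE_VERSION_MINOR;
	u.req.version[2] = DLM_DEVICE_VERSION_PATCH;
	u.req.cmd = DLM_USER_CREATE_LOCKSPACE;
	u.req.i.lspace.flags = 0;
	memcpy(u.req.i.lspace.name, name, namelen);

	minor = write(fd, &u.req, sizeof(u.req) + namelen);
	close(fd);
	return minor;		/* < 0 on error, else minor of dlm_<name> */
}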
434 
435 static int device_remove_lockspace(struct dlm_lspace_params *params)
436 {
437 	dlm_lockspace_t *lockspace;
438 	struct dlm_ls *ls;
439 	int error, force = 0;
440 
441 	if (!capable(CAP_SYS_ADMIN))
442 		return -EPERM;
443 
444 	ls = dlm_find_lockspace_device(params->minor);
445 	if (!ls)
446 		return -ENOENT;
447 
448 	if (params->flags & DLM_USER_LSFLG_FORCEFREE)
449 		force = 2;
450 
451 	lockspace = ls->ls_local_handle;
452 	dlm_put_lockspace(ls);
453 
454 	/* The final dlm_release_lockspace waits for references to go to
455 	   zero, so all processes will need to close their device for the
456 	   ls before the release will proceed.  release also calls the
457 	   device_deregister above.  Converting a positive return value
458 	   from release to zero means that userspace won't know when its
459 	   release was the final one, but it shouldn't need to know. */
460 
461 	error = dlm_release_lockspace(lockspace, force);
462 	if (error > 0)
463 		error = 0;
464 	return error;
465 }
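/*
 * Illustrative userspace sketch (standalone, not part of this file),
 * using the same includes as the creation sketch above: removal also goes
 * through the control device, keyed by the minor returned at creation
 * time.  DLM_USER_LSFLG_FORCEFREE maps to the force=2 release in
 * device_remove_lockspace() above.
 */
static int remove_lockspace(int ctl_fd, __u32 minor, int force)
{
	struct dlm_write_request req;

	memset(&req, 0, sizeof(req));
	req.version[0] = DLM_DEVICE_VERSION_MAJOR;
	req.version[1] = DLM_DEVICE_VERSION_MINOR;
	req.version[2] = DLM_DEVICE_VERSION_PATCH;
	req.cmd = DLM_USER_REMOVE_LOCKSPACE;
	req.i.lspace.minor = minor;
	req.i.lspace.flags = force ? DLM_USER_LSFLG_FORCEFREE : 0;

	return write(ctl_fd, &req, sizeof(req)) < 0 ? -1 : 0;
}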
466 
467 /* Check the user's version matches ours */
468 static int check_version(struct dlm_write_request *req)
469 {
470 	if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
471 	    (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
472 	     req->version[1] > DLM_DEVICE_VERSION_MINOR)) {
473 
474 		printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
475 		       "user (%d.%d.%d) kernel (%d.%d.%d)\n",
476 		       current->comm,
477 		       task_pid_nr(current),
478 		       req->version[0],
479 		       req->version[1],
480 		       req->version[2],
481 		       DLM_DEVICE_VERSION_MAJOR,
482 		       DLM_DEVICE_VERSION_MINOR,
483 		       DLM_DEVICE_VERSION_PATCH);
484 		return -EINVAL;
485 	}
486 	return 0;
487 }
488 
489 /*
490  * device_write
491  *
492  *   device_user_lock
493  *     dlm_user_request -> request_lock
494  *     dlm_user_convert -> convert_lock
495  *
496  *   device_user_unlock
497  *     dlm_user_unlock -> unlock_lock
498  *     dlm_user_cancel -> cancel_lock
499  *
500  *   device_create_lockspace
501  *     dlm_new_lockspace
502  *
503  *   device_remove_lockspace
504  *     dlm_release_lockspace
505  */
506 
507 /* a write to a lockspace device is a lock or unlock request, a write
508    to the control device is to create/remove a lockspace */
509 
510 static ssize_t device_write(struct file *file, const char __user *buf,
511 			    size_t count, loff_t *ppos)
512 {
513 	struct dlm_user_proc *proc = file->private_data;
514 	struct dlm_write_request *kbuf;
515 	sigset_t tmpsig, allsigs;
516 	int error;
517 
518 #ifdef CONFIG_COMPAT
519 	if (count < sizeof(struct dlm_write_request32))
520 #else
521 	if (count < sizeof(struct dlm_write_request))
522 #endif
523 		return -EINVAL;
524 
525 	kbuf = kzalloc(count + 1, GFP_KERNEL);
526 	if (!kbuf)
527 		return -ENOMEM;
528 
529 	if (copy_from_user(kbuf, buf, count)) {
530 		error = -EFAULT;
531 		goto out_free;
532 	}
533 
534 	if (check_version(kbuf)) {
535 		error = -EBADE;
536 		goto out_free;
537 	}
538 
539 #ifdef CONFIG_COMPAT
540 	if (!kbuf->is64bit) {
541 		struct dlm_write_request32 *k32buf;
542 		k32buf = (struct dlm_write_request32 *)kbuf;
543 		kbuf = kmalloc(count + 1 + (sizeof(struct dlm_write_request) -
544 			       sizeof(struct dlm_write_request32)), GFP_KERNEL);
545 		if (!kbuf) {
546 			kfree(k32buf);
547 			return -ENOMEM;
548 		}
549 
550 		if (proc)
551 			set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
552 		compat_input(kbuf, k32buf, count + 1);
553 		kfree(k32buf);
554 	}
555 #endif
556 
557 	/* do we really need this? can a write happen after a close? */
558 	if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
559 	    (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) {
560 		error = -EINVAL;
561 		goto out_free;
562 	}
563 
564 	sigfillset(&allsigs);
565 	sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
566 
567 	error = -EINVAL;
568 
569 	switch (kbuf->cmd)
570 	{
571 	case DLM_USER_LOCK:
572 		if (!proc) {
573 			log_print("no locking on control device");
574 			goto out_sig;
575 		}
576 		error = device_user_lock(proc, &kbuf->i.lock);
577 		break;
578 
579 	case DLM_USER_UNLOCK:
580 		if (!proc) {
581 			log_print("no locking on control device");
582 			goto out_sig;
583 		}
584 		error = device_user_unlock(proc, &kbuf->i.lock);
585 		break;
586 
587 	case DLM_USER_DEADLOCK:
588 		if (!proc) {
589 			log_print("no locking on control device");
590 			goto out_sig;
591 		}
592 		error = device_user_deadlock(proc, &kbuf->i.lock);
593 		break;
594 
595 	case DLM_USER_CREATE_LOCKSPACE:
596 		if (proc) {
597 			log_print("create/remove only on control device");
598 			goto out_sig;
599 		}
600 		error = device_create_lockspace(&kbuf->i.lspace);
601 		break;
602 
603 	case DLM_USER_REMOVE_LOCKSPACE:
604 		if (proc) {
605 			log_print("create/remove only on control device");
606 			goto out_sig;
607 		}
608 		error = device_remove_lockspace(&kbuf->i.lspace);
609 		break;
610 
611 	case DLM_USER_PURGE:
612 		if (!proc) {
613 			log_print("no locking on control device");
614 			goto out_sig;
615 		}
616 		error = device_user_purge(proc, &kbuf->i.purge);
617 		break;
618 
619 	default:
620 		log_print("Unknown command passed to DLM device : %d\n",
621 			  kbuf->cmd);
622 	}
623 
624  out_sig:
625 	sigprocmask(SIG_SETMASK, &tmpsig, NULL);
626 	recalc_sigpending();
627  out_free:
628 	kfree(kbuf);
629 	return error;
630 }
631 
632 /* Every process that opens the lockspace device has its own "proc" structure
633    hanging off the open file that's used to keep track of locks owned by the
634    process and asts that need to be delivered to the process. */
635 
636 static int device_open(struct inode *inode, struct file *file)
637 {
638 	struct dlm_user_proc *proc;
639 	struct dlm_ls *ls;
640 
641 	ls = dlm_find_lockspace_device(iminor(inode));
642 	if (!ls)
643 		return -ENOENT;
644 
645 	proc = kzalloc(sizeof(struct dlm_user_proc), GFP_KERNEL);
646 	if (!proc) {
647 		dlm_put_lockspace(ls);
648 		return -ENOMEM;
649 	}
650 
651 	proc->lockspace = ls->ls_local_handle;
652 	INIT_LIST_HEAD(&proc->asts);
653 	INIT_LIST_HEAD(&proc->locks);
654 	INIT_LIST_HEAD(&proc->unlocking);
655 	spin_lock_init(&proc->asts_spin);
656 	spin_lock_init(&proc->locks_spin);
657 	init_waitqueue_head(&proc->wait);
658 	file->private_data = proc;
659 
660 	return 0;
661 }
662 
663 static int device_close(struct inode *inode, struct file *file)
664 {
665 	struct dlm_user_proc *proc = file->private_data;
666 	struct dlm_ls *ls;
667 	sigset_t tmpsig, allsigs;
668 
669 	ls = dlm_find_lockspace_local(proc->lockspace);
670 	if (!ls)
671 		return -ENOENT;
672 
673 	sigfillset(&allsigs);
674 	sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
675 
676 	set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags);
677 
678 	dlm_clear_proc_locks(ls, proc);
679 
680 	/* at this point no more lkb's should exist for this lockspace,
681 	   so there's no chance of dlm_user_add_ast() being called and
682 	   looking for lkb->ua->proc */
683 
684 	kfree(proc);
685 	file->private_data = NULL;
686 
687 	dlm_put_lockspace(ls);
688 	dlm_put_lockspace(ls);  /* for the find in device_open() */
689 
690 	/* FIXME: AUTOFREE: if this ls is no longer used do
691 	   device_remove_lockspace() */
692 
693 	sigprocmask(SIG_SETMASK, &tmpsig, NULL);
694 	recalc_sigpending();
695 
696 	return 0;
697 }
698 
699 static int copy_result_to_user(struct dlm_user_args *ua, int compat, int type,
700 			       int bmode, char __user *buf, size_t count)
701 {
702 #ifdef CONFIG_COMPAT
703 	struct dlm_lock_result32 result32;
704 #endif
705 	struct dlm_lock_result result;
706 	void *resultptr;
707 	int error=0;
708 	int len;
709 	int struct_len;
710 
711 	memset(&result, 0, sizeof(struct dlm_lock_result));
712 	result.version[0] = DLM_DEVICE_VERSION_MAJOR;
713 	result.version[1] = DLM_DEVICE_VERSION_MINOR;
714 	result.version[2] = DLM_DEVICE_VERSION_PATCH;
715 	memcpy(&result.lksb, &ua->lksb, sizeof(struct dlm_lksb));
716 	result.user_lksb = ua->user_lksb;
717 
718 	/* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
719 	   in a conversion unless the conversion is successful.  See code
720 	   in dlm_user_convert() for updating ua from ua_tmp.  OpenVMS, though,
721 	   notes that a new blocking AST address and parameter are set even if
722 	   the conversion fails, so maybe we should just do that. */
723 
724 	if (type == AST_BAST) {
725 		result.user_astaddr = ua->bastaddr;
726 		result.user_astparam = ua->bastparam;
727 		result.bast_mode = bmode;
728 	} else {
729 		result.user_astaddr = ua->castaddr;
730 		result.user_astparam = ua->castparam;
731 	}
732 
733 #ifdef CONFIG_COMPAT
734 	if (compat)
735 		len = sizeof(struct dlm_lock_result32);
736 	else
737 #endif
738 		len = sizeof(struct dlm_lock_result);
739 	struct_len = len;
740 
741 	/* copy lvb to userspace if there is one, it's been updated, and
742 	   the user buffer has space for it */
743 
744 	if (ua->update_user_lvb && ua->lksb.sb_lvbptr &&
745 	    count >= len + DLM_USER_LVB_LEN) {
746 		if (copy_to_user(buf+len, ua->lksb.sb_lvbptr,
747 				 DLM_USER_LVB_LEN)) {
748 			error = -EFAULT;
749 			goto out;
750 		}
751 
752 		result.lvb_offset = len;
753 		len += DLM_USER_LVB_LEN;
754 	}
755 
756 	result.length = len;
757 	resultptr = &result;
758 #ifdef CONFIG_COMPAT
759 	if (compat) {
760 		compat_output(&result, &result32);
761 		resultptr = &result32;
762 	}
763 #endif
764 
765 	if (copy_to_user(buf, resultptr, struct_len))
766 		error = -EFAULT;
767 	else
768 		error = len;
769  out:
770 	return error;
771 }
772 
773 static int copy_version_to_user(char __user *buf, size_t count)
774 {
775 	struct dlm_device_version ver;
776 
777 	memset(&ver, 0, sizeof(struct dlm_device_version));
778 	ver.version[0] = DLM_DEVICE_VERSION_MAJOR;
779 	ver.version[1] = DLM_DEVICE_VERSION_MINOR;
780 	ver.version[2] = DLM_DEVICE_VERSION_PATCH;
781 
782 	if (copy_to_user(buf, &ver, sizeof(struct dlm_device_version)))
783 		return -EFAULT;
784 	return sizeof(struct dlm_device_version);
785 }
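/*
 * Illustrative userspace sketch (standalone, not part of this file): a
 * read of exactly sizeof(struct dlm_device_version) returns the kernel's
 * interface version instead of an AST, on the control device as well as
 * on a lockspace device (see the top of device_read() below), so a client
 * can probe compatibility before issuing requests.
 */
#include <unistd.h>
#include <linux/dlm_device.h>

static int query_device_version(int fd, struct dlm_device_version *ver)
{
	return read(fd, ver, sizeof(*ver)) == sizeof(*ver) ? 0 : -1;
}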
786 
787 /* a read returns a single ast described in a struct dlm_lock_result */
788 
789 static ssize_t device_read(struct file *file, char __user *buf, size_t count,
790 			   loff_t *ppos)
791 {
792 	struct dlm_user_proc *proc = file->private_data;
793 	struct dlm_lkb *lkb;
794 	DECLARE_WAITQUEUE(wait, current);
795 	int error, type=0, bmode=0, removed = 0;
796 
797 	if (count == sizeof(struct dlm_device_version)) {
798 		error = copy_version_to_user(buf, count);
799 		return error;
800 	}
801 
802 	if (!proc) {
803 		log_print("non-version read from control device %zu", count);
804 		return -EINVAL;
805 	}
806 
807 #ifdef CONFIG_COMPAT
808 	if (count < sizeof(struct dlm_lock_result32))
809 #else
810 	if (count < sizeof(struct dlm_lock_result))
811 #endif
812 		return -EINVAL;
813 
814 	/* do we really need this? can a read happen after a close? */
815 	if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
816 		return -EINVAL;
817 
818 	spin_lock(&proc->asts_spin);
819 	if (list_empty(&proc->asts)) {
820 		if (file->f_flags & O_NONBLOCK) {
821 			spin_unlock(&proc->asts_spin);
822 			return -EAGAIN;
823 		}
824 
825 		add_wait_queue(&proc->wait, &wait);
826 
827 	repeat:
828 		set_current_state(TASK_INTERRUPTIBLE);
829 		if (list_empty(&proc->asts) && !signal_pending(current)) {
830 			spin_unlock(&proc->asts_spin);
831 			schedule();
832 			spin_lock(&proc->asts_spin);
833 			goto repeat;
834 		}
835 		set_current_state(TASK_RUNNING);
836 		remove_wait_queue(&proc->wait, &wait);
837 
838 		if (signal_pending(current)) {
839 			spin_unlock(&proc->asts_spin);
840 			return -ERESTARTSYS;
841 		}
842 	}
843 
844 	/* there may be both completion and blocking asts to return for
845 	   the lkb, don't remove lkb from asts list unless no asts remain */
846 
847 	lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_astqueue);
848 
849 	if (lkb->lkb_ast_type & AST_COMP) {
850 		lkb->lkb_ast_type &= ~AST_COMP;
851 		type = AST_COMP;
852 	} else if (lkb->lkb_ast_type & AST_BAST) {
853 		lkb->lkb_ast_type &= ~AST_BAST;
854 		type = AST_BAST;
855 		bmode = lkb->lkb_bastmode;
856 	}
857 
858 	if (!lkb->lkb_ast_type) {
859 		list_del(&lkb->lkb_astqueue);
860 		removed = 1;
861 	}
862 	spin_unlock(&proc->asts_spin);
863 
864 	error = copy_result_to_user(lkb->lkb_ua,
865 			 	test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
866 				type, bmode, buf, count);
867 
868 	/* removes reference for the proc->asts lists added by
869 	   dlm_user_add_ast() and may result in the lkb being freed */
870 	if (removed)
871 		dlm_put_lkb(lkb);
872 
873 	return error;
874 }
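/*
 * Illustrative userspace sketch (standalone, not part of this file):
 * draining one AST from a lockspace device.  The result hands back the
 * cookies stored at request time (user_lksb, user_astaddr, user_astparam)
 * together with a snapshot of the lksb; an LVB, when present, sits at
 * lvb_offset in the same buffer.  Dispatching the callback identified by
 * user_astaddr/user_astparam (and bast_mode for blocking ASTs) is left
 * out here.
 */
#include <poll.h>
#include <string.h>
#include <unistd.h>
#include <linux/dlm.h>
#include <linux/dlm_device.h>

static int read_one_ast(int ls_fd)
{
	union {
		struct dlm_lock_result res;
		char buf[sizeof(struct dlm_lock_result) + DLM_USER_LVB_LEN];
	} u;
	struct pollfd pfd = { .fd = ls_fd, .events = POLLIN };
	struct dlm_lksb *lksb;
	ssize_t len;

	if (poll(&pfd, 1, -1) < 0)	/* device_poll() raises POLLIN when
					   ASTs are queued */
		return -1;

	len = read(ls_fd, &u, sizeof(u));
	if (len < (ssize_t)sizeof(struct dlm_lock_result))
		return -1;

	/* propagate the kernel's lksb snapshot to the caller's lksb */
	lksb = u.res.user_lksb;
	lksb->sb_status = u.res.lksb.sb_status;
	lksb->sb_lkid = u.res.lksb.sb_lkid;
	lksb->sb_flags = u.res.lksb.sb_flags;

	if (u.res.lvb_offset && lksb->sb_lvbptr)	/* zero offset: no LVB data */
		memcpy(lksb->sb_lvbptr, u.buf + u.res.lvb_offset,
		       DLM_USER_LVB_LEN);

	return 0;
}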
875 
876 static unsigned int device_poll(struct file *file, poll_table *wait)
877 {
878 	struct dlm_user_proc *proc = file->private_data;
879 
880 	poll_wait(file, &proc->wait, wait);
881 
882 	spin_lock(&proc->asts_spin);
883 	if (!list_empty(&proc->asts)) {
884 		spin_unlock(&proc->asts_spin);
885 		return POLLIN | POLLRDNORM;
886 	}
887 	spin_unlock(&proc->asts_spin);
888 	return 0;
889 }
890 
891 int dlm_user_daemon_available(void)
892 {
893 	/* dlm_controld hasn't started (or, has started, but not
894 	   properly populated configfs) */
895 
896 	if (!dlm_our_nodeid())
897 		return 0;
898 
899 	/* This is to deal with versions of dlm_controld that don't
900 	   know about the monitor device.  We assume that if the
901 	   dlm_controld was started (above), but the monitor device
902 	   was never opened, that it's an old version.  dlm_controld
903 	   should open the monitor device before populating configfs. */
904 
905 	if (dlm_monitor_unused)
906 		return 1;
907 
908 	return atomic_read(&dlm_monitor_opened) ? 1 : 0;
909 }
910 
911 static int ctl_device_open(struct inode *inode, struct file *file)
912 {
913 	file->private_data = NULL;
914 	return 0;
915 }
916 
917 static int ctl_device_close(struct inode *inode, struct file *file)
918 {
919 	return 0;
920 }
921 
922 static int monitor_device_open(struct inode *inode, struct file *file)
923 {
924 	atomic_inc(&dlm_monitor_opened);
925 	dlm_monitor_unused = 0;
926 	return 0;
927 }
928 
929 static int monitor_device_close(struct inode *inode, struct file *file)
930 {
931 	if (atomic_dec_and_test(&dlm_monitor_opened))
932 		dlm_stop_lockspaces();
933 	return 0;
934 }
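/*
 * Illustrative sketch (standalone, not part of this file): the monitor
 * device is a liveness channel for the userspace cluster daemon.  The
 * daemon is expected to open dlm-monitor before populating configfs and
 * to hold the fd for its lifetime; when the last opener goes away,
 * monitor_device_close() above stops every lockspace.  The path is an
 * assumption and depends on udev naming.
 */
#include <fcntl.h>

static int open_dlm_monitor(void)
{
	/* keep this fd open for the lifetime of the daemon */
	return open("/dev/misc/dlm-monitor", O_RDONLY);
}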
935 
936 static const struct file_operations device_fops = {
937 	.open    = device_open,
938 	.release = device_close,
939 	.read    = device_read,
940 	.write   = device_write,
941 	.poll    = device_poll,
942 	.owner   = THIS_MODULE,
943 };
944 
945 static const struct file_operations ctl_device_fops = {
946 	.open    = ctl_device_open,
947 	.release = ctl_device_close,
948 	.read    = device_read,
949 	.write   = device_write,
950 	.owner   = THIS_MODULE,
951 };
952 
953 static struct miscdevice ctl_device = {
954 	.name  = "dlm-control",
955 	.fops  = &ctl_device_fops,
956 	.minor = MISC_DYNAMIC_MINOR,
957 };
958 
959 static const struct file_operations monitor_device_fops = {
960 	.open    = monitor_device_open,
961 	.release = monitor_device_close,
962 	.owner   = THIS_MODULE,
963 };
964 
965 static struct miscdevice monitor_device = {
966 	.name  = "dlm-monitor",
967 	.fops  = &monitor_device_fops,
968 	.minor = MISC_DYNAMIC_MINOR,
969 };
970 
971 int __init dlm_user_init(void)
972 {
973 	int error;
974 
975 	atomic_set(&dlm_monitor_opened, 0);
976 
977 	error = misc_register(&ctl_device);
978 	if (error) {
979 		log_print("misc_register failed for control device");
980 		goto out;
981 	}
982 
983 	error = misc_register(&monitor_device);
984 	if (error) {
985 		log_print("misc_register failed for monitor device");
986 		misc_deregister(&ctl_device);
987 	}
988  out:
989 	return error;
990 }
991 
992 void dlm_user_exit(void)
993 {
994 	misc_deregister(&ctl_device);
995 	misc_deregister(&monitor_device);
996 }
997 
998