1 /*
2  *  linux/fs/locks.c
3  *
4  *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
5  *  Doug Evans (dje@spiff.uucp), August 07, 1992
6  *
7  *  Deadlock detection added.
8  *  FIXME: one thing isn't handled yet:
9  *	- mandatory locks (requires lots of changes elsewhere)
10  *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
11  *
12  *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
13  *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
14  *
15  *  Converted file_lock_table to a linked list from an array, which eliminates
16  *  the limits on how many active file locks are open.
17  *  Chad Page (pageone@netcom.com), November 27, 1994
18  *
19  *  Removed dependency on file descriptors. dup()'ed file descriptors now
20  *  get the same locks as the original file descriptors, and a close() on
21  *  any file descriptor removes ALL the locks on the file for the current
22  *  process. Since locks still depend on the process id, locks are inherited
23  *  after an exec() but not after a fork(). This agrees with POSIX, and both
24  *  BSD and SVR4 practice.
25  *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
26  *
27  *  Scrapped free list which is redundant now that we allocate locks
28  *  dynamically with kmalloc()/kfree().
29  *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
30  *
31  *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
32  *
33  *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
34  *  fcntl() system call. They have the semantics described above.
35  *
36  *  FL_FLOCK locks are created with calls to flock(), through the flock()
37  *  system call, which is new. Old C libraries implement flock() via fcntl()
38  *  and will continue to use the old, broken implementation.
39  *
40  *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
41  *  with a file pointer (filp). As a result they can be shared by a parent
42  *  process and its children after a fork(). They are removed when the last
43  *  file descriptor referring to the file pointer is closed (unless explicitly
44  *  unlocked).
45  *
46  *  FL_FLOCK locks never deadlock; an existing lock is always removed before
47  *  upgrading from shared to exclusive (or vice versa). When this happens
48  *  any processes blocked by the current lock are woken up and allowed to
49  *  run before the new lock is applied.
50  *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
51  *
52  *  Removed some race conditions in flock_lock_file(), marked other possible
53  *  races. Just grep for FIXME to see them.
54  *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
55  *
56  *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
57  *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
58  *  once we've checked for blocking and deadlocking.
59  *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
60  *
61  *  Initial implementation of mandatory locks. SunOS turned out to be
62  *  a rotten model, so I implemented the "obvious" semantics.
63  *  See 'Documentation/filesystems/mandatory-locking.txt' for details.
64  *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
65  *
66  *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
67  *  check if a file has mandatory locks, used by mmap(), open() and creat() to
68  *  see if the system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
69  *  Manual, Section 2.
70  *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
71  *
72  *  Tidied up block list handling. Added '/proc/locks' interface.
73  *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
74  *
75  *  Fixed deadlock condition for pathological code that mixes calls to
76  *  flock() and fcntl().
77  *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
78  *
79  *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
80  *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
81  *  guarantee sensible behaviour in the case where file system modules might
82  *  be compiled with different options than the kernel itself.
83  *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
84  *
85  *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
86  *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
87  *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
88  *
89  *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
90  *  locks. Changed process synchronisation to avoid dereferencing locks that
91  *  have already been freed.
92  *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
93  *
94  *  Made the block list a circular list to minimise searching in the list.
95  *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
96  *
97  *  Made mandatory locking a mount option. Default is not to allow mandatory
98  *  locking.
99  *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
100  *
101  *  Some adaptations for NFS support.
102  *  Olaf Kirch (okir@monad.swb.de), Dec 1996,
103  *
104  *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
105  *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
106  *
107  *  Use slab allocator instead of kmalloc/kfree.
108  *  Use generic list implementation from <linux/list.h>.
109  *  Sped up posix_locks_deadlock by only considering blocked locks.
110  *  Matthew Wilcox <willy@debian.org>, March, 2000.
111  *
112  *  Leases and LOCK_MAND
113  *  Matthew Wilcox <willy@debian.org>, June, 2000.
114  *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
115  */
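/*
 *  For illustration only -- a minimal userspace sketch of the two lock
 *  styles described above. The file name is an arbitrary example and is
 *  not defined or used anywhere in this file.
 *
 *	int fd = open("/tmp/example", O_RDWR | O_CREAT, 0644);
 *
 *	// FL_FLOCK: whole-file lock, tied to the open file description
 *	flock(fd, LOCK_EX);
 *
 *	// FL_POSIX: byte-range lock, owned by the process
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 100,
 *	};
 *	fcntl(fd, F_SETLKW, &fl);	// blocks until the range can be locked
 */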
116 
117 #include <linux/capability.h>
118 #include <linux/file.h>
119 #include <linux/fdtable.h>
120 #include <linux/fs.h>
121 #include <linux/init.h>
122 #include <linux/module.h>
123 #include <linux/security.h>
124 #include <linux/slab.h>
125 #include <linux/syscalls.h>
126 #include <linux/time.h>
127 #include <linux/rcupdate.h>
128 #include <linux/pid_namespace.h>
129 
130 #include <asm/uaccess.h>
131 
132 #define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
133 #define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
134 #define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)
135 
136 static bool lease_breaking(struct file_lock *fl)
137 {
138 	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
139 }
140 
141 static int target_leasetype(struct file_lock *fl)
142 {
143 	if (fl->fl_flags & FL_UNLOCK_PENDING)
144 		return F_UNLCK;
145 	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
146 		return F_RDLCK;
147 	return fl->fl_type;
148 }
149 
150 int leases_enable = 1;
151 int lease_break_time = 45;
152 
153 #define for_each_lock(inode, lockp) \
154 	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
155 
156 static LIST_HEAD(file_lock_list);
157 static LIST_HEAD(blocked_list);
158 static DEFINE_SPINLOCK(file_lock_lock);
159 
160 /*
161  * Protects the two list heads above, plus the inode->i_flock list
162  */
163 void lock_flocks(void)
164 {
165 	spin_lock(&file_lock_lock);
166 }
167 EXPORT_SYMBOL_GPL(lock_flocks);
168 
169 void unlock_flocks(void)
170 {
171 	spin_unlock(&file_lock_lock);
172 }
173 EXPORT_SYMBOL_GPL(unlock_flocks);
174 
175 static struct kmem_cache *filelock_cache __read_mostly;
176 
177 static void locks_init_lock_heads(struct file_lock *fl)
178 {
179 	INIT_LIST_HEAD(&fl->fl_link);
180 	INIT_LIST_HEAD(&fl->fl_block);
181 	init_waitqueue_head(&fl->fl_wait);
182 }
183 
184 /* Allocate an empty lock structure. */
185 struct file_lock *locks_alloc_lock(void)
186 {
187 	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
188 
189 	if (fl)
190 		locks_init_lock_heads(fl);
191 
192 	return fl;
193 }
194 EXPORT_SYMBOL_GPL(locks_alloc_lock);
195 
196 void locks_release_private(struct file_lock *fl)
197 {
198 	if (fl->fl_ops) {
199 		if (fl->fl_ops->fl_release_private)
200 			fl->fl_ops->fl_release_private(fl);
201 		fl->fl_ops = NULL;
202 	}
203 	if (fl->fl_lmops) {
204 		if (fl->fl_lmops->lm_release_private)
205 			fl->fl_lmops->lm_release_private(fl);
206 		fl->fl_lmops = NULL;
207 	}
208 
209 }
210 EXPORT_SYMBOL_GPL(locks_release_private);
211 
212 /* Free a lock which is not in use. */
213 void locks_free_lock(struct file_lock *fl)
214 {
215 	BUG_ON(waitqueue_active(&fl->fl_wait));
216 	BUG_ON(!list_empty(&fl->fl_block));
217 	BUG_ON(!list_empty(&fl->fl_link));
218 
219 	locks_release_private(fl);
220 	kmem_cache_free(filelock_cache, fl);
221 }
222 EXPORT_SYMBOL(locks_free_lock);
223 
224 void locks_init_lock(struct file_lock *fl)
225 {
226 	memset(fl, 0, sizeof(struct file_lock));
227 	locks_init_lock_heads(fl);
228 }
229 
230 EXPORT_SYMBOL(locks_init_lock);
231 
232 static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
233 {
234 	if (fl->fl_ops) {
235 		if (fl->fl_ops->fl_copy_lock)
236 			fl->fl_ops->fl_copy_lock(new, fl);
237 		new->fl_ops = fl->fl_ops;
238 	}
239 	if (fl->fl_lmops)
240 		new->fl_lmops = fl->fl_lmops;
241 }
242 
243 /*
244  * Initialize a new lock from an existing file_lock structure.
245  */
246 void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
247 {
248 	new->fl_owner = fl->fl_owner;
249 	new->fl_pid = fl->fl_pid;
250 	new->fl_file = NULL;
251 	new->fl_flags = fl->fl_flags;
252 	new->fl_type = fl->fl_type;
253 	new->fl_start = fl->fl_start;
254 	new->fl_end = fl->fl_end;
255 	new->fl_ops = NULL;
256 	new->fl_lmops = NULL;
257 }
258 EXPORT_SYMBOL(__locks_copy_lock);
259 
260 void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
261 {
262 	locks_release_private(new);
263 
264 	__locks_copy_lock(new, fl);
265 	new->fl_file = fl->fl_file;
266 	new->fl_ops = fl->fl_ops;
267 	new->fl_lmops = fl->fl_lmops;
268 
269 	locks_copy_private(new, fl);
270 }
271 
272 EXPORT_SYMBOL(locks_copy_lock);
273 
274 static inline int flock_translate_cmd(int cmd) {
275 	if (cmd & LOCK_MAND)
276 		return cmd & (LOCK_MAND | LOCK_RW);
277 	switch (cmd) {
278 	case LOCK_SH:
279 		return F_RDLCK;
280 	case LOCK_EX:
281 		return F_WRLCK;
282 	case LOCK_UN:
283 		return F_UNLCK;
284 	}
285 	return -EINVAL;
286 }
287 
288 /* Fill in a file_lock structure with an appropriate FLOCK lock. */
289 static int flock_make_lock(struct file *filp, struct file_lock **lock,
290 		unsigned int cmd)
291 {
292 	struct file_lock *fl;
293 	int type = flock_translate_cmd(cmd);
294 	if (type < 0)
295 		return type;
296 
297 	fl = locks_alloc_lock();
298 	if (fl == NULL)
299 		return -ENOMEM;
300 
301 	fl->fl_file = filp;
302 	fl->fl_pid = current->tgid;
303 	fl->fl_flags = FL_FLOCK;
304 	fl->fl_type = type;
305 	fl->fl_end = OFFSET_MAX;
306 
307 	*lock = fl;
308 	return 0;
309 }
310 
311 static int assign_type(struct file_lock *fl, long type)
312 {
313 	switch (type) {
314 	case F_RDLCK:
315 	case F_WRLCK:
316 	case F_UNLCK:
317 		fl->fl_type = type;
318 		break;
319 	default:
320 		return -EINVAL;
321 	}
322 	return 0;
323 }
324 
325 /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
326  * style lock.
327  */
328 static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
329 			       struct flock *l)
330 {
331 	off_t start, end;
332 
333 	switch (l->l_whence) {
334 	case SEEK_SET:
335 		start = 0;
336 		break;
337 	case SEEK_CUR:
338 		start = filp->f_pos;
339 		break;
340 	case SEEK_END:
341 		start = i_size_read(filp->f_path.dentry->d_inode);
342 		break;
343 	default:
344 		return -EINVAL;
345 	}
346 
347 	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
348 	   POSIX-2001 defines it. */
349 	start += l->l_start;
350 	if (start < 0)
351 		return -EINVAL;
352 	fl->fl_end = OFFSET_MAX;
353 	if (l->l_len > 0) {
354 		end = start + l->l_len - 1;
355 		fl->fl_end = end;
356 	} else if (l->l_len < 0) {
357 		end = start - 1;
358 		fl->fl_end = end;
359 		start += l->l_len;
360 		if (start < 0)
361 			return -EINVAL;
362 	}
363 	fl->fl_start = start;	/* we record the absolute position */
364 	if (fl->fl_end < fl->fl_start)
365 		return -EOVERFLOW;
366 
367 	fl->fl_owner = current->files;
368 	fl->fl_pid = current->tgid;
369 	fl->fl_file = filp;
370 	fl->fl_flags = FL_POSIX;
371 	fl->fl_ops = NULL;
372 	fl->fl_lmops = NULL;
373 
374 	return assign_type(fl, l->l_type);
375 }
376 
377 #if BITS_PER_LONG == 32
378 static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
379 				 struct flock64 *l)
380 {
381 	loff_t start;
382 
383 	switch (l->l_whence) {
384 	case SEEK_SET:
385 		start = 0;
386 		break;
387 	case SEEK_CUR:
388 		start = filp->f_pos;
389 		break;
390 	case SEEK_END:
391 		start = i_size_read(filp->f_path.dentry->d_inode);
392 		break;
393 	default:
394 		return -EINVAL;
395 	}
396 
397 	start += l->l_start;
398 	if (start < 0)
399 		return -EINVAL;
400 	fl->fl_end = OFFSET_MAX;
401 	if (l->l_len > 0) {
402 		fl->fl_end = start + l->l_len - 1;
403 	} else if (l->l_len < 0) {
404 		fl->fl_end = start - 1;
405 		start += l->l_len;
406 		if (start < 0)
407 			return -EINVAL;
408 	}
409 	fl->fl_start = start;	/* we record the absolute position */
410 	if (fl->fl_end < fl->fl_start)
411 		return -EOVERFLOW;
412 
413 	fl->fl_owner = current->files;
414 	fl->fl_pid = current->tgid;
415 	fl->fl_file = filp;
416 	fl->fl_flags = FL_POSIX;
417 	fl->fl_ops = NULL;
418 	fl->fl_lmops = NULL;
419 
420 	return assign_type(fl, l->l_type);
421 }
422 #endif
423 
424 /* default lease lock manager operations */
425 static void lease_break_callback(struct file_lock *fl)
426 {
427 	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
428 }
429 
430 static void lease_release_private_callback(struct file_lock *fl)
431 {
432 	if (!fl->fl_file)
433 		return;
434 
435 	f_delown(fl->fl_file);
436 	fl->fl_file->f_owner.signum = 0;
437 }
438 
439 static const struct lock_manager_operations lease_manager_ops = {
440 	.lm_break = lease_break_callback,
441 	.lm_release_private = lease_release_private_callback,
442 	.lm_change = lease_modify,
443 };
444 
445 /*
446  * Initialize a lease, use the default lock manager operations
447  */
448 static int lease_init(struct file *filp, long type, struct file_lock *fl)
449  {
450 	if (assign_type(fl, type) != 0)
451 		return -EINVAL;
452 
453 	fl->fl_owner = current->files;
454 	fl->fl_pid = current->tgid;
455 
456 	fl->fl_file = filp;
457 	fl->fl_flags = FL_LEASE;
458 	fl->fl_start = 0;
459 	fl->fl_end = OFFSET_MAX;
460 	fl->fl_ops = NULL;
461 	fl->fl_lmops = &lease_manager_ops;
462 	return 0;
463 }
464 
465 /* Allocate a file_lock initialised to this type of lease */
466 static struct file_lock *lease_alloc(struct file *filp, long type)
467 {
468 	struct file_lock *fl = locks_alloc_lock();
469 	int error = -ENOMEM;
470 
471 	if (fl == NULL)
472 		return ERR_PTR(error);
473 
474 	error = lease_init(filp, type, fl);
475 	if (error) {
476 		locks_free_lock(fl);
477 		return ERR_PTR(error);
478 	}
479 	return fl;
480 }
481 
482 /* Check if two locks overlap each other.
483  */
484 static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
485 {
486 	return ((fl1->fl_end >= fl2->fl_start) &&
487 		(fl2->fl_end >= fl1->fl_start));
488 }
489 
490 /*
491  * Check whether two locks have the same owner.
492  */
493 static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
494 {
495 	if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
496 		return fl2->fl_lmops == fl1->fl_lmops &&
497 			fl1->fl_lmops->lm_compare_owner(fl1, fl2);
498 	return fl1->fl_owner == fl2->fl_owner;
499 }
500 
501 /* Remove waiter from blocker's block list.
502  * When blocker ends up pointing to itself then the list is empty.
503  */
504 static void __locks_delete_block(struct file_lock *waiter)
505 {
506 	list_del_init(&waiter->fl_block);
507 	list_del_init(&waiter->fl_link);
508 	waiter->fl_next = NULL;
509 }
510 
511 /*
512  */
513 void locks_delete_block(struct file_lock *waiter)
514 {
515 	lock_flocks();
516 	__locks_delete_block(waiter);
517 	unlock_flocks();
518 }
519 EXPORT_SYMBOL(locks_delete_block);
520 
521 /* Insert waiter into blocker's block list.
522  * We use a circular list so that processes can be easily woken up in
523  * the order they blocked. The documentation doesn't require this but
524  * it seems like the reasonable thing to do.
525  */
526 static void locks_insert_block(struct file_lock *blocker,
527 			       struct file_lock *waiter)
528 {
529 	BUG_ON(!list_empty(&waiter->fl_block));
530 	list_add_tail(&waiter->fl_block, &blocker->fl_block);
531 	waiter->fl_next = blocker;
532 	if (IS_POSIX(blocker))
533 		list_add(&waiter->fl_link, &blocked_list);
534 }
535 
536 /* Wake up processes blocked waiting for blocker.
537  * If told to wait then schedule the processes until the block list
538  * is empty, otherwise empty the block list ourselves.
539  */
540 static void locks_wake_up_blocks(struct file_lock *blocker)
541 {
542 	while (!list_empty(&blocker->fl_block)) {
543 		struct file_lock *waiter;
544 
545 		waiter = list_first_entry(&blocker->fl_block,
546 				struct file_lock, fl_block);
547 		__locks_delete_block(waiter);
548 		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
549 			waiter->fl_lmops->lm_notify(waiter);
550 		else
551 			wake_up(&waiter->fl_wait);
552 	}
553 }
554 
555 /* Insert file lock fl into an inode's lock list at the position indicated
556  * by pos. At the same time add the lock to the global file lock list.
557  */
558 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
559 {
560 	list_add(&fl->fl_link, &file_lock_list);
561 
562 	fl->fl_nspid = get_pid(task_tgid(current));
563 
564 	/* insert into file's list */
565 	fl->fl_next = *pos;
566 	*pos = fl;
567 }
568 
569 /*
570  * Delete a lock and then free it.
571  * Wake up processes that are blocked waiting for this lock,
572  * notify the FS that the lock has been cleared and
573  * finally free the lock.
574  */
575 static void locks_delete_lock(struct file_lock **thisfl_p)
576 {
577 	struct file_lock *fl = *thisfl_p;
578 
579 	*thisfl_p = fl->fl_next;
580 	fl->fl_next = NULL;
581 	list_del_init(&fl->fl_link);
582 
583 	fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
584 	if (fl->fl_fasync != NULL) {
585 		printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
586 		fl->fl_fasync = NULL;
587 	}
588 
589 	if (fl->fl_nspid) {
590 		put_pid(fl->fl_nspid);
591 		fl->fl_nspid = NULL;
592 	}
593 
594 	locks_wake_up_blocks(fl);
595 	locks_free_lock(fl);
596 }
597 
598 /* Determine if lock sys_fl blocks lock caller_fl. Common functionality
599  * checks for shared/exclusive status of overlapping locks.
600  */
601 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
602 {
603 	if (sys_fl->fl_type == F_WRLCK)
604 		return 1;
605 	if (caller_fl->fl_type == F_WRLCK)
606 		return 1;
607 	return 0;
608 }
609 
610 /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
611  * checking before calling the locks_conflict().
612  */
613 static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
614 {
615 	/* POSIX locks owned by the same process do not conflict with
616 	 * each other.
617 	 */
618 	if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
619 		return (0);
620 
621 	/* Check whether they overlap */
622 	if (!locks_overlap(caller_fl, sys_fl))
623 		return 0;
624 
625 	return (locks_conflict(caller_fl, sys_fl));
626 }
627 
628 /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
629  * checking before calling the locks_conflict().
630  */
631 static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
632 {
633 	/* FLOCK locks referring to the same filp do not conflict with
634 	 * each other.
635 	 */
636 	if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
637 		return (0);
638 	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
639 		return 0;
640 
641 	return (locks_conflict(caller_fl, sys_fl));
642 }
643 
644 void
645 posix_test_lock(struct file *filp, struct file_lock *fl)
646 {
647 	struct file_lock *cfl;
648 
649 	lock_flocks();
650 	for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
651 		if (!IS_POSIX(cfl))
652 			continue;
653 		if (posix_locks_conflict(fl, cfl))
654 			break;
655 	}
656 	if (cfl) {
657 		__locks_copy_lock(fl, cfl);
658 		if (cfl->fl_nspid)
659 			fl->fl_pid = pid_vnr(cfl->fl_nspid);
660 	} else
661 		fl->fl_type = F_UNLCK;
662 	unlock_flocks();
663 	return;
664 }
665 EXPORT_SYMBOL(posix_test_lock);
666 
667 /*
668  * Deadlock detection:
669  *
670  * We attempt to detect deadlocks that are due purely to posix file
671  * locks.
672  *
673  * We assume that a task can be waiting for at most one lock at a time.
674  * So for any acquired lock, the process holding that lock may be
675  * waiting on at most one other lock.  That lock in turns may be held by
676  * someone waiting for at most one other lock.  Given a requested lock
677  * caller_fl which is about to wait for a conflicting lock block_fl, we
678  * follow this chain of waiters to ensure we are not about to create a
679  * cycle.
680  *
681  * Since we do this before we ever put a process to sleep on a lock, we
682  * are ensured that there is never a cycle; that is what guarantees that
683  * the while() loop in posix_locks_deadlock() eventually completes.
684  *
685  * Note: the above assumption may not be true when handling lock
686  * requests from a broken NFS client. It may also fail in the presence
687  * of tasks (such as posix threads) sharing the same open file table.
688  *
689  * To handle those cases, we just bail out after a few iterations.
690  */
691 
692 #define MAX_DEADLK_ITERATIONS 10
693 
694 /* Find a lock that the owner of the given block_fl is blocking on. */
695 static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
696 {
697 	struct file_lock *fl;
698 
699 	list_for_each_entry(fl, &blocked_list, fl_link) {
700 		if (posix_same_owner(fl, block_fl))
701 			return fl->fl_next;
702 	}
703 	return NULL;
704 }
705 
706 static int posix_locks_deadlock(struct file_lock *caller_fl,
707 				struct file_lock *block_fl)
708 {
709 	int i = 0;
710 
711 	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
712 		if (i++ > MAX_DEADLK_ITERATIONS)
713 			return 0;
714 		if (posix_same_owner(caller_fl, block_fl))
715 			return 1;
716 	}
717 	return 0;
718 }
719 
720 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
721  * after any leases, but before any posix locks.
722  *
723  * Note that if called with an FL_EXISTS argument, the caller may determine
724  * whether or not a lock was successfully freed by testing the return
725  * value for -ENOENT.
726  */
727 static int flock_lock_file(struct file *filp, struct file_lock *request)
728 {
729 	struct file_lock *new_fl = NULL;
730 	struct file_lock **before;
731 	struct inode * inode = filp->f_path.dentry->d_inode;
732 	int error = 0;
733 	int found = 0;
734 
735 	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
736 		new_fl = locks_alloc_lock();
737 		if (!new_fl)
738 			return -ENOMEM;
739 	}
740 
741 	lock_flocks();
742 	if (request->fl_flags & FL_ACCESS)
743 		goto find_conflict;
744 
745 	for_each_lock(inode, before) {
746 		struct file_lock *fl = *before;
747 		if (IS_POSIX(fl))
748 			break;
749 		if (IS_LEASE(fl))
750 			continue;
751 		if (filp != fl->fl_file)
752 			continue;
753 		if (request->fl_type == fl->fl_type)
754 			goto out;
755 		found = 1;
756 		locks_delete_lock(before);
757 		break;
758 	}
759 
760 	if (request->fl_type == F_UNLCK) {
761 		if ((request->fl_flags & FL_EXISTS) && !found)
762 			error = -ENOENT;
763 		goto out;
764 	}
765 
766 	/*
767 	 * If a higher-priority process was blocked on the old file lock,
768 	 * give it the opportunity to lock the file.
769 	 */
770 	if (found) {
771 		unlock_flocks();
772 		cond_resched();
773 		lock_flocks();
774 	}
775 
776 find_conflict:
777 	for_each_lock(inode, before) {
778 		struct file_lock *fl = *before;
779 		if (IS_POSIX(fl))
780 			break;
781 		if (IS_LEASE(fl))
782 			continue;
783 		if (!flock_locks_conflict(request, fl))
784 			continue;
785 		error = -EAGAIN;
786 		if (!(request->fl_flags & FL_SLEEP))
787 			goto out;
788 		error = FILE_LOCK_DEFERRED;
789 		locks_insert_block(fl, request);
790 		goto out;
791 	}
792 	if (request->fl_flags & FL_ACCESS)
793 		goto out;
794 	locks_copy_lock(new_fl, request);
795 	locks_insert_lock(before, new_fl);
796 	new_fl = NULL;
797 	error = 0;
798 
799 out:
800 	unlock_flocks();
801 	if (new_fl)
802 		locks_free_lock(new_fl);
803 	return error;
804 }
805 
806 static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
807 {
808 	struct file_lock *fl;
809 	struct file_lock *new_fl = NULL;
810 	struct file_lock *new_fl2 = NULL;
811 	struct file_lock *left = NULL;
812 	struct file_lock *right = NULL;
813 	struct file_lock **before;
814 	int error, added = 0;
815 
816 	/*
817 	 * We may need two file_lock structures for this operation,
818 	 * so we get them in advance to avoid races.
819 	 *
820 	 * In some cases we can be sure that no new locks will be needed
821 	 */
822 	if (!(request->fl_flags & FL_ACCESS) &&
823 	    (request->fl_type != F_UNLCK ||
824 	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
825 		new_fl = locks_alloc_lock();
826 		new_fl2 = locks_alloc_lock();
827 	}
828 
829 	lock_flocks();
830 	if (request->fl_type != F_UNLCK) {
831 		for_each_lock(inode, before) {
832 			fl = *before;
833 			if (!IS_POSIX(fl))
834 				continue;
835 			if (!posix_locks_conflict(request, fl))
836 				continue;
837 			if (conflock)
838 				__locks_copy_lock(conflock, fl);
839 			error = -EAGAIN;
840 			if (!(request->fl_flags & FL_SLEEP))
841 				goto out;
842 			error = -EDEADLK;
843 			if (posix_locks_deadlock(request, fl))
844 				goto out;
845 			error = FILE_LOCK_DEFERRED;
846 			locks_insert_block(fl, request);
847 			goto out;
848   		}
849   	}
850 
851 	/* If we're just looking for a conflict, we're done. */
852 	error = 0;
853 	if (request->fl_flags & FL_ACCESS)
854 		goto out;
855 
856 	/*
857 	 * Find the first old lock with the same owner as the new lock.
858 	 */
859 
860 	before = &inode->i_flock;
861 
862 	/* First skip locks owned by other processes.  */
863 	while ((fl = *before) && (!IS_POSIX(fl) ||
864 				  !posix_same_owner(request, fl))) {
865 		before = &fl->fl_next;
866 	}
867 
868 	/* Process locks with this owner.  */
869 	while ((fl = *before) && posix_same_owner(request, fl)) {
870 		/* Detect adjacent or overlapping regions (if same lock type)
871 		 */
872 		if (request->fl_type == fl->fl_type) {
873 			/* In all comparisons of start vs end, use
874 			 * "start - 1" rather than "end + 1". If end
875 			 * is OFFSET_MAX, end + 1 will become negative.
876 			 */
877 			if (fl->fl_end < request->fl_start - 1)
878 				goto next_lock;
879 			/* If the next lock in the list has entirely bigger
880 			 * addresses than the new one, insert the lock here.
881 			 */
882 			if (fl->fl_start - 1 > request->fl_end)
883 				break;
884 
885 			/* If we come here, the new and old lock are of the
886 			 * same type and adjacent or overlapping. Make one
887 			 * lock yielding from the lower start address of both
888 			 * locks to the higher end address.
889 			 */
890 			if (fl->fl_start > request->fl_start)
891 				fl->fl_start = request->fl_start;
892 			else
893 				request->fl_start = fl->fl_start;
894 			if (fl->fl_end < request->fl_end)
895 				fl->fl_end = request->fl_end;
896 			else
897 				request->fl_end = fl->fl_end;
898 			if (added) {
899 				locks_delete_lock(before);
900 				continue;
901 			}
902 			request = fl;
903 			added = 1;
904 		}
905 		else {
906 			/* Processing for different lock types is a bit
907 			 * more complex.
908 			 */
909 			if (fl->fl_end < request->fl_start)
910 				goto next_lock;
911 			if (fl->fl_start > request->fl_end)
912 				break;
913 			if (request->fl_type == F_UNLCK)
914 				added = 1;
915 			if (fl->fl_start < request->fl_start)
916 				left = fl;
917 			/* If the next lock in the list has a higher end
918 			 * address than the new one, insert the new one here.
919 			 */
920 			if (fl->fl_end > request->fl_end) {
921 				right = fl;
922 				break;
923 			}
924 			if (fl->fl_start >= request->fl_start) {
925 				/* The new lock completely replaces an old
926 				 * one (This may happen several times).
927 				 */
928 				if (added) {
929 					locks_delete_lock(before);
930 					continue;
931 				}
932 				/* Replace the old lock with the new one.
933 				 * Wake up anybody waiting for the old one,
934 				 * as the change in lock type might satisfy
935 				 * their needs.
936 				 */
937 				locks_wake_up_blocks(fl);
938 				fl->fl_start = request->fl_start;
939 				fl->fl_end = request->fl_end;
940 				fl->fl_type = request->fl_type;
941 				locks_release_private(fl);
942 				locks_copy_private(fl, request);
943 				request = fl;
944 				added = 1;
945 			}
946 		}
947 		/* Go on to next lock.
948 		 */
949 	next_lock:
950 		before = &fl->fl_next;
951 	}
952 
953 	/*
954 	 * The above code only modifies existing locks in case of
955 	 * merging or replacing.  If new lock(s) need to be inserted
956 	 * all modifications are done below this, so it's still safe to
957 	 * bail out.
958 	 */
959 	error = -ENOLCK; /* "no luck" */
960 	if (right && left == right && !new_fl2)
961 		goto out;
962 
963 	error = 0;
964 	if (!added) {
965 		if (request->fl_type == F_UNLCK) {
966 			if (request->fl_flags & FL_EXISTS)
967 				error = -ENOENT;
968 			goto out;
969 		}
970 
971 		if (!new_fl) {
972 			error = -ENOLCK;
973 			goto out;
974 		}
975 		locks_copy_lock(new_fl, request);
976 		locks_insert_lock(before, new_fl);
977 		new_fl = NULL;
978 	}
979 	if (right) {
980 		if (left == right) {
981 			/* The new lock breaks the old one in two pieces,
982 			 * so we have to use the second new lock.
983 			 */
984 			left = new_fl2;
985 			new_fl2 = NULL;
986 			locks_copy_lock(left, right);
987 			locks_insert_lock(before, left);
988 		}
989 		right->fl_start = request->fl_end + 1;
990 		locks_wake_up_blocks(right);
991 	}
992 	if (left) {
993 		left->fl_end = request->fl_start - 1;
994 		locks_wake_up_blocks(left);
995 	}
996  out:
997 	unlock_flocks();
998 	/*
999 	 * Free any unused locks.
1000 	 */
1001 	if (new_fl)
1002 		locks_free_lock(new_fl);
1003 	if (new_fl2)
1004 		locks_free_lock(new_fl2);
1005 	return error;
1006 }
1007 
1008 /**
1009  * posix_lock_file - Apply a POSIX-style lock to a file
1010  * @filp: The file to apply the lock to
1011  * @fl: The lock to be applied
1012  * @conflock: Place to return a copy of the conflicting lock, if found.
1013  *
1014  * Add a POSIX style lock to a file.
1015  * We merge adjacent & overlapping locks whenever possible.
1016  * POSIX locks are sorted by owner task, then by starting address
1017  *
1018  * Note that if called with an FL_EXISTS argument, the caller may determine
1019  * whether or not a lock was successfully freed by testing the return
1020  * value for -ENOENT.
1021  */
1022 int posix_lock_file(struct file *filp, struct file_lock *fl,
1023 			struct file_lock *conflock)
1024 {
1025 	return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
1026 }
1027 EXPORT_SYMBOL(posix_lock_file);
1028 
1029 /**
1030  * posix_lock_file_wait - Apply a POSIX-style lock to a file
1031  * @filp: The file to apply the lock to
1032  * @fl: The lock to be applied
1033  *
1034  * Add a POSIX style lock to a file.
1035  * We merge adjacent & overlapping locks whenever possible.
1036  * POSIX locks are sorted by owner task, then by starting address
1037  */
1038 int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1039 {
1040 	int error;
1041 	might_sleep ();
1042 	for (;;) {
1043 		error = posix_lock_file(filp, fl, NULL);
1044 		if (error != FILE_LOCK_DEFERRED)
1045 			break;
1046 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1047 		if (!error)
1048 			continue;
1049 
1050 		locks_delete_block(fl);
1051 		break;
1052 	}
1053 	return error;
1054 }
1055 EXPORT_SYMBOL(posix_lock_file_wait);
1056 
1057 /**
1058  * locks_mandatory_locked - Check for an active lock
1059  * @inode: the file to check
1060  *
1061  * Searches the inode's list of locks to find any POSIX locks which conflict.
1062  * This function is called from locks_verify_locked() only.
1063  */
1064 int locks_mandatory_locked(struct inode *inode)
1065 {
1066 	fl_owner_t owner = current->files;
1067 	struct file_lock *fl;
1068 
1069 	/*
1070 	 * Search the lock list for this inode for any POSIX locks.
1071 	 */
1072 	lock_flocks();
1073 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
1074 		if (!IS_POSIX(fl))
1075 			continue;
1076 		if (fl->fl_owner != owner)
1077 			break;
1078 	}
1079 	unlock_flocks();
1080 	return fl ? -EAGAIN : 0;
1081 }
1082 
1083 /**
1084  * locks_mandatory_area - Check for a conflicting lock
1085  * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
1086  *		for shared
1087  * @inode:      the file to check
1088  * @filp:       how the file was opened (if it was)
1089  * @offset:     start of area to check
1090  * @count:      length of area to check
1091  *
1092  * Searches the inode's list of locks to find any POSIX locks which conflict.
1093  * This function is called from rw_verify_area() and
1094  * locks_verify_truncate().
1095  */
1096 int locks_mandatory_area(int read_write, struct inode *inode,
1097 			 struct file *filp, loff_t offset,
1098 			 size_t count)
1099 {
1100 	struct file_lock fl;
1101 	int error;
1102 
1103 	locks_init_lock(&fl);
1104 	fl.fl_owner = current->files;
1105 	fl.fl_pid = current->tgid;
1106 	fl.fl_file = filp;
1107 	fl.fl_flags = FL_POSIX | FL_ACCESS;
1108 	if (filp && !(filp->f_flags & O_NONBLOCK))
1109 		fl.fl_flags |= FL_SLEEP;
1110 	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
1111 	fl.fl_start = offset;
1112 	fl.fl_end = offset + count - 1;
1113 
1114 	for (;;) {
1115 		error = __posix_lock_file(inode, &fl, NULL);
1116 		if (error != FILE_LOCK_DEFERRED)
1117 			break;
1118 		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
1119 		if (!error) {
1120 			/*
1121 			 * If we've been sleeping someone might have
1122 			 * changed the permissions behind our back.
1123 			 */
1124 			if (__mandatory_lock(inode))
1125 				continue;
1126 		}
1127 
1128 		locks_delete_block(&fl);
1129 		break;
1130 	}
1131 
1132 	return error;
1133 }
1134 
1135 EXPORT_SYMBOL(locks_mandatory_area);
1136 
1137 static void lease_clear_pending(struct file_lock *fl, int arg)
1138 {
1139 	switch (arg) {
1140 	case F_UNLCK:
1141 		fl->fl_flags &= ~FL_UNLOCK_PENDING;
1142 		/* fall through: */
1143 	case F_RDLCK:
1144 		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
1145 	}
1146 }
1147 
1148 /* We already had a lease on this file; just change its type */
1149 int lease_modify(struct file_lock **before, int arg)
1150 {
1151 	struct file_lock *fl = *before;
1152 	int error = assign_type(fl, arg);
1153 
1154 	if (error)
1155 		return error;
1156 	lease_clear_pending(fl, arg);
1157 	locks_wake_up_blocks(fl);
1158 	if (arg == F_UNLCK)
1159 		locks_delete_lock(before);
1160 	return 0;
1161 }
1162 
1163 EXPORT_SYMBOL(lease_modify);
1164 
1165 static bool past_time(unsigned long then)
1166 {
1167 	if (!then)
1168 		/* 0 is a special value meaning "this never expires": */
1169 		return false;
1170 	return time_after(jiffies, then);
1171 }
1172 
1173 static void time_out_leases(struct inode *inode)
1174 {
1175 	struct file_lock **before;
1176 	struct file_lock *fl;
1177 
1178 	before = &inode->i_flock;
1179 	while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) {
1180 		if (past_time(fl->fl_downgrade_time))
1181 			lease_modify(before, F_RDLCK);
1182 		if (past_time(fl->fl_break_time))
1183 			lease_modify(before, F_UNLCK);
1184 		if (fl == *before)	/* lease_modify may have freed fl */
1185 			before = &fl->fl_next;
1186 	}
1187 }
1188 
1189 /**
1190  *	__break_lease	-	revoke all outstanding leases on file
1191  *	@inode: the inode of the file to return
1192  *	@mode: the open mode (read or write)
1193  *
1194  *	break_lease (inlined for speed) has checked there already is at least
1195  *	some kind of lock (maybe a lease) on this file.  Leases are broken on
1196  *	a call to open() or truncate().  This function can sleep unless you
1197  *	specified %O_NONBLOCK to your open().
1198  */
1199 int __break_lease(struct inode *inode, unsigned int mode)
1200 {
1201 	int error = 0;
1202 	struct file_lock *new_fl, *flock;
1203 	struct file_lock *fl;
1204 	unsigned long break_time;
1205 	int i_have_this_lease = 0;
1206 	int want_write = (mode & O_ACCMODE) != O_RDONLY;
1207 
1208 	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1209 	if (IS_ERR(new_fl))
1210 		return PTR_ERR(new_fl);
1211 
1212 	lock_flocks();
1213 
1214 	time_out_leases(inode);
1215 
1216 	flock = inode->i_flock;
1217 	if ((flock == NULL) || !IS_LEASE(flock))
1218 		goto out;
1219 
1220 	if (!locks_conflict(flock, new_fl))
1221 		goto out;
1222 
1223 	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
1224 		if (fl->fl_owner == current->files)
1225 			i_have_this_lease = 1;
1226 
1227 	break_time = 0;
1228 	if (lease_break_time > 0) {
1229 		break_time = jiffies + lease_break_time * HZ;
1230 		if (break_time == 0)
1231 			break_time++;	/* so that 0 means no break time */
1232 	}
1233 
1234 	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
1235 		if (want_write) {
1236 			if (fl->fl_flags & FL_UNLOCK_PENDING)
1237 				continue;
1238 			fl->fl_flags |= FL_UNLOCK_PENDING;
1239 			fl->fl_break_time = break_time;
1240 		} else {
1241 			if (lease_breaking(flock))
1242 				continue;
1243 			fl->fl_flags |= FL_DOWNGRADE_PENDING;
1244 			fl->fl_downgrade_time = break_time;
1245 		}
1246 		fl->fl_lmops->lm_break(fl);
1247 	}
1248 
1249 	if (i_have_this_lease || (mode & O_NONBLOCK)) {
1250 		error = -EWOULDBLOCK;
1251 		goto out;
1252 	}
1253 
1254 restart:
1255 	break_time = flock->fl_break_time;
1256 	if (break_time != 0) {
1257 		break_time -= jiffies;
1258 		if (break_time == 0)
1259 			break_time++;
1260 	}
1261 	locks_insert_block(flock, new_fl);
1262 	unlock_flocks();
1263 	error = wait_event_interruptible_timeout(new_fl->fl_wait,
1264 						!new_fl->fl_next, break_time);
1265 	lock_flocks();
1266 	__locks_delete_block(new_fl);
1267 	if (error >= 0) {
1268 		if (error == 0)
1269 			time_out_leases(inode);
1270 		/*
1271 		 * Wait for the next conflicting lease that has not been
1272 		 * broken yet
1273 		 */
1274 		for (flock = inode->i_flock; flock && IS_LEASE(flock);
1275 				flock = flock->fl_next) {
1276 			if (locks_conflict(new_fl, flock))
1277 				goto restart;
1278 		}
1279 		error = 0;
1280 	}
1281 
1282 out:
1283 	unlock_flocks();
1284 	locks_free_lock(new_fl);
1285 	return error;
1286 }
1287 
1288 EXPORT_SYMBOL(__break_lease);
1289 
1290 /**
1291  *	lease_get_mtime - get the last modified time of an inode
1292  *	@inode: the inode
1293  *      @time:  pointer to a timespec which will contain the last modified time
1294  *
1295  * This is to force NFS clients to flush their caches for files with
1296  * exclusive leases.  The justification is that if someone has an
1297  * exclusive lease, then they could be modifying it.
1298  */
1299 void lease_get_mtime(struct inode *inode, struct timespec *time)
1300 {
1301 	struct file_lock *flock = inode->i_flock;
1302 	if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
1303 		*time = current_fs_time(inode->i_sb);
1304 	else
1305 		*time = inode->i_mtime;
1306 }
1307 
1308 EXPORT_SYMBOL(lease_get_mtime);
1309 
1310 /**
1311  *	fcntl_getlease - Enquire what lease is currently active
1312  *	@filp: the file
1313  *
1314  *	The value returned by this function will be one of
1315  *	(if no lease break is pending):
1316  *
1317  *	%F_RDLCK to indicate a shared lease is held.
1318  *
1319  *	%F_WRLCK to indicate an exclusive lease is held.
1320  *
1321  *	%F_UNLCK to indicate no lease is held.
1322  *
1323  *	(if a lease break is pending):
1324  *
1325  *	%F_RDLCK to indicate an exclusive lease needs to be
1326  *		changed to a shared lease (or removed).
1327  *
1328  *	%F_UNLCK to indicate the lease needs to be removed.
1329  *
1330  *	XXX: sfr & willy disagree over whether F_INPROGRESS
1331  *	should be returned to userspace.
1332  */
1333 int fcntl_getlease(struct file *filp)
1334 {
1335 	struct file_lock *fl;
1336 	int type = F_UNLCK;
1337 
1338 	lock_flocks();
1339 	time_out_leases(filp->f_path.dentry->d_inode);
1340 	for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
1341 			fl = fl->fl_next) {
1342 		if (fl->fl_file == filp) {
1343 			type = target_leasetype(fl);
1344 			break;
1345 		}
1346 	}
1347 	unlock_flocks();
1348 	return type;
1349 }
1350 
1351 int generic_add_lease(struct file *filp, long arg, struct file_lock **flp)
1352 {
1353 	struct file_lock *fl, **before, **my_before = NULL, *lease;
1354 	struct dentry *dentry = filp->f_path.dentry;
1355 	struct inode *inode = dentry->d_inode;
1356 	int error;
1357 
1358 	lease = *flp;
1359 
1360 	error = -EAGAIN;
1361 	if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
1362 		goto out;
1363 	if ((arg == F_WRLCK)
1364 	    && ((dentry->d_count > 1)
1365 		|| (atomic_read(&inode->i_count) > 1)))
1366 		goto out;
1367 
1368 	/*
1369 	 * At this point, we know that if there is an exclusive
1370 	 * lease on this file, then we hold it on this filp
1371 	 * (otherwise our open of this file would have blocked).
1372 	 * And if we are trying to acquire an exclusive lease,
1373 	 * then the file is not open by anyone (including us)
1374 	 * except for this filp.
1375 	 */
1376 	error = -EAGAIN;
1377 	for (before = &inode->i_flock;
1378 			((fl = *before) != NULL) && IS_LEASE(fl);
1379 			before = &fl->fl_next) {
1380 		if (fl->fl_file == filp) {
1381 			my_before = before;
1382 			continue;
1383 		}
1384 		/*
1385 		 * No exclusive leases if someone else has a lease on
1386 		 * this file:
1387 		 */
1388 		if (arg == F_WRLCK)
1389 			goto out;
1390 		/*
1391 		 * Modifying our existing lease is OK, but no getting a
1392 		 * new lease if someone else is opening for write:
1393 		 */
1394 		if (fl->fl_flags & FL_UNLOCK_PENDING)
1395 			goto out;
1396 	}
1397 
1398 	if (my_before != NULL) {
1399 		error = lease->fl_lmops->lm_change(my_before, arg);
1400 		if (!error)
1401 			*flp = *my_before;
1402 		goto out;
1403 	}
1404 
1405 	error = -EINVAL;
1406 	if (!leases_enable)
1407 		goto out;
1408 
1409 	locks_insert_lock(before, lease);
1410 	return 0;
1411 
1412 out:
1413 	return error;
1414 }
1415 
1416 int generic_delete_lease(struct file *filp, struct file_lock **flp)
1417 {
1418 	struct file_lock *fl, **before;
1419 	struct dentry *dentry = filp->f_path.dentry;
1420 	struct inode *inode = dentry->d_inode;
1421 
1422 	for (before = &inode->i_flock;
1423 			((fl = *before) != NULL) && IS_LEASE(fl);
1424 			before = &fl->fl_next) {
1425 		if (fl->fl_file != filp)
1426 			continue;
1427 		return (*flp)->fl_lmops->lm_change(before, F_UNLCK);
1428 	}
1429 	return -EAGAIN;
1430 }
1431 
1432 /**
1433  *	generic_setlease	-	sets a lease on an open file
1434  *	@filp: file pointer
1435  *	@arg: type of lease to obtain
1436  *	@flp: input - file_lock to use, output - file_lock inserted
1437  *
1438  *	The (input) flp->fl_lmops->lm_break function is required
1439  *	by break_lease().
1440  *
1441  *	Called with file_lock_lock held.
1442  */
1443 int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
1444 {
1445 	struct dentry *dentry = filp->f_path.dentry;
1446 	struct inode *inode = dentry->d_inode;
1447 	int error;
1448 
1449 	if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
1450 		return -EACCES;
1451 	if (!S_ISREG(inode->i_mode))
1452 		return -EINVAL;
1453 	error = security_file_lock(filp, arg);
1454 	if (error)
1455 		return error;
1456 
1457 	time_out_leases(inode);
1458 
1459 	BUG_ON(!(*flp)->fl_lmops->lm_break);
1460 
1461 	switch (arg) {
1462 	case F_UNLCK:
1463 		return generic_delete_lease(filp, flp);
1464 	case F_RDLCK:
1465 	case F_WRLCK:
1466 		return generic_add_lease(filp, arg, flp);
1467 	default:
1468 		return -EINVAL;
1469 	}
1470 }
1471 EXPORT_SYMBOL(generic_setlease);
1472 
1473 static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1474 {
1475 	if (filp->f_op && filp->f_op->setlease)
1476 		return filp->f_op->setlease(filp, arg, lease);
1477 	else
1478 		return generic_setlease(filp, arg, lease);
1479 }
1480 
1481 /**
1482  *	vfs_setlease        -       sets a lease on an open file
1483  *	@filp: file pointer
1484  *	@arg: type of lease to obtain
1485  *	@lease: file_lock to use
1486  *
1487  *	Call this to establish a lease on the file.
1488  *	The (*lease)->fl_lmops->lm_break operation must be set; if not,
1489  *	break_lease will oops!
1490  *
1491  *	This will call the filesystem's setlease file method, if
1492  *	defined.  Note that there is no getlease method; instead, the
1493  *	filesystem setlease method should call back to setlease() to
1494  *	add a lease to the inode's lease list, where fcntl_getlease() can
1495  *	find it.  Since fcntl_getlease() only reports whether the current
1496  *	task holds a lease, a cluster filesystem need only do this for
1497  *	leases held by processes on this node.
1498  *
1499  *	There is also no break_lease method; filesystems that
1500  *	handle their own leases should break leases themselves from the
1501  *	filesystem's open, create, and (on truncate) setattr methods.
1502  *
1503  *	Warning: the only current setlease methods exist only to disable
1504  *	leases in certain cases.  More vfs changes may be required to
1505  *	allow a full filesystem lease implementation.
1506  */
1507 
1508 int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1509 {
1510 	int error;
1511 
1512 	lock_flocks();
1513 	error = __vfs_setlease(filp, arg, lease);
1514 	unlock_flocks();
1515 
1516 	return error;
1517 }
1518 EXPORT_SYMBOL_GPL(vfs_setlease);
1519 
1520 static int do_fcntl_delete_lease(struct file *filp)
1521 {
1522 	struct file_lock fl, *flp = &fl;
1523 
1524 	lease_init(filp, F_UNLCK, flp);
1525 
1526 	return vfs_setlease(filp, F_UNLCK, &flp);
1527 }
1528 
1529 static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1530 {
1531 	struct file_lock *fl, *ret;
1532 	struct fasync_struct *new;
1533 	int error;
1534 
1535 	fl = lease_alloc(filp, arg);
1536 	if (IS_ERR(fl))
1537 		return PTR_ERR(fl);
1538 
1539 	new = fasync_alloc();
1540 	if (!new) {
1541 		locks_free_lock(fl);
1542 		return -ENOMEM;
1543 	}
1544 	ret = fl;
1545 	lock_flocks();
1546 	error = __vfs_setlease(filp, arg, &ret);
1547 	if (error) {
1548 		unlock_flocks();
1549 		locks_free_lock(fl);
1550 		goto out_free_fasync;
1551 	}
1552 	if (ret != fl)
1553 		locks_free_lock(fl);
1554 
1555 	/*
1556 	 * fasync_insert_entry() returns the old entry if any.
1557 	 * If there was no old entry, then it used 'new' and
1558 	 * inserted it into the fasync list. Clear new so that
1559 	 * we don't release it here.
1560 	 */
1561 	if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
1562 		new = NULL;
1563 
1564 	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
1565 	unlock_flocks();
1566 
1567 out_free_fasync:
1568 	if (new)
1569 		fasync_free(new);
1570 	return error;
1571 }
1572 
1573 /**
1574  *	fcntl_setlease	-	sets a lease on an open file
1575  *	@fd: open file descriptor
1576  *	@filp: file pointer
1577  *	@arg: type of lease to obtain
1578  *
1579  *	Call this fcntl to establish a lease on the file.
1580  *	Note that you also need to call %F_SETSIG to
1581  *	receive a signal when the lease is broken.
1582  */
1583 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1584 {
1585 	if (arg == F_UNLCK)
1586 		return do_fcntl_delete_lease(filp);
1587 	return do_fcntl_add_lease(fd, filp, arg);
1588 }
1589 
1590 /**
1591  * flock_lock_file_wait - Apply a FLOCK-style lock to a file
1592  * @filp: The file to apply the lock to
1593  * @fl: The lock to be applied
1594  *
1595  * Add a FLOCK style lock to a file.
1596  */
1597 int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
1598 {
1599 	int error;
1600 	might_sleep();
1601 	for (;;) {
1602 		error = flock_lock_file(filp, fl);
1603 		if (error != FILE_LOCK_DEFERRED)
1604 			break;
1605 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1606 		if (!error)
1607 			continue;
1608 
1609 		locks_delete_block(fl);
1610 		break;
1611 	}
1612 	return error;
1613 }
1614 
1615 EXPORT_SYMBOL(flock_lock_file_wait);
1616 
1617 /**
1618  *	sys_flock: - flock() system call.
1619  *	@fd: the file descriptor to lock.
1620  *	@cmd: the type of lock to apply.
1621  *
1622  *	Apply a %FL_FLOCK style lock to an open file descriptor.
1623  *	The @cmd can be one of
1624  *
1625  *	%LOCK_SH -- a shared lock.
1626  *
1627  *	%LOCK_EX -- an exclusive lock.
1628  *
1629  *	%LOCK_UN -- remove an existing lock.
1630  *
1631  *	%LOCK_MAND -- a `mandatory' flock.  This exists to emulate Windows Share Modes.
1632  *
1633  *	%LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
1634  *	processes read and write access respectively.
1635  */
1636 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
1637 {
1638 	struct file *filp;
1639 	struct file_lock *lock;
1640 	int can_sleep, unlock;
1641 	int error;
1642 
1643 	error = -EBADF;
1644 	filp = fget(fd);
1645 	if (!filp)
1646 		goto out;
1647 
1648 	can_sleep = !(cmd & LOCK_NB);
1649 	cmd &= ~LOCK_NB;
1650 	unlock = (cmd == LOCK_UN);
1651 
1652 	if (!unlock && !(cmd & LOCK_MAND) &&
1653 	    !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
1654 		goto out_putf;
1655 
1656 	error = flock_make_lock(filp, &lock, cmd);
1657 	if (error)
1658 		goto out_putf;
1659 	if (can_sleep)
1660 		lock->fl_flags |= FL_SLEEP;
1661 
1662 	error = security_file_lock(filp, lock->fl_type);
1663 	if (error)
1664 		goto out_free;
1665 
1666 	if (filp->f_op && filp->f_op->flock)
1667 		error = filp->f_op->flock(filp,
1668 					  (can_sleep) ? F_SETLKW : F_SETLK,
1669 					  lock);
1670 	else
1671 		error = flock_lock_file_wait(filp, lock);
1672 
1673  out_free:
1674 	locks_free_lock(lock);
1675 
1676  out_putf:
1677 	fput(filp);
1678  out:
1679 	return error;
1680 }
1681 
1682 /**
1683  * vfs_test_lock - test file byte range lock
1684  * @filp: The file to test lock for
1685  * @fl: The lock to test; also used to hold result
1686  *
1687  * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
1688  * setting conf->fl_type to something other than F_UNLCK.
1689  */
1690 int vfs_test_lock(struct file *filp, struct file_lock *fl)
1691 {
1692 	if (filp->f_op && filp->f_op->lock)
1693 		return filp->f_op->lock(filp, F_GETLK, fl);
1694 	posix_test_lock(filp, fl);
1695 	return 0;
1696 }
1697 EXPORT_SYMBOL_GPL(vfs_test_lock);
1698 
1699 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
1700 {
1701 	flock->l_pid = fl->fl_pid;
1702 #if BITS_PER_LONG == 32
1703 	/*
1704 	 * Make sure we can represent the posix lock via
1705 	 * legacy 32bit flock.
1706 	 */
1707 	if (fl->fl_start > OFFT_OFFSET_MAX)
1708 		return -EOVERFLOW;
1709 	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
1710 		return -EOVERFLOW;
1711 #endif
1712 	flock->l_start = fl->fl_start;
1713 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1714 		fl->fl_end - fl->fl_start + 1;
1715 	flock->l_whence = 0;
1716 	flock->l_type = fl->fl_type;
1717 	return 0;
1718 }
1719 
1720 #if BITS_PER_LONG == 32
1721 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
1722 {
1723 	flock->l_pid = fl->fl_pid;
1724 	flock->l_start = fl->fl_start;
1725 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1726 		fl->fl_end - fl->fl_start + 1;
1727 	flock->l_whence = 0;
1728 	flock->l_type = fl->fl_type;
1729 }
1730 #endif
1731 
1732 /* Report the first existing lock that would conflict with l.
1733  * This implements the F_GETLK command of fcntl().
1734  */
1735 int fcntl_getlk(struct file *filp, struct flock __user *l)
1736 {
1737 	struct file_lock file_lock;
1738 	struct flock flock;
1739 	int error;
1740 
1741 	error = -EFAULT;
1742 	if (copy_from_user(&flock, l, sizeof(flock)))
1743 		goto out;
1744 	error = -EINVAL;
1745 	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1746 		goto out;
1747 
1748 	error = flock_to_posix_lock(filp, &file_lock, &flock);
1749 	if (error)
1750 		goto out;
1751 
1752 	error = vfs_test_lock(filp, &file_lock);
1753 	if (error)
1754 		goto out;
1755 
1756 	flock.l_type = file_lock.fl_type;
1757 	if (file_lock.fl_type != F_UNLCK) {
1758 		error = posix_lock_to_flock(&flock, &file_lock);
1759 		if (error)
1760 			goto out;
1761 	}
1762 	error = -EFAULT;
1763 	if (!copy_to_user(l, &flock, sizeof(flock)))
1764 		error = 0;
1765 out:
1766 	return error;
1767 }
1768 
1769 /**
1770  * vfs_lock_file - file byte range lock
1771  * @filp: The file to apply the lock to
1772  * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
1773  * @fl: The lock to be applied
1774  * @conf: Place to return a copy of the conflicting lock, if found.
1775  *
1776  * A caller that doesn't care about the conflicting lock may pass NULL
1777  * as the final argument.
1778  *
1779  * If the filesystem defines a private ->lock() method, then @conf will
1780  * be left unchanged; so a caller that cares should initialize it to
1781  * some acceptable default.
1782  *
1783  * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
1784  * locks, the ->lock() interface may return asynchronously, before the lock has
1785  * been granted or denied by the underlying filesystem, if (and only if)
1786  * lm_grant is set. Callers expecting ->lock() to return asynchronously
1787  * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
1788  * the request is for a blocking lock. When ->lock() does return asynchronously,
1789  * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
1790  * request completes.
1791  * If the request is for a non-blocking lock, the filesystem should return
1792  * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
1793  * with the result. If the request times out, the callback routine will return a
1794  * nonzero return code and the filesystem should release the lock. The
1795  * filesystem is also responsible for keeping a corresponding posix lock when it
1796  * grants a lock, so the VFS can find out which locks are locally held and do
1797  * the correct lock cleanup when required.
1798  * The underlying filesystem must not drop the kernel lock or call
1799  * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
1800  * return code.
1801  */
1802 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
1803 {
1804 	if (filp->f_op && filp->f_op->lock)
1805 		return filp->f_op->lock(filp, cmd, fl);
1806 	else
1807 		return posix_lock_file(filp, fl, conf);
1808 }
1809 EXPORT_SYMBOL_GPL(vfs_lock_file);
1810 
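/*
 * Added illustration (not in the original source): the retry pattern a
 * caller follows when vfs_lock_file() defers the request; it is the
 * same loop used by do_lock_file_wait() just below.
 *
 *	for (;;) {
 *		error = vfs_lock_file(filp, cmd, fl, NULL);
 *		if (error != FILE_LOCK_DEFERRED)
 *			break;			// granted, denied, or failed outright
 *		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
 *		if (error) {			// interrupted by a signal
 *			locks_delete_block(fl);
 *			break;
 *		}
 *	}
 */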
1811 static int do_lock_file_wait(struct file *filp, unsigned int cmd,
1812 			     struct file_lock *fl)
1813 {
1814 	int error;
1815 
1816 	error = security_file_lock(filp, fl->fl_type);
1817 	if (error)
1818 		return error;
1819 
1820 	for (;;) {
1821 		error = vfs_lock_file(filp, cmd, fl, NULL);
1822 		if (error != FILE_LOCK_DEFERRED)
1823 			break;
1824 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1825 		if (!error)
1826 			continue;
1827 
1828 		locks_delete_block(fl);
1829 		break;
1830 	}
1831 
1832 	return error;
1833 }
1834 
1835 /* Apply the lock described by l to an open file descriptor.
1836  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
1837  */
1838 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
1839 		struct flock __user *l)
1840 {
1841 	struct file_lock *file_lock = locks_alloc_lock();
1842 	struct flock flock;
1843 	struct inode *inode;
1844 	struct file *f;
1845 	int error;
1846 
1847 	if (file_lock == NULL)
1848 		return -ENOLCK;
1849 
1850 	/*
1851 	 * This might block, so we do it before checking the inode.
1852 	 */
1853 	error = -EFAULT;
1854 	if (copy_from_user(&flock, l, sizeof(flock)))
1855 		goto out;
1856 
1857 	inode = filp->f_path.dentry->d_inode;
1858 
1859 	/* Don't allow mandatory locks on files that may be memory mapped
1860 	 * and shared.
1861 	 */
1862 	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1863 		error = -EAGAIN;
1864 		goto out;
1865 	}
1866 
1867 again:
1868 	error = flock_to_posix_lock(filp, file_lock, &flock);
1869 	if (error)
1870 		goto out;
1871 	if (cmd == F_SETLKW) {
1872 		file_lock->fl_flags |= FL_SLEEP;
1873 	}
1874 
1875 	error = -EBADF;
1876 	switch (flock.l_type) {
1877 	case F_RDLCK:
1878 		if (!(filp->f_mode & FMODE_READ))
1879 			goto out;
1880 		break;
1881 	case F_WRLCK:
1882 		if (!(filp->f_mode & FMODE_WRITE))
1883 			goto out;
1884 		break;
1885 	case F_UNLCK:
1886 		break;
1887 	default:
1888 		error = -EINVAL;
1889 		goto out;
1890 	}
1891 
1892 	error = do_lock_file_wait(filp, cmd, file_lock);
1893 
1894 	/*
1895 	 * Attempt to detect a close/fcntl race and recover by
1896 	 * releasing the lock that was just acquired.
1897 	 */
1898 	/*
1899 	 * we need that spin_lock here - it prevents reordering between
1900 	 * update of inode->i_flock and check for it done in close().
1901 	 * rcu_read_lock() wouldn't do.
1902 	 */
1903 	spin_lock(&current->files->file_lock);
1904 	f = fcheck(fd);
1905 	spin_unlock(&current->files->file_lock);
1906 	if (!error && f != filp && flock.l_type != F_UNLCK) {
1907 		flock.l_type = F_UNLCK;
1908 		goto again;
1909 	}
1910 
1911 out:
1912 	locks_free_lock(file_lock);
1913 	return error;
1914 }
1915 
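/*
 * Illustrative sketch (added, not part of the original source): the
 * userspace side of the F_SETLK/F_SETLKW path implemented by
 * fcntl_setlk() above. Assumes a descriptor fd opened with write
 * access; the range is an arbitrary example.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	int lock_first_4k(int fd)
 *	{
 *		struct flock fl = {
 *			.l_type   = F_WRLCK,
 *			.l_whence = SEEK_SET,
 *			.l_start  = 0,
 *			.l_len    = 4096,
 *		};
 *
 *		if (fcntl(fd, F_SETLKW, &fl) == -1) {	// F_SETLK would return EAGAIN/EACCES instead of blocking
 *			perror("F_SETLKW");
 *			return -1;
 *		}
 *		fl.l_type = F_UNLCK;			// later: release the same range
 *		return fcntl(fd, F_SETLK, &fl);
 *	}
 */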
1916 #if BITS_PER_LONG == 32
1917 /* Report the first existing lock that would conflict with l.
1918  * This implements the F_GETLK command of fcntl().
1919  */
1920 int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
1921 {
1922 	struct file_lock file_lock;
1923 	struct flock64 flock;
1924 	int error;
1925 
1926 	error = -EFAULT;
1927 	if (copy_from_user(&flock, l, sizeof(flock)))
1928 		goto out;
1929 	error = -EINVAL;
1930 	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1931 		goto out;
1932 
1933 	error = flock64_to_posix_lock(filp, &file_lock, &flock);
1934 	if (error)
1935 		goto out;
1936 
1937 	error = vfs_test_lock(filp, &file_lock);
1938 	if (error)
1939 		goto out;
1940 
1941 	flock.l_type = file_lock.fl_type;
1942 	if (file_lock.fl_type != F_UNLCK)
1943 		posix_lock_to_flock64(&flock, &file_lock);
1944 
1945 	error = -EFAULT;
1946 	if (!copy_to_user(l, &flock, sizeof(flock)))
1947 		error = 0;
1948 
1949 out:
1950 	return error;
1951 }
1952 
1953 /* Apply the lock described by l to an open file descriptor.
1954  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
1955  */
1956 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
1957 		struct flock64 __user *l)
1958 {
1959 	struct file_lock *file_lock = locks_alloc_lock();
1960 	struct flock64 flock;
1961 	struct inode *inode;
1962 	struct file *f;
1963 	int error;
1964 
1965 	if (file_lock == NULL)
1966 		return -ENOLCK;
1967 
1968 	/*
1969 	 * This might block, so we do it before checking the inode.
1970 	 */
1971 	error = -EFAULT;
1972 	if (copy_from_user(&flock, l, sizeof(flock)))
1973 		goto out;
1974 
1975 	inode = filp->f_path.dentry->d_inode;
1976 
1977 	/* Don't allow mandatory locks on files that may be memory mapped
1978 	 * and shared.
1979 	 */
1980 	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1981 		error = -EAGAIN;
1982 		goto out;
1983 	}
1984 
1985 again:
1986 	error = flock64_to_posix_lock(filp, file_lock, &flock);
1987 	if (error)
1988 		goto out;
1989 	if (cmd == F_SETLKW64) {
1990 		file_lock->fl_flags |= FL_SLEEP;
1991 	}
1992 
1993 	error = -EBADF;
1994 	switch (flock.l_type) {
1995 	case F_RDLCK:
1996 		if (!(filp->f_mode & FMODE_READ))
1997 			goto out;
1998 		break;
1999 	case F_WRLCK:
2000 		if (!(filp->f_mode & FMODE_WRITE))
2001 			goto out;
2002 		break;
2003 	case F_UNLCK:
2004 		break;
2005 	default:
2006 		error = -EINVAL;
2007 		goto out;
2008 	}
2009 
2010 	error = do_lock_file_wait(filp, cmd, file_lock);
2011 
2012 	/*
2013 	 * Attempt to detect a close/fcntl race and recover by
2014 	 * releasing the lock that was just acquired.
2015 	 */
2016 	spin_lock(&current->files->file_lock);
2017 	f = fcheck(fd);
2018 	spin_unlock(&current->files->file_lock);
2019 	if (!error && f != filp && flock.l_type != F_UNLCK) {
2020 		flock.l_type = F_UNLCK;
2021 		goto again;
2022 	}
2023 
2024 out:
2025 	locks_free_lock(file_lock);
2026 	return error;
2027 }
2028 #endif /* BITS_PER_LONG == 32 */
2029 
2030 /*
2031  * This function is called when the file is being removed
2032  * from the task's fd array.  POSIX locks belonging to this task
2033  * are deleted at this time.
2034  */
2035 void locks_remove_posix(struct file *filp, fl_owner_t owner)
2036 {
2037 	struct file_lock lock;
2038 
2039 	/*
2040 	 * If there are no locks held on this file, we don't need to call
2041 	 * posix_lock_file().  Another process could be setting a lock on this
2042 	 * file at the same time, but we wouldn't remove that lock anyway.
2043 	 */
2044 	if (!filp->f_path.dentry->d_inode->i_flock)
2045 		return;
2046 
2047 	lock.fl_type = F_UNLCK;
2048 	lock.fl_flags = FL_POSIX | FL_CLOSE;
2049 	lock.fl_start = 0;
2050 	lock.fl_end = OFFSET_MAX;
2051 	lock.fl_owner = owner;
2052 	lock.fl_pid = current->tgid;
2053 	lock.fl_file = filp;
2054 	lock.fl_ops = NULL;
2055 	lock.fl_lmops = NULL;
2056 
2057 	vfs_lock_file(filp, F_SETLK, &lock, NULL);
2058 
2059 	if (lock.fl_ops && lock.fl_ops->fl_release_private)
2060 		lock.fl_ops->fl_release_private(&lock);
2061 }
2062 
2063 EXPORT_SYMBOL(locks_remove_posix);
2064 
2065 /*
2066  * This function is called on the last close of an open file.
2067  */
2068 void locks_remove_flock(struct file *filp)
2069 {
2070 	struct inode * inode = filp->f_path.dentry->d_inode;
2071 	struct file_lock *fl;
2072 	struct file_lock **before;
2073 
2074 	if (!inode->i_flock)
2075 		return;
2076 
2077 	if (filp->f_op && filp->f_op->flock) {
2078 		struct file_lock fl = {
2079 			.fl_pid = current->tgid,
2080 			.fl_file = filp,
2081 			.fl_flags = FL_FLOCK,
2082 			.fl_type = F_UNLCK,
2083 			.fl_end = OFFSET_MAX,
2084 		};
2085 		filp->f_op->flock(filp, F_SETLKW, &fl);
2086 		if (fl.fl_ops && fl.fl_ops->fl_release_private)
2087 			fl.fl_ops->fl_release_private(&fl);
2088 	}
2089 
2090 	lock_flocks();
2091 	before = &inode->i_flock;
2092 
2093 	while ((fl = *before) != NULL) {
2094 		if (fl->fl_file == filp) {
2095 			if (IS_FLOCK(fl)) {
2096 				locks_delete_lock(before);
2097 				continue;
2098 			}
2099 			if (IS_LEASE(fl)) {
2100 				lease_modify(before, F_UNLCK);
2101 				continue;
2102 			}
2103 			/* What? */
2104 			BUG();
2105  		}
2106 		before = &fl->fl_next;
2107 	}
2108 	unlock_flocks();
2109 }
2110 
2111 /**
2112  *	posix_unblock_lock - stop waiting for a file lock
2113  *	@filp:   how the file was opened
2114  *	@waiter: the lock which was waiting
2115  *
2116  *	lockd needs to block waiting for locks.
2117  */
2118 int
2119 posix_unblock_lock(struct file *filp, struct file_lock *waiter)
2120 {
2121 	int status = 0;
2122 
2123 	lock_flocks();
2124 	if (waiter->fl_next)
2125 		__locks_delete_block(waiter);
2126 	else
2127 		status = -ENOENT;
2128 	unlock_flocks();
2129 	return status;
2130 }
2131 
2132 EXPORT_SYMBOL(posix_unblock_lock);
2133 
2134 /**
2135  * vfs_cancel_lock - file byte range unblock lock
2136  * @filp: The file to apply the unblock to
2137  * @fl: The lock to be unblocked
2138  *
2139  * Used by lock managers to cancel blocked requests
2140  */
2141 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2142 {
2143 	if (filp->f_op && filp->f_op->lock)
2144 		return filp->f_op->lock(filp, F_CANCELLK, fl);
2145 	return 0;
2146 }
2147 
2148 EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2149 
2150 #ifdef CONFIG_PROC_FS
2151 #include <linux/proc_fs.h>
2152 #include <linux/seq_file.h>
2153 
2154 static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2155 			    loff_t id, char *pfx)
2156 {
2157 	struct inode *inode = NULL;
2158 	unsigned int fl_pid;
2159 
2160 	if (fl->fl_nspid)
2161 		fl_pid = pid_vnr(fl->fl_nspid);
2162 	else
2163 		fl_pid = fl->fl_pid;
2164 
2165 	if (fl->fl_file != NULL)
2166 		inode = fl->fl_file->f_path.dentry->d_inode;
2167 
2168 	seq_printf(f, "%lld:%s ", id, pfx);
2169 	if (IS_POSIX(fl)) {
2170 		seq_printf(f, "%6s %s ",
2171 			     (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
2172 			     (inode == NULL) ? "*NOINODE*" :
2173 			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
2174 	} else if (IS_FLOCK(fl)) {
2175 		if (fl->fl_type & LOCK_MAND) {
2176 			seq_printf(f, "FLOCK  MSNFS     ");
2177 		} else {
2178 			seq_printf(f, "FLOCK  ADVISORY  ");
2179 		}
2180 	} else if (IS_LEASE(fl)) {
2181 		seq_printf(f, "LEASE  ");
2182 		if (lease_breaking(fl))
2183 			seq_printf(f, "BREAKING  ");
2184 		else if (fl->fl_file)
2185 			seq_printf(f, "ACTIVE    ");
2186 		else
2187 			seq_printf(f, "BREAKER   ");
2188 	} else {
2189 		seq_printf(f, "UNKNOWN UNKNOWN  ");
2190 	}
2191 	if (fl->fl_type & LOCK_MAND) {
2192 		seq_printf(f, "%s ",
2193 			       (fl->fl_type & LOCK_READ)
2194 			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
2195 			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2196 	} else {
2197 		seq_printf(f, "%s ",
2198 			       (lease_breaking(fl))
2199 			       ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
2200 			       : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
2201 	}
2202 	if (inode) {
2203 #ifdef WE_CAN_BREAK_LSLK_NOW
2204 		seq_printf(f, "%d %s:%ld ", fl_pid,
2205 				inode->i_sb->s_id, inode->i_ino);
2206 #else
2207 		/* userspace relies on this representation of dev_t ;-( */
2208 		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
2209 				MAJOR(inode->i_sb->s_dev),
2210 				MINOR(inode->i_sb->s_dev), inode->i_ino);
2211 #endif
2212 	} else {
2213 		seq_printf(f, "%d <none>:0 ", fl_pid);
2214 	}
2215 	if (IS_POSIX(fl)) {
2216 		if (fl->fl_end == OFFSET_MAX)
2217 			seq_printf(f, "%Ld EOF\n", fl->fl_start);
2218 		else
2219 			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2220 	} else {
2221 		seq_printf(f, "0 EOF\n");
2222 	}
2223 }
2224 
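/*
 * Illustrative example (added, not part of the original source): a line
 * that lock_get_status() above might emit into /proc/locks; the
 * specific values are made up.
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:5678 0 EOF
 *
 * Fields, left to right: lock id, lock class (POSIX/FLOCK/LEASE),
 * ADVISORY vs MANDATORY, lock type, pid, major:minor:inode of the
 * backing file, then the byte range (EOF meaning "to end of file").
 */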
2225 static int locks_show(struct seq_file *f, void *v)
2226 {
2227 	struct file_lock *fl, *bfl;
2228 
2229 	fl = list_entry(v, struct file_lock, fl_link);
2230 
2231 	lock_get_status(f, fl, *((loff_t *)f->private), "");
2232 
2233 	list_for_each_entry(bfl, &fl->fl_block, fl_block)
2234 		lock_get_status(f, bfl, *((loff_t *)f->private), " ->");
2235 
2236 	return 0;
2237 }
2238 
2239 static void *locks_start(struct seq_file *f, loff_t *pos)
2240 {
2241 	loff_t *p = f->private;
2242 
2243 	lock_flocks();
2244 	*p = (*pos + 1);
2245 	return seq_list_start(&file_lock_list, *pos);
2246 }
2247 
2248 static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2249 {
2250 	loff_t *p = f->private;
2251 	++*p;
2252 	return seq_list_next(v, &file_lock_list, pos);
2253 }
2254 
2255 static void locks_stop(struct seq_file *f, void *v)
2256 {
2257 	unlock_flocks();
2258 }
2259 
2260 static const struct seq_operations locks_seq_operations = {
2261 	.start	= locks_start,
2262 	.next	= locks_next,
2263 	.stop	= locks_stop,
2264 	.show	= locks_show,
2265 };
2266 
2267 static int locks_open(struct inode *inode, struct file *filp)
2268 {
2269 	return seq_open_private(filp, &locks_seq_operations, sizeof(loff_t));
2270 }
2271 
2272 static const struct file_operations proc_locks_operations = {
2273 	.open		= locks_open,
2274 	.read		= seq_read,
2275 	.llseek		= seq_lseek,
2276 	.release	= seq_release_private,
2277 };
2278 
2279 static int __init proc_locks_init(void)
2280 {
2281 	proc_create("locks", 0, NULL, &proc_locks_operations);
2282 	return 0;
2283 }
2284 module_init(proc_locks_init);
2285 #endif
2286 
2287 /**
2288  *	lock_may_read - checks that the region is free of locks
2289  *	@inode: the inode that is being read
2290  *	@start: the first byte to read
2291  *	@len: the number of bytes to read
2292  *
2293  *	Emulates Windows locking requirements.  Whole-file
2294  *	mandatory locks (share modes) can prohibit a read and
2295  *	byte-range POSIX locks can prohibit a read if they overlap.
2296  *
2297  *	N.B. this function is only ever called
2298  *	from knfsd and ownership of locks is never checked.
2299  */
2300 int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
2301 {
2302 	struct file_lock *fl;
2303 	int result = 1;
2304 	lock_flocks();
2305 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2306 		if (IS_POSIX(fl)) {
2307 			if (fl->fl_type == F_RDLCK)
2308 				continue;
2309 			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2310 				continue;
2311 		} else if (IS_FLOCK(fl)) {
2312 			if (!(fl->fl_type & LOCK_MAND))
2313 				continue;
2314 			if (fl->fl_type & LOCK_READ)
2315 				continue;
2316 		} else
2317 			continue;
2318 		result = 0;
2319 		break;
2320 	}
2321 	unlock_flocks();
2322 	return result;
2323 }
2324 
2325 EXPORT_SYMBOL(lock_may_read);
2326 
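/*
 * Illustrative sketch (added, not part of the original source): how a
 * caller such as knfsd might use lock_may_read() to honour mandatory
 * locks before servicing a read. The error code and surrounding
 * structure are hypothetical, chosen only to show the intended check.
 *
 *	if (!lock_may_read(inode, offset, count))
 *		return -EACCES;		// region covered by a conflicting lock
 *	// ... go ahead and read [offset, offset + count) ...
 */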
2327 /**
2328  *	lock_may_write - checks that the region is free of locks
2329  *	@inode: the inode that is being written
2330  *	@start: the first byte to write
2331  *	@len: the number of bytes to write
2332  *
2333  *	Emulates Windows locking requirements.  Whole-file
2334  *	mandatory locks (share modes) can prohibit a write and
2335  *	byte-range POSIX locks can prohibit a write if they overlap.
2336  *
2337  *	N.B. this function is only ever called
2338  *	from knfsd and ownership of locks is never checked.
2339  */
2340 int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2341 {
2342 	struct file_lock *fl;
2343 	int result = 1;
2344 	lock_flocks();
2345 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2346 		if (IS_POSIX(fl)) {
2347 			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2348 				continue;
2349 		} else if (IS_FLOCK(fl)) {
2350 			if (!(fl->fl_type & LOCK_MAND))
2351 				continue;
2352 			if (fl->fl_type & LOCK_WRITE)
2353 				continue;
2354 		} else
2355 			continue;
2356 		result = 0;
2357 		break;
2358 	}
2359 	unlock_flocks();
2360 	return result;
2361 }
2362 
2363 EXPORT_SYMBOL(lock_may_write);
2364 
2365 static int __init filelock_init(void)
2366 {
2367 	filelock_cache = kmem_cache_create("file_lock_cache",
2368 			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2369 
2370 	return 0;
2371 }
2372 
2373 core_initcall(filelock_init);
2374