// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/locks.c
 *
 * Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 * Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 * Deadlock detection added.
 * FIXME: one thing isn't handled yet:
 *	- mandatory locks (requires lots of changes elsewhere)
 * Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 * Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 * Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
 *
 * Converted file_lock_table to a linked list from an array, which eliminates
 * the limits on how many active file locks are open.
 * Chad Page (pageone@netcom.com), November 27, 1994
 *
 * Removed dependency on file descriptors. dup()'ed file descriptors now
 * get the same locks as the original file descriptors, and a close() on
 * any file descriptor removes ALL the locks on the file for the current
 * process. Since locks still depend on the process id, locks are inherited
 * after an exec() but not after a fork(). This agrees with POSIX, and both
 * BSD and SVR4 practice.
 * Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
 *
 * Scrapped free list which is redundant now that we allocate locks
 * dynamically with kmalloc()/kfree().
 * Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 * Implemented two lock personalities - FL_FLOCK and FL_POSIX.
 *
 * FL_POSIX locks are created with calls to fcntl() and lockf() through the
 * fcntl() system call. They have the semantics described above.
 *
 * FL_FLOCK locks are created with calls to flock(), through the flock()
 * system call, which is new. Old C libraries implement flock() via fcntl()
 * and will continue to use the old, broken implementation.
 *
 * FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 * with a file pointer (filp). As a result they can be shared by a parent
 * process and its children after a fork(). They are removed when the last
 * file descriptor referring to the file pointer is closed (unless explicitly
 * unlocked).
 *
 * FL_FLOCK locks never deadlock, an existing lock is always removed before
 * upgrading from shared to exclusive (or vice versa). When this happens
 * any processes blocked by the current lock are woken up and allowed to
 * run before the new lock is applied.
 * Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
 *
 * Removed some race conditions in flock_lock_file(), marked other possible
 * races. Just grep for FIXME to see them.
 * Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
 *
 * Addressed Dmitry's concerns. Deadlock checking no longer recursive.
 * Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
 * once we've checked for blocking and deadlocking.
 * Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
 *
 * Initial implementation of mandatory locks. SunOS turned out to be
 * a rotten model, so I implemented the "obvious" semantics.
 * See 'Documentation/filesystems/mandatory-locking.rst' for details.
 * Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
 *
 * Don't allow mandatory locks on mmap()'ed files. Added simple functions to
 * check if a file has mandatory locks, used by mmap(), open() and creat() to
 * see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
 * Manual, Section 2.
 * Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
 *
 * Tidied up block list handling. Added '/proc/locks' interface.
 * Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
 *
 * Fixed deadlock condition for pathological code that mixes calls to
 * flock() and fcntl().
 * Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
 *
 * Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
 * for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
 * guarantee sensible behaviour in the case where file system modules might
 * be compiled with different options than the kernel itself.
 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 * Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
 * (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 * Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
 * locks. Changed process synchronisation to avoid dereferencing locks that
 * have already been freed.
 * Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
 *
 * Made the block list a circular list to minimise searching in the list.
 * Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
 *
 * Made mandatory locking a mount option. Default is not to allow mandatory
 * locking.
 * Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 *
 * Some adaptations for NFS support.
 * Olaf Kirch (okir@monad.swb.de), Dec 1996.
 *
 * Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 * Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 *
 * Use slab allocator instead of kmalloc/kfree.
 * Use generic list implementation from <linux/list.h>.
 * Sped up posix_locks_deadlock by only considering blocked locks.
 * Matthew Wilcox <willy@debian.org>, March, 2000.
 *
 * Leases and LOCK_MAND
 * Matthew Wilcox <willy@debian.org>, June, 2000.
 * Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
 *
 * Locking conflicts and dependencies:
 * If multiple threads attempt to lock the same byte (or flock the same file)
 * only one can be granted the lock, and the others must wait their turn.
 * The first lock has been "applied" or "granted", the others are "waiting"
 * and are "blocked" by the "applied" lock.
 *
 * Waiting and applied locks are all kept in trees whose properties are:
 *
 *	- the root of a tree may be an applied or waiting lock.
 *	- every other node in the tree is a waiting lock that
 *	  conflicts with every ancestor of that node.
 *
 * Every such tree begins life as a waiting singleton which obviously
 * satisfies the above properties.
 *
 * The only ways we modify trees preserve these properties:
 *
 *	1. We may add a new leaf node, but only after first verifying that it
 *	   conflicts with all of its ancestors.
 *	2. We may remove the root of a tree, creating a new singleton
 *	   tree from the root and N new trees rooted in the immediate
 *	   children.
 *	3. If the root of a tree is not currently an applied lock, we may
 *	   apply it (if possible).
 *	4. We may upgrade the root of the tree (either extend its range,
 *	   or upgrade its entire range from read to write).
 *
 * When an applied lock is modified in a way that reduces or downgrades any
 * part of its range, we remove all its children (2 above). This particularly
 * happens when a lock is unlocked.
 *
 * For each of those child trees we "wake up" the thread which is
 * waiting for the lock so it can continue handling as follows: if the
 * root of the tree applies, we do so (3). If it doesn't, it must
 * conflict with some applied lock. We remove (wake up) all of its children
 * (2), and add it as a new leaf to the tree rooted in the applied
 * lock (1). We then repeat the process recursively with those
 * children.
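 *
 * A worked example (hypothetical byte ranges, for illustration only):
 * suppose a write lock on [0,99] is applied, with waiting writes on
 * [0,49] and [50,99] as its leaves. Unlocking [0,99] removes the root
 * (2), leaving two singleton trees. Each waiter is woken: if [0,49]
 * can now be applied it becomes an applied root (3), and if [50,99]
 * still conflicts with some other applied lock it is re-added as a
 * leaf beneath that lock (1).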
 *
 */

#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/hashtable.h>
#include <linux/percpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/filelock.h>

#include <linux/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
#define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)
#define IS_REMOTELCK(fl)	(fl->fl_pid <= 0)

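/*
 * A lease is "breaking" once a conflicting open has started the unlock
 * or downgrade countdown; these helpers report that state and the type
 * the lease is heading toward (F_UNLCK or F_RDLCK).
 */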
static bool lease_breaking(struct file_lock *fl)
{
        return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
}

static int target_leasetype(struct file_lock *fl)
{
        if (fl->fl_flags & FL_UNLOCK_PENDING)
                return F_UNLCK;
        if (fl->fl_flags & FL_DOWNGRADE_PENDING)
                return F_RDLCK;
        return fl->fl_type;
}

int leases_enable = 1;
int lease_break_time = 45;

/*
 * The global file_lock_list is only used for displaying /proc/locks, so we
 * keep a list on each CPU, with each list protected by its own spinlock.
 * Global serialization is done using file_rwsem.
 *
 * Note that alterations to the list also require that the relevant flc_lock is
 * held.
 */
struct file_lock_list_struct {
        spinlock_t              lock;
        struct hlist_head       hlist;
};
static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);


/*
 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
 * It is protected by blocked_lock_lock.
 *
 * We hash locks by lockowner in order to optimize searching for the lock a
 * particular lockowner is waiting on.
 *
 * FIXME: make this value scale via some heuristic? We generally will want more
 * buckets when we have more lockowners holding locks, but that's a little
 * difficult to determine without knowing what the workload will look like.
 */
#define BLOCKED_HASH_BITS	7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);

/*
 * This lock protects the blocked_hash. Generally, if you're accessing it, you
 * want to be holding this lock.
 *
 * In addition, it also protects the fl->fl_blocked_requests list, and the
 * fl->fl_blocker pointer for file_lock structures that are acting as lock
 * requests (in contrast to those that are acting as records of acquired locks).
 *
 * Note that when we acquire this lock in order to change the above fields,
 * we often hold the flc_lock as well. In certain cases, when reading the fields
 * protected by this lock, we can skip acquiring it iff we already hold the
 * flc_lock.
 */
static DEFINE_SPINLOCK(blocked_lock_lock);

static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;

static struct file_lock_context *
locks_get_lock_context(struct inode *inode, int type)
{
        struct file_lock_context *ctx;

        /* paired with cmpxchg() below */
        ctx = smp_load_acquire(&inode->i_flctx);
        if (likely(ctx) || type == F_UNLCK)
                goto out;

        ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
        if (!ctx)
                goto out;

        spin_lock_init(&ctx->flc_lock);
        INIT_LIST_HEAD(&ctx->flc_flock);
        INIT_LIST_HEAD(&ctx->flc_posix);
        INIT_LIST_HEAD(&ctx->flc_lease);

        /*
         * Assign the pointer if it's not already assigned. If it is, then
         * free the context we just allocated.
         */
        if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
                kmem_cache_free(flctx_cache, ctx);
                ctx = smp_load_acquire(&inode->i_flctx);
        }
out:
        trace_locks_get_lock_context(inode, type, ctx);
        return ctx;
}

static void
locks_dump_ctx_list(struct list_head *list, char *list_type)
{
        struct file_lock *fl;

        list_for_each_entry(fl, list, fl_list) {
                pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
        }
}

static void
locks_check_ctx_lists(struct inode *inode)
{
        struct file_lock_context *ctx = inode->i_flctx;

        if (unlikely(!list_empty(&ctx->flc_flock) ||
                     !list_empty(&ctx->flc_posix) ||
                     !list_empty(&ctx->flc_lease))) {
                pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
                        MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
                        inode->i_ino);
                locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
                locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
                locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
        }
}

static void
locks_check_ctx_file_list(struct file *filp, struct list_head *list,
                          char *list_type)
{
        struct file_lock *fl;
        struct inode *inode = locks_inode(filp);

        list_for_each_entry(fl, list, fl_list)
                if (fl->fl_file == filp)
                        pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
                                " fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
                                list_type, MAJOR(inode->i_sb->s_dev),
                                MINOR(inode->i_sb->s_dev), inode->i_ino,
                                fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
}

void
locks_free_lock_context(struct inode *inode)
{
        struct file_lock_context *ctx = inode->i_flctx;

        if (unlikely(ctx)) {
                locks_check_ctx_lists(inode);
                kmem_cache_free(flctx_cache, ctx);
        }
}

static void locks_init_lock_heads(struct file_lock *fl)
{
        INIT_HLIST_NODE(&fl->fl_link);
        INIT_LIST_HEAD(&fl->fl_list);
        INIT_LIST_HEAD(&fl->fl_blocked_requests);
        INIT_LIST_HEAD(&fl->fl_blocked_member);
        init_waitqueue_head(&fl->fl_wait);
}

/* Allocate an empty lock structure. */
struct file_lock *locks_alloc_lock(void)
{
        struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);

        if (fl)
                locks_init_lock_heads(fl);

        return fl;
}
EXPORT_SYMBOL_GPL(locks_alloc_lock);

void locks_release_private(struct file_lock *fl)
{
        BUG_ON(waitqueue_active(&fl->fl_wait));
        BUG_ON(!list_empty(&fl->fl_list));
        BUG_ON(!list_empty(&fl->fl_blocked_requests));
        BUG_ON(!list_empty(&fl->fl_blocked_member));
        BUG_ON(!hlist_unhashed(&fl->fl_link));

        if (fl->fl_ops) {
                if (fl->fl_ops->fl_release_private)
                        fl->fl_ops->fl_release_private(fl);
                fl->fl_ops = NULL;
        }

        if (fl->fl_lmops) {
                if (fl->fl_lmops->lm_put_owner) {
                        fl->fl_lmops->lm_put_owner(fl->fl_owner);
                        fl->fl_owner = NULL;
                }
                fl->fl_lmops = NULL;
        }
}
EXPORT_SYMBOL_GPL(locks_release_private);

/* Free a lock which is not in use. */
void locks_free_lock(struct file_lock *fl)
{
        locks_release_private(fl);
        kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);

static void
locks_dispose_list(struct list_head *dispose)
{
        struct file_lock *fl;

        while (!list_empty(dispose)) {
                fl = list_first_entry(dispose, struct file_lock, fl_list);
                list_del_init(&fl->fl_list);
                locks_free_lock(fl);
        }
}

void locks_init_lock(struct file_lock *fl)
{
        memset(fl, 0, sizeof(struct file_lock));
        locks_init_lock_heads(fl);
}
EXPORT_SYMBOL(locks_init_lock);

/*
 * Initialize a new lock from an existing file_lock structure.
 */
void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
        new->fl_owner = fl->fl_owner;
        new->fl_pid = fl->fl_pid;
        new->fl_file = NULL;
        new->fl_flags = fl->fl_flags;
        new->fl_type = fl->fl_type;
        new->fl_start = fl->fl_start;
        new->fl_end = fl->fl_end;
        new->fl_lmops = fl->fl_lmops;
        new->fl_ops = NULL;

        if (fl->fl_lmops) {
                if (fl->fl_lmops->lm_get_owner)
                        fl->fl_lmops->lm_get_owner(fl->fl_owner);
        }
}
EXPORT_SYMBOL(locks_copy_conflock);

void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
        /* "new" must be a freshly-initialized lock */
        WARN_ON_ONCE(new->fl_ops);

        locks_copy_conflock(new, fl);

        new->fl_file = fl->fl_file;
        new->fl_ops = fl->fl_ops;

        if (fl->fl_ops) {
                if (fl->fl_ops->fl_copy_lock)
                        fl->fl_ops->fl_copy_lock(new, fl);
        }
}
EXPORT_SYMBOL(locks_copy_lock);

static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
{
        struct file_lock *f;

        /*
         * As ctx->flc_lock is held, new requests cannot be added to
         * ->fl_blocked_requests, so we don't need a lock to check if it
         * is empty.
         */
        if (list_empty(&fl->fl_blocked_requests))
                return;
        spin_lock(&blocked_lock_lock);
        list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests);
        list_for_each_entry(f, &new->fl_blocked_requests, fl_blocked_member)
                f->fl_blocker = new;
        spin_unlock(&blocked_lock_lock);
}

static inline int flock_translate_cmd(int cmd)
{
        if (cmd & LOCK_MAND)
                return cmd & (LOCK_MAND | LOCK_RW);
        switch (cmd) {
        case LOCK_SH:
                return F_RDLCK;
        case LOCK_EX:
                return F_WRLCK;
        case LOCK_UN:
                return F_UNLCK;
        }
        return -EINVAL;
}
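
/*
 * For illustration, a minimal userspace sketch of the mapping above
 * (assumed typical usage, not taken from this file):
 *
 *	flock(fd, LOCK_SH);	LOCK_SH -> F_RDLCK
 *	flock(fd, LOCK_EX);	LOCK_EX -> F_WRLCK
 *	flock(fd, LOCK_UN);	LOCK_UN -> F_UNLCK
 */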

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static struct file_lock *
flock_make_lock(struct file *filp, unsigned int cmd, struct file_lock *fl)
{
        int type = flock_translate_cmd(cmd);

        if (type < 0)
                return ERR_PTR(type);

        if (fl == NULL) {
                fl = locks_alloc_lock();
                if (fl == NULL)
                        return ERR_PTR(-ENOMEM);
        } else {
                locks_init_lock(fl);
        }

        fl->fl_file = filp;
        fl->fl_owner = filp;
        fl->fl_pid = current->tgid;
        fl->fl_flags = FL_FLOCK;
        fl->fl_type = type;
        fl->fl_end = OFFSET_MAX;

        return fl;
}

static int assign_type(struct file_lock *fl, long type)
{
        switch (type) {
        case F_RDLCK:
        case F_WRLCK:
        case F_UNLCK:
                fl->fl_type = type;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
                                 struct flock64 *l)
{
        switch (l->l_whence) {
        case SEEK_SET:
                fl->fl_start = 0;
                break;
        case SEEK_CUR:
                fl->fl_start = filp->f_pos;
                break;
        case SEEK_END:
                fl->fl_start = i_size_read(file_inode(filp));
                break;
        default:
                return -EINVAL;
        }
        if (l->l_start > OFFSET_MAX - fl->fl_start)
                return -EOVERFLOW;
        fl->fl_start += l->l_start;
        if (fl->fl_start < 0)
                return -EINVAL;

        /* POSIX-1996 leaves the case l->l_len < 0 undefined;
           POSIX-2001 defines it. */
        if (l->l_len > 0) {
                if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
                        return -EOVERFLOW;
                fl->fl_end = fl->fl_start + (l->l_len - 1);

        } else if (l->l_len < 0) {
                if (fl->fl_start + l->l_len < 0)
                        return -EINVAL;
                fl->fl_end = fl->fl_start - 1;
                fl->fl_start += l->l_len;
        } else
                fl->fl_end = OFFSET_MAX;

        fl->fl_owner = current->files;
        fl->fl_pid = current->tgid;
        fl->fl_file = filp;
        fl->fl_flags = FL_POSIX;
        fl->fl_ops = NULL;
        fl->fl_lmops = NULL;

        return assign_type(fl, l->l_type);
}
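
/*
 * Worked example of the l_len handling above (hypothetical values):
 * with l_whence = SEEK_SET and l_start = 100, l_len = 10 yields the
 * range [100,109]; l_len = -10 yields [90,99]; and l_len = 0 yields
 * [100,OFFSET_MAX], i.e. to end of file.
 */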

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
                               struct flock *l)
{
        struct flock64 ll = {
                .l_type = l->l_type,
                .l_whence = l->l_whence,
                .l_start = l->l_start,
                .l_len = l->l_len,
        };

        return flock64_to_posix_lock(filp, fl, &ll);
}

/* default lease lock manager operations */
static bool
lease_break_callback(struct file_lock *fl)
{
        kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
        return false;
}

static void
lease_setup(struct file_lock *fl, void **priv)
{
        struct file *filp = fl->fl_file;
        struct fasync_struct *fa = *priv;

        /*
         * fasync_insert_entry() returns the old entry if any. If there was no
         * old entry, then it used "priv" and inserted it into the fasync list.
         * Clear the pointer to indicate that it shouldn't be freed.
         */
        if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
                *priv = NULL;

        __f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
}

static const struct lock_manager_operations lease_manager_ops = {
        .lm_break = lease_break_callback,
        .lm_change = lease_modify,
        .lm_setup = lease_setup,
};

/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, long type, struct file_lock *fl)
{
        if (assign_type(fl, type) != 0)
                return -EINVAL;

        fl->fl_owner = filp;
        fl->fl_pid = current->tgid;

        fl->fl_file = filp;
        fl->fl_flags = FL_LEASE;
        fl->fl_start = 0;
        fl->fl_end = OFFSET_MAX;
        fl->fl_ops = NULL;
        fl->fl_lmops = &lease_manager_ops;
        return 0;
}

/* Allocate a file_lock initialised to this type of lease */
static struct file_lock *lease_alloc(struct file *filp, long type)
{
        struct file_lock *fl = locks_alloc_lock();
        int error = -ENOMEM;

        if (fl == NULL)
                return ERR_PTR(error);

        error = lease_init(filp, type, fl);
        if (error) {
                locks_free_lock(fl);
                return ERR_PTR(error);
        }
        return fl;
}

/* Check if two locks overlap each other.
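 * For example, [0,9] and [9,20] overlap because fl_end is inclusive,
 * whereas [0,9] and [10,20] do not.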
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
        return ((fl1->fl_end >= fl2->fl_start) &&
                (fl2->fl_end >= fl1->fl_start));
}

/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
        return fl1->fl_owner == fl2->fl_owner;
}

/* Must be called with the flc_lock held! */
static void locks_insert_global_locks(struct file_lock *fl)
{
        struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);

        percpu_rwsem_assert_held(&file_rwsem);

        spin_lock(&fll->lock);
        fl->fl_link_cpu = smp_processor_id();
        hlist_add_head(&fl->fl_link, &fll->hlist);
        spin_unlock(&fll->lock);
}

/* Must be called with the flc_lock held! */
static void locks_delete_global_locks(struct file_lock *fl)
{
        struct file_lock_list_struct *fll;

        percpu_rwsem_assert_held(&file_rwsem);

        /*
         * Avoid taking lock if already unhashed. This is safe since this check
         * is done while holding the flc_lock, and new insertions into the list
         * also require that it be held.
         */
        if (hlist_unhashed(&fl->fl_link))
                return;

        fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
        spin_lock(&fll->lock);
        hlist_del_init(&fl->fl_link);
        spin_unlock(&fll->lock);
}

static unsigned long
posix_owner_key(struct file_lock *fl)
{
        return (unsigned long)fl->fl_owner;
}

static void locks_insert_global_blocked(struct file_lock *waiter)
{
        lockdep_assert_held(&blocked_lock_lock);

        hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
}

static void locks_delete_global_blocked(struct file_lock *waiter)
{
        lockdep_assert_held(&blocked_lock_lock);

        hash_del(&waiter->fl_link);
}

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 *
 * Must be called with blocked_lock_lock held.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
        locks_delete_global_blocked(waiter);
        list_del_init(&waiter->fl_blocked_member);
}

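/*
 * Wake every request blocked on @blocker and detach it from the
 * blocker's fl_blocked_requests list. Must be called with
 * blocked_lock_lock held.
 */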
static void __locks_wake_up_blocks(struct file_lock *blocker)
{
        while (!list_empty(&blocker->fl_blocked_requests)) {
                struct file_lock *waiter;

                waiter = list_first_entry(&blocker->fl_blocked_requests,
                                          struct file_lock, fl_blocked_member);
                __locks_delete_block(waiter);
                if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
                        waiter->fl_lmops->lm_notify(waiter);
                else
                        wake_up(&waiter->fl_wait);

                /*
                 * The setting of fl_blocker to NULL marks the "done"
                 * point in deleting a block. Paired with acquire at the top
                 * of locks_delete_block().
                 */
                smp_store_release(&waiter->fl_blocker, NULL);
        }
}

/**
 * locks_delete_block - stop waiting for a file lock
 * @waiter: the lock which was waiting
 *
 * lockd/nfsd need to disconnect the lock while working on it.
 */
int locks_delete_block(struct file_lock *waiter)
{
        int status = -ENOENT;

        /*
         * If fl_blocker is NULL, it won't be set again as this thread "owns"
         * the lock and is the only one that might try to claim the lock.
         *
         * We use acquire/release to manage fl_blocker so that we can
         * optimize away taking the blocked_lock_lock in many cases.
         *
         * The smp_load_acquire guarantees two things:
         *
         * 1/ that fl_blocked_requests can be tested locklessly. If something
         * was recently added to that list it must have been in a locked region
         * *before* the locked region when fl_blocker was set to NULL.
         *
         * 2/ that no other thread is accessing 'waiter', so it is safe to free
         * it. __locks_wake_up_blocks is careful not to touch waiter after
         * fl_blocker is released.
         *
         * If a lockless check of fl_blocker shows it to be NULL, we know that
         * no new locks can be inserted into its fl_blocked_requests list, and
         * can avoid doing anything further if the list is empty.
         */
        if (!smp_load_acquire(&waiter->fl_blocker) &&
            list_empty(&waiter->fl_blocked_requests))
                return status;

        spin_lock(&blocked_lock_lock);
        if (waiter->fl_blocker)
                status = 0;
        __locks_wake_up_blocks(waiter);
        __locks_delete_block(waiter);

        /*
         * The setting of fl_blocker to NULL marks the "done" point in deleting
         * a block. Paired with acquire at the top of this function.
         */
        smp_store_release(&waiter->fl_blocker, NULL);
        spin_unlock(&blocked_lock_lock);
        return status;
}
EXPORT_SYMBOL(locks_delete_block);

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 *
 * Must be called with both the flc_lock and blocked_lock_lock held. The
 * fl_blocked_requests list itself is protected by the blocked_lock_lock,
 * but by ensuring that the flc_lock is also held on insertions we can avoid
 * taking the blocked_lock_lock in some cases when we see that the
 * fl_blocked_requests list is empty.
 *
 * Rather than just adding to the list, we check for conflicts with any existing
 * waiters, and add beneath any waiter that blocks the new waiter.
 * Thus wakeups don't happen until needed.
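 *
 * For illustration (hypothetical ranges): if an applied write lock on
 * [0,99] already has a waiting write on [0,49] beneath it, a new
 * request for [10,19] conflicts with that waiter and is inserted
 * beneath it rather than directly beneath the applied lock, so it is
 * only woken once the [0,49] waiter has been resolved.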
 */
static void __locks_insert_block(struct file_lock *blocker,
                                 struct file_lock *waiter,
                                 bool conflict(struct file_lock *,
                                               struct file_lock *))
{
        struct file_lock *fl;

        BUG_ON(!list_empty(&waiter->fl_blocked_member));

new_blocker:
        list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member)
                if (conflict(fl, waiter)) {
                        blocker = fl;
                        goto new_blocker;
                }
        waiter->fl_blocker = blocker;
        list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests);
        if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
                locks_insert_global_blocked(waiter);

        /* The requests in waiter->fl_blocked_requests are known to conflict
         * with waiter, but might not conflict with blocker, or the requests
         * and lock which block it. So they all need to be woken.
         */
        __locks_wake_up_blocks(waiter);
}

/* Must be called with flc_lock held. */
static void locks_insert_block(struct file_lock *blocker,
                               struct file_lock *waiter,
                               bool conflict(struct file_lock *,
                                             struct file_lock *))
{
        spin_lock(&blocked_lock_lock);
        __locks_insert_block(blocker, waiter, conflict);
        spin_unlock(&blocked_lock_lock);
}

/*
 * Wake up processes blocked waiting for blocker.
 *
 * Must be called with the inode->flc_lock held!
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
        /*
         * Avoid taking global lock if list is empty. This is safe since new
         * blocked requests are only added to the list under the flc_lock, and
         * the flc_lock is always held here. Note that removal from the
         * fl_blocked_requests list does not require the flc_lock, so we must
         * recheck list_empty() after acquiring the blocked_lock_lock.
         */
        if (list_empty(&blocker->fl_blocked_requests))
                return;

        spin_lock(&blocked_lock_lock);
        __locks_wake_up_blocks(blocker);
        spin_unlock(&blocked_lock_lock);
}

static void
locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
{
        list_add_tail(&fl->fl_list, before);
        locks_insert_global_locks(fl);
}

static void
locks_unlink_lock_ctx(struct file_lock *fl)
{
        locks_delete_global_locks(fl);
        list_del_init(&fl->fl_list);
        locks_wake_up_blocks(fl);
}

static void
locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
{
        locks_unlink_lock_ctx(fl);
        if (dispose)
                list_add(&fl->fl_list, dispose);
        else
                locks_free_lock(fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static bool locks_conflict(struct file_lock *caller_fl,
                           struct file_lock *sys_fl)
{
        if (sys_fl->fl_type == F_WRLCK)
                return true;
        if (caller_fl->fl_type == F_WRLCK)
                return true;
        return false;
}

/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling locks_conflict().
 */
static bool posix_locks_conflict(struct file_lock *caller_fl,
                                 struct file_lock *sys_fl)
{
        /* POSIX locks owned by the same process do not conflict with
         * each other.
         */
        if (posix_same_owner(caller_fl, sys_fl))
                return false;

        /* Check whether they overlap */
        if (!locks_overlap(caller_fl, sys_fl))
                return false;

        return locks_conflict(caller_fl, sys_fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling locks_conflict().
 */
static bool flock_locks_conflict(struct file_lock *caller_fl,
                                 struct file_lock *sys_fl)
{
        /* FLOCK locks referring to the same filp do not conflict with
         * each other.
         */
        if (caller_fl->fl_file == sys_fl->fl_file)
                return false;
        if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
                return false;

        return locks_conflict(caller_fl, sys_fl);
}

void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
        struct file_lock *cfl;
        struct file_lock_context *ctx;
        struct inode *inode = locks_inode(filp);

        ctx = smp_load_acquire(&inode->i_flctx);
        if (!ctx || list_empty_careful(&ctx->flc_posix)) {
                fl->fl_type = F_UNLCK;
                return;
        }

        spin_lock(&ctx->flc_lock);
        list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
                if (posix_locks_conflict(fl, cfl)) {
                        locks_copy_conflock(fl, cfl);
                        goto out;
                }
        }
        fl->fl_type = F_UNLCK;
out:
        spin_unlock(&ctx->flc_lock);
        return;
}
EXPORT_SYMBOL(posix_test_lock);
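
/*
 * A minimal userspace sketch of what reaches posix_test_lock() via
 * fcntl(F_GETLK) (illustrative only):
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,		(0 means "to end of file")
 *	};
 *	fcntl(fd, F_GETLK, &fl);
 *	(on return, fl.l_type is F_UNLCK if no conflicting lock was found,
 *	 otherwise it describes the first conflicting lock)
 */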

/*
 * Deadlock detection:
 *
 * We attempt to detect deadlocks that are due purely to posix file
 * locks.
 *
 * We assume that a task can be waiting for at most one lock at a time.
 * So for any acquired lock, the process holding that lock may be
 * waiting on at most one other lock. That lock in turn may be held by
 * someone waiting for at most one other lock. Given a requested lock
 * caller_fl which is about to wait for a conflicting lock block_fl, we
 * follow this chain of waiters to ensure we are not about to create a
 * cycle.
 *
 * Since we do this before we ever put a process to sleep on a lock, we
 * are ensured that there is never a cycle; that is what guarantees that
 * the while() loop in posix_locks_deadlock() eventually completes.
 *
 * Note: the above assumption may not be true when handling lock
 * requests from a broken NFS client. It may also fail in the presence
 * of tasks (such as posix threads) sharing the same open file table.
 * To handle those cases, we just bail out after a few iterations.
 *
 * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
 * Because the owner is not even nominally tied to a thread of
 * execution, the deadlock detection below can't reasonably work well. Just
 * skip it for those.
 *
 * In principle, we could do a more limited deadlock detection on FL_OFDLCK
 * locks that just checks for the case where two tasks are attempting to
 * upgrade from read to write locks on the same inode.
 */
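
/*
 * For illustration (hypothetical scenario): task A holds a lock on
 * [0,9] and blocks waiting for B's lock on [10,19]; B then requests
 * [0,9]. Following the chain from B's conflicting lock leads back to
 * an owner equal to B's own, so posix_locks_deadlock() returns 1 and
 * the request fails with -EDEADLK instead of sleeping forever.
 */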

#define MAX_DEADLK_ITERATIONS 10

/* Find a lock that the owner of the given block_fl is blocking on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
        struct file_lock *fl;

        hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
                if (posix_same_owner(fl, block_fl)) {
                        while (fl->fl_blocker)
                                fl = fl->fl_blocker;
                        return fl;
                }
        }
        return NULL;
}

/* Must be called with the blocked_lock_lock held! */
static int posix_locks_deadlock(struct file_lock *caller_fl,
                                struct file_lock *block_fl)
{
        int i = 0;

        lockdep_assert_held(&blocked_lock_lock);

        /*
         * This deadlock detector can't reasonably detect deadlocks with
         * FL_OFDLCK locks, since they aren't owned by a process, per-se.
         */
        if (IS_OFDLCK(caller_fl))
                return 0;

        while ((block_fl = what_owner_is_waiting_for(block_fl))) {
                if (i++ > MAX_DEADLK_ITERATIONS)
                        return 0;
                if (posix_same_owner(caller_fl, block_fl))
                        return 1;
        }
        return 0;
}

/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * after any leases, but before any posix locks.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
static int flock_lock_inode(struct inode *inode, struct file_lock *request)
{
        struct file_lock *new_fl = NULL;
        struct file_lock *fl;
        struct file_lock_context *ctx;
        int error = 0;
        bool found = false;
        LIST_HEAD(dispose);

        ctx = locks_get_lock_context(inode, request->fl_type);
        if (!ctx) {
                if (request->fl_type != F_UNLCK)
                        return -ENOMEM;
                return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
        }

        if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
                new_fl = locks_alloc_lock();
                if (!new_fl)
                        return -ENOMEM;
        }

        percpu_down_read(&file_rwsem);
        spin_lock(&ctx->flc_lock);
        if (request->fl_flags & FL_ACCESS)
                goto find_conflict;

        list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
                if (request->fl_file != fl->fl_file)
                        continue;
                if (request->fl_type == fl->fl_type)
                        goto out;
                found = true;
                locks_delete_lock_ctx(fl, &dispose);
                break;
        }

        if (request->fl_type == F_UNLCK) {
                if ((request->fl_flags & FL_EXISTS) && !found)
                        error = -ENOENT;
                goto out;
        }

find_conflict:
        list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
                if (!flock_locks_conflict(request, fl))
                        continue;
                error = -EAGAIN;
                if (!(request->fl_flags & FL_SLEEP))
                        goto out;
                error = FILE_LOCK_DEFERRED;
                locks_insert_block(fl, request, flock_locks_conflict);
                goto out;
        }
        if (request->fl_flags & FL_ACCESS)
                goto out;
        locks_copy_lock(new_fl, request);
        locks_move_blocks(new_fl, request);
        locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
        new_fl = NULL;
        error = 0;

out:
        spin_unlock(&ctx->flc_lock);
        percpu_up_read(&file_rwsem);
        if (new_fl)
                locks_free_lock(new_fl);
        locks_dispose_list(&dispose);
        trace_flock_lock_inode(inode, request, error);
        return error;
}
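
/*
 * A minimal userspace sketch of what reaches flock_lock_inode()
 * (illustrative only; LOCK_NB handling happens in the flock() syscall
 * before the request gets here):
 *
 *	flock(fd, LOCK_EX);		blocks on conflict (FL_SLEEP set)
 *	flock(fd, LOCK_EX | LOCK_NB);	fails with -EAGAIN on conflict
 *	flock(fd, LOCK_UN);		releases the lock
 */

/*
 * Apply a POSIX-style lock request to an inode, merging with, splitting
 * or replacing any existing locks of the same owner as needed.
 */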
static int posix_lock_inode(struct inode *inode, struct file_lock *request,
                            struct file_lock *conflock)
{
        struct file_lock *fl, *tmp;
        struct file_lock *new_fl = NULL;
        struct file_lock *new_fl2 = NULL;
        struct file_lock *left = NULL;
        struct file_lock *right = NULL;
        struct file_lock_context *ctx;
        int error;
        bool added = false;
        LIST_HEAD(dispose);

        ctx = locks_get_lock_context(inode, request->fl_type);
        if (!ctx)
                return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;

        /*
         * We may need two file_lock structures for this operation,
         * so we get them in advance to avoid races.
         *
         * In some cases we can be sure, that no new locks will be needed
         */
        if (!(request->fl_flags & FL_ACCESS) &&
            (request->fl_type != F_UNLCK ||
             request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
                new_fl = locks_alloc_lock();
                new_fl2 = locks_alloc_lock();
        }

        percpu_down_read(&file_rwsem);
        spin_lock(&ctx->flc_lock);
        /*
         * New lock request. Walk all POSIX locks and look for conflicts. If
         * there are any, either return error or put the request on the
         * blocker's list of waiters and the global blocked_hash.
         */
        if (request->fl_type != F_UNLCK) {
                list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
                        if (!posix_locks_conflict(request, fl))
                                continue;
                        if (conflock)
                                locks_copy_conflock(conflock, fl);
                        error = -EAGAIN;
                        if (!(request->fl_flags & FL_SLEEP))
                                goto out;
                        /*
                         * Deadlock detection and insertion into the blocked
                         * locks list must be done while holding the same lock!
                         */
                        error = -EDEADLK;
                        spin_lock(&blocked_lock_lock);
                        /*
                         * Ensure that we don't find any locks blocked on this
                         * request during deadlock detection.
                         */
                        __locks_wake_up_blocks(request);
                        if (likely(!posix_locks_deadlock(request, fl))) {
                                error = FILE_LOCK_DEFERRED;
                                __locks_insert_block(fl, request,
                                                     posix_locks_conflict);
                        }
                        spin_unlock(&blocked_lock_lock);
                        goto out;
                }
        }

        /* If we're just looking for a conflict, we're done. */
        error = 0;
        if (request->fl_flags & FL_ACCESS)
                goto out;

        /* Find the first old lock with the same owner as the new lock */
        list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
                if (posix_same_owner(request, fl))
                        break;
        }

        /* Process locks with this owner. */
        list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
                if (!posix_same_owner(request, fl))
                        break;

                /* Detect adjacent or overlapping regions (if same lock type) */
                if (request->fl_type == fl->fl_type) {
                        /* In all comparisons of start vs end, use
                         * "start - 1" rather than "end + 1". If end
                         * is OFFSET_MAX, end + 1 will become negative.
                         */
                        if (fl->fl_end < request->fl_start - 1)
                                continue;
                        /* If the next lock in the list has entirely bigger
                         * addresses than the new one, insert the lock here.
                         */
                        if (fl->fl_start - 1 > request->fl_end)
                                break;

                        /* If we come here, the new and old lock are of the
                         * same type and adjacent or overlapping. Make one
                         * lock yielding from the lower start address of both
                         * locks to the higher end address.
                         */
                        if (fl->fl_start > request->fl_start)
                                fl->fl_start = request->fl_start;
                        else
                                request->fl_start = fl->fl_start;
                        if (fl->fl_end < request->fl_end)
                                fl->fl_end = request->fl_end;
                        else
                                request->fl_end = fl->fl_end;
                        if (added) {
                                locks_delete_lock_ctx(fl, &dispose);
                                continue;
                        }
                        request = fl;
                        added = true;
                } else {
                        /* Processing for different lock types is a bit
                         * more complex.
                         */
                        if (fl->fl_end < request->fl_start)
                                continue;
                        if (fl->fl_start > request->fl_end)
                                break;
                        if (request->fl_type == F_UNLCK)
                                added = true;
                        if (fl->fl_start < request->fl_start)
                                left = fl;
                        /* If the next lock in the list has a higher end
                         * address than the new one, insert the new one here.
                         */
                        if (fl->fl_end > request->fl_end) {
                                right = fl;
                                break;
                        }
                        if (fl->fl_start >= request->fl_start) {
                                /* The new lock completely replaces an old
                                 * one (This may happen several times).
                                 */
                                if (added) {
                                        locks_delete_lock_ctx(fl, &dispose);
                                        continue;
                                }
                                /*
                                 * Replace the old lock with new_fl, and
                                 * remove the old one. It's safe to do the
                                 * insert here since we know that we won't be
                                 * using new_fl later, and that the lock is
                                 * just replacing an existing lock.
                                 */
                                error = -ENOLCK;
                                if (!new_fl)
                                        goto out;
                                locks_copy_lock(new_fl, request);
                                locks_move_blocks(new_fl, request);
                                request = new_fl;
                                new_fl = NULL;
                                locks_insert_lock_ctx(request, &fl->fl_list);
                                locks_delete_lock_ctx(fl, &dispose);
                                added = true;
                        }
                }
        }

        /*
         * The above code only modifies existing locks in case of merging or
         * replacing. If new lock(s) need to be inserted all modifications are
         * done below this, so it's safe yet to bail out.
         */
        error = -ENOLCK; /* "no luck" */
        if (right && left == right && !new_fl2)
                goto out;

        error = 0;
        if (!added) {
                if (request->fl_type == F_UNLCK) {
                        if (request->fl_flags & FL_EXISTS)
                                error = -ENOENT;
                        goto out;
                }

                if (!new_fl) {
                        error = -ENOLCK;
                        goto out;
                }
                locks_copy_lock(new_fl, request);
                locks_move_blocks(new_fl, request);
                locks_insert_lock_ctx(new_fl, &fl->fl_list);
                fl = new_fl;
                new_fl = NULL;
        }
        if (right) {
                if (left == right) {
                        /* The new lock breaks the old one in two pieces,
                         * so we have to use the second new lock.
                         */
                        left = new_fl2;
                        new_fl2 = NULL;
                        locks_copy_lock(left, right);
                        locks_insert_lock_ctx(left, &fl->fl_list);
                }
                right->fl_start = request->fl_end + 1;
                locks_wake_up_blocks(right);
        }
        if (left) {
                left->fl_end = request->fl_start - 1;
                locks_wake_up_blocks(left);
        }
out:
        spin_unlock(&ctx->flc_lock);
        percpu_up_read(&file_rwsem);
        trace_posix_lock_inode(inode, request, error);
        /*
         * Free any unused locks.
         */
        if (new_fl)
                locks_free_lock(new_fl);
        if (new_fl2)
                locks_free_lock(new_fl2);
        locks_dispose_list(&dispose);

        return error;
}
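
/*
 * Worked example of the splitting above (hypothetical ranges): if the
 * owner holds a write lock on [0,99] and unlocks [40,59], the existing
 * lock is trimmed to the "right" piece [60,99] while new_fl2 becomes
 * the "left" piece [0,39]; waiters on both pieces are woken so they
 * can re-check for conflicts.
 */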

/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
                    struct file_lock *conflock)
{
        return posix_lock_inode(locks_inode(filp), fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);

/**
 * posix_lock_inode_wait - Apply a POSIX-style lock to a file
 * @inode: inode of file to which lock request should be applied
 * @fl: The lock to be applied
 *
 * Apply a POSIX style lock request to an inode.
 */
static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
        int error;

        might_sleep();
        for (;;) {
                error = posix_lock_inode(inode, fl, NULL);
                if (error != FILE_LOCK_DEFERRED)
                        break;
                error = wait_event_interruptible(fl->fl_wait,
                                                 list_empty(&fl->fl_blocked_member));
                if (error)
                        break;
        }
        locks_delete_block(fl);
        return error;
}

static void lease_clear_pending(struct file_lock *fl, int arg)
{
        switch (arg) {
        case F_UNLCK:
                fl->fl_flags &= ~FL_UNLOCK_PENDING;
                fallthrough;
        case F_RDLCK:
                fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
        }
}

/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
{
        int error = assign_type(fl, arg);

        if (error)
                return error;
        lease_clear_pending(fl, arg);
        locks_wake_up_blocks(fl);
        if (arg == F_UNLCK) {
                struct file *filp = fl->fl_file;

                f_delown(filp);
                filp->f_owner.signum = 0;
                fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
                if (fl->fl_fasync != NULL) {
                        printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
                        fl->fl_fasync = NULL;
                }
                locks_delete_lock_ctx(fl, dispose);
        }
        return 0;
}
EXPORT_SYMBOL(lease_modify);

static bool past_time(unsigned long then)
{
        if (!then)
                /* 0 is a special value meaning "this never expires": */
                return false;
        return time_after(jiffies, then);
}

static void time_out_leases(struct inode *inode, struct list_head *dispose)
{
        struct file_lock_context *ctx = inode->i_flctx;
        struct file_lock *fl, *tmp;

        lockdep_assert_held(&ctx->flc_lock);

        list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
                trace_time_out_leases(inode, fl);
                if (past_time(fl->fl_downgrade_time))
                        lease_modify(fl, F_RDLCK, dispose);
                if (past_time(fl->fl_break_time))
                        lease_modify(fl, F_UNLCK, dispose);
        }
}

static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
{
        bool rc;

        if (lease->fl_lmops->lm_breaker_owns_lease
            && lease->fl_lmops->lm_breaker_owns_lease(lease))
                return false;
        if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) {
                rc = false;
                goto trace;
        }
        if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE)) {
                rc = false;
                goto trace;
        }

        rc = locks_conflict(breaker, lease);
trace:
        trace_leases_conflict(rc, lease, breaker);
        return rc;
}

static bool
any_leases_conflict(struct inode *inode, struct file_lock *breaker)
{
        struct file_lock_context *ctx = inode->i_flctx;
        struct file_lock *fl;

        lockdep_assert_held(&ctx->flc_lock);

        list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
                if (leases_conflict(fl, breaker))
                        return true;
        }
        return false;
}

/**
 * __break_lease - revoke all outstanding leases on file
 * @inode: the inode of the file to return
 * @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
 *	    break all leases
 * @type: FL_LEASE: break leases and delegations; FL_DELEG: break
 *	    only delegations
 *
 * break_lease (inlined for speed) has checked there already is at least
 * some kind of lock (maybe a lease) on this file. Leases are broken on
 * a call to open() or truncate(). This function can sleep unless you
 * specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
{
        int error = 0;
        struct file_lock_context *ctx;
        struct file_lock *new_fl, *fl, *tmp;
        unsigned long break_time;
        int want_write = (mode & O_ACCMODE) != O_RDONLY;
        LIST_HEAD(dispose);

        new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
        if (IS_ERR(new_fl))
                return PTR_ERR(new_fl);
        new_fl->fl_flags = type;

        /* typically we will check that ctx is non-NULL before calling */
        ctx = smp_load_acquire(&inode->i_flctx);
        if (!ctx) {
                WARN_ON_ONCE(1);
                goto free_lock;
        }

        percpu_down_read(&file_rwsem);
        spin_lock(&ctx->flc_lock);

        time_out_leases(inode, &dispose);

        if (!any_leases_conflict(inode, new_fl))
                goto out;

        break_time = 0;
        if (lease_break_time > 0) {
                break_time = jiffies + lease_break_time * HZ;
                if (break_time == 0)
                        break_time++;	/* so that 0 means no break time */
        }

        list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
                if (!leases_conflict(fl, new_fl))
                        continue;
                if (want_write) {
                        if (fl->fl_flags & FL_UNLOCK_PENDING)
                                continue;
                        fl->fl_flags |= FL_UNLOCK_PENDING;
                        fl->fl_break_time = break_time;
                } else {
                        if (lease_breaking(fl))
                                continue;
                        fl->fl_flags |= FL_DOWNGRADE_PENDING;
                        fl->fl_downgrade_time = break_time;
                }
                if (fl->fl_lmops->lm_break(fl))
                        locks_delete_lock_ctx(fl, &dispose);
        }

        if (list_empty(&ctx->flc_lease))
                goto out;

        if (mode & O_NONBLOCK) {
                trace_break_lease_noblock(inode, new_fl);
                error = -EWOULDBLOCK;
                goto out;
        }

restart:
        fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
        break_time = fl->fl_break_time;
        if (break_time != 0)
                break_time -= jiffies;
        if (break_time == 0)
                break_time++;
        locks_insert_block(fl, new_fl, leases_conflict);
        trace_break_lease_block(inode, new_fl);
        spin_unlock(&ctx->flc_lock);
        percpu_up_read(&file_rwsem);

        locks_dispose_list(&dispose);
        error = wait_event_interruptible_timeout(new_fl->fl_wait,
                                                 list_empty(&new_fl->fl_blocked_member),
                                                 break_time);

        percpu_down_read(&file_rwsem);
        spin_lock(&ctx->flc_lock);
        trace_break_lease_unblock(inode, new_fl);
        locks_delete_block(new_fl);
        if (error >= 0) {
                /*
                 * Wait for the next conflicting lease that has not been
                 * broken yet
                 */
                if (error == 0)
                        time_out_leases(inode, &dispose);
                if (any_leases_conflict(inode, new_fl))
                        goto restart;
                error = 0;
        }
out:
        spin_unlock(&ctx->flc_lock);
        percpu_up_read(&file_rwsem);
        locks_dispose_list(&dispose);
free_lock:
        locks_free_lock(new_fl);
        return error;
}
EXPORT_SYMBOL_NS(__break_lease, ANDROID_GKI_VFS_EXPORT_ONLY);

/**
 * lease_get_mtime - update modified time of an inode with exclusive lease
 * @inode: the inode
 * @time:  pointer to a timespec which contains the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases. The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec64 *time)
{
        bool has_lease = false;
        struct file_lock_context *ctx;
        struct file_lock *fl;

        ctx = smp_load_acquire(&inode->i_flctx);
        if (ctx && !list_empty_careful(&ctx->flc_lease)) {
                spin_lock(&ctx->flc_lock);
                fl = list_first_entry_or_null(&ctx->flc_lease,
                                              struct file_lock, fl_list);
                if (fl && (fl->fl_type == F_WRLCK))
                        has_lease = true;
                spin_unlock(&ctx->flc_lock);
        }

        if (has_lease)
                *time = current_time(inode);
}
EXPORT_SYMBOL(lease_get_mtime);

/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * The value returned by this function will be one of
 * (if no lease break is pending):
 *
 * %F_RDLCK to indicate a shared lease is held.
 *
 * %F_WRLCK to indicate an exclusive lease is held.
 *
 * %F_UNLCK to indicate no lease is held.
 *
 * (if a lease break is pending):
 *
 * %F_RDLCK to indicate an exclusive lease needs to be
 * changed to a shared lease (or removed).
 *
 * %F_UNLCK to indicate the lease needs to be removed.
 *
 * XXX: sfr & willy disagree over whether F_INPROGRESS
 * should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
        struct file_lock *fl;
        struct inode *inode = locks_inode(filp);
        struct file_lock_context *ctx;
        int type = F_UNLCK;
        LIST_HEAD(dispose);

        ctx = smp_load_acquire(&inode->i_flctx);
        if (ctx && !list_empty_careful(&ctx->flc_lease)) {
                percpu_down_read(&file_rwsem);
                spin_lock(&ctx->flc_lock);
                time_out_leases(inode, &dispose);
                list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
                        if (fl->fl_file != filp)
                                continue;
                        type = target_leasetype(fl);
                        break;
                }
                spin_unlock(&ctx->flc_lock);
                percpu_up_read(&file_rwsem);

                locks_dispose_list(&dispose);
        }
        return type;
}
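
/*
 * A minimal userspace sketch (illustrative only):
 *
 *	int type = fcntl(fd, F_GETLEASE);
 *	(type is F_RDLCK, F_WRLCK or F_UNLCK as documented above)
 */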
1694
1695 /**
1696 * check_conflicting_open - see if the given file points to an inode that has
1697 * an existing open that would conflict with the
1698 * desired lease.
1699 * @filp: file to check
1700 * @arg: type of lease that we're trying to acquire
1701 * @flags: current lock flags
1702 *
1703 * Check to see if there's an existing open fd on this file that would
1704 * conflict with the lease we're trying to set.
1705 */
1706 static int
1707 check_conflicting_open(struct file *filp, const long arg, int flags)
1708 {
1709 struct inode *inode = locks_inode(filp);
1710 int self_wcount = 0, self_rcount = 0;
1711
1712 if (flags & FL_LAYOUT)
1713 return 0;
1714 if (flags & FL_DELEG)
1715 /* We leave these checks to the caller */
1716 return 0;
1717
1718 if (arg == F_RDLCK)
1719 return inode_is_open_for_write(inode) ? -EAGAIN : 0;
1720 else if (arg != F_WRLCK)
1721 return 0;
1722
1723 /*
1724 * Make sure the only read/write references are from the lease requestor.
1725 * Note that this will result in denying write leases when i_writecount
1726 * is negative, which is what we want. (We shouldn't grant write leases
1727 * on files open for execution.)
1728 */
1729 if (filp->f_mode & FMODE_WRITE)
1730 self_wcount = 1;
1731 else if (filp->f_mode & FMODE_READ)
1732 self_rcount = 1;
1733
1734 if (atomic_read(&inode->i_writecount) != self_wcount ||
1735 atomic_read(&inode->i_readcount) != self_rcount)
1736 return -EAGAIN;
1737
1738 return 0;
1739 }
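/*
 * Worked example of the check above (informal): a process that opened the
 * file O_WRONLY and asks for F_WRLCK contributes self_wcount == 1, so the
 * write lease is granted only when i_writecount == 1 and i_readcount == 0,
 * i.e. no other open reference exists. A file open for execution drives
 * i_writecount negative, which can never match, so it is correctly refused.
 */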
1740
1741 static int
1742 generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
1743 {
1744 struct file_lock *fl, *my_fl = NULL, *lease;
1745 struct inode *inode = locks_inode(filp);
1746 struct file_lock_context *ctx;
1747 bool is_deleg = (*flp)->fl_flags & FL_DELEG;
1748 int error;
1749 LIST_HEAD(dispose);
1750
1751 lease = *flp;
1752 trace_generic_add_lease(inode, lease);
1753
1754 /* Note that arg is never F_UNLCK here */
1755 ctx = locks_get_lock_context(inode, arg);
1756 if (!ctx)
1757 return -ENOMEM;
1758
1759 /*
1760 * In the delegation case we need mutual exclusion with
1761 * a number of operations that take the i_mutex. We trylock
1762 * because delegations are an optional optimization, and if
1763 * there's some chance of a conflict we'd rather not
1764 * bother; maybe that's a sign this just isn't a good file to
1765 * hand out a delegation on.
1766 */
1767 if (is_deleg && !inode_trylock(inode))
1768 return -EAGAIN;
1769
1770 if (is_deleg && arg == F_WRLCK) {
1771 /* Write delegations are not currently supported: */
1772 inode_unlock(inode);
1773 WARN_ON_ONCE(1);
1774 return -EINVAL;
1775 }
1776
1777 percpu_down_read(&file_rwsem);
1778 spin_lock(&ctx->flc_lock);
1779 time_out_leases(inode, &dispose);
1780 error = check_conflicting_open(filp, arg, lease->fl_flags);
1781 if (error)
1782 goto out;
1783
1784 /*
1785 * At this point, we know that if there is an exclusive
1786 * lease on this file, then we hold it on this filp
1787 * (otherwise our open of this file would have blocked).
1788 * And if we are trying to acquire an exclusive lease,
1789 * then the file is not open by anyone (including us)
1790 * except for this filp.
1791 */
1792 error = -EAGAIN;
1793 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1794 if (fl->fl_file == filp &&
1795 fl->fl_owner == lease->fl_owner) {
1796 my_fl = fl;
1797 continue;
1798 }
1799
1800 /*
1801 * No exclusive leases if someone else has a lease on
1802 * this file:
1803 */
1804 if (arg == F_WRLCK)
1805 goto out;
1806 /*
1807 * Modifying our existing lease is OK, but don't take a
1808 * new one while someone else is opening for write:
1809 */
1810 if (fl->fl_flags & FL_UNLOCK_PENDING)
1811 goto out;
1812 }
1813
1814 if (my_fl != NULL) {
1815 lease = my_fl;
1816 error = lease->fl_lmops->lm_change(lease, arg, &dispose);
1817 if (error)
1818 goto out;
1819 goto out_setup;
1820 }
1821
1822 error = -EINVAL;
1823 if (!leases_enable)
1824 goto out;
1825
1826 locks_insert_lock_ctx(lease, &ctx->flc_lease);
1827 /*
1828 * The check in break_lease() is lockless. It's possible for another
1829 * open to race in after we did the earlier check for a conflicting
1830 * open but before the lease was inserted. Check again for a
1831 * conflicting open and cancel the lease if there is one.
1832 *
1833 * We also add a barrier here to ensure that the insertion of the lock
1834 * precedes these checks.
1835 */
1836 smp_mb();
1837 error = check_conflicting_open(filp, arg, lease->fl_flags);
1838 if (error) {
1839 locks_unlink_lock_ctx(lease);
1840 goto out;
1841 }
1842
1843 out_setup:
1844 if (lease->fl_lmops->lm_setup)
1845 lease->fl_lmops->lm_setup(lease, priv);
1846 out:
1847 spin_unlock(&ctx->flc_lock);
1848 percpu_up_read(&file_rwsem);
1849 locks_dispose_list(&dispose);
1850 if (is_deleg)
1851 inode_unlock(inode);
1852 if (!error && !my_fl)
1853 *flp = NULL;
1854 return error;
1855 }
1856
1857 static int generic_delete_lease(struct file *filp, void *owner)
1858 {
1859 int error = -EAGAIN;
1860 struct file_lock *fl, *victim = NULL;
1861 struct inode *inode = locks_inode(filp);
1862 struct file_lock_context *ctx;
1863 LIST_HEAD(dispose);
1864
1865 ctx = smp_load_acquire(&inode->i_flctx);
1866 if (!ctx) {
1867 trace_generic_delete_lease(inode, NULL);
1868 return error;
1869 }
1870
1871 percpu_down_read(&file_rwsem);
1872 spin_lock(&ctx->flc_lock);
1873 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1874 if (fl->fl_file == filp &&
1875 fl->fl_owner == owner) {
1876 victim = fl;
1877 break;
1878 }
1879 }
1880 trace_generic_delete_lease(inode, victim);
1881 if (victim)
1882 error = victim->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
1883 spin_unlock(&ctx->flc_lock);
1884 percpu_up_read(&file_rwsem);
1885 locks_dispose_list(&dispose);
1886 return error;
1887 }
1888
1889 /**
1890 * generic_setlease - sets a lease on an open file
1891 * @filp: file pointer
1892 * @arg: type of lease to obtain
1893 * @flp: input - file_lock to use, output - file_lock inserted
1894 * @priv: private data for lm_setup (may be NULL if lm_setup
1895 * doesn't require it)
1896 *
1897 * The (input) flp->fl_lmops->lm_break function is required
1898 * by break_lease().
1899 */
1900 int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
1901 void **priv)
1902 {
1903 struct inode *inode = locks_inode(filp);
1904 kuid_t uid = i_uid_into_mnt(file_mnt_user_ns(filp), inode);
1905 int error;
1906
1907 if ((!uid_eq(current_fsuid(), uid)) && !capable(CAP_LEASE))
1908 return -EACCES;
1909 if (!S_ISREG(inode->i_mode))
1910 return -EINVAL;
1911 error = security_file_lock(filp, arg);
1912 if (error)
1913 return error;
1914
1915 switch (arg) {
1916 case F_UNLCK:
1917 return generic_delete_lease(filp, *priv);
1918 case F_RDLCK:
1919 case F_WRLCK:
1920 if (!(*flp)->fl_lmops->lm_break) {
1921 WARN_ON_ONCE(1);
1922 return -ENOLCK;
1923 }
1924
1925 return generic_add_lease(filp, arg, flp, priv);
1926 default:
1927 return -EINVAL;
1928 }
1929 }
1930 EXPORT_SYMBOL(generic_setlease);
1931
1932 #if IS_ENABLED(CONFIG_SRCU)
1933 /*
1934 * Kernel subsystems can register to be notified on any attempt to set
1935 * a new lease with the lease_notifier_chain. This is used by (e.g.) nfsd
1936 * to close files that it may have cached when there is an attempt to set a
1937 * conflicting lease.
1938 */
1939 static struct srcu_notifier_head lease_notifier_chain;
1940
1941 static inline void
1942 lease_notifier_chain_init(void)
1943 {
1944 srcu_init_notifier_head(&lease_notifier_chain);
1945 }
1946
1947 static inline void
1948 setlease_notifier(long arg, struct file_lock *lease)
1949 {
1950 if (arg != F_UNLCK)
1951 srcu_notifier_call_chain(&lease_notifier_chain, arg, lease);
1952 }
1953
1954 int lease_register_notifier(struct notifier_block *nb)
1955 {
1956 return srcu_notifier_chain_register(&lease_notifier_chain, nb);
1957 }
1958 EXPORT_SYMBOL_GPL(lease_register_notifier);
1959
1960 void lease_unregister_notifier(struct notifier_block *nb)
1961 {
1962 srcu_notifier_chain_unregister(&lease_notifier_chain, nb);
1963 }
1964 EXPORT_SYMBOL_GPL(lease_unregister_notifier);
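/*
 * Minimal sketch of a client of this chain; the callback and its
 * NOTIFY_DONE return value are illustrative assumptions, not an existing
 * in-tree user.
 *
 *	static int example_lease_event(struct notifier_block *nb,
 *				       unsigned long arg, void *data)
 *	{
 *		struct file_lock *lease = data;
 *
 *		pr_debug("lease request type %lu on %pD\n", arg, lease->fl_file);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_lease_nb = {
 *		.notifier_call = example_lease_event,
 *	};
 *
 *	// module init:  lease_register_notifier(&example_lease_nb);
 *	// module exit:  lease_unregister_notifier(&example_lease_nb);
 */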
1965
1966 #else /* !IS_ENABLED(CONFIG_SRCU) */
1967 static inline void
1968 lease_notifier_chain_init(void)
1969 {
1970 }
1971
1972 static inline void
1973 setlease_notifier(long arg, struct file_lock *lease)
1974 {
1975 }
1976
1977 int lease_register_notifier(struct notifier_block *nb)
1978 {
1979 return 0;
1980 }
1981 EXPORT_SYMBOL_GPL(lease_register_notifier);
1982
1983 void lease_unregister_notifier(struct notifier_block *nb)
1984 {
1985 }
1986 EXPORT_SYMBOL_GPL(lease_unregister_notifier);
1987
1988 #endif /* IS_ENABLED(CONFIG_SRCU) */
1989
1990 /**
1991 * vfs_setlease - sets a lease on an open file
1992 * @filp: file pointer
1993 * @arg: type of lease to obtain
1994 * @lease: file_lock to use when adding a lease
1995 * @priv: private info for lm_setup when adding a lease (may be
1996 * NULL if lm_setup doesn't require it)
1997 *
1998 * Call this to establish a lease on the file. The "lease" argument is not
1999 * used for F_UNLCK requests and may be NULL. For commands that set or alter
2000 * an existing lease, the ``(*lease)->fl_lmops->lm_break`` operation must be
2001 * set; if not, this function will return -ENOLCK (and generate a scary-looking
2002 * stack trace).
2003 *
2004 * The "priv" pointer is passed directly to the lm_setup function as-is. It
2005 * may be NULL if the lm_setup operation doesn't require it.
2006 */
2007 int
2008 vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
2009 {
2010 if (lease)
2011 setlease_notifier(arg, *lease);
2012 if (filp->f_op->setlease)
2013 return filp->f_op->setlease(filp, arg, lease, priv);
2014 else
2015 return generic_setlease(filp, arg, lease, priv);
2016 }
2017 EXPORT_SYMBOL_GPL(vfs_setlease);
2018
2019 static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
2020 {
2021 struct file_lock *fl;
2022 struct fasync_struct *new;
2023 int error;
2024
2025 fl = lease_alloc(filp, arg);
2026 if (IS_ERR(fl))
2027 return PTR_ERR(fl);
2028
2029 new = fasync_alloc();
2030 if (!new) {
2031 locks_free_lock(fl);
2032 return -ENOMEM;
2033 }
2034 new->fa_fd = fd;
2035
2036 error = vfs_setlease(filp, arg, &fl, (void **)&new);
2037 if (fl)
2038 locks_free_lock(fl);
2039 if (new)
2040 fasync_free(new);
2041 return error;
2042 }
2043
2044 /**
2045 * fcntl_setlease - sets a lease on an open file
2046 * @fd: open file descriptor
2047 * @filp: file pointer
2048 * @arg: type of lease to obtain
2049 *
2050 * Call this fcntl to establish a lease on the file.
2051 * Note that you also need to call %F_SETSIG to
2052 * receive a signal when the lease is broken.
2053 */
2054 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
2055 {
2056 if (arg == F_UNLCK)
2057 return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
2058 return do_fcntl_add_lease(fd, filp, arg);
2059 }
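/*
 * Illustrative userspace sketch (error handling elided): taking a read
 * lease and, as the comment above notes, arranging a signal for the lease
 * break. The file name is hypothetical.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <signal.h>
 *
 *	int fd = open("cachefile", O_RDONLY);
 *
 *	fcntl(fd, F_SETSIG, SIGRTMIN);		// deliver SIGRTMIN, not SIGIO
 *	fcntl(fd, F_SETLEASE, F_RDLCK);		// take a shared lease
 *	// ... serve from cache until the signal arrives ...
 *	fcntl(fd, F_SETLEASE, F_UNLCK);		// drop the lease
 */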
2060
2061 /**
2062 * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
2063 * @inode: inode of the file to apply to
2064 * @fl: The lock to be applied
2065 *
2066 * Apply a FLOCK style lock request to an inode.
2067 */
2068 static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2069 {
2070 int error;
2071 might_sleep();
2072 for (;;) {
2073 error = flock_lock_inode(inode, fl);
2074 if (error != FILE_LOCK_DEFERRED)
2075 break;
2076 error = wait_event_interruptible(fl->fl_wait,
2077 list_empty(&fl->fl_blocked_member));
2078 if (error)
2079 break;
2080 }
2081 locks_delete_block(fl);
2082 return error;
2083 }
2084
2085 /**
2086 * locks_lock_inode_wait - Apply a lock to an inode
2087 * @inode: inode of the file to apply to
2088 * @fl: The lock to be applied
2089 *
2090 * Apply a POSIX or FLOCK style lock request to an inode.
2091 */
2092 int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2093 {
2094 int res = 0;
2095 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
2096 case FL_POSIX:
2097 res = posix_lock_inode_wait(inode, fl);
2098 break;
2099 case FL_FLOCK:
2100 res = flock_lock_inode_wait(inode, fl);
2101 break;
2102 default:
2103 BUG();
2104 }
2105 return res;
2106 }
2107 EXPORT_SYMBOL(locks_lock_inode_wait);
2108
2109 /**
2110 * sys_flock - flock() system call.
2111 * @fd: the file descriptor to lock.
2112 * @cmd: the type of lock to apply.
2113 *
2114 * Apply a %FL_FLOCK style lock to an open file descriptor.
2115 * The @cmd can be one of:
2116 *
2117 * - %LOCK_SH -- a shared lock.
2118 * - %LOCK_EX -- an exclusive lock.
2119 * - %LOCK_UN -- remove an existing lock.
2120 * - %LOCK_MAND -- a 'mandatory' flock.
2121 * This exists to emulate Windows Share Modes.
2122 *
2123 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
2124 * processes read and write access respectively.
2125 */
2126 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
2127 {
2128 struct fd f = fdget(fd);
2129 struct file_lock *lock;
2130 int can_sleep, unlock;
2131 int error;
2132
2133 error = -EBADF;
2134 if (!f.file)
2135 goto out;
2136
2137 can_sleep = !(cmd & LOCK_NB);
2138 cmd &= ~LOCK_NB;
2139 unlock = (cmd == LOCK_UN);
2140
2141 if (!unlock && !(cmd & LOCK_MAND) &&
2142 !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
2143 goto out_putf;
2144
2145 lock = flock_make_lock(f.file, cmd, NULL);
2146 if (IS_ERR(lock)) {
2147 error = PTR_ERR(lock);
2148 goto out_putf;
2149 }
2150
2151 if (can_sleep)
2152 lock->fl_flags |= FL_SLEEP;
2153
2154 error = security_file_lock(f.file, lock->fl_type);
2155 if (error)
2156 goto out_free;
2157
2158 if (f.file->f_op->flock)
2159 error = f.file->f_op->flock(f.file,
2160 (can_sleep) ? F_SETLKW : F_SETLK,
2161 lock);
2162 else
2163 error = locks_lock_file_wait(f.file, lock);
2164
2165 out_free:
2166 locks_free_lock(lock);
2167
2168 out_putf:
2169 fdput(f);
2170 out:
2171 return error;
2172 }
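/*
 * Illustrative userspace sketch of the call this syscall implements;
 * LOCK_NB turns the blocking request into a trylock that fails with
 * EWOULDBLOCK.
 *
 *	#include <sys/file.h>
 *	#include <errno.h>
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1 && errno == EWOULDBLOCK) {
 *		// lock is held elsewhere; retry or back off
 *	}
 *	// ... critical section ...
 *	flock(fd, LOCK_UN);
 */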
2173
2174 /**
2175 * vfs_test_lock - test file byte range lock
2176 * @filp: The file to test lock for
2177 * @fl: The lock to test; also used to hold result
2178 *
2179 * Returns -ERRNO on failure. Indicates the presence of a conflicting lock by
2180 * setting fl->fl_type to something other than F_UNLCK.
2181 */
2182 int vfs_test_lock(struct file *filp, struct file_lock *fl)
2183 {
2184 if (filp->f_op->lock)
2185 return filp->f_op->lock(filp, F_GETLK, fl);
2186 posix_test_lock(filp, fl);
2187 return 0;
2188 }
2189 EXPORT_SYMBOL_GPL(vfs_test_lock);
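/*
 * Illustrative userspace view of the same test: F_GETLK either rewrites
 * the struct flock to describe the first conflicting lock, or sets l_type
 * to F_UNLCK when the range could be locked.
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,		// 0 means "through EOF"
 *	};
 *
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type == F_UNLCK)
 *		// no conflicting lock on the whole file
 */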
2190
2191 /**
2192 * locks_translate_pid - translate a file_lock's fl_pid number into a namespace
2193 * @fl: The file_lock whose fl_pid should be translated
2194 * @ns: The namespace into which the pid should be translated
2195 *
2196 * Used to translate an fl_pid into a namespace virtual pid number
2197 */
2198 static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
2199 {
2200 pid_t vnr;
2201 struct pid *pid;
2202
2203 if (IS_OFDLCK(fl))
2204 return -1;
2205 if (IS_REMOTELCK(fl))
2206 return fl->fl_pid;
2207 /*
2208 * If the flock owner process is dead and its pid has been already
2209 * freed, the translation below won't work, but we still want to show
2210 * flock owner pid number in init pidns.
2211 */
2212 if (ns == &init_pid_ns)
2213 return (pid_t)fl->fl_pid;
2214
2215 rcu_read_lock();
2216 pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
2217 vnr = pid_nr_ns(pid, ns);
2218 rcu_read_unlock();
2219 return vnr;
2220 }
2221
2222 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
2223 {
2224 flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2225 #if BITS_PER_LONG == 32
2226 /*
2227 * Make sure we can represent the posix lock via
2228 * legacy 32bit flock.
2229 */
2230 if (fl->fl_start > OFFT_OFFSET_MAX)
2231 return -EOVERFLOW;
2232 if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
2233 return -EOVERFLOW;
2234 #endif
2235 flock->l_start = fl->fl_start;
2236 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2237 fl->fl_end - fl->fl_start + 1;
2238 flock->l_whence = 0;
2239 flock->l_type = fl->fl_type;
2240 return 0;
2241 }
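/*
 * Example of the mapping above: an internal lock on bytes 100-199 becomes
 * l_start = 100, l_len = 100, while a lock running to OFFSET_MAX becomes
 * the POSIX "until EOF" form l_len = 0.
 */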
2242
2243 #if BITS_PER_LONG == 32
2244 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
2245 {
2246 flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2247 flock->l_start = fl->fl_start;
2248 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2249 fl->fl_end - fl->fl_start + 1;
2250 flock->l_whence = 0;
2251 flock->l_type = fl->fl_type;
2252 }
2253 #endif
2254
2255 /* Report the first existing lock that would conflict with l.
2256 * This implements the F_GETLK command of fcntl().
2257 */
2258 int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
2259 {
2260 struct file_lock *fl;
2261 int error;
2262
2263 fl = locks_alloc_lock();
2264 if (fl == NULL)
2265 return -ENOMEM;
2266 error = -EINVAL;
2267 if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
2268 goto out;
2269
2270 error = flock_to_posix_lock(filp, fl, flock);
2271 if (error)
2272 goto out;
2273
2274 if (cmd == F_OFD_GETLK) {
2275 error = -EINVAL;
2276 if (flock->l_pid != 0)
2277 goto out;
2278
2279 fl->fl_flags |= FL_OFDLCK;
2280 fl->fl_owner = filp;
2281 }
2282
2283 error = vfs_test_lock(filp, fl);
2284 if (error)
2285 goto out;
2286
2287 flock->l_type = fl->fl_type;
2288 if (fl->fl_type != F_UNLCK) {
2289 error = posix_lock_to_flock(flock, fl);
2290 if (error)
2291 goto out;
2292 }
2293 out:
2294 locks_free_lock(fl);
2295 return error;
2296 }
2297
2298 /**
2299 * vfs_lock_file - file byte range lock
2300 * @filp: The file to apply the lock to
2301 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
2302 * @fl: The lock to be applied
2303 * @conf: Place to return a copy of the conflicting lock, if found.
2304 *
2305 * A caller that doesn't care about the conflicting lock may pass NULL
2306 * as the final argument.
2307 *
2308 * If the filesystem defines a private ->lock() method, then @conf will
2309 * be left unchanged; so a caller that cares should initialize it to
2310 * some acceptable default.
2311 *
2312 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
2313 * locks, the ->lock() interface may return asynchronously, before the lock has
2314 * been granted or denied by the underlying filesystem, if (and only if)
2315 * lm_grant is set. Callers expecting ->lock() to return asynchronously
2316 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
2317 * the request is for a blocking lock. When ->lock() does return asynchronously,
2318 * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
2319 * request completes.
2320 * If the request is for a non-blocking lock, the file system should return
2321 * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
2322 * with the result. If the request timed out, the callback routine will return a
2323 * nonzero return code and the file system should release the lock. The file
2324 * system is also responsible for keeping a corresponding posix lock when it
2325 * grants a lock so the VFS can find out which locks are locally held and do
2326 * the correct lock cleanup when required.
2327 * The underlying filesystem must not drop the kernel lock or call
2328 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
2329 * return code.
2330 */
2331 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2332 {
2333 if (filp->f_op->lock)
2334 return filp->f_op->lock(filp, cmd, fl);
2335 else
2336 return posix_lock_file(filp, fl, conf);
2337 }
2338 EXPORT_SYMBOL_GPL(vfs_lock_file);
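/*
 * Rough sketch of the deferred protocol described above for a hypothetical
 * filesystem; every name here is illustrative, and the lm_grant call is
 * made later from the filesystem's own completion path.
 *
 *	static int examplefs_lock(struct file *filp, int cmd, struct file_lock *fl)
 *	{
 *		// queue the request to the remote lock server ...
 *		return FILE_LOCK_DEFERRED;
 *	}
 *
 *	// completion path, once the server has answered:
 *	//	fl->fl_lmops->lm_grant(fl, result);
 */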
2339
2340 static int do_lock_file_wait(struct file *filp, unsigned int cmd,
2341 struct file_lock *fl)
2342 {
2343 int error;
2344
2345 error = security_file_lock(filp, fl->fl_type);
2346 if (error)
2347 return error;
2348
2349 for (;;) {
2350 error = vfs_lock_file(filp, cmd, fl, NULL);
2351 if (error != FILE_LOCK_DEFERRED)
2352 break;
2353 error = wait_event_interruptible(fl->fl_wait,
2354 list_empty(&fl->fl_blocked_member));
2355 if (error)
2356 break;
2357 }
2358 locks_delete_block(fl);
2359
2360 return error;
2361 }
2362
2363 /* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
2364 static int
2365 check_fmode_for_setlk(struct file_lock *fl)
2366 {
2367 switch (fl->fl_type) {
2368 case F_RDLCK:
2369 if (!(fl->fl_file->f_mode & FMODE_READ))
2370 return -EBADF;
2371 break;
2372 case F_WRLCK:
2373 if (!(fl->fl_file->f_mode & FMODE_WRITE))
2374 return -EBADF;
2375 }
2376 return 0;
2377 }
2378
2379 /* Apply the lock described by l to an open file descriptor.
2380 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2381 */
2382 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
2383 struct flock *flock)
2384 {
2385 struct file_lock *file_lock = locks_alloc_lock();
2386 struct inode *inode = locks_inode(filp);
2387 struct file *f;
2388 int error;
2389
2390 if (file_lock == NULL)
2391 return -ENOLCK;
2392
2393 error = flock_to_posix_lock(filp, file_lock, flock);
2394 if (error)
2395 goto out;
2396
2397 error = check_fmode_for_setlk(file_lock);
2398 if (error)
2399 goto out;
2400
2401 /*
2402 * If the cmd is requesting file-private locks, then set the
2403 * FL_OFDLCK flag and override the owner.
2404 */
2405 switch (cmd) {
2406 case F_OFD_SETLK:
2407 error = -EINVAL;
2408 if (flock->l_pid != 0)
2409 goto out;
2410
2411 cmd = F_SETLK;
2412 file_lock->fl_flags |= FL_OFDLCK;
2413 file_lock->fl_owner = filp;
2414 break;
2415 case F_OFD_SETLKW:
2416 error = -EINVAL;
2417 if (flock->l_pid != 0)
2418 goto out;
2419
2420 cmd = F_SETLKW;
2421 file_lock->fl_flags |= FL_OFDLCK;
2422 file_lock->fl_owner = filp;
2423 fallthrough;
2424 case F_SETLKW:
2425 file_lock->fl_flags |= FL_SLEEP;
2426 }
2427
2428 error = do_lock_file_wait(filp, cmd, file_lock);
2429
2430 /*
2431 * Attempt to detect a close/fcntl race and recover by releasing the
2432 * lock that was just acquired. There is no need to do that when we're
2433 * unlocking though, or for OFD locks.
2434 */
2435 if (!error && file_lock->fl_type != F_UNLCK &&
2436 !(file_lock->fl_flags & FL_OFDLCK)) {
2437 struct files_struct *files = current->files;
2438 /*
2439 * We need that spin_lock here - it prevents reordering between
2440 * update of i_flctx->flc_posix and check for it done in
2441 * close(). rcu_read_lock() wouldn't do.
2442 */
2443 spin_lock(&files->file_lock);
2444 f = files_lookup_fd_locked(files, fd);
2445 spin_unlock(&files->file_lock);
2446 if (f != filp) {
2447 file_lock->fl_type = F_UNLCK;
2448 error = do_lock_file_wait(filp, cmd, file_lock);
2449 WARN_ON_ONCE(error);
2450 error = -EBADF;
2451 }
2452 }
2453 out:
2454 trace_fcntl_setlk(inode, file_lock, error);
2455 locks_free_lock(file_lock);
2456 return error;
2457 }
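/*
 * Illustrative userspace sketch of the OFD variant handled above: the
 * lock is owned by the open file description, so it is shared by dups of
 * the fd, and l_pid must be zero on input.
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,
 *		.l_pid    = 0,		// required; nonzero gives EINVAL
 *	};
 *
 *	if (fcntl(fd, F_OFD_SETLK, &fl) == -1)
 *		// EAGAIN (or EACCES) on conflict
 */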
2458
2459 #if BITS_PER_LONG == 32
2460 /* Report the first existing lock that would conflict with l.
2461 * This implements the F_GETLK command of fcntl().
2462 */
2463 int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
2464 {
2465 struct file_lock *fl;
2466 int error;
2467
2468 fl = locks_alloc_lock();
2469 if (fl == NULL)
2470 return -ENOMEM;
2471
2472 error = -EINVAL;
2473 if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
2474 goto out;
2475
2476 error = flock64_to_posix_lock(filp, fl, flock);
2477 if (error)
2478 goto out;
2479
2480 if (cmd == F_OFD_GETLK) {
2481 error = -EINVAL;
2482 if (flock->l_pid != 0)
2483 goto out;
2484
2485 cmd = F_GETLK64;
2486 fl->fl_flags |= FL_OFDLCK;
2487 fl->fl_owner = filp;
2488 }
2489
2490 error = vfs_test_lock(filp, fl);
2491 if (error)
2492 goto out;
2493
2494 flock->l_type = fl->fl_type;
2495 if (fl->fl_type != F_UNLCK)
2496 posix_lock_to_flock64(flock, fl);
2497
2498 out:
2499 locks_free_lock(fl);
2500 return error;
2501 }
2502
2503 /* Apply the lock described by l to an open file descriptor.
2504 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2505 */
2506 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
2507 struct flock64 *flock)
2508 {
2509 struct file_lock *file_lock = locks_alloc_lock();
2510 struct file *f;
2511 int error;
2512
2513 if (file_lock == NULL)
2514 return -ENOLCK;
2515
2516 error = flock64_to_posix_lock(filp, file_lock, flock);
2517 if (error)
2518 goto out;
2519
2520 error = check_fmode_for_setlk(file_lock);
2521 if (error)
2522 goto out;
2523
2524 /*
2525 * If the cmd is requesting file-private locks, then set the
2526 * FL_OFDLCK flag and override the owner.
2527 */
2528 switch (cmd) {
2529 case F_OFD_SETLK:
2530 error = -EINVAL;
2531 if (flock->l_pid != 0)
2532 goto out;
2533
2534 cmd = F_SETLK64;
2535 file_lock->fl_flags |= FL_OFDLCK;
2536 file_lock->fl_owner = filp;
2537 break;
2538 case F_OFD_SETLKW:
2539 error = -EINVAL;
2540 if (flock->l_pid != 0)
2541 goto out;
2542
2543 cmd = F_SETLKW64;
2544 file_lock->fl_flags |= FL_OFDLCK;
2545 file_lock->fl_owner = filp;
2546 fallthrough;
2547 case F_SETLKW64:
2548 file_lock->fl_flags |= FL_SLEEP;
2549 }
2550
2551 error = do_lock_file_wait(filp, cmd, file_lock);
2552
2553 /*
2554 * Attempt to detect a close/fcntl race and recover by releasing the
2555 * lock that was just acquired. There is no need to do that when we're
2556 * unlocking though, or for OFD locks.
2557 */
2558 if (!error && file_lock->fl_type != F_UNLCK &&
2559 !(file_lock->fl_flags & FL_OFDLCK)) {
2560 struct files_struct *files = current->files;
2561 /*
2562 * We need that spin_lock here - it prevents reordering between
2563 * update of i_flctx->flc_posix and check for it done in
2564 * close(). rcu_read_lock() wouldn't do.
2565 */
2566 spin_lock(&files->file_lock);
2567 f = files_lookup_fd_locked(files, fd);
2568 spin_unlock(&files->file_lock);
2569 if (f != filp) {
2570 file_lock->fl_type = F_UNLCK;
2571 error = do_lock_file_wait(filp, cmd, file_lock);
2572 WARN_ON_ONCE(error);
2573 error = -EBADF;
2574 }
2575 }
2576 out:
2577 locks_free_lock(file_lock);
2578 return error;
2579 }
2580 #endif /* BITS_PER_LONG == 32 */
2581
2582 /*
2583 * This function is called when the file is being removed
2584 * from the task's fd array. POSIX locks belonging to this task
2585 * are deleted at this time.
2586 */
2587 void locks_remove_posix(struct file *filp, fl_owner_t owner)
2588 {
2589 int error;
2590 struct inode *inode = locks_inode(filp);
2591 struct file_lock lock;
2592 struct file_lock_context *ctx;
2593
2594 /*
2595 * If there are no locks held on this file, we don't need to call
2596 * posix_lock_file(). Another process could be setting a lock on this
2597 * file at the same time, but we wouldn't remove that lock anyway.
2598 */
2599 ctx = smp_load_acquire(&inode->i_flctx);
2600 if (!ctx || list_empty(&ctx->flc_posix))
2601 return;
2602
2603 locks_init_lock(&lock);
2604 lock.fl_type = F_UNLCK;
2605 lock.fl_flags = FL_POSIX | FL_CLOSE;
2606 lock.fl_start = 0;
2607 lock.fl_end = OFFSET_MAX;
2608 lock.fl_owner = owner;
2609 lock.fl_pid = current->tgid;
2610 lock.fl_file = filp;
2611 lock.fl_ops = NULL;
2612 lock.fl_lmops = NULL;
2613
2614 error = vfs_lock_file(filp, F_SETLK, &lock, NULL);
2615
2616 if (lock.fl_ops && lock.fl_ops->fl_release_private)
2617 lock.fl_ops->fl_release_private(&lock);
2618 trace_locks_remove_posix(inode, &lock, error);
2619 }
2620 EXPORT_SYMBOL(locks_remove_posix);
2621
2622 /* The i_flctx must be valid when calling into here */
2623 static void
2624 locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
2625 {
2626 struct file_lock fl;
2627 struct inode *inode = locks_inode(filp);
2628
2629 if (list_empty(&flctx->flc_flock))
2630 return;
2631
2632 flock_make_lock(filp, LOCK_UN, &fl);
2633 fl.fl_flags |= FL_CLOSE;
2634
2635 if (filp->f_op->flock)
2636 filp->f_op->flock(filp, F_SETLKW, &fl);
2637 else
2638 flock_lock_inode(inode, &fl);
2639
2640 if (fl.fl_ops && fl.fl_ops->fl_release_private)
2641 fl.fl_ops->fl_release_private(&fl);
2642 }
2643
2644 /* The i_flctx must be valid when calling into here */
2645 static void
2646 locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
2647 {
2648 struct file_lock *fl, *tmp;
2649 LIST_HEAD(dispose);
2650
2651 if (list_empty(&ctx->flc_lease))
2652 return;
2653
2654 percpu_down_read(&file_rwsem);
2655 spin_lock(&ctx->flc_lock);
2656 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
2657 if (filp == fl->fl_file)
2658 lease_modify(fl, F_UNLCK, &dispose);
2659 spin_unlock(&ctx->flc_lock);
2660 percpu_up_read(&file_rwsem);
2661
2662 locks_dispose_list(&dispose);
2663 }
2664
2665 /*
2666 * This function is called on the last close of an open file.
2667 */
2668 void locks_remove_file(struct file *filp)
2669 {
2670 struct file_lock_context *ctx;
2671
2672 ctx = smp_load_acquire(&locks_inode(filp)->i_flctx);
2673 if (!ctx)
2674 return;
2675
2676 /* remove any OFD locks */
2677 locks_remove_posix(filp, filp);
2678
2679 /* remove flock locks */
2680 locks_remove_flock(filp, ctx);
2681
2682 /* remove any leases */
2683 locks_remove_lease(filp, ctx);
2684
2685 spin_lock(&ctx->flc_lock);
2686 locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX");
2687 locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK");
2688 locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE");
2689 spin_unlock(&ctx->flc_lock);
2690 }
2691
2692 /**
2693 * vfs_cancel_lock - file byte range unblock lock
2694 * @filp: The file to apply the unblock to
2695 * @fl: The lock to be unblocked
2696 *
2697 * Used by lock managers to cancel blocked requests
2698 */
2699 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2700 {
2701 if (filp->f_op->lock)
2702 return filp->f_op->lock(filp, F_CANCELLK, fl);
2703 return 0;
2704 }
2705 EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2706
2707 /**
2708 * vfs_inode_has_locks - are any file locks held on @inode?
2709 * @inode: inode to check for locks
2710 *
2711 * Return true if there are any FL_POSIX or FL_FLOCK locks currently
2712 * set on @inode.
2713 */
2714 bool vfs_inode_has_locks(struct inode *inode)
2715 {
2716 struct file_lock_context *ctx;
2717 bool ret;
2718
2719 ctx = smp_load_acquire(&inode->i_flctx);
2720 if (!ctx)
2721 return false;
2722
2723 spin_lock(&ctx->flc_lock);
2724 ret = !list_empty(&ctx->flc_posix) || !list_empty(&ctx->flc_flock);
2725 spin_unlock(&ctx->flc_lock);
2726 return ret;
2727 }
2728 EXPORT_SYMBOL_GPL(vfs_inode_has_locks);
2729
2730 #ifdef CONFIG_PROC_FS
2731 #include <linux/proc_fs.h>
2732 #include <linux/seq_file.h>
2733
2734 struct locks_iterator {
2735 int li_cpu;
2736 loff_t li_pos;
2737 };
2738
2739 static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2740 loff_t id, char *pfx, int repeat)
2741 {
2742 struct inode *inode = NULL;
2743 unsigned int fl_pid;
2744 struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
2745
2746 fl_pid = locks_translate_pid(fl, proc_pidns);
2747 /*
2748 * If the lock owner is dead (and its pid has been freed) or not visible
2749 * in the current pidns, zero is shown as the pid value. Check the lock
2750 * info from init_pid_ns to get the saved lock pid value.
2751 */
2752
2753 if (fl->fl_file != NULL)
2754 inode = locks_inode(fl->fl_file);
2755
2756 seq_printf(f, "%lld: ", id);
2757
2758 if (repeat)
2759 seq_printf(f, "%*s", repeat - 1 + (int)strlen(pfx), pfx);
2760
2761 if (IS_POSIX(fl)) {
2762 if (fl->fl_flags & FL_ACCESS)
2763 seq_puts(f, "ACCESS");
2764 else if (IS_OFDLCK(fl))
2765 seq_puts(f, "OFDLCK");
2766 else
2767 seq_puts(f, "POSIX ");
2768
2769 seq_printf(f, " %s ",
2770 (inode == NULL) ? "*NOINODE*" : "ADVISORY ");
2771 } else if (IS_FLOCK(fl)) {
2772 if (fl->fl_type & LOCK_MAND) {
2773 seq_puts(f, "FLOCK MSNFS ");
2774 } else {
2775 seq_puts(f, "FLOCK ADVISORY ");
2776 }
2777 } else if (IS_LEASE(fl)) {
2778 if (fl->fl_flags & FL_DELEG)
2779 seq_puts(f, "DELEG ");
2780 else
2781 seq_puts(f, "LEASE ");
2782
2783 if (lease_breaking(fl))
2784 seq_puts(f, "BREAKING ");
2785 else if (fl->fl_file)
2786 seq_puts(f, "ACTIVE ");
2787 else
2788 seq_puts(f, "BREAKER ");
2789 } else {
2790 seq_puts(f, "UNKNOWN UNKNOWN ");
2791 }
2792 if (fl->fl_type & LOCK_MAND) {
2793 seq_printf(f, "%s ",
2794 (fl->fl_type & LOCK_READ)
2795 ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ "
2796 : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2797 } else {
2798 int type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type;
2799
2800 seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" :
2801 (type == F_RDLCK) ? "READ" : "UNLCK");
2802 }
2803 if (inode) {
2804 /* userspace relies on this representation of dev_t */
2805 seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
2806 MAJOR(inode->i_sb->s_dev),
2807 MINOR(inode->i_sb->s_dev), inode->i_ino);
2808 } else {
2809 seq_printf(f, "%d <none>:0 ", fl_pid);
2810 }
2811 if (IS_POSIX(fl)) {
2812 if (fl->fl_end == OFFSET_MAX)
2813 seq_printf(f, "%Ld EOF\n", fl->fl_start);
2814 else
2815 seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2816 } else {
2817 seq_puts(f, "0 EOF\n");
2818 }
2819 }
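/*
 * The resulting /proc/locks lines look like (values illustrative):
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:02:131090 0 EOF
 *	2: FLOCK ADVISORY WRITE 1235 08:02:131091 0 EOF
 *	2: -> FLOCK ADVISORY WRITE 1236 08:02:131091 0 EOF
 *
 * i.e. "id: class access type pid maj:min:ino start end", where a "-> "
 * prefix marks a request blocked on the entry above it.
 */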
2820
2821 static struct file_lock *get_next_blocked_member(struct file_lock *node)
2822 {
2823 struct file_lock *tmp;
2824
2825 /* NULL node or root node */
2826 if (node == NULL || node->fl_blocker == NULL)
2827 return NULL;
2828
2829 /* The next member in the circular list may wrap back to the node itself */
2830 tmp = list_next_entry(node, fl_blocked_member);
2831 if (list_entry_is_head(tmp, &node->fl_blocker->fl_blocked_requests, fl_blocked_member)
2832 || tmp == node) {
2833 return NULL;
2834 }
2835
2836 return tmp;
2837 }
2838
2839 static int locks_show(struct seq_file *f, void *v)
2840 {
2841 struct locks_iterator *iter = f->private;
2842 struct file_lock *cur, *tmp;
2843 struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
2844 int level = 0;
2845
2846 cur = hlist_entry(v, struct file_lock, fl_link);
2847
2848 if (locks_translate_pid(cur, proc_pidns) == 0)
2849 return 0;
2850
2851 /* View this cross-linked list as a binary tree: the first member of fl_blocked_requests
2852 * is the left child of the current node, the next sibling in fl_blocked_member is the
2853 * right child, and the parent of the current node is reachable via fl_blocker, so the
2854 * problem reduces to an ordinary binary-tree traversal.
2855 */
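/*
 * For example (illustrative), if B and C both block on A, and D blocks
 * on B:
 *
 *	A				// top-level lock (this iterator's v)
 *	|-> B -> C			// A's fl_blocked_requests
 *	    |-> D			// B's fl_blocked_requests
 *
 * the loop below visits A, B, D, C, printing each level with one more
 * "-> " of indentation.
 */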
2856 while (cur != NULL) {
2857 if (level)
2858 lock_get_status(f, cur, iter->li_pos, "-> ", level);
2859 else
2860 lock_get_status(f, cur, iter->li_pos, "", level);
2861
2862 if (!list_empty(&cur->fl_blocked_requests)) {
2863 /* Turn left */
2864 cur = list_first_entry_or_null(&cur->fl_blocked_requests,
2865 struct file_lock, fl_blocked_member);
2866 level++;
2867 } else {
2868 /* Turn right */
2869 tmp = get_next_blocked_member(cur);
2870 /* Fall back to parent node */
2871 while (tmp == NULL && cur->fl_blocker != NULL) {
2872 cur = cur->fl_blocker;
2873 level--;
2874 tmp = get_next_blocked_member(cur);
2875 }
2876 cur = tmp;
2877 }
2878 }
2879
2880 return 0;
2881 }
2882
2883 static void __show_fd_locks(struct seq_file *f,
2884 struct list_head *head, int *id,
2885 struct file *filp, struct files_struct *files)
2886 {
2887 struct file_lock *fl;
2888
2889 list_for_each_entry(fl, head, fl_list) {
2890
2891 if (filp != fl->fl_file)
2892 continue;
2893 if (fl->fl_owner != files &&
2894 fl->fl_owner != filp)
2895 continue;
2896
2897 (*id)++;
2898 seq_puts(f, "lock:\t");
2899 lock_get_status(f, fl, *id, "", 0);
2900 }
2901 }
2902
2903 void show_fd_locks(struct seq_file *f,
2904 struct file *filp, struct files_struct *files)
2905 {
2906 struct inode *inode = locks_inode(filp);
2907 struct file_lock_context *ctx;
2908 int id = 0;
2909
2910 ctx = smp_load_acquire(&inode->i_flctx);
2911 if (!ctx)
2912 return;
2913
2914 spin_lock(&ctx->flc_lock);
2915 __show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
2916 __show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
2917 __show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
2918 spin_unlock(&ctx->flc_lock);
2919 }
2920
2921 static void *locks_start(struct seq_file *f, loff_t *pos)
2922 __acquires(&blocked_lock_lock)
2923 {
2924 struct locks_iterator *iter = f->private;
2925
2926 iter->li_pos = *pos + 1;
2927 percpu_down_write(&file_rwsem);
2928 spin_lock(&blocked_lock_lock);
2929 return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
2930 }
2931
2932 static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2933 {
2934 struct locks_iterator *iter = f->private;
2935
2936 ++iter->li_pos;
2937 return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
2938 }
2939
2940 static void locks_stop(struct seq_file *f, void *v)
2941 __releases(&blocked_lock_lock)
2942 {
2943 spin_unlock(&blocked_lock_lock);
2944 percpu_up_write(&file_rwsem);
2945 }
2946
2947 static const struct seq_operations locks_seq_operations = {
2948 .start = locks_start,
2949 .next = locks_next,
2950 .stop = locks_stop,
2951 .show = locks_show,
2952 };
2953
2954 static int __init proc_locks_init(void)
2955 {
2956 proc_create_seq_private("locks", 0, NULL, &locks_seq_operations,
2957 sizeof(struct locks_iterator), NULL);
2958 return 0;
2959 }
2960 fs_initcall(proc_locks_init);
2961 #endif
2962
2963 static int __init filelock_init(void)
2964 {
2965 int i;
2966
2967 flctx_cache = kmem_cache_create("file_lock_ctx",
2968 sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
2969
2970 filelock_cache = kmem_cache_create("file_lock_cache",
2971 sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2972
2973 for_each_possible_cpu(i) {
2974 struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);
2975
2976 spin_lock_init(&fll->lock);
2977 INIT_HLIST_HEAD(&fll->hlist);
2978 }
2979
2980 lease_notifier_chain_init();
2981 return 0;
2982 }
2983 core_initcall(filelock_init);
2984