1 /*
2  * (C) 1997 Linus Torvalds
3  * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
4  */
5 #include <linux/export.h>
6 #include <linux/fs.h>
7 #include <linux/mm.h>
8 #include <linux/backing-dev.h>
9 #include <linux/hash.h>
10 #include <linux/swap.h>
11 #include <linux/security.h>
12 #include <linux/cdev.h>
13 #include <linux/bootmem.h>
14 #include <linux/fsnotify.h>
15 #include <linux/mount.h>
16 #include <linux/posix_acl.h>
17 #include <linux/prefetch.h>
18 #include <linux/buffer_head.h> /* for inode_has_buffers */
19 #include <linux/ratelimit.h>
20 #include "internal.h"
21 
22 /*
23  * Inode locking rules:
24  *
25  * inode->i_lock protects:
26  *   inode->i_state, inode->i_hash, __iget()
27  * inode->i_sb->s_inode_lru_lock protects:
28  *   inode->i_sb->s_inode_lru, inode->i_lru
29  * inode_sb_list_lock protects:
30  *   sb->s_inodes, inode->i_sb_list
31  * bdi->wb.list_lock protects:
32  *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
33  * inode_hash_lock protects:
34  *   inode_hashtable, inode->i_hash
35  *
36  * Lock ordering:
37  *
38  * inode_sb_list_lock
39  *   inode->i_lock
40  *     inode->i_sb->s_inode_lru_lock
41  *
42  * bdi->wb.list_lock
43  *   inode->i_lock
44  *
45  * inode_hash_lock
46  *   inode_sb_list_lock
47  *   inode->i_lock
48  *
49  * iunique_lock
50  *   inode_hash_lock
51  */
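/*
 * Illustrative sketch (not part of the original file): code that needs
 * both the sb list lock and a per-inode lock must follow the ordering
 * documented above, e.g.:
 *
 *	spin_lock(&inode_sb_list_lock);
 *	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 *		spin_lock(&inode->i_lock);
 *		...
 *		spin_unlock(&inode->i_lock);
 *	}
 *	spin_unlock(&inode_sb_list_lock);
 *
 * Taking inode->i_lock first would invert the documented order and risk
 * deadlock; evict_inodes() below is a real user of this pattern.
 */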
52 
53 static unsigned int i_hash_mask __read_mostly;
54 static unsigned int i_hash_shift __read_mostly;
55 static struct hlist_head *inode_hashtable __read_mostly;
56 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
57 
58 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
59 
60 /*
61  * Empty aops. Can be used for the cases where the user does not
62  * define any of the address_space operations.
63  */
64 const struct address_space_operations empty_aops = {
65 };
66 EXPORT_SYMBOL(empty_aops);
67 
68 /*
69  * Statistics gathering..
70  */
71 struct inodes_stat_t inodes_stat;
72 
73 static DEFINE_PER_CPU(unsigned int, nr_inodes);
74 static DEFINE_PER_CPU(unsigned int, nr_unused);
75 
76 static struct kmem_cache *inode_cachep __read_mostly;
77 
78 static int get_nr_inodes(void)
79 {
80 	int i;
81 	int sum = 0;
82 	for_each_possible_cpu(i)
83 		sum += per_cpu(nr_inodes, i);
84 	return sum < 0 ? 0 : sum;
85 }
86 
87 static inline int get_nr_inodes_unused(void)
88 {
89 	int i;
90 	int sum = 0;
91 	for_each_possible_cpu(i)
92 		sum += per_cpu(nr_unused, i);
93 	return sum < 0 ? 0 : sum;
94 }
95 
96 int get_nr_dirty_inodes(void)
97 {
98 	/* not actually dirty inodes, but a wild approximation */
99 	int nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
100 	return nr_dirty > 0 ? nr_dirty : 0;
101 }
102 
103 /*
104  * Handle nr_inode sysctl
105  */
106 #ifdef CONFIG_SYSCTL
107 int proc_nr_inodes(ctl_table *table, int write,
108 		   void __user *buffer, size_t *lenp, loff_t *ppos)
109 {
110 	inodes_stat.nr_inodes = get_nr_inodes();
111 	inodes_stat.nr_unused = get_nr_inodes_unused();
112 	return proc_dointvec(table, write, buffer, lenp, ppos);
113 }
114 #endif
115 
116 /**
117  * inode_init_always - perform inode structure initialisation
118  * @sb: superblock inode belongs to
119  * @inode: inode to initialise
120  *
121  * These are initializations that need to be done on every inode
122  * allocation as the fields are not initialised by slab allocation.
123  */
124 int inode_init_always(struct super_block *sb, struct inode *inode)
125 {
126 	static const struct inode_operations empty_iops;
127 	static const struct file_operations empty_fops;
128 	struct address_space *const mapping = &inode->i_data;
129 
130 	inode->i_sb = sb;
131 	inode->i_blkbits = sb->s_blocksize_bits;
132 	inode->i_flags = 0;
133 	atomic_set(&inode->i_count, 1);
134 	inode->i_op = &empty_iops;
135 	inode->i_fop = &empty_fops;
136 	inode->__i_nlink = 1;
137 	inode->i_opflags = 0;
138 	i_uid_write(inode, 0);
139 	i_gid_write(inode, 0);
140 	atomic_set(&inode->i_writecount, 0);
141 	inode->i_size = 0;
142 	inode->i_blocks = 0;
143 	inode->i_bytes = 0;
144 	inode->i_generation = 0;
145 #ifdef CONFIG_QUOTA
146 	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
147 #endif
148 	inode->i_pipe = NULL;
149 	inode->i_bdev = NULL;
150 	inode->i_cdev = NULL;
151 	inode->i_rdev = 0;
152 	inode->dirtied_when = 0;
153 
154 	if (security_inode_alloc(inode))
155 		goto out;
156 	spin_lock_init(&inode->i_lock);
157 	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
158 
159 	mutex_init(&inode->i_mutex);
160 	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);
161 
162 	atomic_set(&inode->i_dio_count, 0);
163 
164 	mapping->a_ops = &empty_aops;
165 	mapping->host = inode;
166 	mapping->flags = 0;
167 	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
168 	mapping->private_data = NULL;
169 	mapping->backing_dev_info = &default_backing_dev_info;
170 	mapping->writeback_index = 0;
171 
172 	/*
173 	 * If the block_device provides a backing_dev_info for client
174 	 * inodes then use that.  Otherwise the inode shares the bdev's
175 	 * backing_dev_info.
176 	 */
177 	if (sb->s_bdev) {
178 		struct backing_dev_info *bdi;
179 
180 		bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
181 		mapping->backing_dev_info = bdi;
182 	}
183 	inode->i_private = NULL;
184 	inode->i_mapping = mapping;
185 	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
186 #ifdef CONFIG_FS_POSIX_ACL
187 	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
188 #endif
189 
190 #ifdef CONFIG_FSNOTIFY
191 	inode->i_fsnotify_mask = 0;
192 #endif
193 
194 	this_cpu_inc(nr_inodes);
195 
196 	return 0;
197 out:
198 	return -ENOMEM;
199 }
200 EXPORT_SYMBOL(inode_init_always);
201 
202 static struct inode *alloc_inode(struct super_block *sb)
203 {
204 	struct inode *inode;
205 
206 	if (sb->s_op->alloc_inode)
207 		inode = sb->s_op->alloc_inode(sb);
208 	else
209 		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
210 
211 	if (!inode)
212 		return NULL;
213 
214 	if (unlikely(inode_init_always(sb, inode))) {
215 		if (inode->i_sb->s_op->destroy_inode)
216 			inode->i_sb->s_op->destroy_inode(inode);
217 		else
218 			kmem_cache_free(inode_cachep, inode);
219 		return NULL;
220 	}
221 
222 	return inode;
223 }
224 
225 void free_inode_nonrcu(struct inode *inode)
226 {
227 	kmem_cache_free(inode_cachep, inode);
228 }
229 EXPORT_SYMBOL(free_inode_nonrcu);
230 
231 void __destroy_inode(struct inode *inode)
232 {
233 	BUG_ON(inode_has_buffers(inode));
234 	security_inode_free(inode);
235 	fsnotify_inode_delete(inode);
236 	if (!inode->i_nlink) {
237 		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
238 		atomic_long_dec(&inode->i_sb->s_remove_count);
239 	}
240 
241 #ifdef CONFIG_FS_POSIX_ACL
242 	if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
243 		posix_acl_release(inode->i_acl);
244 	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
245 		posix_acl_release(inode->i_default_acl);
246 #endif
247 	this_cpu_dec(nr_inodes);
248 }
249 EXPORT_SYMBOL(__destroy_inode);
250 
251 static void i_callback(struct rcu_head *head)
252 {
253 	struct inode *inode = container_of(head, struct inode, i_rcu);
254 	kmem_cache_free(inode_cachep, inode);
255 }
256 
257 static void destroy_inode(struct inode *inode)
258 {
259 	BUG_ON(!list_empty(&inode->i_lru));
260 	__destroy_inode(inode);
261 	if (inode->i_sb->s_op->destroy_inode)
262 		inode->i_sb->s_op->destroy_inode(inode);
263 	else
264 		call_rcu(&inode->i_rcu, i_callback);
265 }
266 
267 /**
268  * drop_nlink - directly drop an inode's link count
269  * @inode: inode
270  *
271  * This is a low-level filesystem helper to replace any
272  * direct filesystem manipulation of i_nlink.  In cases
273  * where we are attempting to track writes to the
274  * filesystem, a decrement to zero means an imminent
275  * write when the file is truncated and actually unlinked
276  * on the filesystem.
277  */
278 void drop_nlink(struct inode *inode)
279 {
280 	WARN_ON(inode->i_nlink == 0);
281 	inode->__i_nlink--;
282 	if (!inode->i_nlink)
283 		atomic_long_inc(&inode->i_sb->s_remove_count);
284 }
285 EXPORT_SYMBOL(drop_nlink);
286 
287 /**
288  * clear_nlink - directly zero an inode's link count
289  * @inode: inode
290  *
291  * This is a low-level filesystem helper to replace any
292  * direct filesystem manipulation of i_nlink.  See
293  * drop_nlink() for why we care about i_nlink hitting zero.
294  */
295 void clear_nlink(struct inode *inode)
296 {
297 	if (inode->i_nlink) {
298 		inode->__i_nlink = 0;
299 		atomic_long_inc(&inode->i_sb->s_remove_count);
300 	}
301 }
302 EXPORT_SYMBOL(clear_nlink);
303 
304 /**
305  * set_nlink - directly set an inode's link count
306  * @inode: inode
307  * @nlink: new nlink (should be non-zero)
308  *
309  * This is a low-level filesystem helper to replace any
310  * direct filesystem manipulation of i_nlink.
311  */
312 void set_nlink(struct inode *inode, unsigned int nlink)
313 {
314 	if (!nlink) {
315 		clear_nlink(inode);
316 	} else {
317 		/* Yes, some filesystems do change nlink from zero to one */
318 		if (inode->i_nlink == 0)
319 			atomic_long_dec(&inode->i_sb->s_remove_count);
320 
321 		inode->__i_nlink = nlink;
322 	}
323 }
324 EXPORT_SYMBOL(set_nlink);
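/*
 * Illustrative sketch (hypothetical on-disk layout, not from this file):
 * when reading an inode from disk, a filesystem uses set_nlink() rather
 * than assigning i_nlink directly, so s_remove_count stays balanced:
 *
 *	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
 *
 * raw_inode and its i_links_count field are assumptions for the sake of
 * the example.
 */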
325 
326 /**
327  * inc_nlink - directly increment an inode's link count
328  * @inode: inode
329  *
330  * This is a low-level filesystem helper to replace any
331  * direct filesystem manipulation of i_nlink.  Currently,
332  * it is only here for parity with dec_nlink().
333  */
334 void inc_nlink(struct inode *inode)
335 {
336 	if (WARN_ON(inode->i_nlink == 0))
337 		atomic_long_dec(&inode->i_sb->s_remove_count);
338 
339 	inode->__i_nlink++;
340 }
341 EXPORT_SYMBOL(inc_nlink);
342 
343 void address_space_init_once(struct address_space *mapping)
344 {
345 	memset(mapping, 0, sizeof(*mapping));
346 	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
347 	spin_lock_init(&mapping->tree_lock);
348 	mutex_init(&mapping->i_mmap_mutex);
349 	INIT_LIST_HEAD(&mapping->private_list);
350 	spin_lock_init(&mapping->private_lock);
351 	mapping->i_mmap = RB_ROOT;
352 	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
353 }
354 EXPORT_SYMBOL(address_space_init_once);
355 
356 /*
357  * These are initializations that only need to be done
358  * once, because the fields are idempotent across use
359  * of the inode, so let the slab be aware of that.
360  */
361 void inode_init_once(struct inode *inode)
362 {
363 	memset(inode, 0, sizeof(*inode));
364 	INIT_HLIST_NODE(&inode->i_hash);
365 	INIT_LIST_HEAD(&inode->i_devices);
366 	INIT_LIST_HEAD(&inode->i_wb_list);
367 	INIT_LIST_HEAD(&inode->i_lru);
368 	address_space_init_once(&inode->i_data);
369 	i_size_ordered_init(inode);
370 #ifdef CONFIG_FSNOTIFY
371 	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
372 #endif
373 }
374 EXPORT_SYMBOL(inode_init_once);
375 
376 static void init_once(void *foo)
377 {
378 	struct inode *inode = (struct inode *) foo;
379 
380 	inode_init_once(inode);
381 }
382 
383 /*
384  * inode->i_lock must be held
385  */
386 void __iget(struct inode *inode)
387 {
388 	atomic_inc(&inode->i_count);
389 }
390 
391 /*
392  * get additional reference to inode; caller must already hold one.
393  */
394 void ihold(struct inode *inode)
395 {
396 	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
397 }
398 EXPORT_SYMBOL(ihold);
399 
400 static void inode_lru_list_add(struct inode *inode)
401 {
402 	spin_lock(&inode->i_sb->s_inode_lru_lock);
403 	if (list_empty(&inode->i_lru)) {
404 		list_add(&inode->i_lru, &inode->i_sb->s_inode_lru);
405 		inode->i_sb->s_nr_inodes_unused++;
406 		this_cpu_inc(nr_unused);
407 	}
408 	spin_unlock(&inode->i_sb->s_inode_lru_lock);
409 }
410 
411 /*
412  * Add inode to LRU if needed (inode is unused and clean).
413  *
414  * Needs inode->i_lock held.
415  */
416 void inode_add_lru(struct inode *inode)
417 {
418 	if (!(inode->i_state & (I_DIRTY | I_SYNC | I_FREEING | I_WILL_FREE)) &&
419 	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
420 		inode_lru_list_add(inode);
421 }
422 
423 
424 static void inode_lru_list_del(struct inode *inode)
425 {
426 	spin_lock(&inode->i_sb->s_inode_lru_lock);
427 	if (!list_empty(&inode->i_lru)) {
428 		list_del_init(&inode->i_lru);
429 		inode->i_sb->s_nr_inodes_unused--;
430 		this_cpu_dec(nr_unused);
431 	}
432 	spin_unlock(&inode->i_sb->s_inode_lru_lock);
433 }
434 
435 /**
436  * inode_sb_list_add - add inode to the superblock list of inodes
437  * @inode: inode to add
438  */
439 void inode_sb_list_add(struct inode *inode)
440 {
441 	spin_lock(&inode_sb_list_lock);
442 	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
443 	spin_unlock(&inode_sb_list_lock);
444 }
445 EXPORT_SYMBOL_GPL(inode_sb_list_add);
446 
447 static inline void inode_sb_list_del(struct inode *inode)
448 {
449 	if (!list_empty(&inode->i_sb_list)) {
450 		spin_lock(&inode_sb_list_lock);
451 		list_del_init(&inode->i_sb_list);
452 		spin_unlock(&inode_sb_list_lock);
453 	}
454 }
455 
456 static unsigned long hash(struct super_block *sb, unsigned long hashval)
457 {
458 	unsigned long tmp;
459 
460 	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
461 			L1_CACHE_BYTES;
462 	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
463 	return tmp & i_hash_mask;
464 }
465 
466 /**
467  *	__insert_inode_hash - hash an inode
468  *	@inode: unhashed inode
469  *	@hashval: unsigned long value used to locate this object in the
470  *		inode_hashtable.
471  *
472  *	Add an inode to the inode hash for this superblock.
473  */
474 void __insert_inode_hash(struct inode *inode, unsigned long hashval)
475 {
476 	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);
477 
478 	spin_lock(&inode_hash_lock);
479 	spin_lock(&inode->i_lock);
480 	hlist_add_head(&inode->i_hash, b);
481 	spin_unlock(&inode->i_lock);
482 	spin_unlock(&inode_hash_lock);
483 }
484 EXPORT_SYMBOL(__insert_inode_hash);
485 
486 /**
487  *	__remove_inode_hash - remove an inode from the hash
488  *	@inode: inode to unhash
489  *
490  *	Remove an inode from the superblock.
491  */
492 void __remove_inode_hash(struct inode *inode)
493 {
494 	spin_lock(&inode_hash_lock);
495 	spin_lock(&inode->i_lock);
496 	hlist_del_init(&inode->i_hash);
497 	spin_unlock(&inode->i_lock);
498 	spin_unlock(&inode_hash_lock);
499 }
500 EXPORT_SYMBOL(__remove_inode_hash);
501 
502 void clear_inode(struct inode *inode)
503 {
504 	might_sleep();
505 	/*
506 	 * We have to cycle tree_lock here because reclaim can be still in the
507 	 * process of removing the last page (in __delete_from_page_cache())
508 	 * and we must not free mapping under it.
509 	 */
510 	spin_lock_irq(&inode->i_data.tree_lock);
511 	BUG_ON(inode->i_data.nrpages);
512 	spin_unlock_irq(&inode->i_data.tree_lock);
513 	BUG_ON(!list_empty(&inode->i_data.private_list));
514 	BUG_ON(!(inode->i_state & I_FREEING));
515 	BUG_ON(inode->i_state & I_CLEAR);
516 	/* don't need i_lock here, no concurrent mods to i_state */
517 	inode->i_state = I_FREEING | I_CLEAR;
518 }
519 EXPORT_SYMBOL(clear_inode);
520 
521 /*
522  * Free the inode passed in, removing it from the lists it is still connected
523  * to. We remove any pages still attached to the inode and wait for any IO that
524  * is still in progress before finally destroying the inode.
525  *
526  * An inode must already be marked I_FREEING so that we avoid the inode being
527  * moved back onto lists if we race with other code that manipulates the lists
528  * (e.g. writeback_single_inode). The caller is responsible for setting this.
529  *
530  * An inode must already be removed from the LRU list before being evicted from
531  * the cache. This should occur atomically with setting the I_FREEING state
532  * flag, so no inodes here should ever be on the LRU when being evicted.
533  */
534 static void evict(struct inode *inode)
535 {
536 	const struct super_operations *op = inode->i_sb->s_op;
537 
538 	BUG_ON(!(inode->i_state & I_FREEING));
539 	BUG_ON(!list_empty(&inode->i_lru));
540 
541 	if (!list_empty(&inode->i_wb_list))
542 		inode_wb_list_del(inode);
543 
544 	inode_sb_list_del(inode);
545 
546 	/*
547 	 * Wait for flusher thread to be done with the inode so that filesystem
548 	 * does not start destroying it while writeback is still running. Since
549 	 * the inode has I_FREEING set, flusher thread won't start new work on
550 	 * the inode.  We just have to wait for running writeback to finish.
551 	 */
552 	inode_wait_for_writeback(inode);
553 
554 	if (op->evict_inode) {
555 		op->evict_inode(inode);
556 	} else {
557 		if (inode->i_data.nrpages)
558 			truncate_inode_pages(&inode->i_data, 0);
559 		clear_inode(inode);
560 	}
561 	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
562 		bd_forget(inode);
563 	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
564 		cd_forget(inode);
565 
566 	remove_inode_hash(inode);
567 
568 	spin_lock(&inode->i_lock);
569 	wake_up_bit(&inode->i_state, __I_NEW);
570 	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
571 	spin_unlock(&inode->i_lock);
572 
573 	destroy_inode(inode);
574 }
575 
576 /*
577  * dispose_list - dispose of the contents of a local list
578  * @head: the head of the list to free
579  *
580  * Dispose-list gets a local list with local inodes in it, so it doesn't
581  * need to worry about list corruption and SMP locks.
582  */
583 static void dispose_list(struct list_head *head)
584 {
585 	while (!list_empty(head)) {
586 		struct inode *inode;
587 
588 		inode = list_first_entry(head, struct inode, i_lru);
589 		list_del_init(&inode->i_lru);
590 
591 		evict(inode);
592 	}
593 }
594 
595 /**
596  * evict_inodes	- evict all evictable inodes for a superblock
597  * @sb:		superblock to operate on
598  *
599  * Make sure that no inodes with zero refcount are retained.  This is
600  * called by superblock shutdown after having MS_ACTIVE flag removed,
601  * so any inode reaching zero refcount during or after that call will
602  * be immediately evicted.
603  */
604 void evict_inodes(struct super_block *sb)
605 {
606 	struct inode *inode, *next;
607 	LIST_HEAD(dispose);
608 
609 	spin_lock(&inode_sb_list_lock);
610 	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
611 		if (atomic_read(&inode->i_count))
612 			continue;
613 
614 		spin_lock(&inode->i_lock);
615 		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
616 			spin_unlock(&inode->i_lock);
617 			continue;
618 		}
619 
620 		inode->i_state |= I_FREEING;
621 		inode_lru_list_del(inode);
622 		spin_unlock(&inode->i_lock);
623 		list_add(&inode->i_lru, &dispose);
624 	}
625 	spin_unlock(&inode_sb_list_lock);
626 
627 	dispose_list(&dispose);
628 }
629 
630 /**
631  * invalidate_inodes	- attempt to free all inodes on a superblock
632  * @sb:		superblock to operate on
633  * @kill_dirty: flag to guide handling of dirty inodes
634  *
635  * Attempts to free all inodes for a given superblock.  If there were any
636  * busy inodes return a non-zero value, else zero.
637  * If @kill_dirty is set, discard dirty inodes too, otherwise treat
638  * them as busy.
639  */
640 int invalidate_inodes(struct super_block *sb, bool kill_dirty)
641 {
642 	int busy = 0;
643 	struct inode *inode, *next;
644 	LIST_HEAD(dispose);
645 
646 	spin_lock(&inode_sb_list_lock);
647 	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
648 		spin_lock(&inode->i_lock);
649 		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
650 			spin_unlock(&inode->i_lock);
651 			continue;
652 		}
653 		if (inode->i_state & I_DIRTY && !kill_dirty) {
654 			spin_unlock(&inode->i_lock);
655 			busy = 1;
656 			continue;
657 		}
658 		if (atomic_read(&inode->i_count)) {
659 			spin_unlock(&inode->i_lock);
660 			busy = 1;
661 			continue;
662 		}
663 
664 		inode->i_state |= I_FREEING;
665 		inode_lru_list_del(inode);
666 		spin_unlock(&inode->i_lock);
667 		list_add(&inode->i_lru, &dispose);
668 	}
669 	spin_unlock(&inode_sb_list_lock);
670 
671 	dispose_list(&dispose);
672 
673 	return busy;
674 }
675 
676 static int can_unuse(struct inode *inode)
677 {
678 	if (inode->i_state & ~I_REFERENCED)
679 		return 0;
680 	if (inode_has_buffers(inode))
681 		return 0;
682 	if (atomic_read(&inode->i_count))
683 		return 0;
684 	if (inode->i_data.nrpages)
685 		return 0;
686 	return 1;
687 }
688 
689 /*
690  * Walk the superblock inode LRU for freeable inodes and attempt to free them.
691  * This is called from the superblock shrinker function with a number of inodes
692  * to trim from the LRU. Inodes to be freed are moved to a temporary list and
693  * then are freed outside inode_lock by dispose_list().
694  *
695  * Any inodes which are pinned purely because of attached pagecache have their
696  * pagecache removed.  If the inode has metadata buffers attached to
697  * mapping->private_list then try to remove them.
698  *
699  * If the inode has the I_REFERENCED flag set, then it means that it has been
700  * used recently - the flag is set in iput_final(). When we encounter such an
701  * inode, clear the flag and move it to the back of the LRU so it gets another
702  * pass through the LRU before it gets reclaimed. This is necessary because of
703  * the fact we are doing lazy LRU updates to minimise lock contention so the
704  * LRU does not have strict ordering. Hence we don't want to reclaim inodes
705  * with this flag set because they are the inodes that are out of order.
706  */
707 void prune_icache_sb(struct super_block *sb, int nr_to_scan)
708 {
709 	LIST_HEAD(freeable);
710 	int nr_scanned;
711 	unsigned long reap = 0;
712 
713 	spin_lock(&sb->s_inode_lru_lock);
714 	for (nr_scanned = nr_to_scan; nr_scanned >= 0; nr_scanned--) {
715 		struct inode *inode;
716 
717 		if (list_empty(&sb->s_inode_lru))
718 			break;
719 
720 		inode = list_entry(sb->s_inode_lru.prev, struct inode, i_lru);
721 
722 		/*
723 		 * we are inverting the sb->s_inode_lru_lock/inode->i_lock here,
724 		 * so use a trylock. If we fail to get the lock, just move the
725 		 * inode to the back of the list so we don't spin on it.
726 		 */
727 		if (!spin_trylock(&inode->i_lock)) {
728 			list_move(&inode->i_lru, &sb->s_inode_lru);
729 			continue;
730 		}
731 
732 		/*
733 		 * Referenced or dirty inodes are still in use. Give them
734 		 * another pass through the LRU as we canot reclaim them now.
735 		 */
736 		if (atomic_read(&inode->i_count) ||
737 		    (inode->i_state & ~I_REFERENCED)) {
738 			list_del_init(&inode->i_lru);
739 			spin_unlock(&inode->i_lock);
740 			sb->s_nr_inodes_unused--;
741 			this_cpu_dec(nr_unused);
742 			continue;
743 		}
744 
745 		/* recently referenced inodes get one more pass */
746 		if (inode->i_state & I_REFERENCED) {
747 			inode->i_state &= ~I_REFERENCED;
748 			list_move(&inode->i_lru, &sb->s_inode_lru);
749 			spin_unlock(&inode->i_lock);
750 			continue;
751 		}
752 		if (inode_has_buffers(inode) || inode->i_data.nrpages) {
753 			__iget(inode);
754 			spin_unlock(&inode->i_lock);
755 			spin_unlock(&sb->s_inode_lru_lock);
756 			if (remove_inode_buffers(inode))
757 				reap += invalidate_mapping_pages(&inode->i_data,
758 								0, -1);
759 			iput(inode);
760 			spin_lock(&sb->s_inode_lru_lock);
761 
762 			if (inode != list_entry(sb->s_inode_lru.next,
763 						struct inode, i_lru))
764 				continue;	/* wrong inode or list_empty */
765 			/* avoid lock inversions with trylock */
766 			if (!spin_trylock(&inode->i_lock))
767 				continue;
768 			if (!can_unuse(inode)) {
769 				spin_unlock(&inode->i_lock);
770 				continue;
771 			}
772 		}
773 		WARN_ON(inode->i_state & I_NEW);
774 		inode->i_state |= I_FREEING;
775 		spin_unlock(&inode->i_lock);
776 
777 		list_move(&inode->i_lru, &freeable);
778 		sb->s_nr_inodes_unused--;
779 		this_cpu_dec(nr_unused);
780 	}
781 	if (current_is_kswapd())
782 		__count_vm_events(KSWAPD_INODESTEAL, reap);
783 	else
784 		__count_vm_events(PGINODESTEAL, reap);
785 	spin_unlock(&sb->s_inode_lru_lock);
786 	if (current->reclaim_state)
787 		current->reclaim_state->reclaimed_slab += reap;
788 
789 	dispose_list(&freeable);
790 }
791 
792 static void __wait_on_freeing_inode(struct inode *inode);
793 /*
794  * Called with the inode lock held.
795  */
796 static struct inode *find_inode(struct super_block *sb,
797 				struct hlist_head *head,
798 				int (*test)(struct inode *, void *),
799 				void *data)
800 {
801 	struct inode *inode = NULL;
802 
803 repeat:
804 	hlist_for_each_entry(inode, head, i_hash) {
805 		spin_lock(&inode->i_lock);
806 		if (inode->i_sb != sb) {
807 			spin_unlock(&inode->i_lock);
808 			continue;
809 		}
810 		if (!test(inode, data)) {
811 			spin_unlock(&inode->i_lock);
812 			continue;
813 		}
814 		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
815 			__wait_on_freeing_inode(inode);
816 			goto repeat;
817 		}
818 		__iget(inode);
819 		spin_unlock(&inode->i_lock);
820 		return inode;
821 	}
822 	return NULL;
823 }
824 
825 /*
826  * find_inode_fast is the fast path version of find_inode, see the comment at
827  * iget_locked for details.
828  */
829 static struct inode *find_inode_fast(struct super_block *sb,
830 				struct hlist_head *head, unsigned long ino)
831 {
832 	struct inode *inode = NULL;
833 
834 repeat:
835 	hlist_for_each_entry(inode, head, i_hash) {
836 		spin_lock(&inode->i_lock);
837 		if (inode->i_ino != ino) {
838 			spin_unlock(&inode->i_lock);
839 			continue;
840 		}
841 		if (inode->i_sb != sb) {
842 			spin_unlock(&inode->i_lock);
843 			continue;
844 		}
845 		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
846 			__wait_on_freeing_inode(inode);
847 			goto repeat;
848 		}
849 		__iget(inode);
850 		spin_unlock(&inode->i_lock);
851 		return inode;
852 	}
853 	return NULL;
854 }
855 
856 /*
857  * Each cpu owns a range of LAST_INO_BATCH numbers.
858  * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
859  * to renew the exhausted range.
860  *
861  * This does not significantly increase overflow rate because every CPU can
862  * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
863  * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
864  * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
865  * overflow rate by 2x, which does not seem too significant.
866  *
867  * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
868  * error if st_ino won't fit in target struct field. Use 32bit counter
869  * here to attempt to avoid that.
870  */
871 #define LAST_INO_BATCH 1024
872 static DEFINE_PER_CPU(unsigned int, last_ino);
873 
874 unsigned int get_next_ino(void)
875 {
876 	unsigned int *p = &get_cpu_var(last_ino);
877 	unsigned int res = *p;
878 
879 #ifdef CONFIG_SMP
880 	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
881 		static atomic_t shared_last_ino;
882 		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
883 
884 		res = next - LAST_INO_BATCH;
885 	}
886 #endif
887 
888 	*p = ++res;
889 	put_cpu_var(last_ino);
890 	return res;
891 }
892 EXPORT_SYMBOL(get_next_ino);
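/*
 * Illustrative sketch (not from this file): pseudo filesystems with no
 * stable on-disk inode numbers typically pair new_inode() with
 * get_next_ino() so that each in-core inode gets a distinct st_ino:
 *
 *	inode = new_inode(sb);
 *	if (inode)
 *		inode->i_ino = get_next_ino();
 */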
893 
894 /**
895  *	new_inode_pseudo 	- obtain an inode
896  *	@sb: superblock
897  *
898  *	Allocates a new inode for given superblock.
899  *	The inode won't be chained into the superblock's s_inodes list.
900  *	This means:
901  *	- the fs can't be unmounted
902  *	- quotas, fsnotify, writeback can't work
903  */
904 struct inode *new_inode_pseudo(struct super_block *sb)
905 {
906 	struct inode *inode = alloc_inode(sb);
907 
908 	if (inode) {
909 		spin_lock(&inode->i_lock);
910 		inode->i_state = 0;
911 		spin_unlock(&inode->i_lock);
912 		INIT_LIST_HEAD(&inode->i_sb_list);
913 	}
914 	return inode;
915 }
916 
917 /**
918  *	new_inode 	- obtain an inode
919  *	@sb: superblock
920  *
921  *	Allocates a new inode for given superblock. The default gfp_mask
922  *	for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
923  *	If HIGHMEM pages are unsuitable or it is known that pages allocated
924  *	for the page cache are not reclaimable or migratable,
925  *	mapping_set_gfp_mask() must be called with suitable flags on the
926  *	newly created inode's mapping.
927  *
928  */
929 struct inode *new_inode(struct super_block *sb)
930 {
931 	struct inode *inode;
932 
933 	spin_lock_prefetch(&inode_sb_list_lock);
934 
935 	inode = new_inode_pseudo(sb);
936 	if (inode)
937 		inode_sb_list_add(inode);
938 	return inode;
939 }
940 EXPORT_SYMBOL(new_inode);
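/*
 * Illustrative sketch: as the comment above notes, a filesystem whose
 * pagecache pages must not live in HIGHMEM can narrow the gfp mask
 * immediately after allocation:
 *
 *	inode = new_inode(sb);
 *	if (inode)
 *		mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
 */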
941 
942 #ifdef CONFIG_DEBUG_LOCK_ALLOC
943 void lockdep_annotate_inode_mutex_key(struct inode *inode)
944 {
945 	if (S_ISDIR(inode->i_mode)) {
946 		struct file_system_type *type = inode->i_sb->s_type;
947 
948 		/* Set new key only if filesystem hasn't already changed it */
949 		if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) {
950 			/*
951 			 * ensure nobody is actually holding i_mutex
952 			 */
953 			mutex_destroy(&inode->i_mutex);
954 			mutex_init(&inode->i_mutex);
955 			lockdep_set_class(&inode->i_mutex,
956 					  &type->i_mutex_dir_key);
957 		}
958 	}
959 }
960 EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
961 #endif
962 
963 /**
964  * unlock_new_inode - clear the I_NEW state and wake up any waiters
965  * @inode:	new inode to unlock
966  *
967  * Called when the inode is fully initialised to clear the new state of the
968  * inode and wake up anyone waiting for the inode to finish initialisation.
969  */
970 void unlock_new_inode(struct inode *inode)
971 {
972 	lockdep_annotate_inode_mutex_key(inode);
973 	spin_lock(&inode->i_lock);
974 	WARN_ON(!(inode->i_state & I_NEW));
975 	inode->i_state &= ~I_NEW;
976 	smp_mb();
977 	wake_up_bit(&inode->i_state, __I_NEW);
978 	spin_unlock(&inode->i_lock);
979 }
980 EXPORT_SYMBOL(unlock_new_inode);
981 
982 /**
983  * lock_two_nondirectories - take two i_mutexes on non-directory objects
984  *
985  * Lock any non-NULL argument that is not a directory.
986  * Zero, one or two objects may be locked by this function.
987  *
988  * @inode1: first inode to lock
989  * @inode2: second inode to lock
990  */
991 void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
992 {
993 	if (inode1 > inode2)
994 		swap(inode1, inode2);
995 
996 	if (inode1 && !S_ISDIR(inode1->i_mode))
997 		mutex_lock(&inode1->i_mutex);
998 	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
999 		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_NONDIR2);
1000 }
1001 EXPORT_SYMBOL(lock_two_nondirectories);
1002 
1003 /**
1004  * unlock_two_nondirectories - release locks from lock_two_nondirectories()
1005  * @inode1: first inode to unlock
1006  * @inode2: second inode to unlock
1007  */
1008 void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
1009 {
1010 	if (inode1 && !S_ISDIR(inode1->i_mode))
1011 		mutex_unlock(&inode1->i_mutex);
1012 	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
1013 		mutex_unlock(&inode2->i_mutex);
1014 }
1015 EXPORT_SYMBOL(unlock_two_nondirectories);
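/*
 * Illustrative sketch: operations that modify a pair of possibly
 * identical non-directory inodes bracket the work with these helpers:
 *
 *	lock_two_nondirectories(inode1, inode2);
 *	... modify both inodes ...
 *	unlock_two_nondirectories(inode1, inode2);
 *
 * The consistent ordering by address in lock_two_nondirectories()
 * prevents an ABBA deadlock between two concurrent callers.
 */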
1016 
1017 /**
1018  * iget5_locked - obtain an inode from a mounted file system
1019  * @sb:		super block of file system
1020  * @hashval:	hash value (usually inode number) to get
1021  * @test:	callback used for comparisons between inodes
1022  * @set:	callback used to initialize a new struct inode
1023  * @data:	opaque data pointer to pass to @test and @set
1024  *
1025  * Search for the inode specified by @hashval and @data in the inode cache,
1026  * and if present return it with an increased reference count. This is
1027  * a generalized version of iget_locked() for file systems where the inode
1028  * number is not sufficient for unique identification of an inode.
1029  *
1030  * If the inode is not in cache, allocate a new inode and return it locked,
1031  * hashed, and with the I_NEW flag set. The file system gets to fill it in
1032  * before unlocking it via unlock_new_inode().
1033  *
1034  * Note both @test and @set are called with the inode_hash_lock held, so can't
1035  * sleep.
1036  */
1037 struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
1038 		int (*test)(struct inode *, void *),
1039 		int (*set)(struct inode *, void *), void *data)
1040 {
1041 	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1042 	struct inode *inode;
1043 
1044 	spin_lock(&inode_hash_lock);
1045 	inode = find_inode(sb, head, test, data);
1046 	spin_unlock(&inode_hash_lock);
1047 
1048 	if (inode) {
1049 		wait_on_inode(inode);
1050 		return inode;
1051 	}
1052 
1053 	inode = alloc_inode(sb);
1054 	if (inode) {
1055 		struct inode *old;
1056 
1057 		spin_lock(&inode_hash_lock);
1058 		/* We released the lock, so.. */
1059 		old = find_inode(sb, head, test, data);
1060 		if (!old) {
1061 			if (set(inode, data))
1062 				goto set_failed;
1063 
1064 			spin_lock(&inode->i_lock);
1065 			inode->i_state = I_NEW;
1066 			hlist_add_head(&inode->i_hash, head);
1067 			spin_unlock(&inode->i_lock);
1068 			inode_sb_list_add(inode);
1069 			spin_unlock(&inode_hash_lock);
1070 
1071 			/* Return the locked inode with I_NEW set, the
1072 			 * caller is responsible for filling in the contents
1073 			 */
1074 			return inode;
1075 		}
1076 
1077 		/*
1078 		 * Uhhuh, somebody else created the same inode under
1079 		 * us. Use the old inode instead of the one we just
1080 		 * allocated.
1081 		 */
1082 		spin_unlock(&inode_hash_lock);
1083 		destroy_inode(inode);
1084 		inode = old;
1085 		wait_on_inode(inode);
1086 	}
1087 	return inode;
1088 
1089 set_failed:
1090 	spin_unlock(&inode_hash_lock);
1091 	destroy_inode(inode);
1092 	return NULL;
1093 }
1094 EXPORT_SYMBOL(iget5_locked);
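/*
 * Illustrative sketch (hypothetical "foofs", not from this file): the
 * @test/@set callbacks usually compare and record a fs-specific key:
 *
 *	static int foofs_test(struct inode *inode, void *data)
 *	{
 *		return FOOFS_I(inode)->key == *(u64 *)data;
 *	}
 *
 *	static int foofs_set(struct inode *inode, void *data)
 *	{
 *		FOOFS_I(inode)->key = *(u64 *)data;
 *		return 0;
 *	}
 *
 *	inode = iget5_locked(sb, hashval, foofs_test, foofs_set, &key);
 *	if (inode && (inode->i_state & I_NEW)) {
 *		... fill in the inode ...
 *		unlock_new_inode(inode);
 *	}
 *
 * FOOFS_I() and the key field are assumptions for illustration only.
 */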
1095 
1096 /**
1097  * iget_locked - obtain an inode from a mounted file system
1098  * @sb:		super block of file system
1099  * @ino:	inode number to get
1100  *
1101  * Search for the inode specified by @ino in the inode cache and if present
1102  * return it with an increased reference count. This is for file systems
1103  * where the inode number is sufficient for unique identification of an inode.
1104  *
1105  * If the inode is not in cache, allocate a new inode and return it locked,
1106  * hashed, and with the I_NEW flag set.  The file system gets to fill it in
1107  * before unlocking it via unlock_new_inode().
1108  */
1109 struct inode *iget_locked(struct super_block *sb, unsigned long ino)
1110 {
1111 	struct hlist_head *head = inode_hashtable + hash(sb, ino);
1112 	struct inode *inode;
1113 
1114 	spin_lock(&inode_hash_lock);
1115 	inode = find_inode_fast(sb, head, ino);
1116 	spin_unlock(&inode_hash_lock);
1117 	if (inode) {
1118 		wait_on_inode(inode);
1119 		return inode;
1120 	}
1121 
1122 	inode = alloc_inode(sb);
1123 	if (inode) {
1124 		struct inode *old;
1125 
1126 		spin_lock(&inode_hash_lock);
1127 		/* We released the lock, so.. */
1128 		old = find_inode_fast(sb, head, ino);
1129 		if (!old) {
1130 			inode->i_ino = ino;
1131 			spin_lock(&inode->i_lock);
1132 			inode->i_state = I_NEW;
1133 			hlist_add_head(&inode->i_hash, head);
1134 			spin_unlock(&inode->i_lock);
1135 			inode_sb_list_add(inode);
1136 			spin_unlock(&inode_hash_lock);
1137 
1138 			/* Return the locked inode with I_NEW set, the
1139 			 * caller is responsible for filling in the contents
1140 			 */
1141 			return inode;
1142 		}
1143 
1144 		/*
1145 		 * Uhhuh, somebody else created the same inode under
1146 		 * us. Use the old inode instead of the one we just
1147 		 * allocated.
1148 		 */
1149 		spin_unlock(&inode_hash_lock);
1150 		destroy_inode(inode);
1151 		inode = old;
1152 		wait_on_inode(inode);
1153 	}
1154 	return inode;
1155 }
1156 EXPORT_SYMBOL(iget_locked);
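/*
 * Illustrative sketch: the common calling pattern for iget_locked() is
 *
 *	inode = iget_locked(sb, ino);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (!(inode->i_state & I_NEW))
 *		return inode;		(cache hit, already initialised)
 *	... read the on-disk inode and initialise fields ...
 *	unlock_new_inode(inode);
 *	return inode;
 */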
1157 
1158 /*
1159  * search the inode cache for a matching inode number.
1160  * If we find one, then the inode number we are trying to
1161  * allocate is not unique and so we should not use it.
1162  *
1163  * Returns 1 if the inode number is unique, 0 if it is not.
1164  */
1165 static int test_inode_iunique(struct super_block *sb, unsigned long ino)
1166 {
1167 	struct hlist_head *b = inode_hashtable + hash(sb, ino);
1168 	struct inode *inode;
1169 
1170 	spin_lock(&inode_hash_lock);
1171 	hlist_for_each_entry(inode, b, i_hash) {
1172 		if (inode->i_ino == ino && inode->i_sb == sb) {
1173 			spin_unlock(&inode_hash_lock);
1174 			return 0;
1175 		}
1176 	}
1177 	spin_unlock(&inode_hash_lock);
1178 
1179 	return 1;
1180 }
1181 
1182 /**
1183  *	iunique - get a unique inode number
1184  *	@sb: superblock
1185  *	@max_reserved: highest reserved inode number
1186  *
1187  *	Obtain an inode number that is unique on the system for a given
1188  *	superblock. This is used by file systems that have no natural
1189  *	permanent inode numbering system. An inode number is returned that
1190  *	is higher than the reserved limit but unique.
1191  *
1192  *	BUGS:
1193  *	With a large number of inodes live on the file system this function
1194  *	currently becomes quite slow.
1195  */
1196 ino_t iunique(struct super_block *sb, ino_t max_reserved)
1197 {
1198 	/*
1199 	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
1200 	 * error if st_ino won't fit in target struct field. Use 32bit counter
1201 	 * here to attempt to avoid that.
1202 	 */
1203 	static DEFINE_SPINLOCK(iunique_lock);
1204 	static unsigned int counter;
1205 	ino_t res;
1206 
1207 	spin_lock(&iunique_lock);
1208 	do {
1209 		if (counter <= max_reserved)
1210 			counter = max_reserved + 1;
1211 		res = counter++;
1212 	} while (!test_inode_iunique(sb, res));
1213 	spin_unlock(&iunique_lock);
1214 
1215 	return res;
1216 }
1217 EXPORT_SYMBOL(iunique);
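/*
 * Illustrative sketch: a filesystem with no natural inode numbering can
 * reserve a small fixed range and draw the rest from iunique():
 *
 *	inode->i_ino = iunique(sb, FOOFS_ROOT_INO);
 *
 * FOOFS_ROOT_INO is a hypothetical highest reserved inode number.
 */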
1218 
1219 struct inode *igrab(struct inode *inode)
1220 {
1221 	spin_lock(&inode->i_lock);
1222 	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
1223 		__iget(inode);
1224 		spin_unlock(&inode->i_lock);
1225 	} else {
1226 		spin_unlock(&inode->i_lock);
1227 		/*
1228 		 * Handle the case where s_op->clear_inode has not been
1229 		 * called yet, and somebody is calling igrab
1230 		 * while the inode is getting freed.
1231 		 */
1232 		inode = NULL;
1233 	}
1234 	return inode;
1235 }
1236 EXPORT_SYMBOL(igrab);
1237 
1238 /**
1239  * ilookup5_nowait - search for an inode in the inode cache
1240  * @sb:		super block of file system to search
1241  * @hashval:	hash value (usually inode number) to search for
1242  * @test:	callback used for comparisons between inodes
1243  * @data:	opaque data pointer to pass to @test
1244  *
1245  * Search for the inode specified by @hashval and @data in the inode cache.
1246  * If the inode is in the cache, the inode is returned with an incremented
1247  * reference count.
1248  *
1249  * Note: I_NEW is not waited upon so you have to be very careful what you do
1250  * with the returned inode.  You probably should be using ilookup5() instead.
1251  *
1252  * Note2: @test is called with the inode_hash_lock held, so can't sleep.
1253  */
1254 struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
1255 		int (*test)(struct inode *, void *), void *data)
1256 {
1257 	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1258 	struct inode *inode;
1259 
1260 	spin_lock(&inode_hash_lock);
1261 	inode = find_inode(sb, head, test, data);
1262 	spin_unlock(&inode_hash_lock);
1263 
1264 	return inode;
1265 }
1266 EXPORT_SYMBOL(ilookup5_nowait);
1267 
1268 /**
1269  * ilookup5 - search for an inode in the inode cache
1270  * @sb:		super block of file system to search
1271  * @hashval:	hash value (usually inode number) to search for
1272  * @test:	callback used for comparisons between inodes
1273  * @data:	opaque data pointer to pass to @test
1274  *
1275  * Search for the inode specified by @hashval and @data in the inode cache,
1276  * and if the inode is in the cache, return the inode with an incremented
1277  * reference count.  Waits on I_NEW before returning the inode, so it is
1278  * fully initialised by the time it is returned.
1279  *
1280  * This is a generalized version of ilookup() for file systems where the
1281  * inode number is not sufficient for unique identification of an inode.
1282  *
1283  * Note: @test is called with the inode_hash_lock held, so can't sleep.
1284  */
1285 struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
1286 		int (*test)(struct inode *, void *), void *data)
1287 {
1288 	struct inode *inode = ilookup5_nowait(sb, hashval, test, data);
1289 
1290 	if (inode)
1291 		wait_on_inode(inode);
1292 	return inode;
1293 }
1294 EXPORT_SYMBOL(ilookup5);
1295 
1296 /**
1297  * ilookup - search for an inode in the inode cache
1298  * @sb:		super block of file system to search
1299  * @ino:	inode number to search for
1300  *
1301  * Search for the inode @ino in the inode cache, and if the inode is in the
1302  * cache, the inode is returned with an incremented reference count.
1303  */
1304 struct inode *ilookup(struct super_block *sb, unsigned long ino)
1305 {
1306 	struct hlist_head *head = inode_hashtable + hash(sb, ino);
1307 	struct inode *inode;
1308 
1309 	spin_lock(&inode_hash_lock);
1310 	inode = find_inode_fast(sb, head, ino);
1311 	spin_unlock(&inode_hash_lock);
1312 
1313 	if (inode)
1314 		wait_on_inode(inode);
1315 	return inode;
1316 }
1317 EXPORT_SYMBOL(ilookup);
1318 
1319 int insert_inode_locked(struct inode *inode)
1320 {
1321 	struct super_block *sb = inode->i_sb;
1322 	ino_t ino = inode->i_ino;
1323 	struct hlist_head *head = inode_hashtable + hash(sb, ino);
1324 
1325 	while (1) {
1326 		struct inode *old = NULL;
1327 		spin_lock(&inode_hash_lock);
1328 		hlist_for_each_entry(old, head, i_hash) {
1329 			if (old->i_ino != ino)
1330 				continue;
1331 			if (old->i_sb != sb)
1332 				continue;
1333 			spin_lock(&old->i_lock);
1334 			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
1335 				spin_unlock(&old->i_lock);
1336 				continue;
1337 			}
1338 			break;
1339 		}
1340 		if (likely(!old)) {
1341 			spin_lock(&inode->i_lock);
1342 			inode->i_state |= I_NEW;
1343 			hlist_add_head(&inode->i_hash, head);
1344 			spin_unlock(&inode->i_lock);
1345 			spin_unlock(&inode_hash_lock);
1346 			return 0;
1347 		}
1348 		__iget(old);
1349 		spin_unlock(&old->i_lock);
1350 		spin_unlock(&inode_hash_lock);
1351 		wait_on_inode(old);
1352 		if (unlikely(!inode_unhashed(old))) {
1353 			iput(old);
1354 			return -EBUSY;
1355 		}
1356 		iput(old);
1357 	}
1358 }
1359 EXPORT_SYMBOL(insert_inode_locked);
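/*
 * Illustrative sketch: during file creation, a filesystem that picks
 * the inode number itself hashes the inode with insert_inode_locked()
 * before exposing it:
 *
 *	inode->i_ino = ino;
 *	if (insert_inode_locked(inode) < 0) {
 *		... a live inode with this number exists; back out ...
 *	}
 *	... initialise the inode, then unlock_new_inode(inode) ...
 */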
1360 
1361 int insert_inode_locked4(struct inode *inode, unsigned long hashval,
1362 		int (*test)(struct inode *, void *), void *data)
1363 {
1364 	struct super_block *sb = inode->i_sb;
1365 	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1366 
1367 	while (1) {
1368 		struct inode *old = NULL;
1369 
1370 		spin_lock(&inode_hash_lock);
1371 		hlist_for_each_entry(old, head, i_hash) {
1372 			if (old->i_sb != sb)
1373 				continue;
1374 			if (!test(old, data))
1375 				continue;
1376 			spin_lock(&old->i_lock);
1377 			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
1378 				spin_unlock(&old->i_lock);
1379 				continue;
1380 			}
1381 			break;
1382 		}
1383 		if (likely(!old)) {
1384 			spin_lock(&inode->i_lock);
1385 			inode->i_state |= I_NEW;
1386 			hlist_add_head(&inode->i_hash, head);
1387 			spin_unlock(&inode->i_lock);
1388 			spin_unlock(&inode_hash_lock);
1389 			return 0;
1390 		}
1391 		__iget(old);
1392 		spin_unlock(&old->i_lock);
1393 		spin_unlock(&inode_hash_lock);
1394 		wait_on_inode(old);
1395 		if (unlikely(!inode_unhashed(old))) {
1396 			iput(old);
1397 			return -EBUSY;
1398 		}
1399 		iput(old);
1400 	}
1401 }
1402 EXPORT_SYMBOL(insert_inode_locked4);
1403 
1404 
1405 int generic_delete_inode(struct inode *inode)
1406 {
1407 	return 1;
1408 }
1409 EXPORT_SYMBOL(generic_delete_inode);
1410 
1411 /*
1412  * Called when we're dropping the last reference
1413  * to an inode.
1414  *
1415  * Call the FS "drop_inode()" function, defaulting to
1416  * the legacy UNIX filesystem behaviour.  If it tells
1417  * us to evict the inode, do so.  Otherwise, retain the inode
1418  * in cache if the fs is alive, or sync and evict it if the fs is
1419  * shutting down.
1420  */
1421 static void iput_final(struct inode *inode)
1422 {
1423 	struct super_block *sb = inode->i_sb;
1424 	const struct super_operations *op = inode->i_sb->s_op;
1425 	int drop;
1426 
1427 	WARN_ON(inode->i_state & I_NEW);
1428 
1429 	if (op->drop_inode)
1430 		drop = op->drop_inode(inode);
1431 	else
1432 		drop = generic_drop_inode(inode);
1433 
1434 	if (!drop && (sb->s_flags & MS_ACTIVE)) {
1435 		inode->i_state |= I_REFERENCED;
1436 		inode_add_lru(inode);
1437 		spin_unlock(&inode->i_lock);
1438 		return;
1439 	}
1440 
1441 	if (!drop) {
1442 		inode->i_state |= I_WILL_FREE;
1443 		spin_unlock(&inode->i_lock);
1444 		write_inode_now(inode, 1);
1445 		spin_lock(&inode->i_lock);
1446 		WARN_ON(inode->i_state & I_NEW);
1447 		inode->i_state &= ~I_WILL_FREE;
1448 	}
1449 
1450 	inode->i_state |= I_FREEING;
1451 	if (!list_empty(&inode->i_lru))
1452 		inode_lru_list_del(inode);
1453 	spin_unlock(&inode->i_lock);
1454 
1455 	evict(inode);
1456 }
1457 
1458 /**
1459  *	iput	- put an inode
1460  *	@inode: inode to put
1461  *
1462  *	Puts an inode, dropping its usage count. If the inode use count hits
1463  *	zero, the inode is then freed and may also be destroyed.
1464  *
1465  *	Consequently, iput() can sleep.
1466  */
1467 void iput(struct inode *inode)
1468 {
1469 	if (inode) {
1470 		BUG_ON(inode->i_state & I_CLEAR);
1471 
1472 		if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock))
1473 			iput_final(inode);
1474 	}
1475 }
1476 EXPORT_SYMBOL(iput);
1477 
1478 /**
1479  *	bmap	- find a block number in a file
1480  *	@inode: inode of file
1481  *	@block: block to find
1482  *
1483  *	Returns the block number on the device holding the inode that
1484  *	is the disk block number for the block of the file requested.
1485  *	That is, asked for block 4 of inode 1 the function will return the
1486  *	disk block relative to the disk start that holds that block of the
1487  *	file.
1488  */
1489 sector_t bmap(struct inode *inode, sector_t block)
1490 {
1491 	sector_t res = 0;
1492 	if (inode->i_mapping->a_ops->bmap)
1493 		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
1494 	return res;
1495 }
1496 EXPORT_SYMBOL(bmap);
1497 
1498 /*
1499  * With relative atime, only update atime if the previous atime is
1500  * earlier than either the ctime or mtime or if at least a day has
1501  * passed since the last atime update.
1502  */
1503 static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
1504 			     struct timespec now)
1505 {
1506 
1507 	if (!(mnt->mnt_flags & MNT_RELATIME))
1508 		return 1;
1509 	/*
1510 	 * Is mtime younger than atime? If yes, update atime:
1511 	 */
1512 	if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
1513 		return 1;
1514 	/*
1515 	 * Is ctime younger than atime? If yes, update atime:
1516 	 */
1517 	if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
1518 		return 1;
1519 
1520 	/*
1521 	 * Is the previous atime value older than a day? If yes,
1522 	 * update atime:
1523 	 */
1524 	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
1525 		return 1;
1526 	/*
1527 	 * Good, we can skip the atime update:
1528 	 */
1529 	return 0;
1530 }
1531 
1532 /*
1533  * This does the actual work of updating an inode's time or version.  The
1534  * caller must have called mnt_want_write() before calling this.
1535  */
1536 static int update_time(struct inode *inode, struct timespec *time, int flags)
1537 {
1538 	if (inode->i_op->update_time)
1539 		return inode->i_op->update_time(inode, time, flags);
1540 
1541 	if (flags & S_ATIME)
1542 		inode->i_atime = *time;
1543 	if (flags & S_VERSION)
1544 		inode_inc_iversion(inode);
1545 	if (flags & S_CTIME)
1546 		inode->i_ctime = *time;
1547 	if (flags & S_MTIME)
1548 		inode->i_mtime = *time;
1549 	mark_inode_dirty_sync(inode);
1550 	return 0;
1551 }
1552 
1553 /**
1554  *	touch_atime	-	update the access time
1555  *	@path: the &struct path to update
1556  *
1557  *	Update the accessed time on an inode and mark it for writeback.
1558  *	This function automatically handles read only file systems and media,
1559  *	as well as the "noatime" flag and inode specific "noatime" markers.
1560  */
1561 void touch_atime(struct path *path)
1562 {
1563 	struct vfsmount *mnt = path->mnt;
1564 	struct inode *inode = path->dentry->d_inode;
1565 	struct timespec now;
1566 
1567 	if (inode->i_flags & S_NOATIME)
1568 		return;
1569 	if (IS_NOATIME(inode))
1570 		return;
1571 	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
1572 		return;
1573 
1574 	if (mnt->mnt_flags & MNT_NOATIME)
1575 		return;
1576 	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
1577 		return;
1578 
1579 	now = current_fs_time(inode->i_sb);
1580 
1581 	if (!relatime_need_update(mnt, inode, now))
1582 		return;
1583 
1584 	if (timespec_equal(&inode->i_atime, &now))
1585 		return;
1586 
1587 	if (!sb_start_write_trylock(inode->i_sb))
1588 		return;
1589 
1590 	if (__mnt_want_write(mnt))
1591 		goto skip_update;
1592 	/*
1593 	 * File systems can error out when updating inodes if they need to
1594 	 * allocate new space to modify an inode (such is the case for
1595 	 * Btrfs), but since we touch atime while walking down the path we
1596 	 * really don't care if we failed to update the atime of the file,
1597 	 * so just ignore the return value.
1598 	 * We may also fail on filesystems that have the ability to make parts
1599 	 * of the fs read only, e.g. subvolumes in Btrfs.
1600 	 */
1601 	update_time(inode, &now, S_ATIME);
1602 	__mnt_drop_write(mnt);
1603 skip_update:
1604 	sb_end_write(inode->i_sb);
1605 }
1606 EXPORT_SYMBOL(touch_atime);
1607 
1608 /*
1609  * The logic we want is
1610  *
1611  *	if suid or (sgid and xgrp)
1612  *		remove privs
1613  */
1614 int should_remove_suid(struct dentry *dentry)
1615 {
1616 	umode_t mode = dentry->d_inode->i_mode;
1617 	int kill = 0;
1618 
1619 	/* suid always must be killed */
1620 	if (unlikely(mode & S_ISUID))
1621 		kill = ATTR_KILL_SUID;
1622 
1623 	/*
1624 	 * sgid without any exec bits is just a mandatory locking mark; leave
1625 	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
1626 	 */
1627 	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1628 		kill |= ATTR_KILL_SGID;
1629 
1630 	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
1631 		return kill;
1632 
1633 	return 0;
1634 }
1635 EXPORT_SYMBOL(should_remove_suid);
1636 
1637 static int __remove_suid(struct vfsmount *mnt, struct dentry *dentry, int kill)
1638 {
1639 	struct iattr newattrs;
1640 
1641 	newattrs.ia_valid = ATTR_FORCE | kill;
1642 	return notify_change2(mnt, dentry, &newattrs);
1643 }
1644 
1645 int file_remove_suid(struct file *file)
1646 {
1647 	struct dentry *dentry = file->f_path.dentry;
1648 	struct inode *inode = dentry->d_inode;
1649 	int killsuid;
1650 	int killpriv;
1651 	int error = 0;
1652 
1653 	/* Fast path for nothing security related */
1654 	if (IS_NOSEC(inode))
1655 		return 0;
1656 
1657 	killsuid = should_remove_suid(dentry);
1658 	killpriv = security_inode_need_killpriv(dentry);
1659 
1660 	if (killpriv < 0)
1661 		return killpriv;
1662 	if (killpriv)
1663 		error = security_inode_killpriv(dentry);
1664 	if (!error && killsuid)
1665 		error = __remove_suid(file->f_path.mnt, dentry, killsuid);
1666 	if (!error && (inode->i_sb->s_flags & MS_NOSEC))
1667 		inode->i_flags |= S_NOSEC;
1668 
1669 	return error;
1670 }
1671 EXPORT_SYMBOL(file_remove_suid);
1672 
1673 /**
1674  *	file_update_time	-	update mtime and ctime time
1675  *	@file: file accessed
1676  *
1677  *	Update the mtime and ctime members of an inode and mark the inode
1678  *	for writeback.  Note that this function is meant exclusively for
1679  *	usage in the file write path of filesystems, and filesystems may
1680  *	choose to explicitly ignore update via this function with the
1681  *	S_NOCMTIME inode flag, e.g. for network filesystem where these
1682  *	timestamps are handled by the server.  This can return an error for
1683  *	file systems that need to allocate space in order to update an inode.
1684  */
1685 
1686 int file_update_time(struct file *file)
1687 {
1688 	struct inode *inode = file_inode(file);
1689 	struct timespec now;
1690 	int sync_it = 0;
1691 	int ret;
1692 
1693 	/* First try to exhaust all avenues to not sync */
1694 	if (IS_NOCMTIME(inode))
1695 		return 0;
1696 
1697 	now = current_fs_time(inode->i_sb);
1698 	if (!timespec_equal(&inode->i_mtime, &now))
1699 		sync_it = S_MTIME;
1700 
1701 	if (!timespec_equal(&inode->i_ctime, &now))
1702 		sync_it |= S_CTIME;
1703 
1704 	if (IS_I_VERSION(inode))
1705 		sync_it |= S_VERSION;
1706 
1707 	if (!sync_it)
1708 		return 0;
1709 
1710 	/* Finally allowed to write? Takes lock. */
1711 	if (__mnt_want_write_file(file))
1712 		return 0;
1713 
1714 	ret = update_time(inode, &now, sync_it);
1715 	__mnt_drop_write_file(file);
1716 
1717 	return ret;
1718 }
1719 EXPORT_SYMBOL(file_update_time);
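/*
 * Illustrative sketch: the usual call site is a filesystem's write
 * path, before any data is copied, mirroring what the generic write
 * helpers do:
 *
 *	err = file_update_time(file);
 *	if (err)
 *		return err;
 */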
1720 
1721 int inode_needs_sync(struct inode *inode)
1722 {
1723 	if (IS_SYNC(inode))
1724 		return 1;
1725 	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
1726 		return 1;
1727 	return 0;
1728 }
1729 EXPORT_SYMBOL(inode_needs_sync);
1730 
1731 int inode_wait(void *word)
1732 {
1733 	schedule();
1734 	return 0;
1735 }
1736 EXPORT_SYMBOL(inode_wait);
1737 
1738 /*
1739  * If we try to find an inode in the inode hash while it is being
1740  * deleted, we have to wait until the filesystem completes its
1741  * deletion before reporting that it isn't found.  This function waits
1742  * until the deletion _might_ have completed.  Callers are responsible
1743  * to recheck inode state.
1744  *
1745  * It doesn't matter if I_NEW is not set initially, a call to
1746  * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
1747  * will DTRT.
1748  */
__wait_on_freeing_inode(struct inode * inode)1749 static void __wait_on_freeing_inode(struct inode *inode)
1750 {
1751 	wait_queue_head_t *wq;
1752 	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
1753 	wq = bit_waitqueue(&inode->i_state, __I_NEW);
1754 	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
1755 	spin_unlock(&inode->i_lock);
1756 	spin_unlock(&inode_hash_lock);
1757 	schedule();
1758 	finish_wait(wq, &wait.wait);
1759 	spin_lock(&inode_hash_lock);
1760 }
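
/*
 * Illustrative sketch: the hash lookups earlier in this file use this in
 * a retry loop, roughly:
 *
 *	repeat:
 *		hlist_for_each_entry(inode, head, i_hash) {
 *			...
 *			spin_lock(&inode->i_lock);
 *			if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
 *				__wait_on_freeing_inode(inode);
 *				goto repeat;
 *			}
 *			...
 *		}
 */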

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);
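
/*
 * Example: booting with "ihash_entries=131072" on the kernel command
 * line requests about that many hash buckets; alloc_large_system_hash()
 * below rounds the final size to a power of two.
 */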

/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	unsigned int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void __init inode_init(void)
{
	unsigned int loop;

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD),
					 init_once);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					0,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}
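
/*
 * For reference, the i_hash_shift and i_hash_mask values filled in above
 * are consumed by the hash function earlier in this file to fold a
 * (superblock, hashval) pair into a bucket index.  A rough sketch of
 * that scheme:
 *
 *	tmp = (hashval * (unsigned long)sb) ^
 *			(GOLDEN_RATIO_PRIME + hashval) / L1_CACHE_BYTES;
 *	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
 *	return inode_hashtable + (tmp & i_hash_mask);
 */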

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &pipefifo_fops;
	else if (S_ISSOCK(mode))
		inode->i_fop = &bad_sock_fops;
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
				  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);
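
/*
 * Illustrative sketch: a filesystem's mknod() method typically funnels
 * device nodes, FIFOs and sockets through here when setting up the new
 * inode (hypothetical excerpt):
 *
 *	inode->i_ino = get_next_ino();
 *	inode_init_owner(inode, dir, mode);
 *	init_special_inode(inode, mode, dev);
 */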

/**
 * inode_init_owner - Initialize uid, gid and mode for a new inode according to POSIX standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */
void inode_init_owner(struct inode *inode, const struct inode *dir,
			umode_t mode)
{
	inode->i_uid = current_fsuid();
	if (dir && dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();
	inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);
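
/*
 * Example: creating a file in a setgid directory makes the new inode
 * inherit the directory's gid rather than the caller's fsgid, and a new
 * subdirectory also inherits S_ISGID itself, per the code above.  A
 * typical create path (hypothetical excerpt):
 *
 *	inode = new_inode(sb);
 *	inode_init_owner(inode, dir, S_IFREG | 0644);
 */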

/**
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER in a namespace with the
 * inode owner uid mapped, or owns the file.
 */
bool inode_owner_or_capable(const struct inode *inode)
{
	struct user_namespace *ns;

	if (uid_eq(current_fsuid(), inode->i_uid))
		return true;

	ns = current_user_ns();
	if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid))
		return true;
	return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);
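
/*
 * Illustrative sketch: ioctls that change inode attributes typically
 * gate on this helper, e.g. a FS_IOC_SETFLAGS-style handler
 * (hypothetical excerpt):
 *
 *	if (!inode_owner_or_capable(inode))
 *		return -EACCES;
 */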

/*
 * Direct i/o helper functions
 */
static void __inode_dio_wait(struct inode *inode)
{
	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

	do {
		prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&inode->i_dio_count))
			schedule();
	} while (atomic_read(&inode->i_dio_count));
	finish_wait(wq, &q.wait);
}

/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by inode->i_mutex.
 */
void inode_dio_wait(struct inode *inode)
{
	if (atomic_read(&inode->i_dio_count))
		__inode_dio_wait(inode);
}
EXPORT_SYMBOL(inode_dio_wait);

/*
 * inode_dio_done - signal completion of a direct I/O request
 * @inode: inode the direct I/O happens on
 *
 * This is called once we've finished processing a direct I/O request,
 * and is used to wake up callers waiting for direct I/O to be quiesced.
 */
void inode_dio_done(struct inode *inode)
{
	if (atomic_dec_and_test(&inode->i_dio_count))
		wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}
EXPORT_SYMBOL(inode_dio_done);
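
/*
 * Illustrative sketch of the i_dio_count protocol: the submission path
 * takes a reference before issuing the request and drops it on
 * completion, while truncate-like paths drain the count under i_mutex
 * (roughly what fs/direct-io.c and the truncate paths do):
 *
 *	atomic_inc(&inode->i_dio_count);
 *	...submit and complete the direct I/O...
 *	inode_dio_done(inode);
 *
 *	mutex_lock(&inode->i_mutex);
 *	inode_dio_wait(inode);
 *	...safe to truncate...
 */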

/*
 * inode_set_flags - atomically set some inode flags
 *
 * Note: the caller should be holding i_mutex, or else be sure that
 * they have exclusive access to the inode structure (i.e., while the
 * inode is being instantiated).  The reason for the cmpxchg() loop
 * --- which wouldn't be necessary if all code paths which modify
 * i_flags actually followed this rule --- is that there is at least
 * one code path which doesn't today: __generic_file_aio_write()
 * calls file_remove_suid() without holding i_mutex, so we use
 * cmpxchg() out of an abundance of caution.
 *
 * In the long run, i_mutex is overkill, and we should probably look
 * at using the i_lock spinlock to protect i_flags, and then make sure
 * it is so documented in include/linux/fs.h and that all code follows
 * the locking convention!!
 */
void inode_set_flags(struct inode *inode, unsigned int flags,
		     unsigned int mask)
{
	unsigned int old_flags, new_flags;

	WARN_ON_ONCE(flags & ~mask);
	do {
		old_flags = ACCESS_ONCE(inode->i_flags);
		new_flags = (old_flags & ~mask) | flags;
	} while (unlikely(cmpxchg(&inode->i_flags, old_flags,
				  new_flags) != old_flags));
}
EXPORT_SYMBOL(inode_set_flags);
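
/*
 * Illustrative sketch: a filesystem translating its on-disk flags into
 * i_flags can update only the bits it owns without clobbering concurrent
 * changes to other bits (EXAMPLEFS_SYNC_FL is a hypothetical on-disk
 * flag):
 *
 *	unsigned int new_fl = 0;
 *
 *	if (ondisk_flags & EXAMPLEFS_SYNC_FL)
 *		new_fl |= S_SYNC;
 *	inode_set_flags(inode, new_fl,
 *			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
 */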