// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/memblock.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <linux/iversion.h>
#include <linux/rw_hint.h>
#include <trace/events/writeback.h>
#include "internal.h"

#undef CREATE_TRACE_POINTS
#include <trace/hooks/vmscan.h>

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget(), inode->i_io_list
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode->i_sb->s_inode_list_lock protects:
 *   inode->i_sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */
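
/*
 * Illustrative sketch (not part of this file): code that needs both the
 * hash lock and a per-inode lock must take them outermost-first per the
 * ordering above, e.g.
 *
 *	spin_lock(&inode_hash_lock);
 *	spin_lock(&inode->i_lock);
 *	...
 *	spin_unlock(&inode->i_lock);
 *	spin_unlock(&inode_hash_lock);
 *
 * which is exactly what __insert_inode_hash() below does.
 */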

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static long get_nr_inodes(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle the nr_inodes sysctl
 */
#ifdef CONFIG_SYSCTL
/*
 * Statistics gathering..
 */
static struct inodes_stat_t inodes_stat;

static int proc_nr_inodes(struct ctl_table *table, int write, void *buffer,
			  size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}

static struct ctl_table inodes_sysctls[] = {
	{
		.procname	= "inode-nr",
		.data		= &inodes_stat,
		.maxlen		= 2*sizeof(long),
		.mode		= 0444,
		.proc_handler	= proc_nr_inodes,
	},
	{
		.procname	= "inode-state",
		.data		= &inodes_stat,
		.maxlen		= 7*sizeof(long),
		.mode		= 0444,
		.proc_handler	= proc_nr_inodes,
	},
	{ }
};

static int __init init_fs_inode_sysctls(void)
{
	register_sysctl_init("fs", inodes_sysctls);
	return 0;
}
early_initcall(init_fs_inode_sysctls);
#endif
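
/*
 * Example (assuming procfs is mounted at /proc): the read-only counters
 * registered above can be inspected with
 *
 *	$ cat /proc/sys/fs/inode-nr
 *	54401	12381
 *
 * where the first column is nr_inodes and the second is nr_unused; the
 * sample values here are made up for illustration.
 */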

static int no_open(struct inode *inode, struct file *file)
{
	return -ENXIO;
}

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations no_open_fops = {.open = no_open};
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic64_set(&inode->i_sequence, 0);
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &no_open_fops;
	inode->i_ino = 0;
	inode->__i_nlink = 1;
	inode->i_opflags = 0;
	if (sb->s_xattr)
		inode->i_opflags |= IOP_XATTR;
	i_uid_write(inode, 0);
	i_gid_write(inode, 0);
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_write_hint = WRITE_LIFE_NOT_SET;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
	inode->i_pipe = NULL;
	inode->i_cdev = NULL;
	inode->i_link = NULL;
	inode->i_dir_seq = 0;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

#ifdef CONFIG_CGROUP_WRITEBACK
	inode->i_wb_frn_winner = 0;
	inode->i_wb_frn_avg_time = 0;
	inode->i_wb_frn_history = 0;
#endif

	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	init_rwsem(&inode->i_rwsem);
	lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	mapping->wb_err = 0;
	atomic_set(&mapping->i_mmap_writable, 0);
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	atomic_set(&mapping->nr_thps, 0);
#endif
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->private_data = NULL;
	mapping->writeback_index = 0;
	init_rwsem(&mapping->invalidate_lock);
	lockdep_set_class_and_name(&mapping->invalidate_lock,
				   &sb->s_type->invalidate_lock_key,
				   "mapping.invalidate_lock");
	if (sb->s_iflags & SB_I_STABLE_WRITES)
		mapping_set_stable_writes(mapping);
	inode->i_private = NULL;
	inode->i_mapping = mapping;
	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif
	inode->i_flctx = NULL;

	if (unlikely(security_inode_alloc(inode)))
		return -ENOMEM;
	this_cpu_inc(nr_inodes);

	return 0;
}
EXPORT_SYMBOL(inode_init_always);
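
/*
 * Illustrative sketch (not part of this file): a filesystem that recycles
 * its own inode containers instead of going through alloc_inode() can
 * reinitialise the embedded VFS inode with this helper, e.g.
 *
 *	if (inode_init_always(sb, &ip->vfs_inode))
 *		return -ENOMEM;
 *
 * (here "ip" is a hypothetical fs-private container). XFS follows this
 * pattern when it reuses inodes from its internal cache.
 */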

void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	if (inode->free_inode)
		inode->free_inode(inode);
	else
		free_inode_nonrcu(inode);
}

static struct inode *alloc_inode(struct super_block *sb)
{
	const struct super_operations *ops = sb->s_op;
	struct inode *inode;

	if (ops->alloc_inode)
		inode = ops->alloc_inode(sb);
	else
		inode = alloc_inode_sb(sb, inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (ops->destroy_inode) {
			ops->destroy_inode(inode);
			if (!ops->free_inode)
				return NULL;
		}
		inode->free_inode = ops->free_inode;
		i_callback(&inode->i_rcu);
		return NULL;
	}

	return inode;
}

void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	inode_detach_wb(inode);
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
	locks_free_lock_context(inode);
	if (!inode->i_nlink) {
		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && !is_uncached_acl(inode->i_acl))
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void destroy_inode(struct inode *inode)
{
	const struct super_operations *ops = inode->i_sb->s_op;

	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (ops->destroy_inode) {
		ops->destroy_inode(inode);
		if (!ops->free_inode)
			return;
	}
	inode->free_inode = ops->free_inode;
	call_rcu(&inode->i_rcu, i_callback);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
	WARN_ON(inode->i_nlink == 0);
	inode->__i_nlink--;
	if (!inode->i_nlink)
		atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL_NS(drop_nlink, ANDROID_GKI_VFS_EXPORT_ONLY);
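
/*
 * Illustrative sketch (not part of this file): a typical ->unlink()
 * implementation ends with
 *
 *	drop_nlink(inode);
 *	dput(dentry);
 *
 * as simple_unlink() in fs/libfs.c does, which keeps the s_remove_count
 * bookkeeping above consistent.
 */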

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
	if (inode->i_nlink) {
		inode->__i_nlink = 0;
		atomic_long_inc(&inode->i_sb->s_remove_count);
	}
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
	if (!nlink) {
		clear_nlink(inode);
	} else {
		/* Yes, some filesystems do change nlink from zero to one */
		if (inode->i_nlink == 0)
			atomic_long_dec(&inode->i_sb->s_remove_count);

		inode->__i_nlink = nlink;
	}
}
EXPORT_SYMBOL_NS(set_nlink, ANDROID_GKI_VFS_EXPORT_ONLY);

/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. Currently,
 * it is only here for parity with dec_nlink().
 */
void inc_nlink(struct inode *inode)
{
	if (unlikely(inode->i_nlink == 0)) {
		WARN_ON(!(inode->i_state & I_LINKABLE));
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

	inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);

static void __address_space_init_once(struct address_space *mapping)
{
	xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);
	init_rwsem(&mapping->i_mmap_rwsem);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	mapping->i_mmap = RB_ROOT_CACHED;
}

void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	__address_space_init_once(mapping);
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_io_list);
	INIT_LIST_HEAD(&inode->i_wb_list);
	INIT_LIST_HEAD(&inode->i_lru);
	INIT_LIST_HEAD(&inode->i_sb_list);
	__address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
}
EXPORT_SYMBOL_NS(inode_init_once, ANDROID_GKI_VFS_EXPORT_ONLY);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}
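
/*
 * Illustrative sketch (not part of this file): filesystems that embed
 * struct inode in a larger object pass a constructor like init_once()
 * above when creating their slab cache, e.g.
 *
 *	kmem_cache_create("myfs_inode_cache",
 *			  sizeof(struct myfs_inode), 0,
 *			  SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
 *			  myfs_init_once);
 *
 * ("myfs_*" names are hypothetical) so the idempotent fields are set up
 * once per slab object rather than on every allocation.
 */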

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL_NS(ihold, ANDROID_GKI_VFS_EXPORT_ONLY);
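
/*
 * Illustrative sketch (not part of this file): code handing an inode to
 * asynchronous work while already holding a reference takes an extra one
 * first,
 *
 *	ihold(inode);
 *	queue_work(wq, &work);	(the work function ends with iput(inode))
 *
 * which is why ihold() warns if the count was not already at least one.
 */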

static void __inode_add_lru(struct inode *inode, bool rotate)
{
	if (inode->i_state & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE))
		return;
	if (atomic_read(&inode->i_count))
		return;
	if (!(inode->i_sb->s_flags & SB_ACTIVE))
		return;
	if (!mapping_shrinkable(&inode->i_data))
		return;

	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_inc(nr_unused);
	else if (rotate)
		inode->i_state |= I_REFERENCED;
}

/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
	__inode_add_lru(inode, false);
}

static void inode_lru_list_del(struct inode *inode)
{
	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_dec(nr_unused);
}

static void inode_pin_lru_isolating(struct inode *inode)
{
	lockdep_assert_held(&inode->i_lock);
	WARN_ON(inode->i_state & (I_LRU_ISOLATING | I_FREEING | I_WILL_FREE));
	inode->i_state |= I_LRU_ISOLATING;
}

static void inode_unpin_lru_isolating(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_LRU_ISOLATING));
	inode->i_state &= ~I_LRU_ISOLATING;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_LRU_ISOLATING);
	spin_unlock(&inode->i_lock);
}

static void inode_wait_for_lru_isolating(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (inode->i_state & I_LRU_ISOLATING) {
		DEFINE_WAIT_BIT(wq, &inode->i_state, __I_LRU_ISOLATING);
		wait_queue_head_t *wqh;

		wqh = bit_waitqueue(&inode->i_state, __I_LRU_ISOLATING);
		spin_unlock(&inode->i_lock);
		__wait_on_bit(wqh, &wq, bit_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode->i_lock);
		WARN_ON(inode->i_state & I_LRU_ISOLATING);
	}
	spin_unlock(&inode->i_lock);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode->i_sb->s_inode_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode->i_sb->s_inode_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_sb_list)) {
		spin_lock(&inode->i_sb->s_inode_list_lock);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode->i_sb->s_inode_list_lock);
	}
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}

/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head_rcu(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL_NS(__insert_inode_hash, ANDROID_GKI_VFS_EXPORT_ONLY);

/**
 * __remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the superblock's inode hash.
 */
void __remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init_rcu(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL_NS(__remove_inode_hash, ANDROID_GKI_VFS_EXPORT_ONLY);

void dump_mapping(const struct address_space *mapping)
{
	struct inode *host;
	const struct address_space_operations *a_ops;
	struct hlist_node *dentry_first;
	struct dentry *dentry_ptr;
	struct dentry dentry;
	unsigned long ino;

	/*
	 * If mapping is an invalid pointer, we don't want to crash
	 * accessing it, so probe everything depending on it carefully.
	 */
	if (get_kernel_nofault(host, &mapping->host) ||
	    get_kernel_nofault(a_ops, &mapping->a_ops)) {
		pr_warn("invalid mapping:%px\n", mapping);
		return;
	}

	if (!host) {
		pr_warn("aops:%ps\n", a_ops);
		return;
	}

	if (get_kernel_nofault(dentry_first, &host->i_dentry.first) ||
	    get_kernel_nofault(ino, &host->i_ino)) {
		pr_warn("aops:%ps invalid inode:%px\n", a_ops, host);
		return;
	}

	if (!dentry_first) {
		pr_warn("aops:%ps ino:%lx\n", a_ops, ino);
		return;
	}

	dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
	if (get_kernel_nofault(dentry, dentry_ptr)) {
		pr_warn("aops:%ps ino:%lx invalid dentry:%px\n",
				a_ops, ino, dentry_ptr);
		return;
	}

	/*
	 * if dentry is corrupted, the %pd handler may still crash,
	 * but it's unlikely that we reach here with a corrupt mapping
	 */
	pr_warn("aops:%ps ino:%lx dentry name:\"%pd\"\n", a_ops, ino, &dentry);
}

void clear_inode(struct inode *inode)
{
	/*
	 * We have to cycle the i_pages lock here because reclaim can be in the
	 * process of removing the last page (in __filemap_remove_folio())
	 * and we must not free the mapping under it.
	 */
	xa_lock_irq(&inode->i_data.i_pages);
	BUG_ON(inode->i_data.nrpages);
	/*
	 * Almost always, mapping_empty(&inode->i_data) here; but there are
	 * two known and long-standing ways in which nodes may get left behind
	 * (when deep radix-tree node allocation failed partway; or when THP
	 * collapse_file() failed). Until those two known cases are cleaned up,
	 * or a cleanup function is called here, do not BUG_ON(!mapping_empty),
	 * nor even WARN_ON(!mapping_empty).
	 */
	xa_unlock_irq(&inode->i_data.i_pages);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	BUG_ON(!list_empty(&inode->i_wb_list));
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL_NS(clear_inode, ANDROID_GKI_VFS_EXPORT_ONLY);

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	if (!list_empty(&inode->i_io_list))
		inode_io_list_del(inode);

	inode_sb_list_del(inode);

	inode_wait_for_lru_isolating(inode);

	/*
	 * Wait for flusher thread to be done with the inode so that filesystem
	 * does not start destroying it while writeback is still running. Since
	 * the inode has I_FREEING set, flusher thread won't start new work on
	 * the inode. We just have to wait for running writeback to finish.
	 */
	inode_wait_for_writeback(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
	}
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
		cond_resched();
	}
}

/**
 * evict_inodes - evict all evictable inodes for a superblock
 * @sb: superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained. This is
 * called by superblock shutdown after having SB_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

again:
	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);

		/*
		 * We can have a ton of inodes to evict at unmount time given
		 * enough memory, check to see if we need to go to sleep for a
		 * bit so we don't livelock.
		 */
		if (need_resched()) {
			spin_unlock(&sb->s_inode_list_lock);
			cond_resched();
			dispose_list(&dispose);
			goto again;
		}
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);
}
EXPORT_SYMBOL_GPL(evict_inodes);

/**
 * invalidate_inodes - attempt to free all inodes on a superblock
 * @sb: superblock to operate on
 *
 * Attempts to free all inodes (including dirty inodes) for a given superblock.
 */
void invalidate_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

again:
	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
		if (need_resched()) {
			spin_unlock(&sb->s_inode_list_lock);
			cond_resched();
			dispose_list(&dispose);
			goto again;
		}
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);
}

/*
 * Isolate the inode from the LRU in preparation for freeing it.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
static enum lru_status inode_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct inode *inode = container_of(item, struct inode, i_lru);
	bool skip = false;

	/*
	 * We are inverting the lru lock/inode->i_lock here, so use a
	 * trylock. If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	trace_android_vh_inode_lru_isolate(inode, &skip);
	if (skip) {
		spin_unlock(&inode->i_lock);
		return LRU_SKIP;
	}

	/*
	 * Inodes can get referenced, redirtied, or repopulated while
	 * they're already on the LRU, and this can make them
	 * unreclaimable for a while. Remove them lazily here; iput,
	 * sync, or the last page cache deletion will requeue them.
	 */
	if (atomic_read(&inode->i_count) ||
	    (inode->i_state & ~I_REFERENCED) ||
	    !mapping_shrinkable(&inode->i_data)) {
		list_lru_isolate(lru, &inode->i_lru);
		spin_unlock(&inode->i_lock);
		this_cpu_dec(nr_unused);
		return LRU_REMOVED;
	}

	/* Recently referenced inodes get one more pass */
	if (inode->i_state & I_REFERENCED) {
		inode->i_state &= ~I_REFERENCED;
		spin_unlock(&inode->i_lock);
		return LRU_ROTATE;
	}

	/*
	 * On highmem systems, mapping_shrinkable() permits dropping
	 * page cache in order to free up struct inodes: lowmem might
	 * be under pressure before the cache inside the highmem zone.
	 */
	if (inode_has_buffers(inode) || !mapping_empty(&inode->i_data)) {
		inode_pin_lru_isolating(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(lru_lock);
		if (remove_inode_buffers(inode)) {
			unsigned long reap;
			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
			if (current_is_kswapd())
				__count_vm_events(KSWAPD_INODESTEAL, reap);
			else
				__count_vm_events(PGINODESTEAL, reap);
			mm_account_reclaimed_pages(reap);
		}
		inode_unpin_lru_isolating(inode);
		spin_lock(lru_lock);
		return LRU_RETRY;
	}

	WARN_ON(inode->i_state & I_NEW);
	inode->i_state |= I_FREEING;
	list_lru_isolate_move(lru, &inode->i_lru, freeable);
	spin_unlock(&inode->i_lock);

	this_cpu_dec(nr_unused);
	return LRU_REMOVED;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 */
long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(freeable);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
				     inode_lru_isolate, &freeable);
	dispose_list(&freeable);
	return freed;
}

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode_hash_lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		if (unlikely(inode->i_state & I_CREATING)) {
			spin_unlock(&inode->i_lock);
			return ERR_PTR(-ESTALE);
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		if (unlikely(inode->i_state & I_CREATING)) {
			spin_unlock(&inode->i_lock);
			return ERR_PTR(-ESTALE);
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	res++;
	/* get_next_ino should not provide a 0 inode number */
	if (unlikely(!res))
		res++;
	*p = res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);
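
/*
 * Illustrative sketch (not part of this file): pseudo filesystems with no
 * stable on-disk inode numbers typically do
 *
 *	inode = new_inode(sb);
 *	if (inode)
 *		inode->i_ino = get_next_ino();
 *
 * anonymous pipe and socket inodes get their inode numbers this way.
 */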

/**
 * new_inode_pseudo - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for the given superblock.
 * The inode won't be chained into the superblock's s_inodes list.
 * This means:
 * - the fs can't be unmounted
 * - quotas, fsnotify and writeback can't work
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
	struct inode *inode = alloc_inode(sb);

	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
	}
	return inode;
}

/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for the given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	inode = new_inode_pseudo(sb);
	if (inode)
		inode_sb_list_add(inode);
	return inode;
}
EXPORT_SYMBOL(new_inode);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			// mutex_destroy(&inode->i_mutex);
			init_rwsem(&inode->i_rwsem);
			lockdep_set_class(&inode->i_rwsem,
					  &type->i_mutex_dir_key);
		}
	}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode: new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW & ~I_CREATING;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL_NS(unlock_new_inode, ANDROID_GKI_VFS_EXPORT_ONLY);

void discard_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
	iput(inode);
}
EXPORT_SYMBOL(discard_new_inode);

/**
 * lock_two_inodes - lock two inodes (may be regular files but also dirs)
 *
 * Lock any non-NULL argument. The caller must make sure that if it is
 * passing in two directories, one is not an ancestor of the other. Zero,
 * one or two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 * @subclass1: inode lock subclass for the first lock obtained
 * @subclass2: inode lock subclass for the second lock obtained
 */
void lock_two_inodes(struct inode *inode1, struct inode *inode2,
		     unsigned subclass1, unsigned subclass2)
{
	if (!inode1 || !inode2) {
		/*
		 * Make sure @subclass1 will be used for the acquired lock.
		 * This is not strictly necessary (no current caller cares) but
		 * let's keep things consistent.
		 */
		if (!inode1)
			swap(inode1, inode2);
		goto lock;
	}

	/*
	 * If one object is directory and the other is not, we must make sure
	 * to lock directory first as the other object may be its child.
	 */
	if (S_ISDIR(inode2->i_mode) == S_ISDIR(inode1->i_mode)) {
		if (inode1 > inode2)
			swap(inode1, inode2);
	} else if (!S_ISDIR(inode1->i_mode))
		swap(inode1, inode2);
lock:
	if (inode1)
		inode_lock_nested(inode1, subclass1);
	if (inode2 && inode2 != inode1)
		inode_lock_nested(inode2, subclass2);
}

/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 *
 * Lock any non-NULL argument. Passed objects must not be directories.
 * Zero, one or two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 */
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1)
		WARN_ON_ONCE(S_ISDIR(inode1->i_mode));
	if (inode2)
		WARN_ON_ONCE(S_ISDIR(inode2->i_mode));
	lock_two_inodes(inode1, inode2, I_MUTEX_NORMAL, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);

/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1) {
		WARN_ON_ONCE(S_ISDIR(inode1->i_mode));
		inode_unlock(inode1);
	}
	if (inode2 && inode2 != inode1) {
		WARN_ON_ONCE(S_ISDIR(inode2->i_mode));
		inode_unlock(inode2);
	}
}
EXPORT_SYMBOL(unlock_two_nondirectories);
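
/*
 * Illustrative sketch (not part of this file): a caller updating a pair
 * of regular files brackets the work with these helpers,
 *
 *	lock_two_nondirectories(inode1, inode2);
 *	...modify both inodes...
 *	unlock_two_nondirectories(inode1, inode2);
 *
 * relying on the address-based ordering in lock_two_inodes() to avoid
 * ABBA deadlocks between concurrent callers.
 */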

/**
 * inode_insert5 - obtain an inode from a mounted file system
 * @inode:	pre-allocated inode to use for insert to cache
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * variant of iget5_locked() for callers that don't want to fail on memory
 * allocation of the inode.
 *
 * If the inode is not in cache, insert the pre-allocated inode to cache and
 * return it locked, hashed, and with the I_NEW flag set. The file system gets
 * to fill it in before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so they
 * can't sleep.
 */
struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
			    int (*test)(struct inode *, void *),
			    int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
	struct inode *old;

again:
	spin_lock(&inode_hash_lock);
	old = find_inode(inode->i_sb, head, test, data);
	if (unlikely(old)) {
		/*
		 * Uhhuh, somebody else created the same inode under us.
		 * Use the old inode instead of the preallocated one.
		 */
		spin_unlock(&inode_hash_lock);
		if (IS_ERR(old))
			return NULL;
		wait_on_inode(old);
		if (unlikely(inode_unhashed(old))) {
			iput(old);
			goto again;
		}
		return old;
	}

	if (set && unlikely(set(inode, data))) {
		inode = NULL;
		goto unlock;
	}

	/*
	 * Return the locked inode with I_NEW set, the
	 * caller is responsible for filling in the contents
	 */
	spin_lock(&inode->i_lock);
	inode->i_state |= I_NEW;
	hlist_add_head_rcu(&inode->i_hash, head);
	spin_unlock(&inode->i_lock);

	/*
	 * Add inode to the sb list if it's not already. It has I_NEW at this
	 * point, so it should be safe to test i_sb_list locklessly.
	 */
	if (list_empty(&inode->i_sb_list))
		inode_sb_list_add(inode);
unlock:
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(inode_insert5);
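
/*
 * Illustrative sketch (not part of this file): a caller that preallocates
 * the inode, so that the lookup itself cannot fail on memory, might do
 *
 *	inode = my_alloc_inode(sb);
 *	old = inode_insert5(inode, hashval, test, set, data);
 *	if (old != inode)
 *		my_free_inode(inode);
 *
 * ("my_*" helpers are hypothetical) and then fill in and unlock "old" if
 * it came back with I_NEW set.
 */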

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so they
 * can't sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct inode *inode = ilookup5(sb, hashval, test, data);

	if (!inode) {
		struct inode *new = alloc_inode(sb);

		if (new) {
			new->i_state = 0;
			inode = inode_insert5(new, hashval, test, set, data);
			if (unlikely(inode != new))
				destroy_inode(new);
		}
	}
	return inode;
}
EXPORT_SYMBOL_NS(iget5_locked, ANDROID_GKI_VFS_EXPORT_ONLY);
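
/*
 * Illustrative sketch (not part of this file): the canonical calling
 * pattern for iget5_locked() is
 *
 *	inode = iget5_locked(sb, hashval, test, set, data);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (inode->i_state & I_NEW) {
 *		...read the on-disk inode...
 *		unlock_new_inode(inode);
 *	}
 *
 * with @test comparing and @set stashing the fs-specific identity data.
 */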

/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @ino:	inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;
again:
	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);
	if (inode) {
		if (IS_ERR(inode))
			return NULL;
		wait_on_inode(inode);
		if (unlikely(inode_unhashed(inode))) {
			iput(inode);
			goto again;
		}
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head_rcu(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		if (IS_ERR(old))
			return NULL;
		inode = old;
		wait_on_inode(inode);
		if (unlikely(inode_unhashed(inode))) {
			iput(inode);
			goto again;
		}
	}
	return inode;
}
EXPORT_SYMBOL(iget_locked);
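
/*
 * Illustrative sketch (not part of this file): typical use, mirroring
 * what ext4_iget() and friends do,
 *
 *	inode = iget_locked(sb, ino);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (!(inode->i_state & I_NEW))
 *		return inode;		(cache hit, already set up)
 *	...fill in the inode from disk...
 *	unlock_new_inode(inode);
 *	return inode;
 */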

/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	hlist_for_each_entry_rcu(inode, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb)
			return 0;
	}
	return 1;
}

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	rcu_read_lock();
	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_NS(iunique, ANDROID_GKI_VFS_EXPORT_ONLY);
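
/*
 * Illustrative sketch (not part of this file): a filesystem creating a
 * synthetic inode might assign its number with
 *
 *	inode->i_ino = iunique(sb, MYFS_MAX_RESERVED_INO);
 *
 * (the constant is hypothetical), keeping every generated number above
 * the reserved range and unique within @sb while the inode stays hashed.
 */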

struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode. You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so it can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	return IS_ERR(inode) ? NULL : inode;
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count. Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so it can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *inode;
again:
	inode = ilookup5_nowait(sb, hashval, test, data);
	if (inode) {
		wait_on_inode(inode);
		if (unlikely(inode_unhashed(inode))) {
			iput(inode);
			goto again;
		}
	}
	return inode;
}
EXPORT_SYMBOL_NS(ilookup5, ANDROID_GKI_VFS_EXPORT_ONLY);

/**
 * ilookup - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @ino:	inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;
again:
	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);

	if (inode) {
		if (IS_ERR(inode))
			return NULL;
		wait_on_inode(inode);
		if (unlikely(inode_unhashed(inode))) {
			iput(inode);
			goto again;
		}
	}
	return inode;
}
EXPORT_SYMBOL(ilookup);

/**
 * find_inode_nowait - find an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @match:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @match
 *
 * Search for the inode specified by @hashval and @data in the inode
 * cache, where the helper function @match will return 0 if the inode
 * does not match, 1 if the inode does match, and -1 if the search
 * should be stopped. The @match function must be responsible for
 * taking the i_lock spin_lock and checking i_state for an inode being
 * freed or being initialized, and incrementing the reference count
 * before returning 1. It also must not sleep, since it is called with
 * the inode_hash_lock spinlock held.
 *
 * This is an even more generalized version of ilookup5() when the
 * function must never block --- find_inode() can block in
 * __wait_on_freeing_inode() --- or when the caller can not increment
 * the reference count because the resulting iput() might cause an
 * inode eviction. The tradeoff is that the @match function must be
 * very carefully implemented.
 */
struct inode *find_inode_nowait(struct super_block *sb,
				unsigned long hashval,
				int (*match)(struct inode *, unsigned long,
					     void *),
				void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode, *ret_inode = NULL;
	int mval;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		mval = match(inode, hashval, data);
		if (mval == 0)
			continue;
		if (mval == 1)
			ret_inode = inode;
		goto out;
	}
out:
	spin_unlock(&inode_hash_lock);
	return ret_inode;
}
EXPORT_SYMBOL(find_inode_nowait);

/**
 * find_inode_rcu - find an inode in the inode cache
 * @sb:		Super block of file system to search
 * @hashval:	Key to hash
 * @test:	Function to test match on an inode
 * @data:	Data for test function
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * where the helper function @test will return 0 if the inode does not match
 * and 1 if it does. The @test function must be responsible for taking the
 * i_lock spin_lock and checking i_state for an inode being freed or being
 * initialized.
 *
 * If successful, this will return the inode for which the @test function
 * returned 1 and NULL otherwise.
 *
 * The @test function is not permitted to take a ref on any inode presented.
 * It is also not permitted to sleep.
 *
 * The caller must hold the RCU read lock.
 */
struct inode *find_inode_rcu(struct super_block *sb, unsigned long hashval,
			     int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "suspicious find_inode_rcu() usage");

	hlist_for_each_entry_rcu(inode, head, i_hash) {
		if (inode->i_sb == sb &&
		    !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)) &&
		    test(inode, data))
			return inode;
	}
	return NULL;
}
EXPORT_SYMBOL(find_inode_rcu);

/**
 * find_inode_by_ino_rcu - Find an inode in the inode cache
 * @sb:		Super block of file system to search
 * @ino:	The inode number to match
 *
 * Search for the inode specified by @ino in the inode cache, skipping
 * inodes that are in the process of being freed.
 *
 * If successful, this will return the matching inode and NULL otherwise.
 *
 * The caller must hold the RCU read lock, and no reference is taken on
 * the inode returned.
 */
struct inode *find_inode_by_ino_rcu(struct super_block *sb,
				    unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "suspicious find_inode_by_ino_rcu() usage");

	hlist_for_each_entry_rcu(inode, head, i_hash) {
		if (inode->i_ino == ino &&
		    inode->i_sb == sb &&
		    !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)))
			return inode;
	}
	return NULL;
}
EXPORT_SYMBOL(find_inode_by_ino_rcu);
1702
insert_inode_locked(struct inode * inode)1703 int insert_inode_locked(struct inode *inode)
1704 {
1705 struct super_block *sb = inode->i_sb;
1706 ino_t ino = inode->i_ino;
1707 struct hlist_head *head = inode_hashtable + hash(sb, ino);
1708
1709 while (1) {
1710 struct inode *old = NULL;
1711 spin_lock(&inode_hash_lock);
1712 hlist_for_each_entry(old, head, i_hash) {
1713 if (old->i_ino != ino)
1714 continue;
1715 if (old->i_sb != sb)
1716 continue;
1717 spin_lock(&old->i_lock);
1718 if (old->i_state & (I_FREEING|I_WILL_FREE)) {
1719 spin_unlock(&old->i_lock);
1720 continue;
1721 }
1722 break;
1723 }
1724 if (likely(!old)) {
1725 spin_lock(&inode->i_lock);
1726 inode->i_state |= I_NEW | I_CREATING;
1727 hlist_add_head_rcu(&inode->i_hash, head);
1728 spin_unlock(&inode->i_lock);
1729 spin_unlock(&inode_hash_lock);
1730 return 0;
1731 }
1732 if (unlikely(old->i_state & I_CREATING)) {
1733 spin_unlock(&old->i_lock);
1734 spin_unlock(&inode_hash_lock);
1735 return -EBUSY;
1736 }
1737 __iget(old);
1738 spin_unlock(&old->i_lock);
1739 spin_unlock(&inode_hash_lock);
1740 wait_on_inode(old);
1741 if (unlikely(!inode_unhashed(old))) {
1742 iput(old);
1743 return -EBUSY;
1744 }
1745 iput(old);
1746 }
1747 }
1748 EXPORT_SYMBOL(insert_inode_locked);
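
/*
 * Illustrative sketch (editor's addition): the classic inode-creation
 * pattern around insert_inode_locked(), as used by simple disk
 * filesystems; "myfs_fill" is hypothetical.
 */
#if 0
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	inode->i_ino = ino;
	if (insert_inode_locked(inode) < 0) {
		/* a live inode with this number is already hashed */
		iput(inode);
		return ERR_PTR(-EBUSY);
	}
	myfs_fill(inode);		/* read on-disk state, set ops */
	unlock_new_inode(inode);	/* clears I_NEW, wakes waiters */
#endif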
1749
1750 int insert_inode_locked4(struct inode *inode, unsigned long hashval,
1751 int (*test)(struct inode *, void *), void *data)
1752 {
1753 struct inode *old;
1754
1755 inode->i_state |= I_CREATING;
1756 old = inode_insert5(inode, hashval, test, NULL, data);
1757
1758 if (old != inode) {
1759 iput(old);
1760 return -EBUSY;
1761 }
1762 return 0;
1763 }
1764 EXPORT_SYMBOL(insert_inode_locked4);
1765
1766
1767 int generic_delete_inode(struct inode *inode)
1768 {
1769 return 1;
1770 }
1771 EXPORT_SYMBOL(generic_delete_inode);
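
/*
 * Illustrative sketch (editor's addition): filesystems that never want
 * unreferenced inodes to linger in the cache simply plug
 * generic_delete_inode() into their super_operations:
 */
#if 0
static const struct super_operations myfs_sops = {
	.drop_inode	= generic_delete_inode,
};
#endif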
1772
1773 /*
1774 * Called when we're dropping the last reference
1775 * to an inode.
1776 *
1777 * Call the FS "drop_inode()" function, defaulting to
1778 * the legacy UNIX filesystem behaviour. If it tells
1779 * us to evict the inode, do so. Otherwise, retain the
1780 * inode in cache if the fs is alive; sync and evict it
1781 * if the fs is shutting down.
1782 */
1783 static void iput_final(struct inode *inode)
1784 {
1785 struct super_block *sb = inode->i_sb;
1786 const struct super_operations *op = inode->i_sb->s_op;
1787 unsigned long state;
1788 int drop;
1789
1790 WARN_ON(inode->i_state & I_NEW);
1791
1792 if (op->drop_inode)
1793 drop = op->drop_inode(inode);
1794 else
1795 drop = generic_drop_inode(inode);
1796
1797 if (!drop &&
1798 !(inode->i_state & I_DONTCACHE) &&
1799 (sb->s_flags & SB_ACTIVE)) {
1800 __inode_add_lru(inode, true);
1801 spin_unlock(&inode->i_lock);
1802 return;
1803 }
1804
1805 state = inode->i_state;
1806 if (!drop) {
1807 WRITE_ONCE(inode->i_state, state | I_WILL_FREE);
1808 spin_unlock(&inode->i_lock);
1809
1810 write_inode_now(inode, 1);
1811
1812 spin_lock(&inode->i_lock);
1813 state = inode->i_state;
1814 WARN_ON(state & I_NEW);
1815 state &= ~I_WILL_FREE;
1816 }
1817
1818 WRITE_ONCE(inode->i_state, state | I_FREEING);
1819 if (!list_empty(&inode->i_lru))
1820 inode_lru_list_del(inode);
1821 spin_unlock(&inode->i_lock);
1822
1823 evict(inode);
1824 }
1825
1826 /**
1827 * iput - put an inode
1828 * @inode: inode to put
1829 *
1830 * Puts an inode, dropping its usage count. If the inode use count hits
1831 * zero, the inode is then freed and may also be destroyed.
1832 *
1833 * Consequently, iput() can sleep.
1834 */
1835 void iput(struct inode *inode)
1836 {
1837 if (!inode)
1838 return;
1839 BUG_ON(inode->i_state & I_CLEAR);
1840 retry:
1841 if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
1842 if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
1843 atomic_inc(&inode->i_count);
1844 spin_unlock(&inode->i_lock);
1845 trace_writeback_lazytime_iput(inode);
1846 mark_inode_dirty_sync(inode);
1847 goto retry;
1848 }
1849 iput_final(inode);
1850 }
1851 }
1852 EXPORT_SYMBOL(iput);
1853
1854 #ifdef CONFIG_BLOCK
1855 /**
1856 * bmap - find a block number in a file
1857 * @inode: inode owning the block number being requested
1858 * @block: pointer containing the block to find
1859 *
1860 * Replaces the value in ``*block`` with the block number on the device
1861 * holding the file that corresponds to the requested file block number.
1862 * That is, asked for block 4 of inode 1, the function will replace the
1863 * 4 in ``*block`` with the disk block, relative to the start of the disk,
1864 * that holds that block of the file.
1865 *
1866 * Returns -EINVAL in case of error, 0 otherwise. If mapping falls into a
1867 * hole, returns 0 and ``*block`` is also set to 0.
1868 */
1869 int bmap(struct inode *inode, sector_t *block)
1870 {
1871 if (!inode->i_mapping->a_ops->bmap)
1872 return -EINVAL;
1873
1874 *block = inode->i_mapping->a_ops->bmap(inode->i_mapping, *block);
1875 return 0;
1876 }
1877 EXPORT_SYMBOL(bmap);
1878 #endif
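
/*
 * Illustrative sketch (editor's addition): mapping logical file block 4
 * to a device block with bmap(). A result of 0 in *block means a hole.
 */
#if 0
	sector_t blk = 4;

	if (bmap(inode, &blk) == 0 && blk != 0)
		pr_debug("file block 4 lives in device block %llu\n",
			 (unsigned long long)blk);
#endif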
1879
1880 /*
1881 * With relative atime, only update atime if the previous atime is
1882 * earlier than or equal to either the ctime or mtime,
1883 * or if at least a day has passed since the last atime update.
1884 */
1885 static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
1886 struct timespec64 now)
1887 {
1888 struct timespec64 ctime;
1889
1890 if (!(mnt->mnt_flags & MNT_RELATIME))
1891 return 1;
1892 /*
1893 * Is mtime younger than or equal to atime? If yes, update atime:
1894 */
1895 if (timespec64_compare(&inode->i_mtime, &inode->i_atime) >= 0)
1896 return 1;
1897 /*
1898 * Is ctime younger than or equal to atime? If yes, update atime:
1899 */
1900 ctime = inode_get_ctime(inode);
1901 if (timespec64_compare(&ctime, &inode->i_atime) >= 0)
1902 return 1;
1903
1904 /*
1905 * Is the previous atime value older than a day? If yes,
1906 * update atime:
1907 */
1908 if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
1909 return 1;
1910 /*
1911 * Good, we can skip the atime update:
1912 */
1913 return 0;
1914 }
1915
1916 /**
1917 * inode_update_timestamps - update the timestamps on the inode
1918 * @inode: inode to be updated
1919 * @flags: S_* flags that need to be updated
1920 *
1921 * The update_time function is called when an inode's timestamps need to be
1922 * updated for a read or write operation. This function handles updating the
1923 * actual timestamps. It's up to the caller to ensure that the inode is marked
1924 * dirty appropriately.
1925 *
1926 * In the case where any of S_MTIME, S_CTIME, or S_VERSION need to be updated,
1927 * attempt to update all three of them. S_ATIME updates can be handled
1928 * independently of the rest.
1929 *
1930 * Returns a set of S_* flags indicating which values changed.
1931 */
1932 int inode_update_timestamps(struct inode *inode, int flags)
1933 {
1934 int updated = 0;
1935 struct timespec64 now;
1936
1937 if (flags & (S_MTIME|S_CTIME|S_VERSION)) {
1938 struct timespec64 ctime = inode_get_ctime(inode);
1939
1940 now = inode_set_ctime_current(inode);
1941 if (!timespec64_equal(&now, &ctime))
1942 updated |= S_CTIME;
1943 if (!timespec64_equal(&now, &inode->i_mtime)) {
1944 inode->i_mtime = now;
1945 updated |= S_MTIME;
1946 }
1947 if (IS_I_VERSION(inode) && inode_maybe_inc_iversion(inode, updated))
1948 updated |= S_VERSION;
1949 } else {
1950 now = current_time(inode);
1951 }
1952
1953 if (flags & S_ATIME) {
1954 if (!timespec64_equal(&now, &inode->i_atime)) {
1955 inode->i_atime = now;
1956 updated |= S_ATIME;
1957 }
1958 }
1959 return updated;
1960 }
1961 EXPORT_SYMBOL(inode_update_timestamps);
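
/*
 * Illustrative sketch (editor's addition): a filesystem ->update_time()
 * can delegate the timestamp bookkeeping to inode_update_timestamps()
 * and only decide how to dirty the inode, much as generic_update_time()
 * does below.
 */
#if 0
static int myfs_update_time(struct inode *inode, int flags)
{
	int updated = inode_update_timestamps(inode, flags);

	if (updated)
		__mark_inode_dirty(inode, I_DIRTY_SYNC);
	return 0;
}
#endif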
1962
1963 /**
1964 * generic_update_time - update the timestamps on the inode
1965 * @inode: inode to be updated
1966 * @flags: S_* flags that need to be updated
1967 *
1968 * The update_time function is called when an inode's timestamps need to be
1969 * updated for a read or write operation. In the case where any of S_MTIME,
1970 * S_CTIME, or S_VERSION needs to be updated, we attempt to update all three
1971 * of them. S_ATIME updates can be handled independently of the rest.
1972 *
1973 * Returns a S_* mask indicating which fields were updated.
1974 */
1975 int generic_update_time(struct inode *inode, int flags)
1976 {
1977 int updated = inode_update_timestamps(inode, flags);
1978 int dirty_flags = 0;
1979
1980 if (updated & (S_ATIME|S_MTIME|S_CTIME))
1981 dirty_flags = inode->i_sb->s_flags & SB_LAZYTIME ? I_DIRTY_TIME : I_DIRTY_SYNC;
1982 if (updated & S_VERSION)
1983 dirty_flags |= I_DIRTY_SYNC;
1984 __mark_inode_dirty(inode, dirty_flags);
1985 return updated;
1986 }
1987 EXPORT_SYMBOL(generic_update_time);
1988
1989 /*
1990 * This does the actual work of updating an inode's time or version. The
1991 * caller must have called mnt_want_write() before calling this.
1992 */
1993 int inode_update_time(struct inode *inode, int flags)
1994 {
1995 if (inode->i_op->update_time)
1996 return inode->i_op->update_time(inode, flags);
1997 generic_update_time(inode, flags);
1998 return 0;
1999 }
2000 EXPORT_SYMBOL(inode_update_time);
2001
2002 /**
2003 * atime_needs_update - check whether the access time needs updating
2004 * @path: the &struct path to check
2005 * @inode: inode to check
2006 *
2007 * Return true if the inode's atime should be updated for a read access,
2008 * false otherwise. This automatically handles read-only file systems and
2009 * media, as well as the "noatime" flag and inode-specific "noatime" markers.
2010 */
2011 bool atime_needs_update(const struct path *path, struct inode *inode)
2012 {
2013 struct vfsmount *mnt = path->mnt;
2014 struct timespec64 now;
2015
2016 if (inode->i_flags & S_NOATIME)
2017 return false;
2018
2019 /* Atime updates will likely cause i_uid and i_gid to be written
2020 	 * back improperly if their true value is unknown to the vfs.
2021 */
2022 if (HAS_UNMAPPED_ID(mnt_idmap(mnt), inode))
2023 return false;
2024
2025 if (IS_NOATIME(inode))
2026 return false;
2027 if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
2028 return false;
2029
2030 if (mnt->mnt_flags & MNT_NOATIME)
2031 return false;
2032 if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
2033 return false;
2034
2035 now = current_time(inode);
2036
2037 if (!relatime_need_update(mnt, inode, now))
2038 return false;
2039
2040 if (timespec64_equal(&inode->i_atime, &now))
2041 return false;
2042
2043 return true;
2044 }
2045
2046 void touch_atime(const struct path *path)
2047 {
2048 struct vfsmount *mnt = path->mnt;
2049 struct inode *inode = d_inode(path->dentry);
2050
2051 if (!atime_needs_update(path, inode))
2052 return;
2053
2054 if (!sb_start_write_trylock(inode->i_sb))
2055 return;
2056
2057 if (__mnt_want_write(mnt) != 0)
2058 goto skip_update;
2059 /*
2060 * File systems can error out when updating inodes if they need to
2061 * allocate new space to modify an inode (such is the case for
2062 * Btrfs), but since we touch atime while walking down the path we
2063 * really don't care if we failed to update the atime of the file,
2064 * so just ignore the return value.
2065 * We may also fail on filesystems that have the ability to make parts
2066 * of the fs read only, e.g. subvolumes in Btrfs.
2067 */
2068 inode_update_time(inode, S_ATIME);
2069 __mnt_drop_write(mnt);
2070 skip_update:
2071 sb_end_write(inode->i_sb);
2072 }
2073 EXPORT_SYMBOL_NS(touch_atime, ANDROID_GKI_VFS_EXPORT_ONLY);
2074
2075 /*
2076 * Return mask of changes for notify_change() that need to be done as a
2077 * response to write or truncate. Return 0 if nothing has to be changed.
2078 * Negative value on error (change should be denied).
2079 */
2080 int dentry_needs_remove_privs(struct mnt_idmap *idmap,
2081 struct dentry *dentry)
2082 {
2083 struct inode *inode = d_inode(dentry);
2084 int mask = 0;
2085 int ret;
2086
2087 if (IS_NOSEC(inode))
2088 return 0;
2089
2090 mask = setattr_should_drop_suidgid(idmap, inode);
2091 ret = security_inode_need_killpriv(dentry);
2092 if (ret < 0)
2093 return ret;
2094 if (ret)
2095 mask |= ATTR_KILL_PRIV;
2096 return mask;
2097 }
2098
2099 static int __remove_privs(struct mnt_idmap *idmap,
2100 struct dentry *dentry, int kill)
2101 {
2102 struct iattr newattrs;
2103
2104 newattrs.ia_valid = ATTR_FORCE | kill;
2105 /*
2106 * Note we call this on write, so notify_change will not
2107 * encounter any conflicting delegations:
2108 */
2109 return notify_change(idmap, dentry, &newattrs, NULL);
2110 }
2111
2112 static int __file_remove_privs(struct file *file, unsigned int flags)
2113 {
2114 struct dentry *dentry = file_dentry(file);
2115 struct inode *inode = file_inode(file);
2116 int error = 0;
2117 int kill;
2118
2119 if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
2120 return 0;
2121
2122 kill = dentry_needs_remove_privs(file_mnt_idmap(file), dentry);
2123 if (kill < 0)
2124 return kill;
2125
2126 if (kill) {
2127 if (flags & IOCB_NOWAIT)
2128 return -EAGAIN;
2129
2130 error = __remove_privs(file_mnt_idmap(file), dentry, kill);
2131 }
2132
2133 if (!error)
2134 inode_has_no_xattr(inode);
2135 return error;
2136 }
2137
2138 /**
2139 * file_remove_privs - remove special file privileges (suid, capabilities)
2140 * @file: file to remove privileges from
2141 *
2142 * When a file is modified by a write or truncation, ensure that special
2143 * file privileges are removed.
2144 *
2145 * Return: 0 on success, negative errno on failure.
2146 */
2147 int file_remove_privs(struct file *file)
2148 {
2149 return __file_remove_privs(file, 0);
2150 }
2151 EXPORT_SYMBOL(file_remove_privs);
2152
2153 static int inode_needs_update_time(struct inode *inode)
2154 {
2155 int sync_it = 0;
2156 struct timespec64 now = current_time(inode);
2157 struct timespec64 ctime;
2158
2159 /* First try to exhaust all avenues to not sync */
2160 if (IS_NOCMTIME(inode))
2161 return 0;
2162
2163 if (!timespec64_equal(&inode->i_mtime, &now))
2164 sync_it = S_MTIME;
2165
2166 ctime = inode_get_ctime(inode);
2167 if (!timespec64_equal(&ctime, &now))
2168 sync_it |= S_CTIME;
2169
2170 if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode))
2171 sync_it |= S_VERSION;
2172
2173 return sync_it;
2174 }
2175
2176 static int __file_update_time(struct file *file, int sync_mode)
2177 {
2178 int ret = 0;
2179 struct inode *inode = file_inode(file);
2180
2181 /* try to update time settings */
2182 if (!__mnt_want_write_file(file)) {
2183 ret = inode_update_time(inode, sync_mode);
2184 __mnt_drop_write_file(file);
2185 }
2186
2187 return ret;
2188 }
2189
2190 /**
2191 * file_update_time - update mtime and ctime time
2192 * @file: file accessed
2193 *
2194 * Update the mtime and ctime members of an inode and mark the inode for
2195 * writeback. Note that this function is meant exclusively for usage in
2196 * the file write path of filesystems, and filesystems may choose to
2197 * explicitly ignore updates via this function with the _NOCMTIME inode
2198 * flag, e.g. for network filesystems where these timestamps are handled
2199 * by the server. This can return an error for file systems that need to
2200 * allocate space in order to update an inode.
2201 *
2202 * Return: 0 on success, negative errno on failure.
2203 */
2204 int file_update_time(struct file *file)
2205 {
2206 int ret;
2207 struct inode *inode = file_inode(file);
2208
2209 ret = inode_needs_update_time(inode);
2210 if (ret <= 0)
2211 return ret;
2212
2213 return __file_update_time(file, ret);
2214 }
2215 EXPORT_SYMBOL(file_update_time);
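
/*
 * Illustrative sketch (editor's addition): a typical caller is a
 * ->page_mkwrite() handler, which stamps the file before letting the
 * page be written to; "myfs_page_mkwrite" is hypothetical.
 */
#if 0
static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;

	file_update_time(file);
	/* ... lock the folio and mark it writable ... */
	return VM_FAULT_LOCKED;
}
#endif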
2216
2217 /**
2218 * file_modified_flags - handle mandated vfs changes when modifying a file
2219 * @file: file that was modified
2220 * @flags: kiocb flags
2221 *
2222 * When a file has been modified, ensure that special
2223 * file privileges are removed and time settings are updated.
2224 *
2225 * If IOCB_NOWAIT is set, special file privileges will not be removed and
2226 * time settings will not be updated; -EAGAIN is returned instead.
2227 *
2228 * Context: Caller must hold the file's inode lock.
2229 *
2230 * Return: 0 on success, negative errno on failure.
2231 */
2232 static int file_modified_flags(struct file *file, int flags)
2233 {
2234 int ret;
2235 struct inode *inode = file_inode(file);
2236
2237 /*
2238 * Clear the security bits if the process is not being run by root.
2239 * This keeps people from modifying setuid and setgid binaries.
2240 */
2241 ret = __file_remove_privs(file, flags);
2242 if (ret)
2243 return ret;
2244
2245 if (unlikely(file->f_mode & FMODE_NOCMTIME))
2246 return 0;
2247
2248 ret = inode_needs_update_time(inode);
2249 if (ret <= 0)
2250 return ret;
2251 if (flags & IOCB_NOWAIT)
2252 return -EAGAIN;
2253
2254 return __file_update_time(file, ret);
2255 }
2256
2257 /**
2258 * file_modified - handle mandated vfs changes when modifying a file
2259 * @file: file that was modified
2260 *
2261 * When a file has been modified, ensure that special
2262 * file privileges are removed and time settings are updated.
2263 *
2264 * Context: Caller must hold the file's inode lock.
2265 *
2266 * Return: 0 on success, negative errno on failure.
2267 */
2268 int file_modified(struct file *file)
2269 {
2270 return file_modified_flags(file, 0);
2271 }
2272 EXPORT_SYMBOL(file_modified);
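
/*
 * Illustrative sketch (editor's addition, hypothetical "myfs" write
 * path): file_modified() runs with the inode lock held, after
 * generic_write_checks() but before any data is written.
 */
#if 0
	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out_unlock;
	ret = file_modified(iocb->ki_filp);
	if (ret)
		goto out_unlock;
	/* ... perform the actual write ... */
out_unlock:
	inode_unlock(inode);
#endif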
2273
2274 /**
2275 * kiocb_modified - handle mandated vfs changes when modifying a file
2276 * @iocb: iocb that was modified
2277 *
2278 * When a file has been modified, ensure that special
2279 * file privileges are removed and time settings are updated.
2280 *
2281 * Context: Caller must hold the file's inode lock.
2282 *
2283 * Return: 0 on success, negative errno on failure.
2284 */
2285 int kiocb_modified(struct kiocb *iocb)
2286 {
2287 return file_modified_flags(iocb->ki_filp, iocb->ki_flags);
2288 }
2289 EXPORT_SYMBOL_GPL(kiocb_modified);
2290
2291 int inode_needs_sync(struct inode *inode)
2292 {
2293 if (IS_SYNC(inode))
2294 return 1;
2295 if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
2296 return 1;
2297 return 0;
2298 }
2299 EXPORT_SYMBOL(inode_needs_sync);
2300
2301 /*
2302 * If we try to find an inode in the inode hash while it is being
2303 * deleted, we have to wait until the filesystem completes its
2304 * deletion before reporting that it isn't found. This function waits
2305 * until the deletion _might_ have completed. Callers are responsible
2306 * for rechecking the inode state.
2307 *
2308 * It doesn't matter if I_NEW is not set initially, a call to
2309 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
2310 * will DTRT.
2311 */
2312 static void __wait_on_freeing_inode(struct inode *inode)
2313 {
2314 wait_queue_head_t *wq;
2315 DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
2316 wq = bit_waitqueue(&inode->i_state, __I_NEW);
2317 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2318 spin_unlock(&inode->i_lock);
2319 spin_unlock(&inode_hash_lock);
2320 schedule();
2321 finish_wait(wq, &wait.wq_entry);
2322 spin_lock(&inode_hash_lock);
2323 }
2324
2325 static __initdata unsigned long ihash_entries;
2326 static int __init set_ihash_entries(char *str)
2327 {
2328 if (!str)
2329 return 0;
2330 ihash_entries = simple_strtoul(str, &str, 0);
2331 return 1;
2332 }
2333 __setup("ihash_entries=", set_ihash_entries);
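
/*
 * Editor's note: this accepts a boot-time override of the inode hash
 * table size, e.g. booting with "ihash_entries=131072" on the kernel
 * command line.
 */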
2334
2335 /*
2336 * Initialize the waitqueues and inode hash table.
2337 */
2338 void __init inode_init_early(void)
2339 {
2340 /* If hashes are distributed across NUMA nodes, defer
2341 * hash allocation until vmalloc space is available.
2342 */
2343 if (hashdist)
2344 return;
2345
2346 inode_hashtable =
2347 alloc_large_system_hash("Inode-cache",
2348 sizeof(struct hlist_head),
2349 ihash_entries,
2350 14,
2351 HASH_EARLY | HASH_ZERO,
2352 &i_hash_shift,
2353 &i_hash_mask,
2354 0,
2355 0);
2356 }
2357
2358 void __init inode_init(void)
2359 {
2360 /* inode slab cache */
2361 inode_cachep = kmem_cache_create("inode_cache",
2362 sizeof(struct inode),
2363 0,
2364 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
2365 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
2366 init_once);
2367
2368 /* Hash may have been set up in inode_init_early */
2369 if (!hashdist)
2370 return;
2371
2372 inode_hashtable =
2373 alloc_large_system_hash("Inode-cache",
2374 sizeof(struct hlist_head),
2375 ihash_entries,
2376 14,
2377 HASH_ZERO,
2378 &i_hash_shift,
2379 &i_hash_mask,
2380 0,
2381 0);
2382 }
2383
2384 void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
2385 {
2386 inode->i_mode = mode;
2387 if (S_ISCHR(mode)) {
2388 inode->i_fop = &def_chr_fops;
2389 inode->i_rdev = rdev;
2390 } else if (S_ISBLK(mode)) {
2391 if (IS_ENABLED(CONFIG_BLOCK))
2392 inode->i_fop = &def_blk_fops;
2393 inode->i_rdev = rdev;
2394 } else if (S_ISFIFO(mode))
2395 inode->i_fop = &pipefifo_fops;
2396 else if (S_ISSOCK(mode))
2397 ; /* leave it no_open_fops */
2398 else
2399 printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
2400 " inode %s:%lu\n", mode, inode->i_sb->s_id,
2401 inode->i_ino);
2402 }
2403 EXPORT_SYMBOL_NS(init_special_inode, ANDROID_GKI_VFS_EXPORT_ONLY);
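
/*
 * Illustrative sketch (editor's addition): a filesystem ->mknod() wires
 * up a device inode via init_special_inode(); the "myfs" names are
 * hypothetical.
 */
#if 0
static int myfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
		      struct dentry *dentry, umode_t mode, dev_t rdev)
{
	struct inode *inode = new_inode(dir->i_sb);

	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, mode);
	init_special_inode(inode, inode->i_mode, rdev);
	d_instantiate(dentry, inode);
	return 0;
}
#endif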
2404
2405 /**
2406 * inode_init_owner - Init uid, gid and mode for a new inode according to POSIX standards
2407 * @idmap: idmap of the mount the inode was created from
2408 * @inode: New inode
2409 * @dir: Directory inode
2410 * @mode: mode of the new inode
2411 *
2412 * If the inode has been created through an idmapped mount the idmap of
2413 * the vfsmount must be passed through @idmap. This function will then take
2414 * care to map the inode according to @idmap before checking permissions
2415 * and initializing i_uid and i_gid. On non-idmapped mounts or if permission
2416 * checking is to be performed on the raw inode simply pass @nop_mnt_idmap.
2417 */
2418 void inode_init_owner(struct mnt_idmap *idmap, struct inode *inode,
2419 const struct inode *dir, umode_t mode)
2420 {
2421 inode_fsuid_set(inode, idmap);
2422 if (dir && dir->i_mode & S_ISGID) {
2423 inode->i_gid = dir->i_gid;
2424
2425 /* Directories are special, and always inherit S_ISGID */
2426 if (S_ISDIR(mode))
2427 mode |= S_ISGID;
2428 } else
2429 inode_fsgid_set(inode, idmap);
2430 inode->i_mode = mode;
2431 }
2432 EXPORT_SYMBOL_NS(inode_init_owner, ANDROID_GKI_VFS_EXPORT_ONLY);
2433
2434 /**
2435 * inode_owner_or_capable - check current task permissions to inode
2436 * @idmap: idmap of the mount the inode was found from
2437 * @inode: inode being checked
2438 *
2439 * Return true if current either has CAP_FOWNER in a namespace with the
2440 * inode owner uid mapped, or owns the file.
2441 *
2442 * If the inode has been found through an idmapped mount the idmap of
2443 * the vfsmount must be passed through @idmap. This function will then take
2444 * care to map the inode according to @idmap before checking permissions.
2445 * On non-idmapped mounts or if permission checking is to be performed on the
2446 * raw inode simply pass @nop_mnt_idmap.
2447 */
2448 bool inode_owner_or_capable(struct mnt_idmap *idmap,
2449 const struct inode *inode)
2450 {
2451 vfsuid_t vfsuid;
2452 struct user_namespace *ns;
2453
2454 vfsuid = i_uid_into_vfsuid(idmap, inode);
2455 if (vfsuid_eq_kuid(vfsuid, current_fsuid()))
2456 return true;
2457
2458 ns = current_user_ns();
2459 if (vfsuid_has_mapping(ns, vfsuid) && ns_capable(ns, CAP_FOWNER))
2460 return true;
2461 return false;
2462 }
2463 EXPORT_SYMBOL(inode_owner_or_capable);
2464
2465 /*
2466 * Direct i/o helper functions
2467 */
2468 static void __inode_dio_wait(struct inode *inode)
2469 {
2470 wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
2471 DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
2472
2473 do {
2474 prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE);
2475 if (atomic_read(&inode->i_dio_count))
2476 schedule();
2477 } while (atomic_read(&inode->i_dio_count));
2478 finish_wait(wq, &q.wq_entry);
2479 }
2480
2481 /**
2482 * inode_dio_wait - wait for outstanding DIO requests to finish
2483 * @inode: inode to wait for
2484 *
2485 * Waits for all pending direct I/O requests to finish so that we can
2486 * proceed with a truncate or equivalent operation.
2487 *
2488 * Must be called under a lock that serializes taking new references
2489 * to i_dio_count, usually by inode->i_mutex.
2490 */
2491 void inode_dio_wait(struct inode *inode)
2492 {
2493 if (atomic_read(&inode->i_dio_count))
2494 __inode_dio_wait(inode);
2495 }
2496 EXPORT_SYMBOL_NS(inode_dio_wait, ANDROID_GKI_VFS_EXPORT_ONLY);
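
/*
 * Illustrative sketch (editor's addition): a truncate path drains
 * direct I/O before shrinking the file.
 */
#if 0
	inode_lock(inode);
	inode_dio_wait(inode);	/* no new DIO can start under i_rwsem */
	truncate_setsize(inode, newsize);
	inode_unlock(inode);
#endif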
2497
2498 /*
2499 * inode_set_flags - atomically set some inode flags
2500 *
2501 * Note: the caller should be holding i_mutex, or else be sure that
2502 * they have exclusive access to the inode structure (i.e., while the
2503 * inode is being instantiated). The reason for the cmpxchg() loop
2504 * (which wouldn't be necessary if all code paths that modify i_flags
2505 * actually followed this rule) is that there is at least one code path
2506 * which doesn't today, so we use cmpxchg() out of an abundance of
2507 * caution.
2508 *
2509 * In the long run, i_mutex is overkill, and we should probably look
2510 * at using the i_lock spinlock to protect i_flags, and then make sure
2511 * it is so documented in include/linux/fs.h and that all code follows
2512 * the locking convention!!
2513 */
2514 void inode_set_flags(struct inode *inode, unsigned int flags,
2515 unsigned int mask)
2516 {
2517 WARN_ON_ONCE(flags & ~mask);
2518 set_mask_bits(&inode->i_flags, mask, flags);
2519 }
2520 EXPORT_SYMBOL_NS(inode_set_flags, ANDROID_GKI_VFS_EXPORT_ONLY);
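
/*
 * Illustrative sketch (editor's addition): propagating on-disk
 * attributes into i_flags, in the style of ext4_set_inode_flags();
 * MYFS_*_FL are hypothetical on-disk flags.
 */
#if 0
	unsigned int new_fl = 0;

	if (on_disk_flags & MYFS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (on_disk_flags & MYFS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	inode_set_flags(inode, new_fl, S_NOATIME | S_IMMUTABLE);
#endif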
2521
2522 void inode_nohighmem(struct inode *inode)
2523 {
2524 mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
2525 }
2526 EXPORT_SYMBOL(inode_nohighmem);
2527
2528 /**
2529 * timestamp_truncate - Truncate timespec to a granularity
2530 * @t: Timespec
2531 * @inode: inode being updated
2532 *
2533 * Truncate a timespec to the granularity supported by the fs
2534 * containing the inode. Always rounds down. The granularity must
2535 * not be 0 nor greater than a second (NSEC_PER_SEC, i.e. 10^9 ns).
2536 */
2537 struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode)
2538 {
2539 struct super_block *sb = inode->i_sb;
2540 unsigned int gran = sb->s_time_gran;
2541
2542 t.tv_sec = clamp(t.tv_sec, sb->s_time_min, sb->s_time_max);
2543 if (unlikely(t.tv_sec == sb->s_time_max || t.tv_sec == sb->s_time_min))
2544 t.tv_nsec = 0;
2545
2546 /* Avoid division in the common cases 1 ns and 1 s. */
2547 if (gran == 1)
2548 ; /* nothing */
2549 else if (gran == NSEC_PER_SEC)
2550 t.tv_nsec = 0;
2551 else if (gran > 1 && gran < NSEC_PER_SEC)
2552 t.tv_nsec -= t.tv_nsec % gran;
2553 else
2554 WARN(1, "invalid file time granularity: %u", gran);
2555 return t;
2556 }
2557 EXPORT_SYMBOL_NS(timestamp_truncate, ANDROID_GKI_VFS_EXPORT_ONLY);
2558
2559 /**
2560 * current_time - Return FS time
2561 * @inode: inode.
2562 *
2563 * Return the current time truncated to the time granularity supported by
2564 * the fs.
2565 *
2566 * Note that @inode and @inode->i_sb cannot be NULL.
2567 * Otherwise, the function warns and returns the time without truncation.
2568 */
2569 struct timespec64 current_time(struct inode *inode)
2570 {
2571 struct timespec64 now;
2572
2573 ktime_get_coarse_real_ts64(&now);
2574 return timestamp_truncate(now, inode);
2575 }
2576 EXPORT_SYMBOL(current_time);
2577
2578 /**
2579 * inode_set_ctime_current - set the ctime to current_time
2580 * @inode: inode
2581 *
2582 * Set the inode->i_ctime to the current value for the inode. Returns
2583 * the current value that was assigned to i_ctime.
2584 */
2585 struct timespec64 inode_set_ctime_current(struct inode *inode)
2586 {
2587 struct timespec64 now = current_time(inode);
2588
2589 inode_set_ctime(inode, now.tv_sec, now.tv_nsec);
2590 return now;
2591 }
2592 EXPORT_SYMBOL(inode_set_ctime_current);
2593
2594 /**
2595 * in_group_or_capable - check whether caller is CAP_FSETID privileged
2596 * @idmap: idmap of the mount @inode was found from
2597 * @inode: inode to check
2598 * @vfsgid: the new/current vfsgid of @inode
2599 *
2600 * Check whether @vfsgid is in the caller's group list or if the caller is
2601 * privileged with CAP_FSETID over @inode. This can be used to determine
2602 * whether the setgid bit can be kept or must be dropped.
2603 *
2604 * Return: true if the caller is sufficiently privileged, false if not.
2605 */
2606 bool in_group_or_capable(struct mnt_idmap *idmap,
2607 const struct inode *inode, vfsgid_t vfsgid)
2608 {
2609 if (vfsgid_in_group_p(vfsgid))
2610 return true;
2611 if (capable_wrt_inode_uidgid(idmap, inode, CAP_FSETID))
2612 return true;
2613 return false;
2614 }
2615
2616 /**
2617 * mode_strip_sgid - handle the sgid bit for non-directories
2618 * @idmap: idmap of the mount the inode was created from
2619 * @dir: parent directory inode
2620 * @mode: mode of the file to be created in @dir
2621 *
2622 * If the @mode of the new file has both the S_ISGID and S_IXGRP bit
2623 * raised and @dir has the S_ISGID bit raised ensure that the caller is
2624 * either in the group of the parent directory or they have CAP_FSETID
2625 * in their user namespace and are privileged over the parent directory.
2626 * In all other cases, strip the S_ISGID bit from @mode.
2627 *
2628 * Return: the new mode to use for the file
2629 */
2630 umode_t mode_strip_sgid(struct mnt_idmap *idmap,
2631 const struct inode *dir, umode_t mode)
2632 {
2633 if ((mode & (S_ISGID | S_IXGRP)) != (S_ISGID | S_IXGRP))
2634 return mode;
2635 if (S_ISDIR(mode) || !dir || !(dir->i_mode & S_ISGID))
2636 return mode;
2637 if (in_group_or_capable(idmap, dir, i_gid_into_vfsgid(idmap, dir)))
2638 return mode;
2639 return mode & ~S_ISGID;
2640 }
2641 EXPORT_SYMBOL(mode_strip_sgid);
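
/*
 * Illustrative sketch (editor's addition): the VFS applies this while
 * preparing the mode for a new file, roughly:
 */
#if 0
	mode = mode_strip_sgid(idmap, dir, mode);
	mode &= ~current_umask();
#endif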
2642