// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  BSD ufs-inspired inode and directory allocation by
 *  Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/cred.h>

#include <asm/byteorder.h>

#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#include <trace/events/ext4.h>

/*
 * ialloc.c contains the inode allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the group.
 */
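
/*
 * Worked example (hypothetical geometry, not taken from this file): with
 * EXT4_INODES_PER_GROUP(sb) == 8192, inode number 12345 lives in
 * block_group = (12345 - 1) / 8192 == 1, at bit (12345 - 1) % 8192 == 4152
 * of that group's inode bitmap -- the same arithmetic used by
 * ext4_free_inode() and ext4_orphan_get() below.
 */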

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
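
/*
 * For example (a sketch, assuming start_bit == 100 and a byte-aligned
 * end_bit): ((100 + 7) & ~7UL) == 104, so bits 100..103 are set one by one
 * with ext4_set_bit(), and the remaining (end_bit - 104) >> 3 whole bytes,
 * starting at bitmap + (104 >> 3) == bitmap + 13, are filled with memset().
 * Callers in this file pass end_bit as sb->s_blocksize * 8, which is a
 * multiple of 8, so no trailing partial byte is left unset.
 */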
void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}

void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
		set_bitmap_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

static int ext4_validate_inode_bitmap(struct super_block *sb,
				      struct ext4_group_desc *desc,
				      ext4_group_t block_group,
				      struct buffer_head *bh)
{
	ext4_fsblk_t	blk;
	struct ext4_group_info *grp;

	if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
		return 0;

	grp = ext4_get_group_info(sb, block_group);

	if (buffer_verified(bh))
		return 0;
	if (!grp || EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
		return -EFSCORRUPTED;

	ext4_lock_group(sb, block_group);
	if (buffer_verified(bh))
		goto verified;
	blk = ext4_inode_bitmap(sb, desc);
	if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
					   EXT4_INODES_PER_GROUP(sb) / 8) ||
	    ext4_simulate_fail(sb, EXT4_SIM_IBITMAP_CRC)) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
			   "inode_bitmap = %llu", block_group, blk);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
		return -EFSBADCRC;
	}
	set_buffer_verified(bh);
verified:
	ext4_unlock_group(sb, block_group);
	return 0;
}

/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success, or an ERR_PTR on error.
 */
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;
	int err;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return ERR_PTR(-EFSCORRUPTED);

	bitmap_blk = ext4_inode_bitmap(sb, desc);
	if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
	    (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
		ext4_error(sb, "Invalid inode bitmap blk %llu in "
			   "block_group %u", bitmap_blk, block_group);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
		return ERR_PTR(-EFSCORRUPTED);
	}
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_warning(sb, "Cannot read inode bitmap - "
			     "block_group = %u, inode_bitmap = %llu",
			     block_group, bitmap_blk);
		return ERR_PTR(-ENOMEM);
	}
	if (bitmap_uptodate(bh))
		goto verify;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		goto verify;
	}

	ext4_lock_group(sb, block_group);
	if (ext4_has_group_desc_csum(sb) &&
	    (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
		if (block_group == 0) {
			ext4_unlock_group(sb, block_group);
			unlock_buffer(bh);
			ext4_error(sb, "Inode bitmap for bg 0 marked "
				   "uninitialized");
			err = -EFSCORRUPTED;
			goto out;
		}
		memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
				     sb->s_blocksize * 8, bh->b_data);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		set_buffer_verified(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);

	if (buffer_uptodate(bh)) {
		/*
		 * If the group is not uninit and bh is uptodate, then
		 * the bitmap is also uptodate.
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		goto verify;
	}
	/*
	 * submit the buffer_head for reading
	 */
	trace_ext4_load_inode_bitmap(sb, block_group);
	ext4_read_bh(bh, REQ_META | REQ_PRIO, ext4_end_bitmap_read);
	ext4_simulate_fail_bh(sb, bh, EXT4_SIM_IBITMAP_EIO);
	if (!buffer_uptodate(bh)) {
		put_bh(bh);
		ext4_error_err(sb, EIO, "Cannot read inode bitmap - "
			       "block_group = %u, inode_bitmap = %llu",
			       block_group, bitmap_blk);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
				EXT4_GROUP_INFO_IBITMAP_CORRUPT);
		return ERR_PTR(-EIO);
	}

verify:
	err = ext4_validate_inode_bitmap(sb, desc, block_group, bh);
	if (err)
		goto out;
	return bh;
out:
	put_bh(bh);
	return ERR_PTR(err);
}

/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the harddisk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err, count, cleared;
	struct ext4_group_info *grp;

	if (!sb) {
		printk(KERN_ERR "EXT4-fs: %s:%d: inode on "
		       "nonexistent device\n", __func__, __LINE__);
		return;
	}
	if (atomic_read(&inode->i_count) > 1) {
		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: count=%d",
			 __func__, __LINE__, inode->i_ino,
			 atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: nlink=%d\n",
			 __func__, __LINE__, inode->i_ino, inode->i_nlink);
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug("freeing inode %lu\n", ino);
	trace_ext4_free_inode(inode);

	dquot_initialize(inode);
	dquot_free_inode(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	ext4_clear_inode(inode);

	es = sbi->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error(sb, "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	/* Don't bother if the inode bitmap is corrupt. */
	if (IS_ERR(bitmap_bh)) {
		fatal = PTR_ERR(bitmap_bh);
		bitmap_bh = NULL;
		goto error_return;
	}
	if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
		grp = ext4_get_group_info(sb, block_group);
		if (!grp || unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp))) {
			fatal = -EFSCORRUPTED;
			goto error_return;
		}
	}

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	fatal = -ESRCH;
	gdp = ext4_get_group_desc(sb, block_group, &bh2);
	if (gdp) {
		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
	}
	ext4_lock_group(sb, block_group);
	cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data);
	if (fatal || !cleared) {
		ext4_unlock_group(sb, block_group);
		goto out;
	}

	count = ext4_free_inodes_count(sb, gdp) + 1;
	ext4_free_inodes_set(sb, gdp, count);
	if (is_directory) {
		count = ext4_used_dirs_count(sb, gdp) - 1;
		ext4_used_dirs_set(sb, gdp, count);
		if (percpu_counter_initialized(&sbi->s_dirs_counter))
			percpu_counter_dec(&sbi->s_dirs_counter);
	}
	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
				   EXT4_INODES_PER_GROUP(sb) / 8);
	ext4_group_desc_csum_set(sb, block_group, gdp);
	ext4_unlock_group(sb, block_group);

	if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
		percpu_counter_inc(&sbi->s_freeinodes_counter);
	if (sbi->s_log_groups_per_flex) {
		struct flex_groups *fg;

		fg = sbi_array_rcu_deref(sbi, s_flex_groups,
					 ext4_flex_group(sbi, block_group));
		atomic_inc(&fg->free_inodes);
		if (is_directory)
			atomic_dec(&fg->used_dirs);
	}
	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
out:
	if (cleared) {
		BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
		if (!fatal)
			fatal = err;
	} else {
		ext4_error(sb, "bit already cleared for inode %lu", ino);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
	}

error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}

struct orlov_stats {
	__u64 free_clusters;
	__u32 free_inodes;
	__u32 used_dirs;
};

/*
 * Helper function for Orlov's allocator; returns critical information
 * for a particular block group or flex_bg.  If flex_size is 1, then g
 * is a block group number; otherwise it is flex_bg number.
 */
static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
			    int flex_size, struct orlov_stats *stats)
{
	struct ext4_group_desc *desc;

	if (flex_size > 1) {
		struct flex_groups *fg = sbi_array_rcu_deref(EXT4_SB(sb),
							     s_flex_groups, g);
		stats->free_inodes = atomic_read(&fg->free_inodes);
		stats->free_clusters = atomic64_read(&fg->free_clusters);
		stats->used_dirs = atomic_read(&fg->used_dirs);
		return;
	}

	desc = ext4_get_group_desc(sb, g, NULL);
	if (desc) {
		stats->free_inodes = ext4_free_inodes_count(sb, desc);
		stats->free_clusters = ext4_free_group_clusters(sb, desc);
		stats->used_dirs = ext4_used_dirs_count(sb, desc);
	} else {
		stats->free_inodes = 0;
		stats->free_clusters = 0;
		stats->used_dirs = 0;
	}
}
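
/*
 * A sketch of the flex_bg indexing above (hypothetical values): with
 * s_log_groups_per_flex == 4 (flex_size == 16), block group 37 belongs to
 * flex group 37 >> 4 == 2, and get_orlov_stats(sb, 2, 16, &stats) returns
 * the aggregated counters for all 16 groups in that flex group rather than
 * the per-group descriptor counts.
 */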

/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are block groups with both free inode and free cluster counts
 * no worse than average, we return the one with the smallest directory
 * count.  Otherwise we simply return a random group.
 *
 * For the remaining directories, the rules are as follows:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free clusters left (min_clusters).
 * The parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest.  If none
 * of the groups looks good we just look for a group with more
 * free inodes than average (starting at the parent's group).
 */
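
/*
 * Numeric sketch of the thresholds computed below (hypothetical values):
 * with inodes_per_group == 8192, flex_size == 1, ngroups == 100 and
 * ndirs == 3000, max_dirs = 3000 / 100 + 8192 / 16 = 542 and
 * min_inodes = avefreei - 8192 / 4; min_clusters is derived the same way
 * from EXT4_CLUSTERS_PER_GROUP(sb).  A group failing any of the three
 * tests is skipped.
 */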

static int find_group_orlov(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode,
			    const struct qstr *qstr)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t real_ngroups = ext4_get_groups_count(sb);
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei, grp_free;
	ext4_fsblk_t freec, avefreec;
	unsigned int ndirs;
	int max_dirs, min_inodes;
	ext4_grpblk_t min_clusters;
	ext4_group_t i, grp, g, ngroups;
	struct ext4_group_desc *desc;
	struct orlov_stats stats;
	int flex_size = ext4_flex_bg_size(sbi);
	struct dx_hash_info hinfo;

	ngroups = real_ngroups;
	if (flex_size > 1) {
		ngroups = (real_ngroups + flex_size - 1) >>
			sbi->s_log_groups_per_flex;
		parent_group >>= sbi->s_log_groups_per_flex;
	}

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freec = percpu_counter_read_positive(&sbi->s_freeclusters_counter);
	avefreec = freec;
	do_div(avefreec, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if (S_ISDIR(mode) &&
	    ((parent == d_inode(sb->s_root)) ||
	     (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
		int best_ndir = inodes_per_group;
		int ret = -1;

		if (qstr) {
			hinfo.hash_version = DX_HASH_HALF_MD4;
			hinfo.seed = sbi->s_hash_seed;
			ext4fs_dirhash(parent, qstr->name, qstr->len, &hinfo);
			grp = hinfo.hash;
		} else
			grp = prandom_u32();
		parent_group = (unsigned)grp % ngroups;
		for (i = 0; i < ngroups; i++) {
			g = (parent_group + i) % ngroups;
			get_orlov_stats(sb, g, flex_size, &stats);
			if (!stats.free_inodes)
				continue;
			if (stats.used_dirs >= best_ndir)
				continue;
			if (stats.free_inodes < avefreei)
				continue;
			if (stats.free_clusters < avefreec)
				continue;
			grp = g;
			ret = 0;
			best_ndir = stats.used_dirs;
		}
		if (ret)
			goto fallback;
	found_flex_bg:
		if (flex_size == 1) {
			*group = grp;
			return 0;
		}

		/*
		 * We pack inodes at the beginning of the flexgroup's
		 * inode tables.  Block allocation decisions will do
		 * something similar, although regular files will
		 * start at 2nd block group of the flexgroup.  See
		 * ext4_ext_find_goal() and ext4_find_near().
		 */
		grp *= flex_size;
		for (i = 0; i < flex_size; i++) {
			if (grp+i >= real_ngroups)
				break;
			desc = ext4_get_group_desc(sb, grp+i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = grp+i;
				return 0;
			}
		}
		goto fallback;
	}

	max_dirs = ndirs / ngroups + inodes_per_group*flex_size / 16;
	min_inodes = avefreei - inodes_per_group*flex_size / 4;
	if (min_inodes < 1)
		min_inodes = 1;
	min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;

	/*
	 * Start looking in the flex group where we last allocated an
	 * inode for this parent directory
	 */
	if (EXT4_I(parent)->i_last_alloc_group != ~0) {
		parent_group = EXT4_I(parent)->i_last_alloc_group;
		if (flex_size > 1)
			parent_group >>= sbi->s_log_groups_per_flex;
	}

	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		get_orlov_stats(sb, grp, flex_size, &stats);
		if (stats.used_dirs >= max_dirs)
			continue;
		if (stats.free_inodes < min_inodes)
			continue;
		if (stats.free_clusters < min_clusters)
			continue;
		goto found_flex_bg;
	}

fallback:
	ngroups = real_ngroups;
	avefreei = freei / ngroups;
fallback_retry:
	parent_group = EXT4_I(parent)->i_block_group;
	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, grp, NULL);
		if (desc) {
			grp_free = ext4_free_inodes_count(sb, desc);
			if (grp_free && grp_free >= avefreei) {
				*group = grp;
				return 0;
			}
		}
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any blockgroups
		 */
		avefreei = 0;
		goto fallback_retry;
	}

	return -1;
}

static int find_group_other(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	int flex_size = ext4_flex_bg_size(EXT4_SB(sb));

	/*
	 * Try to place the inode in the same flex group as its
	 * parent.  If we can't find space, use the Orlov algorithm to
	 * find another flex group, and store that information in the
	 * parent directory's inode so that future allocations will use
	 * that flex group.
	 */
	if (flex_size > 1) {
		int retry = 0;

	try_again:
		parent_group &= ~(flex_size-1);
		last = parent_group + flex_size;
		if (last > ngroups)
			last = ngroups;
		for (i = parent_group; i < last; i++) {
			desc = ext4_get_group_desc(sb, i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = i;
				return 0;
			}
		}
		if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
			retry = 1;
			parent_group = EXT4_I(parent)->i_last_alloc_group;
			goto try_again;
		}
		/*
		 * If this didn't work, use the Orlov search algorithm
		 * to find a new flex group; we pass in the mode to
		 * avoid the topdir algorithms.
		 */
		*group = parent_group + flex_size;
		if (*group > ngroups)
			*group = 0;
		return find_group_orlov(sb, parent, group, mode, NULL);
	}

	/*
	 * Try to place the inode in its parent directory
	 */
	*group = parent_group;
	desc = ext4_get_group_desc(sb, *group, NULL);
	if (desc && ext4_free_inodes_count(sb, desc) &&
	    ext4_free_group_clusters(sb, desc))
		return 0;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	*group = (*group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
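	/*
	 * (Sketch: since *group accumulates i = 1, 2, 4, 8, ..., the probe
	 * sequence relative to the hashed starting group is +1, +3, +7,
	 * +15, ... mod ngroups.)
	 */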
	for (i = 1; i < ngroups; i <<= 1) {
		*group += i;
		if (*group >= ngroups)
			*group -= ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
		    ext4_free_group_clusters(sb, desc))
			return 0;
	}

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	*group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++*group >= ngroups)
			*group = 0;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc))
			return 0;
	}

	return -1;
}

/*
 * In no journal mode, if an inode has recently been deleted, we want
 * to avoid reusing it until we're reasonably sure the inode table
 * block has been written back to disk.  (Yes, these values are
 * somewhat arbitrary...)
 */
#define RECENTCY_MIN	60
#define RECENTCY_DIRTY	300
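
/*
 * For example: in no-journal mode, an inode whose on-disk i_dtime says it
 * was deleted 45 seconds ago is skipped by recently_deleted() below, since
 * 45 < RECENTCY_MIN.  If its inode table block is still dirty in the
 * buffer cache, the window grows to RECENTCY_MIN + RECENTCY_DIRTY = 360
 * seconds.
 */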

static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino)
{
	struct ext4_group_desc	*gdp;
	struct ext4_inode	*raw_inode;
	struct buffer_head	*bh;
	int inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
	int offset, ret = 0;
	int recentcy = RECENTCY_MIN;
	u32 dtime, now;

	gdp = ext4_get_group_desc(sb, group, NULL);
	if (unlikely(!gdp))
		return 0;

	bh = sb_find_get_block(sb, ext4_inode_table(sb, gdp) +
		       (ino / inodes_per_block));
	if (!bh || !buffer_uptodate(bh))
		/*
		 * If the block is not in the buffer cache, then it
		 * must have been written out.
		 */
		goto out;

	offset = (ino % inodes_per_block) * EXT4_INODE_SIZE(sb);
	raw_inode = (struct ext4_inode *) (bh->b_data + offset);

	/* i_dtime is only 32 bits on disk, but we only care about relative
	 * times in the range of a few minutes (i.e. long enough to sync a
	 * recently-deleted inode to disk), so using the low 32 bits of the
	 * clock (a 68 year range) is enough, see time_before32() */
	dtime = le32_to_cpu(raw_inode->i_dtime);
	now = ktime_get_real_seconds();
	if (buffer_dirty(bh))
		recentcy += RECENTCY_DIRTY;

	if (dtime && time_before32(dtime, now) &&
	    time_before32(now, dtime + recentcy))
		ret = 1;
out:
	brelse(bh);
	return ret;
}

static int find_inode_bit(struct super_block *sb, ext4_group_t group,
			  struct buffer_head *bitmap, unsigned long *ino)
{
	bool check_recently_deleted = EXT4_SB(sb)->s_journal == NULL;
	unsigned long recently_deleted_ino = EXT4_INODES_PER_GROUP(sb);

next:
	*ino = ext4_find_next_zero_bit((unsigned long *)
				       bitmap->b_data,
				       EXT4_INODES_PER_GROUP(sb), *ino);
	if (*ino >= EXT4_INODES_PER_GROUP(sb))
		goto not_found;

	if (check_recently_deleted && recently_deleted(sb, group, *ino)) {
		recently_deleted_ino = *ino;
		*ino = *ino + 1;
		if (*ino < EXT4_INODES_PER_GROUP(sb))
			goto next;
		goto not_found;
	}
	return 1;
not_found:
	if (recently_deleted_ino >= EXT4_INODES_PER_GROUP(sb))
		return 0;
	/*
	 * Not reusing recently deleted inodes is mostly a preference. We don't
	 * want to report ENOSPC or skew allocation patterns because of that.
	 * So return even a recently deleted inode if we could not find a
	 * better one in the given range.
	 */
	*ino = recently_deleted_ino;
	return 1;
}

int ext4_mark_inode_used(struct super_block *sb, int ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	struct buffer_head *inode_bitmap_bh = NULL, *group_desc_bh = NULL;
	struct ext4_group_desc *gdp;
	ext4_group_t group;
	int bit;
	int err = -EFSCORRUPTED;

	if (ino < EXT4_FIRST_INO(sb) || ino > max_ino)
		goto out;

	group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
	if (IS_ERR(inode_bitmap_bh))
		return PTR_ERR(inode_bitmap_bh);

	if (ext4_test_bit(bit, inode_bitmap_bh->b_data)) {
		err = 0;
		goto out;
	}

	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
	if (!gdp || !group_desc_bh) {
		err = -EINVAL;
		goto out;
	}

	ext4_set_bit(bit, inode_bitmap_bh->b_data);

	BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(NULL, NULL, inode_bitmap_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}
	err = sync_dirty_buffer(inode_bitmap_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}

	/* We may have to initialize the block bitmap if it isn't already */
	if (ext4_has_group_desc_csum(sb) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bitmap_bh;

		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
		if (IS_ERR(block_bitmap_bh)) {
			err = PTR_ERR(block_bitmap_bh);
			goto out;
		}

		BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
		err = ext4_handle_dirty_metadata(NULL, NULL, block_bitmap_bh);
		sync_dirty_buffer(block_bitmap_bh);

		/* recheck and clear flag under lock if we still need to */
		ext4_lock_group(sb, group);
		if (ext4_has_group_desc_csum(sb) &&
		    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			ext4_free_group_clusters_set(sb, gdp,
				ext4_free_clusters_after_init(sb, group, gdp));
			ext4_block_bitmap_csum_set(sb, group, gdp,
						   block_bitmap_bh);
			ext4_group_desc_csum_set(sb, group, gdp);
		}
		ext4_unlock_group(sb, group);
		brelse(block_bitmap_bh);

		if (err) {
			ext4_std_error(sb, err);
			goto out;
		}
	}

	/* Update the relevant bg descriptor fields */
	if (ext4_has_group_desc_csum(sb)) {
		int free;

		ext4_lock_group(sb, group); /* while we modify the bg desc */
		free = EXT4_INODES_PER_GROUP(sb) -
			ext4_itable_unused_count(sb, gdp);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
			free = 0;
		}

		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group.  If it is greater
		 * we need to update the bg_itable_unused count.
		 */
		if (bit >= free)
			ext4_itable_unused_set(sb, gdp,
					(EXT4_INODES_PER_GROUP(sb) - bit - 1));
	} else {
		ext4_lock_group(sb, group);
	}

	ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
	if (ext4_has_group_desc_csum(sb)) {
		ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
					   EXT4_INODES_PER_GROUP(sb) / 8);
		ext4_group_desc_csum_set(sb, group, gdp);
	}

	ext4_unlock_group(sb, group);
	err = ext4_handle_dirty_metadata(NULL, NULL, group_desc_bh);
	sync_dirty_buffer(group_desc_bh);
out:
	return err;
}

static int ext4_xattr_credits_for_new_inode(struct inode *dir, mode_t mode,
					    bool encrypt)
{
	struct super_block *sb = dir->i_sb;
	int nblocks = 0;
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	struct posix_acl *p = get_acl(dir, ACL_TYPE_DEFAULT);

	if (IS_ERR(p))
		return PTR_ERR(p);
	if (p) {
		int acl_size = p->a_count * sizeof(ext4_acl_entry);

		nblocks += (S_ISDIR(mode) ? 2 : 1) *
			__ext4_xattr_set_credits(sb, NULL /* inode */,
						 NULL /* block_bh */, acl_size,
						 true /* is_create */);
		posix_acl_release(p);
	}
#endif

#ifdef CONFIG_SECURITY
	{
		int num_security_xattrs = 1;

#ifdef CONFIG_INTEGRITY
		num_security_xattrs++;
#endif
		/*
		 * We assume that security xattrs are never more than 1k.
		 * In practice they are under 128 bytes.
		 */
		nblocks += num_security_xattrs *
			__ext4_xattr_set_credits(sb, NULL /* inode */,
						 NULL /* block_bh */, 1024,
						 true /* is_create */);
	}
#endif
	if (encrypt)
		nblocks += __ext4_xattr_set_credits(sb,
						    NULL /* inode */,
						    NULL /* block_bh */,
						    FSCRYPT_SET_CONTEXT_MAX_SIZE,
						    true /* is_create */);
	return nblocks;
}
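
/*
 * Note on the ACL term above: a new directory inheriting a default ACL
 * gets both an access ACL and a default ACL xattr, hence the
 * (S_ISDIR(mode) ? 2 : 1) multiplier on the xattr-set credits.
 */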

/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
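/*
 * A goal inode, if supplied, short-circuits both policies: e.g. with
 * 8192 inodes per group (hypothetical), goal == 8193 maps to group
 * (8193 - 1) / 8192 == 1 and in-group offset (8193 - 1) % 8192 == 0, and
 * the search below starts there.
 */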
struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
			       umode_t mode, const struct qstr *qstr,
			       __u32 goal, uid_t *owner, __u32 i_flags,
			       int handle_type, unsigned int line_no,
			       int nblocks)
{
	struct super_block *sb;
	struct buffer_head *inode_bitmap_bh = NULL;
	struct buffer_head *group_desc_bh;
	ext4_group_t ngroups, group = 0;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext4_group_desc *gdp = NULL;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int ret2, err;
	struct inode *ret;
	ext4_group_t i;
	ext4_group_t flex_group;
	struct ext4_group_info *grp = NULL;
	bool encrypt = false;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	sbi = EXT4_SB(sb);

	if (unlikely(ext4_forced_shutdown(sbi)))
		return ERR_PTR(-EIO);

	ngroups = ext4_get_groups_count(sb);
	trace_ext4_request_inode(dir, mode);
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);

	/*
	 * Initialize owners and quota early so that we don't have to account
	 * for quota initialization worst case in standard inode creating
	 * transaction
	 */
	if (owner) {
		inode->i_mode = mode;
		i_uid_write(inode, owner[0]);
		i_gid_write(inode, owner[1]);
	} else if (test_opt(sb, GRPID)) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = dir->i_gid;
	} else
		inode_init_owner(inode, dir, mode);

	if (ext4_has_feature_project(sb) &&
	    ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT))
		ei->i_projid = EXT4_I(dir)->i_projid;
	else
		ei->i_projid = make_kprojid(&init_user_ns, EXT4_DEF_PROJID);

	if (!(i_flags & EXT4_EA_INODE_FL)) {
		err = fscrypt_prepare_new_inode(dir, inode, &encrypt);
		if (err)
			goto out;
	}

	err = dquot_initialize(inode);
	if (err)
		goto out;

	if (!handle && sbi->s_journal && !(i_flags & EXT4_EA_INODE_FL)) {
		ret2 = ext4_xattr_credits_for_new_inode(dir, mode, encrypt);
		if (ret2 < 0) {
			err = ret2;
			goto out;
		}
		nblocks += ret2;
	}

	if (!goal)
		goal = sbi->s_inode_goal;

	if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
		group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
		ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
		ret2 = 0;
		goto got_group;
	}

	if (S_ISDIR(mode))
		ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
	else
		ret2 = find_group_other(sb, dir, &group, mode);

got_group:
	EXT4_I(dir)->i_last_alloc_group = group;
	err = -ENOSPC;
	if (ret2 == -1)
		goto out;

	/*
	 * Normally we will only go through one pass of this loop,
	 * unless we get unlucky and it turns out the group we selected
	 * had its last inode grabbed by someone else.
	 */
	for (i = 0; i < ngroups; i++, ino = 0) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
		if (!gdp)
			goto out;

		/*
		 * Check free inodes count before loading bitmap.
		 */
		if (ext4_free_inodes_count(sb, gdp) == 0)
			goto next_group;

		if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
			grp = ext4_get_group_info(sb, group);
			/*
			 * Skip groups with already-known suspicious inode
			 * tables
			 */
			if (!grp || EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
				goto next_group;
		}

		brelse(inode_bitmap_bh);
		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
		/* Skip groups with suspicious inode tables */
		if (((!(sbi->s_mount_state & EXT4_FC_REPLAY))
		     && EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) ||
		    IS_ERR(inode_bitmap_bh)) {
			inode_bitmap_bh = NULL;
			goto next_group;
		}

repeat_in_this_group:
		ret2 = find_inode_bit(sb, group, inode_bitmap_bh, &ino);
		if (!ret2)
			goto next_group;

		if (group == 0 && (ino + 1) < EXT4_FIRST_INO(sb)) {
			ext4_error(sb, "reserved inode found cleared - "
				   "inode=%lu", ino + 1);
			ext4_mark_group_bitmap_corrupted(sb, group,
					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
			goto next_group;
		}

		if ((!(sbi->s_mount_state & EXT4_FC_REPLAY)) && !handle) {
			BUG_ON(nblocks <= 0);
			handle = __ext4_journal_start_sb(dir->i_sb, line_no,
				 handle_type, nblocks, 0,
				 ext4_trans_default_revoke_credits(sb));
			if (IS_ERR(handle)) {
				err = PTR_ERR(handle);
				ext4_std_error(sb, err);
				goto out;
			}
		}
		BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, inode_bitmap_bh);
		if (err) {
			ext4_std_error(sb, err);
			goto out;
		}
		ext4_lock_group(sb, group);
		ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data);
		if (ret2) {
			/* Someone already took the bit. Repeat the search
			 * with lock held.
			 */
			ret2 = find_inode_bit(sb, group, inode_bitmap_bh, &ino);
			if (ret2) {
				ext4_set_bit(ino, inode_bitmap_bh->b_data);
				ret2 = 0;
			} else {
				ret2 = 1; /* we didn't grab the inode */
			}
		}
		ext4_unlock_group(sb, group);
		ino++;		/* the inode bitmap is zero-based */
		if (!ret2)
			goto got; /* we grabbed the inode! */

		if (ino < EXT4_INODES_PER_GROUP(sb))
			goto repeat_in_this_group;
next_group:
		if (++group == ngroups)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
	BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, group_desc_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}

	/* We may have to initialize the block bitmap if it isn't already */
	if (ext4_has_group_desc_csum(sb) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bitmap_bh;

		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
		if (IS_ERR(block_bitmap_bh)) {
			err = PTR_ERR(block_bitmap_bh);
			goto out;
		}
		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
		if (err) {
			brelse(block_bitmap_bh);
			ext4_std_error(sb, err);
			goto out;
		}

		BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
		err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);

		/* recheck and clear flag under lock if we still need to */
		ext4_lock_group(sb, group);
		if (ext4_has_group_desc_csum(sb) &&
		    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			ext4_free_group_clusters_set(sb, gdp,
				ext4_free_clusters_after_init(sb, group, gdp));
			ext4_block_bitmap_csum_set(sb, group, gdp,
						   block_bitmap_bh);
			ext4_group_desc_csum_set(sb, group, gdp);
		}
		ext4_unlock_group(sb, group);
		brelse(block_bitmap_bh);

		if (err) {
			ext4_std_error(sb, err);
			goto out;
		}
	}

	/* Update the relevant bg descriptor fields */
	if (ext4_has_group_desc_csum(sb)) {
		int free;
		struct ext4_group_info *grp = NULL;

		if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
			grp = ext4_get_group_info(sb, group);
			if (!grp) {
				err = -EFSCORRUPTED;
				goto out;
			}
			down_read(&grp->alloc_sem); /*
						     * protect vs itable
						     * lazyinit
						     */
		}
		ext4_lock_group(sb, group); /* while we modify the bg desc */
		free = EXT4_INODES_PER_GROUP(sb) -
			ext4_itable_unused_count(sb, gdp);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
			free = 0;
		}
		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group.  If it is greater
		 * we need to update the bg_itable_unused count.
		 */
		if (ino > free)
			ext4_itable_unused_set(sb, gdp,
					(EXT4_INODES_PER_GROUP(sb) - ino));
		if (!(sbi->s_mount_state & EXT4_FC_REPLAY))
			up_read(&grp->alloc_sem);
	} else {
		ext4_lock_group(sb, group);
	}

	ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
	if (S_ISDIR(mode)) {
		ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1);
		if (sbi->s_log_groups_per_flex) {
			ext4_group_t f = ext4_flex_group(sbi, group);

			atomic_inc(&sbi_array_rcu_deref(sbi, s_flex_groups,
							f)->used_dirs);
		}
	}
	if (ext4_has_group_desc_csum(sb)) {
		ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
					   EXT4_INODES_PER_GROUP(sb) / 8);
		ext4_group_desc_csum_set(sb, group, gdp);
	}
	ext4_unlock_group(sb, group);

	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);

	if (sbi->s_log_groups_per_flex) {
		flex_group = ext4_flex_group(sbi, group);
		atomic_dec(&sbi_array_rcu_deref(sbi, s_flex_groups,
						flex_group)->free_inodes);
	}

	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
	ei->i_crtime = inode->i_mtime;

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	/* Don't inherit extent flag from directory, amongst others. */
	ei->i_flags =
		ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
	ei->i_flags |= i_flags;
	ei->i_file_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_group = group;
	ei->i_last_alloc_group = ~0;

	ext4_set_inode_flags(inode, true);
	if (IS_DIRSYNC(inode))
		ext4_handle_sync(handle);
	if (insert_inode_locked(inode) < 0) {
		/*
		 * Likely a bitmap corruption causing inode to be allocated
		 * twice.
		 */
		err = -EIO;
		ext4_error(sb, "failed to insert inode %lu: doubly allocated?",
			   inode->i_ino);
		ext4_mark_group_bitmap_corrupted(sb, group,
					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
		goto out;
	}
	inode->i_generation = prandom_u32();

	/* Precompute checksum seed for inode metadata */
	if (ext4_has_metadata_csum(sb)) {
		__u32 csum;
		__le32 inum = cpu_to_le32(inode->i_ino);
		__le32 gen = cpu_to_le32(inode->i_generation);
		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
				   sizeof(inum));
		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
					      sizeof(gen));
	}
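	/*
	 * (In other words, assuming ext4_chksum() is the usual crc32c
	 * helper: i_csum_seed = crc32c(crc32c(s_csum_seed, le32(i_ino)),
	 * le32(i_generation)).)
	 */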

	ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
	ext4_set_inode_state(inode, EXT4_STATE_NEW);

	ei->i_extra_isize = sbi->s_want_extra_isize;
	ei->i_inline_off = 0;
	if (ext4_has_feature_inline_data(sb) &&
	    (!(ei->i_flags & EXT4_DAX_FL) || S_ISDIR(mode)))
		ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
	ret = inode;
	err = dquot_alloc_inode(inode);
	if (err)
		goto fail_drop;

	/*
	 * Since the encryption xattr will always be unique, create it first so
	 * that it's less likely to end up in an external xattr block and
	 * prevent its deduplication.
	 */
	if (encrypt) {
		err = fscrypt_set_context(inode, handle);
		if (err)
			goto fail_free_drop;
	}

	if (!(ei->i_flags & EXT4_EA_INODE_FL)) {
		err = ext4_init_acl(handle, inode, dir);
		if (err)
			goto fail_free_drop;

		err = ext4_init_security(handle, inode, dir, qstr);
		if (err)
			goto fail_free_drop;
	}

	if (ext4_has_feature_extents(sb)) {
		/* set extent flag only for directory, file and normal symlink */
		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
			ext4_ext_tree_init(handle, inode);
		}
	}

	if (ext4_handle_valid(handle)) {
		ei->i_sync_tid = handle->h_transaction->t_tid;
		ei->i_datasync_tid = handle->h_transaction->t_tid;
	}

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	trace_ext4_allocate_inode(inode, dir, mode);
	brelse(inode_bitmap_bh);
	return ret;

fail_free_drop:
	dquot_free_inode(inode);
fail_drop:
	clear_nlink(inode);
	unlock_new_inode(inode);
out:
	dquot_drop(inode);
	inode->i_flags |= S_NOQUOTA;
	iput(inode);
	brelse(inode_bitmap_bh);
	return ERR_PTR(err);
}

/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	ext4_group_t block_group;
	int bit;
	struct buffer_head *bitmap_bh = NULL;
	struct inode *inode = NULL;
	int err = -EFSCORRUPTED;

	if (ino < EXT4_FIRST_INO(sb) || ino > max_ino)
		goto bad_orphan;

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (IS_ERR(bitmap_bh))
		return ERR_CAST(bitmap_bh);

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data))
		goto bad_orphan;

	inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ext4_error_err(sb, -err,
			       "couldn't read orphan inode %lu (err %d)",
			       ino, err);
		brelse(bitmap_bh);
		return inode;
	}

	/*
	 * If the orphan has i_nlink > 0 then it should be able to
	 * be truncated, otherwise it won't be removed from the orphan
	 * list during processing and an infinite loop will result.
	 * Similarly, it must not be a bad inode.
	 */
	if ((inode->i_nlink && !ext4_can_truncate(inode)) ||
	    is_bad_inode(inode))
		goto bad_orphan;

	if (NEXT_ORPHAN(inode) > max_ino)
		goto bad_orphan;
	brelse(bitmap_bh);
	return inode;

bad_orphan:
	ext4_error(sb, "bad orphan inode %lu", ino);
	if (bitmap_bh)
		printk(KERN_ERR "ext4_test_bit(bit=%d, block=%llu) = %d\n",
		       bit, (unsigned long long)bitmap_bh->b_blocknr,
		       ext4_test_bit(bit, bitmap_bh->b_data));
	if (inode) {
		printk(KERN_ERR "is_bad_inode(inode)=%d\n",
		       is_bad_inode(inode));
		printk(KERN_ERR "NEXT_ORPHAN(inode)=%u\n",
		       NEXT_ORPHAN(inode));
		printk(KERN_ERR "max_ino=%lu\n", max_ino);
		printk(KERN_ERR "i_nlink=%u\n", inode->i_nlink);
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
	}
	brelse(bitmap_bh);
	return ERR_PTR(err);
}

unsigned long ext4_count_free_inodes(struct super_block *sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_inode_bitmap(sb, i);
		if (IS_ERR(bitmap_bh)) {
			bitmap_bh = NULL;
			continue;
		}

		x = ext4_count_free(bitmap_bh->b_data,
				    EXT4_INODES_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
			(unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_inodes: "
	       "stored = %u, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		cond_resched();
	}
	return desc_count;
#endif
}

/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block * sb)
{
	unsigned long count = 0;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	for (i = 0; i < ngroups; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		count += ext4_used_dirs_count(sb, gdp);
	}
	return count;
}

/*
 * Zero out a not-yet-zeroed inode table by writing zeroes through the
 * whole inode table.  Must be called without any spinlock held.  The only
 * place this is called from on an active filesystem is the ext4lazyinit
 * thread, so we do not need any special locks; however, we have to prevent
 * inode allocation from the current group, so we take the alloc_sem lock
 * to block ext4_new_inode() until we are finished.
 */
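/*
 * Scale sketch (hypothetical 4 KiB blocks, 256-byte inodes):
 * s_inodes_per_block == 16, so a group with 8192 inodes has
 * s_itb_per_group == 512 inode-table blocks; if 100 inodes are already in
 * use, used_blks = DIV_ROUND_UP(100, 16) == 7 and the remaining 505
 * blocks are zeroed by sb_issue_zeroout() below.
 */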
int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
				 int barrier)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	struct buffer_head *group_desc_bh;
	handle_t *handle;
	ext4_fsblk_t blk;
	int num, ret = 0, used_blks = 0;
	unsigned long used_inos = 0;

	/* This should not happen, but just to be sure check this */
	if (sb_rdonly(sb)) {
		ret = 1;
		goto out;
	}

	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
	if (!gdp || !grp)
		goto out;

	/*
	 * We do not need to lock this, because we are the only one
	 * handling this flag.
	 */
	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
		goto out;

	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	down_write(&grp->alloc_sem);
	/*
	 * If inode bitmap was already initialized there may be some
	 * used inodes so we need to skip blocks with used inodes in
	 * inode table.
	 */
	if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
		used_inos = EXT4_INODES_PER_GROUP(sb) -
			    ext4_itable_unused_count(sb, gdp);
		used_blks = DIV_ROUND_UP(used_inos, sbi->s_inodes_per_block);

		/* Bogus inode unused count? */
		if (used_blks < 0 || used_blks > sbi->s_itb_per_group) {
			ext4_error(sb, "Something is wrong with group %u: "
				   "used itable blocks: %d; "
				   "itable unused count: %u",
				   group, used_blks,
				   ext4_itable_unused_count(sb, gdp));
			ret = 1;
			goto err_out;
		}

		used_inos += group * EXT4_INODES_PER_GROUP(sb);
		/*
		 * Are there some uninitialized inodes in the inode table
		 * before the first normal inode?
		 */
		if ((used_blks != sbi->s_itb_per_group) &&
		     (used_inos < EXT4_FIRST_INO(sb))) {
			ext4_error(sb, "Something is wrong with group %u: "
				   "itable unused count: %u; "
				   "itables initialized count: %ld",
				   group, ext4_itable_unused_count(sb, gdp),
				   used_inos);
			ret = 1;
			goto err_out;
		}
	}

	blk = ext4_inode_table(sb, gdp) + used_blks;
	num = sbi->s_itb_per_group - used_blks;

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	ret = ext4_journal_get_write_access(handle,
					    group_desc_bh);
	if (ret)
		goto err_out;

	/*
	 * Skip zeroout if the inode table is full. But we set the ZEROED
	 * flag anyway, because obviously, when it is full it does not need
	 * further zeroing.
	 */
	if (unlikely(num == 0))
		goto skip_zeroout;

	ext4_debug("going to zero out inode table in group %d\n",
		   group);
	ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
	if (ret < 0)
		goto err_out;
	if (barrier)
		blkdev_issue_flush(sb->s_bdev, GFP_NOFS);

skip_zeroout:
	ext4_lock_group(sb, group);
	gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
	ext4_group_desc_csum_set(sb, group, gdp);
	ext4_unlock_group(sb, group);

	BUFFER_TRACE(group_desc_bh,
		     "call ext4_handle_dirty_metadata");
	ret = ext4_handle_dirty_metadata(handle, NULL,
					 group_desc_bh);

err_out:
	up_write(&grp->alloc_sem);
	ext4_journal_stop(handle);
out:
	return ret;
}