1 /*
2  *  linux/fs/ext4/ialloc.c
3  *
4  * Copyright (C) 1992, 1993, 1994, 1995
5  * Remy Card (card@masi.ibp.fr)
6  * Laboratoire MASI - Institut Blaise Pascal
7  * Universite Pierre et Marie Curie (Paris VI)
8  *
9  *  BSD ufs-inspired inode and directory allocation by
10  *  Stephen Tweedie (sct@redhat.com), 1993
11  *  Big-endian to little-endian byte-swapping/bitmaps by
12  *        David S. Miller (davem@caip.rutgers.edu), 1995
13  */
14 
15 #include <linux/time.h>
16 #include <linux/fs.h>
17 #include <linux/stat.h>
18 #include <linux/string.h>
19 #include <linux/quotaops.h>
20 #include <linux/buffer_head.h>
21 #include <linux/random.h>
22 #include <linux/bitops.h>
23 #include <linux/blkdev.h>
24 #include <asm/byteorder.h>
25 
26 #include "ext4.h"
27 #include "ext4_jbd2.h"
28 #include "xattr.h"
29 #include "acl.h"
30 
31 #include <trace/events/ext4.h>
32 
33 /*
34  * ialloc.c contains the inode allocation and deallocation routines
35  */
36 
37 /*
38  * The free inodes are managed by bitmaps.  A file system contains several
39  * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
40  * block for inodes, N blocks for the inode table and data blocks.
41  *
42  * The file system contains group descriptors which are located after the
43  * super block.  Each descriptor contains the number of the bitmap block and
44  * the free blocks count in the group.
45  */
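/*
 * A worked example of the mapping this implies (illustrative numbers,
 * assuming 8192 inodes per group): inode numbers are 1-based, so for
 * ino = 10000,
 *
 *	block_group = (10000 - 1) / 8192 = 1;
 *	bit         = (10000 - 1) % 8192 = 1807;
 *
 * i.e. inode 10000 is tracked by bit 1807 of block group 1's inode
 * bitmap.  This is exactly the computation ext4_free_inode() and
 * ext4_orphan_get() perform below.
 */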
46 
47 /*
48  * To avoid calling the atomic setbit hundreds or thousands of times, we only
49  * need to use it within a single byte (to ensure we get endianness right).
50  * We can use memset for the rest of the bitmap as there are no other users.
51  */
52 void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
53 {
54 	int i;
55 
56 	if (start_bit >= end_bit)
57 		return;
58 
59 	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
60 	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
61 		ext4_set_bit(i, bitmap);
62 	if (i < end_bit)
63 		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
64 }
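/*
 * Worked example (illustrative values): for start_bit = 100 and
 * end_bit = 160, the loop sets bits 100..103 individually, up to the
 * next byte boundary ((100 + 7) & ~7 == 104), and the memset then
 * fills bytes 13..19 (bits 104..159) in one call.  The memset covers
 * whole bytes only; callers pass a byte-aligned end_bit (the block
 * size in bits), so no trailing bits are left unset.
 */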
65 
66 void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
67 {
68 	if (uptodate) {
69 		set_buffer_uptodate(bh);
70 		set_bitmap_uptodate(bh);
71 	}
72 	unlock_buffer(bh);
73 	put_bh(bh);
74 }
75 
76 static int ext4_validate_inode_bitmap(struct super_block *sb,
77 				      struct ext4_group_desc *desc,
78 				      ext4_group_t block_group,
79 				      struct buffer_head *bh)
80 {
81 	ext4_fsblk_t	blk;
82 	struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);
83 	struct ext4_sb_info *sbi = EXT4_SB(sb);
84 
85 	if (buffer_verified(bh))
86 		return 0;
87 	if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
88 		return -EFSCORRUPTED;
89 
90 	ext4_lock_group(sb, block_group);
91 	if (buffer_verified(bh))
92 		goto verified;
93 	blk = ext4_inode_bitmap(sb, desc);
94 	if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
95 					   EXT4_INODES_PER_GROUP(sb) / 8)) {
96 		ext4_unlock_group(sb, block_group);
97 		ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
98 			   "inode_bitmap = %llu", block_group, blk);
99 		grp = ext4_get_group_info(sb, block_group);
100 		if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
101 			int count;
102 			count = ext4_free_inodes_count(sb, desc);
103 			percpu_counter_sub(&sbi->s_freeinodes_counter,
104 					   count);
105 		}
106 		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
107 		return -EFSBADCRC;
108 	}
109 	set_buffer_verified(bh);
110 verified:
111 	ext4_unlock_group(sb, block_group);
112 	return 0;
113 }
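/*
 * Note the double-checked pattern above: buffer_verified() is tested
 * once without the group lock (the common, already-verified case) and
 * again under ext4_lock_group(), so only one task pays for the
 * checksum verification while concurrent readers take the "verified"
 * exit.
 */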
114 
115 /*
116  * Read the inode allocation bitmap for a given block_group, reading
117  * into the specified slot in the superblock's bitmap cache.
118  *
119  * Return buffer_head of bitmap on success, or an ERR_PTR on error.
120  */
121 static struct buffer_head *
122 ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
123 {
124 	struct ext4_group_desc *desc;
125 	struct ext4_sb_info *sbi = EXT4_SB(sb);
126 	struct buffer_head *bh = NULL;
127 	ext4_fsblk_t bitmap_blk;
128 	int err;
129 
130 	desc = ext4_get_group_desc(sb, block_group, NULL);
131 	if (!desc)
132 		return ERR_PTR(-EFSCORRUPTED);
133 
134 	bitmap_blk = ext4_inode_bitmap(sb, desc);
135 	if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
136 	    (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
137 		ext4_error(sb, "Invalid inode bitmap blk %llu in "
138 			   "block_group %u", bitmap_blk, block_group);
139 		return ERR_PTR(-EFSCORRUPTED);
140 	}
141 	bh = sb_getblk(sb, bitmap_blk);
142 	if (unlikely(!bh)) {
143 		ext4_error(sb, "Cannot read inode bitmap - "
144 			    "block_group = %u, inode_bitmap = %llu",
145 			    block_group, bitmap_blk);
146 		return ERR_PTR(-EIO);
147 	}
148 	if (bitmap_uptodate(bh))
149 		goto verify;
150 
151 	lock_buffer(bh);
152 	if (bitmap_uptodate(bh)) {
153 		unlock_buffer(bh);
154 		goto verify;
155 	}
156 
157 	ext4_lock_group(sb, block_group);
158 	if (ext4_has_group_desc_csum(sb) &&
159 	    (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
160 		if (block_group == 0) {
161 			ext4_unlock_group(sb, block_group);
162 			unlock_buffer(bh);
163 			ext4_error(sb, "Inode bitmap for bg 0 marked "
164 				   "uninitialized");
165 			err = -EFSCORRUPTED;
166 			goto out;
167 		}
168 		memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
169 		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
170 				     sb->s_blocksize * 8, bh->b_data);
171 		set_bitmap_uptodate(bh);
172 		set_buffer_uptodate(bh);
173 		set_buffer_verified(bh);
174 		ext4_unlock_group(sb, block_group);
175 		unlock_buffer(bh);
176 		return bh;
177 	}
178 	ext4_unlock_group(sb, block_group);
179 
180 	if (buffer_uptodate(bh)) {
181 		/*
182 		 * if not uninit: if bh is uptodate, the
183 		 * bitmap is also uptodate
184 		 */
185 		set_bitmap_uptodate(bh);
186 		unlock_buffer(bh);
187 		goto verify;
188 	}
189 	/*
190 	 * submit the buffer_head for reading
191 	 */
192 	trace_ext4_load_inode_bitmap(sb, block_group);
193 	bh->b_end_io = ext4_end_bitmap_read;
194 	get_bh(bh);
195 	submit_bh(READ | REQ_META | REQ_PRIO, bh);
196 	wait_on_buffer(bh);
197 	if (!buffer_uptodate(bh)) {
198 		put_bh(bh);
199 		ext4_error(sb, "Cannot read inode bitmap - "
200 			   "block_group = %u, inode_bitmap = %llu",
201 			   block_group, bitmap_blk);
202 		return ERR_PTR(-EIO);
203 	}
204 
205 verify:
206 	err = ext4_validate_inode_bitmap(sb, desc, block_group, bh);
207 	if (err)
208 		goto out;
209 	return bh;
210 out:
211 	put_bh(bh);
212 	return ERR_PTR(err);
213 }
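/*
 * Minimal caller-side sketch (mirroring how ext4_free_inode() below
 * uses this function): the return value is either a valid buffer_head
 * or an ERR_PTR, never NULL, and the caller owns a reference that must
 * be dropped with brelse():
 *
 *	struct buffer_head *bh = ext4_read_inode_bitmap(sb, group);
 *
 *	if (IS_ERR(bh))
 *		return PTR_ERR(bh);
 *	... examine or modify bh->b_data under ext4_lock_group() ...
 *	brelse(bh);
 */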
214 
215 /*
216  * NOTE! When we get the inode, we're the only people
217  * that have access to it, and as such there are no
218  * race conditions we have to worry about. The inode
219  * is not on the hash-lists, and it cannot be reached
220  * through the filesystem because the directory entry
221  * has been deleted earlier.
222  *
223  * HOWEVER: we must make sure that we get no aliases,
224  * which means that we have to call "clear_inode()"
225  * _before_ we mark the inode not in use in the inode
226  * bitmaps. Otherwise a newly created file might use
227  * the same inode number (not actually the same pointer
228  * though), and then we'd have two inodes sharing the
229  * same inode number and space on the hard disk.
230  */
231 void ext4_free_inode(handle_t *handle, struct inode *inode)
232 {
233 	struct super_block *sb = inode->i_sb;
234 	int is_directory;
235 	unsigned long ino;
236 	struct buffer_head *bitmap_bh = NULL;
237 	struct buffer_head *bh2;
238 	ext4_group_t block_group;
239 	unsigned long bit;
240 	struct ext4_group_desc *gdp;
241 	struct ext4_super_block *es;
242 	struct ext4_sb_info *sbi;
243 	int fatal = 0, err, count, cleared;
244 	struct ext4_group_info *grp;
245 
246 	if (!sb) {
247 		printk(KERN_ERR "EXT4-fs: %s:%d: inode on "
248 		       "nonexistent device\n", __func__, __LINE__);
249 		return;
250 	}
251 	if (atomic_read(&inode->i_count) > 1) {
252 		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: count=%d",
253 			 __func__, __LINE__, inode->i_ino,
254 			 atomic_read(&inode->i_count));
255 		return;
256 	}
257 	if (inode->i_nlink) {
258 		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: nlink=%d\n",
259 			 __func__, __LINE__, inode->i_ino, inode->i_nlink);
260 		return;
261 	}
262 	sbi = EXT4_SB(sb);
263 
264 	ino = inode->i_ino;
265 	ext4_debug("freeing inode %lu\n", ino);
266 	trace_ext4_free_inode(inode);
267 
268 	/*
269 	 * Note: we must free any quota before locking the superblock,
270 	 * as writing the quota to disk may need the lock as well.
271 	 */
272 	dquot_initialize(inode);
273 	ext4_xattr_delete_inode(handle, inode);
274 	dquot_free_inode(inode);
275 	dquot_drop(inode);
276 
277 	is_directory = S_ISDIR(inode->i_mode);
278 
279 	/* Do this BEFORE marking the inode not in use or returning an error */
280 	ext4_clear_inode(inode);
281 
282 	es = EXT4_SB(sb)->s_es;
283 	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
284 		ext4_error(sb, "reserved or nonexistent inode %lu", ino);
285 		goto error_return;
286 	}
287 	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
288 	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
289 	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
290 	/* Don't bother if the inode bitmap is corrupt. */
291 	grp = ext4_get_group_info(sb, block_group);
292 	if (IS_ERR(bitmap_bh)) {
293 		fatal = PTR_ERR(bitmap_bh);
294 		bitmap_bh = NULL;
295 		goto error_return;
296 	}
297 	if (unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp))) {
298 		fatal = -EFSCORRUPTED;
299 		goto error_return;
300 	}
301 
302 	BUFFER_TRACE(bitmap_bh, "get_write_access");
303 	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
304 	if (fatal)
305 		goto error_return;
306 
307 	fatal = -ESRCH;
308 	gdp = ext4_get_group_desc(sb, block_group, &bh2);
309 	if (gdp) {
310 		BUFFER_TRACE(bh2, "get_write_access");
311 		fatal = ext4_journal_get_write_access(handle, bh2);
312 	}
313 	ext4_lock_group(sb, block_group);
314 	cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data);
315 	if (fatal || !cleared) {
316 		ext4_unlock_group(sb, block_group);
317 		goto out;
318 	}
319 
320 	count = ext4_free_inodes_count(sb, gdp) + 1;
321 	ext4_free_inodes_set(sb, gdp, count);
322 	if (is_directory) {
323 		count = ext4_used_dirs_count(sb, gdp) - 1;
324 		ext4_used_dirs_set(sb, gdp, count);
325 		percpu_counter_dec(&sbi->s_dirs_counter);
326 	}
327 	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
328 				   EXT4_INODES_PER_GROUP(sb) / 8);
329 	ext4_group_desc_csum_set(sb, block_group, gdp);
330 	ext4_unlock_group(sb, block_group);
331 
332 	percpu_counter_inc(&sbi->s_freeinodes_counter);
333 	if (sbi->s_log_groups_per_flex) {
334 		struct flex_groups *fg;
335 
336 		fg = sbi_array_rcu_deref(sbi, s_flex_groups,
337 					 ext4_flex_group(sbi, block_group));
338 		atomic_inc(&fg->free_inodes);
339 		if (is_directory)
340 			atomic_dec(&fg->used_dirs);
341 	}
342 	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
343 	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
344 out:
345 	if (cleared) {
346 		BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
347 		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
348 		if (!fatal)
349 			fatal = err;
350 	} else {
351 		ext4_error(sb, "bit already cleared for inode %lu", ino);
352 		if (gdp && !EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
353 			int count;
354 			count = ext4_free_inodes_count(sb, gdp);
355 			percpu_counter_sub(&sbi->s_freeinodes_counter,
356 					   count);
357 		}
358 		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
359 	}
360 
361 error_return:
362 	brelse(bitmap_bh);
363 	ext4_std_error(sb, fatal);
364 }
365 
366 struct orlov_stats {
367 	__u64 free_clusters;
368 	__u32 free_inodes;
369 	__u32 used_dirs;
370 };
371 
372 /*
373  * Helper function for Orlov's allocator; returns critical information
374  * for a particular block group or flex_bg.  If flex_size is 1, then g
375  * is a block group number; otherwise it is flex_bg number.
376  */
377 static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
378 			    int flex_size, struct orlov_stats *stats)
379 {
380 	struct ext4_group_desc *desc;
381 
382 	if (flex_size > 1) {
383 		struct flex_groups *fg = sbi_array_rcu_deref(EXT4_SB(sb),
384 							     s_flex_groups, g);
385 		stats->free_inodes = atomic_read(&fg->free_inodes);
386 		stats->free_clusters = atomic64_read(&fg->free_clusters);
387 		stats->used_dirs = atomic_read(&fg->used_dirs);
388 		return;
389 	}
390 
391 	desc = ext4_get_group_desc(sb, g, NULL);
392 	if (desc) {
393 		stats->free_inodes = ext4_free_inodes_count(sb, desc);
394 		stats->free_clusters = ext4_free_group_clusters(sb, desc);
395 		stats->used_dirs = ext4_used_dirs_count(sb, desc);
396 	} else {
397 		stats->free_inodes = 0;
398 		stats->free_clusters = 0;
399 		stats->used_dirs = 0;
400 	}
401 }
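/*
 * Indexing note for get_orlov_stats(): with flex_size == 16 (i.e.
 * s_log_groups_per_flex == 4), "g" counts flex groups, so g == 2
 * aggregates the free-inode, free-cluster and used-dirs counts of
 * block groups 32..47; with flex_size == 1 it is a plain block group
 * number.  (Sizes here are illustrative.)
 */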
402 
403 /*
404  * Orlov's allocator for directories.
405  *
406  * We always try to spread first-level directories.
407  *
408  * If there are block groups with both free inode and free cluster counts
409  * not worse than average we return the one with the smallest directory
410  * count.  Otherwise we simply return a random group.
411  *
412  * For the remaining cases the rules are:
413  *
414  * It's OK to put a directory into a group unless
415  * it has too many directories already (max_dirs) or
416  * it has too few free inodes left (min_inodes) or
417  * it has too few free clusters left (min_clusters).
418  * The parent's group is preferred; if it doesn't satisfy these
419  * conditions we search cyclically through the rest.  If none
420  * of the groups looks good we just look for a group with more
421  * free inodes than average (starting at the parent's group).
422  */
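/*
 * Illustrative numbers for the thresholds computed below: with
 * inodes_per_group = 8192, flex_size = 16, ngroups = 64 (flex groups),
 * ndirs = 640 and avefreei = 1000,
 *
 *	max_dirs   = 640 / 64 + 8192 / 16 = 522;
 *	min_inodes = 1000 - 8192 * 16 / 4 < 1, so it is clamped to 1;
 *
 * i.e. on a mostly full filesystem min_inodes degenerates to "at least
 * one free inode" and the directory-count cap does most of the work.
 */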
423 
424 static int find_group_orlov(struct super_block *sb, struct inode *parent,
425 			    ext4_group_t *group, umode_t mode,
426 			    const struct qstr *qstr)
427 {
428 	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
429 	struct ext4_sb_info *sbi = EXT4_SB(sb);
430 	ext4_group_t real_ngroups = ext4_get_groups_count(sb);
431 	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
432 	unsigned int freei, avefreei, grp_free;
433 	ext4_fsblk_t freec, avefreec;
434 	unsigned int ndirs;
435 	int max_dirs, min_inodes;
436 	ext4_grpblk_t min_clusters;
437 	ext4_group_t i, grp, g, ngroups;
438 	struct ext4_group_desc *desc;
439 	struct orlov_stats stats;
440 	int flex_size = ext4_flex_bg_size(sbi);
441 	struct dx_hash_info hinfo;
442 
443 	ngroups = real_ngroups;
444 	if (flex_size > 1) {
445 		ngroups = (real_ngroups + flex_size - 1) >>
446 			sbi->s_log_groups_per_flex;
447 		parent_group >>= sbi->s_log_groups_per_flex;
448 	}
449 
450 	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
451 	avefreei = freei / ngroups;
452 	freec = percpu_counter_read_positive(&sbi->s_freeclusters_counter);
453 	avefreec = freec;
454 	do_div(avefreec, ngroups);
455 	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
456 
457 	if (S_ISDIR(mode) &&
458 	    ((parent == d_inode(sb->s_root)) ||
459 	     (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
460 		int best_ndir = inodes_per_group;
461 		int ret = -1;
462 
463 		if (qstr) {
464 			hinfo.hash_version = DX_HASH_HALF_MD4;
465 			hinfo.seed = sbi->s_hash_seed;
466 			ext4fs_dirhash(qstr->name, qstr->len, &hinfo);
467 			grp = hinfo.hash;
468 		} else
469 			grp = prandom_u32();
470 		parent_group = (unsigned)grp % ngroups;
471 		for (i = 0; i < ngroups; i++) {
472 			g = (parent_group + i) % ngroups;
473 			get_orlov_stats(sb, g, flex_size, &stats);
474 			if (!stats.free_inodes)
475 				continue;
476 			if (stats.used_dirs >= best_ndir)
477 				continue;
478 			if (stats.free_inodes < avefreei)
479 				continue;
480 			if (stats.free_clusters < avefreec)
481 				continue;
482 			grp = g;
483 			ret = 0;
484 			best_ndir = stats.used_dirs;
485 		}
486 		if (ret)
487 			goto fallback;
488 	found_flex_bg:
489 		if (flex_size == 1) {
490 			*group = grp;
491 			return 0;
492 		}
493 
494 		/*
495 		 * We pack inodes at the beginning of the flexgroup's
496 		 * inode tables.  Block allocation decisions will do
497 		 * something similar, although regular files will
498 		 * start at 2nd block group of the flexgroup.  See
499 		 * ext4_ext_find_goal() and ext4_find_near().
500 		 */
501 		grp *= flex_size;
502 		for (i = 0; i < flex_size; i++) {
503 			if (grp+i >= real_ngroups)
504 				break;
505 			desc = ext4_get_group_desc(sb, grp+i, NULL);
506 			if (desc && ext4_free_inodes_count(sb, desc)) {
507 				*group = grp+i;
508 				return 0;
509 			}
510 		}
511 		goto fallback;
512 	}
513 
514 	max_dirs = ndirs / ngroups + inodes_per_group / 16;
515 	min_inodes = avefreei - inodes_per_group*flex_size / 4;
516 	if (min_inodes < 1)
517 		min_inodes = 1;
518 	min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;
519 
520 	/*
521 	 * Start looking in the flex group where we last allocated an
522 	 * inode for this parent directory
523 	 */
524 	if (EXT4_I(parent)->i_last_alloc_group != ~0) {
525 		parent_group = EXT4_I(parent)->i_last_alloc_group;
526 		if (flex_size > 1)
527 			parent_group >>= sbi->s_log_groups_per_flex;
528 	}
529 
530 	for (i = 0; i < ngroups; i++) {
531 		grp = (parent_group + i) % ngroups;
532 		get_orlov_stats(sb, grp, flex_size, &stats);
533 		if (stats.used_dirs >= max_dirs)
534 			continue;
535 		if (stats.free_inodes < min_inodes)
536 			continue;
537 		if (stats.free_clusters < min_clusters)
538 			continue;
539 		goto found_flex_bg;
540 	}
541 
542 fallback:
543 	ngroups = real_ngroups;
544 	avefreei = freei / ngroups;
545 fallback_retry:
546 	parent_group = EXT4_I(parent)->i_block_group;
547 	for (i = 0; i < ngroups; i++) {
548 		grp = (parent_group + i) % ngroups;
549 		desc = ext4_get_group_desc(sb, grp, NULL);
550 		if (desc) {
551 			grp_free = ext4_free_inodes_count(sb, desc);
552 			if (grp_free && grp_free >= avefreei) {
553 				*group = grp;
554 				return 0;
555 			}
556 		}
557 	}
558 
559 	if (avefreei) {
560 		/*
561 		 * The free-inodes counter is approximate, and for really small
562 		 * filesystems the above test can fail to find any blockgroups
563 		 */
564 		avefreei = 0;
565 		goto fallback_retry;
566 	}
567 
568 	return -1;
569 }
570 
571 static int find_group_other(struct super_block *sb, struct inode *parent,
572 			    ext4_group_t *group, umode_t mode)
573 {
574 	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
575 	ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
576 	struct ext4_group_desc *desc;
577 	int flex_size = ext4_flex_bg_size(EXT4_SB(sb));
578 
579 	/*
580 	 * Try to place the inode in the same flex group as its
581 	 * parent.  If we can't find space, use the Orlov algorithm to
582 	 * find another flex group, and store that information in the
583 	 * parent directory's inode information so that future
584 	 * allocations use that flex group.
585 	 */
586 	if (flex_size > 1) {
587 		int retry = 0;
588 
589 	try_again:
590 		parent_group &= ~(flex_size-1);
591 		last = parent_group + flex_size;
592 		if (last > ngroups)
593 			last = ngroups;
594 		for  (i = parent_group; i < last; i++) {
595 			desc = ext4_get_group_desc(sb, i, NULL);
596 			if (desc && ext4_free_inodes_count(sb, desc)) {
597 				*group = i;
598 				return 0;
599 			}
600 		}
601 		if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
602 			retry = 1;
603 			parent_group = EXT4_I(parent)->i_last_alloc_group;
604 			goto try_again;
605 		}
606 		/*
607 		 * If this didn't work, use the Orlov search algorithm
608 		 * to find a new flex group; we pass in the mode to
609 		 * avoid the topdir algorithms.
610 		 */
611 		*group = parent_group + flex_size;
612 		if (*group > ngroups)
613 			*group = 0;
614 		return find_group_orlov(sb, parent, group, mode, NULL);
615 	}
616 
617 	/*
618 	 * Try to place the inode in its parent directory
619 	 */
620 	*group = parent_group;
621 	desc = ext4_get_group_desc(sb, *group, NULL);
622 	if (desc && ext4_free_inodes_count(sb, desc) &&
623 	    ext4_free_group_clusters(sb, desc))
624 		return 0;
625 
626 	/*
627 	 * We're going to place this inode in a different blockgroup from its
628 	 * parent.  We want to cause files in a common directory to all land in
629 	 * the same blockgroup.  But we want files which are in a different
630 	 * directory which shares a blockgroup with our parent to land in a
631 	 * different blockgroup.
632 	 *
633 	 * So add our directory's i_ino into the starting point for the hash.
634 	 */
635 	*group = (*group + parent->i_ino) % ngroups;
636 
637 	/*
638 	 * Use a quadratic hash to find a group with a free inode and some free
639 	 * blocks.
640 	 */
641 	for (i = 1; i < ngroups; i <<= 1) {
642 		*group += i;
643 		if (*group >= ngroups)
644 			*group -= ngroups;
645 		desc = ext4_get_group_desc(sb, *group, NULL);
646 		if (desc && ext4_free_inodes_count(sb, desc) &&
647 		    ext4_free_group_clusters(sb, desc))
648 			return 0;
649 	}
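	/*
	 * Probe-sequence example for the loop above: the step doubles
	 * each pass (i = 1, 2, 4, 8, ...), so starting from group s the
	 * candidates are s+1, s+3, s+7, s+15, ... (mod ngroups), i.e.
	 * cumulative offsets of 2^k - 1, which spreads the search across
	 * the filesystem much faster than a linear scan would.
	 */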
650 
651 	/*
652 	 * That failed: try linear search for a free inode, even if that group
653 	 * has no free blocks.
654 	 */
655 	*group = parent_group;
656 	for (i = 0; i < ngroups; i++) {
657 		if (++*group >= ngroups)
658 			*group = 0;
659 		desc = ext4_get_group_desc(sb, *group, NULL);
660 		if (desc && ext4_free_inodes_count(sb, desc))
661 			return 0;
662 	}
663 
664 	return -1;
665 }
666 
667 /*
668  * In no journal mode, if an inode has recently been deleted, we want
669  * to avoid reusing it until we're reasonably sure the inode table
670  * block has been written back to disk.  (Yes, these values are
671  * somewhat arbitrary...)
672  */
673 #define RECENTCY_MIN	5
674 #define RECENTCY_DIRTY	30
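/*
 * Example of the window these constants define: with no journal, an
 * inode whose itable block has already been written back can be reused
 * RECENTCY_MIN (5) seconds after its dtime; if the block is still
 * dirty in the buffer cache, recently_deleted() below widens that to
 * RECENTCY_MIN + RECENTCY_DIRTY (35) seconds.
 */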
675 
676 static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino)
677 {
678 	struct ext4_group_desc	*gdp;
679 	struct ext4_inode	*raw_inode;
680 	struct buffer_head	*bh;
681 	unsigned long		dtime, now;
682 	int	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
683 	int	offset, ret = 0, recentcy = RECENTCY_MIN;
684 
685 	gdp = ext4_get_group_desc(sb, group, NULL);
686 	if (unlikely(!gdp))
687 		return 0;
688 
689 	bh = sb_getblk(sb, ext4_inode_table(sb, gdp) +
690 		       (ino / inodes_per_block));
691 	if (unlikely(!bh) || !buffer_uptodate(bh))
692 		/*
693 		 * If the block is not in the buffer cache, then it
694 		 * must have been written out.
695 		 */
696 		goto out;
697 
698 	offset = (ino % inodes_per_block) * EXT4_INODE_SIZE(sb);
699 	raw_inode = (struct ext4_inode *) (bh->b_data + offset);
700 	dtime = le32_to_cpu(raw_inode->i_dtime);
701 	now = get_seconds();
702 	if (buffer_dirty(bh))
703 		recentcy += RECENTCY_DIRTY;
704 
705 	if (dtime && (dtime < now) && (now < dtime + recentcy))
706 		ret = 1;
707 out:
708 	brelse(bh);
709 	return ret;
710 }
711 
712 /*
713  * There are two policies for allocating an inode.  If the new inode is
714  * a directory, then a forward search is made for a block group with both
715  * free space and a low directory-to-inode ratio; if that fails, then of
716  * the groups with above-average free space, that group with the fewest
717  * directories already is chosen.
718  *
719  * For other inodes, search forward from the parent directory's block
720  * group to find a free inode.
721  */
722 struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
723 			       umode_t mode, const struct qstr *qstr,
724 			       __u32 goal, uid_t *owner, int handle_type,
725 			       unsigned int line_no, int nblocks)
726 {
727 	struct super_block *sb;
728 	struct buffer_head *inode_bitmap_bh = NULL;
729 	struct buffer_head *group_desc_bh;
730 	ext4_group_t ngroups, group = 0;
731 	unsigned long ino = 0;
732 	struct inode *inode;
733 	struct ext4_group_desc *gdp = NULL;
734 	struct ext4_inode_info *ei;
735 	struct ext4_sb_info *sbi;
736 	int ret2, err;
737 	struct inode *ret;
738 	ext4_group_t i;
739 	ext4_group_t flex_group;
740 	struct ext4_group_info *grp;
741 	int encrypt = 0;
742 
743 	/* Cannot create files in a deleted directory */
744 	if (!dir || !dir->i_nlink)
745 		return ERR_PTR(-EPERM);
746 
747 	if ((ext4_encrypted_inode(dir) ||
748 	     DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb))) &&
749 	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
750 		err = ext4_get_encryption_info(dir);
751 		if (err)
752 			return ERR_PTR(err);
753 		if (ext4_encryption_info(dir) == NULL)
754 			return ERR_PTR(-EPERM);
755 		if (!handle)
756 			nblocks += EXT4_DATA_TRANS_BLOCKS(dir->i_sb);
757 		encrypt = 1;
758 	}
759 
760 	sb = dir->i_sb;
761 	ngroups = ext4_get_groups_count(sb);
762 	trace_ext4_request_inode(dir, mode);
763 	inode = new_inode(sb);
764 	if (!inode)
765 		return ERR_PTR(-ENOMEM);
766 	ei = EXT4_I(inode);
767 	sbi = EXT4_SB(sb);
768 
769 	/*
770 	 * Initialize owners and quota early so that we don't have to account
771 	 * for the quota initialization worst case in the standard inode
772 	 * creation transaction.
773 	 */
774 	if (owner) {
775 		inode->i_mode = mode;
776 		i_uid_write(inode, owner[0]);
777 		i_gid_write(inode, owner[1]);
778 	} else if (test_opt(sb, GRPID)) {
779 		inode->i_mode = mode;
780 		inode->i_uid = current_fsuid();
781 		inode->i_gid = dir->i_gid;
782 	} else
783 		inode_init_owner(inode, dir, mode);
784 	err = dquot_initialize(inode);
785 	if (err)
786 		goto out;
787 
788 	if (!goal)
789 		goal = sbi->s_inode_goal;
790 
791 	if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
792 		group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
793 		ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
794 		ret2 = 0;
795 		goto got_group;
796 	}
797 
798 	if (S_ISDIR(mode))
799 		ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
800 	else
801 		ret2 = find_group_other(sb, dir, &group, mode);
802 
803 got_group:
804 	EXT4_I(dir)->i_last_alloc_group = group;
805 	err = -ENOSPC;
806 	if (ret2 == -1)
807 		goto out;
808 
809 	/*
810 	 * Normally we will only go through one pass of this loop,
811 	 * unless we get unlucky and it turns out the group we selected
812 	 * had its last inode grabbed by someone else.
813 	 */
814 	for (i = 0; i < ngroups; i++, ino = 0) {
815 		err = -EIO;
816 
817 		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
818 		if (!gdp)
819 			goto out;
820 
821 		/*
822 		 * Check free inodes count before loading bitmap.
823 		 */
824 		if (ext4_free_inodes_count(sb, gdp) == 0) {
825 			if (++group == ngroups)
826 				group = 0;
827 			continue;
828 		}
829 
830 		grp = ext4_get_group_info(sb, group);
831 		/* Skip groups with already-known suspicious inode tables */
832 		if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
833 			if (++group == ngroups)
834 				group = 0;
835 			continue;
836 		}
837 
838 		brelse(inode_bitmap_bh);
839 		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
840 		/* Skip groups with suspicious inode tables */
841 		if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp) ||
842 		    IS_ERR(inode_bitmap_bh)) {
843 			inode_bitmap_bh = NULL;
844 			if (++group == ngroups)
845 				group = 0;
846 			continue;
847 		}
848 
849 repeat_in_this_group:
850 		ino = ext4_find_next_zero_bit((unsigned long *)
851 					      inode_bitmap_bh->b_data,
852 					      EXT4_INODES_PER_GROUP(sb), ino);
853 		if (ino >= EXT4_INODES_PER_GROUP(sb))
854 			goto next_group;
855 		if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) {
856 			ext4_error(sb, "reserved inode found cleared - "
857 				   "inode=%lu", ino + 1);
858 			goto next_group;
859 		}
860 		if ((EXT4_SB(sb)->s_journal == NULL) &&
861 		    recently_deleted(sb, group, ino)) {
862 			ino++;
863 			goto next_inode;
864 		}
865 		if (!handle) {
866 			BUG_ON(nblocks <= 0);
867 			handle = __ext4_journal_start_sb(dir->i_sb, line_no,
868 							 handle_type, nblocks,
869 							 0);
870 			if (IS_ERR(handle)) {
871 				err = PTR_ERR(handle);
872 				ext4_std_error(sb, err);
873 				goto out;
874 			}
875 		}
876 		BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
877 		err = ext4_journal_get_write_access(handle, inode_bitmap_bh);
878 		if (err) {
879 			ext4_std_error(sb, err);
880 			goto out;
881 		}
882 		ext4_lock_group(sb, group);
883 		ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data);
884 		ext4_unlock_group(sb, group);
885 		ino++;		/* the inode bitmap is zero-based */
886 		if (!ret2)
887 			goto got; /* we grabbed the inode! */
888 next_inode:
889 		if (ino < EXT4_INODES_PER_GROUP(sb))
890 			goto repeat_in_this_group;
891 next_group:
892 		if (++group == ngroups)
893 			group = 0;
894 	}
895 	err = -ENOSPC;
896 	goto out;
897 
898 got:
899 	BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
900 	err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
901 	if (err) {
902 		ext4_std_error(sb, err);
903 		goto out;
904 	}
905 
906 	BUFFER_TRACE(group_desc_bh, "get_write_access");
907 	err = ext4_journal_get_write_access(handle, group_desc_bh);
908 	if (err) {
909 		ext4_std_error(sb, err);
910 		goto out;
911 	}
912 
913 	/* We may have to initialize the block bitmap if it isn't already */
914 	if (ext4_has_group_desc_csum(sb) &&
915 	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
916 		struct buffer_head *block_bitmap_bh;
917 
918 		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
919 		if (IS_ERR(block_bitmap_bh)) {
920 			err = PTR_ERR(block_bitmap_bh);
921 			goto out;
922 		}
923 		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
924 		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
925 		if (err) {
926 			brelse(block_bitmap_bh);
927 			ext4_std_error(sb, err);
928 			goto out;
929 		}
930 
931 		BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
932 		err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);
933 
934 		/* recheck and clear flag under lock if we still need to */
935 		ext4_lock_group(sb, group);
936 		if (ext4_has_group_desc_csum(sb) &&
937 		    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
938 			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
939 			ext4_free_group_clusters_set(sb, gdp,
940 				ext4_free_clusters_after_init(sb, group, gdp));
941 			ext4_block_bitmap_csum_set(sb, group, gdp,
942 						   block_bitmap_bh);
943 			ext4_group_desc_csum_set(sb, group, gdp);
944 		}
945 		ext4_unlock_group(sb, group);
946 		brelse(block_bitmap_bh);
947 
948 		if (err) {
949 			ext4_std_error(sb, err);
950 			goto out;
951 		}
952 	}
953 
954 	/* Update the relevant bg descriptor fields */
955 	if (ext4_has_group_desc_csum(sb)) {
956 		int free;
957 		struct ext4_group_info *grp = ext4_get_group_info(sb, group);
958 
959 		down_read(&grp->alloc_sem); /* protect vs itable lazyinit */
960 		ext4_lock_group(sb, group); /* while we modify the bg desc */
961 		free = EXT4_INODES_PER_GROUP(sb) -
962 			ext4_itable_unused_count(sb, gdp);
963 		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
964 			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
965 			free = 0;
966 		}
967 		/*
968 		 * Check the relative inode number against the last used
969 		 * relative inode number in this group.  If it is greater
970 		 * we need to update the bg_itable_unused count.
971 		 */
972 		if (ino > free)
973 			ext4_itable_unused_set(sb, gdp,
974 					(EXT4_INODES_PER_GROUP(sb) - ino));
975 		up_read(&grp->alloc_sem);
976 	} else {
977 		ext4_lock_group(sb, group);
978 	}
979 
980 	ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
981 	if (S_ISDIR(mode)) {
982 		ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1);
983 		if (sbi->s_log_groups_per_flex) {
984 			ext4_group_t f = ext4_flex_group(sbi, group);
985 
986 			atomic_inc(&sbi_array_rcu_deref(sbi, s_flex_groups,
987 							f)->used_dirs);
988 		}
989 	}
990 	if (ext4_has_group_desc_csum(sb)) {
991 		ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
992 					   EXT4_INODES_PER_GROUP(sb) / 8);
993 		ext4_group_desc_csum_set(sb, group, gdp);
994 	}
995 	ext4_unlock_group(sb, group);
996 
997 	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
998 	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
999 	if (err) {
1000 		ext4_std_error(sb, err);
1001 		goto out;
1002 	}
1003 
1004 	percpu_counter_dec(&sbi->s_freeinodes_counter);
1005 	if (S_ISDIR(mode))
1006 		percpu_counter_inc(&sbi->s_dirs_counter);
1007 
1008 	if (sbi->s_log_groups_per_flex) {
1009 		flex_group = ext4_flex_group(sbi, group);
1010 		atomic_dec(&sbi_array_rcu_deref(sbi, s_flex_groups,
1011 						flex_group)->free_inodes);
1012 	}
1013 
1014 	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
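	/*
	 * Inverse of the (ino - 1) / and % computations used when
	 * freeing (illustrative, assuming 8192 inodes per group):
	 * claiming bit 1807 of group 1 left ino == 1808 after the
	 * "bitmap is zero-based" increment in the loop above, so
	 * i_ino becomes 1808 + 1 * 8192 = 10000.
	 */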
1015 	/* This is the optimal IO size (for stat), not the fs block size */
1016 	inode->i_blocks = 0;
1017 	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
1018 						       ext4_current_time(inode);
1019 
1020 	memset(ei->i_data, 0, sizeof(ei->i_data));
1021 	ei->i_dir_start_lookup = 0;
1022 	ei->i_disksize = 0;
1023 
1024 	/* Don't inherit extent flag from directory, amongst others. */
1025 	ei->i_flags =
1026 		ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
1027 	ei->i_file_acl = 0;
1028 	ei->i_dtime = 0;
1029 	ei->i_block_group = group;
1030 	ei->i_last_alloc_group = ~0;
1031 
1032 	ext4_set_inode_flags(inode);
1033 	if (IS_DIRSYNC(inode))
1034 		ext4_handle_sync(handle);
1035 	if (insert_inode_locked(inode) < 0) {
1036 		/*
1037 		 * Likely a bitmap corruption causing inode to be allocated
1038 		 * twice.
1039 		 */
1040 		err = -EIO;
1041 		ext4_error(sb, "failed to insert inode %lu: doubly allocated?",
1042 			   inode->i_ino);
1043 		goto out;
1044 	}
1045 	spin_lock(&sbi->s_next_gen_lock);
1046 	inode->i_generation = sbi->s_next_generation++;
1047 	spin_unlock(&sbi->s_next_gen_lock);
1048 
1049 	/* Precompute checksum seed for inode metadata */
1050 	if (ext4_has_metadata_csum(sb)) {
1051 		__u32 csum;
1052 		__le32 inum = cpu_to_le32(inode->i_ino);
1053 		__le32 gen = cpu_to_le32(inode->i_generation);
1054 		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
1055 				   sizeof(inum));
1056 		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
1057 					      sizeof(gen));
1058 	}
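	/*
	 * In other words, the per-inode seed is the filesystem checksum
	 * seed chained through crc32c over the inode number and then the
	 * generation: i_csum_seed = crc32c(crc32c(s_csum_seed, inum), gen),
	 * restating the math of the two ext4_chksum() calls above (crc32c
	 * being what ext4_chksum() uses when metadata_csum is enabled).
	 */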
1059 
1060 	ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
1061 	ext4_set_inode_state(inode, EXT4_STATE_NEW);
1062 
1063 	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
1064 	ei->i_inline_off = 0;
1065 	if (ext4_has_feature_inline_data(sb))
1066 		ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
1067 	ret = inode;
1068 	err = dquot_alloc_inode(inode);
1069 	if (err)
1070 		goto fail_drop;
1071 
1072 	err = ext4_init_acl(handle, inode, dir);
1073 	if (err)
1074 		goto fail_free_drop;
1075 
1076 	err = ext4_init_security(handle, inode, dir, qstr);
1077 	if (err)
1078 		goto fail_free_drop;
1079 
1080 	if (ext4_has_feature_extents(sb)) {
1081 		/* set extent flag only for directory, file and normal symlink */
1082 		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
1083 			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
1084 			ext4_ext_tree_init(handle, inode);
1085 		}
1086 	}
1087 
1088 	if (ext4_handle_valid(handle)) {
1089 		ei->i_sync_tid = handle->h_transaction->t_tid;
1090 		ei->i_datasync_tid = handle->h_transaction->t_tid;
1091 	}
1092 
1093 	if (encrypt) {
1094 		err = ext4_inherit_context(dir, inode);
1095 		if (err)
1096 			goto fail_free_drop;
1097 	}
1098 
1099 	err = ext4_mark_inode_dirty(handle, inode);
1100 	if (err) {
1101 		ext4_std_error(sb, err);
1102 		goto fail_free_drop;
1103 	}
1104 
1105 	ext4_debug("allocating inode %lu\n", inode->i_ino);
1106 	trace_ext4_allocate_inode(inode, dir, mode);
1107 	brelse(inode_bitmap_bh);
1108 	return ret;
1109 
1110 fail_free_drop:
1111 	dquot_free_inode(inode);
1112 fail_drop:
1113 	clear_nlink(inode);
1114 	unlock_new_inode(inode);
1115 out:
1116 	dquot_drop(inode);
1117 	inode->i_flags |= S_NOQUOTA;
1118 	iput(inode);
1119 	brelse(inode_bitmap_bh);
1120 	return ERR_PTR(err);
1121 }
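/*
 * Minimal call-site sketch, using the signature above directly
 * (ext4.h in this tree also provides convenience wrappers such as
 * ext4_new_inode(); their exact parameter lists are assumed here, not
 * verified).  With a handle already started, owner == NULL means
 * "derive ownership from current/dir" and goal == 0 means "no explicit
 * preferred inode number":
 *
 *	inode = __ext4_new_inode(handle, dir, mode, &dentry->d_name,
 *				 0, NULL, 0, 0, 0);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 */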
1122 
1123 /* Verify that we are loading a valid orphan from disk */
1124 struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
1125 {
1126 	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
1127 	ext4_group_t block_group;
1128 	int bit;
1129 	struct buffer_head *bitmap_bh = NULL;
1130 	struct inode *inode = NULL;
1131 	int err = -EFSCORRUPTED;
1132 
1133 	if (ino < EXT4_FIRST_INO(sb) || ino > max_ino)
1134 		goto bad_orphan;
1135 
1136 	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
1137 	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
1138 	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
1139 	if (IS_ERR(bitmap_bh)) {
1140 		ext4_error(sb, "inode bitmap error %ld for orphan %lu",
1141 			   ino, PTR_ERR(bitmap_bh));
1142 		return (struct inode *) bitmap_bh;
1143 	}
1144 
1145 	/* Having the inode bit set should be a 100% indicator that this
1146 	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
1147 	 * inodes that were being truncated, so we can't check i_nlink==0.
1148 	 */
1149 	if (!ext4_test_bit(bit, bitmap_bh->b_data))
1150 		goto bad_orphan;
1151 
1152 	inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
1153 	if (IS_ERR(inode)) {
1154 		err = PTR_ERR(inode);
1155 		ext4_error(sb, "couldn't read orphan inode %lu (err %d)",
1156 			   ino, err);
1157 		return inode;
1158 	}
1159 
1160 	/*
1161 	 * If the orphan has i_nlink > 0 then it should be able to
1162 	 * be truncated, otherwise it won't be removed from the orphan
1163 	 * list during processing and an infinite loop will result.
1164 	 * Similarly, it must not be a bad inode.
1165 	 */
1166 	if ((inode->i_nlink && !ext4_can_truncate(inode)) ||
1167 	    is_bad_inode(inode))
1168 		goto bad_orphan;
1169 
1170 	if (NEXT_ORPHAN(inode) > max_ino)
1171 		goto bad_orphan;
1172 	brelse(bitmap_bh);
1173 	return inode;
1174 
1175 bad_orphan:
1176 	ext4_error(sb, "bad orphan inode %lu", ino);
1177 	if (bitmap_bh)
1178 		printk(KERN_ERR "ext4_test_bit(bit=%d, block=%llu) = %d\n",
1179 		       bit, (unsigned long long)bitmap_bh->b_blocknr,
1180 		       ext4_test_bit(bit, bitmap_bh->b_data));
1181 	if (inode) {
1182 		printk(KERN_ERR "is_bad_inode(inode)=%d\n",
1183 		       is_bad_inode(inode));
1184 		printk(KERN_ERR "NEXT_ORPHAN(inode)=%u\n",
1185 		       NEXT_ORPHAN(inode));
1186 		printk(KERN_ERR "max_ino=%lu\n", max_ino);
1187 		printk(KERN_ERR "i_nlink=%u\n", inode->i_nlink);
1188 		/* Avoid freeing blocks if we got a bad deleted inode */
1189 		if (inode->i_nlink == 0)
1190 			inode->i_blocks = 0;
1191 		iput(inode);
1192 	}
1193 	brelse(bitmap_bh);
1194 	return ERR_PTR(err);
1195 }
1196 
1197 unsigned long ext4_count_free_inodes(struct super_block *sb)
1198 {
1199 	unsigned long desc_count;
1200 	struct ext4_group_desc *gdp;
1201 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
1202 #ifdef EXT4FS_DEBUG
1203 	struct ext4_super_block *es;
1204 	unsigned long bitmap_count, x;
1205 	struct buffer_head *bitmap_bh = NULL;
1206 
1207 	es = EXT4_SB(sb)->s_es;
1208 	desc_count = 0;
1209 	bitmap_count = 0;
1210 	gdp = NULL;
1211 	for (i = 0; i < ngroups; i++) {
1212 		gdp = ext4_get_group_desc(sb, i, NULL);
1213 		if (!gdp)
1214 			continue;
1215 		desc_count += ext4_free_inodes_count(sb, gdp);
1216 		brelse(bitmap_bh);
1217 		bitmap_bh = ext4_read_inode_bitmap(sb, i);
1218 		if (IS_ERR(bitmap_bh)) {
1219 			bitmap_bh = NULL;
1220 			continue;
1221 		}
1222 
1223 		x = ext4_count_free(bitmap_bh->b_data,
1224 				    EXT4_INODES_PER_GROUP(sb) / 8);
1225 		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
1226 			(unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
1227 		bitmap_count += x;
1228 	}
1229 	brelse(bitmap_bh);
1230 	printk(KERN_DEBUG "ext4_count_free_inodes: "
1231 	       "stored = %u, computed = %lu, %lu\n",
1232 	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
1233 	return desc_count;
1234 #else
1235 	desc_count = 0;
1236 	for (i = 0; i < ngroups; i++) {
1237 		gdp = ext4_get_group_desc(sb, i, NULL);
1238 		if (!gdp)
1239 			continue;
1240 		desc_count += ext4_free_inodes_count(sb, gdp);
1241 		cond_resched();
1242 	}
1243 	return desc_count;
1244 #endif
1245 }
1246 
1247 /* Called at mount-time, super-block is locked */
1248 unsigned long ext4_count_dirs(struct super_block * sb)
1249 {
1250 	unsigned long count = 0;
1251 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
1252 
1253 	for (i = 0; i < ngroups; i++) {
1254 		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
1255 		if (!gdp)
1256 			continue;
1257 		count += ext4_used_dirs_count(sb, gdp);
1258 	}
1259 	return count;
1260 }
1261 
1262 /*
1263  * Zeroes the not-yet-zeroed inode table: just write zeroes through the
1264  * whole inode table.  Must be called without any spinlock held.  The only
1265  * place this is called from on an active filesystem is the ext4lazyinit
1266  * thread, so we do not need any special locks; however, we have to prevent
1267  * inode allocation from the current group, so we take the alloc_sem lock
1268  * to block ext4_new_inode() until we are finished.
1269  */
1270 int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
1271 				 int barrier)
1272 {
1273 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
1274 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1275 	struct ext4_group_desc *gdp = NULL;
1276 	struct buffer_head *group_desc_bh;
1277 	handle_t *handle;
1278 	ext4_fsblk_t blk;
1279 	int num, ret = 0, used_blks = 0;
1280 	unsigned long used_inos = 0;
1281 
1282 	/* This should not happen, but just to be sure, check it */
1283 	if (sb->s_flags & MS_RDONLY) {
1284 		ret = 1;
1285 		goto out;
1286 	}
1287 
1288 	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
1289 	if (!gdp)
1290 		goto out;
1291 
1292 	/*
1293 	 * We do not need to lock this, because we are the only one
1294 	 * handling this flag.
1295 	 */
1296 	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
1297 		goto out;
1298 
1299 	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
1300 	if (IS_ERR(handle)) {
1301 		ret = PTR_ERR(handle);
1302 		goto out;
1303 	}
1304 
1305 	down_write(&grp->alloc_sem);
1306 	/*
1307 	 * If inode bitmap was already initialized there may be some
1308 	 * used inodes so we need to skip blocks with used inodes in
1309 	 * inode table.
1310 	 */
1311 	if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
1312 		used_inos = EXT4_INODES_PER_GROUP(sb) -
1313 			    ext4_itable_unused_count(sb, gdp);
1314 		used_blks = DIV_ROUND_UP(used_inos, sbi->s_inodes_per_block);
1315 
1316 		/* Bogus inode unused count? */
1317 		if (used_blks < 0 || used_blks > sbi->s_itb_per_group) {
1318 			ext4_error(sb, "Something is wrong with group %u: "
1319 				   "used itable blocks: %d; "
1320 				   "itable unused count: %u",
1321 				   group, used_blks,
1322 				   ext4_itable_unused_count(sb, gdp));
1323 			ret = 1;
1324 			goto err_out;
1325 		}
1326 
1327 		used_inos += group * EXT4_INODES_PER_GROUP(sb);
1328 		/*
1329 		 * Are there some uninitialized inodes in the inode table
1330 		 * before the first normal inode?
1331 		 */
1332 		if ((used_blks != sbi->s_itb_per_group) &&
1333 		     (used_inos < EXT4_FIRST_INO(sb))) {
1334 			ext4_error(sb, "Something is wrong with group %u: "
1335 				   "itable unused count: %u; "
1336 				   "itables initialized count: %ld",
1337 				   group, ext4_itable_unused_count(sb, gdp),
1338 				   used_inos);
1339 			ret = 1;
1340 			goto err_out;
1341 		}
1342 	}
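	/*
	 * Worked example with illustrative values: 8192 inodes per group
	 * and 16 inodes per itable block give s_itb_per_group = 512; if
	 * bg_itable_unused = 8000 then used_inos = 192, used_blks =
	 * DIV_ROUND_UP(192, 16) = 12, and the zeroout below starts 12
	 * blocks into the inode table, covering the remaining 500 blocks.
	 */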
1343 
1344 	blk = ext4_inode_table(sb, gdp) + used_blks;
1345 	num = sbi->s_itb_per_group - used_blks;
1346 
1347 	BUFFER_TRACE(group_desc_bh, "get_write_access");
1348 	ret = ext4_journal_get_write_access(handle,
1349 					    group_desc_bh);
1350 	if (ret)
1351 		goto err_out;
1352 
1353 	/*
1354 	 * Skip zeroout if the inode table is full. But we set the ZEROED
1355 	 * flag anyway, because obviously, when it is full it does not need
1356 	 * further zeroing.
1357 	 */
1358 	if (unlikely(num == 0))
1359 		goto skip_zeroout;
1360 
1361 	ext4_debug("going to zero out inode table in group %d\n",
1362 		   group);
1363 	ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
1364 	if (ret < 0)
1365 		goto err_out;
1366 	if (barrier)
1367 		blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);
1368 
1369 skip_zeroout:
1370 	ext4_lock_group(sb, group);
1371 	gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
1372 	ext4_group_desc_csum_set(sb, group, gdp);
1373 	ext4_unlock_group(sb, group);
1374 
1375 	BUFFER_TRACE(group_desc_bh,
1376 		     "call ext4_handle_dirty_metadata");
1377 	ret = ext4_handle_dirty_metadata(handle, NULL,
1378 					 group_desc_bh);
1379 
1380 err_out:
1381 	up_write(&grp->alloc_sem);
1382 	ext4_journal_stop(handle);
1383 out:
1384 	return ret;
1385 }
1386