// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>

static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
					    ext4_group_t block_group);
/*
 * balloc.c contains the block allocation and deallocation routines
 */

/*
 * Calculate block group number for a given block number
 */
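/*
 * With the standard group size, blocks-per-group is (8 * blocksize) <<
 * cluster_bits, a power of two, so the group number can be derived with a
 * single shift of (block size bits + cluster bits + 3) instead of the
 * 64-bit division done in ext4_get_group_no_and_offset().
 */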
ext4_group_t ext4_get_group_number(struct super_block *sb,
				   ext4_fsblk_t block)
{
	ext4_group_t group;

	if (test_opt2(sb, STD_GROUP_SIZE))
		group = (block -
			 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) >>
			(EXT4_BLOCK_SIZE_BITS(sb) + EXT4_CLUSTER_BITS(sb) + 3);
	else
		ext4_get_group_no_and_offset(sb, block, &group, NULL);
	return group;
}

/*
 * Calculate the block group number and offset into the block/cluster
 * allocation bitmap, given a block number
 */
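/*
 * Note that do_div() divides in place: on return, blocknr holds the quotient
 * (the group number) and the remainder is the block offset within the group,
 * which is then shifted down to a cluster offset.
 */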
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
		EXT4_SB(sb)->s_cluster_bits;
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;
}

/*
 * Check whether the 'block' lives within the 'block_group'. Returns 1 if so
 * and 0 otherwise.
 */
static inline int ext4_block_in_group(struct super_block *sb,
				      ext4_fsblk_t block,
				      ext4_group_t block_group)
{
	ext4_group_t actual_group;

	actual_group = ext4_get_group_number(sb, block);
	return (actual_group == block_group) ? 1 : 0;
}

/* Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
static unsigned ext4_num_overhead_clusters(struct super_block *sb,
					   ext4_group_t block_group,
					   struct ext4_group_desc *gdp)
{
	unsigned num_clusters;
	int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
	ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
	ext4_fsblk_t itbl_blk;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* This is the number of clusters used by the superblock,
	 * block group descriptors, and reserved block group
	 * descriptor blocks */
	num_clusters = ext4_num_base_meta_clusters(sb, block_group);

	/*
	 * For the allocation bitmaps and inode table, we first need
	 * to check to see if the block is in the block group.  If it
	 * is, then check to see if the cluster is already accounted
	 * for in the clusters used for the base metadata cluster, or
	 * if we can increment the base metadata cluster to include
	 * that block.  Otherwise, we will have to track the cluster
	 * used for the allocation bitmap or inode table explicitly.
	 * Normally all of these blocks are contiguous, so the special
	 * case handling shouldn't be necessary except for *very*
	 * unusual file system layouts.
	 */
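	/*
	 * In what follows, -1 means "nothing left to account for":
	 * block_cluster/inode_cluster stay (or are reset to) -1 when the
	 * bitmap's cluster is outside this group or already covered by the
	 * base metadata clusters; whatever is still >= 0 at the end is
	 * counted as one extra overhead cluster.  itbl_cluster remembers an
	 * inode table cluster that has already been counted so the same
	 * cluster is not counted twice.
	 */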
	if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
		block_cluster = EXT4_B2C(sbi,
					 ext4_block_bitmap(sb, gdp) - start);
		if (block_cluster < num_clusters)
			block_cluster = -1;
		else if (block_cluster == num_clusters) {
			num_clusters++;
			block_cluster = -1;
		}
	}

	if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
		inode_cluster = EXT4_B2C(sbi,
					 ext4_inode_bitmap(sb, gdp) - start);
		if (inode_cluster < num_clusters)
			inode_cluster = -1;
		else if (inode_cluster == num_clusters) {
			num_clusters++;
			inode_cluster = -1;
		}
	}

	itbl_blk = ext4_inode_table(sb, gdp);
	for (i = 0; i < sbi->s_itb_per_group; i++) {
		if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
			c = EXT4_B2C(sbi, itbl_blk + i - start);
			if ((c < num_clusters) || (c == inode_cluster) ||
			    (c == block_cluster) || (c == itbl_cluster))
				continue;
			if (c == num_clusters) {
				num_clusters++;
				continue;
			}
			num_clusters++;
			itbl_cluster = c;
		}
	}

	if (block_cluster != -1)
		num_clusters++;
	if (inode_cluster != -1)
		num_clusters++;

	return num_clusters;
}

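/*
 * Number of clusters in this block group; the last group may be shorter
 * than EXT4_BLOCKS_PER_GROUP(sb) blocks if the filesystem size is not a
 * multiple of the group size.
 */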
static unsigned int num_clusters_in_group(struct super_block *sb,
					  ext4_group_t block_group)
{
	unsigned int blocks;

	if (block_group == ext4_get_groups_count(sb) - 1) {
		/*
		 * Even though mke2fs always initializes the first and
		 * last group, just in case some other tool was used,
		 * we need to make sure we calculate the right free
		 * blocks.
		 */
		blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
			ext4_group_first_block_no(sb, block_group);
	} else
		blocks = EXT4_BLOCKS_PER_GROUP(sb);
	return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
}

/* Initializes an uninitialized block bitmap */
static int ext4_init_block_bitmap(struct super_block *sb,
				  struct buffer_head *bh,
				  ext4_group_t block_group,
				  struct ext4_group_desc *gdp)
{
	unsigned int bit, bit_max;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t start, tmp;

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks used to prevent allocation
	 * essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
		ext4_mark_group_bitmap_corrupted(sb, block_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT |
					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
		return -EFSBADCRC;
	}
	memset(bh->b_data, 0, sb->s_blocksize);

	bit_max = ext4_num_base_meta_clusters(sb, block_group);
	if ((bit_max >> 3) >= bh->b_size)
		return -EFSCORRUPTED;

	for (bit = 0; bit < bit_max; bit++)
		ext4_set_bit(bit, bh->b_data);

	start = ext4_group_first_block_no(sb, block_group);

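	/*
	 * Bitmap bits are in cluster units: EXT4_B2C() converts a block
	 * offset within the group into the corresponding bit number (with
	 * bigalloc off, a cluster is simply one block).
	 */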
	/* Set bits for block and inode bitmaps, and inode table */
	tmp = ext4_block_bitmap(sb, gdp);
	if (ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_bitmap(sb, gdp);
	if (ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_table(sb, gdp);
	for (; tmp < ext4_inode_table(sb, gdp) +
		     sbi->s_itb_per_group; tmp++) {
		if (ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
	}

	/*
	 * Also if the number of clusters within the group is less than
	 * blocksize * 8 (which is the size of the bitmap in bits), set the
	 * rest of the block bitmap to 1
	 */
	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
			     sb->s_blocksize * 8, bh->b_data);
	return 0;
}

/* Return the number of free blocks in a block group.  It is used when
 * the block bitmap is uninitialized, so we can't just count the bits
 * in the bitmap. */
unsigned ext4_free_clusters_after_init(struct super_block *sb,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	return num_clusters_in_group(sb, block_group) -
		ext4_num_overhead_clusters(sb, block_group, gdp);
}

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free block count of the group.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
					     ext4_group_t block_group,
					     struct buffer_head **bh)
{
	unsigned int group_desc;
	unsigned int offset;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *bh_p;

	if (block_group >= ngroups) {
		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
			   " groups_count = %u", block_group, ngroups);

		return NULL;
	}

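	/*
	 * Each descriptor block holds EXT4_DESC_PER_BLOCK(sb) descriptors,
	 * so the high bits of the group number select the descriptor block
	 * in s_group_desc[] and the low bits select the entry within it.
	 */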
	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	bh_p = sbi_array_rcu_deref(sbi, s_group_desc, group_desc);
	/*
	 * sbi_array_rcu_deref returns with rcu unlocked, this is ok since
	 * the pointer being dereferenced won't be dereferenced again. By
	 * looking at the usage in add_new_gdb() the value isn't modified,
	 * just the pointer, and so it remains valid.
	 */
	if (!bh_p) {
		ext4_error(sb, "Group descriptor not loaded - "
			   "block_group = %u, group_desc = %u, desc = %u",
			   block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)bh_p->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = bh_p;
	return desc;
}

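/*
 * Check that every bit beyond the end of the group, up to the end of the
 * bitmap block (blocksize * 8 bits), is set.  Returns the first padding bit
 * found clear, or 0 if the padding is valid.
 */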
static ext4_fsblk_t ext4_valid_block_bitmap_padding(struct super_block *sb,
						    ext4_group_t block_group,
						    struct buffer_head *bh)
{
	ext4_grpblk_t next_zero_bit;
	unsigned long bitmap_size = sb->s_blocksize * 8;
	unsigned int offset = num_clusters_in_group(sb, block_group);

	if (bitmap_size <= offset)
		return 0;

	next_zero_bit = ext4_find_next_zero_bit(bh->b_data, bitmap_size, offset);

	return (next_zero_bit < bitmap_size ? next_zero_bit : 0);
}

/*
 * Return the block number which was discovered to be invalid, or 0 if
 * the block bitmap is valid.
 */
static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
					    struct ext4_group_desc *desc,
					    ext4_group_t block_group,
					    struct buffer_head *bh)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_grpblk_t max_bit = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_fsblk_t blk;
	ext4_fsblk_t group_first_block;

	if (ext4_has_feature_flex_bg(sb)) {
		/* with FLEX_BG, the inode/block bitmaps and itable
		 * blocks may not be in the group at all
		 * so the bitmap validation will be skipped for those groups
		 * or it has to also read the block group where the bitmaps
		 * are located to verify they are set.
		 */
		return 0;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether block bitmap block number is set */
	blk = ext4_block_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
	    !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode bitmap block number is set */
	blk = ext4_inode_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
	    !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode table block number is set */
	blk = ext4_inode_table(sb, desc);
	offset = blk - group_first_block;
	if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
	    EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= max_bit)
		return blk;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
			EXT4_B2C(sbi, offset + sbi->s_itb_per_group),
			EXT4_B2C(sbi, offset));
	if (next_zero_bit <
	    EXT4_B2C(sbi, offset + sbi->s_itb_per_group))
		/* bad bitmap for inode tables */
		return blk;
	return 0;
}

static int ext4_validate_block_bitmap(struct super_block *sb,
				      struct ext4_group_desc *desc,
				      ext4_group_t block_group,
				      struct buffer_head *bh)
{
	ext4_fsblk_t blk;
	struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);

	if (buffer_verified(bh))
		return 0;
	if (EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
		return -EFSCORRUPTED;

	ext4_lock_group(sb, block_group);
	if (buffer_verified(bh))
		goto verified;
	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
						    desc, bh))) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
		return -EFSBADCRC;
	}
	blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
	if (unlikely(blk != 0)) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
			   block_group, blk);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
		return -EFSCORRUPTED;
	}
	blk = ext4_valid_block_bitmap_padding(sb, block_group, bh);
	if (unlikely(blk != 0)) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: block %llu: padding at end of block bitmap is not set",
			   block_group, blk);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
		return -EFSCORRUPTED;
	}
	set_buffer_verified(bh);
verified:
	ext4_unlock_group(sb, block_group);
	return 0;
}

/**
 * ext4_read_block_bitmap_nowait()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate the
 * bits for block/inode/inode tables are set in the bitmaps
 *
 * Return buffer_head on success or an ERR_PTR in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *bh;
	ext4_fsblk_t bitmap_blk;
	int err;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return ERR_PTR(-EFSCORRUPTED);
	bitmap_blk = ext4_block_bitmap(sb, desc);
	if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
	    (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
		ext4_error(sb, "Invalid block bitmap block %llu in "
			   "block_group %u", bitmap_blk, block_group);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
		return ERR_PTR(-EFSCORRUPTED);
	}
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_warning(sb, "Cannot get buffer for block bitmap - "
			     "block_group = %u, block_bitmap = %llu",
			     block_group, bitmap_blk);
		return ERR_PTR(-ENOMEM);
	}

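	/*
	 * bitmap_uptodate() is stronger than buffer_uptodate(): it also
	 * means the bitmap contents have been initialized in memory (for
	 * example for a BLOCK_UNINIT group), not merely read from disk.
	 */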
	if (bitmap_uptodate(bh))
		goto verify;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		goto verify;
	}
	ext4_lock_group(sb, block_group);
	if (ext4_has_group_desc_csum(sb) &&
	    (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
		if (block_group == 0) {
			ext4_unlock_group(sb, block_group);
			unlock_buffer(bh);
			ext4_error(sb, "Block bitmap for bg 0 marked "
				   "uninitialized");
			err = -EFSCORRUPTED;
			goto out;
		}
		err = ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		set_buffer_verified(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		if (err) {
			ext4_error(sb, "Failed to init block bitmap for group "
				   "%u: %d", block_group, err);
			goto out;
		}
		goto verify;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * if the group is not uninit and bh is uptodate,
		 * the bitmap is also uptodate
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		goto verify;
	}
	/*
	 * submit the buffer_head for reading
	 */
	set_buffer_new(bh);
	trace_ext4_read_block_bitmap_load(sb, block_group);
	bh->b_end_io = ext4_end_bitmap_read;
	get_bh(bh);
	submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
	return bh;
verify:
	err = ext4_validate_block_bitmap(sb, desc, block_group, bh);
	if (err)
		goto out;
	return bh;
out:
	put_bh(bh);
	return ERR_PTR(err);
}

/* Returns 0 on success, -errno on error */
int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
			   struct buffer_head *bh)
{
	struct ext4_group_desc *desc;

	if (!buffer_new(bh))
		return 0;
	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return -EFSCORRUPTED;
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		ext4_error(sb, "Cannot read block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, (unsigned long long) bh->b_blocknr);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
				EXT4_GROUP_INFO_BBITMAP_CORRUPT);
		return -EIO;
	}
	clear_buffer_new(bh);
	/* Panic or remount fs read-only if block bitmap is invalid */
	return ext4_validate_block_bitmap(sb, desc, block_group, bh);
}

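/*
 * Convenience wrapper: start the bitmap read and wait for it to complete.
 * Returns the validated buffer_head or an ERR_PTR.
 */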
struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct buffer_head *bh;
	int err;

	bh = ext4_read_block_bitmap_nowait(sb, block_group);
	if (IS_ERR(bh))
		return bh;
	err = ext4_wait_block_bitmap(sb, block_group, bh);
	if (err) {
		put_bh(bh);
		return ERR_PTR(err);
	}
	return bh;
}

/**
 * ext4_has_free_clusters()
 * @sbi:	in-core super block structure.
 * @nclusters:	number of clusters needed
 * @flags:	flags from ext4_mb_new_blocks()
 *
 * Check if filesystem has nclusters free & available for allocation.
 * On success return 1, return 0 on failure.
 */
static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
				  s64 nclusters, unsigned int flags)
{
	s64 free_clusters, dirty_clusters, rsv, resv_clusters;
	struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
	struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;

	free_clusters  = percpu_counter_read_positive(fcc);
	dirty_clusters = percpu_counter_read_positive(dcc);
	resv_clusters = atomic64_read(&sbi->s_resv_clusters);

	/*
	 * r_blocks_count should always be a multiple of the cluster ratio so
	 * we are safe to do a plain bit shift only.
	 */
	rsv = (ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits) +
	      resv_clusters;

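	/*
	 * percpu_counter_read_positive() is only approximate; if the cheap
	 * estimate puts us within EXT4_FREECLUSTERS_WATERMARK of running
	 * out, fall back to an exact (but more expensive) sum before
	 * deciding.
	 */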
	if (free_clusters - (nclusters + rsv + dirty_clusters) <
					EXT4_FREECLUSTERS_WATERMARK) {
		free_clusters  = percpu_counter_sum_positive(fcc);
		dirty_clusters = percpu_counter_sum_positive(dcc);
	}
	/* Check whether we have space after accounting for current
	 * dirty clusters & root reserved clusters.
	 */
	if (free_clusters >= (rsv + nclusters + dirty_clusters))
		return 1;

	/* Hm, nope.  Are (enough) root reserved clusters available? */
	if (uid_eq(sbi->s_resuid, current_fsuid()) ||
	    (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
	    capable(CAP_SYS_RESOURCE) ||
	    (flags & EXT4_MB_USE_ROOT_BLOCKS)) {

		if (free_clusters >= (nclusters + dirty_clusters +
				      resv_clusters))
			return 1;
	}
	/* No free blocks. Let's see if we can dip into reserved pool */
	if (flags & EXT4_MB_USE_RESERVED) {
		if (free_clusters >= (nclusters + dirty_clusters))
			return 1;
	}

	return 0;
}

int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
			     s64 nclusters, unsigned int flags)
{
	if (ext4_has_free_clusters(sbi, nclusters, flags)) {
		percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters);
		return 0;
	} else
		return -ENOSPC;
}

/**
 * ext4_should_retry_alloc() - check if a block allocation should be retried
 * @sb:			superblock
 * @retries:		number of retry attempts made so far
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned while
 * attempting to allocate blocks.  If there's an indication that a pending
 * journal transaction might free some space and allow another attempt to
 * succeed, this function will wait for the current or committing transaction
 * to complete and then return TRUE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (!sbi->s_journal)
		return 0;

	if (++(*retries) > 3) {
		percpu_counter_inc(&sbi->s_sra_exceeded_retry_limit);
		return 0;
	}

	/*
	 * if there's no indication that blocks are about to be freed it's
	 * possible we just missed a transaction commit that did so
	 */
	smp_mb();
	if (sbi->s_mb_free_pending == 0)
		return ext4_has_free_clusters(sbi, 1, 0);

	/*
	 * it's possible we've just missed a transaction commit here,
	 * so ignore the returned status
	 */
	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
	(void) jbd2_journal_force_commit_nested(sbi->s_journal);
	return 1;
}

/*
 * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks
 *
 * @handle:	handle to this transaction
 * @inode:	file inode
 * @goal:	given target block (filesystem wide)
 * @flags:	allocation flags
 * @count:	pointer to total number of clusters needed
 * @errp:	error code
 *
 * Return the first allocated block number on success; *count is updated
 * with the number actually allocated, and any error is stored in *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
				  ext4_fsblk_t goal, unsigned int flags,
				  unsigned long *count, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;
	ar.flags = flags;

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;
	/*
	 * Account for the allocated meta blocks.  We will never
	 * fail EDQUOT for metadata, but we do account for it.
	 */
	if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) {
		dquot_alloc_block_nofail(inode,
				EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
	}
	return ret;
}

/**
 * ext4_count_free_clusters() -- count filesystem free clusters
 * @sb:		superblock
 *
 * Adds up the number of free clusters from each block group.
 */
ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_info *grp;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned int x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		grp = NULL;
		if (EXT4_SB(sb)->s_group_info)
			grp = ext4_get_group_info(sb, i);
		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			desc_count += ext4_free_group_clusters(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, i);
		if (IS_ERR(bitmap_bh)) {
			bitmap_bh = NULL;
			continue;
		}

		x = ext4_count_free(bitmap_bh->b_data,
				    EXT4_CLUSTERS_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
			i, ext4_free_group_clusters(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
	       ", computed = %llu, %llu\n",
	       EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
	       desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		grp = NULL;
		if (EXT4_SB(sb)->s_group_info)
			grp = ext4_get_group_info(sb, i);
		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			desc_count += ext4_free_group_clusters(sb, gdp);
	}

	return desc_count;
#endif
}

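/*
 * Return 1 if @a is an integer power of @b (b, b^2, b^3, ...), 0 otherwise.
 * Used to decide which groups carry sparse_super backups.
 */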
static inline int test_root(ext4_group_t a, int b)
{
	while (1) {
		if (a < b)
			return 0;
		if (a == b)
			return 1;
		if ((a % b) != 0)
			return 0;
		a = a / b;
	}
}

/**
 * ext4_bg_has_super - number of blocks used by the superblock in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the superblock (primary or backup)
 * in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (group == 0)
		return 1;
	if (ext4_has_feature_sparse_super2(sb)) {
		if (group == le32_to_cpu(es->s_backup_bgs[0]) ||
		    group == le32_to_cpu(es->s_backup_bgs[1]))
			return 1;
		return 0;
	}
	if ((group <= 1) || !ext4_has_feature_sparse_super(sb))
		return 1;
	if (!(group & 1))
		return 0;
	if (test_root(group, 3) || (test_root(group, 5)) ||
	    test_root(group, 7))
		return 1;

	return 0;
}

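/*
 * With META_BG, each metagroup of EXT4_DESC_PER_BLOCK(sb) block groups has a
 * single group descriptor block, stored in the metagroup's first group and
 * backed up in its second and last groups.
 */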
static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					ext4_group_t group)
{
	if (!ext4_bg_has_super(sb, group))
		return 0;

	if (ext4_has_feature_meta_bg(sb))
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	else
		return EXT4_SB(sb)->s_gdb_count;
}

/**
 * ext4_bg_num_gdb - number of blocks used by the group table in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the group descriptor table
 * (primary or backup) in this group.  In the future there may be a
 * different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!ext4_has_feature_meta_bg(sb) || metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);
}

/*
 * This function returns the number of file system metadata clusters at
 * the beginning of a block group, including the reserved gdt blocks.
 */
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
					    ext4_group_t block_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned num;

	/* Check for superblock and gdt backups in this group */
	num = ext4_bg_has_super(sb, block_group);

	if (!ext4_has_feature_meta_bg(sb) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (num) {
			num += ext4_bg_num_gdb(sb, block_group);
			num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		num += ext4_bg_num_gdb(sb, block_group);
	}
	return EXT4_NUM_B2C(sbi, num);
}

/**
 * ext4_inode_to_goal_block - return a hint for block allocation
 * @inode: inode for block allocation
 *
 * Return the ideal location to start allocating blocks for a
 * newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_group_t block_group;
	ext4_grpblk_t colour;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;

	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take the
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

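	/*
	 * Otherwise spread concurrent allocators across the group: each
	 * process gets one of 16 pid-based starting offsets ("colour")
	 * within the group (or within whatever space remains before the
	 * end of the filesystem).
	 */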
	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}