// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/resize.c
 *
 * Support for resizing an ext4 filesystem while it is mounted.
 *
 * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
 *
 * This could probably be made into a module, because it is not often in use.
 */


#define EXT4FS_DEBUG

#include <linux/errno.h>
#include <linux/slab.h>

#include "ext4_jbd2.h"

struct ext4_rcu_ptr {
	struct rcu_head rcu;
	void *ptr;
};
static void ext4_rcu_ptr_callback(struct rcu_head *head)
{
	struct ext4_rcu_ptr *ptr;

	ptr = container_of(head, struct ext4_rcu_ptr, rcu);
	kvfree(ptr->ptr);
	kfree(ptr);
}

void ext4_kvfree_array_rcu(void *to_free)
{
	struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);

	if (ptr) {
		ptr->ptr = to_free;
		call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
		return;
	}
	synchronize_rcu();
	kvfree(to_free);
}

int ext4_resize_begin(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int ret = 0;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	/*
	 * If the reserved GDT blocks count is non-zero, the resize_inode
	 * feature should always be set.
	 */
	if (EXT4_SB(sb)->s_es->s_reserved_gdt_blocks &&
	    !ext4_has_feature_resize_inode(sb)) {
		ext4_error(sb, "resize_inode disabled but reserved GDT blocks non-zero");
		return -EFSCORRUPTED;
	}

	/*
	 * If we are not using the primary superblock/GDT copy don't resize,
	 * because the user tools have no way of handling this.  Probably a
	 * bad time to do it anyway.
	 */
	if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
		ext4_warning(sb, "won't resize using backup superblock at %llu",
			(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
		return -EPERM;
	}

	/*
	 * We are not allowed to do online resizing on a filesystem mounted
	 * with errors, because that can easily destroy the filesystem.
	 */
	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
		ext4_warning(sb, "There are errors in the filesystem, "
			     "so online resizing is not allowed");
		return -EPERM;
	}

	if (ext4_has_feature_sparse_super2(sb)) {
		ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2");
		return -EOPNOTSUPP;
	}

	if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
				  &EXT4_SB(sb)->s_ext4_flags))
		ret = -EBUSY;

	return ret;
}

void ext4_resize_end(struct super_block *sb)
{
	clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags);
	smp_mb__after_atomic();
}

static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb,
					     ext4_group_t group)
{
	return (group >> EXT4_DESC_PER_BLOCK_BITS(sb)) <<
	       EXT4_DESC_PER_BLOCK_BITS(sb);
}

static ext4_fsblk_t ext4_meta_bg_first_block_no(struct super_block *sb,
						ext4_group_t group)
{
	group = ext4_meta_bg_first_group(sb, group);
	return ext4_group_first_block_no(sb, group);
}

static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb,
						ext4_group_t group)
{
	ext4_grpblk_t overhead;

	overhead = ext4_bg_num_gdb(sb, group);
	if (ext4_bg_has_super(sb, group))
		overhead += 1 +
			le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
	return overhead;
}

#define outside(b, first, last)	((b) < (first) || (b) >= (last))
#define inside(b, first, last)	((b) >= (first) && (b) < (last))

static int verify_group_input(struct super_block *sb,
			      struct ext4_new_group_data *input)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t start = ext4_blocks_count(es);
	ext4_fsblk_t end = start + input->blocks_count;
	ext4_group_t group = input->group;
	ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
	unsigned overhead;
	ext4_fsblk_t metaend;
	struct buffer_head *bh = NULL;
	ext4_grpblk_t free_blocks_count, offset;
	int err = -EINVAL;

	if (group != sbi->s_groups_count) {
		ext4_warning(sb, "Cannot add at group %u (only %u groups)",
			     input->group, sbi->s_groups_count);
		return -EINVAL;
	}

	overhead = ext4_group_overhead_blocks(sb, group);
	metaend = start + overhead;
	input->free_clusters_count = free_blocks_count =
		input->blocks_count - 2 - overhead - sbi->s_itb_per_group;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
		       "(%d free, %u reserved)\n",
		       ext4_bg_has_super(sb, input->group) ? "normal" :
		       "no-super", input->group, input->blocks_count,
		       free_blocks_count, input->reserved_blocks);

	ext4_get_group_no_and_offset(sb, start, NULL, &offset);
	if (offset != 0)
		ext4_warning(sb, "Last group not full");
	else if (input->reserved_blocks > input->blocks_count / 5)
		ext4_warning(sb, "Reserved blocks too high (%u)",
			     input->reserved_blocks);
	else if (free_blocks_count < 0)
		ext4_warning(sb, "Bad blocks count %u",
			     input->blocks_count);
	else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
		err = PTR_ERR(bh);
		bh = NULL;
		ext4_warning(sb, "Cannot read last block (%llu)",
			     end - 1);
	} else if (outside(input->block_bitmap, start, end))
		ext4_warning(sb, "Block bitmap not in group (block %llu)",
			     (unsigned long long)input->block_bitmap);
	else if (outside(input->inode_bitmap, start, end))
		ext4_warning(sb, "Inode bitmap not in group (block %llu)",
			     (unsigned long long)input->inode_bitmap);
	else if (outside(input->inode_table, start, end) ||
		 outside(itend - 1, start, end))
		ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)",
			     (unsigned long long)input->inode_table, itend - 1);
	else if (input->inode_bitmap == input->block_bitmap)
		ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)",
			     (unsigned long long)input->block_bitmap);
	else if (inside(input->block_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Block bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->inode_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Inode bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->block_bitmap, start, metaend))
		ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_bitmap, start, metaend))
		ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_table, start, metaend) ||
		 inside(itend - 1, start, metaend))
		ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_table,
			     itend - 1, start, metaend - 1);
	else
		err = 0;
	brelse(bh);

	return err;
}

/*
 * ext4_new_flex_group_data is used by the 64-bit resize interface to add
 * one flex group at a time.
 */
struct ext4_new_flex_group_data {
	struct ext4_new_group_data *groups;	/* new_group_data for groups
						   in the flex group */
	__u16 *bg_flags;			/* block group flags of groups
						   in @groups */
	ext4_group_t resize_bg;			/* number of allocated
						   new_group_data */
	ext4_group_t count;			/* number of groups in @groups */
};

/*
 * Cap the number of groups added at a time, to avoid memory allocation
 * failures caused by asking for too much memory in one go.
 */
#define MAX_RESIZE_BG				16384
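
/*
 * Illustrative sizing behind the cap above (the exact value of
 * sizeof(struct ext4_new_group_data) depends on the kernel version, so
 * treat the numbers as a rough sketch): one resize step allocates
 * 16384 * sizeof(struct ext4_new_group_data) bytes for flex_gd->groups,
 * on the order of a megabyte, plus 16384 * sizeof(__u16) = 32 KiB for
 * flex_gd->bg_flags.  Without the cap, a huge flexbg_size could make the
 * kmalloc_array() calls below fail.
 */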

/*
 * alloc_flex_gd() allocates an ext4_new_flex_group_data that can describe
 * up to @flexbg_size groups (capped at MAX_RESIZE_BG).
 *
 * Returns NULL on failure, otherwise the address of the allocated structure.
 */
static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned int flexbg_size)
{
	struct ext4_new_flex_group_data *flex_gd;

	flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS);
	if (flex_gd == NULL)
		goto out3;

	if (unlikely(flexbg_size > MAX_RESIZE_BG))
		flex_gd->resize_bg = MAX_RESIZE_BG;
	else
		flex_gd->resize_bg = flexbg_size;

	flex_gd->groups = kmalloc_array(flex_gd->resize_bg,
					sizeof(struct ext4_new_group_data),
					GFP_NOFS);
	if (flex_gd->groups == NULL)
		goto out2;

	flex_gd->bg_flags = kmalloc_array(flex_gd->resize_bg, sizeof(__u16),
					  GFP_NOFS);
	if (flex_gd->bg_flags == NULL)
		goto out1;

	return flex_gd;

out1:
	kfree(flex_gd->groups);
out2:
	kfree(flex_gd);
out3:
	return NULL;
}

static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
{
	kfree(flex_gd->bg_flags);
	kfree(flex_gd->groups);
	kfree(flex_gd);
}

/*
 * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps
 * and inode tables for a flex group.
 *
 * This function is used by 64-bit resize.  Note that this function
 * allocates group tables from the 1st group of the groups contained by
 * @flex_gd, which may be only part of a flex group.
 *
 * @sb: super block of the fs to which the groups belong
 *
 * Returns 0 on a successful allocation of the metadata blocks in the
 * block group.
 */
static int ext4_alloc_group_tables(struct super_block *sb,
				   struct ext4_new_flex_group_data *flex_gd,
				   unsigned int flexbg_size)
{
	struct ext4_new_group_data *group_data = flex_gd->groups;
	ext4_fsblk_t start_blk;
	ext4_fsblk_t last_blk;
	ext4_group_t src_group;
	ext4_group_t bb_index = 0;
	ext4_group_t ib_index = 0;
	ext4_group_t it_index = 0;
	ext4_group_t group;
	ext4_group_t last_group;
	unsigned overhead;
	__u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
	int i;

	BUG_ON(flex_gd->count == 0 || group_data == NULL);

	src_group = group_data[0].group;
	last_group = src_group + flex_gd->count - 1;

	BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) !=
	       (last_group & ~(flexbg_size - 1))));
next_group:
	group = group_data[0].group;
	if (src_group >= group_data[0].group + flex_gd->count)
		return -ENOSPC;
	start_blk = ext4_group_first_block_no(sb, src_group);
	last_blk = start_blk + group_data[src_group - group].blocks_count;

	overhead = ext4_group_overhead_blocks(sb, src_group);

	start_blk += overhead;

	/* We collect contiguous blocks as much as possible. */
	src_group++;
	for (; src_group <= last_group; src_group++) {
		overhead = ext4_group_overhead_blocks(sb, src_group);
		if (overhead == 0)
			last_blk += group_data[src_group - group].blocks_count;
		else
			break;
	}

	/* Allocate block bitmaps */
	for (; bb_index < flex_gd->count; bb_index++) {
		if (start_blk >= last_blk)
			goto next_group;
		group_data[bb_index].block_bitmap = start_blk++;
		group = ext4_get_group_number(sb, start_blk - 1);
		group -= group_data[0].group;
		group_data[group].mdata_blocks++;
		flex_gd->bg_flags[group] &= uninit_mask;
	}

	/* Allocate inode bitmaps */
	for (; ib_index < flex_gd->count; ib_index++) {
		if (start_blk >= last_blk)
			goto next_group;
		group_data[ib_index].inode_bitmap = start_blk++;
		group = ext4_get_group_number(sb, start_blk - 1);
		group -= group_data[0].group;
		group_data[group].mdata_blocks++;
		flex_gd->bg_flags[group] &= uninit_mask;
	}

	/* Allocate inode tables */
	for (; it_index < flex_gd->count; it_index++) {
		unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
		ext4_fsblk_t next_group_start;

		if (start_blk + itb > last_blk)
			goto next_group;
		group_data[it_index].inode_table = start_blk;
		group = ext4_get_group_number(sb, start_blk);
		next_group_start = ext4_group_first_block_no(sb, group + 1);
		group -= group_data[0].group;

		if (start_blk + itb > next_group_start) {
			flex_gd->bg_flags[group + 1] &= uninit_mask;
			overhead = start_blk + itb - next_group_start;
			group_data[group + 1].mdata_blocks += overhead;
			itb -= overhead;
		}

		group_data[group].mdata_blocks += itb;
		flex_gd->bg_flags[group] &= uninit_mask;
		start_blk += EXT4_SB(sb)->s_itb_per_group;
	}

	/* Update free clusters count to exclude metadata blocks */
	for (i = 0; i < flex_gd->count; i++) {
		group_data[i].free_clusters_count -=
				EXT4_NUM_B2C(EXT4_SB(sb),
					     group_data[i].mdata_blocks);
	}

	if (test_opt(sb, DEBUG)) {
		int i;
		group = group_data[0].group;

		printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
		       "%u groups, flexbg size is %u:\n", flex_gd->count,
		       flexbg_size);

		for (i = 0; i < flex_gd->count; i++) {
			ext4_debug(
			       "adding %s group %u: %u blocks (%u free, %u mdata blocks)\n",
			       ext4_bg_has_super(sb, group + i) ? "normal" :
			       "no-super", group + i,
			       group_data[i].blocks_count,
			       group_data[i].free_clusters_count,
			       group_data[i].mdata_blocks);
		}
	}
	return 0;
}
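
/*
 * Worked example of the layout produced above (illustrative numbers, not
 * taken from any particular filesystem): for a flex group of 4 new groups
 * where only the first group carries superblock/GDT overhead, the loops
 * pack all the metadata contiguously right after that overhead:
 *
 *	group 0: sb + GDT + reserved GDT | bb0 bb1 bb2 bb3 |
 *		 ib0 ib1 ib2 ib3 | itable0 itable1 itable2 itable3 | data...
 *	groups 1-3: data only
 *
 * If a later group in the flex group has overhead of its own, the
 * "collect contiguous blocks" loop stops there, and the remaining tables
 * are placed after that group's overhead (via the next_group label).
 */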

static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
				  ext4_fsblk_t blk)
{
	struct buffer_head *bh;
	int err;

	bh = sb_getblk(sb, blk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	BUFFER_TRACE(bh, "get_write_access");
	if ((err = ext4_journal_get_write_access(handle, bh))) {
		brelse(bh);
		bh = ERR_PTR(err);
	} else {
		memset(bh->b_data, 0, sb->s_blocksize);
		set_buffer_uptodate(bh);
	}

	return bh;
}

/*
 * If we have fewer than @thresh credits, extend the transaction by
 * EXT4_MAX_TRANS_DATA.  If extending is not possible, restart the
 * transaction instead; the caller must then re-take write access for any
 * buffer head (e.g. the block bitmap) it is modifying.
 */
static int extend_or_restart_transaction(handle_t *handle, int thresh)
{
	int err;

	if (ext4_handle_has_enough_credits(handle, thresh))
		return 0;

	err = ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA);
	if (err < 0)
		return err;
	if (err) {
		err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA);
		if (err)
			return err;
	}

	return 0;
}
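
/*
 * Typical call pattern in this file (a sketch of the existing usage below,
 * not a new API): make sure at least one credit is available, then take
 * write access on the buffer about to be dirtied:
 *
 *	err = extend_or_restart_transaction(handle, 1);
 *	if (err)
 *		return err;
 *	err = ext4_journal_get_write_access(handle, bh);
 *
 * Re-taking write access after this call matters because a restart commits
 * the running transaction, invalidating previously taken write access.
 */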

/*
 * set_flexbg_block_bitmap() marks clusters [@first_cluster, @last_cluster]
 * used.
 *
 * Helper function for ext4_setup_new_group_blocks() which sets the bits for
 * the blocks occupied by the group tables in the relevant block bitmaps.
 *
 * @sb: super block
 * @handle: journal handle
 * @flex_gd: flex group data
 */
static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
			struct ext4_new_flex_group_data *flex_gd,
			ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t count = last_cluster - first_cluster + 1;
	ext4_group_t count2;

	ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster,
		   last_cluster);
	for (count2 = count; count > 0;
	     count -= count2, first_cluster += count2) {
		ext4_fsblk_t start;
		struct buffer_head *bh;
		ext4_group_t group;
		int err;

		group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster));
		start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group));
		group -= flex_gd->groups[0].group;

		count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start);
		if (count2 > count)
			count2 = count;

		if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
			BUG_ON(flex_gd->count > 1);
			continue;
		}

		err = extend_or_restart_transaction(handle, 1);
		if (err)
			return err;

		bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
		if (unlikely(!bh))
			return -ENOMEM;

		BUFFER_TRACE(bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, bh);
		if (err) {
			brelse(bh);
			return err;
		}
		ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
			   first_cluster, first_cluster - start, count2);
		ext4_set_bits(bh->b_data, first_cluster - start, count2);

		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (unlikely(err))
			return err;
	}

	return 0;
}

/*
 * Set up the block and inode bitmaps, and the inode table for the new groups.
 * This doesn't need to be part of the main transaction, since we are only
 * changing blocks outside the actual filesystem.  We still do journaling to
 * ensure the recovery is correct in case of a failure just after resize.
 * If any part of this fails, we simply abort the resize.
 *
 * setup_new_flex_group_blocks handles a flex group as follows:
 * 1. copy the super block and GDT, and initialize group tables if necessary.
 *    In this step, we only set bits in block bitmaps for blocks taken by
 *    the super block and GDT.
 * 2. allocate group tables in block bitmaps, that is, set bits in block
 *    bitmaps for blocks taken by group tables.
 */
static int setup_new_flex_group_blocks(struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd)
{
	int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group};
	ext4_fsblk_t start;
	ext4_fsblk_t block;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	__u16 *bg_flags = flex_gd->bg_flags;
	handle_t *handle;
	ext4_group_t group, count;
	struct buffer_head *bh = NULL;
	int reserved_gdb, i, j, err = 0, err2;
	int meta_bg;

	BUG_ON(!flex_gd->count || !group_data ||
	       group_data[0].group != sbi->s_groups_count);

	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
	meta_bg = ext4_has_feature_meta_bg(sb);

	/* This transaction may be extended/restarted along the way */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	group = group_data[0].group;
	for (i = 0; i < flex_gd->count; i++, group++) {
		unsigned long gdblocks;
		ext4_grpblk_t overhead;

		gdblocks = ext4_bg_num_gdb(sb, group);
		start = ext4_group_first_block_no(sb, group);

		if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
			goto handle_itb;

		if (meta_bg == 1)
			goto handle_itb;

		block = start + ext4_bg_has_super(sb, group);
		/* Copy all of the GDT blocks into the backup in this group */
		for (j = 0; j < gdblocks; j++, block++) {
			struct buffer_head *gdb;

			ext4_debug("update backup group %#04llx\n", block);
			err = extend_or_restart_transaction(handle, 1);
			if (err)
				goto out;

			gdb = sb_getblk(sb, block);
			if (unlikely(!gdb)) {
				err = -ENOMEM;
				goto out;
			}

			BUFFER_TRACE(gdb, "get_write_access");
			err = ext4_journal_get_write_access(handle, gdb);
			if (err) {
				brelse(gdb);
				goto out;
			}
			memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
				s_group_desc, j)->b_data, gdb->b_size);
			set_buffer_uptodate(gdb);

			err = ext4_handle_dirty_metadata(handle, NULL, gdb);
			if (unlikely(err)) {
				brelse(gdb);
				goto out;
			}
			brelse(gdb);
		}

		/* Zero out all of the reserved backup group descriptor
		 * table blocks
		 */
		if (ext4_bg_has_super(sb, group)) {
			err = sb_issue_zeroout(sb, gdblocks + start + 1,
					       reserved_gdb, GFP_NOFS);
			if (err)
				goto out;
		}

handle_itb:
		/* Initialize group tables of the group @group */
		if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
			goto handle_bb;

		/* Zero out all of the inode table blocks */
		block = group_data[i].inode_table;
		ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
			   block, sbi->s_itb_per_group);
		err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
				       GFP_NOFS);
		if (err)
			goto out;

handle_bb:
		if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT)
			goto handle_ib;

		/* Initialize block bitmap of the @group */
		block = group_data[i].block_bitmap;
		err = extend_or_restart_transaction(handle, 1);
		if (err)
			goto out;

		bh = bclean(handle, sb, block);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			goto out;
		}
		overhead = ext4_group_overhead_blocks(sb, group);
		if (overhead != 0) {
			ext4_debug("mark backup superblock %#04llx (+0)\n",
				   start);
			ext4_set_bits(bh->b_data, 0,
				      EXT4_NUM_B2C(sbi, overhead));
		}
		ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
				     sb->s_blocksize * 8, bh->b_data);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (err)
			goto out;

handle_ib:
		if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
			continue;

		/* Initialize inode bitmap of the @group */
		block = group_data[i].inode_bitmap;
		err = extend_or_restart_transaction(handle, 1);
		if (err)
			goto out;
		/* Mark unused entries in inode bitmap used */
		bh = bclean(handle, sb, block);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			goto out;
		}

		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
				     sb->s_blocksize * 8, bh->b_data);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (err)
			goto out;
	}

	/* Mark group tables in block bitmap */
	for (j = 0; j < GROUP_TABLE_COUNT; j++) {
		count = group_table_count[j];
		start = (&group_data[0].block_bitmap)[j];
		block = start;
		for (i = 1; i < flex_gd->count; i++) {
			block += group_table_count[j];
			if (block == (&group_data[i].block_bitmap)[j]) {
				count += group_table_count[j];
				continue;
			}
			err = set_flexbg_block_bitmap(sb, handle,
						      flex_gd,
						      EXT4_B2C(sbi, start),
						      EXT4_B2C(sbi,
							       start + count
							       - 1));
			if (err)
				goto out;
			count = group_table_count[j];
			start = (&group_data[i].block_bitmap)[j];
			block = start;
		}

		if (count) {
			err = set_flexbg_block_bitmap(sb, handle,
						      flex_gd,
						      EXT4_B2C(sbi, start),
						      EXT4_B2C(sbi,
							       start + count
							       - 1));
			if (err)
				goto out;
		}
	}

out:
	err2 = ext4_journal_stop(handle);
	if (err2 && !err)
		err = err2;

	return err;
}

/*
 * Iterate through the groups which hold BACKUP superblock/GDT copies in an
 * ext4 filesystem.  The counters should be initialized to 1, 5, and 7 before
 * calling this for the first time.  In a sparse filesystem it will be the
 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
 */
static unsigned ext4_list_backups(struct super_block *sb, unsigned *three,
				  unsigned *five, unsigned *seven)
{
	unsigned *min = three;
	int mult = 3;
	unsigned ret;

	if (!ext4_has_feature_sparse_super(sb)) {
		ret = *min;
		*min += 1;
		return ret;
	}

	if (*five < *min) {
		min = five;
		mult = 5;
	}
	if (*seven < *min) {
		min = seven;
		mult = 7;
	}

	ret = *min;
	*min *= mult;

	return ret;
}
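
/*
 * Trace of the generator above on a sparse filesystem, starting from
 * three = 1, five = 5, seven = 7 (as the callers initialize them):
 *
 *	call 1: min = three = 1 -> returns 1, three becomes 3
 *	call 2: min = three = 3 -> returns 3, three becomes 9
 *	call 3: min = five  = 5 -> returns 5, five becomes 25
 *	call 4: min = seven = 7 -> returns 7, seven becomes 49
 *	call 5: min = three = 9 -> returns 9, three becomes 27
 *	...
 *
 * which yields the documented sequence 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
 */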

/*
 * Check that all of the backup GDT blocks are held in the primary GDT block.
 * It is assumed that they are stored in group order.  Returns the number of
 * groups in the current filesystem that have BACKUPS, or a negative error
 * code.
 */
static int verify_reserved_gdb(struct super_block *sb,
			       ext4_group_t end,
			       struct buffer_head *primary)
{
	const ext4_fsblk_t blk = primary->b_blocknr;
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	unsigned grp;
	__le32 *p = (__le32 *)primary->b_data;
	int gdbackups = 0;

	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
		if (le32_to_cpu(*p++) !=
		    grp * EXT4_BLOCKS_PER_GROUP(sb) + blk) {
			ext4_warning(sb, "reserved GDT %llu"
				     " missing grp %d (%llu)",
				     blk, grp,
				     grp *
				     (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
				     blk);
			return -EINVAL;
		}
		if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
			return -EFBIG;
	}

	return gdbackups;
}
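
/*
 * In other words, entry k of a reserved primary GDT block at block number
 * blk is expected to point at the backup copy of that same GDT block in
 * backup group grp_k:
 *
 *	le32_to_cpu(p[k]) == blk + grp_k * EXT4_BLOCKS_PER_GROUP(sb)
 *
 * where grp_k is the k-th group returned by ext4_list_backups().  This is
 * just a restatement of the check in the loop above, not an additional
 * invariant.
 */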

/*
 * Called when we need to bring a reserved group descriptor table block into
 * use from the resize inode.  The primary copy of the new GDT block currently
 * is an indirect block (under the double indirect block in the resize inode).
 * The new backup GDT blocks will be stored as leaf blocks in this indirect
 * block, in group order.  Even though we know all the block numbers we need,
 * we check to ensure that the resize inode has actually reserved these blocks.
 *
 * We don't need to update the block bitmaps, because the blocks are still
 * in use.
 *
 * We get all of the error cases out of the way, so that we are sure to not
 * fail once we start modifying the data on disk, because JBD has no rollback.
 */
static int add_new_gdb(handle_t *handle, struct inode *inode,
		       ext4_group_t group)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
	struct buffer_head **o_group_desc, **n_group_desc = NULL;
	struct buffer_head *dind = NULL;
	struct buffer_head *gdb_bh = NULL;
	int gdbackups;
	struct ext4_iloc iloc = { .bh = NULL };
	__le32 *data;
	int err;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG
		       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
		       gdb_num);

	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
	if (IS_ERR(gdb_bh))
		return PTR_ERR(gdb_bh);

	gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
	if (gdbackups < 0) {
		err = gdbackups;
		goto errout;
	}

	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
	if (IS_ERR(dind)) {
		err = PTR_ERR(dind);
		dind = NULL;
		goto errout;
	}

	data = (__le32 *)dind->b_data;
	if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
		ext4_warning(sb, "new group %u GDT block %llu not reserved",
			     group, gdblock);
		err = -EINVAL;
		goto errout;
	}

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
	if (unlikely(err))
		goto errout;

	BUFFER_TRACE(gdb_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, gdb_bh);
	if (unlikely(err))
		goto errout;

	BUFFER_TRACE(dind, "get_write_access");
	err = ext4_journal_get_write_access(handle, dind);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto errout;
	}

	/* ext4_reserve_inode_write() gets a reference on the iloc */
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (unlikely(err))
		goto errout;

	n_group_desc = ext4_kvmalloc((gdb_num + 1) *
				     sizeof(struct buffer_head *),
				     GFP_NOFS);
	if (!n_group_desc) {
		err = -ENOMEM;
		ext4_warning(sb, "not enough memory for %lu groups",
			     gdb_num + 1);
		goto errout;
	}

	/*
	 * Finally, we have all of the possible failures behind us...
	 *
	 * Remove the new GDT block from the inode's double-indirect block and
	 * clear out the new GDT block for use (which also "frees" the backup
	 * GDT blocks from the reserved inode).  We don't need to change the
	 * bitmaps for these blocks, because they are marked as in-use from
	 * being in the reserved inode, and will become GDT blocks (primary
	 * and backup).
	 */
	data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
	err = ext4_handle_dirty_metadata(handle, NULL, dind);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto errout;
	}
	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
			   (9 - EXT4_SB(sb)->s_cluster_bits);
	ext4_mark_iloc_dirty(handle, inode, &iloc);
	memset(gdb_bh->b_data, 0, sb->s_blocksize);
	err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		iloc.bh = NULL;
		goto errout;
	}
	brelse(dind);

	rcu_read_lock();
	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
	memcpy(n_group_desc, o_group_desc,
	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	rcu_read_unlock();
	n_group_desc[gdb_num] = gdb_bh;
	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
	EXT4_SB(sb)->s_gdb_count++;
	ext4_kvfree_array_rcu(o_group_desc);

	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
	err = ext4_handle_dirty_super(handle, sb);
	if (err)
		ext4_std_error(sb, err);
	return err;
errout:
	kvfree(n_group_desc);
	brelse(iloc.bh);
	brelse(dind);
	brelse(gdb_bh);

	ext4_debug("leaving with error %d\n", err);
	return err;
}
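
/*
 * Example of the gdblock arithmetic above (illustrative numbers only): on
 * a filesystem with EXT4_DESC_PER_BLOCK(sb) == 128 group descriptors per
 * block, adding group 1280 gives gdb_num = 1280 / 128 = 10, so the new
 * primary GDT block lives at s_sbh->b_blocknr + 1 + 10, i.e. directly
 * after the superblock and the ten GDT blocks already in use.  The actual
 * descriptors-per-block value depends on the block size and descriptor
 * size of the filesystem at hand.
 */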

/*
 * add_new_gdb_meta_bg is the sister of add_new_gdb.
 */
static int add_new_gdb_meta_bg(struct super_block *sb,
			       handle_t *handle, ext4_group_t group)
{
	ext4_fsblk_t gdblock;
	struct buffer_head *gdb_bh;
	struct buffer_head **o_group_desc, **n_group_desc;
	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
	int err;

	gdblock = ext4_meta_bg_first_block_no(sb, group) +
		   ext4_bg_has_super(sb, group);
	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
	if (IS_ERR(gdb_bh))
		return PTR_ERR(gdb_bh);
	n_group_desc = ext4_kvmalloc((gdb_num + 1) *
				     sizeof(struct buffer_head *),
				     GFP_NOFS);
	if (!n_group_desc) {
		brelse(gdb_bh);
		err = -ENOMEM;
		ext4_warning(sb, "not enough memory for %lu groups",
			     gdb_num + 1);
		return err;
	}

	rcu_read_lock();
	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
	memcpy(n_group_desc, o_group_desc,
	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	rcu_read_unlock();
	n_group_desc[gdb_num] = gdb_bh;

	BUFFER_TRACE(gdb_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, gdb_bh);
	if (err) {
		kvfree(n_group_desc);
		brelse(gdb_bh);
		return err;
	}

	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
	EXT4_SB(sb)->s_gdb_count++;
	ext4_kvfree_array_rcu(o_group_desc);
	return err;
}

/*
 * Called when we are adding a new group which has a backup copy of each of
 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
 * We need to add these reserved backup GDT blocks to the resize inode, so
 * that they are kept for future resizing and not allocated to files.
 *
 * Each reserved backup GDT block will go into a different indirect block.
 * The indirect blocks are actually the primary reserved GDT blocks,
 * so we know in advance what their block numbers are.  We only get the
 * double-indirect block to verify it is pointing to the primary reserved
 * GDT blocks so we don't overwrite a data block by accident.  The reserved
 * backup GDT blocks are stored in their reserved primary GDT block.
 */
static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
			      ext4_group_t group)
{
	struct super_block *sb = inode->i_sb;
	int reserved_gdb = le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
	int cluster_bits = EXT4_SB(sb)->s_cluster_bits;
	struct buffer_head **primary;
	struct buffer_head *dind;
	struct ext4_iloc iloc;
	ext4_fsblk_t blk;
	__le32 *data, *end;
	int gdbackups = 0;
	int res, i;
	int err;

	primary = kmalloc_array(reserved_gdb, sizeof(*primary), GFP_NOFS);
	if (!primary)
		return -ENOMEM;

	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
	if (IS_ERR(dind)) {
		err = PTR_ERR(dind);
		dind = NULL;
		goto exit_free;
	}

	blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
	data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count %
					 EXT4_ADDR_PER_BLOCK(sb));
	end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);

	/* Get each reserved primary GDT block and verify it holds backups */
	for (res = 0; res < reserved_gdb; res++, blk++) {
		if (le32_to_cpu(*data) != blk) {
			ext4_warning(sb, "reserved block %llu"
				     " not at offset %ld",
				     blk,
				     (long)(data - (__le32 *)dind->b_data));
			err = -EINVAL;
			goto exit_bh;
		}
		primary[res] = ext4_sb_bread(sb, blk, 0);
		if (IS_ERR(primary[res])) {
			err = PTR_ERR(primary[res]);
			primary[res] = NULL;
			goto exit_bh;
		}
		gdbackups = verify_reserved_gdb(sb, group, primary[res]);
		if (gdbackups < 0) {
			brelse(primary[res]);
			err = gdbackups;
			goto exit_bh;
		}
		if (++data >= end)
			data = (__le32 *)dind->b_data;
	}

	for (i = 0; i < reserved_gdb; i++) {
		BUFFER_TRACE(primary[i], "get_write_access");
		if ((err = ext4_journal_get_write_access(handle, primary[i])))
			goto exit_bh;
	}

	if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
		goto exit_bh;

	/*
	 * Finally we can add each of the reserved backup GDT blocks from
	 * the new group to its reserved primary GDT block.
	 */
	blk = group * EXT4_BLOCKS_PER_GROUP(sb);
	for (i = 0; i < reserved_gdb; i++) {
		int err2;
		data = (__le32 *)primary[i]->b_data;
		/* printk("reserving backup %lu[%u] = %lu\n",
		       primary[i]->b_blocknr, gdbackups,
		       blk + primary[i]->b_blocknr); */
		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
		err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
		if (!err)
			err = err2;
	}

	inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits);
	ext4_mark_iloc_dirty(handle, inode, &iloc);

exit_bh:
	while (--res >= 0)
		brelse(primary[res]);
	brelse(dind);

exit_free:
	kfree(primary);

	return err;
}
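
/*
 * Sketch of the resize inode layout this function relies on (a restatement
 * of the comment above, with the block positions spelled out):
 *
 *	resize inode i_data[EXT4_DIND_BLOCK] --> DIND block
 *	DIND block entries: the reserved primary GDT blocks themselves,
 *		e.g. sb+1+gdb_count, sb+1+gdb_count+1, ...
 *	each reserved primary GDT block, reused as an indirect block,
 *		lists the backup copies of that GDT block, one per
 *		backup group.
 *
 * Adding a new sparse group therefore just appends one entry, the new
 * group's copy, to each reserved primary GDT block.
 */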

/*
 * Update the backup copies of the ext4 metadata.  These don't need to be part
 * of the main resize transaction, because e2fsck will re-write them if there
 * is a problem (basically only OOM will cause a problem).  However, we
 * _should_ update the backups if possible, in case the primary gets trashed
 * for some reason and we need to run e2fsck from a backup superblock.  The
 * important part is that the new block and inode counts are in the backup
 * superblocks, and the location of the new group metadata is in the GDT
 * backups.
 *
 * We do not need to take the s_resize_lock for this, because these
 * blocks are not otherwise touched by the filesystem code when it is
 * mounted.  We don't need to worry about last changing from
 * sbi->s_groups_count, because the worst that can happen is that we
 * do not copy the full number of backups at this time.  The resize
 * which changed s_groups_count will back them up again.
 */
static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
			   int size, int meta_bg)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t last;
	const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	ext4_group_t group = 0;
	int rest = sb->s_blocksize - size;
	handle_t *handle;
	int err = 0, err2;

	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle)) {
		group = 1;
		err = PTR_ERR(handle);
		goto exit_err;
	}

	if (meta_bg == 0) {
		group = ext4_list_backups(sb, &three, &five, &seven);
		last = sbi->s_groups_count;
	} else {
		group = ext4_get_group_number(sb, blk_off) + 1;
		last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2);
	}

	while (group < sbi->s_groups_count) {
		struct buffer_head *bh;
		ext4_fsblk_t backup_block;

		/* Out of journal space, and can't get more - abort - so sad */
		if (ext4_handle_valid(handle) &&
		    handle->h_buffer_credits == 0 &&
		    ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA) &&
		    (err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))
			break;

		if (meta_bg == 0)
			backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
		else
			backup_block = (ext4_group_first_block_no(sb, group) +
					ext4_bg_has_super(sb, group));

		bh = sb_getblk(sb, backup_block);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			break;
		}
		ext4_debug("update metadata backup %llu(+%llu)\n",
			   backup_block, backup_block -
			   ext4_group_first_block_no(sb, group));
		BUFFER_TRACE(bh, "get_write_access");
		if ((err = ext4_journal_get_write_access(handle, bh))) {
			brelse(bh);
			break;
		}
		lock_buffer(bh);
		memcpy(bh->b_data, data, size);
		if (rest)
			memset(bh->b_data + size, 0, rest);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (unlikely(err))
			ext4_std_error(sb, err);
		brelse(bh);

		if (meta_bg == 0)
			group = ext4_list_backups(sb, &three, &five, &seven);
		else if (group == last)
			break;
		else
			group = last;
	}
	if ((err2 = ext4_journal_stop(handle)) && !err)
		err = err2;

	/*
	 * Ugh!  Need to have e2fsck write the backup copies.  It is too
	 * late to revert the resize, and we shouldn't fail just because of
	 * the backup copies (they are only needed in case of corruption).
	 *
	 * However, if we got here we have a journal problem too, so we
	 * can't really start a transaction to mark the superblock.
	 * Chicken out and just set the flag on the hope it will be written
	 * to disk, and if not - we will simply wait until next fsck.
	 */
exit_err:
	if (err) {
		ext4_warning(sb, "can't update backup for group %u (err %d), "
			     "forcing fsck on next reboot", group, err);
		sbi->s_mount_state &= ~EXT4_VALID_FS;
		sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
		mark_buffer_dirty(sbi->s_sbh);
	}
}
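
/*
 * Where the backups land, by way of example (block positions spelled out
 * from the code above, not additional behaviour):
 *
 *	meta_bg == 0 (sparse_super): the caller passes blk_off relative to
 *	the start of the filesystem, and the copy for backup group g goes
 *	to g * blocks_per_group + blk_off, for g = 1, 3, 5, 7, 9, 25, ...
 *
 *	meta_bg != 0: each descriptor block is backed up only in the second
 *	and last groups of its own meta block group, which is why last is
 *	computed as group + EXT4_DESC_PER_BLOCK(sb) - 2 and the loop visits
 *	at most those two groups.
 */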

/*
 * ext4_add_new_descs() adds @count group descriptors of the groups
 * starting at @group
 *
 * @handle: journal handle
 * @sb: super block
 * @group: the group no. of the first group desc to be added
 * @resize_inode: the resize inode
 * @count: number of group descriptors to be added
 */
static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
			      ext4_group_t group, struct inode *resize_inode,
			      ext4_group_t count)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *gdb_bh;
	int i, gdb_off, gdb_num, err = 0;
	int meta_bg;

	meta_bg = ext4_has_feature_meta_bg(sb);
	for (i = 0; i < count; i++, group++) {
		int reserved_gdb = ext4_bg_has_super(sb, group) ?
			le16_to_cpu(es->s_reserved_gdt_blocks) : 0;

		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);

		/*
		 * We will only either add reserved group blocks to a backup
		 * group or remove reserved blocks for the first group in a
		 * new group block.  Doing both would mean more complex code,
		 * and sane people don't use non-sparse filesystems anymore.
		 * This is already checked above.
		 */
		if (gdb_off) {
			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
						     gdb_num);
			BUFFER_TRACE(gdb_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle, gdb_bh);

			if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
				err = reserve_backup_gdb(handle, resize_inode, group);
		} else if (meta_bg != 0) {
			err = add_new_gdb_meta_bg(sb, handle, group);
		} else {
			err = add_new_gdb(handle, resize_inode, group);
		}
		if (err)
			break;
	}
	return err;
}

static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
{
	struct buffer_head *bh = sb_getblk(sb, block);

	if (unlikely(!bh))
		return NULL;
	if (!bh_uptodate_or_lock(bh)) {
		if (bh_submit_read(bh) < 0) {
			brelse(bh);
			return NULL;
		}
	}

	return bh;
}

static int ext4_set_bitmap_checksums(struct super_block *sb,
				     ext4_group_t group,
				     struct ext4_group_desc *gdp,
				     struct ext4_new_group_data *group_data)
{
	struct buffer_head *bh;

	if (!ext4_has_metadata_csum(sb))
		return 0;

	bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
	if (!bh)
		return -EIO;
	ext4_inode_bitmap_csum_set(sb, group, gdp, bh,
				   EXT4_INODES_PER_GROUP(sb) / 8);
	brelse(bh);

	bh = ext4_get_bitmap(sb, group_data->block_bitmap);
	if (!bh)
		return -EIO;
	ext4_block_bitmap_csum_set(sb, group, gdp, bh);
	brelse(bh);

	return 0;
}

/*
 * ext4_setup_new_descs() will set up the group descriptors of a flex bg
 */
static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd)
{
	struct ext4_new_group_data *group_data = flex_gd->groups;
	struct ext4_group_desc *gdp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *gdb_bh;
	ext4_group_t group;
	__u16 *bg_flags = flex_gd->bg_flags;
	int i, gdb_off, gdb_num, err = 0;

	for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
		group = group_data->group;

		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);

		/*
		 * get_write_access() has been called on gdb_bh by
		 * ext4_add_new_descs().
		 */
		gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
		/* Update group descriptor block for new group */
		gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
						 gdb_off * EXT4_DESC_SIZE(sb));

		memset(gdp, 0, EXT4_DESC_SIZE(sb));
		ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
		ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
		err = ext4_set_bitmap_checksums(sb, group, gdp, group_data);
		if (err) {
			ext4_std_error(sb, err);
			break;
		}

		ext4_inode_table_set(sb, gdp, group_data->inode_table);
		ext4_free_group_clusters_set(sb, gdp,
					     group_data->free_clusters_count);
		ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
		if (ext4_has_group_desc_csum(sb))
			ext4_itable_unused_set(sb, gdp,
					       EXT4_INODES_PER_GROUP(sb));
		gdp->bg_flags = cpu_to_le16(*bg_flags);
		ext4_group_desc_csum_set(sb, group, gdp);

		err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
		if (unlikely(err)) {
			ext4_std_error(sb, err);
			break;
		}

		/*
		 * We can allocate memory for mb_alloc based on the new group
		 * descriptor
		 */
		err = ext4_mb_add_groupinfo(sb, group, gdp);
		if (err)
			break;
	}
	return err;
}

/*
 * ext4_update_super() updates the super block so that the newly added
 * groups can be seen by the filesystem.
 *
 * @sb: super block
 * @flex_gd: new added groups
 */
static void ext4_update_super(struct super_block *sb,
			      struct ext4_new_flex_group_data *flex_gd)
{
	ext4_fsblk_t blocks_count = 0;
	ext4_fsblk_t free_blocks = 0;
	ext4_fsblk_t reserved_blocks = 0;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int i;

	BUG_ON(flex_gd->count == 0 || group_data == NULL);
	/*
	 * Make the new blocks and inodes valid next.  We do this before
	 * increasing the group count so that once the group is enabled,
	 * all of its blocks and inodes are already valid.
	 *
	 * We always allocate group-by-group, then block-by-block or
	 * inode-by-inode within a group, so enabling these
	 * blocks/inodes before the group is live won't actually let us
	 * allocate the new space yet.
	 */
	for (i = 0; i < flex_gd->count; i++) {
		blocks_count += group_data[i].blocks_count;
		free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count);
	}

	reserved_blocks = ext4_r_blocks_count(es) * 100;
	reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es));
	reserved_blocks *= blocks_count;
	do_div(reserved_blocks, 100);
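
	/*
	 * The three lines above keep the reserved-block ratio constant; a
	 * worked example with illustrative numbers: if the old filesystem
	 * had 1000000 blocks of which 50000 (5%) were reserved, and the
	 * new groups add 200000 blocks, then
	 *
	 *	reserved_blocks = 50000 * 100 / 1000000 * 200000 / 100
	 *	                = 10000
	 *
	 * so the new groups contribute the same 5% to the reserved count.
	 * (Integer division makes this approximate for ratios that are not
	 * whole percentages.)
	 */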

	ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
	le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
		     flex_gd->count);
	le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) *
		     flex_gd->count);

	ext4_debug("free blocks count %llu", ext4_free_blocks_count(es));
	/*
	 * We need to protect s_groups_count against other CPUs seeing
	 * inconsistent state in the superblock.
	 *
	 * The precise rules we use are:
	 *
	 * * Writers must perform a smp_wmb() after updating all
	 *   dependent data and before modifying the groups count
	 *
	 * * Readers must perform an smp_rmb() after reading the groups
	 *   count and before reading any dependent data.
	 *
	 * NB. These rules can be relaxed when checking the group count
	 * while freeing data, as we can only allocate from a block
	 * group after serialising against the group count, and we can
	 * only then free after serialising in turn against that
	 * allocation.
	 */
	smp_wmb();

	/* Update the global fs size fields */
	sbi->s_groups_count += flex_gd->count;
	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));

	/* Update the reserved block counts only once the new group is
	 * active. */
	ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
				reserved_blocks);

	/* Update the free space counts */
	percpu_counter_add(&sbi->s_freeclusters_counter,
			   EXT4_NUM_B2C(sbi, free_blocks));
	percpu_counter_add(&sbi->s_freeinodes_counter,
			   EXT4_INODES_PER_GROUP(sb) * flex_gd->count);

	ext4_debug("free blocks count %llu",
		   percpu_counter_read(&sbi->s_freeclusters_counter));
	if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group;
		struct flex_groups *fg;

		flex_group = ext4_flex_group(sbi, group_data[0].group);
		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
		atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
			     &fg->free_clusters);
		atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
			   &fg->free_inodes);
	}

	/*
	 * Update the fs overhead information
	 */
	ext4_calculate_overhead(sb);
	es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead);

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: added %u groups: "
		       "%llu blocks (%llu free, %llu reserved)\n",
		       flex_gd->count,
		       blocks_count, free_blocks, reserved_blocks);
}

/* Add a flex group to an fs.  Ensure we handle all possible error conditions
 * _before_ we start modifying the filesystem, because we cannot abort the
 * transaction and not have it write the data to disk.
 */
static int ext4_flex_group_add(struct super_block *sb,
			       struct inode *resize_inode,
			       struct ext4_new_flex_group_data *flex_gd)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t o_blocks_count;
	ext4_grpblk_t last;
	ext4_group_t group;
	handle_t *handle;
	unsigned reserved_gdb;
	int err = 0, err2 = 0, credit;

	BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags);

	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
	o_blocks_count = ext4_blocks_count(es);
	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
	BUG_ON(last);

	err = setup_new_flex_group_blocks(sb, flex_gd);
	if (err)
		goto exit;
	/*
	 * We will always be modifying at least the superblock and GDT
	 * blocks.  If we are adding a group past the last current GDT block,
	 * we will also modify the inode and the dindirect block.  If we
	 * are adding a group with superblock/GDT backups we will also
	 * modify each of the reserved GDT dindirect blocks.
	 */
	credit = 3;	/* sb, resize inode, resize inode dindirect */
	/* GDT blocks */
	credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb));
	credit += reserved_gdb;	/* Reserved GDT dindirect blocks */
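	/*
	 * Example credit budget (numbers purely illustrative): adding 128
	 * groups on a filesystem with EXT4_DESC_PER_BLOCK(sb) == 128 and
	 * 256 reserved GDT blocks gives
	 *
	 *	credit = 3 + (1 + DIV_ROUND_UP(128, 128)) + 256 = 261
	 *
	 * i.e. one buffer each for the superblock, the resize inode and its
	 * dindirect block, two for GDT blocks, and one per reserved GDT
	 * dindirect block.
	 */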
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto exit;
	}

	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
	if (err)
		goto exit_journal;

	group = flex_gd->groups[0].group;
	BUG_ON(group != sbi->s_groups_count);
	err = ext4_add_new_descs(handle, sb, group,
				 resize_inode, flex_gd->count);
	if (err)
		goto exit_journal;

	err = ext4_setup_new_descs(handle, sb, flex_gd);
	if (err)
		goto exit_journal;

	ext4_update_super(sb, flex_gd);

	err = ext4_handle_dirty_super(handle, sb);

exit_journal:
	err2 = ext4_journal_stop(handle);
	if (!err)
		err = err2;

	if (!err) {
		int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
		int gdb_num_end = ((group + flex_gd->count - 1) /
				   EXT4_DESC_PER_BLOCK(sb));
		int meta_bg = ext4_has_feature_meta_bg(sb) &&
			      gdb_num >= le32_to_cpu(es->s_first_meta_bg);
		sector_t padding_blocks = meta_bg ? 0 : sbi->s_sbh->b_blocknr -
					  ext4_group_first_block_no(sb, 0);
		sector_t old_gdb = 0;

		update_backups(sb, ext4_group_first_block_no(sb, 0),
			       (char *)es, sizeof(struct ext4_super_block), 0);
		for (; gdb_num <= gdb_num_end; gdb_num++) {
			struct buffer_head *gdb_bh;

			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
						     gdb_num);
			if (old_gdb == gdb_bh->b_blocknr)
				continue;
			update_backups(sb, gdb_bh->b_blocknr - padding_blocks,
				       gdb_bh->b_data, gdb_bh->b_size, meta_bg);
			old_gdb = gdb_bh->b_blocknr;
		}
	}
exit:
	return err;
}

static int ext4_setup_next_flex_gd(struct super_block *sb,
				   struct ext4_new_flex_group_data *flex_gd,
				   ext4_fsblk_t n_blocks_count)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	ext4_fsblk_t o_blocks_count;
	ext4_group_t n_group;
	ext4_group_t group;
	ext4_group_t last_group;
	ext4_grpblk_t last;
	ext4_grpblk_t clusters_per_group;
	unsigned long i;

	clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb);

	o_blocks_count = ext4_blocks_count(es);

	if (o_blocks_count == n_blocks_count)
		return 0;

	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
	BUG_ON(last);
	ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);

	last_group = group | (flex_gd->resize_bg - 1);
	if (last_group > n_group)
		last_group = n_group;

	flex_gd->count = last_group - group + 1;

	for (i = 0; i < flex_gd->count; i++) {
		int overhead;

		group_data[i].group = group + i;
		group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb);
		overhead = ext4_group_overhead_blocks(sb, group + i);
		group_data[i].mdata_blocks = overhead;
		group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb);
		if (ext4_has_group_desc_csum(sb)) {
			flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
					       EXT4_BG_INODE_UNINIT;
			if (!test_opt(sb, INIT_INODE_TABLE))
				flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED;
		} else
			flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
	}

	if (last_group == n_group && ext4_has_group_desc_csum(sb))
		/* We need to initialize block bitmap of last group. */
		flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;

	if ((last_group == n_group) && (last != clusters_per_group - 1)) {
		group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1);
		group_data[i - 1].free_clusters_count -= clusters_per_group -
						       last - 1;
	}

	return 1;
}
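
/*
 * Example of the per-step grouping above (illustrative numbers): with
 * flex_gd->resize_bg == 16 and the next group to add being group 35,
 * last_group = 35 | 15 = 47, so this step prepares groups 35-47 and later
 * steps continue at flex group boundaries (48, 64, ...).  If the new size
 * ends mid-group, the final group's blocks_count and free_clusters_count
 * are trimmed to the last + 1 clusters actually present.
 */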
1655
1656 /* Add group descriptor data to an existing or new group descriptor block.
1657 * Ensure we handle all possible error conditions _before_ we start modifying
1658 * the filesystem, because we cannot abort the transaction and not have it
1659 * write the data to disk.
1660 *
1661 * If we are on a GDT block boundary, we need to get the reserved GDT block.
1662 * Otherwise, we may need to add backup GDT blocks for a sparse group.
1663 *
1664 * We only need to hold the superblock lock while we are actually adding
1665 * in the new group's counts to the superblock. Prior to that we have
1666 * not really "added" the group at all. We re-check that we are still
1667 * adding in the last group in case things have changed since verifying.
1668 */
int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
{
	struct ext4_new_flex_group_data flex_gd;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
	struct inode *inode = NULL;
	int gdb_off;
	int err;
	__u16 bg_flags = 0;

	gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);

	if (gdb_off == 0 && !ext4_has_feature_sparse_super(sb)) {
		ext4_warning(sb, "Can't resize non-sparse filesystem further");
		return -EPERM;
	}

	if (ext4_blocks_count(es) + input->blocks_count <
	    ext4_blocks_count(es)) {
		ext4_warning(sb, "blocks_count overflow");
		return -EINVAL;
	}

	if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
	    le32_to_cpu(es->s_inodes_count)) {
		ext4_warning(sb, "inodes_count overflow");
		return -EINVAL;
	}

	if (reserved_gdb || gdb_off == 0) {
		if (!ext4_has_feature_resize_inode(sb) ||
		    !le16_to_cpu(es->s_reserved_gdt_blocks)) {
			ext4_warning(sb,
				     "No reserved GDT blocks, can't resize");
			return -EPERM;
		}
		inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
		if (IS_ERR(inode)) {
			ext4_warning(sb, "Error opening resize inode");
			return PTR_ERR(inode);
		}
	}

	err = verify_group_input(sb, input);
	if (err)
		goto out;

	err = ext4_alloc_flex_bg_array(sb, input->group + 1);
	if (err)
		goto out;

	err = ext4_mb_alloc_groupinfo(sb, input->group + 1);
	if (err)
		goto out;

	flex_gd.count = 1;
	flex_gd.groups = input;
	flex_gd.bg_flags = &bg_flags;
	err = ext4_flex_group_add(sb, inode, &flex_gd);
out:
	iput(inode);
	return err;
} /* ext4_group_add */
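
/*
 * For reference, ext4_group_add() is normally reached from user space via
 * the EXT4_IOC_GROUP_ADD ioctl. A minimal sketch (illustrative only; the
 * mount point and field values below are made up, and must describe a
 * real, correctly laid out new group to succeed):
 *
 *	struct ext4_new_group_input input = {
 *		.group		 = 128,		// number of the new group
 *		.block_bitmap	 = 4194304,	// block bitmap location
 *		.inode_bitmap	 = 4194305,	// inode bitmap location
 *		.inode_table	 = 4194306,	// inode table start
 *		.blocks_count	 = 32768,	// blocks in the new group
 *		.reserved_blocks = 1638,	// reserved block count
 *	};
 *	int fd = open("/mnt", O_RDONLY);
 *	if (ioctl(fd, EXT4_IOC_GROUP_ADD, &input) < 0)
 *		perror("EXT4_IOC_GROUP_ADD");
 */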

/*
 * Extend a group without checking; the caller is assumed to have already
 * done the necessary checks.
 */
static int ext4_group_extend_no_check(struct super_block *sb,
				      ext4_fsblk_t o_blocks_count,
				      ext4_grpblk_t add)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	handle_t *handle;
	int err = 0, err2;

	/* We will update the superblock, one block bitmap, and
	 * one group descriptor via ext4_group_add_blocks().
	 */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		ext4_warning(sb, "error %d on journal start", err);
		return err;
	}

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
	if (err) {
		ext4_warning(sb, "error %d on journal write access", err);
		goto errout;
	}

	ext4_blocks_count_set(es, o_blocks_count + add);
	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
	ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
	/* We add the blocks to the bitmap and set the group need init bit */
	err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
	if (err)
		goto errout;
	ext4_handle_dirty_super(handle, sb);
	ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
errout:
	err2 = ext4_journal_stop(handle);
	if (err2 && !err)
		err = err2;

	if (!err) {
		if (test_opt(sb, DEBUG))
			printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
			       "blocks\n", ext4_blocks_count(es));
		update_backups(sb, ext4_group_first_block_no(sb, 0),
			       (char *)es, sizeof(struct ext4_super_block), 0);
	}
	return err;
}

/*
 * Extend the filesystem to the new number of blocks specified.  This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group.  It can be accessed via ioctl, or by "remount,resize=<size>"
 * for emergencies (because it has no dependencies on reserved blocks).
 *
 * If we _really_ wanted, we could use default values to call ext4_group_add()
 * and allow the "remount" trick to work for arbitrary resizing, assuming
 * enough GDT blocks are reserved to grow to the desired size.
 */
int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
		      ext4_fsblk_t n_blocks_count)
{
	ext4_fsblk_t o_blocks_count;
	ext4_grpblk_t last;
	ext4_grpblk_t add;
	struct buffer_head *bh;
	int err;
	ext4_group_t group;

	o_blocks_count = ext4_blocks_count(es);

	if (test_opt(sb, DEBUG))
		ext4_msg(sb, KERN_DEBUG,
			 "extending last group from %llu to %llu blocks",
			 o_blocks_count, n_blocks_count);

	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
		return 0;

	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
		ext4_msg(sb, KERN_ERR,
			 "filesystem too large to resize to %llu blocks safely",
			 n_blocks_count);
		return -EINVAL;
	}

	if (n_blocks_count < o_blocks_count) {
		ext4_warning(sb, "can't shrink FS - resize aborted");
		return -EINVAL;
	}

	/* Handle the remaining blocks in the last group only. */
	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);

	if (last == 0) {
		ext4_warning(sb, "need to use ext2online to resize further");
		return -EPERM;
	}

	add = EXT4_BLOCKS_PER_GROUP(sb) - last;

	if (o_blocks_count + add < o_blocks_count) {
		ext4_warning(sb, "blocks_count overflow");
		return -EINVAL;
	}

	if (o_blocks_count + add > n_blocks_count)
		add = n_blocks_count - o_blocks_count;

	if (o_blocks_count + add < n_blocks_count)
		ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
			     o_blocks_count + add, add);

	/* See if the device is actually as big as what was requested */
	bh = sb_bread(sb, o_blocks_count + add - 1);
	if (!bh) {
		ext4_warning(sb, "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

	err = ext4_group_extend_no_check(sb, o_blocks_count, add);
	return err;
} /* ext4_group_extend */
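
/*
 * For reference, this path is reached via the EXT4_IOC_GROUP_EXTEND ioctl,
 * which takes the desired total block count. A minimal sketch (illustrative;
 * the mount point and size are made up):
 *
 *	unsigned long n_blocks = 1048576;	// new total size in blocks
 *	int fd = open("/mnt", O_RDONLY);
 *	if (ioctl(fd, EXT4_IOC_GROUP_EXTEND, &n_blocks) < 0)
 *		perror("EXT4_IOC_GROUP_EXTEND");
 */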
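/*
 * Number of descriptor blocks needed to hold @groups group descriptors,
 * i.e. a ceiling division. Illustrative example: with 4KiB blocks and
 * 64-byte descriptors, EXT4_DESC_PER_BLOCK() is 64, so 100 groups need
 * two descriptor blocks.
 */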
static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
{
	return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
}

/*
 * Release the resize inode and drop the resize_inode feature if there
 * are no more reserved gdt blocks, and then convert the file system
 * to enable meta_bg.
 */
static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
{
	handle_t *handle;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t nr;
	int i, ret, err = 0;
	int credits = 1;

	ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
	if (inode) {
		if (es->s_reserved_gdt_blocks) {
			ext4_error(sb, "Unexpected non-zero "
				   "s_reserved_gdt_blocks");
			return -EPERM;
		}

		/* Do a quick sanity check of the resize inode */
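		/*
		 * Descriptive note: i_blocks is counted in 512-byte sectors,
		 * so a clean resize inode owns exactly one block's worth of
		 * data (one cluster on bigalloc filesystems): its
		 * double-indirect block. Only i_data[EXT4_DIND_BLOCK] may
		 * be set.
		 */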
		if (inode->i_blocks != 1 << (inode->i_blkbits -
					     (9 - sbi->s_cluster_bits)))
			goto invalid_resize_inode;
		for (i = 0; i < EXT4_N_BLOCKS; i++) {
			if (i == EXT4_DIND_BLOCK) {
				if (ei->i_data[i])
					continue;
				else
					goto invalid_resize_inode;
			}
			if (ei->i_data[i])
				goto invalid_resize_inode;
		}
		credits += 3;	/* block bitmap, bg descriptor, resize inode */
	}

	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
	if (err)
		goto errout;

	ext4_clear_feature_resize_inode(sb);
	ext4_set_feature_meta_bg(sb);
	sbi->s_es->s_first_meta_bg =
		cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
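	/*
	 * Descriptive note: groups whose descriptors fit in the first
	 * s_first_meta_bg descriptor blocks keep the old-style GDT layout;
	 * groups added beyond this point use the meta_bg layout, where each
	 * block of descriptors lives inside the group range it describes.
	 */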

	err = ext4_handle_dirty_super(handle, sb);
	if (err) {
		ext4_std_error(sb, err);
		goto errout;
	}

	if (inode) {
		nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
		ext4_free_blocks(handle, inode, NULL, nr, 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
		ei->i_data[EXT4_DIND_BLOCK] = 0;
		inode->i_blocks = 0;

		err = ext4_mark_inode_dirty(handle, inode);
		if (err)
			ext4_std_error(sb, err);
	}

errout:
	ret = ext4_journal_stop(handle);
	return err ? err : ret;

invalid_resize_inode:
	ext4_error(sb, "corrupted/inconsistent resize inode");
	return -EINVAL;
}

/*
 * ext4_resize_fs() resizes a fs to the new size specified by @n_blocks_count
 *
 * @sb: super block of the fs to be resized
 * @n_blocks_count: the number of blocks in the resized fs
 */
int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
{
	struct ext4_new_flex_group_data *flex_gd = NULL;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *bh;
	struct inode *resize_inode = NULL;
	ext4_grpblk_t add, offset;
	unsigned long n_desc_blocks;
	unsigned long o_desc_blocks;
	ext4_group_t o_group;
	ext4_group_t n_group;
	ext4_fsblk_t o_blocks_count;
	ext4_fsblk_t n_blocks_count_retry = 0;
	unsigned long last_update_time = 0;
	int err = 0;
	int meta_bg;
	unsigned int flexbg_size = ext4_flex_bg_size(sbi);

	/* See if the device is actually as big as what was requested */
	bh = sb_bread(sb, n_blocks_count - 1);
	if (!bh) {
		ext4_warning(sb, "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

	/*
	 * For bigalloc, trim the requested size to the nearest cluster
	 * boundary to avoid creating an unusable filesystem. We do this
	 * silently, instead of returning an error, to avoid breaking
	 * callers that blindly resize the filesystem to the full size of
	 * the underlying block device.
	 */
	if (ext4_has_feature_bigalloc(sb))
		n_blocks_count &= ~((1 << EXT4_CLUSTER_BITS(sb)) - 1);
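	/*
	 * Illustrative example: with 16 blocks per cluster
	 * (EXT4_CLUSTER_BITS == 4), a request for 1000003 blocks is
	 * silently trimmed to 1000000 (1000003 & ~15), the largest
	 * cluster-aligned size not exceeding the request.
	 */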

retry:
	o_blocks_count = ext4_blocks_count(es);

	ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
		 "to %llu blocks", o_blocks_count, n_blocks_count);

	if (n_blocks_count < o_blocks_count) {
		/* On-line shrinking not supported */
		ext4_warning(sb, "can't shrink FS - resize aborted");
		return -EINVAL;
	}

	if (n_blocks_count == o_blocks_count)
		/* Nothing to do */
		return 0;

	n_group = ext4_get_group_number(sb, n_blocks_count - 1);
	if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
		ext4_warning(sb, "resize would cause inodes_count overflow");
		return -EINVAL;
	}
	ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);

	n_desc_blocks = num_desc_blocks(sb, n_group + 1);
	o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);

	meta_bg = ext4_has_feature_meta_bg(sb);

	if (ext4_has_feature_resize_inode(sb)) {
		if (meta_bg) {
			ext4_error(sb, "resize_inode and meta_bg enabled "
				   "simultaneously");
			return -EINVAL;
		}
		if (n_desc_blocks > o_desc_blocks +
		    le16_to_cpu(es->s_reserved_gdt_blocks)) {
			n_blocks_count_retry = n_blocks_count;
			n_desc_blocks = o_desc_blocks +
				le16_to_cpu(es->s_reserved_gdt_blocks);
			n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
			n_blocks_count = (ext4_fsblk_t)n_group *
				EXT4_BLOCKS_PER_GROUP(sb) +
				le32_to_cpu(es->s_first_data_block);
			n_group--; /* set to last group number */
		}
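		/*
		 * Descriptive note: when the request overruns the reserved
		 * GDT blocks, the resize is clamped to the largest size the
		 * resize inode can reach, the original target is parked in
		 * n_blocks_count_retry, and the retry pass converts the
		 * filesystem to meta_bg before growing the rest of the way.
		 */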

		if (!resize_inode)
			resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
						 EXT4_IGET_SPECIAL);
		if (IS_ERR(resize_inode)) {
			ext4_warning(sb, "Error opening resize inode");
			return PTR_ERR(resize_inode);
		}
	}

	if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) {
		err = ext4_convert_meta_bg(sb, resize_inode);
		if (err)
			goto out;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		if (n_blocks_count_retry) {
			n_blocks_count = n_blocks_count_retry;
			n_blocks_count_retry = 0;
			goto retry;
		}
	}

	/*
	 * Make sure the last group has enough space so that it's
	 * guaranteed to have enough space for all metadata blocks
	 * that it might need to hold.  (We might not need to store
	 * the inode table blocks in the last block group, but there
	 * will be cases where this might be needed.)
	 */
	if ((ext4_group_first_block_no(sb, n_group) +
	     ext4_group_overhead_blocks(sb, n_group) + 2 +
	     sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
		n_blocks_count = ext4_group_first_block_no(sb, n_group);
		n_group--;
		n_blocks_count_retry = 0;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		goto retry;
	}

	/* extend the last group */
	if (n_group == o_group)
		add = n_blocks_count - o_blocks_count;
	else
		add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1));
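	/*
	 * Descriptive note: @offset is the offset of the last existing
	 * block within its group, so when new groups follow, @add tops the
	 * old last group up to a full group boundary; otherwise it is just
	 * the difference between the new and old sizes.
	 */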
	if (add > 0) {
		err = ext4_group_extend_no_check(sb, o_blocks_count, add);
		if (err)
			goto out;
	}

	if (ext4_blocks_count(es) == n_blocks_count && n_blocks_count_retry == 0)
		goto out;

	err = ext4_alloc_flex_bg_array(sb, n_group + 1);
	if (err)
		goto out;

	err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
	if (err)
		goto out;

	flex_gd = alloc_flex_gd(flexbg_size);
	if (flex_gd == NULL) {
		err = -ENOMEM;
		goto out;
	}

	/* Add flex groups. Note that a regular group is a
	 * flex group with 1 group.
	 */
	while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count)) {
		if (jiffies - last_update_time > HZ * 10) {
			if (last_update_time)
				ext4_msg(sb, KERN_INFO,
					 "resized to %llu blocks",
					 ext4_blocks_count(es));
			last_update_time = jiffies;
		}
		if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
			break;
		err = ext4_flex_group_add(sb, resize_inode, flex_gd);
		if (unlikely(err))
			break;
	}

	if (!err && n_blocks_count_retry) {
		n_blocks_count = n_blocks_count_retry;
		n_blocks_count_retry = 0;
		free_flex_gd(flex_gd);
		flex_gd = NULL;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		goto retry;
	}

out:
	if (flex_gd)
		free_flex_gd(flex_gd);
	if (resize_inode != NULL)
		iput(resize_inode);
	if (err)
		ext4_warning(sb, "error (%d) occurred during "
			     "file system resize", err);
	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
		 ext4_blocks_count(es));
	return err;
}
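
/*
 * For reference, ext4_resize_fs() is driven by the EXT4_IOC_RESIZE_FS
 * ioctl, which is what resize2fs uses for online growth. A minimal sketch
 * (illustrative; the mount point and size are made up):
 *
 *	__u64 n_blocks = 268435456;	// desired total size in blocks
 *	int fd = open("/mnt", O_RDONLY);
 *	if (ioctl(fd, EXT4_IOC_RESIZE_FS, &n_blocks) < 0)
 *		perror("EXT4_IOC_RESIZE_FS");
 */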