/**
 * resize.c
 *
 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include "fsck.h"

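/*
 * Compute the geometry of the resized filesystem in *sb: the new block and
 * segment counts, and the start addresses and sizes of the SIT, NAT, SSA,
 * and main areas.  Returns -1 when the target device is too small to hold
 * the resulting layout.
 */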
static int get_new_sb(struct f2fs_super_block *sb)
{
	u_int32_t zone_size_bytes, zone_align_start_offset;
	u_int32_t blocks_for_sit, blocks_for_nat, blocks_for_ssa;
	u_int32_t sit_segments, diff, total_meta_segments;
	u_int32_t total_valid_blks_available;
	u_int32_t sit_bitmap_size, max_sit_bitmap_size;
	u_int32_t max_nat_bitmap_size, max_nat_segments;
	u_int32_t segment_size_bytes = 1 << (get_sb(log_blocksize) +
					get_sb(log_blocks_per_seg));
	u_int32_t blks_per_seg = 1 << get_sb(log_blocks_per_seg);
	u_int32_t segs_per_zone = get_sb(segs_per_sec) * get_sb(secs_per_zone);

	set_sb(block_count, c.target_sectors >>
				get_sb(log_sectors_per_block));

	/* align the first usable block to the zone size */
	zone_size_bytes = segment_size_bytes * segs_per_zone;
	zone_align_start_offset =
		(c.start_sector * c.sector_size +
		2 * F2FS_BLKSIZE + zone_size_bytes - 1) /
		zone_size_bytes * zone_size_bytes -
		c.start_sector * c.sector_size;

	set_sb(segment_count, (c.target_sectors * c.sector_size -
				zone_align_start_offset) / segment_size_bytes /
				c.segs_per_sec * c.segs_per_sec);

	/* the SIT is stored in two copies, sized by the total segment count */
	blocks_for_sit = SIZE_ALIGN(get_sb(segment_count), SIT_ENTRY_PER_BLOCK);
	sit_segments = SEG_ALIGN(blocks_for_sit);
	set_sb(segment_count_sit, sit_segments * 2);
	set_sb(nat_blkaddr, get_sb(sit_blkaddr) +
			get_sb(segment_count_sit) * blks_per_seg);

	total_valid_blks_available = (get_sb(segment_count) -
			(get_sb(segment_count_ckpt) +
			get_sb(segment_count_sit))) * blks_per_seg;
	blocks_for_nat = SIZE_ALIGN(total_valid_blks_available,
					NAT_ENTRY_PER_BLOCK);
	set_sb(segment_count_nat, SEG_ALIGN(blocks_for_nat));

	sit_bitmap_size = ((get_sb(segment_count_sit) / 2) <<
				get_sb(log_blocks_per_seg)) / 8;
	if (sit_bitmap_size > MAX_SIT_BITMAP_SIZE)
		max_sit_bitmap_size = MAX_SIT_BITMAP_SIZE;
	else
		max_sit_bitmap_size = sit_bitmap_size;
	/*
	 * At least one segment must remain reserved for the NAT.  When the
	 * SIT bitmap is too large, expand the checkpoint area instead, which
	 * requires more checkpoint pages (cp_payload).
	 */
	if (max_sit_bitmap_size > MAX_SIT_BITMAP_SIZE_IN_CKPT) {
		max_nat_bitmap_size = CHECKSUM_OFFSET -
				sizeof(struct f2fs_checkpoint) + 1;
		set_sb(cp_payload, F2FS_BLK_ALIGN(max_sit_bitmap_size));
	} else {
		max_nat_bitmap_size = CHECKSUM_OFFSET -
				sizeof(struct f2fs_checkpoint) + 1 -
				max_sit_bitmap_size;
		set_sb(cp_payload, 0);
	}

	max_nat_segments = (max_nat_bitmap_size * 8) >>
					get_sb(log_blocks_per_seg);

	if (get_sb(segment_count_nat) > max_nat_segments)
		set_sb(segment_count_nat, max_nat_segments);

	/* the NAT is stored in two copies as well */
	set_sb(segment_count_nat, get_sb(segment_count_nat) * 2);

	set_sb(ssa_blkaddr, get_sb(nat_blkaddr) +
			get_sb(segment_count_nat) * blks_per_seg);

	total_valid_blks_available = (get_sb(segment_count) -
			(get_sb(segment_count_ckpt) +
			get_sb(segment_count_sit) +
			get_sb(segment_count_nat))) * blks_per_seg;

	/* one summary block per main-area segment */
	blocks_for_ssa = total_valid_blks_available / blks_per_seg + 1;

	set_sb(segment_count_ssa, SEG_ALIGN(blocks_for_ssa));

	/* pad the metadata area out to a whole number of zones */
	total_meta_segments = get_sb(segment_count_ckpt) +
			get_sb(segment_count_sit) +
			get_sb(segment_count_nat) +
			get_sb(segment_count_ssa);

	diff = total_meta_segments % segs_per_zone;
	if (diff)
		set_sb(segment_count_ssa, get_sb(segment_count_ssa) +
			(segs_per_zone - diff));

	set_sb(main_blkaddr, get_sb(ssa_blkaddr) + get_sb(segment_count_ssa) *
			blks_per_seg);

	set_sb(segment_count_main, get_sb(segment_count) -
			(get_sb(segment_count_ckpt) +
			get_sb(segment_count_sit) +
			get_sb(segment_count_nat) +
			get_sb(segment_count_ssa)));

	set_sb(section_count, get_sb(segment_count_main) /
				get_sb(segs_per_sec));

	set_sb(segment_count_main, get_sb(section_count) *
				get_sb(segs_per_sec));

	/* Let's determine the best reserved and overprovisioned space */
	c.new_overprovision = get_best_overprovision(sb);
	c.new_reserved_segments =
		(2 * (100 / c.new_overprovision + 1) + 6) *
					get_sb(segs_per_sec);
	if ((get_sb(segment_count_main) - 2) < c.new_reserved_segments ||
		get_sb(segment_count_main) * blks_per_seg >
						get_sb(block_count)) {
		MSG(0, "\tError: Device size is not sufficient for F2FS volume, "
			"more segments needed = %u\n",
			c.new_reserved_segments -
			(get_sb(segment_count_main) - 2));
		return -1;
	}
	return 0;
}

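/*
 * Shift every valid block of the main area forward by @offset blocks.
 * Segments are walked from the last one down to the first, so each source
 * block is read before the shifted range can overwrite it; the new address
 * of each moved block is then patched into its owner via the summary entry
 * (data blocks update their node, node blocks update the NAT).
 */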
static void migrate_main(struct f2fs_sb_info *sbi, unsigned int offset)
{
	void *raw = calloc(BLOCK_SZ, 1);
	struct seg_entry *se;
	block_t from, to;
	int i, j, ret;
	struct f2fs_summary sum;

	ASSERT(raw != NULL);

	for (i = TOTAL_SEGS(sbi) - 1; i >= 0; i--) {
		se = get_seg_entry(sbi, i);
		if (!se->valid_blocks)
			continue;

		for (j = sbi->blocks_per_seg - 1; j >= 0; j--) {
			if (!f2fs_test_bit(j,
					(const char *)se->cur_valid_map))
				continue;

			from = START_BLOCK(sbi, i) + j;
			ret = dev_read_block(raw, from);
			ASSERT(ret >= 0);

			to = from + offset;
			ret = dev_write_block(raw, to);
			ASSERT(ret >= 0);

			get_sum_entry(sbi, from, &sum);

			if (IS_DATASEG(se->type))
				update_data_blkaddr(sbi, le32_to_cpu(sum.nid),
					le16_to_cpu(sum.ofs_in_node), to);
			else
				update_nat_blkaddr(sbi, 0,
						le32_to_cpu(sum.nid), to);
		}
	}
	free(raw);
	DBG(0, "Info: Done migrating the Main area: main_blkaddr = 0x%x -> 0x%x\n",
			START_BLOCK(sbi, 0),
			START_BLOCK(sbi, 0) + offset);
}

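/* Write the summary block of @segno out to @new_sum_blk_addr. */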
static void move_ssa(struct f2fs_sb_info *sbi, unsigned int segno,
					block_t new_sum_blk_addr)
{
	struct f2fs_summary_block *sum_blk;
	int type;

	sum_blk = get_sum_block(sbi, segno, &type);
	if (type < SEG_TYPE_MAX) {
		int ret;

		ret = dev_write_block(sum_blk, new_sum_blk_addr);
		ASSERT(ret >= 0);
		DBG(1, "Write summary block: (%d) segno=%x/%x --> (%d) %x\n",
				type, segno, GET_SUM_BLKADDR(sbi, segno),
				IS_SUM_NODE_SEG(sum_blk->footer),
				new_sum_blk_addr);
	}
	/* get_sum_block() returns a freshly allocated block for these types */
	if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
			type == SEG_TYPE_MAX) {
		free(sum_blk);
	}
	DBG(1, "Info: Done moving the SSA block\n");
}

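/*
 * Relocate the SSA area to the address recorded in @new_sb.  Depending on
 * how the old and new areas overlap, copy in ascending or descending order
 * so that no summary block is overwritten before it has been moved.  Slots
 * beyond the old segment range are zero-filled.
 */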
static void migrate_ssa(struct f2fs_sb_info *sbi,
		struct f2fs_super_block *new_sb, unsigned int offset)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	block_t old_sum_blkaddr = get_sb(ssa_blkaddr);
	block_t new_sum_blkaddr = get_newsb(ssa_blkaddr);
	block_t end_sum_blkaddr = get_newsb(main_blkaddr);
	block_t expand_sum_blkaddr = new_sum_blkaddr +
					TOTAL_SEGS(sbi) - offset;
	block_t blkaddr;
	int ret;
	void *zero_block = calloc(BLOCK_SZ, 1);
	ASSERT(zero_block);

	if (offset && new_sum_blkaddr < old_sum_blkaddr + offset) {
		/* the new area starts inside the old one: copy forward */
		blkaddr = new_sum_blkaddr;
		while (blkaddr < end_sum_blkaddr) {
			if (blkaddr < expand_sum_blkaddr) {
				move_ssa(sbi, offset++, blkaddr++);
			} else {
				ret = dev_write_block(zero_block, blkaddr++);
				ASSERT(ret >= 0);
			}
		}
	} else {
		/* otherwise copy backward from the end */
		blkaddr = end_sum_blkaddr - 1;
		offset = TOTAL_SEGS(sbi) - 1;
		while (blkaddr >= new_sum_blkaddr) {
			if (blkaddr >= expand_sum_blkaddr) {
				ret = dev_write_block(zero_block, blkaddr--);
				ASSERT(ret >= 0);
			} else {
				move_ssa(sbi, offset--, blkaddr--);
			}
		}
	}

	DBG(0, "Info: Done migrating SSA blocks: sum_blkaddr = 0x%x -> 0x%x\n",
		old_sum_blkaddr, new_sum_blkaddr);
	free(zero_block);
}

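/*
 * Check whether the NAT can shrink to the size implied by @new_sb: every
 * NAT block that would be truncated must be completely zero, i.e. contain
 * no live entries.  On success lower nm_i->max_nid; otherwise return -1.
 */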
static int shrink_nats(struct f2fs_sb_info *sbi,
				struct f2fs_super_block *new_sb)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	block_t old_nat_blkaddr = get_sb(nat_blkaddr);
	unsigned int nat_blocks;
	void *nat_block, *zero_block;
	int nid, ret, new_max_nid;
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off;

	nat_block = malloc(BLOCK_SZ);
	ASSERT(nat_block);
	zero_block = calloc(BLOCK_SZ, 1);
	ASSERT(zero_block);

	nat_blocks = get_newsb(segment_count_nat) >> 1;
	nat_blocks = nat_blocks << get_sb(log_blocks_per_seg);
	new_max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;

	for (nid = nm_i->max_nid - 1; nid > new_max_nid;
					nid -= NAT_ENTRY_PER_BLOCK) {
		block_off = nid / NAT_ENTRY_PER_BLOCK;
		seg_off = block_off >> sbi->log_blocks_per_seg;
		block_addr = (pgoff_t)(old_nat_blkaddr +
			(seg_off << sbi->log_blocks_per_seg << 1) +
			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));

		if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
			block_addr += sbi->blocks_per_seg;

		ret = dev_read_block(nat_block, block_addr);
		ASSERT(ret >= 0);

		if (memcmp(zero_block, nat_block, BLOCK_SZ)) {
			ret = -1;
			goto not_avail;
		}
	}
	ret = 0;
	nm_i->max_nid = new_max_nid;
not_avail:
	free(nat_block);
	free(zero_block);
	return ret;
}

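/*
 * Copy the live NAT blocks from the old on-disk area to the address
 * recorded in @new_sb, then zero-fill the NAT blocks covering the newly
 * added nid range.
 */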
static void migrate_nat(struct f2fs_sb_info *sbi,
					struct f2fs_super_block *new_sb)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	block_t old_nat_blkaddr = get_sb(nat_blkaddr);
	block_t new_nat_blkaddr = get_newsb(nat_blkaddr);
	unsigned int nat_blocks;
	void *nat_block;
	int nid, ret, new_max_nid;
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off;

	nat_block = malloc(BLOCK_SZ);
	ASSERT(nat_block);

	for (nid = nm_i->max_nid - 1; nid >= 0; nid -= NAT_ENTRY_PER_BLOCK) {
		block_off = nid / NAT_ENTRY_PER_BLOCK;
		seg_off = block_off >> sbi->log_blocks_per_seg;
		block_addr = (pgoff_t)(old_nat_blkaddr +
			(seg_off << sbi->log_blocks_per_seg << 1) +
			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));

		/* the version bitmap selects which of the two copies is live */
		if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
			block_addr += sbi->blocks_per_seg;

		ret = dev_read_block(nat_block, block_addr);
		ASSERT(ret >= 0);

		block_addr = (pgoff_t)(new_nat_blkaddr +
			(seg_off << sbi->log_blocks_per_seg << 1) +
			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));

		/* new bitmap should be zeros */
		ret = dev_write_block(nat_block, block_addr);
		ASSERT(ret >= 0);
	}
	/* zero out newly assigned nids */
	memset(nat_block, 0, BLOCK_SZ);
	nat_blocks = get_newsb(segment_count_nat) >> 1;
	nat_blocks = nat_blocks << get_sb(log_blocks_per_seg);
	new_max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;

	DBG(1, "Write NAT block: %x->%x, segment_count_nat=%x->%x\n",
			old_nat_blkaddr, new_nat_blkaddr,
			get_sb(segment_count_nat),
			get_newsb(segment_count_nat));

	for (nid = nm_i->max_nid; nid < new_max_nid;
				nid += NAT_ENTRY_PER_BLOCK) {
		block_off = nid / NAT_ENTRY_PER_BLOCK;
		seg_off = block_off >> sbi->log_blocks_per_seg;
		block_addr = (pgoff_t)(new_nat_blkaddr +
			(seg_off << sbi->log_blocks_per_seg << 1) +
			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));
		ret = dev_write_block(nat_block, block_addr);
		ASSERT(ret >= 0);
		DBG(3, "Write NAT: %lx\n", block_addr);
	}
	DBG(0, "Info: Done migrating NAT blocks: nat_blkaddr = 0x%x -> 0x%x\n",
			old_nat_blkaddr, new_nat_blkaddr);
}

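/*
 * Rebuild the SIT at the address recorded in @new_sb: zero-fill the whole
 * new area first, then write one entry per surviving segment, shifting
 * segment numbers down by @offset (segments below @offset were emptied by
 * defragmentation).
 */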
static void migrate_sit(struct f2fs_sb_info *sbi,
		struct f2fs_super_block *new_sb, unsigned int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int ofs = 0, pre_ofs = 0;
	unsigned int segno, index;
	struct f2fs_sit_block *sit_blk = calloc(BLOCK_SZ, 1);
	block_t sit_blks = get_newsb(segment_count_sit) <<
						(sbi->log_blocks_per_seg - 1);
	struct seg_entry *se;
	block_t blk_addr = 0;
	int ret;

	ASSERT(sit_blk);

	/* initialize with zeros */
	for (index = 0; index < sit_blks; index++) {
		ret = dev_write_block(sit_blk, get_newsb(sit_blkaddr) + index);
		ASSERT(ret >= 0);
		DBG(3, "Write zero sit: %x\n", get_newsb(sit_blkaddr) + index);
	}

	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
		struct f2fs_sit_entry *sit;

		se = get_seg_entry(sbi, segno);
		if (segno < offset) {
			/* these segments were freed by defragmentation */
			ASSERT(se->valid_blocks == 0);
			continue;
		}

		ofs = SIT_BLOCK_OFFSET(sit_i, segno - offset);

		/* crossed into the next SIT block: flush the previous one */
		if (ofs != pre_ofs) {
			blk_addr = get_newsb(sit_blkaddr) + pre_ofs;
			ret = dev_write_block(sit_blk, blk_addr);
			ASSERT(ret >= 0);
			DBG(1, "Write valid sit: %x\n", blk_addr);

			pre_ofs = ofs;
			memset(sit_blk, 0, BLOCK_SZ);
		}

		sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno - offset)];
		memcpy(sit->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
		sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
							se->valid_blocks);
	}
	/* flush the last partially filled SIT block */
	blk_addr = get_newsb(sit_blkaddr) + ofs;
	ret = dev_write_block(sit_blk, blk_addr);
	ASSERT(ret >= 0);
	DBG(1, "Write valid sit: %x\n", blk_addr);

	free(sit_blk);
	DBG(0, "Info: Done restoring new SIT blocks: 0x%x\n",
			get_newsb(sit_blkaddr));
}

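/*
 * Write a checkpoint pack describing the resized layout: updated reserved,
 * overprovision, free, and user block counts, current segment numbers
 * shifted by @offset, and new SIT/NAT bitmap sizes.  The pack is written
 * into the slot not currently in use and the old pack is then invalidated.
 */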
static void rebuild_checkpoint(struct f2fs_sb_info *sbi,
			struct f2fs_super_block *new_sb, unsigned int offset)
{
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
	unsigned long long cp_ver = get_cp(checkpoint_ver);
	struct f2fs_checkpoint *new_cp;
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	unsigned int free_segment_count, new_segment_count;
	block_t new_cp_blks = 1 + get_newsb(cp_payload);
	block_t orphan_blks = 0;
	block_t new_cp_blk_no, old_cp_blk_no, orphan_blk_no;
	u_int32_t crc = 0;
	u32 flags;
	void *buf;
	int i, ret;

	new_cp = calloc(new_cp_blks * BLOCK_SZ, 1);
	ASSERT(new_cp);

	buf = malloc(BLOCK_SZ);
	ASSERT(buf);

	/* ovp / free segments */
	set_cp(rsvd_segment_count, c.new_reserved_segments);
	set_cp(overprov_segment_count, (get_newsb(segment_count_main) -
			get_cp(rsvd_segment_count)) *
			c.new_overprovision / 100);
	set_cp(overprov_segment_count, get_cp(overprov_segment_count) +
			get_cp(rsvd_segment_count));

	free_segment_count = get_free_segments(sbi);
	new_segment_count = get_newsb(segment_count_main) -
					get_sb(segment_count_main);

	set_cp(free_segment_count, free_segment_count + new_segment_count);
	set_cp(user_block_count, ((get_newsb(segment_count_main) -
			get_cp(overprov_segment_count)) * c.blks_per_seg));

	if (is_set_ckpt_flags(cp, CP_ORPHAN_PRESENT_FLAG))
		orphan_blks = __start_sum_addr(sbi) - 1;

	set_cp(cp_pack_start_sum, 1 + get_newsb(cp_payload));
	set_cp(cp_pack_total_block_count, 8 + orphan_blks + get_newsb(cp_payload));

	/* shift the current segment numbers down by the migration offset */
	for (i = 0; i < NO_CHECK_TYPE; i++) {
		if (i < CURSEG_HOT_NODE) {
			set_cp(cur_data_segno[i],
					CURSEG_I(sbi, i)->segno - offset);
		} else {
			int n = i - CURSEG_HOT_NODE;

			set_cp(cur_node_segno[n],
					CURSEG_I(sbi, i)->segno - offset);
		}
	}

	/* sit / nat ver bitmap bytesize */
	set_cp(sit_ver_bitmap_bytesize,
			((get_newsb(segment_count_sit) / 2) <<
			get_newsb(log_blocks_per_seg)) / 8);
	set_cp(nat_ver_bitmap_bytesize,
			((get_newsb(segment_count_nat) / 2) <<
			get_newsb(log_blocks_per_seg)) / 8);

	/* update nat_bits flag */
	flags = update_nat_bits_flags(new_sb, cp, get_cp(ckpt_flags));
	set_cp(ckpt_flags, flags);

	memcpy(new_cp, cp, (unsigned char *)cp->sit_nat_version_bitmap -
						(unsigned char *)cp);
	new_cp->checkpoint_ver = cpu_to_le64(cp_ver + 1);

	crc = f2fs_cal_crc32(F2FS_SUPER_MAGIC, new_cp, CHECKSUM_OFFSET);
	*((__le32 *)((unsigned char *)new_cp + CHECKSUM_OFFSET)) =
							cpu_to_le32(crc);

	/* Write a new checkpoint in the other set */
	new_cp_blk_no = old_cp_blk_no = get_sb(cp_blkaddr);
	if (sbi->cur_cp == 2)
		old_cp_blk_no += 1 << get_sb(log_blocks_per_seg);
	else
		new_cp_blk_no += 1 << get_sb(log_blocks_per_seg);

	/* write first cp */
	ret = dev_write_block(new_cp, new_cp_blk_no++);
	ASSERT(ret >= 0);

	memset(buf, 0, BLOCK_SZ);
	for (i = 0; i < get_newsb(cp_payload); i++) {
		ret = dev_write_block(buf, new_cp_blk_no++);
		ASSERT(ret >= 0);
	}

	/*
	 * Copy orphan blocks from the old cp pack to the new one; the start
	 * address must be computed once, outside the loop, so that the
	 * source address actually advances with each copied block.
	 */
	orphan_blk_no = old_cp_blk_no + 1 + get_sb(cp_payload);
	for (i = 0; i < orphan_blks; i++) {
		ret = dev_read_block(buf, orphan_blk_no++);
		ASSERT(ret >= 0);

		ret = dev_write_block(buf, new_cp_blk_no++);
		ASSERT(ret >= 0);
	}

	/* update summary blocks having nullified journal entries */
	for (i = 0; i < NO_CHECK_TYPE; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);

		ret = dev_write_block(curseg->sum_blk, new_cp_blk_no++);
		ASSERT(ret >= 0);
	}

	/* write the last cp */
	ret = dev_write_block(new_cp, new_cp_blk_no++);
	ASSERT(ret >= 0);

	/* Write nat bits */
	if (flags & CP_NAT_BITS_FLAG)
		write_nat_bits(sbi, new_sb, new_cp, sbi->cur_cp == 1 ? 2 : 1);

	/* disable old checkpoint */
	memset(buf, 0, BLOCK_SZ);
	ret = dev_write_block(buf, old_cp_blk_no);
	ASSERT(ret >= 0);

	free(buf);
	free(new_cp);
	DBG(0, "Info: Done rebuilding checkpoint blocks\n");
}

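/* Write @new_sb into both superblock slots at the front of the device. */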
static void rebuild_superblock(struct f2fs_super_block *new_sb)
{
	int index, ret;
	u_int8_t *buf;

	buf = calloc(BLOCK_SZ, 1);
	ASSERT(buf);

	memcpy(buf + F2FS_SUPER_OFFSET, new_sb, sizeof(*new_sb));
	for (index = 0; index < 2; index++) {
		ret = dev_write_block(buf, index);
		ASSERT(ret >= 0);
	}
	free(buf);
	DBG(0, "Info: Done rebuilding the superblock\n");
}

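/*
 * Entry point for resizing: compute the new geometry, migrate the main
 * area (preferring in-place defragmentation when the old and new regions
 * overlap, else shifting the whole data region), then relocate the SSA,
 * NAT, and SIT, and finally commit a new checkpoint and superblock.
 */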
int f2fs_resize(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_super_block new_sb_raw;
	struct f2fs_super_block *new_sb = &new_sb_raw;
	block_t end_blkaddr, old_main_blkaddr, new_main_blkaddr;
	unsigned int offset;
	unsigned int offset_seg = 0;
	int err = -1;

	/* flush NAT/SIT journal entries */
	flush_journal_entries(sbi);

	memcpy(new_sb, F2FS_RAW_SUPER(sbi), sizeof(*new_sb));
	if (get_new_sb(new_sb))
		return -1;

	/* check nat availability */
	if (get_sb(segment_count_nat) > get_newsb(segment_count_nat)) {
		err = shrink_nats(sbi, new_sb);
		if (err) {
			MSG(0, "\tError: Failed to shrink NATs\n");
			return err;
		}
	}

	print_raw_sb_info(sb);
	print_raw_sb_info(new_sb);

	old_main_blkaddr = get_sb(main_blkaddr);
	new_main_blkaddr = get_newsb(main_blkaddr);
	offset = new_main_blkaddr - old_main_blkaddr;
	end_blkaddr = (get_sb(segment_count_main) <<
			get_sb(log_blocks_per_seg)) + get_sb(main_blkaddr);

	err = -EAGAIN;
	if (new_main_blkaddr < end_blkaddr) {
		err = f2fs_defragment(sbi, old_main_blkaddr, offset,
						new_main_blkaddr, 0);
		if (!err)
			offset_seg = offset >> get_sb(log_blocks_per_seg);
		MSG(0, "Try to do defragmentation: %s\n", err ? "Skip" : "Done");
	}
	/* if defragmentation was skipped, move the whole data region */
	if (err)
		migrate_main(sbi, offset);

	migrate_ssa(sbi, new_sb, offset_seg);
	migrate_nat(sbi, new_sb);
	migrate_sit(sbi, new_sb, offset_seg);
	rebuild_checkpoint(sbi, new_sb, offset_seg);
	rebuild_superblock(new_sb);
	return 0;
}