/**
 * resize.c
 *
 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include "fsck.h"

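/*
 * Recompute the superblock geometry for the new device size given by
 * c.target_sectors: total block/segment counts and the placement of the
 * SIT, NAT and SSA metadata areas, with the main area following them.
 * This is essentially the same layout computation performed at mkfs
 * time.  Returns 0 on success, or -1 if the device is too small to
 * hold the resulting layout.
 */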
static int get_new_sb(struct f2fs_super_block *sb)
{
	u_int32_t zone_size_bytes;
	u_int64_t zone_align_start_offset;
	u_int32_t blocks_for_sit, blocks_for_nat, blocks_for_ssa;
	u_int32_t sit_segments, nat_segments, diff, total_meta_segments;
	u_int32_t total_valid_blks_available;
	u_int32_t sit_bitmap_size, max_sit_bitmap_size;
	u_int32_t max_nat_bitmap_size, max_nat_segments;
	u_int32_t segment_size_bytes = 1 << (get_sb(log_blocksize) +
					get_sb(log_blocks_per_seg));
	u_int32_t blks_per_seg = 1 << get_sb(log_blocks_per_seg);
	u_int32_t segs_per_zone = get_sb(segs_per_sec) * get_sb(secs_per_zone);

	set_sb(block_count, c.target_sectors >>
				get_sb(log_sectors_per_block));

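	/*
	 * Align the start of the metadata area to a zone boundary.  The
	 * two superblock copies occupy 2 * F2FS_BLKSIZE bytes past the
	 * partition start; round that end offset up to the next multiple
	 * of zone_size_bytes.  For example, with c.start_sector == 0,
	 * 4KB blocks and a 2MB zone, ceil(8KB / 2MB) * 2MB = 2MB, so
	 * metadata begins at the first zone boundary after the
	 * superblocks.
	 */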
	zone_size_bytes = segment_size_bytes * segs_per_zone;
	zone_align_start_offset =
		((u_int64_t) c.start_sector * DEFAULT_SECTOR_SIZE +
		2 * F2FS_BLKSIZE + zone_size_bytes - 1) /
		zone_size_bytes * zone_size_bytes -
		(u_int64_t) c.start_sector * DEFAULT_SECTOR_SIZE;

	set_sb(segment_count, (c.target_sectors * c.sector_size -
				zone_align_start_offset) / segment_size_bytes /
				c.segs_per_sec * c.segs_per_sec);

	if (c.safe_resize)
		goto safe_resize;

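	/*
	 * Rebuild the metadata areas in on-disk order: SIT first
	 * (doubled, since each SIT block keeps two sets and the
	 * checkpoint's version bitmap selects the live one), then NAT,
	 * then SSA.  Each area's start address is derived from the end
	 * of the previous one.
	 */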
	blocks_for_sit = SIZE_ALIGN(get_sb(segment_count), SIT_ENTRY_PER_BLOCK);
	sit_segments = SEG_ALIGN(blocks_for_sit);
	set_sb(segment_count_sit, sit_segments * 2);
	set_sb(nat_blkaddr, get_sb(sit_blkaddr) +
			get_sb(segment_count_sit) * blks_per_seg);

	total_valid_blks_available = (get_sb(segment_count) -
			(get_sb(segment_count_ckpt) +
			get_sb(segment_count_sit))) * blks_per_seg;
	blocks_for_nat = SIZE_ALIGN(total_valid_blks_available,
					NAT_ENTRY_PER_BLOCK);

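	/*
	 * NAT sizing has two modes.  With a large NAT bitmap, only
	 * DEFAULT_NAT_ENTRY_RATIO percent of the worst-case NAT segments
	 * are reserved and the full NAT bitmap is kept (spilling into
	 * cp_payload blocks if needed); otherwise the NAT is sized for
	 * every possible block and its bitmap is later clamped so it
	 * fits inside the checkpoint block itself.
	 */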
	if (c.large_nat_bitmap) {
		nat_segments = SEG_ALIGN(blocks_for_nat) *
					DEFAULT_NAT_ENTRY_RATIO / 100;
		set_sb(segment_count_nat, nat_segments ? nat_segments : 1);

		max_nat_bitmap_size = (get_sb(segment_count_nat) <<
					get_sb(log_blocks_per_seg)) / 8;
		set_sb(segment_count_nat, get_sb(segment_count_nat) * 2);
	} else {
		set_sb(segment_count_nat, SEG_ALIGN(blocks_for_nat));
		max_nat_bitmap_size = 0;
	}

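	/*
	 * The SIT version bitmap needs one bit per SIT block in a set.
	 * Both version bitmaps normally live inside the checkpoint
	 * block, so together they are capped by MAX_BITMAP_SIZE_IN_CKPT;
	 * anything beyond that has to spill into cp_payload blocks.
	 */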
	sit_bitmap_size = ((get_sb(segment_count_sit) / 2) <<
				get_sb(log_blocks_per_seg)) / 8;
	if (sit_bitmap_size > MAX_SIT_BITMAP_SIZE)
		max_sit_bitmap_size = MAX_SIT_BITMAP_SIZE;
	else
		max_sit_bitmap_size = sit_bitmap_size;

	if (c.large_nat_bitmap) {
		/* use cp_payload if free space of f2fs_checkpoint is not enough */
		if (max_sit_bitmap_size + max_nat_bitmap_size >
						MAX_BITMAP_SIZE_IN_CKPT) {
			u_int32_t diff = max_sit_bitmap_size +
						max_nat_bitmap_size -
						MAX_BITMAP_SIZE_IN_CKPT;
			set_sb(cp_payload, F2FS_BLK_ALIGN(diff));
		} else {
			set_sb(cp_payload, 0);
		}
	} else {
		/*
		 * At least one segment should be reserved for the NAT.
		 * When the SIT is too large, expand the checkpoint area
		 * instead, which requires more pages for the checkpoint.
		 */
		if (max_sit_bitmap_size > MAX_SIT_BITMAP_SIZE_IN_CKPT) {
			max_nat_bitmap_size = MAX_BITMAP_SIZE_IN_CKPT;
			set_sb(cp_payload, F2FS_BLK_ALIGN(max_sit_bitmap_size));
		} else {
			max_nat_bitmap_size = MAX_BITMAP_SIZE_IN_CKPT -
						max_sit_bitmap_size;
			set_sb(cp_payload, 0);
		}

		max_nat_segments = (max_nat_bitmap_size * 8) >>
					get_sb(log_blocks_per_seg);

		if (get_sb(segment_count_nat) > max_nat_segments)
			set_sb(segment_count_nat, max_nat_segments);

		set_sb(segment_count_nat, get_sb(segment_count_nat) * 2);
	}

	set_sb(ssa_blkaddr, get_sb(nat_blkaddr) +
			get_sb(segment_count_nat) * blks_per_seg);

	total_valid_blks_available = (get_sb(segment_count) -
			(get_sb(segment_count_ckpt) +
			get_sb(segment_count_sit) +
			get_sb(segment_count_nat))) * blks_per_seg;

	blocks_for_ssa = total_valid_blks_available / blks_per_seg + 1;

	set_sb(segment_count_ssa, SEG_ALIGN(blocks_for_ssa));

	total_meta_segments = get_sb(segment_count_ckpt) +
			get_sb(segment_count_sit) +
			get_sb(segment_count_nat) +
			get_sb(segment_count_ssa);

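	/*
	 * The main area must start on a zone boundary, so pad the SSA
	 * with extra segments until the total metadata size is a whole
	 * number of zones.
	 */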
	diff = total_meta_segments % segs_per_zone;
	if (diff)
		set_sb(segment_count_ssa, get_sb(segment_count_ssa) +
			(segs_per_zone - diff));

	set_sb(main_blkaddr, get_sb(ssa_blkaddr) + get_sb(segment_count_ssa) *
			 blks_per_seg);

safe_resize:
	set_sb(segment_count_main, get_sb(segment_count) -
			(get_sb(segment_count_ckpt) +
			 get_sb(segment_count_sit) +
			 get_sb(segment_count_nat) +
			 get_sb(segment_count_ssa)));

	set_sb(section_count, get_sb(segment_count_main) /
			get_sb(segs_per_sec));

	set_sb(segment_count_main, get_sb(section_count) *
			get_sb(segs_per_sec));

	/* Let's determine the best reserved and overprovisioned space */
	c.new_overprovision = get_best_overprovision(sb);
	c.new_reserved_segments =
		(2 * (100 / c.new_overprovision + 1) + 6) *
		get_sb(segs_per_sec);

	if ((get_sb(segment_count_main) - 2) < c.new_reserved_segments ||
		get_sb(segment_count_main) * blks_per_seg >
		get_sb(block_count)) {
		MSG(0, "\tError: Device size is not sufficient for F2FS volume, "
			"more segments needed = %u",
			c.new_reserved_segments -
			(get_sb(segment_count_main) - 2));
		return -1;
	}
	return 0;
}

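/*
 * Move every valid block of the main area forward by @offset blocks.
 * Segments are walked from the end toward the front so that, when
 * growing, a destination block is never overwritten before it has been
 * copied.  After each copy the owner's pointer is fixed up via the
 * summary entry: the parent node's block address for data blocks, or
 * the NAT entry for node blocks.
 */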
static void migrate_main(struct f2fs_sb_info *sbi, unsigned int offset)
{
	void *raw = calloc(BLOCK_SZ, 1);
	struct seg_entry *se;
	block_t from, to;
	int i, j, ret;
	struct f2fs_summary sum;

	ASSERT(raw != NULL);

	for (i = TOTAL_SEGS(sbi) - 1; i >= 0; i--) {
		se = get_seg_entry(sbi, i);
		if (!se->valid_blocks)
			continue;

		for (j = sbi->blocks_per_seg - 1; j >= 0; j--) {
			if (!f2fs_test_bit(j, (const char *)se->cur_valid_map))
				continue;

			from = START_BLOCK(sbi, i) + j;
			ret = dev_read_block(raw, from);
			ASSERT(ret >= 0);

			to = from + offset;
			ret = dev_write_block(raw, to);
			ASSERT(ret >= 0);

			get_sum_entry(sbi, from, &sum);

			if (IS_DATASEG(se->type))
				update_data_blkaddr(sbi, le32_to_cpu(sum.nid),
					le16_to_cpu(sum.ofs_in_node), to);
			else
				update_nat_blkaddr(sbi, 0,
						le32_to_cpu(sum.nid), to);
		}
	}
	free(raw);
	DBG(0, "Info: Done to migrate Main area: main_blkaddr = 0x%x -> 0x%x\n",
				START_BLOCK(sbi, 0),
				START_BLOCK(sbi, 0) + offset);
}

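/*
 * Copy one segment's summary block to its new SSA location.
 * get_sum_block() may return a pointer into a curseg rather than a
 * fresh allocation, so the block is freed only for the types that were
 * actually allocated (NODE, DATA, or the zeroed MAX case).
 */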
static void move_ssa(struct f2fs_sb_info *sbi, unsigned int segno,
					block_t new_sum_blk_addr)
{
	struct f2fs_summary_block *sum_blk;
	int type;

	sum_blk = get_sum_block(sbi, segno, &type);
	if (type < SEG_TYPE_MAX) {
		int ret;

		ret = dev_write_block(sum_blk, new_sum_blk_addr);
		ASSERT(ret >= 0);
		DBG(1, "Write summary block: (%d) segno=%x/%x --> (%d) %x\n",
				type, segno, GET_SUM_BLKADDR(sbi, segno),
				IS_SUM_NODE_SEG(sum_blk->footer),
				new_sum_blk_addr);
	}
	if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
			type == SEG_TYPE_MAX) {
		free(sum_blk);
	}
	DBG(1, "Info: Done to migrate SSA blocks\n");
}

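/*
 * Relocate the whole SSA to its new address.  If the new area starts
 * inside the old one (a forward move with overlap), copy front to
 * back; otherwise copy back to front.  Summary blocks for segments
 * beyond the old main area are zero-filled.
 */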
static void migrate_ssa(struct f2fs_sb_info *sbi,
		struct f2fs_super_block *new_sb, unsigned int offset)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	block_t old_sum_blkaddr = get_sb(ssa_blkaddr);
	block_t new_sum_blkaddr = get_newsb(ssa_blkaddr);
	block_t end_sum_blkaddr = get_newsb(main_blkaddr);
	block_t expand_sum_blkaddr = new_sum_blkaddr +
					TOTAL_SEGS(sbi) - offset;
	block_t blkaddr;
	int ret;
	void *zero_block = calloc(BLOCK_SZ, 1);
	ASSERT(zero_block);

	if (offset && new_sum_blkaddr < old_sum_blkaddr + offset) {
		blkaddr = new_sum_blkaddr;
		while (blkaddr < end_sum_blkaddr) {
			if (blkaddr < expand_sum_blkaddr) {
				move_ssa(sbi, offset++, blkaddr++);
			} else {
				ret = dev_write_block(zero_block, blkaddr++);
				ASSERT(ret >= 0);
			}
		}
	} else {
		blkaddr = end_sum_blkaddr - 1;
		offset = TOTAL_SEGS(sbi) - 1;
		while (blkaddr >= new_sum_blkaddr) {
			if (blkaddr >= expand_sum_blkaddr) {
				ret = dev_write_block(zero_block, blkaddr--);
				ASSERT(ret >= 0);
			} else {
				move_ssa(sbi, offset--, blkaddr--);
			}
		}
	}

	DBG(0, "Info: Done to migrate SSA blocks: sum_blkaddr = 0x%x -> 0x%x\n",
		old_sum_blkaddr, new_sum_blkaddr);
	free(zero_block);
}

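/*
 * Before shrinking the NAT area, verify that every NAT block above the
 * new maximum nid is completely zero, i.e. that no live nid would be
 * cut off.  On success nm_i->max_nid is lowered; otherwise -1 is
 * returned and the resize is aborted.
 */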
static int shrink_nats(struct f2fs_sb_info *sbi,
				struct f2fs_super_block *new_sb)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	block_t old_nat_blkaddr = get_sb(nat_blkaddr);
	unsigned int nat_blocks;
	void *nat_block, *zero_block;
	int nid, ret, new_max_nid;
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off;

	nat_block = malloc(BLOCK_SZ);
	ASSERT(nat_block);
	zero_block = calloc(BLOCK_SZ, 1);
	ASSERT(zero_block);

	nat_blocks = get_newsb(segment_count_nat) >> 1;
	nat_blocks = nat_blocks << get_sb(log_blocks_per_seg);
	new_max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;

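	/*
	 * Each NAT block has two physical copies in paired segments
	 * (set #0 and set #1); the bit for block_off in nm_i->nat_bitmap
	 * selects which copy is current, hence the "<< 1" in the segment
	 * offset and the conditional blocks_per_seg bump below.
	 */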
	for (nid = nm_i->max_nid - 1; nid > new_max_nid; nid -= NAT_ENTRY_PER_BLOCK) {
		block_off = nid / NAT_ENTRY_PER_BLOCK;
		seg_off = block_off >> sbi->log_blocks_per_seg;
		block_addr = (pgoff_t)(old_nat_blkaddr +
			(seg_off << sbi->log_blocks_per_seg << 1) +
			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));

		if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
			block_addr += sbi->blocks_per_seg;

		ret = dev_read_block(nat_block, block_addr);
		ASSERT(ret >= 0);

		if (memcmp(zero_block, nat_block, BLOCK_SZ)) {
			ret = -1;
			goto not_avail;
		}
	}
	ret = 0;
	nm_i->max_nid = new_max_nid;
not_avail:
	free(nat_block);
	free(zero_block);
	return ret;
}

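/*
 * Copy every current NAT block to the new NAT area, collapsing all
 * entries into set #0 (the version bitmap is cleared as we go, since
 * the new bitmap should be zeros).  NAT blocks covering the newly
 * added nid range are zero-filled.
 */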
static void migrate_nat(struct f2fs_sb_info *sbi,
			struct f2fs_super_block *new_sb)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	block_t old_nat_blkaddr = get_sb(nat_blkaddr);
	block_t new_nat_blkaddr = get_newsb(nat_blkaddr);
	unsigned int nat_blocks;
	void *nat_block;
	int nid, ret, new_max_nid;
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off;

	nat_block = malloc(BLOCK_SZ);
	ASSERT(nat_block);

	for (nid = nm_i->max_nid - 1; nid >= 0; nid -= NAT_ENTRY_PER_BLOCK) {
		block_off = nid / NAT_ENTRY_PER_BLOCK;
		seg_off = block_off >> sbi->log_blocks_per_seg;
		block_addr = (pgoff_t)(old_nat_blkaddr +
			(seg_off << sbi->log_blocks_per_seg << 1) +
			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));

		/* move to set #0 */
		if (f2fs_test_bit(block_off, nm_i->nat_bitmap)) {
			block_addr += sbi->blocks_per_seg;
			f2fs_clear_bit(block_off, nm_i->nat_bitmap);
		}

		ret = dev_read_block(nat_block, block_addr);
		ASSERT(ret >= 0);

		block_addr = (pgoff_t)(new_nat_blkaddr +
			(seg_off << sbi->log_blocks_per_seg << 1) +
			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));

		/* new bitmap should be zeros */
		ret = dev_write_block(nat_block, block_addr);
		ASSERT(ret >= 0);
	}
	/* zero out newly assigned nids */
	memset(nat_block, 0, BLOCK_SZ);
	nat_blocks = get_newsb(segment_count_nat) >> 1;
	nat_blocks = nat_blocks << get_sb(log_blocks_per_seg);
	new_max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;

	DBG(1, "Write NAT block: %x->%x, segment_count_nat=%x->%x\n",
			old_nat_blkaddr, new_nat_blkaddr,
			get_sb(segment_count_nat),
			get_newsb(segment_count_nat));

	for (nid = nm_i->max_nid; nid < new_max_nid;
				nid += NAT_ENTRY_PER_BLOCK) {
		block_off = nid / NAT_ENTRY_PER_BLOCK;
		seg_off = block_off >> sbi->log_blocks_per_seg;
		block_addr = (pgoff_t)(new_nat_blkaddr +
			(seg_off << sbi->log_blocks_per_seg << 1) +
			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));
		ret = dev_write_block(nat_block, block_addr);
		ASSERT(ret >= 0);
		DBG(3, "Write NAT: %lx\n", block_addr);
	}
	free(nat_block);
	DBG(0, "Info: Done to migrate NAT blocks: nat_blkaddr = 0x%x -> 0x%x\n",
			old_nat_blkaddr, new_nat_blkaddr);
}

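/*
 * Rebuild the SIT area at its new address from the in-memory segment
 * entries.  All new SIT blocks are zeroed first, then set #0 is
 * filled; segments below @offset must already be empty because their
 * blocks were migrated away.  Entries are shifted down by @offset so
 * segment numbers stay relative to the new main_blkaddr.
 */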
static void migrate_sit(struct f2fs_sb_info *sbi,
		struct f2fs_super_block *new_sb, unsigned int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int ofs = 0, pre_ofs = 0;
	unsigned int segno, index;
	struct f2fs_sit_block *sit_blk = calloc(BLOCK_SZ, 1);
	block_t sit_blks = get_newsb(segment_count_sit) <<
						(sbi->log_blocks_per_seg - 1);
	struct seg_entry *se;
	block_t blk_addr = 0;
	int ret;

	ASSERT(sit_blk);

	/* initialize with zeros */
	for (index = 0; index < sit_blks; index++) {
		ret = dev_write_block(sit_blk, get_newsb(sit_blkaddr) + index);
		ASSERT(ret >= 0);
		DBG(3, "Write zero sit: %x\n", get_newsb(sit_blkaddr) + index);
	}

	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
		struct f2fs_sit_entry *sit;

		se = get_seg_entry(sbi, segno);
		if (segno < offset) {
			ASSERT(se->valid_blocks == 0);
			continue;
		}

		ofs = SIT_BLOCK_OFFSET(sit_i, segno - offset);

		if (ofs != pre_ofs) {
			blk_addr = get_newsb(sit_blkaddr) + pre_ofs;
			ret = dev_write_block(sit_blk, blk_addr);
			ASSERT(ret >= 0);
			DBG(1, "Write valid sit: %x\n", blk_addr);

			pre_ofs = ofs;
			memset(sit_blk, 0, BLOCK_SZ);
		}

		sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno - offset)];
		memcpy(sit->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
		sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
							se->valid_blocks);
	}
	blk_addr = get_newsb(sit_blkaddr) + ofs;
	ret = dev_write_block(sit_blk, blk_addr);
	DBG(1, "Write valid sit: %x\n", blk_addr);
	ASSERT(ret >= 0);

	free(sit_blk);
	DBG(0, "Info: Done to restore new SIT blocks: 0x%x\n",
					get_newsb(sit_blkaddr));
}

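/*
 * Write a new checkpoint pack reflecting the new geometry: updated
 * overprovision/reserved counts, free and user block counts, current
 * segment numbers shifted by @offset, and version bitmap sizes.  The
 * new pack (version + 1) goes into the CP set not currently in use,
 * and the first block of the old pack is zeroed afterwards so only
 * the new checkpoint is valid.
 */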
static void rebuild_checkpoint(struct f2fs_sb_info *sbi,
			struct f2fs_super_block *new_sb, unsigned int offset)
{
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
	unsigned long long cp_ver = get_cp(checkpoint_ver);
	struct f2fs_checkpoint *new_cp;
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	unsigned int free_segment_count, new_segment_count;
	block_t new_cp_blks = 1 + get_newsb(cp_payload);
	block_t orphan_blks = 0;
	block_t new_cp_blk_no, old_cp_blk_no;
	u_int32_t crc = 0;
	u32 flags;
	void *buf;
	int i, ret;

	new_cp = calloc(new_cp_blks * BLOCK_SZ, 1);
	ASSERT(new_cp);

	buf = malloc(BLOCK_SZ);
	ASSERT(buf);

	/* ovp / free segments */
	set_cp(rsvd_segment_count, c.new_reserved_segments);
	set_cp(overprov_segment_count, (get_newsb(segment_count_main) -
			get_cp(rsvd_segment_count)) *
			c.new_overprovision / 100);
	set_cp(overprov_segment_count, get_cp(overprov_segment_count) +
			get_cp(rsvd_segment_count));

	free_segment_count = get_free_segments(sbi);
	new_segment_count = get_newsb(segment_count_main) -
					get_sb(segment_count_main);

	set_cp(free_segment_count, free_segment_count + new_segment_count);
	set_cp(user_block_count, ((get_newsb(segment_count_main) -
			get_cp(overprov_segment_count)) * c.blks_per_seg));

	if (is_set_ckpt_flags(cp, CP_ORPHAN_PRESENT_FLAG))
		orphan_blks = __start_sum_addr(sbi) - 1;

	set_cp(cp_pack_start_sum, 1 + get_newsb(cp_payload));
	/* 2 CP blocks + 3 data + 3 node summary blocks = 8 */
	set_cp(cp_pack_total_block_count, 8 + orphan_blks + get_newsb(cp_payload));

	/* cur->segno - offset */
	for (i = 0; i < NO_CHECK_TYPE; i++) {
		if (i < CURSEG_HOT_NODE) {
			set_cp(cur_data_segno[i],
					CURSEG_I(sbi, i)->segno - offset);
		} else {
			int n = i - CURSEG_HOT_NODE;

			set_cp(cur_node_segno[n],
					CURSEG_I(sbi, i)->segno - offset);
		}
	}

	/* sit / nat ver bitmap bytesize */
	set_cp(sit_ver_bitmap_bytesize,
			((get_newsb(segment_count_sit) / 2) <<
			get_newsb(log_blocks_per_seg)) / 8);
	set_cp(nat_ver_bitmap_bytesize,
			((get_newsb(segment_count_nat) / 2) <<
			get_newsb(log_blocks_per_seg)) / 8);

	/* update nat_bits flag */
	flags = update_nat_bits_flags(new_sb, cp, get_cp(ckpt_flags));
	if (c.large_nat_bitmap)
		flags |= CP_LARGE_NAT_BITMAP_FLAG;

	if (flags & CP_COMPACT_SUM_FLAG)
		flags &= ~CP_COMPACT_SUM_FLAG;
	if (flags & CP_LARGE_NAT_BITMAP_FLAG)
		set_cp(checksum_offset, CP_MIN_CHKSUM_OFFSET);
	else
		set_cp(checksum_offset, CP_CHKSUM_OFFSET);

	set_cp(ckpt_flags, flags);

	memcpy(new_cp, cp, (unsigned char *)cp->sit_nat_version_bitmap -
					(unsigned char *)cp);
	if (c.safe_resize)
		memcpy((void *)new_cp + CP_BITMAP_OFFSET,
			(void *)cp + CP_BITMAP_OFFSET,
			F2FS_BLKSIZE - CP_BITMAP_OFFSET);

	new_cp->checkpoint_ver = cpu_to_le64(cp_ver + 1);

	crc = f2fs_checkpoint_chksum(new_cp);
	*((__le32 *)((unsigned char *)new_cp + get_cp(checksum_offset))) =
							cpu_to_le32(crc);

	/* Write a new checkpoint in the other set */
	new_cp_blk_no = old_cp_blk_no = get_sb(cp_blkaddr);
	if (sbi->cur_cp == 2)
		old_cp_blk_no += 1 << get_sb(log_blocks_per_seg);
	else
		new_cp_blk_no += 1 << get_sb(log_blocks_per_seg);

	/* write first cp */
	ret = dev_write_block(new_cp, new_cp_blk_no++);
	ASSERT(ret >= 0);

	memset(buf, 0, BLOCK_SZ);
	for (i = 0; i < get_newsb(cp_payload); i++) {
		ret = dev_write_block(buf, new_cp_blk_no++);
		ASSERT(ret >= 0);
	}

	/* copy the old pack's orphan blocks, advancing the source cursor */
	for (i = 0; i < orphan_blks; i++) {
		block_t orphan_blk_no = old_cp_blk_no + 1 +
						get_sb(cp_payload) + i;

		ret = dev_read_block(buf, orphan_blk_no);
		ASSERT(ret >= 0);

		ret = dev_write_block(buf, new_cp_blk_no++);
		ASSERT(ret >= 0);
	}

	/* update summary blocks having nullified journal entries */
	for (i = 0; i < NO_CHECK_TYPE; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);

		ret = dev_write_block(curseg->sum_blk, new_cp_blk_no++);
		ASSERT(ret >= 0);
	}

	/* write the last cp */
	ret = dev_write_block(new_cp, new_cp_blk_no++);
	ASSERT(ret >= 0);

	/* Write nat bits */
	if (flags & CP_NAT_BITS_FLAG)
		write_nat_bits(sbi, new_sb, new_cp, sbi->cur_cp == 1 ? 2 : 1);

	/* disable old checkpoint */
	memset(buf, 0, BLOCK_SZ);
	ret = dev_write_block(buf, old_cp_blk_no);
	ASSERT(ret >= 0);

	free(buf);
	free(new_cp);
	DBG(0, "Info: Done to rebuild checkpoint blocks\n");
}

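/*
 * Grow the filesystem: flush journal entries, compute the new layout,
 * and make room for the larger metadata areas.  If the new main area
 * start still lies inside the old one, first try to defragment just
 * the overlapping front region; if that fails, migrate the whole main
 * area by @offset blocks.  Then relocate SSA, NAT and SIT, write a new
 * checkpoint, and finally commit the new superblock.
 */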
static int f2fs_resize_grow(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_super_block new_sb_raw;
	struct f2fs_super_block *new_sb = &new_sb_raw;
	block_t end_blkaddr, old_main_blkaddr, new_main_blkaddr;
	unsigned int offset;
	unsigned int offset_seg = 0;
	int err = -1;

	/* flush NAT/SIT journal entries */
	flush_journal_entries(sbi);

	memcpy(new_sb, F2FS_RAW_SUPER(sbi), sizeof(*new_sb));
	if (get_new_sb(new_sb))
		return -1;

	/* check nat availability */
	if (get_sb(segment_count_nat) > get_newsb(segment_count_nat)) {
		err = shrink_nats(sbi, new_sb);
		if (err) {
			MSG(0, "\tError: Failed to shrink NATs\n");
			return err;
		}
	}

	old_main_blkaddr = get_sb(main_blkaddr);
	new_main_blkaddr = get_newsb(main_blkaddr);
	offset = new_main_blkaddr - old_main_blkaddr;
	end_blkaddr = (get_sb(segment_count_main) <<
			get_sb(log_blocks_per_seg)) + get_sb(main_blkaddr);

	err = -EAGAIN;
	if (new_main_blkaddr < end_blkaddr) {
		err = f2fs_defragment(sbi, old_main_blkaddr, offset,
						new_main_blkaddr, 0);
		if (!err)
			offset_seg = offset >> get_sb(log_blocks_per_seg);
		MSG(0, "Try to do defragment: %s\n", err ? "Skip" : "Done");
	}
	/* move whole data region */
	if (err)
		migrate_main(sbi, offset);

	migrate_ssa(sbi, new_sb, offset_seg);
	migrate_nat(sbi, new_sb);
	migrate_sit(sbi, new_sb, offset_seg);
	rebuild_checkpoint(sbi, new_sb, offset_seg);
	update_superblock(new_sb, SB_MASK_ALL);
	print_raw_sb_info(sb);
	print_raw_sb_info(new_sb);

	return 0;
}

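/*
 * Shrink the filesystem.  The new layout must still hold all valid
 * blocks, and every block past the new end of the main area is moved
 * down into free space below it via f2fs_defragment(); if there is
 * not enough room the resize fails with -ENOSPC.  Only the checkpoint
 * and superblock are rewritten here; the metadata areas keep their
 * addresses (safe resize).
 */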
static int f2fs_resize_shrink(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_super_block new_sb_raw;
	struct f2fs_super_block *new_sb = &new_sb_raw;
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
	block_t old_end_blkaddr, old_main_blkaddr;
	block_t new_end_blkaddr, new_main_blkaddr, tmp_end_blkaddr;
	block_t user_block_count;
	unsigned int overprov_segment_count;
	unsigned int offset;
	int err = -1;

	/* flush NAT/SIT journal entries */
	flush_journal_entries(sbi);

	memcpy(new_sb, F2FS_RAW_SUPER(sbi), sizeof(*new_sb));
	if (get_new_sb(new_sb))
		return -1;

	overprov_segment_count = (get_newsb(segment_count_main) -
			c.new_reserved_segments) *
			c.new_overprovision / 100;
	overprov_segment_count += c.new_reserved_segments;

	user_block_count = (get_newsb(segment_count_main) -
			overprov_segment_count) * c.blks_per_seg;

	if (get_cp(valid_block_count) > user_block_count)
		return -1;

	/* check nat availability */
	if (get_sb(segment_count_nat) > get_newsb(segment_count_nat)) {
		err = shrink_nats(sbi, new_sb);
		if (err) {
			MSG(0, "\tError: Failed to shrink NATs\n");
			return err;
		}
	}

	old_main_blkaddr = get_sb(main_blkaddr);
	new_main_blkaddr = get_newsb(main_blkaddr);
	offset = old_main_blkaddr - new_main_blkaddr;
	old_end_blkaddr = (get_sb(segment_count_main) <<
			get_sb(log_blocks_per_seg)) + get_sb(main_blkaddr);
	new_end_blkaddr = (get_newsb(segment_count_main) <<
			get_newsb(log_blocks_per_seg)) + get_newsb(main_blkaddr);

	tmp_end_blkaddr = new_end_blkaddr + offset;
	err = f2fs_defragment(sbi, tmp_end_blkaddr,
				old_end_blkaddr - tmp_end_blkaddr,
				tmp_end_blkaddr, 1);
	MSG(0, "Try to do defragment: %s\n", err ? "Insufficient Space" : "Done");

	if (err) {
		return -ENOSPC;
	}

	update_superblock(new_sb, SB_MASK_ALL);
	rebuild_checkpoint(sbi, new_sb, 0);
	/*if (!c.safe_resize) {
		migrate_sit(sbi, new_sb, offset_seg);
		migrate_nat(sbi, new_sb);
		migrate_ssa(sbi, new_sb, offset_seg);
	}*/

	/* move whole data region */
	//if (err)
	//	migrate_main(sbi, offset);
	print_raw_sb_info(sb);
	print_raw_sb_info(new_sb);

	return 0;
}

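/*
 * Entry point: compare the block count implied by c.target_sectors
 * with the current block_count to decide between growing, shrinking
 * (safe resize only), or doing nothing.
 */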
int f2fs_resize(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);

	/* the target may be given in a different sector size */
	if ((c.target_sectors * c.sector_size >>
			get_sb(log_blocksize)) < get_sb(block_count)) {
		if (!c.safe_resize) {
			ASSERT_MSG("Nothing to resize; shrinking is only supported with the safe resize flag\n");
			return -1;
		}
		return f2fs_resize_shrink(sbi);
	} else if (((c.target_sectors * c.sector_size >>
			get_sb(log_blocksize)) > get_sb(block_count)) ||
			c.force) {
		return f2fs_resize_grow(sbi);
	} else {
		MSG(0, "Nothing to resize.\n");
		return 0;
	}
}