• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**
2  * resize.c
3  *
4  * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include "fsck.h"
11 
/*
 * Recompute the superblock geometry (*sb) for the resized device.
 *
 * On entry *sb is a copy of the current superblock; c.target_sectors holds
 * the new device size.  On success the segment counts and the block
 * addresses of the SIT/NAT/SSA/Main areas in *sb describe the new layout,
 * and c.new_overprovision / c.new_reserved_segments are set.
 *
 * Returns 0 on success, -1 if the target device is too small.
 */
static int get_new_sb(struct f2fs_super_block *sb)
{
	uint32_t zone_size_bytes;
	uint64_t zone_align_start_offset;
	uint32_t blocks_for_sit, blocks_for_nat, blocks_for_ssa;
	uint32_t sit_segments, nat_segments, diff, total_meta_segments;
	uint32_t total_valid_blks_available;
	uint32_t sit_bitmap_size, max_sit_bitmap_size;
	uint32_t max_nat_bitmap_size, max_nat_segments;
	uint32_t segment_size_bytes = 1 << (get_sb(log_blocksize) +
					get_sb(log_blocks_per_seg));
	uint32_t blks_per_seg = 1 << get_sb(log_blocks_per_seg);
	uint32_t segs_per_zone = get_sb(segs_per_sec) * get_sb(secs_per_zone);

	/* total blocks the resized device can hold */
	set_sb(block_count, c.target_sectors >>
				get_sb(log_sectors_per_block));

	/*
	 * Round the start of the filesystem (after the 2-block SB area) up
	 * to a zone boundary, relative to c.start_sector.
	 */
	zone_size_bytes = segment_size_bytes * segs_per_zone;
	zone_align_start_offset =
		((uint64_t) c.start_sector * DEFAULT_SECTOR_SIZE +
		2 * F2FS_BLKSIZE + zone_size_bytes - 1) /
		zone_size_bytes * zone_size_bytes -
		(uint64_t) c.start_sector * DEFAULT_SECTOR_SIZE;

	/* usable segments, truncated to a whole number of sections */
	set_sb(segment_count, (c.target_sectors * c.sector_size -
				zone_align_start_offset) / segment_size_bytes /
				c.segs_per_sec * c.segs_per_sec);

	/* safe resize keeps the existing metadata areas in place */
	if (c.safe_resize)
		goto safe_resize;

	/* SIT: one entry per segment; doubled for the two alternating sets */
	blocks_for_sit = SIZE_ALIGN(get_sb(segment_count), SIT_ENTRY_PER_BLOCK);
	sit_segments = SEG_ALIGN(blocks_for_sit);
	set_sb(segment_count_sit, sit_segments * 2);
	set_sb(nat_blkaddr, get_sb(sit_blkaddr) +
				get_sb(segment_count_sit) * blks_per_seg);

	/* NAT sized from the blocks left after CP and SIT areas */
	total_valid_blks_available = (get_sb(segment_count) -
			(get_sb(segment_count_ckpt) +
			get_sb(segment_count_sit))) * blks_per_seg;
	blocks_for_nat = SIZE_ALIGN(total_valid_blks_available,
					NAT_ENTRY_PER_BLOCK);

	if (c.large_nat_bitmap) {
		/* shrink NAT by the configured ratio, minimum one segment */
		nat_segments = SEG_ALIGN(blocks_for_nat) *
						DEFAULT_NAT_ENTRY_RATIO / 100;
		set_sb(segment_count_nat, nat_segments ? nat_segments : 1);

		/* one version bit per NAT block, in bytes */
		max_nat_bitmap_size = (get_sb(segment_count_nat) <<
						get_sb(log_blocks_per_seg)) / 8;
		set_sb(segment_count_nat, get_sb(segment_count_nat) * 2);
	} else {
		set_sb(segment_count_nat, SEG_ALIGN(blocks_for_nat));
		max_nat_bitmap_size = 0;
	}

	/* SIT version bitmap covers one of the two SIT sets */
	sit_bitmap_size = ((get_sb(segment_count_sit) / 2) <<
				get_sb(log_blocks_per_seg)) / 8;
	if (sit_bitmap_size > MAX_SIT_BITMAP_SIZE)
		max_sit_bitmap_size = MAX_SIT_BITMAP_SIZE;
	else
		max_sit_bitmap_size = sit_bitmap_size;

	if (c.large_nat_bitmap) {
		/* use cp_payload if free space of f2fs_checkpoint is not enough */
		if (max_sit_bitmap_size + max_nat_bitmap_size >
						MAX_BITMAP_SIZE_IN_CKPT) {
			uint32_t diff =  max_sit_bitmap_size +
						max_nat_bitmap_size -
						MAX_BITMAP_SIZE_IN_CKPT;
			set_sb(cp_payload, F2FS_BLK_ALIGN(diff));
		} else {
			set_sb(cp_payload, 0);
		}
	} else {
		/*
		 * It should be reserved minimum 1 segment for nat.
		 * When sit is too large, we should expand cp area.
		 * It requires more pages for cp.
		 */
		if (max_sit_bitmap_size > MAX_SIT_BITMAP_SIZE_IN_CKPT) {
			max_nat_bitmap_size = MAX_BITMAP_SIZE_IN_CKPT;
			set_sb(cp_payload, F2FS_BLK_ALIGN(max_sit_bitmap_size));
		} else {
			max_nat_bitmap_size = MAX_BITMAP_SIZE_IN_CKPT -
							max_sit_bitmap_size;
			set_sb(cp_payload, 0);
		}

		/* cap NAT so its version bitmap fits the space computed above */
		max_nat_segments = (max_nat_bitmap_size * 8) >>
					get_sb(log_blocks_per_seg);

		if (get_sb(segment_count_nat) > max_nat_segments)
			set_sb(segment_count_nat, max_nat_segments);

		/* double for the two alternating NAT sets */
		set_sb(segment_count_nat, get_sb(segment_count_nat) * 2);
	}

	set_sb(ssa_blkaddr, get_sb(nat_blkaddr) +
				get_sb(segment_count_nat) * blks_per_seg);

	/* SSA: one summary block per remaining segment (plus one spare) */
	total_valid_blks_available = (get_sb(segment_count) -
			(get_sb(segment_count_ckpt) +
			get_sb(segment_count_sit) +
			get_sb(segment_count_nat))) * blks_per_seg;

	blocks_for_ssa = total_valid_blks_available / blks_per_seg + 1;

	set_sb(segment_count_ssa, SEG_ALIGN(blocks_for_ssa));

	/* pad SSA so the whole metadata region is zone-aligned */
	total_meta_segments = get_sb(segment_count_ckpt) +
		get_sb(segment_count_sit) +
		get_sb(segment_count_nat) +
		get_sb(segment_count_ssa);

	diff = total_meta_segments % segs_per_zone;
	if (diff)
		set_sb(segment_count_ssa, get_sb(segment_count_ssa) +
			(segs_per_zone - diff));

	set_sb(main_blkaddr, get_sb(ssa_blkaddr) + get_sb(segment_count_ssa) *
			 blks_per_seg);

safe_resize:
	/* everything not claimed by metadata becomes the main area */
	set_sb(segment_count_main, get_sb(segment_count) -
			(get_sb(segment_count_ckpt) +
			 get_sb(segment_count_sit) +
			 get_sb(segment_count_nat) +
			 get_sb(segment_count_ssa)));

	set_sb(section_count, get_sb(segment_count_main) /
						get_sb(segs_per_sec));

	/* truncate main area to a whole number of sections */
	set_sb(segment_count_main, get_sb(section_count) *
						get_sb(segs_per_sec));

	/* Let's determine the best reserved and overprovisioned space */
	if (c.new_overprovision == 0)
		c.new_overprovision = get_best_overprovision(sb);

	c.new_reserved_segments =
		(2 * (100 / c.new_overprovision + 1) + 6) *
						get_sb(segs_per_sec);

	if ((get_sb(segment_count_main) - 2) < c.new_reserved_segments ||
		get_sb(segment_count_main) * blks_per_seg >
						get_sb(block_count)) {
		MSG(0, "\tError: Device size is not sufficient for F2FS volume, "
			"more segment needed =%u",
			c.new_reserved_segments -
			(get_sb(segment_count_main) - 2));
		return -1;
	}
	return 0;
}
167 
migrate_main(struct f2fs_sb_info * sbi,unsigned int offset)168 static void migrate_main(struct f2fs_sb_info *sbi, unsigned int offset)
169 {
170 	void *raw = calloc(BLOCK_SZ, 1);
171 	struct seg_entry *se;
172 	block_t from, to;
173 	int i, j, ret;
174 	struct f2fs_summary sum;
175 
176 	ASSERT(raw != NULL);
177 
178 	for (i = MAIN_SEGS(sbi) - 1; i >= 0; i--) {
179 		se = get_seg_entry(sbi, i);
180 		if (!se->valid_blocks)
181 			continue;
182 
183 		for (j = sbi->blocks_per_seg - 1; j >= 0; j--) {
184 			if (!f2fs_test_bit(j, (const char *)se->cur_valid_map))
185 				continue;
186 
187 			from = START_BLOCK(sbi, i) + j;
188 			ret = dev_read_block(raw, from);
189 			ASSERT(ret >= 0);
190 
191 			to = from + offset;
192 			ret = dev_write_block(raw, to);
193 			ASSERT(ret >= 0);
194 
195 			get_sum_entry(sbi, from, &sum);
196 
197 			if (IS_DATASEG(se->type))
198 				update_data_blkaddr(sbi, le32_to_cpu(sum.nid),
199 					le16_to_cpu(sum.ofs_in_node), to);
200 			else
201 				update_nat_blkaddr(sbi, 0,
202 						le32_to_cpu(sum.nid), to);
203 		}
204 	}
205 	free(raw);
206 	DBG(0, "Info: Done to migrate Main area: main_blkaddr = 0x%x -> 0x%x\n",
207 				START_BLOCK(sbi, 0),
208 				START_BLOCK(sbi, 0) + offset);
209 }
210 
/*
 * Copy the summary block of segment @segno to @new_sum_blk_addr.
 *
 * get_sum_block() reports via @type where the summary came from; blocks of
 * type < SEG_TYPE_MAX are written out to the new SSA location.
 */
static void move_ssa(struct f2fs_sb_info *sbi, unsigned int segno,
					block_t new_sum_blk_addr)
{
	struct f2fs_summary_block *sum_blk;
	int type;

	sum_blk = get_sum_block(sbi, segno, &type);
	if (type < SEG_TYPE_MAX) {
		int ret;

		ret = dev_write_block(sum_blk, new_sum_blk_addr);
		ASSERT(ret >= 0);
		DBG(1, "Write summary block: (%d) segno=%x/%x --> (%d) %x\n",
				type, segno, GET_SUM_BLKADDR(sbi, segno),
				IS_SUM_NODE_SEG(sum_blk->footer),
				new_sum_blk_addr);
	}
	/*
	 * Free only for these types; presumably get_sum_block() returns a
	 * caller-owned allocation for them and a curseg-internal buffer for
	 * the CUR_* types — confirm against get_sum_block().
	 */
	if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
			type == SEG_TYPE_MAX) {
		free(sum_blk);
	}
	/* NOTE(review): logged once per segment, not once per migration */
	DBG(1, "Info: Done to migrate SSA blocks\n");
}
234 
/*
 * Rebuild the SSA area at its new location, shifting summary blocks by
 * @offset segments and zero-filling the slots for newly added segments.
 *
 * The old and new SSA regions may overlap, so the copy direction is chosen
 * from the relative positions: copy ascending when the area moves down
 * (or overlap is safe), descending otherwise, so a source block is never
 * clobbered before it has been copied.
 */
static void migrate_ssa(struct f2fs_sb_info *sbi,
		struct f2fs_super_block *new_sb, unsigned int offset)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	block_t old_sum_blkaddr = get_sb(ssa_blkaddr);
	block_t new_sum_blkaddr = get_newsb(ssa_blkaddr);
	block_t end_sum_blkaddr = get_newsb(main_blkaddr);
	/* first slot that has no old segment to copy from */
	block_t expand_sum_blkaddr = new_sum_blkaddr +
					MAIN_SEGS(sbi) - offset;
	block_t blkaddr;
	int ret;
	void *zero_block = calloc(BLOCK_SZ, 1);
	ASSERT(zero_block);

	if (offset && new_sum_blkaddr < old_sum_blkaddr + offset) {
		/* ascending copy: sources sit ahead of destinations */
		blkaddr = new_sum_blkaddr;
		while (blkaddr < end_sum_blkaddr) {
			if (blkaddr < expand_sum_blkaddr) {
				move_ssa(sbi, offset++, blkaddr++);
			} else {
				ret = dev_write_block(zero_block, blkaddr++);
				ASSERT(ret >=0);
			}
		}
	} else {
		/* descending copy: destinations sit ahead of sources */
		blkaddr = end_sum_blkaddr - 1;
		offset = MAIN_SEGS(sbi) - 1;
		while (blkaddr >= new_sum_blkaddr) {
			if (blkaddr >= expand_sum_blkaddr) {
				ret = dev_write_block(zero_block, blkaddr--);
				ASSERT(ret >=0);
			} else {
				move_ssa(sbi, offset--, blkaddr--);
			}
		}
	}

	DBG(0, "Info: Done to migrate SSA blocks: sum_blkaddr = 0x%x -> 0x%x\n",
				old_sum_blkaddr, new_sum_blkaddr);
	free(zero_block);
}
276 
/*
 * Check whether the NAT area can be shrunk to the size chosen in *new_sb.
 *
 * The NAT blocks that would fall beyond the new maximum nid are scanned;
 * if every one of them is completely zero (no live NAT entries) the shrink
 * is allowed and nm_i->max_nid is lowered.  Returns 0 on success, -1 when
 * a non-empty block would be cut off.
 */
static int shrink_nats(struct f2fs_sb_info *sbi,
				struct f2fs_super_block *new_sb)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	block_t old_nat_blkaddr = get_sb(nat_blkaddr);
	unsigned int nat_blocks;
	void *nat_block, *zero_block;
	int nid, ret, new_max_nid;
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off;

	nat_block = malloc(BLOCK_SZ);
	ASSERT(nat_block);
	zero_block = calloc(BLOCK_SZ, 1);
	ASSERT(zero_block);

	/* segment_count_nat counts both sets; >> 1 gives one set */
	nat_blocks = get_newsb(segment_count_nat) >> 1;
	nat_blocks = nat_blocks << get_sb(log_blocks_per_seg);
	new_max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;

	/* walk one NAT block at a time, from the top down to the new limit */
	for (nid = nm_i->max_nid - 1; nid > new_max_nid; nid -= NAT_ENTRY_PER_BLOCK) {
		block_off = nid / NAT_ENTRY_PER_BLOCK;
		seg_off = block_off >> sbi->log_blocks_per_seg;
		block_addr = (pgoff_t)(old_nat_blkaddr +
				(seg_off << sbi->log_blocks_per_seg << 1) +
				(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));

		/* the version bitmap selects which of the two sets is live */
		if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
			block_addr += sbi->blocks_per_seg;

		ret = dev_read_block(nat_block, block_addr);
		ASSERT(ret >= 0);

		if (memcmp(zero_block, nat_block, BLOCK_SZ)) {
			ret = -1;
			goto not_avail;
		}
	}
	ret = 0;
	nm_i->max_nid = new_max_nid;
not_avail:
	free(nat_block);
	free(zero_block);
	return ret;
}
324 
/*
 * Copy all live NAT blocks from the old NAT area to the new one described
 * by *new_sb, collapsing the two alternating sets into set #0, and zero
 * out the NAT blocks covering newly available nids.
 */
static void migrate_nat(struct f2fs_sb_info *sbi,
			struct f2fs_super_block *new_sb)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	block_t old_nat_blkaddr = get_sb(nat_blkaddr);
	block_t new_nat_blkaddr = get_newsb(nat_blkaddr);
	unsigned int nat_blocks;
	void *nat_block;
	int nid, ret, new_max_nid;
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off;

	nat_block = malloc(BLOCK_SZ);
	ASSERT(nat_block);

	/* copy one NAT block per iteration, highest nids first */
	for (nid = nm_i->max_nid - 1; nid >= 0; nid -= NAT_ENTRY_PER_BLOCK) {
		block_off = nid / NAT_ENTRY_PER_BLOCK;
		seg_off = block_off >> sbi->log_blocks_per_seg;
		block_addr = (pgoff_t)(old_nat_blkaddr +
				(seg_off << sbi->log_blocks_per_seg << 1) +
				(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));

		/* move to set #0 */
		if (f2fs_test_bit(block_off, nm_i->nat_bitmap)) {
			block_addr += sbi->blocks_per_seg;
			f2fs_clear_bit(block_off, nm_i->nat_bitmap);
		}

		ret = dev_read_block(nat_block, block_addr);
		ASSERT(ret >= 0);

		block_addr = (pgoff_t)(new_nat_blkaddr +
				(seg_off << sbi->log_blocks_per_seg << 1) +
				(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));

		/* new bitmap should be zeros */
		ret = dev_write_block(nat_block, block_addr);
		ASSERT(ret >= 0);
	}
	/* zero out newly assigned nids */
	memset(nat_block, 0, BLOCK_SZ);
	/* one of the two sets: segment_count_nat counts both */
	nat_blocks = get_newsb(segment_count_nat) >> 1;
	nat_blocks = nat_blocks << get_sb(log_blocks_per_seg);
	new_max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;

	DBG(1, "Write NAT block: %x->%x, max_nid=%x->%x\n",
			old_nat_blkaddr, new_nat_blkaddr,
			get_sb(segment_count_nat),
			get_newsb(segment_count_nat));

	for (nid = nm_i->max_nid; nid < new_max_nid;
				nid += NAT_ENTRY_PER_BLOCK) {
		block_off = nid / NAT_ENTRY_PER_BLOCK;
		seg_off = block_off >> sbi->log_blocks_per_seg;
		block_addr = (pgoff_t)(new_nat_blkaddr +
				(seg_off << sbi->log_blocks_per_seg << 1) +
				(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));
		ret = dev_write_block(nat_block, block_addr);
		ASSERT(ret >= 0);
		DBG(3, "Write NAT: %lx\n", block_addr);
	}
	free(nat_block);
	DBG(0, "Info: Done to migrate NAT blocks: nat_blkaddr = 0x%x -> 0x%x\n",
			old_nat_blkaddr, new_nat_blkaddr);
}
392 
/*
 * Rebuild the SIT area at its new location from the in-memory segment
 * entries, shifting every segment number down by @offset.
 *
 * The new SIT blocks are first zero-filled, then each block is assembled
 * in memory and flushed whenever the walk crosses a SIT-block boundary;
 * the final partial block is flushed after the loop.
 */
static void migrate_sit(struct f2fs_sb_info *sbi,
		struct f2fs_super_block *new_sb, unsigned int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int ofs = 0, pre_ofs = 0;
	unsigned int segno, index;
	struct f2fs_sit_block *sit_blk = calloc(BLOCK_SZ, 1);
	/* one of the two SIT sets: count/2 segments worth of blocks */
	block_t sit_blks = get_newsb(segment_count_sit) <<
						(sbi->log_blocks_per_seg - 1);
	struct seg_entry *se;
	block_t blk_addr = 0;
	int ret;

	ASSERT(sit_blk);

	/* initialize with zeros */
	for (index = 0; index < sit_blks; index++) {
		ret = dev_write_block(sit_blk, get_newsb(sit_blkaddr) + index);
		ASSERT(ret >= 0);
		DBG(3, "Write zero sit: %x\n", get_newsb(sit_blkaddr) + index);
	}

	for (segno = 0; segno < MAIN_SEGS(sbi); segno++) {
		struct f2fs_sit_entry *sit;

		se = get_seg_entry(sbi, segno);
		/* segments below offset disappear; they must be empty */
		if (segno < offset) {
			ASSERT(se->valid_blocks == 0);
			continue;
		}

		ofs = SIT_BLOCK_OFFSET(sit_i, segno - offset);

		/* crossed into the next SIT block: flush the finished one */
		if (ofs != pre_ofs) {
			blk_addr = get_newsb(sit_blkaddr) + pre_ofs;
			ret = dev_write_block(sit_blk, blk_addr);
			ASSERT(ret >= 0);
			DBG(1, "Write valid sit: %x\n", blk_addr);

			pre_ofs = ofs;
			memset(sit_blk, 0, BLOCK_SZ);
		}

		sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno - offset)];
		memcpy(sit->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
		/* type packed into the high bits alongside the valid count */
		sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
							se->valid_blocks);
	}
	/* flush the last, possibly partial, SIT block */
	blk_addr = get_newsb(sit_blkaddr) + ofs;
	ret = dev_write_block(sit_blk, blk_addr);
	DBG(1, "Write valid sit: %x\n", blk_addr);
	ASSERT(ret >= 0);

	free(sit_blk);
	DBG(0, "Info: Done to restore new SIT blocks: 0x%x\n",
					get_newsb(sit_blkaddr));
}
450 
/*
 * Build and write a new checkpoint pack reflecting the resized layout in
 * *new_sb, with current segment numbers shifted down by @offset.
 *
 * The new pack is written into the checkpoint slot NOT currently in use,
 * with a bumped version number, and the old pack's header block is then
 * zeroed so only the new checkpoint is valid.
 */
static void rebuild_checkpoint(struct f2fs_sb_info *sbi,
			struct f2fs_super_block *new_sb, unsigned int offset)
{
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
	unsigned long long cp_ver = get_cp(checkpoint_ver);
	struct f2fs_checkpoint *new_cp;
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	unsigned int free_segment_count, new_segment_count;
	block_t new_cp_blks = 1 + get_newsb(cp_payload);
	block_t orphan_blks = 0;
	block_t new_cp_blk_no, old_cp_blk_no;
	uint32_t crc = 0;
	u32 flags;
	void *buf;
	int i, ret;

	new_cp = calloc(new_cp_blks * BLOCK_SZ, 1);
	ASSERT(new_cp);

	buf = malloc(BLOCK_SZ);
	ASSERT(buf);

	/* ovp / free segments */
	set_cp(rsvd_segment_count, c.new_reserved_segments);
	set_cp(overprov_segment_count, (get_newsb(segment_count_main) -
			get_cp(rsvd_segment_count)) *
			c.new_overprovision / 100);
	/* reserved segments are counted inside the overprovision total */
	set_cp(overprov_segment_count, get_cp(overprov_segment_count) +
						get_cp(rsvd_segment_count));

	DBG(0, "Info: Overprovision ratio = %.3lf%%\n", c.new_overprovision);
	DBG(0, "Info: Overprovision segments = %u (GC reserved = %u)\n",
					get_cp(overprov_segment_count),
					c.new_reserved_segments);

	/* segments gained (or lost) by the resize are all free */
	free_segment_count = get_free_segments(sbi);
	new_segment_count = get_newsb(segment_count_main) -
					get_sb(segment_count_main);

	set_cp(free_segment_count, free_segment_count + new_segment_count);
	set_cp(user_block_count, ((get_newsb(segment_count_main) -
			get_cp(overprov_segment_count)) * c.blks_per_seg));

	if (is_set_ckpt_flags(cp, CP_ORPHAN_PRESENT_FLAG))
		orphan_blks = __start_sum_addr(sbi) - 1;

	set_cp(cp_pack_start_sum, 1 + get_newsb(cp_payload));
	/* 8 = 2 cp header/footer blocks + 6 current segment summaries */
	set_cp(cp_pack_total_block_count, 8 + orphan_blks + get_newsb(cp_payload));

	/* cur->segno - offset */
	for (i = 0; i < NO_CHECK_TYPE; i++) {
		if (i < CURSEG_HOT_NODE) {
			set_cp(cur_data_segno[i],
					CURSEG_I(sbi, i)->segno - offset);
		} else {
			int n = i - CURSEG_HOT_NODE;

			set_cp(cur_node_segno[n],
					CURSEG_I(sbi, i)->segno - offset);
		}
	}

	/* sit / nat ver bitmap bytesize */
	set_cp(sit_ver_bitmap_bytesize,
			((get_newsb(segment_count_sit) / 2) <<
			get_newsb(log_blocks_per_seg)) / 8);
	set_cp(nat_ver_bitmap_bytesize,
			((get_newsb(segment_count_nat) / 2) <<
			get_newsb(log_blocks_per_seg)) / 8);

	/* update nat_bits flag */
	flags = update_nat_bits_flags(new_sb, cp, get_cp(ckpt_flags));
	if (c.large_nat_bitmap)
		flags |= CP_LARGE_NAT_BITMAP_FLAG;

	/* compact summaries are not regenerated here, so drop the flag */
	if (flags & CP_COMPACT_SUM_FLAG)
		flags &= ~CP_COMPACT_SUM_FLAG;
	if (flags & CP_LARGE_NAT_BITMAP_FLAG)
		set_cp(checksum_offset, CP_MIN_CHKSUM_OFFSET);
	else
		set_cp(checksum_offset, CP_CHKSUM_OFFSET);

	set_cp(ckpt_flags, flags);

	/* copy the fixed header, up to the version bitmaps */
	memcpy(new_cp, cp, (unsigned char *)cp->sit_nat_version_bitmap -
						(unsigned char *)cp);
	/* safe resize keeps layout, so the old bitmaps remain valid */
	if (c.safe_resize)
		memcpy((void *)new_cp + CP_BITMAP_OFFSET,
			(void *)cp + CP_BITMAP_OFFSET,
			F2FS_BLKSIZE - CP_BITMAP_OFFSET);

	new_cp->checkpoint_ver = cpu_to_le64(cp_ver + 1);

	crc = f2fs_checkpoint_chksum(new_cp);
	*((__le32 *)((unsigned char *)new_cp + get_cp(checksum_offset))) =
							cpu_to_le32(crc);

	/* Write a new checkpoint in the other set */
	new_cp_blk_no = old_cp_blk_no = get_sb(cp_blkaddr);
	if (sbi->cur_cp == 2)
		old_cp_blk_no += 1 << get_sb(log_blocks_per_seg);
	else
		new_cp_blk_no += 1 << get_sb(log_blocks_per_seg);

	/* write first cp */
	ret = dev_write_block(new_cp, new_cp_blk_no++);
	ASSERT(ret >= 0);

	/* zeroed cp_payload blocks follow the header */
	memset(buf, 0, BLOCK_SZ);
	for (i = 0; i < get_newsb(cp_payload); i++) {
		ret = dev_write_block(buf, new_cp_blk_no++);
		ASSERT(ret >= 0);
	}

	/* carry the orphan blocks over from the old pack */
	for (i = 0; i < orphan_blks; i++) {
		block_t orphan_blk_no = old_cp_blk_no + 1 + get_sb(cp_payload);

		ret = dev_read_block(buf, orphan_blk_no++);
		ASSERT(ret >= 0);

		ret = dev_write_block(buf, new_cp_blk_no++);
		ASSERT(ret >= 0);
	}

	/* update summary blocks having nullified journal entries */
	for (i = 0; i < NO_CHECK_TYPE; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);

		ret = dev_write_block(curseg->sum_blk, new_cp_blk_no++);
		ASSERT(ret >= 0);
	}

	/* write the last cp */
	ret = dev_write_block(new_cp, new_cp_blk_no++);
	ASSERT(ret >= 0);

	/* Write nat bits */
	if (flags & CP_NAT_BITS_FLAG)
		write_nat_bits(sbi, new_sb, new_cp, sbi->cur_cp == 1 ? 2 : 1);

	/* disable old checkpoint */
	memset(buf, 0, BLOCK_SZ);
	ret = dev_write_block(buf, old_cp_blk_no);
	ASSERT(ret >= 0);

	free(buf);
	free(new_cp);
	DBG(0, "Info: Done to rebuild checkpoint blocks\n");
}
600 
f2fs_resize_check(struct f2fs_sb_info * sbi,struct f2fs_super_block * new_sb)601 static int f2fs_resize_check(struct f2fs_sb_info *sbi, struct f2fs_super_block *new_sb)
602 {
603 	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
604 	block_t user_block_count;
605 	unsigned int overprov_segment_count;
606 
607 	overprov_segment_count = (get_newsb(segment_count_main) -
608 			c.new_reserved_segments) *
609 			c.new_overprovision / 100;
610 	overprov_segment_count += c.new_reserved_segments;
611 
612 	user_block_count = (get_newsb(segment_count_main) -
613 			overprov_segment_count) * c.blks_per_seg;
614 
615 	if (get_cp(valid_block_count) > user_block_count)
616 		return -1;
617 
618 	return 0;
619 }
620 
/*
 * Grow the filesystem to c.target_sectors.
 *
 * Computes the new layout, migrates Main/SSA/NAT/SIT to their new
 * (higher) addresses, rebuilds the checkpoint and finally commits the new
 * superblock.  Returns 0 on success, negative on failure.
 */
static int f2fs_resize_grow(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_super_block new_sb_raw;
	struct f2fs_super_block *new_sb = &new_sb_raw;
	block_t end_blkaddr, old_main_blkaddr, new_main_blkaddr;
	unsigned int offset;
	unsigned int offset_seg = 0;
	int err = -1;

	/* flush NAT/SIT journal entries */
	flush_journal_entries(sbi);

	memcpy(new_sb, F2FS_RAW_SUPER(sbi), sizeof(*new_sb));
	if (get_new_sb(new_sb))
		return -1;

	if (f2fs_resize_check(sbi, new_sb) < 0)
		return -1;

	/* check nat availability */
	if (get_sb(segment_count_nat) > get_newsb(segment_count_nat)) {
		err = shrink_nats(sbi, new_sb);
		if (err) {
			MSG(0, "\tError: Failed to shrink NATs\n");
			return err;
		}
	}

	/* the main area moves up by this many blocks */
	old_main_blkaddr = get_sb(main_blkaddr);
	new_main_blkaddr = get_newsb(main_blkaddr);
	offset = new_main_blkaddr - old_main_blkaddr;
	end_blkaddr = (get_sb(segment_count_main) <<
			get_sb(log_blocks_per_seg)) + get_sb(main_blkaddr);

	/*
	 * Try defragmentation first: move only the blocks that sit in the
	 * region the metadata will grow into.  Fall back to migrating the
	 * whole main area when that fails.
	 */
	err = -EAGAIN;
	if (new_main_blkaddr < end_blkaddr) {
		err = f2fs_defragment(sbi, old_main_blkaddr, offset,
						new_main_blkaddr, 0);
		if (!err)
			offset_seg = offset >> get_sb(log_blocks_per_seg);
		MSG(0, "Try to do defragement: %s\n", err ? "Skip": "Done");
	}
	/* move whole data region */
	if (err)
		migrate_main(sbi, offset);

	migrate_ssa(sbi, new_sb, offset_seg);
	migrate_nat(sbi, new_sb);
	migrate_sit(sbi, new_sb, offset_seg);
	rebuild_checkpoint(sbi, new_sb, offset_seg);
	update_superblock(new_sb, SB_MASK_ALL);
	print_raw_sb_info(sb);
	print_raw_sb_info(new_sb);

	return 0;
}
678 
/*
 * Shrink the filesystem to c.target_sectors (safe-resize path).
 *
 * Defragments all data that lives beyond the new end of the volume back
 * into the surviving region, then rebuilds the checkpoint and commits the
 * new superblock.  Returns 0 on success, -ENOSPC when the shrunk volume
 * cannot absorb the displaced blocks, other negative values on failure.
 */
static int f2fs_resize_shrink(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_super_block new_sb_raw;
	struct f2fs_super_block *new_sb = &new_sb_raw;
	block_t old_end_blkaddr, old_main_blkaddr;
	block_t new_end_blkaddr, new_main_blkaddr, tmp_end_blkaddr;
	unsigned int offset;
	int err = -1;

	/* flush NAT/SIT journal entries */
	flush_journal_entries(sbi);

	memcpy(new_sb, F2FS_RAW_SUPER(sbi), sizeof(*new_sb));
	if (get_new_sb(new_sb))
		return -1;

	if (f2fs_resize_check(sbi, new_sb) < 0)
		return -1;

	/* check nat availability */
	if (get_sb(segment_count_nat) > get_newsb(segment_count_nat)) {
		err = shrink_nats(sbi, new_sb);
		if (err) {
			MSG(0, "\tError: Failed to shrink NATs\n");
			return err;
		}
	}

	old_main_blkaddr = get_sb(main_blkaddr);
	new_main_blkaddr = get_newsb(main_blkaddr);
	/* shrink: old main area starts at or after the new one */
	offset = old_main_blkaddr - new_main_blkaddr;
	old_end_blkaddr = (get_sb(segment_count_main) <<
			get_sb(log_blocks_per_seg)) + get_sb(main_blkaddr);
	new_end_blkaddr = (get_newsb(segment_count_main) <<
			get_newsb(log_blocks_per_seg)) + get_newsb(main_blkaddr);

	/* pull everything past the new end back inside the volume */
	tmp_end_blkaddr = new_end_blkaddr + offset;
	err = f2fs_defragment(sbi, tmp_end_blkaddr,
				old_end_blkaddr - tmp_end_blkaddr,
				tmp_end_blkaddr, 1);
	MSG(0, "Try to do defragement: %s\n", err ? "Insufficient Space": "Done");

	if (err) {
		return -ENOSPC;
	}

	update_superblock(new_sb, SB_MASK_ALL);
	rebuild_checkpoint(sbi, new_sb, 0);
	/*if (!c.safe_resize) {
		migrate_sit(sbi, new_sb, offset_seg);
		migrate_nat(sbi, new_sb);
		migrate_ssa(sbi, new_sb, offset_seg);
	}*/

	/* move whole data region */
	//if (err)
	//	migrate_main(sbi, offset);
	print_raw_sb_info(sb);
	print_raw_sb_info(new_sb);

	return 0;
}
742 
f2fs_resize(struct f2fs_sb_info * sbi)743 int f2fs_resize(struct f2fs_sb_info *sbi)
744 {
745 	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
746 
747 	/* may different sector size */
748 	if ((c.target_sectors * c.sector_size >>
749 			get_sb(log_blocksize)) < get_sb(block_count))
750 		if (!c.safe_resize) {
751 			ASSERT_MSG("Nothing to resize, now only supports resizing with safe resize flag\n");
752 			return -1;
753 		} else {
754 			return f2fs_resize_shrink(sbi);
755 		}
756 	else if (((c.target_sectors * c.sector_size >>
757 			get_sb(log_blocksize)) > get_sb(block_count)) ||
758 			c.force)
759 		return f2fs_resize_grow(sbi);
760 	else {
761 		MSG(0, "Nothing to resize.\n");
762 		return 0;
763 	}
764 }
765