/**
 * f2fs_format.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * Dual licensed under the GPL or LGPL version 2 licenses.
 */
#define _LARGEFILE64_SOURCE

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <time.h>
#include <uuid/uuid.h>

#include "f2fs_fs.h"
#include "f2fs_format_utils.h"

extern struct f2fs_configuration c;
struct f2fs_super_block raw_sb;
struct f2fs_super_block *sb = &raw_sb;
struct f2fs_checkpoint *cp;

/* Return first segment number of each area */
#define prev_zone(cur)		(c.cur_seg[cur] - c.segs_per_zone)
#define next_zone(cur)		(c.cur_seg[cur] + c.segs_per_zone)
#define last_zone(cur)		((cur - 1) * c.segs_per_zone)
#define last_section(cur)	(cur + (c.secs_per_zone - 1) * c.segs_per_sec)

const char *media_ext_lists[] = {
	"jpg",
	"gif",
	"png",
	"avi",
	"divx",
	"mp4",
	"mp3",
	"3gp",
	"wmv",
	"wma",
	"mpeg",
	"mkv",
	"mov",
	"asx",
	"asf",
	"wmx",
	"svi",
	"wvx",
	"wm",
	"mpg",
	"mpe",
	"rm",
	"ogg",
	"jpeg",
	"video",
	"apk",	/* for android system */
	NULL
};

static bool is_extension_exist(const char *name)
{
	int i;

	for (i = 0; i < F2FS_MAX_EXTENSION; i++) {
		char *ext = (char *)sb->extension_list[i];
		if (!strcmp(ext, name))
			return 1;
	}

	return 0;
}

static void cure_extension_list(void)
{
	const char **extlist = media_ext_lists;
	char *ext_str = c.extension_list;
	char *ue;
	int name_len;
	int i = 0;

	set_sb(extension_count, 0);
	memset(sb->extension_list, 0, sizeof(sb->extension_list));

	while (*extlist) {
		name_len = strlen(*extlist);
		memcpy(sb->extension_list[i++], *extlist, name_len);
		extlist++;
	}
	set_sb(extension_count, i);

	if (!ext_str)
		return;

	/* add user ext list */
	ue = strtok(ext_str, ", ");
	while (ue != NULL) {
		name_len = strlen(ue);
		if (name_len >= 8) {
			MSG(0, "\tWarn: Extension name (%s) is too long\n", ue);
			goto next;
		}
		if (!is_extension_exist(ue))
			memcpy(sb->extension_list[i++], ue, name_len);
next:
		ue = strtok(NULL, ", ");
		if (i >= F2FS_MAX_EXTENSION)
			break;
	}

	set_sb(extension_count, i);

	free(c.extension_list);
}

static void verify_cur_segs(void)
{
	int i, j;

	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		for (j = i + 1; j < NR_CURSEG_TYPE; j++)
			if (c.cur_seg[i] == c.cur_seg[j])
				break;
		if (j < NR_CURSEG_TYPE)
			break;
	}

	/* no duplicated current segments; keep the assignment as-is */
	if (i == NR_CURSEG_TYPE)
		return;

	c.cur_seg[0] = 0;
	for (i = 1; i < NR_CURSEG_TYPE; i++)
		c.cur_seg[i] = next_zone(i - 1);
}

static int f2fs_prepare_super_block(void)
{
	u_int32_t blk_size_bytes;
	u_int32_t log_sectorsize, log_sectors_per_block;
	u_int32_t log_blocksize, log_blks_per_seg;
	u_int32_t segment_size_bytes, zone_size_bytes;
	u_int32_t sit_segments;
	u_int32_t blocks_for_sit, blocks_for_nat, blocks_for_ssa;
	u_int32_t total_valid_blks_available;
	u_int64_t zone_align_start_offset, diff;
	u_int64_t total_meta_zones, total_meta_segments;
	u_int32_t sit_bitmap_size, max_sit_bitmap_size;
	u_int32_t max_nat_bitmap_size, max_nat_segments;
	u_int32_t total_zones;
	int i;

	set_sb(magic, F2FS_SUPER_MAGIC);
	set_sb(major_ver, F2FS_MAJOR_VERSION);
	set_sb(minor_ver, F2FS_MINOR_VERSION);

	log_sectorsize = log_base_2(c.sector_size);
	log_sectors_per_block = log_base_2(c.sectors_per_blk);
	log_blocksize = log_sectorsize + log_sectors_per_block;
	log_blks_per_seg = log_base_2(c.blks_per_seg);

	set_sb(log_sectorsize, log_sectorsize);
	set_sb(log_sectors_per_block, log_sectors_per_block);

	set_sb(log_blocksize, log_blocksize);
	set_sb(log_blocks_per_seg, log_blks_per_seg);

	set_sb(segs_per_sec, c.segs_per_sec);
	set_sb(secs_per_zone, c.secs_per_zone);

	blk_size_bytes = 1 << log_blocksize;
	segment_size_bytes = blk_size_bytes * c.blks_per_seg;
	zone_size_bytes =
		blk_size_bytes * c.secs_per_zone *
		c.segs_per_sec * c.blks_per_seg;

	set_sb(checksum_offset, 0);

	set_sb(block_count, c.total_sectors >> log_sectors_per_block);

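	/*
	 * Leave room for the two super block copies in front, then round
	 * up to the next zone boundary; segment0 starts at that offset.
	 */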
	zone_align_start_offset =
		(c.start_sector * c.sector_size +
		2 * F2FS_BLKSIZE + zone_size_bytes - 1) /
		zone_size_bytes * zone_size_bytes -
		c.start_sector * c.sector_size;

	if (c.start_sector % c.sectors_per_blk) {
		MSG(1, "\t%s: Align start sector number to the page unit\n",
				c.zoned_mode ? "FAIL" : "WARN");
		MSG(1, "\ti.e., start sector: %d, ofs:%d (sects/page: %d)\n",
				c.start_sector,
				c.start_sector % c.sectors_per_blk,
				c.sectors_per_blk);
		if (c.zoned_mode)
			return -1;
	}

	set_sb(segment0_blkaddr, zone_align_start_offset / blk_size_bytes);
	sb->cp_blkaddr = sb->segment0_blkaddr;

	MSG(0, "Info: zone aligned segment0 blkaddr: %u\n",
					get_sb(segment0_blkaddr));

	if (c.zoned_mode && (get_sb(segment0_blkaddr) + c.start_sector /
					c.sectors_per_blk) % c.zone_blocks) {
		MSG(1, "\tError: Unaligned segment0 block address %u\n",
				get_sb(segment0_blkaddr));
		return -1;
	}

	for (i = 0; i < c.ndevs; i++) {
		if (i == 0) {
			c.devices[i].total_segments =
				(c.devices[i].total_sectors *
				c.sector_size - zone_align_start_offset) /
				segment_size_bytes;
			c.devices[i].start_blkaddr = 0;
			c.devices[i].end_blkaddr = c.devices[i].total_segments *
						c.blks_per_seg - 1 +
						sb->segment0_blkaddr;
		} else {
			c.devices[i].total_segments =
				c.devices[i].total_sectors /
				(c.sectors_per_blk * c.blks_per_seg);
			c.devices[i].start_blkaddr =
					c.devices[i - 1].end_blkaddr + 1;
			c.devices[i].end_blkaddr = c.devices[i].start_blkaddr +
					c.devices[i].total_segments *
					c.blks_per_seg - 1;
		}
		if (c.ndevs > 1) {
			memcpy(sb->devs[i].path, c.devices[i].path, MAX_PATH_LEN);
			sb->devs[i].total_segments =
					cpu_to_le32(c.devices[i].total_segments);
		}

		c.total_segments += c.devices[i].total_segments;
	}
	set_sb(segment_count, (c.total_segments / c.segs_per_zone *
						c.segs_per_zone));
	set_sb(segment_count_ckpt, F2FS_NUMBER_OF_CHECKPOINT_PACK);

	set_sb(sit_blkaddr, get_sb(segment0_blkaddr) +
			get_sb(segment_count_ckpt) * c.blks_per_seg);

	blocks_for_sit = ALIGN(get_sb(segment_count), SIT_ENTRY_PER_BLOCK);

	sit_segments = SEG_ALIGN(blocks_for_sit);

	set_sb(segment_count_sit, sit_segments * 2);

	set_sb(nat_blkaddr, get_sb(sit_blkaddr) + get_sb(segment_count_sit) *
			c.blks_per_seg);

	total_valid_blks_available = (get_sb(segment_count) -
			(get_sb(segment_count_ckpt) +
			get_sb(segment_count_sit))) * c.blks_per_seg;

	blocks_for_nat = ALIGN(total_valid_blks_available,
			NAT_ENTRY_PER_BLOCK);

	set_sb(segment_count_nat, SEG_ALIGN(blocks_for_nat));
	/*
	 * The number of node segments should not exceed a "Threshold".
	 * This number resizes the NAT bitmap area in a CP page.
	 * So the threshold is determined not to overflow one CP page.
	 */
	sit_bitmap_size = ((get_sb(segment_count_sit) / 2) <<
				log_blks_per_seg) / 8;

	if (sit_bitmap_size > MAX_SIT_BITMAP_SIZE)
		max_sit_bitmap_size = MAX_SIT_BITMAP_SIZE;
	else
		max_sit_bitmap_size = sit_bitmap_size;

	/*
	 * At least one segment should be reserved for NAT.
	 * When the SIT is too large, we should expand the cp area,
	 * which requires more pages for the cp.
	 */
	if (max_sit_bitmap_size >
			(CHECKSUM_OFFSET -
				sizeof(struct f2fs_checkpoint) + 1 - 64)) {
		max_nat_bitmap_size = CHECKSUM_OFFSET -
				sizeof(struct f2fs_checkpoint) + 1;
		set_sb(cp_payload, F2FS_BLK_ALIGN(max_sit_bitmap_size));
	} else {
		max_nat_bitmap_size =
			CHECKSUM_OFFSET - sizeof(struct f2fs_checkpoint) + 1
			- max_sit_bitmap_size;
		set_sb(cp_payload, 0);
	}

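	/*
	 * Each NAT bitmap bit tracks one NAT block, so the byte budget left
	 * in the CP page caps how many NAT segments can be described.
	 */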
	max_nat_segments = (max_nat_bitmap_size * 8) >> log_blks_per_seg;

	if (get_sb(segment_count_nat) > max_nat_segments)
		set_sb(segment_count_nat, max_nat_segments);

	set_sb(segment_count_nat, get_sb(segment_count_nat) * 2);

	set_sb(ssa_blkaddr, get_sb(nat_blkaddr) + get_sb(segment_count_nat) *
			c.blks_per_seg);

	total_valid_blks_available = (get_sb(segment_count) -
			(get_sb(segment_count_ckpt) +
			get_sb(segment_count_sit) +
			get_sb(segment_count_nat))) *
			c.blks_per_seg;

	blocks_for_ssa = total_valid_blks_available /
				c.blks_per_seg + 1;

	set_sb(segment_count_ssa, SEG_ALIGN(blocks_for_ssa));

	total_meta_segments = get_sb(segment_count_ckpt) +
		get_sb(segment_count_sit) +
		get_sb(segment_count_nat) +
		get_sb(segment_count_ssa);
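	/* Pad the SSA so that the metadata area ends on a zone boundary. */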
	diff = total_meta_segments % (c.segs_per_zone);
	if (diff)
		set_sb(segment_count_ssa, get_sb(segment_count_ssa) +
			(c.segs_per_zone - diff));

	total_meta_zones = ZONE_ALIGN(total_meta_segments *
						c.blks_per_seg);

	set_sb(main_blkaddr, get_sb(segment0_blkaddr) + total_meta_zones *
				c.segs_per_zone * c.blks_per_seg);

	if (c.zoned_mode) {
		/*
		 * Make sure there is enough randomly writeable
		 * space at the beginning of the disk.
		 */
		unsigned long main_blkzone = get_sb(main_blkaddr) / c.zone_blocks;

		if (c.devices[0].zoned_model == F2FS_ZONED_HM &&
				c.devices[0].nr_rnd_zones < main_blkzone) {
			MSG(0, "\tError: Device does not have enough random "
					"write zones for F2FS volume (%lu needed)\n",
					main_blkzone);
			return -1;
		}
	}

	total_zones = get_sb(segment_count) / (c.segs_per_zone) -
							total_meta_zones;

	set_sb(section_count, total_zones * c.secs_per_zone);

	set_sb(segment_count_main, get_sb(section_count) * c.segs_per_sec);

	/* Let's determine the best reserved and overprovisioned space */
	if (c.overprovision == 0)
		c.overprovision = get_best_overprovision(sb);

	if (c.overprovision == 0 || c.total_segments < F2FS_MIN_SEGMENTS ||
		(c.devices[0].total_sectors *
			c.sector_size < zone_align_start_offset) ||
		(get_sb(segment_count_main) - 2) < c.reserved_segments) {
		MSG(0, "\tError: Device size is not sufficient for F2FS volume\n");
		return -1;
	}

	c.reserved_segments =
			(2 * (100 / c.overprovision + 1) + 6)
			* c.segs_per_sec;

	uuid_generate(sb->uuid);

	utf8_to_utf16(sb->volume_name, (const char *)c.vol_label,
				MAX_VOLUME_NAME, strlen(c.vol_label));
	set_sb(node_ino, 1);
	set_sb(meta_ino, 2);
	set_sb(root_ino, 3);

	if (total_zones <= 6) {
		MSG(1, "\tError: %d zones: Need more zones "
			"by shrinking zone size\n", total_zones);
		return -1;
	}

	if (c.heap) {
		c.cur_seg[CURSEG_HOT_NODE] =
				last_section(last_zone(total_zones));
		c.cur_seg[CURSEG_WARM_NODE] = prev_zone(CURSEG_HOT_NODE);
		c.cur_seg[CURSEG_COLD_NODE] = prev_zone(CURSEG_WARM_NODE);
		c.cur_seg[CURSEG_HOT_DATA] = prev_zone(CURSEG_COLD_NODE);
		c.cur_seg[CURSEG_COLD_DATA] = 0;
		c.cur_seg[CURSEG_WARM_DATA] = next_zone(CURSEG_COLD_DATA);
	} else {
		c.cur_seg[CURSEG_HOT_NODE] = 0;
		c.cur_seg[CURSEG_WARM_NODE] = next_zone(CURSEG_HOT_NODE);
		c.cur_seg[CURSEG_COLD_NODE] = next_zone(CURSEG_WARM_NODE);
		c.cur_seg[CURSEG_HOT_DATA] = next_zone(CURSEG_COLD_NODE);
		c.cur_seg[CURSEG_COLD_DATA] =
				max(last_zone((total_zones >> 2)),
					next_zone(CURSEG_COLD_NODE));
		c.cur_seg[CURSEG_WARM_DATA] =
				max(last_zone((total_zones >> 1)),
					next_zone(CURSEG_COLD_DATA));
	}

	/* if there is redundancy, reassign it */
	verify_cur_segs();

	cure_extension_list();

	/* get kernel version */
	if (c.kd >= 0) {
		dev_read_version(c.version, 0, VERSION_LEN);
		get_kernel_version(c.version);
		MSG(0, "Info: format version with\n  \"%s\"\n", c.version);
	} else {
		memset(c.version, 0, VERSION_LEN);
	}

	memcpy(sb->version, c.version, VERSION_LEN);
	memcpy(sb->init_version, c.version, VERSION_LEN);

	sb->feature = c.feature;

	return 0;
}

static int f2fs_init_sit_area(void)
{
	u_int32_t blk_size, seg_size;
	u_int32_t index = 0;
	u_int64_t sit_seg_addr = 0;
	u_int8_t *zero_buf = NULL;

	blk_size = 1 << get_sb(log_blocksize);
	seg_size = (1 << get_sb(log_blocks_per_seg)) * blk_size;

	zero_buf = calloc(sizeof(u_int8_t), seg_size);
	if (zero_buf == NULL) {
		MSG(1, "\tError: Calloc Failed for sit_zero_buf!!!\n");
		return -1;
	}

	sit_seg_addr = get_sb(sit_blkaddr);
	sit_seg_addr *= blk_size;

	DBG(1, "\tFilling sit area at offset 0x%08"PRIx64"\n", sit_seg_addr);
	for (index = 0; index < (get_sb(segment_count_sit) / 2); index++) {
		if (dev_fill(zero_buf, sit_seg_addr, seg_size)) {
			MSG(1, "\tError: While zeroing out the sit area "
					"on disk!!!\n");
			free(zero_buf);
			return -1;
		}
		sit_seg_addr += seg_size;
	}

	free(zero_buf);
	return 0;
}

static int f2fs_init_nat_area(void)
{
	u_int32_t blk_size, seg_size;
	u_int32_t index = 0;
	u_int64_t nat_seg_addr = 0;
	u_int8_t *nat_buf = NULL;

	blk_size = 1 << get_sb(log_blocksize);
	seg_size = (1 << get_sb(log_blocks_per_seg)) * blk_size;

	nat_buf = calloc(sizeof(u_int8_t), seg_size);
	if (nat_buf == NULL) {
		MSG(1, "\tError: Calloc Failed for nat_zero_blk!!!\n");
		return -1;
	}

	nat_seg_addr = get_sb(nat_blkaddr);
	nat_seg_addr *= blk_size;

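	/*
	 * NAT segments are laid out as mirrored pairs; only the first copy
	 * of each pair is zeroed here, hence the 2 * seg_size stride below.
	 */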
	DBG(1, "\tFilling nat area at offset 0x%08"PRIx64"\n", nat_seg_addr);
	for (index = 0; index < get_sb(segment_count_nat) / 2; index++) {
		if (dev_fill(nat_buf, nat_seg_addr, seg_size)) {
			MSG(1, "\tError: While zeroing out the nat area "
					"on disk!!!\n");
			free(nat_buf);
			return -1;
		}
		nat_seg_addr = nat_seg_addr + (2 * seg_size);
	}

	free(nat_buf);
	return 0;
}

static int f2fs_write_check_point_pack(void)
{
	struct f2fs_summary_block *sum = NULL;
	struct f2fs_journal *journal;
	u_int32_t blk_size_bytes;
	u_int32_t nat_bits_bytes, nat_bits_blocks;
	unsigned char *nat_bits = NULL, *empty_nat_bits;
	u_int64_t cp_seg_blk = 0;
	u_int32_t crc = 0, flags;
	unsigned int i;
	char *cp_payload = NULL;
	char *sum_compact, *sum_compact_p;
	struct f2fs_summary *sum_entry;
	int ret = -1;

	cp = calloc(F2FS_BLKSIZE, 1);
	if (cp == NULL) {
		MSG(1, "\tError: Calloc Failed for f2fs_checkpoint!!!\n");
		return ret;
	}

	sum = calloc(F2FS_BLKSIZE, 1);
	if (sum == NULL) {
		MSG(1, "\tError: Calloc Failed for summary_node!!!\n");
		goto free_cp;
	}

	sum_compact = calloc(F2FS_BLKSIZE, 1);
	if (sum_compact == NULL) {
		MSG(1, "\tError: Calloc Failed for summary buffer!!!\n");
		goto free_sum;
	}
	sum_compact_p = sum_compact;

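	/*
	 * nat_bits layout: an 8-byte checkpoint CRC, followed by the
	 * full_nat_bits and empty_nat_bits bitmaps (one bit per NAT block).
	 */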
	nat_bits_bytes = get_sb(segment_count_nat) << 5;
	nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 +
						F2FS_BLKSIZE - 1);
	nat_bits = calloc(F2FS_BLKSIZE, nat_bits_blocks);
	if (nat_bits == NULL) {
		MSG(1, "\tError: Calloc Failed for nat bits buffer!!!\n");
		goto free_sum_compact;
	}

	cp_payload = calloc(F2FS_BLKSIZE, 1);
	if (cp_payload == NULL) {
		MSG(1, "\tError: Calloc Failed for cp_payload!!!\n");
		goto free_nat_bits;
	}

	/* 1. cp page 1 of checkpoint pack 1 */
	cp->checkpoint_ver = rand() | 0x1;
	set_cp(cur_node_segno[0], c.cur_seg[CURSEG_HOT_NODE]);
	set_cp(cur_node_segno[1], c.cur_seg[CURSEG_WARM_NODE]);
	set_cp(cur_node_segno[2], c.cur_seg[CURSEG_COLD_NODE]);
	set_cp(cur_data_segno[0], c.cur_seg[CURSEG_HOT_DATA]);
	set_cp(cur_data_segno[1], c.cur_seg[CURSEG_WARM_DATA]);
	set_cp(cur_data_segno[2], c.cur_seg[CURSEG_COLD_DATA]);
	for (i = 3; i < MAX_ACTIVE_NODE_LOGS; i++) {
		set_cp(cur_node_segno[i], 0xffffffff);
		set_cp(cur_data_segno[i], 0xffffffff);
	}

	set_cp(cur_node_blkoff[0], 1);
	set_cp(cur_data_blkoff[0], 1);
	set_cp(valid_block_count, 2);
	set_cp(rsvd_segment_count, c.reserved_segments);
	set_cp(overprov_segment_count, (get_sb(segment_count_main) -
			get_cp(rsvd_segment_count)) *
			c.overprovision / 100);
	set_cp(overprov_segment_count, get_cp(overprov_segment_count) +
			get_cp(rsvd_segment_count));

	MSG(0, "Info: Overprovision ratio = %.3lf%%\n", c.overprovision);
	MSG(0, "Info: Overprovision segments = %u (GC reserved = %u)\n",
					get_cp(overprov_segment_count),
					c.reserved_segments);

	/* main segments - reserved segments - (node + data segments) */
	set_cp(free_segment_count, get_sb(segment_count_main) - 6);
	set_cp(user_block_count, ((get_cp(free_segment_count) + 6 -
			get_cp(overprov_segment_count)) * c.blks_per_seg));
	/* cp page (2), data summaries (1), node summaries (3) */
	set_cp(cp_pack_total_block_count, 6 + get_sb(cp_payload));
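	/* Advertise nat_bits only if they fit in the tail of the cp segment. */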
	flags = CP_UMOUNT_FLAG | CP_COMPACT_SUM_FLAG;
	if (get_cp(cp_pack_total_block_count) <=
			(1 << get_sb(log_blocks_per_seg)) - nat_bits_blocks)
		flags |= CP_NAT_BITS_FLAG;

	if (c.trimmed)
		flags |= CP_TRIMMED_FLAG;

	set_cp(ckpt_flags, flags);
	set_cp(cp_pack_start_sum, 1 + get_sb(cp_payload));
	set_cp(valid_node_count, 1);
	set_cp(valid_inode_count, 1);
	set_cp(next_free_nid, get_sb(root_ino) + 1);
	set_cp(sit_ver_bitmap_bytesize, ((get_sb(segment_count_sit) / 2) <<
			get_sb(log_blocks_per_seg)) / 8);

	set_cp(nat_ver_bitmap_bytesize, ((get_sb(segment_count_nat) / 2) <<
			get_sb(log_blocks_per_seg)) / 8);

	set_cp(checksum_offset, CHECKSUM_OFFSET);

	crc = f2fs_cal_crc32(F2FS_SUPER_MAGIC, cp, CHECKSUM_OFFSET);
	*((__le32 *)((unsigned char *)cp + CHECKSUM_OFFSET)) =
							cpu_to_le32(crc);

	blk_size_bytes = 1 << get_sb(log_blocksize);

	if (blk_size_bytes != F2FS_BLKSIZE) {
		MSG(1, "\tError: Wrong block size %d / %d!!!\n",
					blk_size_bytes, F2FS_BLKSIZE);
		goto free_cp_payload;
	}

	cp_seg_blk = get_sb(segment0_blkaddr);

	DBG(1, "\tWriting main segments, cp at offset 0x%08"PRIx64"\n",
						cp_seg_blk);
	if (dev_write_block(cp, cp_seg_blk)) {
		MSG(1, "\tError: While writing the cp to disk!!!\n");
		goto free_cp_payload;
	}

	for (i = 0; i < get_sb(cp_payload); i++) {
		cp_seg_blk++;
		if (dev_fill_block(cp_payload, cp_seg_blk)) {
			MSG(1, "\tError: While zeroing out the sit bitmap area "
					"on disk!!!\n");
			goto free_cp_payload;
		}
	}

	/* Prepare and write Segment summary for HOT/WARM/COLD DATA
	 *
	 * The structure of compact summary
	 * +-------------------+
	 * | nat_journal       |
	 * +-------------------+
	 * | sit_journal       |
	 * +-------------------+
	 * | hot data summary  |
	 * +-------------------+
	 * | warm data summary |
	 * +-------------------+
	 * | cold data summary |
	 * +-------------------+
	 */
	memset(sum, 0, sizeof(struct f2fs_summary_block));
	SET_SUM_TYPE((&sum->footer), SUM_TYPE_DATA);

	journal = &sum->journal;
	journal->n_nats = cpu_to_le16(1);
	journal->nat_j.entries[0].nid = sb->root_ino;
	journal->nat_j.entries[0].ne.version = 0;
	journal->nat_j.entries[0].ne.ino = sb->root_ino;
	journal->nat_j.entries[0].ne.block_addr = cpu_to_le32(
			get_sb(main_blkaddr) +
			get_cp(cur_node_segno[0]) * c.blks_per_seg);

	memcpy(sum_compact_p, &journal->n_nats, SUM_JOURNAL_SIZE);
	sum_compact_p += SUM_JOURNAL_SIZE;

	memset(sum, 0, sizeof(struct f2fs_summary_block));
	/* inode sit for root */
	journal->n_sits = cpu_to_le16(6);
	journal->sit_j.entries[0].segno = cp->cur_node_segno[0];
	journal->sit_j.entries[0].se.vblocks =
				cpu_to_le16((CURSEG_HOT_NODE << 10) | 1);
	f2fs_set_bit(0, (char *)journal->sit_j.entries[0].se.valid_map);
	journal->sit_j.entries[1].segno = cp->cur_node_segno[1];
	journal->sit_j.entries[1].se.vblocks =
				cpu_to_le16((CURSEG_WARM_NODE << 10));
	journal->sit_j.entries[2].segno = cp->cur_node_segno[2];
	journal->sit_j.entries[2].se.vblocks =
				cpu_to_le16((CURSEG_COLD_NODE << 10));

	/* data sit for root */
	journal->sit_j.entries[3].segno = cp->cur_data_segno[0];
	journal->sit_j.entries[3].se.vblocks =
				cpu_to_le16((CURSEG_HOT_DATA << 10) | 1);
	f2fs_set_bit(0, (char *)journal->sit_j.entries[3].se.valid_map);
	journal->sit_j.entries[4].segno = cp->cur_data_segno[1];
	journal->sit_j.entries[4].se.vblocks =
				cpu_to_le16((CURSEG_WARM_DATA << 10));
	journal->sit_j.entries[5].segno = cp->cur_data_segno[2];
	journal->sit_j.entries[5].se.vblocks =
				cpu_to_le16((CURSEG_COLD_DATA << 10));

	memcpy(sum_compact_p, &journal->n_sits, SUM_JOURNAL_SIZE);
	sum_compact_p += SUM_JOURNAL_SIZE;

	/* hot data summary */
	sum_entry = (struct f2fs_summary *)sum_compact_p;
	sum_entry->nid = sb->root_ino;
	sum_entry->ofs_in_node = 0;
	/* warm data summary, nothing to do */
	/* cold data summary, nothing to do */

	cp_seg_blk++;
	DBG(1, "\tWriting Segment summary for HOT/WARM/COLD_DATA, at offset 0x%08"PRIx64"\n",
			cp_seg_blk);
	if (dev_write_block(sum_compact, cp_seg_blk)) {
		MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
		goto free_cp_payload;
	}

	/* Prepare and write Segment summary for HOT_NODE */
	memset(sum, 0, sizeof(struct f2fs_summary_block));
	SET_SUM_TYPE((&sum->footer), SUM_TYPE_NODE);

	sum->entries[0].nid = sb->root_ino;
	sum->entries[0].ofs_in_node = 0;

	cp_seg_blk++;
	DBG(1, "\tWriting Segment summary for HOT_NODE, at offset 0x%08"PRIx64"\n",
			cp_seg_blk);
	if (dev_write_block(sum, cp_seg_blk)) {
		MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
		goto free_cp_payload;
	}

	/* Fill segment summary for WARM_NODE to zero. */
	memset(sum, 0, sizeof(struct f2fs_summary_block));
	SET_SUM_TYPE((&sum->footer), SUM_TYPE_NODE);

	cp_seg_blk++;
	DBG(1, "\tWriting Segment summary for WARM_NODE, at offset 0x%08"PRIx64"\n",
			cp_seg_blk);
	if (dev_write_block(sum, cp_seg_blk)) {
		MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
		goto free_cp_payload;
	}

	/* Fill segment summary for COLD_NODE to zero. */
	memset(sum, 0, sizeof(struct f2fs_summary_block));
	SET_SUM_TYPE((&sum->footer), SUM_TYPE_NODE);
	cp_seg_blk++;
	DBG(1, "\tWriting Segment summary for COLD_NODE, at offset 0x%08"PRIx64"\n",
			cp_seg_blk);
	if (dev_write_block(sum, cp_seg_blk)) {
		MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
		goto free_cp_payload;
	}

	/* cp page2 */
	cp_seg_blk++;
	DBG(1, "\tWriting cp page2, at offset 0x%08"PRIx64"\n", cp_seg_blk);
	if (dev_write_block(cp, cp_seg_blk)) {
		MSG(1, "\tError: While writing the cp to disk!!!\n");
		goto free_cp_payload;
	}

	/* write NAT bits, if possible */
	if (flags & CP_NAT_BITS_FLAG) {
		uint32_t i;

		*(__le64 *)nat_bits = get_cp_crc(cp);
		empty_nat_bits = nat_bits + 8 + nat_bits_bytes;
		memset(empty_nat_bits, 0xff, nat_bits_bytes);
		test_and_clear_bit_le(0, empty_nat_bits);

		/* write the last blocks in cp pack */
		cp_seg_blk = get_sb(segment0_blkaddr) + (1 <<
				get_sb(log_blocks_per_seg)) - nat_bits_blocks;

		DBG(1, "\tWriting NAT bits pages, at offset 0x%08"PRIx64"\n",
					cp_seg_blk);

		for (i = 0; i < nat_bits_blocks; i++) {
			if (dev_write_block(nat_bits + i *
						F2FS_BLKSIZE, cp_seg_blk + i)) {
				MSG(1, "\tError: write NAT bits to disk!!!\n");
				goto free_cp_payload;
			}
		}
	}

	/*
	 * cp page 1 of checkpoint pack 2:
	 * initialize the other checkpoint pack with version zero.
	 */
	cp->checkpoint_ver = 0;

	crc = f2fs_cal_crc32(F2FS_SUPER_MAGIC, cp, CHECKSUM_OFFSET);
	*((__le32 *)((unsigned char *)cp + CHECKSUM_OFFSET)) =
							cpu_to_le32(crc);
	cp_seg_blk = get_sb(segment0_blkaddr) + c.blks_per_seg;
	DBG(1, "\tWriting cp page 1 of checkpoint pack 2, at offset 0x%08"PRIx64"\n",
				cp_seg_blk);
	if (dev_write_block(cp, cp_seg_blk)) {
		MSG(1, "\tError: While writing the cp to disk!!!\n");
		goto free_cp_payload;
	}

	for (i = 0; i < get_sb(cp_payload); i++) {
		cp_seg_blk++;
		if (dev_fill_block(cp_payload, cp_seg_blk)) {
			MSG(1, "\tError: While zeroing out the sit bitmap area "
					"on disk!!!\n");
			goto free_cp_payload;
		}
	}

	/* cp page 2 of check point pack 2 */
	cp_seg_blk += (le32_to_cpu(cp->cp_pack_total_block_count) -
					get_sb(cp_payload) - 1);
	DBG(1, "\tWriting cp page 2 of checkpoint pack 2, at offset 0x%08"PRIx64"\n",
				cp_seg_blk);
	if (dev_write_block(cp, cp_seg_blk)) {
		MSG(1, "\tError: While writing the cp to disk!!!\n");
		goto free_cp_payload;
	}

	ret = 0;

free_cp_payload:
	free(cp_payload);
free_nat_bits:
	free(nat_bits);
free_sum_compact:
	free(sum_compact);
free_sum:
	free(sum);
free_cp:
	free(cp);
	return ret;
}

static int f2fs_write_super_block(void)
{
	int index;
	u_int8_t *zero_buff;

	zero_buff = calloc(F2FS_BLKSIZE, 1);
	if (zero_buff == NULL) {
		MSG(1, "\tError: Calloc Failed for super_blk buffer!!!\n");
		return -1;
	}

	memcpy(zero_buff + F2FS_SUPER_OFFSET, sb, sizeof(*sb));
	DBG(1, "\tWriting super block, at offset 0x%08x\n", 0);
	for (index = 0; index < 2; index++) {
		if (dev_write_block(zero_buff, index)) {
			MSG(1, "\tError: While writing super_blk "
					"on disk!!! index : %d\n", index);
			free(zero_buff);
			return -1;
		}
	}

	free(zero_buff);
	return 0;
}

#ifndef WITH_ANDROID
static int discard_obsolete_dnode(struct f2fs_node *raw_node, u_int64_t offset)
{
	u_int64_t next_blkaddr = 0;
	u_int64_t root_inode_pos = get_sb(main_blkaddr);

	/* only root inode was written before truncating dnodes */
	root_inode_pos += c.cur_seg[CURSEG_HOT_NODE] * c.blks_per_seg;

	if (c.zoned_mode)
		return 0;
	do {
		if (offset < get_sb(main_blkaddr) ||
			offset >= get_sb(main_blkaddr) + get_sb(block_count))
			break;

		if (dev_read_block(raw_node, offset)) {
			MSG(1, "\tError: While traversing direct node!!!\n");
			return -1;
		}

		next_blkaddr = le32_to_cpu(raw_node->footer.next_blkaddr);
		memset(raw_node, 0, F2FS_BLKSIZE);

		DBG(1, "\tDiscard dnode, at offset 0x%08"PRIx64"\n", offset);
		if (dev_write_block(raw_node, offset)) {
			MSG(1, "\tError: While discarding direct node!!!\n");
			return -1;
		}
		offset = next_blkaddr;
		/* should avoid recursive chain due to stale data */
		if (offset == root_inode_pos)
			break;
	} while (1);

	return 0;
}
#endif

static int f2fs_write_root_inode(void)
{
	struct f2fs_node *raw_node = NULL;
	u_int64_t blk_size_bytes, data_blk_nor;
	u_int64_t main_area_node_seg_blk_offset = 0;

	raw_node = calloc(F2FS_BLKSIZE, 1);
	if (raw_node == NULL) {
		MSG(1, "\tError: Calloc Failed for raw_node!!!\n");
		return -1;
	}

	raw_node->footer.nid = sb->root_ino;
	raw_node->footer.ino = sb->root_ino;
	raw_node->footer.cp_ver = cpu_to_le64(1);
	raw_node->footer.next_blkaddr = cpu_to_le32(
			get_sb(main_blkaddr) +
			c.cur_seg[CURSEG_HOT_NODE] *
			c.blks_per_seg + 1);

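	/* 0x41ed == S_IFDIR | 0755: the root inode is a directory */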
	raw_node->i.i_mode = cpu_to_le16(0x41ed);
	raw_node->i.i_links = cpu_to_le32(2);
	raw_node->i.i_uid = cpu_to_le32(getuid());
	raw_node->i.i_gid = cpu_to_le32(getgid());

	blk_size_bytes = 1 << get_sb(log_blocksize);
	raw_node->i.i_size = cpu_to_le64(1 * blk_size_bytes); /* dentry */
	raw_node->i.i_blocks = cpu_to_le64(2);

	raw_node->i.i_atime = cpu_to_le32(time(NULL));
	raw_node->i.i_atime_nsec = 0;
	raw_node->i.i_ctime = cpu_to_le32(time(NULL));
	raw_node->i.i_ctime_nsec = 0;
	raw_node->i.i_mtime = cpu_to_le32(time(NULL));
	raw_node->i.i_mtime_nsec = 0;
	raw_node->i.i_generation = 0;
	raw_node->i.i_xattr_nid = 0;
	raw_node->i.i_flags = 0;
	raw_node->i.i_current_depth = cpu_to_le32(1);
	raw_node->i.i_dir_level = DEF_DIR_LEVEL;

	data_blk_nor = get_sb(main_blkaddr) +
		c.cur_seg[CURSEG_HOT_DATA] * c.blks_per_seg;
	raw_node->i.i_addr[0] = cpu_to_le32(data_blk_nor);

	raw_node->i.i_ext.fofs = 0;
	raw_node->i.i_ext.blk_addr = 0;
	raw_node->i.i_ext.len = 0;

	main_area_node_seg_blk_offset = get_sb(main_blkaddr);
	main_area_node_seg_blk_offset += c.cur_seg[CURSEG_HOT_NODE] *
					c.blks_per_seg;

	DBG(1, "\tWriting root inode (hot node), %x %x %x at offset 0x%08"PRIx64"\n",
			get_sb(main_blkaddr),
			c.cur_seg[CURSEG_HOT_NODE],
			c.blks_per_seg, main_area_node_seg_blk_offset);
	if (dev_write_block(raw_node, main_area_node_seg_blk_offset)) {
		MSG(1, "\tError: While writing the raw_node to disk!!!\n");
		free(raw_node);
		return -1;
	}

	/* avoid power-off-recovery based on roll-forward policy */
	main_area_node_seg_blk_offset = get_sb(main_blkaddr);
	main_area_node_seg_blk_offset += c.cur_seg[CURSEG_WARM_NODE] *
					c.blks_per_seg;

#ifndef WITH_ANDROID
	if (discard_obsolete_dnode(raw_node, main_area_node_seg_blk_offset)) {
		free(raw_node);
		return -1;
	}
#endif

	free(raw_node);
	return 0;
}

static int f2fs_update_nat_root(void)
{
	struct f2fs_nat_block *nat_blk = NULL;
	u_int64_t nat_seg_blk_offset = 0;

	nat_blk = calloc(F2FS_BLKSIZE, 1);
	if (nat_blk == NULL) {
		MSG(1, "\tError: Calloc Failed for nat_blk!!!\n");
		return -1;
	}

	/* update root */
	nat_blk->entries[get_sb(root_ino)].block_addr = cpu_to_le32(
		get_sb(main_blkaddr) +
		c.cur_seg[CURSEG_HOT_NODE] * c.blks_per_seg);
	nat_blk->entries[get_sb(root_ino)].ino = sb->root_ino;

	/* update node nat */
	nat_blk->entries[get_sb(node_ino)].block_addr = cpu_to_le32(1);
	nat_blk->entries[get_sb(node_ino)].ino = sb->node_ino;

	/* update meta nat */
	nat_blk->entries[get_sb(meta_ino)].block_addr = cpu_to_le32(1);
	nat_blk->entries[get_sb(meta_ino)].ino = sb->meta_ino;

	nat_seg_blk_offset = get_sb(nat_blkaddr);

	DBG(1, "\tWriting nat root, at offset 0x%08"PRIx64"\n",
					nat_seg_blk_offset);
	if (dev_write_block(nat_blk, nat_seg_blk_offset)) {
		MSG(1, "\tError: While writing the nat_blk set0 to disk!\n");
		free(nat_blk);
		return -1;
	}

	free(nat_blk);
	return 0;
}

static int f2fs_add_default_dentry_root(void)
{
	struct f2fs_dentry_block *dent_blk = NULL;
	u_int64_t data_blk_offset = 0;

	dent_blk = calloc(F2FS_BLKSIZE, 1);
	if (dent_blk == NULL) {
		MSG(1, "\tError: Calloc Failed for dent_blk!!!\n");
		return -1;
	}

	dent_blk->dentry[0].hash_code = 0;
	dent_blk->dentry[0].ino = sb->root_ino;
	dent_blk->dentry[0].name_len = cpu_to_le16(1);
	dent_blk->dentry[0].file_type = F2FS_FT_DIR;
	memcpy(dent_blk->filename[0], ".", 1);

	dent_blk->dentry[1].hash_code = 0;
	dent_blk->dentry[1].ino = sb->root_ino;
	dent_blk->dentry[1].name_len = cpu_to_le16(2);
	dent_blk->dentry[1].file_type = F2FS_FT_DIR;
	memcpy(dent_blk->filename[1], "..", 2);

	/* bitmap for . and .. */
	test_and_set_bit_le(0, dent_blk->dentry_bitmap);
	test_and_set_bit_le(1, dent_blk->dentry_bitmap);
	data_blk_offset = get_sb(main_blkaddr);
	data_blk_offset += c.cur_seg[CURSEG_HOT_DATA] *
				c.blks_per_seg;

	DBG(1, "\tWriting default dentry root, at offset 0x%08"PRIx64"\n",
				data_blk_offset);
	if (dev_write_block(dent_blk, data_blk_offset)) {
		MSG(1, "\tError: While writing the dentry_blk to disk!!!\n");
		free(dent_blk);
		return -1;
	}

	free(dent_blk);
	return 0;
}

static int f2fs_create_root_dir(void)
{
	int err = 0;

	err = f2fs_write_root_inode();
	if (err < 0) {
		MSG(1, "\tError: Failed to write root inode!!!\n");
		goto exit;
	}

	err = f2fs_update_nat_root();
	if (err < 0) {
		MSG(1, "\tError: Failed to update NAT for root!!!\n");
		goto exit;
	}

	err = f2fs_add_default_dentry_root();
	if (err < 0) {
		MSG(1, "\tError: Failed to add default dentries for root!!!\n");
		goto exit;
	}
exit:
	if (err)
		MSG(1, "\tError: Could not create the root directory!!!\n");

	return err;
}

int f2fs_format_device(void)
{
	int err = 0;

	err = f2fs_prepare_super_block();
	if (err < 0) {
		MSG(0, "\tError: Failed to prepare a super block!!!\n");
		goto exit;
	}

	if (c.trim) {
		err = f2fs_trim_devices();
		if (err < 0) {
			MSG(0, "\tError: Failed to trim whole device!!!\n");
			goto exit;
		}
	}

	err = f2fs_init_sit_area();
	if (err < 0) {
		MSG(0, "\tError: Failed to Initialise the SIT AREA!!!\n");
		goto exit;
	}

	err = f2fs_init_nat_area();
	if (err < 0) {
		MSG(0, "\tError: Failed to Initialise the NAT AREA!!!\n");
		goto exit;
	}

	err = f2fs_create_root_dir();
	if (err < 0) {
		MSG(0, "\tError: Failed to create the root directory!!!\n");
		goto exit;
	}

	err = f2fs_write_check_point_pack();
	if (err < 0) {
		MSG(0, "\tError: Failed to write the check point pack!!!\n");
		goto exit;
	}

	err = f2fs_write_super_block();
	if (err < 0) {
		MSG(0, "\tError: Failed to write the Super Block!!!\n");
		goto exit;
	}
exit:
	if (err)
		MSG(0, "\tError: Could not format the device!!!\n");

	return err;
}