1 /**
2  * f2fs_format.c
3  *
4  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5  *             http://www.samsung.com/
6  *
7  * Dual licensed under the GPL or LGPL version 2 licenses.
8  */
9 #ifndef _LARGEFILE64_SOURCE
10 #define _LARGEFILE64_SOURCE
11 #endif
12 
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <fcntl.h>
16 #include <string.h>
17 #include <unistd.h>
18 #include <f2fs_fs.h>
19 
20 #ifdef HAVE_SYS_STAT_H
21 #include <sys/stat.h>
22 #endif
23 #ifdef HAVE_SYS_MOUNT_H
24 #include <sys/mount.h>
25 #endif
26 #include <time.h>
27 
28 #ifdef HAVE_UUID_UUID_H
29 #include <uuid/uuid.h>
30 #endif
31 #ifndef HAVE_LIBUUID
32 #define uuid_parse(a, b) -1
33 #define uuid_generate(a)
34 #endif
35 
36 #include "quota.h"
37 #include "f2fs_format_utils.h"
38 
39 extern struct f2fs_configuration c;
40 struct f2fs_super_block raw_sb;
41 struct f2fs_super_block *sb = &raw_sb;
42 struct f2fs_checkpoint *cp;
43 
44 /* Return first segment number of each area */
45 #define prev_zone(cur)		(c.cur_seg[cur] - c.segs_per_zone)
46 #define next_zone(cur)		(c.cur_seg[cur] + c.segs_per_zone)
47 #define last_zone(cur)		((cur - 1) * c.segs_per_zone)
48 #define last_section(cur)	(cur + (c.secs_per_zone - 1) * c.segs_per_sec)
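/*
 * Illustrative example (values assumed, not fixed here): with
 * segs_per_sec = 1 and secs_per_zone = 4, segs_per_zone is 4, so
 * next_zone(CURSEG_HOT_NODE) is 4 segments past the hot node log and
 * last_zone(total_zones) is the first segment of the last zone.
 */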
49 
50 /* Return time fixed by the user or current time by default */
51 #define mkfs_time ((c.fixed_time == -1) ? time(NULL) : c.fixed_time)
52 
53 const char *media_ext_lists[] = {
54 	/* common prefix */
55 	"mp", // Covers mp3, mp4, mpeg, mpg
56 	"wm", // Covers wma, wmb, wmv
57 	"og", // Covers oga, ogg, ogm, ogv
58 	"jp", // Covers jpg, jpeg, jp2
59 
60 	/* video */
61 	"avi",
62 	"m4v",
63 	"m4p",
64 	"mkv",
65 	"mov",
66 	"webm",
67 
68 	/* audio */
69 	"wav",
70 	"m4a",
71 	"3gp",
72 	"opus",
73 	"flac",
74 
75 	/* image */
76 	"gif",
77 	"png",
78 	"svg",
79 	"webp",
80 
81 	/* archives */
82 	"jar",
83 	"deb",
84 	"iso",
85 	"gz",
86 	"xz",
87 	"zst",
88 
89 	/* others */
90 	"pdf",
91 	"pyc", // Python bytecode
92 	"ttc",
93 	"ttf",
94 	"exe",
95 
96 	/* android */
97 	"apk",
98 	"cnt", // Image alias
99 	"exo", // YouTube
100 	"odex", // Android RunTime
101 	"vdex", // Android RunTime
102 	"so",
103 
104 	NULL
105 };
106 
107 const char *hot_ext_lists[] = {
108 	"db",
109 
110 #ifndef WITH_ANDROID
111 	/* Virtual machines */
112 	"vmdk", // VMware or VirtualBox
113 	"vdi", // VirtualBox
114 	"qcow2", // QEMU
115 #endif
116 	NULL
117 };
118 
119 const char **default_ext_list[] = {
120 	media_ext_lists,
121 	hot_ext_lists
122 };
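/*
 * Note: media_ext_lists seeds the cold-file extension table and
 * hot_ext_lists the hot-file table. cure_extension_list() below merges
 * any user-supplied lists (c.extension_list[0]/[1], filled from the
 * mkfs.f2fs command line) on top of these defaults, capped at
 * F2FS_MAX_EXTENSION entries of at most F2FS_EXTENSION_LEN - 1
 * characters each.
 */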
123 
124 static bool is_extension_exist(const char *name)
125 {
126 	int i;
127 
128 	for (i = 0; i < F2FS_MAX_EXTENSION; i++) {
129 		char *ext = (char *)sb->extension_list[i];
130 		if (!strcmp(ext, name))
131 			return 1;
132 	}
133 
134 	return 0;
135 }
136 
137 static void cure_extension_list(void)
138 {
139 	const char **extlist;
140 	char *ext_str;
141 	char *ue;
142 	int name_len;
143 	int i, pos = 0;
144 
145 	set_sb(extension_count, 0);
146 	memset(sb->extension_list, 0, sizeof(sb->extension_list));
147 
148 	for (i = 0; i < 2; i++) {
149 		ext_str = c.extension_list[i];
150 		extlist = default_ext_list[i];
151 
152 		while (*extlist) {
153 			name_len = strlen(*extlist);
154 			memcpy(sb->extension_list[pos++], *extlist, name_len);
155 			extlist++;
156 		}
157 		if (i == 0)
158 			set_sb(extension_count, pos);
159 		else
160 			sb->hot_ext_count = pos - get_sb(extension_count);
161 
162 		if (!ext_str)
163 			continue;
164 
165 		/* add user ext list */
166 		ue = strtok(ext_str, ", ");
167 		while (ue != NULL) {
168 			name_len = strlen(ue);
169 			if (name_len >= F2FS_EXTENSION_LEN) {
170 				MSG(0, "\tWarn: Extension name (%s) is too long\n", ue);
171 				goto next;
172 			}
173 			if (!is_extension_exist(ue))
174 				memcpy(sb->extension_list[pos++], ue, name_len);
175 next:
176 			ue = strtok(NULL, ", ");
177 			if (pos >= F2FS_MAX_EXTENSION)
178 				break;
179 		}
180 
181 		if (i == 0)
182 			set_sb(extension_count, pos);
183 		else
184 			sb->hot_ext_count = pos - get_sb(extension_count);
185 
186 		free(c.extension_list[i]);
187 	}
188 }
189 
190 static void verify_cur_segs(void)
191 {
192 	int i, j;
193 	int reorder = 0;
194 
195 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
196 		for (j = i + 1; j < NR_CURSEG_TYPE; j++) {
197 			if (c.cur_seg[i] == c.cur_seg[j]) {
198 				reorder = 1;
199 				break;
200 			}
201 		}
202 	}
203 
204 	if (!reorder)
205 		return;
206 
207 	c.cur_seg[0] = 0;
208 	for (i = 1; i < NR_CURSEG_TYPE; i++)
209 		c.cur_seg[i] = next_zone(i - 1);
210 }
211 
212 static int f2fs_prepare_super_block(void)
213 {
214 	uint32_t blk_size_bytes;
215 	uint32_t log_sectorsize, log_sectors_per_block;
216 	uint32_t log_blocksize, log_blks_per_seg;
217 	uint32_t segment_size_bytes, zone_size_bytes;
218 	uint32_t sit_segments, nat_segments;
219 	uint32_t blocks_for_sit, blocks_for_nat, blocks_for_ssa;
220 	uint32_t total_valid_blks_available;
221 	uint64_t zone_align_start_offset, diff;
222 	uint64_t total_meta_zones, total_meta_segments;
223 	uint32_t sit_bitmap_size, max_sit_bitmap_size;
224 	uint32_t max_nat_bitmap_size, max_nat_segments;
225 	uint32_t total_zones, avail_zones;
226 	enum quota_type qtype;
227 	int i;
228 
229 	set_sb(magic, F2FS_SUPER_MAGIC);
230 	set_sb(major_ver, F2FS_MAJOR_VERSION);
231 	set_sb(minor_ver, F2FS_MINOR_VERSION);
232 
233 	log_sectorsize = log_base_2(c.sector_size);
234 	log_sectors_per_block = log_base_2(c.sectors_per_blk);
235 	log_blocksize = log_sectorsize + log_sectors_per_block;
236 	log_blks_per_seg = log_base_2(c.blks_per_seg);
237 
238 	set_sb(log_sectorsize, log_sectorsize);
239 	set_sb(log_sectors_per_block, log_sectors_per_block);
240 
241 	set_sb(log_blocksize, log_blocksize);
242 	set_sb(log_blocks_per_seg, log_blks_per_seg);
243 
244 	set_sb(segs_per_sec, c.segs_per_sec);
245 	set_sb(secs_per_zone, c.secs_per_zone);
246 
247 	blk_size_bytes = 1 << log_blocksize;
248 	segment_size_bytes = blk_size_bytes * c.blks_per_seg;
249 	zone_size_bytes =
250 		blk_size_bytes * c.secs_per_zone *
251 		c.segs_per_sec * c.blks_per_seg;
252 
253 	set_sb(checksum_offset, 0);
254 
255 	set_sb(block_count, c.total_sectors >> log_sectors_per_block);
256 
257 	zone_align_start_offset =
258 		((uint64_t) c.start_sector * DEFAULT_SECTOR_SIZE +
259 		2 * F2FS_BLKSIZE + zone_size_bytes - 1) /
260 		zone_size_bytes * zone_size_bytes -
261 		(uint64_t) c.start_sector * DEFAULT_SECTOR_SIZE;
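	/*
	 * Worked example with assumed numbers (4 KiB blocks, 2 MiB zones,
	 * start_sector = 0): the start offset plus the two reserved
	 * superblock blocks (2 * F2FS_BLKSIZE) is rounded up to the next
	 * zone boundary, so zone_align_start_offset becomes 2 MiB and
	 * segment0 starts at the second zone of the device.
	 */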
262 
263 	if (c.feature & cpu_to_le32(F2FS_FEATURE_RO))
264 		zone_align_start_offset = 8192;
265 
266 	if (c.start_sector % DEFAULT_SECTORS_PER_BLOCK) {
267 		MSG(1, "\t%s: Align start sector number to the page unit\n",
268 				c.zoned_mode ? "FAIL" : "WARN");
269 		MSG(1, "\ti.e., start sector: %d, ofs:%d (sects/page: %d)\n",
270 				c.start_sector,
271 				c.start_sector % DEFAULT_SECTORS_PER_BLOCK,
272 				DEFAULT_SECTORS_PER_BLOCK);
273 		if (c.zoned_mode)
274 			return -1;
275 	}
276 
277 	if (c.zoned_mode && c.ndevs > 1)
278 		zone_align_start_offset +=
279 			(c.devices[0].total_sectors * c.sector_size) % zone_size_bytes;
280 
281 	set_sb(segment0_blkaddr, zone_align_start_offset / blk_size_bytes);
282 	sb->cp_blkaddr = sb->segment0_blkaddr;
283 
284 	MSG(0, "Info: zone aligned segment0 blkaddr: %u\n",
285 					get_sb(segment0_blkaddr));
286 
287 	if (c.zoned_mode &&
288 		((c.ndevs == 1 &&
289 			(get_sb(segment0_blkaddr) + c.start_sector /
290 			DEFAULT_SECTORS_PER_BLOCK) % c.zone_blocks) ||
291 		(c.ndevs > 1 &&
292 			c.devices[1].start_blkaddr % c.zone_blocks))) {
293 		MSG(1, "\tError: Unaligned segment0 block address %u\n",
294 				get_sb(segment0_blkaddr));
295 		return -1;
296 	}
297 
298 	for (i = 0; i < c.ndevs; i++) {
299 		if (i == 0) {
300 			c.devices[i].total_segments =
301 				(c.devices[i].total_sectors *
302 				c.sector_size - zone_align_start_offset) /
303 				segment_size_bytes;
304 			c.devices[i].start_blkaddr = 0;
305 			c.devices[i].end_blkaddr = c.devices[i].total_segments *
306 						c.blks_per_seg - 1 +
307 						sb->segment0_blkaddr;
308 		} else {
309 			c.devices[i].total_segments =
310 				c.devices[i].total_sectors /
311 				(c.sectors_per_blk * c.blks_per_seg);
312 			c.devices[i].start_blkaddr =
313 					c.devices[i - 1].end_blkaddr + 1;
314 			c.devices[i].end_blkaddr = c.devices[i].start_blkaddr +
315 					c.devices[i].total_segments *
316 					c.blks_per_seg - 1;
317 		}
318 		if (c.ndevs > 1) {
319 			memcpy(sb->devs[i].path, c.devices[i].path, MAX_PATH_LEN);
320 			sb->devs[i].total_segments =
321 					cpu_to_le32(c.devices[i].total_segments);
322 		}
323 
324 		c.total_segments += c.devices[i].total_segments;
325 	}
326 	set_sb(segment_count, (c.total_segments / c.segs_per_zone *
327 						c.segs_per_zone));
328 	set_sb(segment_count_ckpt, F2FS_NUMBER_OF_CHECKPOINT_PACK);
329 
330 	set_sb(sit_blkaddr, get_sb(segment0_blkaddr) +
331 			get_sb(segment_count_ckpt) * c.blks_per_seg);
332 
333 	blocks_for_sit = SIZE_ALIGN(get_sb(segment_count), SIT_ENTRY_PER_BLOCK);
334 
335 	sit_segments = SEG_ALIGN(blocks_for_sit);
336 
337 	set_sb(segment_count_sit, sit_segments * 2);
338 
339 	set_sb(nat_blkaddr, get_sb(sit_blkaddr) + get_sb(segment_count_sit) *
340 			c.blks_per_seg);
341 
342 	total_valid_blks_available = (get_sb(segment_count) -
343 			(get_sb(segment_count_ckpt) +
344 			get_sb(segment_count_sit))) * c.blks_per_seg;
345 
346 	blocks_for_nat = SIZE_ALIGN(total_valid_blks_available,
347 			NAT_ENTRY_PER_BLOCK);
348 
349 	if (c.large_nat_bitmap) {
350 		nat_segments = SEG_ALIGN(blocks_for_nat) *
351 						DEFAULT_NAT_ENTRY_RATIO / 100;
352 		set_sb(segment_count_nat, nat_segments ? nat_segments : 1);
353 		max_nat_bitmap_size = (get_sb(segment_count_nat) <<
354 						log_blks_per_seg) / 8;
355 		set_sb(segment_count_nat, get_sb(segment_count_nat) * 2);
356 	} else {
357 		set_sb(segment_count_nat, SEG_ALIGN(blocks_for_nat));
358 		max_nat_bitmap_size = 0;
359 	}
360 
361 	/*
362 	 * The number of NAT segments should not exceed a threshold:
363 	 * it determines the size of the NAT bitmap area in a CP page,
364 	 * so the threshold is chosen so that one CP page does not overflow.
365 	 */
366 	sit_bitmap_size = ((get_sb(segment_count_sit) / 2) <<
367 				log_blks_per_seg) / 8;
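	/*
	 * Example with assumed defaults (512 blocks per segment, so
	 * log_blks_per_seg = 9): segment_count_sit = 10 means the active SIT
	 * copy spans 5 segments = 2560 blocks, and one bit per SIT block
	 * gives a 2560 / 8 = 320 byte bitmap.
	 */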
368 
369 	if (sit_bitmap_size > MAX_SIT_BITMAP_SIZE)
370 		max_sit_bitmap_size = MAX_SIT_BITMAP_SIZE;
371 	else
372 		max_sit_bitmap_size = sit_bitmap_size;
373 
374 	if (c.large_nat_bitmap) {
375 		/* use cp_payload if free space of f2fs_checkpoint is not enough */
376 		if (max_sit_bitmap_size + max_nat_bitmap_size >
377 						MAX_BITMAP_SIZE_IN_CKPT) {
378 			uint32_t diff =  max_sit_bitmap_size +
379 						max_nat_bitmap_size -
380 						MAX_BITMAP_SIZE_IN_CKPT;
381 			set_sb(cp_payload, F2FS_BLK_ALIGN(diff));
382 		} else {
383 			set_sb(cp_payload, 0);
384 		}
385 	} else {
386 		/*
387 		 * At least one segment should be reserved for NAT.
388 		 * When the SIT bitmap is too large, the CP area must be expanded,
389 		 * which requires more pages per CP pack.
390 		 */
391 		if (max_sit_bitmap_size > MAX_SIT_BITMAP_SIZE_IN_CKPT) {
392 			max_nat_bitmap_size = MAX_BITMAP_SIZE_IN_CKPT;
393 			set_sb(cp_payload, F2FS_BLK_ALIGN(max_sit_bitmap_size));
394 		} else {
395 			max_nat_bitmap_size = MAX_BITMAP_SIZE_IN_CKPT -
396 							max_sit_bitmap_size;
397 			set_sb(cp_payload, 0);
398 		}
399 		max_nat_segments = (max_nat_bitmap_size * 8) >> log_blks_per_seg;
400 
401 		if (get_sb(segment_count_nat) > max_nat_segments)
402 			set_sb(segment_count_nat, max_nat_segments);
403 
404 		set_sb(segment_count_nat, get_sb(segment_count_nat) * 2);
405 	}
406 
407 	set_sb(ssa_blkaddr, get_sb(nat_blkaddr) + get_sb(segment_count_nat) *
408 			c.blks_per_seg);
409 
410 	total_valid_blks_available = (get_sb(segment_count) -
411 			(get_sb(segment_count_ckpt) +
412 			get_sb(segment_count_sit) +
413 			get_sb(segment_count_nat))) *
414 			c.blks_per_seg;
415 
416 	if (c.feature & cpu_to_le32(F2FS_FEATURE_RO))
417 		blocks_for_ssa = 0;
418 	else
419 		blocks_for_ssa = total_valid_blks_available /
420 				c.blks_per_seg + 1;
421 
422 	set_sb(segment_count_ssa, SEG_ALIGN(blocks_for_ssa));
423 
424 	total_meta_segments = get_sb(segment_count_ckpt) +
425 		get_sb(segment_count_sit) +
426 		get_sb(segment_count_nat) +
427 		get_sb(segment_count_ssa);
428 	diff = total_meta_segments % (c.segs_per_zone);
429 	if (diff)
430 		set_sb(segment_count_ssa, get_sb(segment_count_ssa) +
431 			(c.segs_per_zone - diff));
432 
433 	total_meta_zones = ZONE_ALIGN(total_meta_segments *
434 						c.blks_per_seg);
435 
436 	set_sb(main_blkaddr, get_sb(segment0_blkaddr) + total_meta_zones *
437 				c.segs_per_zone * c.blks_per_seg);
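	/*
	 * At this point the layout computed above is, in block-address order:
	 * superblock copies (blocks 0-1), segment0 holding the two checkpoint
	 * packs, then the SIT, NAT and SSA areas, then the main area, with
	 * the metadata rounded up to whole zones so that main_blkaddr falls
	 * on a zone boundary.
	 */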
438 
439 	if (c.zoned_mode) {
440 		/*
441 		 * Make sure there is enough randomly writeable
442 		 * space at the beginning of the disk.
443 		 */
444 		unsigned long main_blkzone = get_sb(main_blkaddr) / c.zone_blocks;
445 
446 		if (c.devices[0].zoned_model == F2FS_ZONED_HM &&
447 				c.devices[0].nr_rnd_zones < main_blkzone) {
448 			MSG(0, "\tError: Device does not have enough random "
449 					"write zones for F2FS volume (%lu needed)\n",
450 					main_blkzone);
451 			return -1;
452 		}
453 		/*
454 		 * Check that the conventional device has enough space
455 		 * to accommodate all metadata; a zoned device must not
456 		 * overlap the metadata area.
457 		 */
458 		for (i = 1; i < c.ndevs; i++) {
459 			if (c.devices[i].zoned_model != F2FS_ZONED_NONE &&
460 				c.devices[i].start_blkaddr < get_sb(main_blkaddr)) {
461 				MSG(0, "\tError: Conventional device %s is too small,"
462 					" (%"PRIu64" MiB needed).\n", c.devices[0].path,
463 					(get_sb(main_blkaddr) -
464 					c.devices[i].start_blkaddr) >> 8);
465 				return -1;
466 			}
467 		}
468 	}
469 
470 	total_zones = get_sb(segment_count) / (c.segs_per_zone) -
471 							total_meta_zones;
472 	if (total_zones == 0)
473 		goto too_small;
474 	set_sb(section_count, total_zones * c.secs_per_zone);
475 
476 	set_sb(segment_count_main, get_sb(section_count) * c.segs_per_sec);
477 
478 	/*
479 	 * Let's determine the best reserved and overprovisioned space.
480 	 * For Zoned device, if zone capacity less than zone size, the segments
481 	 * starting after the zone capacity are unusable in each zone. So get
482 	 * overprovision ratio and reserved seg count based on avg usable
483 	 * segs_per_sec.
484 	 */
485 	if (c.overprovision == 0)
486 		c.overprovision = get_best_overprovision(sb);
487 
488 	c.reserved_segments =
489 			(100 / c.overprovision + 1 + NR_CURSEG_TYPE) *
490 			round_up(f2fs_get_usable_segments(sb), get_sb(section_count));
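	/*
	 * Illustrative numbers: with overprovision = 5 (percent) the
	 * multiplier is 100 / 5 + 1 + NR_CURSEG_TYPE = 27, and
	 * round_up(usable segments, section count) is roughly the number of
	 * usable segments per section, so about 27 sections' worth of
	 * segments end up reserved.
	 */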
491 
492 	if (c.feature & cpu_to_le32(F2FS_FEATURE_RO)) {
493 		c.overprovision = 0;
494 		c.reserved_segments = 0;
495 	}
496 	if ((!(c.feature & cpu_to_le32(F2FS_FEATURE_RO)) &&
497 		c.overprovision == 0) ||
498 		c.total_segments < F2FS_MIN_SEGMENTS ||
499 		(c.devices[0].total_sectors *
500 			c.sector_size < zone_align_start_offset) ||
501 		(get_sb(segment_count_main) - NR_CURSEG_TYPE) <
502 						c.reserved_segments) {
503 		goto too_small;
504 	}
505 
506 	if (c.vol_uuid) {
507 		if (uuid_parse(c.vol_uuid, sb->uuid)) {
508 			MSG(0, "\tError: supplied string is not a valid UUID\n");
509 			return -1;
510 		}
511 	} else {
512 		uuid_generate(sb->uuid);
513 	}
514 
515 	/* precompute checksum seed for metadata */
516 	if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
517 		c.chksum_seed = f2fs_cal_crc32(~0, sb->uuid, sizeof(sb->uuid));
518 
519 	utf8_to_utf16(sb->volume_name, (const char *)c.vol_label,
520 				MAX_VOLUME_NAME, strlen(c.vol_label));
521 	set_sb(node_ino, 1);
522 	set_sb(meta_ino, 2);
523 	set_sb(root_ino, 3);
524 	c.next_free_nid = 4;
525 
526 	for (qtype = 0; qtype < F2FS_MAX_QUOTAS; qtype++) {
527 		if (!((1 << qtype) & c.quota_bits))
528 			continue;
529 		sb->qf_ino[qtype] = cpu_to_le32(c.next_free_nid++);
530 		MSG(0, "Info: add quota type = %u => %u\n",
531 					qtype, c.next_free_nid - 1);
532 	}
533 
534 	if (c.feature & cpu_to_le32(F2FS_FEATURE_LOST_FOUND))
535 		c.lpf_ino = c.next_free_nid++;
536 
537 	if (c.feature & cpu_to_le32(F2FS_FEATURE_RO))
538 		avail_zones = 2;
539 	else
540 		avail_zones = 6;
541 
542 	if (total_zones <= avail_zones) {
543 		MSG(1, "\tError: %d zones: Need more zones "
544 			"by shrinking zone size\n", total_zones);
545 		return -1;
546 	}
547 
548 	if (c.feature & cpu_to_le32(F2FS_FEATURE_RO)) {
549 		c.cur_seg[CURSEG_HOT_NODE] = last_section(last_zone(total_zones));
550 		c.cur_seg[CURSEG_WARM_NODE] = 0;
551 		c.cur_seg[CURSEG_COLD_NODE] = 0;
552 		c.cur_seg[CURSEG_HOT_DATA] = 0;
553 		c.cur_seg[CURSEG_COLD_DATA] = 0;
554 		c.cur_seg[CURSEG_WARM_DATA] = 0;
555 	} else if (c.heap) {
556 		c.cur_seg[CURSEG_HOT_NODE] =
557 				last_section(last_zone(total_zones));
558 		c.cur_seg[CURSEG_WARM_NODE] = prev_zone(CURSEG_HOT_NODE);
559 		c.cur_seg[CURSEG_COLD_NODE] = prev_zone(CURSEG_WARM_NODE);
560 		c.cur_seg[CURSEG_HOT_DATA] = prev_zone(CURSEG_COLD_NODE);
561 		c.cur_seg[CURSEG_COLD_DATA] = 0;
562 		c.cur_seg[CURSEG_WARM_DATA] = next_zone(CURSEG_COLD_DATA);
563 	} else if (c.zoned_mode) {
564 		c.cur_seg[CURSEG_HOT_NODE] = 0;
565 		c.cur_seg[CURSEG_WARM_NODE] = next_zone(CURSEG_HOT_NODE);
566 		c.cur_seg[CURSEG_COLD_NODE] = next_zone(CURSEG_WARM_NODE);
567 		c.cur_seg[CURSEG_HOT_DATA] = next_zone(CURSEG_COLD_NODE);
568 		c.cur_seg[CURSEG_WARM_DATA] = next_zone(CURSEG_HOT_DATA);
569 		c.cur_seg[CURSEG_COLD_DATA] = next_zone(CURSEG_WARM_DATA);
570 	} else {
571 		c.cur_seg[CURSEG_HOT_NODE] = 0;
572 		c.cur_seg[CURSEG_WARM_NODE] = next_zone(CURSEG_HOT_NODE);
573 		c.cur_seg[CURSEG_COLD_NODE] = next_zone(CURSEG_WARM_NODE);
574 		c.cur_seg[CURSEG_HOT_DATA] = next_zone(CURSEG_COLD_NODE);
575 		c.cur_seg[CURSEG_COLD_DATA] =
576 				max(last_zone((total_zones >> 2)),
577 					next_zone(CURSEG_HOT_DATA));
578 		c.cur_seg[CURSEG_WARM_DATA] =
579 				max(last_zone((total_zones >> 1)),
580 					next_zone(CURSEG_COLD_DATA));
581 	}
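	/*
	 * Summary of the placements above (a description, not a format
	 * requirement): the read-only layout keeps only the hot node log at
	 * the tail; the heap layout grows the node and hot data logs down
	 * from the end and the cold/warm data logs up from the start; zoned
	 * mode uses consecutive zones from the front; the default layout
	 * keeps node logs at the front and pushes the cold/warm data logs to
	 * roughly 1/4 and 1/2 of the main area.
	 */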
582 
583 	/* if there is redundancy, reassign it */
584 	if (!(c.feature & cpu_to_le32(F2FS_FEATURE_RO)))
585 		verify_cur_segs();
586 
587 	cure_extension_list();
588 
589 	/* get kernel version */
590 	if (c.kd >= 0) {
591 		dev_read_version(c.version, 0, VERSION_LEN);
592 		get_kernel_version(c.version);
593 	} else {
594 		get_kernel_uname_version(c.version);
595 	}
596 	MSG(0, "Info: format version with\n  \"%s\"\n", c.version);
597 
598 	memcpy(sb->version, c.version, VERSION_LEN);
599 	memcpy(sb->init_version, c.version, VERSION_LEN);
600 
601 	if (c.feature & cpu_to_le32(F2FS_FEATURE_CASEFOLD)) {
602 		set_sb(s_encoding, c.s_encoding);
603 		set_sb(s_encoding_flags, c.s_encoding_flags);
604 	}
605 
606 	sb->feature = c.feature;
607 
608 	if (get_sb(feature) & F2FS_FEATURE_SB_CHKSUM) {
609 		set_sb(checksum_offset, SB_CHKSUM_OFFSET);
610 		set_sb(crc, f2fs_cal_crc32(F2FS_SUPER_MAGIC, sb,
611 						SB_CHKSUM_OFFSET));
612 		MSG(1, "Info: SB CRC is set: offset (%d), crc (0x%x)\n",
613 					get_sb(checksum_offset), get_sb(crc));
614 	}
615 
616 	return 0;
617 
618 too_small:
619 	MSG(0, "\tError: Device size is not sufficient for F2FS volume\n");
620 	return -1;
621 }
622 
623 static int f2fs_init_sit_area(void)
624 {
625 	uint32_t blk_size, seg_size;
626 	uint32_t index = 0;
627 	uint64_t sit_seg_addr = 0;
628 	uint8_t *zero_buf = NULL;
629 
630 	blk_size = 1 << get_sb(log_blocksize);
631 	seg_size = (1 << get_sb(log_blocks_per_seg)) * blk_size;
632 
633 	zero_buf = calloc(sizeof(uint8_t), seg_size);
634 	if (zero_buf == NULL) {
635 		MSG(1, "\tError: Calloc Failed for sit_zero_buf!!!\n");
636 		return -1;
637 	}
638 
639 	sit_seg_addr = get_sb(sit_blkaddr);
640 	sit_seg_addr *= blk_size;
641 
642 	DBG(1, "\tFilling sit area at offset 0x%08"PRIx64"\n", sit_seg_addr);
643 	for (index = 0; index < (get_sb(segment_count_sit) / 2); index++) {
644 		if (dev_fill(zero_buf, sit_seg_addr, seg_size)) {
645 			MSG(1, "\tError: While zeroing out the sit area "
646 					"on disk!!!\n");
647 			free(zero_buf);
648 			return -1;
649 		}
650 		sit_seg_addr += seg_size;
651 	}
652 
653 	free(zero_buf);
654 	return 0;
655 }
656 
657 static int f2fs_init_nat_area(void)
658 {
659 	uint32_t blk_size, seg_size;
660 	uint32_t index = 0;
661 	uint64_t nat_seg_addr = 0;
662 	uint8_t *nat_buf = NULL;
663 
664 	blk_size = 1 << get_sb(log_blocksize);
665 	seg_size = (1 << get_sb(log_blocks_per_seg)) * blk_size;
666 
667 	nat_buf = calloc(sizeof(uint8_t), seg_size);
668 	if (nat_buf == NULL) {
669 		MSG(1, "\tError: Calloc Failed for nat_zero_blk!!!\n");
670 		return -1;
671 	}
672 
673 	nat_seg_addr = get_sb(nat_blkaddr);
674 	nat_seg_addr *= blk_size;
675 
676 	DBG(1, "\tFilling nat area at offset 0x%08"PRIx64"\n", nat_seg_addr);
677 	for (index = 0; index < get_sb(segment_count_nat) / 2; index++) {
678 		if (dev_fill(nat_buf, nat_seg_addr, seg_size)) {
679 			MSG(1, "\tError: While zeroing out the nat area "
680 					"on disk!!!\n");
681 			free(nat_buf);
682 			return -1;
683 		}
684 		nat_seg_addr = nat_seg_addr + (2 * seg_size);
685 	}
686 
687 	free(nat_buf);
688 	return 0 ;
689 }
690 
691 static int f2fs_write_check_point_pack(void)
692 {
693 	struct f2fs_summary_block *sum = NULL;
694 	struct f2fs_journal *journal;
695 	uint32_t blk_size_bytes;
696 	uint32_t nat_bits_bytes, nat_bits_blocks;
697 	unsigned char *nat_bits = NULL, *empty_nat_bits;
698 	uint64_t cp_seg_blk = 0;
699 	uint32_t crc = 0, flags;
700 	unsigned int i;
701 	char *cp_payload = NULL;
702 	char *sum_compact, *sum_compact_p;
703 	struct f2fs_summary *sum_entry;
704 	enum quota_type qtype;
705 	int off;
706 	int ret = -1;
707 
708 	cp = calloc(F2FS_BLKSIZE, 1);
709 	if (cp == NULL) {
710 		MSG(1, "\tError: Calloc failed for f2fs_checkpoint!!!\n");
711 		return ret;
712 	}
713 
714 	sum = calloc(F2FS_BLKSIZE, 1);
715 	if (sum == NULL) {
716 		MSG(1, "\tError: Calloc failed for summary_node!!!\n");
717 		goto free_cp;
718 	}
719 
720 	sum_compact = calloc(F2FS_BLKSIZE, 1);
721 	if (sum_compact == NULL) {
722 		MSG(1, "\tError: Calloc failed for summary buffer!!!\n");
723 		goto free_sum;
724 	}
725 	sum_compact_p = sum_compact;
726 
727 	nat_bits_bytes = get_sb(segment_count_nat) << 5;
728 	nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 +
729 						F2FS_BLKSIZE - 1);
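	/*
	 * Sizing sketch (assuming the default 512 blocks per segment): the
	 * active NAT copy is segment_count_nat / 2 segments, i.e.
	 * segment_count_nat * 256 blocks, so one bit per NAT block needs
	 * segment_count_nat * 32 bytes (the << 5 above); the buffer holds
	 * both a full and an empty nat_bits bitmap plus an 8-byte CRC,
	 * rounded up to whole blocks.
	 */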
730 	nat_bits = calloc(F2FS_BLKSIZE, nat_bits_blocks);
731 	if (nat_bits == NULL) {
732 		MSG(1, "\tError: Calloc failed for nat bits buffer!!!\n");
733 		goto free_sum_compact;
734 	}
735 
736 	cp_payload = calloc(F2FS_BLKSIZE, 1);
737 	if (cp_payload == NULL) {
738 		MSG(1, "\tError: Calloc failed for cp_payload!!!\n");
739 		goto free_nat_bits;
740 	}
741 
742 	/* 1. cp page 1 of checkpoint pack 1 */
743 	srand((c.fake_seed) ? 0 : time(NULL));
744 	cp->checkpoint_ver = cpu_to_le64(rand() | 0x1);
745 	set_cp(cur_node_segno[0], c.cur_seg[CURSEG_HOT_NODE]);
746 	set_cp(cur_node_segno[1], c.cur_seg[CURSEG_WARM_NODE]);
747 	set_cp(cur_node_segno[2], c.cur_seg[CURSEG_COLD_NODE]);
748 	set_cp(cur_data_segno[0], c.cur_seg[CURSEG_HOT_DATA]);
749 	set_cp(cur_data_segno[1], c.cur_seg[CURSEG_WARM_DATA]);
750 	set_cp(cur_data_segno[2], c.cur_seg[CURSEG_COLD_DATA]);
751 	for (i = 3; i < MAX_ACTIVE_NODE_LOGS; i++) {
752 		set_cp(cur_node_segno[i], 0xffffffff);
753 		set_cp(cur_data_segno[i], 0xffffffff);
754 	}
755 
756 	set_cp(cur_node_blkoff[0], 1 + c.quota_inum + c.lpf_inum);
757 	set_cp(cur_data_blkoff[0], 1 + c.quota_dnum + c.lpf_dnum);
758 	set_cp(valid_block_count, 2 + c.quota_inum + c.quota_dnum +
759 			c.lpf_inum + c.lpf_dnum);
760 	set_cp(rsvd_segment_count, c.reserved_segments);
761 
762 	/*
763 	 * For zoned devices whose zone capacity is less than the zone size,
764 	 * get the overprovision segment count from the device's usable segments.
765 	 */
766 	set_cp(overprov_segment_count, (f2fs_get_usable_segments(sb) -
767 			get_cp(rsvd_segment_count)) *
768 			c.overprovision / 100);
769 
770 	if (get_cp(overprov_segment_count) < get_cp(rsvd_segment_count))
771 		set_cp(overprov_segment_count, get_cp(rsvd_segment_count));
772 
773 	set_cp(overprov_segment_count, get_cp(overprov_segment_count) +
774 			2 * get_sb(segs_per_sec));
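	/*
	 * Example with assumed numbers: 10000 usable segments, 100 reserved,
	 * overprovision = 5.0 gives (10000 - 100) * 5 / 100 = 495
	 * overprovision segments, raised to at least the reserved count and
	 * then padded by two sections' worth of segments as extra margin.
	 */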
775 
776 	if (f2fs_get_usable_segments(sb) <= get_cp(overprov_segment_count)) {
777 		MSG(0, "\tError: Not enough segments to create F2FS Volume\n");
778 		goto free_cp_payload;
779 	}
780 	MSG(0, "Info: Overprovision ratio = %.3lf%%\n", c.overprovision);
781 	MSG(0, "Info: Overprovision segments = %u (GC reserved = %u)\n",
782 					get_cp(overprov_segment_count),
783 					c.reserved_segments);
784 
785 	/* main segments - reserved segments - (node + data segments) */
786 	if (c.feature & cpu_to_le32(F2FS_FEATURE_RO)) {
787 		set_cp(free_segment_count, f2fs_get_usable_segments(sb) - 2);
788 		set_cp(user_block_count, ((get_cp(free_segment_count) + 2 -
789 			get_cp(overprov_segment_count)) * c.blks_per_seg));
790 	} else {
791 		set_cp(free_segment_count, f2fs_get_usable_segments(sb) - 6);
792 		set_cp(user_block_count, ((get_cp(free_segment_count) + 6 -
793 			get_cp(overprov_segment_count)) * c.blks_per_seg));
794 	}
795 	/* cp page (2), data summaries (1), node summaries (3) */
796 	set_cp(cp_pack_total_block_count, 6 + get_sb(cp_payload));
797 	flags = CP_UMOUNT_FLAG | CP_COMPACT_SUM_FLAG;
798 	if (get_cp(cp_pack_total_block_count) <=
799 			(1 << get_sb(log_blocks_per_seg)) - nat_bits_blocks)
800 		flags |= CP_NAT_BITS_FLAG;
801 
802 	if (c.trimmed)
803 		flags |= CP_TRIMMED_FLAG;
804 
805 	if (c.large_nat_bitmap)
806 		flags |= CP_LARGE_NAT_BITMAP_FLAG;
807 
808 	set_cp(ckpt_flags, flags);
809 	set_cp(cp_pack_start_sum, 1 + get_sb(cp_payload));
810 	set_cp(valid_node_count, 1 + c.quota_inum + c.lpf_inum);
811 	set_cp(valid_inode_count, 1 + c.quota_inum + c.lpf_inum);
812 	set_cp(next_free_nid, c.next_free_nid);
813 	set_cp(sit_ver_bitmap_bytesize, ((get_sb(segment_count_sit) / 2) <<
814 			get_sb(log_blocks_per_seg)) / 8);
815 
816 	set_cp(nat_ver_bitmap_bytesize, ((get_sb(segment_count_nat) / 2) <<
817 			 get_sb(log_blocks_per_seg)) / 8);
818 
819 	if (c.large_nat_bitmap)
820 		set_cp(checksum_offset, CP_MIN_CHKSUM_OFFSET);
821 	else
822 		set_cp(checksum_offset, CP_CHKSUM_OFFSET);
823 
824 	crc = f2fs_checkpoint_chksum(cp);
825 	*((__le32 *)((unsigned char *)cp + get_cp(checksum_offset))) =
826 							cpu_to_le32(crc);
827 
828 	blk_size_bytes = 1 << get_sb(log_blocksize);
829 
830 	if (blk_size_bytes != F2FS_BLKSIZE) {
831 		MSG(1, "\tError: Wrong block size %d / %d!!!\n",
832 					blk_size_bytes, F2FS_BLKSIZE);
833 		goto free_cp_payload;
834 	}
835 
836 	cp_seg_blk = get_sb(segment0_blkaddr);
837 
838 	DBG(1, "\tWriting main segments, cp at offset 0x%08"PRIx64"\n",
839 						cp_seg_blk);
840 	if (dev_write_block(cp, cp_seg_blk)) {
841 		MSG(1, "\tError: While writing the cp to disk!!!\n");
842 		goto free_cp_payload;
843 	}
844 
845 	for (i = 0; i < get_sb(cp_payload); i++) {
846 		cp_seg_blk++;
847 		if (dev_fill_block(cp_payload, cp_seg_blk)) {
848 			MSG(1, "\tError: While zeroing out the sit bitmap area "
849 					"on disk!!!\n");
850 			goto free_cp_payload;
851 		}
852 	}
853 
854 	/* Prepare and write Segment summary for HOT/WARM/COLD DATA
855 	 *
856 	 * The structure of compact summary
857 	 * +-------------------+
858 	 * | nat_journal       |
859 	 * +-------------------+
860 	 * | sit_journal       |
861 	 * +-------------------+
862 	 * | hot data summary  |
863 	 * +-------------------+
864 	 * | warm data summary |
865 	 * +-------------------+
866 	 * | cold data summary |
867 	 * +-------------------+
868 	*/
869 	memset(sum, 0, sizeof(struct f2fs_summary_block));
870 	SET_SUM_TYPE((&sum->footer), SUM_TYPE_DATA);
871 
872 	journal = &sum->journal;
873 	journal->n_nats = cpu_to_le16(1 + c.quota_inum + c.lpf_inum);
874 	journal->nat_j.entries[0].nid = sb->root_ino;
875 	journal->nat_j.entries[0].ne.version = 0;
876 	journal->nat_j.entries[0].ne.ino = sb->root_ino;
877 	journal->nat_j.entries[0].ne.block_addr = cpu_to_le32(
878 			get_sb(main_blkaddr) +
879 			get_cp(cur_node_segno[0]) * c.blks_per_seg);
880 
881 	for (qtype = 0, i = 1; qtype < F2FS_MAX_QUOTAS; qtype++) {
882 		if (!((1 << qtype) & c.quota_bits))
883 			continue;
884 		journal->nat_j.entries[i].nid = sb->qf_ino[qtype];
885 		journal->nat_j.entries[i].ne.version = 0;
886 		journal->nat_j.entries[i].ne.ino = sb->qf_ino[qtype];
887 		journal->nat_j.entries[i].ne.block_addr = cpu_to_le32(
888 				get_sb(main_blkaddr) +
889 				get_cp(cur_node_segno[0]) *
890 				c.blks_per_seg + i);
891 		i++;
892 	}
893 
894 	if (c.lpf_inum) {
895 		journal->nat_j.entries[i].nid = cpu_to_le32(c.lpf_ino);
896 		journal->nat_j.entries[i].ne.version = 0;
897 		journal->nat_j.entries[i].ne.ino = cpu_to_le32(c.lpf_ino);
898 		journal->nat_j.entries[i].ne.block_addr = cpu_to_le32(
899 				get_sb(main_blkaddr) +
900 				get_cp(cur_node_segno[0]) *
901 				c.blks_per_seg + i);
902 	}
903 
904 	memcpy(sum_compact_p, &journal->n_nats, SUM_JOURNAL_SIZE);
905 	sum_compact_p += SUM_JOURNAL_SIZE;
906 
907 	memset(sum, 0, sizeof(struct f2fs_summary_block));
908 
909 	/* inode sit for root */
910 	if (c.feature & cpu_to_le32(F2FS_FEATURE_RO))
911 		journal->n_sits = cpu_to_le16(2);
912 	else
913 		journal->n_sits = cpu_to_le16(6);
914 
915 	journal->sit_j.entries[0].segno = cp->cur_node_segno[0];
916 	journal->sit_j.entries[0].se.vblocks =
917 				cpu_to_le16((CURSEG_HOT_NODE << 10) |
918 						(1 + c.quota_inum + c.lpf_inum));
919 	f2fs_set_bit(0, (char *)journal->sit_j.entries[0].se.valid_map);
920 	for (i = 1; i <= c.quota_inum; i++)
921 		f2fs_set_bit(i, (char *)journal->sit_j.entries[0].se.valid_map);
922 	if (c.lpf_inum)
923 		f2fs_set_bit(i, (char *)journal->sit_j.entries[0].se.valid_map);
924 
925 	if (c.feature & cpu_to_le32(F2FS_FEATURE_RO)) {
926 		/* data sit for root */
927 		journal->sit_j.entries[1].segno = cp->cur_data_segno[0];
928 		journal->sit_j.entries[1].se.vblocks =
929 					cpu_to_le16((CURSEG_HOT_DATA << 10) |
930 							(1 + c.quota_dnum + c.lpf_dnum));
931 		f2fs_set_bit(0, (char *)journal->sit_j.entries[1].se.valid_map);
932 		for (i = 1; i <= c.quota_dnum; i++)
933 			f2fs_set_bit(i, (char *)journal->sit_j.entries[1].se.valid_map);
934 		if (c.lpf_dnum)
935 			f2fs_set_bit(i, (char *)journal->sit_j.entries[1].se.valid_map);
936 	} else {
937 		journal->sit_j.entries[1].segno = cp->cur_node_segno[1];
938 		journal->sit_j.entries[1].se.vblocks =
939 					cpu_to_le16((CURSEG_WARM_NODE << 10));
940 		journal->sit_j.entries[2].segno = cp->cur_node_segno[2];
941 		journal->sit_j.entries[2].se.vblocks =
942 					cpu_to_le16((CURSEG_COLD_NODE << 10));
943 
944 		/* data sit for root */
945 		journal->sit_j.entries[3].segno = cp->cur_data_segno[0];
946 		journal->sit_j.entries[3].se.vblocks =
947 					cpu_to_le16((CURSEG_HOT_DATA << 10) |
948 							(1 + c.quota_dnum + c.lpf_dnum));
949 		f2fs_set_bit(0, (char *)journal->sit_j.entries[3].se.valid_map);
950 		for (i = 1; i <= c.quota_dnum; i++)
951 			f2fs_set_bit(i, (char *)journal->sit_j.entries[3].se.valid_map);
952 		if (c.lpf_dnum)
953 			f2fs_set_bit(i, (char *)journal->sit_j.entries[3].se.valid_map);
954 
955 		journal->sit_j.entries[4].segno = cp->cur_data_segno[1];
956 		journal->sit_j.entries[4].se.vblocks =
957 					cpu_to_le16((CURSEG_WARM_DATA << 10));
958 		journal->sit_j.entries[5].segno = cp->cur_data_segno[2];
959 		journal->sit_j.entries[5].se.vblocks =
960 					cpu_to_le16((CURSEG_COLD_DATA << 10));
961 	}
962 
963 	memcpy(sum_compact_p, &journal->n_sits, SUM_JOURNAL_SIZE);
964 	sum_compact_p += SUM_JOURNAL_SIZE;
965 
966 	/* hot data summary */
967 	sum_entry = (struct f2fs_summary *)sum_compact_p;
968 	sum_entry->nid = sb->root_ino;
969 	sum_entry->ofs_in_node = 0;
970 
971 	off = 1;
972 	for (qtype = 0; qtype < F2FS_MAX_QUOTAS; qtype++) {
973 		int j;
974 
975 		if (!((1 << qtype) & c.quota_bits))
976 			continue;
977 
978 		for (j = 0; j < QUOTA_DATA(qtype); j++) {
979 			(sum_entry + off + j)->nid = sb->qf_ino[qtype];
980 			(sum_entry + off + j)->ofs_in_node = cpu_to_le16(j);
981 		}
982 		off += QUOTA_DATA(qtype);
983 	}
984 
985 	if (c.lpf_dnum) {
986 		(sum_entry + off)->nid = cpu_to_le32(c.lpf_ino);
987 		(sum_entry + off)->ofs_in_node = 0;
988 	}
989 
990 	/* warm data summary, nothing to do */
991 	/* cold data summary, nothing to do */
992 
993 	cp_seg_blk++;
994 	DBG(1, "\tWriting Segment summary for HOT/WARM/COLD_DATA, at offset 0x%08"PRIx64"\n",
995 			cp_seg_blk);
996 	if (dev_write_block(sum_compact, cp_seg_blk)) {
997 		MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
998 		goto free_cp_payload;
999 	}
1000 
1001 	/* Prepare and write Segment summary for HOT_NODE */
1002 	memset(sum, 0, sizeof(struct f2fs_summary_block));
1003 	SET_SUM_TYPE((&sum->footer), SUM_TYPE_NODE);
1004 
1005 	sum->entries[0].nid = sb->root_ino;
1006 	sum->entries[0].ofs_in_node = 0;
1007 	for (qtype = i = 0; qtype < F2FS_MAX_QUOTAS; qtype++) {
1008 		if (!((1 << qtype) & c.quota_bits))
1009 			continue;
1010 		sum->entries[1 + i].nid = sb->qf_ino[qtype];
1011 		sum->entries[1 + i].ofs_in_node = 0;
1012 		i++;
1013 	}
1014 	if (c.lpf_inum) {
1015 		i++;
1016 		sum->entries[i].nid = cpu_to_le32(c.lpf_ino);
1017 		sum->entries[i].ofs_in_node = 0;
1018 	}
1019 
1020 	cp_seg_blk++;
1021 	DBG(1, "\tWriting Segment summary for HOT_NODE, at offset 0x%08"PRIx64"\n",
1022 			cp_seg_blk);
1023 	if (dev_write_block(sum, cp_seg_blk)) {
1024 		MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
1025 		goto free_cp_payload;
1026 	}
1027 
1028 	/* Fill segment summary for WARM_NODE to zero. */
1029 	memset(sum, 0, sizeof(struct f2fs_summary_block));
1030 	SET_SUM_TYPE((&sum->footer), SUM_TYPE_NODE);
1031 
1032 	cp_seg_blk++;
1033 	DBG(1, "\tWriting Segment summary for WARM_NODE, at offset 0x%08"PRIx64"\n",
1034 			cp_seg_blk);
1035 	if (dev_write_block(sum, cp_seg_blk)) {
1036 		MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
1037 		goto free_cp_payload;
1038 	}
1039 
1040 	/* Fill segment summary for COLD_NODE to zero. */
1041 	memset(sum, 0, sizeof(struct f2fs_summary_block));
1042 	SET_SUM_TYPE((&sum->footer), SUM_TYPE_NODE);
1043 	cp_seg_blk++;
1044 	DBG(1, "\tWriting Segment summary for COLD_NODE, at offset 0x%08"PRIx64"\n",
1045 			cp_seg_blk);
1046 	if (dev_write_block(sum, cp_seg_blk)) {
1047 		MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
1048 		goto free_cp_payload;
1049 	}
1050 
1051 	/* cp page2 */
1052 	cp_seg_blk++;
1053 	DBG(1, "\tWriting cp page2, at offset 0x%08"PRIx64"\n", cp_seg_blk);
1054 	if (dev_write_block(cp, cp_seg_blk)) {
1055 		MSG(1, "\tError: While writing the cp to disk!!!\n");
1056 		goto free_cp_payload;
1057 	}
1058 
1059 	/* write NAT bits, if possible */
1060 	if (flags & CP_NAT_BITS_FLAG) {
1061 		uint32_t i;
1062 
1063 		*(__le64 *)nat_bits = get_cp_crc(cp);
1064 		empty_nat_bits = nat_bits + 8 + nat_bits_bytes;
1065 		memset(empty_nat_bits, 0xff, nat_bits_bytes);
1066 		test_and_clear_bit_le(0, empty_nat_bits);
1067 
1068 		/* write the last blocks in cp pack */
1069 		cp_seg_blk = get_sb(segment0_blkaddr) + (1 <<
1070 				get_sb(log_blocks_per_seg)) - nat_bits_blocks;
1071 
1072 		DBG(1, "\tWriting NAT bits pages, at offset 0x%08"PRIx64"\n",
1073 					cp_seg_blk);
1074 
1075 		for (i = 0; i < nat_bits_blocks; i++) {
1076 			if (dev_write_block(nat_bits + i *
1077 						F2FS_BLKSIZE, cp_seg_blk + i)) {
1078 				MSG(1, "\tError: write NAT bits to disk!!!\n");
1079 				goto free_cp_payload;
1080 			}
1081 		}
1082 	}
1083 
1084 	/* cp page 1 of check point pack 2
1085 	 * Initialize other checkpoint pack with version zero
1086 	 */
1087 	cp->checkpoint_ver = 0;
1088 
1089 	crc = f2fs_checkpoint_chksum(cp);
1090 	*((__le32 *)((unsigned char *)cp + get_cp(checksum_offset))) =
1091 							cpu_to_le32(crc);
1092 	cp_seg_blk = get_sb(segment0_blkaddr) + c.blks_per_seg;
1093 	DBG(1, "\tWriting cp page 1 of checkpoint pack 2, at offset 0x%08"PRIx64"\n",
1094 				cp_seg_blk);
1095 	if (dev_write_block(cp, cp_seg_blk)) {
1096 		MSG(1, "\tError: While writing the cp to disk!!!\n");
1097 		goto free_cp_payload;
1098 	}
1099 
1100 	for (i = 0; i < get_sb(cp_payload); i++) {
1101 		cp_seg_blk++;
1102 		if (dev_fill_block(cp_payload, cp_seg_blk)) {
1103 			MSG(1, "\tError: While zeroing out the sit bitmap area "
1104 					"on disk!!!\n");
1105 			goto free_cp_payload;
1106 		}
1107 	}
1108 
1109 	/* cp page 2 of check point pack 2 */
1110 	cp_seg_blk += (le32_to_cpu(cp->cp_pack_total_block_count) -
1111 					get_sb(cp_payload) - 1);
1112 	DBG(1, "\tWriting cp page 2 of checkpoint pack 2, at offset 0x%08"PRIx64"\n",
1113 				cp_seg_blk);
1114 	if (dev_write_block(cp, cp_seg_blk)) {
1115 		MSG(1, "\tError: While writing the cp to disk!!!\n");
1116 		goto free_cp_payload;
1117 	}
1118 
1119 	ret = 0;
1120 
1121 free_cp_payload:
1122 	free(cp_payload);
1123 free_nat_bits:
1124 	free(nat_bits);
1125 free_sum_compact:
1126 	free(sum_compact);
1127 free_sum:
1128 	free(sum);
1129 free_cp:
1130 	free(cp);
1131 	return ret;
1132 }
1133 
1134 static int f2fs_write_super_block(void)
1135 {
1136 	int index;
1137 	uint8_t *zero_buff;
1138 
1139 	zero_buff = calloc(F2FS_BLKSIZE, 1);
1140 	if (zero_buff == NULL) {
1141 		MSG(1, "\tError: Calloc Failed for super_blk_zero_buf!!!\n");
1142 		return -1;
1143 	}
1144 
1145 	memcpy(zero_buff + F2FS_SUPER_OFFSET, sb, sizeof(*sb));
1146 	DBG(1, "\tWriting super block, at offset 0x%08x\n", 0);
1147 	for (index = 0; index < 2; index++) {
1148 		if (dev_write_block(zero_buff, index)) {
1149 			MSG(1, "\tError: While writing super_blk "
1150 					"on disk!!! index : %d\n", index);
1151 			free(zero_buff);
1152 			return -1;
1153 		}
1154 	}
1155 
1156 	free(zero_buff);
1157 	return 0;
1158 }
1159 
1160 #ifndef WITH_ANDROID
1161 static int f2fs_discard_obsolete_dnode(void)
1162 {
1163 	struct f2fs_node *raw_node;
1164 	uint64_t next_blkaddr = 0, offset;
1165 	u64 end_blkaddr = (get_sb(segment_count_main) <<
1166 			get_sb(log_blocks_per_seg)) + get_sb(main_blkaddr);
1167 	uint64_t start_inode_pos = get_sb(main_blkaddr);
1168 	uint64_t last_inode_pos;
1169 
1170 	if (c.zoned_mode || c.feature & cpu_to_le32(F2FS_FEATURE_RO))
1171 		return 0;
1172 
1173 	raw_node = calloc(sizeof(struct f2fs_node), 1);
1174 	if (raw_node == NULL) {
1175 		MSG(1, "\tError: Calloc Failed for discard_raw_node!!!\n");
1176 		return -1;
1177 	}
1178 
1179 	/* avoid power-off-recovery based on roll-forward policy */
1180 	offset = get_sb(main_blkaddr);
1181 	offset += c.cur_seg[CURSEG_WARM_NODE] * c.blks_per_seg;
1182 
1183 	last_inode_pos = start_inode_pos +
1184 		c.cur_seg[CURSEG_HOT_NODE] * c.blks_per_seg + c.quota_inum + c.lpf_inum;
1185 
1186 	do {
1187 		if (offset < get_sb(main_blkaddr) || offset >= end_blkaddr)
1188 			break;
1189 
1190 		if (dev_read_block(raw_node, offset)) {
1191 			MSG(1, "\tError: While traversing direct node!!!\n");
1192 			free(raw_node);
1193 			return -1;
1194 		}
1195 
1196 		next_blkaddr = le32_to_cpu(raw_node->footer.next_blkaddr);
1197 		memset(raw_node, 0, F2FS_BLKSIZE);
1198 
1199 		DBG(1, "\tDiscard dnode, at offset 0x%08"PRIx64"\n", offset);
1200 		if (dev_write_block(raw_node, offset)) {
1201 			MSG(1, "\tError: While discarding direct node!!!\n");
1202 			free(raw_node);
1203 			return -1;
1204 		}
1205 		offset = next_blkaddr;
1206 		/* should avoid recursive chain due to stale data */
1207 		if (offset >= start_inode_pos && offset <= last_inode_pos)
1208 			break;
1209 	} while (1);
1210 
1211 	free(raw_node);
1212 	return 0;
1213 }
1214 #endif
1215 
1216 static int f2fs_write_root_inode(void)
1217 {
1218 	struct f2fs_node *raw_node = NULL;
1219 	uint64_t blk_size_bytes, data_blk_nor;
1220 	uint64_t main_area_node_seg_blk_offset = 0;
1221 
1222 	raw_node = calloc(F2FS_BLKSIZE, 1);
1223 	if (raw_node == NULL) {
1224 		MSG(1, "\tError: Calloc Failed for raw_node!!!\n");
1225 		return -1;
1226 	}
1227 
1228 	raw_node->footer.nid = sb->root_ino;
1229 	raw_node->footer.ino = sb->root_ino;
1230 	raw_node->footer.cp_ver = cpu_to_le64(1);
1231 	raw_node->footer.next_blkaddr = cpu_to_le32(
1232 			get_sb(main_blkaddr) +
1233 			c.cur_seg[CURSEG_HOT_NODE] *
1234 			c.blks_per_seg + 1);
1235 
1236 	raw_node->i.i_mode = cpu_to_le16(0x41ed);
1237 	if (c.lpf_ino)
1238 		raw_node->i.i_links = cpu_to_le32(3);
1239 	else
1240 		raw_node->i.i_links = cpu_to_le32(2);
1241 	raw_node->i.i_uid = cpu_to_le32(c.root_uid);
1242 	raw_node->i.i_gid = cpu_to_le32(c.root_gid);
1243 
1244 	blk_size_bytes = 1 << get_sb(log_blocksize);
1245 	raw_node->i.i_size = cpu_to_le64(1 * blk_size_bytes); /* dentry */
1246 	raw_node->i.i_blocks = cpu_to_le64(2);
1247 
1248 	raw_node->i.i_atime = cpu_to_le32(mkfs_time);
1249 	raw_node->i.i_atime_nsec = 0;
1250 	raw_node->i.i_ctime = cpu_to_le32(mkfs_time);
1251 	raw_node->i.i_ctime_nsec = 0;
1252 	raw_node->i.i_mtime = cpu_to_le32(mkfs_time);
1253 	raw_node->i.i_mtime_nsec = 0;
1254 	raw_node->i.i_generation = 0;
1255 	raw_node->i.i_xattr_nid = 0;
1256 	raw_node->i.i_flags = 0;
1257 	raw_node->i.i_current_depth = cpu_to_le32(1);
1258 	raw_node->i.i_dir_level = DEF_DIR_LEVEL;
1259 
1260 	if (c.feature & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
1261 		raw_node->i.i_inline = F2FS_EXTRA_ATTR;
1262 		raw_node->i.i_extra_isize = cpu_to_le16(calc_extra_isize());
1263 	}
1264 
1265 	if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA))
1266 		raw_node->i.i_projid = cpu_to_le32(F2FS_DEF_PROJID);
1267 
1268 	if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME)) {
1269 		raw_node->i.i_crtime = cpu_to_le32(mkfs_time);
1270 		raw_node->i.i_crtime_nsec = 0;
1271 	}
1272 
1273 	if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
1274 		raw_node->i.i_compress_algrithm = 0;
1275 		raw_node->i.i_log_cluster_size = 0;
1276 		raw_node->i.i_padding = 0;
1277 	}
1278 
1279 	data_blk_nor = get_sb(main_blkaddr) +
1280 		c.cur_seg[CURSEG_HOT_DATA] * c.blks_per_seg;
1281 	raw_node->i.i_addr[get_extra_isize(raw_node)] = cpu_to_le32(data_blk_nor);
1282 
1283 	raw_node->i.i_ext.fofs = 0;
1284 	raw_node->i.i_ext.blk_addr = 0;
1285 	raw_node->i.i_ext.len = 0;
1286 
1287 	main_area_node_seg_blk_offset = get_sb(main_blkaddr);
1288 	main_area_node_seg_blk_offset += c.cur_seg[CURSEG_HOT_NODE] *
1289 					c.blks_per_seg;
1290 
1291 	DBG(1, "\tWriting root inode (hot node), %x %x %x at offset 0x%08"PRIx64"\n",
1292 			get_sb(main_blkaddr),
1293 			c.cur_seg[CURSEG_HOT_NODE],
1294 			c.blks_per_seg, main_area_node_seg_blk_offset);
1295 	if (write_inode(raw_node, main_area_node_seg_blk_offset) < 0) {
1296 		MSG(1, "\tError: While writing the raw_node to disk!!!\n");
1297 		free(raw_node);
1298 		return -1;
1299 	}
1300 
1301 	free(raw_node);
1302 	return 0;
1303 }
1304 
1305 static int f2fs_write_default_quota(int qtype, unsigned int blkaddr,
1306 						__le32 raw_id)
1307 {
1308 	char *filebuf = calloc(F2FS_BLKSIZE, 2);
1309 	int file_magics[] = INITQMAGICS;
1310 	struct v2_disk_dqheader ddqheader;
1311 	struct v2_disk_dqinfo ddqinfo;
1312 	struct v2r1_disk_dqblk dqblk;
1313 
1314 	if (filebuf == NULL) {
1315 		MSG(1, "\tError: Calloc Failed for filebuf!!!\n");
1316 		return -1;
1317 	}
1318 
1319 	/* Write basic quota header */
1320 	ddqheader.dqh_magic = cpu_to_le32(file_magics[qtype]);
1321 	/* only support QF_VFSV1 */
1322 	ddqheader.dqh_version = cpu_to_le32(1);
1323 
1324 	memcpy(filebuf, &ddqheader, sizeof(ddqheader));
1325 
1326 	/* Fill Initial quota file content */
1327 	ddqinfo.dqi_bgrace = cpu_to_le32(MAX_DQ_TIME);
1328 	ddqinfo.dqi_igrace = cpu_to_le32(MAX_IQ_TIME);
1329 	ddqinfo.dqi_flags = cpu_to_le32(0);
1330 	ddqinfo.dqi_blocks = cpu_to_le32(QT_TREEOFF + 5);
1331 	ddqinfo.dqi_free_blk = cpu_to_le32(0);
1332 	ddqinfo.dqi_free_entry = cpu_to_le32(5);
1333 
1334 	memcpy(filebuf + V2_DQINFOOFF, &ddqinfo, sizeof(ddqinfo));
1335 
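	/*
	 * Layout sketch of the hand-built V2 quota file (1 KiB quota blocks,
	 * assuming the usual v2 on-disk structures): block 0 holds the header
	 * and dqinfo, blocks 1-4 form a radix-tree path whose single entry
	 * points to the next block (2, 3, 4, 5), and block 5 is the data
	 * block, where byte 8 sets dqdh_entries = 1 and the dqblk written
	 * below lands right after the 16-byte data-block header (offset 5136).
	 */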
1336 	filebuf[1024] = 2;
1337 	filebuf[2048] = 3;
1338 	filebuf[3072] = 4;
1339 	filebuf[4096] = 5;
1340 
1341 	filebuf[5120 + 8] = 1;
1342 
1343 	dqblk.dqb_id = raw_id;
1344 	dqblk.dqb_pad = cpu_to_le32(0);
1345 	dqblk.dqb_ihardlimit = cpu_to_le64(0);
1346 	dqblk.dqb_isoftlimit = cpu_to_le64(0);
1347 	if (c.lpf_ino)
1348 		dqblk.dqb_curinodes = cpu_to_le64(2);
1349 	else
1350 		dqblk.dqb_curinodes = cpu_to_le64(1);
1351 	dqblk.dqb_bhardlimit = cpu_to_le64(0);
1352 	dqblk.dqb_bsoftlimit = cpu_to_le64(0);
1353 	if (c.lpf_ino)
1354 		dqblk.dqb_curspace = cpu_to_le64(8192);
1355 	else
1356 		dqblk.dqb_curspace = cpu_to_le64(4096);
1357 	dqblk.dqb_btime = cpu_to_le64(0);
1358 	dqblk.dqb_itime = cpu_to_le64(0);
1359 
1360 	memcpy(filebuf + 5136, &dqblk, sizeof(struct v2r1_disk_dqblk));
1361 
1362 	/* Write two blocks */
1363 	if (dev_write_block(filebuf, blkaddr) ||
1364 	    dev_write_block(filebuf + F2FS_BLKSIZE, blkaddr + 1)) {
1365 		MSG(1, "\tError: While writing the quota_blk to disk!!!\n");
1366 		free(filebuf);
1367 		return -1;
1368 	}
1369 	DBG(1, "\tWriting quota data, at offset %08x, %08x\n",
1370 					blkaddr, blkaddr + 1);
1371 	free(filebuf);
1372 	c.quota_dnum += QUOTA_DATA(qtype);
1373 	return 0;
1374 }
1375 
1376 static int f2fs_write_qf_inode(int qtype, int offset)
1377 {
1378 	struct f2fs_node *raw_node = NULL;
1379 	uint64_t data_blk_nor;
1380 	uint64_t main_area_node_seg_blk_offset = 0;
1381 	__le32 raw_id;
1382 	int i;
1383 
1384 	raw_node = calloc(F2FS_BLKSIZE, 1);
1385 	if (raw_node == NULL) {
1386 		MSG(1, "\tError: Calloc Failed for raw_node!!!\n");
1387 		return -1;
1388 	}
1389 	f2fs_init_qf_inode(sb, raw_node, qtype, mkfs_time);
1390 
1391 	raw_node->footer.next_blkaddr = cpu_to_le32(
1392 			get_sb(main_blkaddr) +
1393 			c.cur_seg[CURSEG_HOT_NODE] *
1394 			c.blks_per_seg + 1 + qtype + 1);
1395 	raw_node->i.i_blocks = cpu_to_le64(1 + QUOTA_DATA(qtype));
1396 
1397 	data_blk_nor = get_sb(main_blkaddr) +
1398 		c.cur_seg[CURSEG_HOT_DATA] * c.blks_per_seg + 1
1399 		+ offset * QUOTA_DATA(qtype);
1400 
1401 	if (qtype == 0)
1402 		raw_id = raw_node->i.i_uid;
1403 	else if (qtype == 1)
1404 		raw_id = raw_node->i.i_gid;
1405 	else if (qtype == 2)
1406 		raw_id = raw_node->i.i_projid;
1407 	else
1408 		ASSERT(0);
1409 
1410 	/* write two blocks */
1411 	if (f2fs_write_default_quota(qtype, data_blk_nor, raw_id)) {
1412 		free(raw_node);
1413 		return -1;
1414 	}
1415 
1416 	for (i = 0; i < QUOTA_DATA(qtype); i++)
1417 		raw_node->i.i_addr[get_extra_isize(raw_node) + i] =
1418 					cpu_to_le32(data_blk_nor + i);
1419 
1420 	main_area_node_seg_blk_offset = get_sb(main_blkaddr);
1421 	main_area_node_seg_blk_offset += c.cur_seg[CURSEG_HOT_NODE] *
1422 					c.blks_per_seg + offset + 1;
1423 
1424 	DBG(1, "\tWriting quota inode (hot node), %x %x %x at offset 0x%08"PRIx64"\n",
1425 			get_sb(main_blkaddr),
1426 			c.cur_seg[CURSEG_HOT_NODE],
1427 			c.blks_per_seg, main_area_node_seg_blk_offset);
1428 	if (write_inode(raw_node, main_area_node_seg_blk_offset) < 0) {
1429 		MSG(1, "\tError: While writing the raw_node to disk!!!\n");
1430 		free(raw_node);
1431 		return -1;
1432 	}
1433 
1434 	free(raw_node);
1435 	c.quota_inum++;
1436 	return 0;
1437 }
1438 
1439 static int f2fs_update_nat_root(void)
1440 {
1441 	struct f2fs_nat_block *nat_blk = NULL;
1442 	uint64_t nat_seg_blk_offset = 0;
1443 	enum quota_type qtype;
1444 	int i;
1445 
1446 	nat_blk = calloc(F2FS_BLKSIZE, 1);
1447 	if (nat_blk == NULL) {
1448 		MSG(1, "\tError: Calloc Failed for nat_blk!!!\n");
1449 		return -1;
1450 	}
1451 
1452 	/* update quota */
1453 	for (qtype = i = 0; qtype < F2FS_MAX_QUOTAS; qtype++) {
1454 		if (!((1 << qtype) & c.quota_bits))
1455 			continue;
1456 		nat_blk->entries[sb->qf_ino[qtype]].block_addr =
1457 				cpu_to_le32(get_sb(main_blkaddr) +
1458 				c.cur_seg[CURSEG_HOT_NODE] *
1459 				c.blks_per_seg + i + 1);
1460 		nat_blk->entries[sb->qf_ino[qtype]].ino = sb->qf_ino[qtype];
1461 		i++;
1462 	}
1463 
1464 	/* update root */
1465 	nat_blk->entries[get_sb(root_ino)].block_addr = cpu_to_le32(
1466 		get_sb(main_blkaddr) +
1467 		c.cur_seg[CURSEG_HOT_NODE] * c.blks_per_seg);
1468 	nat_blk->entries[get_sb(root_ino)].ino = sb->root_ino;
1469 
1470 	/* update node nat */
1471 	nat_blk->entries[get_sb(node_ino)].block_addr = cpu_to_le32(1);
1472 	nat_blk->entries[get_sb(node_ino)].ino = sb->node_ino;
1473 
1474 	/* update meta nat */
1475 	nat_blk->entries[get_sb(meta_ino)].block_addr = cpu_to_le32(1);
1476 	nat_blk->entries[get_sb(meta_ino)].ino = sb->meta_ino;
1477 
1478 	nat_seg_blk_offset = get_sb(nat_blkaddr);
1479 
1480 	DBG(1, "\tWriting nat root, at offset 0x%08"PRIx64"\n",
1481 					nat_seg_blk_offset);
1482 	if (dev_write_block(nat_blk, nat_seg_blk_offset)) {
1483 		MSG(1, "\tError: While writing the nat_blk set0 to disk!\n");
1484 		free(nat_blk);
1485 		return -1;
1486 	}
1487 
1488 	free(nat_blk);
1489 	return 0;
1490 }
1491 
1492 static block_t f2fs_add_default_dentry_lpf(void)
1493 {
1494 	struct f2fs_dentry_block *dent_blk;
1495 	uint64_t data_blk_offset;
1496 
1497 	dent_blk = calloc(F2FS_BLKSIZE, 1);
1498 	if (dent_blk == NULL) {
1499 		MSG(1, "\tError: Calloc Failed for dent_blk!!!\n");
1500 		return 0;
1501 	}
1502 
1503 	dent_blk->dentry[0].hash_code = 0;
1504 	dent_blk->dentry[0].ino = cpu_to_le32(c.lpf_ino);
1505 	dent_blk->dentry[0].name_len = cpu_to_le16(1);
1506 	dent_blk->dentry[0].file_type = F2FS_FT_DIR;
1507 	memcpy(dent_blk->filename[0], ".", 1);
1508 
1509 	dent_blk->dentry[1].hash_code = 0;
1510 	dent_blk->dentry[1].ino = sb->root_ino;
1511 	dent_blk->dentry[1].name_len = cpu_to_le16(2);
1512 	dent_blk->dentry[1].file_type = F2FS_FT_DIR;
1513 	memcpy(dent_blk->filename[1], "..", 2);
1514 
1515 	test_and_set_bit_le(0, dent_blk->dentry_bitmap);
1516 	test_and_set_bit_le(1, dent_blk->dentry_bitmap);
1517 
1518 	data_blk_offset = get_sb(main_blkaddr);
1519 	data_blk_offset += c.cur_seg[CURSEG_HOT_DATA] * c.blks_per_seg +
1520 		1 + c.quota_dnum;
1521 
1522 	DBG(1, "\tWriting default dentry lost+found, at offset 0x%08"PRIx64"\n",
1523 			data_blk_offset);
1524 	if (dev_write_block(dent_blk, data_blk_offset)) {
1525 		MSG(1, "\tError: While writing the dentry_blk to disk!!!\n");
1526 		free(dent_blk);
1527 		return 0;
1528 	}
1529 
1530 	free(dent_blk);
1531 	c.lpf_dnum++;
1532 	return data_blk_offset;
1533 }
1534 
1535 static int f2fs_write_lpf_inode(void)
1536 {
1537 	struct f2fs_node *raw_node;
1538 	uint64_t blk_size_bytes, main_area_node_seg_blk_offset;
1539 	block_t data_blk_nor;
1540 	int err = 0;
1541 
1542 	ASSERT(c.lpf_ino);
1543 
1544 	raw_node = calloc(F2FS_BLKSIZE, 1);
1545 	if (raw_node == NULL) {
1546 		MSG(1, "\tError: Calloc Failed for raw_node!!!\n");
1547 		return -1;
1548 	}
1549 
1550 	raw_node->footer.nid = cpu_to_le32(c.lpf_ino);
1551 	raw_node->footer.ino = raw_node->footer.nid;
1552 	raw_node->footer.cp_ver = cpu_to_le64(1);
1553 	raw_node->footer.next_blkaddr = cpu_to_le32(
1554 			get_sb(main_blkaddr) +
1555 			c.cur_seg[CURSEG_HOT_NODE] * c.blks_per_seg +
1556 			1 + c.quota_inum + 1);
1557 
1558 	raw_node->i.i_mode = cpu_to_le16(0x41c0); /* 0700 */
1559 	raw_node->i.i_links = cpu_to_le32(2);
1560 	raw_node->i.i_uid = cpu_to_le32(c.root_uid);
1561 	raw_node->i.i_gid = cpu_to_le32(c.root_gid);
1562 
1563 	blk_size_bytes = 1 << get_sb(log_blocksize);
1564 	raw_node->i.i_size = cpu_to_le64(1 * blk_size_bytes);
1565 	raw_node->i.i_blocks = cpu_to_le64(2);
1566 
1567 	raw_node->i.i_atime = cpu_to_le32(mkfs_time);
1568 	raw_node->i.i_atime_nsec = 0;
1569 	raw_node->i.i_ctime = cpu_to_le32(mkfs_time);
1570 	raw_node->i.i_ctime_nsec = 0;
1571 	raw_node->i.i_mtime = cpu_to_le32(mkfs_time);
1572 	raw_node->i.i_mtime_nsec = 0;
1573 	raw_node->i.i_generation = 0;
1574 	raw_node->i.i_xattr_nid = 0;
1575 	raw_node->i.i_flags = 0;
1576 	raw_node->i.i_pino = le32_to_cpu(sb->root_ino);
1577 	raw_node->i.i_namelen = le32_to_cpu(strlen(LPF));
1578 	memcpy(raw_node->i.i_name, LPF, strlen(LPF));
1579 	raw_node->i.i_current_depth = cpu_to_le32(1);
1580 	raw_node->i.i_dir_level = DEF_DIR_LEVEL;
1581 
1582 	if (c.feature & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
1583 		raw_node->i.i_inline = F2FS_EXTRA_ATTR;
1584 		raw_node->i.i_extra_isize = cpu_to_le16(calc_extra_isize());
1585 	}
1586 
1587 	if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA))
1588 		raw_node->i.i_projid = cpu_to_le32(F2FS_DEF_PROJID);
1589 
1590 	if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME)) {
1591 		raw_node->i.i_crtime = cpu_to_le32(mkfs_time);
1592 		raw_node->i.i_crtime_nsec = 0;
1593 	}
1594 
1595 	if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
1596 		raw_node->i.i_compress_algrithm = 0;
1597 		raw_node->i.i_log_cluster_size = 0;
1598 		raw_node->i.i_padding = 0;
1599 	}
1600 
1601 	data_blk_nor = f2fs_add_default_dentry_lpf();
1602 	if (data_blk_nor == 0) {
1603 		MSG(1, "\tError: Failed to add default dentries for lost+found!!!\n");
1604 		err = -1;
1605 		goto exit;
1606 	}
1607 	raw_node->i.i_addr[get_extra_isize(raw_node)] = cpu_to_le32(data_blk_nor);
1608 
1609 	main_area_node_seg_blk_offset = get_sb(main_blkaddr);
1610 	main_area_node_seg_blk_offset += c.cur_seg[CURSEG_HOT_NODE] *
1611 		c.blks_per_seg + c.quota_inum + 1;
1612 
1613 	DBG(1, "\tWriting lost+found inode (hot node), %x %x %x at offset 0x%08"PRIx64"\n",
1614 			get_sb(main_blkaddr),
1615 			c.cur_seg[CURSEG_HOT_NODE],
1616 			c.blks_per_seg, main_area_node_seg_blk_offset);
1617 	if (write_inode(raw_node, main_area_node_seg_blk_offset) < 0) {
1618 		MSG(1, "\tError: While writing the raw_node to disk!!!\n");
1619 		err = -1;
1620 		goto exit;
1621 	}
1622 
1623 	c.lpf_inum++;
1624 exit:
1625 	free(raw_node);
1626 	return err;
1627 }
1628 
1629 static int f2fs_add_default_dentry_root(void)
1630 {
1631 	struct f2fs_dentry_block *dent_blk = NULL;
1632 	uint64_t data_blk_offset = 0;
1633 
1634 	dent_blk = calloc(F2FS_BLKSIZE, 1);
1635 	if (dent_blk == NULL) {
1636 		MSG(1, "\tError: Calloc Failed for dent_blk!!!\n");
1637 		return -1;
1638 	}
1639 
1640 	dent_blk->dentry[0].hash_code = 0;
1641 	dent_blk->dentry[0].ino = sb->root_ino;
1642 	dent_blk->dentry[0].name_len = cpu_to_le16(1);
1643 	dent_blk->dentry[0].file_type = F2FS_FT_DIR;
1644 	memcpy(dent_blk->filename[0], ".", 1);
1645 
1646 	dent_blk->dentry[1].hash_code = 0;
1647 	dent_blk->dentry[1].ino = sb->root_ino;
1648 	dent_blk->dentry[1].name_len = cpu_to_le16(2);
1649 	dent_blk->dentry[1].file_type = F2FS_FT_DIR;
1650 	memcpy(dent_blk->filename[1], "..", 2);
1651 
1652 	/* bitmap for . and .. */
1653 	test_and_set_bit_le(0, dent_blk->dentry_bitmap);
1654 	test_and_set_bit_le(1, dent_blk->dentry_bitmap);
1655 
1656 	if (c.lpf_ino) {
1657 		int len = strlen(LPF);
1658 		f2fs_hash_t hash = f2fs_dentry_hash(0, 0, (unsigned char *)LPF, len);
1659 
1660 		dent_blk->dentry[2].hash_code = cpu_to_le32(hash);
1661 		dent_blk->dentry[2].ino = cpu_to_le32(c.lpf_ino);
1662 		dent_blk->dentry[2].name_len = cpu_to_le16(len);
1663 		dent_blk->dentry[2].file_type = F2FS_FT_DIR;
1664 		memcpy(dent_blk->filename[2], LPF, F2FS_SLOT_LEN);
1665 
1666 		memcpy(dent_blk->filename[3], &LPF[F2FS_SLOT_LEN],
1667 				len - F2FS_SLOT_LEN);
1668 
1669 		test_and_set_bit_le(2, dent_blk->dentry_bitmap);
1670 		test_and_set_bit_le(3, dent_blk->dentry_bitmap);
1671 	}
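	/*
	 * "lost+found" is longer than one F2FS_SLOT_LEN filename slot, so it
	 * spans dentry slots 2 and 3 above; that is why both bits 2 and 3
	 * are set in the dentry bitmap.
	 */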
1672 
1673 	data_blk_offset = get_sb(main_blkaddr);
1674 	data_blk_offset += c.cur_seg[CURSEG_HOT_DATA] *
1675 				c.blks_per_seg;
1676 
1677 	DBG(1, "\tWriting default dentry root, at offset 0x%08"PRIx64"\n",
1678 				data_blk_offset);
1679 	if (dev_write_block(dent_blk, data_blk_offset)) {
1680 		MSG(1, "\tError: While writing the dentry_blk to disk!!!\n");
1681 		free(dent_blk);
1682 		return -1;
1683 	}
1684 
1685 	free(dent_blk);
1686 	return 0;
1687 }
1688 
1689 static int f2fs_create_root_dir(void)
1690 {
1691 	enum quota_type qtype;
1692 	int err = 0, i = 0;
1693 
1694 	err = f2fs_write_root_inode();
1695 	if (err < 0) {
1696 		MSG(1, "\tError: Failed to write root inode!!!\n");
1697 		goto exit;
1698 	}
1699 
1700 	for (qtype = 0; qtype < F2FS_MAX_QUOTAS; qtype++)  {
1701 		if (!((1 << qtype) & c.quota_bits))
1702 			continue;
1703 		err = f2fs_write_qf_inode(qtype, i++);
1704 		if (err < 0) {
1705 			MSG(1, "\tError: Failed to write quota inode!!!\n");
1706 			goto exit;
1707 		}
1708 	}
1709 
1710 	if (c.feature & cpu_to_le32(F2FS_FEATURE_LOST_FOUND)) {
1711 		err = f2fs_write_lpf_inode();
1712 		if (err < 0) {
1713 			MSG(1, "\tError: Failed to write lost+found inode!!!\n");
1714 			goto exit;
1715 		}
1716 	}
1717 
1718 #ifndef WITH_ANDROID
1719 	err = f2fs_discard_obsolete_dnode();
1720 	if (err < 0) {
1721 		MSG(1, "\tError: Failed to discard obsolete dnode!!!\n");
1722 		goto exit;
1723 	}
1724 #endif
1725 
1726 	err = f2fs_update_nat_root();
1727 	if (err < 0) {
1728 		MSG(1, "\tError: Failed to update NAT for root!!!\n");
1729 		goto exit;
1730 	}
1731 
1732 	err = f2fs_add_default_dentry_root();
1733 	if (err < 0) {
1734 		MSG(1, "\tError: Failed to add default dentries for root!!!\n");
1735 		goto exit;
1736 	}
1737 exit:
1738 	if (err)
1739 		MSG(1, "\tError: Could not create the root directory!!!\n");
1740 
1741 	return err;
1742 }
1743 
1744 int f2fs_format_device(void)
1745 {
1746 	int err = 0;
1747 
1748 	err = f2fs_prepare_super_block();
1749 	if (err < 0) {
1750 		MSG(0, "\tError: Failed to prepare a super block!!!\n");
1751 		goto exit;
1752 	}
1753 
1754 	if (c.trim) {
1755 		err = f2fs_trim_devices();
1756 		if (err < 0) {
1757 			MSG(0, "\tError: Failed to trim whole device!!!\n");
1758 			goto exit;
1759 		}
1760 	}
1761 
1762 	err = f2fs_init_sit_area();
1763 	if (err < 0) {
1764 		MSG(0, "\tError: Failed to initialise the SIT AREA!!!\n");
1765 		goto exit;
1766 	}
1767 
1768 	err = f2fs_init_nat_area();
1769 	if (err < 0) {
1770 		MSG(0, "\tError: Failed to initialise the NAT AREA!!!\n");
1771 		goto exit;
1772 	}
1773 
1774 	err = f2fs_create_root_dir();
1775 	if (err < 0) {
1776 		MSG(0, "\tError: Failed to create the root directory!!!\n");
1777 		goto exit;
1778 	}
1779 
1780 	err = f2fs_write_check_point_pack();
1781 	if (err < 0) {
1782 		MSG(0, "\tError: Failed to write the check point pack!!!\n");
1783 		goto exit;
1784 	}
1785 
1786 	err = f2fs_write_super_block();
1787 	if (err < 0) {
1788 		MSG(0, "\tError: Failed to write the super block!!!\n");
1789 		goto exit;
1790 	}
1791 exit:
1792 	if (err)
1793 		MSG(0, "\tError: Could not format the device!!!\n");
1794 
1795 	return err;
1796 }
1797