/**
 * f2fs_format.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * Dual licensed under the GPL or LGPL version 2 licenses.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <f2fs_fs.h>
#include <assert.h>
#include <stdbool.h>

#ifdef HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
#ifdef HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#endif
#include <time.h>

#ifdef HAVE_UUID_UUID_H
#include <uuid/uuid.h>
#endif
#ifndef HAVE_LIBUUID
#define uuid_parse(a, b) -1
#define uuid_generate(a)
#define uuid_unparse(a, b) -1
#endif

#include "quota.h"
#include "f2fs_format_utils.h"

extern struct f2fs_configuration c;
struct f2fs_super_block raw_sb;
struct f2fs_super_block *sb = &raw_sb;
struct f2fs_checkpoint *cp;

static inline bool device_is_aliased(unsigned int dev_num)
{
	if (dev_num >= c.ndevs)
		return false;
	return c.devices[dev_num].alias_filename != NULL;
}

static inline unsigned int target_device_index(uint64_t blkaddr)
{
	int i;

	for (i = 0; i < c.ndevs; i++)
		if (c.devices[i].start_blkaddr <= blkaddr &&
				c.devices[i].end_blkaddr >= blkaddr)
			return i;
	return 0;
}

#define GET_SEGNO(blk_addr)	((blk_addr - get_sb(main_blkaddr)) / \
						c.blks_per_seg)
#define START_BLOCK(segno)	(segno * c.blks_per_seg + get_sb(main_blkaddr))
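/*
 * Worked example (a sketch assuming the common geometry of 4 KiB blocks
 * and 512 blocks per segment, with get_sb(main_blkaddr) == 512):
 * GET_SEGNO(1536) == (1536 - 512) / 512 == 2, and conversely
 * START_BLOCK(2) == 2 * 512 + 512 == 1536, so the two macros are exact
 * inverses at segment boundaries.
 */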

/* Return first segment number of each area */
static inline uint32_t next_zone(int seg_type)
{
	uint32_t next_seg = c.cur_seg[seg_type] + c.segs_per_zone;
	uint64_t next_blkaddr = START_BLOCK(next_seg);
	int dev_num;

	dev_num = target_device_index(next_blkaddr);
	if (!device_is_aliased(dev_num))
		return GET_SEGNO(next_blkaddr);

	while (dev_num < c.ndevs && device_is_aliased(dev_num))
		dev_num++;

	return GET_SEGNO(c.devices[dev_num - 1].end_blkaddr + 1);
}

static inline uint32_t last_zone(uint32_t total_zone)
{
	uint32_t last_seg = (total_zone - 1) * c.segs_per_zone;
	uint64_t last_blkaddr = START_BLOCK(last_seg);
	int dev_num;

	dev_num = target_device_index(last_blkaddr);
	if (!device_is_aliased(dev_num))
		return GET_SEGNO(last_blkaddr);

	while (dev_num > 0 && device_is_aliased(dev_num))
		dev_num--;

	return GET_SEGNO(c.devices[dev_num + 1].start_blkaddr) -
							c.segs_per_zone;
}

#define last_section(cur)	(cur + (c.secs_per_zone - 1) * c.segs_per_sec)

/* Return time fixed by the user or current time by default */
#define mkfs_time ((c.fixed_time == -1) ? time(NULL) : c.fixed_time)

const char *media_ext_lists[] = {
	/* common prefix */
	"mp",	// Covers mp3, mp4, mpeg, mpg
	"wm",	// Covers wma, wmb, wmv
	"og",	// Covers oga, ogg, ogm, ogv
	"jp",	// Covers jpg, jpeg, jp2

	/* video */
	"avi",
	"m4v",
	"m4p",
	"mkv",
	"mov",
	"webm",

	/* audio */
	"wav",
	"m4a",
	"3gp",
	"opus",
	"flac",

	/* image */
	"gif",
	"png",
	"svg",
	"webp",

	/* archives */
	"jar",
	"deb",
	"iso",
	"gz",
	"xz",
	"zst",

	/* others */
	"pdf",
	"pyc",	// Python bytecode
	"ttc",
	"ttf",
	"exe",

	/* android */
	"apk",
	"cnt",	// Image alias
	"exo",	// YouTube
	"odex",	// Android RunTime
	"vdex",	// Android RunTime
	"so",

	NULL
};

const char *hot_ext_lists[] = {
	"db",

#ifndef WITH_ANDROID
	/* Virtual machines */
	"vmdk",	// VMware or VirtualBox
	"vdi",	// VirtualBox
	"qcow2",	// QEMU
#endif
	NULL
};

const char **default_ext_list[] = {
	media_ext_lists,
	hot_ext_lists
};

static bool is_extension_exist(const char *name)
{
	int i;

	for (i = 0; i < F2FS_MAX_EXTENSION; i++) {
		char *ext = (char *)sb->extension_list[i];
		if (!strcmp(ext, name))
			return 1;
	}

	return 0;
}

static void cure_extension_list(void)
{
	const char **extlist;
	char *ext_str;
	char *ue;
	int name_len;
	int i, pos = 0;

	set_sb(extension_count, 0);
	memset(sb->extension_list, 0, sizeof(sb->extension_list));

	for (i = 0; i < 2; i++) {
		ext_str = c.extension_list[i];
		extlist = default_ext_list[i];

		while (*extlist) {
			name_len = strlen(*extlist);
			memcpy(sb->extension_list[pos++], *extlist, name_len);
			extlist++;
		}
		if (i == 0)
			set_sb(extension_count, pos);
		else
			sb->hot_ext_count = pos - get_sb(extension_count);

		if (!ext_str)
			continue;

		/* add user ext list */
		ue = strtok(ext_str, ", ");
		while (ue != NULL) {
			name_len = strlen(ue);
			if (name_len >= F2FS_EXTENSION_LEN) {
				MSG(0, "\tWarn: Extension name (%s) is too long\n", ue);
				goto next;
			}
			if (!is_extension_exist(ue))
				memcpy(sb->extension_list[pos++], ue, name_len);
next:
			ue = strtok(NULL, ", ");
			if (pos >= F2FS_MAX_EXTENSION)
				break;
		}

		if (i == 0)
			set_sb(extension_count, pos);
		else
			sb->hot_ext_count = pos - get_sb(extension_count);

		free(c.extension_list[i]);
	}
}

static void verify_cur_segs(void)
{
	int i, j;
	int reorder = 0;

	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		for (j = i + 1; j < NR_CURSEG_TYPE; j++) {
			if (c.cur_seg[i] == c.cur_seg[j]) {
				reorder = 1;
				break;
			}
		}
	}

	if (!reorder)
		return;

	c.cur_seg[0] = 0;
	for (i = 1; i < NR_CURSEG_TYPE; i++)
		c.cur_seg[i] = next_zone(i - 1);
}

static int f2fs_prepare_super_block(void)
{
	uint32_t blk_size_bytes;
	uint32_t log_sectorsize, log_sectors_per_block;
	uint32_t log_blocksize, log_blks_per_seg;
	uint32_t segment_size_bytes, zone_size_bytes;
	uint32_t alignment_bytes;
	uint32_t sit_segments, nat_segments;
	uint32_t blocks_for_sit, blocks_for_nat, blocks_for_ssa;
	uint32_t total_valid_blks_available;
	uint64_t zone_align_start_offset, diff;
	uint64_t total_meta_zones, total_meta_segments;
	uint32_t sit_bitmap_size, max_sit_bitmap_size;
	uint32_t max_nat_bitmap_size, max_nat_segments;
	uint32_t total_zones, avail_zones = 0;
	enum quota_type qtype;
	int i;

	set_sb(magic, F2FS_SUPER_MAGIC);
	set_sb(major_ver, F2FS_MAJOR_VERSION);
	set_sb(minor_ver, F2FS_MINOR_VERSION);

	log_sectorsize = log_base_2(c.sector_size);
	log_sectors_per_block = log_base_2(c.sectors_per_blk);
	log_blocksize = log_sectorsize + log_sectors_per_block;
	log_blks_per_seg = log_base_2(c.blks_per_seg);

	set_sb(log_sectorsize, log_sectorsize);
	set_sb(log_sectors_per_block, log_sectors_per_block);

	set_sb(log_blocksize, log_blocksize);
	set_sb(log_blocks_per_seg, log_blks_per_seg);

	set_sb(segs_per_sec, c.segs_per_sec);
	set_sb(secs_per_zone, c.secs_per_zone);

	blk_size_bytes = 1 << log_blocksize;
	segment_size_bytes = blk_size_bytes * c.blks_per_seg;
	zone_size_bytes =
		blk_size_bytes * c.secs_per_zone *
		c.segs_per_sec * c.blks_per_seg;

	set_sb(checksum_offset, 0);

	set_sb(block_count, c.total_sectors >> log_sectors_per_block);

	alignment_bytes = c.zoned_mode && c.ndevs > 1 ? segment_size_bytes : zone_size_bytes;

	zone_align_start_offset =
		((uint64_t) c.start_sector * DEFAULT_SECTOR_SIZE +
		2 * F2FS_BLKSIZE + alignment_bytes - 1) /
		alignment_bytes * alignment_bytes -
		(uint64_t) c.start_sector * DEFAULT_SECTOR_SIZE;
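	/*
	 * The expression above rounds (partition start offset + the two
	 * superblock blocks) up to the next alignment boundary, then
	 * re-bases it on the partition start. Worked example (assuming
	 * start_sector == 0, F2FS_BLKSIZE == 4096 and a 2 MiB zone):
	 * (0 + 8192 + 2097151) / 2097152 * 2097152 - 0 == 2 MiB, so
	 * segment0_blkaddr below becomes 2 MiB / 4096 == block 512.
	 */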

	if (c.feature & F2FS_FEATURE_RO)
		zone_align_start_offset = 8192;

	if (c.start_sector % DEFAULT_SECTORS_PER_BLOCK) {
		MSG(1, "\t%s: Align start sector number to the page unit\n",
				c.zoned_mode ? "FAIL" : "WARN");
		MSG(1, "\ti.e., start sector: %d, ofs:%d (sects/page: %d)\n",
				c.start_sector,
				c.start_sector % DEFAULT_SECTORS_PER_BLOCK,
				DEFAULT_SECTORS_PER_BLOCK);
		if (c.zoned_mode)
			return -1;
	}

	if (c.zoned_mode && c.ndevs > 1)
		zone_align_start_offset +=
			(c.devices[0].total_sectors * c.sector_size -
			zone_align_start_offset) % zone_size_bytes;

	set_sb(segment0_blkaddr, zone_align_start_offset / blk_size_bytes);
	sb->cp_blkaddr = sb->segment0_blkaddr;

	MSG(0, "Info: zone aligned segment0 blkaddr: %u\n",
					get_sb(segment0_blkaddr));

	if (c.zoned_mode &&
		((c.ndevs == 1 &&
			(get_sb(segment0_blkaddr) + c.start_sector /
			DEFAULT_SECTORS_PER_BLOCK) % c.zone_blocks) ||
		(c.ndevs > 1 &&
			c.devices[1].start_blkaddr % c.zone_blocks))) {
		MSG(1, "\tError: Unaligned segment0 block address %u\n",
				get_sb(segment0_blkaddr));
		return -1;
	}

	for (i = 0; i < c.ndevs; i++) {
		if (i == 0) {
			c.devices[i].total_segments =
				((c.devices[i].total_sectors *
				c.sector_size - zone_align_start_offset) /
				segment_size_bytes) / c.segs_per_zone *
				c.segs_per_zone;
			c.devices[i].start_blkaddr = 0;
			c.devices[i].end_blkaddr = c.devices[i].total_segments *
						c.blks_per_seg - 1 +
						sb->segment0_blkaddr;
		} else {
			c.devices[i].total_segments =
				(c.devices[i].total_sectors /
				(c.sectors_per_blk * c.blks_per_seg)) /
				c.segs_per_zone * c.segs_per_zone;
			c.devices[i].start_blkaddr =
					c.devices[i - 1].end_blkaddr + 1;
			c.devices[i].end_blkaddr = c.devices[i].start_blkaddr +
					c.devices[i].total_segments *
					c.blks_per_seg - 1;
			if (device_is_aliased(i)) {
				if (c.devices[i].zoned_model ==
						F2FS_ZONED_HM) {
					MSG(1, "\tError: do not support "
						"device aliasing for device[%d]\n", i);
					return -1;
				}
				c.aliased_segments +=
					c.devices[i].total_segments;
			}
		}
		if (c.ndevs > 1) {
			strncpy((char *)sb->devs[i].path, c.devices[i].path, MAX_PATH_LEN);
			sb->devs[i].total_segments =
				cpu_to_le32(c.devices[i].total_segments);
		}

		c.total_segments += c.devices[i].total_segments;
	}
	set_sb(segment_count, c.total_segments);
	set_sb(segment_count_ckpt, F2FS_NUMBER_OF_CHECKPOINT_PACK);

	set_sb(sit_blkaddr, get_sb(segment0_blkaddr) +
			get_sb(segment_count_ckpt) * c.blks_per_seg);

	blocks_for_sit = SIZE_ALIGN(get_sb(segment_count), SIT_ENTRY_PER_BLOCK);

	sit_segments = SEG_ALIGN(blocks_for_sit);

	set_sb(segment_count_sit, sit_segments * 2);

	set_sb(nat_blkaddr, get_sb(sit_blkaddr) + get_sb(segment_count_sit) *
			c.blks_per_seg);

	total_valid_blks_available = (get_sb(segment_count) -
			(get_sb(segment_count_ckpt) +
			get_sb(segment_count_sit))) * c.blks_per_seg;

	blocks_for_nat = SIZE_ALIGN(total_valid_blks_available,
			NAT_ENTRY_PER_BLOCK);

	if (c.large_nat_bitmap) {
		nat_segments = SEG_ALIGN(blocks_for_nat) *
			DEFAULT_NAT_ENTRY_RATIO / 100;
		set_sb(segment_count_nat, nat_segments ? nat_segments : 1);
		max_nat_bitmap_size = (get_sb(segment_count_nat) <<
			log_blks_per_seg) / 8;
		set_sb(segment_count_nat, get_sb(segment_count_nat) * 2);
	} else {
		set_sb(segment_count_nat, SEG_ALIGN(blocks_for_nat));
		max_nat_bitmap_size = 0;
	}

	/*
	 * The number of node segments should not exceed a threshold.
	 * This threshold bounds the size of the NAT bitmap area in a CP
	 * page, and is chosen so that a single CP page cannot overflow.
	 */
	sit_bitmap_size = ((get_sb(segment_count_sit) / 2) <<
			log_blks_per_seg) / 8;
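	/*
	 * One bit per SIT block of a single copy. Illustrative numbers only
	 * (assuming 512-block segments): one SIT segment per copy holds 512
	 * SIT blocks, so sit_bitmap_size == 512 / 8 == 64 bytes.
	 */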

	if (sit_bitmap_size > MAX_SIT_BITMAP_SIZE)
		max_sit_bitmap_size = MAX_SIT_BITMAP_SIZE;
	else
		max_sit_bitmap_size = sit_bitmap_size;

	if (c.large_nat_bitmap) {
		/* use cp_payload if free space of f2fs_checkpoint is not enough */
		if (max_sit_bitmap_size + max_nat_bitmap_size >
						MAX_BITMAP_SIZE_IN_CKPT) {
			uint32_t diff = max_sit_bitmap_size +
						max_nat_bitmap_size -
						MAX_BITMAP_SIZE_IN_CKPT;
			set_sb(cp_payload, F2FS_BLK_ALIGN(diff));
		} else {
			set_sb(cp_payload, 0);
		}
	} else {
		/*
		 * At least one segment should be reserved for the NAT.
		 * When the SIT is too large, we should expand the CP area,
		 * which requires more pages for the CP.
		 */
		if (max_sit_bitmap_size > MAX_SIT_BITMAP_SIZE_IN_CKPT) {
			max_nat_bitmap_size = MAX_BITMAP_SIZE_IN_CKPT;
			set_sb(cp_payload, F2FS_BLK_ALIGN(max_sit_bitmap_size));
		} else {
			max_nat_bitmap_size = MAX_BITMAP_SIZE_IN_CKPT -
							max_sit_bitmap_size;
			set_sb(cp_payload, 0);
		}
		max_nat_segments = (max_nat_bitmap_size * 8) >> log_blks_per_seg;

		if (get_sb(segment_count_nat) > max_nat_segments)
			set_sb(segment_count_nat, max_nat_segments);

		set_sb(segment_count_nat, get_sb(segment_count_nat) * 2);
	}

	set_sb(ssa_blkaddr, get_sb(nat_blkaddr) + get_sb(segment_count_nat) *
			c.blks_per_seg);

	total_valid_blks_available = (get_sb(segment_count) -
			(get_sb(segment_count_ckpt) +
			get_sb(segment_count_sit) +
			get_sb(segment_count_nat))) *
			c.blks_per_seg;

	if (c.feature & F2FS_FEATURE_RO)
		blocks_for_ssa = 0;
	else
		blocks_for_ssa = total_valid_blks_available /
				c.blks_per_seg + 1;

	set_sb(segment_count_ssa, SEG_ALIGN(blocks_for_ssa));

	total_meta_segments = get_sb(segment_count_ckpt) +
		get_sb(segment_count_sit) +
		get_sb(segment_count_nat) +
		get_sb(segment_count_ssa);
	diff = total_meta_segments % (c.segs_per_zone);
	if (diff)
		set_sb(segment_count_ssa, get_sb(segment_count_ssa) +
			(c.segs_per_zone - diff));

	total_meta_zones = ZONE_ALIGN(total_meta_segments *
						c.blks_per_seg);

	set_sb(main_blkaddr, get_sb(segment0_blkaddr) + total_meta_zones *
				c.segs_per_zone * c.blks_per_seg);

	if (c.zoned_mode) {
		/*
		 * Make sure there is enough randomly writeable
		 * space at the beginning of the disk.
		 */
		unsigned long main_blkzone = get_sb(main_blkaddr) / c.zone_blocks;

		if (c.devices[0].zoned_model == F2FS_ZONED_HM &&
				c.devices[0].nr_rnd_zones < main_blkzone) {
			MSG(0, "\tError: Device does not have enough random "
					"write zones for F2FS volume (%lu needed)\n",
					main_blkzone);
			return -1;
		}
		/*
		 * Check that each conventional device has enough space to
		 * accommodate all metadata; a zoned device must not overlap
		 * the metadata area.
		 */
		for (i = 1; i < c.ndevs; i++) {
			if (c.devices[i].zoned_model != F2FS_ZONED_NONE &&
				c.devices[i].start_blkaddr < get_sb(main_blkaddr)) {
				MSG(0, "\tError: Conventional device %s is too small,"
					" (%"PRIu64" MiB needed).\n", c.devices[0].path,
					(get_sb(main_blkaddr) -
					c.devices[i].start_blkaddr) >> 8);
				return -1;
			}
		}
	}

	total_zones = get_sb(segment_count) / (c.segs_per_zone) -
							total_meta_zones;
	if (total_zones == 0)
		goto too_small;
	set_sb(section_count, total_zones * c.secs_per_zone);

	set_sb(segment_count_main, get_sb(section_count) * c.segs_per_sec);

	/*
	 * Let's determine the best reserved and overprovisioned space.
	 * For a zoned device whose zone capacity is less than its zone
	 * size, the segments beyond the zone capacity are unusable in each
	 * zone, so derive the overprovision ratio and reserved segment
	 * count from the average usable segs_per_sec.
	 */
	if (c.overprovision == 0)
		c.overprovision = get_best_overprovision(sb);

	c.reserved_segments = get_reserved(sb, c.overprovision);

	if (c.feature & F2FS_FEATURE_RO) {
		c.overprovision = 0;
		c.reserved_segments = 0;
	}
	if ((!(c.feature & F2FS_FEATURE_RO) &&
		c.overprovision == 0) ||
		c.total_segments < F2FS_MIN_SEGMENTS ||
		(c.devices[0].total_sectors *
			c.sector_size < zone_align_start_offset) ||
		(get_sb(segment_count_main) - NR_CURSEG_TYPE) <
			c.reserved_segments) {
		goto too_small;
	}

	if (c.vol_uuid) {
		if (uuid_parse(c.vol_uuid, sb->uuid)) {
			MSG(0, "\tError: supplied string is not a valid UUID\n");
			return -1;
		}
	} else {
		uuid_generate(sb->uuid);
	}

	/* precompute checksum seed for metadata */
	if (c.feature & F2FS_FEATURE_INODE_CHKSUM)
		c.chksum_seed = f2fs_cal_crc32(~0, sb->uuid, sizeof(sb->uuid));

	utf8_to_utf16((char *)sb->volume_name, (const char *)c.vol_label,
				MAX_VOLUME_NAME, strlen(c.vol_label));
	set_sb(node_ino, 1);
	set_sb(meta_ino, 2);
	set_sb(root_ino, 3);
	c.next_free_nid = 4;

	for (qtype = 0; qtype < F2FS_MAX_QUOTAS; qtype++) {
		if (!((1 << qtype) & c.quota_bits))
			continue;
		sb->qf_ino[qtype] = cpu_to_le32(c.next_free_nid++);
		MSG(0, "Info: add quota type = %u => %u\n",
					qtype, c.next_free_nid - 1);
	}

	if (c.feature & F2FS_FEATURE_LOST_FOUND)
		c.lpf_ino = c.next_free_nid++;

	if (c.aliased_devices) {
		c.first_alias_ino = c.next_free_nid;
		c.next_free_nid += c.aliased_devices;
		avail_zones += c.aliased_segments / c.segs_per_zone;
	}

	if (c.feature & F2FS_FEATURE_RO)
		avail_zones += 2;
	else
		avail_zones += 6;

	if (total_zones <= avail_zones) {
		MSG(1, "\tError: %d zones: Need more zones "
			"by shrinking zone size\n", total_zones);
		return -1;
	}

	if (c.feature & F2FS_FEATURE_RO) {
		c.cur_seg[CURSEG_HOT_NODE] = last_section(last_zone(total_zones));
		c.cur_seg[CURSEG_WARM_NODE] = 0;
		c.cur_seg[CURSEG_COLD_NODE] = 0;
		c.cur_seg[CURSEG_HOT_DATA] = 0;
		c.cur_seg[CURSEG_COLD_DATA] = 0;
		c.cur_seg[CURSEG_WARM_DATA] = 0;
	} else if (c.zoned_mode) {
		c.cur_seg[CURSEG_HOT_NODE] = 0;
		if (c.zoned_model == F2FS_ZONED_HM) {
			uint32_t conv_zones =
				c.devices[0].total_segments / c.segs_per_zone
				- total_meta_zones;

			if (total_zones - conv_zones >= avail_zones)
				c.cur_seg[CURSEG_HOT_NODE] =
					(c.devices[1].start_blkaddr -
					get_sb(main_blkaddr)) / c.blks_per_seg;
		}
		c.cur_seg[CURSEG_WARM_NODE] = next_zone(CURSEG_HOT_NODE);
		c.cur_seg[CURSEG_COLD_NODE] = next_zone(CURSEG_WARM_NODE);
		c.cur_seg[CURSEG_HOT_DATA] = next_zone(CURSEG_COLD_NODE);
		c.cur_seg[CURSEG_WARM_DATA] = next_zone(CURSEG_HOT_DATA);
		c.cur_seg[CURSEG_COLD_DATA] = next_zone(CURSEG_WARM_DATA);
	} else {
		c.cur_seg[CURSEG_HOT_NODE] = 0;
		c.cur_seg[CURSEG_WARM_NODE] = next_zone(CURSEG_HOT_NODE);
		c.cur_seg[CURSEG_COLD_NODE] = next_zone(CURSEG_WARM_NODE);
		c.cur_seg[CURSEG_HOT_DATA] = next_zone(CURSEG_COLD_NODE);
		c.cur_seg[CURSEG_COLD_DATA] =
			max(last_zone((total_zones >> 2)),
				next_zone(CURSEG_HOT_DATA));
		c.cur_seg[CURSEG_WARM_DATA] =
			max(last_zone((total_zones >> 1)),
				next_zone(CURSEG_COLD_DATA));
	}

	/* if there is redundancy, reassign it */
	if (!(c.feature & F2FS_FEATURE_RO))
		verify_cur_segs();

	cure_extension_list();

	/* get kernel version */
	if (c.kd >= 0) {
		dev_read_version(c.version, 0, VERSION_LEN);
		get_kernel_version(c.version);
	} else {
		get_kernel_uname_version(c.version);
	}
	MSG(0, "Info: format version with\n \"%s\"\n", c.version);

	memcpy(sb->version, c.version, VERSION_LEN);
	memcpy(sb->init_version, c.version, VERSION_LEN);

	if (c.feature & F2FS_FEATURE_CASEFOLD) {
		set_sb(s_encoding, c.s_encoding);
		set_sb(s_encoding_flags, c.s_encoding_flags);
	}

	sb->feature = cpu_to_le32(c.feature);

	if (c.feature & F2FS_FEATURE_SB_CHKSUM) {
		set_sb(checksum_offset, SB_CHKSUM_OFFSET);
		set_sb(crc, f2fs_cal_crc32(F2FS_SUPER_MAGIC, sb,
						SB_CHKSUM_OFFSET));
		MSG(1, "Info: SB CRC is set: offset (%d), crc (0x%x)\n",
					get_sb(checksum_offset), get_sb(crc));
	}

	return 0;

too_small:
	MSG(0, "\tError: Device size is not sufficient for F2FS volume\n");
	return -1;
}

static int f2fs_init_sit_area(void)
{
	uint32_t blk_size, seg_size;
	uint32_t index = 0;
	uint64_t sit_seg_addr = 0;
	uint8_t *zero_buf = NULL;

	blk_size = 1 << get_sb(log_blocksize);
	seg_size = (1 << get_sb(log_blocks_per_seg)) * blk_size;

	zero_buf = calloc(sizeof(uint8_t), seg_size);
	if(zero_buf == NULL) {
		MSG(1, "\tError: Calloc Failed for sit_zero_buf!!!\n");
		return -1;
	}

	sit_seg_addr = get_sb(sit_blkaddr);
	sit_seg_addr *= blk_size;

	DBG(1, "\tFilling sit area at offset 0x%08"PRIx64"\n", sit_seg_addr);
	for (index = 0; index < (get_sb(segment_count_sit) / 2); index++) {
		if (dev_fill(zero_buf, sit_seg_addr, seg_size, WRITE_LIFE_NONE)) {
			MSG(1, "\tError: While zeroing out the sit area "
					"on disk!!!\n");
			free(zero_buf);
			return -1;
		}
		sit_seg_addr += seg_size;
	}

	free(zero_buf);
	return 0;
}

static int f2fs_init_nat_area(void)
{
	uint32_t blk_size, seg_size;
	uint32_t index = 0;
	uint64_t nat_seg_addr = 0;
	uint8_t *nat_buf = NULL;

	blk_size = 1 << get_sb(log_blocksize);
	seg_size = (1 << get_sb(log_blocks_per_seg)) * blk_size;

	nat_buf = calloc(sizeof(uint8_t), seg_size);
	if (nat_buf == NULL) {
		MSG(1, "\tError: Calloc Failed for nat_zero_blk!!!\n");
		return -1;
	}

	nat_seg_addr = get_sb(nat_blkaddr);
	nat_seg_addr *= blk_size;

	DBG(1, "\tFilling nat area at offset 0x%08"PRIx64"\n", nat_seg_addr);
	for (index = 0; index < get_sb(segment_count_nat) / 2; index++) {
		if (dev_fill(nat_buf, nat_seg_addr, seg_size, WRITE_LIFE_NONE)) {
			MSG(1, "\tError: While zeroing out the nat area "
					"on disk!!!\n");
			free(nat_buf);
			return -1;
		}
		nat_seg_addr = nat_seg_addr + (2 * seg_size);
	}

	free(nat_buf);
	return 0;
}

static int f2fs_write_check_point_pack(void)
{
	struct f2fs_summary_block *sum;
	struct f2fs_journal *journal;
	uint32_t blk_size_bytes;
	uint32_t nat_bits_bytes, nat_bits_blocks;
	unsigned char *nat_bits = NULL, *empty_nat_bits;
	uint64_t cp_seg_blk = 0;
	uint32_t crc = 0, flags;
	unsigned int i;
	char *cp_payload = NULL;
	char *sum_compact, *sum_compact_p;
	struct f2fs_summary *sum_entry;
	unsigned short vblocks;
	uint32_t used_segments = c.aliased_segments;
	int ret = -1;

	cp = calloc(F2FS_BLKSIZE, 1);
	if (cp == NULL) {
		MSG(1, "\tError: Calloc failed for f2fs_checkpoint!!!\n");
		return ret;
	}

	sum = calloc(F2FS_BLKSIZE, 1);
	if (sum == NULL) {
		MSG(1, "\tError: Calloc failed for summary_node!!!\n");
		goto free_cp;
	}

	sum_compact = calloc(F2FS_BLKSIZE, 1);
	if (sum_compact == NULL) {
		MSG(1, "\tError: Calloc failed for summary buffer!!!\n");
		goto free_sum;
	}
	sum_compact_p = sum_compact;

	nat_bits_bytes = get_sb(segment_count_nat) << 5;
	nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 +
						F2FS_BLKSIZE - 1);
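	/*
	 * Rough sketch of the sizing above (assuming 512-block segments):
	 * each NAT copy spans (segment_count_nat / 2) * 512 blocks, and one
	 * bit per NAT block gives segment_count_nat * 32 bytes, i.e. the
	 * "<< 5". The buffer laid out later holds an 8-byte CRC followed by
	 * the full_nat and empty_nat bitmaps, rounded up to whole blocks.
	 */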
	nat_bits = calloc(F2FS_BLKSIZE, nat_bits_blocks);
	if (nat_bits == NULL) {
		MSG(1, "\tError: Calloc failed for nat bits buffer!!!\n");
		goto free_sum_compact;
	}

	cp_payload = calloc(F2FS_BLKSIZE, 1);
	if (cp_payload == NULL) {
		MSG(1, "\tError: Calloc failed for cp_payload!!!\n");
		goto free_nat_bits;
	}

	/* 1. cp page 1 of checkpoint pack 1 */
	srand((c.fake_seed) ? 0 : time(NULL));
	cp->checkpoint_ver = cpu_to_le64(rand() | 0x1);
	set_cp(cur_node_segno[0], c.cur_seg[CURSEG_HOT_NODE]);
	set_cp(cur_node_segno[1], c.cur_seg[CURSEG_WARM_NODE]);
	set_cp(cur_node_segno[2], c.cur_seg[CURSEG_COLD_NODE]);
	set_cp(cur_data_segno[0], c.cur_seg[CURSEG_HOT_DATA]);
	set_cp(cur_data_segno[1], c.cur_seg[CURSEG_WARM_DATA]);
	set_cp(cur_data_segno[2], c.cur_seg[CURSEG_COLD_DATA]);
	for (i = 3; i < MAX_ACTIVE_NODE_LOGS; i++) {
		set_cp(cur_node_segno[i], 0xffffffff);
		set_cp(cur_data_segno[i], 0xffffffff);
	}

	set_cp(cur_node_blkoff[0], c.curseg_offset[CURSEG_HOT_NODE]);
	set_cp(cur_node_blkoff[2], c.curseg_offset[CURSEG_COLD_NODE]);
	set_cp(cur_data_blkoff[0], c.curseg_offset[CURSEG_HOT_DATA]);
	set_cp(cur_data_blkoff[2], c.curseg_offset[CURSEG_COLD_DATA]);
	set_cp(valid_block_count, c.curseg_offset[CURSEG_HOT_NODE] +
			c.curseg_offset[CURSEG_HOT_DATA] +
			c.curseg_offset[CURSEG_COLD_NODE] +
			c.curseg_offset[CURSEG_COLD_DATA] +
			c.aliased_segments * c.blks_per_seg);
	set_cp(rsvd_segment_count, c.reserved_segments);

	/*
	 * For zoned devices whose zone capacity is less than the zone size,
	 * derive the overprovision segment count from the usable segments
	 * in the device.
	 */
	set_cp(overprov_segment_count, (f2fs_get_usable_segments(sb) -
			get_cp(rsvd_segment_count)) *
			c.overprovision / 100);
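	/*
	 * Illustrative numbers only: with 1000 usable segments, 48 reserved
	 * segments and a 5% overprovision ratio, this yields
	 * (1000 - 48) * 5 / 100 == 47 overprovision segments before the
	 * adjustments below.
	 */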

	/*
	 * If conf_reserved_sections has a non-zero value,
	 * overprov_segment_count is set to
	 * overprov_segment_count + rsvd_segment_count.
	 */
	if (c.conf_reserved_sections) {
		/*
		 * Overprovision segments must be bigger than two sections.
		 * In the non-configurable reserved section case,
		 * overprovision segments are always bigger than two sections.
		 */
		if (get_cp(overprov_segment_count) <
					overprovision_segment_buffer(sb)) {
			MSG(0, "\tError: Not enough overprovision segments (%u)\n",
					get_cp(overprov_segment_count));
			goto free_cp_payload;
		}
		set_cp(overprov_segment_count, get_cp(overprov_segment_count) +
				get_cp(rsvd_segment_count));
	} else {
		/*
		 * overprov_segment_count must be bigger than
		 * rsvd_segment_count.
		 */
		set_cp(overprov_segment_count, max(get_cp(rsvd_segment_count),
			get_cp(overprov_segment_count)) + overprovision_segment_buffer(sb));
	}

	if (f2fs_get_usable_segments(sb) <= get_cp(overprov_segment_count)) {
		MSG(0, "\tError: Not enough segments to create F2FS Volume\n");
		goto free_cp_payload;
	}
	MSG(0, "Info: Overprovision ratio = %.3lf%%\n", c.overprovision);
	MSG(0, "Info: Overprovision segments = %u (GC reserved = %u)\n",
					get_cp(overprov_segment_count),
					c.reserved_segments);

	/* main segments - reserved segments - (node + data segments) */
	if (c.feature & F2FS_FEATURE_RO)
		used_segments += 2;
	else
		used_segments += 6;

	set_cp(user_block_count, (f2fs_get_usable_segments(sb) -
			get_cp(overprov_segment_count)) * c.blks_per_seg);
	set_cp(free_segment_count, f2fs_get_usable_segments(sb) -
			used_segments);

	/* cp page (2), data summaries (1), node summaries (3) */
	set_cp(cp_pack_total_block_count, 6 + get_sb(cp_payload));
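	/*
	 * Resulting checkpoint pack layout (6 blocks plus cp_payload), in
	 * the order written below: cp page 1, cp_payload blocks, compact
	 * data summary (hot/warm/cold), hot node summary, warm node
	 * summary, cold node summary, cp page 2.
	 */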
	flags = CP_UMOUNT_FLAG | CP_COMPACT_SUM_FLAG;
	if (!(c.disabled_feature & F2FS_FEATURE_NAT_BITS) &&
		get_cp(cp_pack_total_block_count) <=
		(1 << get_sb(log_blocks_per_seg)) - nat_bits_blocks)
		flags |= CP_NAT_BITS_FLAG;

	if (c.trimmed)
		flags |= CP_TRIMMED_FLAG;

	if (c.large_nat_bitmap)
		flags |= CP_LARGE_NAT_BITMAP_FLAG;

	set_cp(ckpt_flags, flags);
	set_cp(cp_pack_start_sum, 1 + get_sb(cp_payload));
	set_cp(valid_node_count, c.curseg_offset[CURSEG_HOT_NODE] +
			c.curseg_offset[CURSEG_COLD_NODE]);
	set_cp(valid_inode_count, c.curseg_offset[CURSEG_HOT_NODE] +
			c.curseg_offset[CURSEG_COLD_NODE]);
	set_cp(next_free_nid, c.next_free_nid);
	set_cp(sit_ver_bitmap_bytesize, ((get_sb(segment_count_sit) / 2) <<
			get_sb(log_blocks_per_seg)) / 8);

	set_cp(nat_ver_bitmap_bytesize, ((get_sb(segment_count_nat) / 2) <<
			get_sb(log_blocks_per_seg)) / 8);

	if (c.large_nat_bitmap)
		set_cp(checksum_offset, CP_MIN_CHKSUM_OFFSET);
	else
		set_cp(checksum_offset, CP_CHKSUM_OFFSET);

	crc = f2fs_checkpoint_chksum(cp);
	*((__le32 *)((unsigned char *)cp + get_cp(checksum_offset))) =
							cpu_to_le32(crc);

	blk_size_bytes = 1 << get_sb(log_blocksize);

	if (blk_size_bytes != F2FS_BLKSIZE) {
		MSG(1, "\tError: Wrong block size %d / %d!!!\n",
					blk_size_bytes, F2FS_BLKSIZE);
		goto free_cp_payload;
	}

	cp_seg_blk = get_sb(segment0_blkaddr);

	DBG(1, "\tWriting main segments, cp at offset 0x%08"PRIx64"\n",
						cp_seg_blk);
	if (dev_write_block(cp, cp_seg_blk, WRITE_LIFE_NONE)) {
		MSG(1, "\tError: While writing the cp to disk!!!\n");
		goto free_cp_payload;
	}

	for (i = 0; i < get_sb(cp_payload); i++) {
		cp_seg_blk++;
		if (dev_fill_block(cp_payload, cp_seg_blk, WRITE_LIFE_NONE)) {
			MSG(1, "\tError: While zeroing out the sit bitmap area "
					"on disk!!!\n");
			goto free_cp_payload;
		}
	}

	/* Prepare and write Segment summary for HOT/WARM/COLD DATA
	 *
	 * The structure of compact summary
	 * +-------------------+
	 * | nat_journal       |
	 * +-------------------+
	 * | sit_journal       |
	 * +-------------------+
	 * | hot data summary  |
	 * +-------------------+
	 * | warm data summary |
	 * +-------------------+
	 * | cold data summary |
	 * +-------------------+
	 */

	/* nat_journal */
	journal = &c.nat_jnl;
	memcpy(sum_compact_p, &journal->n_nats, SUM_JOURNAL_SIZE);
	sum_compact_p += SUM_JOURNAL_SIZE;

	/* sit_journal */
	journal = &c.sit_jnl;

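	/*
	 * Note: se.vblocks packs two fields; the low SIT_VBLOCKS_SHIFT (10)
	 * bits hold the valid block count, and the upper bits hold the
	 * segment type, so "vblocks | (type << 10)" below tags each current
	 * segment with its curseg type.
	 */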
	if (c.feature & F2FS_FEATURE_RO) {
		i = CURSEG_RO_HOT_DATA;
		vblocks = le16_to_cpu(journal->sit_j.entries[i].se.vblocks);
		journal->sit_j.entries[i].segno = cp->cur_data_segno[0];
		journal->sit_j.entries[i].se.vblocks =
				cpu_to_le16(vblocks | (CURSEG_HOT_DATA << 10));

		i = CURSEG_RO_HOT_NODE;
		vblocks = le16_to_cpu(journal->sit_j.entries[i].se.vblocks);
		journal->sit_j.entries[i].segno = cp->cur_node_segno[0];
		journal->sit_j.entries[i].se.vblocks =
				cpu_to_le16(vblocks | (CURSEG_HOT_NODE << 10));

		journal->n_sits = cpu_to_le16(2);
	} else {
		for (i = CURSEG_HOT_DATA; i < NR_CURSEG_TYPE; i++) {
			if (i < NR_CURSEG_DATA_TYPE)
				journal->sit_j.entries[i].segno =
					cp->cur_data_segno[i];
			else
				journal->sit_j.entries[i].segno =
					cp->cur_node_segno[i - NR_CURSEG_DATA_TYPE];

			vblocks =
				le16_to_cpu(journal->sit_j.entries[i].se.vblocks);
			journal->sit_j.entries[i].se.vblocks =
				cpu_to_le16(vblocks | (i << 10));
		}

		journal->n_sits = cpu_to_le16(6);
	}

	memcpy(sum_compact_p, &journal->n_sits, SUM_JOURNAL_SIZE);
	sum_compact_p += SUM_JOURNAL_SIZE;

	/* hot data summary */
	memset(sum, 0, F2FS_BLKSIZE);
	SET_SUM_TYPE(sum, SUM_TYPE_DATA);

	sum_entry = (struct f2fs_summary *)sum_compact_p;
	memcpy(sum_entry, c.sum[CURSEG_HOT_DATA],
			sizeof(struct f2fs_summary) * MAX_CACHE_SUMS);

	/* warm data summary, nothing to do */
	/* cold data summary, nothing to do */

	cp_seg_blk++;
	DBG(1, "\tWriting Segment summary for HOT/WARM/COLD_DATA, at offset 0x%08"PRIx64"\n",
			cp_seg_blk);
	if (dev_write_block(sum_compact, cp_seg_blk, WRITE_LIFE_NONE)) {
		MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
		goto free_cp_payload;
	}

	/* Prepare and write Segment summary for HOT_NODE */
	memset(sum, 0, F2FS_BLKSIZE);
	SET_SUM_TYPE(sum, SUM_TYPE_NODE);
	memcpy(sum->entries, c.sum[CURSEG_HOT_NODE],
			sizeof(struct f2fs_summary) * MAX_CACHE_SUMS);

	cp_seg_blk++;
	DBG(1, "\tWriting Segment summary for HOT_NODE, at offset 0x%08"PRIx64"\n",
			cp_seg_blk);
	if (dev_write_block(sum, cp_seg_blk, WRITE_LIFE_NONE)) {
		MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
		goto free_cp_payload;
	}

	/* Fill segment summary for WARM_NODE to zero. */
	memset(sum, 0, F2FS_BLKSIZE);
	SET_SUM_TYPE(sum, SUM_TYPE_NODE);

	cp_seg_blk++;
	DBG(1, "\tWriting Segment summary for WARM_NODE, at offset 0x%08"PRIx64"\n",
			cp_seg_blk);
	if (dev_write_block(sum, cp_seg_blk, WRITE_LIFE_NONE)) {
		MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
		goto free_cp_payload;
	}

	/* Prepare and write Segment summary for COLD_NODE */
	memset(sum, 0, F2FS_BLKSIZE);
	SET_SUM_TYPE(sum, SUM_TYPE_NODE);
	memcpy(sum->entries, c.sum[CURSEG_COLD_NODE],
			sizeof(struct f2fs_summary) * MAX_CACHE_SUMS);

	cp_seg_blk++;
	DBG(1, "\tWriting Segment summary for COLD_NODE, at offset 0x%08"PRIx64"\n",
			cp_seg_blk);
	if (dev_write_block(sum, cp_seg_blk, WRITE_LIFE_NONE)) {
		MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
		goto free_cp_payload;
	}

	/* cp page2 */
	cp_seg_blk++;
	DBG(1, "\tWriting cp page2, at offset 0x%08"PRIx64"\n", cp_seg_blk);
	if (dev_write_block(cp, cp_seg_blk, WRITE_LIFE_NONE)) {
		MSG(1, "\tError: While writing the cp to disk!!!\n");
		goto free_cp_payload;
	}

	/* write NAT bits, if possible */
	if (flags & CP_NAT_BITS_FLAG) {
		uint32_t i;

		*(__le64 *)nat_bits = get_cp_crc(cp);
		empty_nat_bits = nat_bits + 8 + nat_bits_bytes;
		memset(empty_nat_bits, 0xff, nat_bits_bytes);
		test_and_clear_bit_le(0, empty_nat_bits);

		/* write the last blocks in cp pack */
		cp_seg_blk = get_sb(segment0_blkaddr) + (1 <<
				get_sb(log_blocks_per_seg)) - nat_bits_blocks;

		DBG(1, "\tWriting NAT bits pages, at offset 0x%08"PRIx64"\n",
					cp_seg_blk);

		for (i = 0; i < nat_bits_blocks; i++) {
			if (dev_write_block(nat_bits + i *
					F2FS_BLKSIZE, cp_seg_blk + i,
					WRITE_LIFE_NONE)) {
				MSG(1, "\tError: write NAT bits to disk!!!\n");
				goto free_cp_payload;
			}
		}
	}

	/* cp page 1 of check point pack 2
	 * Initialize other checkpoint pack with version zero
	 */
	cp->checkpoint_ver = 0;

	crc = f2fs_checkpoint_chksum(cp);
	*((__le32 *)((unsigned char *)cp + get_cp(checksum_offset))) =
							cpu_to_le32(crc);
	cp_seg_blk = get_sb(segment0_blkaddr) + c.blks_per_seg;
	DBG(1, "\tWriting cp page 1 of checkpoint pack 2, at offset 0x%08"PRIx64"\n",
				cp_seg_blk);
	if (dev_write_block(cp, cp_seg_blk, WRITE_LIFE_NONE)) {
		MSG(1, "\tError: While writing the cp to disk!!!\n");
		goto free_cp_payload;
	}

	for (i = 0; i < get_sb(cp_payload); i++) {
		cp_seg_blk++;
		if (dev_fill_block(cp_payload, cp_seg_blk, WRITE_LIFE_NONE)) {
			MSG(1, "\tError: While zeroing out the sit bitmap area "
					"on disk!!!\n");
			goto free_cp_payload;
		}
	}

	/* cp page 2 of check point pack 2 */
	cp_seg_blk += (le32_to_cpu(cp->cp_pack_total_block_count) -
					get_sb(cp_payload) - 1);
	DBG(1, "\tWriting cp page 2 of checkpoint pack 2, at offset 0x%08"PRIx64"\n",
				cp_seg_blk);
	if (dev_write_block(cp, cp_seg_blk, WRITE_LIFE_NONE)) {
		MSG(1, "\tError: While writing the cp to disk!!!\n");
		goto free_cp_payload;
	}

	ret = 0;

free_cp_payload:
	free(cp_payload);
free_nat_bits:
	free(nat_bits);
free_sum_compact:
	free(sum_compact);
free_sum:
	free(sum);
free_cp:
	free(cp);
	return ret;
}

static int f2fs_write_super_block(void)
{
	int index;
	uint8_t *zero_buff;

	zero_buff = calloc(F2FS_BLKSIZE, 1);
	if (zero_buff == NULL) {
		MSG(1, "\tError: Calloc Failed for super_blk_zero_buf!!!\n");
		return -1;
	}

	memcpy(zero_buff + F2FS_SUPER_OFFSET, sb, sizeof(*sb));
	DBG(1, "\tWriting super block, at offset 0x%08x\n", 0);
	for (index = 0; index < 2; index++) {
		if (dev_write_block(zero_buff, index, WRITE_LIFE_NONE)) {
			MSG(1, "\tError: While writing super_blk "
					"on disk!!! index : %d\n", index);
			free(zero_buff);
			return -1;
		}
	}

	free(zero_buff);
	return 0;
}

#ifndef WITH_ANDROID
static int f2fs_discard_obsolete_dnode(void)
{
	struct f2fs_node *raw_node;
	uint64_t next_blkaddr = 0, offset;
	u64 end_blkaddr = (get_sb(segment_count_main) <<
			get_sb(log_blocks_per_seg)) + get_sb(main_blkaddr);
	uint64_t start_inode_pos = get_sb(main_blkaddr);
	uint64_t last_inode_pos;

	if (c.zoned_mode || c.feature & F2FS_FEATURE_RO)
		return 0;

	raw_node = calloc(F2FS_BLKSIZE, 1);
	if (raw_node == NULL) {
		MSG(1, "\tError: Calloc Failed for discard_raw_node!!!\n");
		return -1;
	}

	/* avoid power-off-recovery based on roll-forward policy */
	offset = get_sb(main_blkaddr);
	offset += c.cur_seg[CURSEG_WARM_NODE] * c.blks_per_seg;

	last_inode_pos = start_inode_pos +
		c.cur_seg[CURSEG_HOT_NODE] * c.blks_per_seg +
		c.curseg_offset[CURSEG_COLD_NODE] - 1;

	do {
		if (offset < get_sb(main_blkaddr) || offset >= end_blkaddr)
			break;

		if (dev_read_block(raw_node, offset)) {
			MSG(1, "\tError: While traversing direct node!!!\n");
			free(raw_node);
			return -1;
		}

		next_blkaddr = le32_to_cpu(F2FS_NODE_FOOTER(raw_node)->next_blkaddr);
		memset(raw_node, 0, F2FS_BLKSIZE);

		DBG(1, "\tDiscard dnode, at offset 0x%08"PRIx64"\n", offset);
		if (dev_write_block(raw_node, offset,
				f2fs_io_type_to_rw_hint(CURSEG_WARM_NODE))) {
			MSG(1, "\tError: While discarding direct node!!!\n");
			free(raw_node);
			return -1;
		}
		offset = next_blkaddr;
		/* should avoid recursive chain due to stale data */
		if (offset >= start_inode_pos && offset <= last_inode_pos)
			break;
	} while (1);

	free(raw_node);
	return 0;
}
#endif

static block_t alloc_next_free_block(int curseg_type)
{
	block_t blkaddr;

	blkaddr = get_sb(main_blkaddr) +
			c.cur_seg[curseg_type] * c.blks_per_seg +
			c.curseg_offset[curseg_type];

	c.curseg_offset[curseg_type]++;

	return blkaddr;
}

void update_sit_journal(int curseg_type)
{
	struct f2fs_journal *sit_jnl = &c.sit_jnl;
	unsigned short vblocks;
	int idx = curseg_type;

	if (c.feature & F2FS_FEATURE_RO) {
		if (curseg_type < NR_CURSEG_DATA_TYPE)
			idx = CURSEG_RO_HOT_DATA;
		else
			idx = CURSEG_RO_HOT_NODE;
	}

	f2fs_set_bit(c.curseg_offset[curseg_type] - 1,
			(char *)sit_jnl->sit_j.entries[idx].se.valid_map);

	vblocks = le16_to_cpu(sit_jnl->sit_j.entries[idx].se.vblocks);
	sit_jnl->sit_j.entries[idx].se.vblocks = cpu_to_le16(vblocks + 1);
}

void update_nat_journal(nid_t nid, block_t blkaddr)
{
	struct f2fs_journal *nat_jnl = &c.nat_jnl;
	unsigned short n_nats = le16_to_cpu(nat_jnl->n_nats);

	nat_jnl->nat_j.entries[n_nats].nid = cpu_to_le32(nid);
	nat_jnl->nat_j.entries[n_nats].ne.version = 0;
	nat_jnl->nat_j.entries[n_nats].ne.ino = cpu_to_le32(nid);
	nat_jnl->nat_j.entries[n_nats].ne.block_addr = cpu_to_le32(blkaddr);
	nat_jnl->n_nats = cpu_to_le16(n_nats + 1);
}

void update_summary_entry(int curseg_type, nid_t nid,
					unsigned short ofs_in_node)
{
	struct f2fs_summary *sum;
	unsigned int curofs = c.curseg_offset[curseg_type] - 1;

	assert(curofs < MAX_CACHE_SUMS);

	sum = c.sum[curseg_type] + curofs;
	sum->nid = cpu_to_le32(nid);
	sum->ofs_in_node = cpu_to_le16(ofs_in_node);
}

static void add_dentry(struct f2fs_dentry_block *dent_blk, unsigned int *didx,
			const char *name, uint32_t ino, u8 type)
{
	int len = strlen(name);
	f2fs_hash_t hash;

	if (name[0] == '.' && (len == 1 || (len == 2 && name[1] == '.')))
		hash = 0;
	else
		hash = f2fs_dentry_hash(0, 0, (unsigned char *)name, len);

	F2FS_DENTRY_BLOCK_DENTRY(dent_blk, *didx).hash_code = cpu_to_le32(hash);
	F2FS_DENTRY_BLOCK_DENTRY(dent_blk, *didx).ino = cpu_to_le32(ino);
	F2FS_DENTRY_BLOCK_DENTRY(dent_blk, *didx).name_len = cpu_to_le16(len);
	F2FS_DENTRY_BLOCK_DENTRY(dent_blk, *didx).file_type = type;

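	/*
	 * Names are stored in 8-byte (F2FS_SLOT_LEN) slots; e.g. the
	 * 10-byte name "lost+found" consumes two slots, so two bits are set
	 * in the dentry bitmap for that entry.
	 */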
	while (len > F2FS_SLOT_LEN) {
		memcpy(F2FS_DENTRY_BLOCK_FILENAME(dent_blk, *didx), name,
				F2FS_SLOT_LEN);
		test_and_set_bit_le(*didx, dent_blk->dentry_bitmap);
		len -= (int)F2FS_SLOT_LEN;
		name += F2FS_SLOT_LEN;
		(*didx)++;
	}
	memcpy(F2FS_DENTRY_BLOCK_FILENAME(dent_blk, *didx), name, len);
	test_and_set_bit_le(*didx, dent_blk->dentry_bitmap);
	(*didx)++;
}

static block_t f2fs_add_default_dentry_root(void)
{
	struct f2fs_dentry_block *dent_blk = NULL;
	block_t data_blkaddr;
	unsigned int didx = 0;

	dent_blk = calloc(F2FS_BLKSIZE, 1);
	if(dent_blk == NULL) {
		MSG(1, "\tError: Calloc Failed for dent_blk!!!\n");
		return 0;
	}

	add_dentry(dent_blk, &didx, ".",
			le32_to_cpu(sb->root_ino), F2FS_FT_DIR);
	add_dentry(dent_blk, &didx, "..",
			le32_to_cpu(sb->root_ino), F2FS_FT_DIR);

	if (c.lpf_ino)
		add_dentry(dent_blk, &didx, LPF, c.lpf_ino, F2FS_FT_DIR);

	if (c.aliased_devices) {
		int i, dev_off = 0;

		for (i = 1; i < c.ndevs; i++) {
			if (!device_is_aliased(i))
				continue;

			add_dentry(dent_blk, &didx, c.devices[i].alias_filename,
					c.first_alias_ino + dev_off,
					F2FS_FT_REG_FILE);
			dev_off++;
		}
	}

	data_blkaddr = alloc_next_free_block(CURSEG_HOT_DATA);

	DBG(1, "\tWriting default dentry root, at offset 0x%x\n", data_blkaddr);
	if (dev_write_block(dent_blk, data_blkaddr,
			f2fs_io_type_to_rw_hint(CURSEG_HOT_DATA))) {
		MSG(1, "\tError: While writing the dentry_blk to disk!!!\n");
		free(dent_blk);
		return 0;
	}

	update_sit_journal(CURSEG_HOT_DATA);
	update_summary_entry(CURSEG_HOT_DATA, le32_to_cpu(sb->root_ino), 0);

	free(dent_blk);
	return data_blkaddr;
}

static int f2fs_write_root_inode(void)
{
	struct f2fs_node *raw_node = NULL;
	block_t data_blkaddr;
	block_t node_blkaddr;

	raw_node = calloc(F2FS_BLKSIZE, 1);
	if (raw_node == NULL) {
		MSG(1, "\tError: Calloc Failed for raw_node!!!\n");
		return -1;
	}

	f2fs_init_inode(sb, raw_node, le32_to_cpu(sb->root_ino),
						mkfs_time, 0x41ed);

	if (c.lpf_ino)
		raw_node->i.i_links = cpu_to_le32(3);

	data_blkaddr = f2fs_add_default_dentry_root();
	if (data_blkaddr == 0) {
		MSG(1, "\tError: Failed to add default dentries for root!!!\n");
		free(raw_node);
		return -1;
	}

	raw_node->i.i_addr[get_extra_isize(raw_node)] =
			cpu_to_le32(data_blkaddr);

	node_blkaddr = alloc_next_free_block(CURSEG_HOT_NODE);
	F2FS_NODE_FOOTER(raw_node)->next_blkaddr = cpu_to_le32(node_blkaddr + 1);

	DBG(1, "\tWriting root inode (hot node), offset 0x%x\n", node_blkaddr);
	if (write_inode(raw_node, node_blkaddr,
			f2fs_io_type_to_rw_hint(CURSEG_HOT_NODE)) < 0) {
		MSG(1, "\tError: While writing the raw_node to disk!!!\n");
		free(raw_node);
		return -1;
	}

	update_nat_journal(le32_to_cpu(sb->root_ino), node_blkaddr);
	update_sit_journal(CURSEG_HOT_NODE);
	update_summary_entry(CURSEG_HOT_NODE, le32_to_cpu(sb->root_ino), 0);

	free(raw_node);
	return 0;
}

static block_t f2fs_write_default_quota(int qtype, __le32 raw_id)
{
	char *filebuf = calloc(F2FS_BLKSIZE, 2);
	int file_magics[] = INITQMAGICS;
	struct v2_disk_dqheader ddqheader;
	struct v2_disk_dqinfo ddqinfo;
	struct v2r1_disk_dqblk dqblk;
	block_t blkaddr;
	uint64_t icnt = 1, bcnt = 1;
	int i;

	if (filebuf == NULL) {
		MSG(1, "\tError: Calloc Failed for filebuf!!!\n");
		return 0;
	}

	/* Write basic quota header */
	ddqheader.dqh_magic = cpu_to_le32(file_magics[qtype]);
	/* only support QF_VFSV1 */
	ddqheader.dqh_version = cpu_to_le32(1);

	memcpy(filebuf, &ddqheader, sizeof(ddqheader));

	/* Fill Initial quota file content */
	ddqinfo.dqi_bgrace = cpu_to_le32(MAX_DQ_TIME);
	ddqinfo.dqi_igrace = cpu_to_le32(MAX_IQ_TIME);
	ddqinfo.dqi_flags = cpu_to_le32(0);
	ddqinfo.dqi_blocks = cpu_to_le32(QT_TREEOFF + 5);
	ddqinfo.dqi_free_blk = cpu_to_le32(0);
	ddqinfo.dqi_free_entry = cpu_to_le32(5);

	memcpy(filebuf + V2_DQINFOOFF, &ddqinfo, sizeof(ddqinfo));

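	/*
	 * Sketch of the V2 quota file image built below (1 KiB quota
	 * blocks): block 0 holds the header and dqinfo written above,
	 * blocks 1-4 form one radix-tree path for id 0 (each block's first
	 * __le32 points to the next block), and block 5 is the data block
	 * whose header entry count is set to 1 before the dqblk itself is
	 * copied in at offset 5120 + 16.
	 */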
	filebuf[1024] = 2;
	filebuf[2048] = 3;
	filebuf[3072] = 4;
	filebuf[4096] = 5;

	filebuf[5120 + 8] = 1;

	dqblk.dqb_id = raw_id;
	dqblk.dqb_pad = cpu_to_le32(0);
	dqblk.dqb_ihardlimit = cpu_to_le64(0);
	dqblk.dqb_isoftlimit = cpu_to_le64(0);
	if (c.lpf_ino) {
		icnt++;
		bcnt++;
	}
	if (c.aliased_devices) {
		icnt += c.aliased_devices;
		bcnt += c.aliased_segments * c.blks_per_seg;
	}
	dqblk.dqb_curinodes = cpu_to_le64(icnt);
	dqblk.dqb_bhardlimit = cpu_to_le64(0);
	dqblk.dqb_bsoftlimit = cpu_to_le64(0);
	dqblk.dqb_curspace = cpu_to_le64(F2FS_BLKSIZE * bcnt);
	dqblk.dqb_btime = cpu_to_le64(0);
	dqblk.dqb_itime = cpu_to_le64(0);

	memcpy(filebuf + 5136, &dqblk, sizeof(struct v2r1_disk_dqblk));

	/* Write quota blocks */
	for (i = 0; i < QUOTA_DATA; i++) {
		blkaddr = alloc_next_free_block(CURSEG_HOT_DATA);

		if (dev_write_block(filebuf + i * F2FS_BLKSIZE, blkaddr,
				f2fs_io_type_to_rw_hint(CURSEG_HOT_DATA))) {
			MSG(1, "\tError: While writing the quota_blk to disk!!!\n");
			free(filebuf);
			return 0;
		}

		update_sit_journal(CURSEG_HOT_DATA);
		update_summary_entry(CURSEG_HOT_DATA,
				le32_to_cpu(sb->qf_ino[qtype]), i);
		DBG(1, "\tWriting quota data, at offset %08x (%d/%d)\n",
					blkaddr, i + 1, QUOTA_DATA);
	}

	free(filebuf);
	return blkaddr + 1 - QUOTA_DATA;
}

static int f2fs_write_qf_inode(int qtype)
{
	struct f2fs_node *raw_node = NULL;
	block_t data_blkaddr;
	block_t node_blkaddr;
	__le32 raw_id;
	int i;

	raw_node = calloc(F2FS_BLKSIZE, 1);
	if (raw_node == NULL) {
		MSG(1, "\tError: Calloc Failed for raw_node!!!\n");
		return -1;
	}
	f2fs_init_inode(sb, raw_node,
			le32_to_cpu(sb->qf_ino[qtype]), mkfs_time, 0x8180);

	raw_node->i.i_size = cpu_to_le64(1024 * 6);
	raw_node->i.i_blocks = cpu_to_le64(1 + QUOTA_DATA);
	raw_node->i.i_flags = cpu_to_le32(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);

	node_blkaddr = alloc_next_free_block(CURSEG_HOT_NODE);
	F2FS_NODE_FOOTER(raw_node)->next_blkaddr = cpu_to_le32(node_blkaddr + 1);

	if (qtype == 0)
		raw_id = raw_node->i.i_uid;
	else if (qtype == 1)
		raw_id = raw_node->i.i_gid;
	else if (qtype == 2)
		raw_id = raw_node->i.i_projid;
	else
		ASSERT(0);

	/* write quota blocks */
	data_blkaddr = f2fs_write_default_quota(qtype, raw_id);
	if (data_blkaddr == 0) {
		free(raw_node);
		return -1;
	}

	for (i = 0; i < QUOTA_DATA; i++)
		raw_node->i.i_addr[get_extra_isize(raw_node) + i] =
				cpu_to_le32(data_blkaddr + i);

	DBG(1, "\tWriting quota inode (hot node), offset 0x%x\n", node_blkaddr);
	if (write_inode(raw_node, node_blkaddr,
			f2fs_io_type_to_rw_hint(CURSEG_HOT_NODE)) < 0) {
		MSG(1, "\tError: While writing the raw_node to disk!!!\n");
		free(raw_node);
		return -1;
	}

	update_nat_journal(le32_to_cpu(sb->qf_ino[qtype]), node_blkaddr);
	update_sit_journal(CURSEG_HOT_NODE);
	update_summary_entry(CURSEG_HOT_NODE, le32_to_cpu(sb->qf_ino[qtype]), 0);

	free(raw_node);
	return 0;
}

static int f2fs_update_nat_default(void)
{
	struct f2fs_nat_block *nat_blk = NULL;
	uint64_t nat_seg_blk_offset = 0;

	nat_blk = calloc(F2FS_BLKSIZE, 1);
	if(nat_blk == NULL) {
		MSG(1, "\tError: Calloc Failed for nat_blk!!!\n");
		return -1;
	}

	/* update node nat */
	nat_blk->entries[get_sb(node_ino)].block_addr = cpu_to_le32(1);
	nat_blk->entries[get_sb(node_ino)].ino = sb->node_ino;

	/* update meta nat */
	nat_blk->entries[get_sb(meta_ino)].block_addr = cpu_to_le32(1);
	nat_blk->entries[get_sb(meta_ino)].ino = sb->meta_ino;

	nat_seg_blk_offset = get_sb(nat_blkaddr);

	DBG(1, "\tWriting nat root, at offset 0x%08"PRIx64"\n",
					nat_seg_blk_offset);
	if (dev_write_block(nat_blk, nat_seg_blk_offset, WRITE_LIFE_NONE)) {
		MSG(1, "\tError: While writing the nat_blk set0 to disk!\n");
		free(nat_blk);
		return -1;
	}

	free(nat_blk);
	return 0;
}

static block_t f2fs_add_default_dentry_lpf(void)
{
	struct f2fs_dentry_block *dent_blk;
	block_t data_blkaddr;
	unsigned int didx = 0;

	dent_blk = calloc(F2FS_BLKSIZE, 1);
	if (dent_blk == NULL) {
		MSG(1, "\tError: Calloc Failed for dent_blk!!!\n");
		return 0;
	}

	add_dentry(dent_blk, &didx, ".", c.lpf_ino, F2FS_FT_DIR);
	add_dentry(dent_blk, &didx, "..", c.lpf_ino, F2FS_FT_DIR);

	data_blkaddr = alloc_next_free_block(CURSEG_HOT_DATA);

	DBG(1, "\tWriting default dentry lost+found, at offset 0x%x\n",
			data_blkaddr);
	if (dev_write_block(dent_blk, data_blkaddr,
			f2fs_io_type_to_rw_hint(CURSEG_HOT_DATA))) {
		MSG(1, "\tError: While writing the dentry_blk to disk!!!\n");
		free(dent_blk);
		return 0;
	}

	update_sit_journal(CURSEG_HOT_DATA);
	update_summary_entry(CURSEG_HOT_DATA, c.lpf_ino, 0);

	free(dent_blk);
	return data_blkaddr;
}

static int f2fs_write_lpf_inode(void)
{
	struct f2fs_node *raw_node;
	block_t data_blkaddr;
	block_t node_blkaddr;
	int err = 0;

	ASSERT(c.lpf_ino);

	raw_node = calloc(F2FS_BLKSIZE, 1);
	if (raw_node == NULL) {
		MSG(1, "\tError: Calloc Failed for raw_node!!!\n");
		return -1;
	}

	f2fs_init_inode(sb, raw_node, c.lpf_ino, mkfs_time, 0x41c0);

	raw_node->i.i_pino = sb->root_ino;
	raw_node->i.i_namelen = cpu_to_le32(strlen(LPF));
	memcpy(raw_node->i.i_name, LPF, strlen(LPF));

	node_blkaddr = alloc_next_free_block(CURSEG_HOT_NODE);
	F2FS_NODE_FOOTER(raw_node)->next_blkaddr = cpu_to_le32(node_blkaddr + 1);

	data_blkaddr = f2fs_add_default_dentry_lpf();
	if (data_blkaddr == 0) {
		MSG(1, "\tError: Failed to add default dentries for lost+found!!!\n");
		err = -1;
		goto exit;
	}
	raw_node->i.i_addr[get_extra_isize(raw_node)] = cpu_to_le32(data_blkaddr);

	DBG(1, "\tWriting lost+found inode (hot node), offset 0x%x\n",
			node_blkaddr);
	if (write_inode(raw_node, node_blkaddr,
			f2fs_io_type_to_rw_hint(CURSEG_HOT_NODE)) < 0) {
		MSG(1, "\tError: While writing the raw_node to disk!!!\n");
		err = -1;
		goto exit;
	}

	update_nat_journal(c.lpf_ino, node_blkaddr);
	update_sit_journal(CURSEG_HOT_NODE);
	update_summary_entry(CURSEG_HOT_NODE, c.lpf_ino, 0);

exit:
	free(raw_node);
	return err;
}

static void allocate_blocks_for_aliased_device(struct f2fs_node *raw_node,
						unsigned int dev_num)
{
	uint32_t start_segno = (c.devices[dev_num].start_blkaddr -
				get_sb(main_blkaddr)) / c.blks_per_seg;
	uint32_t end_segno = (c.devices[dev_num].end_blkaddr -
				get_sb(main_blkaddr) + 1) / c.blks_per_seg;
	uint32_t segno;
	uint64_t blkcnt;
	struct f2fs_sit_block *sit_blk = calloc(F2FS_BLKSIZE, 1);

	ASSERT(sit_blk);

	for (segno = start_segno; segno < end_segno; segno++) {
		struct f2fs_sit_entry *sit;
		uint64_t sit_blk_addr = get_sb(sit_blkaddr) +
					(segno / SIT_ENTRY_PER_BLOCK);

		ASSERT(dev_read_block(sit_blk, sit_blk_addr) >= 0);
		sit = &sit_blk->entries[segno % SIT_ENTRY_PER_BLOCK];
		memset(&sit->valid_map, 0xFF, SIT_VBLOCK_MAP_SIZE);
		sit->vblocks = cpu_to_le16((CURSEG_COLD_DATA <<
				SIT_VBLOCKS_SHIFT) | c.blks_per_seg);
		sit->mtime = cpu_to_le64(mkfs_time);
		ASSERT(dev_write_block(sit_blk, sit_blk_addr,
				f2fs_io_type_to_rw_hint(CURSEG_COLD_DATA)) >= 0);
	}

	blkcnt = (end_segno - start_segno) * c.blks_per_seg;
	raw_node->i.i_size = cpu_to_le64(blkcnt << get_sb(log_blocksize));
	raw_node->i.i_blocks = cpu_to_le64(blkcnt + 1);

	raw_node->i.i_ext.fofs = cpu_to_le32(0);
	raw_node->i.i_ext.blk_addr =
			cpu_to_le32(c.devices[dev_num].start_blkaddr);
	raw_node->i.i_ext.len = cpu_to_le32(blkcnt);

	free(sit_blk);
}

static int f2fs_write_alias_inodes(void)
{
	struct f2fs_node *raw_node;
	block_t node_blkaddr;
	int err = 0;
	unsigned int i, dev_off = 0;

	ASSERT(c.aliased_devices);

	raw_node = calloc(F2FS_BLKSIZE, 1);
	if (raw_node == NULL) {
		MSG(1, "\tError: Calloc Failed for raw_node!!!\n");
		return -1;
	}

	for (i = 1; i < c.ndevs; i++) {
		const char *filename;
		nid_t ino;

		if (!device_is_aliased(i))
			continue;

		ino = c.first_alias_ino + dev_off;
		dev_off++;
		f2fs_init_inode(sb, raw_node, ino, mkfs_time, 0x81c0);

		raw_node->i.i_flags = cpu_to_le32(F2FS_DEVICE_ALIAS_FL);
		raw_node->i.i_inline = F2FS_PIN_FILE;
		raw_node->i.i_pino = sb->root_ino;
		filename = c.devices[i].alias_filename;
		raw_node->i.i_namelen = cpu_to_le32(strlen(filename));
		memcpy(raw_node->i.i_name, filename, strlen(filename));

		node_blkaddr = alloc_next_free_block(CURSEG_COLD_NODE);
		F2FS_NODE_FOOTER(raw_node)->next_blkaddr =
					cpu_to_le32(node_blkaddr + 1);

		allocate_blocks_for_aliased_device(raw_node, i);

		DBG(1, "\tWriting aliased device inode (cold node), "
			"offset 0x%x\n", node_blkaddr);
		if (write_inode(raw_node, node_blkaddr,
				f2fs_io_type_to_rw_hint(CURSEG_COLD_NODE)) < 0) {
			MSG(1, "\tError: While writing the raw_node to "
				"disk!!!\n");
			err = -1;
			goto exit;
		}

		update_nat_journal(ino, node_blkaddr);
		update_sit_journal(CURSEG_COLD_NODE);
		update_summary_entry(CURSEG_COLD_NODE, ino, 0);
	}

exit:
	free(raw_node);
	return err;
}

static int f2fs_create_root_dir(void)
{
	enum quota_type qtype;
	int err = 0;

	err = f2fs_write_root_inode();
	if (err < 0) {
		MSG(1, "\tError: Failed to write root inode!!!\n");
		goto exit;
	}

	for (qtype = 0; qtype < F2FS_MAX_QUOTAS; qtype++) {
		if (!((1 << qtype) & c.quota_bits))
			continue;
		err = f2fs_write_qf_inode(qtype);
		if (err < 0) {
			MSG(1, "\tError: Failed to write quota inode!!!\n");
			goto exit;
		}
	}

	if (c.feature & F2FS_FEATURE_LOST_FOUND) {
		err = f2fs_write_lpf_inode();
		if (err < 0) {
			MSG(1, "\tError: Failed to write lost+found inode!!!\n");
			goto exit;
		}
	}

	if (c.aliased_devices) {
		err = f2fs_write_alias_inodes();
		if (err < 0) {
			MSG(1, "\tError: Failed to write aliased device "
				"inodes!!!\n");
			goto exit;
		}
	}

#ifndef WITH_ANDROID
	err = f2fs_discard_obsolete_dnode();
	if (err < 0) {
		MSG(1, "\tError: Failed to discard obsolete dnode!!!\n");
		goto exit;
	}
#endif

	err = f2fs_update_nat_default();
	if (err < 0) {
		MSG(1, "\tError: Failed to update NAT for root!!!\n");
		goto exit;
	}
exit:
	if (err)
		MSG(1, "\tError: Could not create the root directory!!!\n");

	return err;
}

int f2fs_format_device(void)
{
	int err = 0;

	err = f2fs_prepare_super_block();
	if (err < 0) {
		MSG(0, "\tError: Failed to prepare a super block!!!\n");
		goto exit;
	}

	if (c.trim) {
		err = f2fs_trim_devices();
		if (err < 0) {
			MSG(0, "\tError: Failed to trim whole device!!!\n");
			goto exit;
		}
	}

	err = f2fs_init_sit_area();
	if (err < 0) {
		MSG(0, "\tError: Failed to initialise the SIT AREA!!!\n");
		goto exit;
	}

	err = f2fs_init_nat_area();
	if (err < 0) {
		MSG(0, "\tError: Failed to initialise the NAT AREA!!!\n");
		goto exit;
	}

	err = f2fs_create_root_dir();
	if (err < 0) {
		MSG(0, "\tError: Failed to create the root directory!!!\n");
		goto exit;
	}

	err = f2fs_write_check_point_pack();
	if (err < 0) {
		MSG(0, "\tError: Failed to write the check point pack!!!\n");
		goto exit;
	}

	err = f2fs_write_super_block();
	if (err < 0) {
		MSG(0, "\tError: Failed to write the super block!!!\n");
		goto exit;
	}
exit:
	if (err)
		MSG(0, "\tError: Could not format the device!!!\n");

	return err;
}