/**
 * libf2fs.c
 *
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 * Copyright (c) 2019 Google Inc.
 *             http://www.google.com/
 * Copyright (c) 2020 Google Inc.
 *   Robin Hsu <robinhsu@google.com>
 *  : add quick-buffer for sload compression support
 *
 * Dual licensed under the GPL or LGPL version 2 licenses.
 */
#ifndef _LARGEFILE64_SOURCE
#define _LARGEFILE64_SOURCE
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#ifdef HAVE_MNTENT_H
#include <mntent.h>
#endif
#include <time.h>
#ifdef HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
#ifdef HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif
#ifdef HAVE_LINUX_HDREG_H
#include <linux/hdreg.h>
#endif

#include <stdbool.h>
#include <assert.h>
#include <inttypes.h>
#include "f2fs_fs.h"

struct f2fs_configuration c;

#ifdef HAVE_SPARSE_SPARSE_H
#include <sparse/sparse.h>
struct sparse_file *f2fs_sparse_file;
static char **blocks;
uint64_t blocks_count;
static char *zeroed_block;
#endif

static int __get_device_fd(__u64 *offset)
{
	__u64 blk_addr = *offset >> F2FS_BLKSIZE_BITS;
	int i;

	for (i = 0; i < c.ndevs; i++) {
		if (c.devices[i].start_blkaddr <= blk_addr &&
				c.devices[i].end_blkaddr >= blk_addr) {
			*offset -=
				c.devices[i].start_blkaddr << F2FS_BLKSIZE_BITS;
			return c.devices[i].fd;
		}
	}
	return -1;
}

#ifndef HAVE_LSEEK64
typedef off_t off64_t;

static inline off64_t lseek64(int fd, __u64 offset, int set)
{
	return lseek(fd, offset, set);
}
#endif

/* ---------- dev_cache, Least Used First (LUF) policy ------------------- */
/*
 * The least used block is the first victim to be replaced when the maximum
 * hash collision count is exceeded.
 */
static bool *dcache_valid; /* is the cached block valid? */
static off64_t *dcache_blk; /* which block it cached */
static uint64_t *dcache_lastused; /* last used ticks for cache entries */
static char *dcache_buf; /* cached block data */
static uint64_t dcache_usetick; /* current use tick */

static uint64_t dcache_raccess;
static uint64_t dcache_rhit;
static uint64_t dcache_rmiss;
static uint64_t dcache_rreplace;

static bool dcache_exit_registered = false;
/*
 * Shadow config:
 *
 * The active set of configuration values.
 * The global configuration 'c.cache_config' is copied here when
 * dcache_init() is called.
 */
static dev_cache_config_t dcache_config = {0, 16, 1};
static bool dcache_initialized = false;

#define MIN_NUM_CACHE_ENTRY 1024L
#define MAX_MAX_HASH_COLLISION 16

static long dcache_relocate_offset0[] = {
	20, -20, 40, -40, 80, -80, 160, -160,
	320, -320, 640, -640, 1280, -1280, 2560, -2560,
};
static int dcache_relocate_offset[16];

static void dcache_print_statistics(void)
{
	long i;
	long useCnt;

	/* Number of used cache entries */
	useCnt = 0;
	for (i = 0; i < dcache_config.num_cache_entry; i++)
		if (dcache_valid[i])
			++useCnt;

	/*
	 * c: number of cache entries
	 * u: used entries
	 * RA: number of read access blocks
	 * CH: cache hit
	 * CM: cache miss
	 * Repl: read cache replaced
	 */
	printf ("\nc, u, RA, CH, CM, Repl=\n");
	printf ("%ld %ld %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			dcache_config.num_cache_entry,
			useCnt,
			dcache_raccess,
			dcache_rhit,
			dcache_rmiss,
			dcache_rreplace);
}

void dcache_release(void)
{
	if (!dcache_initialized)
		return;

	dcache_initialized = false;

	if (c.cache_config.dbg_en)
		dcache_print_statistics();

	if (dcache_blk != NULL)
		free(dcache_blk);
	if (dcache_lastused != NULL)
		free(dcache_lastused);
	if (dcache_buf != NULL)
		free(dcache_buf);
	if (dcache_valid != NULL)
		free(dcache_valid);
	dcache_config.num_cache_entry = 0;
	dcache_blk = NULL;
	dcache_lastused = NULL;
	dcache_buf = NULL;
	dcache_valid = NULL;
}

// return 0 for success, error code for failure.
static int dcache_alloc_all(long n)
{
	if (n <= 0)
		return -1;
	if ((dcache_blk = (off64_t *) malloc(sizeof(off64_t) * n)) == NULL
		|| (dcache_lastused = (uint64_t *)
			malloc(sizeof(uint64_t) * n)) == NULL
		|| (dcache_buf = (char *) malloc (F2FS_BLKSIZE * n)) == NULL
		|| (dcache_valid = (bool *) malloc(sizeof(bool) * n)) == NULL)
	{
		dcache_release();
		return -1;
	}
	dcache_config.num_cache_entry = n;
	return 0;
}

static void dcache_relocate_init(void)
{
	int i;
	int n0 = (sizeof(dcache_relocate_offset0)
			/ sizeof(dcache_relocate_offset0[0]));
	int n = (sizeof(dcache_relocate_offset)
			/ sizeof(dcache_relocate_offset[0]));

	ASSERT(n == n0);
	for (i = 0; i < n && i < dcache_config.max_hash_collision; i++) {
		if (labs(dcache_relocate_offset0[i])
				> dcache_config.num_cache_entry / 2) {
			dcache_config.max_hash_collision = i;
			break;
		}
		dcache_relocate_offset[i] =
			dcache_config.num_cache_entry
			+ dcache_relocate_offset0[i];
	}
}

void dcache_init(void)
{
	long n;

	if (c.cache_config.num_cache_entry <= 0)
		return;

	/* release previous cache init, if any */
	dcache_release();

	dcache_blk = NULL;
	dcache_lastused = NULL;
	dcache_buf = NULL;
	dcache_valid = NULL;

	dcache_config = c.cache_config;

	n = max(MIN_NUM_CACHE_ENTRY, dcache_config.num_cache_entry);

	/* halve the alloc size until it succeeds, or the minimum cache size is reached */
	while (dcache_alloc_all(n) != 0 && n != MIN_NUM_CACHE_ENTRY)
		n = max(MIN_NUM_CACHE_ENTRY, n/2);

	/* must be last: depends on num_cache_entry */
	dcache_relocate_init();
	dcache_initialized = true;

	if (!dcache_exit_registered) {
		dcache_exit_registered = true;
		atexit(dcache_release); /* auto release */
	}

	dcache_raccess = 0;
	dcache_rhit = 0;
	dcache_rmiss = 0;
	dcache_rreplace = 0;
}
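
/*
 * Example (a sketch, not part of this file): a tool enables the read cache
 * by filling in c.cache_config before the first dev_read()/dev_write();
 * dcache_init() is then invoked automatically from dcache_update_rw(), and
 * dcache_release() runs at exit via atexit().  The field values below are
 * illustrative only:
 *
 *	c.cache_config.num_cache_entry = 4096;
 *	c.cache_config.max_hash_collision = 8;
 *	c.cache_config.dbg_en = true;
 */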

static inline char *dcache_addr(long entry)
{
	return dcache_buf + F2FS_BLKSIZE * entry;
}

/* relocate on (n+1)-th collision */
static inline long dcache_relocate(long entry, int n)
{
	assert(dcache_config.num_cache_entry != 0);
	return (entry + dcache_relocate_offset[n]) %
		dcache_config.num_cache_entry;
}
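
/*
 * Worked example (a sketch, assuming num_cache_entry == 1024): block 5000
 * hashes to entry 5000 % 1024 == 904.  On successive collisions the probe
 * targets are computed from the original entry, not chained:
 *
 *	dcache_relocate(904, 0) == (904 + 1024 + 20) % 1024 == 924
 *	dcache_relocate(904, 1) == (904 + 1024 - 20) % 1024 == 884
 *	dcache_relocate(904, 2) == (904 + 1024 + 40) % 1024 == 944
 *
 * The num_cache_entry bias added in dcache_relocate_init() keeps the sum
 * non-negative for the negative offsets.
 */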

static long dcache_find(off64_t blk)
{
	register long n = dcache_config.num_cache_entry;
	register unsigned m = dcache_config.max_hash_collision;
	long entry, least_used, target;
	unsigned try;

	assert(n > 0);
	target = least_used = entry = blk % n; /* simple modulo hash */

	for (try = 0; try < m; try++) {
		if (!dcache_valid[target] || dcache_blk[target] == blk)
			return target;  /* found target or empty cache slot */
		if (dcache_lastused[target] < dcache_lastused[least_used])
			least_used = target;
		target = dcache_relocate(entry, try); /* next target */
	}
	return least_used;  /* max search reached, return least used slot */
}

/* Physical read into cache */
static int dcache_io_read(int fd, long entry, off64_t offset, off64_t blk)
{
	if (pread64(fd, dcache_buf + entry * F2FS_BLKSIZE,
			F2FS_BLKSIZE, offset) < 0) {
		MSG(0, "\n read() fail.\n");
		return -1;
	}
	dcache_lastused[entry] = ++dcache_usetick;
	dcache_valid[entry] = true;
	dcache_blk[entry] = blk;
	return 0;
}

/*
 * - Note: read and write are not symmetric:
 *   For reads, we go block by block, because of the nature of the cache:
 *   some blocks may be cached while others are not.
 *   For writes, since we always write through, all writes can be joined
 *   into one and issued once by the caller.  This function updates the
 *   cache for a write, but does not do the physical write; the caller is
 *   responsible for that.
 * - Note: reads and writes are handled together here because they share
 *   the same structure for finding the relevant cache entries.
 * - Return values:
 *   0: success
 *   1: cache not available (uninitialized)
 *   -1: error
 */
static int dcache_update_rw(int fd, void *buf, off64_t offset,
		size_t byte_count, bool is_write)
{
	off64_t blk;
	int addr_in_blk;
	off64_t start;

	if (!dcache_initialized)
		dcache_init(); /* auto initialize */

	if (!dcache_initialized)
		return 1; /* not available */

	blk = offset / F2FS_BLKSIZE;
	addr_in_blk = offset % F2FS_BLKSIZE;
	start = blk * F2FS_BLKSIZE;

	while (byte_count != 0) {
		size_t cur_size = min(byte_count,
				(size_t)(F2FS_BLKSIZE - addr_in_blk));
		long entry = dcache_find(blk);

		if (!is_write)
			++dcache_raccess;

		if (dcache_valid[entry] && dcache_blk[entry] == blk) {
			/* cache hit */
			if (is_write) /* write: update cache */
				memcpy(dcache_addr(entry) + addr_in_blk,
					buf, cur_size);
			else
				++dcache_rhit;
		} else {
			/* cache miss */
			if (!is_write) {
				int err;
				++dcache_rmiss;
				if (dcache_valid[entry])
					++dcache_rreplace;
				/* read: physical I/O read into cache */
				err = dcache_io_read(fd, entry, start, blk);
				if (err)
					return err;
			}
		}

		/* read: copy data from cache */
		/* write: nothing to do, since we don't do physical write. */
		if (!is_write)
			memcpy(buf, dcache_addr(entry) + addr_in_blk,
				cur_size);

		/* next block */
		++blk;
		buf += cur_size;
		start += F2FS_BLKSIZE;
		byte_count -= cur_size;
		addr_in_blk = 0;
	}
	return 0;
}
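
/*
 * Typical caller pattern (a sketch; dev_read() and dev_write() below are
 * the real callers):
 *
 *	// read: try the cache, fall back to a plain pread64() when the
 *	// cache is not available (return value 1)
 *	err = dcache_read(fd, buf, (off64_t)offset, len);
 *	if (err > 0)
 *		err = pread64(fd, buf, len, offset) < 0 ? -1 : 0;
 *
 *	// write-through: update the cache, then always do the physical write
 *	if (dcache_update_cache(fd, buf, (off64_t)offset, len) < 0)
 *		return -1;
 *	if (pwrite64(fd, buf, len, offset) < 0)
 *		return -1;
 */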

/*
 * dcache_update_cache() only updates the cache; it does not do physical I/O.
 * Thus, even on success, the caller still needs a normal non-cache write for
 * the actual data.
 *
 * return value: 1: cache not available
 *               0: success, -1: I/O error
 */
int dcache_update_cache(int fd, void *buf, off64_t offset, size_t count)
{
	return dcache_update_rw(fd, buf, offset, count, true);
}

/* handles read into cache + read into buffer */
int dcache_read(int fd, void *buf, off64_t offset, size_t count)
{
	return dcache_update_rw(fd, buf, offset, count, false);
}

/*
 * IO interfaces
 */
int dev_read_version(void *buf, __u64 offset, size_t len)
{
	if (c.sparse_mode)
		return 0;
	if (pread64(c.kd, buf, len, offset) < 0)
		return -1;
	return 0;
}

#ifdef HAVE_SPARSE_SPARSE_H
static int sparse_read_blk(__u64 block, int count, void *buf)
{
	int i;
	char *out = buf;
	__u64 cur_block;

	for (i = 0; i < count; ++i) {
		cur_block = block + i;
		if (blocks[cur_block])
			memcpy(out + (i * F2FS_BLKSIZE),
					blocks[cur_block], F2FS_BLKSIZE);
		else if (blocks)
			memset(out + (i * F2FS_BLKSIZE), 0, F2FS_BLKSIZE);
	}
	return 0;
}

static int sparse_write_blk(__u64 block, int count, const void *buf)
{
	int i;
	__u64 cur_block;
	const char *in = buf;

	for (i = 0; i < count; ++i) {
		cur_block = block + i;
		if (blocks[cur_block] == zeroed_block)
			blocks[cur_block] = NULL;
		if (!blocks[cur_block]) {
			blocks[cur_block] = calloc(1, F2FS_BLKSIZE);
			if (!blocks[cur_block])
				return -ENOMEM;
		}
		memcpy(blocks[cur_block], in + (i * F2FS_BLKSIZE),
				F2FS_BLKSIZE);
	}
	return 0;
}

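/*
 * All-zero blocks written via dev_fill() share the single static
 * 'zeroed_block' buffer instead of getting private copies: they are
 * recorded here, dropped again by sparse_write_blk() when real data
 * arrives, and emitted as fill chunks (sparse_merge_blocks() with
 * zero == 1) in f2fs_finalize_device().
 */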
static int sparse_write_zeroed_blk(__u64 block, int count)
{
	int i;
	__u64 cur_block;

	for (i = 0; i < count; ++i) {
		cur_block = block + i;
		if (blocks[cur_block])
			continue;
		blocks[cur_block] = zeroed_block;
	}
	return 0;
}

#ifdef SPARSE_CALLBACK_USES_SIZE_T
static int sparse_import_segment(void *UNUSED(priv), const void *data,
		size_t len, unsigned int block, unsigned int nr_blocks)
#else
static int sparse_import_segment(void *UNUSED(priv), const void *data, int len,
		unsigned int block, unsigned int nr_blocks)
#endif
{
	/* Ignore chunk headers, only write the data */
	if (!nr_blocks || len % F2FS_BLKSIZE)
		return 0;

	return sparse_write_blk(block, nr_blocks, data);
}

static int sparse_merge_blocks(uint64_t start, uint64_t num, int zero)
{
	char *buf;
	uint64_t i;

	if (zero) {
		blocks[start] = NULL;
		return sparse_file_add_fill(f2fs_sparse_file, 0x0,
					F2FS_BLKSIZE * num, start);
	}

	buf = calloc(num, F2FS_BLKSIZE);
	if (!buf) {
		fprintf(stderr, "failed to alloc %llu\n",
			(unsigned long long)num * F2FS_BLKSIZE);
		return -ENOMEM;
	}

	for (i = 0; i < num; i++) {
		memcpy(buf + i * F2FS_BLKSIZE, blocks[start + i], F2FS_BLKSIZE);
		free(blocks[start + i]);
		blocks[start + i] = NULL;
	}

	/* free_sparse_blocks will release this buf. */
	blocks[start] = buf;

	return sparse_file_add_data(f2fs_sparse_file, blocks[start],
			F2FS_BLKSIZE * num, start);
}
#else
static int sparse_read_blk(__u64 UNUSED(block),
		int UNUSED(count), void *UNUSED(buf))
{
	return 0;
}

static int sparse_write_blk(__u64 UNUSED(block),
		int UNUSED(count), const void *UNUSED(buf))
{
	return 0;
}

static int sparse_write_zeroed_blk(__u64 UNUSED(block), int UNUSED(count))
{
	return 0;
}
#endif

int dev_read(void *buf, __u64 offset, size_t len)
{
	int fd;
	int err;

	if (c.sparse_mode)
		return sparse_read_blk(offset / F2FS_BLKSIZE,
					len / F2FS_BLKSIZE, buf);

	fd = __get_device_fd(&offset);
	if (fd < 0)
		return fd;

	/* err = 1: cache not available, fall back to non-cache R/W */
	/* err = 0: success, err = -1: I/O error */
	err = dcache_read(fd, buf, (off64_t)offset, len);
	if (err <= 0)
		return err;
	if (pread64(fd, buf, len, offset) < 0)
		return -1;
	return 0;
}

#ifdef POSIX_FADV_WILLNEED
int dev_readahead(__u64 offset, size_t len)
#else
int dev_readahead(__u64 offset, size_t UNUSED(len))
#endif
{
	int fd = __get_device_fd(&offset);

	if (fd < 0)
		return fd;
#ifdef POSIX_FADV_WILLNEED
	return posix_fadvise(fd, offset, len, POSIX_FADV_WILLNEED);
#else
	return 0;
#endif
}

int dev_write(void *buf, __u64 offset, size_t len)
{
	int fd;

	if (c.dry_run)
		return 0;

	if (c.sparse_mode)
		return sparse_write_blk(offset / F2FS_BLKSIZE,
					len / F2FS_BLKSIZE, buf);

	fd = __get_device_fd(&offset);
	if (fd < 0)
		return fd;

	/*
	 * dcache_update_cache() only updates the cache and does no I/O.
	 * Thus, even on success, we still need a normal non-cache write
	 * for the actual data.
	 */
	if (dcache_update_cache(fd, buf, (off64_t)offset, len) < 0)
		return -1;
	if (pwrite64(fd, buf, len, offset) < 0)
		return -1;
	return 0;
}

int dev_write_block(void *buf, __u64 blk_addr)
{
	return dev_write(buf, blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

int dev_write_dump(void *buf, __u64 offset, size_t len)
{
	if (pwrite64(c.dump_fd, buf, len, offset) < 0)
		return -1;
	return 0;
}

int dev_fill(void *buf, __u64 offset, size_t len)
{
	int fd;

	if (c.sparse_mode)
		return sparse_write_zeroed_blk(offset / F2FS_BLKSIZE,
					len / F2FS_BLKSIZE);

	fd = __get_device_fd(&offset);
	if (fd < 0)
		return fd;

	/* Only allow fill to zero */
	if (*((__u8*)buf))
		return -1;
	if (pwrite64(fd, buf, len, offset) < 0)
		return -1;
	return 0;
}

int dev_fill_block(void *buf, __u64 blk_addr)
{
	return dev_fill(buf, blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

int dev_read_block(void *buf, __u64 blk_addr)
{
	return dev_read(buf, blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

int dev_reada_block(__u64 blk_addr)
{
	return dev_readahead(blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}
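
/*
 * Example (a sketch, not part of this file): reading, modifying and writing
 * back a single block through the block-granular wrappers above; 'blkaddr'
 * is a hypothetical block address supplied by the caller.
 *
 *	char blk[F2FS_BLKSIZE];
 *
 *	if (dev_read_block(blk, blkaddr))
 *		return -1;
 *	// ... modify blk ...
 *	if (dev_write_block(blk, blkaddr))
 *		return -1;
 */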

int f2fs_fsync_device(void)
{
#ifdef HAVE_FSYNC
	int i;

	for (i = 0; i < c.ndevs; i++) {
		if (fsync(c.devices[i].fd) < 0) {
			MSG(0, "\tError: Could not conduct fsync!!!\n");
			return -1;
		}
	}
#endif
	return 0;
}

int f2fs_init_sparse_file(void)
{
#ifdef HAVE_SPARSE_SPARSE_H
	if (c.func == MKFS) {
		f2fs_sparse_file = sparse_file_new(F2FS_BLKSIZE, c.device_size);
		if (!f2fs_sparse_file)
			return -1;
	} else {
		f2fs_sparse_file = sparse_file_import(c.devices[0].fd,
				true, false);
		if (!f2fs_sparse_file)
			return -1;

		c.device_size = sparse_file_len(f2fs_sparse_file, 0, 0);
		c.device_size &= (~((uint64_t)(F2FS_BLKSIZE - 1)));
	}

	if (sparse_file_block_size(f2fs_sparse_file) != F2FS_BLKSIZE) {
		MSG(0, "\tError: Corrupted sparse file\n");
		return -1;
	}
	blocks_count = c.device_size / F2FS_BLKSIZE;
	blocks = calloc(blocks_count, sizeof(char *));
	if (!blocks) {
		MSG(0, "\tError: Calloc Failed for blocks!!!\n");
		return -1;
	}

	zeroed_block = calloc(1, F2FS_BLKSIZE);
	if (!zeroed_block) {
		MSG(0, "\tError: Calloc Failed for zeroed block!!!\n");
		return -1;
	}

	return sparse_file_foreach_chunk(f2fs_sparse_file, true, false,
				sparse_import_segment, NULL);
#else
	MSG(0, "\tError: Sparse mode is only supported for android\n");
	return -1;
#endif
}

void f2fs_release_sparse_resource(void)
{
#ifdef HAVE_SPARSE_SPARSE_H
	int j;

	if (c.sparse_mode) {
		if (f2fs_sparse_file != NULL) {
			sparse_file_destroy(f2fs_sparse_file);
			f2fs_sparse_file = NULL;
		}
		for (j = 0; j < blocks_count; j++)
			free(blocks[j]);
		free(blocks);
		blocks = NULL;
		free(zeroed_block);
		zeroed_block = NULL;
	}
#endif
}

#define MAX_CHUNK_SIZE (1 * 1024 * 1024 * 1024ULL)
#define MAX_CHUNK_COUNT (MAX_CHUNK_SIZE / F2FS_BLKSIZE)
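/*
 * Data chunks handed to libsparse are capped at MAX_CHUNK_SIZE.  With a
 * 4 KiB block size that is MAX_CHUNK_COUNT == 262144 blocks per chunk;
 * the cap also bounds the temporary merge buffer allocated in
 * sparse_merge_blocks().
 */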
int f2fs_finalize_device(void)
{
	int i;
	int ret = 0;

#ifdef HAVE_SPARSE_SPARSE_H
	if (c.sparse_mode) {
		int64_t chunk_start = (blocks[0] == NULL) ? -1 : 0;
		uint64_t j;

		if (c.func != MKFS) {
			sparse_file_destroy(f2fs_sparse_file);
			ret = ftruncate(c.devices[0].fd, 0);
			ASSERT(!ret);
			lseek(c.devices[0].fd, 0, SEEK_SET);
			f2fs_sparse_file = sparse_file_new(F2FS_BLKSIZE,
					c.device_size);
		}

		for (j = 0; j < blocks_count; ++j) {
			if (chunk_start != -1) {
				if (j - chunk_start >= MAX_CHUNK_COUNT) {
					ret = sparse_merge_blocks(chunk_start,
							j - chunk_start, 0);
					ASSERT(!ret);
					chunk_start = -1;
				}
			}

			if (chunk_start == -1) {
				if (!blocks[j])
					continue;

				if (blocks[j] == zeroed_block) {
					ret = sparse_merge_blocks(j, 1, 1);
					ASSERT(!ret);
				} else {
					chunk_start = j;
				}
			} else {
				if (blocks[j] && blocks[j] != zeroed_block)
					continue;

				ret = sparse_merge_blocks(chunk_start,
						j - chunk_start, 0);
				ASSERT(!ret);

				if (blocks[j] == zeroed_block) {
					ret = sparse_merge_blocks(j, 1, 1);
					ASSERT(!ret);
				}

				chunk_start = -1;
			}
		}
		if (chunk_start != -1) {
			ret = sparse_merge_blocks(chunk_start,
					blocks_count - chunk_start, 0);
			ASSERT(!ret);
		}

		sparse_file_write(f2fs_sparse_file, c.devices[0].fd,
				/*gzip*/0, /*sparse*/1, /*crc*/0);

		f2fs_release_sparse_resource();
	}
#endif
	/*
	 * We should call fsync() to flush out all the dirty pages
	 * in the block device page cache.
	 */
	for (i = 0; i < c.ndevs; i++) {
#ifdef HAVE_FSYNC
		ret = fsync(c.devices[i].fd);
		if (ret < 0) {
			MSG(0, "\tError: Could not conduct fsync!!!\n");
			break;
		}
#endif
		ret = close(c.devices[i].fd);
		if (ret < 0) {
			MSG(0, "\tError: Failed to close device file!!!\n");
			break;
		}
		free(c.devices[i].path);
		free(c.devices[i].zone_cap_blocks);
	}
	close(c.kd);

	return ret;
}