/**
 * libf2fs.c
 *
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 * Copyright (c) 2019 Google Inc.
 *             http://www.google.com/
 * Copyright (c) 2020 Google Inc.
 *   Robin Hsu <robinhsu@google.com>
 *  : add quick-buffer for sload compression support
 *
 * Dual licensed under the GPL or LGPL version 2 licenses.
 */
#define _LARGEFILE64_SOURCE

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#ifdef HAVE_MNTENT_H
#include <mntent.h>
#endif
#include <time.h>
#ifdef HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
#ifdef HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif
#ifdef HAVE_LINUX_HDREG_H
#include <linux/hdreg.h>
#endif

#include <stdbool.h>
#include <assert.h>
#include <inttypes.h>
#include "f2fs_fs.h"

struct f2fs_configuration c;

#ifdef HAVE_SPARSE_SPARSE_H
#include <sparse/sparse.h>
struct sparse_file *f2fs_sparse_file;
static char **blocks;
uint64_t blocks_count;
static char *zeroed_block;
#endif

static int __get_device_fd(__u64 *offset)
{
	__u64 blk_addr = *offset >> F2FS_BLKSIZE_BITS;
	int i;

	for (i = 0; i < c.ndevs; i++) {
		if (c.devices[i].start_blkaddr <= blk_addr &&
				c.devices[i].end_blkaddr >= blk_addr) {
			*offset -=
				c.devices[i].start_blkaddr << F2FS_BLKSIZE_BITS;
			return c.devices[i].fd;
		}
	}
	return -1;
}
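
/*
 * Illustrative sketch (not built; the device layout below is hypothetical):
 * with a second device covering blocks [0x10000, 0x1ffff], an access to
 * block 0x10004 is routed to that device's fd and the byte offset is
 * rebased to the start of that device.
 */
#if 0
static void example_offset_translation(void)
{
	__u64 offset = (__u64)0x10004 << F2FS_BLKSIZE_BITS;
	int fd = __get_device_fd(&offset);

	/*
	 * On success, fd is the matching device's fd and offset now equals
	 * (0x10004 - 0x10000) << F2FS_BLKSIZE_BITS, i.e. 4 blocks into that
	 * device; if no range matches, fd is negative.
	 */
	(void)fd;
}
#endif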

#ifndef HAVE_LSEEK64
typedef off_t	off64_t;

static inline off64_t lseek64(int fd, __u64 offset, int set)
{
	return lseek(fd, offset, set);
}
#endif

/* ---------- dev_cache, Least Used First (LUF) policy ------------------- */
/*
 * The least used block is the first victim to be replaced when the maximum
 * number of hash collisions is exceeded.
 */
static bool *dcache_valid; /* is the cached block valid? */
static off64_t  *dcache_blk; /* which block it cached */
static uint64_t *dcache_lastused; /* last used ticks for cache entries */
static char *dcache_buf; /* cached block data */
static uint64_t dcache_usetick; /* current use tick */

static uint64_t dcache_raccess;
static uint64_t dcache_rhit;
static uint64_t dcache_rmiss;
static uint64_t dcache_rreplace;

static bool dcache_exit_registered = false;

/*
 *  Shadow config:
 *
 *  The active set of configuration values.
 *  The global configuration 'c.cache_config' is copied into 'dcache_config'
 *  when dcache_init() is called.
 */
static dev_cache_config_t dcache_config = {0, 16, 1};
static bool dcache_initialized = false;

#define MIN_NUM_CACHE_ENTRY  1024L
#define MAX_MAX_HASH_COLLISION  16

static long dcache_relocate_offset0[] = {
	20, -20, 40, -40, 80, -80, 160, -160,
	320, -320, 640, -640, 1280, -1280, 2560, -2560,
};
static int dcache_relocate_offset[16];

static void dcache_print_statistics(void)
{
	long i;
	long useCnt;

	/* Number of used cache entries */
	useCnt = 0;
	for (i = 0; i < dcache_config.num_cache_entry; i++)
		if (dcache_valid[i])
			++useCnt;

	/*
	 *  c: number of cache entries
	 *  u: used entries
	 *  RA: number of read access blocks
	 *  CH: cache hit
	 *  CM: cache miss
	 *  Repl: read cache replaced
	 */
	printf ("\nc, u, RA, CH, CM, Repl=\n");
	printf ("%ld %ld %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			dcache_config.num_cache_entry,
			useCnt,
			dcache_raccess,
			dcache_rhit,
			dcache_rmiss,
			dcache_rreplace);
}

void dcache_release(void)
{
	if (!dcache_initialized)
		return;

	dcache_initialized = false;

	if (c.cache_config.dbg_en)
		dcache_print_statistics();

	if (dcache_blk != NULL)
		free(dcache_blk);
	if (dcache_lastused != NULL)
		free(dcache_lastused);
	if (dcache_buf != NULL)
		free(dcache_buf);
	if (dcache_valid != NULL)
		free(dcache_valid);
	dcache_config.num_cache_entry = 0;
	dcache_blk = NULL;
	dcache_lastused = NULL;
	dcache_buf = NULL;
	dcache_valid = NULL;
}

// Return 0 on success, -1 on failure.
static int dcache_alloc_all(long n)
{
	if (n <= 0)
		return -1;
	if ((dcache_blk = (off64_t *) malloc(sizeof(off64_t) * n)) == NULL
		|| (dcache_lastused = (uint64_t *)
				malloc(sizeof(uint64_t) * n)) == NULL
		|| (dcache_buf = (char *) malloc (F2FS_BLKSIZE * n)) == NULL
		|| (dcache_valid = (bool *) malloc(sizeof(bool) * n)) == NULL)
	{
		dcache_release();
		return -1;
	}
	dcache_config.num_cache_entry = n;
	return 0;
}

static void dcache_relocate_init(void)
{
	int i;
	int n0 = (sizeof(dcache_relocate_offset0)
			/ sizeof(dcache_relocate_offset0[0]));
	int n = (sizeof(dcache_relocate_offset)
			/ sizeof(dcache_relocate_offset[0]));

	ASSERT(n == n0);
	for (i = 0; i < n && i < dcache_config.max_hash_collision; i++) {
		if (labs(dcache_relocate_offset0[i])
				> dcache_config.num_cache_entry / 2) {
			dcache_config.max_hash_collision = i;
			break;
		}
		dcache_relocate_offset[i] =
				dcache_config.num_cache_entry
				+ dcache_relocate_offset0[i];
	}
}

void dcache_init(void)
{
	long n;

	if (c.cache_config.num_cache_entry <= 0)
		return;

	/* release previous cache init, if any */
	dcache_release();

	dcache_blk = NULL;
	dcache_lastused = NULL;
	dcache_buf = NULL;
	dcache_valid = NULL;

	dcache_config = c.cache_config;

	n = max(MIN_NUM_CACHE_ENTRY, dcache_config.num_cache_entry);

	/* halve the allocation size until it succeeds, or the minimum cache size is reached */
	while (dcache_alloc_all(n) != 0 && n !=  MIN_NUM_CACHE_ENTRY)
		n = max(MIN_NUM_CACHE_ENTRY, n/2);

	/* must be the last: data dependent on num_cache_entry */
	dcache_relocate_init();
	dcache_initialized = true;

	if (!dcache_exit_registered) {
		dcache_exit_registered = true;
		atexit(dcache_release); /* auto release */
	}

	dcache_raccess = 0;
	dcache_rhit = 0;
	dcache_rmiss = 0;
	dcache_rreplace = 0;
}

static inline char *dcache_addr(long entry)
{
	return dcache_buf + F2FS_BLKSIZE * entry;
}

/* relocate on (n+1)-th collision */
static inline long dcache_relocate(long entry, int n)
{
	assert(dcache_config.num_cache_entry != 0);
	return (entry + dcache_relocate_offset[n]) %
			dcache_config.num_cache_entry;
}

static long dcache_find(off64_t blk)
{
	register long n = dcache_config.num_cache_entry;
	register unsigned m = dcache_config.max_hash_collision;
	long entry, least_used, target;
	unsigned try;

	assert(n > 0);
	target = least_used = entry = blk % n; /* simple modulo hash */

	for (try = 0; try < m; try++) {
		if (!dcache_valid[target] || dcache_blk[target] == blk)
			return target;  /* found target or empty cache slot */
		if (dcache_lastused[target] < dcache_lastused[least_used])
			least_used = target;
		target = dcache_relocate(entry, try); /* next target */
	}
	return least_used;  /* max search reached, return least used slot */
}
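
/*
 * Worked example (illustrative only, not compiled; the numbers are
 * hypothetical): with num_cache_entry = 1024 and the offset table above,
 * block 3072 hashes to entry 3072 % 1024 = 0.  On collisions, dcache_find()
 * probes entries 20, 1004, 40, 984, ... (the entry plus/minus the
 * dcache_relocate_offset0[] values, wrapped modulo 1024); after
 * max_hash_collision probes it falls back to the least recently used slot.
 */
#if 0
static void dcache_find_example(void)
{
	long entry;

	/* assumes the cache was initialized with 1024 entries */
	entry = dcache_find((off64_t)3072);	/* slot 0, a probed slot, or the LRU slot */
	(void)entry;
}
#endif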

/* Physical read into cache */
static int dcache_io_read(int fd, long entry, off64_t offset, off64_t blk)
{
	if (lseek64(fd, offset, SEEK_SET) < 0) {
		MSG(0, "\n lseek64 fail.\n");
		return -1;
	}
	if (read(fd, dcache_buf + entry * F2FS_BLKSIZE, F2FS_BLKSIZE) < 0) {
		MSG(0, "\n read() fail.\n");
		return -1;
	}
	dcache_lastused[entry] = ++dcache_usetick;
	dcache_valid[entry] = true;
	dcache_blk[entry] = blk;
	return 0;
}

/*
 *  - Note: Read and write are not symmetric:
 *       Reads must be done block by block, because of the cache's nature:
 *           some blocks may be cached while others are not.
 *       Writes are always write-through, so they can be joined into one
 *       request and issued once by the caller.  This function updates the
 *       cache for a write, but does not do the physical write; the caller
 *       is responsible for that.
 *  - Note: Read and write are handled together because they share the same
 *          structure for finding the relevant cache entries.
 *  - Return values:
 *       0: success
 *       1: cache not available (uninitialized)
 *      -1: error
 *  (A worked example follows the function body below.)
 */
static int dcache_update_rw(int fd, void *buf, off64_t offset,
		size_t byte_count, bool is_write)
{
	off64_t blk;
	int addr_in_blk;
	off64_t start;

	if (!dcache_initialized)
		dcache_init(); /* auto initialize */

	if (!dcache_initialized)
		return 1; /* not available */

	blk = offset / F2FS_BLKSIZE;
	addr_in_blk = offset % F2FS_BLKSIZE;
	start = blk * F2FS_BLKSIZE;

	while (byte_count != 0) {
		size_t cur_size = min(byte_count,
				(size_t)(F2FS_BLKSIZE - addr_in_blk));
		long entry = dcache_find(blk);

		if (!is_write)
			++dcache_raccess;

		if (dcache_valid[entry] && dcache_blk[entry] == blk) {
			/* cache hit */
			if (is_write)  /* write: update cache */
				memcpy(dcache_addr(entry) + addr_in_blk,
					buf, cur_size);
			else
				++dcache_rhit;
		} else {
			/* cache miss */
			if (!is_write) {
				int err;
				++dcache_rmiss;
				if (dcache_valid[entry])
					++dcache_rreplace;
				/* read: physical I/O read into cache */
				err = dcache_io_read(fd, entry, start, blk);
				if (err)
					return err;
			}
		}

		/* read: copy data from cache */
		/* write: nothing to do, since we don't do physical write. */
		if (!is_write)
			memcpy(buf, dcache_addr(entry) + addr_in_blk,
				cur_size);

		/* next block */
		++blk;
		buf += cur_size;
		start += F2FS_BLKSIZE;
		byte_count -= cur_size;
		addr_in_blk = 0;
	}
	return 0;
}
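
/*
 * Worked example for the loop above (illustrative; assumes F2FS_BLKSIZE is
 * 4096): a read of 6000 bytes at offset 100 touches block 0 with
 * cur_size = 4096 - 100 = 3996, then block 1 with cur_size = 2004, each
 * block being served from the cache or pulled in via dcache_io_read().
 */
#if 0
static void dcache_update_rw_example(int fd, char *buf)
{
	/* equivalent to dcache_read(fd, buf, 100, 6000) */
	(void)dcache_update_rw(fd, buf, (off64_t)100, 6000, false);
}
#endif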

/*
 * dcache_update_cache() only updates the cache; it does no physical I/O.
 * Thus, even when it succeeds, the caller still needs a normal non-cached
 * write for the actual data.
 *
 * return value: 1: cache not available
 *               0: success, -1: I/O error
 */
int dcache_update_cache(int fd, void *buf, off64_t offset, size_t count)
{
	return dcache_update_rw(fd, buf, offset, count, true);
}
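
/*
 * Typical write-through usage (illustrative only; dev_write() below is the
 * real in-tree caller and already follows this pattern):
 */
#if 0
static int write_through_example(int fd, void *buf, off64_t offset, size_t len)
{
	if (dcache_update_cache(fd, buf, offset, len) < 0)
		return -1;	/* cache-side I/O error */
	if (lseek64(fd, offset, SEEK_SET) < 0)
		return -1;
	return write(fd, buf, len) < 0 ? -1 : 0;	/* the physical write */
}
#endif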

/* handles read into cache + read into buffer  */
int dcache_read(int fd, void *buf, off64_t offset, size_t count)
{
	return dcache_update_rw(fd, buf, offset, count, false);
}

/*
 * IO interfaces
 */
int dev_read_version(void *buf, __u64 offset, size_t len)
{
	if (c.sparse_mode)
		return 0;
	if (lseek64(c.kd, (off64_t)offset, SEEK_SET) < 0)
		return -1;
	if (read(c.kd, buf, len) < 0)
		return -1;
	return 0;
}

#ifdef HAVE_SPARSE_SPARSE_H
static int sparse_read_blk(__u64 block, int count, void *buf)
{
	int i;
	char *out = buf;
	__u64 cur_block;

	for (i = 0; i < count; ++i) {
		cur_block = block + i;
		if (blocks[cur_block])
			memcpy(out + (i * F2FS_BLKSIZE),
					blocks[cur_block], F2FS_BLKSIZE);
		else if (blocks)
			memset(out + (i * F2FS_BLKSIZE), 0, F2FS_BLKSIZE);
	}
	return 0;
}

static int sparse_write_blk(__u64 block, int count, const void *buf)
{
	int i;
	__u64 cur_block;
	const char *in = buf;

	for (i = 0; i < count; ++i) {
		cur_block = block + i;
		if (blocks[cur_block] == zeroed_block)
			blocks[cur_block] = NULL;
		if (!blocks[cur_block]) {
			blocks[cur_block] = calloc(1, F2FS_BLKSIZE);
			if (!blocks[cur_block])
				return -ENOMEM;
		}
		memcpy(blocks[cur_block], in + (i * F2FS_BLKSIZE),
				F2FS_BLKSIZE);
	}
	return 0;
}

static int sparse_write_zeroed_blk(__u64 block, int count)
{
	int i;
	__u64 cur_block;

	for (i = 0; i < count; ++i) {
		cur_block = block + i;
		if (blocks[cur_block])
			continue;
		blocks[cur_block] = zeroed_block;
	}
	return 0;
}
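
/*
 * How the in-memory sparse image is kept (illustrative; block numbers are
 * arbitrary): every written block gets its own calloc'ed buffer in blocks[],
 * zero-filled blocks all share the single 'zeroed_block' sentinel, and
 * untouched entries stay NULL.  f2fs_finalize_device() later turns runs of
 * these buffers into sparse-file data/fill chunks.
 */
#if 0
static void sparse_bookkeeping_example(void)
{
	char data[4096] = { 0x5a };	/* assumes 4 KiB F2FS_BLKSIZE */
	char out[4096];

	sparse_write_blk(0, 1, data);		/* blocks[0] = private copy */
	sparse_write_zeroed_blk(1, 1);		/* blocks[1] = zeroed_block */
	sparse_read_blk(0, 1, out);		/* out now holds 'data' */
}
#endif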

#ifdef SPARSE_CALLBACK_USES_SIZE_T
static int sparse_import_segment(void *UNUSED(priv), const void *data,
		size_t len, unsigned int block, unsigned int nr_blocks)
#else
static int sparse_import_segment(void *UNUSED(priv), const void *data, int len,
		unsigned int block, unsigned int nr_blocks)
#endif
{
	/* Ignore chunk headers, only write the data */
	if (!nr_blocks || len % F2FS_BLKSIZE)
		return 0;

	return sparse_write_blk(block, nr_blocks, data);
}

static int sparse_merge_blocks(uint64_t start, uint64_t num, int zero)
{
	char *buf;
	uint64_t i;

	if (zero) {
		blocks[start] = NULL;
		return sparse_file_add_fill(f2fs_sparse_file, 0x0,
					F2FS_BLKSIZE * num, start);
	}

	buf = calloc(num, F2FS_BLKSIZE);
	if (!buf) {
		fprintf(stderr, "failed to alloc %llu\n",
			(unsigned long long)num * F2FS_BLKSIZE);
		return -ENOMEM;
	}

	for (i = 0; i < num; i++) {
		memcpy(buf + i * F2FS_BLKSIZE, blocks[start + i], F2FS_BLKSIZE);
		free(blocks[start + i]);
		blocks[start + i] = NULL;
	}

	/* f2fs_release_sparse_resource() will release this buf. */
	blocks[start] = buf;

	return sparse_file_add_data(f2fs_sparse_file, blocks[start],
					F2FS_BLKSIZE * num, start);
}
#else
static int sparse_read_blk(__u64 UNUSED(block),
				int UNUSED(count), void *UNUSED(buf))
{
	return 0;
}

static int sparse_write_blk(__u64 UNUSED(block),
				int UNUSED(count), const void *UNUSED(buf))
{
	return 0;
}

static int sparse_write_zeroed_blk(__u64 UNUSED(block), int UNUSED(count))
{
	return 0;
}
#endif

int dev_read(void *buf, __u64 offset, size_t len)
{
	int fd;
	int err;

	if (c.sparse_mode)
		return sparse_read_blk(offset / F2FS_BLKSIZE,
					len / F2FS_BLKSIZE, buf);

	fd = __get_device_fd(&offset);
	if (fd < 0)
		return fd;

	/* err = 1: cache not available, fall back to non-cache R/W */
	/* err = 0: success, err=-1: I/O error */
	err = dcache_read(fd, buf, (off64_t)offset, len);
	if (err <= 0)
		return err;
	if (lseek64(fd, (off64_t)offset, SEEK_SET) < 0)
		return -1;
	if (read(fd, buf, len) < 0)
		return -1;
	return 0;
}

#ifdef POSIX_FADV_WILLNEED
int dev_readahead(__u64 offset, size_t len)
#else
int dev_readahead(__u64 offset, size_t UNUSED(len))
#endif
{
	int fd = __get_device_fd(&offset);

	if (fd < 0)
		return fd;
#ifdef POSIX_FADV_WILLNEED
	return posix_fadvise(fd, offset, len, POSIX_FADV_WILLNEED);
#else
	return 0;
#endif
}

int dev_write(void *buf, __u64 offset, size_t len)
{
	int fd;

	if (c.dry_run)
		return 0;

	if (c.sparse_mode)
		return sparse_write_blk(offset / F2FS_BLKSIZE,
					len / F2FS_BLKSIZE, buf);

	fd = __get_device_fd(&offset);
	if (fd < 0)
		return fd;

	/*
	 * dcache_update_cache() only updates the cache and does no I/O.
	 * Thus, even on success, we still need a normal non-cached write
	 * for the actual data.
	 */
	if (dcache_update_cache(fd, buf, (off64_t)offset, len) < 0)
		return -1;
	if (lseek64(fd, (off64_t)offset, SEEK_SET) < 0)
		return -1;
	if (write(fd, buf, len) < 0)
		return -1;
	return 0;
}

int dev_write_block(void *buf, __u64 blk_addr)
{
	return dev_write(buf, blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

int dev_write_dump(void *buf, __u64 offset, size_t len)
{
	if (lseek64(c.dump_fd, (off64_t)offset, SEEK_SET) < 0)
		return -1;
	if (write(c.dump_fd, buf, len) < 0)
		return -1;
	return 0;
}

int dev_fill(void *buf, __u64 offset, size_t len)
{
	int fd;

	if (c.sparse_mode)
		return sparse_write_zeroed_blk(offset / F2FS_BLKSIZE,
						len / F2FS_BLKSIZE);

	fd = __get_device_fd(&offset);
	if (fd < 0)
		return fd;

	/* Only allow fill to zero */
	if (*((__u8*)buf))
		return -1;
	if (lseek64(fd, (off64_t)offset, SEEK_SET) < 0)
		return -1;
	if (write(fd, buf, len) < 0)
		return -1;
	return 0;
}

int dev_fill_block(void *buf, __u64 blk_addr)
{
	return dev_fill(buf, blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

int dev_read_block(void *buf, __u64 blk_addr)
{
	return dev_read(buf, blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

int dev_reada_block(__u64 blk_addr)
{
	return dev_readahead(blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}
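
/*
 * Block-address wrappers: a block address is converted to a byte offset by
 * shifting by F2FS_BLKSIZE_BITS.  For example (assuming the common 4 KiB
 * block, i.e. F2FS_BLKSIZE_BITS == 12), block 2 maps to byte offset 8192
 * and the I/O length is one full block:
 */
#if 0
static int read_block_two_example(void *buf)
{
	return dev_read(buf, (__u64)2 << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}
#endif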

int f2fs_fsync_device(void)
{
#ifdef HAVE_FSYNC
	int i;

	for (i = 0; i < c.ndevs; i++) {
		if (fsync(c.devices[i].fd) < 0) {
			MSG(0, "\tError: Could not conduct fsync!!!\n");
			return -1;
		}
	}
#endif
	return 0;
}

int f2fs_init_sparse_file(void)
{
#ifdef HAVE_SPARSE_SPARSE_H
	if (c.func == MKFS) {
		f2fs_sparse_file = sparse_file_new(F2FS_BLKSIZE, c.device_size);
		if (!f2fs_sparse_file)
			return -1;
	} else {
		f2fs_sparse_file = sparse_file_import(c.devices[0].fd,
							true, false);
		if (!f2fs_sparse_file)
			return -1;

		c.device_size = sparse_file_len(f2fs_sparse_file, 0, 0);
		c.device_size &= (~((uint64_t)(F2FS_BLKSIZE - 1)));
	}

	if (sparse_file_block_size(f2fs_sparse_file) != F2FS_BLKSIZE) {
		MSG(0, "\tError: Corrupted sparse file\n");
		return -1;
	}
	blocks_count = c.device_size / F2FS_BLKSIZE;
	blocks = calloc(blocks_count, sizeof(char *));
	if (!blocks) {
		MSG(0, "\tError: Calloc Failed for blocks!!!\n");
		return -1;
	}

	zeroed_block = calloc(1, F2FS_BLKSIZE);
	if (!zeroed_block) {
		MSG(0, "\tError: Calloc Failed for zeroed block!!!\n");
		return -1;
	}

	return sparse_file_foreach_chunk(f2fs_sparse_file, true, false,
				sparse_import_segment, NULL);
#else
	MSG(0, "\tError: Sparse mode is only supported for android\n");
	return -1;
#endif
}

void f2fs_release_sparse_resource(void)
{
#ifdef HAVE_SPARSE_SPARSE_H
	int j;

	if (c.sparse_mode) {
		if (f2fs_sparse_file != NULL) {
			sparse_file_destroy(f2fs_sparse_file);
			f2fs_sparse_file = NULL;
		}
		for (j = 0; j < blocks_count; j++)
			free(blocks[j]);
		free(blocks);
		blocks = NULL;
		free(zeroed_block);
		zeroed_block = NULL;
	}
#endif
}

#define MAX_CHUNK_SIZE		(1 * 1024 * 1024 * 1024ULL)
#define MAX_CHUNK_COUNT		(MAX_CHUNK_SIZE / F2FS_BLKSIZE)
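
/*
 * Illustrative arithmetic (assuming the common 4 KiB F2FS_BLKSIZE):
 * MAX_CHUNK_SIZE is 1 GiB, so MAX_CHUNK_COUNT = 1 GiB / 4 KiB = 262144
 * blocks; f2fs_finalize_device() below flushes a data run into a sparse
 * chunk whenever it grows to that many blocks.
 */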
int f2fs_finalize_device(void)
{
	int i;
	int ret = 0;

#ifdef HAVE_SPARSE_SPARSE_H
	if (c.sparse_mode) {
		int64_t chunk_start = (blocks[0] == NULL) ? -1 : 0;
		uint64_t j;

		if (c.func != MKFS) {
			sparse_file_destroy(f2fs_sparse_file);
			ret = ftruncate(c.devices[0].fd, 0);
			ASSERT(!ret);
			lseek(c.devices[0].fd, 0, SEEK_SET);
			f2fs_sparse_file = sparse_file_new(F2FS_BLKSIZE,
							c.device_size);
		}

		for (j = 0; j < blocks_count; ++j) {
			if (chunk_start != -1) {
				if (j - chunk_start >= MAX_CHUNK_COUNT) {
					ret = sparse_merge_blocks(chunk_start,
							j - chunk_start, 0);
					ASSERT(!ret);
					chunk_start = -1;
				}
			}

			if (chunk_start == -1) {
				if (!blocks[j])
					continue;

				if (blocks[j] == zeroed_block) {
					ret = sparse_merge_blocks(j, 1, 1);
					ASSERT(!ret);
				} else {
					chunk_start = j;
				}
			} else {
				if (blocks[j] && blocks[j] != zeroed_block)
					continue;

				ret = sparse_merge_blocks(chunk_start,
						j - chunk_start, 0);
				ASSERT(!ret);

				if (blocks[j] == zeroed_block) {
					ret = sparse_merge_blocks(j, 1, 1);
					ASSERT(!ret);
				}

				chunk_start = -1;
			}
		}
		if (chunk_start != -1) {
			ret = sparse_merge_blocks(chunk_start,
						blocks_count - chunk_start, 0);
			ASSERT(!ret);
		}

		sparse_file_write(f2fs_sparse_file, c.devices[0].fd,
				/*gzip*/0, /*sparse*/1, /*crc*/0);

		f2fs_release_sparse_resource();
	}
#endif
	/*
	 * We should call fsync() to flush out all the dirty pages
	 * in the block device page cache.
	 */
	for (i = 0; i < c.ndevs; i++) {
#ifdef HAVE_FSYNC
		ret = fsync(c.devices[i].fd);
		if (ret < 0) {
			MSG(0, "\tError: Could not conduct fsync!!!\n");
			break;
		}
#endif
		ret = close(c.devices[i].fd);
		if (ret < 0) {
			MSG(0, "\tError: Failed to close device file!!!\n");
			break;
		}
		free(c.devices[i].path);
		free(c.devices[i].zone_cap_blocks);
	}
	close(c.kd);

	return ret;
}