/**
 * libf2fs.c
 *
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 * Copyright (c) 2019 Google Inc.
 *             http://www.google.com/
 * Copyright (c) 2020 Google Inc.
 *   Robin Hsu <robinhsu@google.com>
 *  : add quick-buffer for sload compression support
 *
 * Dual licensed under the GPL or LGPL version 2 licenses.
 */
#ifndef _LARGEFILE64_SOURCE
#define _LARGEFILE64_SOURCE
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#ifdef HAVE_MNTENT_H
#include <mntent.h>
#endif
#include <time.h>
#ifdef HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
#ifdef HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif
#ifdef HAVE_LINUX_HDREG_H
#include <linux/hdreg.h>
#endif

#include <stdbool.h>
#include <assert.h>
#include <inttypes.h>
#include "f2fs_fs.h"

struct f2fs_configuration c;

#ifdef HAVE_SPARSE_SPARSE_H
#include <sparse/sparse.h>
struct sparse_file *f2fs_sparse_file;
static char **blocks;
uint64_t blocks_count;
static char *zeroed_block;
#endif

static int __get_device_fd(__u64 *offset)
{
	__u64 blk_addr = *offset >> F2FS_BLKSIZE_BITS;
	int i;

	for (i = 0; i < c.ndevs; i++) {
		if (c.devices[i].start_blkaddr <= blk_addr &&
				c.devices[i].end_blkaddr >= blk_addr) {
			*offset -=
				c.devices[i].start_blkaddr << F2FS_BLKSIZE_BITS;
			return c.devices[i].fd;
		}
	}
	return -1;
}
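
/*
 * Worked example (hypothetical layout, 4 KiB blocks so F2FS_BLKSIZE_BITS
 * is 12): if device 0 spans blocks [0, 0x1FFF] and device 1 spans
 * [0x2000, 0x3FFF], then an offset of 0x2001000 is block 0x2001, which
 * falls on device 1; *offset is rewritten to
 * 0x2001000 - (0x2000 << 12) = 0x1000, i.e. the second block within
 * that device.
 */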

#ifndef HAVE_LSEEK64
typedef off_t	off64_t;

static inline off64_t lseek64(int fd, __u64 offset, int set)
{
	return lseek(fd, offset, set);
}
#endif

/* ---------- dev_cache, Least Used First (LUF) policy  ------------------- */
/*
 * The least used block is the first victim to be replaced when the
 * maximum hash collision count is exceeded.
 */
static bool *dcache_valid; /* is the cached block valid? */
static off64_t  *dcache_blk; /* which block it cached */
static uint64_t *dcache_lastused; /* last used ticks for cache entries */
static char *dcache_buf; /* cached block data */
static uint64_t dcache_usetick; /* current use tick */

static uint64_t dcache_raccess;
static uint64_t dcache_rhit;
static uint64_t dcache_rmiss;
static uint64_t dcache_rreplace;

static bool dcache_exit_registered = false;

/*
 *  Shadow config:
 *
 *  Active set of the configuration.  The global configuration
 *  'c.cache_config' is copied here when dcache_init() is called.
 */
static dev_cache_config_t dcache_config = {0, 16, 1};
static bool dcache_initialized = false;

#define MIN_NUM_CACHE_ENTRY  1024L
#define MAX_MAX_HASH_COLLISION  16

static long dcache_relocate_offset0[] = {
	20, -20, 40, -40, 80, -80, 160, -160,
	320, -320, 640, -640, 1280, -1280, 2560, -2560,
};
static int dcache_relocate_offset[16];

static void dcache_print_statistics(void)
{
	long i;
	long useCnt;

	/* Number of used cache entries */
	useCnt = 0;
	for (i = 0; i < dcache_config.num_cache_entry; i++)
		if (dcache_valid[i])
			++useCnt;

	/*
	 *  c: number of cache entries
	 *  u: used entries
	 *  RA: number of read access blocks
	 *  CH: cache hit
	 *  CM: cache miss
	 *  Repl: read cache replaced
	 */
	printf("\nc, u, RA, CH, CM, Repl=\n");
	printf("%ld %ld %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			dcache_config.num_cache_entry,
			useCnt,
			dcache_raccess,
			dcache_rhit,
			dcache_rmiss,
			dcache_rreplace);
}

void dcache_release(void)
{
	if (!dcache_initialized)
		return;

	dcache_initialized = false;

	if (c.cache_config.dbg_en)
		dcache_print_statistics();

	if (dcache_blk != NULL)
		free(dcache_blk);
	if (dcache_lastused != NULL)
		free(dcache_lastused);
	if (dcache_buf != NULL)
		free(dcache_buf);
	if (dcache_valid != NULL)
		free(dcache_valid);
	dcache_config.num_cache_entry = 0;
	dcache_blk = NULL;
	dcache_lastused = NULL;
	dcache_buf = NULL;
	dcache_valid = NULL;
}

// return 0 for success, -1 for failure.
static int dcache_alloc_all(long n)
{
	if (n <= 0)
		return -1;
	if ((dcache_blk = (off64_t *) malloc(sizeof(off64_t) * n)) == NULL
		|| (dcache_lastused = (uint64_t *)
				malloc(sizeof(uint64_t) * n)) == NULL
		|| (dcache_buf = (char *) malloc(F2FS_BLKSIZE * n)) == NULL
		|| (dcache_valid = (bool *) malloc(sizeof(bool) * n)) == NULL)
	{
		dcache_release();
		return -1;
	}
	dcache_config.num_cache_entry = n;
	return 0;
}

static void dcache_relocate_init(void)
{
	int i;
	int n0 = (sizeof(dcache_relocate_offset0)
			/ sizeof(dcache_relocate_offset0[0]));
	int n = (sizeof(dcache_relocate_offset)
			/ sizeof(dcache_relocate_offset[0]));

	ASSERT(n == n0);
	for (i = 0; i < n && i < dcache_config.max_hash_collision; i++) {
		if (labs(dcache_relocate_offset0[i])
				> dcache_config.num_cache_entry / 2) {
			dcache_config.max_hash_collision = i;
			break;
		}
		dcache_relocate_offset[i] =
				dcache_config.num_cache_entry
				+ dcache_relocate_offset0[i];
	}
}

void dcache_init(void)
{
	long n;

	if (c.cache_config.num_cache_entry <= 0)
		return;

	/* release previous cache init, if any */
	dcache_release();

	dcache_blk = NULL;
	dcache_lastused = NULL;
	dcache_buf = NULL;
	dcache_valid = NULL;

	dcache_config = c.cache_config;

	n = max(MIN_NUM_CACHE_ENTRY, dcache_config.num_cache_entry);

	/* halve the alloc size until the alloc succeeds, or the minimum
	 * cache size is reached */
	while (dcache_alloc_all(n) != 0 && n != MIN_NUM_CACHE_ENTRY)
		n = max(MIN_NUM_CACHE_ENTRY, n/2);

	/* must be the last: data dependent on num_cache_entry */
	dcache_relocate_init();
	dcache_initialized = true;

	if (!dcache_exit_registered) {
		dcache_exit_registered = true;
		atexit(dcache_release); /* auto release */
	}

	dcache_raccess = 0;
	dcache_rhit = 0;
	dcache_rmiss = 0;
	dcache_rreplace = 0;
}

static inline char *dcache_addr(long entry)
{
	return dcache_buf + F2FS_BLKSIZE * entry;
}

/* relocate on (n+1)-th collision */
static inline long dcache_relocate(long entry, int n)
{
	assert(dcache_config.num_cache_entry != 0);
	return (entry + dcache_relocate_offset[n]) %
			dcache_config.num_cache_entry;
}
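
/*
 * Example, assuming num_cache_entry == 1024: for the second collision
 * (n == 1, dcache_relocate_offset0[1] == -20), dcache_relocate_init()
 * precomputes dcache_relocate_offset[1] = 1024 - 20 = 1004, so probing
 * from entry 10 yields (10 + 1004) % 1024 == 1014.  Adding
 * num_cache_entry up front keeps the sum non-negative for the negative
 * raw offsets.
 */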

static long dcache_find(off64_t blk)
{
	register long n = dcache_config.num_cache_entry;
	register unsigned m = dcache_config.max_hash_collision;
	long entry, least_used, target;
	unsigned try;

	assert(n > 0);
	target = least_used = entry = blk % n; /* simple modulo hash */

	for (try = 0; try < m; try++) {
		if (!dcache_valid[target] || dcache_blk[target] == blk)
			return target;  /* found target or empty cache slot */
		if (dcache_lastused[target] < dcache_lastused[least_used])
			least_used = target;
		target = dcache_relocate(entry, try); /* next target */
	}
	return least_used;  /* max search reached, return least used slot */
}
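
/*
 * Probe-sequence sketch, assuming num_cache_entry == 1024 and
 * max_hash_collision == 16: a lookup for blk == 3095 starts at entry
 * 3095 % 1024 == 23, then probes 43, 3, 63, ... following
 * dcache_relocate_offset[].  If all 16 probes hit valid entries caching
 * other blocks, the least used slot among those probed is returned and
 * the caller replaces its contents.
 */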

/* Physical read into cache */
static int dcache_io_read(int fd, long entry, off64_t offset, off64_t blk)
{
	if (lseek64(fd, offset, SEEK_SET) < 0) {
		MSG(0, "\n lseek64 fail.\n");
		return -1;
	}
	if (read(fd, dcache_buf + entry * F2FS_BLKSIZE, F2FS_BLKSIZE) < 0) {
		MSG(0, "\n read() fail.\n");
		return -1;
	}
	dcache_lastused[entry] = ++dcache_usetick;
	dcache_valid[entry] = true;
	dcache_blk[entry] = blk;
	return 0;
}

/*
 *  - Note: read and write are not symmetric:
 *       Reads must go block by block, due to the nature of the cache:
 *           some blocks may be cached while others are not.
 *       Writes are always write-through, so they can be joined and issued
 *           once by the caller.  This function updates the cache for a
 *           write but does not do the physical write; the caller is
 *           responsible for that.
 *  - Note: read and write are handled together here because they share
 *          the same structure for finding the relevant cache entries.
 *  - Return values:
 *       0: success
 *       1: cache not available (uninitialized)
 *      -1: error
 */
static int dcache_update_rw(int fd, void *buf, off64_t offset,
		size_t byte_count, bool is_write)
{
	off64_t blk;
	int addr_in_blk;
	off64_t start;

	if (!dcache_initialized)
		dcache_init(); /* auto initialize */

	if (!dcache_initialized)
		return 1; /* not available */

	blk = offset / F2FS_BLKSIZE;
	addr_in_blk = offset % F2FS_BLKSIZE;
	start = blk * F2FS_BLKSIZE;

	while (byte_count != 0) {
		size_t cur_size = min(byte_count,
				(size_t)(F2FS_BLKSIZE - addr_in_blk));
		long entry = dcache_find(blk);

		if (!is_write)
			++dcache_raccess;

		if (dcache_valid[entry] && dcache_blk[entry] == blk) {
			/* cache hit */
			if (is_write)  /* write: update cache */
				memcpy(dcache_addr(entry) + addr_in_blk,
					buf, cur_size);
			else
				++dcache_rhit;
		} else {
			/* cache miss */
			if (!is_write) {
				int err;
				++dcache_rmiss;
				if (dcache_valid[entry])
					++dcache_rreplace;
				/* read: physical I/O read into cache */
				err = dcache_io_read(fd, entry, start, blk);
				if (err)
					return err;
			}
		}

		/* read: copy data from cache */
		/* write: nothing to do, since the physical write is done
		 * by the caller */
		if (!is_write)
			memcpy(buf, dcache_addr(entry) + addr_in_blk,
				cur_size);

		/* next block */
		++blk;
		buf += cur_size;
		start += F2FS_BLKSIZE;
		byte_count -= cur_size;
		addr_in_blk = 0;
	}
	return 0;
}

/*
 * dcache_update_cache() only updates the cache; it does no physical I/O.
 * Thus, even on success, the caller still needs normal non-cache I/O for
 * the actual write.
 *
 * return value: 1: cache not available
 *               0: success, -1: I/O error
 */
int dcache_update_cache(int fd, void *buf, off64_t offset, size_t count)
{
	return dcache_update_rw(fd, buf, offset, count, true);
}

/* handles read into cache + read into buffer */
int dcache_read(int fd, void *buf, off64_t offset, size_t count)
{
	return dcache_update_rw(fd, buf, offset, count, false);
}
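
/*
 * Typical caller pattern (see dev_read() below): a return of 1 means the
 * cache is unavailable and the caller falls back to a plain lseek64() +
 * read(); 0 means the data was already copied into buf; -1 is an I/O
 * error to propagate:
 *
 *	err = dcache_read(fd, buf, (off64_t)offset, len);
 *	if (err <= 0)
 *		return err;
 *	... fall back to lseek64() + read() ...
 */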

/*
 * IO interfaces
 */
int dev_read_version(void *buf, __u64 offset, size_t len)
{
	if (c.sparse_mode)
		return 0;
	if (lseek64(c.kd, (off64_t)offset, SEEK_SET) < 0)
		return -1;
	if (read(c.kd, buf, len) < 0)
		return -1;
	return 0;
}

#ifdef HAVE_SPARSE_SPARSE_H
static int sparse_read_blk(__u64 block, int count, void *buf)
{
	int i;
	char *out = buf;
	__u64 cur_block;

	for (i = 0; i < count; ++i) {
		cur_block = block + i;
		if (blocks[cur_block])
			memcpy(out + (i * F2FS_BLKSIZE),
					blocks[cur_block], F2FS_BLKSIZE);
		else if (blocks)
			memset(out + (i * F2FS_BLKSIZE), 0, F2FS_BLKSIZE);
	}
	return 0;
}

static int sparse_write_blk(__u64 block, int count, const void *buf)
{
	int i;
	__u64 cur_block;
	const char *in = buf;

	for (i = 0; i < count; ++i) {
		cur_block = block + i;
		if (blocks[cur_block] == zeroed_block)
			blocks[cur_block] = NULL;
		if (!blocks[cur_block]) {
			blocks[cur_block] = calloc(1, F2FS_BLKSIZE);
			if (!blocks[cur_block])
				return -ENOMEM;
		}
		memcpy(blocks[cur_block], in + (i * F2FS_BLKSIZE),
				F2FS_BLKSIZE);
	}
	return 0;
}

static int sparse_write_zeroed_blk(__u64 block, int count)
{
	int i;
	__u64 cur_block;

	for (i = 0; i < count; ++i) {
		cur_block = block + i;
		if (blocks[cur_block])
			continue;
		blocks[cur_block] = zeroed_block;
	}
	return 0;
}
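
/*
 * Design note: zeroed_block is a single shared all-zero block, so runs
 * of zero-filled blocks cost one allocation in total instead of one per
 * block.  sparse_write_blk() above therefore swaps the shared pointer
 * out before writing real data into a block, and f2fs_finalize_device()
 * below emits blocks that still alias zeroed_block as "fill" chunks
 * rather than data chunks.
 */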

#ifdef SPARSE_CALLBACK_USES_SIZE_T
static int sparse_import_segment(void *UNUSED(priv), const void *data,
		size_t len, unsigned int block, unsigned int nr_blocks)
#else
static int sparse_import_segment(void *UNUSED(priv), const void *data, int len,
		unsigned int block, unsigned int nr_blocks)
#endif
{
	/* Ignore chunk headers, only write the data */
	if (!nr_blocks || len % F2FS_BLKSIZE)
		return 0;

	return sparse_write_blk(block, nr_blocks, data);
}

static int sparse_merge_blocks(uint64_t start, uint64_t num, int zero)
{
	char *buf;
	uint64_t i;

	if (zero) {
		blocks[start] = NULL;
		return sparse_file_add_fill(f2fs_sparse_file, 0x0,
					F2FS_BLKSIZE * num, start);
	}

	buf = calloc(num, F2FS_BLKSIZE);
	if (!buf) {
		fprintf(stderr, "failed to alloc %llu\n",
			(unsigned long long)num * F2FS_BLKSIZE);
		return -ENOMEM;
	}

	for (i = 0; i < num; i++) {
		memcpy(buf + i * F2FS_BLKSIZE, blocks[start + i], F2FS_BLKSIZE);
		free(blocks[start + i]);
		blocks[start + i] = NULL;
	}

	/* free_sparse_blocks will release this buf. */
	blocks[start] = buf;

	return sparse_file_add_data(f2fs_sparse_file, blocks[start],
					F2FS_BLKSIZE * num, start);
}
#else
static int sparse_read_blk(__u64 UNUSED(block),
				int UNUSED(count), void *UNUSED(buf))
{
	return 0;
}

static int sparse_write_blk(__u64 UNUSED(block),
				int UNUSED(count), const void *UNUSED(buf))
{
	return 0;
}

static int sparse_write_zeroed_blk(__u64 UNUSED(block), int UNUSED(count))
{
	return 0;
}
#endif

int dev_read(void *buf, __u64 offset, size_t len)
{
	int fd;
	int err;

	if (c.max_size < (offset + len))
		c.max_size = offset + len;

	if (c.sparse_mode)
		return sparse_read_blk(offset / F2FS_BLKSIZE,
					len / F2FS_BLKSIZE, buf);

	fd = __get_device_fd(&offset);
	if (fd < 0)
		return fd;

	/* err = 1: cache not available, fall back to non-cache R/W */
	/* err = 0: success; err = -1: I/O error */
	err = dcache_read(fd, buf, (off64_t)offset, len);
	if (err <= 0)
		return err;
	if (lseek64(fd, (off64_t)offset, SEEK_SET) < 0)
		return -1;
	if (read(fd, buf, len) < 0)
		return -1;
	return 0;
}

#ifdef POSIX_FADV_WILLNEED
int dev_readahead(__u64 offset, size_t len)
#else
int dev_readahead(__u64 offset, size_t UNUSED(len))
#endif
{
	int fd = __get_device_fd(&offset);

	if (fd < 0)
		return fd;
#ifdef POSIX_FADV_WILLNEED
	return posix_fadvise(fd, offset, len, POSIX_FADV_WILLNEED);
#else
	return 0;
#endif
}

int dev_write(void *buf, __u64 offset, size_t len)
{
	int fd;

	if (c.max_size < (offset + len))
		c.max_size = offset + len;

	if (c.dry_run)
		return 0;

	if (c.sparse_mode)
		return sparse_write_blk(offset / F2FS_BLKSIZE,
					len / F2FS_BLKSIZE, buf);

	fd = __get_device_fd(&offset);
	if (fd < 0)
		return fd;

	/*
	 * dcache_update_cache() only updates the cache; it does no I/O.
	 * Thus, even on success, we still need normal non-cache I/O for
	 * the actual write.
	 */
	if (dcache_update_cache(fd, buf, (off64_t)offset, len) < 0)
		return -1;
	if (lseek64(fd, (off64_t)offset, SEEK_SET) < 0)
		return -1;
	if (write(fd, buf, len) < 0)
		return -1;
	return 0;
}

int dev_write_block(void *buf, __u64 blk_addr)
{
	return dev_write(buf, blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

int dev_write_dump(void *buf, __u64 offset, size_t len)
{
	if (lseek64(c.dump_fd, (off64_t)offset, SEEK_SET) < 0)
		return -1;
	if (write(c.dump_fd, buf, len) < 0)
		return -1;
	return 0;
}

int dev_fill(void *buf, __u64 offset, size_t len)
{
	int fd;

	if (c.max_size < (offset + len))
		c.max_size = offset + len;

	if (c.sparse_mode)
		return sparse_write_zeroed_blk(offset / F2FS_BLKSIZE,
						len / F2FS_BLKSIZE);

	fd = __get_device_fd(&offset);
	if (fd < 0)
		return fd;

	/* Only allow fill to zero */
	if (*((__u8*)buf))
		return -1;
	if (lseek64(fd, (off64_t)offset, SEEK_SET) < 0)
		return -1;
	if (write(fd, buf, len) < 0)
		return -1;
	return 0;
}

int dev_fill_block(void *buf, __u64 blk_addr)
{
	return dev_fill(buf, blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

int dev_read_block(void *buf, __u64 blk_addr)
{
	return dev_read(buf, blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

int dev_reada_block(__u64 blk_addr)
{
	return dev_readahead(blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

int f2fs_fsync_device(void)
{
#ifdef HAVE_FSYNC
	int i;

	for (i = 0; i < c.ndevs; i++) {
		if (fsync(c.devices[i].fd) < 0) {
			MSG(0, "\tError: Could not conduct fsync!!!\n");
			return -1;
		}
	}
#endif
	return 0;
}

int f2fs_init_sparse_file(void)
{
#ifdef HAVE_SPARSE_SPARSE_H
	if (c.func == MKFS) {
		f2fs_sparse_file = sparse_file_new(F2FS_BLKSIZE, c.device_size);
		if (!f2fs_sparse_file)
			return -1;
	} else {
		f2fs_sparse_file = sparse_file_import(c.devices[0].fd,
							true, false);
		if (!f2fs_sparse_file)
			return -1;

		c.device_size = sparse_file_len(f2fs_sparse_file, 0, 0);
		c.device_size &= (~((uint64_t)(F2FS_BLKSIZE - 1)));
	}

	if (sparse_file_block_size(f2fs_sparse_file) != F2FS_BLKSIZE) {
		MSG(0, "\tError: Corrupted sparse file\n");
		return -1;
	}
	blocks_count = c.device_size / F2FS_BLKSIZE;
	blocks = calloc(blocks_count, sizeof(char *));
	if (!blocks) {
		MSG(0, "\tError: Calloc Failed for blocks!!!\n");
		return -1;
	}

	zeroed_block = calloc(1, F2FS_BLKSIZE);
	if (!zeroed_block) {
		MSG(0, "\tError: Calloc Failed for zeroed block!!!\n");
		return -1;
	}

	return sparse_file_foreach_chunk(f2fs_sparse_file, true, false,
				sparse_import_segment, NULL);
#else
	MSG(0, "\tError: Sparse mode is only supported on Android\n");
	return -1;
#endif
}

void f2fs_release_sparse_resource(void)
{
#ifdef HAVE_SPARSE_SPARSE_H
	uint64_t j;

	if (c.sparse_mode) {
		if (f2fs_sparse_file != NULL) {
			sparse_file_destroy(f2fs_sparse_file);
			f2fs_sparse_file = NULL;
		}
		for (j = 0; j < blocks_count; j++)
			free(blocks[j]);
		free(blocks);
		blocks = NULL;
		free(zeroed_block);
		zeroed_block = NULL;
	}
#endif
}

#define MAX_CHUNK_SIZE		(1 * 1024 * 1024 * 1024ULL)
#define MAX_CHUNK_COUNT		(MAX_CHUNK_SIZE / F2FS_BLKSIZE)
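/*
 * With the default 4 KiB F2FS_BLKSIZE, MAX_CHUNK_COUNT is
 * 1 GiB / 4 KiB = 262144 blocks, i.e. each sparse data chunk emitted
 * below covers at most 1 GiB of the image.
 */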
int f2fs_finalize_device(void)
{
	int i;
	int ret = 0;

#ifdef HAVE_SPARSE_SPARSE_H
	if (c.sparse_mode) {
		int64_t chunk_start = (blocks[0] == NULL) ? -1 : 0;
		uint64_t j;

		if (c.func != MKFS) {
			sparse_file_destroy(f2fs_sparse_file);
			ret = ftruncate(c.devices[0].fd, 0);
			ASSERT(!ret);
			lseek(c.devices[0].fd, 0, SEEK_SET);
			f2fs_sparse_file = sparse_file_new(F2FS_BLKSIZE,
							c.device_size);
		}

		for (j = 0; j < blocks_count; ++j) {
			if (chunk_start != -1) {
				if (j - chunk_start >= MAX_CHUNK_COUNT) {
					ret = sparse_merge_blocks(chunk_start,
							j - chunk_start, 0);
					ASSERT(!ret);
					chunk_start = -1;
				}
			}

			if (chunk_start == -1) {
				if (!blocks[j])
					continue;

				if (blocks[j] == zeroed_block) {
					ret = sparse_merge_blocks(j, 1, 1);
					ASSERT(!ret);
				} else {
					chunk_start = j;
				}
			} else {
				if (blocks[j] && blocks[j] != zeroed_block)
					continue;

				ret = sparse_merge_blocks(chunk_start,
						j - chunk_start, 0);
				ASSERT(!ret);

				if (blocks[j] == zeroed_block) {
					ret = sparse_merge_blocks(j, 1, 1);
					ASSERT(!ret);
				}

				chunk_start = -1;
			}
		}
		if (chunk_start != -1) {
			ret = sparse_merge_blocks(chunk_start,
						blocks_count - chunk_start, 0);
			ASSERT(!ret);
		}

		sparse_file_write(f2fs_sparse_file, c.devices[0].fd,
				/*gzip*/0, /*sparse*/1, /*crc*/0);

		f2fs_release_sparse_resource();
	}
#endif
	/*
	 * We should call fsync() to flush out all the dirty pages
	 * in the block device page cache.
	 */
	for (i = 0; i < c.ndevs; i++) {
#ifdef HAVE_FSYNC
		ret = fsync(c.devices[i].fd);
		if (ret < 0) {
			MSG(0, "\tError: Could not conduct fsync!!!\n");
			break;
		}
#endif
		ret = close(c.devices[i].fd);
		if (ret < 0) {
			MSG(0, "\tError: Failed to close device file!!!\n");
			break;
		}
		free(c.devices[i].path);
		free(c.devices[i].zone_cap_blocks);
	}
	close(c.kd);

	return ret;
}