/**
 * libf2fs.c
 *
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 * Copyright (c) 2019 Google Inc.
 *             http://www.google.com/
 * Copyright (c) 2020 Google Inc.
 *   Robin Hsu <robinhsu@google.com>
 *  : add quick-buffer for sload compression support
 *
 * Dual licensed under the GPL or LGPL version 2 licenses.
 */
#define _LARGEFILE64_SOURCE

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#ifdef HAVE_MNTENT_H
#include <mntent.h>
#endif
#include <time.h>
#ifndef ANDROID_WINDOWS_HOST
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/ioctl.h>
#endif
#ifdef HAVE_LINUX_HDREG_H
#include <linux/hdreg.h>
#endif

#include <stdbool.h>
#include <assert.h>
#include <inttypes.h>
#include "f2fs_fs.h"

struct f2fs_configuration c;

#ifdef WITH_ANDROID
#include <sparse/sparse.h>
struct sparse_file *f2fs_sparse_file;
static char **blocks;
u_int64_t blocks_count;
static char *zeroed_block;
#endif

static int __get_device_fd(__u64 *offset)
{
	__u64 blk_addr = *offset >> F2FS_BLKSIZE_BITS;
	int i;

	for (i = 0; i < c.ndevs; i++) {
		if (c.devices[i].start_blkaddr <= blk_addr &&
				c.devices[i].end_blkaddr >= blk_addr) {
			*offset -=
				c.devices[i].start_blkaddr << F2FS_BLKSIZE_BITS;
			return c.devices[i].fd;
		}
	}
	return -1;
}
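
/*
 * Usage sketch (illustrative only; blk_addr and buf stand for a
 * caller's block address and buffer): __get_device_fd() maps a global
 * byte offset to the device holding it, rewriting *offset in place to
 * be device-local:
 *
 *	__u64 off = (__u64)blk_addr << F2FS_BLKSIZE_BITS;
 *	int fd = __get_device_fd(&off);
 *
 *	if (fd >= 0 && lseek64(fd, (off64_t)off, SEEK_SET) >= 0)
 *		read(fd, buf, F2FS_BLKSIZE);
 */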

#ifndef HAVE_LSEEK64
typedef off_t	off64_t;

static inline off64_t lseek64(int fd, __u64 offset, int set)
{
	return lseek(fd, offset, set);
}
#endif

/* ---------- dev_cache, Least Used First (LUF) policy ------------------- */
/*
 * The least-used block is the first victim to be replaced when the
 * maximum number of hash collisions is exceeded.
 */
static bool *dcache_valid; /* is the cached block valid? */
static off64_t  *dcache_blk; /* which block it cached */
static uint64_t *dcache_lastused; /* last used ticks for cache entries */
static char *dcache_buf; /* cached block data */
static uint64_t dcache_usetick; /* current use tick */

static uint64_t dcache_raccess; /* number of read accesses */
static uint64_t dcache_rhit; /* number of cache hits on read */
static uint64_t dcache_rmiss; /* number of cache misses on read */
static uint64_t dcache_rreplace; /* number of cache replacements on read */

static bool dcache_exit_registered = false;

/*
 *  Shadow config:
 *
 *  Active set of the configuration.
 *  The global configuration 'c.cache_config' is transferred here
 *  when dcache_init() is called.
 */
static dev_cache_config_t dcache_config = {0, 16, 1};
static bool dcache_initialized = false;

#define MIN_NUM_CACHE_ENTRY  1024L
#define MAX_MAX_HASH_COLLISION  16

static long dcache_relocate_offset0[] = {
	20, -20, 40, -40, 80, -80, 160, -160,
	320, -320, 640, -640, 1280, -1280, 2560, -2560,
};
static int dcache_relocate_offset[16];

static void dcache_print_statistics(void)
{
	long i;
	long useCnt;

	/* Number of used cache entries */
	useCnt = 0;
	for (i = 0; i < dcache_config.num_cache_entry; i++)
		if (dcache_valid[i])
			++useCnt;

	/*
	 *  c: number of cache entries
	 *  u: used entries
	 *  RA: number of read access blocks
	 *  CH: cache hit
	 *  CM: cache miss
	 *  Repl: read cache replaced
	 */
	printf("\nc, u, RA, CH, CM, Repl=\n");
	printf("%ld %ld %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			dcache_config.num_cache_entry,
			useCnt,
			dcache_raccess,
			dcache_rhit,
			dcache_rmiss,
			dcache_rreplace);
}

void dcache_release(void)
{
	if (!dcache_initialized)
		return;

	dcache_initialized = false;

	if (c.cache_config.dbg_en)
		dcache_print_statistics();

	if (dcache_blk != NULL)
		free(dcache_blk);
	if (dcache_lastused != NULL)
		free(dcache_lastused);
	if (dcache_buf != NULL)
		free(dcache_buf);
	if (dcache_valid != NULL)
		free(dcache_valid);
	dcache_config.num_cache_entry = 0;
	dcache_blk = NULL;
	dcache_lastused = NULL;
	dcache_buf = NULL;
	dcache_valid = NULL;
}

/* Return 0 on success, -1 on failure. */
static int dcache_alloc_all(long n)
{
	if (n <= 0)
		return -1;
	if ((dcache_blk = (off64_t *) malloc(sizeof(off64_t) * n)) == NULL
		|| (dcache_lastused = (uint64_t *)
				malloc(sizeof(uint64_t) * n)) == NULL
		|| (dcache_buf = (char *) malloc(F2FS_BLKSIZE * n)) == NULL
		|| (dcache_valid = (bool *) malloc(sizeof(bool) * n)) == NULL)
	{
		dcache_release();
		return -1;
	}
	dcache_config.num_cache_entry = n;
	return 0;
}

static void dcache_relocate_init(void)
{
	int i;
	int n0 = (sizeof(dcache_relocate_offset0)
			/ sizeof(dcache_relocate_offset0[0]));
	int n = (sizeof(dcache_relocate_offset)
			/ sizeof(dcache_relocate_offset[0]));

	ASSERT(n == n0);
	for (i = 0; i < n && i < dcache_config.max_hash_collision; i++) {
		if (labs(dcache_relocate_offset0[i])
				> dcache_config.num_cache_entry / 2) {
			dcache_config.max_hash_collision = i;
			break;
		}
		dcache_relocate_offset[i] =
				dcache_config.num_cache_entry
				+ dcache_relocate_offset0[i];
	}
}

void dcache_init(void)
{
	long n;

	if (c.cache_config.num_cache_entry <= 0)
		return;

	/* release previous cache init, if any */
	dcache_release();

	dcache_blk = NULL;
	dcache_lastused = NULL;
	dcache_buf = NULL;
	dcache_valid = NULL;

	dcache_config = c.cache_config;

	n = max(MIN_NUM_CACHE_ENTRY, dcache_config.num_cache_entry);

	/* halve the allocation size until it succeeds, or the minimum
	 * cache size is reached */
	while (dcache_alloc_all(n) != 0 && n != MIN_NUM_CACHE_ENTRY)
		n = max(MIN_NUM_CACHE_ENTRY, n/2);

	/* must be last: depends on the final num_cache_entry */
	dcache_relocate_init();
	dcache_initialized = true;

	if (!dcache_exit_registered) {
		dcache_exit_registered = true;
		atexit(dcache_release); /* auto release */
	}

	dcache_raccess = 0;
	dcache_rhit = 0;
	dcache_rmiss = 0;
	dcache_rreplace = 0;
}
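
/*
 * Setup sketch (a sketch only; values are illustrative, field names
 * follow the c.cache_config references above):
 *
 *	c.cache_config.num_cache_entry = 4096;
 *	c.cache_config.max_hash_collision = 8;
 *	c.cache_config.dbg_en = true;
 *	dcache_init();
 *
 * dcache_release() is registered via atexit(), so explicit teardown is
 * optional.
 */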

static inline char *dcache_addr(long entry)
{
	return dcache_buf + F2FS_BLKSIZE * entry;
}

/* relocate on the (n+1)-th collision */
static inline long dcache_relocate(long entry, int n)
{
	assert(dcache_config.num_cache_entry != 0);
	return (entry + dcache_relocate_offset[n]) %
			dcache_config.num_cache_entry;
}
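
/*
 * Probe-sequence sketch (numbers are illustrative): with
 * num_cache_entry = 1024, dcache_relocate_offset[0] = 1024 + 20, so a
 * block hashing to entry 1000 probes 1000, then
 * (1000 + 1044) % 1024 = 1020, then (1000 + 1004) % 1024 = 980, i.e.
 * +20, -20, +40, -40, ... around the home slot, reduced modulo the
 * table size (adding num_cache_entry first keeps the sum non-negative).
 */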

static long dcache_find(off64_t blk)
{
	register long n = dcache_config.num_cache_entry;
	register unsigned m = dcache_config.max_hash_collision;
	long entry, least_used, target;
	unsigned try;

	assert(n > 0);
	target = least_used = entry = blk % n; /* simple modulo hash */

	for (try = 0; try < m; try++) {
		if (!dcache_valid[target] || dcache_blk[target] == blk)
			return target;  /* found target or empty cache slot */
		if (dcache_lastused[target] < dcache_lastused[least_used])
			least_used = target;
		target = dcache_relocate(entry, try); /* next target */
	}
	return least_used;  /* max search reached, return least used slot */
}

/* Physical read into cache */
static int dcache_io_read(int fd, long entry, off64_t offset, off64_t blk)
{
	if (lseek64(fd, offset, SEEK_SET) < 0) {
		MSG(0, "\n lseek64 fail.\n");
		return -1;
	}
	if (read(fd, dcache_buf + entry * F2FS_BLKSIZE, F2FS_BLKSIZE) < 0) {
		MSG(0, "\n read() fail.\n");
		return -1;
	}
	dcache_lastused[entry] = ++dcache_usetick;
	dcache_valid[entry] = true;
	dcache_blk[entry] = blk;
	return 0;
}

/*
 *  - Note: read and write are not symmetric:
 *       Reads must be done block by block, due to the nature of the
 *       cache: some blocks may be cached while others are not.
 *       Writes are always write-through, so they can be joined and
 *       issued once by the caller.  This function updates the cache
 *       for a write, but does not perform the physical write; the
 *       caller is responsible for that.
 *  - Note: read and write share this function because they locate the
 *          relevant cache entries in the same way.
 *  - Return values:
 *       0: success
 *       1: cache not available (uninitialized)
 *      -1: error
 */
static int dcache_update_rw(int fd, void *buf, off64_t offset,
		size_t byte_count, bool is_write)
{
	off64_t blk;
	int addr_in_blk;
	off64_t start;

	if (!dcache_initialized)
		dcache_init(); /* auto initialize */

	if (!dcache_initialized)
		return 1; /* not available */

	blk = offset / F2FS_BLKSIZE;
	addr_in_blk = offset % F2FS_BLKSIZE;
	start = blk * F2FS_BLKSIZE;

	while (byte_count != 0) {
		size_t cur_size = min(byte_count,
				(size_t)(F2FS_BLKSIZE - addr_in_blk));
		long entry = dcache_find(blk);

		if (!is_write)
			++dcache_raccess;

		if (dcache_valid[entry] && dcache_blk[entry] == blk) {
			/* cache hit */
			if (is_write)  /* write: update cache */
				memcpy(dcache_addr(entry) + addr_in_blk,
					buf, cur_size);
			else
				++dcache_rhit;
		} else {
			/* cache miss */
			if (!is_write) {
				int err;
				++dcache_rmiss;
				if (dcache_valid[entry])
					++dcache_rreplace;
				/* read: physical I/O read into cache */
				err = dcache_io_read(fd, entry, start, blk);
				if (err)
					return err;
			}
		}

		/* read: copy data from the cache */
		/* write: nothing to do; the physical write happens later */
		if (!is_write)
			memcpy(buf, dcache_addr(entry) + addr_in_blk,
				cur_size);

		/* next block */
		++blk;
		buf += cur_size;
		start += F2FS_BLKSIZE;
		byte_count -= cur_size;
		addr_in_blk = 0;
	}
	return 0;
}
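
/*
 * Splitting sketch (illustrative, assuming F2FS_BLKSIZE = 4096): a read
 * of byte_count = 6000 at offset = 5000 starts in blk 1 with
 * addr_in_blk = 904, so the loop copies 4096 - 904 = 3192 bytes from
 * blk 1 and the remaining 2808 bytes from the start of blk 2, each
 * block going through its own cache entry.
 */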

/*
 * dcache_update_cache() only updates the cache; it does no physical I/O.
 * Thus, even on success, the caller still needs a normal non-cached
 * write for the actual data.
 *
 * return value: 1: cache not available
 *               0: success, -1: I/O error
 */
int dcache_update_cache(int fd, void *buf, off64_t offset, size_t count)
{
	return dcache_update_rw(fd, buf, offset, count, true);
}

/* handles read into cache + read into buffer */
int dcache_read(int fd, void *buf, off64_t offset, size_t count)
{
	return dcache_update_rw(fd, buf, offset, count, false);
}

/*
 * IO interfaces
 */
int dev_read_version(void *buf, __u64 offset, size_t len)
{
	if (c.sparse_mode)
		return 0;
	if (lseek64(c.kd, (off64_t)offset, SEEK_SET) < 0)
		return -1;
	if (read(c.kd, buf, len) < 0)
		return -1;
	return 0;
}

#ifdef WITH_ANDROID
static int sparse_read_blk(__u64 block, int count, void *buf)
{
	int i;
	char *out = buf;
	__u64 cur_block;

	for (i = 0; i < count; ++i) {
		cur_block = block + i;
		if (blocks[cur_block])
			memcpy(out + (i * F2FS_BLKSIZE),
					blocks[cur_block], F2FS_BLKSIZE);
		else if (blocks)
			memset(out + (i * F2FS_BLKSIZE), 0, F2FS_BLKSIZE);
	}
	return 0;
}

static int sparse_write_blk(__u64 block, int count, const void *buf)
{
	int i;
	__u64 cur_block;
	const char *in = buf;

	for (i = 0; i < count; ++i) {
		cur_block = block + i;
		if (blocks[cur_block] == zeroed_block)
			blocks[cur_block] = NULL;
		if (!blocks[cur_block]) {
			blocks[cur_block] = calloc(1, F2FS_BLKSIZE);
			if (!blocks[cur_block])
				return -ENOMEM;
		}
		memcpy(blocks[cur_block], in + (i * F2FS_BLKSIZE),
				F2FS_BLKSIZE);
	}
	return 0;
}

static int sparse_write_zeroed_blk(__u64 block, int count)
{
	int i;
	__u64 cur_block;

	for (i = 0; i < count; ++i) {
		cur_block = block + i;
		if (blocks[cur_block])
			continue;
		blocks[cur_block] = zeroed_block;
	}
	return 0;
}
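
/*
 * Note: blocks[] entries may alias the shared zeroed_block page (set
 * by sparse_write_zeroed_blk() above).  sparse_write_blk() drops such
 * an alias and allocates a private block before copying data in,
 * effectively copy-on-write for zero-filled blocks.
 */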

#ifdef SPARSE_CALLBACK_USES_SIZE_T
static int sparse_import_segment(void *UNUSED(priv), const void *data,
		size_t len, unsigned int block, unsigned int nr_blocks)
#else
static int sparse_import_segment(void *UNUSED(priv), const void *data, int len,
		unsigned int block, unsigned int nr_blocks)
#endif
{
	/* Ignore chunk headers, only write the data */
	if (!nr_blocks || len % F2FS_BLKSIZE)
		return 0;

	return sparse_write_blk(block, nr_blocks, data);
}

static int sparse_merge_blocks(uint64_t start, uint64_t num, int zero)
{
	char *buf;
	uint64_t i;

	if (zero) {
		blocks[start] = NULL;
		return sparse_file_add_fill(f2fs_sparse_file, 0x0,
					F2FS_BLKSIZE * num, start);
	}

	buf = calloc(num, F2FS_BLKSIZE);
	if (!buf) {
		fprintf(stderr, "failed to alloc %llu\n",
			(unsigned long long)num * F2FS_BLKSIZE);
		return -ENOMEM;
	}

	for (i = 0; i < num; i++) {
		memcpy(buf + i * F2FS_BLKSIZE, blocks[start + i], F2FS_BLKSIZE);
		free(blocks[start + i]);
		blocks[start + i] = NULL;
	}

	/* free_sparse_blocks will release this buf. */
	blocks[start] = buf;

	return sparse_file_add_data(f2fs_sparse_file, blocks[start],
					F2FS_BLKSIZE * num, start);
}
#else
static int sparse_read_blk(__u64 block, int count, void *buf) { return 0; }
static int sparse_write_blk(__u64 block, int count, const void *buf) { return 0; }
static int sparse_write_zeroed_blk(__u64 block, int count) { return 0; }
#endif

int dev_read(void *buf, __u64 offset, size_t len)
{
	int fd;
	int err;

	if (c.max_size < (offset + len))
		c.max_size = offset + len;

	if (c.sparse_mode)
		return sparse_read_blk(offset / F2FS_BLKSIZE,
					len / F2FS_BLKSIZE, buf);

	fd = __get_device_fd(&offset);
	if (fd < 0)
		return fd;

	/* err = 1: cache not available, fall back to non-cached R/W */
	/* err = 0: success, err = -1: I/O error */
	err = dcache_read(fd, buf, (off64_t)offset, len);
	if (err <= 0)
		return err;
	if (lseek64(fd, (off64_t)offset, SEEK_SET) < 0)
		return -1;
	if (read(fd, buf, len) < 0)
		return -1;
	return 0;
}

#ifdef POSIX_FADV_WILLNEED
int dev_readahead(__u64 offset, size_t len)
#else
int dev_readahead(__u64 offset, size_t UNUSED(len))
#endif
{
	int fd = __get_device_fd(&offset);

	if (fd < 0)
		return fd;
#ifdef POSIX_FADV_WILLNEED
	return posix_fadvise(fd, offset, len, POSIX_FADV_WILLNEED);
#else
	return 0;
#endif
}

int dev_write(void *buf, __u64 offset, size_t len)
{
	int fd;

	if (c.max_size < (offset + len))
		c.max_size = offset + len;

	if (c.dry_run)
		return 0;

	if (c.sparse_mode)
		return sparse_write_blk(offset / F2FS_BLKSIZE,
					len / F2FS_BLKSIZE, buf);

	fd = __get_device_fd(&offset);
	if (fd < 0)
		return fd;

	/*
	 * dcache_update_cache() only updates the cache; it does no I/O.
	 * Thus, even on success, a normal non-cached write is still
	 * needed for the actual data.
	 */
	if (dcache_update_cache(fd, buf, (off64_t)offset, len) < 0)
		return -1;
	if (lseek64(fd, (off64_t)offset, SEEK_SET) < 0)
		return -1;
	if (write(fd, buf, len) < 0)
		return -1;
	return 0;
}

int dev_write_block(void *buf, __u64 blk_addr)
{
	return dev_write(buf, blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}
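
/*
 * Conversion sketch (illustrative, assuming F2FS_BLKSIZE_BITS = 12,
 * i.e. 4 KiB blocks): blk_addr 3 becomes byte offset 3 << 12 = 12288,
 * and dev_write_block() writes exactly one F2FS_BLKSIZE block there.
 * The *_block helpers below wrap the byte-based I/O the same way.
 */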

int dev_write_dump(void *buf, __u64 offset, size_t len)
{
	if (lseek64(c.dump_fd, (off64_t)offset, SEEK_SET) < 0)
		return -1;
	if (write(c.dump_fd, buf, len) < 0)
		return -1;
	return 0;
}

int dev_fill(void *buf, __u64 offset, size_t len)
{
	int fd;

	if (c.max_size < (offset + len))
		c.max_size = offset + len;

	if (c.sparse_mode)
		return sparse_write_zeroed_blk(offset / F2FS_BLKSIZE,
						len / F2FS_BLKSIZE);

	fd = __get_device_fd(&offset);
	if (fd < 0)
		return fd;

	/* Only allow fill to zero */
	if (*((__u8 *)buf))
		return -1;
	if (lseek64(fd, (off64_t)offset, SEEK_SET) < 0)
		return -1;
	if (write(fd, buf, len) < 0)
		return -1;
	return 0;
}

int dev_fill_block(void *buf, __u64 blk_addr)
{
	return dev_fill(buf, blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

int dev_read_block(void *buf, __u64 blk_addr)
{
	return dev_read(buf, blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

int dev_reada_block(__u64 blk_addr)
{
	return dev_readahead(blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

int f2fs_fsync_device(void)
{
#ifndef ANDROID_WINDOWS_HOST
	int i;

	for (i = 0; i < c.ndevs; i++) {
		if (fsync(c.devices[i].fd) < 0) {
			MSG(0, "\tError: Could not conduct fsync!!!\n");
			return -1;
		}
	}
#endif
	return 0;
}

int f2fs_init_sparse_file(void)
{
#ifdef WITH_ANDROID
	if (c.func == MKFS) {
		f2fs_sparse_file = sparse_file_new(F2FS_BLKSIZE, c.device_size);
		if (!f2fs_sparse_file)
			return -1;
	} else {
		f2fs_sparse_file = sparse_file_import(c.devices[0].fd,
							true, false);
		if (!f2fs_sparse_file)
			return -1;

		c.device_size = sparse_file_len(f2fs_sparse_file, 0, 0);
		c.device_size &= (~((u_int64_t)(F2FS_BLKSIZE - 1)));
	}

	if (sparse_file_block_size(f2fs_sparse_file) != F2FS_BLKSIZE) {
		MSG(0, "\tError: Corrupted sparse file\n");
		return -1;
	}
	blocks_count = c.device_size / F2FS_BLKSIZE;
	blocks = calloc(blocks_count, sizeof(char *));
	if (!blocks) {
		MSG(0, "\tError: Calloc failed for blocks!!!\n");
		return -1;
	}

	zeroed_block = calloc(1, F2FS_BLKSIZE);
	if (!zeroed_block) {
		MSG(0, "\tError: Calloc failed for zeroed block!!!\n");
		return -1;
	}

	return sparse_file_foreach_chunk(f2fs_sparse_file, true, false,
				sparse_import_segment, NULL);
#else
	MSG(0, "\tError: Sparse mode is only supported on Android\n");
	return -1;
#endif
}

void f2fs_release_sparse_resource(void)
{
#ifdef WITH_ANDROID
	uint64_t j;

	if (c.sparse_mode) {
		if (f2fs_sparse_file != NULL) {
			sparse_file_destroy(f2fs_sparse_file);
			f2fs_sparse_file = NULL;
		}
		for (j = 0; j < blocks_count; j++)
			free(blocks[j]);
		free(blocks);
		blocks = NULL;
		free(zeroed_block);
		zeroed_block = NULL;
	}
#endif
}

#define MAX_CHUNK_SIZE		(1 * 1024 * 1024 * 1024ULL)
#define MAX_CHUNK_COUNT		(MAX_CHUNK_SIZE / F2FS_BLKSIZE)
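/*
 * Runs of data blocks are flushed in pieces of at most MAX_CHUNK_COUNT
 * blocks (1 GiB), so no single sparse chunk exceeds MAX_CHUNK_SIZE.
 */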
int f2fs_finalize_device(void)
{
	int i;
	int ret = 0;

#ifdef WITH_ANDROID
	if (c.sparse_mode) {
		int64_t chunk_start = (blocks[0] == NULL) ? -1 : 0;
		uint64_t j;

		if (c.func != MKFS) {
			sparse_file_destroy(f2fs_sparse_file);
			ret = ftruncate(c.devices[0].fd, 0);
			ASSERT(!ret);
			lseek(c.devices[0].fd, 0, SEEK_SET);
			f2fs_sparse_file = sparse_file_new(F2FS_BLKSIZE,
							c.device_size);
		}

		for (j = 0; j < blocks_count; ++j) {
			if (chunk_start != -1) {
				if (j - chunk_start >= MAX_CHUNK_COUNT) {
					ret = sparse_merge_blocks(chunk_start,
							j - chunk_start, 0);
					ASSERT(!ret);
					chunk_start = -1;
				}
			}

			if (chunk_start == -1) {
				if (!blocks[j])
					continue;

				if (blocks[j] == zeroed_block) {
					ret = sparse_merge_blocks(j, 1, 1);
					ASSERT(!ret);
				} else {
					chunk_start = j;
				}
			} else {
				if (blocks[j] && blocks[j] != zeroed_block)
					continue;

				ret = sparse_merge_blocks(chunk_start,
						j - chunk_start, 0);
				ASSERT(!ret);

				if (blocks[j] == zeroed_block) {
					ret = sparse_merge_blocks(j, 1, 1);
					ASSERT(!ret);
				}

				chunk_start = -1;
			}
		}
		if (chunk_start != -1) {
			ret = sparse_merge_blocks(chunk_start,
						blocks_count - chunk_start, 0);
			ASSERT(!ret);
		}

		sparse_file_write(f2fs_sparse_file, c.devices[0].fd,
				/*gzip*/0, /*sparse*/1, /*crc*/0);

		f2fs_release_sparse_resource();
	}
#endif
	/*
	 * We should call fsync() to flush out all the dirty pages
	 * in the block device page cache.
	 */
	for (i = 0; i < c.ndevs; i++) {
#ifndef ANDROID_WINDOWS_HOST
		ret = fsync(c.devices[i].fd);
		if (ret < 0) {
			MSG(0, "\tError: Could not conduct fsync!!!\n");
			break;
		}
#endif
		ret = close(c.devices[i].fd);
		if (ret < 0) {
			MSG(0, "\tError: Failed to close device file!!!\n");
			break;
		}
		free(c.devices[i].path);
		free(c.devices[i].zone_cap_blocks);
	}
	close(c.kd);

	return ret;
}