/**
 * libf2fs.c
 *
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 * Copyright (c) 2019 Google Inc.
 *             http://www.google.com/
 *
 * Dual licensed under the GPL or LGPL version 2 licenses.
 */
#ifndef _LARGEFILE64_SOURCE
#define _LARGEFILE64_SOURCE
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#ifdef HAVE_MNTENT_H
#include <mntent.h>
#endif
#include <time.h>
#ifndef ANDROID_WINDOWS_HOST
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/ioctl.h>
#endif
#ifdef HAVE_LINUX_HDREG_H
#include <linux/hdreg.h>
#endif

#include <stdbool.h>
#include <assert.h>
#include <inttypes.h>
#include "f2fs_fs.h"

struct f2fs_configuration c;

#ifdef WITH_ANDROID
#include <sparse/sparse.h>
struct sparse_file *f2fs_sparse_file;
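/*
 * In sparse mode the image is staged in memory before being written out:
 * blocks[i] points at the data for block i, a NULL entry is a hole, and
 * zeroed_block is a single shared page marking blocks that were
 * explicitly zeroed (see sparse_write_zeroed_blk() below).
 */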
static char **blocks;
u_int64_t blocks_count;
static char *zeroed_block;
#endif

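/*
 * Map a global byte offset to the device that backs it.  Devices are
 * concatenated in block-address order, so the block address is compared
 * against each device's [start_blkaddr, end_blkaddr] range; on a match,
 * *offset is rewritten to be relative to that device and its fd is
 * returned.  For example, with two devices of 1024 blocks each, block
 * 1536 falls in the second device and *offset is reduced by
 * 1024 << F2FS_BLKSIZE_BITS.
 */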
static int __get_device_fd(__u64 *offset)
{
	__u64 blk_addr = *offset >> F2FS_BLKSIZE_BITS;
	int i;

	for (i = 0; i < c.ndevs; i++) {
		if (c.devices[i].start_blkaddr <= blk_addr &&
				c.devices[i].end_blkaddr >= blk_addr) {
			*offset -=
				c.devices[i].start_blkaddr << F2FS_BLKSIZE_BITS;
			return c.devices[i].fd;
		}
	}
	return -1;
}

#ifndef HAVE_LSEEK64
typedef off_t	off64_t;

static inline off64_t lseek64(int fd, __u64 offset, int set)
{
	return lseek(fd, offset, set);
}
#endif

/* ---------- dev_cache, Least Used First (LUF) policy -------------------- */
/*
 * The least used block is the first victim to be replaced when the
 * maximum number of hash collisions is exceeded.
 */
static bool *dcache_valid; /* is the cached block valid? */
static off64_t  *dcache_blk; /* which block it cached */
static uint64_t *dcache_lastused; /* last used ticks for cache entries */
static char *dcache_buf; /* cached block data */
static uint64_t dcache_usetick; /* current use tick */

static uint64_t dcache_raccess;
static uint64_t dcache_rhit;
static uint64_t dcache_rmiss;
static uint64_t dcache_rreplace;

static bool dcache_exit_registered = false;

/*
 *  Shadow config:
 *
 *  Active set of the configuration.
 *  The global configuration 'c.cache_config' is copied here (into
 *  'dcache_config') when dcache_init() is called.
 */
static dev_cache_config_t dcache_config = {0, 16, 1};
static bool dcache_initialized = false;

#define MIN_NUM_CACHE_ENTRY  1024L
#define MAX_MAX_HASH_COLLISION  16

static long dcache_relocate_offset0[] = {
	20, -20, 40, -40, 80, -80, 160, -160,
	320, -320, 640, -640, 1280, -1280, 2560, -2560,
};
static int dcache_relocate_offset[16];

static void dcache_print_statistics(void)
{
	long i;
	long useCnt;

	/* Number of used cache entries */
	useCnt = 0;
	for (i = 0; i < dcache_config.num_cache_entry; i++)
		if (dcache_valid[i])
			++useCnt;

	/*
	 *  c: number of cache entries
	 *  u: used entries
	 *  RA: number of read access blocks
	 *  CH: cache hits
	 *  CM: cache misses
	 *  Repl: read cache replacements
	 */
	printf("\nc, u, RA, CH, CM, Repl=\n");
	printf("%ld %ld %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			dcache_config.num_cache_entry,
			useCnt,
			dcache_raccess,
			dcache_rhit,
			dcache_rmiss,
			dcache_rreplace);
}

void dcache_release(void)
{
	if (!dcache_initialized)
		return;

	dcache_initialized = false;

	if (c.cache_config.dbg_en)
		dcache_print_statistics();

	if (dcache_blk != NULL)
		free(dcache_blk);
	if (dcache_lastused != NULL)
		free(dcache_lastused);
	if (dcache_buf != NULL)
		free(dcache_buf);
	if (dcache_valid != NULL)
		free(dcache_valid);
	dcache_config.num_cache_entry = 0;
	dcache_blk = NULL;
	dcache_lastused = NULL;
	dcache_buf = NULL;
	dcache_valid = NULL;
}

// return 0 for success, error code for failure.
static int dcache_alloc_all(long n)
{
	if (n <= 0)
		return -1;
	if ((dcache_blk = (off64_t *) malloc(sizeof(off64_t) * n)) == NULL
		|| (dcache_lastused = (uint64_t *)
				malloc(sizeof(uint64_t) * n)) == NULL
		|| (dcache_buf = (char *) malloc(F2FS_BLKSIZE * n)) == NULL
		|| (dcache_valid = (bool *) malloc(sizeof(bool) * n)) == NULL)
	{
		dcache_release();
		return -1;
	}
	dcache_config.num_cache_entry = n;
	return 0;
}

static void dcache_relocate_init(void)
{
	int i;
	int n0 = (sizeof(dcache_relocate_offset0)
			/ sizeof(dcache_relocate_offset0[0]));
	int n = (sizeof(dcache_relocate_offset)
			/ sizeof(dcache_relocate_offset[0]));

	ASSERT(n == n0);
	for (i = 0; i < n && i < dcache_config.max_hash_collision; i++) {
		if (labs(dcache_relocate_offset0[i])
				> dcache_config.num_cache_entry / 2) {
			dcache_config.max_hash_collision = i;
			break;
		}
		dcache_relocate_offset[i] =
				dcache_config.num_cache_entry
				+ dcache_relocate_offset0[i];
	}
}

void dcache_init(void)
{
	long n;

	if (c.cache_config.num_cache_entry <= 0)
		return;

	/* release any previous cache initialization */
	dcache_release();

	dcache_blk = NULL;
	dcache_lastused = NULL;
	dcache_buf = NULL;
	dcache_valid = NULL;

	dcache_config = c.cache_config;

	n = max(MIN_NUM_CACHE_ENTRY, dcache_config.num_cache_entry);

	/* halve the allocation size until it succeeds, or the minimum
	 * cache size is reached */
	while (dcache_alloc_all(n) != 0 && n != MIN_NUM_CACHE_ENTRY)
		n = max(MIN_NUM_CACHE_ENTRY, n/2);

	/* must come last: depends on the final num_cache_entry */
	dcache_relocate_init();
	dcache_initialized = true;

	if (!dcache_exit_registered) {
		dcache_exit_registered = true;
		atexit(dcache_release); /* auto release */
	}

	dcache_raccess = 0;
	dcache_rhit = 0;
	dcache_rmiss = 0;
	dcache_rreplace = 0;
}

static inline char *dcache_addr(long entry)
{
	return dcache_buf + F2FS_BLKSIZE * entry;
}

/* relocate on (n+1)-th collision */
static inline long dcache_relocate(long entry, int n)
{
	assert(dcache_config.num_cache_entry != 0);
	return (entry + dcache_relocate_offset[n]) %
			dcache_config.num_cache_entry;
}

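/*
 * Worked example of the probe sequence (values for illustration only):
 * with num_cache_entry = 1024, block 3072 hashes to entry 3072 % 1024 = 0.
 * Since dcache_relocate_offset[i] = 1024 + dcache_relocate_offset0[i],
 * successive collisions probe (0 + 1024 + 20) % 1024 = 20, then
 * (0 + 1024 - 20) % 1024 = 1004, then (0 + 1024 + 40) % 1024 = 40, and so
 * on; the bias by num_cache_entry keeps the dividend non-negative for the
 * negative offsets.
 */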
static long dcache_find(off64_t blk)
{
	register long n = dcache_config.num_cache_entry;
	register unsigned m = dcache_config.max_hash_collision;
	long entry, least_used, target;
	unsigned try;

	assert(n > 0);
	target = least_used = entry = blk % n; /* simple modulo hash */

	for (try = 0; try < m; try++) {
		if (!dcache_valid[target] || dcache_blk[target] == blk)
			return target;  /* found target or empty cache slot */
		if (dcache_lastused[target] < dcache_lastused[least_used])
			least_used = target;
		target = dcache_relocate(entry, try); /* next target */
	}
	return least_used;  /* max search reached, return least used slot */
}

/* Physical read into cache */
static int dcache_io_read(int fd, long entry, off64_t offset, off64_t blk)
{
	if (lseek64(fd, offset, SEEK_SET) < 0) {
		MSG(0, "\n lseek64 fail.\n");
		return -1;
	}
	if (read(fd, dcache_buf + entry * F2FS_BLKSIZE, F2FS_BLKSIZE) < 0) {
		MSG(0, "\n read() fail.\n");
		return -1;
	}
	dcache_lastused[entry] = ++dcache_usetick;
	dcache_valid[entry] = true;
	dcache_blk[entry] = blk;
	return 0;
}

/*
 *  - Note: read and write are not symmetric:
 *       Reads must be done block by block, due to the nature of the cache:
 *           some blocks may be cached while others are not.
 *       Writes are always write-through, so they can be joined into one
 *           and issued once by the caller.  This function updates the
 *           cache on write, but does not do the physical write; the
 *           caller is responsible for that.
 *  - Note: read and write are handled together here because they share
 *          the same structure for finding the relevant cache entries.
 *  - Return values:
 *       0: success
 *       1: cache not available (uninitialized)
 *      -1: error
 */
static int dcache_update_rw(int fd, void *buf, off64_t offset,
		size_t byte_count, bool is_write)
{
	off64_t blk;
	int addr_in_blk;
	off64_t start;

	if (!dcache_initialized)
		dcache_init(); /* auto initialize */

	if (!dcache_initialized)
		return 1; /* not available */

	blk = offset / F2FS_BLKSIZE;
	addr_in_blk = offset % F2FS_BLKSIZE;
	start = blk * F2FS_BLKSIZE;

	while (byte_count != 0) {
		size_t cur_size = min(byte_count,
				(size_t)(F2FS_BLKSIZE - addr_in_blk));
		long entry = dcache_find(blk);

		if (!is_write)
			++dcache_raccess;

		if (dcache_valid[entry] && dcache_blk[entry] == blk) {
			/* cache hit */
			if (is_write)  /* write: update cache */
				memcpy(dcache_addr(entry) + addr_in_blk,
					buf, cur_size);
			else
				++dcache_rhit;
		} else {
			/* cache miss */
			if (!is_write) {
				int err;
				++dcache_rmiss;
				if (dcache_valid[entry])
					++dcache_rreplace;
				/* read: physical I/O read into cache */
				err = dcache_io_read(fd, entry, start, blk);
				if (err)
					return err;
			}
		}

		/* read: copy data from cache */
		/* write: nothing to do, since we don't do the physical write */
		if (!is_write)
			memcpy(buf, dcache_addr(entry) + addr_in_blk,
				cur_size);

		/* next block */
		++blk;
		buf += cur_size;
		start += F2FS_BLKSIZE;
		byte_count -= cur_size;
		addr_in_blk = 0;
	}
	return 0;
}

/*
 * dcache_update_cache() only updates the cache; it does no physical I/O.
 * Thus, even when there is no error, the caller still needs a normal
 * non-cached I/O for the actual write.
 *
 * return value: 1: cache not available
 *               0: success, -1: I/O error
 */
int dcache_update_cache(int fd, void *buf, off64_t offset, size_t count)
{
	return dcache_update_rw(fd, buf, offset, count, true);
}

/* handles read into cache + read into buffer */
int dcache_read(int fd, void *buf, off64_t offset, size_t count)
{
	return dcache_update_rw(fd, buf, offset, count, false);
}

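/*
 * Typical caller pattern (see dev_read() below): try the cache first and
 * fall back to a plain lseek64() + read() when the cache is unavailable:
 *
 *	err = dcache_read(fd, buf, offset, len);
 *	if (err <= 0)
 *		return err;	[0: served from cache, -1: I/O error]
 *	[err == 1: cache unavailable, issue the direct read instead]
 */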
/*
 * IO interfaces
 */
int dev_read_version(void *buf, __u64 offset, size_t len)
{
	if (c.sparse_mode)
		return 0;
	if (lseek64(c.kd, (off64_t)offset, SEEK_SET) < 0)
		return -1;
	if (read(c.kd, buf, len) < 0)
		return -1;
	return 0;
}

#ifdef WITH_ANDROID
static int sparse_read_blk(__u64 block, int count, void *buf)
{
	int i;
	char *out = buf;
	__u64 cur_block;

	for (i = 0; i < count; ++i) {
		cur_block = block + i;
		/* check blocks before dereferencing it */
		if (blocks && blocks[cur_block])
			memcpy(out + (i * F2FS_BLKSIZE),
					blocks[cur_block], F2FS_BLKSIZE);
		else if (blocks)
			memset(out + (i * F2FS_BLKSIZE), 0, F2FS_BLKSIZE);
	}
	return 0;
}

static int sparse_write_blk(__u64 block, int count, const void *buf)
{
	int i;
	__u64 cur_block;
	const char *in = buf;

	for (i = 0; i < count; ++i) {
		cur_block = block + i;
		if (blocks[cur_block] == zeroed_block)
			blocks[cur_block] = NULL;
		if (!blocks[cur_block]) {
			blocks[cur_block] = calloc(1, F2FS_BLKSIZE);
			if (!blocks[cur_block])
				return -ENOMEM;
		}
		memcpy(blocks[cur_block], in + (i * F2FS_BLKSIZE),
				F2FS_BLKSIZE);
	}
	return 0;
}

static int sparse_write_zeroed_blk(__u64 block, int count)
{
	int i;
	__u64 cur_block;

	for (i = 0; i < count; ++i) {
		cur_block = block + i;
		if (blocks[cur_block])
			continue;
		blocks[cur_block] = zeroed_block;
	}
	return 0;
}

#ifdef SPARSE_CALLBACK_USES_SIZE_T
static int sparse_import_segment(void *UNUSED(priv), const void *data,
		size_t len, unsigned int block, unsigned int nr_blocks)
#else
static int sparse_import_segment(void *UNUSED(priv), const void *data, int len,
		unsigned int block, unsigned int nr_blocks)
#endif
{
	/* Ignore chunk headers, only write the data */
	if (!nr_blocks || len % F2FS_BLKSIZE)
		return 0;

	return sparse_write_blk(block, nr_blocks, data);
}

static int sparse_merge_blocks(uint64_t start, uint64_t num, int zero)
{
	char *buf;
	uint64_t i;

	if (zero) {
		blocks[start] = NULL;
		return sparse_file_add_fill(f2fs_sparse_file, 0x0,
					F2FS_BLKSIZE * num, start);
	}

	buf = calloc(num, F2FS_BLKSIZE);
	if (!buf) {
		fprintf(stderr, "failed to alloc %llu\n",
			(unsigned long long)num * F2FS_BLKSIZE);
		return -ENOMEM;
	}

	for (i = 0; i < num; i++) {
		memcpy(buf + i * F2FS_BLKSIZE, blocks[start + i], F2FS_BLKSIZE);
		free(blocks[start + i]);
		blocks[start + i] = NULL;
	}

	/* free_sparse_blocks will release this buf. */
	blocks[start] = buf;

	return sparse_file_add_data(f2fs_sparse_file, blocks[start],
					F2FS_BLKSIZE * num, start);
}
#else
static int sparse_read_blk(__u64 block, int count, void *buf) { return 0; }
static int sparse_write_blk(__u64 block, int count, const void *buf) { return 0; }
static int sparse_write_zeroed_blk(__u64 block, int count) { return 0; }
#endif

int dev_read(void *buf, __u64 offset, size_t len)
{
	int fd;
	int err;

	if (c.sparse_mode)
		return sparse_read_blk(offset / F2FS_BLKSIZE,
					len / F2FS_BLKSIZE, buf);

	fd = __get_device_fd(&offset);
	if (fd < 0)
		return fd;

	/* err = 1: cache not available, fall back to non-cached R/W */
	/* err = 0: success, err = -1: I/O error */
	err = dcache_read(fd, buf, (off64_t)offset, len);
	if (err <= 0)
		return err;
	if (lseek64(fd, (off64_t)offset, SEEK_SET) < 0)
		return -1;
	if (read(fd, buf, len) < 0)
		return -1;
	return 0;
}

#ifdef POSIX_FADV_WILLNEED
int dev_readahead(__u64 offset, size_t len)
#else
int dev_readahead(__u64 offset, size_t UNUSED(len))
#endif
{
	int fd = __get_device_fd(&offset);

	if (fd < 0)
		return fd;
#ifdef POSIX_FADV_WILLNEED
	return posix_fadvise(fd, offset, len, POSIX_FADV_WILLNEED);
#else
	return 0;
#endif
}

int dev_write(void *buf, __u64 offset, size_t len)
{
	int fd;

	if (c.dry_run)
		return 0;

	if (c.sparse_mode)
		return sparse_write_blk(offset / F2FS_BLKSIZE,
					len / F2FS_BLKSIZE, buf);

	fd = __get_device_fd(&offset);
	if (fd < 0)
		return fd;

	/*
	 * dcache_update_cache() only updates the cache; it does no I/O.
	 * Thus, even when it succeeds, we still need a normal non-cached
	 * I/O for the actual write.
	 */
	if (dcache_update_cache(fd, buf, (off64_t)offset, len) < 0)
		return -1;
	if (lseek64(fd, (off64_t)offset, SEEK_SET) < 0)
		return -1;
	if (write(fd, buf, len) < 0)
		return -1;
	return 0;
}

int dev_write_block(void *buf, __u64 blk_addr)
{
	return dev_write(buf, blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

int dev_write_dump(void *buf, __u64 offset, size_t len)
{
	if (lseek64(c.dump_fd, (off64_t)offset, SEEK_SET) < 0)
		return -1;
	if (write(c.dump_fd, buf, len) < 0)
		return -1;
	return 0;
}

int dev_fill(void *buf, __u64 offset, size_t len)
{
	int fd;

	if (c.sparse_mode)
		return sparse_write_zeroed_blk(offset / F2FS_BLKSIZE,
						len / F2FS_BLKSIZE);

	fd = __get_device_fd(&offset);
	if (fd < 0)
		return fd;

	/* Only allow fill to zero */
	if (*((__u8 *)buf))
		return -1;
	if (lseek64(fd, (off64_t)offset, SEEK_SET) < 0)
		return -1;
	if (write(fd, buf, len) < 0)
		return -1;
	return 0;
}

int dev_fill_block(void *buf, __u64 blk_addr)
{
	return dev_fill(buf, blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

int dev_read_block(void *buf, __u64 blk_addr)
{
	return dev_read(buf, blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

int dev_reada_block(__u64 blk_addr)
{
	return dev_readahead(blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

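/*
 * The *_block() helpers above operate on whole F2FS blocks: callers pass
 * a block address instead of a byte offset.  A hypothetical caller (for
 * illustration only; 'blkaddr' is not defined here) might do:
 *
 *	char buf[F2FS_BLKSIZE];
 *
 *	if (dev_read_block(buf, blkaddr) < 0)
 *		return -1;
 */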
int f2fs_fsync_device(void)
{
#ifndef ANDROID_WINDOWS_HOST
	int i;

	for (i = 0; i < c.ndevs; i++) {
		if (fsync(c.devices[i].fd) < 0) {
			MSG(0, "\tError: Could not conduct fsync!!!\n");
			return -1;
		}
	}
#endif
	return 0;
}

int f2fs_init_sparse_file(void)
{
#ifdef WITH_ANDROID
	if (c.func == MKFS) {
		f2fs_sparse_file = sparse_file_new(F2FS_BLKSIZE, c.device_size);
		if (!f2fs_sparse_file)
			return -1;
	} else {
		f2fs_sparse_file = sparse_file_import(c.devices[0].fd,
							true, false);
		if (!f2fs_sparse_file)
			return -1;

		c.device_size = sparse_file_len(f2fs_sparse_file, 0, 0);
		c.device_size &= (~((u_int64_t)(F2FS_BLKSIZE - 1)));
	}

	if (sparse_file_block_size(f2fs_sparse_file) != F2FS_BLKSIZE) {
		MSG(0, "\tError: Corrupted sparse file\n");
		return -1;
	}
	blocks_count = c.device_size / F2FS_BLKSIZE;
	blocks = calloc(blocks_count, sizeof(char *));
	if (!blocks) {
		MSG(0, "\tError: Calloc failed for blocks!!!\n");
		return -1;
	}

	zeroed_block = calloc(1, F2FS_BLKSIZE);
	if (!zeroed_block) {
		MSG(0, "\tError: Calloc failed for zeroed block!!!\n");
		return -1;
	}

	return sparse_file_foreach_chunk(f2fs_sparse_file, true, false,
				sparse_import_segment, NULL);
#else
	MSG(0, "\tError: Sparse mode is only supported on Android\n");
	return -1;
#endif
}

void f2fs_release_sparse_resource(void)
{
#ifdef WITH_ANDROID
	int j;

	if (c.sparse_mode) {
		if (f2fs_sparse_file != NULL) {
			sparse_file_destroy(f2fs_sparse_file);
			f2fs_sparse_file = NULL;
		}
		for (j = 0; j < blocks_count; j++)
			free(blocks[j]);
		free(blocks);
		blocks = NULL;
		free(zeroed_block);
		zeroed_block = NULL;
	}
#endif
}

#define MAX_CHUNK_SIZE		(1 * 1024 * 1024 * 1024ULL)
#define MAX_CHUNK_COUNT		(MAX_CHUNK_SIZE / F2FS_BLKSIZE)
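/*
 * Merged chunks are capped at 1GB (MAX_CHUNK_COUNT blocks).  The sparse
 * image format records chunk sizes in 32-bit fields, so capping chunks
 * well below 4GB (presumably the reason for the 1GB figure) avoids any
 * risk of overflowing them.
 */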
int f2fs_finalize_device(void)
{
	int i;
	int ret = 0;

#ifdef WITH_ANDROID
	if (c.sparse_mode) {
		int64_t chunk_start = (blocks[0] == NULL) ? -1 : 0;
		uint64_t j;

		if (c.func != MKFS) {
			sparse_file_destroy(f2fs_sparse_file);
			ret = ftruncate(c.devices[0].fd, 0);
			ASSERT(!ret);
			lseek(c.devices[0].fd, 0, SEEK_SET);
			f2fs_sparse_file = sparse_file_new(F2FS_BLKSIZE,
							c.device_size);
		}

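		/*
		 * Coalesce the staged blocks into sparse chunks: runs of
		 * data blocks are merged into data chunks of at most
		 * MAX_CHUNK_COUNT blocks, blocks marked with zeroed_block
		 * become fill chunks, and NULL entries are holes that end
		 * the current run.
		 */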
		for (j = 0; j < blocks_count; ++j) {
			if (chunk_start != -1) {
				if (j - chunk_start >= MAX_CHUNK_COUNT) {
					ret = sparse_merge_blocks(chunk_start,
							j - chunk_start, 0);
					ASSERT(!ret);
					chunk_start = -1;
				}
			}

			if (chunk_start == -1) {
				if (!blocks[j])
					continue;

				if (blocks[j] == zeroed_block) {
					ret = sparse_merge_blocks(j, 1, 1);
					ASSERT(!ret);
				} else {
					chunk_start = j;
				}
			} else {
				if (blocks[j] && blocks[j] != zeroed_block)
					continue;

				ret = sparse_merge_blocks(chunk_start,
						j - chunk_start, 0);
				ASSERT(!ret);

				if (blocks[j] == zeroed_block) {
					ret = sparse_merge_blocks(j, 1, 1);
					ASSERT(!ret);
				}

				chunk_start = -1;
			}
		}
		if (chunk_start != -1) {
			ret = sparse_merge_blocks(chunk_start,
						blocks_count - chunk_start, 0);
			ASSERT(!ret);
		}

		sparse_file_write(f2fs_sparse_file, c.devices[0].fd,
				/*gzip*/0, /*sparse*/1, /*crc*/0);

		f2fs_release_sparse_resource();
	}
#endif
	/*
	 * We should call fsync() to flush out all the dirty pages
	 * in the block device page cache.
	 */
	for (i = 0; i < c.ndevs; i++) {
#ifndef ANDROID_WINDOWS_HOST
		ret = fsync(c.devices[i].fd);
		if (ret < 0) {
			MSG(0, "\tError: Could not conduct fsync!!!\n");
			break;
		}
#endif
		ret = close(c.devices[i].fd);
		if (ret < 0) {
			MSG(0, "\tError: Failed to close device file!!!\n");
			break;
		}
		free(c.devices[i].path);
		free(c.devices[i].zone_cap_blocks);
	}
	close(c.kd);

	return ret;
}