/**
 * libf2fs.c
 *
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 * Copyright (c) 2019 Google Inc.
 *             http://www.google.com/
 * Copyright (c) 2020 Google Inc.
 *   Robin Hsu <robinhsu@google.com>
 *  : add quick-buffer for sload compression support
 *
 * Dual licensed under the GPL or LGPL version 2 licenses.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#ifdef HAVE_MNTENT_H
#include <mntent.h>
#endif
#include <time.h>
#ifdef HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
#ifdef HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif
#ifdef HAVE_LINUX_HDREG_H
#include <linux/hdreg.h>
#endif

#include <stdbool.h>
#include <assert.h>
#include <inttypes.h>
#include "f2fs_fs.h"

struct f2fs_configuration c;

#ifdef HAVE_SPARSE_SPARSE_H
#include <sparse/sparse.h>
struct sparse_file *f2fs_sparse_file;
static char **blocks;
uint64_t blocks_count;
static char *zeroed_block;
#endif

static int __get_device_fd(__u64 *offset)
{
	__u64 blk_addr = *offset >> F2FS_BLKSIZE_BITS;
	int i;

	for (i = 0; i < c.ndevs; i++) {
		if (c.devices[i].start_blkaddr <= blk_addr &&
				c.devices[i].end_blkaddr >= blk_addr) {
			*offset -=
				c.devices[i].start_blkaddr << F2FS_BLKSIZE_BITS;
			return c.devices[i].fd;
		}
	}
	return -1;
}

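/*
 * Illustrative sketch of the lookup above (assumed layout: two devices,
 * device 0 holding blocks 0..1023 and device 1 holding blocks 1024..2047):
 *
 *	__u64 offset = (__u64)1500 << F2FS_BLKSIZE_BITS;
 *	int fd = __get_device_fd(&offset);
 *
 * Block 1500 falls in device 1's range, so fd is c.devices[1].fd and
 * offset has been rebased to (1500 - 1024) << F2FS_BLKSIZE_BITS, i.e.
 * the byte offset within that device.
 */
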
/* ---------- dev_cache, Least Used First (LUF) policy  ------------------- */
/*
 * The least recently used block becomes the first victim to be replaced
 * when the maximum number of hash collisions is exceeded.
 */
static bool *dcache_valid; /* is the cached block valid? */
static off_t  *dcache_blk; /* which block it cached */
static uint64_t *dcache_lastused; /* last used ticks for cache entries */
static char *dcache_buf; /* cached block data */
static uint64_t dcache_usetick; /* current use tick */

static uint64_t dcache_raccess;
static uint64_t dcache_rhit;
static uint64_t dcache_rmiss;
static uint64_t dcache_rreplace;

static bool dcache_exit_registered = false;

/*
 *  Shadow config:
 *
 *  Active set of the configurations.
 *  The global configuration 'c.cache_config' is transferred here when
 *  dcache_init() is called.
 */
static dev_cache_config_t dcache_config = {0, 16, 1};
static bool dcache_initialized = false;

#define MIN_NUM_CACHE_ENTRY  1024L
#define MAX_MAX_HASH_COLLISION  16

static long dcache_relocate_offset0[] = {
	20, -20, 40, -40, 80, -80, 160, -160,
	320, -320, 640, -640, 1280, -1280, 2560, -2560,
};
static int dcache_relocate_offset[16];

static void dcache_print_statistics(void)
{
	long i;
	long useCnt;

	/* Number of used cache entries */
	useCnt = 0;
	for (i = 0; i < dcache_config.num_cache_entry; i++)
		if (dcache_valid[i])
			++useCnt;

	/*
	 *  c: number of cache entries
	 *  u: used entries
	 *  RA: number of read access blocks
	 *  CH: cache hit
	 *  CM: cache miss
	 *  Repl: read cache replaced
	 */
	printf ("\nc, u, RA, CH, CM, Repl=\n");
	printf ("%ld %ld %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			dcache_config.num_cache_entry,
			useCnt,
			dcache_raccess,
			dcache_rhit,
			dcache_rmiss,
			dcache_rreplace);
}

void dcache_release(void)
{
	if (!dcache_initialized)
		return;

	dcache_initialized = false;

	if (c.cache_config.dbg_en)
		dcache_print_statistics();

	if (dcache_blk != NULL)
		free(dcache_blk);
	if (dcache_lastused != NULL)
		free(dcache_lastused);
	if (dcache_buf != NULL)
		free(dcache_buf);
	if (dcache_valid != NULL)
		free(dcache_valid);
	dcache_config.num_cache_entry = 0;
	dcache_blk = NULL;
	dcache_lastused = NULL;
	dcache_buf = NULL;
	dcache_valid = NULL;
}

/* return 0 on success, -1 on failure */
static int dcache_alloc_all(long n)
{
	if (n <= 0)
		return -1;
	if ((dcache_blk = (off_t *) malloc(sizeof(off_t) * n)) == NULL
		|| (dcache_lastused = (uint64_t *)
				malloc(sizeof(uint64_t) * n)) == NULL
		|| (dcache_buf = (char *) malloc(F2FS_BLKSIZE * n)) == NULL
		|| (dcache_valid = (bool *) calloc(n, sizeof(bool))) == NULL)
	{
		dcache_release();
		return -1;
	}
	dcache_config.num_cache_entry = n;
	return 0;
}

static void dcache_relocate_init(void)
{
	int i;
	int n0 = (sizeof(dcache_relocate_offset0)
			/ sizeof(dcache_relocate_offset0[0]));
	int n = (sizeof(dcache_relocate_offset)
			/ sizeof(dcache_relocate_offset[0]));

	ASSERT(n == n0);
	for (i = 0; i < n && i < dcache_config.max_hash_collision; i++) {
		if (labs(dcache_relocate_offset0[i])
				> dcache_config.num_cache_entry / 2) {
			dcache_config.max_hash_collision = i;
			break;
		}
		dcache_relocate_offset[i] =
				dcache_config.num_cache_entry
				+ dcache_relocate_offset0[i];
	}
}

void dcache_init(void)
{
	long n;

	if (c.cache_config.num_cache_entry <= 0)
		return;

	/* release previous cache init, if any */
	dcache_release();

	dcache_blk = NULL;
	dcache_lastused = NULL;
	dcache_buf = NULL;
	dcache_valid = NULL;

	dcache_config = c.cache_config;

	n = max(MIN_NUM_CACHE_ENTRY, dcache_config.num_cache_entry);

	/*
	 * halve the allocation size until it succeeds, or the minimum
	 * cache size is reached
	 */
	while (dcache_alloc_all(n) != 0 && n != MIN_NUM_CACHE_ENTRY)
		n = max(MIN_NUM_CACHE_ENTRY, n / 2);

	/* must be the last: data dependent on num_cache_entry */
	dcache_relocate_init();
	dcache_initialized = true;

	if (!dcache_exit_registered) {
		dcache_exit_registered = true;
		atexit(dcache_release); /* auto release */
	}

	dcache_raccess = 0;
	dcache_rhit = 0;
	dcache_rmiss = 0;
	dcache_rreplace = 0;
}

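/*
 * Typical setup sketch (hypothetical values; the cache stays disabled
 * unless num_cache_entry is positive):
 *
 *	c.cache_config.num_cache_entry = 4096;   // entries wanted
 *	c.cache_config.max_hash_collision = 16;  // probe limit
 *	c.cache_config.dbg_en = true;            // print stats at exit
 *	dcache_init();
 *
 * dcache_read()/dcache_update_cache() also call dcache_init() lazily,
 * so an explicit call is only needed to apply a new configuration.
 */
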
static inline char *dcache_addr(long entry)
{
	return dcache_buf + F2FS_BLKSIZE * entry;
}

/* relocate on (n+1)-th collision */
static inline long dcache_relocate(long entry, int n)
{
	assert(dcache_config.num_cache_entry != 0);
	return (entry + dcache_relocate_offset[n]) %
			dcache_config.num_cache_entry;
}

static long dcache_find(__u64 blk)
{
	register long n = dcache_config.num_cache_entry;
	register unsigned m = dcache_config.max_hash_collision;
	long entry, least_used, target;
	unsigned try;

	assert(n > 0);
	target = least_used = entry = blk % n; /* simple modulo hash */

	for (try = 0; try < m; try++) {
		if (!dcache_valid[target] || dcache_blk[target] == blk)
			return target;  /* found target or empty cache slot */
		if (dcache_lastused[target] < dcache_lastused[least_used])
			least_used = target;
		target = dcache_relocate(entry, try); /* next target */
	}
	return least_used;  /* max search reached, return least used slot */
}

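/*
 * Worked example of the probe sequence (assuming 1024 cache entries and
 * the offset table above): for blk = 4100, the home slot is
 * 4100 % 1024 = 4.  On successive collisions dcache_relocate() yields
 * (4 + 1024 + 20) % 1024 = 24, then (4 + 1024 - 20) % 1024 = 1008,
 * then 44, 988, 84, ... until max_hash_collision probes are exhausted,
 * after which the least recently used slot among those visited is
 * returned for eviction.
 */
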
/* Physical read into cache */
static int dcache_io_read(long entry, __u64 offset, off_t blk)
{
	int fd = __get_device_fd(&offset);

	if (fd < 0)
		return fd;

	if (lseek(fd, offset, SEEK_SET) < 0) {
		MSG(0, "\n lseek fail.\n");
		return -1;
	}
	if (read(fd, dcache_buf + entry * F2FS_BLKSIZE, F2FS_BLKSIZE) < 0) {
		MSG(0, "\n read() fail.\n");
		return -1;
	}
	dcache_lastused[entry] = ++dcache_usetick;
	dcache_valid[entry] = true;
	dcache_blk[entry] = blk;
	return 0;
}

/*
 *  - Note: read and write are not symmetric:
 *       Reads must be done block by block, due to the nature of the cache:
 *           some blocks may be cached while others are not.
 *       Writes are always write-through, so they can be joined and issued
 *       once by the caller.  This function updates the cache for a write,
 *       but does not do the physical write itself; the caller is
 *       responsible for that.
 *  - Note: read and write are handled together here because they share
 *          the same structure for finding the relevant cache entries.
 *  - Return values:
 *       0: success
 *       1: cache not available (uninitialized)
 *      -1: error
 */
static int dcache_update_rw(void *buf, __u64 offset,
		size_t byte_count, bool is_write)
{
	__u64 blk, start;
	int addr_in_blk;

	if (!dcache_initialized)
		dcache_init(); /* auto initialize */

	if (!dcache_initialized)
		return 1; /* not available */

	blk = offset / F2FS_BLKSIZE;
	addr_in_blk = offset % F2FS_BLKSIZE;
	start = blk * F2FS_BLKSIZE;

	while (byte_count != 0) {
		size_t cur_size = min(byte_count,
				(size_t)(F2FS_BLKSIZE - addr_in_blk));
		long entry = dcache_find(blk);

		if (!is_write)
			++dcache_raccess;

		if (dcache_valid[entry] && dcache_blk[entry] == blk) {
			/* cache hit */
			if (is_write)  /* write: update cache */
				memcpy(dcache_addr(entry) + addr_in_blk,
					buf, cur_size);
			else
				++dcache_rhit;
		} else {
			/* cache miss */
			if (!is_write) {
				int err;
				++dcache_rmiss;
				if (dcache_valid[entry])
					++dcache_rreplace;
				/* read: physical I/O read into cache */
				err = dcache_io_read(entry, start, blk);
				if (err)
					return err;
			}
		}

		/* read: copy data from cache */
		/* write: nothing to do, since we don't do physical write. */
		if (!is_write)
			memcpy(buf, dcache_addr(entry) + addr_in_blk,
				cur_size);

		/* next block */
		++blk;
		buf += cur_size;
		start += F2FS_BLKSIZE;
		byte_count -= cur_size;
		addr_in_blk = 0;
	}
	return 0;
}

/*
 * dcache_update_cache() only updates the cache; it does no physical I/O.
 * Thus, even on success, the caller still needs a normal non-cached I/O
 * for the actual write.
 *
 * return value: 1: cache not available
 *               0: success, -1: I/O error
 */
int dcache_update_cache(void *buf, __u64 offset, size_t count)
{
	return dcache_update_rw(buf, offset, count, true);
}

/* handles read into cache + read into buffer  */
int dcache_read(void *buf, __u64 offset, size_t count)
{
	return dcache_update_rw(buf, offset, count, false);
}

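/*
 * Usage sketch for the two entry points (mirrors what dev_read() and
 * dev_write() below actually do; 'byte_off' is a hypothetical offset):
 *
 *	char blk[F2FS_BLKSIZE];
 *	int err = dcache_read(blk, byte_off, sizeof(blk));
 *
 *	if (err > 0)
 *		;		// cache unavailable: fall back to lseek()+read()
 *	else if (err < 0)
 *		return -1;	// I/O error
 *
 * On the write path, dcache_update_cache() keeps the cache coherent,
 * but the caller must still issue the physical write itself.
 */
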
/*
 * IO interfaces
 */
int dev_read_version(void *buf, __u64 offset, size_t len)
{
	if (c.sparse_mode)
		return 0;
	if (lseek(c.kd, (off_t)offset, SEEK_SET) < 0)
		return -1;
	if (read(c.kd, buf, len) < 0)
		return -1;
	return 0;
}

#ifdef HAVE_SPARSE_SPARSE_H
static int sparse_read_blk(__u64 block, int count, void *buf)
{
	int i;
	char *out = buf;
	__u64 cur_block;

	for (i = 0; i < count; ++i) {
		cur_block = block + i;
		if (blocks[cur_block])
			memcpy(out + (i * F2FS_BLKSIZE),
					blocks[cur_block], F2FS_BLKSIZE);
		else if (blocks)
			memset(out + (i * F2FS_BLKSIZE), 0, F2FS_BLKSIZE);
	}
	return 0;
}

static int sparse_write_blk(__u64 block, int count, const void *buf)
{
	int i;
	__u64 cur_block;
	const char *in = buf;

	for (i = 0; i < count; ++i) {
		cur_block = block + i;
		if (blocks[cur_block] == zeroed_block)
			blocks[cur_block] = NULL;
		if (!blocks[cur_block]) {
			blocks[cur_block] = calloc(1, F2FS_BLKSIZE);
			if (!blocks[cur_block])
				return -ENOMEM;
		}
		memcpy(blocks[cur_block], in + (i * F2FS_BLKSIZE),
				F2FS_BLKSIZE);
	}
	return 0;
}

static int sparse_write_zeroed_blk(__u64 block, int count)
{
	int i;
	__u64 cur_block;

	for (i = 0; i < count; ++i) {
		cur_block = block + i;
		if (blocks[cur_block])
			continue;
		blocks[cur_block] = zeroed_block;
	}
	return 0;
}

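/*
 * Design note with a small example: every zero-filled block aliases the
 * single shared 'zeroed_block' buffer rather than allocating its own
 * F2FS_BLKSIZE bytes.  sparse_write_blk() above un-aliases a slot before
 * writing real data to it:
 *
 *	sparse_write_zeroed_blk(10, 1);	// blocks[10] == zeroed_block
 *	sparse_write_blk(10, 1, buf);	// blocks[10] now a private copy
 */
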
#ifdef SPARSE_CALLBACK_USES_SIZE_T
static int sparse_import_segment(void *UNUSED(priv), const void *data,
		size_t len, unsigned int block, unsigned int nr_blocks)
#else
static int sparse_import_segment(void *UNUSED(priv), const void *data, int len,
		unsigned int block, unsigned int nr_blocks)
#endif
{
	/* Ignore chunk headers, only write the data */
	if (!nr_blocks || len % F2FS_BLKSIZE)
		return 0;

	return sparse_write_blk(block, nr_blocks, data);
}

static int sparse_merge_blocks(uint64_t start, uint64_t num, int zero)
{
	char *buf;
	uint64_t i;

	if (zero) {
		blocks[start] = NULL;
		return sparse_file_add_fill(f2fs_sparse_file, 0x0,
					F2FS_BLKSIZE * num, start);
	}

	buf = calloc(num, F2FS_BLKSIZE);
	if (!buf) {
		fprintf(stderr, "failed to alloc %llu\n",
			(unsigned long long)num * F2FS_BLKSIZE);
		return -ENOMEM;
	}

	for (i = 0; i < num; i++) {
		memcpy(buf + i * F2FS_BLKSIZE, blocks[start + i], F2FS_BLKSIZE);
		free(blocks[start + i]);
		blocks[start + i] = NULL;
	}

	/* free_sparse_blocks will release this buf. */
	blocks[start] = buf;

	return sparse_file_add_data(f2fs_sparse_file, blocks[start],
					F2FS_BLKSIZE * num, start);
}
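
/*
 * Sketch of how a run of populated blocks becomes one sparse chunk
 * (hypothetical run): if blocks 100..163 are all non-NULL and distinct,
 *
 *	ret = sparse_merge_blocks(100, 64, 0);
 *
 * copies the 64 per-block buffers into one 64 * F2FS_BLKSIZE buffer,
 * frees the originals, parks the merged buffer in blocks[100], and
 * registers it via sparse_file_add_data().  A run of zeroed blocks
 * would instead use sparse_merge_blocks(start, num, 1), which maps to
 * sparse_file_add_fill().
 */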
#else
static int sparse_read_blk(__u64 UNUSED(block),
				int UNUSED(count), void *UNUSED(buf))
{
	return 0;
}

static int sparse_write_blk(__u64 UNUSED(block),
				int UNUSED(count), const void *UNUSED(buf))
{
	return 0;
}

static int sparse_write_zeroed_blk(__u64 UNUSED(block), int UNUSED(count))
{
	return 0;
}
#endif

int dev_read(void *buf, __u64 offset, size_t len)
{
	int fd;
	int err;

	if (c.sparse_mode)
		return sparse_read_blk(offset / F2FS_BLKSIZE,
					len / F2FS_BLKSIZE, buf);

	/* err = 1: cache not available, fall back to non-cache R/W */
	/* err = 0: success, err = -1: I/O error */
	err = dcache_read(buf, offset, len);
	if (err <= 0)
		return err;

	fd = __get_device_fd(&offset);
	if (fd < 0)
		return fd;
	if (lseek(fd, (off_t)offset, SEEK_SET) < 0)
		return -1;
	if (read(fd, buf, len) < 0)
		return -1;
	return 0;
}

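/*
 * Read-path sketch: callers treat dev_read() as a flat byte interface
 * regardless of sparse mode, caching, or device count.  For example,
 * reading one block at block address 'blkaddr' (exactly what
 * dev_read_block() below does):
 *
 *	char buf[F2FS_BLKSIZE];
 *
 *	if (dev_read(buf, (__u64)blkaddr << F2FS_BLKSIZE_BITS,
 *						F2FS_BLKSIZE) < 0)
 *		return -1;
 */
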
#ifdef POSIX_FADV_WILLNEED
int dev_readahead(__u64 offset, size_t len)
#else
int dev_readahead(__u64 offset, size_t UNUSED(len))
#endif
{
	int fd = __get_device_fd(&offset);

	if (fd < 0)
		return fd;
#ifdef POSIX_FADV_WILLNEED
	return posix_fadvise(fd, offset, len, POSIX_FADV_WILLNEED);
#else
	return 0;
#endif
}

int dev_write(void *buf, __u64 offset, size_t len)
{
	int fd;

	if (c.dry_run)
		return 0;

	if (c.sparse_mode)
		return sparse_write_blk(offset / F2FS_BLKSIZE,
					len / F2FS_BLKSIZE, buf);

	/*
	 * dcache_update_cache() only updates the cache; it does no I/O.
	 * Thus, even on success, a normal non-cached write is still
	 * needed for the actual data.
	 */
	if (dcache_update_cache(buf, offset, len) < 0)
		return -1;

	fd = __get_device_fd(&offset);
	if (fd < 0)
		return fd;

	if (lseek(fd, (off_t)offset, SEEK_SET) < 0)
		return -1;
	if (write(fd, buf, len) < 0)
		return -1;
	c.need_fsync = true;
	return 0;
}

int dev_write_block(void *buf, __u64 blk_addr)
{
	return dev_write(buf, blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

int dev_write_dump(void *buf, __u64 offset, size_t len)
{
	if (lseek(c.dump_fd, (off_t)offset, SEEK_SET) < 0)
		return -1;
	if (write(c.dump_fd, buf, len) < 0)
		return -1;
	return 0;
}

int dev_fill(void *buf, __u64 offset, size_t len)
{
	int fd;

	if (c.sparse_mode)
		return sparse_write_zeroed_blk(offset / F2FS_BLKSIZE,
						len / F2FS_BLKSIZE);

	fd = __get_device_fd(&offset);
	if (fd < 0)
		return fd;

	/* Only allow fill to zero */
	if (*((__u8 *)buf))
		return -1;
	if (lseek(fd, (off_t)offset, SEEK_SET) < 0)
		return -1;
	if (write(fd, buf, len) < 0)
		return -1;
	c.need_fsync = true;
	return 0;
}

int dev_fill_block(void *buf, __u64 blk_addr)
{
	return dev_fill(buf, blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

int dev_read_block(void *buf, __u64 blk_addr)
{
	return dev_read(buf, blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

int dev_reada_block(__u64 blk_addr)
{
	return dev_readahead(blk_addr << F2FS_BLKSIZE_BITS, F2FS_BLKSIZE);
}

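/*
 * Usage sketch: the block helpers above are thin wrappers that turn a
 * block address into the (byte offset, length) pair the dev_* layer
 * expects.  A scan loop might hint ahead and then read:
 *
 *	dev_reada_block(blkaddr + 1);		// hint: next block soon
 *	if (dev_read_block(buf, blkaddr))	// read the current block
 *		return -1;
 */
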
int f2fs_fsync_device(void)
{
#ifdef HAVE_FSYNC
	int i;

	if (!c.need_fsync)
		return 0;

	for (i = 0; i < c.ndevs; i++) {
		if (fsync(c.devices[i].fd) < 0) {
			MSG(0, "\tError: Could not conduct fsync!!!\n");
			return -1;
		}
	}
#endif
	return 0;
}

int f2fs_init_sparse_file(void)
{
#ifdef HAVE_SPARSE_SPARSE_H
	if (c.func == MKFS) {
		f2fs_sparse_file = sparse_file_new(F2FS_BLKSIZE, c.device_size);
		if (!f2fs_sparse_file)
			return -1;
	} else {
		f2fs_sparse_file = sparse_file_import(c.devices[0].fd,
							true, false);
		if (!f2fs_sparse_file)
			return -1;

		c.blksize = sparse_file_block_size(f2fs_sparse_file);
		c.blksize_bits = log_base_2(c.blksize);
		if (c.blksize_bits == -1) {
			MSG(0, "\tError: Sparse file blocksize not a power of 2.\n");
			return -1;
		}

		c.device_size = sparse_file_len(f2fs_sparse_file, 0, 0);
		c.device_size &= (~((uint64_t)(F2FS_BLKSIZE - 1)));
	}

	blocks_count = c.device_size / F2FS_BLKSIZE;
	blocks = calloc(blocks_count, sizeof(char *));
	if (!blocks) {
		MSG(0, "\tError: Calloc Failed for blocks!!!\n");
		return -1;
	}

	zeroed_block = calloc(1, F2FS_BLKSIZE);
	if (!zeroed_block) {
		MSG(0, "\tError: Calloc Failed for zeroed block!!!\n");
		return -1;
	}

	return sparse_file_foreach_chunk(f2fs_sparse_file, true, false,
				sparse_import_segment, NULL);
#else
	MSG(0, "\tError: Sparse mode is only supported for Android\n");
	return -1;
#endif
}

void f2fs_release_sparse_resource(void)
{
#ifdef HAVE_SPARSE_SPARSE_H
	int j;

	if (c.sparse_mode) {
		if (f2fs_sparse_file != NULL) {
			sparse_file_destroy(f2fs_sparse_file);
			f2fs_sparse_file = NULL;
		}
		for (j = 0; j < blocks_count; j++)
			free(blocks[j]);
		free(blocks);
		blocks = NULL;
		free(zeroed_block);
		zeroed_block = NULL;
	}
#endif
}

#define MAX_CHUNK_SIZE		(1 * 1024 * 1024 * 1024ULL)
#define MAX_CHUNK_COUNT		(MAX_CHUNK_SIZE / F2FS_BLKSIZE)
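/*
 * Worked numbers (assuming the common 4 KiB block size): MAX_CHUNK_SIZE
 * is 1 GiB, so MAX_CHUNK_COUNT = 2^30 / 2^12 = 262144 blocks; runs of
 * populated blocks longer than this are flushed in 1 GiB chunks by the
 * merge loop below.
 */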
int f2fs_finalize_device(void)
{
	int i;
	int ret = 0;

#ifdef HAVE_SPARSE_SPARSE_H
	if (c.sparse_mode) {
		int64_t chunk_start = (blocks[0] == NULL) ? -1 : 0;
		uint64_t j;

		if (c.func != MKFS) {
			sparse_file_destroy(f2fs_sparse_file);
			ret = ftruncate(c.devices[0].fd, 0);
			ASSERT(!ret);
			lseek(c.devices[0].fd, 0, SEEK_SET);
			f2fs_sparse_file = sparse_file_new(F2FS_BLKSIZE,
							c.device_size);
		}

		for (j = 0; j < blocks_count; ++j) {
			if (chunk_start != -1) {
				if (j - chunk_start >= MAX_CHUNK_COUNT) {
					ret = sparse_merge_blocks(chunk_start,
							j - chunk_start, 0);
					ASSERT(!ret);
					chunk_start = -1;
				}
			}

			if (chunk_start == -1) {
				if (!blocks[j])
					continue;

				if (blocks[j] == zeroed_block) {
					ret = sparse_merge_blocks(j, 1, 1);
					ASSERT(!ret);
				} else {
					chunk_start = j;
				}
			} else {
				if (blocks[j] && blocks[j] != zeroed_block)
					continue;

				ret = sparse_merge_blocks(chunk_start,
						j - chunk_start, 0);
				ASSERT(!ret);

				if (blocks[j] == zeroed_block) {
					ret = sparse_merge_blocks(j, 1, 1);
					ASSERT(!ret);
				}

				chunk_start = -1;
			}
		}
		if (chunk_start != -1) {
			ret = sparse_merge_blocks(chunk_start,
						blocks_count - chunk_start, 0);
			ASSERT(!ret);
		}

		sparse_file_write(f2fs_sparse_file, c.devices[0].fd,
				/*gzip*/0, /*sparse*/1, /*crc*/0);

		f2fs_release_sparse_resource();
	}
#endif
	/*
	 * We should call fsync() to flush out all the dirty pages
	 * in the block device page cache.
	 */
	for (i = 0; i < c.ndevs; i++) {
#ifdef HAVE_FSYNC
		if (c.need_fsync) {
			ret = fsync(c.devices[i].fd);
			if (ret < 0) {
				MSG(0, "\tError: Could not conduct fsync!!!\n");
				break;
			}
		}
#endif
		ret = close(c.devices[i].fd);
		if (ret < 0) {
			MSG(0, "\tError: Failed to close device file!!!\n");
			break;
		}
		free(c.devices[i].path);
		free(c.devices[i].zone_cap_blocks);
	}
	close(c.kd);

	return ret;
}