• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**
2  * segment.c
3  *
4  * Many parts of codes are copied from Linux kernel/fs/f2fs.
5  *
6  * Copyright (C) 2015 Huawei Ltd.
 * Written by:
8  *   Hou Pengyang <houpengyang@huawei.com>
9  *   Liu Shuoran <liushuoran@huawei.com>
10  *   Jaegeuk Kim <jaegeuk@kernel.org>
11  * Copyright (c) 2020 Google Inc.
12  *   Robin Hsu <robinhsu@google.com>
13  *  : add sload compression support
14  *
15  * This program is free software; you can redistribute it and/or modify
16  * it under the terms of the GNU General Public License version 2 as
17  * published by the Free Software Foundation.
18  */
19 #include "fsck.h"
20 #include "node.h"
21 #include "quotaio.h"
22 
/*
 * Reserve one free block in the main area for the given summary @type.
 *
 * On entry *to holds the current block address: NULL_ADDR means a brand
 * new allocation (space accounting is charged), anything else means the
 * caller is rewriting an already-counted block.  On success *to is set
 * to the reserved block address, the segment entry / bitmaps are marked
 * valid, and the SSA summary entry is written.
 *
 * Returns 0 on success, or -ENOSPC when the image is out of space.
 */
int reserve_new_block(struct f2fs_sb_info *sbi, block_t *to,
			struct f2fs_summary *sum, int type, bool is_inode)
{
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	struct seg_entry *se;
	u64 blkaddr, offset;
	u64 old_blkaddr = *to;
	bool is_node = IS_NODESEG(type);
	int left = 0;

	/* Space check only for fresh allocations; rewrites reuse a slot. */
	if (old_blkaddr == NULL_ADDR) {
		if (c.func == FSCK) {
			if (fsck->chk.valid_blk_cnt >= sbi->user_block_count) {
				ERR_MSG("Not enough space\n");
				return -ENOSPC;
			}
			if (is_node && fsck->chk.valid_node_cnt >=
						sbi->total_node_count) {
				ERR_MSG("Not enough space for node block\n");
				return -ENOSPC;
			}
		} else {
			if (sbi->total_valid_block_count >=
						sbi->user_block_count) {
				ERR_MSG("Not enough space\n");
				return -ENOSPC;
			}
			if (is_node && sbi->total_valid_node_count >=
						sbi->total_node_count) {
				ERR_MSG("Not enough space for node block\n");
				return -ENOSPC;
			}
		}
	}

	blkaddr = SM_I(sbi)->main_blkaddr;

	/*
	 * RO-feature images use a single HOT log per kind: nodes are
	 * searched backwards from the end of the main area (left = 1),
	 * data forwards from its start.
	 */
	if (le32_to_cpu(sbi->raw_super->feature) & F2FS_FEATURE_RO) {
		if (IS_NODESEG(type)) {
			type = CURSEG_HOT_NODE;
			blkaddr = __end_block_addr(sbi);
			left = 1;
		} else if (IS_DATASEG(type)) {
			type = CURSEG_HOT_DATA;
			blkaddr = SM_I(sbi)->main_blkaddr;
			left = 0;
		}
	}

	if (find_next_free_block(sbi, &blkaddr, left, type, false)) {
		ERR_MSG("Can't find free block");
		ASSERT(0);
	}

	/* Mark the block valid in its segment entry and bitmaps. */
	se = get_seg_entry(sbi, GET_SEGNO(sbi, blkaddr));
	offset = OFFSET_IN_SEG(sbi, blkaddr);
	se->type = se->orig_type = type;
	if (se->valid_blocks == 0)
		SM_I(sbi)->free_segments--;	/* segment leaves the free pool */
	se->valid_blocks++;
	f2fs_set_bit(offset, (char *)se->cur_valid_map);
	if (need_fsync_data_record(sbi)) {
		se->ckpt_type = type;
		se->ckpt_valid_blocks++;
		f2fs_set_bit(offset, (char *)se->ckpt_valid_map);
	}
	if (c.func == FSCK) {
		f2fs_set_main_bitmap(sbi, blkaddr, type);
		f2fs_set_sit_bitmap(sbi, blkaddr);
	}

	/* Charge global (and fsck-side) usage counters for new blocks. */
	if (old_blkaddr == NULL_ADDR) {
		sbi->total_valid_block_count++;
		if (is_node) {
			sbi->total_valid_node_count++;
			if (is_inode)
				sbi->total_valid_inode_count++;
		}
		if (c.func == FSCK) {
			fsck->chk.valid_blk_cnt++;
			if (is_node) {
				fsck->chk.valid_nat_entry_cnt++;
				fsck->chk.valid_node_cnt++;
				if (is_inode)
					fsck->chk.valid_inode_cnt++;
			}
		}
	}
	se->dirty = 1;

	/* read/write SSA */
	*to = (block_t)blkaddr;
	update_sum_entry(sbi, *to, sum);

	return 0;
}
119 
/*
 * Allocate (or re-reserve) the data block addressed by dn->ofs_in_node
 * of dn->node_blk.  Zeroes the caller-supplied @block buffer and stores
 * the reserved address in dn->data_blkaddr and the dnode slot.
 *
 * Returns 0 on success, or the error from reserve_new_block()
 * (c.alloc_failed is set in that case).
 */
int new_data_block(struct f2fs_sb_info *sbi, void *block,
				struct dnode_of_data *dn, int type)
{
	/* 'sb' looks unused but is referenced by the get_sb() macro below. */
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_summary sum;
	struct node_info ni;
	unsigned int blkaddr = datablock_addr(dn->node_blk, dn->ofs_in_node);
	int ret;

	/* RO images keep all data in the single HOT_DATA log. */
	if ((get_sb(feature) & F2FS_FEATURE_RO) &&
					type != CURSEG_HOT_DATA)
		type = CURSEG_HOT_DATA;

	ASSERT(dn->node_blk);
	memset(block, 0, F2FS_BLKSIZE);

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	dn->data_blkaddr = blkaddr;
	ret = reserve_new_block(sbi, &dn->data_blkaddr, &sum, type, 0);
	if (ret) {
		c.alloc_failed = 1;
		return ret;
	}

	/* NULL_ADDR: brand new block, charge i_blocks; NEW_ADDR: already
	 * counted, only mark the inode dirty. */
	if (blkaddr == NULL_ADDR)
		inc_inode_blocks(dn);
	else if (blkaddr == NEW_ADDR)
		dn->idirty = 1;
	set_data_blkaddr(dn);
	return 0;
}
153 
f2fs_quota_size(struct quota_file * qf)154 u64 f2fs_quota_size(struct quota_file *qf)
155 {
156 	struct node_info ni;
157 	struct f2fs_node *inode;
158 	u64 filesize;
159 
160 	inode = (struct f2fs_node *) calloc(F2FS_BLKSIZE, 1);
161 	ASSERT(inode);
162 
163 	/* Read inode */
164 	get_node_info(qf->sbi, qf->ino, &ni);
165 	ASSERT(dev_read_block(inode, ni.blk_addr) >= 0);
166 	ASSERT(S_ISREG(le16_to_cpu(inode->i.i_mode)));
167 
168 	filesize = le64_to_cpu(inode->i.i_size);
169 	free(inode);
170 	return filesize;
171 }
172 
/*
 * Read up to @count bytes of file @ino into @buffer, starting at byte
 * @offset.  The count is clamped to i_size, and copying stops early at
 * a hole (NULL_ADDR) or a not-yet-written block (NEW_ADDR).
 *
 * Returns the number of bytes actually copied into @buffer.
 */
u64 f2fs_read(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
					u64 count, pgoff_t offset)
{
	struct dnode_of_data dn;
	struct node_info ni;
	struct f2fs_node *inode;
	char *blk_buffer;
	u64 filesize;
	u64 off_in_blk;
	u64 len_in_blk;
	u64 read_count;
	u64 remained_blkentries;
	block_t blkaddr;
	void *index_node = NULL;

	memset(&dn, 0, sizeof(dn));

	/* One allocation: bounce buffer in block 0, inode copy in block 1. */
	blk_buffer = calloc(F2FS_BLKSIZE, 2);
	ASSERT(blk_buffer);
	inode = (struct f2fs_node*)(blk_buffer + F2FS_BLKSIZE);

	/* Read inode; directories and symlinks are not handled here. */
	get_node_info(sbi, ino, &ni);
	ASSERT(dev_read_block(inode, ni.blk_addr) >= 0);
	ASSERT(!S_ISDIR(le16_to_cpu(inode->i.i_mode)));
	ASSERT(!S_ISLNK(le16_to_cpu(inode->i.i_mode)));

	/* Adjust count with file length. */
	filesize = le64_to_cpu(inode->i.i_size);
	if (offset > filesize)
		count = 0;
	else if (count + offset > filesize)
		count = filesize - offset;

	/* Main loop for file blocks */
	read_count = remained_blkentries = 0;
	while (count > 0) {
		/* Look up the dnode covering @offset when exhausted. */
		if (remained_blkentries == 0) {
			set_new_dnode(&dn, inode, NULL, ino);
			get_dnode_of_data(sbi, &dn, F2FS_BYTES_TO_BLK(offset),
					LOOKUP_NODE);
			if (index_node)
				free(index_node);
			/* Retain ownership of a separately-read index node;
			 * NULL when the inode block itself is the dnode. */
			index_node = (dn.node_blk == dn.inode_blk) ?
							NULL : dn.node_blk;
			/* NOTE(review): unlike f2fs_write_ex() this does not
			 * subtract dn.ofs_in_node — presumably fine because
			 * the loop breaks at the first hole; verify. */
			remained_blkentries = ADDRS_PER_PAGE(sbi,
						dn.node_blk, dn.inode_blk);
		}
		ASSERT(remained_blkentries > 0);

		blkaddr = datablock_addr(dn.node_blk, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR)
			break;	/* hole or unwritten block: stop copying */

		off_in_blk = offset % F2FS_BLKSIZE;
		len_in_blk = F2FS_BLKSIZE - off_in_blk;
		if (len_in_blk > count)
			len_in_blk = count;

		/* Read data from single block. */
		if (len_in_blk < F2FS_BLKSIZE) {
			/* Partial block: bounce through blk_buffer. */
			ASSERT(dev_read_block(blk_buffer, blkaddr) >= 0);
			memcpy(buffer, blk_buffer + off_in_blk, len_in_blk);
		} else {
			/* Direct read */
			ASSERT(dev_read_block(buffer, blkaddr) >= 0);
		}

		offset += len_in_blk;
		count -= len_in_blk;
		buffer += len_in_blk;
		read_count += len_in_blk;

		dn.ofs_in_node++;
		remained_blkentries--;
	}
	if (index_node)
		free(index_node);
	free(blk_buffer);

	return read_count;
}
256 
/*
 * Do not call this function directly.  Instead, call one of the following:
 *     u64 f2fs_write();
 *     u64 f2fs_write_compress_data();
 *     u64 f2fs_write_addrtag();
 *
 * For WR_NORMAL / WR_COMPRESS_DATA this writes @count bytes from
 * @buffer at byte @offset of file @ino, allocating dnodes and data
 * blocks as needed (i_size is grown for WR_NORMAL only).  For the other
 * addr_type values it stores the tag value into a single dnode slot
 * instead of writing payload.  Returns the number of bytes written
 * (0 for tag writes or when the first dnode lookup fails).
 */
static u64 f2fs_write_ex(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
		u64 count, pgoff_t offset, enum wr_addr_type addr_type)
{
	struct dnode_of_data dn;
	struct node_info ni;
	struct f2fs_node *inode;
	char *blk_buffer;
	void *wbuf;
	u64 off_in_blk;
	u64 len_in_blk;
	u64 written_count;
	u64 remained_blkentries;
	block_t blkaddr;
	void* index_node = NULL;
	int idirty = 0;
	int err, ret;
	bool datablk_alloced = false;
	/* Only WR_NORMAL and WR_COMPRESS_DATA carry payload bytes. */
	bool has_data = (addr_type == WR_NORMAL
			|| addr_type == WR_COMPRESS_DATA);

	if (count == 0)
		return 0;

	/*
	 * Enforce calling from f2fs_write(), f2fs_write_compress_data(),
	 * and f2fs_write_addrtag().  Also check the call is well-formed:
	 * data writes require a buffer, tag writes must not pass one and
	 * must be block-aligned.
	 */
	ASSERT((!has_data && buffer == NULL) || (has_data && buffer != NULL));
	if (addr_type != WR_NORMAL)
		ASSERT(offset % F2FS_BLKSIZE == 0); /* block boundary only */

	/* Memory allocation for block buffer and inode. */
	blk_buffer = calloc(F2FS_BLKSIZE, 2);
	ASSERT(blk_buffer);
	inode = (struct f2fs_node*)(blk_buffer + F2FS_BLKSIZE);

	/* Read inode */
	get_node_info(sbi, ino, &ni);
	ASSERT(dev_read_block(inode, ni.blk_addr) >= 0);
	ASSERT(!S_ISDIR(le16_to_cpu(inode->i.i_mode)));
	ASSERT(!S_ISLNK(le16_to_cpu(inode->i.i_mode)));

	/* Main loop for file blocks */
	written_count = remained_blkentries = 0;
	while (count > 0) {
		/* Move to the dnode covering @offset, allocating missing
		 * index/dnode blocks on the way (ALLOC_NODE). */
		if (remained_blkentries == 0) {
			set_new_dnode(&dn, inode, NULL, ino);
			err = get_dnode_of_data(sbi, &dn,
					F2FS_BYTES_TO_BLK(offset), ALLOC_NODE);
			if (err)
				break;
			idirty |= dn.idirty;
			free(index_node);
			/* Keep ownership of a separately-read index node. */
			index_node = (dn.node_blk == dn.inode_blk) ?
					NULL : dn.node_blk;
			remained_blkentries = ADDRS_PER_PAGE(sbi,
					dn.node_blk, dn.inode_blk) -
					dn.ofs_in_node;
		}
		ASSERT(remained_blkentries > 0);

		/* Tag-only write: stamp the tag into this slot and stop. */
		if (!has_data) {
			dn.data_blkaddr = addr_type;
			set_data_blkaddr(&dn);
			idirty |= dn.idirty;
			if (dn.ndirty) {
				/* Freshly allocated node blocks may be
				 * written in place; existing ones go through
				 * update_block() (zoned-device safe). */
				ret = dn.alloced ? dev_write_block(dn.node_blk,
					dn.node_blkaddr) :
					update_block(sbi, dn.node_blk,
					&dn.node_blkaddr, NULL);
				ASSERT(ret >= 0);
			}
			written_count = 0;
			break;
		}

		datablk_alloced = false;
		blkaddr = datablock_addr(dn.node_blk, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
			err = new_data_block(sbi, blk_buffer,
						&dn, CURSEG_WARM_DATA);
			if (err)
				break;
			blkaddr = dn.data_blkaddr;
			idirty |= dn.idirty;
			datablk_alloced = true;
		}

		off_in_blk = offset % F2FS_BLKSIZE;
		len_in_blk = F2FS_BLKSIZE - off_in_blk;
		if (len_in_blk > count)
			len_in_blk = count;

		/* Write data to single block. */
		if (len_in_blk < F2FS_BLKSIZE) {
			/* Partial block: read-modify-write via bounce buf. */
			ASSERT(dev_read_block(blk_buffer, blkaddr) >= 0);
			memcpy(blk_buffer + off_in_blk, buffer, len_in_blk);
			wbuf = blk_buffer;
		} else {
			/* Direct write */
			wbuf = buffer;
		}

		if (c.zoned_model == F2FS_ZONED_HM) {
			if (datablk_alloced) {
				ret = dev_write_block(wbuf, blkaddr);
			} else {
				/* Zoned host-managed device: relocate the
				 * block instead of overwriting in place. */
				ret = update_block(sbi, wbuf, &blkaddr,
						dn.node_blk);
				if (dn.inode_blk == dn.node_blk)
					idirty = 1;
				else
					dn.ndirty = 1;
			}
		} else {
			ret = dev_write_block(wbuf, blkaddr);
		}
		ASSERT(ret >= 0);

		offset += len_in_blk;
		count -= len_in_blk;
		buffer += len_in_blk;
		written_count += len_in_blk;

		dn.ofs_in_node++;
		/* Flush the dnode when leaving it or when writing is done. */
		if ((--remained_blkentries == 0 || count == 0) && (dn.ndirty)) {
			ret = dn.alloced ?
				dev_write_block(dn.node_blk, dn.node_blkaddr) :
				update_block(sbi, dn.node_blk, &dn.node_blkaddr, NULL);
			ASSERT(ret >= 0);
		}
	}

	/* Grow i_size for normal writes that passed the old end of file. */
	if (addr_type == WR_NORMAL && offset > le64_to_cpu(inode->i.i_size)) {
		inode->i.i_size = cpu_to_le64(offset);
		idirty = 1;
	}
	if (idirty) {
		/* Re-fetch NAT info: the inode block may have moved. */
		get_node_info(sbi, ino, &ni);
		ASSERT(inode == dn.inode_blk);
		ASSERT(update_inode(sbi, inode, &ni.blk_addr) >= 0);
	}

	free(index_node);
	free(blk_buffer);

	return written_count;
}
411 
/* Write @count payload bytes from @buffer into file @ino at byte @offset. */
u64 f2fs_write(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
					u64 count, pgoff_t offset)
{
	return f2fs_write_ex(sbi, ino, buffer, count, offset, WR_NORMAL);
}
417 
/* Write @count bytes of compressed payload for file @ino at byte @offset
 * (does not grow i_size, unlike f2fs_write()). */
u64 f2fs_write_compress_data(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
					u64 count, pgoff_t offset)
{
	return f2fs_write_ex(sbi, ino, buffer, count, offset, WR_COMPRESS_DATA);
}
423 
f2fs_write_addrtag(struct f2fs_sb_info * sbi,nid_t ino,pgoff_t offset,unsigned int addrtag)424 u64 f2fs_write_addrtag(struct f2fs_sb_info *sbi, nid_t ino, pgoff_t offset,
425 		unsigned int addrtag)
426 {
427 	ASSERT(addrtag == COMPRESS_ADDR || addrtag == NEW_ADDR
428 			|| addrtag == NULL_ADDR);
429 	return f2fs_write_ex(sbi, ino, NULL, F2FS_BLKSIZE, offset, addrtag);
430 }
431 
432 /* This function updates only inode->i.i_size */
f2fs_filesize_update(struct f2fs_sb_info * sbi,nid_t ino,u64 filesize)433 void f2fs_filesize_update(struct f2fs_sb_info *sbi, nid_t ino, u64 filesize)
434 {
435 	struct node_info ni;
436 	struct f2fs_node *inode;
437 
438 	inode = calloc(F2FS_BLKSIZE, 1);
439 	ASSERT(inode);
440 	get_node_info(sbi, ino, &ni);
441 
442 	ASSERT(dev_read_block(inode, ni.blk_addr) >= 0);
443 	ASSERT(!S_ISDIR(le16_to_cpu(inode->i.i_mode)));
444 	ASSERT(!S_ISLNK(le16_to_cpu(inode->i.i_mode)));
445 
446 	inode->i.i_size = cpu_to_le64(filesize);
447 
448 	ASSERT(update_inode(sbi, inode, &ni.blk_addr) >= 0);
449 	free(inode);
450 }
451 
#define MAX_BULKR_RETRY 5
/*
 * Read exactly @rsize bytes from @fd into @rbuf, retrying on EINTR and
 * continuing across short reads.
 *
 * Returns the number of bytes read (less than @rsize only when end of
 * file is reached first), or -1 on a hard read error.  When @eof is
 * non-NULL, *eof is set to true iff end-of-file terminated the read.
 *
 * Fixes vs. previous version: the destination pointer now advances
 * across short reads (earlier code re-read into the start of @rbuf,
 * corrupting previously read data), and *eof is initialized even for
 * the rsize == 0 early return.
 */
int bulkread(int fd, void *rbuf, size_t rsize, bool *eof)
{
	int retry = MAX_BULKR_RETRY;
	ssize_t cur = -1;	/* read() returns ssize_t */
	int total = 0;

	if (eof != NULL)
		*eof = false;	/* keep *eof defined on every path */

	if (!rsize)
		return 0;

	while (rsize && (cur = read(fd, (char *)rbuf + total, rsize)) != 0) {
		if (cur == -1) {
			if (errno == EINTR && retry--)
				continue;
			return -1;
		}
		retry = MAX_BULKR_RETRY;

		rsize -= cur;
		total += cur;
	}
	if (eof != NULL)
		*eof = (cur == 0);	/* cur was assigned: rsize > 0 here */
	return total;
}
479 
f2fs_fix_mutable(struct f2fs_sb_info * sbi,nid_t ino,pgoff_t offset,unsigned int compressed)480 u64 f2fs_fix_mutable(struct f2fs_sb_info *sbi, nid_t ino, pgoff_t offset,
481 		unsigned int compressed)
482 {
483 	unsigned int i;
484 	u64 wlen;
485 
486 	if (c.compress.readonly)
487 		return 0;
488 
489 	for (i = 0; i < compressed - 1; i++) {
490 		wlen = f2fs_write_addrtag(sbi, ino,
491 				offset + (i << F2FS_BLKSIZE_BITS), NEW_ADDR);
492 		if (wlen)
493 			return wlen;
494 	}
495 	return 0;
496 }
497 
/* Return 1 when cur_addr is a valid data block address immediately
 * following prev_addr, otherwise 0. */
static inline int is_consecutive(u32 prev_addr, u32 cur_addr)
{
	return is_valid_data_blkaddr(cur_addr) && cur_addr == prev_addr + 1;
}
504 
copy_extent_info(struct extent_info * t_ext,struct extent_info * s_ext)505 static inline void copy_extent_info(struct extent_info *t_ext,
506 				struct extent_info *s_ext)
507 {
508 	t_ext->fofs = s_ext->fofs;
509 	t_ext->blk = s_ext->blk;
510 	t_ext->len = s_ext->len;
511 }
512 
update_extent_info(struct f2fs_node * inode,struct extent_info * ext)513 static inline void update_extent_info(struct f2fs_node *inode,
514 				struct extent_info *ext)
515 {
516 	inode->i.i_ext.fofs = cpu_to_le32(ext->fofs);
517 	inode->i.i_ext.blk_addr = cpu_to_le32(ext->blk);
518 	inode->i.i_ext.len = cpu_to_le32(ext->len);
519 }
520 
/*
 * Scan every data block address of @ino and record the longest run of
 * consecutive valid blocks in the inode's i_ext (the read extent hint),
 * updating the on-disk inode when a run was found.
 */
static void update_largest_extent(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct dnode_of_data dn;
	struct node_info ni;
	struct f2fs_node *inode;
	u32 blkaddr, prev_blkaddr, cur_blk = 0, end_blk;
	struct extent_info largest_ext = { 0, }, cur_ext = { 0, };
	u64 remained_blkentries = 0;
	u32 cluster_size;
	int count;
	void *index_node = NULL;

	memset(&dn, 0, sizeof(dn));
	largest_ext.len = cur_ext.len = 0;

	inode = (struct f2fs_node *) calloc(F2FS_BLKSIZE, 1);
	ASSERT(inode);

	/* Read inode info */
	get_node_info(sbi, ino, &ni);
	ASSERT(dev_read_block(inode, ni.blk_addr) >= 0);
	cluster_size = 1 << inode->i.i_log_cluster_size;

	/* Inline-data files keep no block addresses to scan. */
	if (inode->i.i_inline & F2FS_INLINE_DATA)
		goto exit;

	end_blk  = f2fs_max_file_offset(&inode->i) >> F2FS_BLKSIZE_BITS;

	while (cur_blk <= end_blk) {
		/* Advance to the dnode covering cur_blk when exhausted. */
		if (remained_blkentries == 0) {
			set_new_dnode(&dn, inode, NULL, ino);
			get_dnode_of_data(sbi, &dn, cur_blk, LOOKUP_NODE);
			if (index_node)
				free(index_node);
			index_node = (dn.node_blk == dn.inode_blk) ?
				NULL : dn.node_blk;
			remained_blkentries = ADDRS_PER_PAGE(sbi,
					dn.node_blk, dn.inode_blk);
		}
		ASSERT(remained_blkentries > 0);

		blkaddr = datablock_addr(dn.node_blk, dn.ofs_in_node);
		/* Extend or close the current run.  (prev_blkaddr is only
		 * read when cur_ext.len > 0, which implies at least one
		 * earlier iteration already set it.) */
		if (cur_ext.len > 0) {
			if (is_consecutive(prev_blkaddr, blkaddr))
				cur_ext.len++;
			else {
				if (cur_ext.len > largest_ext.len)
					copy_extent_info(&largest_ext,
							&cur_ext);
				cur_ext.len = 0;
			}
		}

		/* Start a fresh run on the first valid data address. */
		if (cur_ext.len == 0 && is_valid_data_blkaddr(blkaddr)) {
			cur_ext.fofs = cur_blk;
			cur_ext.len = 1;
			cur_ext.blk = blkaddr;
		}

		prev_blkaddr = blkaddr;
		/* A COMPRESS_ADDR marker stands for a whole cluster. */
		count = blkaddr == COMPRESS_ADDR ? cluster_size : 1;
		cur_blk += count;
		dn.ofs_in_node += count;
		remained_blkentries -= count;
	}

exit:
	/* The run in progress at loop end may itself be the largest. */
	if (cur_ext.len > largest_ext.len)
		copy_extent_info(&largest_ext, &cur_ext);
	if (largest_ext.len > 0) {
		update_extent_info(inode, &largest_ext);
		ASSERT(update_inode(sbi, inode, &ni.blk_addr) >= 0);
	}

	if (index_node)
		free(index_node);
	free(inode);
}
599 
/*
 * Load the host file @de->full_path into image file @de->ino.  Three
 * paths exist: inline data for files that fit inside the inode,
 * compressed clusters when sload compression is enabled and the name
 * filter accepts the file, and plain block-by-block writes otherwise.
 *
 * Returns 0 on success, -1 on failure (missing ino, open error, or a
 * read error from the host file).
 */
int f2fs_build_file(struct f2fs_sb_info *sbi, struct dentry *de)
{
	int fd, n = -1;
	pgoff_t off = 0;
	u8 buffer[F2FS_BLKSIZE];
	struct node_info ni;
	struct f2fs_node *node_blk;

	if (de->ino == 0)
		return -1;

	/* Hardlinks: only the first link actually builds the data. */
	if (de->from_devino) {
		struct hardlink_cache_entry *found_hardlink;

		found_hardlink = f2fs_search_hardlink(sbi, de);
		if (found_hardlink && found_hardlink->to_ino &&
				found_hardlink->nbuild)
			return 0;

		found_hardlink->nbuild++;
	}

	fd = open(de->full_path, O_RDONLY);
	if (fd < 0) {
		MSG(0, "Skip: Fail to open %s\n", de->full_path);
		return -1;
	}

	/* inline_data support */
	if (de->size <= DEF_MAX_INLINE_DATA) {
		int ret;

		get_node_info(sbi, de->ino, &ni);

		node_blk = calloc(F2FS_BLKSIZE, 1);
		ASSERT(node_blk);

		ret = dev_read_block(node_blk, ni.blk_addr);
		ASSERT(ret >= 0);

		node_blk->i.i_inline |= F2FS_INLINE_DATA;
		node_blk->i.i_inline |= F2FS_DATA_EXIST;

		if (c.feature & F2FS_FEATURE_EXTRA_ATTR) {
			node_blk->i.i_inline |= F2FS_EXTRA_ATTR;
			node_blk->i.i_extra_isize =
					cpu_to_le16(calc_extra_isize());
		}
		/* The whole payload lives inside the inode block itself. */
		n = read(fd, buffer, F2FS_BLKSIZE);
		ASSERT((unsigned long)n == de->size);
		memcpy(inline_data_addr(node_blk), buffer, de->size);
		node_blk->i.i_size = cpu_to_le64(de->size);
		ASSERT(update_inode(sbi, node_blk, &ni.blk_addr) >= 0);
		free(node_blk);
#ifdef WITH_SLOAD
	} else if (c.func == SLOAD && c.compress.enabled &&
			c.compress.filter_ops->filter(de->full_path)) {
		bool eof = false;
		u8 *rbuf = c.compress.cc.rbuf;
		unsigned int cblocks = 0;

		node_blk = calloc(F2FS_BLKSIZE, 1);
		ASSERT(node_blk);

		/* read inode */
		get_node_info(sbi, de->ino, &ni);
		ASSERT(dev_read_block(node_blk, ni.blk_addr) >= 0);
		/* update inode meta */
		node_blk->i.i_compress_algorithm = c.compress.alg;
		node_blk->i.i_log_cluster_size =
				c.compress.cc.log_cluster_size;
		node_blk->i.i_flags = cpu_to_le32(F2FS_COMPR_FL);
		if (c.compress.readonly)
			node_blk->i.i_inline |= F2FS_COMPRESS_RELEASED;
		ASSERT(update_inode(sbi, node_blk, &ni.blk_addr) >= 0);

		/* One cluster-sized chunk (cc.rlen bytes) per iteration. */
		while (!eof && (n = bulkread(fd, rbuf, c.compress.cc.rlen,
				&eof)) > 0) {
			int ret = c.compress.ops->compress(&c.compress.cc);
			u64 wlen;
			u32 csize = ALIGN_UP(c.compress.cc.clen +
					COMPRESS_HEADER_SIZE, F2FS_BLKSIZE);
			unsigned int cur_cblk;

			/* Store raw when compression failed, the chunk is
			 * short, or the saving is below min_blocks. */
			if (ret || n < c.compress.cc.rlen ||
				n < (int)(csize + F2FS_BLKSIZE *
						c.compress.min_blocks)) {
				wlen = f2fs_write(sbi, de->ino, rbuf, n, off);
				ASSERT((int)wlen == n);
			} else {
				/* Layout: COMPRESS_ADDR marker block, the
				 * compressed payload, then NEW_ADDR fillers
				 * for the blocks the compression saved. */
				wlen = f2fs_write_addrtag(sbi, de->ino, off,
						WR_COMPRESS_ADDR);
				ASSERT(!wlen);
				wlen = f2fs_write_compress_data(sbi, de->ino,
						(u8 *)c.compress.cc.cbuf,
						csize, off + F2FS_BLKSIZE);
				ASSERT(wlen == csize);
				c.compress.ops->reset(&c.compress.cc);
				cur_cblk = (c.compress.cc.rlen - csize) /
								F2FS_BLKSIZE;
				cblocks += cur_cblk;
				wlen = f2fs_fix_mutable(sbi, de->ino,
						off + F2FS_BLKSIZE + csize,
						cur_cblk);
				ASSERT(!wlen);
			}
			off += n;
		}
		if (n == -1) {
			fprintf(stderr, "Load file '%s' failed: ",
					de->full_path);
			perror(NULL);
		}
		/* read inode */
		get_node_info(sbi, de->ino, &ni);
		ASSERT(dev_read_block(node_blk, ni.blk_addr) >= 0);
		/* update inode meta */
		node_blk->i.i_size = cpu_to_le64(off);
		if (!c.compress.readonly) {
			node_blk->i.i_compr_blocks = cpu_to_le64(cblocks);
			node_blk->i.i_blocks += cpu_to_le64(cblocks);
		}
		ASSERT(update_inode(sbi, node_blk, &ni.blk_addr) >= 0);
		free(node_blk);

		if (!c.compress.readonly) {
			/* Saved blocks are still charged to the image. */
			sbi->total_valid_block_count += cblocks;
			if (sbi->total_valid_block_count >=
					sbi->user_block_count) {
				ERR_MSG("Not enough space\n");
				ASSERT(0);
			}
		}
#endif
	} else {
		/* Plain path: copy the host file block by block. */
		while ((n = read(fd, buffer, F2FS_BLKSIZE)) > 0) {
			f2fs_write(sbi, de->ino, buffer, n, off);
			off += n;
		}
	}

	close(fd);
	if (n < 0)
		return -1;

	if (!c.compress.enabled || (c.feature & F2FS_FEATURE_RO))
		update_largest_extent(sbi, de->ino);
	update_free_segments(sbi);

	MSG(1, "Info: Create %s -> %s\n"
		"  -- ino=%x, type=%x, mode=%x, uid=%x, "
		"gid=%x, cap=%"PRIx64", size=%lu, pino=%x\n",
		de->full_path, de->path,
		de->ino, de->file_type, de->mode,
		de->uid, de->gid, de->capabilities, de->size, de->pino);
	return 0;
}
757 
/*
 * Rewrite @buf at block *blkaddr.  On non-zoned devices this is a plain
 * in-place write.  On host-managed zoned devices (F2FS_ZONED_HM)
 * overwriting in place is not allowed, so the old block is invalidated
 * in its segment entry and bitmaps, a new block of the same segment
 * type is allocated, the data and its SSA summary move there, and the
 * owning reference (NAT entry for node blocks, dnode slot for data
 * blocks) is repointed.  *blkaddr is updated to the new location.
 */
int update_block(struct f2fs_sb_info *sbi, void *buf, u32 *blkaddr,
		struct f2fs_node *node_blk)
{
	struct seg_entry *se;
	struct f2fs_summary sum;
	u64 new_blkaddr, old_blkaddr = *blkaddr, offset;
	int ret, type;

	if (c.zoned_model != F2FS_ZONED_HM)
		return dev_write_block(buf, old_blkaddr);

	/* update sit bitmap & valid_blocks && se->type for old block*/
	se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr));
	offset = OFFSET_IN_SEG(sbi, old_blkaddr);
	type = se->type;
	se->valid_blocks--;
	f2fs_clear_bit(offset, (char *)se->cur_valid_map);
	if (need_fsync_data_record(sbi))
		f2fs_clear_bit(offset, (char *)se->ckpt_valid_map);
	se->dirty = 1;
	f2fs_clear_main_bitmap(sbi, old_blkaddr);
	f2fs_clear_sit_bitmap(sbi, old_blkaddr);

	/* Allocate a replacement block of the same type. */
	new_blkaddr = SM_I(sbi)->main_blkaddr;
	if (find_next_free_block(sbi, &new_blkaddr, 0, type, false)) {
		ERR_MSG("Can't find free block for the update");
		ASSERT(0);
	}

	ret = dev_write_block(buf, new_blkaddr);
	ASSERT(ret >= 0);

	*blkaddr = new_blkaddr;

	/* update sit bitmap & valid_blocks && se->type for new block */
	se = get_seg_entry(sbi, GET_SEGNO(sbi, new_blkaddr));
	offset = OFFSET_IN_SEG(sbi, new_blkaddr);
	se->type = se->orig_type = type;
	se->valid_blocks++;
	f2fs_set_bit(offset, (char *)se->cur_valid_map);
	if (need_fsync_data_record(sbi))
		f2fs_set_bit(offset, (char *)se->ckpt_valid_map);
	se->dirty = 1;
	f2fs_set_main_bitmap(sbi, new_blkaddr, type);
	f2fs_set_sit_bitmap(sbi, new_blkaddr);

	/* update SSA */
	get_sum_entry(sbi, old_blkaddr, &sum);
	update_sum_entry(sbi, new_blkaddr, &sum);

	/* Repoint whichever structure referenced the old block. */
	if (IS_DATASEG(type)) {
		update_data_blkaddr(sbi, le32_to_cpu(sum.nid),
				le16_to_cpu(sum.ofs_in_node), new_blkaddr, node_blk);
	} else
		update_nat_blkaddr(sbi, 0, le32_to_cpu(sum.nid), new_blkaddr);

	DBG(1, "Update %s block %"PRIx64" -> %"PRIx64"\n",
		IS_DATASEG(type) ? "data" : "node", old_blkaddr, new_blkaddr);
	return ret;
}
818