/**
 * segment.c
 *
 * Many parts of this code are copied from the Linux kernel's fs/f2fs.
 *
 * Copyright (C) 2015 Huawei Ltd.
 * Written by:
 *   Hou Pengyang <houpengyang@huawei.com>
 *   Liu Shuoran <liushuoran@huawei.com>
 *   Jaegeuk Kim <jaegeuk@kernel.org>
 * Copyright (c) 2020 Google Inc.
 *   Robin Hsu <robinhsu@google.com>
 *  : add sload compression support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include "fsck.h"
#include "node.h"
#include "quotaio.h"

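/*
 * Pick a free block in the main area, mark it valid in the SIT/SSA
 * bookkeeping, and return its address through @to.  When @*to is
 * NULL_ADDR this is a brand-new allocation, so the valid block/node/
 * inode counters are bumped as well; otherwise only the block move is
 * recorded.  Returns 0 on success or -ENOSPC when no space is left.
 */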
int reserve_new_block(struct f2fs_sb_info *sbi, block_t *to,
			struct f2fs_summary *sum, int type, bool is_inode)
{
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	struct seg_entry *se;
	u64 blkaddr, offset;
	u64 old_blkaddr = *to;
	bool is_node = IS_NODESEG(type);

	if (old_blkaddr == NULL_ADDR) {
		if (c.func == FSCK) {
			if (fsck->chk.valid_blk_cnt >= sbi->user_block_count) {
				ERR_MSG("Not enough space\n");
				return -ENOSPC;
			}
			if (is_node && fsck->chk.valid_node_cnt >=
					sbi->total_valid_node_count) {
				ERR_MSG("Not enough space for node block\n");
				return -ENOSPC;
			}
		} else {
			if (sbi->total_valid_block_count >=
						sbi->user_block_count) {
				ERR_MSG("Not enough space\n");
				return -ENOSPC;
			}
			if (is_node && sbi->total_valid_node_count >=
						sbi->total_node_count) {
				ERR_MSG("Not enough space for node block\n");
				return -ENOSPC;
			}
		}
	}

	blkaddr = SM_I(sbi)->main_blkaddr;

	if (find_next_free_block(sbi, &blkaddr, 0, type, false)) {
		ERR_MSG("Can't find free block");
		ASSERT(0);
	}

	se = get_seg_entry(sbi, GET_SEGNO(sbi, blkaddr));
	offset = OFFSET_IN_SEG(sbi, blkaddr);
	se->type = type;
	se->valid_blocks++;
	f2fs_set_bit(offset, (char *)se->cur_valid_map);
	if (need_fsync_data_record(sbi)) {
		se->ckpt_type = type;
		se->ckpt_valid_blocks++;
		f2fs_set_bit(offset, (char *)se->ckpt_valid_map);
	}
	if (c.func == FSCK) {
		f2fs_set_main_bitmap(sbi, blkaddr, type);
		f2fs_set_sit_bitmap(sbi, blkaddr);
	}

	if (old_blkaddr == NULL_ADDR) {
		sbi->total_valid_block_count++;
		if (is_node) {
			sbi->total_valid_node_count++;
			if (is_inode)
				sbi->total_valid_inode_count++;
		}
		if (c.func == FSCK) {
			fsck->chk.valid_blk_cnt++;
			if (is_node) {
				fsck->chk.valid_node_cnt++;
				if (is_inode)
					fsck->chk.valid_inode_cnt++;
			}
		}
	}
	se->dirty = 1;

	/* read/write SSA */
	*to = (block_t)blkaddr;
	update_sum_entry(sbi, *to, sum);

	return 0;
}

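/*
 * Reserve a new data block for @dn->ofs_in_node inside @dn->node_blk,
 * record the owning node in the SSA summary, and update the in-memory
 * dnode (block address, inode block count, dirty flags).  On a
 * read-only image, all data is forced into the hot data log.
 */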
int new_data_block(struct f2fs_sb_info *sbi, void *block,
				struct dnode_of_data *dn, int type)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_summary sum;
	struct node_info ni;
	unsigned int blkaddr = datablock_addr(dn->node_blk, dn->ofs_in_node);
	int ret;

	if ((get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO)) &&
					type != CURSEG_HOT_DATA)
		type = CURSEG_HOT_DATA;

	ASSERT(dn->node_blk);
	memset(block, 0, BLOCK_SZ);

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	dn->data_blkaddr = blkaddr;
	ret = reserve_new_block(sbi, &dn->data_blkaddr, &sum, type, 0);
	if (ret) {
		c.alloc_failed = 1;
		return ret;
	}

	if (blkaddr == NULL_ADDR)
		inc_inode_blocks(dn);
	else if (blkaddr == NEW_ADDR)
		dn->idirty = 1;
	set_data_blkaddr(dn);
	return 0;
}

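/* Return the on-disk i_size of the quota file's inode. */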
u64 f2fs_quota_size(struct quota_file *qf)
{
	struct node_info ni;
	struct f2fs_node *inode;
	u64 filesize;

	inode = (struct f2fs_node *) calloc(BLOCK_SZ, 1);
	ASSERT(inode);

	/* Read inode */
	get_node_info(qf->sbi, qf->ino, &ni);
	ASSERT(dev_read_block(inode, ni.blk_addr) >= 0);
	ASSERT(S_ISREG(le16_to_cpu(inode->i.i_mode)));

	filesize = le64_to_cpu(inode->i.i_size);
	free(inode);
	return filesize;
}

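/*
 * Read up to @count bytes of file @ino starting at byte @offset into
 * @buffer, walking the inode's dnodes block by block.  The count is
 * clamped to i_size, and the read stops early at an unwritten
 * (NULL/NEW) block address.  Returns the number of bytes copied.
 */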
u64 f2fs_read(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
					u64 count, pgoff_t offset)
{
	struct dnode_of_data dn;
	struct node_info ni;
	struct f2fs_node *inode;
	char *blk_buffer;
	u64 filesize;
	u64 off_in_blk;
	u64 len_in_blk;
	u64 read_count;
	u64 remained_blkentries;
	block_t blkaddr;
	void *index_node = NULL;

	memset(&dn, 0, sizeof(dn));

	/* Memory allocation for block buffer and inode. */
	blk_buffer = calloc(BLOCK_SZ, 2);
	ASSERT(blk_buffer);
	inode = (struct f2fs_node *)(blk_buffer + BLOCK_SZ);

	/* Read inode */
	get_node_info(sbi, ino, &ni);
	ASSERT(dev_read_block(inode, ni.blk_addr) >= 0);
	ASSERT(!S_ISDIR(le16_to_cpu(inode->i.i_mode)));
	ASSERT(!S_ISLNK(le16_to_cpu(inode->i.i_mode)));

	/* Adjust count with file length. */
	filesize = le64_to_cpu(inode->i.i_size);
	if (offset > filesize)
		count = 0;
	else if (count + offset > filesize)
		count = filesize - offset;

	/* Main loop for file blocks */
	read_count = remained_blkentries = 0;
	while (count > 0) {
		if (remained_blkentries == 0) {
			set_new_dnode(&dn, inode, NULL, ino);
			get_dnode_of_data(sbi, &dn, F2FS_BYTES_TO_BLK(offset),
					LOOKUP_NODE);
			if (index_node)
				free(index_node);
			index_node = (dn.node_blk == dn.inode_blk) ?
					NULL : dn.node_blk;
			remained_blkentries = ADDRS_PER_PAGE(sbi,
					dn.node_blk, dn.inode_blk);
		}
		ASSERT(remained_blkentries > 0);

		blkaddr = datablock_addr(dn.node_blk, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR)
			break;

		off_in_blk = offset % BLOCK_SZ;
		len_in_blk = BLOCK_SZ - off_in_blk;
		if (len_in_blk > count)
			len_in_blk = count;

		/* Read data from single block. */
		if (len_in_blk < BLOCK_SZ) {
			ASSERT(dev_read_block(blk_buffer, blkaddr) >= 0);
			memcpy(buffer, blk_buffer + off_in_blk, len_in_blk);
		} else {
			/* Direct read */
			ASSERT(dev_read_block(buffer, blkaddr) >= 0);
		}

		offset += len_in_blk;
		count -= len_in_blk;
		buffer += len_in_blk;
		read_count += len_in_blk;

		dn.ofs_in_node++;
		remained_blkentries--;
	}
	if (index_node)
		free(index_node);
	free(blk_buffer);

	return read_count;
}

/*
 * Do not call this function directly.  Instead, call one of the following:
 *	u64 f2fs_write();
 *	u64 f2fs_write_compress_data();
 *	u64 f2fs_write_addrtag();
 */
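/*
 * @addr_type selects what is written: WR_NORMAL and WR_COMPRESS_DATA
 * carry real data from @buffer, while any other value is an address tag
 * (COMPRESS_ADDR, NEW_ADDR, or NULL_ADDR) stored directly into the
 * dnode's block address slot without writing a data block.
 */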
static u64 f2fs_write_ex(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
		u64 count, pgoff_t offset, enum wr_addr_type addr_type)
{
	struct dnode_of_data dn;
	struct node_info ni;
	struct f2fs_node *inode;
	char *blk_buffer;
	u64 off_in_blk;
	u64 len_in_blk;
	u64 written_count;
	u64 remained_blkentries;
	block_t blkaddr;
	void *index_node = NULL;
	int idirty = 0;
	int err;
	bool has_data = (addr_type == WR_NORMAL
			|| addr_type == WR_COMPRESS_DATA);

	if (count == 0)
		return 0;

	/*
	 * Enforce calling from f2fs_write(), f2fs_write_compress_data(),
	 * and f2fs_write_addrtag().  Besides, check that it is called
	 * properly.
	 */
	ASSERT((!has_data && buffer == NULL) || (has_data && buffer != NULL));
	if (addr_type != WR_NORMAL)
		ASSERT(offset % F2FS_BLKSIZE == 0); /* block boundary only */

	/* Memory allocation for block buffer and inode. */
	blk_buffer = calloc(BLOCK_SZ, 2);
	ASSERT(blk_buffer);
	inode = (struct f2fs_node *)(blk_buffer + BLOCK_SZ);

	/* Read inode */
	get_node_info(sbi, ino, &ni);
	ASSERT(dev_read_block(inode, ni.blk_addr) >= 0);
	ASSERT(!S_ISDIR(le16_to_cpu(inode->i.i_mode)));
	ASSERT(!S_ISLNK(le16_to_cpu(inode->i.i_mode)));

	/* Main loop for file blocks */
	written_count = remained_blkentries = 0;
	while (count > 0) {
		if (remained_blkentries == 0) {
			set_new_dnode(&dn, inode, NULL, ino);
			err = get_dnode_of_data(sbi, &dn,
					F2FS_BYTES_TO_BLK(offset), ALLOC_NODE);
			if (err)
				break;
			idirty |= dn.idirty;
			free(index_node);
			index_node = (dn.node_blk == dn.inode_blk) ?
					NULL : dn.node_blk;
			remained_blkentries = ADDRS_PER_PAGE(sbi,
					dn.node_blk, dn.inode_blk) -
					dn.ofs_in_node;
		}
		ASSERT(remained_blkentries > 0);

		if (!has_data) {
			dn.data_blkaddr = addr_type;
			set_data_blkaddr(&dn);
			idirty |= dn.idirty;
			if (dn.ndirty)
				ASSERT(dev_write_block(dn.node_blk,
						dn.node_blkaddr) >= 0);
			written_count = 0;
			break;
		}

		blkaddr = datablock_addr(dn.node_blk, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
			err = new_data_block(sbi, blk_buffer,
						&dn, CURSEG_WARM_DATA);
			if (err)
				break;
			blkaddr = dn.data_blkaddr;
			idirty |= dn.idirty;
		}

		off_in_blk = offset % BLOCK_SZ;
		len_in_blk = BLOCK_SZ - off_in_blk;
		if (len_in_blk > count)
			len_in_blk = count;

		/* Write data to single block. */
		if (len_in_blk < BLOCK_SZ) {
			ASSERT(dev_read_block(blk_buffer, blkaddr) >= 0);
			memcpy(blk_buffer + off_in_blk, buffer, len_in_blk);
			ASSERT(dev_write_block(blk_buffer, blkaddr) >= 0);
		} else {
			/* Direct write */
			ASSERT(dev_write_block(buffer, blkaddr) >= 0);
		}

		offset += len_in_blk;
		count -= len_in_blk;
		buffer += len_in_blk;
		written_count += len_in_blk;

		dn.ofs_in_node++;
		if ((--remained_blkentries == 0 || count == 0) && (dn.ndirty))
			ASSERT(dev_write_block(dn.node_blk, dn.node_blkaddr)
									>= 0);
	}
	if (addr_type == WR_NORMAL && offset > le64_to_cpu(inode->i.i_size)) {
		inode->i.i_size = cpu_to_le64(offset);
		idirty = 1;
	}
	if (idirty) {
		ASSERT(inode == dn.inode_blk);
		ASSERT(write_inode(inode, ni.blk_addr) >= 0);
	}

	free(index_node);
	free(blk_buffer);

	return written_count;
}

u64 f2fs_write(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
					u64 count, pgoff_t offset)
{
	return f2fs_write_ex(sbi, ino, buffer, count, offset, WR_NORMAL);
}

u64 f2fs_write_compress_data(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
					u64 count, pgoff_t offset)
{
	return f2fs_write_ex(sbi, ino, buffer, count, offset, WR_COMPRESS_DATA);
}

u64 f2fs_write_addrtag(struct f2fs_sb_info *sbi, nid_t ino, pgoff_t offset,
		unsigned int addrtag)
{
	ASSERT(addrtag == COMPRESS_ADDR || addrtag == NEW_ADDR
			|| addrtag == NULL_ADDR);
	return f2fs_write_ex(sbi, ino, NULL, F2FS_BLKSIZE, offset, addrtag);
}

/* This function updates only inode->i.i_size */
void f2fs_filesize_update(struct f2fs_sb_info *sbi, nid_t ino, u64 filesize)
{
	struct node_info ni;
	struct f2fs_node *inode;

	inode = calloc(BLOCK_SZ, 1);
	ASSERT(inode);
	get_node_info(sbi, ino, &ni);

	ASSERT(dev_read_block(inode, ni.blk_addr) >= 0);
	ASSERT(!S_ISDIR(le16_to_cpu(inode->i.i_mode)));
	ASSERT(!S_ISLNK(le16_to_cpu(inode->i.i_mode)));

	inode->i.i_size = cpu_to_le64(filesize);

	ASSERT(write_inode(inode, ni.blk_addr) >= 0);
	free(inode);
}

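/*
 * Read exactly @rsize bytes from @fd unless EOF or an error cuts the
 * read short.  Transient EINTR failures are retried up to
 * MAX_BULKR_RETRY times.  On return, @*eof (if non-NULL) tells whether
 * end-of-file was reached.  Returns the number of bytes read, or -1.
 */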
#define MAX_BULKR_RETRY 5
int bulkread(int fd, void *rbuf, size_t rsize, bool *eof)
{
	int n = 0;
	int retry = MAX_BULKR_RETRY;
	int cur;

	if (!rsize)
		return 0;

	if (eof != NULL)
		*eof = false;
	while (rsize && (cur = read(fd, rbuf, rsize)) != 0) {
		if (cur == -1) {
			if (errno == EINTR && retry--)
				continue;
			return -1;
		}
		retry = MAX_BULKR_RETRY;

		/* advance past the bytes already read before continuing */
		rbuf = (char *)rbuf + cur;
		rsize -= cur;
		n += cur;
	}
	if (eof != NULL)
		*eof = (cur == 0);
	return n;
}

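/*
 * Tag @compressed - 1 block slots starting at @offset with NEW_ADDR so
 * the blocks saved by compression stay reserved for later overwrites of
 * the cluster.  Skipped for read-only compression.  Returns 0 on
 * success, non-zero otherwise.
 */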
u64 f2fs_fix_mutable(struct f2fs_sb_info *sbi, nid_t ino, pgoff_t offset,
		unsigned int compressed)
{
	unsigned int i;
	u64 wlen;

	if (c.compress.readonly)
		return 0;

	for (i = 0; i < compressed - 1; i++) {
		wlen = f2fs_write_addrtag(sbi, ino,
				offset + (i << F2FS_BLKSIZE_BITS), NEW_ADDR);
		if (wlen)
			return wlen;
	}
	return 0;
}

static inline int is_consecutive(u32 prev_addr, u32 cur_addr)
{
	if (is_valid_data_blkaddr(cur_addr) && (cur_addr == prev_addr + 1))
		return 1;
	return 0;
}

static inline void copy_extent_info(struct extent_info *t_ext,
		struct extent_info *s_ext)
{
	t_ext->fofs = s_ext->fofs;
	t_ext->blk = s_ext->blk;
	t_ext->len = s_ext->len;
}

static inline void update_extent_info(struct f2fs_node *inode,
		struct extent_info *ext)
{
	inode->i.i_ext.fofs = cpu_to_le32(ext->fofs);
	inode->i.i_ext.blk_addr = cpu_to_le32(ext->blk);
	inode->i.i_ext.len = cpu_to_le32(ext->len);
}

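/*
 * Scan the whole file and record its largest run of consecutive data
 * blocks in the inode's on-disk extent.  Inline-data inodes are left
 * untouched; a COMPRESS_ADDR slot advances the scan by cluster_size.
 */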
static void update_largest_extent(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct dnode_of_data dn;
	struct node_info ni;
	struct f2fs_node *inode;
	u32 blkaddr, prev_blkaddr, cur_blk = 0, end_blk;
	struct extent_info largest_ext, cur_ext;
	u64 remained_blkentries = 0;
	u32 cluster_size;
	int count;
	void *index_node = NULL;

	memset(&dn, 0, sizeof(dn));
	largest_ext.len = cur_ext.len = 0;

	inode = (struct f2fs_node *) calloc(BLOCK_SZ, 1);
	ASSERT(inode);

	/* Read inode info */
	get_node_info(sbi, ino, &ni);
	ASSERT(dev_read_block(inode, ni.blk_addr) >= 0);
	cluster_size = 1 << inode->i.i_log_cluster_size;

	if (inode->i.i_inline & F2FS_INLINE_DATA)
		goto exit;

	end_blk = f2fs_max_file_offset(&inode->i) >> F2FS_BLKSIZE_BITS;

	while (cur_blk <= end_blk) {
		if (remained_blkentries == 0) {
			set_new_dnode(&dn, inode, NULL, ino);
			get_dnode_of_data(sbi, &dn, cur_blk, LOOKUP_NODE);
			if (index_node)
				free(index_node);
			index_node = (dn.node_blk == dn.inode_blk) ?
					NULL : dn.node_blk;
			remained_blkentries = ADDRS_PER_PAGE(sbi,
					dn.node_blk, dn.inode_blk);
		}
		ASSERT(remained_blkentries > 0);

		blkaddr = datablock_addr(dn.node_blk, dn.ofs_in_node);
		if (cur_ext.len > 0) {
			if (is_consecutive(prev_blkaddr, blkaddr))
				cur_ext.len++;
			else {
				if (cur_ext.len > largest_ext.len)
					copy_extent_info(&largest_ext,
							&cur_ext);
				cur_ext.len = 0;
			}
		}

		if (cur_ext.len == 0 && is_valid_data_blkaddr(blkaddr)) {
			cur_ext.fofs = cur_blk;
			cur_ext.len = 1;
			cur_ext.blk = blkaddr;
		}

		prev_blkaddr = blkaddr;
		count = blkaddr == COMPRESS_ADDR ? cluster_size : 1;
		cur_blk += count;
		dn.ofs_in_node += count;
		remained_blkentries -= count;
	}

exit:
	if (cur_ext.len > largest_ext.len)
		copy_extent_info(&largest_ext, &cur_ext);
	if (largest_ext.len > 0) {
		update_extent_info(inode, &largest_ext);
		ASSERT(write_inode(inode, ni.blk_addr) >= 0);
	}

	if (index_node)
		free(index_node);
	free(inode);
}

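/*
 * Copy the host file described by @de into the image: inline data for
 * tiny files, compressed clusters when sload compression selects the
 * file, plain block-by-block writes otherwise.  Hardlinks that were
 * already built are skipped.  Returns 0 on success, -1 on failure.
 */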
int f2fs_build_file(struct f2fs_sb_info *sbi, struct dentry *de)
{
	int fd, n = -1;
	pgoff_t off = 0;
	u8 buffer[BLOCK_SZ];
	struct node_info ni;
	struct f2fs_node *node_blk;

	if (de->ino == 0)
		return -1;

	if (de->from_devino) {
		struct hardlink_cache_entry *found_hardlink;

		found_hardlink = f2fs_search_hardlink(sbi, de);
		if (found_hardlink && found_hardlink->to_ino &&
				found_hardlink->nbuild)
			return 0;

		found_hardlink->nbuild++;
	}

	fd = open(de->full_path, O_RDONLY);
	if (fd < 0) {
		MSG(0, "Skip: Fail to open %s\n", de->full_path);
		return -1;
	}

	/* inline_data support */
	if (de->size <= DEF_MAX_INLINE_DATA) {
		int ret;

		get_node_info(sbi, de->ino, &ni);

		node_blk = calloc(BLOCK_SZ, 1);
		ASSERT(node_blk);

		ret = dev_read_block(node_blk, ni.blk_addr);
		ASSERT(ret >= 0);

		node_blk->i.i_inline |= F2FS_INLINE_DATA;
		node_blk->i.i_inline |= F2FS_DATA_EXIST;

		if (c.feature & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
			node_blk->i.i_inline |= F2FS_EXTRA_ATTR;
			node_blk->i.i_extra_isize =
					cpu_to_le16(calc_extra_isize());
		}
		n = read(fd, buffer, BLOCK_SZ);
		ASSERT((unsigned long)n == de->size);
		memcpy(inline_data_addr(node_blk), buffer, de->size);
		node_blk->i.i_size = cpu_to_le64(de->size);
		ASSERT(write_inode(node_blk, ni.blk_addr) >= 0);
		free(node_blk);
#ifdef WITH_SLOAD
	} else if (c.func == SLOAD && c.compress.enabled &&
			c.compress.filter_ops->filter(de->full_path)) {
		bool eof = false;
		u8 *rbuf = c.compress.cc.rbuf;
		unsigned int cblocks = 0;

		node_blk = calloc(BLOCK_SZ, 1);
		ASSERT(node_blk);

		/* read inode */
		get_node_info(sbi, de->ino, &ni);
		ASSERT(dev_read_block(node_blk, ni.blk_addr) >= 0);
		/* update inode meta */
		node_blk->i.i_compress_algrithm = c.compress.alg;
		node_blk->i.i_log_cluster_size =
				c.compress.cc.log_cluster_size;
		node_blk->i.i_flags = cpu_to_le32(F2FS_COMPR_FL);
		if (c.compress.readonly)
			node_blk->i.i_inline |= F2FS_COMPRESS_RELEASED;
		ASSERT(write_inode(node_blk, ni.blk_addr) >= 0);

		while (!eof && (n = bulkread(fd, rbuf, c.compress.cc.rlen,
				&eof)) > 0) {
			int ret = c.compress.ops->compress(&c.compress.cc);
			u64 wlen;
			u32 csize = ALIGN_UP(c.compress.cc.clen +
					COMPRESS_HEADER_SIZE, BLOCK_SZ);
			unsigned int cur_cblk;

			if (ret || n < c.compress.cc.rlen ||
					n < (int)(csize + BLOCK_SZ *
							c.compress.min_blocks)) {
				wlen = f2fs_write(sbi, de->ino, rbuf, n, off);
				ASSERT((int)wlen == n);
			} else {
				wlen = f2fs_write_addrtag(sbi, de->ino, off,
						WR_COMPRESS_ADDR);
				ASSERT(!wlen);
				wlen = f2fs_write_compress_data(sbi, de->ino,
						(u8 *)c.compress.cc.cbuf,
						csize, off + BLOCK_SZ);
				ASSERT(wlen == csize);
				c.compress.ops->reset(&c.compress.cc);
				cur_cblk = (c.compress.cc.rlen - csize) /
						BLOCK_SZ;
				cblocks += cur_cblk;
				wlen = f2fs_fix_mutable(sbi, de->ino,
						off + BLOCK_SZ + csize,
						cur_cblk);
				ASSERT(!wlen);
			}
			off += n;
		}
		if (n == -1) {
			fprintf(stderr, "Load file '%s' failed: ",
					de->full_path);
			perror(NULL);
		}
		/* read inode */
		get_node_info(sbi, de->ino, &ni);
		ASSERT(dev_read_block(node_blk, ni.blk_addr) >= 0);
		/* update inode meta */
		node_blk->i.i_size = cpu_to_le64(off);
		if (!c.compress.readonly) {
			node_blk->i.i_compr_blocks = cpu_to_le64(cblocks);
			node_blk->i.i_blocks += cpu_to_le64(cblocks);
		}
		ASSERT(write_inode(node_blk, ni.blk_addr) >= 0);
		free(node_blk);

		if (!c.compress.readonly) {
			sbi->total_valid_block_count += cblocks;
			if (sbi->total_valid_block_count >=
					sbi->user_block_count) {
				ERR_MSG("Not enough space\n");
				ASSERT(0);
			}
		}
#endif
	} else {
		while ((n = read(fd, buffer, BLOCK_SZ)) > 0) {
			f2fs_write(sbi, de->ino, buffer, n, off);
			off += n;
		}
	}

	close(fd);
	if (n < 0)
		return -1;

	if (!c.compress.enabled || (c.feature & cpu_to_le32(F2FS_FEATURE_RO)))
		update_largest_extent(sbi, de->ino);
	update_free_segments(sbi);

	MSG(1, "Info: Create %s -> %s\n"
		"  -- ino=%x, type=%x, mode=%x, uid=%x, "
		"gid=%x, cap=%"PRIx64", size=%lu, pino=%x\n",
		de->full_path, de->path,
		de->ino, de->file_type, de->mode,
		de->uid, de->gid, de->capabilities, de->size, de->pino);
	return 0;
}