// SPDX-License-Identifier: GPL-2.0+ OR Apache-2.0
/*
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 * http://www.huawei.com/
 * Created by Li Guifu <bluce.liguifu@huawei.com>
 * with heavy changes by Gao Xiang <gaoxiang25@huawei.com>
 */
#define _GNU_SOURCE
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/stat.h>
#include <config.h>
#if defined(HAVE_SYS_SYSMACROS_H)
#include <sys/sysmacros.h>
#endif
#include <dirent.h>
#include "erofs/print.h"
#include "erofs/diskbuf.h"
#include "erofs/inode.h"
#include "erofs/cache.h"
#include "erofs/io.h"
#include "erofs/compress.h"
#include "erofs/xattr.h"
#include "erofs/exclude.h"
#include "erofs/block_list.h"
#include "erofs/compress_hints.h"
#include "erofs/blobchunk.h"
#include "erofs/fragments.h"
#include "liberofs_private.h"

#define S_SHIFT 12
static unsigned char erofs_ftype_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT] = EROFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT] = EROFS_FT_DIR,
	[S_IFCHR >> S_SHIFT] = EROFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT] = EROFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT] = EROFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT] = EROFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT] = EROFS_FT_SYMLINK,
};

unsigned char erofs_mode_to_ftype(umode_t mode)
{
	return erofs_ftype_by_mode[(mode & S_IFMT) >> S_SHIFT];
}

static const unsigned char erofs_dtype_by_ftype[EROFS_FT_MAX] = {
	[EROFS_FT_UNKNOWN] = DT_UNKNOWN,
	[EROFS_FT_REG_FILE] = DT_REG,
	[EROFS_FT_DIR] = DT_DIR,
	[EROFS_FT_CHRDEV] = DT_CHR,
	[EROFS_FT_BLKDEV] = DT_BLK,
	[EROFS_FT_FIFO] = DT_FIFO,
	[EROFS_FT_SOCK] = DT_SOCK,
	[EROFS_FT_SYMLINK] = DT_LNK
};

unsigned char erofs_ftype_to_dtype(unsigned int filetype)
{
	if (filetype >= EROFS_FT_MAX)
		return DT_UNKNOWN;

	return erofs_dtype_by_ftype[filetype];
}

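/*
 * A simple open-chained hash table tracking built inodes, so that hard
 * links can be detected by (st_dev, st_ino) and existing inodes can be
 * looked up again by nid.
 */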
#define NR_INODE_HASHTABLE 16384

struct list_head inode_hashtable[NR_INODE_HASHTABLE];

void erofs_inode_manager_init(void)
{
	unsigned int i;

	for (i = 0; i < NR_INODE_HASHTABLE; ++i)
		init_list_head(&inode_hashtable[i]);
}

void erofs_insert_ihash(struct erofs_inode *inode, dev_t dev, ino_t ino)
{
	list_add(&inode->i_hash,
		 &inode_hashtable[(ino ^ dev) % NR_INODE_HASHTABLE]);
}

/* get the inode from the (source) inode # */
struct erofs_inode *erofs_iget(dev_t dev, ino_t ino)
{
	struct list_head *head =
		&inode_hashtable[(ino ^ dev) % NR_INODE_HASHTABLE];
	struct erofs_inode *inode;

	list_for_each_entry(inode, head, i_hash)
		if (inode->i_ino[1] == ino && inode->dev == dev)
			return erofs_igrab(inode);
	return NULL;
}

struct erofs_inode *erofs_iget_by_nid(erofs_nid_t nid)
{
	struct list_head *head =
		&inode_hashtable[nid % NR_INODE_HASHTABLE];
	struct erofs_inode *inode;

	list_for_each_entry(inode, head, i_hash)
		if (inode->nid == nid)
			return erofs_igrab(inode);
	return NULL;
}

unsigned int erofs_iput(struct erofs_inode *inode)
{
	struct erofs_dentry *d, *t;

	if (inode->i_count > 1)
		return --inode->i_count;

	list_for_each_entry_safe(d, t, &inode->i_subdirs, d_child)
		free(d);

	if (inode->eof_tailraw)
		free(inode->eof_tailraw);
	list_del(&inode->i_hash);
	if (inode->i_srcpath)
		free(inode->i_srcpath);
	if (inode->with_diskbuf) {
		erofs_diskbuf_close(inode->i_diskbuf);
		free(inode->i_diskbuf);
	} else if (inode->i_link) {
		free(inode->i_link);
	}
	free(inode);
	return 0;
}

struct erofs_dentry *erofs_d_alloc(struct erofs_inode *parent,
				   const char *name)
{
	struct erofs_dentry *d = malloc(sizeof(*d));

	if (!d)
		return ERR_PTR(-ENOMEM);

	strncpy(d->name, name, EROFS_NAME_LEN - 1);
	d->name[EROFS_NAME_LEN - 1] = '\0';

	list_add_tail(&d->d_child, &parent->i_subdirs);
	return d;
}

/* allocate main data for an inode */
static int __allocate_inode_bh_data(struct erofs_inode *inode,
				    unsigned long nblocks,
				    int type)
{
	struct erofs_buffer_head *bh;
	int ret;

	if (!nblocks) {
		/* it has only tail-end data */
		inode->u.i_blkaddr = NULL_ADDR;
		return 0;
	}

	/* allocate main data buffer */
	bh = erofs_balloc(type, erofs_pos(inode->sbi, nblocks), 0, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	bh->op = &erofs_skip_write_bhops;
	inode->bh_data = bh;

	/* get blkaddr of the bh */
	ret = erofs_mapbh(bh->block);
	DBG_BUGON(ret < 0);

	/* write blocks except for the tail-end block */
	inode->u.i_blkaddr = bh->block->blkaddr;
	return 0;
}

static int comp_subdir(const void *a, const void *b)
{
	const struct erofs_dentry *da, *db;

	da = *((const struct erofs_dentry **)a);
	db = *((const struct erofs_dentry **)b);
	return strcmp(da->name, db->name);
}

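/*
 * Sort the subdirectory entries by name and compute the on-disk
 * directory size: a dirent together with its name never crosses a
 * block boundary, so the size is rounded up to the next block whenever
 * an entry would not fit into the current one.
 */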
static int erofs_prepare_dir_layout(struct erofs_inode *dir,
				    unsigned int nr_subdirs)
{
	struct erofs_sb_info *sbi = dir->sbi;
	struct erofs_dentry *d, *n, **sorted_d;
	unsigned int i;
	unsigned int d_size = 0;

	sorted_d = malloc(nr_subdirs * sizeof(d));
	if (!sorted_d)
		return -ENOMEM;
	i = 0;
	list_for_each_entry_safe(d, n, &dir->i_subdirs, d_child) {
		list_del(&d->d_child);
		sorted_d[i++] = d;
	}
	DBG_BUGON(i != nr_subdirs);
	qsort(sorted_d, nr_subdirs, sizeof(d), comp_subdir);
	for (i = 0; i < nr_subdirs; i++)
		list_add_tail(&sorted_d[i]->d_child, &dir->i_subdirs);
	free(sorted_d);

	/* let's calculate dir size */
	list_for_each_entry(d, &dir->i_subdirs, d_child) {
		int len = strlen(d->name) + sizeof(struct erofs_dirent);

		if (erofs_blkoff(sbi, d_size) + len > erofs_blksiz(sbi))
			d_size = round_up(d_size, erofs_blksiz(sbi));
		d_size += len;
	}
	dir->i_size = d_size;

	/* directories are never compressed */
	dir->datalayout = EROFS_INODE_FLAT_INLINE;

	/* it will be used in erofs_prepare_inode_buffer */
	dir->idata_size = d_size % erofs_blksiz(sbi);
	return 0;
}

int erofs_init_empty_dir(struct erofs_inode *dir)
{
	struct erofs_dentry *d;

	/* "." points to the current dir inode */
	d = erofs_d_alloc(dir, ".");
	if (IS_ERR(d))
		return PTR_ERR(d);
	d->inode = erofs_igrab(dir);
	d->type = EROFS_FT_DIR;

	/* ".." points to the parent dir */
	d = erofs_d_alloc(dir, "..");
	if (IS_ERR(d))
		return PTR_ERR(d);
	d->inode = erofs_igrab(dir->i_parent);
	d->type = EROFS_FT_DIR;

	dir->i_nlink = 2;
	return 0;
}

int erofs_prepare_dir_file(struct erofs_inode *dir, unsigned int nr_subdirs)
{
	int ret;

	ret = erofs_init_empty_dir(dir);
	if (ret)
		return ret;

	/* account for "." and ".." before sorting and laying out subdirs */
	nr_subdirs += 2;
	return erofs_prepare_dir_layout(dir, nr_subdirs);
}

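/*
 * Fill one directory block: dirents are written from the head of the
 * buffer at offset p, the corresponding names start at offset q, and
 * the remaining space is zeroed.
 */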
static void fill_dirblock(char *buf, unsigned int size, unsigned int q,
			  struct erofs_dentry *head, struct erofs_dentry *end)
{
	unsigned int p = 0;

	/* write out all erofs_dirents + filenames */
	while (head != end) {
		const unsigned int namelen = strlen(head->name);
		struct erofs_dirent d = {
			.nid = cpu_to_le64(head->nid),
			.nameoff = cpu_to_le16(q),
			.file_type = head->type,
		};

		memcpy(buf + p, &d, sizeof(d));
		memcpy(buf + q, head->name, namelen);
		p += sizeof(d);
		q += namelen;

		head = list_next_entry(head, d_child);
	}
	memset(buf + q, 0, size - q);
}

static int write_dirblock(struct erofs_sb_info *sbi,
			  unsigned int q, struct erofs_dentry *head,
			  struct erofs_dentry *end, erofs_blk_t blkaddr)
{
	char buf[EROFS_MAX_BLOCK_SIZE];

	fill_dirblock(buf, erofs_blksiz(sbi), q, head, end);
	return blk_write(sbi, buf, blkaddr, 1);
}

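/*
 * Return the nid of an inode; if it hasn't been assigned yet, derive it
 * from the mapped position of its metadata buffer relative to the
 * metadata base block.
 */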
erofs_nid_t erofs_lookupnid(struct erofs_inode *inode)
{
	struct erofs_buffer_head *const bh = inode->bh;
	struct erofs_sb_info *sbi = inode->sbi;
	erofs_off_t off, meta_offset;

	if (!bh || (long long)inode->nid > 0)
		return inode->nid;

	erofs_mapbh(bh->block);
	off = erofs_btell(bh, false);

	meta_offset = erofs_pos(sbi, sbi->meta_blkaddr);
	DBG_BUGON(off < meta_offset);
	inode->nid = (off - meta_offset) >> EROFS_ISLOTBITS;
	erofs_dbg("Assign nid %llu to file %s (mode %05o)",
		  inode->nid, inode->i_srcpath, inode->i_mode);
	return inode->nid;
}

static void erofs_d_invalidate(struct erofs_dentry *d)
{
	struct erofs_inode *const inode = d->inode;

	d->nid = erofs_lookupnid(inode);
	erofs_iput(inode);
}

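/*
 * Write out the directory contents: full blocks are flushed directly
 * via write_dirblock(), while a partial tail block is kept in idata so
 * it can later be inlined (or written as a tail block) by
 * erofs_write_tail_end().
 */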
static int erofs_write_dir_file(struct erofs_inode *dir)
{
	struct erofs_dentry *head = list_first_entry(&dir->i_subdirs,
						     struct erofs_dentry,
						     d_child);
	struct erofs_sb_info *sbi = dir->sbi;
	struct erofs_dentry *d;
	int ret;
	unsigned int q, used, blkno;

	q = used = blkno = 0;

	/* allocate dir main data */
	ret = __allocate_inode_bh_data(dir, erofs_blknr(sbi, dir->i_size), DIRA);
	if (ret)
		return ret;

	list_for_each_entry(d, &dir->i_subdirs, d_child) {
		const unsigned int len = strlen(d->name) +
			sizeof(struct erofs_dirent);

		erofs_d_invalidate(d);
		if (used + len > erofs_blksiz(sbi)) {
			ret = write_dirblock(sbi, q, head, d,
					     dir->u.i_blkaddr + blkno);
			if (ret)
				return ret;

			head = d;
			q = used = 0;
			++blkno;
		}
		used += len;
		q += sizeof(struct erofs_dirent);
	}

	DBG_BUGON(used > erofs_blksiz(sbi));
	if (used == erofs_blksiz(sbi)) {
		DBG_BUGON(dir->i_size % erofs_blksiz(sbi));
		DBG_BUGON(dir->idata_size);
		return write_dirblock(sbi, q, head, d, dir->u.i_blkaddr + blkno);
	}
	DBG_BUGON(used != dir->i_size % erofs_blksiz(sbi));
	if (used) {
		/* fill tail-end dir block */
		dir->idata = malloc(used);
		if (!dir->idata)
			return -ENOMEM;
		DBG_BUGON(used != dir->idata_size);
		fill_dirblock(dir->idata, dir->idata_size, q, head, d);
	}
	return 0;
}

int erofs_write_file_from_buffer(struct erofs_inode *inode, char *buf)
{
	struct erofs_sb_info *sbi = inode->sbi;
	const unsigned int nblocks = erofs_blknr(sbi, inode->i_size);
	int ret;

	inode->datalayout = EROFS_INODE_FLAT_INLINE;

	ret = __allocate_inode_bh_data(inode, nblocks, DATA);
	if (ret)
		return ret;

	if (nblocks)
		blk_write(sbi, buf, inode->u.i_blkaddr, nblocks);
	inode->idata_size = inode->i_size % erofs_blksiz(sbi);
	if (inode->idata_size) {
		inode->idata = malloc(inode->idata_size);
		if (!inode->idata)
			return -ENOMEM;
		memcpy(inode->idata, buf + erofs_pos(sbi, nblocks),
		       inode->idata_size);
	}
	return 0;
}

/* rules to decide whether a file could be compressed or not */
static bool erofs_file_is_compressible(struct erofs_inode *inode)
{
	if (cfg.c_compress_hints_file)
		return z_erofs_apply_compress_hints(inode);
	return true;
}

static int write_uncompressed_file_from_fd(struct erofs_inode *inode, int fd)
{
	int ret;
	unsigned int nblocks, i;
	struct erofs_sb_info *sbi = inode->sbi;

	inode->datalayout = EROFS_INODE_FLAT_INLINE;
	nblocks = inode->i_size / erofs_blksiz(sbi);

	ret = __allocate_inode_bh_data(inode, nblocks, DATA);
	if (ret)
		return ret;

	for (i = 0; i < nblocks; ++i) {
		char buf[EROFS_MAX_BLOCK_SIZE];

		ret = read(fd, buf, erofs_blksiz(sbi));
		if (ret != erofs_blksiz(sbi)) {
			if (ret < 0)
				return -errno;
			return -EAGAIN;
		}

		ret = blk_write(sbi, buf, inode->u.i_blkaddr + i, 1);
		if (ret)
			return ret;
	}

	/* read the tail-end data */
	inode->idata_size = inode->i_size % erofs_blksiz(sbi);
	if (inode->idata_size) {
		inode->idata = malloc(inode->idata_size);
		if (!inode->idata)
			return -ENOMEM;

		ret = read(fd, inode->idata, inode->idata_size);
		if (ret < inode->idata_size) {
			free(inode->idata);
			inode->idata = NULL;
			return -EIO;
		}
	}
	erofs_droid_blocklist_write(inode, inode->u.i_blkaddr, nblocks);
	return 0;
}

int erofs_write_file(struct erofs_inode *inode, int fd, u64 fpos)
{
	int ret;

	DBG_BUGON(!inode->i_size);

	if (cfg.c_chunkbits) {
		inode->u.chunkbits = cfg.c_chunkbits;
		/* chunk indexes when explicitly specified */
		inode->u.chunkformat = 0;
		if (cfg.c_force_chunkformat == FORCE_INODE_CHUNK_INDEXES)
			inode->u.chunkformat = EROFS_CHUNK_FORMAT_INDEXES;
		return erofs_blob_write_chunked_file(inode, fd, fpos);
	}

	if (cfg.c_compr_alg[0] && erofs_file_is_compressible(inode)) {
		ret = erofs_write_compressed_file(inode, fd);
		if (!ret || ret != -ENOSPC)
			return ret;

		ret = lseek(fd, fpos, SEEK_SET);
		if (ret < 0)
			return -errno;
	}

	/* fall back to fully uncompressed data */
	return write_uncompressed_file_from_fd(inode, fd);
}

static bool erofs_bh_flush_write_inode(struct erofs_buffer_head *bh)
{
	struct erofs_inode *const inode = bh->fsprivate;
	struct erofs_sb_info *sbi = inode->sbi;
	const u16 icount = EROFS_INODE_XATTR_ICOUNT(inode->xattr_isize);
	erofs_off_t off = erofs_btell(bh, false);
	union {
		struct erofs_inode_compact dic;
		struct erofs_inode_extended die;
	} u = { {0}, };
	int ret;

	switch (inode->inode_isize) {
	case sizeof(struct erofs_inode_compact):
		u.dic.i_format = cpu_to_le16(0 | (inode->datalayout << 1));
		u.dic.i_xattr_icount = cpu_to_le16(icount);
		u.dic.i_mode = cpu_to_le16(inode->i_mode);
		u.dic.i_nlink = cpu_to_le16(inode->i_nlink);
		u.dic.i_size = cpu_to_le32((u32)inode->i_size);

		u.dic.i_ino = cpu_to_le32(inode->i_ino[0]);

		u.dic.i_uid = cpu_to_le16((u16)inode->i_uid);
		u.dic.i_gid = cpu_to_le16((u16)inode->i_gid);

		switch (inode->i_mode & S_IFMT) {
		case S_IFCHR:
		case S_IFBLK:
		case S_IFIFO:
		case S_IFSOCK:
			u.dic.i_u.rdev = cpu_to_le32(inode->u.i_rdev);
			break;

		default:
			if (is_inode_layout_compression(inode))
				u.dic.i_u.compressed_blocks =
					cpu_to_le32(inode->u.i_blocks);
			else if (inode->datalayout ==
					EROFS_INODE_CHUNK_BASED)
				u.dic.i_u.c.format =
					cpu_to_le16(inode->u.chunkformat);
			else
				u.dic.i_u.raw_blkaddr =
					cpu_to_le32(inode->u.i_blkaddr);
			break;
		}
		break;
	case sizeof(struct erofs_inode_extended):
		u.die.i_format = cpu_to_le16(1 | (inode->datalayout << 1));
		u.die.i_xattr_icount = cpu_to_le16(icount);
		u.die.i_mode = cpu_to_le16(inode->i_mode);
		u.die.i_nlink = cpu_to_le32(inode->i_nlink);
		u.die.i_size = cpu_to_le64(inode->i_size);

		u.die.i_ino = cpu_to_le32(inode->i_ino[0]);

		u.die.i_uid = cpu_to_le32(inode->i_uid);
		u.die.i_gid = cpu_to_le32(inode->i_gid);

		u.die.i_mtime = cpu_to_le64(inode->i_mtime);
		u.die.i_mtime_nsec = cpu_to_le32(inode->i_mtime_nsec);

		switch (inode->i_mode & S_IFMT) {
		case S_IFCHR:
		case S_IFBLK:
		case S_IFIFO:
		case S_IFSOCK:
			u.die.i_u.rdev = cpu_to_le32(inode->u.i_rdev);
			break;

		default:
			if (is_inode_layout_compression(inode))
				u.die.i_u.compressed_blocks =
					cpu_to_le32(inode->u.i_blocks);
			else if (inode->datalayout ==
					EROFS_INODE_CHUNK_BASED)
				u.die.i_u.c.format =
					cpu_to_le16(inode->u.chunkformat);
			else
				u.die.i_u.raw_blkaddr =
					cpu_to_le32(inode->u.i_blkaddr);
			break;
		}
		break;
	default:
		erofs_err("unsupported on-disk inode version of nid %llu",
			  (unsigned long long)inode->nid);
		BUG_ON(1);
	}

	ret = dev_write(sbi, &u, off, inode->inode_isize);
	if (ret)
		return false;
	off += inode->inode_isize;

	if (inode->xattr_isize) {
		char *xattrs = erofs_export_xattr_ibody(inode);

		if (IS_ERR(xattrs))
			return false;

		ret = dev_write(sbi, xattrs, off, inode->xattr_isize);
		free(xattrs);
		if (ret)
			return false;

		off += inode->xattr_isize;
	}

	if (inode->extent_isize) {
		if (inode->datalayout == EROFS_INODE_CHUNK_BASED) {
			ret = erofs_blob_write_chunk_indexes(inode, off);
			if (ret)
				return false;
		} else {
			/* write compression metadata */
			off = roundup(off, 8);
			ret = dev_write(sbi, inode->compressmeta, off,
					inode->extent_isize);
			if (ret)
				return false;
			free(inode->compressmeta);
		}
	}

	inode->bh = NULL;
	erofs_iput(inode);
	return erofs_bh_flush_generic_end(bh);
}

static struct erofs_bhops erofs_write_inode_bhops = {
	.flush = erofs_bh_flush_write_inode,
};

static int erofs_prepare_tail_block(struct erofs_inode *inode)
{
	struct erofs_sb_info *sbi = inode->sbi;
	struct erofs_buffer_head *bh;
	int ret;

	if (!inode->idata_size)
		return 0;

	bh = inode->bh_data;
	if (bh) {
		/* expand a block as the tail block (should be successful) */
		ret = erofs_bh_balloon(bh, erofs_blksiz(sbi));
		if (ret != erofs_blksiz(sbi)) {
			DBG_BUGON(1);
			return -EIO;
		}
	} else {
		inode->lazy_tailblock = true;
	}
	return 0;
}

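/*
 * Allocate the metadata buffer for an inode (the core on-disk inode,
 * the xattr ibody and any extent metadata) and decide whether its
 * tail-end data can be inlined right after the metadata; otherwise an
 * extra tail block is reserved instead.
 */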
static int erofs_prepare_inode_buffer(struct erofs_inode *inode)
{
	unsigned int inodesize;
	struct erofs_buffer_head *bh, *ibh;

	DBG_BUGON(inode->bh || inode->bh_inline);

	inodesize = inode->inode_isize + inode->xattr_isize;
	if (inode->extent_isize)
		inodesize = roundup(inodesize, 8) + inode->extent_isize;

	/* TODO: tailpacking inline of chunk-based format isn't finalized */
	if (inode->datalayout == EROFS_INODE_CHUNK_BASED)
		goto noinline;

	if (!is_inode_layout_compression(inode)) {
		if (!cfg.c_inline_data && S_ISREG(inode->i_mode)) {
			inode->datalayout = EROFS_INODE_FLAT_PLAIN;
			goto noinline;
		}
		/*
		 * If the file size of an uncompressed file is block-aligned,
		 * use the EROFS_INODE_FLAT_PLAIN data layout.
		 */
		if (!inode->idata_size)
			inode->datalayout = EROFS_INODE_FLAT_PLAIN;
	}

	bh = erofs_balloc(INODE, inodesize, 0, inode->idata_size);
	if (bh == ERR_PTR(-ENOSPC)) {
		int ret;

		if (is_inode_layout_compression(inode))
			z_erofs_drop_inline_pcluster(inode);
		else
			inode->datalayout = EROFS_INODE_FLAT_PLAIN;
noinline:
		/* expand an extra block for tail-end data */
		ret = erofs_prepare_tail_block(inode);
		if (ret)
			return ret;
		bh = erofs_balloc(INODE, inodesize, 0, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		DBG_BUGON(inode->bh_inline);
	} else if (IS_ERR(bh)) {
		return PTR_ERR(bh);
	} else if (inode->idata_size) {
		if (is_inode_layout_compression(inode)) {
			DBG_BUGON(!cfg.c_ztailpacking);
			erofs_dbg("Inline %scompressed data (%u bytes) to %s",
				  inode->compressed_idata ? "" : "un",
				  inode->idata_size, inode->i_srcpath);
			erofs_sb_set_ztailpacking(inode->sbi);
		} else {
			inode->datalayout = EROFS_INODE_FLAT_INLINE;
			erofs_dbg("Inline tail-end data (%u bytes) to %s",
				  inode->idata_size, inode->i_srcpath);
		}

		/* allocate inline buffer */
		ibh = erofs_battach(bh, META, inode->idata_size);
		if (IS_ERR(ibh))
			return PTR_ERR(ibh);

		ibh->op = &erofs_skip_write_bhops;
		inode->bh_inline = ibh;
	}

	bh->fsprivate = erofs_igrab(inode);
	bh->op = &erofs_write_inode_bhops;
	inode->bh = bh;
	return 0;
}

static bool erofs_bh_flush_write_inline(struct erofs_buffer_head *bh)
{
	struct erofs_inode *const inode = bh->fsprivate;
	const erofs_off_t off = erofs_btell(bh, false);
	int ret;

	ret = dev_write(inode->sbi, inode->idata, off, inode->idata_size);
	if (ret)
		return false;

	inode->idata_size = 0;
	free(inode->idata);
	inode->idata = NULL;

	erofs_iput(inode);
	return erofs_bh_flush_generic_end(bh);
}

static struct erofs_bhops erofs_write_inline_bhops = {
	.flush = erofs_bh_flush_write_inline,
};

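/*
 * Flush the tail-end data of an inode: either into the inline buffer
 * reserved right after the inode metadata, or into a (possibly newly
 * allocated) tail block which is then zero-padded to the block size.
 */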
static int erofs_write_tail_end(struct erofs_inode *inode)
{
	struct erofs_sb_info *sbi = inode->sbi;
	struct erofs_buffer_head *bh, *ibh;

	bh = inode->bh_data;

	if (!inode->idata_size)
		goto out;

	/* have enough room to inline data */
	if (inode->bh_inline) {
		ibh = inode->bh_inline;

		ibh->fsprivate = erofs_igrab(inode);
		ibh->op = &erofs_write_inline_bhops;

		erofs_droid_blocklist_write_tail_end(inode, NULL_ADDR);
	} else {
		int ret;
		erofs_off_t pos, zero_pos;

		if (!bh) {
			bh = erofs_balloc(DATA, erofs_blksiz(sbi), 0, 0);
			if (IS_ERR(bh))
				return PTR_ERR(bh);
			bh->op = &erofs_skip_write_bhops;

			/* get blkaddr of bh */
			ret = erofs_mapbh(bh->block);
			inode->u.i_blkaddr = bh->block->blkaddr;
			inode->bh_data = bh;
		} else {
			if (inode->lazy_tailblock) {
				/* expand a tail block (should be successful) */
				ret = erofs_bh_balloon(bh, erofs_blksiz(sbi));
				if (ret != erofs_blksiz(sbi)) {
					DBG_BUGON(1);
					return -EIO;
				}
				inode->lazy_tailblock = false;
			}
			ret = erofs_mapbh(bh->block);
		}
		DBG_BUGON(ret < 0);
		pos = erofs_btell(bh, true) - erofs_blksiz(sbi);

		/* 0'ed data should be padded at head for 0padding conversion */
		if (erofs_sb_has_lz4_0padding(sbi) && inode->compressed_idata) {
			zero_pos = pos;
			pos += erofs_blksiz(sbi) - inode->idata_size;
		} else {
			/* pad 0'ed data for the other cases */
			zero_pos = pos + inode->idata_size;
		}
		ret = dev_write(sbi, inode->idata, pos, inode->idata_size);
		if (ret)
			return ret;

		DBG_BUGON(inode->idata_size > erofs_blksiz(sbi));
		if (inode->idata_size < erofs_blksiz(sbi)) {
			ret = dev_fillzero(sbi, zero_pos,
					   erofs_blksiz(sbi) - inode->idata_size,
					   false);
			if (ret)
				return ret;
		}
		inode->idata_size = 0;
		free(inode->idata);
		inode->idata = NULL;

		erofs_droid_blocklist_write_tail_end(inode, erofs_blknr(sbi, pos));
	}
out:
	/* bh_data can now be dropped directly */
	if (bh) {
		/*
		 * Don't leave written DATA buffers in the global buffer
		 * list; they would slow down erofs_balloc().
		 */
		erofs_bdrop(bh, false);
		inode->bh_data = NULL;
	}
	return 0;
}

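/*
 * Compact (32-byte) on-disk inodes only carry 16-bit uid/gid/nlink
 * fields, a 32-bit file size and no timestamp of their own, so fall
 * back to the 64-byte extended form whenever any of these would
 * overflow or a per-inode mtime has to be recorded.
 */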
static bool erofs_should_use_inode_extended(struct erofs_inode *inode)
{
	if (cfg.c_force_inodeversion == FORCE_INODE_EXTENDED)
		return true;
	if (inode->i_size > UINT_MAX)
		return true;
	if (erofs_is_packed_inode(inode))
		return false;
	if (inode->i_uid > USHRT_MAX)
		return true;
	if (inode->i_gid > USHRT_MAX)
		return true;
	if (inode->i_nlink > USHRT_MAX)
		return true;
	if ((inode->i_mtime != inode->sbi->build_time ||
	     inode->i_mtime_nsec != inode->sbi->build_time_nsec) &&
	    !cfg.c_ignore_mtime)
		return true;
	return false;
}

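/* encode a device number in the same way as new_encode_dev() in Linux */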
u32 erofs_new_encode_dev(dev_t dev)
{
	const unsigned int major = major(dev);
	const unsigned int minor = minor(dev);

	return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
}

#ifdef WITH_ANDROID
int erofs_droid_inode_fsconfig(struct erofs_inode *inode,
			       struct stat *st,
			       const char *path)
{
	/* filesystem_config does not preserve file type bits */
	mode_t stat_file_type_mask = st->st_mode & S_IFMT;
	unsigned int uid = 0, gid = 0, mode = 0;
	const char *fspath;
	char *decorated = NULL;

	inode->capabilities = 0;
	if (!cfg.fs_config_file && !cfg.mount_point)
		return 0;
	/* avoid loading special inodes */
	if (path == EROFS_PACKED_INODE)
		return 0;

	if (!cfg.mount_point ||
	/* have to drop the mountpoint for rootdir of canned fsconfig */
	    (cfg.fs_config_file && erofs_fspath(path)[0] == '\0')) {
		fspath = erofs_fspath(path);
	} else {
		if (asprintf(&decorated, "%s/%s", cfg.mount_point,
			     erofs_fspath(path)) <= 0)
			return -ENOMEM;
		fspath = decorated;
	}

	if (cfg.fs_config_file)
		canned_fs_config(fspath, S_ISDIR(st->st_mode),
				 cfg.target_out_path,
				 &uid, &gid, &mode, &inode->capabilities);
	else
		fs_config(fspath, S_ISDIR(st->st_mode),
			  cfg.target_out_path,
			  &uid, &gid, &mode, &inode->capabilities);

	erofs_dbg("/%s -> mode = 0x%x, uid = 0x%x, gid = 0x%x, capabilities = 0x%" PRIx64,
		  fspath, mode, uid, gid, inode->capabilities);

	if (decorated)
		free(decorated);
	st->st_uid = uid;
	st->st_gid = gid;
	st->st_mode = mode | stat_file_type_mask;
	return 0;
}
#else
static int erofs_droid_inode_fsconfig(struct erofs_inode *inode,
				      struct stat *st,
				      const char *path)
{
	return 0;
}
#endif

int __erofs_fill_inode(struct erofs_inode *inode, struct stat *st,
		       const char *path)
{
	int err = erofs_droid_inode_fsconfig(inode, st, path);
	struct erofs_sb_info *sbi = inode->sbi;

	if (err)
		return err;

	inode->i_uid = cfg.c_uid == -1 ? st->st_uid : cfg.c_uid;
	inode->i_gid = cfg.c_gid == -1 ? st->st_gid : cfg.c_gid;

	if (inode->i_uid + cfg.c_uid_offset < 0)
		erofs_err("uid overflow @ %s", path);
	inode->i_uid += cfg.c_uid_offset;

	if (inode->i_gid + cfg.c_gid_offset < 0)
		erofs_err("gid overflow @ %s", path);
	inode->i_gid += cfg.c_gid_offset;

	inode->i_mtime = st->st_mtime;
	inode->i_mtime_nsec = ST_MTIM_NSEC(st);

	switch (cfg.c_timeinherit) {
	case TIMESTAMP_CLAMPING:
		if (inode->i_mtime < sbi->build_time)
			break;
	case TIMESTAMP_FIXED:
		inode->i_mtime = sbi->build_time;
		inode->i_mtime_nsec = sbi->build_time_nsec;
	default:
		break;
	}

	return 0;
}

static int erofs_fill_inode(struct erofs_inode *inode, struct stat *st,
			    const char *path)
{
	int err = __erofs_fill_inode(inode, st, path);

	if (err)
		return err;

	inode->i_mode = st->st_mode;
	inode->i_nlink = 1;	/* fix up later if needed */

	switch (inode->i_mode & S_IFMT) {
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		inode->u.i_rdev = erofs_new_encode_dev(st->st_rdev);
	case S_IFDIR:
		inode->i_size = 0;
		break;
	case S_IFREG:
	case S_IFLNK:
		inode->i_size = st->st_size;
		break;
	default:
		return -EINVAL;
	}

	inode->i_srcpath = strdup(path);
	if (!inode->i_srcpath)
		return -ENOMEM;

	if (!S_ISDIR(inode->i_mode)) {
		inode->dev = st->st_dev;
		inode->i_ino[1] = st->st_ino;
	}

	if (erofs_should_use_inode_extended(inode)) {
		if (cfg.c_force_inodeversion == FORCE_INODE_COMPACT) {
			erofs_err("file %s cannot be in compact form",
				  inode->i_srcpath);
			return -EINVAL;
		}
		inode->inode_isize = sizeof(struct erofs_inode_extended);
	} else {
		inode->inode_isize = sizeof(struct erofs_inode_compact);
	}

	erofs_insert_ihash(inode, st->st_dev, st->st_ino);
	return 0;
}

struct erofs_inode *erofs_new_inode(void)
{
	struct erofs_inode *inode;

	inode = calloc(1, sizeof(struct erofs_inode));
	if (!inode)
		return ERR_PTR(-ENOMEM);

	inode->sbi = &sbi;
	inode->i_ino[0] = sbi.inos++;	/* inode serial number */
	inode->i_count = 1;
	inode->datalayout = EROFS_INODE_FLAT_PLAIN;

	init_list_head(&inode->i_hash);
	init_list_head(&inode->i_subdirs);
	init_list_head(&inode->i_xattrs);
	return inode;
}

/* get the inode from the (source) path */
static struct erofs_inode *erofs_iget_from_path(const char *path, bool is_src)
{
	struct stat st;
	struct erofs_inode *inode;
	int ret;

	/* currently, only source path is supported */
	if (!is_src)
		return ERR_PTR(-EINVAL);

	ret = lstat(path, &st);
	if (ret)
		return ERR_PTR(-errno);

	/*
	 * Look up the hash table first; if it already exists, we have a
	 * hard link and can just return it.  Don't look up directories,
	 * since hard-linked directories aren't allowed.
	 */
	if (!S_ISDIR(st.st_mode)) {
		inode = erofs_iget(st.st_dev, st.st_ino);
		if (inode)
			return inode;
	}

	/* not found in the inode cache */
	inode = erofs_new_inode();
	if (IS_ERR(inode))
		return inode;

	ret = erofs_fill_inode(inode, &st, path);
	if (ret) {
		erofs_iput(inode);
		return ERR_PTR(ret);
	}
	return inode;
}

static void erofs_fixup_meta_blkaddr(struct erofs_inode *rootdir)
{
	const erofs_off_t rootnid_maxoffset = 0xffff << EROFS_ISLOTBITS;
	struct erofs_buffer_head *const bh = rootdir->bh;
	struct erofs_sb_info *sbi = rootdir->sbi;
	erofs_off_t off, meta_offset;

	erofs_mapbh(bh->block);
	off = erofs_btell(bh, false);

	if (off > rootnid_maxoffset)
		meta_offset = round_up(off - rootnid_maxoffset, erofs_blksiz(sbi));
	else
		meta_offset = 0;
	sbi->meta_blkaddr = erofs_blknr(sbi, meta_offset);
	rootdir->nid = (off - meta_offset) >> EROFS_ISLOTBITS;
}

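/*
 * Build one node of the tree: for non-directories the file data is
 * written out right away, while for directories the source dir is
 * scanned, dentries are allocated and every newly found child inode is
 * queued on @dirs so the caller can keep processing the tree
 * breadth-first.
 */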
static int erofs_mkfs_build_tree(struct erofs_inode *dir, struct list_head *dirs)
{
	int ret;
	DIR *_dir;
	struct dirent *dp;
	struct erofs_dentry *d;
	unsigned int nr_subdirs, i_nlink;

	ret = erofs_scan_file_xattrs(dir);
	if (ret < 0)
		return ret;

	ret = erofs_prepare_xattr_ibody(dir);
	if (ret < 0)
		return ret;

	if (!S_ISDIR(dir->i_mode)) {
		if (S_ISLNK(dir->i_mode)) {
			char *const symlink = malloc(dir->i_size);

			if (!symlink)
				return -ENOMEM;
			ret = readlink(dir->i_srcpath, symlink, dir->i_size);
			if (ret < 0) {
				free(symlink);
				return -errno;
			}
			ret = erofs_write_file_from_buffer(dir, symlink);
			free(symlink);
		} else if (dir->i_size) {
			int fd = open(dir->i_srcpath, O_RDONLY | O_BINARY);

			if (fd < 0)
				return -errno;

			ret = erofs_write_file(dir, fd, 0);
			close(fd);
		} else {
			ret = 0;
		}
		if (ret)
			return ret;

		erofs_prepare_inode_buffer(dir);
		erofs_write_tail_end(dir);
		return 0;
	}

	_dir = opendir(dir->i_srcpath);
	if (!_dir) {
		erofs_err("failed to opendir at %s: %s",
			  dir->i_srcpath, erofs_strerror(errno));
		return -errno;
	}

	nr_subdirs = 0;
	while (1) {
		/*
		 * Set errno to 0 before calling readdir() in order to
		 * distinguish end of stream from an error.
		 */
		errno = 0;
		dp = readdir(_dir);
		if (!dp)
			break;

		if (is_dot_dotdot(dp->d_name))
			continue;

		/* skip if it's an excluded file */
		if (erofs_is_exclude_path(dir->i_srcpath, dp->d_name))
			continue;

		d = erofs_d_alloc(dir, dp->d_name);
		if (IS_ERR(d)) {
			ret = PTR_ERR(d);
			goto err_closedir;
		}
		nr_subdirs++;
	}

	if (errno) {
		ret = -errno;
		goto err_closedir;
	}
	closedir(_dir);

	ret = erofs_prepare_dir_file(dir, nr_subdirs);
	if (ret)
		return ret;

	ret = erofs_prepare_inode_buffer(dir);
	if (ret)
		return ret;
	dir->bh->op = &erofs_skip_write_bhops;

	if (IS_ROOT(dir))
		erofs_fixup_meta_blkaddr(dir);

	i_nlink = 0;
	list_for_each_entry(d, &dir->i_subdirs, d_child) {
		char buf[PATH_MAX];
		unsigned char ftype;
		struct erofs_inode *inode;

		if (is_dot_dotdot(d->name)) {
			++i_nlink;
			continue;
		}

		ret = snprintf(buf, PATH_MAX, "%s/%s",
			       dir->i_srcpath, d->name);
		if (ret < 0 || ret >= PATH_MAX) {
			/* bail out if the path is too long */
			goto fail;
		}

		inode = erofs_iget_from_path(buf, true);

		if (IS_ERR(inode)) {
			ret = PTR_ERR(inode);
fail:
			d->inode = NULL;
			d->type = EROFS_FT_UNKNOWN;
			return ret;
		}

		/* a hardlink to an existing inode */
		if (inode->i_parent) {
			++inode->i_nlink;
		} else {
			inode->i_parent = dir;
			erofs_igrab(inode);
			list_add_tail(&inode->i_subdirs, dirs);
		}
		ftype = erofs_mode_to_ftype(inode->i_mode);
		i_nlink += (ftype == EROFS_FT_DIR);
		d->inode = inode;
		d->type = ftype;
		erofs_info("file %s/%s dumped (type %u)",
			   dir->i_srcpath, d->name, d->type);
	}
	/*
	 * If there are too many subdirs for the compact form, set nlink = 1
	 * rather than upgrading to the extended form instead.
	 */
	if (i_nlink > USHRT_MAX &&
	    dir->inode_isize == sizeof(struct erofs_inode_compact))
		dir->i_nlink = 1;
	else
		dir->i_nlink = i_nlink;
	return 0;

err_closedir:
	closedir(_dir);
	return ret;
}

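/*
 * Build the whole image tree from a source directory: inodes are
 * processed breadth-first off a local queue, and directories are
 * chained on a dumpdir list so their dirents are only written once all
 * of their children have been assigned nids.
 */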
struct erofs_inode *erofs_mkfs_build_tree_from_path(const char *path)
{
	LIST_HEAD(dirs);
	struct erofs_inode *inode, *root, *dumpdir;

	root = erofs_iget_from_path(path, true);
	if (IS_ERR(root))
		return root;

	(void)erofs_igrab(root);
	root->i_parent = root;	/* rootdir mark */
	list_add(&root->i_subdirs, &dirs);

	dumpdir = NULL;
	do {
		int err;
		char *trimmed;

		inode = list_first_entry(&dirs, struct erofs_inode, i_subdirs);
		list_del(&inode->i_subdirs);
		init_list_head(&inode->i_subdirs);

		trimmed = erofs_trim_for_progressinfo(
				erofs_fspath(inode->i_srcpath),
				sizeof("Processing ...") - 1);
		erofs_update_progressinfo("Processing %s ...", trimmed);
		free(trimmed);

		err = erofs_mkfs_build_tree(inode, &dirs);
		if (err) {
			root = ERR_PTR(err);
			break;
		}

		if (S_ISDIR(inode->i_mode)) {
			inode->next_dirwrite = dumpdir;
			dumpdir = inode;
		} else {
			erofs_iput(inode);
		}
	} while (!list_empty(&dirs));

	while (dumpdir) {
		inode = dumpdir;
		erofs_write_dir_file(inode);
		erofs_write_tail_end(inode);
		inode->bh->op = &erofs_write_inode_bhops;
		dumpdir = inode->next_dirwrite;
		erofs_iput(inode);
	}
	return root;
}

struct erofs_inode *erofs_mkfs_build_special_from_fd(int fd, const char *name)
{
	struct stat st;
	struct erofs_inode *inode;
	int ret;

	ret = lseek(fd, 0, SEEK_SET);
	if (ret < 0)
		return ERR_PTR(-errno);

	ret = fstat(fd, &st);
	if (ret)
		return ERR_PTR(-errno);

	inode = erofs_new_inode();
	if (IS_ERR(inode))
		return inode;

	if (name == EROFS_PACKED_INODE) {
		st.st_uid = st.st_gid = 0;
		st.st_nlink = 0;
	}

	ret = erofs_fill_inode(inode, &st, name);
	if (ret) {
		free(inode);
		return ERR_PTR(ret);
	}

	if (name == EROFS_PACKED_INODE) {
		inode->sbi->packed_nid = EROFS_PACKED_NID_UNALLOCATED;
		inode->nid = inode->sbi->packed_nid;
	}

	ret = erofs_write_compressed_file(inode, fd);
	if (ret == -ENOSPC) {
		ret = lseek(fd, 0, SEEK_SET);
		if (ret < 0)
			return ERR_PTR(-errno);

		ret = write_uncompressed_file_from_fd(inode, fd);
	}

	if (ret) {
		DBG_BUGON(ret == -ENOSPC);
		return ERR_PTR(ret);
	}
	erofs_prepare_inode_buffer(inode);
	erofs_write_tail_end(inode);
	return inode;
}

int erofs_rebuild_dump_tree(struct erofs_inode *dir)
{
	struct erofs_dentry *d, *n;
	unsigned int nr_subdirs;
	int ret;

	if (erofs_should_use_inode_extended(dir)) {
		if (cfg.c_force_inodeversion == FORCE_INODE_COMPACT) {
			erofs_err("file %s cannot be in compact form",
				  dir->i_srcpath);
			return -EINVAL;
		}
		dir->inode_isize = sizeof(struct erofs_inode_extended);
	} else {
		dir->inode_isize = sizeof(struct erofs_inode_compact);
	}

	/* strip all unnecessary overlayfs xattrs when ovlfs_strip is enabled */
	if (cfg.c_ovlfs_strip)
		erofs_clear_opaque_xattr(dir);
	else if (dir->whiteouts)
		erofs_set_origin_xattr(dir);

	ret = erofs_prepare_xattr_ibody(dir);
	if (ret < 0)
		return ret;

	if (!S_ISDIR(dir->i_mode)) {
		if (dir->bh)
			return 0;
		if (S_ISLNK(dir->i_mode)) {
			ret = erofs_write_file_from_buffer(dir, dir->i_link);
			free(dir->i_link);
			dir->i_link = NULL;
		} else if (dir->with_diskbuf) {
			u64 fpos;

			ret = erofs_diskbuf_getfd(dir->i_diskbuf, &fpos);
			if (ret >= 0)
				ret = erofs_write_file(dir, ret, fpos);
			erofs_diskbuf_close(dir->i_diskbuf);
			free(dir->i_diskbuf);
			dir->i_diskbuf = NULL;
			dir->with_diskbuf = false;
		} else {
			ret = 0;
		}
		if (ret)
			return ret;
		ret = erofs_prepare_inode_buffer(dir);
		if (ret)
			return ret;
		erofs_write_tail_end(dir);
		return 0;
	}

	nr_subdirs = 0;
	list_for_each_entry_safe(d, n, &dir->i_subdirs, d_child) {
		if (cfg.c_ovlfs_strip && erofs_inode_is_whiteout(d->inode)) {
			erofs_dbg("remove whiteout %s", d->inode->i_srcpath);
			list_del(&d->d_child);
			erofs_d_invalidate(d);
			free(d);
			continue;
		}
		++nr_subdirs;
	}

	ret = erofs_prepare_dir_layout(dir, nr_subdirs);
	if (ret)
		return ret;

	ret = erofs_prepare_inode_buffer(dir);
	if (ret)
		return ret;
	dir->bh->op = &erofs_skip_write_bhops;

	if (IS_ROOT(dir))
		erofs_fixup_meta_blkaddr(dir);

	list_for_each_entry(d, &dir->i_subdirs, d_child) {
		struct erofs_inode *inode;

		if (is_dot_dotdot(d->name))
			continue;

		inode = erofs_igrab(d->inode);
		ret = erofs_rebuild_dump_tree(inode);
		dir->i_nlink += (erofs_mode_to_ftype(inode->i_mode) == EROFS_FT_DIR);
		erofs_iput(inode);
		if (ret)
			return ret;
	}
	erofs_write_dir_file(dir);
	erofs_write_tail_end(dir);
	dir->bh->op = &erofs_write_inode_bhops;
	return 0;
}