1 // SPDX-License-Identifier: GPL-2.0+ OR Apache-2.0
2 /*
3 * Copyright (C) 2018-2019 HUAWEI, Inc.
4 * http://www.huawei.com/
5 * Created by Li Guifu <bluce.liguifu@huawei.com>
6 * with heavy changes by Gao Xiang <gaoxiang25@huawei.com>
7 */
8 #define _GNU_SOURCE
9 #include <string.h>
10 #include <stdlib.h>
11 #include <stdio.h>
12 #include <sys/stat.h>
13 #include <config.h>
14 #if defined(HAVE_SYS_SYSMACROS_H)
15 #include <sys/sysmacros.h>
16 #endif
17 #include <dirent.h>
18 #include "erofs/print.h"
19 #include "erofs/inode.h"
20 #include "erofs/cache.h"
21 #include "erofs/io.h"
22 #include "erofs/compress.h"
23 #include "erofs/xattr.h"
24 #include "erofs/exclude.h"
25 #include "erofs/block_list.h"
26 #include "erofs/compress_hints.h"
27 #include "erofs/blobchunk.h"
28 #include "liberofs_private.h"
29
/* shift that moves the S_IFMT file-type bits of st_mode down to an index */
#define S_SHIFT 12

/* lookup table: POSIX file-type bits (S_IFMT >> S_SHIFT) -> EROFS_FT_* */
static unsigned char erofs_ftype_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]  = EROFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]  = EROFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]  = EROFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]  = EROFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]  = EROFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT] = EROFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]  = EROFS_FT_SYMLINK,
};
40
/* translate a POSIX st_mode value into the matching EROFS dirent file type */
unsigned char erofs_mode_to_ftype(umode_t mode)
{
	return erofs_ftype_by_mode[(mode & S_IFMT) >> S_SHIFT];
}
45
/* number of buckets of the in-memory inode hash table */
#define NR_INODE_HASHTABLE 16384

/*
 * Hash table of all inodes built so far, chained via inode->i_hash;
 * used to deduplicate hard links by their source (dev, ino) pair.
 */
struct list_head inode_hashtable[NR_INODE_HASHTABLE];
49
erofs_inode_manager_init(void)50 void erofs_inode_manager_init(void)
51 {
52 unsigned int i;
53
54 for (i = 0; i < NR_INODE_HASHTABLE; ++i)
55 init_list_head(&inode_hashtable[i]);
56 }
57
/* take an extra reference on @inode and return it (cannot fail) */
static struct erofs_inode *erofs_igrab(struct erofs_inode *inode)
{
	++inode->i_count;
	return inode;
}
63
/*
 * Look up a cached inode by its source (dev, ino) pair — this is how hard
 * links are detected.  Returns a referenced inode on a hit, NULL otherwise.
 * Note: i_ino[1] holds the source inode number (see erofs_fill_inode()).
 */
struct erofs_inode *erofs_iget(dev_t dev, ino_t ino)
{
	struct list_head *head =
		&inode_hashtable[(ino ^ dev) % NR_INODE_HASHTABLE];
	struct erofs_inode *inode;

	list_for_each_entry(inode, head, i_hash)
		if (inode->i_ino[1] == ino && inode->dev == dev)
			return erofs_igrab(inode);
	return NULL;
}
76
/*
 * Look up a cached inode by its on-disk nid.  Returns a referenced inode
 * on a hit, NULL when no matching inode is found in that hash bucket.
 */
struct erofs_inode *erofs_iget_by_nid(erofs_nid_t nid)
{
	struct list_head *head =
		&inode_hashtable[nid % NR_INODE_HASHTABLE];
	struct erofs_inode *inode;

	list_for_each_entry(inode, head, i_hash)
		if (inode->nid == nid)
			return erofs_igrab(inode);
	return NULL;
}
88
erofs_iput(struct erofs_inode * inode)89 unsigned int erofs_iput(struct erofs_inode *inode)
90 {
91 struct erofs_dentry *d, *t;
92
93 if (inode->i_count > 1)
94 return --inode->i_count;
95
96 list_for_each_entry_safe(d, t, &inode->i_subdirs, d_child)
97 free(d);
98
99 if (inode->eof_tailraw)
100 free(inode->eof_tailraw);
101 list_del(&inode->i_hash);
102 free(inode);
103 return 0;
104 }
105
erofs_d_alloc(struct erofs_inode * parent,const char * name)106 struct erofs_dentry *erofs_d_alloc(struct erofs_inode *parent,
107 const char *name)
108 {
109 struct erofs_dentry *d = malloc(sizeof(*d));
110
111 if (!d)
112 return ERR_PTR(-ENOMEM);
113
114 strncpy(d->name, name, EROFS_NAME_LEN - 1);
115 d->name[EROFS_NAME_LEN - 1] = '\0';
116
117 list_add_tail(&d->d_child, &parent->i_subdirs);
118 return d;
119 }
120
121 /* allocate main data for a inode */
/*
 * Reserve the main (block-aligned) data area for @inode; @nblocks may be 0
 * when the file consists of tail-end data only.  On success, u.i_blkaddr
 * holds the first block address (or NULL_ADDR when nothing was reserved).
 */
static int __allocate_inode_bh_data(struct erofs_inode *inode,
				    unsigned long nblocks)
{
	struct erofs_buffer_head *bh;
	int ret;

	if (!nblocks) {
		/* it has only tail-end data */
		inode->u.i_blkaddr = NULL_ADDR;
		return 0;
	}

	/* allocate main data buffer */
	bh = erofs_balloc(DATA, blknr_to_addr(nblocks), 0, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	/* data blocks are written directly elsewhere; skip them at flush */
	bh->op = &erofs_skip_write_bhops;
	inode->bh_data = bh;

	/* get blkaddr of the bh */
	ret = erofs_mapbh(bh->block);
	DBG_BUGON(ret < 0);

	/* write blocks except for the tail-end block */
	inode->u.i_blkaddr = bh->block->blkaddr;
	return 0;
}
150
comp_subdir(const void * a,const void * b)151 static int comp_subdir(const void *a, const void *b)
152 {
153 const struct erofs_dentry *da, *db;
154
155 da = *((const struct erofs_dentry **)a);
156 db = *((const struct erofs_dentry **)b);
157 return strcmp(da->name, db->name);
158 }
159
/*
 * Finalize the dentry list of @dir: add "." / "..", sort all entries by
 * name, compute the on-disk directory size and link count, and reserve
 * the directory's block-aligned data area.  @nr_subdirs is the number of
 * entries already attached (excluding "." and "..").
 */
int erofs_prepare_dir_file(struct erofs_inode *dir, unsigned int nr_subdirs)
{
	struct erofs_dentry *d, *n, **sorted_d;
	unsigned int d_size, i_nlink, i;
	int ret;

	/* dot is pointed to the current dir inode */
	d = erofs_d_alloc(dir, ".");
	if (IS_ERR(d))
		return PTR_ERR(d);
	d->inode = erofs_igrab(dir);
	d->type = EROFS_FT_DIR;

	/* dotdot is pointed to the parent dir */
	d = erofs_d_alloc(dir, "..");
	if (IS_ERR(d))
		return PTR_ERR(d);
	d->inode = erofs_igrab(dir->i_parent);
	d->type = EROFS_FT_DIR;

	/* sort subdirs: move them into a temporary array, qsort, re-chain */
	nr_subdirs += 2;
	sorted_d = malloc(nr_subdirs * sizeof(d));
	if (!sorted_d)
		return -ENOMEM;
	i = 0;
	list_for_each_entry_safe(d, n, &dir->i_subdirs, d_child) {
		list_del(&d->d_child);
		sorted_d[i++] = d;
	}
	DBG_BUGON(i != nr_subdirs);
	qsort(sorted_d, nr_subdirs, sizeof(d), comp_subdir);
	for (i = 0; i < nr_subdirs; i++)
		list_add_tail(&sorted_d[i]->d_child, &dir->i_subdirs);
	free(sorted_d);

	/* let's calculate dir size and update i_nlink */
	d_size = 0;
	i_nlink = 0;
	list_for_each_entry(d, &dir->i_subdirs, d_child) {
		int len = strlen(d->name) + sizeof(struct erofs_dirent);

		/* an on-disk dirent must not cross a block boundary */
		if (d_size % EROFS_BLKSIZ + len > EROFS_BLKSIZ)
			d_size = round_up(d_size, EROFS_BLKSIZ);
		d_size += len;

		/* each directory entry (incl. "." and "..") adds one link */
		i_nlink += (d->type == EROFS_FT_DIR);
	}
	dir->i_size = d_size;
	/*
	 * if there're too many subdirs as compact form, set nlink=1
	 * rather than upgrade to use extended form instead.
	 */
	if (i_nlink > USHRT_MAX &&
	    dir->inode_isize == sizeof(struct erofs_inode_compact))
		dir->i_nlink = 1;
	else
		dir->i_nlink = i_nlink;

	/* no compression for all dirs */
	dir->datalayout = EROFS_INODE_FLAT_INLINE;

	/* allocate dir main data */
	ret = __allocate_inode_bh_data(dir, erofs_blknr(d_size));
	if (ret)
		return ret;

	/* it will be used in erofs_prepare_inode_buffer */
	dir->idata_size = d_size % EROFS_BLKSIZ;
	return 0;
}
231
fill_dirblock(char * buf,unsigned int size,unsigned int q,struct erofs_dentry * head,struct erofs_dentry * end)232 static void fill_dirblock(char *buf, unsigned int size, unsigned int q,
233 struct erofs_dentry *head, struct erofs_dentry *end)
234 {
235 unsigned int p = 0;
236
237 /* write out all erofs_dirents + filenames */
238 while (head != end) {
239 const unsigned int namelen = strlen(head->name);
240 struct erofs_dirent d = {
241 .nid = cpu_to_le64(head->nid),
242 .nameoff = cpu_to_le16(q),
243 .file_type = head->type,
244 };
245
246 memcpy(buf + p, &d, sizeof(d));
247 memcpy(buf + q, head->name, namelen);
248 p += sizeof(d);
249 q += namelen;
250
251 head = list_next_entry(head, d_child);
252 }
253 memset(buf + q, 0, size - q);
254 }
255
/* materialize one full directory block for [head, end) and write it out */
static int write_dirblock(unsigned int q, struct erofs_dentry *head,
			  struct erofs_dentry *end, erofs_blk_t blkaddr)
{
	char block[EROFS_BLKSIZ];

	fill_dirblock(block, sizeof(block), q, head, end);
	return blk_write(block, blkaddr, 1);
}
264
/*
 * Write out all directory blocks of @dir.  Entries are packed greedily:
 * whenever the next entry would overflow the current block, the pending
 * range [head, d) is flushed as one block.  A partially-filled last block
 * becomes tail-end (inline) data instead of being written here.
 */
static int erofs_write_dir_file(struct erofs_inode *dir)
{
	struct erofs_dentry *head = list_first_entry(&dir->i_subdirs,
						     struct erofs_dentry,
						     d_child);
	struct erofs_dentry *d;
	int ret;
	unsigned int q, used, blkno;

	/* q: name-area start offset; used: bytes consumed in current block */
	q = used = blkno = 0;

	list_for_each_entry(d, &dir->i_subdirs, d_child) {
		const unsigned int len = strlen(d->name) +
			sizeof(struct erofs_dirent);

		/* current entry doesn't fit: flush [head, d) as one block */
		if (used + len > EROFS_BLKSIZ) {
			ret = write_dirblock(q, head, d,
					     dir->u.i_blkaddr + blkno);
			if (ret)
				return ret;

			head = d;
			q = used = 0;
			++blkno;
		}
		used += len;
		q += sizeof(struct erofs_dirent);
	}

	DBG_BUGON(used > EROFS_BLKSIZ);
	if (used == EROFS_BLKSIZ) {
		/* last block is exactly full — no tail-end data remains */
		DBG_BUGON(dir->i_size % EROFS_BLKSIZ);
		DBG_BUGON(dir->idata_size);
		return write_dirblock(q, head, d, dir->u.i_blkaddr + blkno);
	}
	DBG_BUGON(used != dir->i_size % EROFS_BLKSIZ);
	if (used) {
		/* fill tail-end dir block (written later as inline data) */
		dir->idata = malloc(used);
		if (!dir->idata)
			return -ENOMEM;
		DBG_BUGON(used != dir->idata_size);
		fill_dirblock(dir->idata, dir->idata_size, q, head, d);
	}
	return 0;
}
311
erofs_write_file_from_buffer(struct erofs_inode * inode,char * buf)312 static int erofs_write_file_from_buffer(struct erofs_inode *inode, char *buf)
313 {
314 const unsigned int nblocks = erofs_blknr(inode->i_size);
315 int ret;
316
317 inode->datalayout = EROFS_INODE_FLAT_INLINE;
318
319 ret = __allocate_inode_bh_data(inode, nblocks);
320 if (ret)
321 return ret;
322
323 if (nblocks)
324 blk_write(buf, inode->u.i_blkaddr, nblocks);
325 inode->idata_size = inode->i_size % EROFS_BLKSIZ;
326 if (inode->idata_size) {
327 inode->idata = malloc(inode->idata_size);
328 if (!inode->idata)
329 return -ENOMEM;
330 memcpy(inode->idata, buf + blknr_to_addr(nblocks),
331 inode->idata_size);
332 }
333 return 0;
334 }
335
336 /* rules to decide whether a file could be compressed or not */
erofs_file_is_compressible(struct erofs_inode * inode)337 static bool erofs_file_is_compressible(struct erofs_inode *inode)
338 {
339 if (cfg.c_compress_hints_file)
340 return z_erofs_apply_compress_hints(inode);
341 return true;
342 }
343
write_uncompressed_file_from_fd(struct erofs_inode * inode,int fd)344 static int write_uncompressed_file_from_fd(struct erofs_inode *inode, int fd)
345 {
346 int ret;
347 unsigned int nblocks, i;
348
349 inode->datalayout = EROFS_INODE_FLAT_INLINE;
350 nblocks = inode->i_size / EROFS_BLKSIZ;
351
352 ret = __allocate_inode_bh_data(inode, nblocks);
353 if (ret)
354 return ret;
355
356 for (i = 0; i < nblocks; ++i) {
357 char buf[EROFS_BLKSIZ];
358
359 ret = read(fd, buf, EROFS_BLKSIZ);
360 if (ret != EROFS_BLKSIZ) {
361 if (ret < 0)
362 return -errno;
363 return -EAGAIN;
364 }
365
366 ret = blk_write(buf, inode->u.i_blkaddr + i, 1);
367 if (ret)
368 return ret;
369 }
370
371 /* read the tail-end data */
372 inode->idata_size = inode->i_size % EROFS_BLKSIZ;
373 if (inode->idata_size) {
374 inode->idata = malloc(inode->idata_size);
375 if (!inode->idata)
376 return -ENOMEM;
377
378 ret = read(fd, inode->idata, inode->idata_size);
379 if (ret < inode->idata_size) {
380 free(inode->idata);
381 inode->idata = NULL;
382 return -EIO;
383 }
384 }
385 erofs_droid_blocklist_write(inode, inode->u.i_blkaddr, nblocks);
386 return 0;
387 }
388
/*
 * Top-level data writer for an inode: dispatch to the chunk-based path,
 * the compressed path, or plain uncompressed blocks depending on config,
 * falling back to uncompressed when compression reports -ENOSPC.
 */
int erofs_write_file(struct erofs_inode *inode)
{
	int ret, fd;

	if (!inode->i_size) {
		/* empty files carry no data at all */
		inode->datalayout = EROFS_INODE_FLAT_PLAIN;
		return 0;
	}

	if (cfg.c_chunkbits) {
		inode->u.chunkbits = cfg.c_chunkbits;
		/* chunk indexes when explicitly specified */
		inode->u.chunkformat = 0;
		if (cfg.c_force_chunkformat == FORCE_INODE_CHUNK_INDEXES)
			inode->u.chunkformat = EROFS_CHUNK_FORMAT_INDEXES;
		return erofs_blob_write_chunked_file(inode);
	}

	if (cfg.c_compr_alg_master && erofs_file_is_compressible(inode)) {
		ret = erofs_write_compressed_file(inode);

		/* success and real errors return here; only -ENOSPC falls
		 * through to the uncompressed path */
		if (!ret || ret != -ENOSPC)
			return ret;
	}

	/* fallback to all data uncompressed */
	fd = open(inode->i_srcpath, O_RDONLY | O_BINARY);
	if (fd < 0)
		return -errno;

	ret = write_uncompressed_file_from_fd(inode, fd);
	close(fd);
	return ret;
}
423
/*
 * bh flush hook: serialize @inode into its on-disk representation (compact
 * or extended, chosen by inode_isize) and write it out, followed by the
 * xattr ibody and any extent/chunk metadata.  Returns true on success
 * (bhops flush convention).
 */
static bool erofs_bh_flush_write_inode(struct erofs_buffer_head *bh)
{
	struct erofs_inode *const inode = bh->fsprivate;
	const u16 icount = EROFS_INODE_XATTR_ICOUNT(inode->xattr_isize);
	erofs_off_t off = erofs_btell(bh, false);
	union {
		struct erofs_inode_compact dic;
		struct erofs_inode_extended die;
	} u = { {0}, };
	int ret;

	switch (inode->inode_isize) {
	case sizeof(struct erofs_inode_compact):
		/* low bit of i_format: 0 == compact inode layout */
		u.dic.i_format = cpu_to_le16(0 | (inode->datalayout << 1));
		u.dic.i_xattr_icount = cpu_to_le16(icount);
		u.dic.i_mode = cpu_to_le16(inode->i_mode);
		u.dic.i_nlink = cpu_to_le16(inode->i_nlink);
		u.dic.i_size = cpu_to_le32((u32)inode->i_size);

		u.dic.i_ino = cpu_to_le32(inode->i_ino[0]);

		/* compact form truncates uid/gid to 16 bits */
		u.dic.i_uid = cpu_to_le16((u16)inode->i_uid);
		u.dic.i_gid = cpu_to_le16((u16)inode->i_gid);

		switch (inode->i_mode & S_IFMT) {
		case S_IFCHR:
		case S_IFBLK:
		case S_IFIFO:
		case S_IFSOCK:
			/* special files store the device number in i_u */
			u.dic.i_u.rdev = cpu_to_le32(inode->u.i_rdev);
			break;

		default:
			if (is_inode_layout_compression(inode))
				u.dic.i_u.compressed_blocks =
					cpu_to_le32(inode->u.i_blocks);
			else if (inode->datalayout ==
					EROFS_INODE_CHUNK_BASED)
				u.dic.i_u.c.format =
					cpu_to_le16(inode->u.chunkformat);
			else
				u.dic.i_u.raw_blkaddr =
					cpu_to_le32(inode->u.i_blkaddr);
			break;
		}
		break;
	case sizeof(struct erofs_inode_extended):
		/* low bit of i_format: 1 == extended inode layout */
		u.die.i_format = cpu_to_le16(1 | (inode->datalayout << 1));
		u.die.i_xattr_icount = cpu_to_le16(icount);
		u.die.i_mode = cpu_to_le16(inode->i_mode);
		u.die.i_nlink = cpu_to_le32(inode->i_nlink);
		u.die.i_size = cpu_to_le64(inode->i_size);

		u.die.i_ino = cpu_to_le32(inode->i_ino[0]);

		u.die.i_uid = cpu_to_le32(inode->i_uid);
		u.die.i_gid = cpu_to_le32(inode->i_gid);

		/* only the extended form records timestamps */
		u.die.i_mtime = cpu_to_le64(inode->i_mtime);
		u.die.i_mtime_nsec = cpu_to_le32(inode->i_mtime_nsec);

		switch (inode->i_mode & S_IFMT) {
		case S_IFCHR:
		case S_IFBLK:
		case S_IFIFO:
		case S_IFSOCK:
			u.die.i_u.rdev = cpu_to_le32(inode->u.i_rdev);
			break;

		default:
			if (is_inode_layout_compression(inode))
				u.die.i_u.compressed_blocks =
					cpu_to_le32(inode->u.i_blocks);
			else if (inode->datalayout ==
					EROFS_INODE_CHUNK_BASED)
				u.die.i_u.c.format =
					cpu_to_le16(inode->u.chunkformat);
			else
				u.die.i_u.raw_blkaddr =
					cpu_to_le32(inode->u.i_blkaddr);
			break;
		}
		break;
	default:
		erofs_err("unsupported on-disk inode version of nid %llu",
			  (unsigned long long)inode->nid);
		BUG_ON(1);
	}

	ret = dev_write(&u, off, inode->inode_isize);
	if (ret)
		return false;
	off += inode->inode_isize;

	if (inode->xattr_isize) {
		/* the xattr ibody immediately follows the inode itself */
		char *xattrs = erofs_export_xattr_ibody(&inode->i_xattrs,
							inode->xattr_isize);
		if (IS_ERR(xattrs))
			return false;

		ret = dev_write(xattrs, off, inode->xattr_isize);
		free(xattrs);
		if (ret)
			return false;

		off += inode->xattr_isize;
	}

	if (inode->extent_isize) {
		if (inode->datalayout == EROFS_INODE_CHUNK_BASED) {
			ret = erofs_blob_write_chunk_indexes(inode, off);
			if (ret)
				return false;
		} else {
			/* write compression metadata */
			off = Z_EROFS_VLE_EXTENT_ALIGN(off);
			ret = dev_write(inode->compressmeta, off,
					inode->extent_isize);
			if (ret)
				return false;
			free(inode->compressmeta);
		}
	}

	/* the inode is fully persisted; drop our pin on it */
	inode->bh = NULL;
	erofs_iput(inode);
	return erofs_bh_flush_generic_end(bh);
}
552
/* bh ops used for the on-disk inode area (see erofs_prepare_inode_buffer) */
static struct erofs_bhops erofs_write_inode_bhops = {
	.flush = erofs_bh_flush_write_inode,
};
556
/*
 * Ensure a data block exists to hold @inode's tail-end data when it can't
 * be inlined after the on-disk inode: either extend the existing data bh
 * by one block or allocate a fresh one.
 */
static int erofs_prepare_tail_block(struct erofs_inode *inode)
{
	struct erofs_buffer_head *bh;
	int ret;

	if (!inode->idata_size)
		return 0;

	bh = inode->bh_data;
	if (!bh) {
		/* no data area yet: allocate a single block for the tail */
		bh = erofs_balloc(DATA, EROFS_BLKSIZ, 0, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		bh->op = &erofs_skip_write_bhops;

		/* get blkaddr of bh */
		ret = erofs_mapbh(bh->block);
		DBG_BUGON(ret < 0);
		inode->u.i_blkaddr = bh->block->blkaddr;

		inode->bh_data = bh;
		return 0;
	}
	/* expend a block as the tail block (should be successful) */
	ret = erofs_bh_balloon(bh, EROFS_BLKSIZ);
	DBG_BUGON(ret != EROFS_BLKSIZ);
	return 0;
}
585
/*
 * Reserve the metadata (INODE area) buffer for @inode and decide whether
 * its tail-end data can be inlined right after the on-disk inode.  Note
 * the `goto noinline' jumps into the ENOSPC-fallback branch below, which
 * allocates a separate tail block instead of inlining.
 */
static int erofs_prepare_inode_buffer(struct erofs_inode *inode)
{
	unsigned int inodesize;
	struct erofs_buffer_head *bh, *ibh;

	DBG_BUGON(inode->bh || inode->bh_inline);

	inodesize = inode->inode_isize + inode->xattr_isize;
	if (inode->extent_isize)
		inodesize = Z_EROFS_VLE_EXTENT_ALIGN(inodesize) +
			    inode->extent_isize;

	/* TODO: tailpacking inline of chunk-based format isn't finalized */
	if (inode->datalayout == EROFS_INODE_CHUNK_BASED)
		goto noinline;

	if (!is_inode_layout_compression(inode)) {
		if (cfg.c_noinline_data && S_ISREG(inode->i_mode)) {
			inode->datalayout = EROFS_INODE_FLAT_PLAIN;
			goto noinline;
		}
		/*
		 * If the file sizes of uncompressed files are block-aligned,
		 * should use the EROFS_INODE_FLAT_PLAIN data layout.
		 */
		if (!inode->idata_size)
			inode->datalayout = EROFS_INODE_FLAT_PLAIN;
	}

	/* try to reserve the inode area with trailing room for inline data */
	bh = erofs_balloc(INODE, inodesize, 0, inode->idata_size);
	if (bh == ERR_PTR(-ENOSPC)) {
		int ret;

		/* no inline room left in this area: drop back to a block */
		if (is_inode_layout_compression(inode))
			z_erofs_drop_inline_pcluster(inode);
		else
			inode->datalayout = EROFS_INODE_FLAT_PLAIN;
noinline:
		/* expend an extra block for tail-end data */
		ret = erofs_prepare_tail_block(inode);
		if (ret)
			return ret;
		bh = erofs_balloc(INODE, inodesize, 0, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		DBG_BUGON(inode->bh_inline);
	} else if (IS_ERR(bh)) {
		return PTR_ERR(bh);
	} else if (inode->idata_size) {
		if (is_inode_layout_compression(inode)) {
			DBG_BUGON(!cfg.c_ztailpacking);
			erofs_dbg("Inline %scompressed data (%u bytes) to %s",
				  inode->compressed_idata ? "" : "un",
				  inode->idata_size, inode->i_srcpath);
			erofs_sb_set_ztailpacking();
		} else {
			inode->datalayout = EROFS_INODE_FLAT_INLINE;
			erofs_dbg("Inline tail-end data (%u bytes) to %s",
				  inode->idata_size, inode->i_srcpath);
		}

		/* allocate inline buffer */
		ibh = erofs_battach(bh, META, inode->idata_size);
		if (IS_ERR(ibh))
			return PTR_ERR(ibh);

		ibh->op = &erofs_skip_write_bhops;
		inode->bh_inline = ibh;
	}

	bh->fsprivate = erofs_igrab(inode);
	bh->op = &erofs_write_inode_bhops;
	inode->bh = bh;
	return 0;
}
661
erofs_bh_flush_write_inline(struct erofs_buffer_head * bh)662 static bool erofs_bh_flush_write_inline(struct erofs_buffer_head *bh)
663 {
664 struct erofs_inode *const inode = bh->fsprivate;
665 const erofs_off_t off = erofs_btell(bh, false);
666 int ret;
667
668 ret = dev_write(inode->idata, off, inode->idata_size);
669 if (ret)
670 return false;
671
672 inode->idata_size = 0;
673 free(inode->idata);
674 inode->idata = NULL;
675
676 erofs_iput(inode);
677 return erofs_bh_flush_generic_end(bh);
678 }
679
/* bh ops used for inline tail-end data (see erofs_write_tail_end) */
static struct erofs_bhops erofs_write_inline_bhops = {
	.flush = erofs_bh_flush_write_inline,
};
683
/*
 * Finalize @inode's tail-end data: either hand it to the inline buffer
 * (flushed later by erofs_write_inline_bhops) or write it into the last
 * reserved data block, padding the rest of that block with zeroes.
 */
static int erofs_write_tail_end(struct erofs_inode *inode)
{
	struct erofs_buffer_head *bh, *ibh;

	bh = inode->bh_data;

	if (!inode->idata_size)
		goto out;

	/* have enough room to inline data */
	if (inode->bh_inline) {
		ibh = inode->bh_inline;

		ibh->fsprivate = erofs_igrab(inode);
		ibh->op = &erofs_write_inline_bhops;

		erofs_droid_blocklist_write_tail_end(inode, NULL_ADDR);
	} else {
		int ret;
		erofs_off_t pos, zero_pos;

		erofs_mapbh(bh->block);
		/* the tail occupies the last block of the data area */
		pos = erofs_btell(bh, true) - EROFS_BLKSIZ;

		/* 0'ed data should be padded at head for 0padding conversion */
		if (erofs_sb_has_lz4_0padding() && inode->compressed_idata) {
			zero_pos = pos;
			pos += EROFS_BLKSIZ - inode->idata_size;
		} else {
			/* pad 0'ed data for the other cases */
			zero_pos = pos + inode->idata_size;
		}
		ret = dev_write(inode->idata, pos, inode->idata_size);
		if (ret)
			return ret;

		DBG_BUGON(inode->idata_size > EROFS_BLKSIZ);
		if (inode->idata_size < EROFS_BLKSIZ) {
			ret = dev_fillzero(zero_pos,
					   EROFS_BLKSIZ - inode->idata_size,
					   false);
			if (ret)
				return ret;
		}
		inode->idata_size = 0;
		free(inode->idata);
		inode->idata = NULL;

		erofs_droid_blocklist_write_tail_end(inode, erofs_blknr(pos));
	}
out:
	/* now bh_data can drop directly */
	if (bh) {
		/*
		 * Don't leave DATA buffers which were written in the global
		 * buffer list. It will make balloc() slowly.
		 */
		erofs_bdrop(bh, false);
		inode->bh_data = NULL;
	}
	return 0;
}
746
erofs_should_use_inode_extended(struct erofs_inode * inode)747 static bool erofs_should_use_inode_extended(struct erofs_inode *inode)
748 {
749 if (cfg.c_force_inodeversion == FORCE_INODE_EXTENDED)
750 return true;
751 if (inode->i_size > UINT_MAX)
752 return true;
753 if (inode->i_uid > USHRT_MAX)
754 return true;
755 if (inode->i_gid > USHRT_MAX)
756 return true;
757 if (inode->i_nlink > USHRT_MAX)
758 return true;
759 if ((inode->i_mtime != sbi.build_time ||
760 inode->i_mtime_nsec != sbi.build_time_nsec) &&
761 !cfg.c_ignore_mtime)
762 return true;
763 return false;
764 }
765
erofs_new_encode_dev(dev_t dev)766 static u32 erofs_new_encode_dev(dev_t dev)
767 {
768 const unsigned int major = major(dev);
769 const unsigned int minor = minor(dev);
770
771 return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
772 }
773
774 #ifdef WITH_ANDROID
/*
 * Apply Android fs_config / canned fs_config overrides (uid, gid, mode,
 * capabilities) to @st and @inode, keyed by the file's path relative to
 * the configured mount point.  No-op when neither is configured.
 */
int erofs_droid_inode_fsconfig(struct erofs_inode *inode,
			       struct stat64 *st,
			       const char *path)
{
	/* filesystem_config does not preserve file type bits */
	mode_t stat_file_type_mask = st->st_mode & S_IFMT;
	unsigned int uid = 0, gid = 0, mode = 0;
	const char *fspath;
	char *decorated = NULL;

	inode->capabilities = 0;
	if (!cfg.fs_config_file && !cfg.mount_point)
		return 0;

	if (!cfg.mount_point ||
	/* have to drop the mountpoint for rootdir of canned fsconfig */
	    (cfg.fs_config_file && erofs_fspath(path)[0] == '\0')) {
		fspath = erofs_fspath(path);
	} else {
		/* prefix the mount point for the lookup key */
		if (asprintf(&decorated, "%s/%s", cfg.mount_point,
			     erofs_fspath(path)) <= 0)
			return -ENOMEM;
		fspath = decorated;
	}

	if (cfg.fs_config_file)
		canned_fs_config(fspath, S_ISDIR(st->st_mode),
				 cfg.target_out_path,
				 &uid, &gid, &mode, &inode->capabilities);
	else
		fs_config(fspath, S_ISDIR(st->st_mode),
			  cfg.target_out_path,
			  &uid, &gid, &mode, &inode->capabilities);

	erofs_dbg("/%s -> mode = 0x%x, uid = 0x%x, gid = 0x%x, capabilities = 0x%" PRIx64,
		  fspath, mode, uid, gid, inode->capabilities);

	if (decorated)
		free(decorated);
	/* override ownership/permissions with the fs_config results,
	 * re-attaching the preserved file type bits */
	st->st_uid = uid;
	st->st_gid = gid;
	st->st_mode = mode | stat_file_type_mask;
	return 0;
}
819 #else
/* no-op stub used when built without Android fs_config support */
static int erofs_droid_inode_fsconfig(struct erofs_inode *inode,
				      struct stat64 *st,
				      const char *path)
{
	return 0;
}
826 #endif
827
/*
 * Populate @inode from lstat() results plus global configuration, choose
 * the compact/extended on-disk form, and hash the inode by its source
 * (dev, ino) pair so later paths can detect hard links.
 */
static int erofs_fill_inode(struct erofs_inode *inode,
			    struct stat64 *st,
			    const char *path)
{
	int err = erofs_droid_inode_fsconfig(inode, st, path);

	if (err)
		return err;
	inode->i_mode = st->st_mode;
	/* -1 means "keep the source owner", anything else overrides it */
	inode->i_uid = cfg.c_uid == -1 ? st->st_uid : cfg.c_uid;
	inode->i_gid = cfg.c_gid == -1 ? st->st_gid : cfg.c_gid;
	inode->i_mtime = st->st_mtime;
	inode->i_mtime_nsec = ST_MTIM_NSEC(st);

	switch (cfg.c_timeinherit) {
	case TIMESTAMP_CLAMPING:
		if (inode->i_mtime < sbi.build_time)
			break;
		/* fallthrough: clamp newer timestamps to the build time */
	case TIMESTAMP_FIXED:
		inode->i_mtime = sbi.build_time;
		inode->i_mtime_nsec = sbi.build_time_nsec;
		/* fallthrough */
	default:
		break;
	}
	inode->i_nlink = 1; /* fix up later if needed */

	switch (inode->i_mode & S_IFMT) {
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		inode->u.i_rdev = erofs_new_encode_dev(st->st_rdev);
		/* fallthrough: special files carry no data, i_size = 0 */
	case S_IFDIR:
		inode->i_size = 0;
		break;
	case S_IFREG:
	case S_IFLNK:
		inode->i_size = st->st_size;
		break;
	default:
		return -EINVAL;
	}

	/* keep the source path (possibly truncated) for diagnostics */
	strncpy(inode->i_srcpath, path, sizeof(inode->i_srcpath) - 1);
	inode->i_srcpath[sizeof(inode->i_srcpath) - 1] = '\0';

	inode->dev = st->st_dev;
	inode->i_ino[1] = st->st_ino; /* i_ino[1] = source inode number */

	if (erofs_should_use_inode_extended(inode)) {
		if (cfg.c_force_inodeversion == FORCE_INODE_COMPACT) {
			erofs_err("file %s cannot be in compact form",
				  inode->i_srcpath);
			return -EINVAL;
		}
		inode->inode_isize = sizeof(struct erofs_inode_extended);
	} else {
		inode->inode_isize = sizeof(struct erofs_inode_compact);
	}

	/* hash by the same key erofs_iget() searches with */
	list_add(&inode->i_hash,
		 &inode_hashtable[(st->st_ino ^ st->st_dev) %
				  NR_INODE_HASHTABLE]);
	return 0;
}
893
erofs_new_inode(void)894 static struct erofs_inode *erofs_new_inode(void)
895 {
896 struct erofs_inode *inode;
897
898 inode = calloc(1, sizeof(struct erofs_inode));
899 if (!inode)
900 return ERR_PTR(-ENOMEM);
901
902 inode->i_ino[0] = sbi.inos++; /* inode serial number */
903 inode->i_count = 1;
904
905 init_list_head(&inode->i_subdirs);
906 init_list_head(&inode->i_xattrs);
907 return inode;
908 }
909
910 /* get the inode from the (source) path */
/* get the inode from the (source) path */
static struct erofs_inode *erofs_iget_from_path(const char *path, bool is_src)
{
	struct stat64 st;
	struct erofs_inode *inode;
	int ret;

	/* currently, only source path is supported */
	if (!is_src)
		return ERR_PTR(-EINVAL);

	ret = lstat64(path, &st);
	if (ret)
		return ERR_PTR(-errno);

	/*
	 * lookup in hash table first, if it already exists we have a
	 * hard-link, just return it. Also don't lookup for directories
	 * since hard-link directory isn't allowed.
	 */
	if (!S_ISDIR(st.st_mode)) {
		inode = erofs_iget(st.st_dev, st.st_ino);
		if (inode)
			return inode;
	}

	/* cannot find in the inode cache */
	inode = erofs_new_inode();
	if (IS_ERR(inode))
		return inode;

	ret = erofs_fill_inode(inode, &st, path);
	if (ret) {
		/* fill_inode hashes only on success, so plain free() is ok */
		free(inode);
		return ERR_PTR(ret);
	}

	return inode;
}
949
/*
 * Pick sbi.meta_blkaddr once the root inode's position is known: the root
 * nid must stay within the first 0xffff inode slots past meta_blkaddr
 * (see rootnid_maxoffset), so shift the metadata base up if necessary.
 */
static void erofs_fixup_meta_blkaddr(struct erofs_inode *rootdir)
{
	const erofs_off_t rootnid_maxoffset = 0xffff << EROFS_ISLOTBITS;
	struct erofs_buffer_head *const bh = rootdir->bh;
	erofs_off_t off, meta_offset;

	erofs_mapbh(bh->block);
	off = erofs_btell(bh, false);

	if (off > rootnid_maxoffset)
		meta_offset = round_up(off - rootnid_maxoffset, EROFS_BLKSIZ);
	else
		meta_offset = 0;
	sbi.meta_blkaddr = erofs_blknr(meta_offset);
	rootdir->nid = (off - meta_offset) >> EROFS_ISLOTBITS;
}
966
erofs_lookupnid(struct erofs_inode * inode)967 erofs_nid_t erofs_lookupnid(struct erofs_inode *inode)
968 {
969 struct erofs_buffer_head *const bh = inode->bh;
970 erofs_off_t off, meta_offset;
971
972 if (!bh)
973 return inode->nid;
974
975 erofs_mapbh(bh->block);
976 off = erofs_btell(bh, false);
977
978 meta_offset = blknr_to_addr(sbi.meta_blkaddr);
979 DBG_BUGON(off < meta_offset);
980 return inode->nid = (off - meta_offset) >> EROFS_ISLOTBITS;
981 }
982
/* record the dentry's final on-disk nid and drop its inode reference */
static void erofs_d_invalidate(struct erofs_dentry *d)
{
	struct erofs_inode *const inode = d->inode;

	d->nid = erofs_lookupnid(inode);
	erofs_iput(inode);
}
990
/*
 * Recursively build the on-disk tree rooted at @dir (which may actually
 * be any inode): non-directories get their data written directly; for
 * directories, scan the source dir, build child inodes depth-first, then
 * emit the directory blocks.  Returns @dir or an ERR_PTR().
 */
static struct erofs_inode *erofs_mkfs_build_tree(struct erofs_inode *dir)
{
	int ret;
	DIR *_dir;
	struct dirent *dp;
	struct erofs_dentry *d;
	unsigned int nr_subdirs;

	ret = erofs_prepare_xattr_ibody(dir);
	if (ret < 0)
		return ERR_PTR(ret);

	if (!S_ISDIR(dir->i_mode)) {
		if (S_ISLNK(dir->i_mode)) {
			/* i_size (from lstat) is the exact target length,
			 * so no NUL terminator is needed in the buffer */
			char *const symlink = malloc(dir->i_size);

			if (!symlink)
				return ERR_PTR(-ENOMEM);
			ret = readlink(dir->i_srcpath, symlink, dir->i_size);
			if (ret < 0) {
				free(symlink);
				return ERR_PTR(-errno);
			}

			ret = erofs_write_file_from_buffer(dir, symlink);
			free(symlink);
			if (ret)
				return ERR_PTR(ret);
		} else {
			ret = erofs_write_file(dir);
			if (ret)
				return ERR_PTR(ret);
		}

		/* NOTE(review): both return values are ignored here, unlike
		 * the directory path below — verify this is intentional */
		erofs_prepare_inode_buffer(dir);
		erofs_write_tail_end(dir);
		return dir;
	}

	_dir = opendir(dir->i_srcpath);
	if (!_dir) {
		erofs_err("failed to opendir at %s: %s",
			  dir->i_srcpath, erofs_strerror(errno));
		return ERR_PTR(-errno);
	}

	nr_subdirs = 0;
	while (1) {
		/*
		 * set errno to 0 before calling readdir() in order to
		 * distinguish end of stream and from an error.
		 */
		errno = 0;
		dp = readdir(_dir);
		if (!dp)
			break;

		/* NOTE(review): strncmp makes this a prefix match, so e.g.
		 * "lost+found2" is skipped as well — confirm intended */
		if (is_dot_dotdot(dp->d_name) ||
		    !strncmp(dp->d_name, "lost+found", strlen("lost+found")))
			continue;

		/* skip if it's a exclude file */
		if (erofs_is_exclude_path(dir->i_srcpath, dp->d_name))
			continue;

		d = erofs_d_alloc(dir, dp->d_name);
		if (IS_ERR(d)) {
			ret = PTR_ERR(d);
			goto err_closedir;
		}
		nr_subdirs++;

		/* to count i_nlink for directories */
		d->type = (dp->d_type == DT_DIR ?
			EROFS_FT_DIR : EROFS_FT_UNKNOWN);
	}

	if (errno) {
		ret = -errno;
		goto err_closedir;
	}
	closedir(_dir);

	ret = erofs_prepare_dir_file(dir, nr_subdirs);
	if (ret)
		goto err;

	ret = erofs_prepare_inode_buffer(dir);
	if (ret)
		goto err;

	/* the root inode's position fixes the global metadata base */
	if (IS_ROOT(dir))
		erofs_fixup_meta_blkaddr(dir);

	list_for_each_entry(d, &dir->i_subdirs, d_child) {
		char buf[PATH_MAX], *trimmed;
		unsigned char ftype;

		/* "." and ".." already point at built inodes */
		if (is_dot_dotdot(d->name)) {
			erofs_d_invalidate(d);
			continue;
		}

		ret = snprintf(buf, PATH_MAX, "%s/%s",
			       dir->i_srcpath, d->name);
		if (ret < 0 || ret >= PATH_MAX) {
			/* ignore the too long path */
			goto fail;
		}

		trimmed = erofs_trim_for_progressinfo(erofs_fspath(buf),
				sizeof("Processing ...") - 1);
		erofs_update_progressinfo("Processing %s ...", trimmed);
		free(trimmed);
		/* depth-first: build the whole child subtree now */
		d->inode = erofs_mkfs_build_tree_from_path(dir, buf);
		if (IS_ERR(d->inode)) {
			ret = PTR_ERR(d->inode);
fail:
			d->inode = NULL;
			d->type = EROFS_FT_UNKNOWN;
			goto err;
		}

		ftype = erofs_mode_to_ftype(d->inode->i_mode);
		DBG_BUGON(ftype == EROFS_FT_DIR && d->type != ftype);
		d->type = ftype;

		erofs_d_invalidate(d);
		erofs_info("add file %s/%s (nid %llu, type %u)",
			   dir->i_srcpath, d->name, (unsigned long long)d->nid,
			   d->type);
	}
	/* NOTE(review): return values ignored here as well — confirm */
	erofs_write_dir_file(dir);
	erofs_write_tail_end(dir);
	return dir;

err_closedir:
	closedir(_dir);
err:
	return ERR_PTR(ret);
}
1132
erofs_mkfs_build_tree_from_path(struct erofs_inode * parent,const char * path)1133 struct erofs_inode *erofs_mkfs_build_tree_from_path(struct erofs_inode *parent,
1134 const char *path)
1135 {
1136 struct erofs_inode *const inode = erofs_iget_from_path(path, true);
1137
1138 if (IS_ERR(inode))
1139 return inode;
1140
1141 /* a hardlink to the existed inode */
1142 if (inode->i_parent) {
1143 ++inode->i_nlink;
1144 return inode;
1145 }
1146
1147 /* a completely new inode is found */
1148 if (parent)
1149 inode->i_parent = parent;
1150 else
1151 inode->i_parent = inode; /* rootdir mark */
1152
1153 return erofs_mkfs_build_tree(inode);
1154 }
1155