// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "xattr.h"

#include <trace/events/erofs.h>

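/*
 * Read the on-disk inode that backs @inode (compact or extended layout)
 * and decode it into the in-memory erofs_inode and VFS inode fields.
 * On success, return the still-mapped metabuf page holding the inode so
 * that callers can continue parsing trailing inline data; on failure,
 * release the metabuf and return an ERR_PTR().
 */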
static void *erofs_read_inode(struct erofs_buf *buf,
			      struct inode *inode, unsigned int *ofs)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_inode *vi = EROFS_I(inode);
	const erofs_off_t inode_loc = erofs_iloc(inode);

	erofs_blk_t blkaddr, nblks = 0;
	void *kaddr;
	struct erofs_inode_compact *dic;
	struct erofs_inode_extended *die, *copied = NULL;
	unsigned int ifmt;
	int err;

	blkaddr = erofs_blknr(sb, inode_loc);
	*ofs = erofs_blkoff(sb, inode_loc);

	erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
		  __func__, vi->nid, *ofs, blkaddr);

	kaddr = erofs_read_metabuf(buf, sb, blkaddr, EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
			  vi->nid, PTR_ERR(kaddr));
		return kaddr;
	}

	dic = kaddr + *ofs;
	ifmt = le16_to_cpu(dic->i_format);

	if (ifmt & ~EROFS_I_ALL) {
		erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
			  ifmt, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	vi->datalayout = erofs_inode_datalayout(ifmt);
	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
		erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
			  vi->datalayout, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

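	/*
	 * On-disk inodes come in two versions: a 32-byte compact form with
	 * narrower fields, and a 64-byte extended form carrying full-width
	 * uid/gid/size fields and a per-inode timestamp.
	 */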
	switch (erofs_inode_version(ifmt)) {
	case EROFS_INODE_LAYOUT_EXTENDED:
		vi->inode_isize = sizeof(struct erofs_inode_extended);
		/* check if the extended inode crosses the block boundary */
		if (*ofs + vi->inode_isize <= sb->s_blocksize) {
			*ofs += vi->inode_isize;
			die = (struct erofs_inode_extended *)dic;
		} else {
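			/*
			 * The extended inode straddles a block boundary:
			 * stitch the two halves together into a temporary
			 * buffer so it can be decoded contiguously.
			 */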
			const unsigned int gotten = sb->s_blocksize - *ofs;

			copied = kmalloc(vi->inode_isize, GFP_NOFS);
			if (!copied) {
				err = -ENOMEM;
				goto err_out;
			}
			memcpy(copied, dic, gotten);
			kaddr = erofs_read_metabuf(buf, sb, blkaddr + 1,
						   EROFS_KMAP);
			if (IS_ERR(kaddr)) {
				erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld",
					  vi->nid, PTR_ERR(kaddr));
				kfree(copied);
				return kaddr;
			}
			*ofs = vi->inode_isize - gotten;
			memcpy((u8 *)copied + gotten, kaddr, *ofs);
			die = copied;
		}
		vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);

		inode->i_mode = le16_to_cpu(die->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(die->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le32_to_cpu(die->i_uid));
		i_gid_write(inode, le32_to_cpu(die->i_gid));
		set_nlink(inode, le32_to_cpu(die->i_nlink));

		/* extended inode has its own timestamp */
		inode->i_ctime.tv_sec = le64_to_cpu(die->i_mtime);
		inode->i_ctime.tv_nsec = le32_to_cpu(die->i_mtime_nsec);

		inode->i_size = le64_to_cpu(die->i_size);

		/* total blocks for compressed files */
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(die->i_u.compressed_blocks);
		else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
			/* fill chunked inode summary info */
			vi->chunkformat = le16_to_cpu(die->i_u.c.format);
		kfree(copied);
		copied = NULL;
		break;
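	/* the compact inode narrows uid/gid/nlink to 16 bits and i_size to 32 bits */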
	case EROFS_INODE_LAYOUT_COMPACT:
		vi->inode_isize = sizeof(struct erofs_inode_compact);
		*ofs += vi->inode_isize;
		vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);

		inode->i_mode = le16_to_cpu(dic->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(dic->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le16_to_cpu(dic->i_uid));
		i_gid_write(inode, le16_to_cpu(dic->i_gid));
		set_nlink(inode, le16_to_cpu(dic->i_nlink));

		/* use build time for compact inodes */
		inode->i_ctime.tv_sec = sbi->build_time;
		inode->i_ctime.tv_nsec = sbi->build_time_nsec;

		inode->i_size = le32_to_cpu(dic->i_size);
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(dic->i_u.compressed_blocks);
		else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
			vi->chunkformat = le16_to_cpu(dic->i_u.c.format);
		break;
	default:
		erofs_err(inode->i_sb,
			  "unsupported on-disk inode version %u of nid %llu",
			  erofs_inode_version(ifmt), vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
		if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
			erofs_err(inode->i_sb,
				  "unsupported chunk format %x of nid %llu",
				  vi->chunkformat, vi->nid);
			err = -EOPNOTSUPP;
			goto err_out;
		}
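		/* chunk size is the block size shifted by the low bits of chunkformat */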
		vi->chunkbits = sb->s_blocksize_bits +
			(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
	}
	inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec;

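	/* DAX is only enabled for uncompressed flat or chunk-based regular files */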
	inode->i_flags &= ~S_DAX;
	if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
	    (vi->datalayout == EROFS_INODE_FLAT_PLAIN ||
	     vi->datalayout == EROFS_INODE_CHUNK_BASED))
		inode->i_flags |= S_DAX;

	if (!nblks)
		/* measure inode.i_blocks as generic filesystems do */
		inode->i_blocks = round_up(inode->i_size, sb->s_blocksize) >> 9;
	else
		inode->i_blocks = nblks << (sb->s_blocksize_bits - 9);
	return kaddr;

bogusimode:
	erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
		  inode->i_mode, vi->nid);
	err = -EFSCORRUPTED;
err_out:
	DBG_BUGON(1);
	kfree(copied);
	erofs_put_metabuf(buf);
	return ERR_PTR(err);
}

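/*
 * Fast symlinks keep the target string inline in the inode block; copy
 * it into a NUL-terminated buffer so ->get_link() can return it without
 * any further I/O.  Anything else falls back to page-based symlinks.
 */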
static int erofs_fill_symlink(struct inode *inode, void *kaddr,
			      unsigned int m_pofs)
{
	struct erofs_inode *vi = EROFS_I(inode);
	unsigned int bsz = i_blocksize(inode);
	char *lnk;

	/* if it cannot be handled with the fast symlink scheme */
	if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
	    inode->i_size >= bsz || inode->i_size < 0) {
		inode->i_op = &erofs_symlink_iops;
		return 0;
	}

	lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
	if (!lnk)
		return -ENOMEM;

	m_pofs += vi->xattr_isize;
	/* inline symlink data shouldn't cross a block boundary */
	if (m_pofs + inode->i_size > bsz) {
		kfree(lnk);
		erofs_err(inode->i_sb,
			  "inline data crosses block boundary @ nid %llu",
			  vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
	memcpy(lnk, kaddr + m_pofs, inode->i_size);
	lnk[inode->i_size] = '\0';

	inode->i_link = lnk;
	inode->i_op = &erofs_fast_symlink_iops;
	return 0;
}

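/*
 * Decode the on-disk inode, then wire up the inode/file operations and
 * the address space that match its file type and data layout.
 */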
static int erofs_fill_inode(struct inode *inode)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	void *kaddr;
	unsigned int ofs;
	int err = 0;

	trace_erofs_fill_inode(inode);

	/* read inode base data from disk */
	kaddr = erofs_read_inode(&buf, inode, &ofs);
	if (IS_ERR(kaddr))
		return PTR_ERR(kaddr);

	/* setup the new inode */
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &erofs_generic_iops;
		if (erofs_inode_is_data_compressed(vi->datalayout))
			inode->i_fop = &generic_ro_fops;
		else
			inode->i_fop = &erofs_file_fops;
		break;
	case S_IFDIR:
		inode->i_op = &erofs_dir_iops;
		inode->i_fop = &erofs_dir_fops;
		break;
	case S_IFLNK:
		err = erofs_fill_symlink(inode, kaddr, ofs);
		if (err)
			goto out_unlock;
		inode_nohighmem(inode);
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		inode->i_op = &erofs_generic_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		goto out_unlock;
	default:
		err = -EFSCORRUPTED;
		goto out_unlock;
	}

	if (erofs_inode_is_data_compressed(vi->datalayout)) {
		if (!erofs_is_fscache_mode(inode->i_sb)) {
			DO_ONCE_LITE_IF(inode->i_sb->s_blocksize != PAGE_SIZE,
					erofs_info, inode->i_sb,
					"EXPERIMENTAL EROFS subpage compressed block support in use. Use at your own risk!");
			err = z_erofs_fill_inode(inode);
		} else {
			err = -EOPNOTSUPP;
		}
		goto out_unlock;
	}
	inode->i_mapping->a_ops = &erofs_raw_access_aops;
	if (!erofs_is_fscache_mode(inode->i_sb))
		mapping_set_large_folios(inode->i_mapping);
#ifdef CONFIG_EROFS_FS_ONDEMAND
	if (erofs_is_fscache_mode(inode->i_sb))
		inode->i_mapping->a_ops = &erofs_fscache_access_aops;
#endif

out_unlock:
	erofs_put_metabuf(&buf);
	return err;
}

/*
 * An erofs nid is 64 bits, but i_ino is only 'unsigned long', so extra
 * matching is needed on 32-bit platforms to find the right inode.
 */
static int erofs_ilookup_test_actor(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	return EROFS_I(inode)->nid == nid;
}

static int erofs_iget_set_actor(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	inode->i_ino = erofs_inode_hash(nid);
	return 0;
}

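/*
 * Look up (or instantiate) the VFS inode for @nid.  iget5_locked()
 * hashes on the possibly-truncated ino while the test actor above
 * disambiguates collisions by comparing the full 64-bit nid.
 */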
struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid)
{
	const unsigned long hashval = erofs_inode_hash(nid);
	struct inode *inode;

	inode = iget5_locked(sb, hashval, erofs_ilookup_test_actor,
			     erofs_iget_set_actor, &nid);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		int err;
		struct erofs_inode *vi = EROFS_I(inode);

		vi->nid = nid;

		err = erofs_fill_inode(inode);
		if (!err) {
			unlock_new_inode(inode);
		} else {
			iget_failed(inode);
			inode = ERR_PTR(err);
		}
	}
	return inode;
}

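/*
 * EROFS is read-only, so every inode is reported as immutable;
 * compressed inodes additionally advertise STATX_ATTR_COMPRESSED.
 */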
int erofs_getattr(struct user_namespace *mnt_userns, const struct path *path,
		  struct kstat *stat, u32 request_mask,
		  unsigned int query_flags)
{
	struct inode *const inode = d_inode(path->dentry);

	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	stat->attributes |= STATX_ATTR_IMMUTABLE;
	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_IMMUTABLE);

	generic_fillattr(mnt_userns, inode, stat);
	return 0;
}

const struct inode_operations erofs_generic_iops = {
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_acl = erofs_get_acl,
	.fiemap = erofs_fiemap,
};

const struct inode_operations erofs_symlink_iops = {
	.get_link = page_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_acl = erofs_get_acl,
};

const struct inode_operations erofs_fast_symlink_iops = {
	.get_link = simple_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_acl = erofs_get_acl,
};