// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include <linux/security.h>
#include "xattr.h"
9
/*
 * Cursor for walking on-disk xattr metadata block by block.
 * It pins one locked meta page at a time; @kaddr is that page's kernel
 * mapping (atomic or not depending on the mapper, see xattr_iter_end()).
 */
struct xattr_iter {
	struct super_block *sb;
	struct page *page;	/* currently pinned + locked meta page */
	void *kaddr;		/* kernel mapping of @page */

	erofs_blk_t blkaddr;	/* block address @page was read from */
	unsigned int ofs;	/* byte offset within the current block */
};
18
/*
 * Unmap, unlock and release the meta page currently held by @it.
 * @atomic selects the unmap primitive matching how it was mapped;
 * 'init_inode_xattrs' is the only caller using a plain kmap().
 */
static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
{
	if (atomic)
		kunmap_atomic(it->kaddr);
	else
		kunmap(it->page);

	unlock_page(it->page);
	put_page(it->page);
}
30
xattr_iter_end_final(struct xattr_iter * it)31 static inline void xattr_iter_end_final(struct xattr_iter *it)
32 {
33 if (!it->page)
34 return;
35
36 xattr_iter_end(it, true);
37 }
38
/*
 * Parse the inode's inline xattr ibody header and read its shared-xattr
 * id array into memory, exactly once per in-memory inode lifetime.
 *
 * Returns 0 on success, -ENOATTR when the inode has no xattr area at all,
 * or a negative errno (-ENOMEM, -EFSCORRUPTED, -EOPNOTSUPP, ...).
 * Concurrent initializers are serialized on EROFS_I_BL_XATTR_BIT.
 */
static int init_inode_xattrs(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct xattr_iter it;
	unsigned int i;
	struct erofs_xattr_ibody_header *ih;
	struct super_block *sb;
	struct erofs_sb_info *sbi;
	bool atomic_map;
	int ret = 0;

	/* the most case is that xattrs of this inode are initialized. */
	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags)) {
		/*
		 * paired with smp_mb() at the end of the function to ensure
		 * fields will only be observed after the bit is set.
		 */
		smp_mb();
		return 0;
	}

	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	/* someone has initialized xattrs for us? */
	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
		goto out_unlock;

	/*
	 * bypass all xattr operations if ->xattr_isize is not greater than
	 * sizeof(struct erofs_xattr_ibody_header), in detail:
	 * 1) it is not enough to contain erofs_xattr_ibody_header then
	 *    ->xattr_isize should be 0 (it means no xattr);
	 * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk
	 *    undefined right now (maybe use later with some new sb feature).
	 */
	if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
		erofs_err(inode->i_sb,
			  "xattr_isize %d of nid %llu is not supported yet",
			  vi->xattr_isize, vi->nid);
		ret = -EOPNOTSUPP;
		goto out_unlock;
	} else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
		if (vi->xattr_isize) {
			erofs_err(inode->i_sb,
				  "bogus xattr ibody @ nid %llu", vi->nid);
			DBG_BUGON(1);
			ret = -EFSCORRUPTED;
			goto out_unlock;	/* xattr ondisk layout error */
		}
		/* xattr_isize == 0: the inode simply has no xattrs */
		ret = -ENOATTR;
		goto out_unlock;
	}

	sb = inode->i_sb;
	sbi = EROFS_SB(sb);
	/* the ibody header sits right after the on-disk inode */
	it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
	it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);

	it.page = erofs_get_meta_page(sb, it.blkaddr);
	if (IS_ERR(it.page)) {
		ret = PTR_ERR(it.page);
		goto out_unlock;
	}

	/* read in shared xattr array (non-atomic, see kmalloc below) */
	it.kaddr = kmap(it.page);
	atomic_map = false;

	ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);

	vi->xattr_shared_count = ih->h_shared_count;
	vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
						sizeof(uint), GFP_KERNEL);
	if (!vi->xattr_shared_xattrs) {
		xattr_iter_end(&it, atomic_map);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* let's skip ibody header */
	it.ofs += sizeof(struct erofs_xattr_ibody_header);

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		if (it.ofs >= EROFS_BLKSIZ) {
			/* cannot be unaligned */
			DBG_BUGON(it.ofs != EROFS_BLKSIZ);
			xattr_iter_end(&it, atomic_map);

			/* the id array continues in the next meta block */
			it.page = erofs_get_meta_page(sb, ++it.blkaddr);
			if (IS_ERR(it.page)) {
				kfree(vi->xattr_shared_xattrs);
				vi->xattr_shared_xattrs = NULL;
				ret = PTR_ERR(it.page);
				goto out_unlock;
			}

			/* later pages may be mapped atomically */
			it.kaddr = kmap_atomic(it.page);
			atomic_map = true;
			it.ofs = 0;
		}
		vi->xattr_shared_xattrs[i] =
			le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
		it.ofs += sizeof(__le32);
	}
	xattr_iter_end(&it, atomic_map);

	/* paired with smp_mb() at the beginning of the function. */
	smp_mb();
	set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);

out_unlock:
	clear_and_wake_up_bit(EROFS_I_BL_XATTR_BIT, &vi->flags);
	return ret;
}
154
/*
 * the general idea of these return values is:
 * if 0 is returned, go on processing the current xattr;
 * if 1 (> 0) is returned, skip this round to process the next xattr;
 * if -err (< 0) is returned, an error (maybe -ENOATTR) occurred
 * and it needs to be handled
 */
struct xattr_iter_handlers {
	/* called once per entry header; nonzero skips/aborts this xattr */
	int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry);
	/* fed successive name slices; nonzero skips/aborts this xattr */
	int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf,
		    unsigned int len);
	/* prepare for a value of value_sz bytes; may be NULL to skip values */
	int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz);
	/* fed successive value slices; cannot fail */
	void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf,
		      unsigned int len);
};
170
/*
 * Advance the iterator to the meta block containing it->ofs when the
 * offset has run past the current block; a no-op otherwise.
 *
 * On mapping failure the old page is already released and it->page is
 * NULL-ed so a subsequent xattr_iter_end_final() is safe.
 */
static inline int xattr_iter_fixup(struct xattr_iter *it)
{
	if (it->ofs < EROFS_BLKSIZ)
		return 0;

	xattr_iter_end(it, true);

	it->blkaddr += erofs_blknr(it->ofs);

	it->page = erofs_get_meta_page(it->sb, it->blkaddr);
	if (IS_ERR(it->page)) {
		int err = PTR_ERR(it->page);

		it->page = NULL;
		return err;
	}

	/* mapped atomically; all later unmaps use kunmap_atomic() */
	it->kaddr = kmap_atomic(it->page);
	it->ofs = erofs_blkoff(it->ofs);
	return 0;
}
192
/*
 * Position @it at the first inline xattr entry of @inode (right after
 * the ibody header) and map the meta page containing it.
 *
 * Returns the number of inline xattr bytes to parse (> 0), -ENOATTR if
 * the inode has no inline xattr area, or a negative errno.
 */
static int inline_xattr_iter_begin(struct xattr_iter *it,
				   struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
	unsigned int xattr_header_sz, inline_xattr_ofs;

	xattr_header_sz = inlinexattr_header_size(inode);
	if (xattr_header_sz >= vi->xattr_isize) {
		/* strictly smaller would mean on-disk corruption */
		DBG_BUGON(xattr_header_sz > vi->xattr_isize);
		return -ENOATTR;
	}

	/* inline xattrs are laid out directly after the on-disk inode */
	inline_xattr_ofs = vi->inode_isize + xattr_header_sz;

	it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
	it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);

	it->page = erofs_get_meta_page(inode->i_sb, it->blkaddr);
	if (IS_ERR(it->page))
		return PTR_ERR(it->page);

	it->kaddr = kmap_atomic(it->page);
	return vi->xattr_isize - xattr_header_sz;
}
218
219 /*
220 * Regardless of success or failure, `xattr_foreach' will end up with
221 * `ofs' pointing to the next xattr item rather than an arbitrary position.
222 */
xattr_foreach(struct xattr_iter * it,const struct xattr_iter_handlers * op,unsigned int * tlimit)223 static int xattr_foreach(struct xattr_iter *it,
224 const struct xattr_iter_handlers *op,
225 unsigned int *tlimit)
226 {
227 struct erofs_xattr_entry entry;
228 unsigned int value_sz, processed, slice;
229 int err;
230
231 /* 0. fixup blkaddr, ofs, ipage */
232 err = xattr_iter_fixup(it);
233 if (err)
234 return err;
235
236 /*
237 * 1. read xattr entry to the memory,
238 * since we do EROFS_XATTR_ALIGN
239 * therefore entry should be in the page
240 */
241 entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
242 if (tlimit) {
243 unsigned int entry_sz = erofs_xattr_entry_size(&entry);
244
245 /* xattr on-disk corruption: xattr entry beyond xattr_isize */
246 if (*tlimit < entry_sz) {
247 DBG_BUGON(1);
248 return -EFSCORRUPTED;
249 }
250 *tlimit -= entry_sz;
251 }
252
253 it->ofs += sizeof(struct erofs_xattr_entry);
254 value_sz = le16_to_cpu(entry.e_value_size);
255
256 /* handle entry */
257 err = op->entry(it, &entry);
258 if (err) {
259 it->ofs += entry.e_name_len + value_sz;
260 goto out;
261 }
262
263 /* 2. handle xattr name (ofs will finally be at the end of name) */
264 processed = 0;
265
266 while (processed < entry.e_name_len) {
267 if (it->ofs >= EROFS_BLKSIZ) {
268 DBG_BUGON(it->ofs > EROFS_BLKSIZ);
269
270 err = xattr_iter_fixup(it);
271 if (err)
272 goto out;
273 it->ofs = 0;
274 }
275
276 slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
277 entry.e_name_len - processed);
278
279 /* handle name */
280 err = op->name(it, processed, it->kaddr + it->ofs, slice);
281 if (err) {
282 it->ofs += entry.e_name_len - processed + value_sz;
283 goto out;
284 }
285
286 it->ofs += slice;
287 processed += slice;
288 }
289
290 /* 3. handle xattr value */
291 processed = 0;
292
293 if (op->alloc_buffer) {
294 err = op->alloc_buffer(it, value_sz);
295 if (err) {
296 it->ofs += value_sz;
297 goto out;
298 }
299 }
300
301 while (processed < value_sz) {
302 if (it->ofs >= EROFS_BLKSIZ) {
303 DBG_BUGON(it->ofs > EROFS_BLKSIZ);
304
305 err = xattr_iter_fixup(it);
306 if (err)
307 goto out;
308 it->ofs = 0;
309 }
310
311 slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
312 value_sz - processed);
313 op->value(it, processed, it->kaddr + it->ofs, slice);
314 it->ofs += slice;
315 processed += slice;
316 }
317
318 out:
319 /* xattrs should be 4-byte aligned (on-disk constraint) */
320 it->ofs = EROFS_XATTR_ALIGN(it->ofs);
321 return err < 0 ? err : 0;
322 }
323
/* per-call state for a single erofs_getxattr() lookup */
struct getxattr_iter {
	struct xattr_iter it;

	char *buffer;		/* destination; NULL means size-only query */
	int buffer_size, index;	/* capacity in / value size out; name index */
	struct qstr name;	/* wanted name (prefix already stripped) */
};
331
xattr_entrymatch(struct xattr_iter * _it,struct erofs_xattr_entry * entry)332 static int xattr_entrymatch(struct xattr_iter *_it,
333 struct erofs_xattr_entry *entry)
334 {
335 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
336
337 return (it->index != entry->e_name_index ||
338 it->name.len != entry->e_name_len) ? -ENOATTR : 0;
339 }
340
xattr_namematch(struct xattr_iter * _it,unsigned int processed,char * buf,unsigned int len)341 static int xattr_namematch(struct xattr_iter *_it,
342 unsigned int processed, char *buf, unsigned int len)
343 {
344 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
345
346 return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
347 }
348
xattr_checkbuffer(struct xattr_iter * _it,unsigned int value_sz)349 static int xattr_checkbuffer(struct xattr_iter *_it,
350 unsigned int value_sz)
351 {
352 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
353 int err = it->buffer_size < value_sz ? -ERANGE : 0;
354
355 it->buffer_size = value_sz;
356 return !it->buffer ? 1 : err;
357 }
358
xattr_copyvalue(struct xattr_iter * _it,unsigned int processed,char * buf,unsigned int len)359 static void xattr_copyvalue(struct xattr_iter *_it,
360 unsigned int processed,
361 char *buf, unsigned int len)
362 {
363 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
364
365 memcpy(it->buffer + processed, buf, len);
366 }
367
/* iteration callbacks implementing lookup-by-name for erofs_getxattr() */
static const struct xattr_iter_handlers find_xattr_handlers = {
	.entry = xattr_entrymatch,
	.name = xattr_namematch,
	.alloc_buffer = xattr_checkbuffer,
	.value = xattr_copyvalue
};
374
inline_getxattr(struct inode * inode,struct getxattr_iter * it)375 static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
376 {
377 int ret;
378 unsigned int remaining;
379
380 ret = inline_xattr_iter_begin(&it->it, inode);
381 if (ret < 0)
382 return ret;
383
384 remaining = ret;
385 while (remaining) {
386 ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
387 if (ret != -ENOATTR)
388 break;
389 }
390 xattr_iter_end_final(&it->it);
391
392 return ret ? ret : it->buffer_size;
393 }
394
/*
 * Search the inode's shared xattrs for it->name.  Shared xattrs live in
 * a per-filesystem area addressed by the 32-bit ids previously read into
 * vi->xattr_shared_xattrs by init_inode_xattrs().  The currently mapped
 * meta page is reused when consecutive shared xattrs share a block.
 *
 * Returns the value size on a hit, -ENOATTR on a miss, or an errno.
 */
static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	unsigned int i;
	int ret = -ENOATTR;

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		erofs_blk_t blkaddr =
			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);

		/* (re)map only when this xattr lives in a different block */
		if (!i || blkaddr != it->it.blkaddr) {
			if (i)
				xattr_iter_end(&it->it, true);

			it->it.page = erofs_get_meta_page(sb, blkaddr);
			if (IS_ERR(it->it.page))
				return PTR_ERR(it->it.page);

			it->it.kaddr = kmap_atomic(it->it.page);
			it->it.blkaddr = blkaddr;
		}

		ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
		if (ret != -ENOATTR)
			break;
	}
	/* a page is pinned iff at least one shared xattr was visited */
	if (vi->xattr_shared_count)
		xattr_iter_end_final(&it->it);

	return ret ? ret : it->buffer_size;
}
430
erofs_xattr_user_list(struct dentry * dentry)431 static bool erofs_xattr_user_list(struct dentry *dentry)
432 {
433 return test_opt(&EROFS_SB(dentry->d_sb)->ctx, XATTR_USER);
434 }
435
/* trusted.* xattrs are only listed for CAP_SYS_ADMIN holders */
static bool erofs_xattr_trusted_list(struct dentry *dentry)
{
	return capable(CAP_SYS_ADMIN);
}
440
/*
 * Core getxattr: look up @name (namespace @index, prefix stripped) in
 * the inline area first, then among the shared xattrs.
 *
 * Returns the value size (copied into @buffer unless it is NULL),
 * -ENOATTR if absent, -ERANGE if the name or buffer is too small,
 * or another negative errno.
 */
int erofs_getxattr(struct inode *inode, int index,
		   const char *name,
		   void *buffer, size_t buffer_size)
{
	struct getxattr_iter it;
	int ret;

	if (!name)
		return -EINVAL;

	/* make sure the shared-xattr array has been read in */
	ret = init_inode_xattrs(inode);
	if (ret)
		return ret;

	it.name.len = strlen(name);
	if (it.name.len > EROFS_NAME_LEN)
		return -ERANGE;

	it.index = index;
	it.name.name = name;
	it.buffer = buffer;
	it.buffer_size = buffer_size;
	it.it.sb = inode->i_sb;

	ret = inline_getxattr(inode, &it);
	return ret == -ENOATTR ? shared_getxattr(inode, &it) : ret;
}
471
/*
 * Common ->get for all erofs xattr handlers; the handler's flags field
 * carries the on-disk namespace index.
 */
static int erofs_xattr_generic_get(const struct xattr_handler *handler,
				   struct dentry *unused, struct inode *inode,
				   const char *name, void *buffer, size_t size,
				   int flags)
{
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);

	switch (handler->flags) {
	case EROFS_XATTR_INDEX_USER:
		/* user.* is gated by the user_xattr mount option */
		if (!test_opt(&sbi->ctx, XATTR_USER))
			return -EOPNOTSUPP;
		break;
	case EROFS_XATTR_INDEX_TRUSTED:
	case EROFS_XATTR_INDEX_SECURITY:
		break;
	default:
		return -EINVAL;
	}

	return erofs_getxattr(inode, handler->flags, name, buffer, size);
}
494
/* user.* namespace, listed only when the user_xattr option is on */
const struct xattr_handler erofs_xattr_user_handler = {
	.prefix = XATTR_USER_PREFIX,
	.flags = EROFS_XATTR_INDEX_USER,
	.list = erofs_xattr_user_list,
	.get = erofs_xattr_generic_get,
};

/* trusted.* namespace, listed only for CAP_SYS_ADMIN */
const struct xattr_handler erofs_xattr_trusted_handler = {
	.prefix = XATTR_TRUSTED_PREFIX,
	.flags = EROFS_XATTR_INDEX_TRUSTED,
	.list = erofs_xattr_trusted_list,
	.get = erofs_xattr_generic_get,
};

#ifdef CONFIG_EROFS_FS_SECURITY
/* security.* namespace (no .list: always shown by the VFS default) */
const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.flags = EROFS_XATTR_INDEX_SECURITY,
	.get = erofs_xattr_generic_get,
};
#endif

/* NULL-terminated table wired into sb->s_xattr */
const struct xattr_handler *erofs_xattr_handlers[] = {
	&erofs_xattr_user_handler,
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	&erofs_xattr_trusted_handler,
#ifdef CONFIG_EROFS_FS_SECURITY
	&erofs_xattr_security_handler,
#endif
	NULL,
};
529
/* per-call state for a single erofs_listxattr() */
struct listxattr_iter {
	struct xattr_iter it;

	struct dentry *dentry;
	char *buffer;			/* name list out; NULL sizes only */
	int buffer_size, buffer_ofs;	/* capacity / bytes emitted so far */
};
537
xattr_entrylist(struct xattr_iter * _it,struct erofs_xattr_entry * entry)538 static int xattr_entrylist(struct xattr_iter *_it,
539 struct erofs_xattr_entry *entry)
540 {
541 struct listxattr_iter *it =
542 container_of(_it, struct listxattr_iter, it);
543 unsigned int prefix_len;
544 const char *prefix;
545
546 const struct xattr_handler *h =
547 erofs_xattr_handler(entry->e_name_index);
548
549 if (!h || (h->list && !h->list(it->dentry)))
550 return 1;
551
552 prefix = xattr_prefix(h);
553 prefix_len = strlen(prefix);
554
555 if (!it->buffer) {
556 it->buffer_ofs += prefix_len + entry->e_name_len + 1;
557 return 1;
558 }
559
560 if (it->buffer_ofs + prefix_len
561 + entry->e_name_len + 1 > it->buffer_size)
562 return -ERANGE;
563
564 memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
565 it->buffer_ofs += prefix_len;
566 return 0;
567 }
568
xattr_namelist(struct xattr_iter * _it,unsigned int processed,char * buf,unsigned int len)569 static int xattr_namelist(struct xattr_iter *_it,
570 unsigned int processed, char *buf, unsigned int len)
571 {
572 struct listxattr_iter *it =
573 container_of(_it, struct listxattr_iter, it);
574
575 memcpy(it->buffer + it->buffer_ofs, buf, len);
576 it->buffer_ofs += len;
577 return 0;
578 }
579
xattr_skipvalue(struct xattr_iter * _it,unsigned int value_sz)580 static int xattr_skipvalue(struct xattr_iter *_it,
581 unsigned int value_sz)
582 {
583 struct listxattr_iter *it =
584 container_of(_it, struct listxattr_iter, it);
585
586 it->buffer[it->buffer_ofs++] = '\0';
587 return 1;
588 }
589
/* iteration callbacks implementing name enumeration for listxattr */
static const struct xattr_iter_handlers list_xattr_handlers = {
	.entry = xattr_entrylist,
	.name = xattr_namelist,
	.alloc_buffer = xattr_skipvalue,
	.value = NULL
};
596
inline_listxattr(struct listxattr_iter * it)597 static int inline_listxattr(struct listxattr_iter *it)
598 {
599 int ret;
600 unsigned int remaining;
601
602 ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
603 if (ret < 0)
604 return ret;
605
606 remaining = ret;
607 while (remaining) {
608 ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
609 if (ret)
610 break;
611 }
612 xattr_iter_end_final(&it->it);
613 return ret ? ret : it->buffer_ofs;
614 }
615
/*
 * Enumerate the inode's shared xattrs into it->buffer, continuing after
 * whatever inline_listxattr() already emitted (it->buffer_ofs).  Mirrors
 * shared_getxattr()'s page reuse across same-block shared xattrs.
 *
 * Returns total bytes emitted so far (>= 0) or a negative errno.
 */
static int shared_listxattr(struct listxattr_iter *it)
{
	struct inode *const inode = d_inode(it->dentry);
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	unsigned int i;
	int ret = 0;

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		erofs_blk_t blkaddr =
			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
		/* (re)map only when this xattr lives in a different block */
		if (!i || blkaddr != it->it.blkaddr) {
			if (i)
				xattr_iter_end(&it->it, true);

			it->it.page = erofs_get_meta_page(sb, blkaddr);
			if (IS_ERR(it->it.page))
				return PTR_ERR(it->it.page);

			it->it.kaddr = kmap_atomic(it->it.page);
			it->it.blkaddr = blkaddr;
		}

		ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
		if (ret)
			break;
	}
	/* a page is pinned iff at least one shared xattr was visited */
	if (vi->xattr_shared_count)
		xattr_iter_end_final(&it->it);

	return ret ? ret : it->buffer_ofs;
}
651
erofs_listxattr(struct dentry * dentry,char * buffer,size_t buffer_size)652 ssize_t erofs_listxattr(struct dentry *dentry,
653 char *buffer, size_t buffer_size)
654 {
655 int ret;
656 struct listxattr_iter it;
657
658 ret = init_inode_xattrs(d_inode(dentry));
659 if (ret == -ENOATTR)
660 return 0;
661 if (ret)
662 return ret;
663
664 it.dentry = dentry;
665 it.buffer = buffer;
666 it.buffer_size = buffer_size;
667 it.buffer_ofs = 0;
668
669 it.it.sb = dentry->d_sb;
670
671 ret = inline_listxattr(&it);
672 if (ret < 0 && ret != -ENOATTR)
673 return ret;
674 return shared_listxattr(&it);
675 }
676
677 #ifdef CONFIG_EROFS_FS_POSIX_ACL
erofs_get_acl(struct inode * inode,int type)678 struct posix_acl *erofs_get_acl(struct inode *inode, int type)
679 {
680 struct posix_acl *acl;
681 int prefix, rc;
682 char *value = NULL;
683
684 switch (type) {
685 case ACL_TYPE_ACCESS:
686 prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS;
687 break;
688 case ACL_TYPE_DEFAULT:
689 prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT;
690 break;
691 default:
692 return ERR_PTR(-EINVAL);
693 }
694
695 rc = erofs_getxattr(inode, prefix, "", NULL, 0);
696 if (rc > 0) {
697 value = kmalloc(rc, GFP_KERNEL);
698 if (!value)
699 return ERR_PTR(-ENOMEM);
700 rc = erofs_getxattr(inode, prefix, "", value, rc);
701 }
702
703 if (rc == -ENOATTR)
704 acl = NULL;
705 else if (rc < 0)
706 acl = ERR_PTR(rc);
707 else
708 acl = posix_acl_from_xattr(&init_user_ns, value, rc);
709 kfree(value);
710 return acl;
711 }
712 #endif
713
714