// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include <linux/security.h>
#include "xattr.h"

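/*
 * xattr_iter is a cursor over one metadata block at a time: 'page'/'kaddr'
 * hold the currently mapped block of superblock 'sb', while 'blkaddr' and
 * 'ofs' track the block number and the byte offset within that block.
 */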
struct xattr_iter {
	struct super_block *sb;
	struct page *page;
	void *kaddr;

	erofs_blk_t blkaddr;
	unsigned int ofs;
};

static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
{
	/* the only user of kunmap() is 'init_inode_xattrs' */
	if (!atomic)
		kunmap(it->page);
	else
		kunmap_atomic(it->kaddr);

	unlock_page(it->page);
	put_page(it->page);
}

static inline void xattr_iter_end_final(struct xattr_iter *it)
{
	if (!it->page)
		return;

	xattr_iter_end(it, true);
}

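/*
 * init_inode_xattrs() parses the xattr ibody header once per inode and
 * caches the shared xattr id array in vi->xattr_shared_xattrs; concurrent
 * initializers are serialized by the EROFS_I_BL_XATTR_BIT bit lock.
 */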
static int init_inode_xattrs(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct xattr_iter it;
	unsigned int i;
	struct erofs_xattr_ibody_header *ih;
	struct super_block *sb;
	struct erofs_sb_info *sbi;
	bool atomic_map;
	int ret = 0;

	/* in most cases, the xattrs of this inode have already been initialized */
	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags)) {
		/*
		 * paired with smp_mb() at the end of the function to ensure
		 * fields will only be observed after the bit is set.
		 */
		smp_mb();
		return 0;
	}

	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	/* has someone else initialized xattrs for us? */
	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
		goto out_unlock;

	/*
	 * bypass all xattr operations if ->xattr_isize is not greater than
	 * sizeof(struct erofs_xattr_ibody_header); in detail:
	 * 1) if it cannot hold erofs_xattr_ibody_header, ->xattr_isize must
	 *    be 0 (which means this inode has no xattrs at all);
	 * 2) if it holds exactly erofs_xattr_ibody_header, the on-disk layout
	 *    is currently undefined (it may be used later with a new sb
	 *    feature).
	 */
	if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
		erofs_err(inode->i_sb,
			  "xattr_isize %d of nid %llu is not supported yet",
			  vi->xattr_isize, vi->nid);
		ret = -EOPNOTSUPP;
		goto out_unlock;
	} else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
		if (vi->xattr_isize) {
			erofs_err(inode->i_sb,
				  "bogus xattr ibody @ nid %llu", vi->nid);
			DBG_BUGON(1);
			ret = -EFSCORRUPTED;
			goto out_unlock;	/* xattr ondisk layout error */
		}
		ret = -ENOATTR;
		goto out_unlock;
	}

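	/*
	 * The ibody header starts right after the fixed inode at
	 * iloc(sbi, nid) + inode_isize; per erofs_fs.h it is 12 bytes
	 * (h_reserved, h_shared_count, h_reserved2[7]) and is followed by
	 * h_shared_count little-endian 32-bit shared xattr ids.
	 */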
	sb = inode->i_sb;
	sbi = EROFS_SB(sb);
	it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
	it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);

	it.page = erofs_get_meta_page(sb, it.blkaddr);
	if (IS_ERR(it.page)) {
		ret = PTR_ERR(it.page);
		goto out_unlock;
	}

	/* read in shared xattr array (non-atomic, see kmalloc below) */
	it.kaddr = kmap(it.page);
	atomic_map = false;

	ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);

	vi->xattr_shared_count = ih->h_shared_count;
	vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
						sizeof(uint), GFP_KERNEL);
	if (!vi->xattr_shared_xattrs) {
		xattr_iter_end(&it, atomic_map);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* let's skip ibody header */
	it.ofs += sizeof(struct erofs_xattr_ibody_header);

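	/*
	 * Copy out the shared xattr id array, remapping whenever the cursor
	 * crosses a block boundary.  Only the first block is mapped with
	 * kmap() since kmalloc_array() above may sleep; follow-up blocks
	 * can safely use kmap_atomic().
	 */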
	for (i = 0; i < vi->xattr_shared_count; ++i) {
		if (it.ofs >= EROFS_BLKSIZ) {
			/* cannot be unaligned */
			DBG_BUGON(it.ofs != EROFS_BLKSIZ);
			xattr_iter_end(&it, atomic_map);

			it.page = erofs_get_meta_page(sb, ++it.blkaddr);
			if (IS_ERR(it.page)) {
				kfree(vi->xattr_shared_xattrs);
				vi->xattr_shared_xattrs = NULL;
				ret = PTR_ERR(it.page);
				goto out_unlock;
			}

			it.kaddr = kmap_atomic(it.page);
			atomic_map = true;
			it.ofs = 0;
		}
		vi->xattr_shared_xattrs[i] =
			le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
		it.ofs += sizeof(__le32);
	}
	xattr_iter_end(&it, atomic_map);

	/* paired with smp_mb() at the beginning of the function. */
	smp_mb();
	set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);

out_unlock:
	clear_and_wake_up_bit(EROFS_I_BL_XATTR_BIT, &vi->flags);
	return ret;
}

/*
 * The general idea of these return values:
 * if 0 is returned, go on processing the current xattr;
 * if 1 (> 0) is returned, skip this round and process the next xattr;
 * if -err (< 0) is returned, an error (e.g. -ENOATTR) occurred
 * and needs to be handled by the caller.
 */
struct xattr_iter_handlers {
	int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry);
	int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf,
		    unsigned int len);
	int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz);
	void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf,
		      unsigned int len);
};

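/*
 * Remap the cursor once 'ofs' has run past the end of the current block:
 * advance 'blkaddr' by the number of whole blocks consumed and keep only
 * the in-block remainder, so that 'ofs' < EROFS_BLKSIZ holds on return.
 */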
static inline int xattr_iter_fixup(struct xattr_iter *it)
{
	if (it->ofs < EROFS_BLKSIZ)
		return 0;

	xattr_iter_end(it, true);

	it->blkaddr += erofs_blknr(it->ofs);

	it->page = erofs_get_meta_page(it->sb, it->blkaddr);
	if (IS_ERR(it->page)) {
		int err = PTR_ERR(it->page);

		it->page = NULL;
		return err;
	}

	it->kaddr = kmap_atomic(it->page);
	it->ofs = erofs_blkoff(it->ofs);
	return 0;
}

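/*
 * Position the cursor at the first inline xattr of 'inode'.  Returns the
 * number of inline xattr bytes that follow (xattr_isize minus the inline
 * xattr header, which per inlinexattr_header_size() in xattr.h covers the
 * ibody header plus the shared xattr id array) or a negative error.
 */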
static int inline_xattr_iter_begin(struct xattr_iter *it,
				   struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
	unsigned int xattr_header_sz, inline_xattr_ofs;

	xattr_header_sz = inlinexattr_header_size(inode);
	if (xattr_header_sz >= vi->xattr_isize) {
		DBG_BUGON(xattr_header_sz > vi->xattr_isize);
		return -ENOATTR;
	}

	inline_xattr_ofs = vi->inode_isize + xattr_header_sz;

	it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
	it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);

	it->page = erofs_get_meta_page(inode->i_sb, it->blkaddr);
	if (IS_ERR(it->page))
		return PTR_ERR(it->page);

	it->kaddr = kmap_atomic(it->page);
	return vi->xattr_isize - xattr_header_sz;
}

/*
 * Regardless of success or failure, `xattr_foreach' will end up with
 * `ofs' pointing to the next xattr item rather than an arbitrary position.
 */
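/*
 * On-disk layout of one xattr item (per struct erofs_xattr_entry in
 * erofs_fs.h): a 4-byte header { u8 e_name_len; u8 e_name_index;
 * le16 e_value_size; } followed by the name (without its prefix, which
 * e_name_index selects) and then the value, the whole item padded up to
 * the next 4-byte (EROFS_XATTR_ALIGN) boundary.  E.g. a 3-byte name with
 * a 6-byte value occupies EROFS_XATTR_ALIGN(4 + 3 + 6) = 16 bytes.
 */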
static int xattr_foreach(struct xattr_iter *it,
			 const struct xattr_iter_handlers *op,
			 unsigned int *tlimit)
{
	struct erofs_xattr_entry entry;
	unsigned int value_sz, processed, slice;
	int err;

	/* 0. fixup blkaddr, ofs, ipage */
	err = xattr_iter_fixup(it);
	if (err)
		return err;

	/*
	 * 1. read the xattr entry into memory; since entries are
	 *    EROFS_XATTR_ALIGNed, the entry header cannot straddle a
	 *    block boundary and must lie entirely within the current page
	 */
	entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
	if (tlimit) {
		unsigned int entry_sz = erofs_xattr_entry_size(&entry);

		/* xattr on-disk corruption: xattr entry beyond xattr_isize */
		if (*tlimit < entry_sz) {
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		*tlimit -= entry_sz;
	}

	it->ofs += sizeof(struct erofs_xattr_entry);
	value_sz = le16_to_cpu(entry.e_value_size);

	/* handle entry */
	err = op->entry(it, &entry);
	if (err) {
		it->ofs += entry.e_name_len + value_sz;
		goto out;
	}

	/* 2. handle xattr name (ofs will finally be at the end of name) */
	processed = 0;

	while (processed < entry.e_name_len) {
		if (it->ofs >= EROFS_BLKSIZ) {
			DBG_BUGON(it->ofs > EROFS_BLKSIZ);

			err = xattr_iter_fixup(it);
			if (err)
				goto out;
			it->ofs = 0;
		}

		slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
			      entry.e_name_len - processed);

		/* handle name */
		err = op->name(it, processed, it->kaddr + it->ofs, slice);
		if (err) {
			it->ofs += entry.e_name_len - processed + value_sz;
			goto out;
		}

		it->ofs += slice;
		processed += slice;
	}

	/* 3. handle xattr value */
	processed = 0;

	if (op->alloc_buffer) {
		err = op->alloc_buffer(it, value_sz);
		if (err) {
			it->ofs += value_sz;
			goto out;
		}
	}

	while (processed < value_sz) {
		if (it->ofs >= EROFS_BLKSIZ) {
			DBG_BUGON(it->ofs > EROFS_BLKSIZ);

			err = xattr_iter_fixup(it);
			if (err)
				goto out;
			it->ofs = 0;
		}

		slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
			      value_sz - processed);
		op->value(it, processed, it->kaddr + it->ofs, slice);
		it->ofs += slice;
		processed += slice;
	}

out:
	/* xattrs should be 4-byte aligned (on-disk constraint) */
	it->ofs = EROFS_XATTR_ALIGN(it->ofs);
	return err < 0 ? err : 0;
}

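/*
 * getxattr_iter carries one lookup: 'index'/'name' identify the xattr to
 * find, while 'buffer'/'buffer_size' receive the value (a NULL buffer
 * turns the call into a size-only query).
 */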
struct getxattr_iter {
	struct xattr_iter it;

	char *buffer;
	int buffer_size, index;
	struct qstr name;
};

static int xattr_entrymatch(struct xattr_iter *_it,
			    struct erofs_xattr_entry *entry)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	return (it->index != entry->e_name_index ||
		it->name.len != entry->e_name_len) ? -ENOATTR : 0;
}

static int xattr_namematch(struct xattr_iter *_it,
			   unsigned int processed, char *buf, unsigned int len)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
}

static int xattr_checkbuffer(struct xattr_iter *_it,
			     unsigned int value_sz)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
	int err = it->buffer_size < value_sz ? -ERANGE : 0;

	it->buffer_size = value_sz;
	return !it->buffer ? 1 : err;
}

static void xattr_copyvalue(struct xattr_iter *_it,
			    unsigned int processed,
			    char *buf, unsigned int len)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	memcpy(it->buffer + processed, buf, len);
}

static const struct xattr_iter_handlers find_xattr_handlers = {
	.entry = xattr_entrymatch,
	.name = xattr_namematch,
	.alloc_buffer = xattr_checkbuffer,
	.value = xattr_copyvalue
};
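
/*
 * With these handlers, xattr_foreach() returns -ENOATTR for every entry
 * whose index/name does not match (so the caller simply tries the next
 * one), 1 for a size-only query (NULL buffer), and 0 once the value has
 * been copied out; buffer_size is updated to the value length either way.
 */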

static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
{
	int ret;
	unsigned int remaining;

	ret = inline_xattr_iter_begin(&it->it, inode);
	if (ret < 0)
		return ret;

	remaining = ret;
	while (remaining) {
		ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
		if (ret != -ENOATTR)
			break;
	}
	xattr_iter_end_final(&it->it);

	return ret ? ret : it->buffer_size;
}

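/*
 * Shared xattrs live in a packed region starting at sbi->xattr_blkaddr;
 * per the helpers in xattr.h, id i maps to byte offset i * 4 from there,
 * which xattrblock_addr()/xattrblock_offset() split into a block number
 * and an in-block offset.
 */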
static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	unsigned int i;
	int ret = -ENOATTR;

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		erofs_blk_t blkaddr =
			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);

		if (!i || blkaddr != it->it.blkaddr) {
			if (i)
				xattr_iter_end(&it->it, true);

			it->it.page = erofs_get_meta_page(sb, blkaddr);
			if (IS_ERR(it->it.page))
				return PTR_ERR(it->it.page);

			it->it.kaddr = kmap_atomic(it->it.page);
			it->it.blkaddr = blkaddr;
		}

		ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
		if (ret != -ENOATTR)
			break;
	}
	if (vi->xattr_shared_count)
		xattr_iter_end_final(&it->it);

	return ret ? ret : it->buffer_size;
}

static bool erofs_xattr_user_list(struct dentry *dentry)
{
	return test_opt(&EROFS_SB(dentry->d_sb)->opt, XATTR_USER);
}

static bool erofs_xattr_trusted_list(struct dentry *dentry)
{
	return capable(CAP_SYS_ADMIN);
}

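/*
 * Look up a single xattr: inline xattrs are searched first, then the
 * shared ones.  Returns the value size on success, -ENOATTR if the
 * attribute does not exist, or another negative error.
 */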
int erofs_getxattr(struct inode *inode, int index,
		   const char *name,
		   void *buffer, size_t buffer_size)
{
	int ret;
	struct getxattr_iter it;

	if (!name)
		return -EINVAL;

	ret = init_inode_xattrs(inode);
	if (ret)
		return ret;

	it.index = index;

	it.name.len = strlen(name);
	if (it.name.len > EROFS_NAME_LEN)
		return -ERANGE;
	it.name.name = name;

	it.buffer = buffer;
	it.buffer_size = buffer_size;

	it.it.sb = inode->i_sb;
	ret = inline_getxattr(inode, &it);
	if (ret == -ENOATTR)
		ret = shared_getxattr(inode, &it);
	return ret;
}

static int erofs_xattr_generic_get(const struct xattr_handler *handler,
				   struct dentry *unused, struct inode *inode,
				   const char *name, void *buffer, size_t size)
{
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);

	switch (handler->flags) {
	case EROFS_XATTR_INDEX_USER:
		if (!test_opt(&sbi->opt, XATTR_USER))
			return -EOPNOTSUPP;
		break;
	case EROFS_XATTR_INDEX_TRUSTED:
		break;
	case EROFS_XATTR_INDEX_SECURITY:
		break;
	default:
		return -EINVAL;
	}

	return erofs_getxattr(inode, handler->flags, name, buffer, size);
}

const struct xattr_handler erofs_xattr_user_handler = {
	.prefix	= XATTR_USER_PREFIX,
	.flags	= EROFS_XATTR_INDEX_USER,
	.list	= erofs_xattr_user_list,
	.get	= erofs_xattr_generic_get,
};

const struct xattr_handler erofs_xattr_trusted_handler = {
	.prefix	= XATTR_TRUSTED_PREFIX,
	.flags	= EROFS_XATTR_INDEX_TRUSTED,
	.list	= erofs_xattr_trusted_list,
	.get	= erofs_xattr_generic_get,
};

#ifdef CONFIG_EROFS_FS_SECURITY
const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.flags	= EROFS_XATTR_INDEX_SECURITY,
	.get	= erofs_xattr_generic_get,
};
#endif

const struct xattr_handler *erofs_xattr_handlers[] = {
	&erofs_xattr_user_handler,
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	&erofs_xattr_trusted_handler,
#ifdef CONFIG_EROFS_FS_SECURITY
	&erofs_xattr_security_handler,
#endif
	NULL,
};

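/*
 * listxattr_iter accumulates "prefix.name\0" strings into 'buffer' at
 * 'buffer_ofs'; with a NULL buffer it only sums up the required size,
 * mirroring the usual listxattr(2) two-call convention.
 */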
struct listxattr_iter {
	struct xattr_iter it;

	struct dentry *dentry;
	char *buffer;
	int buffer_size, buffer_ofs;
};

static int xattr_entrylist(struct xattr_iter *_it,
			   struct erofs_xattr_entry *entry)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);
	unsigned int prefix_len;
	const char *prefix;

	const struct xattr_handler *h =
		erofs_xattr_handler(entry->e_name_index);

	if (!h || (h->list && !h->list(it->dentry)))
		return 1;

	prefix = xattr_prefix(h);
	prefix_len = strlen(prefix);

	if (!it->buffer) {
		it->buffer_ofs += prefix_len + entry->e_name_len + 1;
		return 1;
	}

	if (it->buffer_ofs + prefix_len
		+ entry->e_name_len + 1 > it->buffer_size)
		return -ERANGE;

	memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
	it->buffer_ofs += prefix_len;
	return 0;
}

static int xattr_namelist(struct xattr_iter *_it,
			  unsigned int processed, char *buf, unsigned int len)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);

	memcpy(it->buffer + it->buffer_ofs, buf, len);
	it->buffer_ofs += len;
	return 0;
}

static int xattr_skipvalue(struct xattr_iter *_it,
			   unsigned int value_sz)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);

	it->buffer[it->buffer_ofs++] = '\0';
	return 1;
}

static const struct xattr_iter_handlers list_xattr_handlers = {
	.entry = xattr_entrylist,
	.name = xattr_namelist,
	.alloc_buffer = xattr_skipvalue,
	.value = NULL
};

static int inline_listxattr(struct listxattr_iter *it)
{
	int ret;
	unsigned int remaining;

	ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
	if (ret < 0)
		return ret;

	remaining = ret;
	while (remaining) {
		ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
		if (ret)
			break;
	}
	xattr_iter_end_final(&it->it);
	return ret ? ret : it->buffer_ofs;
}

static int shared_listxattr(struct listxattr_iter *it)
{
	struct inode *const inode = d_inode(it->dentry);
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	unsigned int i;
	int ret = 0;

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		erofs_blk_t blkaddr =
			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
		if (!i || blkaddr != it->it.blkaddr) {
			if (i)
				xattr_iter_end(&it->it, true);

			it->it.page = erofs_get_meta_page(sb, blkaddr);
			if (IS_ERR(it->it.page))
				return PTR_ERR(it->it.page);

			it->it.kaddr = kmap_atomic(it->it.page);
			it->it.blkaddr = blkaddr;
		}

		ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
		if (ret)
			break;
	}
	if (vi->xattr_shared_count)
		xattr_iter_end_final(&it->it);

	return ret ? ret : it->buffer_ofs;
}

ssize_t erofs_listxattr(struct dentry *dentry,
			char *buffer, size_t buffer_size)
{
	int ret;
	struct listxattr_iter it;

	ret = init_inode_xattrs(d_inode(dentry));
	if (ret == -ENOATTR)
		return 0;
	if (ret)
		return ret;

	it.dentry = dentry;
	it.buffer = buffer;
	it.buffer_size = buffer_size;
	it.buffer_ofs = 0;

	it.it.sb = dentry->d_sb;

	ret = inline_listxattr(&it);
	if (ret < 0 && ret != -ENOATTR)
		return ret;
	return shared_listxattr(&it);
}

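/*
 * erofs_get_acl() follows the usual two-call pattern: a first
 * erofs_getxattr() with a NULL buffer returns the raw ACL size, then a
 * second call fetches the payload for posix_acl_from_xattr() to decode.
 */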
#ifdef CONFIG_EROFS_FS_POSIX_ACL
struct posix_acl *erofs_get_acl(struct inode *inode, int type, bool rcu)
{
	struct posix_acl *acl;
	int prefix, rc;
	char *value = NULL;

	if (rcu)
		return ERR_PTR(-ECHILD);

	switch (type) {
	case ACL_TYPE_ACCESS:
		prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS;
		break;
	case ACL_TYPE_DEFAULT:
		prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	rc = erofs_getxattr(inode, prefix, "", NULL, 0);
	if (rc > 0) {
		value = kmalloc(rc, GFP_KERNEL);
		if (!value)
			return ERR_PTR(-ENOMEM);
		rc = erofs_getxattr(inode, prefix, "", value, rc);
	}

	if (rc == -ENOATTR)
		acl = NULL;
	else if (rc < 0)
		acl = ERR_PTR(rc);
	else
		acl = posix_acl_from_xattr(&init_user_ns, value, rc);
	kfree(value);
	return acl;
}
#endif