// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext2/xattr.c
 *
 * Copyright (C) 2001-2003 Andreas Gruenbacher <agruen@suse.de>
 *
 * Fix by Harrison Xing <harrison@mountainviewdata.com>.
 * Extended attributes for symlinks and special files added per
 *  suggestion of Luka Renko <luka.renko@hermes.si>.
 * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
 *  Red Hat Inc.
 *
 */

/*
 * Extended attributes are stored on disk blocks allocated outside of
 * any inode. The i_file_acl field is then made to point to this allocated
 * block. If two inodes have identical sets of extended attributes, they
 * may share the same extended attribute block. Such situations are
 * automatically detected by keeping a cache of recent attribute block
 * numbers and hashes over the block's contents in memory.
 *
 *
 * Extended attribute block layout:
 *
 *   +------------------+
 *   | header           |
 *   | entry 1          | |
 *   | entry 2          | | growing downwards
 *   | entry 3          | v
 *   | four null bytes  |
 *   | . . .            |
 *   | value 1          | ^
 *   | value 3          | | growing upwards
 *   | value 2          | |
 *   +------------------+
 *
 * The block header is followed by multiple entry descriptors. These entry
 * descriptors are variable in size, and aligned to EXT2_XATTR_PAD
 * byte boundaries. The entry descriptors are sorted by attribute name,
 * so that two extended attribute blocks can be compared efficiently.
 *
 * Attribute values are aligned to the end of the block, stored in
 * no specific order. They are also padded to EXT2_XATTR_PAD byte
 * boundaries. No additional gaps are left between them.
 *
 * Locking strategy
 * ----------------
 * EXT2_I(inode)->i_file_acl is protected by EXT2_I(inode)->xattr_sem.
 * EA blocks are only changed if they are exclusive to an inode, so
 * holding xattr_sem also means that nothing but the EA block's reference
 * count will change. Multiple writers to an EA block are synchronized
 * by the bh lock. No more than a single bh lock is held at any time
 * to avoid deadlocks.
 */
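
/*
 * For orientation, the on-disk structures used below look roughly as
 * follows (a sketch only; the authoritative definitions live in
 * fs/ext2/xattr.h):
 *
 *	struct ext2_xattr_header {
 *		__le32	h_magic;	// EXT2_XATTR_MAGIC
 *		__le32	h_refcount;	// inodes sharing this block
 *		__le32	h_blocks;	// blocks used, always 1 here
 *		__le32	h_hash;		// hash of all entries, 0 if unshareable
 *		__u32	h_reserved[4];
 *	};
 *
 *	struct ext2_xattr_entry {
 *		__u8	e_name_len;	// length of the attribute name
 *		__u8	e_name_index;	// EXT2_XATTR_INDEX_* namespace
 *		__le16	e_value_offs;	// offset of the value in the block
 *		__le32	e_value_block;	// unused, must be zero
 *		__le32	e_value_size;	// size of the attribute value
 *		__le32	e_hash;		// hash of name and value
 *		char	e_name[];	// attribute name, not null-terminated
 *	};
 */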

#include <linux/buffer_head.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
#include <linux/security.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"

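/*
 * Helpers for walking an xattr block: HDR() maps a buffer's data to the
 * block header, FIRST_ENTRY() points just past the header, and
 * IS_LAST_ENTRY() tests for the four null bytes that terminate the entry
 * list (see the layout diagram above).
 */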
#define HDR(bh) ((struct ext2_xattr_header *)((bh)->b_data))
#define ENTRY(ptr) ((struct ext2_xattr_entry *)(ptr))
#define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1)
#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)

#ifdef EXT2_XATTR_DEBUG
# define ea_idebug(inode, f...) do { \
		printk(KERN_DEBUG "inode %s:%ld: ", \
			inode->i_sb->s_id, inode->i_ino); \
		printk(f); \
		printk("\n"); \
	} while (0)
# define ea_bdebug(bh, f...) do { \
		printk(KERN_DEBUG "block %pg:%lu: ", \
			bh->b_bdev, (unsigned long) bh->b_blocknr); \
		printk(f); \
		printk("\n"); \
	} while (0)
#else
# define ea_idebug(inode, f...)	no_printk(f)
# define ea_bdebug(bh, f...)	no_printk(f)
#endif

static int ext2_xattr_set2(struct inode *, struct buffer_head *,
			   struct ext2_xattr_header *);

static int ext2_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
static struct buffer_head *ext2_xattr_cache_find(struct inode *,
						 struct ext2_xattr_header *);
static void ext2_xattr_rehash(struct ext2_xattr_header *,
			      struct ext2_xattr_entry *);

static const struct xattr_handler *ext2_xattr_handler_map[] = {
	[EXT2_XATTR_INDEX_USER]		     = &ext2_xattr_user_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	[EXT2_XATTR_INDEX_POSIX_ACL_ACCESS]  = &posix_acl_access_xattr_handler,
	[EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
#endif
	[EXT2_XATTR_INDEX_TRUSTED]	     = &ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_SECURITY
	[EXT2_XATTR_INDEX_SECURITY]	     = &ext2_xattr_security_handler,
#endif
};

const struct xattr_handler *ext2_xattr_handlers[] = {
	&ext2_xattr_user_handler,
	&ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
#ifdef CONFIG_EXT2_FS_SECURITY
	&ext2_xattr_security_handler,
#endif
	NULL
};

#define EA_BLOCK_CACHE(inode)	(EXT2_SB(inode->i_sb)->s_ea_block_cache)

static inline const struct xattr_handler *
ext2_xattr_handler(int name_index)
{
	const struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < ARRAY_SIZE(ext2_xattr_handler_map))
		handler = ext2_xattr_handler_map[name_index];
	return handler;
}

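/*
 * Sanity-check an xattr block header: the magic number must match and
 * h_blocks must be 1 (an inode's attributes always live in a single block).
 */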
static bool
ext2_xattr_header_valid(struct ext2_xattr_header *header)
{
	if (header->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    header->h_blocks != cpu_to_le32(1))
		return false;

	return true;
}

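/*
 * Sanity-check a single entry descriptor: the following entry must still
 * lie within the block, e_value_block (unused in ext2) must be zero, and
 * the value must fit entirely below end_offs (the block size).
 */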
static bool
ext2_xattr_entry_valid(struct ext2_xattr_entry *entry,
		       char *end, size_t end_offs)
{
	struct ext2_xattr_entry *next;
	size_t size;

	next = EXT2_XATTR_NEXT(entry);
	if ((char *)next >= end)
		return false;

	if (entry->e_value_block != 0)
		return false;

	size = le32_to_cpu(entry->e_value_size);
	if (size > end_offs ||
	    le16_to_cpu(entry->e_value_offs) + size > end_offs)
		return false;

	return true;
}

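/*
 * Compare a (name_index, name) pair against an entry descriptor, in the
 * same order the entries are sorted in: first by name index, then by name
 * length, then by the name bytes themselves. Returns 0 on a match and a
 * negative or positive value (like memcmp()) otherwise.
 */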
static int
ext2_xattr_cmp_entry(int name_index, size_t name_len, const char *name,
		     struct ext2_xattr_entry *entry)
{
	int cmp;

	cmp = name_index - entry->e_name_index;
	if (!cmp)
		cmp = name_len - entry->e_name_len;
	if (!cmp)
		cmp = memcmp(name, entry->e_name, name_len);

	return cmp;
}

/*
 * ext2_xattr_get()
 *
 * Copy an extended attribute into the buffer
 * provided, or compute the buffer size required.
 * Pass a NULL buffer to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
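/*
 * Typical usage (a sketch; the attribute name is illustrative only):
 *
 *	len = ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER, "foo", NULL, 0);
 *	if (len > 0)
 *		len = ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER, "foo",
 *				     buf, len);
 */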
int
ext2_xattr_get(struct inode *inode, int name_index, const char *name,
	       void *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	size_t name_len, size;
	char *end;
	int error, not_found;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);

	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	if (name_len > 255)
		return -ERANGE;

	down_read(&EXT2_I(inode)->xattr_sem);
	error = -ENODATA;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	if (!ext2_xattr_header_valid(HDR(bh))) {
bad_block:
		ext2_error(inode->i_sb, "ext2_xattr_get",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}

	/* find named attribute */
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		if (!ext2_xattr_entry_valid(entry, end,
		    inode->i_sb->s_blocksize))
			goto bad_block;

		not_found = ext2_xattr_cmp_entry(name_index, name_len, name,
						 entry);
		if (!not_found)
			goto found;
		if (not_found < 0)
			break;

		entry = EXT2_XATTR_NEXT(entry);
	}
	if (ext2_xattr_cache_insert(ea_block_cache, bh))
		ea_idebug(inode, "cache insert failed");
	error = -ENODATA;
	goto cleanup;
found:
	size = le32_to_cpu(entry->e_value_size);
	if (ext2_xattr_cache_insert(ea_block_cache, bh))
		ea_idebug(inode, "cache insert failed");
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		/* return value of attribute */
		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
			size);
	}
	error = size;

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}

/*
 * ext2_xattr_list()
 *
 * Copy a list of attribute names into the buffer
 * provided, or compute the buffer size required.
 * Pass a NULL buffer to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
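/*
 * The names are returned in the format listxattr(2) expects: each name is
 * prefixed with its namespace prefix (e.g. "user.") and terminated by a
 * NUL byte, with the strings concatenated back to back.
 */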
static int
ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = d_inode(dentry);
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	char *end;
	size_t rest = buffer_size;
	int error;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);

	down_read(&EXT2_I(inode)->xattr_sem);
	error = 0;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	if (!ext2_xattr_header_valid(HDR(bh))) {
bad_block:
		ext2_error(inode->i_sb, "ext2_xattr_list",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}

	/* check the on-disk data structure */
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		if (!ext2_xattr_entry_valid(entry, end,
		    inode->i_sb->s_blocksize))
			goto bad_block;
		entry = EXT2_XATTR_NEXT(entry);
	}
	if (ext2_xattr_cache_insert(ea_block_cache, bh))
		ea_idebug(inode, "cache insert failed");

	/* list the attribute names */
	for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
	     entry = EXT2_XATTR_NEXT(entry)) {
		const struct xattr_handler *handler =
			ext2_xattr_handler(entry->e_name_index);

		if (handler && (!handler->list || handler->list(dentry))) {
			const char *prefix = handler->prefix ?: handler->name;
			size_t prefix_len = strlen(prefix);
			size_t size = prefix_len + entry->e_name_len + 1;

			if (buffer) {
				if (size > rest) {
					error = -ERANGE;
					goto cleanup;
				}
				memcpy(buffer, prefix, prefix_len);
				buffer += prefix_len;
				memcpy(buffer, entry->e_name, entry->e_name_len);
				buffer += entry->e_name_len;
				*buffer++ = 0;
			}
			rest -= size;
		}
	}
	error = buffer_size - rest;  /* total size */

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}

/*
 * Inode operation listxattr()
 *
 * d_inode(dentry)->i_mutex: don't care
 */
ssize_t
ext2_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	return ext2_xattr_list(dentry, buffer, size);
}

/*
 * If the EXT2_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 * not set, set it.
 */
static void ext2_xattr_update_super_block(struct super_block *sb)
{
	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
		return;

	spin_lock(&EXT2_SB(sb)->s_lock);
	ext2_update_dynamic_rev(sb);
	EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
	spin_unlock(&EXT2_SB(sb)->s_lock);
	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
}

/*
 * ext2_xattr_set()
 *
 * Create, replace or remove an extended attribute for this inode.  Value
 * is NULL to remove an existing extended attribute, and non-NULL to
 * either replace an existing extended attribute, or create a new extended
 * attribute. The flags XATTR_REPLACE and XATTR_CREATE
 * specify that an extended attribute must exist and must not exist
 * prior to the call, respectively.
 *
 * Returns 0, or a negative error number on failure.
 */
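/*
 * Typical usage (a sketch; the attribute name and value are illustrative
 * only):
 *
 *	error = ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo",
 *			       "bar", 3, 0);
 *
 * Passing XATTR_CREATE instead of 0 fails with -EEXIST if "foo" already
 * exists; XATTR_REPLACE fails with -ENODATA if it does not.
 */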
int
ext2_xattr_set(struct inode *inode, int name_index, const char *name,
	       const void *value, size_t value_len, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh = NULL;
	struct ext2_xattr_header *header = NULL;
	struct ext2_xattr_entry *here = NULL, *last = NULL;
	size_t name_len, free, min_offs = sb->s_blocksize;
	int not_found = 1, error;
	char *end;

	/*
	 * header -- Points either into bh, or to a temporarily
	 *           allocated buffer.
	 * here -- The named entry found, or the place for inserting, within
	 *         the block pointed to by header.
	 * last -- Points right after the last named entry within the block
	 *         pointed to by header.
	 * min_offs -- The offset of the first value (values are aligned
	 *             towards the end of the block).
	 * end -- Points right after the block pointed to by header.
	 */

	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
		  name_index, name, value, (long)value_len);

	if (value == NULL)
		value_len = 0;
	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	if (name_len > 255 || value_len > sb->s_blocksize)
		return -ERANGE;
	error = dquot_initialize(inode);
	if (error)
		return error;
	down_write(&EXT2_I(inode)->xattr_sem);
	if (EXT2_I(inode)->i_file_acl) {
		/* The inode already has an extended attribute block. */
		bh = sb_bread(sb, EXT2_I(inode)->i_file_acl);
		error = -EIO;
		if (!bh)
			goto cleanup;
		ea_bdebug(bh, "b_count=%d, refcount=%d",
			atomic_read(&(bh->b_count)),
			le32_to_cpu(HDR(bh)->h_refcount));
		header = HDR(bh);
		end = bh->b_data + bh->b_size;
		if (!ext2_xattr_header_valid(header)) {
bad_block:
			ext2_error(sb, "ext2_xattr_set",
				"inode %ld: bad block %d", inode->i_ino,
				   EXT2_I(inode)->i_file_acl);
			error = -EIO;
			goto cleanup;
		}
		/*
		 * Find the named attribute. If not found, 'here' will point
		 * to the entry where the new attribute should be inserted to
		 * maintain sorting.
		 */
		last = FIRST_ENTRY(bh);
		while (!IS_LAST_ENTRY(last)) {
			if (!ext2_xattr_entry_valid(last, end, sb->s_blocksize))
				goto bad_block;
			if (last->e_value_size) {
				size_t offs = le16_to_cpu(last->e_value_offs);
				if (offs < min_offs)
					min_offs = offs;
			}
			if (not_found > 0) {
				not_found = ext2_xattr_cmp_entry(name_index,
								 name_len,
								 name, last);
				if (not_found <= 0)
					here = last;
			}
			last = EXT2_XATTR_NEXT(last);
		}
		if (not_found > 0)
			here = last;

		/* Check whether we have enough space left. */
		free = min_offs - ((char*)last - (char*)header) - sizeof(__u32);
	} else {
		/* We will use a new extended attribute block. */
		free = sb->s_blocksize -
			sizeof(struct ext2_xattr_header) - sizeof(__u32);
	}

	if (not_found) {
		/* Request to remove a nonexistent attribute? */
		error = -ENODATA;
		if (flags & XATTR_REPLACE)
			goto cleanup;
		error = 0;
		if (value == NULL)
			goto cleanup;
	} else {
		/* Request to create an existing attribute? */
		error = -EEXIST;
		if (flags & XATTR_CREATE)
			goto cleanup;
		free += EXT2_XATTR_SIZE(le32_to_cpu(here->e_value_size));
		free += EXT2_XATTR_LEN(name_len);
	}
	error = -ENOSPC;
	if (free < EXT2_XATTR_LEN(name_len) + EXT2_XATTR_SIZE(value_len))
		goto cleanup;

	/* Here we know that we can set the new attribute. */

	if (header) {
		/* assert(header == HDR(bh)); */
		lock_buffer(bh);
		if (header->h_refcount == cpu_to_le32(1)) {
			__u32 hash = le32_to_cpu(header->h_hash);

			ea_bdebug(bh, "modifying in-place");
			/*
			 * This must happen under buffer lock for
			 * ext2_xattr_set2() to reliably detect modified block
			 */
			mb_cache_entry_delete(EA_BLOCK_CACHE(inode), hash,
					      bh->b_blocknr);

			/* keep the buffer locked while modifying it. */
		} else {
			int offset;

			unlock_buffer(bh);
			ea_bdebug(bh, "cloning");
			header = kmemdup(HDR(bh), bh->b_size, GFP_KERNEL);
			error = -ENOMEM;
			if (header == NULL)
				goto cleanup;
			header->h_refcount = cpu_to_le32(1);

			offset = (char *)here - bh->b_data;
			here = ENTRY((char *)header + offset);
			offset = (char *)last - bh->b_data;
			last = ENTRY((char *)header + offset);
		}
	} else {
		/* Allocate a buffer where we construct the new block. */
		header = kzalloc(sb->s_blocksize, GFP_KERNEL);
		error = -ENOMEM;
		if (header == NULL)
			goto cleanup;
		end = (char *)header + sb->s_blocksize;
		header->h_magic = cpu_to_le32(EXT2_XATTR_MAGIC);
		header->h_blocks = header->h_refcount = cpu_to_le32(1);
		last = here = ENTRY(header+1);
	}

	/* Iff we are modifying the block in-place, bh is locked here. */

	if (not_found) {
		/* Insert the new name. */
		size_t size = EXT2_XATTR_LEN(name_len);
		size_t rest = (char *)last - (char *)here;
		memmove((char *)here + size, here, rest);
		memset(here, 0, size);
		here->e_name_index = name_index;
		here->e_name_len = name_len;
		memcpy(here->e_name, name, name_len);
	} else {
		if (here->e_value_size) {
			char *first_val = (char *)header + min_offs;
			size_t offs = le16_to_cpu(here->e_value_offs);
			char *val = (char *)header + offs;
			size_t size = EXT2_XATTR_SIZE(
				le32_to_cpu(here->e_value_size));

			if (size == EXT2_XATTR_SIZE(value_len)) {
				/* The old and the new value have the same
				   size. Just replace. */
				here->e_value_size = cpu_to_le32(value_len);
				memset(val + size - EXT2_XATTR_PAD, 0,
				       EXT2_XATTR_PAD); /* Clear pad bytes. */
				memcpy(val, value, value_len);
				goto skip_replace;
			}

			/* Remove the old value. */
			memmove(first_val + size, first_val, val - first_val);
			memset(first_val, 0, size);
			min_offs += size;

			/* Adjust all value offsets. */
			last = ENTRY(header+1);
			while (!IS_LAST_ENTRY(last)) {
				size_t o = le16_to_cpu(last->e_value_offs);
				if (o < offs)
					last->e_value_offs =
						cpu_to_le16(o + size);
				last = EXT2_XATTR_NEXT(last);
			}

			here->e_value_offs = 0;
		}
		if (value == NULL) {
			/* Remove the old name. */
			size_t size = EXT2_XATTR_LEN(name_len);
			last = ENTRY((char *)last - size);
			memmove(here, (char*)here + size,
				(char*)last - (char*)here);
			memset(last, 0, size);
		}
	}

	if (value != NULL) {
		/* Insert the new value. */
		here->e_value_size = cpu_to_le32(value_len);
		if (value_len) {
			size_t size = EXT2_XATTR_SIZE(value_len);
			char *val = (char *)header + min_offs - size;
			here->e_value_offs =
				cpu_to_le16((char *)val - (char *)header);
			memset(val + size - EXT2_XATTR_PAD, 0,
			       EXT2_XATTR_PAD); /* Clear the pad bytes. */
			memcpy(val, value, value_len);
		}
	}

skip_replace:
	if (IS_LAST_ENTRY(ENTRY(header+1))) {
		/* This block is now empty. */
		if (bh && header == HDR(bh))
			unlock_buffer(bh);  /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, NULL);
	} else {
		ext2_xattr_rehash(header, here);
		if (bh && header == HDR(bh))
			unlock_buffer(bh);  /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, header);
	}

cleanup:
	if (!(bh && header == HDR(bh)))
		kfree(header);
	brelse(bh);
	up_write(&EXT2_I(inode)->xattr_sem);

	return error;
}

/*
 * Second half of ext2_xattr_set(): Update the file system.
 */
static int
ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
		struct ext2_xattr_header *header)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh = NULL;
	int error;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	if (header) {
		new_bh = ext2_xattr_cache_find(inode, header);
		if (new_bh) {
			/* We found an identical block in the cache. */
			if (new_bh == old_bh) {
				ea_bdebug(new_bh, "keeping this block");
			} else {
				/* The old block is released after updating
				   the inode.  */
				ea_bdebug(new_bh, "reusing block");

				error = dquot_alloc_block(inode, 1);
				if (error) {
					unlock_buffer(new_bh);
					goto cleanup;
				}
				le32_add_cpu(&HDR(new_bh)->h_refcount, 1);
				ea_bdebug(new_bh, "refcount now=%d",
					le32_to_cpu(HDR(new_bh)->h_refcount));
			}
			unlock_buffer(new_bh);
		} else if (old_bh && header == HDR(old_bh)) {
			/* Keep this block. No need to lock the block as we
			   don't need to change the reference count. */
			new_bh = old_bh;
			get_bh(new_bh);
			ext2_xattr_cache_insert(ea_block_cache, new_bh);
		} else {
			/* We need to allocate a new block */
			ext2_fsblk_t goal = ext2_group_first_block_no(sb,
						EXT2_I(inode)->i_block_group);
			ext2_fsblk_t block = ext2_new_block(inode, goal, &error);
			if (error)
				goto cleanup;
			ea_idebug(inode, "creating block %lu", block);

			new_bh = sb_getblk(sb, block);
			if (unlikely(!new_bh)) {
				ext2_free_blocks(inode, block, 1);
				mark_inode_dirty(inode);
				error = -ENOMEM;
				goto cleanup;
			}
			lock_buffer(new_bh);
			memcpy(new_bh->b_data, header, new_bh->b_size);
			set_buffer_uptodate(new_bh);
			unlock_buffer(new_bh);
			ext2_xattr_cache_insert(ea_block_cache, new_bh);

			ext2_xattr_update_super_block(sb);
		}
		mark_buffer_dirty(new_bh);
		if (IS_SYNC(inode)) {
			sync_dirty_buffer(new_bh);
			error = -EIO;
			if (buffer_req(new_bh) && !buffer_uptodate(new_bh))
				goto cleanup;
		}
	}

	/* Update the inode. */
	EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
	inode->i_ctime = current_time(inode);
	if (IS_SYNC(inode)) {
		error = sync_inode_metadata(inode, 1);
		/* In case sync failed due to ENOSPC, the inode was actually
		 * written (only some dirty data were not), so we just proceed
		 * as if nothing happened and clean up the unused block */
		if (error && error != -ENOSPC) {
			if (new_bh && new_bh != old_bh) {
				dquot_free_block_nodirty(inode, 1);
				mark_inode_dirty(inode);
			}
			goto cleanup;
		}
	} else
		mark_inode_dirty(inode);

	error = 0;
	if (old_bh && old_bh != new_bh) {
		/*
		 * If there was an old block and we are no longer using it,
		 * release the old block.
		 */
		lock_buffer(old_bh);
		if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) {
			__u32 hash = le32_to_cpu(HDR(old_bh)->h_hash);

			/*
			 * This must happen under buffer lock for
			 * ext2_xattr_set2() to reliably detect freed block
			 */
			mb_cache_entry_delete(ea_block_cache, hash,
					      old_bh->b_blocknr);
			/* Free the old block. */
			ea_bdebug(old_bh, "freeing");
			ext2_free_blocks(inode, old_bh->b_blocknr, 1);
			mark_inode_dirty(inode);
			/* We let our caller release old_bh, so we
			 * need to duplicate the buffer before. */
			get_bh(old_bh);
			bforget(old_bh);
		} else {
			/* Decrement the refcount only. */
			le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
			dquot_free_block_nodirty(inode, 1);
			mark_inode_dirty(inode);
			mark_buffer_dirty(old_bh);
			ea_bdebug(old_bh, "refcount now=%d",
				le32_to_cpu(HDR(old_bh)->h_refcount));
		}
		unlock_buffer(old_bh);
	}

cleanup:
	brelse(new_bh);

	return error;
}

/*
 * ext2_xattr_delete_inode()
 *
 * Free extended attribute resources associated with this inode. This
 * is called immediately before an inode is freed.
 */
void
ext2_xattr_delete_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	struct ext2_sb_info *sbi = EXT2_SB(inode->i_sb);

	/*
	 * We are the only ones holding a reference to the inode. The
	 * xattr_sem had better be unlocked! We could as well just not
	 * acquire xattr_sem at all but this makes the code more futureproof.
	 * OTOH we need trylock here to avoid a false-positive warning from
	 * lockdep about a reclaim circular dependency.
	 */
	if (WARN_ON_ONCE(!down_write_trylock(&EXT2_I(inode)->xattr_sem)))
		return;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;

	if (!ext2_data_block_valid(sbi, EXT2_I(inode)->i_file_acl, 1)) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: xattr block %d is out of data blocks range",
			inode->i_ino, EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}

	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	if (!bh) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: block %d read error", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
	if (!ext2_xattr_header_valid(HDR(bh))) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	lock_buffer(bh);
	if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
		__u32 hash = le32_to_cpu(HDR(bh)->h_hash);

		/*
		 * This must happen under buffer lock for ext2_xattr_set2() to
		 * reliably detect freed block
		 */
		mb_cache_entry_delete(EA_BLOCK_CACHE(inode), hash,
				      bh->b_blocknr);
		ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
		get_bh(bh);
		bforget(bh);
		unlock_buffer(bh);
	} else {
		le32_add_cpu(&HDR(bh)->h_refcount, -1);
		ea_bdebug(bh, "refcount now=%d",
			le32_to_cpu(HDR(bh)->h_refcount));
		unlock_buffer(bh);
		mark_buffer_dirty(bh);
		if (IS_SYNC(inode))
			sync_dirty_buffer(bh);
		dquot_free_block_nodirty(inode, 1);
	}
	EXT2_I(inode)->i_file_acl = 0;

cleanup:
	brelse(bh);
	up_write(&EXT2_I(inode)->xattr_sem);
}

/*
 * ext2_xattr_cache_insert()
 *
 * Create a new entry in the extended attribute cache, and insert
 * it unless such an entry is already in the cache.
 *
 * Returns 0, or a negative error number on failure.
 */
static int
ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh)
{
	__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
	int error;

	error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr,
				      true);
	if (error) {
		if (error == -EBUSY) {
			ea_bdebug(bh, "already in cache");
			error = 0;
		}
	} else
		ea_bdebug(bh, "inserting [%x]", (int)hash);
	return error;
}

/*
 * ext2_xattr_cmp()
 *
 * Compare two extended attribute blocks for equality.
 *
 * Returns 0 if the blocks are equal, 1 if they differ, and
 * a negative error number on errors.
 */
static int
ext2_xattr_cmp(struct ext2_xattr_header *header1,
	       struct ext2_xattr_header *header2)
{
	struct ext2_xattr_entry *entry1, *entry2;

	entry1 = ENTRY(header1+1);
	entry2 = ENTRY(header2+1);
	while (!IS_LAST_ENTRY(entry1)) {
		if (IS_LAST_ENTRY(entry2))
			return 1;
		if (entry1->e_hash != entry2->e_hash ||
		    entry1->e_name_index != entry2->e_name_index ||
		    entry1->e_name_len != entry2->e_name_len ||
		    entry1->e_value_size != entry2->e_value_size ||
		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
			return 1;
		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
			return -EIO;
		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
			   le32_to_cpu(entry1->e_value_size)))
			return 1;

		entry1 = EXT2_XATTR_NEXT(entry1);
		entry2 = EXT2_XATTR_NEXT(entry2);
	}
	if (!IS_LAST_ENTRY(entry2))
		return 1;
	return 0;
}

/*
 * ext2_xattr_cache_find()
 *
 * Find an identical extended attribute block.
 *
 * Returns a locked buffer head to the block found, or NULL if such
 * a block was not found or an error occurred.
 */
static struct buffer_head *
ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
{
	__u32 hash = le32_to_cpu(header->h_hash);
	struct mb_cache_entry *ce;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	if (!header->h_hash)
		return NULL;  /* never share */
	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
again:
	ce = mb_cache_entry_find_first(ea_block_cache, hash);
	while (ce) {
		struct buffer_head *bh;

		bh = sb_bread(inode->i_sb, ce->e_value);
		if (!bh) {
			ext2_error(inode->i_sb, "ext2_xattr_cache_find",
				"inode %ld: block %ld read error",
				inode->i_ino, (unsigned long) ce->e_value);
		} else {
			lock_buffer(bh);
			/*
			 * We have to be careful about races with freeing or
			 * rehashing of the xattr block. Once we hold the
			 * buffer lock, the xattr block's state is stable, so
			 * we can check whether the block got freed / rehashed
			 * or not. Since we unhash the mbcache entry under the
			 * buffer lock when freeing / rehashing an xattr block,
			 * checking whether the entry is still hashed is
			 * reliable.
			 */
			if (hlist_bl_unhashed(&ce->e_hash_list)) {
				mb_cache_entry_put(ea_block_cache, ce);
				unlock_buffer(bh);
				brelse(bh);
				goto again;
			} else if (le32_to_cpu(HDR(bh)->h_refcount) >
				   EXT2_XATTR_REFCOUNT_MAX) {
				ea_idebug(inode, "block %ld refcount %d>%d",
					  (unsigned long) ce->e_value,
					  le32_to_cpu(HDR(bh)->h_refcount),
					  EXT2_XATTR_REFCOUNT_MAX);
			} else if (!ext2_xattr_cmp(header, HDR(bh))) {
				ea_bdebug(bh, "b_count=%d",
					  atomic_read(&(bh->b_count)));
				mb_cache_entry_touch(ea_block_cache, ce);
				mb_cache_entry_put(ea_block_cache, ce);
				return bh;
			}
			unlock_buffer(bh);
			brelse(bh);
		}
		ce = mb_cache_entry_find_next(ea_block_cache, ce);
	}
	return NULL;
}

#define NAME_HASH_SHIFT 5
#define VALUE_HASH_SHIFT 16

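/*
 * Note: for the 32-bit hash used below, (hash << s) ^ (hash >> (32 - s))
 * is simply a left rotation by s bits, since the two shifted halves do not
 * overlap. Each step therefore rotates the running hash and xors in the
 * next name byte or value word.
 */
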
/*
 * ext2_xattr_hash_entry()
 *
 * Compute the hash of an extended attribute.
 */
static inline void ext2_xattr_hash_entry(struct ext2_xattr_header *header,
					 struct ext2_xattr_entry *entry)
{
	__u32 hash = 0;
	char *name = entry->e_name;
	int n;

	for (n=0; n < entry->e_name_len; n++) {
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
		       *name++;
	}

	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
		__le32 *value = (__le32 *)((char *)header +
			le16_to_cpu(entry->e_value_offs));
		for (n = (le32_to_cpu(entry->e_value_size) +
		     EXT2_XATTR_ROUND) >> EXT2_XATTR_PAD_BITS; n; n--) {
			hash = (hash << VALUE_HASH_SHIFT) ^
			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
			       le32_to_cpu(*value++);
		}
	}
	entry->e_hash = cpu_to_le32(hash);
}

#undef NAME_HASH_SHIFT
#undef VALUE_HASH_SHIFT

#define BLOCK_HASH_SHIFT 16

/*
 * ext2_xattr_rehash()
 *
 * Re-compute the extended attribute hash value after an entry has changed.
 */
static void ext2_xattr_rehash(struct ext2_xattr_header *header,
			      struct ext2_xattr_entry *entry)
{
	struct ext2_xattr_entry *here;
	__u32 hash = 0;

	ext2_xattr_hash_entry(header, entry);
	here = ENTRY(header+1);
	while (!IS_LAST_ENTRY(here)) {
		if (!here->e_hash) {
			/* Block is not shared if an entry's hash value == 0 */
			hash = 0;
			break;
		}
		hash = (hash << BLOCK_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
		       le32_to_cpu(here->e_hash);
		here = EXT2_XATTR_NEXT(here);
	}
	header->h_hash = cpu_to_le32(hash);
}

#undef BLOCK_HASH_SHIFT

#define HASH_BUCKET_BITS 10

struct mb_cache *ext2_xattr_create_cache(void)
{
	return mb_cache_create(HASH_BUCKET_BITS);
}

void ext2_xattr_destroy_cache(struct mb_cache *cache)
{
	if (cache)
		mb_cache_destroy(cache);
}