• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2012 Xyratex Technology Limited
3  *
4  * Author: Andrew Perepechko <Andrew_Perepechko@xyratex.com>
5  *
6  */
7 
8 #define DEBUG_SUBSYSTEM S_LLITE
9 
10 #include <linux/fs.h>
11 #include <linux/sched.h>
12 #include <linux/mm.h>
13 #include "../include/obd_support.h"
14 #include "../include/lustre_lite.h"
15 #include "../include/lustre_dlm.h"
16 #include "../include/lustre_ver.h"
17 #include "llite_internal.h"
18 
19 /* If we ever have hundreds of extended attributes, we might want to consider
20  * using a hash or a tree structure instead of list for faster lookups.
21  */
struct ll_xattr_entry {
	struct list_head	xe_list;    /* protected with
					     * lli_xattrs_list_rwsem */
	char			*xe_name;   /* xattr name, \0-terminated */
	char			*xe_value;  /* xattr value buffer (not
					     * necessarily NUL-terminated) */
	unsigned		xe_namelen; /* strlen(xe_name) + 1 */
	unsigned		xe_vallen;  /* xattr value length in bytes */
};
30 
/* slab cache backing struct ll_xattr_entry allocations */
static struct kmem_cache *xattr_kmem;
/* descriptor table consumed by lu_kmem_init()/lu_kmem_fini();
 * the entry with a NULL ckd_cache terminates the array */
static struct lu_kmem_descr xattr_caches[] = {
	{
		.ckd_cache = &xattr_kmem,
		.ckd_name  = "xattr_kmem",
		.ckd_size  = sizeof(struct ll_xattr_entry)
	},
	{
		.ckd_cache = NULL
	}
};
42 
/**
 * Set up the xattr entry slab cache(s).
 *
 * \retval result of lu_kmem_init() (0 on success)
 */
int ll_xattr_init(void)
{
	return lu_kmem_init(xattr_caches);
}
47 
/**
 * Tear down the xattr entry slab cache(s) created by ll_xattr_init().
 */
void ll_xattr_fini(void)
{
	lu_kmem_fini(xattr_caches);
}
52 
53 /**
54  * Initializes xattr cache for an inode.
55  *
56  * This initializes the xattr list and marks cache presence.
57  */
static void ll_xattr_cache_init(struct ll_inode_info *lli)
{
	LASSERT(lli != NULL);

	/* empty list + LLIF_XATTR_CACHE flag == "cache present";
	 * ll_xattr_cache_valid() tests this flag */
	INIT_LIST_HEAD(&lli->lli_xattrs);
	lli->lli_flags |= LLIF_XATTR_CACHE;
}
66 
67 /**
68  *  This looks for a specific extended attribute.
69  *
70  *  Find in @cache and return @xattr_name attribute in @xattr,
71  *  for the NULL @xattr_name return the first cached @xattr.
72  *
73  *  \retval 0        success
74  *  \retval -ENODATA if not found
75  */
ll_xattr_cache_find(struct list_head * cache,const char * xattr_name,struct ll_xattr_entry ** xattr)76 static int ll_xattr_cache_find(struct list_head *cache,
77 			       const char *xattr_name,
78 			       struct ll_xattr_entry **xattr)
79 {
80 	struct ll_xattr_entry *entry;
81 
82 	list_for_each_entry(entry, cache, xe_list) {
83 		/* xattr_name == NULL means look for any entry */
84 		if (xattr_name == NULL ||
85 		    strcmp(xattr_name, entry->xe_name) == 0) {
86 			*xattr = entry;
87 			CDEBUG(D_CACHE, "find: [%s]=%.*s\n",
88 			       entry->xe_name, entry->xe_vallen,
89 			       entry->xe_value);
90 			return 0;
91 		}
92 	}
93 
94 	return -ENODATA;
95 }
96 
97 /**
98  * This adds an xattr.
99  *
100  * Add @xattr_name attr with @xattr_val value and @xattr_val_len length,
101  *
102  * \retval 0       success
103  * \retval -ENOMEM if no memory could be allocated for the cached attr
104  * \retval -EPROTO if duplicate xattr is being added
105  */
ll_xattr_cache_add(struct list_head * cache,const char * xattr_name,const char * xattr_val,unsigned xattr_val_len)106 static int ll_xattr_cache_add(struct list_head *cache,
107 			      const char *xattr_name,
108 			      const char *xattr_val,
109 			      unsigned xattr_val_len)
110 {
111 	struct ll_xattr_entry *xattr;
112 
113 	if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
114 		CDEBUG(D_CACHE, "duplicate xattr: [%s]\n", xattr_name);
115 		return -EPROTO;
116 	}
117 
118 	xattr = kmem_cache_alloc(xattr_kmem, GFP_NOFS | __GFP_ZERO);
119 	if (xattr == NULL) {
120 		CDEBUG(D_CACHE, "failed to allocate xattr\n");
121 		return -ENOMEM;
122 	}
123 
124 	xattr->xe_name = kstrdup(xattr_name, GFP_NOFS);
125 	if (!xattr->xe_name) {
126 		CDEBUG(D_CACHE, "failed to alloc xattr name %u\n",
127 		       xattr->xe_namelen);
128 		goto err_name;
129 	}
130 	xattr->xe_value = kmemdup(xattr_val, xattr_val_len, GFP_NOFS);
131 	if (!xattr->xe_value)
132 		goto err_value;
133 
134 	xattr->xe_vallen = xattr_val_len;
135 	list_add(&xattr->xe_list, cache);
136 
137 	CDEBUG(D_CACHE, "set: [%s]=%.*s\n", xattr_name,
138 		xattr_val_len, xattr_val);
139 
140 	return 0;
141 err_value:
142 	kfree(xattr->xe_name);
143 err_name:
144 	kmem_cache_free(xattr_kmem, xattr);
145 
146 	return -ENOMEM;
147 }
148 
149 /**
150  * This removes an extended attribute from cache.
151  *
152  * Remove @xattr_name attribute from @cache.
153  *
154  * \retval 0        success
155  * \retval -ENODATA if @xattr_name is not cached
156  */
ll_xattr_cache_del(struct list_head * cache,const char * xattr_name)157 static int ll_xattr_cache_del(struct list_head *cache,
158 			      const char *xattr_name)
159 {
160 	struct ll_xattr_entry *xattr;
161 
162 	CDEBUG(D_CACHE, "del xattr: %s\n", xattr_name);
163 
164 	if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
165 		list_del(&xattr->xe_list);
166 		kfree(xattr->xe_name);
167 		kfree(xattr->xe_value);
168 		kmem_cache_free(xattr_kmem, xattr);
169 
170 		return 0;
171 	}
172 
173 	return -ENODATA;
174 }
175 
176 /**
177  * This iterates cached extended attributes.
178  *
179  * Walk over cached attributes in @cache and
180  * fill in @xld_buffer or only calculate buffer
181  * size if @xld_buffer is NULL.
182  *
183  * \retval >= 0     buffer list size
184  * \retval -ENODATA if the list cannot fit @xld_size buffer
185  */
ll_xattr_cache_list(struct list_head * cache,char * xld_buffer,int xld_size)186 static int ll_xattr_cache_list(struct list_head *cache,
187 			       char *xld_buffer,
188 			       int xld_size)
189 {
190 	struct ll_xattr_entry *xattr, *tmp;
191 	int xld_tail = 0;
192 
193 	list_for_each_entry_safe(xattr, tmp, cache, xe_list) {
194 		CDEBUG(D_CACHE, "list: buffer=%p[%d] name=%s\n",
195 			xld_buffer, xld_tail, xattr->xe_name);
196 
197 		if (xld_buffer) {
198 			xld_size -= xattr->xe_namelen;
199 			if (xld_size < 0)
200 				break;
201 			memcpy(&xld_buffer[xld_tail],
202 			       xattr->xe_name, xattr->xe_namelen);
203 		}
204 		xld_tail += xattr->xe_namelen;
205 	}
206 
207 	if (xld_size < 0)
208 		return -ERANGE;
209 
210 	return xld_tail;
211 }
212 
213 /**
214  * Check if the xattr cache is initialized (filled).
215  *
216  * \retval 0 @cache is not initialized
217  * \retval 1 @cache is initialized
218  */
ll_xattr_cache_valid(struct ll_inode_info * lli)219 static int ll_xattr_cache_valid(struct ll_inode_info *lli)
220 {
221 	return !!(lli->lli_flags & LLIF_XATTR_CACHE);
222 }
223 
224 /**
225  * This finalizes the xattr cache.
226  *
227  * Free all xattr memory. @lli is the inode info pointer.
228  *
229  * \retval 0 no error occurred
230  */
ll_xattr_cache_destroy_locked(struct ll_inode_info * lli)231 static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
232 {
233 
234 	if (!ll_xattr_cache_valid(lli))
235 		return 0;
236 
237 	while (ll_xattr_cache_del(&lli->lli_xattrs, NULL) == 0)
238 		; /* empty loop */
239 	lli->lli_flags &= ~LLIF_XATTR_CACHE;
240 
241 	return 0;
242 }
243 
ll_xattr_cache_destroy(struct inode * inode)244 int ll_xattr_cache_destroy(struct inode *inode)
245 {
246 	struct ll_inode_info *lli = ll_i2info(inode);
247 	int rc;
248 
249 	down_write(&lli->lli_xattrs_list_rwsem);
250 	rc = ll_xattr_cache_destroy_locked(lli);
251 	up_write(&lli->lli_xattrs_list_rwsem);
252 
253 	return rc;
254 }
255 
256 /**
257  * Match or enqueue a PR lock.
258  *
259  * Find or request an LDLM lock with xattr data.
260  * Since LDLM does not provide API for atomic match_or_enqueue,
261  * the function handles it with a separate enq lock.
262  * If successful, the function exits with the list lock held.
263  *
264  * \retval 0       no error occurred
265  * \retval -ENOMEM not enough memory
266  */
static int ll_xattr_find_get_lock(struct inode *inode,
				  struct lookup_intent *oit,
				  struct ptlrpc_request **req)
{
	ldlm_mode_t mode;
	struct lustre_handle lockh = { 0 };
	struct md_op_data *op_data;
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ldlm_enqueue_info einfo = { .ei_type = LDLM_IBITS,
					   .ei_mode = it_to_lock_mode(oit),
					   .ei_cb_bl = ll_md_blocking_ast,
					   .ei_cb_cp = ldlm_completion_ast };
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct obd_export *exp = sbi->ll_md_exp;
	int rc;

	/* lli_xattrs_enq_lock serializes the whole match-or-enqueue
	 * sequence, since LDLM offers no atomic match_or_enqueue */
	mutex_lock(&lli->lli_xattrs_enq_lock);
	/* inode may have been shrunk and recreated, so data is gone, match lock
	 * only when data exists. */
	if (ll_xattr_cache_valid(lli)) {
		/* Try matching first. */
		mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0,
				       LCK_PR);
		if (mode != 0) {
			/* fake oit in mdc_revalidate_lock() manner */
			oit->d.lustre.it_lock_handle = lockh.cookie;
			oit->d.lustre.it_lock_mode = mode;
			/* matched locally: no request, *req stays NULL */
			goto out;
		}
	}

	/* Enqueue if the lock isn't cached locally. */
	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
				     LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data)) {
		mutex_unlock(&lli->lli_xattrs_enq_lock);
		return PTR_ERR(op_data);
	}

	/* request both the xattr values and the name list in one intent */
	op_data->op_valid = OBD_MD_FLXATTR | OBD_MD_FLXATTRLS;

	rc = md_enqueue(exp, &einfo, oit, op_data, &lockh, NULL, 0, NULL, 0);
	ll_finish_md_op_data(op_data);

	if (rc < 0) {
		CDEBUG(D_CACHE,
		       "md_intent_lock failed with %d for fid "DFID"\n",
		       rc, PFID(ll_inode2fid(inode)));
		mutex_unlock(&lli->lli_xattrs_enq_lock);
		return rc;
	}

	*req = (struct ptlrpc_request *)oit->d.lustre.it_data;
out:
	/* on success the caller is left holding lli_xattrs_list_rwsem for
	 * write; ll_xattr_cache_refill() is responsible for releasing it */
	down_write(&lli->lli_xattrs_list_rwsem);
	mutex_unlock(&lli->lli_xattrs_enq_lock);

	return 0;
}
326 
327 /**
328  * Refill the xattr cache.
329  *
330  * Fetch and cache the whole of xattrs for @inode, acquiring
331  * a read or a write xattr lock depending on operation in @oit.
332  * Intent is dropped on exit unless the operation is setxattr.
333  *
334  * \retval 0       no error occurred
335  * \retval -EPROTO network protocol error
336  * \retval -ENOMEM not enough memory for the cache
337  */
static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ptlrpc_request *req = NULL;
	const char *xdata, *xval, *xtail, *xvtail;
	struct ll_inode_info *lli = ll_i2info(inode);
	struct mdt_body *body;
	__u32 *xsizes;
	int rc, i;

	/* on success this returns holding lli_xattrs_list_rwsem for write */
	rc = ll_xattr_find_get_lock(inode, oit, &req);
	if (rc)
		goto out_no_unlock;

	/* Do we have the data at this point? */
	if (ll_xattr_cache_valid(lli)) {
		ll_stats_ops_tally(sbi, LPROC_LL_GETXATTR_HITS, 1);
		rc = 0;
		goto out_maybe_drop;
	}

	/* Matched but no cache? Cancelled on error by a parallel refill. */
	if (unlikely(req == NULL)) {
		CDEBUG(D_CACHE, "cancelled by a parallel getxattr\n");
		rc = -EIO;
		goto out_maybe_drop;
	}

	if (oit->d.lustre.it_status < 0) {
		CDEBUG(D_CACHE, "getxattr intent returned %d for fid "DFID"\n",
		       oit->d.lustre.it_status, PFID(ll_inode2fid(inode)));
		rc = oit->d.lustre.it_status;
		/* xattr data is so large that we don't want to cache it */
		if (rc == -ERANGE)
			rc = -EAGAIN;
		goto out_destroy;
	}

	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
	if (body == NULL) {
		CERROR("no MDT BODY in the refill xattr reply\n");
		rc = -EPROTO;
		goto out_destroy;
	}
	/* do not need swab xattr data */
	/* three parallel reply buffers: NUL-separated names, concatenated
	 * values, and the per-value length array */
	xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
						body->eadatasize);
	xval = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS,
						body->aclsize);
	xsizes = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS_LENS,
					      body->max_mdsize * sizeof(__u32));
	if (xdata == NULL || xval == NULL || xsizes == NULL) {
		CERROR("wrong setxattr reply\n");
		rc = -EPROTO;
		goto out_destroy;
	}

	xtail = xdata + body->eadatasize;
	xvtail = xval + body->aclsize;

	CDEBUG(D_CACHE, "caching: xdata=%p xtail=%p\n", xdata, xtail);

	ll_xattr_cache_init(lli);

	/* body->max_mdsize carries the attribute count in this reply */
	for (i = 0; i < body->max_mdsize; i++) {
		CDEBUG(D_CACHE, "caching [%s]=%.*s\n", xdata, *xsizes, xval);
		/* Perform consistency checks: attr names and vals in pill */
		if (memchr(xdata, 0, xtail - xdata) == NULL) {
			CERROR("xattr protocol violation (names are broken)\n");
			rc = -EPROTO;
		} else if (xval + *xsizes > xvtail) {
			CERROR("xattr protocol violation (vals are broken)\n");
			rc = -EPROTO;
		} else if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_XATTR_ENOMEM)) {
			/* fault-injection hook for testing the ENOMEM path */
			rc = -ENOMEM;
		} else if (!strcmp(xdata, XATTR_NAME_ACL_ACCESS)) {
			/* Filter out ACL ACCESS since it's cached separately */
			CDEBUG(D_CACHE, "not caching %s\n",
			       XATTR_NAME_ACL_ACCESS);
			rc = 0;
		} else {
			rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval,
						*xsizes);
		}
		if (rc < 0) {
			ll_xattr_cache_destroy_locked(lli);
			goto out_destroy;
		}
		/* advance all three cursors to the next attribute */
		xdata += strlen(xdata) + 1;
		xval  += *xsizes;
		xsizes++;
	}

	if (xdata != xtail || xval != xvtail)
		CERROR("a hole in xattr data\n");

	ll_set_lock_data(sbi->ll_md_exp, inode, oit, NULL);

	goto out_maybe_drop;
out_maybe_drop:
	/* the intent lock is not kept past the refill */

		ll_intent_drop_lock(oit);

	/* on success the write lock is kept; the caller downgrades it */
	if (rc != 0)
		up_write(&lli->lli_xattrs_list_rwsem);
out_no_unlock:
	ptlrpc_req_finished(req);

	return rc;

out_destroy:
	up_write(&lli->lli_xattrs_list_rwsem);

	/* drop and cancel the lock so a stale/partial cache cannot be
	 * matched by a later refill */
	ldlm_lock_decref_and_cancel((struct lustre_handle *)
					&oit->d.lustre.it_lock_handle,
					oit->d.lustre.it_lock_mode);

	goto out_no_unlock;
}
457 
458 /**
459  * Get an xattr value or list xattrs using the write-through cache.
460  *
461  * Get the xattr value (@valid has OBD_MD_FLXATTR set) of @name or
462  * list xattr names (@valid has OBD_MD_FLXATTRLS set) for @inode.
463  * The resulting value/list is stored in @buffer if the former
464  * is not larger than @size.
465  *
466  * \retval 0        no error occurred
467  * \retval -EPROTO  network protocol error
468  * \retval -ENOMEM  not enough memory for the cache
469  * \retval -ERANGE  the buffer is not large enough
470  * \retval -ENODATA no such attr or the list is empty
471  */
int ll_xattr_cache_get(struct inode *inode,
			const char *name,
			char *buffer,
			size_t size,
			__u64 valid)
{
	struct lookup_intent oit = { .it_op = IT_GETXATTR };
	struct ll_inode_info *lli = ll_i2info(inode);
	int rc = 0;

	/* exactly one of FLXATTR (get) / FLXATTRLS (list) must be set */
	LASSERT(!!(valid & OBD_MD_FLXATTR) ^ !!(valid & OBD_MD_FLXATTRLS));

	down_read(&lli->lli_xattrs_list_rwsem);
	if (!ll_xattr_cache_valid(lli)) {
		up_read(&lli->lli_xattrs_list_rwsem);
		rc = ll_xattr_cache_refill(inode, &oit);
		if (rc)
			return rc;
		/* a successful refill leaves the write lock held */
		downgrade_write(&lli->lli_xattrs_list_rwsem);
	} else {
		ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR_HITS, 1);
	}

	if (valid & OBD_MD_FLXATTR) {
		struct ll_xattr_entry *xattr;

		rc = ll_xattr_cache_find(&lli->lli_xattrs, name, &xattr);
		if (rc == 0) {
			rc = xattr->xe_vallen;
			/* zero size means we are only requested size in rc */
			if (size != 0) {
				if (size >= xattr->xe_vallen)
					memcpy(buffer, xattr->xe_value,
						xattr->xe_vallen);
				else
					rc = -ERANGE;
			}
		}
	} else if (valid & OBD_MD_FLXATTRLS) {
		rc = ll_xattr_cache_list(&lli->lli_xattrs,
					 size ? buffer : NULL, size);
	}

	up_read(&lli->lli_xattrs_list_rwsem);

	return rc;
}
521