/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see http://www.gnu.org/licenses
 *
 * Please visit http://www.xyratex.com/contact if you need additional
 * information or have any questions.
 *
 * GPL HEADER END
 */

/*
 * Copyright 2012 Xyratex Technology Limited
 *
 * Author: Andrew Perepechko <Andrew_Perepechko@xyratex.com>
 */
31 #define DEBUG_SUBSYSTEM S_LLITE
34 #include <linux/sched.h>
36 #include <obd_support.h>
37 #include <lustre_lite.h>
38 #include <lustre_dlm.h>
39 #include <lustre_ver.h>
40 #include "llite_internal.h"
42 /* If we ever have hundreds of extended attributes, we might want to consider
43 * using a hash or a tree structure instead of list for faster lookups.
45 struct ll_xattr_entry {
46 struct list_head xe_list; /* protected with
47 * lli_xattrs_list_rwsem */
48 char *xe_name; /* xattr name, \0-terminated */
49 char *xe_value; /* xattr value */
50 unsigned xe_namelen; /* strlen(xe_name) + 1 */
51 unsigned xe_vallen; /* xattr value length */
54 static struct kmem_cache *xattr_kmem;
55 static struct lu_kmem_descr xattr_caches[] = {
57 .ckd_cache = &xattr_kmem,
58 .ckd_name = "xattr_kmem",
59 .ckd_size = sizeof(struct ll_xattr_entry)
66 int ll_xattr_init(void)
68 return lu_kmem_init(xattr_caches);
71 void ll_xattr_fini(void)
73 lu_kmem_fini(xattr_caches);
77 * Initializes xattr cache for an inode.
79 * This initializes the xattr list and marks cache presence.
81 static void ll_xattr_cache_init(struct ll_inode_info *lli)
87 INIT_LIST_HEAD(&lli->lli_xattrs);
88 lli->lli_flags |= LLIF_XATTR_CACHE;
92 * This looks for a specific extended attribute.
94 * Find in @cache and return @xattr_name attribute in @xattr,
95 * for the NULL @xattr_name return the first cached @xattr.
98 * \retval -ENODATA if not found
100 static int ll_xattr_cache_find(struct list_head *cache,
101 const char *xattr_name,
102 struct ll_xattr_entry **xattr)
104 struct ll_xattr_entry *entry;
108 list_for_each_entry(entry, cache, xe_list) {
109 /* xattr_name == NULL means look for any entry */
110 if (xattr_name == NULL ||
111 strcmp(xattr_name, entry->xe_name) == 0) {
113 CDEBUG(D_CACHE, "find: [%s]=%.*s\n",
114 entry->xe_name, entry->xe_vallen,
124 * This adds an xattr.
126 * Add @xattr_name attr with @xattr_val value and @xattr_val_len length,
129 * \retval -ENOMEM if no memory could be allocated for the cached attr
130 * \retval -EPROTO if duplicate xattr is being added
132 static int ll_xattr_cache_add(struct list_head *cache,
133 const char *xattr_name,
134 const char *xattr_val,
135 unsigned xattr_val_len)
137 struct ll_xattr_entry *xattr;
141 if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
142 CDEBUG(D_CACHE, "duplicate xattr: [%s]\n", xattr_name);
146 OBD_SLAB_ALLOC_PTR_GFP(xattr, xattr_kmem, GFP_NOFS);
148 CDEBUG(D_CACHE, "failed to allocate xattr\n");
152 xattr->xe_namelen = strlen(xattr_name) + 1;
154 OBD_ALLOC(xattr->xe_name, xattr->xe_namelen);
155 if (!xattr->xe_name) {
156 CDEBUG(D_CACHE, "failed to alloc xattr name %u\n",
160 OBD_ALLOC(xattr->xe_value, xattr_val_len);
161 if (!xattr->xe_value) {
162 CDEBUG(D_CACHE, "failed to alloc xattr value %d\n",
167 memcpy(xattr->xe_name, xattr_name, xattr->xe_namelen);
168 memcpy(xattr->xe_value, xattr_val, xattr_val_len);
169 xattr->xe_vallen = xattr_val_len;
170 list_add(&xattr->xe_list, cache);
172 CDEBUG(D_CACHE, "set: [%s]=%.*s\n", xattr_name,
173 xattr_val_len, xattr_val);
177 OBD_FREE(xattr->xe_name, xattr->xe_namelen);
179 OBD_SLAB_FREE_PTR(xattr, xattr_kmem);
185 * This removes an extended attribute from cache.
187 * Remove @xattr_name attribute from @cache.
190 * \retval -ENODATA if @xattr_name is not cached
192 static int ll_xattr_cache_del(struct list_head *cache,
193 const char *xattr_name)
195 struct ll_xattr_entry *xattr;
199 CDEBUG(D_CACHE, "del xattr: %s\n", xattr_name);
201 if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
202 list_del(&xattr->xe_list);
203 OBD_FREE(xattr->xe_name, xattr->xe_namelen);
204 OBD_FREE(xattr->xe_value, xattr->xe_vallen);
205 OBD_SLAB_FREE_PTR(xattr, xattr_kmem);
214 * This iterates cached extended attributes.
216 * Walk over cached attributes in @cache and
217 * fill in @xld_buffer or only calculate buffer
218 * size if @xld_buffer is NULL.
220 * \retval >= 0 buffer list size
221 * \retval -ENODATA if the list cannot fit @xld_size buffer
223 static int ll_xattr_cache_list(struct list_head *cache,
227 struct ll_xattr_entry *xattr, *tmp;
232 list_for_each_entry_safe(xattr, tmp, cache, xe_list) {
233 CDEBUG(D_CACHE, "list: buffer=%p[%d] name=%s\n",
234 xld_buffer, xld_tail, xattr->xe_name);
237 xld_size -= xattr->xe_namelen;
240 memcpy(&xld_buffer[xld_tail],
241 xattr->xe_name, xattr->xe_namelen);
243 xld_tail += xattr->xe_namelen;
253 * Check if the xattr cache is initialized (filled).
255 * \retval 0 @cache is not initialized
256 * \retval 1 @cache is initialized
258 static int ll_xattr_cache_valid(struct ll_inode_info *lli)
260 return !!(lli->lli_flags & LLIF_XATTR_CACHE);
264 * This finalizes the xattr cache.
266 * Free all xattr memory. @lli is the inode info pointer.
268 * \retval 0 no error occured
270 static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
274 if (!ll_xattr_cache_valid(lli))
277 while (ll_xattr_cache_del(&lli->lli_xattrs, NULL) == 0)
279 lli->lli_flags &= ~LLIF_XATTR_CACHE;
284 int ll_xattr_cache_destroy(struct inode *inode)
286 struct ll_inode_info *lli = ll_i2info(inode);
291 down_write(&lli->lli_xattrs_list_rwsem);
292 rc = ll_xattr_cache_destroy_locked(lli);
293 up_write(&lli->lli_xattrs_list_rwsem);
299 * Match or enqueue a PR lock.
301 * Find or request an LDLM lock with xattr data.
302 * Since LDLM does not provide API for atomic match_or_enqueue,
303 * the function handles it with a separate enq lock.
304 * If successful, the function exits with the list lock held.
306 * \retval 0 no error occured
307 * \retval -ENOMEM not enough memory
309 static int ll_xattr_find_get_lock(struct inode *inode,
310 struct lookup_intent *oit,
311 struct ptlrpc_request **req)
314 struct lustre_handle lockh = { 0 };
315 struct md_op_data *op_data;
316 struct ll_inode_info *lli = ll_i2info(inode);
317 struct ldlm_enqueue_info einfo = {
318 .ei_type = LDLM_IBITS,
319 .ei_mode = it_to_lock_mode(oit),
320 .ei_cb_bl = &ll_md_blocking_ast,
321 .ei_cb_cp = &ldlm_completion_ast,
323 struct ll_sb_info *sbi = ll_i2sbi(inode);
324 struct obd_export *exp = sbi->ll_md_exp;
329 mutex_lock(&lli->lli_xattrs_enq_lock);
330 /* inode may have been shrunk and recreated, so data is gone, match lock
331 * only when data exists. */
332 if (ll_xattr_cache_valid(lli)) {
333 /* Try matching first. */
334 mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0,
337 /* fake oit in mdc_revalidate_lock() manner */
338 oit->d.lustre.it_lock_handle = lockh.cookie;
339 oit->d.lustre.it_lock_mode = mode;
344 /* Enqueue if the lock isn't cached locally. */
345 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
346 LUSTRE_OPC_ANY, NULL);
347 if (IS_ERR(op_data)) {
348 mutex_unlock(&lli->lli_xattrs_enq_lock);
349 RETURN(PTR_ERR(op_data));
352 op_data->op_valid = OBD_MD_FLXATTR | OBD_MD_FLXATTRLS;
354 rc = md_enqueue(exp, &einfo, NULL, oit, op_data, &lockh, 0);
355 ll_finish_md_op_data(op_data);
358 CDEBUG(D_CACHE, "md_intent_lock failed with %d for fid "DFID"\n",
359 rc, PFID(ll_inode2fid(inode)));
360 mutex_unlock(&lli->lli_xattrs_enq_lock);
364 *req = (struct ptlrpc_request *)oit->d.lustre.it_data;
366 down_write(&lli->lli_xattrs_list_rwsem);
367 mutex_unlock(&lli->lli_xattrs_enq_lock);
373 * Refill the xattr cache.
375 * Fetch and cache the whole of xattrs for @inode, acquiring
376 * a read or a write xattr lock depending on operation in @oit.
377 * Intent is dropped on exit unless the operation is setxattr.
379 * \retval 0 no error occured
380 * \retval -EPROTO network protocol error
381 * \retval -ENOMEM not enough memory for the cache
383 static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
385 struct ll_sb_info *sbi = ll_i2sbi(inode);
386 struct ptlrpc_request *req = NULL;
387 const char *xdata, *xval, *xtail, *xvtail;
388 struct ll_inode_info *lli = ll_i2info(inode);
389 struct mdt_body *body;
395 rc = ll_xattr_find_get_lock(inode, oit, &req);
397 GOTO(out_no_unlock, rc);
399 /* Do we have the data at this point? */
400 if (ll_xattr_cache_valid(lli)) {
401 ll_stats_ops_tally(sbi, LPROC_LL_GETXATTR_HITS, 1);
402 GOTO(out_maybe_drop, rc = 0);
405 /* Matched but no cache? Cancelled on error by a parallel refill. */
406 if (unlikely(req == NULL)) {
407 CDEBUG(D_CACHE, "cancelled by a parallel getxattr\n");
408 GOTO(out_maybe_drop, rc = -EIO);
411 if (oit->d.lustre.it_status < 0) {
412 CDEBUG(D_CACHE, "getxattr intent returned %d for fid "DFID"\n",
413 oit->d.lustre.it_status, PFID(ll_inode2fid(inode)));
414 rc = oit->d.lustre.it_status;
415 /* xattr data is so large that we don't want to cache it */
418 GOTO(out_destroy, rc);
421 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
423 CERROR("no MDT BODY in the refill xattr reply\n");
424 GOTO(out_destroy, rc = -EPROTO);
426 /* do not need swab xattr data */
427 xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
428 body->mbo_eadatasize);
429 xval = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS,
431 xsizes = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS_LENS,
432 body->mbo_max_mdsize *
434 if (xdata == NULL || xval == NULL || xsizes == NULL) {
435 CERROR("wrong setxattr reply\n");
436 GOTO(out_destroy, rc = -EPROTO);
439 xtail = xdata + body->mbo_eadatasize;
440 xvtail = xval + body->mbo_aclsize;
442 CDEBUG(D_CACHE, "caching: xdata=%p xtail=%p\n", xdata, xtail);
444 ll_xattr_cache_init(lli);
446 for (i = 0; i < body->mbo_max_mdsize; i++) {
447 CDEBUG(D_CACHE, "caching [%s]=%.*s\n", xdata, *xsizes, xval);
448 /* Perform consistency checks: attr names and vals in pill */
449 if (memchr(xdata, 0, xtail - xdata) == NULL) {
450 CERROR("xattr protocol violation (names are broken)\n");
452 } else if (xval + *xsizes > xvtail) {
453 CERROR("xattr protocol violation (vals are broken)\n");
455 } else if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_XATTR_ENOMEM)) {
457 } else if (!strcmp(xdata, XATTR_NAME_ACL_ACCESS)) {
458 /* Filter out ACL ACCESS since it's cached separately */
459 CDEBUG(D_CACHE, "not caching %s\n",
460 XATTR_NAME_ACL_ACCESS);
463 rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval,
467 ll_xattr_cache_destroy_locked(lli);
468 GOTO(out_destroy, rc);
470 xdata += strlen(xdata) + 1;
475 if (xdata != xtail || xval != xvtail)
476 CERROR("a hole in xattr data\n");
478 ll_set_lock_data(sbi->ll_md_exp, inode, oit, NULL);
480 GOTO(out_maybe_drop, rc);
483 ll_intent_drop_lock(oit);
486 up_write(&lli->lli_xattrs_list_rwsem);
488 ptlrpc_req_finished(req);
493 up_write(&lli->lli_xattrs_list_rwsem);
495 ldlm_lock_decref_and_cancel((struct lustre_handle *)
496 &oit->d.lustre.it_lock_handle,
497 oit->d.lustre.it_lock_mode);
503 * Get an xattr value or list xattrs using the write-through cache.
505 * Get the xattr value (@valid has OBD_MD_FLXATTR set) of @name or
506 * list xattr names (@valid has OBD_MD_FLXATTRLS set) for @inode.
507 * The resulting value/list is stored in @buffer if the former
508 * is not larger than @size.
510 * \retval 0 no error occured
511 * \retval -EPROTO network protocol error
512 * \retval -ENOMEM not enough memory for the cache
513 * \retval -ERANGE the buffer is not large enough
514 * \retval -ENODATA no such attr or the list is empty
516 int ll_xattr_cache_get(struct inode *inode,
522 struct lookup_intent oit = { .it_op = IT_GETXATTR };
523 struct ll_inode_info *lli = ll_i2info(inode);
528 LASSERT(!!(valid & OBD_MD_FLXATTR) ^ !!(valid & OBD_MD_FLXATTRLS));
530 down_read(&lli->lli_xattrs_list_rwsem);
531 if (!ll_xattr_cache_valid(lli)) {
532 up_read(&lli->lli_xattrs_list_rwsem);
533 rc = ll_xattr_cache_refill(inode, &oit);
536 downgrade_write(&lli->lli_xattrs_list_rwsem);
538 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR_HITS, 1);
541 if (valid & OBD_MD_FLXATTR) {
542 struct ll_xattr_entry *xattr;
544 rc = ll_xattr_cache_find(&lli->lli_xattrs, name, &xattr);
546 rc = xattr->xe_vallen;
547 /* zero size means we are only requested size in rc */
549 if (size >= xattr->xe_vallen)
550 memcpy(buffer, xattr->xe_value,
556 } else if (valid & OBD_MD_FLXATTRLS) {
557 rc = ll_xattr_cache_list(&lli->lli_xattrs,
558 size ? buffer : NULL, size);
563 up_read(&lli->lli_xattrs_list_rwsem);