3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 only,
7 * as published by the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License version 2 for more details (a copy is included
13 * in the LICENSE file that accompanied this code).
15 * You should have received a copy of the GNU General Public License
16 * version 2 along with this program; If not, see http://www.gnu.org/licenses
18 * Please visit http://www.xyratex.com/contact if you need additional
19 * information or have any questions.
25 * Copyright 2012 Xyratex Technology Limited
27 * Copyright (c) 2013, 2017, Intel Corporation.
29 * Author: Andrew Perepechko <Andrew_Perepechko@xyratex.com>
33 #define DEBUG_SUBSYSTEM S_LLITE
36 #include <linux/sched.h>
38 #include <obd_support.h>
39 #include <lustre_dlm.h>
40 #include "llite_internal.h"
42 /* If we ever have hundreds of extended attributes, we might want to consider
43 * using a hash or a tree structure instead of list for faster lookups.
/* One cached extended attribute of an inode; nodes are linked into
 * lli->lli_xattrs (see ll_xattr_cache_init/add/del below). */
45 struct ll_xattr_entry {
46 struct list_head xe_list; /* protected with
47 * lli_xattrs_list_rwsem */
48 char *xe_name; /* xattr name, \0-terminated */
49 char *xe_value; /* xattr value */
50 unsigned xe_namelen; /* strlen(xe_name) + 1 */
51 unsigned xe_vallen; /* xattr value length */
/* Slab cache for ll_xattr_entry objects.  The descriptor table below is
 * consumed by lu_kmem_init()/lu_kmem_fini() in ll_xattr_init()/ll_xattr_fini(). */
54 static struct kmem_cache *xattr_kmem;
55 static struct lu_kmem_descr xattr_caches[] = {
57 .ckd_cache = &xattr_kmem,
58 .ckd_name = "xattr_kmem",
59 .ckd_size = sizeof(struct ll_xattr_entry)
/**
 * Module init: create the xattr slab cache(s) described in xattr_caches.
 *
 * \retval 0 on success, negative errno from lu_kmem_init() otherwise
 */
66 int ll_xattr_init(void)
68 return lu_kmem_init(xattr_caches);
/**
 * Module teardown: destroy the slab cache(s) created by ll_xattr_init().
 */
71 void ll_xattr_fini(void)
73 lu_kmem_fini(xattr_caches)
77 * Initializes xattr cache for an inode.
79 * This initializes the xattr list and marks cache presence.
/* NOTE(review): visible callers (ll_xattr_cache_refill, ll_xattr_cache_insert)
 * invoke this while holding lli_xattrs_list_rwsem for write. */
81 static void ll_xattr_cache_init(struct ll_inode_info *lli)
87 INIT_LIST_HEAD(&lli->lli_xattrs);
88 set_bit(LLIF_XATTR_CACHE, &lli->lli_flags);
92 * This looks for a specific extended attribute.
94 * Find in @cache and return @xattr_name attribute in @xattr,
95 * for the NULL @xattr_name return the first cached @xattr.
/* \retval 0 if found; the matching entry is stored through @xattr. */
98 * \retval -ENODATA if not found
/* Caller must hold lli_xattrs_list_rwsem (read or write) around this walk. */
100 static int ll_xattr_cache_find(struct list_head *cache,
101 const char *xattr_name,
102 struct ll_xattr_entry **xattr)
104 struct ll_xattr_entry *entry;
/* Linear scan — see the comment at ll_xattr_entry about list vs. hash/tree. */
108 list_for_each_entry(entry, cache, xe_list) {
109 /* xattr_name == NULL means look for any entry */
110 if (xattr_name == NULL ||
111 strcmp(xattr_name, entry->xe_name) == 0) {
113 CDEBUG(D_CACHE, "find: [%s]=%.*s\n",
114 entry->xe_name, entry->xe_vallen,
124 * This adds an xattr.
126 * Add @xattr_name attr with @xattr_val value and @xattr_val_len length,
129 * \retval -ENOMEM if no memory could be allocated for the cached attr
130 * \retval -EPROTO if duplicate xattr is being added
/* Caller must hold lli_xattrs_list_rwsem for write. */
132 static int ll_xattr_cache_add(struct list_head *cache,
133 const char *xattr_name,
134 const char *xattr_val,
135 unsigned xattr_val_len)
137 struct ll_xattr_entry *xattr;
/* Reject duplicates up front; the encryption context is special-cased
 * because it is immutable once cached (see comment below). */
141 if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
142 if (!strcmp(xattr_name, LL_XATTR_NAME_ENCRYPTION_CONTEXT))
143 /* it means enc ctx was already in cache,
144 * ignore error as it cannot be modified
148 CDEBUG(D_CACHE, "duplicate xattr: [%s]\n", xattr_name);
/* Entry header comes from the dedicated slab; name/value buffers are
 * sized exactly and allocated separately below. */
152 OBD_SLAB_ALLOC_PTR_GFP(xattr, xattr_kmem, GFP_NOFS);
154 CDEBUG(D_CACHE, "failed to allocate xattr\n");
/* xe_namelen counts the trailing NUL, matching the struct's contract. */
158 xattr->xe_namelen = strlen(xattr_name) + 1;
160 OBD_ALLOC(xattr->xe_name, xattr->xe_namelen);
161 if (!xattr->xe_name) {
162 CDEBUG(D_CACHE, "failed to alloc xattr name %u\n",
166 OBD_ALLOC(xattr->xe_value, xattr_val_len);
167 if (!xattr->xe_value) {
168 CDEBUG(D_CACHE, "failed to alloc xattr value %d\n",
173 memcpy(xattr->xe_name, xattr_name, xattr->xe_namelen);
174 memcpy(xattr->xe_value, xattr_val, xattr_val_len);
175 xattr->xe_vallen = xattr_val_len;
/* list_add() puts the newest entry at the head of @cache. */
176 list_add(&xattr->xe_list, cache);
178 CDEBUG(D_CACHE, "set: [%s]=%.*s\n", xattr_name,
179 xattr_val_len, xattr_val);
/* Error unwind: free the name buffer and the slab entry.  (The value
 * buffer unwind is not visible in this excerpt.) */
183 OBD_FREE(xattr->xe_name, xattr->xe_namelen);
185 OBD_SLAB_FREE_PTR(xattr, xattr_kmem);
191 * This removes an extended attribute from cache.
193 * Remove @xattr_name attribute from @cache.
/* A NULL @xattr_name removes the first cached entry (see
 * ll_xattr_cache_find); ll_xattr_cache_destroy_locked() relies on this
 * to drain the whole list. */
196 * \retval -ENODATA if @xattr_name is not cached
/* Caller must hold lli_xattrs_list_rwsem for write. */
198 static int ll_xattr_cache_del(struct list_head *cache,
199 const char *xattr_name)
201 struct ll_xattr_entry *xattr;
205 CDEBUG(D_CACHE, "del xattr: %s\n", xattr_name);
207 if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
208 list_del(&xattr->xe_list);
/* Free in reverse order of ll_xattr_cache_add(): name, value, entry. */
209 OBD_FREE(xattr->xe_name, xattr->xe_namelen);
210 OBD_FREE(xattr->xe_value, xattr->xe_vallen);
211 OBD_SLAB_FREE_PTR(xattr, xattr_kmem);
220 * This iterates cached extended attributes.
222 * Walk over cached attributes in @cache and
223 * fill in @xld_buffer or only calculate buffer
224 * size if @xld_buffer is NULL.
226 * \retval >= 0 buffer list size
227 * \retval -ENODATA if the list cannot fit @xld_size buffer
/* Output format is the standard listxattr(2) layout: NUL-terminated names
 * concatenated back to back.  (Parts of the signature are not visible in
 * this excerpt.) */
229 static int ll_xattr_cache_list(struct list_head *cache,
233 struct ll_xattr_entry *xattr, *tmp;
238 list_for_each_entry_safe(xattr, tmp, cache, xe_list) {
239 CDEBUG(D_CACHE, "list: buffer=%p[%d] name=%s\n",
240 xld_buffer, xld_tail, xattr->xe_name);
/* Track remaining room; copy only when a buffer was supplied. */
243 xld_size -= xattr->xe_namelen;
246 memcpy(&xld_buffer[xld_tail],
247 xattr->xe_name, xattr->xe_namelen);
/* xld_tail accumulates total bytes consumed and becomes the result. */
249 xld_tail += xattr->xe_namelen;
259 * Check if the xattr cache is initialized.
261 * \retval 0 @cache is not initialized
262 * \retval 1 @cache is initialized
/* Mirrors the bit set by ll_xattr_cache_init() and cleared by
 * ll_xattr_cache_destroy_locked(). */
264 static int ll_xattr_cache_valid(struct ll_inode_info *lli)
266 return test_bit(LLIF_XATTR_CACHE, &lli->lli_flags);
270 * Check if the xattr cache is filled.
272 * \retval 0 @cache is not filled
273 * \retval 1 @cache is filled
/* "Filled" (set at the end of ll_xattr_cache_refill()) is distinct from
 * "valid"/initialized: the list may exist but not yet hold the full set
 * of server-side xattrs. */
275 static int ll_xattr_cache_filled(struct ll_inode_info *lli)
277 return test_bit(LLIF_XATTR_CACHE_FILLED, &lli->lli_flags);
281 * This finalizes the xattr cache.
283 * Free all xattr memory. @lli is the inode info pointer.
/* Caller must hold lli_xattrs_list_rwsem for write (hence "_locked"). */
285 * \retval 0 no error occured
287 static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
291 if (!ll_xattr_cache_valid(lli))
/* Drain the list: del(NULL) repeatedly removes the first entry until
 * it returns -ENODATA on an empty list. */
294 while (ll_xattr_cache_del(&lli->lli_xattrs, NULL) == 0)
/* Clear FILLED before CACHE so readers never see filled-but-invalid. */
297 clear_bit(LLIF_XATTR_CACHE_FILLED, &lli->lli_flags);
298 clear_bit(LLIF_XATTR_CACHE, &lli->lli_flags);
/**
 * Public wrapper around ll_xattr_cache_destroy_locked(): takes the
 * write lock on lli_xattrs_list_rwsem for the duration of the teardown.
 *
 * \retval 0 no error occurred
 */
303 int ll_xattr_cache_destroy(struct inode *inode)
305 struct ll_inode_info *lli = ll_i2info(inode);
310 down_write(&lli->lli_xattrs_list_rwsem);
311 rc = ll_xattr_cache_destroy_locked(lli);
312 up_write(&lli->lli_xattrs_list_rwsem);
318 * Match or enqueue a PR lock.
320 * Find or request an LDLM lock with xattr data.
321 * Since LDLM does not provide API for atomic match_or_enqueue,
322 * the function handles it with a separate enq lock.
323 * If successful, the function exits with a write lock held
324 * on lli_xattrs_list_rwsem.
326 * \retval 0 no error occured
327 * \retval -ENOMEM not enough memory
329 static int ll_xattr_find_get_lock(struct inode *inode,
330 struct lookup_intent *oit,
331 struct ptlrpc_request **req)
334 struct lustre_handle lockh = { 0 };
335 struct md_op_data *op_data;
336 struct ll_inode_info *lli = ll_i2info(inode);
337 struct ll_sb_info *sbi = ll_i2sbi(inode);
338 struct obd_export *exp = sbi->ll_md_exp;
/* Serialize concurrent match/enqueue attempts on this inode so only one
 * thread issues the RPC; the others match the resulting lock. */
343 mutex_lock(&lli->lli_xattrs_enq_lock);
344 /* inode may have been shrunk and recreated, so data is gone, match lock
345 * only when data exists. */
346 if (ll_xattr_cache_filled(lli)) {
347 /* Try matching first. */
348 mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0,
351 /* fake oit in mdc_revalidate_lock() manner */
352 oit->it_lock_handle = lockh.cookie;
353 oit->it_lock_mode = mode;
358 /* Enqueue if the lock isn't cached locally. */
359 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
360 LUSTRE_OPC_ANY, NULL);
361 if (IS_ERR(op_data)) {
362 mutex_unlock(&lli->lli_xattrs_enq_lock);
363 RETURN(PTR_ERR(op_data));
/* Ask the MDS for both xattr data and the xattr name list in one shot. */
366 op_data->op_valid = OBD_MD_FLXATTR | OBD_MD_FLXATTRLS;
368 rc = md_intent_lock(exp, op_data, oit, req, &ll_md_blocking_ast, 0);
369 ll_finish_md_op_data(op_data);
370 *req = oit->it_request;
373 CDEBUG(D_CACHE, "md_intent_lock failed with %d for fid "DFID"\n",
374 rc, PFID(ll_inode2fid(inode)));
375 mutex_unlock(&lli->lli_xattrs_enq_lock);
/* Success: hand the write lock to the caller (ll_xattr_cache_refill),
 * then drop the enqueue mutex. */
380 down_write(&lli->lli_xattrs_list_rwsem);
381 mutex_unlock(&lli->lli_xattrs_enq_lock);
387 * Refill the xattr cache.
389 * Fetch and cache the whole of xattrs for @inode, thanks to the write lock
390 * on lli_xattrs_list_rwsem obtained from ll_xattr_find_get_lock().
391 * If successful, this write lock is kept.
393 * \retval 0 no error occured
394 * \retval -EPROTO network protocol error
395 * \retval -ENOMEM not enough memory for the cache
397 static int ll_xattr_cache_refill(struct inode *inode)
399 struct lookup_intent oit = { .it_op = IT_GETXATTR };
400 struct ll_sb_info *sbi = ll_i2sbi(inode);
401 struct ptlrpc_request *req = NULL;
402 const char *xdata, *xval, *xtail, *xvtail;
403 struct ll_inode_info *lli = ll_i2info(inode);
404 struct mdt_body *body;
/* On success this returns with lli_xattrs_list_rwsem held for write. */
410 rc = ll_xattr_find_get_lock(inode, &oit, &req);
414 /* Do we have the data at this point? */
415 if (ll_xattr_cache_filled(lli)) {
/* Another thread refilled the cache while we waited for the lock. */
416 ll_stats_ops_tally(sbi, LPROC_LL_GETXATTR_HITS, 1);
417 ll_intent_drop_lock(&oit);
418 GOTO(err_req, rc = 0);
421 /* Matched but no cache? Cancelled on error by a parallel refill. */
422 if (unlikely(req == NULL)) {
423 CDEBUG(D_CACHE, "cancelled by a parallel getxattr\n");
424 ll_intent_drop_lock(&oit);
425 GOTO(err_unlock, rc = -EAGAIN);
428 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
430 CERROR("no MDT BODY in the refill xattr reply\n");
431 GOTO(err_cancel, rc = -EPROTO);
433 /* do not need swab xattr data */
/* Three parallel reply buffers: concatenated NUL-terminated names
 * (EADATA), concatenated values (EAVALS), and per-value lengths
 * (EAVALS_LENS). */
434 xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
435 body->mbo_eadatasize);
436 xval = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS,
438 xsizes = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS_LENS,
439 body->mbo_max_mdsize *
441 if (xdata == NULL || xval == NULL || xsizes == NULL) {
442 CERROR("wrong setxattr reply\n");
443 GOTO(err_cancel, rc = -EPROTO);
446 xtail = xdata + body->mbo_eadatasize;
/* NOTE(review): the values tail is bounded by mbo_aclsize here — confirm
 * against the wire protocol that aclsize carries the EAVALS length. */
447 xvtail = xval + body->mbo_aclsize;
449 CDEBUG(D_CACHE, "caching: xdata=%p xtail=%p\n", xdata, xtail);
451 if (!ll_xattr_cache_valid(lli))
452 ll_xattr_cache_init(lli);
/* mbo_max_mdsize holds the xattr count for this reply (loop bound). */
454 for (i = 0; i < body->mbo_max_mdsize; i++) {
455 CDEBUG(D_CACHE, "caching [%s]=%.*s\n", xdata, *xsizes, xval);
456 /* Perform consistency checks: attr names and vals in pill */
457 if (memchr(xdata, 0, xtail - xdata) == NULL) {
458 CERROR("xattr protocol violation (names are broken)\n");
460 } else if (xval + *xsizes > xvtail) {
461 CERROR("xattr protocol violation (vals are broken)\n");
463 } else if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_XATTR_ENOMEM)) {
465 } else if (!strcmp(xdata, XATTR_NAME_ACL_ACCESS)) {
466 /* Filter out ACL ACCESS since it's cached separately */
467 CDEBUG(D_CACHE, "not caching %s\n",
468 XATTR_NAME_ACL_ACCESS);
470 } else if (!strcmp(xdata, "security.selinux")) {
471 /* Filter out security.selinux, it is cached in slab */
472 CDEBUG(D_CACHE, "not caching security.selinux\n");
475 rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval,
/* Any add failure tears the whole cache down — a partial cache
 * must never be left marked valid. */
479 ll_xattr_cache_destroy_locked(lli);
480 GOTO(err_cancel, rc);
/* Advance the name cursor past this name's trailing NUL. */
482 xdata += strlen(xdata) + 1;
487 if (xdata != xtail || xval != xvtail)
488 CERROR("a hole in xattr data\n");
/* Cache is complete; publish it (see ll_xattr_cache_filled()). */
490 set_bit(LLIF_XATTR_CACHE_FILLED, &lli->lli_flags);
492 ll_set_lock_data(sbi->ll_md_exp, inode, &oit, NULL);
493 ll_intent_drop_lock(&oit);
495 ptlrpc_req_finished(req);
/* Error unwind: cancel the lock, release the rwsem, free the request. */
499 ldlm_lock_decref_and_cancel((struct lustre_handle *)
503 up_write(&lli->lli_xattrs_list_rwsem);
508 ptlrpc_req_finished(req);
513 * Get an xattr value or list xattrs using the write-through cache.
515 * Get the xattr value (@valid has OBD_MD_FLXATTR set) of @name or
516 * list xattr names (@valid has OBD_MD_FLXATTRLS set) for @inode.
517 * The resulting value/list is stored in @buffer if the former
518 * is not larger than @size.
520 * \retval 0 no error occured
521 * \retval -EPROTO network protocol error
522 * \retval -ENOMEM not enough memory for the cache
523 * \retval -ERANGE the buffer is not large enough
524 * \retval -ENODATA no such attr or the list is empty
526 int ll_xattr_cache_get(struct inode *inode,
532 struct ll_inode_info *lli = ll_i2info(inode);
/* Exactly one of FLXATTR (get one) / FLXATTRLS (list names) must be set. */
537 LASSERT(!!(valid & OBD_MD_FLXATTR) ^ !!(valid & OBD_MD_FLXATTRLS));
539 down_read(&lli->lli_xattrs_list_rwsem);
540 /* For performance reasons, we do not want to refill complete xattr
541 * cache if we are just interested in encryption context.
/* Refill is needed for any list request, and for any get except the
 * encryption context (which ll_xattr_cache_insert() seeds separately). */
543 if ((valid & OBD_MD_FLXATTRLS ||
544 strcmp(name, LL_XATTR_NAME_ENCRYPTION_CONTEXT) != 0) &&
545 !ll_xattr_cache_filled(lli)) {
/* Must drop the read lock: refill takes the write lock itself. */
546 up_read(&lli->lli_xattrs_list_rwsem);
547 rc = ll_xattr_cache_refill(inode);
550 /* Turn the write lock obtained in ll_xattr_cache_refill()
553 downgrade_write(&lli->lli_xattrs_list_rwsem);
555 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR_HITS, 1);
/* Cache may have been destroyed while the read lock was dropped. */
558 if (!ll_xattr_cache_valid(lli))
559 GOTO(out, rc = -ENODATA);
561 if (valid & OBD_MD_FLXATTR) {
562 struct ll_xattr_entry *xattr;
564 rc = ll_xattr_cache_find(&lli->lli_xattrs, name, &xattr);
566 rc = xattr->xe_vallen;
567 /* zero size means we are only requested size in rc */
569 if (size >= xattr->xe_vallen)
570 memcpy(buffer, xattr->xe_value,
576 } else if (valid & OBD_MD_FLXATTRLS) {
/* NULL buffer makes ll_xattr_cache_list() size-only (see its doc). */
577 rc = ll_xattr_cache_list(&lli->lli_xattrs,
578 size ? buffer : NULL, size);
583 up_read(&lli->lli_xattrs_list_rwsem);
589 * Insert an xattr value into the cache.
591 * Add @name xattr with @buffer value and @size length for @inode.
592 * Init cache for @inode if necessary.
595 * \retval < 0 from ll_xattr_cache_add(), except -EPROTO is ignored for
596 * LL_XATTR_NAME_ENCRYPTION_CONTEXT xattr
598 int ll_xattr_cache_insert(struct inode *inode,
603 struct ll_inode_info *lli = ll_i2info(inode);
/* Take the write lock for the init + add; the enc-ctx duplicate case is
 * handled inside ll_xattr_cache_add() itself. */
608 down_write(&lli->lli_xattrs_list_rwsem);
609 if (!ll_xattr_cache_valid(lli))
610 ll_xattr_cache_init(lli);
611 rc = ll_xattr_cache_add(&lli->lli_xattrs, name, buffer, size);
612 up_write(&lli->lli_xattrs_list_rwsem);