X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fllite%2Fxattr_cache.c;h=f1022b0296f4744937582715ad225b276d376f32;hb=33d08805f27bb9a4c374754505f2ef5431b9fd31;hp=9ef62c5aa961eaa220da00760aa43f343e84148b;hpb=adb0b75acfb2c924d15eb9b2c8f9f8f277fa5b6f;p=fs%2Flustre-release.git

diff --git a/lustre/llite/xattr_cache.c b/lustre/llite/xattr_cache.c
index 9ef62c5..f1022b0 100644
--- a/lustre/llite/xattr_cache.c
+++ b/lustre/llite/xattr_cache.c
@@ -24,6 +24,8 @@
 /*
  * Copyright 2012 Xyratex Technology Limited
  *
+ * Copyright (c) 2013, 2017, Intel Corporation.
+ *
  * Author: Andrew Perepechko
  *
  */
@@ -34,9 +36,7 @@
 #include
 #include
 #include
-#include
 #include
-#include
 #include "llite_internal.h"
 
 /* If we ever have hundreds of extended attributes, we might want to consider
@@ -84,8 +84,8 @@ static void ll_xattr_cache_init(struct ll_inode_info *lli)
 
 	LASSERT(lli != NULL);
 
-	CFS_INIT_LIST_HEAD(&lli->lli_xattrs);
-	lli->lli_flags |= LLIF_XATTR_CACHE;
+	INIT_LIST_HEAD(&lli->lli_xattrs);
+	ll_file_set_flag(lli, LLIF_XATTR_CACHE);
 }
 
 /**
@@ -257,7 +257,7 @@ static int ll_xattr_cache_list(struct list_head *cache,
  */
 static int ll_xattr_cache_valid(struct ll_inode_info *lli)
 {
-	return !!(lli->lli_flags & LLIF_XATTR_CACHE);
+	return ll_file_test_flag(lli, LLIF_XATTR_CACHE);
 }
 
 /**
@@ -276,7 +276,8 @@ static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
 
 	while (ll_xattr_cache_del(&lli->lli_xattrs, NULL) == 0)
 		/* empty loop */ ;
-	lli->lli_flags &= ~LLIF_XATTR_CACHE;
+
+	ll_file_clear_flag(lli, LLIF_XATTR_CACHE);
 
 	RETURN(0);
 }
@@ -310,14 +311,10 @@ static int ll_xattr_find_get_lock(struct inode *inode,
 				  struct lookup_intent *oit,
 				  struct ptlrpc_request **req)
 {
-	ldlm_mode_t mode;
+	enum ldlm_mode mode;
 	struct lustre_handle lockh = { 0 };
 	struct md_op_data *op_data;
 	struct ll_inode_info *lli = ll_i2info(inode);
-	struct ldlm_enqueue_info einfo = { .ei_type = LDLM_IBITS,
-					   .ei_mode = it_to_lock_mode(oit),
-					   .ei_cb_bl = ll_md_blocking_ast,
-					   .ei_cb_cp = ldlm_completion_ast };
 	struct ll_sb_info *sbi = ll_i2sbi(inode);
 	struct obd_export *exp = sbi->ll_md_exp;
 	int rc;
@@ -325,13 +322,18 @@ static int ll_xattr_find_get_lock(struct inode *inode,
 	ENTRY;
 
 	mutex_lock(&lli->lli_xattrs_enq_lock);
-	/* Try matching first. */
-	mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0, LCK_PR);
-	if (mode != 0) {
-		/* fake oit in mdc_revalidate_lock() manner */
-		oit->d.lustre.it_lock_handle = lockh.cookie;
-		oit->d.lustre.it_lock_mode = mode;
-		goto out;
+	/* inode may have been shrunk and recreated, so data is gone, match lock
+	 * only when data exists. */
+	if (ll_xattr_cache_valid(lli)) {
+		/* Try matching first. */
+		mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0,
+				       LCK_PR);
+		if (mode != 0) {
+			/* fake oit in mdc_revalidate_lock() manner */
+			oit->it_lock_handle = lockh.cookie;
+			oit->it_lock_mode = mode;
+			goto out;
+		}
 	}
 
 	/* Enqueue if the lock isn't cached locally. */
@@ -344,8 +346,9 @@ static int ll_xattr_find_get_lock(struct inode *inode,
 
 	op_data->op_valid = OBD_MD_FLXATTR | OBD_MD_FLXATTRLS;
 
-	rc = md_enqueue(exp, &einfo, oit, op_data, &lockh, NULL, 0, NULL, 0);
+	rc = md_intent_lock(exp, op_data, oit, req, &ll_md_blocking_ast, 0);
 	ll_finish_md_op_data(op_data);
+	*req = oit->it_request;
 
 	if (rc < 0) {
 		CDEBUG(D_CACHE, "md_intent_lock failed with %d for fid "DFID"\n",
@@ -354,7 +357,6 @@ static int ll_xattr_find_get_lock(struct inode *inode,
 		RETURN(rc);
 	}
 
-	*req = (struct ptlrpc_request *)oit->d.lustre.it_data;
 out:
 	down_write(&lli->lli_xattrs_list_rwsem);
 	mutex_unlock(&lli->lli_xattrs_enq_lock);
@@ -365,16 +367,15 @@ out:
 /**
  * Refill the xattr cache.
  *
- * Fetch and cache the whole of xattrs for @inode, acquiring
- * a read or a write xattr lock depending on operation in @oit.
- * Intent is dropped on exit unless the operation is setxattr.
+ * Fetch and cache the whole of xattrs for @inode, acquiring a read lock.
 *
 * \retval 0 no error occured
 * \retval -EPROTO network protocol error
 * \retval -ENOMEM not enough memory for the cache
 */
-static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
+static int ll_xattr_cache_refill(struct inode *inode)
 {
+	struct lookup_intent oit = { .it_op = IT_GETXATTR };
 	struct ll_sb_info *sbi = ll_i2sbi(inode);
 	struct ptlrpc_request *req = NULL;
 	const char *xdata, *xval, *xtail, *xvtail;
@@ -385,57 +386,50 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
 
 	ENTRY;
 
-	rc = ll_xattr_find_get_lock(inode, oit, &req);
+	rc = ll_xattr_find_get_lock(inode, &oit, &req);
 	if (rc)
-		GOTO(out_no_unlock, rc);
+		GOTO(err_req, rc);
 
 	/* Do we have the data at this point? */
 	if (ll_xattr_cache_valid(lli)) {
 		ll_stats_ops_tally(sbi, LPROC_LL_GETXATTR_HITS, 1);
-		GOTO(out_maybe_drop, rc = 0);
+		ll_intent_drop_lock(&oit);
+		GOTO(err_req, rc = 0);
 	}
 
 	/* Matched but no cache? Cancelled on error by a parallel refill. */
 	if (unlikely(req == NULL)) {
 		CDEBUG(D_CACHE, "cancelled by a parallel getxattr\n");
-		GOTO(out_maybe_drop, rc = -EIO);
-	}
-
-	if (oit->d.lustre.it_status < 0) {
-		CDEBUG(D_CACHE, "getxattr intent returned %d for fid "DFID"\n",
-		       oit->d.lustre.it_status, PFID(ll_inode2fid(inode)));
-		rc = oit->d.lustre.it_status;
-		/* xattr data is so large that we don't want to cache it */
-		if (rc == -ERANGE)
-			rc = -EAGAIN;
-		GOTO(out_destroy, rc);
+		ll_intent_drop_lock(&oit);
+		GOTO(err_unlock, rc = -EAGAIN);
 	}
 
 	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
 	if (body == NULL) {
 		CERROR("no MDT BODY in the refill xattr reply\n");
-		GOTO(out_destroy, rc = -EPROTO);
+		GOTO(err_cancel, rc = -EPROTO);
 	}
 	/* do not need swab xattr data */
 	xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
-					     body->eadatasize);
+					     body->mbo_eadatasize);
 	xval = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS,
-					    body->aclsize);
+					    body->mbo_aclsize);
 	xsizes = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS_LENS,
-					      body->max_mdsize * sizeof(__u32));
+					      body->mbo_max_mdsize *
+					      sizeof(__u32));
 	if (xdata == NULL || xval == NULL || xsizes == NULL) {
 		CERROR("wrong setxattr reply\n");
-		GOTO(out_destroy, rc = -EPROTO);
+		GOTO(err_cancel, rc = -EPROTO);
 	}
 
-	xtail = xdata + body->eadatasize;
-	xvtail = xval + body->aclsize;
+	xtail = xdata + body->mbo_eadatasize;
+	xvtail = xval + body->mbo_aclsize;
 
 	CDEBUG(D_CACHE, "caching: xdata=%p xtail=%p\n", xdata, xtail);
 
 	ll_xattr_cache_init(lli);
 
-	for (i = 0; i < body->max_mdsize; i++) {
+	for (i = 0; i < body->mbo_max_mdsize; i++) {
 		CDEBUG(D_CACHE, "caching [%s]=%.*s\n", xdata, *xsizes, xval);
 		/* Perform consistency checks: attr names and vals in pill */
 		if (memchr(xdata, 0, xtail - xdata) == NULL) {
@@ -451,13 +445,17 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
 			CDEBUG(D_CACHE, "not caching %s\n",
 			       XATTR_NAME_ACL_ACCESS);
 			rc = 0;
+		} else if (!strcmp(xdata, "security.selinux")) {
+			/* Filter out security.selinux, it is cached in slab */
+			CDEBUG(D_CACHE, "not caching security.selinux\n");
+			rc = 0;
 		} else {
 			rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval,
 						*xsizes);
 		}
 		if (rc < 0) {
 			ll_xattr_cache_destroy_locked(lli);
-			GOTO(out_destroy, rc);
+			GOTO(err_cancel, rc);
 		}
 		xdata += strlen(xdata) + 1;
 		xval += *xsizes;
@@ -467,28 +465,24 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
 	if (xdata != xtail || xval != xvtail)
 		CERROR("a hole in xattr data\n");
 
-	ll_set_lock_data(sbi->ll_md_exp, inode, oit, NULL);
-
-	GOTO(out_maybe_drop, rc);
-out_maybe_drop:
-
-	ll_intent_drop_lock(oit);
+	ll_set_lock_data(sbi->ll_md_exp, inode, &oit, NULL);
+	ll_intent_drop_lock(&oit);
 
-	if (rc != 0)
-		up_write(&lli->lli_xattrs_list_rwsem);
-out_no_unlock:
 	ptlrpc_req_finished(req);
+	RETURN(0);
 
-	return rc;
-
-out_destroy:
-	up_write(&lli->lli_xattrs_list_rwsem);
-
+err_cancel:
 	ldlm_lock_decref_and_cancel((struct lustre_handle *)
-				    &oit->d.lustre.it_lock_handle,
-				    oit->d.lustre.it_lock_mode);
+				    &oit.it_lock_handle,
+				    oit.it_lock_mode);
+err_unlock:
+	up_write(&lli->lli_xattrs_list_rwsem);
+err_req:
+	if (rc == -ERANGE)
+		rc = -EAGAIN;
 
-	goto out_no_unlock;
+	ptlrpc_req_finished(req);
+	RETURN(rc);
 }
 
 /**
@@ -511,7 +505,6 @@ int ll_xattr_cache_get(struct inode *inode,
 			size_t size,
 			__u64 valid)
 {
-	struct lookup_intent oit = { .it_op = IT_GETXATTR };
 	struct ll_inode_info *lli = ll_i2info(inode);
 	int rc = 0;
 
@@ -522,7 +515,7 @@ int ll_xattr_cache_get(struct inode *inode,
 	down_read(&lli->lli_xattrs_list_rwsem);
 	if (!ll_xattr_cache_valid(lli)) {
 		up_read(&lli->lli_xattrs_list_rwsem);
-		rc = ll_xattr_cache_refill(inode, &oit);
+		rc = ll_xattr_cache_refill(inode);
 		if (rc)
 			RETURN(rc);
 		downgrade_write(&lli->lli_xattrs_list_rwsem);
@@ -554,6 +547,6 @@ int ll_xattr_cache_get(struct inode *inode,
 out:
 	up_read(&lli->lli_xattrs_list_rwsem);
 
-	return rc;
+	RETURN(rc);
 }
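
The rewritten error path in ll_xattr_cache_refill() above unwinds through cascading labels (err_cancel, err_unlock, err_req): each label releases exactly one resource and falls through to the next, and -ERANGE is remapped to -EAGAIN at the outermost label. Below is a minimal standalone sketch of that cleanup pattern; the resource names and helper functions are illustrative stand-ins, not the Lustre/ptlrpc API.

#include <stdlib.h>
#include <stdio.h>

/* Illustrative stand-ins for the reply buffer, DLM lock and rwsem. */
static void *fetch_reply(void)    { return malloc(16); }
static void  free_reply(void *r)  { free(r); }          /* tolerates NULL */
static int   grab_lock(void)      { return 0; }
static void  cancel_lock(void)    { puts("cancel lock"); }
static int   grab_rwsem(void)     { return 0; }
static void  release_rwsem(void)  { puts("release rwsem"); }
static int   parse_reply(void *r) { return r != NULL ? 0 : -1; }

/*
 * Cascading error labels: each label releases one resource and falls
 * through to the next, so every failure point needs only a single goto.
 */
static int refill(void)
{
	void *reply;
	int rc;

	reply = fetch_reply();
	if (reply == NULL)
		return -1;

	rc = grab_lock();
	if (rc)
		goto err_reply;

	rc = grab_rwsem();
	if (rc)
		goto err_cancel;

	rc = parse_reply(reply);
	if (rc)
		goto err_unlock;

	/* success: release in reverse order of acquisition */
	release_rwsem();
	cancel_lock();
	free_reply(reply);
	return 0;

err_unlock:
	release_rwsem();
err_cancel:
	cancel_lock();
err_reply:
	free_reply(reply);
	return rc;
}

int main(void)
{
	return refill() ? EXIT_FAILURE : EXIT_SUCCESS;
}

Note that the sketch only illustrates the label structure; in the actual patch the success path keeps the PR lock cached rather than cancelling it, and the read/write semaphore handling is split between ll_xattr_find_get_lock() and ll_xattr_cache_get().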