X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fmdt%2Fmdt_handler.c;h=94a675a81fe7146449b9aed269195eb1c32c641f;hp=aafd3b1d1ebb7fad7c2fb390a6246d6981cfcd54;hb=75a417fa0065d52a31215daaaaf41c0fa9751a89;hpb=1cb9e85039c8df5fbe061aad3b0666c59ff7aa4e diff --git a/lustre/mdt/mdt_handler.c b/lustre/mdt/mdt_handler.c index aafd3b1..94a675a 100644 --- a/lustre/mdt/mdt_handler.c +++ b/lustre/mdt/mdt_handler.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -27,7 +23,7 @@ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2010, 2015, Intel Corporation. + * Copyright (c) 2010, 2017, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -54,23 +50,22 @@ #include #include #include -#include +#include #include #include -#include #include #include -#include +#include #include #include #include #include - +#include +#include #include #include "mdt_internal.h" - static unsigned int max_mod_rpcs_per_client = 8; module_param(max_mod_rpcs_per_client, uint, 0644); MODULE_PARM_DESC(max_mod_rpcs_per_client, "maximum number of modify RPCs in flight allowed per client"); @@ -98,7 +93,6 @@ enum ldlm_mode mdt_dlm_lock_modes[] = { }; static struct mdt_device *mdt_dev(struct lu_device *d); -static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags); static const struct lu_object_operations mdt_obj_ops; @@ -178,8 +172,8 @@ void mdt_lock_pdo_init(struct mdt_lock_handle *lh, enum ldlm_mode lock_mode, lh->mlh_type = MDT_PDO_LOCK; if (lu_name_is_valid(lname)) { - lh->mlh_pdo_hash = full_name_hash(lname->ln_name, - lname->ln_namelen); + lh->mlh_pdo_hash = ll_full_name_hash(NULL, lname->ln_name, + lname->ln_namelen); /* XXX Workaround for LU-2856 * * Zero is a valid return value of full_name_hash, but @@ -274,18 +268,13 @@ static int mdt_lookup_fileset(struct mdt_thread_info *info, const char *fileset, { struct mdt_device *mdt = info->mti_mdt; struct lu_name *lname = &info->mti_name; - char *name = NULL; + char *filename = info->mti_filename; struct mdt_object *parent; u32 mode; int rc = 0; LASSERT(!info->mti_cross_ref); - OBD_ALLOC(name, NAME_MAX + 1); - if (name == NULL) - return -ENOMEM; - lname->ln_name = name; - /* * We may want to allow this to mount a completely separate * fileset from the MDT in the future, but keeping it to @@ -321,8 +310,9 @@ static int mdt_lookup_fileset(struct mdt_thread_info *info, const char *fileset, break; } - strncpy(name, s1, lname->ln_namelen); - name[lname->ln_namelen] = '\0'; + strncpy(filename, s1, lname->ln_namelen); + filename[lname->ln_namelen] = '\0'; + lname->ln_name = filename; parent = mdt_object_find(info->mti_env, mdt, fid); if (IS_ERR(parent)) { @@ -347,8 +337,6 @@ static int mdt_lookup_fileset(struct mdt_thread_info *info, const char *fileset, } } - OBD_FREE(name, NAME_MAX + 1); - return rc; } @@ -417,12 +405,16 @@ out: static int mdt_statfs(struct tgt_session_info *tsi) { - struct ptlrpc_request *req = tgt_ses_req(tsi); - struct mdt_thread_info *info = tsi2mdt_info(tsi); - struct md_device *next = 
info->mti_mdt->mdt_child;
-	struct ptlrpc_service_part *svcpt;
-	struct obd_statfs	*osfs;
-	int			rc;
+	struct ptlrpc_request	*req = tgt_ses_req(tsi);
+	struct mdt_thread_info	*info = tsi2mdt_info(tsi);
+	struct mdt_device	*mdt = info->mti_mdt;
+	struct tg_grants_data	*tgd = &mdt->mdt_lut.lut_tgd;
+	struct md_device	*next = mdt->mdt_child;
+	struct ptlrpc_service_part *svcpt;
+	struct obd_statfs	*osfs;
+	struct mdt_body		*reqbody = NULL;
+	struct mdt_statfs_cache	*msf;
+	int			rc;
 	ENTRY;
@@ -444,24 +436,72 @@ static int mdt_statfs(struct tgt_session_info *tsi)
 	if (!osfs)
 		GOTO(out, rc = -EPROTO);
-	/** statfs information are cached in the mdt_device */
-	if (cfs_time_before_64(info->mti_mdt->mdt_osfs_age,
-			       cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS))) {
-		/** statfs data is too old, get up-to-date one */
-		rc = next->md_ops->mdo_statfs(info->mti_env, next, osfs);
-		if (rc)
-			GOTO(out, rc);
-		spin_lock(&info->mti_mdt->mdt_lock);
-		info->mti_mdt->mdt_osfs = *osfs;
-		info->mti_mdt->mdt_osfs_age = cfs_time_current_64();
-		spin_unlock(&info->mti_mdt->mdt_lock);
-	} else {
-		/** use cached statfs data */
-		spin_lock(&info->mti_mdt->mdt_lock);
-		*osfs = info->mti_mdt->mdt_osfs;
-		spin_unlock(&info->mti_mdt->mdt_lock);
+	if (mdt_is_sum_statfs_client(req->rq_export) &&
+	    lustre_packed_msg_size(req->rq_reqmsg) ==
+	    req_capsule_fmt_size(req->rq_reqmsg->lm_magic,
+				 &RQF_MDS_STATFS_NEW, RCL_CLIENT)) {
+		req_capsule_extend(info->mti_pill, &RQF_MDS_STATFS_NEW);
+		reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
 	}
+	if (reqbody && reqbody->mbo_valid & OBD_MD_FLAGSTATFS)
+		msf = &mdt->mdt_sum_osfs;
+	else
+		msf = &mdt->mdt_osfs;
+
+	if (msf->msf_age + OBD_STATFS_CACHE_SECONDS <= ktime_get_seconds()) {
+		/** statfs data is too old, get up-to-date one */
+		if (reqbody && reqbody->mbo_valid & OBD_MD_FLAGSTATFS)
+			rc = next->md_ops->mdo_statfs(info->mti_env,
+						      next, osfs);
+		else
+			rc = dt_statfs(info->mti_env, mdt->mdt_bottom,
+				       osfs);
+		if (rc)
+			GOTO(out, rc);
+		spin_lock(&mdt->mdt_lock);
+		msf->msf_osfs = *osfs;
+		msf->msf_age = ktime_get_seconds();
+		spin_unlock(&mdt->mdt_lock);
+	} else {
+		/** use cached statfs data */
+		spin_lock(&mdt->mdt_lock);
+		*osfs = msf->msf_osfs;
+		spin_unlock(&mdt->mdt_lock);
+	}
+
+	/* at least try to account for cached pages. it's still racy and
+	 * might be under-reporting if clients haven't announced their
+	 * caches with brw recently */
+	CDEBUG(D_SUPER | D_CACHE, "blocks cached %llu granted %llu"
+	       " pending %llu free %llu avail %llu\n",
+	       tgd->tgd_tot_dirty, tgd->tgd_tot_granted,
+	       tgd->tgd_tot_pending,
+	       osfs->os_bfree << tgd->tgd_blockbits,
+	       osfs->os_bavail << tgd->tgd_blockbits);
+
+	osfs->os_bavail -= min_t(u64, osfs->os_bavail,
+				 ((tgd->tgd_tot_dirty + tgd->tgd_tot_pending +
+				   osfs->os_bsize - 1) >> tgd->tgd_blockbits));
+
+	tgt_grant_sanity_check(mdt->mdt_lu_dev.ld_obd, __func__);
+	CDEBUG(D_CACHE, "%llu blocks: %llu free, %llu avail; "
+	       "%llu objects: %llu free; state %x\n",
+	       osfs->os_blocks, osfs->os_bfree, osfs->os_bavail,
+	       osfs->os_files, osfs->os_ffree, osfs->os_state);
+
+	if (!exp_grant_param_supp(tsi->tsi_exp) &&
+	    tgd->tgd_blockbits > COMPAT_BSIZE_SHIFT) {
+		/* clients which don't support OBD_CONNECT_GRANT_PARAM
+		 * should not see a block size > page size, otherwise
+		 * cl_lost_grant goes mad. Therefore, we emulate a 4KB (=2^12)
+		 * block size, which is the biggest block size known to work
+		 * with all clients' page sizes.
*/ + osfs->os_blocks <<= tgd->tgd_blockbits - COMPAT_BSIZE_SHIFT; + osfs->os_bfree <<= tgd->tgd_blockbits - COMPAT_BSIZE_SHIFT; + osfs->os_bavail <<= tgd->tgd_blockbits - COMPAT_BSIZE_SHIFT; + osfs->os_bsize = 1 << COMPAT_BSIZE_SHIFT; + } if (rc == 0) mdt_counter_incr(req, LPROC_MDT_STATFS); out: @@ -469,6 +509,52 @@ out: RETURN(rc); } +/** + * Pack size attributes into the reply. + */ +int mdt_pack_size2body(struct mdt_thread_info *info, + const struct lu_fid *fid, struct lustre_handle *lh) +{ + struct mdt_body *b; + struct md_attr *ma = &info->mti_attr; + int dom_stripe; + bool dom_lock = false; + + ENTRY; + + LASSERT(ma->ma_attr.la_valid & LA_MODE); + + if (!S_ISREG(ma->ma_attr.la_mode) || + !(ma->ma_valid & MA_LOV && ma->ma_lmm != NULL)) + RETURN(-ENODATA); + + dom_stripe = mdt_lmm_dom_entry(ma->ma_lmm); + /* no DoM stripe, no size in reply */ + if (dom_stripe == LMM_NO_DOM) + RETURN(-ENOENT); + + if (lustre_handle_is_used(lh)) { + struct ldlm_lock *lock; + + lock = ldlm_handle2lock(lh); + if (lock != NULL) { + dom_lock = ldlm_has_dom(lock); + LDLM_LOCK_PUT(lock); + } + } + + /* no DoM lock, no size in reply */ + if (!dom_lock) + RETURN(0); + + /* Either DoM lock exists or LMM has only DoM stripe then + * return size on body. */ + b = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY); + + mdt_dom_object_size(info->mti_env, info->mti_mdt, fid, b, dom_lock); + RETURN(0); +} + #ifdef CONFIG_FS_POSIX_ACL /* * Pack ACL data into the reply. UIDs/GIDs are mapped and filtered by nodemap. @@ -486,14 +572,18 @@ int mdt_pack_acl2body(struct mdt_thread_info *info, struct mdt_body *repbody, const struct lu_env *env = info->mti_env; struct md_object *next = mdt_object_child(o); struct lu_buf *buf = &info->mti_buf; + struct mdt_device *mdt = info->mti_mdt; + struct req_capsule *pill = info->mti_pill; int rc; - buf->lb_buf = req_capsule_server_get(info->mti_pill, &RMF_ACL); - buf->lb_len = req_capsule_get_size(info->mti_pill, &RMF_ACL, - RCL_SERVER); + ENTRY; + + buf->lb_buf = req_capsule_server_get(pill, &RMF_ACL); + buf->lb_len = req_capsule_get_size(pill, &RMF_ACL, RCL_SERVER); if (buf->lb_len == 0) - return 0; + RETURN(0); +again: rc = mo_xattr_get(env, next, buf, XATTR_NAME_ACL_ACCESS); if (rc < 0) { if (rc == -ENODATA) { @@ -503,28 +593,117 @@ int mdt_pack_acl2body(struct mdt_thread_info *info, struct mdt_body *repbody, } else if (rc == -EOPNOTSUPP) { rc = 0; } else { + if (rc == -ERANGE && + exp_connect_large_acl(info->mti_exp) && + buf->lb_buf != info->mti_big_acl) { + if (info->mti_big_acl == NULL) { + OBD_ALLOC_LARGE(info->mti_big_acl, + mdt->mdt_max_ea_size); + if (info->mti_big_acl == NULL) { + CERROR("%s: unable to grow " + DFID" ACL buffer\n", + mdt_obd_name(mdt), + PFID(mdt_object_fid(o))); + RETURN(-ENOMEM); + } + + info->mti_big_aclsize = + mdt->mdt_max_ea_size; + } + + CDEBUG(D_INODE, "%s: grow the "DFID + " ACL buffer to size %d\n", + mdt_obd_name(mdt), + PFID(mdt_object_fid(o)), + mdt->mdt_max_ea_size); + + buf->lb_buf = info->mti_big_acl; + buf->lb_len = info->mti_big_aclsize; + + goto again; + } + CERROR("%s: unable to read "DFID" ACL: rc = %d\n", - mdt_obd_name(info->mti_mdt), - PFID(mdt_object_fid(o)), rc); + mdt_obd_name(mdt), PFID(mdt_object_fid(o)), rc); } } else { + int client; + int server; + int acl_buflen; + int lmm_buflen = 0; + int lmmsize = 0; + + acl_buflen = req_capsule_get_size(pill, &RMF_ACL, RCL_SERVER); + if (acl_buflen >= rc) + goto map; + + /* If LOV/LMA EA is small, we can reuse part of their buffer */ + client = ptlrpc_req_get_repsize(pill->rc_req); + 
server = lustre_packed_msg_size(pill->rc_req->rq_repmsg); + if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER)) { + lmm_buflen = req_capsule_get_size(pill, &RMF_MDT_MD, + RCL_SERVER); + lmmsize = repbody->mbo_eadatasize; + } + + if (client < server - acl_buflen - lmm_buflen + rc + lmmsize) { + CDEBUG(D_INODE, "%s: client prepared buffer size %d " + "is not big enough with the ACL size %d (%d)\n", + mdt_obd_name(mdt), client, rc, + server - acl_buflen - lmm_buflen + rc + lmmsize); + repbody->mbo_aclsize = 0; + repbody->mbo_valid &= ~OBD_MD_FLACL; + RETURN(-ERANGE); + } + +map: + if (buf->lb_buf == info->mti_big_acl) + info->mti_big_acl_used = 1; + rc = nodemap_map_acl(nodemap, buf->lb_buf, rc, NODEMAP_FS_TO_CLIENT); /* if all ACLs mapped out, rc is still >= 0 */ if (rc < 0) { CERROR("%s: nodemap_map_acl unable to parse "DFID - " ACL: rc = %d\n", mdt_obd_name(info->mti_mdt), + " ACL: rc = %d\n", mdt_obd_name(mdt), PFID(mdt_object_fid(o)), rc); + repbody->mbo_aclsize = 0; + repbody->mbo_valid &= ~OBD_MD_FLACL; } else { repbody->mbo_aclsize = rc; repbody->mbo_valid |= OBD_MD_FLACL; rc = 0; } } - return rc; + + RETURN(rc); } #endif +/* XXX Look into layout in MDT layer. */ +static inline bool mdt_hsm_is_released(struct lov_mds_md *lmm) +{ + struct lov_comp_md_v1 *comp_v1; + struct lov_mds_md *v1; + int i; + + if (lmm->lmm_magic == LOV_MAGIC_COMP_V1) { + comp_v1 = (struct lov_comp_md_v1 *)lmm; + + for (i = 0; i < comp_v1->lcm_entry_count; i++) { + v1 = (struct lov_mds_md *)((char *)comp_v1 + + comp_v1->lcm_entries[i].lcme_offset); + /* We don't support partial release for now */ + if (!(v1->lmm_pattern & LOV_PATTERN_F_RELEASED)) + return false; + } + return true; + } else { + return (lmm->lmm_pattern & LOV_PATTERN_F_RELEASED) ? + true : false; + } +} + void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b, const struct lu_attr *attr, const struct lu_fid *fid) { @@ -572,6 +751,12 @@ void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b, b->mbo_valid |= OBD_MD_FLGID; } + if (attr->la_valid & LA_PROJID) { + /* TODO, nodemap for project id */ + b->mbo_projid = attr->la_projid; + b->mbo_valid |= OBD_MD_FLPROJID; + } + b->mbo_mode = attr->la_mode; if (attr->la_valid & LA_MODE) b->mbo_valid |= OBD_MD_FLMODE; @@ -581,7 +766,7 @@ void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b, if (fid != NULL) { b->mbo_fid1 = *fid; b->mbo_valid |= OBD_MD_FLID; - CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, valid="LPX64"\n", + CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, valid=%#llx\n", PFID(fid), b->mbo_nlink, b->mbo_mode, b->mbo_valid); } @@ -602,17 +787,20 @@ void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b, /* if no object is allocated on osts, the size on mds is valid. * b=22272 */ b->mbo_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS; - } else if ((ma->ma_valid & MA_LOV) && ma->ma_lmm != NULL && - ma->ma_lmm->lmm_pattern & LOV_PATTERN_F_RELEASED) { - /* A released file stores its size on MDS. */ - /* But return 1 block for released file, unless tools like tar - * will consider it fully sparse. (LU-3864) - */ - if (unlikely(b->mbo_size == 0)) - b->mbo_blocks = 0; - else - b->mbo_blocks = 1; - b->mbo_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS; + } else if ((ma->ma_valid & MA_LOV) && ma->ma_lmm != NULL) { + if (mdt_hsm_is_released(ma->ma_lmm)) { + /* A released file stores its size on MDS. */ + /* But return 1 block for released file, unless tools + * like tar will consider it fully sparse. 
(LU-3864) + */ + if (unlikely(b->mbo_size == 0)) + b->mbo_blocks = 0; + else + b->mbo_blocks = 1; + b->mbo_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS; + } else if (info->mti_som_valid) { /* som is valid */ + b->mbo_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS; + } } if (fid != NULL && (b->mbo_valid & OBD_MD_FLSIZE)) @@ -751,6 +939,8 @@ int mdt_stripe_get(struct mdt_thread_info *info, struct mdt_object *o, return -EINVAL; } + LASSERT(buf->lb_buf); + rc = mo_xattr_get(info->mti_env, next, buf, name); if (rc > 0) { @@ -803,8 +993,8 @@ got: return rc; } -static int mdt_attr_get_pfid(struct mdt_thread_info *info, - struct mdt_object *o, struct lu_fid *pfid) +int mdt_attr_get_pfid(struct mdt_thread_info *info, struct mdt_object *o, + struct lu_fid *pfid) { struct lu_buf *buf = &info->mti_buf; struct link_ea_header *leh; @@ -872,6 +1062,9 @@ int mdt_attr_get_complex(struct mdt_thread_info *info, rc = mo_attr_get(env, next, ma); if (rc) GOTO(out, rc); + + if (S_ISREG(mode)) + (void) mdt_get_som(info, o, ma); ma->ma_valid |= MA_INODE; } @@ -901,6 +1094,15 @@ int mdt_attr_get_complex(struct mdt_thread_info *info, GOTO(out, rc); } + /* + * In the handle of MA_INODE, we may already get the SOM attr. + */ + if (need & MA_SOM && S_ISREG(mode) && !(ma->ma_valid & MA_SOM)) { + rc = mdt_get_som(info, o, ma); + if (rc != 0) + GOTO(out, rc); + } + if (need & MA_HSM && S_ISREG(mode)) { buf->lb_buf = info->mti_xattr_buf; buf->lb_len = sizeof(info->mti_xattr_buf); @@ -931,7 +1133,7 @@ int mdt_attr_get_complex(struct mdt_thread_info *info, #endif out: ma->ma_need = need; - CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = "LPX64" ma_lmm=%p\n", + CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = %#llx ma_lmm=%p\n", rc, ma->ma_valid, ma->ma_lmm); RETURN(rc); } @@ -950,7 +1152,6 @@ static int mdt_getattr_internal(struct mdt_thread_info *info, struct lu_buf *buffer = &info->mti_buf; struct obd_export *exp = info->mti_exp; int rc; - int is_root; ENTRY; if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK)) @@ -1032,32 +1233,6 @@ static int mdt_getattr_internal(struct mdt_thread_info *info, repbody->mbo_t_state = MS_RESTORE; } - is_root = lu_fid_eq(mdt_object_fid(o), &info->mti_mdt->mdt_md_root_fid); - - /* the Lustre protocol supposes to return default striping - * on the user-visible root if explicitly requested */ - if ((ma->ma_valid & MA_LOV) == 0 && S_ISDIR(la->la_mode) && - (ma->ma_need & MA_LOV_DEF && is_root) && ma->ma_need & MA_LOV) { - struct lu_fid rootfid; - struct mdt_object *root; - struct mdt_device *mdt = info->mti_mdt; - - rc = dt_root_get(env, mdt->mdt_bottom, &rootfid); - if (rc) - RETURN(rc); - root = mdt_object_find(env, mdt, &rootfid); - if (IS_ERR(root)) - RETURN(PTR_ERR(root)); - rc = mdt_stripe_get(info, root, ma, XATTR_NAME_LOV); - mdt_object_put(info->mti_env, root); - if (unlikely(rc)) { - CERROR("%s: getattr error for "DFID": rc = %d\n", - mdt_obd_name(info->mti_mdt), - PFID(mdt_object_fid(o)), rc); - RETURN(rc); - } - } - if (likely(ma->ma_valid & MA_INODE)) mdt_pack_attr2body(info, repbody, la, mdt_object_fid(o)); else @@ -1088,6 +1263,7 @@ static int mdt_getattr_internal(struct mdt_thread_info *info, if (!mdt_is_striped_client(req->rq_export)) RETURN(-ENOTSUPP); LASSERT(S_ISDIR(la->la_mode)); + mdt_dump_lmv(D_INFO, ma->ma_lmv); repbody->mbo_eadatasize = ma->ma_lmv_size; repbody->mbo_valid |= (OBD_MD_FLDIREA | OBD_MD_DEFAULT_MEA); @@ -1173,6 +1349,12 @@ static int mdt_getattr(struct tgt_session_info *tsi) LASSERT(obj != NULL); LASSERT(lu_object_assert_exists(&obj->mot_obj)); + /* Special case for 
Data-on-MDT files to get data version */ + if (unlikely(reqbody->mbo_valid & OBD_MD_FLDATAVERSION)) { + rc = mdt_data_version_get(tsi); + GOTO(out, rc); + } + /* Unlike intent case where we need to pre-fill out buffers early on * in intent policy for ldlm reasons, here we can have a much better * guess at EA size by just reading it from disk. @@ -1182,7 +1364,6 @@ static int mdt_getattr(struct tgt_session_info *tsi) /* No easy way to know how long is the symlink, but it cannot * be more than PATH_MAX, so we allocate +1 */ rc = PATH_MAX + 1; - /* A special case for fs ROOT: getattr there might fetch * default EA for entire fs, not just for this dir! */ @@ -1204,6 +1385,12 @@ static int mdt_getattr(struct tgt_session_info *tsi) req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, rc); + /* Set ACL reply buffer size as LUSTRE_POSIX_ACL_MAX_SIZE_OLD + * by default. If the target object has more ACL entries, then + * enlarge the buffer when necessary. */ + req_capsule_set_size(pill, &RMF_ACL, RCL_SERVER, + LUSTRE_POSIX_ACL_MAX_SIZE_OLD); + rc = req_capsule_server_pack(pill); if (unlikely(rc != 0)) GOTO(out, rc = err_serious(rc)); @@ -1220,18 +1407,60 @@ static int mdt_getattr(struct tgt_session_info *tsi) info->mti_cross_ref = !!(reqbody->mbo_valid & OBD_MD_FLCROSSREF); rc = mdt_getattr_internal(info, obj, 0); - EXIT; + EXIT; out_shrink: - mdt_client_compatibility(info); - rc2 = mdt_fix_reply(info); - if (rc == 0) - rc = rc2; + mdt_client_compatibility(info); + rc2 = mdt_fix_reply(info); + if (rc == 0) + rc = rc2; out: mdt_thread_info_fini(info); return rc; } /** + * Handler of layout intent RPC requiring the layout modification + * + * \param[in] info thread environment + * \param[in] obj object + * \param[in] layout layout change descriptor + * + * \retval 0 on success + * \retval < 0 error code + */ +int mdt_layout_change(struct mdt_thread_info *info, struct mdt_object *obj, + struct md_layout_change *layout) +{ + struct mdt_lock_handle *lh = &info->mti_lh[MDT_LH_LOCAL]; + int rc; + ENTRY; + + if (!mdt_object_exists(obj)) + GOTO(out, rc = -ENOENT); + + if (!S_ISREG(lu_object_attr(&obj->mot_obj))) + GOTO(out, rc = -EINVAL); + + rc = mo_permission(info->mti_env, NULL, mdt_object_child(obj), NULL, + MAY_WRITE); + if (rc) + GOTO(out, rc); + + /* take layout lock to prepare layout change */ + mdt_lock_reg_init(lh, LCK_EX); + rc = mdt_object_lock(info, obj, lh, MDS_INODELOCK_LAYOUT); + if (rc) + GOTO(out, rc); + + mutex_lock(&obj->mot_som_mutex); + rc = mo_layout_change(info->mti_env, mdt_object_child(obj), layout); + mutex_unlock(&obj->mot_som_mutex); + mdt_object_unlock(info, obj, lh, 1); +out: + RETURN(rc); +} + +/** * Exchange MOF_LOV_CREATED flags between two objects after a * layout swap. No assumption is made on whether o1 or o2 have * created objects or not. @@ -1298,12 +1527,12 @@ static int mdt_swap_layouts(struct tgt_session_info *tsi) /* permission check. Make sure the calling process having permission * to write both files. 
*/ rc = mo_permission(info->mti_env, NULL, mdt_object_child(o1), NULL, - MAY_WRITE); + MAY_WRITE); if (rc < 0) GOTO(put, rc); rc = mo_permission(info->mti_env, NULL, mdt_object_child(o2), NULL, - MAY_WRITE); + MAY_WRITE); if (rc < 0) GOTO(put, rc); @@ -1384,18 +1613,19 @@ static int mdt_getattr_name_lock(struct mdt_thread_info *info, __u64 child_bits, struct ldlm_reply *ldlm_rep) { - struct ptlrpc_request *req = mdt_info_req(info); - struct mdt_body *reqbody = NULL; - struct mdt_object *parent = info->mti_object; - struct mdt_object *child; - struct lu_fid *child_fid = &info->mti_tmp_fid1; - struct lu_name *lname = NULL; - struct mdt_lock_handle *lhp = NULL; - struct ldlm_lock *lock; - bool is_resent; - bool try_layout; - int ma_need = 0; - int rc; + struct ptlrpc_request *req = mdt_info_req(info); + struct mdt_body *reqbody = NULL; + struct mdt_object *parent = info->mti_object; + struct mdt_object *child; + struct lu_fid *child_fid = &info->mti_tmp_fid1; + struct lu_name *lname = NULL; + struct mdt_lock_handle *lhp = NULL; + struct ldlm_lock *lock; + __u64 try_bits = 0; + bool is_resent; + int ma_need = 0; + int rc; + ENTRY; is_resent = lustre_handle_is_used(&lhc->mlh_reg_lh); @@ -1557,19 +1787,18 @@ static int mdt_getattr_name_lock(struct mdt_thread_info *info, if (rc < 0) { GOTO(out_child, rc); } else if (rc > 0) { - mdt_lock_handle_init(lhc); + mdt_lock_handle_init(lhc); mdt_lock_reg_init(lhc, LCK_PR); - try_layout = false; if (!(child_bits & MDS_INODELOCK_UPDATE) && mdt_object_exists(child) && !mdt_object_remote(child)) { - struct md_attr *ma = &info->mti_attr; + struct md_attr *ma = &info->mti_attr; - ma->ma_valid = 0; - ma->ma_need = MA_INODE; + ma->ma_valid = 0; + ma->ma_need = MA_INODE; rc = mdt_attr_get_complex(info, child, ma); - if (unlikely(rc != 0)) - GOTO(out_child, rc); + if (unlikely(rc != 0)) + GOTO(out_child, rc); /* If the file has not been changed for some time, we * return not only a LOOKUP lock, but also an UPDATE @@ -1579,35 +1808,32 @@ static int mdt_getattr_name_lock(struct mdt_thread_info *info, if (ma->ma_valid & MA_INODE && ma->ma_attr.la_valid & LA_CTIME && info->mti_mdt->mdt_namespace->ns_ctime_age_limit + - ma->ma_attr.la_ctime < cfs_time_current_sec()) + ma->ma_attr.la_ctime < ktime_get_real_seconds()) child_bits |= MDS_INODELOCK_UPDATE; } /* layout lock must be granted in a best-effort way * for IT operations */ LASSERT(!(child_bits & MDS_INODELOCK_LAYOUT)); - if (!OBD_FAIL_CHECK(OBD_FAIL_MDS_NO_LL_GETATTR) && - exp_connect_layout(info->mti_exp) && - S_ISREG(lu_object_attr(&child->mot_obj)) && + if (S_ISREG(lu_object_attr(&child->mot_obj)) && !mdt_object_remote(child) && ldlm_rep != NULL) { - /* try to grant layout lock for regular file. */ - try_layout = true; + if (!OBD_FAIL_CHECK(OBD_FAIL_MDS_NO_LL_GETATTR) && + exp_connect_layout(info->mti_exp)) { + /* try to grant layout lock for regular file. 
*/ + try_bits = MDS_INODELOCK_LAYOUT; + } + /* Acquire DOM lock in advance for data-on-mdt file */ + if (child != parent) + try_bits |= MDS_INODELOCK_DOM; } - rc = 0; - if (try_layout) { - child_bits |= MDS_INODELOCK_LAYOUT; + if (try_bits != 0) { /* try layout lock, it may fail to be granted due to * contention at LOOKUP or UPDATE */ - if (!mdt_object_lock_try(info, child, lhc, - child_bits)) { - child_bits &= ~MDS_INODELOCK_LAYOUT; - LASSERT(child_bits != 0); - rc = mdt_object_lock(info, child, lhc, - child_bits); - } else { + rc = mdt_object_lock_try(info, child, lhc, &child_bits, + try_bits, false); + if (child_bits & MDS_INODELOCK_LAYOUT) ma_need |= MA_LOV; - } } else { /* Do not enqueue the UPDATE lock from MDT(cross-MDT), * client will enqueue the lock to the remote MDT */ @@ -1633,17 +1859,38 @@ static int mdt_getattr_name_lock(struct mdt_thread_info *info, "Lock res_id: "DLDLMRES", fid: "DFID"\n", PLDLMRES(lock->l_resource), PFID(mdt_object_fid(child))); - } - if (lock) - LDLM_LOCK_PUT(lock); - EXIT; + if (S_ISREG(lu_object_attr(&child->mot_obj)) && + mdt_object_exists(child) && !mdt_object_remote(child) && + child != parent) { + LDLM_LOCK_PUT(lock); + mdt_object_put(info->mti_env, child); + /* NB: call the mdt_pack_size2body always after + * mdt_object_put(), that is why this special + * exit path is used. */ + rc = mdt_pack_size2body(info, child_fid, + &lhc->mlh_reg_lh); + if (rc != 0 && child_bits & MDS_INODELOCK_DOM) { + /* DOM lock was taken in advance but this is + * not DoM file. Drop the lock. */ + lock_res_and_lock(lock); + ldlm_inodebits_drop(lock, MDS_INODELOCK_DOM); + unlock_res_and_lock(lock); + } + + GOTO(out_parent, rc = 0); + } + } + if (lock) + LDLM_LOCK_PUT(lock); + + EXIT; out_child: - mdt_object_put(info->mti_env, child); + mdt_object_put(info->mti_env, child); out_parent: - if (lhp) - mdt_object_unlock(info, parent, lhp, 1); - return rc; + if (lhp) + mdt_object_unlock(info, parent, lhp, 1); + return rc; } /* normal handler: should release the child lock */ @@ -1775,7 +2022,7 @@ static int mdt_readpage(struct tgt_session_info *tsi) */ rdpg->rp_hash = reqbody->mbo_size; if (rdpg->rp_hash != reqbody->mbo_size) { - CERROR("Invalid hash: "LPX64" != "LPX64"\n", + CERROR("Invalid hash: %#llx != %#llx\n", rdpg->rp_hash, reqbody->mbo_size); RETURN(-EFAULT); } @@ -1864,6 +2111,13 @@ static int mdt_reint_internal(struct mdt_thread_info *info, if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER)) req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER, 0); + /* Set ACL reply buffer size as LUSTRE_POSIX_ACL_MAX_SIZE_OLD + * by default. If the target object has more ACL entries, then + * enlarge the buffer when necessary. */ + if (req_capsule_has_field(pill, &RMF_ACL, RCL_SERVER)) + req_capsule_set_size(pill, &RMF_ACL, RCL_SERVER, + LUSTRE_POSIX_ACL_MAX_SIZE_OLD); + rc = req_capsule_server_pack(pill); if (rc != 0) { CERROR("Can't pack response, rc %d\n", rc); @@ -1905,11 +2159,24 @@ static int mdt_reint_internal(struct mdt_thread_info *info, out_ucred: mdt_exit_ucred(info); out_shrink: - mdt_client_compatibility(info); - rc2 = mdt_fix_reply(info); - if (rc == 0) - rc = rc2; - return rc; + mdt_client_compatibility(info); + + rc2 = mdt_fix_reply(info); + if (rc == 0) + rc = rc2; + + /* + * Data-on-MDT optimization - read data along with OPEN and return it + * in reply. Do that only if we have both DOM and LAYOUT locks. 
+ */ + if (rc == 0 && op == REINT_OPEN && + info->mti_attr.ma_lmm != NULL && + mdt_lmm_dom_entry(info->mti_attr.ma_lmm) == LMM_DOM_ONLY) { + rc = mdt_dom_read_on_open(info, info->mti_mdt, + &lhc->mlh_reg_lh); + } + + return rc; } static long mdt_reint_opcode(struct ptlrpc_request *req, @@ -1951,7 +2218,8 @@ static int mdt_reint(struct tgt_session_info *tsi) [REINT_OPEN] = &RQF_MDS_REINT_OPEN, [REINT_SETXATTR] = &RQF_MDS_REINT_SETXATTR, [REINT_RMENTRY] = &RQF_MDS_REINT_UNLINK, - [REINT_MIGRATE] = &RQF_MDS_REINT_RENAME + [REINT_MIGRATE] = &RQF_MDS_REINT_MIGRATE, + [REINT_RESYNC] = &RQF_MDS_REINT_RESYNC, }; ENTRY; @@ -1974,7 +2242,7 @@ static int mdt_reint(struct tgt_session_info *tsi) } /* this should sync the whole device */ -static int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt) +int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt) { struct dt_device *dt = mdt->mdt_bottom; int rc; @@ -1985,20 +2253,21 @@ static int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt) } /* this should sync this object */ -static int mdt_object_sync(struct mdt_thread_info *info) +static int mdt_object_sync(const struct lu_env *env, struct obd_export *exp, + struct mdt_object *mo) { - struct md_object *next; int rc; + ENTRY; - if (!mdt_object_exists(info->mti_object)) { + if (!mdt_object_exists(mo)) { CWARN("%s: non existing object "DFID": rc = %d\n", - mdt_obd_name(info->mti_mdt), - PFID(mdt_object_fid(info->mti_object)), -ESTALE); + exp->exp_obd->obd_name, PFID(mdt_object_fid(mo)), + -ESTALE); RETURN(-ESTALE); } - next = mdt_object_child(info->mti_object); - rc = mo_object_sync(info->mti_env, next); + + rc = mo_object_sync(env, mdt_object_child(mo)); RETURN(rc); } @@ -2021,7 +2290,8 @@ static int mdt_sync(struct tgt_session_info *tsi) struct mdt_thread_info *info = tsi2mdt_info(tsi); /* sync an object */ - rc = mdt_object_sync(info); + rc = mdt_object_sync(tsi->tsi_env, tsi->tsi_exp, + info->mti_object); if (rc == 0) { const struct lu_fid *fid; struct lu_attr *la = &info->mti_attr.ma_attr; @@ -2045,6 +2315,54 @@ static int mdt_sync(struct tgt_session_info *tsi) RETURN(rc); } +static int mdt_data_sync(struct tgt_session_info *tsi) +{ + struct mdt_thread_info *info; + struct mdt_device *mdt = mdt_exp2dev(tsi->tsi_exp); + struct ost_body *body = tsi->tsi_ost_body; + struct ost_body *repbody; + struct mdt_object *mo = NULL; + struct md_attr *ma; + int rc = 0; + + ENTRY; + + repbody = req_capsule_server_get(tsi->tsi_pill, &RMF_OST_BODY); + + /* if no fid is specified then do nothing, + * device sync is done via MDS_SYNC */ + if (fid_is_zero(&tsi->tsi_fid)) + RETURN(0); + + mo = mdt_object_find(tsi->tsi_env, mdt, &tsi->tsi_fid); + if (IS_ERR(mo)) + RETURN(PTR_ERR(mo)); + + rc = mdt_object_sync(tsi->tsi_env, tsi->tsi_exp, mo); + if (rc) + GOTO(put, rc); + + repbody->oa.o_oi = body->oa.o_oi; + repbody->oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP; + + info = tsi2mdt_info(tsi); + ma = &info->mti_attr; + ma->ma_need = MA_INODE; + ma->ma_valid = 0; + rc = mdt_attr_get_complex(info, mo, ma); + if (rc == 0) + obdo_from_la(&repbody->oa, &ma->ma_attr, VALID_FLAGS); + else + rc = 0; + mdt_thread_info_fini(info); + + EXIT; +put: + if (mo != NULL) + mdt_object_put(tsi->tsi_env, mo); + return rc; +} + /* * Handle quota control requests to consult current usage/limit, but also * to configure quota enforcement @@ -2076,10 +2394,12 @@ static int mdt_quotactl(struct tgt_session_info *tsi) /* master quotactl */ case Q_SETINFO: case Q_SETQUOTA: + case LUSTRE_Q_SETDEFAULT: if 
(!nodemap_can_setquota(nodemap)) GOTO(out_nodemap, rc = -EPERM); case Q_GETINFO: case Q_GETQUOTA: + case LUSTRE_Q_GETDEFAULT: if (qmt == NULL) GOTO(out_nodemap, rc = -EOPNOTSUPP); /* slave quotactl */ @@ -2092,26 +2412,45 @@ static int mdt_quotactl(struct tgt_session_info *tsi) } id = oqctl->qc_id; - if (oqctl->qc_type == USRQUOTA) + switch (oqctl->qc_type) { + case USRQUOTA: id = nodemap_map_id(nodemap, NODEMAP_UID, NODEMAP_CLIENT_TO_FS, id); - else if (oqctl->qc_type == GRPQUOTA) - id = nodemap_map_id(nodemap, NODEMAP_UID, + break; + case GRPQUOTA: + id = nodemap_map_id(nodemap, NODEMAP_GID, NODEMAP_CLIENT_TO_FS, id); - + break; + case PRJQUOTA: + /* todo: check/map project id */ + id = oqctl->qc_id; + break; + default: + GOTO(out_nodemap, rc = -EOPNOTSUPP); + } repoqc = req_capsule_server_get(pill, &RMF_OBD_QUOTACTL); if (repoqc == NULL) GOTO(out_nodemap, rc = err_serious(-EFAULT)); + if (oqctl->qc_cmd == Q_SETINFO || oqctl->qc_cmd == Q_SETQUOTA) + barrier_exit(tsi->tsi_tgt->lut_bottom); + if (oqctl->qc_id != id) swap(oqctl->qc_id, id); + if (oqctl->qc_cmd == Q_SETINFO || oqctl->qc_cmd == Q_SETQUOTA) { + if (unlikely(!barrier_entry(tsi->tsi_tgt->lut_bottom))) + RETURN(-EINPROGRESS); + } + switch (oqctl->qc_cmd) { case Q_GETINFO: case Q_SETINFO: case Q_SETQUOTA: case Q_GETQUOTA: + case LUSTRE_Q_SETDEFAULT: + case LUSTRE_Q_GETDEFAULT: /* forward quotactl request to QMT */ rc = qmt_hdls.qmth_quotactl(tsi->tsi_env, qmt, oqctl); break; @@ -2315,49 +2654,72 @@ static inline int mdt_is_lock_sync(struct ldlm_lock *lock) * \see ldlm_blocking_ast_nocheck */ int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, - void *data, int flag) + void *data, int flag) { - struct obd_device *obd = ldlm_lock_to_ns(lock)->ns_obd; - struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev); - int rc; - ENTRY; + struct obd_device *obd = ldlm_lock_to_ns(lock)->ns_obd; + struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev); + struct ldlm_cb_set_arg *arg = data; + bool commit_async = false; + int rc; + ENTRY; - if (flag == LDLM_CB_CANCELING) - RETURN(0); + if (flag == LDLM_CB_CANCELING) + RETURN(0); - lock_res_and_lock(lock); - if (lock->l_blocking_ast != mdt_blocking_ast) { - unlock_res_and_lock(lock); - RETURN(0); - } - if (lock->l_req_mode & (LCK_PW | LCK_EX) && - lock->l_blocking_lock != NULL) { - if (mdt_cos_is_enabled(mdt) && - lock->l_client_cookie != - lock->l_blocking_lock->l_client_cookie) - mdt_set_lock_sync(lock); - else if (mdt_slc_is_enabled(mdt) && - ldlm_is_cos_incompat(lock->l_blocking_lock)) + lock_res_and_lock(lock); + if (lock->l_blocking_ast != mdt_blocking_ast) { + unlock_res_and_lock(lock); + RETURN(0); + } + + /* A blocking ast may be sent from ldlm_lock_decref_internal + * when the last reference to a local lock was released and + * during blocking event from ldlm_work_bl_ast_lock(). + * The 'data' parameter is l_ast_data in the first case and + * callback arguments in the second one. Distinguish them by that. + */ + if (!data || data == lock->l_ast_data || !arg->bl_desc) + goto skip_cos_checks; + + if (lock->l_req_mode & (LCK_PW | LCK_EX)) { + if (mdt_cos_is_enabled(mdt)) { + if (!arg->bl_desc->bl_same_client) + mdt_set_lock_sync(lock); + } else if (mdt_slc_is_enabled(mdt) && + arg->bl_desc->bl_cos_incompat) { mdt_set_lock_sync(lock); + /* + * we may do extra commit here, but there is a small + * window to miss a commit: lock was unlocked (saved), + * then a conflict lock queued and we come here, but + * REP-ACK not received, so lock was not converted to + * COS mode yet. 
+ * Fortunately this window is quite small, so the + * extra commit should be rare (not to say distributed + * operation is rare too). + */ + commit_async = true; + } + } else if (lock->l_req_mode == LCK_COS) { + commit_async = true; } - rc = ldlm_blocking_ast_nocheck(lock); - /* There is no lock conflict if l_blocking_lock == NULL, - * it indicates a blocking ast sent from ldlm_lock_decref_internal - * when the last reference to a local lock was released */ - if (lock->l_req_mode == LCK_COS && lock->l_blocking_lock != NULL) { - struct lu_env env; +skip_cos_checks: + rc = ldlm_blocking_ast_nocheck(lock); + + if (commit_async) { + struct lu_env env; rc = lu_env_init(&env, LCT_LOCAL); if (unlikely(rc != 0)) CWARN("%s: lu_env initialization failed, cannot " "start asynchronous commit: rc = %d\n", obd->obd_name, rc); - else - mdt_device_commit_async(&env, mdt); - lu_env_fini(&env); - } - RETURN(rc); + else + mdt_device_commit_async(&env, mdt); + lu_env_fini(&env); + } + RETURN(rc); } /* @@ -2392,11 +2754,15 @@ int mdt_remote_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, break; } case LDLM_CB_CANCELING: { + struct obd_device *obd = ldlm_lock_to_ns(lock)->ns_obd; + struct mdt_device *mdt = + mdt_dev(obd->obd_lu_dev->ld_site->ls_top_dev); + LDLM_DEBUG(lock, "Revoke remote lock\n"); /* discard slc lock here so that it can be cleaned anytime, * especially for cleanup_resource() */ - tgt_discard_slc_lock(lock); + tgt_discard_slc_lock(&mdt->mdt_lut, lock); /* once we cache lock, l_ast_data is set to mdt_object */ if (lock->l_ast_data != NULL) { @@ -2405,9 +2771,6 @@ int mdt_remote_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, rc = lu_env_init(&env, LCT_MD_THREAD); if (unlikely(rc != 0)) { - struct obd_device *obd; - - obd = ldlm_lock_to_ns(lock)->ns_obd; CWARN("%s: lu_env initialization failed, object" "%p "DFID" is leaked!\n", obd->obd_name, mo, @@ -2447,7 +2810,7 @@ int mdt_check_resent_lock(struct mdt_thread_info *info, /* Lock is pinned by ldlm_handle_enqueue0() as it is * a resend case, however, it could be already destroyed * due to client eviction or a raced cancel RPC. 
*/ - LDLM_DEBUG_NOLOCK("Invalid lock handle "LPX64, + LDLM_DEBUG_NOLOCK("Invalid lock handle %#llx", lhc->mlh_reg_lh.cookie); RETURN(-ESTALE); } @@ -2467,12 +2830,12 @@ int mdt_check_resent_lock(struct mdt_thread_info *info, return 1; } -int mdt_remote_object_lock(struct mdt_thread_info *mti, struct mdt_object *o, - const struct lu_fid *fid, struct lustre_handle *lh, - enum ldlm_mode mode, __u64 ibits, bool nonblock, - bool cache) +int mdt_remote_object_lock_try(struct mdt_thread_info *mti, + struct mdt_object *o, const struct lu_fid *fid, + struct lustre_handle *lh, enum ldlm_mode mode, + __u64 *ibits, __u64 trybits, bool cache) { - struct ldlm_enqueue_info *einfo = &mti->mti_einfo; + struct ldlm_enqueue_info *einfo = &mti->mti_remote_einfo; union ldlm_policy_data *policy = &mti->mti_policy; struct ldlm_res_id *res_id = &mti->mti_res_id; int rc = 0; @@ -2489,8 +2852,7 @@ int mdt_remote_object_lock(struct mdt_thread_info *mti, struct mdt_object *o, einfo->ei_cb_cp = ldlm_completion_ast; einfo->ei_enq_slave = 0; einfo->ei_res_id = res_id; - if (nonblock) - einfo->ei_nonblock = 1; + if (cache) { /* * if we cache lock, couple lock with mdt_object, so that object @@ -2501,26 +2863,42 @@ int mdt_remote_object_lock(struct mdt_thread_info *mti, struct mdt_object *o, } memset(policy, 0, sizeof(*policy)); - policy->l_inodebits.bits = ibits; + policy->l_inodebits.bits = *ibits; + policy->l_inodebits.try_bits = trybits; rc = mo_object_lock(mti->mti_env, mdt_object_child(o), lh, einfo, policy); - if (rc < 0 && cache) { + if (rc < 0 && cache) mdt_object_put(mti->mti_env, o); - einfo->ei_cbdata = NULL; + + /* Return successfully acquired bits to a caller */ + if (rc == 0) { + struct ldlm_lock *lock = ldlm_handle2lock(lh); + + LASSERT(lock); + *ibits = lock->l_policy_data.l_inodebits.bits; + LDLM_LOCK_PUT(lock); } RETURN(rc); } +int mdt_remote_object_lock(struct mdt_thread_info *mti, struct mdt_object *o, + const struct lu_fid *fid, struct lustre_handle *lh, + enum ldlm_mode mode, __u64 ibits, bool cache) +{ + return mdt_remote_object_lock_try(mti, o, fid, lh, mode, &ibits, 0, + cache); +} + static int mdt_object_local_lock(struct mdt_thread_info *info, struct mdt_object *o, - struct mdt_lock_handle *lh, __u64 ibits, - bool nonblock, bool cos_incompat) + struct mdt_lock_handle *lh, __u64 *ibits, + __u64 trybits, bool cos_incompat) { struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace; union ldlm_policy_data *policy = &info->mti_policy; struct ldlm_res_id *res_id = &info->mti_res_id; - __u64 dlmflags = 0; + __u64 dlmflags = 0, *cookie = NULL; int rc; ENTRY; @@ -2538,8 +2916,7 @@ static int mdt_object_local_lock(struct mdt_thread_info *info, } /* Only enqueue LOOKUP lock for remote object */ - if (mdt_object_remote(o)) - LASSERT(ibits == MDS_INODELOCK_LOOKUP); + LASSERT(ergo(mdt_object_remote(o), *ibits == MDS_INODELOCK_LOOKUP)); if (lh->mlh_type == MDT_PDO_LOCK) { /* check for exists after object is locked */ @@ -2550,34 +2927,38 @@ static int mdt_object_local_lock(struct mdt_thread_info *info, /* Non-dir object shouldn't have PDO lock */ if (!S_ISDIR(lu_object_attr(&o->mot_obj))) RETURN(-ENOTDIR); - } - } - - memset(policy, 0, sizeof(*policy)); - fid_build_reg_res_name(mdt_object_fid(o), res_id); + } + } + fid_build_reg_res_name(mdt_object_fid(o), res_id); dlmflags |= LDLM_FL_ATOMIC_CB; - if (nonblock) - dlmflags |= LDLM_FL_BLOCK_NOWAIT; - /* - * Take PDO lock on whole directory and build correct @res_id for lock - * on part of directory. 
- */ - if (lh->mlh_pdo_hash != 0) { - LASSERT(lh->mlh_type == MDT_PDO_LOCK); - mdt_lock_pdo_mode(info, o, lh); - if (lh->mlh_pdo_mode != LCK_NL) { - /* - * Do not use LDLM_FL_LOCAL_ONLY for parallel lock, it - * is never going to be sent to client and we do not - * want it slowed down due to possible cancels. - */ - policy->l_inodebits.bits = MDS_INODELOCK_UPDATE; - rc = mdt_fid_lock(ns, &lh->mlh_pdo_lh, lh->mlh_pdo_mode, - policy, res_id, dlmflags, - info->mti_exp == NULL ? NULL : - &info->mti_exp->exp_handle.h_cookie); + if (info->mti_exp) + cookie = &info->mti_exp->exp_handle.h_cookie; + + /* + * Take PDO lock on whole directory and build correct @res_id for lock + * on part of directory. + */ + if (lh->mlh_pdo_hash != 0) { + LASSERT(lh->mlh_type == MDT_PDO_LOCK); + mdt_lock_pdo_mode(info, o, lh); + if (lh->mlh_pdo_mode != LCK_NL) { + /* + * Do not use LDLM_FL_LOCAL_ONLY for parallel lock, it + * is never going to be sent to client and we do not + * want it slowed down due to possible cancels. + */ + policy->l_inodebits.bits = + *ibits & MDS_INODELOCK_UPDATE; + policy->l_inodebits.try_bits = + trybits & MDS_INODELOCK_UPDATE; + /* at least one of them should be set */ + LASSERT(policy->l_inodebits.bits | + policy->l_inodebits.try_bits); + rc = mdt_fid_lock(info->mti_env, ns, &lh->mlh_pdo_lh, + lh->mlh_pdo_mode, policy, res_id, + dlmflags, cookie); if (unlikely(rc != 0)) GOTO(out_unlock, rc); } @@ -2589,17 +2970,17 @@ static int mdt_object_local_lock(struct mdt_thread_info *info, res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash; } - policy->l_inodebits.bits = ibits; + policy->l_inodebits.bits = *ibits; + policy->l_inodebits.try_bits = trybits; /* * Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet if it is * going to be sent to client. If it is - mdt_intent_policy() path will * fix it up and turn FL_LOCAL flag off. */ - rc = mdt_fid_lock(ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, policy, - res_id, LDLM_FL_LOCAL_ONLY | dlmflags, - info->mti_exp == NULL ? 
NULL : - &info->mti_exp->exp_handle.h_cookie); + rc = mdt_fid_lock(info->mti_env, ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, + policy, res_id, LDLM_FL_LOCAL_ONLY | dlmflags, + cookie); out_unlock: if (rc != 0) mdt_object_unlock(info, o, lh, 1); @@ -2608,39 +2989,49 @@ out_unlock: (lh->mlh_reg_mode == LCK_PW || lh->mlh_reg_mode == LCK_EX)) OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_PDO_LOCK, 15); + /* Return successfully acquired bits to a caller */ + if (rc == 0) { + struct ldlm_lock *lock = ldlm_handle2lock(&lh->mlh_reg_lh); + + LASSERT(lock); + *ibits = lock->l_policy_data.l_inodebits.bits; + LDLM_LOCK_PUT(lock); + } RETURN(rc); } static int mdt_object_lock_internal(struct mdt_thread_info *info, struct mdt_object *o, - struct mdt_lock_handle *lh, __u64 ibits, bool nonblock, - bool cos_incompat) + struct mdt_lock_handle *lh, __u64 *ibits, + __u64 trybits, bool cos_incompat) { struct mdt_lock_handle *local_lh = NULL; int rc; ENTRY; if (!mdt_object_remote(o)) { - rc = mdt_object_local_lock(info, o, lh, ibits, nonblock, + rc = mdt_object_local_lock(info, o, lh, ibits, trybits, cos_incompat); RETURN(rc); } /* XXX do not support PERM/LAYOUT/XATTR lock for remote object yet */ - ibits &= ~(MDS_INODELOCK_PERM | MDS_INODELOCK_LAYOUT | - MDS_INODELOCK_XATTR); + *ibits &= ~(MDS_INODELOCK_PERM | MDS_INODELOCK_LAYOUT | + MDS_INODELOCK_XATTR); /* Only enqueue LOOKUP lock for remote object */ - if (ibits & MDS_INODELOCK_LOOKUP) { - rc = mdt_object_local_lock(info, o, lh, MDS_INODELOCK_LOOKUP, - nonblock, cos_incompat); + if (*ibits & MDS_INODELOCK_LOOKUP) { + __u64 local = MDS_INODELOCK_LOOKUP; + + rc = mdt_object_local_lock(info, o, lh, &local, 0, + cos_incompat); if (rc != ELDLM_OK) RETURN(rc); local_lh = lh; } - if (ibits & MDS_INODELOCK_UPDATE) { + if ((*ibits | trybits) & MDS_INODELOCK_UPDATE) { /* Sigh, PDO needs to enqueue 2 locks right now, but * enqueue RPC can only request 1 lock, to avoid extra * RPC, so it will instead enqueue EX lock for remote @@ -2654,11 +3045,11 @@ mdt_object_lock_internal(struct mdt_thread_info *info, struct mdt_object *o, lh->mlh_rreg_mode = LCK_EX; lh->mlh_type = MDT_REG_LOCK; } - rc = mdt_remote_object_lock(info, o, mdt_object_fid(o), - &lh->mlh_rreg_lh, - lh->mlh_rreg_mode, - MDS_INODELOCK_UPDATE, nonblock, - false); + + rc = mdt_remote_object_lock_try(info, o, mdt_object_fid(o), + &lh->mlh_rreg_lh, + lh->mlh_rreg_mode, + ibits, trybits, false); if (rc != ELDLM_OK) { if (local_lh != NULL) mdt_object_unlock(info, o, local_lh, rc); @@ -2666,13 +3057,17 @@ mdt_object_lock_internal(struct mdt_thread_info *info, struct mdt_object *o, } } + /* other components like LFSCK can use lockless access + * and populate cache, so we better invalidate it */ + mo_invalidate(info->mti_env, mdt_object_child(o)); + RETURN(0); } int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o, struct mdt_lock_handle *lh, __u64 ibits) { - return mdt_object_lock_internal(info, o, lh, ibits, false, false); + return mdt_object_lock_internal(info, o, lh, &ibits, 0, false); } int mdt_reint_object_lock(struct mdt_thread_info *info, struct mdt_object *o, @@ -2680,36 +3075,25 @@ int mdt_reint_object_lock(struct mdt_thread_info *info, struct mdt_object *o, bool cos_incompat) { LASSERT(lh->mlh_reg_mode == LCK_PW || lh->mlh_reg_mode == LCK_EX); - return mdt_object_lock_internal(info, o, lh, ibits, false, + return mdt_object_lock_internal(info, o, lh, &ibits, 0, cos_incompat); } int mdt_object_lock_try(struct mdt_thread_info *info, struct mdt_object *o, - struct mdt_lock_handle *lh, __u64 ibits) -{ - struct 
mdt_lock_handle tmp = *lh; - int rc; - - rc = mdt_object_lock_internal(info, o, &tmp, ibits, true, false); - if (rc == 0) - *lh = tmp; - - return rc == 0; -} - -int mdt_reint_object_lock_try(struct mdt_thread_info *info, - struct mdt_object *o, struct mdt_lock_handle *lh, - __u64 ibits, bool cos_incompat) + struct mdt_lock_handle *lh, __u64 *ibits, + __u64 trybits, bool cos_incompat) { - struct mdt_lock_handle tmp = *lh; + bool trylock_only = *ibits == 0; int rc; - LASSERT(lh->mlh_reg_mode == LCK_PW || lh->mlh_reg_mode == LCK_EX); - rc = mdt_object_lock_internal(info, o, &tmp, ibits, true, cos_incompat); - if (rc == 0) - *lh = tmp; - - return rc == 0; + LASSERT(!(*ibits & trybits)); + rc = mdt_object_lock_internal(info, o, lh, ibits, trybits, + cos_incompat); + if (rc && trylock_only) { /* clear error for try ibits lock only */ + LASSERT(*ibits == 0); + rc = 0; + } + return rc; } /** @@ -2725,8 +3109,8 @@ int mdt_reint_object_lock_try(struct mdt_thread_info *info, * \param mode lock mode * \param decref force immediate lock releasing */ -static void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h, - enum ldlm_mode mode, int decref) +void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h, + enum ldlm_mode mode, int decref) { ENTRY; @@ -2738,25 +3122,27 @@ static void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h, struct mdt_device *mdt = info->mti_mdt; struct ldlm_lock *lock = ldlm_handle2lock(h); struct ptlrpc_request *req = mdt_info_req(info); - int cos; + bool cos = mdt_cos_is_enabled(mdt); + bool convert_lock = !cos && mdt_slc_is_enabled(mdt); - cos = (mdt_cos_is_enabled(mdt) || - mdt_slc_is_enabled(mdt)); - - LASSERTF(lock != NULL, "no lock for cookie "LPX64"\n", + LASSERTF(lock != NULL, "no lock for cookie %#llx\n", h->cookie); /* there is no request if mdt_object_unlock() is called * from mdt_export_cleanup()->mdt_add_dirty_flag() */ if (likely(req != NULL)) { - CDEBUG(D_HA, "request = %p reply state = %p" - " transno = "LPD64"\n", req, - req->rq_reply_state, req->rq_transno); + LDLM_DEBUG(lock, "save lock request %p reply " + "state %p transno %lld\n", req, + req->rq_reply_state, req->rq_transno); if (cos) { - ldlm_lock_downgrade(lock, LCK_COS); + ldlm_lock_mode_downgrade(lock, LCK_COS); mode = LCK_COS; } - ptlrpc_save_lock(req, h, mode, cos); + if (req->rq_export->exp_disconnected) + mdt_fid_unlock(h, mode); + else + ptlrpc_save_lock(req, h, mode, cos, + convert_lock); } else { mdt_fid_unlock(h, mode); } @@ -2807,7 +3193,8 @@ static void mdt_save_remote_lock(struct mdt_thread_info *info, struct ptlrpc_request *req = mdt_info_req(info); LASSERT(req != NULL); - tgt_save_slc_lock(lock, req->rq_transno); + tgt_save_slc_lock(&info->mti_mdt->mdt_lut, lock, + req->rq_transno); ldlm_lock_decref(h, mode); } h->cookie = 0ull; @@ -2827,6 +3214,8 @@ static void mdt_save_remote_lock(struct mdt_thread_info *info, * \param o mdt object * \param lh mdt lock handle referencing regular and PDO locks * \param decref force immediate lock releasing + * + * XXX o is not used and may be NULL, see hsm_cdt_request_completed(). */ void mdt_object_unlock(struct mdt_thread_info *info, struct mdt_object *o, struct mdt_lock_handle *lh, int decref) @@ -2882,7 +3271,8 @@ void mdt_object_unlock_put(struct mdt_thread_info * info, * actually exists on storage (lu_object_exists()). 
* */ -static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags) +static int mdt_body_unpack(struct mdt_thread_info *info, + enum tgt_handler_flags flags) { const struct mdt_body *body; struct mdt_object *obj; @@ -2921,7 +3311,8 @@ static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags) RETURN(rc); } -static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags) +static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, + enum tgt_handler_flags flags) { struct req_capsule *pill = info->mti_pill; int rc; @@ -2941,6 +3332,13 @@ static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags) req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER, 0); + /* Set ACL reply buffer size as LUSTRE_POSIX_ACL_MAX_SIZE_OLD + * by default. If the target object has more ACL entries, then + * enlarge the buffer when necessary. */ + if (req_capsule_has_field(pill, &RMF_ACL, RCL_SERVER)) + req_capsule_set_size(pill, &RMF_ACL, RCL_SERVER, + LUSTRE_POSIX_ACL_MAX_SIZE_OLD); + rc = req_capsule_server_pack(pill); } RETURN(rc); @@ -2997,6 +3395,8 @@ void mdt_thread_info_init(struct ptlrpc_request *req, info->mti_cross_ref = 0; info->mti_opdata = 0; info->mti_big_lmm_used = 0; + info->mti_big_acl_used = 0; + info->mti_som_valid = 0; info->mti_spec.no_create = 0; info->mti_spec.sp_rm_entry = 0; @@ -3056,118 +3456,25 @@ static int mdt_tgt_connect(struct tgt_session_info *tsi) return tgt_connect(tsi); } -enum mdt_it_code { - MDT_IT_OPEN, - MDT_IT_OCREAT, - MDT_IT_CREATE, - MDT_IT_GETATTR, - MDT_IT_READDIR, - MDT_IT_LOOKUP, - MDT_IT_UNLINK, - MDT_IT_TRUNC, - MDT_IT_GETXATTR, - MDT_IT_LAYOUT, - MDT_IT_QUOTA, - MDT_IT_NR -}; - -static int mdt_intent_getattr(enum mdt_it_code opcode, - struct mdt_thread_info *info, - struct ldlm_lock **, - __u64); - -static int mdt_intent_getxattr(enum mdt_it_code opcode, - struct mdt_thread_info *info, - struct ldlm_lock **lockp, - __u64 flags); - -static int mdt_intent_layout(enum mdt_it_code opcode, - struct mdt_thread_info *info, - struct ldlm_lock **, - __u64); -static int mdt_intent_reint(enum mdt_it_code opcode, - struct mdt_thread_info *info, - struct ldlm_lock **, - __u64); - -static struct mdt_it_flavor { - const struct req_format *it_fmt; - __u32 it_flags; - int (*it_act)(enum mdt_it_code , - struct mdt_thread_info *, - struct ldlm_lock **, - __u64); - long it_reint; -} mdt_it_flavor[] = { - [MDT_IT_OPEN] = { - .it_fmt = &RQF_LDLM_INTENT, - /*.it_flags = HABEO_REFERO,*/ - .it_flags = 0, - .it_act = mdt_intent_reint, - .it_reint = REINT_OPEN - }, - [MDT_IT_OCREAT] = { - .it_fmt = &RQF_LDLM_INTENT, - /* - * OCREAT is not a MUTABOR request as if the file - * already exists. - * We do the extra check of OBD_CONNECT_RDONLY in - * mdt_reint_open() when we really need to create - * the object. 
- */ - .it_flags = 0, - .it_act = mdt_intent_reint, - .it_reint = REINT_OPEN - }, - [MDT_IT_CREATE] = { - .it_fmt = &RQF_LDLM_INTENT, - .it_flags = MUTABOR, - .it_act = mdt_intent_reint, - .it_reint = REINT_CREATE - }, - [MDT_IT_GETATTR] = { - .it_fmt = &RQF_LDLM_INTENT_GETATTR, - .it_flags = HABEO_REFERO, - .it_act = mdt_intent_getattr - }, - [MDT_IT_READDIR] = { - .it_fmt = NULL, - .it_flags = 0, - .it_act = NULL - }, - [MDT_IT_LOOKUP] = { - .it_fmt = &RQF_LDLM_INTENT_GETATTR, - .it_flags = HABEO_REFERO, - .it_act = mdt_intent_getattr - }, - [MDT_IT_UNLINK] = { - .it_fmt = &RQF_LDLM_INTENT_UNLINK, - .it_flags = MUTABOR, - .it_act = NULL, - .it_reint = REINT_UNLINK - }, - [MDT_IT_TRUNC] = { - .it_fmt = NULL, - .it_flags = MUTABOR, - .it_act = NULL - }, - [MDT_IT_GETXATTR] = { - .it_fmt = &RQF_LDLM_INTENT_GETXATTR, - .it_flags = HABEO_CORPUS, - .it_act = mdt_intent_getxattr - }, - [MDT_IT_LAYOUT] = { - .it_fmt = &RQF_LDLM_INTENT_LAYOUT, - .it_flags = 0, - .it_act = mdt_intent_layout - } -}; +static int mdt_intent_glimpse(enum ldlm_intent_flags it_opc, + struct mdt_thread_info *info, + struct ldlm_lock **lockp, __u64 flags) +{ + return mdt_glimpse_enqueue(info, info->mti_mdt->mdt_namespace, + lockp, flags); +} +static int mdt_intent_brw(enum ldlm_intent_flags it_opc, + struct mdt_thread_info *info, + struct ldlm_lock **lockp, __u64 flags) +{ + return mdt_brw_enqueue(info, info->mti_mdt->mdt_namespace, + lockp, flags); +} -static int -mdt_intent_lock_replace(struct mdt_thread_info *info, - struct ldlm_lock **lockp, - struct mdt_lock_handle *lh, - __u64 flags, int result) +int mdt_intent_lock_replace(struct mdt_thread_info *info, + struct ldlm_lock **lockp, + struct mdt_lock_handle *lh, + __u64 flags, int result) { struct ptlrpc_request *req = mdt_info_req(info); struct ldlm_lock *lock = *lockp; @@ -3185,14 +3492,14 @@ mdt_intent_lock_replace(struct mdt_thread_info *info, /* Lock is pinned by ldlm_handle_enqueue0() as it is * a resend case, however, it could be already destroyed * due to client eviction or a raced cancel RPC. 
*/ - LDLM_DEBUG_NOLOCK("Invalid lock handle "LPX64"\n", + LDLM_DEBUG_NOLOCK("Invalid lock handle %#llx\n", lh->mlh_reg_lh.cookie); lh->mlh_reg_lh.cookie = 0; RETURN(-ESTALE); } LASSERTF(new_lock != NULL, - "lockh "LPX64" flags "LPX64" rc %d\n", + "lockh %#llx flags %#llx : rc = %d\n", lh->mlh_reg_lh.cookie, flags, result); /* @@ -3243,6 +3550,8 @@ mdt_intent_lock_replace(struct mdt_thread_info *info, new_lock->l_export = class_export_lock_get(req->rq_export, new_lock); new_lock->l_blocking_ast = lock->l_blocking_ast; new_lock->l_completion_ast = lock->l_completion_ast; + if (ldlm_has_dom(new_lock)) + new_lock->l_glimpse_ast = ldlm_server_glimpse_ast; new_lock->l_remote_handle = lock->l_remote_handle; new_lock->l_flags &= ~LDLM_FL_LOCAL; @@ -3258,10 +3567,9 @@ mdt_intent_lock_replace(struct mdt_thread_info *info, RETURN(ELDLM_LOCK_REPLACED); } -static void mdt_intent_fixup_resent(struct mdt_thread_info *info, - struct ldlm_lock *new_lock, - struct mdt_lock_handle *lh, - __u64 flags) +void mdt_intent_fixup_resent(struct mdt_thread_info *info, + struct ldlm_lock *new_lock, + struct mdt_lock_handle *lh, __u64 flags) { struct ptlrpc_request *req = mdt_info_req(info); struct ldlm_request *dlmreq; @@ -3279,7 +3587,7 @@ static void mdt_intent_fixup_resent(struct mdt_thread_info *info, lh->mlh_reg_mode = new_lock->l_granted_mode; LDLM_DEBUG(new_lock, "Restoring lock cookie"); - DEBUG_REQ(D_DLMTRACE, req, "restoring lock cookie "LPX64, + DEBUG_REQ(D_DLMTRACE, req, "restoring lock cookie %#llx", lh->mlh_reg_lh.cookie); return; } @@ -3298,14 +3606,14 @@ static void mdt_intent_fixup_resent(struct mdt_thread_info *info, */ lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT); - DEBUG_REQ(D_DLMTRACE, req, "no existing lock with rhandle "LPX64, + DEBUG_REQ(D_DLMTRACE, req, "no existing lock with rhandle %#llx", dlmreq->lock_handle[0].cookie); } -static int mdt_intent_getxattr(enum mdt_it_code opcode, - struct mdt_thread_info *info, - struct ldlm_lock **lockp, - __u64 flags) +static int mdt_intent_getxattr(enum ldlm_intent_flags it_opc, + struct mdt_thread_info *info, + struct ldlm_lock **lockp, + __u64 flags) { struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_RMT]; struct ldlm_reply *ldlm_rep = NULL; @@ -3352,7 +3660,7 @@ static int mdt_intent_getxattr(enum mdt_it_code opcode, RETURN(rc); } -static int mdt_intent_getattr(enum mdt_it_code opcode, +static int mdt_intent_getattr(enum ldlm_intent_flags it_opc, struct mdt_thread_info *info, struct ldlm_lock **lockp, __u64 flags) @@ -3375,18 +3683,19 @@ static int mdt_intent_getattr(enum mdt_it_code opcode, repbody->mbo_eadatasize = 0; repbody->mbo_aclsize = 0; - switch (opcode) { - case MDT_IT_LOOKUP: + switch (it_opc) { + case IT_LOOKUP: child_bits = MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM; - break; - case MDT_IT_GETATTR: + break; + case IT_GETATTR: child_bits = MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE | MDS_INODELOCK_PERM; - break; - default: - CERROR("Unsupported intent (%d)\n", opcode); - GOTO(out_shrink, rc = -EINVAL); - } + break; + default: + CERROR("%s: unsupported intent %#x\n", + mdt_obd_name(info->mti_mdt), (unsigned int)it_opc); + GOTO(out_shrink, rc = -EINVAL); + } rc = mdt_init_ucred_intent_getattr(info, reqbody); if (rc) @@ -3421,37 +3730,55 @@ out_shrink: return rc; } -static int mdt_intent_layout(enum mdt_it_code opcode, +static int mdt_intent_layout(enum ldlm_intent_flags it_opc, struct mdt_thread_info *info, struct ldlm_lock **lockp, __u64 flags) { struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_LAYOUT]; - struct layout_intent 
*layout; - struct lu_fid *fid; + struct md_layout_change layout = { .mlc_opc = MD_LAYOUT_NOP }; + struct layout_intent *intent; + struct lu_fid *fid = &info->mti_tmp_fid2; struct mdt_object *obj = NULL; int layout_size = 0; int rc = 0; ENTRY; - if (opcode != MDT_IT_LAYOUT) { - CERROR("%s: Unknown intent (%d)\n", mdt_obd_name(info->mti_mdt), - opcode); - RETURN(-EINVAL); - } + fid_extract_from_res_name(fid, &(*lockp)->l_resource->lr_name); - layout = req_capsule_client_get(info->mti_pill, &RMF_LAYOUT_INTENT); - if (layout == NULL) + intent = req_capsule_client_get(info->mti_pill, &RMF_LAYOUT_INTENT); + if (intent == NULL) RETURN(-EPROTO); - if (layout->li_opc != LAYOUT_INTENT_ACCESS) { + CDEBUG(D_INFO, DFID "got layout change request from client: " + "opc:%u flags:%#x extent "DEXT"\n", + PFID(fid), intent->li_opc, intent->li_flags, + PEXT(&intent->li_extent)); + + switch (intent->li_opc) { + case LAYOUT_INTENT_TRUNC: + case LAYOUT_INTENT_WRITE: + layout.mlc_opc = MD_LAYOUT_WRITE; + layout.mlc_intent = intent; + break; + case LAYOUT_INTENT_ACCESS: + break; + case LAYOUT_INTENT_READ: + case LAYOUT_INTENT_GLIMPSE: + case LAYOUT_INTENT_RELEASE: + case LAYOUT_INTENT_RESTORE: CERROR("%s: Unsupported layout intent opc %d\n", - mdt_obd_name(info->mti_mdt), layout->li_opc); - RETURN(-EINVAL); + mdt_obd_name(info->mti_mdt), intent->li_opc); + rc = -ENOTSUPP; + break; + default: + CERROR("%s: Unknown layout intent opc %d\n", + mdt_obd_name(info->mti_mdt), intent->li_opc); + rc = -EINVAL; + break; } - - fid = &info->mti_tmp_fid2; - fid_extract_from_res_name(fid, &(*lockp)->l_resource->lr_name); + if (rc < 0) + RETURN(rc); /* Get lock from request for possible resent case. */ mdt_intent_fixup_resent(info, *lockp, lhc, flags); @@ -3460,21 +3787,86 @@ static int mdt_intent_layout(enum mdt_it_code opcode, if (IS_ERR(obj)) GOTO(out, rc = PTR_ERR(obj)); + if (mdt_object_exists(obj) && !mdt_object_remote(obj)) { - layout_size = mdt_attr_get_eabuf_size(info, obj); - if (layout_size < 0) - GOTO(out_obj, rc = layout_size); + /* if layout is going to be changed don't use the current EA + * size but the maximum one. That buffer will be shrinked + * to the actual size in req_capsule_shrink() before reply. + */ + if (layout.mlc_opc == MD_LAYOUT_WRITE) { + layout_size = info->mti_mdt->mdt_max_mdsize; + } else { + layout_size = mdt_attr_get_eabuf_size(info, obj); + if (layout_size < 0) + GOTO(out_obj, rc = layout_size); - if (layout_size > info->mti_mdt->mdt_max_mdsize) - info->mti_mdt->mdt_max_mdsize = layout_size; + if (layout_size > info->mti_mdt->mdt_max_mdsize) + info->mti_mdt->mdt_max_mdsize = layout_size; + } + CDEBUG(D_INFO, "%s: layout_size %d\n", + mdt_obd_name(info->mti_mdt), layout_size); } + /* + * set reply buffer size, so that ldlm_handle_enqueue0()-> + * ldlm_lvbo_fill() will fill the reply buffer with lovea. + */ (*lockp)->l_lvb_type = LVB_T_LAYOUT; req_capsule_set_size(info->mti_pill, &RMF_DLM_LVB, RCL_SERVER, layout_size); rc = req_capsule_server_pack(info->mti_pill); - GOTO(out_obj, rc); + if (rc) + GOTO(out_obj, rc); + + + if (layout.mlc_opc != MD_LAYOUT_NOP) { + struct lu_buf *buf = &layout.mlc_buf; + + /** + * mdt_layout_change is a reint operation, when the request + * is resent, layout write shouldn't reprocess it again. + */ + rc = mdt_check_resent(info, mdt_reconstruct_generic, lhc); + if (rc) + GOTO(out_obj, rc = rc < 0 ? 
rc : 0); + /** + * There is another resent case: the client's job has been + * done by another client, referring lod_declare_layout_change + * -EALREADY case, and it became a operation w/o transaction, + * so we should not do the layout change, otherwise + * mdt_layout_change() will try to cancel the granted server + * CR lock whose remote counterpart is still in hold on the + * client, and a deadlock ensues. + */ + rc = mdt_check_resent_lock(info, obj, lhc); + if (rc <= 0) + GOTO(out_obj, rc); + + buf->lb_buf = NULL; + buf->lb_len = 0; + if (unlikely(req_is_replay(mdt_info_req(info)))) { + buf->lb_buf = req_capsule_client_get(info->mti_pill, + &RMF_EADATA); + buf->lb_len = req_capsule_get_size(info->mti_pill, + &RMF_EADATA, RCL_CLIENT); + /* + * If it's a replay of layout write intent RPC, the + * client has saved the extended lovea when + * it get reply then. + */ + if (buf->lb_len > 0) + mdt_fix_lov_magic(info, buf->lb_buf); + } + /* + * Instantiate some layout components, if @buf contains + * lovea, then it's a replay of the layout intent write + * RPC. + */ + rc = mdt_layout_change(info, obj, &layout); + if (rc) + GOTO(out_obj, rc); + } out_obj: mdt_object_put(info->mti_env, obj); @@ -3484,13 +3876,13 @@ out_obj: out: lhc->mlh_reg_lh.cookie = 0; - return rc; + RETURN(rc); } -static int mdt_intent_reint(enum mdt_it_code opcode, - struct mdt_thread_info *info, - struct ldlm_lock **lockp, - __u64 flags) +static int mdt_intent_open(enum ldlm_intent_flags it_opc, + struct mdt_thread_info *info, + struct ldlm_lock **lockp, + __u64 flags) { struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_RMT]; struct ldlm_reply *rep = NULL; @@ -3508,12 +3900,6 @@ static int mdt_intent_reint(enum mdt_it_code opcode, if (opc < 0) RETURN(opc); - if (mdt_it_flavor[opcode].it_reint != opc) { - CERROR("Reint code %ld doesn't match intent: %d\n", - opc, opcode); - RETURN(err_serious(-EPROTO)); - } - /* Get lock from request for possible resent case. */ mdt_intent_fixup_resent(info, *lockp, lhc, flags); @@ -3563,74 +3949,68 @@ static int mdt_intent_reint(enum mdt_it_code opcode, RETURN(ELDLM_LOCK_ABORTED); } -static int mdt_intent_code(enum ldlm_intent_flags itcode) -{ +static int mdt_intent_opc(enum ldlm_intent_flags it_opc, + struct mdt_thread_info *info, + struct ldlm_lock **lockp, + u64 flags /* LDLM_FL_* */) +{ + struct req_capsule *pill = info->mti_pill; + struct ptlrpc_request *req = mdt_info_req(info); + const struct req_format *it_format; + int (*it_handler)(enum ldlm_intent_flags, + struct mdt_thread_info *, + struct ldlm_lock **, + u64); + enum tgt_handler_flags it_handler_flags = 0; + struct ldlm_reply *rep; int rc; + ENTRY; - switch (itcode) { + switch (it_opc) { case IT_OPEN: - rc = MDT_IT_OPEN; - break; case IT_OPEN|IT_CREAT: - rc = MDT_IT_OCREAT; - break; - case IT_CREAT: - rc = MDT_IT_CREATE; - break; - case IT_READDIR: - rc = MDT_IT_READDIR; + /* + * OCREAT is not a MUTABOR request since the file may + * already exist. We do the extra check of + * OBD_CONNECT_RDONLY in mdt_reint_open() when we + * really need to create the object. 
+ */ + it_format = &RQF_LDLM_INTENT; + it_handler = &mdt_intent_open; break; case IT_GETATTR: - rc = MDT_IT_GETATTR; - break; case IT_LOOKUP: - rc = MDT_IT_LOOKUP; - break; - case IT_UNLINK: - rc = MDT_IT_UNLINK; - break; - case IT_TRUNC: - rc = MDT_IT_TRUNC; + it_format = &RQF_LDLM_INTENT_GETATTR; + it_handler = &mdt_intent_getattr; + it_handler_flags = HABEO_REFERO; break; case IT_GETXATTR: - rc = MDT_IT_GETXATTR; + it_format = &RQF_LDLM_INTENT_GETXATTR; + it_handler = &mdt_intent_getxattr; + it_handler_flags = HABEO_CORPUS; break; case IT_LAYOUT: - rc = MDT_IT_LAYOUT; + it_format = &RQF_LDLM_INTENT_LAYOUT; + it_handler = &mdt_intent_layout; break; - case IT_QUOTA_DQACQ: - case IT_QUOTA_CONN: - rc = MDT_IT_QUOTA; + case IT_GLIMPSE: + it_format = &RQF_LDLM_INTENT; + it_handler = &mdt_intent_glimpse; break; - default: - CERROR("Unknown intent opcode: 0x%08x\n", itcode); - rc = -EINVAL; + case IT_BRW: + it_format = &RQF_LDLM_INTENT; + it_handler = &mdt_intent_brw; break; - } - return rc; -} - -static int mdt_intent_opc(enum ldlm_intent_flags itopc, - struct mdt_thread_info *info, - struct ldlm_lock **lockp, __u64 flags) -{ - struct req_capsule *pill = info->mti_pill; - struct ptlrpc_request *req = mdt_info_req(info); - struct mdt_it_flavor *flv; - int opc; - int rc; - ENTRY; - - opc = mdt_intent_code(itopc); - if (opc < 0) - RETURN(-EINVAL); - - if (opc == MDT_IT_QUOTA) { + case IT_QUOTA_DQACQ: + case IT_QUOTA_CONN: { struct lu_device *qmt = info->mti_mdt->mdt_qmt_dev; if (qmt == NULL) RETURN(-EOPNOTSUPP); + if (mdt_rdonly(req->rq_export)) + RETURN(-EROFS); + (*lockp)->l_lvb_type = LVB_T_LQUOTA; /* pass the request to quota master */ rc = qmt_hdls.qmth_intent_policy(info->mti_env, qmt, @@ -3638,88 +4018,101 @@ static int mdt_intent_opc(enum ldlm_intent_flags itopc, flags); RETURN(rc); } + default: + CERROR("%s: unknown intent code %#x\n", + mdt_obd_name(info->mti_mdt), it_opc); + RETURN(-EPROTO); + } - flv = &mdt_it_flavor[opc]; - if (flv->it_fmt != NULL) - req_capsule_extend(pill, flv->it_fmt); + req_capsule_extend(pill, it_format); - rc = mdt_unpack_req_pack_rep(info, flv->it_flags); + rc = mdt_unpack_req_pack_rep(info, it_handler_flags); if (rc < 0) RETURN(rc); - if (flv->it_flags & MUTABOR && - exp_connect_flags(req->rq_export) & OBD_CONNECT_RDONLY) + if (it_handler_flags & MUTABOR && mdt_rdonly(req->rq_export)) RETURN(-EROFS); - if (flv->it_act != NULL) { - struct ldlm_reply *rep; - - OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_INTENT_DELAY, 10); + OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_INTENT_DELAY, 10); - /* execute policy */ - rc = flv->it_act(opc, info, lockp, flags); + /* execute policy */ + rc = (*it_handler)(it_opc, info, lockp, flags); - /* Check whether the reply has been packed successfully. */ - if (req->rq_repmsg != NULL) { - rep = req_capsule_server_get(info->mti_pill, - &RMF_DLM_REP); - rep->lock_policy_res2 = - ptlrpc_status_hton(rep->lock_policy_res2); - } + /* Check whether the reply has been packed successfully. 
*/ + if (req->rq_repmsg != NULL) { + rep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP); + rep->lock_policy_res2 = + ptlrpc_status_hton(rep->lock_policy_res2); } RETURN(rc); } -static int mdt_intent_policy(struct ldlm_namespace *ns, - struct ldlm_lock **lockp, void *req_cookie, - enum ldlm_mode mode, __u64 flags, void *data) +static void mdt_ptlrpc_stats_update(struct ptlrpc_request *req, + enum ldlm_intent_flags it_opc) +{ + struct lprocfs_stats *srv_stats = ptlrpc_req2svc(req)->srv_stats; + + /* update stats when IT code is known */ + if (srv_stats != NULL) + lprocfs_counter_incr(srv_stats, + PTLRPC_LAST_CNTR + (it_opc == IT_GLIMPSE ? + LDLM_GLIMPSE_ENQUEUE : LDLM_IBITS_ENQUEUE)); +} + +static int mdt_intent_policy(const struct lu_env *env, + struct ldlm_namespace *ns, + struct ldlm_lock **lockp, + void *req_cookie, + enum ldlm_mode mode, + __u64 flags, void *data) { struct tgt_session_info *tsi; struct mdt_thread_info *info; struct ptlrpc_request *req = req_cookie; struct ldlm_intent *it; struct req_capsule *pill; + const struct ldlm_lock_desc *ldesc; int rc; ENTRY; LASSERT(req != NULL); - tsi = tgt_ses_info(req->rq_svc_thread->t_env); + tsi = tgt_ses_info(env); info = tsi2mdt_info(tsi); - LASSERT(info != NULL); - pill = info->mti_pill; - LASSERT(pill->rc_req == req); + LASSERT(info != NULL); + pill = info->mti_pill; + LASSERT(pill->rc_req == req); + ldesc = &info->mti_dlm_req->lock_desc; - if (req->rq_reqmsg->lm_bufcount > DLM_INTENT_IT_OFF) { + if (req->rq_reqmsg->lm_bufcount > DLM_INTENT_IT_OFF) { req_capsule_extend(pill, &RQF_LDLM_INTENT_BASIC); - it = req_capsule_client_get(pill, &RMF_LDLM_INTENT); - if (it != NULL) { - rc = mdt_intent_opc(it->opc, info, lockp, flags); - if (rc == 0) - rc = ELDLM_OK; - - /* Lock without inodebits makes no sense and will oops - * later in ldlm. Let's check it now to see if we have - * ibits corrupted somewhere in mdt_intent_opc(). - * The case for client miss to set ibits has been - * processed by others. */ - LASSERT(ergo(info->mti_dlm_req->lock_desc.l_resource.\ - lr_type == LDLM_IBITS, - info->mti_dlm_req->lock_desc.\ - l_policy_data.l_inodebits.bits != 0)); - } else - rc = err_serious(-EFAULT); - } else { - /* No intent was provided */ - LASSERT(pill->rc_fmt == &RQF_LDLM_ENQUEUE); + it = req_capsule_client_get(pill, &RMF_LDLM_INTENT); + if (it != NULL) { + mdt_ptlrpc_stats_update(req, it->opc); + rc = mdt_intent_opc(it->opc, info, lockp, flags); + if (rc == 0) + rc = ELDLM_OK; + + /* Lock without inodebits makes no sense and will oops + * later in ldlm. Let's check it now to see if we have + * ibits corrupted somewhere in mdt_intent_opc(). + * The case for client miss to set ibits has been + * processed by others. 
*/ + LASSERT(ergo(ldesc->l_resource.lr_type == LDLM_IBITS, + ldesc->l_policy_data.l_inodebits.bits != 0)); + } else { + rc = err_serious(-EFAULT); + } + } else { + /* No intent was provided */ req_capsule_set_size(pill, &RMF_DLM_LVB, RCL_SERVER, 0); - rc = req_capsule_server_pack(pill); - if (rc) - rc = err_serious(rc); - } + rc = req_capsule_server_pack(pill); + if (rc) + rc = err_serious(rc); + } mdt_thread_info_fini(info); RETURN(rc); } @@ -3995,12 +4388,13 @@ static void mdt_stack_pre_fini(const struct lu_env *env, * by osd only doesn't have mdt/mdd slices -bzzz */ lustre_cfg_bufs_reset(bufs, mdt_obd_name(m)); lustre_cfg_bufs_set_string(bufs, 1, NULL); - lcfg = lustre_cfg_new(LCFG_PRE_CLEANUP, bufs); - if (lcfg == NULL) + OBD_ALLOC(lcfg, lustre_cfg_len(bufs->lcfg_bufcount, bufs->lcfg_buflen)); + if (!lcfg) RETURN_EXIT; + lustre_cfg_init(lcfg, LCFG_PRE_CLEANUP, bufs); top->ld_ops->ldo_process_config(env, top, lcfg); - lustre_cfg_free(lcfg); + OBD_FREE(lcfg, lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens)); EXIT; } @@ -4030,13 +4424,14 @@ static void mdt_stack_fini(const struct lu_env *env, if (obd->obd_fail) strcat(flags, "A"); lustre_cfg_bufs_set_string(bufs, 1, flags); - lcfg = lustre_cfg_new(LCFG_CLEANUP, bufs); - if (lcfg == NULL) + OBD_ALLOC(lcfg, lustre_cfg_len(bufs->lcfg_bufcount, bufs->lcfg_buflen)); + if (!lcfg) RETURN_EXIT; + lustre_cfg_init(lcfg, LCFG_CLEANUP, bufs); LASSERT(top); top->ld_ops->ldo_process_config(env, top, lcfg); - lustre_cfg_free(lcfg); + OBD_FREE(lcfg, lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens)); lu_site_purge(env, top->ld_site, -1); @@ -4156,9 +4551,10 @@ static int mdt_stack_init(const struct lu_env *env, struct mdt_device *mdt, lustre_cfg_bufs_set_string(bufs, 2, uuid); lustre_cfg_bufs_set_string(bufs, 3, lprof->lp_dt); - lcfg = lustre_cfg_new(LCFG_ATTACH, bufs); - if (lcfg == NULL) + OBD_ALLOC(lcfg, lustre_cfg_len(bufs->lcfg_bufcount, bufs->lcfg_buflen)); + if (!lcfg) GOTO(put_profile, rc = -ENOMEM); + lustre_cfg_init(lcfg, LCFG_ATTACH, bufs); rc = class_attach(lcfg); if (rc) @@ -4171,16 +4567,17 @@ static int mdt_stack_init(const struct lu_env *env, struct mdt_device *mdt, GOTO(lcfg_cleanup, rc = -EINVAL); } - lustre_cfg_free(lcfg); + OBD_FREE(lcfg, lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens)); lustre_cfg_bufs_reset(bufs, name); lustre_cfg_bufs_set_string(bufs, 1, uuid); lustre_cfg_bufs_set_string(bufs, 2, dev); lustre_cfg_bufs_set_string(bufs, 3, lprof->lp_dt); - lcfg = lustre_cfg_new(LCFG_SETUP, bufs); - if (lcfg == NULL) + OBD_ALLOC(lcfg, lustre_cfg_len(bufs->lcfg_bufcount, bufs->lcfg_buflen)); + if (!lcfg) GOTO(class_detach, rc = -ENOMEM); + lustre_cfg_init(lcfg, LCFG_SETUP, bufs); rc = class_setup(obd, lcfg); if (rc) @@ -4217,7 +4614,7 @@ class_detach: if (rc) class_detach(obd, lcfg); lcfg_cleanup: - lustre_cfg_free(lcfg); + OBD_FREE(lcfg, lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens)); put_profile: class_put_profile(lprof); free_bufs: @@ -4288,9 +4685,10 @@ static int mdt_quota_init(const struct lu_env *env, struct mdt_device *mdt, lustre_cfg_bufs_set_string(bufs, 2, uuid); lustre_cfg_bufs_set_string(bufs, 3, lprof->lp_dt); - lcfg = lustre_cfg_new(LCFG_ATTACH, bufs); - if (lcfg == NULL) + OBD_ALLOC(lcfg, lustre_cfg_len(bufs->lcfg_bufcount, bufs->lcfg_buflen)); + if (!lcfg) GOTO(put_profile, rc = -ENOMEM); + lustre_cfg_init(lcfg, LCFG_ATTACH, bufs); rc = class_attach(lcfg); if (rc) @@ -4303,7 +4701,7 @@ static int mdt_quota_init(const struct lu_env *env, struct mdt_device *mdt, GOTO(lcfg_cleanup, rc = 
-EINVAL); } - lustre_cfg_free(lcfg); + OBD_FREE(lcfg, lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens)); lustre_cfg_bufs_reset(bufs, qmtname); lustre_cfg_bufs_set_string(bufs, 1, uuid); @@ -4313,9 +4711,10 @@ static int mdt_quota_init(const struct lu_env *env, struct mdt_device *mdt, lustre_cfg_bufs_set_string(bufs, 3, mdt->mdt_bottom->dd_lu_dev.ld_obd->obd_name); - lcfg = lustre_cfg_new(LCFG_SETUP, bufs); - if (lcfg == NULL) + OBD_ALLOC(lcfg, lustre_cfg_len(bufs->lcfg_bufcount, bufs->lcfg_buflen)); + if (!lcfg) GOTO(class_detach, rc = -ENOMEM); + lustre_cfg_init(lcfg, LCFG_SETUP, bufs); rc = class_setup(obd, lcfg); if (rc) @@ -4351,7 +4750,7 @@ class_detach: if (rc) class_detach(obd, lcfg); lcfg_cleanup: - lustre_cfg_free(lcfg); + OBD_FREE(lcfg, lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens)); put_profile: class_put_profile(lprof); cleanup_mem: @@ -4396,6 +4795,11 @@ static int mdt_tgt_getxattr(struct tgt_session_info *tsi) return rc; } +#define OBD_FAIL_OST_READ_NET OBD_FAIL_OST_BRW_NET +#define OBD_FAIL_OST_WRITE_NET OBD_FAIL_OST_BRW_NET +#define OST_BRW_READ OST_READ +#define OST_BRW_WRITE OST_WRITE + static struct tgt_handler mdt_tgt_handlers[] = { TGT_RPC_HANDLER(MDS_FIRST_OPC, 0, MDS_CONNECT, mdt_tgt_connect, @@ -4436,6 +4840,17 @@ TGT_MDT_HDL(HABEO_CLAVIS | HABEO_CORPUS | HABEO_REFERO | MUTABOR, mdt_swap_layouts), }; +static struct tgt_handler mdt_io_ops[] = { +TGT_OST_HDL_HP(HABEO_CORPUS | HABEO_REFERO, OST_BRW_READ, tgt_brw_read, + mdt_hp_brw), +TGT_OST_HDL_HP(HABEO_CORPUS | MUTABOR, OST_BRW_WRITE, tgt_brw_write, + mdt_hp_brw), +TGT_OST_HDL_HP(HABEO_CORPUS | HABEO_REFERO | MUTABOR, + OST_PUNCH, mdt_punch_hdl, + mdt_hp_punch), +TGT_OST_HDL(HABEO_CORPUS | HABEO_REFERO, OST_SYNC, mdt_data_sync), +}; + static struct tgt_handler mdt_sec_ctx_ops[] = { TGT_SEC_HDL_VAR(0, SEC_CTX_INIT, mdt_sec_ctx_handle), TGT_SEC_HDL_VAR(0, SEC_CTX_INIT_CONT,mdt_sec_ctx_handle), @@ -4497,7 +4912,11 @@ static struct tgt_opc_slice mdt_common_slice[] = { .tos_opc_end = LFSCK_LAST_OPC, .tos_hs = tgt_lfsck_handlers }, - + { + .tos_opc_start = OST_FIRST_OPC, + .tos_opc_end = OST_LAST_OPC, + .tos_hs = mdt_io_ops + }, { .tos_hs = NULL } @@ -4505,58 +4924,71 @@ static struct tgt_opc_slice mdt_common_slice[] = { static void mdt_fini(const struct lu_env *env, struct mdt_device *m) { - struct md_device *next = m->mdt_child; - struct lu_device *d = &m->mdt_lu_dev; - struct obd_device *obd = mdt2obd_dev(m); - struct lfsck_stop stop; - ENTRY; - - if (m->mdt_md_root != NULL) { - mdt_object_put(env, m->mdt_md_root); - m->mdt_md_root = NULL; - } + struct md_device *next = m->mdt_child; + struct lu_device *d = &m->mdt_lu_dev; + struct obd_device *obd = mdt2obd_dev(m); + struct lfsck_stop stop; + ENTRY; stop.ls_status = LS_PAUSED; stop.ls_flags = 0; next->md_ops->mdo_iocontrol(env, next, OBD_IOC_STOP_LFSCK, 0, &stop); mdt_stack_pre_fini(env, m, md2lu_dev(m->mdt_child)); - target_recovery_fini(obd); ping_evictor_stop(); - if (m->mdt_opts.mo_coordinator) - mdt_hsm_cdt_stop(m); - - mdt_hsm_cdt_fini(m); + /* Remove the HSM /proc entry so the coordinator cannot be + * restarted by a user while it's shutting down. 
*/ + hsm_cdt_procfs_fini(m); + mdt_hsm_cdt_stop(m); mdt_llog_ctxt_unclone(env, m, LLOG_AGENT_ORIG_CTXT); - mdt_llog_ctxt_unclone(env, m, LLOG_CHANGELOG_ORIG_CTXT); + mdt_llog_ctxt_unclone(env, m, LLOG_CHANGELOG_ORIG_CTXT); if (m->mdt_namespace != NULL) ldlm_namespace_free_prior(m->mdt_namespace, NULL, d->ld_obd->obd_force); - obd_exports_barrier(obd); - obd_zombie_barrier(); + obd_exports_barrier(obd); + obd_zombie_barrier(); + + mdt_quota_fini(env, m); + + cfs_free_nidlist(&m->mdt_squash.rsi_nosquash_nids); + + /* Calling the cleanup functions in the same order as in the mdt_init0 + * error path + */ + mdt_procfs_fini(m); + + target_recovery_fini(obd); + upcall_cache_cleanup(m->mdt_identity_cache); + m->mdt_identity_cache = NULL; + + mdt_fs_cleanup(env, m); + + tgt_fini(env, &m->mdt_lut); - mdt_procfs_fini(m); + mdt_hsm_cdt_fini(m); - tgt_fini(env, &m->mdt_lut); - mdt_fs_cleanup(env, m); - upcall_cache_cleanup(m->mdt_identity_cache); - m->mdt_identity_cache = NULL; + if (m->mdt_los != NULL) { + local_oid_storage_fini(env, m->mdt_los); + m->mdt_los = NULL; + } if (m->mdt_namespace != NULL) { ldlm_namespace_free_post(m->mdt_namespace); d->ld_obd->obd_namespace = m->mdt_namespace = NULL; } - mdt_quota_fini(env, m); + if (m->mdt_md_root != NULL) { + mdt_object_put(env, m->mdt_md_root); + m->mdt_md_root = NULL; + } - cfs_free_nidlist(&m->mdt_squash.rsi_nosquash_nids); + mdt_seq_fini(env, m); - mdt_seq_fini(env, m); - mdt_fld_fini(env, m); + mdt_fld_fini(env, m); /* * Finish the stack @@ -4573,71 +5005,74 @@ static void mdt_fini(const struct lu_env *env, struct mdt_device *m) static int mdt_postrecov(const struct lu_env *, struct mdt_device *); static int mdt_init0(const struct lu_env *env, struct mdt_device *m, - struct lu_device_type *ldt, struct lustre_cfg *cfg) -{ - struct mdt_thread_info *info; - struct obd_device *obd; - const char *dev = lustre_cfg_string(cfg, 0); - const char *num = lustre_cfg_string(cfg, 2); - struct lustre_mount_info *lmi = NULL; - struct lustre_sb_info *lsi; - struct lu_site *s; - struct seq_server_site *ss_site; - const char *identity_upcall = "NONE"; - struct md_device *next; - int rc; - long node_id; - mntopt_t mntopts; - ENTRY; + struct lu_device_type *ldt, struct lustre_cfg *cfg) +{ + const struct dt_device_param *dt_conf; + struct mdt_thread_info *info; + struct obd_device *obd; + const char *dev = lustre_cfg_string(cfg, 0); + const char *num = lustre_cfg_string(cfg, 2); + struct tg_grants_data *tgd = &m->mdt_lut.lut_tgd; + struct lustre_mount_info *lmi = NULL; + struct lustre_sb_info *lsi; + struct lu_site *s; + struct seq_server_site *ss_site; + const char *identity_upcall = "NONE"; + struct md_device *next; + struct lu_fid fid; + int rc; + long node_id; + mntopt_t mntopts; + ENTRY; lu_device_init(&m->mdt_lu_dev, ldt); - /* - * Environment (env) might be missing mdt_thread_key values at that - * point, if device is allocated when mdt_thread_key is in QUIESCENT - * mode. - * - * Usually device allocation path doesn't use module key values, but - * mdt has to do a lot of work here, so allocate key value. - */ - rc = lu_env_refill((struct lu_env *)env); - if (rc != 0) - RETURN(rc); + /* + * Environment (env) might be missing mdt_thread_key values at that + * point, if device is allocated when mdt_thread_key is in QUIESCENT + * mode. + * + * Usually device allocation path doesn't use module key values, but + * mdt has to do a lot of work here, so allocate key value. 
+ */ + rc = lu_env_refill((struct lu_env *)env); + if (rc != 0) + RETURN(rc); - info = lu_context_key_get(&env->le_ctx, &mdt_thread_key); - LASSERT(info != NULL); + info = lu_context_key_get(&env->le_ctx, &mdt_thread_key); + LASSERT(info != NULL); - obd = class_name2obd(dev); - LASSERT(obd != NULL); + obd = class_name2obd(dev); + LASSERT(obd != NULL); - m->mdt_max_mdsize = MAX_MD_SIZE; /* 4 stripes */ + m->mdt_max_mdsize = MAX_MD_SIZE_OLD; m->mdt_opts.mo_evict_tgt_nids = 1; - m->mdt_opts.mo_cos = MDT_COS_DEFAULT; - - /* default is coordinator off, it is started through conf_param - * or /proc */ - m->mdt_opts.mo_coordinator = 0; + m->mdt_opts.mo_cos = MDT_COS_DEFAULT; lmi = server_get_mount(dev); - if (lmi == NULL) { - CERROR("Cannot get mount info for %s!\n", dev); - RETURN(-EFAULT); - } else { - lsi = s2lsi(lmi->lmi_sb); - /* CMD is supported only in IAM mode */ - LASSERT(num); - node_id = simple_strtol(num, NULL, 10); + if (lmi == NULL) { + CERROR("Cannot get mount info for %s!\n", dev); + RETURN(-EFAULT); + } else { + lsi = s2lsi(lmi->lmi_sb); + /* CMD is supported only in IAM mode */ + LASSERT(num); + node_id = simple_strtol(num, NULL, 10); obd->u.obt.obt_magic = OBT_MAGIC; if (lsi->lsi_lmd != NULL && lsi->lsi_lmd->lmd_flags & LMD_FLG_SKIP_LFSCK) m->mdt_skip_lfsck = 1; } + /* DoM files get IO lock at open by default */ + m->mdt_opts.mo_dom_lock = ALWAYS_DOM_LOCK_ON_OPEN; + /* DoM files are read at open and data is packed in the reply */ + m->mdt_opts.mo_dom_read_open = 1; + m->mdt_squash.rsi_uid = 0; m->mdt_squash.rsi_gid = 0; INIT_LIST_HEAD(&m->mdt_squash.rsi_nosquash_nids); init_rwsem(&m->mdt_squash.rsi_sem); spin_lock_init(&m->mdt_lock); - m->mdt_osfs_age = cfs_time_shift_64(-1000); m->mdt_enable_remote_dir = 0; m->mdt_enable_remote_dir_gid = 0; @@ -4662,16 +5097,16 @@ static int mdt_init0(const struct lu_env *env, struct mdt_device *m, s->ld_seq_site = ss_site; ss_site->ss_lu = s; - /* set server index */ + /* set server index */ ss_site->ss_node_id = node_id; /* failover is the default * FIXME: we do not failout mds0/mgs, which may cause some problems. 
* assumed whose ss_node_id == 0 XXX * */ - obd->obd_replayable = 1; - /* No connection accepted until configurations will finish */ - obd->obd_no_conn = 1; + obd->obd_replayable = 1; + /* No connection accepted until configurations will finish */ + obd->obd_no_conn = 1; if (cfg->lcfg_bufcount > 4 && LUSTRE_CFG_BUFLEN(cfg, 4) > 0) { char *str = lustre_cfg_string(cfg, 4); @@ -4691,58 +5126,71 @@ static int mdt_init0(const struct lu_env *env, struct mdt_device *m, snprintf(info->mti_u.ns_name, sizeof(info->mti_u.ns_name), "%s-%s", LUSTRE_MDT_NAME, obd->obd_uuid.uuid); - m->mdt_namespace = ldlm_namespace_new(obd, info->mti_u.ns_name, - LDLM_NAMESPACE_SERVER, - LDLM_NAMESPACE_GREEDY, - LDLM_NS_TYPE_MDT); - if (m->mdt_namespace == NULL) - GOTO(err_fini_seq, rc = -ENOMEM); + m->mdt_namespace = ldlm_namespace_new(obd, info->mti_u.ns_name, + LDLM_NAMESPACE_SERVER, + LDLM_NAMESPACE_GREEDY, + LDLM_NS_TYPE_MDT); + if (m->mdt_namespace == NULL) + GOTO(err_fini_seq, rc = -ENOMEM); m->mdt_namespace->ns_lvbp = m; m->mdt_namespace->ns_lvbo = &mdt_lvbo; - ldlm_register_intent(m->mdt_namespace, mdt_intent_policy); - /* set obd_namespace for compatibility with old code */ - obd->obd_namespace = m->mdt_namespace; - - rc = mdt_hsm_cdt_init(m); - if (rc != 0) { - CERROR("%s: error initializing coordinator, rc %d\n", - mdt_obd_name(m), rc); - GOTO(err_free_ns, rc); - } + ldlm_register_intent(m->mdt_namespace, mdt_intent_policy); + /* set obd_namespace for compatibility with old code */ + obd->obd_namespace = m->mdt_namespace; rc = tgt_init(env, &m->mdt_lut, obd, m->mdt_bottom, mdt_common_slice, OBD_FAIL_MDS_ALL_REQUEST_NET, OBD_FAIL_MDS_ALL_REPLY_NET); if (rc) - GOTO(err_free_hsm, rc); + GOTO(err_free_ns, rc); + + /* Amount of available space excluded from granting and reserved + * for metadata. It is in percentage and 50% is default value. */ + tgd->tgd_reserved_pcnt = 50; + + if (ONE_MB_BRW_SIZE < (1U << tgd->tgd_blockbits)) + m->mdt_brw_size = 1U << tgd->tgd_blockbits; + else + m->mdt_brw_size = ONE_MB_BRW_SIZE; rc = mdt_fs_setup(env, m, obd, lsi); if (rc) GOTO(err_tgt, rc); - tgt_adapt_sptlrpc_conf(&m->mdt_lut, 1); - - next = m->mdt_child; - rc = next->md_ops->mdo_iocontrol(env, next, OBD_IOC_GET_MNTOPT, 0, - &mntopts); - if (rc) + fid.f_seq = FID_SEQ_LOCAL_NAME; + fid.f_oid = 1; + fid.f_ver = 0; + rc = local_oid_storage_init(env, m->mdt_bottom, &fid, &m->mdt_los); + if (rc != 0) GOTO(err_fs_cleanup, rc); - if (mntopts & MNTOPT_USERXATTR) - m->mdt_opts.mo_user_xattr = 1; - else - m->mdt_opts.mo_user_xattr = 0; + rc = mdt_hsm_cdt_init(m); + if (rc != 0) { + CERROR("%s: error initializing coordinator, rc %d\n", + mdt_obd_name(m), rc); + GOTO(err_los_fini, rc); + } - rc = next->md_ops->mdo_maxeasize_get(env, next, &m->mdt_max_ea_size); - if (rc) - GOTO(err_fs_cleanup, rc); + tgt_adapt_sptlrpc_conf(&m->mdt_lut); - if (mntopts & MNTOPT_ACL) - m->mdt_opts.mo_acl = 1; - else - m->mdt_opts.mo_acl = 0; + next = m->mdt_child; + dt_conf = next->md_ops->mdo_dtconf_get(env, next); + + mntopts = dt_conf->ddp_mntopts; + + if (mntopts & MNTOPT_USERXATTR) + m->mdt_opts.mo_user_xattr = 1; + else + m->mdt_opts.mo_user_xattr = 0; + + m->mdt_max_ea_size = dt_conf->ddp_max_ea_size; + + if (mntopts & MNTOPT_ACL) + m->mdt_opts.mo_acl = 1; + else + m->mdt_opts.mo_acl = 0; /* XXX: to support suppgid for ACL, we enable identity_upcall * by default, otherwise, maybe got unexpected -EACCESS. 
*/ @@ -4755,14 +5203,14 @@ static int mdt_init0(const struct lu_env *env, struct mdt_device *m, if (IS_ERR(m->mdt_identity_cache)) { rc = PTR_ERR(m->mdt_identity_cache); m->mdt_identity_cache = NULL; - GOTO(err_fs_cleanup, rc); + GOTO(err_free_hsm, rc); } - rc = mdt_procfs_init(m, dev); - if (rc) { - CERROR("Can't init MDT lprocfs, rc %d\n", rc); - GOTO(err_recovery, rc); - } + rc = mdt_procfs_init(m, dev); + if (rc) { + CERROR("Can't init MDT lprocfs, rc %d\n", rc); + GOTO(err_recovery, rc); + } rc = mdt_quota_init(env, m, cfg); if (rc) @@ -4778,25 +5226,28 @@ static int mdt_init0(const struct lu_env *env, struct mdt_device *m, * when the whole stack is complete and ready * to serve the requests */ - /* Reduce the initial timeout on an MDS because it doesn't need such - * a long timeout as an OST does. Adaptive timeouts will adjust this - * value appropriately. */ - if (ldlm_timeout == LDLM_TIMEOUT_DEFAULT) - ldlm_timeout = MDS_LDLM_TIMEOUT_DEFAULT; + /* Reduce the initial timeout on an MDS because it doesn't need such + * a long timeout as an OST does. Adaptive timeouts will adjust this + * value appropriately. */ + if (ldlm_timeout == LDLM_TIMEOUT_DEFAULT) + ldlm_timeout = MDS_LDLM_TIMEOUT_DEFAULT; - RETURN(0); + RETURN(0); err_procfs: mdt_procfs_fini(m); err_recovery: target_recovery_fini(obd); upcall_cache_cleanup(m->mdt_identity_cache); m->mdt_identity_cache = NULL; +err_free_hsm: + mdt_hsm_cdt_fini(m); +err_los_fini: + local_oid_storage_fini(env, m->mdt_los); + m->mdt_los = NULL; err_fs_cleanup: mdt_fs_cleanup(env, m); err_tgt: tgt_fini(env, &m->mdt_lut); -err_free_hsm: - mdt_hsm_cdt_fini(m); err_free_ns: ldlm_namespace_free(m->mdt_namespace, NULL, 0); obd->obd_namespace = m->mdt_namespace = NULL; @@ -4884,9 +5335,9 @@ static int mdt_process_config(const struct lu_env *env, cfg); } - if (old_cfg != NULL) - lustre_cfg_free(cfg); - + if (old_cfg) + OBD_FREE(cfg, lustre_cfg_len(cfg->lcfg_bufcount, + cfg->lcfg_buflens)); break; } default: @@ -4917,8 +5368,11 @@ static struct lu_object *mdt_object_alloc(const struct lu_env *env, lu_object_add_top(h, o); o->lo_ops = &mdt_obj_ops; spin_lock_init(&mo->mot_write_lock); + mutex_init(&mo->mot_som_mutex); mutex_init(&mo->mot_lov_mutex); + init_rwsem(&mo->mot_dom_sem); init_rwsem(&mo->mot_open_sem); + atomic_set(&mo->mot_open_count, 0); RETURN(o); } RETURN(NULL); @@ -5053,7 +5507,7 @@ static int mdt_obd_set_info_async(const struct lu_env *env, ENTRY; if (KEY_IS(KEY_SPTLRPC_CONF)) { - rc = tgt_adapt_sptlrpc_conf(class_exp2tgt(exp), 0); + rc = tgt_adapt_sptlrpc_conf(class_exp2tgt(exp)); RETURN(rc); } @@ -5073,6 +5527,10 @@ static int mdt_obd_set_info_async(const struct lu_env *env, * connect flags from the obd_connect_data::ocd_connect_flags field of the * reply. \see mdt_connect(). * + * Before 2.7.50 clients will send a struct obd_connect_data_v1 rather than a + * full struct obd_connect_data. So care must be taken when accessing fields + * that are not present in struct obd_connect_data_v1. See LU-16. 
+ * * \param exp the obd_export associated with this client/target pair * \param mdt the target device for the connection * \param data stores data for this connect request @@ -5081,24 +5539,25 @@ static int mdt_obd_set_info_async(const struct lu_env *env, * \retval -EPROTO \a data unexpectedly has zero obd_connect_data::ocd_brw_size * \retval -EBADE client and server feature requirements are incompatible */ -static int mdt_connect_internal(struct obd_export *exp, +static int mdt_connect_internal(const struct lu_env *env, + struct obd_export *exp, struct mdt_device *mdt, - struct obd_connect_data *data) + struct obd_connect_data *data, bool reconnect) { + const char *obd_name = mdt_obd_name(mdt); LASSERT(data != NULL); data->ocd_connect_flags &= MDT_CONNECT_SUPPORTED; - data->ocd_connect_flags2 &= MDT_CONNECT_SUPPORTED2; - data->ocd_ibits_known &= MDS_INODELOCK_FULL; - if (!(data->ocd_connect_flags & OBD_CONNECT_MDS_MDS) && - !(data->ocd_connect_flags & OBD_CONNECT_IBITS)) { - CWARN("%s: client %s does not support ibits lock, either " - "very old or an invalid client: flags "LPX64"\n", - mdt_obd_name(mdt), exp->exp_client_uuid.uuid, - data->ocd_connect_flags); - return -EBADE; - } + if (mdt->mdt_bottom->dd_rdonly && + !(data->ocd_connect_flags & OBD_CONNECT_MDS_MDS) && + !(data->ocd_connect_flags & OBD_CONNECT_RDONLY)) + RETURN(-EACCES); + + if (data->ocd_connect_flags & OBD_CONNECT_FLAGS2) + data->ocd_connect_flags2 &= MDT_CONNECT_SUPPORTED2; + + data->ocd_ibits_known &= MDS_INODELOCK_FULL; if (!mdt->mdt_opts.mo_acl) data->ocd_connect_flags &= ~OBD_CONNECT_ACL; @@ -5106,24 +5565,45 @@ static int mdt_connect_internal(struct obd_export *exp, if (!mdt->mdt_opts.mo_user_xattr) data->ocd_connect_flags &= ~OBD_CONNECT_XATTR; - if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) { + if (OCD_HAS_FLAG(data, BRW_SIZE)) { data->ocd_brw_size = min(data->ocd_brw_size, - (__u32)MD_MAX_BRW_SIZE); + mdt->mdt_brw_size); if (data->ocd_brw_size == 0) { - CERROR("%s: cli %s/%p ocd_connect_flags: "LPX64 - " ocd_version: %x ocd_grant: %d " - "ocd_index: %u ocd_brw_size is " - "unexpectedly zero, network data " - "corruption? Refusing connection of this" - " client\n", - mdt_obd_name(mdt), - exp->exp_client_uuid.uuid, + CERROR("%s: cli %s/%p ocd_connect_flags: %#llx " + "ocd_version: %x ocd_grant: %d ocd_index: %u " + "ocd_brw_size unexpectedly zero, network data " + "corruption? Refusing to connect this client\n", + obd_name, exp->exp_client_uuid.uuid, exp, data->ocd_connect_flags, data->ocd_version, data->ocd_grant, data->ocd_index); return -EPROTO; } } + if (OCD_HAS_FLAG(data, GRANT_PARAM)) { + struct dt_device_param *ddp = &mdt->mdt_lut.lut_dt_conf; + + /* client is reporting its page size, for future use */ + exp->exp_target_data.ted_pagebits = data->ocd_grant_blkbits; + data->ocd_grant_blkbits = mdt->mdt_lut.lut_tgd.tgd_blockbits; + /* ddp_inodespace may not be power-of-two value, eg. for ldiskfs + * it's LDISKFS_DIR_REC_LEN(20) = 28. */ + data->ocd_grant_inobits = fls(ddp->ddp_inodespace - 1); + /* ocd_grant_tax_kb is in 1K byte blocks */ + data->ocd_grant_tax_kb = ddp->ddp_extent_tax >> 10; + data->ocd_grant_max_blks = ddp->ddp_max_extent_blks; + } + + /* Save connect_data we have so far because tgt_grant_connect() + * uses it to calculate grant, and we want to save the client + * version before it is overwritten by LUSTRE_VERSION_CODE. 
*/ + exp->exp_connect_data = *data; + if (OCD_HAS_FLAG(data, GRANT)) + tgt_grant_connect(env, exp, data, !reconnect); + + if (OCD_HAS_FLAG(data, MAXBYTES)) + data->ocd_maxbytes = mdt->mdt_lut.lut_dt_conf.ddp_maxbytes; + /* NB: Disregard the rule against updating * exp_connect_data.ocd_connect_flags in this case, since * tgt_client_new() needs to know if this is a lightweight @@ -5139,7 +5619,7 @@ static int mdt_connect_internal(struct obd_export *exp, if ((data->ocd_connect_flags & OBD_CONNECT_FID) == 0) { CWARN("%s: MDS requires FID support, but client not\n", - mdt_obd_name(mdt)); + obd_name); return -EBADE; } @@ -5167,6 +5647,33 @@ static int mdt_connect_internal(struct obd_export *exp, spin_unlock(&exp->exp_lock); } + if (OCD_HAS_FLAG(data, CKSUM)) { + __u32 cksum_types = data->ocd_cksum_types; + + /* The client set in ocd_cksum_types the checksum types it + * supports. We have to mask off the algorithms that we don't + * support */ + data->ocd_cksum_types &= + obd_cksum_types_supported_server(obd_name); + + if (unlikely(data->ocd_cksum_types == 0)) { + CERROR("%s: Connect with checksum support but no " + "ocd_cksum_types is set\n", + exp->exp_obd->obd_name); + RETURN(-EPROTO); + } + + CDEBUG(D_RPCTRACE, "%s: cli %s supports cksum type %x, return " + "%x\n", exp->exp_obd->obd_name, obd_export_nid2str(exp), + cksum_types, data->ocd_cksum_types); + } else { + /* This client does not support OBD_CONNECT_CKSUM + * fall back to CRC32 */ + CDEBUG(D_RPCTRACE, "%s: cli %s does not support " + "OBD_CONNECT_CKSUM, CRC32 will be used\n", + exp->exp_obd->obd_name, obd_export_nid2str(exp)); + } + return 0; } @@ -5215,7 +5722,7 @@ static int mdt_export_cleanup(struct obd_export *exp) /* Remove mfd handle so it can't be found again. * We are consuming the mfd_list reference here. */ - class_handle_unhash(&mfd->mfd_handle); + class_handle_unhash(&mfd->mfd_open_handle); list_move_tail(&mfd->mfd_list, &closing_list); } spin_unlock(&med->med_open_lock); @@ -5256,7 +5763,7 @@ static int mdt_export_cleanup(struct obd_export *exp) * archive request into a noop if it's not actually * dirty. 
*/ - if (mfd->mfd_mode & FMODE_WRITE) + if (mfd->mfd_open_flags & MDS_FMODE_WRITE) rc = mdt_ctxt_add_dirty_flag(&env, info, mfd); /* Don't unlink orphan on failover umount, LU-184 */ @@ -5291,11 +5798,15 @@ static inline void mdt_disable_slc(struct mdt_device *mdt) static int mdt_obd_disconnect(struct obd_export *exp) { - int rc; - ENTRY; + int rc; + + ENTRY; + + LASSERT(exp); + class_export_get(exp); - LASSERT(exp); - class_export_get(exp); + if (!(exp->exp_flags & OBD_OPT_FORCE)) + tgt_grant_sanity_check(exp->exp_obd, __func__); if ((exp_connect_flags(exp) & OBD_CONNECT_MDS_MDS) && !(exp_connect_flags(exp) & OBD_CONNECT_LIGHTWEIGHT)) { @@ -5309,6 +5820,8 @@ static int mdt_obd_disconnect(struct obd_export *exp) if (rc != 0) CDEBUG(D_IOCTL, "server disconnect error: rc = %d\n", rc); + tgt_grant_discard(exp); + rc = mdt_export_cleanup(exp); nodemap_del_member(exp); class_export_put(exp); @@ -5370,7 +5883,7 @@ static int mdt_obd_connect(const struct lu_env *env, if (rc != 0 && rc != -EEXIST) GOTO(out, rc); - rc = mdt_connect_internal(lexp, mdt, data); + rc = mdt_connect_internal(env, lexp, mdt, data, false); if (rc == 0) { struct lsd_client_data *lcd = lexp->exp_target_data.ted_lcd; @@ -5416,7 +5929,8 @@ static int mdt_obd_reconnect(const struct lu_env *env, if (rc != 0 && rc != -EEXIST) RETURN(rc); - rc = mdt_connect_internal(exp, mdt_dev(obd->obd_lu_dev), data); + rc = mdt_connect_internal(env, exp, mdt_dev(obd->obd_lu_dev), data, + true); if (rc == 0) mdt_export_stats_init(obd, exp, localdata); else @@ -5478,6 +5992,17 @@ static int mdt_destroy_export(struct obd_export *exp) LASSERT(list_empty(&exp->exp_outstanding_replies)); LASSERT(list_empty(&exp->exp_mdt_data.med_open_head)); + /* + * discard grants once we're sure no more + * interaction with the client is possible + */ + tgt_grant_discard(exp); + if (exp_connect_flags(exp) & OBD_CONNECT_GRANT) + exp->exp_obd->u.obt.obt_lut->lut_tgd.tgd_tot_granted_clients--; + + if (!(exp->exp_flags & OBD_OPT_FORCE)) + tgt_grant_sanity_check(exp->exp_obd, __func__); + RETURN(0); } @@ -5509,7 +6034,7 @@ int mdt_links_read(struct mdt_thread_info *info, struct mdt_object *mdt_obj, if (rc < 0) return rc; - return linkea_init(ldata); + return linkea_init_with_rec(ldata); } /** @@ -5563,9 +6088,14 @@ static int mdt_path_current(struct mdt_thread_info *info, lu_fid_eq(&mdt->mdt_md_root_fid, &fp->gf_fid)) GOTO(out, rc = -ENOENT); - mdt_obj = mdt_object_find(info->mti_env, mdt, tmpfid); - if (IS_ERR(mdt_obj)) - GOTO(out, rc = PTR_ERR(mdt_obj)); + if (lu_fid_eq(mdt_object_fid(obj), tmpfid)) { + mdt_obj = obj; + mdt_object_get(info->mti_env, mdt_obj); + } else { + mdt_obj = mdt_object_find(info->mti_env, mdt, tmpfid); + if (IS_ERR(mdt_obj)) + GOTO(out, rc = PTR_ERR(mdt_obj)); + } if (!mdt_object_exists(mdt_obj)) { mdt_object_put(info->mti_env, mdt_obj); @@ -5711,16 +6241,16 @@ static int mdt_fid2path(struct mdt_thread_info *info, int rc; ENTRY; - CDEBUG(D_IOCTL, "path get "DFID" from "LPU64" #%d\n", + CDEBUG(D_IOCTL, "path get "DFID" from %llu #%d\n", PFID(&fp->gf_fid), fp->gf_recno, fp->gf_linkno); if (!fid_is_sane(&fp->gf_fid)) RETURN(-EINVAL); if (!fid_is_namespace_visible(&fp->gf_fid)) { - CWARN("%s: "DFID" is invalid, sequence should be " - ">= "LPX64"\n", mdt_obd_name(mdt), - PFID(&fp->gf_fid), (__u64)FID_SEQ_NORMAL); + CDEBUG(D_INFO, "%s: "DFID" is invalid, f_seq should be >= %#llx" + ", or f_oid != 0, or f_ver == 0\n", mdt_obd_name(mdt), + PFID(&fp->gf_fid), (__u64)FID_SEQ_NORMAL); RETURN(-EINVAL); } @@ -5748,7 +6278,7 @@ static int mdt_fid2path(struct 
mdt_thread_info *info, rc = mdt_path(info, obj, fp, root_fid); - CDEBUG(D_INFO, "fid "DFID", path %s recno "LPX64" linkno %u\n", + CDEBUG(D_INFO, "fid "DFID", path %s recno %#llx linkno %u\n", PFID(&fp->gf_fid), fp->gf_u.gf_path, fp->gf_recno, fp->gf_linkno); @@ -5838,30 +6368,6 @@ int mdt_get_info(struct tgt_session_info *tsi) RETURN(rc); } -/* Pass the ioc down */ -static int mdt_ioc_child(struct lu_env *env, struct mdt_device *mdt, - unsigned int cmd, int len, void *data) -{ - struct lu_context ioctl_session; - struct md_device *next = mdt->mdt_child; - int rc; - ENTRY; - - rc = lu_context_init(&ioctl_session, LCT_SERVER_SESSION); - if (rc) - RETURN(rc); - ioctl_session.lc_thread = (struct ptlrpc_thread *)current; - lu_context_enter(&ioctl_session); - env->le_ses = &ioctl_session; - - LASSERT(next->md_ops->mdo_iocontrol); - rc = next->md_ops->mdo_iocontrol(env, next, cmd, len, data); - - lu_context_exit(&ioctl_session); - lu_context_fini(&ioctl_session); - RETURN(rc); -} - static int mdt_ioc_version_get(struct mdt_thread_info *mti, void *karg) { struct obd_ioctl_data *data = karg; @@ -5943,7 +6449,9 @@ static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len, case OBD_IOC_CHANGELOG_REG: case OBD_IOC_CHANGELOG_DEREG: case OBD_IOC_CHANGELOG_CLEAR: - rc = mdt_ioc_child(&env, mdt, cmd, len, karg); + rc = mdt->mdt_child->md_ops->mdo_iocontrol(&env, + mdt->mdt_child, + cmd, len, karg); break; case OBD_IOC_START_LFSCK: { struct md_device *next = mdt->mdt_child; @@ -6020,7 +6528,7 @@ static int mdt_postrecov(const struct lu_env *env, struct mdt_device *mdt) int rc; ENTRY; - if (!mdt->mdt_skip_lfsck) { + if (!mdt->mdt_skip_lfsck && !mdt->mdt_bottom->dd_rdonly) { struct lfsck_start_param lsp; lsp.lsp_start = NULL; @@ -6060,6 +6568,9 @@ static struct obd_ops mdt_obd_device_ops = { .o_destroy_export = mdt_destroy_export, .o_iocontrol = mdt_iocontrol, .o_postrecov = mdt_obd_postrecov, + /* Data-on-MDT IO methods */ + .o_preprw = mdt_obd_preprw, + .o_commitrw = mdt_obd_commitrw, }; static struct lu_device* mdt_device_fini(const struct lu_env *env, @@ -6120,6 +6631,13 @@ static void mdt_key_fini(const struct lu_context *ctx, info->mti_big_lmm = NULL; info->mti_big_lmmsize = 0; } + + if (info->mti_big_acl) { + OBD_FREE_LARGE(info->mti_big_acl, info->mti_big_aclsize); + info->mti_big_acl = NULL; + info->mti_big_aclsize = 0; + } + OBD_FREE_PTR(info); } @@ -6144,12 +6662,12 @@ struct lu_ucred *mdt_ucred_check(const struct mdt_thread_info *info) * \param mdt mdt device * \param val 0 disables COS, other values enable COS */ -void mdt_enable_cos(struct mdt_device *mdt, int val) +void mdt_enable_cos(struct mdt_device *mdt, bool val) { struct lu_env env; int rc; - mdt->mdt_opts.mo_cos = !!val; + mdt->mdt_opts.mo_cos = val; rc = lu_env_init(&env, LCT_LOCAL); if (unlikely(rc != 0)) { CWARN("%s: lu_env initialization failed, cannot "