X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fmdt%2Fmdt_handler.c;h=3b6918b137d5f0e6489f42210515ff60a321e468;hp=66ad174f7de7f8969f354dc9ef7e2f67c5a1947e;hb=5e11d744becb81e79930b23ea0f998c7e7f07c08;hpb=b1e7be00cb6e1e7b3e35877cf8b8a209e456a2ea;ds=sidebyside diff --git a/lustre/mdt/mdt_handler.c b/lustre/mdt/mdt_handler.c index 66ad174..3b6918b 100644 --- a/lustre/mdt/mdt_handler.c +++ b/lustre/mdt/mdt_handler.c @@ -66,7 +66,6 @@ #include "mdt_internal.h" - static unsigned int max_mod_rpcs_per_client = 8; module_param(max_mod_rpcs_per_client, uint, 0644); MODULE_PARM_DESC(max_mod_rpcs_per_client, "maximum number of modify RPCs in flight allowed per client"); @@ -94,7 +93,6 @@ enum ldlm_mode mdt_dlm_lock_modes[] = { }; static struct mdt_device *mdt_dev(struct lu_device *d); -static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags); static const struct lu_object_operations mdt_obj_ops; @@ -270,18 +268,13 @@ static int mdt_lookup_fileset(struct mdt_thread_info *info, const char *fileset, { struct mdt_device *mdt = info->mti_mdt; struct lu_name *lname = &info->mti_name; - char *name = NULL; + char *filename = info->mti_filename; struct mdt_object *parent; u32 mode; int rc = 0; LASSERT(!info->mti_cross_ref); - OBD_ALLOC(name, NAME_MAX + 1); - if (name == NULL) - return -ENOMEM; - lname->ln_name = name; - /* * We may want to allow this to mount a completely separate * fileset from the MDT in the future, but keeping it to @@ -317,8 +310,9 @@ static int mdt_lookup_fileset(struct mdt_thread_info *info, const char *fileset, break; } - strncpy(name, s1, lname->ln_namelen); - name[lname->ln_namelen] = '\0'; + strncpy(filename, s1, lname->ln_namelen); + filename[lname->ln_namelen] = '\0'; + lname->ln_name = filename; parent = mdt_object_find(info->mti_env, mdt, fid); if (IS_ERR(parent)) { @@ -343,8 +337,6 @@ static int mdt_lookup_fileset(struct mdt_thread_info *info, const char *fileset, } } - OBD_FREE(name, NAME_MAX + 1); - return rc; } @@ -413,13 +405,16 @@ out: static int mdt_statfs(struct tgt_session_info *tsi) { - struct ptlrpc_request *req = tgt_ses_req(tsi); - struct mdt_thread_info *info = tsi2mdt_info(tsi); - struct mdt_device *mdt = info->mti_mdt; - struct tg_grants_data *tgd = &mdt->mdt_lut.lut_tgd; - struct ptlrpc_service_part *svcpt; - struct obd_statfs *osfs; - int rc; + struct ptlrpc_request *req = tgt_ses_req(tsi); + struct mdt_thread_info *info = tsi2mdt_info(tsi); + struct mdt_device *mdt = info->mti_mdt; + struct tg_grants_data *tgd = &mdt->mdt_lut.lut_tgd; + struct md_device *next = mdt->mdt_child; + struct ptlrpc_service_part *svcpt; + struct obd_statfs *osfs; + struct mdt_body *reqbody = NULL; + struct mdt_statfs_cache *msf; + int rc; ENTRY; @@ -441,11 +436,39 @@ static int mdt_statfs(struct tgt_session_info *tsi) if (!osfs) GOTO(out, rc = -EPROTO); - rc = tgt_statfs_internal(tsi->tsi_env, &mdt->mdt_lut, osfs, - ktime_get_seconds() - OBD_STATFS_CACHE_SECONDS, - NULL); - if (unlikely(rc)) - GOTO(out, rc); + if (mdt_is_sum_statfs_client(req->rq_export) && + lustre_packed_msg_size(req->rq_reqmsg) == + req_capsule_fmt_size(req->rq_reqmsg->lm_magic, + &RQF_MDS_STATFS_NEW, RCL_CLIENT)) { + req_capsule_extend(info->mti_pill, &RQF_MDS_STATFS_NEW); + reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY); + } + + if (reqbody && reqbody->mbo_valid & OBD_MD_FLAGSTATFS) + msf = &mdt->mdt_sum_osfs; + else + msf = &mdt->mdt_osfs; + + if (msf->msf_age + OBD_STATFS_CACHE_SECONDS <= ktime_get_seconds()) { + /** statfs data is 
too old, get up-to-date one */ + if (reqbody && reqbody->mbo_valid & OBD_MD_FLAGSTATFS) + rc = next->md_ops->mdo_statfs(info->mti_env, + next, osfs); + else + rc = dt_statfs(info->mti_env, mdt->mdt_bottom, + osfs); + if (rc) + GOTO(out, rc); + spin_lock(&mdt->mdt_lock); + msf->msf_osfs = *osfs; + msf->msf_age = ktime_get_seconds(); + spin_unlock(&mdt->mdt_lock); + } else { + /** use cached statfs data */ + spin_lock(&mdt->mdt_lock); + *osfs = msf->msf_osfs; + spin_unlock(&mdt->mdt_lock); + } /* at least try to account for cached pages. its still racy and * might be under-reporting if clients haven't announced their @@ -490,11 +513,12 @@ out: * Pack size attributes into the reply. */ int mdt_pack_size2body(struct mdt_thread_info *info, - const struct lu_fid *fid, bool dom_lock) + const struct lu_fid *fid, struct lustre_handle *lh) { struct mdt_body *b; struct md_attr *ma = &info->mti_attr; int dom_stripe; + bool dom_lock = false; ENTRY; @@ -509,6 +533,16 @@ int mdt_pack_size2body(struct mdt_thread_info *info, if (dom_stripe == LMM_NO_DOM) RETURN(-ENOENT); + if (lustre_handle_is_used(lh)) { + struct ldlm_lock *lock; + + lock = ldlm_handle2lock(lh); + if (lock != NULL) { + dom_lock = ldlm_has_dom(lock); + LDLM_LOCK_PUT(lock); + } + } + /* no DoM lock, no size in reply */ if (!dom_lock) RETURN(0); @@ -521,7 +555,7 @@ int mdt_pack_size2body(struct mdt_thread_info *info, RETURN(0); } -#ifdef CONFIG_FS_POSIX_ACL +#ifdef CONFIG_LUSTRE_FS_POSIX_ACL /* * Pack ACL data into the reply. UIDs/GIDs are mapped and filtered by nodemap. * @@ -563,25 +597,26 @@ again: exp_connect_large_acl(info->mti_exp) && buf->lb_buf != info->mti_big_acl) { if (info->mti_big_acl == NULL) { + info->mti_big_aclsize = + MIN(mdt->mdt_max_ea_size, + XATTR_SIZE_MAX); OBD_ALLOC_LARGE(info->mti_big_acl, - mdt->mdt_max_ea_size); + info->mti_big_aclsize); if (info->mti_big_acl == NULL) { + info->mti_big_aclsize = 0; CERROR("%s: unable to grow " DFID" ACL buffer\n", mdt_obd_name(mdt), PFID(mdt_object_fid(o))); RETURN(-ENOMEM); } - - info->mti_big_aclsize = - mdt->mdt_max_ea_size; } CDEBUG(D_INODE, "%s: grow the "DFID " ACL buffer to size %d\n", mdt_obd_name(mdt), PFID(mdt_object_fid(o)), - mdt->mdt_max_ea_size); + info->mti_big_aclsize); buf->lb_buf = info->mti_big_acl; buf->lb_len = info->mti_big_aclsize; @@ -766,10 +801,15 @@ void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b, b->mbo_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS; } else if (info->mti_som_valid) { /* som is valid */ b->mbo_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS; + } else if (ma->ma_valid & MA_SOM) { /* lsom is valid */ + b->mbo_valid |= OBD_MD_FLLAZYSIZE | OBD_MD_FLLAZYBLOCKS; + b->mbo_size = ma->ma_som.ms_size; + b->mbo_blocks = ma->ma_som.ms_blocks; } } - if (fid != NULL && (b->mbo_valid & OBD_MD_FLSIZE)) + if (fid != NULL && (b->mbo_valid & OBD_MD_FLSIZE || + b->mbo_valid & OBD_MD_FLLAZYSIZE)) CDEBUG(D_VFSTRACE, DFID": returning size %llu\n", PFID(fid), (unsigned long long)b->mbo_size); @@ -898,13 +938,15 @@ int mdt_stripe_get(struct mdt_thread_info *info, struct mdt_object *o, buf->lb_len = ma->ma_lmv_size; LASSERT(!(ma->ma_valid & MA_LMV)); } else if (strcmp(name, XATTR_NAME_DEFAULT_LMV) == 0) { - buf->lb_buf = ma->ma_lmv; - buf->lb_len = ma->ma_lmv_size; + buf->lb_buf = ma->ma_default_lmv; + buf->lb_len = ma->ma_default_lmv_size; LASSERT(!(ma->ma_valid & MA_LMV_DEF)); } else { return -EINVAL; } + LASSERT(buf->lb_buf); + rc = mo_xattr_get(info->mti_env, next, buf, name); if (rc > 0) { @@ -930,7 +972,7 @@ got: ma->ma_lmv_size = rc; 
ma->ma_valid |= MA_LMV; } else if (strcmp(name, XATTR_NAME_DEFAULT_LMV) == 0) { - ma->ma_lmv_size = rc; + ma->ma_default_lmv_size = rc; ma->ma_valid |= MA_LMV_DEF; } @@ -957,8 +999,8 @@ got: return rc; } -static int mdt_attr_get_pfid(struct mdt_thread_info *info, - struct mdt_object *o, struct lu_fid *pfid) +int mdt_attr_get_pfid(struct mdt_thread_info *info, struct mdt_object *o, + struct lu_fid *pfid) { struct lu_buf *buf = &info->mti_buf; struct link_ea_header *leh; @@ -1028,7 +1070,7 @@ int mdt_attr_get_complex(struct mdt_thread_info *info, GOTO(out, rc); if (S_ISREG(mode)) - (void) mdt_get_som(info, o, &ma->ma_attr); + (void) mdt_get_som(info, o, ma); ma->ma_valid |= MA_INODE; } @@ -1058,11 +1100,20 @@ int mdt_attr_get_complex(struct mdt_thread_info *info, GOTO(out, rc); } + /* + * In the handle of MA_INODE, we may already get the SOM attr. + */ + if (need & MA_SOM && S_ISREG(mode) && !(ma->ma_valid & MA_SOM)) { + rc = mdt_get_som(info, o, ma); + if (rc != 0) + GOTO(out, rc); + } + if (need & MA_HSM && S_ISREG(mode)) { buf->lb_buf = info->mti_xattr_buf; buf->lb_len = sizeof(info->mti_xattr_buf); - CLASSERT(sizeof(struct hsm_attrs) <= - sizeof(info->mti_xattr_buf)); + BUILD_BUG_ON(sizeof(struct hsm_attrs) > + sizeof(info->mti_xattr_buf)); rc2 = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_HSM); rc2 = lustre_buf2hsm(info->mti_xattr_buf, rc2, &ma->ma_hsm); if (rc2 == 0) @@ -1071,7 +1122,7 @@ int mdt_attr_get_complex(struct mdt_thread_info *info, GOTO(out, rc = rc2); } -#ifdef CONFIG_FS_POSIX_ACL +#ifdef CONFIG_LUSTRE_FS_POSIX_ACL if (need & MA_ACL_DEF && S_ISDIR(mode)) { buf->lb_buf = ma->ma_acl; buf->lb_len = ma->ma_acl_size; @@ -1142,25 +1193,55 @@ static int mdt_getattr_internal(struct mdt_thread_info *info, req->rq_export->exp_client_uuid.uuid); } - /* If it is dir object and client require MEA, then we got MEA */ + /* from 2.12.58 intent_getattr pack default LMV in reply */ if (S_ISDIR(lu_object_attr(&next->mo_lu)) && - (reqbody->mbo_valid & (OBD_MD_MEA | OBD_MD_DEFAULT_MEA))) { + ((reqbody->mbo_valid & (OBD_MD_MEA | OBD_MD_DEFAULT_MEA)) == + (OBD_MD_MEA | OBD_MD_DEFAULT_MEA)) && + req_capsule_has_field(&req->rq_pill, &RMF_DEFAULT_MDT_MD, + RCL_SERVER)) { + ma->ma_lmv = buffer->lb_buf; + ma->ma_lmv_size = buffer->lb_len; + ma->ma_default_lmv = req_capsule_server_get(pill, + &RMF_DEFAULT_MDT_MD); + ma->ma_default_lmv_size = req_capsule_get_size(pill, + &RMF_DEFAULT_MDT_MD, + RCL_SERVER); + ma->ma_need = MA_INODE; + if (ma->ma_lmv_size > 0) + ma->ma_need |= MA_LMV; + if (ma->ma_default_lmv_size > 0) + ma->ma_need |= MA_LMV_DEF; + } else if (S_ISDIR(lu_object_attr(&next->mo_lu)) && + (reqbody->mbo_valid & (OBD_MD_MEA | OBD_MD_DEFAULT_MEA))) { + /* If it is dir and client require MEA, then we got MEA */ /* Assumption: MDT_MD size is enough for lmv size. 
*/ ma->ma_lmv = buffer->lb_buf; ma->ma_lmv_size = buffer->lb_len; ma->ma_need = MA_INODE; if (ma->ma_lmv_size > 0) { - if (reqbody->mbo_valid & OBD_MD_MEA) + if (reqbody->mbo_valid & OBD_MD_MEA) { ma->ma_need |= MA_LMV; - else if (reqbody->mbo_valid & OBD_MD_DEFAULT_MEA) + } else if (reqbody->mbo_valid & OBD_MD_DEFAULT_MEA) { ma->ma_need |= MA_LMV_DEF; + ma->ma_default_lmv = buffer->lb_buf; + ma->ma_lmv = NULL; + ma->ma_default_lmv_size = buffer->lb_len; + ma->ma_lmv_size = 0; + } } } else { ma->ma_lmm = buffer->lb_buf; ma->ma_lmm_size = buffer->lb_len; ma->ma_need = MA_INODE | MA_HSM; - if (ma->ma_lmm_size > 0) + if (ma->ma_lmm_size > 0) { ma->ma_need |= MA_LOV; + /* Older clients may crash if they getattr overstriped + * files + */ + if (!exp_connect_overstriping(exp) && + mdt_lmm_is_overstriping(ma->ma_lmm)) + RETURN(-EOPNOTSUPP); + } } if (S_ISDIR(lu_object_attr(&next->mo_lu)) && @@ -1218,8 +1299,13 @@ static int mdt_getattr_internal(struct mdt_thread_info *info, if (!mdt_is_striped_client(req->rq_export)) RETURN(-ENOTSUPP); LASSERT(S_ISDIR(la->la_mode)); - mdt_dump_lmv(D_INFO, ma->ma_lmv); - repbody->mbo_eadatasize = ma->ma_lmv_size; + /* + * when ll_dir_getstripe() gets default LMV, it + * checks mbo_eadatasize. + */ + if (!(ma->ma_valid & MA_LMV)) + repbody->mbo_eadatasize = + ma->ma_default_lmv_size; repbody->mbo_valid |= (OBD_MD_FLDIREA | OBD_MD_DEFAULT_MEA); } @@ -1270,7 +1356,7 @@ static int mdt_getattr_internal(struct mdt_thread_info *info, repbody->mbo_max_mdsize); } -#ifdef CONFIG_FS_POSIX_ACL +#ifdef CONFIG_LUSTRE_FS_POSIX_ACL if ((exp_connect_flags(req->rq_export) & OBD_CONNECT_ACL) && (reqbody->mbo_valid & OBD_MD_FLACL)) { struct lu_nodemap *nodemap = nodemap_get_from_exp(exp); @@ -1299,9 +1385,11 @@ static int mdt_getattr(struct tgt_session_info *tsi) int rc, rc2; ENTRY; - reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY); - LASSERT(reqbody); - LASSERT(obj != NULL); + if (unlikely(info->mti_object == NULL)) + RETURN(-EPROTO); + + reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY); + LASSERT(reqbody); LASSERT(lu_object_assert_exists(&obj->mot_obj)); /* Special case for Data-on-MDT files to get data version */ @@ -1376,41 +1464,54 @@ out: /** * Handler of layout intent RPC requiring the layout modification * - * \param[in] info thread environment - * \param[in] obj object - * \param[in] layout layout change descriptor + * \param[in] info thread environment + * \param[in] obj object + * \param[out] lhc object ldlm lock handle + * \param[in] layout layout change descriptor * * \retval 0 on success * \retval < 0 error code */ int mdt_layout_change(struct mdt_thread_info *info, struct mdt_object *obj, + struct mdt_lock_handle *lhc, struct md_layout_change *layout) { - struct mdt_lock_handle *lh = &info->mti_lh[MDT_LH_LOCAL]; int rc; + ENTRY; if (!mdt_object_exists(obj)) - GOTO(out, rc = -ENOENT); + RETURN(-ENOENT); if (!S_ISREG(lu_object_attr(&obj->mot_obj))) - GOTO(out, rc = -EINVAL); + RETURN(-EINVAL); rc = mo_permission(info->mti_env, NULL, mdt_object_child(obj), NULL, MAY_WRITE); if (rc) - GOTO(out, rc); + RETURN(rc); - /* take layout lock to prepare layout change */ - mdt_lock_reg_init(lh, LCK_EX); - rc = mdt_object_lock(info, obj, lh, MDS_INODELOCK_LAYOUT); - if (rc) - GOTO(out, rc); + rc = mdt_check_resent_lock(info, obj, lhc); + if (rc < 0) + RETURN(rc); + + if (rc > 0) { + /* not resent */ + mdt_lock_handle_init(lhc); + mdt_lock_reg_init(lhc, LCK_EX); + rc = mdt_reint_object_lock(info, obj, lhc, MDS_INODELOCK_LAYOUT, + false); + if (rc) + RETURN(rc); + } + 
mutex_lock(&obj->mot_som_mutex); rc = mo_layout_change(info->mti_env, mdt_object_child(obj), layout); + mutex_unlock(&obj->mot_som_mutex); + + if (rc) + mdt_object_unlock(info, obj, lhc, 1); - mdt_object_unlock(info, obj, lh, 1); -out: RETURN(rc); } @@ -1458,6 +1559,8 @@ static int mdt_swap_layouts(struct tgt_session_info *tsi) RETURN(-EOPNOTSUPP); info = tsi2mdt_info(tsi); + if (unlikely(info->mti_object == NULL)) + RETURN(-EPROTO); if (info->mti_dlm_req != NULL) ldlm_request_cancel(req, info->mti_dlm_req, 0, LATF_SKIP); @@ -1481,12 +1584,12 @@ static int mdt_swap_layouts(struct tgt_session_info *tsi) /* permission check. Make sure the calling process having permission * to write both files. */ rc = mo_permission(info->mti_env, NULL, mdt_object_child(o1), NULL, - MAY_WRITE); + MAY_WRITE); if (rc < 0) GOTO(put, rc); rc = mo_permission(info->mti_env, NULL, mdt_object_child(o2), NULL, - MAY_WRITE); + MAY_WRITE); if (rc < 0) GOTO(put, rc); @@ -1575,6 +1678,7 @@ static int mdt_getattr_name_lock(struct mdt_thread_info *info, struct lu_name *lname = NULL; struct mdt_lock_handle *lhp = NULL; struct ldlm_lock *lock; + struct req_capsule *pill = info->mti_pill; __u64 try_bits = 0; bool is_resent; int ma_need = 0; @@ -1606,12 +1710,15 @@ static int mdt_getattr_name_lock(struct mdt_thread_info *info, mdt_lock_reg_init(lhc, LCK_PR); /* - * Object's name is on another MDS, no lookup or layout - * lock is needed here but update lock is. + * Object's name entry is on another MDS, it will + * request PERM lock only because LOOKUP lock is owned + * by the MDS where name entry resides. + * + * TODO: it should try layout lock too. - Jinshan */ child_bits &= ~(MDS_INODELOCK_LOOKUP | MDS_INODELOCK_LAYOUT); - child_bits |= MDS_INODELOCK_PERM | MDS_INODELOCK_UPDATE; + child_bits |= MDS_INODELOCK_PERM; rc = mdt_object_lock(info, child, lhc, child_bits); if (rc < 0) @@ -1628,21 +1735,26 @@ static int mdt_getattr_name_lock(struct mdt_thread_info *info, } rc = mdt_getattr_internal(info, child, 0); - if (unlikely(rc != 0)) + if (unlikely(rc != 0)) { mdt_object_unlock(info, child, lhc, 1); + RETURN(rc); + } - RETURN(rc); - } + rc = mdt_pack_secctx_in_reply(info, child); + if (unlikely(rc)) + mdt_object_unlock(info, child, lhc, 1); + RETURN(rc); + } lname = &info->mti_name; - mdt_name_unpack(info->mti_pill, &RMF_NAME, lname, MNF_FIX_ANON); + mdt_name_unpack(pill, &RMF_NAME, lname, MNF_FIX_ANON); if (lu_name_is_valid(lname)) { CDEBUG(D_INODE, "getattr with lock for "DFID"/"DNAME", " "ldlm_rep = %p\n", PFID(mdt_object_fid(parent)), PNAME(lname), ldlm_rep); } else { - reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY); + reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY); if (unlikely(reqbody == NULL)) RETURN(err_serious(-EPROTO)); @@ -1745,7 +1857,7 @@ static int mdt_getattr_name_lock(struct mdt_thread_info *info, mdt_lock_reg_init(lhc, LCK_PR); if (!(child_bits & MDS_INODELOCK_UPDATE) && - mdt_object_exists(child) && !mdt_object_remote(child)) { + !mdt_object_remote(child)) { struct md_attr *ma = &info->mti_attr; ma->ma_valid = 0; @@ -1759,12 +1871,12 @@ static int mdt_getattr_name_lock(struct mdt_thread_info *info, * lock and this might save us RPC on later STAT. For * directories, it also let negative dentry cache start * working for this dir. 
*/ - if (ma->ma_valid & MA_INODE && - ma->ma_attr.la_valid & LA_CTIME && - info->mti_mdt->mdt_namespace->ns_ctime_age_limit + - ma->ma_attr.la_ctime < ktime_get_real_seconds()) - child_bits |= MDS_INODELOCK_UPDATE; - } + if (ma->ma_valid & MA_INODE && + ma->ma_attr.la_valid & LA_CTIME && + info->mti_mdt->mdt_namespace->ns_ctime_age_limit + + ma->ma_attr.la_ctime < ktime_get_real_seconds()) + child_bits |= MDS_INODELOCK_UPDATE; + } /* layout lock must be granted in a best-effort way * for IT operations */ @@ -1795,17 +1907,25 @@ static int mdt_getattr_name_lock(struct mdt_thread_info *info, child_bits &= ~MDS_INODELOCK_UPDATE; rc = mdt_object_lock(info, child, lhc, child_bits); } - if (unlikely(rc != 0)) - GOTO(out_child, rc); - } + if (unlikely(rc != 0)) + GOTO(out_child, rc); + } + + /* finally, we can get attr for child. */ + rc = mdt_getattr_internal(info, child, ma_need); + if (unlikely(rc != 0)) { + mdt_object_unlock(info, child, lhc, 1); + GOTO(out_child, rc); + } - lock = ldlm_handle2lock(&lhc->mlh_reg_lh); + rc = mdt_pack_secctx_in_reply(info, child); + if (unlikely(rc)) { + mdt_object_unlock(info, child, lhc, 1); + GOTO(out_child, rc); + } - /* finally, we can get attr for child. */ - rc = mdt_getattr_internal(info, child, ma_need); - if (unlikely(rc != 0)) { - mdt_object_unlock(info, child, lhc, 1); - } else if (lock) { + lock = ldlm_handle2lock(&lhc->mlh_reg_lh); + if (lock) { /* Debugging code. */ LDLM_DEBUG(lock, "Returning lock to client"); LASSERTF(fid_res_name_eq(mdt_object_fid(child), @@ -1815,36 +1935,31 @@ static int mdt_getattr_name_lock(struct mdt_thread_info *info, PFID(mdt_object_fid(child))); if (S_ISREG(lu_object_attr(&child->mot_obj)) && - mdt_object_exists(child) && !mdt_object_remote(child) && - child != parent) { - LDLM_LOCK_PUT(lock); + !mdt_object_remote(child) && child != parent) { mdt_object_put(info->mti_env, child); - /* NB: call the mdt_pack_size2body always after - * mdt_object_put(), that is why this speacial - * exit path is used. */ rc = mdt_pack_size2body(info, child_fid, - child_bits & MDS_INODELOCK_DOM); + &lhc->mlh_reg_lh); if (rc != 0 && child_bits & MDS_INODELOCK_DOM) { /* DOM lock was taken in advance but this is - * not DoM file. Drop the lock. */ + * not DoM file. Drop the lock. 
+ */ lock_res_and_lock(lock); ldlm_inodebits_drop(lock, MDS_INODELOCK_DOM); unlock_res_and_lock(lock); } - + LDLM_LOCK_PUT(lock); GOTO(out_parent, rc = 0); } - } - if (lock) - LDLM_LOCK_PUT(lock); + LDLM_LOCK_PUT(lock); + } - EXIT; + EXIT; out_child: - mdt_object_put(info->mti_env, child); + mdt_object_put(info->mti_env, child); out_parent: - if (lhp) - mdt_object_unlock(info, parent, lhp, 1); - return rc; + if (lhp) + mdt_object_unlock(info, parent, lhp, 1); + return rc; } /* normal handler: should release the child lock */ @@ -1886,6 +2001,219 @@ out_shrink: return rc; } +static int mdt_rmfid_unlink(struct mdt_thread_info *info, + const struct lu_fid *pfid, + const struct lu_name *name, + struct mdt_object *obj, s64 ctime) +{ + struct lu_fid *child_fid = &info->mti_tmp_fid1; + struct ldlm_enqueue_info *einfo = &info->mti_einfo[0]; + struct mdt_device *mdt = info->mti_mdt; + struct md_attr *ma = &info->mti_attr; + struct mdt_lock_handle *parent_lh; + struct mdt_lock_handle *child_lh; + struct mdt_object *pobj; + bool cos_incompat = false; + int rc; + ENTRY; + + pobj = mdt_object_find(info->mti_env, mdt, pfid); + if (IS_ERR(pobj)) + GOTO(out, rc = PTR_ERR(pobj)); + + parent_lh = &info->mti_lh[MDT_LH_PARENT]; + mdt_lock_pdo_init(parent_lh, LCK_PW, name); + rc = mdt_object_lock(info, pobj, parent_lh, MDS_INODELOCK_UPDATE); + if (rc != 0) + GOTO(put_parent, rc); + + if (mdt_object_remote(pobj)) + cos_incompat = true; + + rc = mdo_lookup(info->mti_env, mdt_object_child(pobj), + name, child_fid, &info->mti_spec); + if (rc != 0) + GOTO(unlock_parent, rc); + + if (!lu_fid_eq(child_fid, mdt_object_fid(obj))) + GOTO(unlock_parent, rc = -EREMCHG); + + child_lh = &info->mti_lh[MDT_LH_CHILD]; + mdt_lock_reg_init(child_lh, LCK_EX); + rc = mdt_reint_striped_lock(info, obj, child_lh, + MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE, + einfo, cos_incompat); + if (rc != 0) + GOTO(unlock_parent, rc); + + if (atomic_read(&obj->mot_open_count)) { + CDEBUG(D_OTHER, "object "DFID" open, skip\n", + PFID(mdt_object_fid(obj))); + GOTO(unlock_child, rc = -EBUSY); + } + + ma->ma_need = 0; + ma->ma_valid = MA_INODE; + ma->ma_attr.la_valid = LA_CTIME; + ma->ma_attr.la_ctime = ctime; + + mutex_lock(&obj->mot_lov_mutex); + + rc = mdo_unlink(info->mti_env, mdt_object_child(pobj), + mdt_object_child(obj), name, ma, 0); + + mutex_unlock(&obj->mot_lov_mutex); + +unlock_child: + mdt_reint_striped_unlock(info, obj, child_lh, einfo, 1); +unlock_parent: + mdt_object_unlock(info, pobj, parent_lh, 1); +put_parent: + mdt_object_put(info->mti_env, pobj); +out: + RETURN(rc); +} + +static int mdt_rmfid_check_permission(struct mdt_thread_info *info, + struct mdt_object *obj) +{ + struct lu_ucred *uc = lu_ucred(info->mti_env); + struct md_attr *ma = &info->mti_attr; + struct lu_attr *la = &ma->ma_attr; + int rc = 0; + ENTRY; + + ma->ma_need = MA_INODE; + rc = mo_attr_get(info->mti_env, mdt_object_child(obj), ma); + if (rc) + GOTO(out, rc); + + if (la->la_flags & LUSTRE_IMMUTABLE_FL) + rc = -EACCES; + + if (md_capable(uc, CFS_CAP_DAC_OVERRIDE)) + RETURN(0); + if (uc->uc_fsuid == la->la_uid) { + if ((la->la_mode & S_IWUSR) == 0) + rc = -EACCES; + } else if (uc->uc_fsgid == la->la_gid) { + if ((la->la_mode & S_IWGRP) == 0) + rc = -EACCES; + } else if ((la->la_mode & S_IWOTH) == 0) { + rc = -EACCES; + } + +out: + RETURN(rc); +} + +static int mdt_rmfid_one(struct mdt_thread_info *info, struct lu_fid *fid, + s64 ctime) +{ + struct mdt_device *mdt = info->mti_mdt; + struct mdt_object *obj = NULL; + struct linkea_data ldata = { NULL }; + struct 
lu_buf *buf = &info->mti_big_buf; + struct lu_name *name = &info->mti_name; + struct lu_fid *pfid = &info->mti_tmp_fid1; + struct link_ea_header *leh; + struct link_ea_entry *lee; + int reclen, count, rc = 0; + ENTRY; + + if (!fid_is_sane(fid)) + GOTO(out, rc = -EINVAL); + + if (!fid_is_namespace_visible(fid)) + GOTO(out, rc = -EINVAL); + + obj = mdt_object_find(info->mti_env, mdt, fid); + if (IS_ERR(obj)) + GOTO(out, rc = PTR_ERR(obj)); + + if (mdt_object_remote(obj)) + GOTO(out, rc = -EREMOTE); + if (!mdt_object_exists(obj) || lu_object_is_dying(&obj->mot_header)) + GOTO(out, rc = -ENOENT); + + rc = mdt_rmfid_check_permission(info, obj); + if (rc) + GOTO(out, rc); + + /* take LinkEA */ + buf = lu_buf_check_and_alloc(buf, PATH_MAX); + if (!buf->lb_buf) + GOTO(out, rc = -ENOMEM); + + ldata.ld_buf = buf; + rc = mdt_links_read(info, obj, &ldata); + if (rc) + GOTO(out, rc); + + leh = buf->lb_buf; + lee = (struct link_ea_entry *)(leh + 1); + for (count = 0; count < leh->leh_reccount; count++) { + /* remove every hardlink */ + linkea_entry_unpack(lee, &reclen, name, pfid); + lee = (struct link_ea_entry *) ((char *)lee + reclen); + rc = mdt_rmfid_unlink(info, pfid, name, obj, ctime); + if (rc) + break; + } + +out: + if (obj && !IS_ERR(obj)) + mdt_object_put(info->mti_env, obj); + if (info->mti_big_buf.lb_buf) + lu_buf_free(&info->mti_big_buf); + + RETURN(rc); +} + +static int mdt_rmfid(struct tgt_session_info *tsi) +{ + struct mdt_thread_info *mti = tsi2mdt_info(tsi); + struct mdt_body *reqbody; + struct lu_fid *fids, *rfids; + int bufsize, rc; + __u32 *rcs; + int i, nr; + ENTRY; + + reqbody = req_capsule_client_get(tsi->tsi_pill, &RMF_MDT_BODY); + if (reqbody == NULL) + RETURN(-EPROTO); + bufsize = req_capsule_get_size(tsi->tsi_pill, &RMF_FID_ARRAY, + RCL_CLIENT); + nr = bufsize / sizeof(struct lu_fid); + if (nr * sizeof(struct lu_fid) != bufsize) + RETURN(-EINVAL); + req_capsule_set_size(tsi->tsi_pill, &RMF_RCS, + RCL_SERVER, nr * sizeof(__u32)); + req_capsule_set_size(tsi->tsi_pill, &RMF_FID_ARRAY, + RCL_SERVER, nr * sizeof(struct lu_fid)); + rc = req_capsule_server_pack(tsi->tsi_pill); + if (rc) + GOTO(out, rc = err_serious(rc)); + fids = req_capsule_client_get(tsi->tsi_pill, &RMF_FID_ARRAY); + if (fids == NULL) + RETURN(-EPROTO); + rcs = req_capsule_server_get(tsi->tsi_pill, &RMF_RCS); + LASSERT(rcs); + rfids = req_capsule_server_get(tsi->tsi_pill, &RMF_FID_ARRAY); + LASSERT(rfids); + + mdt_init_ucred(mti, reqbody); + for (i = 0; i < nr; i++) { + rfids[i] = fids[i]; + rcs[i] = mdt_rmfid_one(mti, fids + i, reqbody->mbo_ctime); + } + mdt_exit_ucred(mti); + +out: + RETURN(rc); +} + static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len, void *karg, void __user *uarg); @@ -2040,6 +2368,34 @@ static int mdt_fix_attr_ucred(struct mdt_thread_info *info, __u32 op) return 0; } +static inline bool mdt_is_readonly_open(struct mdt_thread_info *info, __u32 op) +{ + return op == REINT_OPEN && + !(info->mti_spec.sp_cr_flags & (MDS_FMODE_WRITE | MDS_OPEN_CREAT)); +} + +static void mdt_preset_secctx_size(struct mdt_thread_info *info) +{ + struct req_capsule *pill = info->mti_pill; + + if (req_capsule_has_field(pill, &RMF_FILE_SECCTX, + RCL_SERVER) && + req_capsule_has_field(pill, &RMF_FILE_SECCTX_NAME, + RCL_CLIENT)) { + if (req_capsule_get_size(pill, &RMF_FILE_SECCTX_NAME, + RCL_CLIENT) != 0) { + /* pre-set size in server part with max size */ + req_capsule_set_size(pill, &RMF_FILE_SECCTX, + RCL_SERVER, + info->mti_mdt->mdt_max_ea_size); + } else { + req_capsule_set_size(pill, 
&RMF_FILE_SECCTX, + RCL_SERVER, 0); + } + } + +} + static int mdt_reint_internal(struct mdt_thread_info *info, struct mdt_lock_handle *lhc, __u32 op) @@ -2050,19 +2406,25 @@ static int mdt_reint_internal(struct mdt_thread_info *info, ENTRY; - rc = mdt_reint_unpack(info, op); - if (rc != 0) { - CERROR("Can't unpack reint, rc %d\n", rc); - RETURN(err_serious(rc)); - } + rc = mdt_reint_unpack(info, op); + if (rc != 0) { + CERROR("Can't unpack reint, rc %d\n", rc); + RETURN(err_serious(rc)); + } + - /* for replay (no_create) lmm is not needed, client has it already */ - if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER)) - req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, + /* check if the file system is set to readonly. O_RDONLY open + * is still allowed even the file system is set to readonly mode */ + if (mdt_rdonly(info->mti_exp) && !mdt_is_readonly_open(info, op)) + RETURN(err_serious(-EROFS)); + + /* for replay (no_create) lmm is not needed, client has it already */ + if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER)) + req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, DEF_REP_MD_SIZE); /* llog cookies are always 0, the field is kept for compatibility */ - if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER)) + if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER)) req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER, 0); /* Set ACL reply buffer size as LUSTRE_POSIX_ACL_MAX_SIZE_OLD @@ -2072,52 +2434,67 @@ static int mdt_reint_internal(struct mdt_thread_info *info, req_capsule_set_size(pill, &RMF_ACL, RCL_SERVER, LUSTRE_POSIX_ACL_MAX_SIZE_OLD); - rc = req_capsule_server_pack(pill); - if (rc != 0) { - CERROR("Can't pack response, rc %d\n", rc); - RETURN(err_serious(rc)); - } + mdt_preset_secctx_size(info); + + rc = req_capsule_server_pack(pill); + if (rc != 0) { + CERROR("Can't pack response, rc %d\n", rc); + RETURN(err_serious(rc)); + } - if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_SERVER)) { - repbody = req_capsule_server_get(pill, &RMF_MDT_BODY); - LASSERT(repbody); + if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_SERVER)) { + repbody = req_capsule_server_get(pill, &RMF_MDT_BODY); + LASSERT(repbody); repbody->mbo_eadatasize = 0; repbody->mbo_aclsize = 0; - } + } - OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_REINT_DELAY, 10); + OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_REINT_DELAY, 10); - /* for replay no cookkie / lmm need, because client have this already */ - if (info->mti_spec.no_create) - if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER)) - req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, 0); + /* for replay no cookkie / lmm need, because client have this already */ + if (info->mti_spec.no_create) + if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER)) + req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, 0); - rc = mdt_init_ucred_reint(info); - if (rc) - GOTO(out_shrink, rc); + rc = mdt_init_ucred_reint(info); + if (rc) + GOTO(out_shrink, rc); - rc = mdt_fix_attr_ucred(info, op); - if (rc != 0) - GOTO(out_ucred, rc = err_serious(rc)); + rc = mdt_fix_attr_ucred(info, op); + if (rc != 0) + GOTO(out_ucred, rc = err_serious(rc)); rc = mdt_check_resent(info, mdt_reconstruct, lhc); if (rc < 0) { GOTO(out_ucred, rc); } else if (rc == 1) { - DEBUG_REQ(D_INODE, mdt_info_req(info), "resent opt."); + DEBUG_REQ(D_INODE, mdt_info_req(info), "resent opt"); rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg); - GOTO(out_ucred, rc); - } - rc = mdt_reint_rec(info, lhc); - EXIT; + GOTO(out_ucred, rc); + } + rc = mdt_reint_rec(info, lhc); + EXIT; out_ucred: - 
mdt_exit_ucred(info); + mdt_exit_ucred(info); out_shrink: - mdt_client_compatibility(info); - rc2 = mdt_fix_reply(info); - if (rc == 0) - rc = rc2; - return rc; + mdt_client_compatibility(info); + + rc2 = mdt_fix_reply(info); + if (rc == 0) + rc = rc2; + + /* + * Data-on-MDT optimization - read data along with OPEN and return it + * in reply. Do that only if we have both DOM and LAYOUT locks. + */ + if (rc == 0 && op == REINT_OPEN && !req_is_replay(pill->rc_req) && + info->mti_attr.ma_lmm != NULL && + mdt_lmm_dom_entry(info->mti_attr.ma_lmm) == LMM_DOM_ONLY) { + rc = mdt_dom_read_on_open(info, info->mti_mdt, + &lhc->mlh_reg_lh); + } + + return rc; } static long mdt_reint_opcode(struct ptlrpc_request *req, @@ -2159,7 +2536,7 @@ static int mdt_reint(struct tgt_session_info *tsi) [REINT_OPEN] = &RQF_MDS_REINT_OPEN, [REINT_SETXATTR] = &RQF_MDS_REINT_SETXATTR, [REINT_RMENTRY] = &RQF_MDS_REINT_UNLINK, - [REINT_MIGRATE] = &RQF_MDS_REINT_RENAME, + [REINT_MIGRATE] = &RQF_MDS_REINT_MIGRATE, [REINT_RESYNC] = &RQF_MDS_REINT_RESYNC, }; @@ -2183,7 +2560,7 @@ static int mdt_reint(struct tgt_session_info *tsi) } /* this should sync the whole device */ -static int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt) +int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt) { struct dt_device *dt = mdt->mdt_bottom; int rc; @@ -2197,7 +2574,7 @@ static int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt) static int mdt_object_sync(const struct lu_env *env, struct obd_export *exp, struct mdt_object *mo) { - int rc; + int rc = 0; ENTRY; @@ -2208,7 +2585,16 @@ static int mdt_object_sync(const struct lu_env *env, struct obd_export *exp, RETURN(-ESTALE); } - rc = mo_object_sync(env, mdt_object_child(mo)); + if (S_ISREG(lu_object_attr(&mo->mot_obj))) { + struct lu_target *tgt = tgt_ses_info(env)->tsi_tgt; + dt_obj_version_t version; + + version = dt_version_get(env, mdt_obj2dt(mo)); + if (version > tgt->lut_obd->obd_last_committed) + rc = mo_object_sync(env, mdt_object_child(mo)); + } else { + rc = mo_object_sync(env, mdt_object_child(mo)); + } RETURN(rc); } @@ -2230,6 +2616,9 @@ static int mdt_sync(struct tgt_session_info *tsi) } else { struct mdt_thread_info *info = tsi2mdt_info(tsi); + if (unlikely(info->mti_object == NULL)) + RETURN(-EPROTO); + /* sync an object */ rc = mdt_object_sync(tsi->tsi_env, tsi->tsi_exp, info->mti_object); @@ -2335,13 +2724,17 @@ static int mdt_quotactl(struct tgt_session_info *tsi) /* master quotactl */ case Q_SETINFO: case Q_SETQUOTA: + case LUSTRE_Q_SETDEFAULT: if (!nodemap_can_setquota(nodemap)) GOTO(out_nodemap, rc = -EPERM); + /* fallthrough */ case Q_GETINFO: case Q_GETQUOTA: + case LUSTRE_Q_GETDEFAULT: if (qmt == NULL) GOTO(out_nodemap, rc = -EOPNOTSUPP); /* slave quotactl */ + /* fallthrough */ case Q_GETOINFO: case Q_GETOQUOTA: break; @@ -2388,6 +2781,8 @@ static int mdt_quotactl(struct tgt_session_info *tsi) case Q_SETINFO: case Q_SETQUOTA: case Q_GETQUOTA: + case LUSTRE_Q_SETDEFAULT: + case LUSTRE_Q_GETDEFAULT: /* forward quotactl request to QMT */ rc = qmt_hdls.qmth_quotactl(tsi->tsi_env, qmt, oqctl); break; @@ -2595,6 +2990,7 @@ int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, { struct obd_device *obd = ldlm_lock_to_ns(lock)->ns_obd; struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev); + struct ldlm_cb_set_arg *arg = data; bool commit_async = false; int rc; ENTRY; @@ -2607,17 +3003,22 @@ int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, unlock_res_and_lock(lock); RETURN(0); } - /* 
There is no lock conflict if l_blocking_lock == NULL, - * it indicates a blocking ast sent from ldlm_lock_decref_internal - * when the last reference to a local lock was released */ - if (lock->l_req_mode & (LCK_PW | LCK_EX) && - lock->l_blocking_lock != NULL) { + + /* A blocking ast may be sent from ldlm_lock_decref_internal + * when the last reference to a local lock was released and + * during blocking event from ldlm_work_bl_ast_lock(). + * The 'data' parameter is l_ast_data in the first case and + * callback arguments in the second one. Distinguish them by that. + */ + if (!data || data == lock->l_ast_data || !arg->bl_desc) + goto skip_cos_checks; + + if (lock->l_req_mode & (LCK_PW | LCK_EX)) { if (mdt_cos_is_enabled(mdt)) { - if (lock->l_client_cookie != - lock->l_blocking_lock->l_client_cookie) + if (!arg->bl_desc->bl_same_client) mdt_set_lock_sync(lock); } else if (mdt_slc_is_enabled(mdt) && - ldlm_is_cos_incompat(lock->l_blocking_lock)) { + arg->bl_desc->bl_cos_incompat) { mdt_set_lock_sync(lock); /* * we may do extra commit here, but there is a small @@ -2631,11 +3032,11 @@ int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, */ commit_async = true; } - } else if (lock->l_req_mode == LCK_COS && - lock->l_blocking_lock != NULL) { + } else if (lock->l_req_mode == LCK_COS) { commit_async = true; } +skip_cos_checks: rc = ldlm_blocking_ast_nocheck(lock); if (commit_async) { @@ -2761,6 +3162,11 @@ int mdt_check_resent_lock(struct mdt_thread_info *info, return 1; } +static void mdt_remote_object_lock_created_cb(struct ldlm_lock *lock) +{ + mdt_object_get(NULL, lock->l_ast_data); +} + int mdt_remote_object_lock_try(struct mdt_thread_info *mti, struct mdt_object *o, const struct lu_fid *fid, struct lustre_handle *lh, enum ldlm_mode mode, @@ -2789,8 +3195,8 @@ int mdt_remote_object_lock_try(struct mdt_thread_info *mti, * if we cache lock, couple lock with mdt_object, so that object * can be easily found in lock ASTs. 
*/ - mdt_object_get(mti->mti_env, o); einfo->ei_cbdata = o; + einfo->ei_cb_created = mdt_remote_object_lock_created_cb; } memset(policy, 0, sizeof(*policy)); @@ -2799,8 +3205,6 @@ int mdt_remote_object_lock_try(struct mdt_thread_info *mti, rc = mo_object_lock(mti->mti_env, mdt_object_child(o), lh, einfo, policy); - if (rc < 0 && cache) - mdt_object_put(mti->mti_env, o); /* Return successfully acquired bits to a caller */ if (rc == 0) { @@ -2829,7 +3233,7 @@ static int mdt_object_local_lock(struct mdt_thread_info *info, struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace; union ldlm_policy_data *policy = &info->mti_policy; struct ldlm_res_id *res_id = &info->mti_res_id; - __u64 dlmflags = 0; + __u64 dlmflags = 0, *cookie = NULL; int rc; ENTRY; @@ -2849,6 +3253,11 @@ static int mdt_object_local_lock(struct mdt_thread_info *info, /* Only enqueue LOOKUP lock for remote object */ LASSERT(ergo(mdt_object_remote(o), *ibits == MDS_INODELOCK_LOOKUP)); + /* Lease lock are granted with LDLM_FL_CANCEL_ON_BLOCK */ + if (lh->mlh_type == MDT_REG_LOCK && lh->mlh_reg_mode == LCK_EX && + *ibits == MDS_INODELOCK_OPEN) + dlmflags |= LDLM_FL_CANCEL_ON_BLOCK; + if (lh->mlh_type == MDT_PDO_LOCK) { /* check for exists after object is locked */ if (mdt_object_exists(o) == 0) { @@ -2861,10 +3270,12 @@ static int mdt_object_local_lock(struct mdt_thread_info *info, } } - fid_build_reg_res_name(mdt_object_fid(o), res_id); dlmflags |= LDLM_FL_ATOMIC_CB; + if (info->mti_exp) + cookie = &info->mti_exp->exp_handle.h_cookie; + /* * Take PDO lock on whole directory and build correct @res_id for lock * on part of directory. @@ -2878,12 +3289,16 @@ static int mdt_object_local_lock(struct mdt_thread_info *info, * is never going to be sent to client and we do not * want it slowed down due to possible cancels. */ - policy->l_inodebits.bits = MDS_INODELOCK_UPDATE; - policy->l_inodebits.try_bits = 0; - rc = mdt_fid_lock(ns, &lh->mlh_pdo_lh, lh->mlh_pdo_mode, - policy, res_id, dlmflags, - info->mti_exp == NULL ? NULL : - &info->mti_exp->exp_handle.h_cookie); + policy->l_inodebits.bits = + *ibits & MDS_INODELOCK_UPDATE; + policy->l_inodebits.try_bits = + trybits & MDS_INODELOCK_UPDATE; + /* at least one of them should be set */ + LASSERT(policy->l_inodebits.bits | + policy->l_inodebits.try_bits); + rc = mdt_fid_lock(info->mti_env, ns, &lh->mlh_pdo_lh, + lh->mlh_pdo_mode, policy, res_id, + dlmflags, cookie); if (unlikely(rc != 0)) GOTO(out_unlock, rc); } @@ -2903,10 +3318,9 @@ static int mdt_object_local_lock(struct mdt_thread_info *info, * going to be sent to client. If it is - mdt_intent_policy() path will * fix it up and turn FL_LOCAL flag off. */ - rc = mdt_fid_lock(ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, policy, - res_id, LDLM_FL_LOCAL_ONLY | dlmflags, - info->mti_exp == NULL ? 
NULL : - &info->mti_exp->exp_handle.h_cookie); + rc = mdt_fid_lock(info->mti_env, ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, + policy, res_id, LDLM_FL_LOCAL_ONLY | dlmflags, + cookie); out_unlock: if (rc != 0) mdt_object_unlock(info, o, lh, 1); @@ -2964,8 +3378,9 @@ mdt_object_lock_internal(struct mdt_thread_info *info, struct mdt_object *o, * object anyway XXX*/ if (lh->mlh_type == MDT_PDO_LOCK && lh->mlh_pdo_hash != 0) { - CDEBUG(D_INFO, "%s: "DFID" convert PDO lock to" - "EX lock.\n", mdt_obd_name(info->mti_mdt), + CDEBUG(D_INFO, + "%s: "DFID" convert PDO lock to EX lock.\n", + mdt_obd_name(info->mti_mdt), PFID(mdt_object_fid(o))); lh->mlh_pdo_hash = 0; lh->mlh_rreg_mode = LCK_EX; @@ -2983,6 +3398,10 @@ mdt_object_lock_internal(struct mdt_thread_info *info, struct mdt_object *o, } } + /* other components like LFSCK can use lockless access + * and populate cache, so we better invalidate it */ + mo_invalidate(info->mti_env, mdt_object_child(o)); + RETURN(0); } @@ -3189,11 +3608,12 @@ void mdt_object_unlock_put(struct mdt_thread_info * info, * - create lu_object, corresponding to the fid in mdt_body, and save it in * @info; * - * - if HABEO_CORPUS flag is set for this request type check whether object + * - if HAS_BODY flag is set for this request type check whether object * actually exists on storage (lu_object_exists()). * */ -static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags) +static int mdt_body_unpack(struct mdt_thread_info *info, + enum tgt_handler_flags flags) { const struct mdt_body *body; struct mdt_object *obj; @@ -3219,7 +3639,7 @@ static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags) obj = mdt_object_find(env, info->mti_mdt, &body->mbo_fid1); if (!IS_ERR(obj)) { - if ((flags & HABEO_CORPUS) && !mdt_object_exists(obj)) { + if ((flags & HAS_BODY) && !mdt_object_exists(obj)) { mdt_object_put(env, obj); rc = -ENOENT; } else { @@ -3232,23 +3652,25 @@ static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags) RETURN(rc); } -static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags) +static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, + enum tgt_handler_flags flags) { - struct req_capsule *pill = info->mti_pill; - int rc; - ENTRY; + struct req_capsule *pill = info->mti_pill; + int rc; - if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT)) - rc = mdt_body_unpack(info, flags); - else - rc = 0; + ENTRY; + + if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT)) + rc = mdt_body_unpack(info, flags); + else + rc = 0; - if (rc == 0 && (flags & HABEO_REFERO)) { - /* Pack reply. */ - if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER)) - req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, + if (rc == 0 && (flags & HAS_REPLY)) { + /* Pack reply. 
*/ + if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER)) + req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, DEF_REP_MD_SIZE); - if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER)) + if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER)) req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER, 0); @@ -3259,9 +3681,14 @@ static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags) req_capsule_set_size(pill, &RMF_ACL, RCL_SERVER, LUSTRE_POSIX_ACL_MAX_SIZE_OLD); - rc = req_capsule_server_pack(pill); - } - RETURN(rc); + mdt_preset_secctx_size(info); + + rc = req_capsule_server_pack(pill); + if (rc) + CWARN("%s: cannot pack response: rc = %d\n", + mdt_obd_name(info->mti_mdt), rc); + } + RETURN(rc); } void mdt_lock_handle_init(struct mdt_lock_handle *lh) @@ -3370,54 +3797,20 @@ static int mdt_tgt_connect(struct tgt_session_info *tsi) cfs_fail_val == tsi2mdt_info(tsi)->mti_mdt->mdt_seq_site.ss_node_id) { set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(msecs_to_jiffies(3 * MSEC_PER_SEC)); + schedule_timeout(cfs_time_seconds(3)); } return tgt_connect(tsi); } -enum mdt_it_code { - MDT_IT_OPEN, - MDT_IT_OCREAT, - MDT_IT_CREATE, - MDT_IT_GETATTR, - MDT_IT_READDIR, - MDT_IT_LOOKUP, - MDT_IT_UNLINK, - MDT_IT_TRUNC, - MDT_IT_GETXATTR, - MDT_IT_LAYOUT, - MDT_IT_QUOTA, - MDT_IT_GLIMPSE, - MDT_IT_BRW, - MDT_IT_NR -}; - -static int mdt_intent_getattr(enum mdt_it_code opcode, - struct mdt_thread_info *info, - struct ldlm_lock **, __u64); - -static int mdt_intent_getxattr(enum mdt_it_code opcode, - struct mdt_thread_info *info, - struct ldlm_lock **lockp, - __u64 flags); - -static int mdt_intent_layout(enum mdt_it_code opcode, - struct mdt_thread_info *info, - struct ldlm_lock **, - __u64); -static int mdt_intent_reint(enum mdt_it_code opcode, - struct mdt_thread_info *info, - struct ldlm_lock **, - __u64); -static int mdt_intent_glimpse(enum mdt_it_code opcode, +static int mdt_intent_glimpse(enum ldlm_intent_flags it_opc, struct mdt_thread_info *info, struct ldlm_lock **lockp, __u64 flags) { return mdt_glimpse_enqueue(info, info->mti_mdt->mdt_namespace, lockp, flags); } -static int mdt_intent_brw(enum mdt_it_code opcode, +static int mdt_intent_brw(enum ldlm_intent_flags it_opc, struct mdt_thread_info *info, struct ldlm_lock **lockp, __u64 flags) { @@ -3425,68 +3818,6 @@ static int mdt_intent_brw(enum mdt_it_code opcode, lockp, flags); } -static struct mdt_it_flavor { - const struct req_format *it_fmt; - __u32 it_flags; - int (*it_act)(enum mdt_it_code , - struct mdt_thread_info *, - struct ldlm_lock **, - __u64); - long it_reint; -} mdt_it_flavor[] = { - [MDT_IT_OPEN] = { - .it_fmt = &RQF_LDLM_INTENT, - /*.it_flags = HABEO_REFERO,*/ - .it_flags = 0, - .it_act = mdt_intent_reint, - .it_reint = REINT_OPEN - }, - [MDT_IT_OCREAT] = { - .it_fmt = &RQF_LDLM_INTENT, - /* - * OCREAT is not a MUTABOR request as if the file - * already exists. - * We do the extra check of OBD_CONNECT_RDONLY in - * mdt_reint_open() when we really need to create - * the object. 
- */ - .it_flags = 0, - .it_act = mdt_intent_reint, - .it_reint = REINT_OPEN - }, - [MDT_IT_GETATTR] = { - .it_fmt = &RQF_LDLM_INTENT_GETATTR, - .it_flags = HABEO_REFERO, - .it_act = mdt_intent_getattr - }, - [MDT_IT_LOOKUP] = { - .it_fmt = &RQF_LDLM_INTENT_GETATTR, - .it_flags = HABEO_REFERO, - .it_act = mdt_intent_getattr - }, - [MDT_IT_GETXATTR] = { - .it_fmt = &RQF_LDLM_INTENT_GETXATTR, - .it_flags = HABEO_CORPUS, - .it_act = mdt_intent_getxattr - }, - [MDT_IT_LAYOUT] = { - .it_fmt = &RQF_LDLM_INTENT_LAYOUT, - .it_flags = 0, - .it_act = mdt_intent_layout - }, - [MDT_IT_GLIMPSE] = { - .it_fmt = &RQF_LDLM_INTENT, - .it_flags = 0, - .it_act = mdt_intent_glimpse, - }, - [MDT_IT_BRW] = { - .it_fmt = &RQF_LDLM_INTENT, - .it_flags = 0, - .it_act = mdt_intent_brw, - }, - -}; - int mdt_intent_lock_replace(struct mdt_thread_info *info, struct ldlm_lock **lockp, struct mdt_lock_handle *lh, @@ -3612,7 +3943,7 @@ void mdt_intent_fixup_resent(struct mdt_thread_info *info, * If the xid matches, then we know this is a resent request, and allow * it. (It's probably an OPEN, for which we don't send a lock. */ - if (req_can_reconstruct(req, NULL)) + if (req_can_reconstruct(req, NULL) != 0) return; /* @@ -3626,10 +3957,10 @@ void mdt_intent_fixup_resent(struct mdt_thread_info *info, dlmreq->lock_handle[0].cookie); } -static int mdt_intent_getxattr(enum mdt_it_code opcode, - struct mdt_thread_info *info, - struct ldlm_lock **lockp, - __u64 flags) +static int mdt_intent_getxattr(enum ldlm_intent_flags it_opc, + struct mdt_thread_info *info, + struct ldlm_lock **lockp, + __u64 flags) { struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_RMT]; struct ldlm_reply *ldlm_rep = NULL; @@ -3658,7 +3989,10 @@ static int mdt_intent_getxattr(enum mdt_it_code opcode, if (ldlm_rep == NULL || OBD_FAIL_CHECK(OBD_FAIL_MDS_XATTR_REP)) { mdt_object_unlock(info, info->mti_object, lhc, 1); - RETURN(err_serious(-EFAULT)); + if (is_serious(rc)) + RETURN(rc); + else + RETURN(err_serious(-EFAULT)); } ldlm_rep->lock_policy_res2 = clear_serious(rc); @@ -3676,7 +4010,7 @@ static int mdt_intent_getxattr(enum mdt_it_code opcode, RETURN(rc); } -static int mdt_intent_getattr(enum mdt_it_code opcode, +static int mdt_intent_getattr(enum ldlm_intent_flags it_opc, struct mdt_thread_info *info, struct ldlm_lock **lockp, __u64 flags) @@ -3699,18 +4033,19 @@ static int mdt_intent_getattr(enum mdt_it_code opcode, repbody->mbo_eadatasize = 0; repbody->mbo_aclsize = 0; - switch (opcode) { - case MDT_IT_LOOKUP: + switch (it_opc) { + case IT_LOOKUP: child_bits = MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM; - break; - case MDT_IT_GETATTR: + break; + case IT_GETATTR: child_bits = MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE | MDS_INODELOCK_PERM; - break; - default: - CERROR("Unsupported intent (%d)\n", opcode); - GOTO(out_shrink, rc = -EINVAL); - } + break; + default: + CERROR("%s: unsupported intent %#x\n", + mdt_obd_name(info->mti_mdt), (unsigned int)it_opc); + GOTO(out_shrink, rc = -EINVAL); + } rc = mdt_init_ucred_intent_getattr(info, reqbody); if (rc) @@ -3745,25 +4080,22 @@ out_shrink: return rc; } -static int mdt_intent_layout(enum mdt_it_code opcode, +static int mdt_intent_layout(enum ldlm_intent_flags it_opc, struct mdt_thread_info *info, struct ldlm_lock **lockp, __u64 flags) { - struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_LAYOUT]; + struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_RMT]; struct md_layout_change layout = { .mlc_opc = MD_LAYOUT_NOP }; struct layout_intent *intent; + struct ldlm_reply *ldlm_rep; struct lu_fid *fid = 
&info->mti_tmp_fid2; struct mdt_object *obj = NULL; int layout_size = 0; + struct lu_buf *buf = &layout.mlc_buf; int rc = 0; - ENTRY; - if (opcode != MDT_IT_LAYOUT) { - CERROR("%s: Unknown intent (%d)\n", mdt_obd_name(info->mti_mdt), - opcode); - RETURN(-EINVAL); - } + ENTRY; fid_extract_from_res_name(fid, &(*lockp)->l_resource->lr_name); @@ -3790,24 +4122,16 @@ static int mdt_intent_layout(enum mdt_it_code opcode, case LAYOUT_INTENT_RESTORE: CERROR("%s: Unsupported layout intent opc %d\n", mdt_obd_name(info->mti_mdt), intent->li_opc); - rc = -ENOTSUPP; - break; + RETURN(-ENOTSUPP); default: CERROR("%s: Unknown layout intent opc %d\n", mdt_obd_name(info->mti_mdt), intent->li_opc); - rc = -EINVAL; - break; + RETURN(-EINVAL); } - if (rc < 0) - RETURN(rc); - - /* Get lock from request for possible resent case. */ - mdt_intent_fixup_resent(info, *lockp, lhc, flags); obj = mdt_object_find(info->mti_env, info->mti_mdt, fid); if (IS_ERR(obj)) - GOTO(out, rc = PTR_ERR(obj)); - + RETURN(PTR_ERR(obj)); if (mdt_object_exists(obj) && !mdt_object_remote(obj)) { /* if layout is going to be changed don't use the current EA @@ -3819,89 +4143,87 @@ static int mdt_intent_layout(enum mdt_it_code opcode, } else { layout_size = mdt_attr_get_eabuf_size(info, obj); if (layout_size < 0) - GOTO(out_obj, rc = layout_size); + GOTO(out, rc = layout_size); if (layout_size > info->mti_mdt->mdt_max_mdsize) info->mti_mdt->mdt_max_mdsize = layout_size; } + CDEBUG(D_INFO, "%s: layout_size %d\n", + mdt_obd_name(info->mti_mdt), layout_size); } /* * set reply buffer size, so that ldlm_handle_enqueue0()-> * ldlm_lvbo_fill() will fill the reply buffer with lovea. */ - (*lockp)->l_lvb_type = LVB_T_LAYOUT; req_capsule_set_size(info->mti_pill, &RMF_DLM_LVB, RCL_SERVER, layout_size); rc = req_capsule_server_pack(info->mti_pill); if (rc) - GOTO(out_obj, rc); + GOTO(out, rc); + ldlm_rep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP); + if (!ldlm_rep) + GOTO(out, rc = -EPROTO); - if (layout.mlc_opc != MD_LAYOUT_NOP) { - struct lu_buf *buf = &layout.mlc_buf; + mdt_set_disposition(info, ldlm_rep, DISP_IT_EXECD); - /** - * mdt_layout_change is a reint operation, when the request - * is resent, layout write shouldn't reprocess it again. - */ - rc = mdt_check_resent(info, mdt_reconstruct_generic, lhc); - if (rc) - GOTO(out_obj, rc = rc < 0 ? rc : 0); + /* take lock in ldlm_lock_enqueue() for LAYOUT_INTENT_ACCESS */ + if (layout.mlc_opc == MD_LAYOUT_NOP) + GOTO(out, rc = 0); - /** - * There is another resent case: the client's job has been - * done by another client, referring lod_declare_layout_change - * -EALREADY case, and it became a operation w/o transaction, - * so we should not do the layout change, otherwise - * mdt_layout_change() will try to cancel the granted server - * CR lock whose remote counterpart is still in hold on the - * client, and a deadlock ensues. - */ - rc = mdt_check_resent_lock(info, obj, lhc); - if (rc <= 0) - GOTO(out_obj, rc); - - buf->lb_buf = NULL; - buf->lb_len = 0; - if (unlikely(req_is_replay(mdt_info_req(info)))) { - buf->lb_buf = req_capsule_client_get(info->mti_pill, - &RMF_EADATA); - buf->lb_len = req_capsule_get_size(info->mti_pill, - &RMF_EADATA, RCL_CLIENT); - /* - * If it's a replay of layout write intent RPC, the - * client has saved the extended lovea when - * it get reply then. 
- */ - if (buf->lb_len > 0) - mdt_fix_lov_magic(info, buf->lb_buf); - } + rc = mdt_check_resent(info, mdt_reconstruct_generic, lhc); + if (rc < 0) + GOTO(out, rc); + if (rc == 1) { + DEBUG_REQ(D_INODE, mdt_info_req(info), "resent opt."); + rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg); + GOTO(out, rc); + } + + buf->lb_buf = NULL; + buf->lb_len = 0; + if (unlikely(req_is_replay(mdt_info_req(info)))) { + buf->lb_buf = req_capsule_client_get(info->mti_pill, + &RMF_EADATA); + buf->lb_len = req_capsule_get_size(info->mti_pill, + &RMF_EADATA, RCL_CLIENT); /* - * Instantiate some layout components, if @buf contains - * lovea, then it's a replay of the layout intent write - * RPC. + * If it's a replay of layout write intent RPC, the client has + * saved the extended lovea when it get reply then. */ - rc = mdt_layout_change(info, obj, &layout); - if (rc) - GOTO(out_obj, rc); + if (buf->lb_len > 0) + mdt_fix_lov_magic(info, buf->lb_buf); } -out_obj: - mdt_object_put(info->mti_env, obj); - if (rc == 0 && lustre_handle_is_used(&lhc->mlh_reg_lh)) + /* Get lock from request for possible resent case. */ + mdt_intent_fixup_resent(info, *lockp, lhc, flags); + (*lockp)->l_lvb_type = LVB_T_LAYOUT; + + /* + * Instantiate some layout components, if @buf contains lovea, then it's + * a replay of the layout intent write RPC. + */ + rc = mdt_layout_change(info, obj, lhc, &layout); + ldlm_rep->lock_policy_res2 = clear_serious(rc); + + if (lustre_handle_is_used(&lhc->mlh_reg_lh)) { rc = mdt_intent_lock_replace(info, lockp, lhc, flags, rc); + if (rc == ELDLM_LOCK_REPLACED && + (*lockp)->l_granted_mode == LCK_EX) + ldlm_lock_mode_downgrade(*lockp, LCK_CR); + } + EXIT; out: - lhc->mlh_reg_lh.cookie = 0; - + mdt_object_put(info->mti_env, obj); return rc; } -static int mdt_intent_reint(enum mdt_it_code opcode, - struct mdt_thread_info *info, - struct ldlm_lock **lockp, - __u64 flags) +static int mdt_intent_open(enum ldlm_intent_flags it_opc, + struct mdt_thread_info *info, + struct ldlm_lock **lockp, + __u64 flags) { struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_RMT]; struct ldlm_reply *rep = NULL; @@ -3919,22 +4241,20 @@ static int mdt_intent_reint(enum mdt_it_code opcode, if (opc < 0) RETURN(opc); - if (mdt_it_flavor[opcode].it_reint != opc) { - CERROR("Reint code %ld doesn't match intent: %d\n", - opc, opcode); - RETURN(err_serious(-EPROTO)); - } - /* Get lock from request for possible resent case. */ mdt_intent_fixup_resent(info, *lockp, lhc, flags); rc = mdt_reint_internal(info, lhc, opc); - /* Check whether the reply has been packed successfully. */ - if (mdt_info_req(info)->rq_repmsg != NULL) - rep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP); - if (rep == NULL) - RETURN(err_serious(-EFAULT)); + /* Check whether the reply has been packed successfully. 
*/ + if (mdt_info_req(info)->rq_repmsg != NULL) + rep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP); + if (rep == NULL) { + if (is_serious(rc)) + RETURN(rc); + else + RETURN(err_serious(-EFAULT)); + } /* MDC expects this in any case */ if (rc != 0) @@ -3974,75 +4294,64 @@ static int mdt_intent_reint(enum mdt_it_code opcode, RETURN(ELDLM_LOCK_ABORTED); } -static int mdt_intent_code(enum ldlm_intent_flags itcode) +static int mdt_intent_opc(enum ldlm_intent_flags it_opc, + struct mdt_thread_info *info, + struct ldlm_lock **lockp, + u64 flags /* LDLM_FL_* */) { + struct req_capsule *pill = info->mti_pill; + struct ptlrpc_request *req = mdt_info_req(info); + const struct req_format *it_format; + int (*it_handler)(enum ldlm_intent_flags, + struct mdt_thread_info *, + struct ldlm_lock **, + u64); + enum tgt_handler_flags it_handler_flags = 0; + struct ldlm_reply *rep; + bool check_mdt_object = false; int rc; + ENTRY; - switch (itcode) { + switch (it_opc) { case IT_OPEN: - rc = MDT_IT_OPEN; - break; case IT_OPEN|IT_CREAT: - rc = MDT_IT_OCREAT; - break; - case IT_CREAT: - rc = MDT_IT_CREATE; - break; - case IT_READDIR: - rc = MDT_IT_READDIR; + /* + * OCREAT is not a IS_MUTABLE request since the file may + * already exist. We do the extra check of + * OBD_CONNECT_RDONLY in mdt_reint_open() when we + * really need to create the object. + */ + it_format = &RQF_LDLM_INTENT; + it_handler = &mdt_intent_open; break; case IT_GETATTR: - rc = MDT_IT_GETATTR; - break; + check_mdt_object = true; + /* fallthrough */ case IT_LOOKUP: - rc = MDT_IT_LOOKUP; - break; - case IT_UNLINK: - rc = MDT_IT_UNLINK; - break; - case IT_TRUNC: - rc = MDT_IT_TRUNC; + it_format = &RQF_LDLM_INTENT_GETATTR; + it_handler = &mdt_intent_getattr; + it_handler_flags = HAS_REPLY; break; case IT_GETXATTR: - rc = MDT_IT_GETXATTR; + check_mdt_object = true; + it_format = &RQF_LDLM_INTENT_GETXATTR; + it_handler = &mdt_intent_getxattr; + it_handler_flags = HAS_BODY; break; case IT_LAYOUT: - rc = MDT_IT_LAYOUT; - break; - case IT_QUOTA_DQACQ: - case IT_QUOTA_CONN: - rc = MDT_IT_QUOTA; + it_format = &RQF_LDLM_INTENT_LAYOUT; + it_handler = &mdt_intent_layout; break; case IT_GLIMPSE: - rc = MDT_IT_GLIMPSE; + it_format = &RQF_LDLM_INTENT; + it_handler = &mdt_intent_glimpse; break; case IT_BRW: - rc = MDT_IT_BRW; - break; - default: - CERROR("Unknown intent opcode: 0x%08x\n", itcode); - rc = -EINVAL; + it_format = &RQF_LDLM_INTENT; + it_handler = &mdt_intent_brw; break; - } - return rc; -} - -static int mdt_intent_opc(enum ldlm_intent_flags itopc, - struct mdt_thread_info *info, - struct ldlm_lock **lockp, __u64 flags) -{ - struct req_capsule *pill = info->mti_pill; - struct ptlrpc_request *req = mdt_info_req(info); - struct mdt_it_flavor *flv; - int opc; - int rc; - ENTRY; - - opc = mdt_intent_code(itopc); - if (opc < 0) - RETURN(-EINVAL); - - if (opc == MDT_IT_QUOTA) { + case IT_QUOTA_DQACQ: + case IT_QUOTA_CONN: { struct lu_device *qmt = info->mti_mdt->mdt_qmt_dev; if (qmt == NULL) @@ -4058,37 +4367,34 @@ static int mdt_intent_opc(enum ldlm_intent_flags itopc, flags); RETURN(rc); } - - flv = &mdt_it_flavor[opc]; - - /* Fail early on unknown requests. 
*/ - if (flv->it_fmt == NULL) + default: + CERROR("%s: unknown intent code %#x\n", + mdt_obd_name(info->mti_mdt), it_opc); RETURN(-EPROTO); + } - req_capsule_extend(pill, flv->it_fmt); + req_capsule_extend(pill, it_format); - rc = mdt_unpack_req_pack_rep(info, flv->it_flags); + rc = mdt_unpack_req_pack_rep(info, it_handler_flags); if (rc < 0) RETURN(rc); - if (flv->it_flags & MUTABOR && mdt_rdonly(req->rq_export)) - RETURN(-EROFS); + if (unlikely(info->mti_object == NULL && check_mdt_object)) + RETURN(-EPROTO); - if (flv->it_act != NULL) { - struct ldlm_reply *rep; + if (it_handler_flags & IS_MUTABLE && mdt_rdonly(req->rq_export)) + RETURN(-EROFS); - OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_INTENT_DELAY, 10); + OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_INTENT_DELAY, 10); - /* execute policy */ - rc = flv->it_act(opc, info, lockp, flags); + /* execute policy */ + rc = (*it_handler)(it_opc, info, lockp, flags); - /* Check whether the reply has been packed successfully. */ - if (req->rq_repmsg != NULL) { - rep = req_capsule_server_get(info->mti_pill, - &RMF_DLM_REP); - rep->lock_policy_res2 = - ptlrpc_status_hton(rep->lock_policy_res2); - } + /* Check whether the reply has been packed successfully. */ + if (req->rq_repmsg != NULL) { + rep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP); + rep->lock_policy_res2 = + ptlrpc_status_hton(rep->lock_policy_res2); } RETURN(rc); @@ -4106,9 +4412,12 @@ static void mdt_ptlrpc_stats_update(struct ptlrpc_request *req, LDLM_GLIMPSE_ENQUEUE : LDLM_IBITS_ENQUEUE)); } -static int mdt_intent_policy(struct ldlm_namespace *ns, - struct ldlm_lock **lockp, void *req_cookie, - enum ldlm_mode mode, __u64 flags, void *data) +static int mdt_intent_policy(const struct lu_env *env, + struct ldlm_namespace *ns, + struct ldlm_lock **lockp, + void *req_cookie, + enum ldlm_mode mode, + __u64 flags, void *data) { struct tgt_session_info *tsi; struct mdt_thread_info *info; @@ -4122,7 +4431,7 @@ static int mdt_intent_policy(struct ldlm_namespace *ns, LASSERT(req != NULL); - tsi = tgt_ses_info(req->rq_svc_thread->t_env); + tsi = tgt_ses_info(env); info = tsi2mdt_info(tsi); LASSERT(info != NULL); @@ -4766,9 +5075,12 @@ static int mdt_quota_init(const struct lu_env *env, struct mdt_device *mdt, mdt->mdt_qmt_dev = obd->obd_lu_dev; /* configure local quota objects */ - rc = mdt->mdt_qmt_dev->ld_ops->ldo_prepare(env, - &mdt->mdt_lu_dev, - mdt->mdt_qmt_dev); + if (OBD_FAIL_CHECK(OBD_FAIL_QUOTA_INIT)) + rc = -EBADF; + else + rc = mdt->mdt_qmt_dev->ld_ops->ldo_prepare(env, + &mdt->mdt_lu_dev, + mdt->mdt_qmt_dev); if (rc) GOTO(class_cleanup, rc); @@ -4788,6 +5100,7 @@ class_cleanup: if (rc) { class_manual_cleanup(obd); mdt->mdt_qmt_dev = NULL; + GOTO(lcfg_cleanup, rc); } class_detach: if (rc) @@ -4832,6 +5145,9 @@ static int mdt_tgt_getxattr(struct tgt_session_info *tsi) struct mdt_thread_info *info = tsi2mdt_info(tsi); int rc; + if (unlikely(info->mti_object == NULL)) + return -EPROTO; + rc = mdt_getxattr(info); mdt_thread_info_fini(info); @@ -4851,47 +5167,48 @@ TGT_RPC_HANDLER(MDS_FIRST_OPC, 0, MDS_DISCONNECT, tgt_disconnect, &RQF_MDS_DISCONNECT, LUSTRE_OBD_VERSION), TGT_RPC_HANDLER(MDS_FIRST_OPC, - HABEO_REFERO, MDS_SET_INFO, mdt_set_info, + HAS_REPLY, MDS_SET_INFO, mdt_set_info, &RQF_OBD_SET_INFO, LUSTRE_MDS_VERSION), TGT_MDT_HDL(0, MDS_GET_INFO, mdt_get_info), -TGT_MDT_HDL(0 | HABEO_REFERO, MDS_GET_ROOT, mdt_get_root), -TGT_MDT_HDL(HABEO_CORPUS, MDS_GETATTR, mdt_getattr), -TGT_MDT_HDL(HABEO_CORPUS| HABEO_REFERO, MDS_GETATTR_NAME, +TGT_MDT_HDL(HAS_REPLY, MDS_GET_ROOT, mdt_get_root), 
+TGT_MDT_HDL(HAS_BODY, MDS_GETATTR, mdt_getattr),
+TGT_MDT_HDL(HAS_BODY | HAS_REPLY, MDS_GETATTR_NAME,
 	    mdt_getattr_name),
-TGT_MDT_HDL(HABEO_CORPUS, MDS_GETXATTR, mdt_tgt_getxattr),
-TGT_MDT_HDL(0 | HABEO_REFERO, MDS_STATFS, mdt_statfs),
-TGT_MDT_HDL(0 | MUTABOR, MDS_REINT, mdt_reint),
-TGT_MDT_HDL(HABEO_CORPUS, MDS_CLOSE, mdt_close),
-TGT_MDT_HDL(HABEO_CORPUS| HABEO_REFERO, MDS_READPAGE, mdt_readpage),
-TGT_MDT_HDL(HABEO_CORPUS| HABEO_REFERO, MDS_SYNC, mdt_sync),
+TGT_MDT_HDL(HAS_BODY, MDS_GETXATTR, mdt_tgt_getxattr),
+TGT_MDT_HDL(HAS_REPLY, MDS_STATFS, mdt_statfs),
+TGT_MDT_HDL(IS_MUTABLE, MDS_REINT, mdt_reint),
+TGT_MDT_HDL(HAS_BODY, MDS_CLOSE, mdt_close),
+TGT_MDT_HDL(HAS_BODY | HAS_REPLY, MDS_READPAGE, mdt_readpage),
+TGT_MDT_HDL(HAS_BODY | HAS_REPLY, MDS_SYNC, mdt_sync),
 TGT_MDT_HDL(0, MDS_QUOTACTL, mdt_quotactl),
-TGT_MDT_HDL(HABEO_CORPUS| HABEO_REFERO | MUTABOR, MDS_HSM_PROGRESS,
+TGT_MDT_HDL(HAS_BODY | HAS_REPLY | IS_MUTABLE, MDS_HSM_PROGRESS,
 	    mdt_hsm_progress),
-TGT_MDT_HDL(HABEO_CORPUS| HABEO_REFERO | MUTABOR, MDS_HSM_CT_REGISTER,
+TGT_MDT_HDL(HAS_BODY | HAS_REPLY | IS_MUTABLE, MDS_HSM_CT_REGISTER,
 	    mdt_hsm_ct_register),
-TGT_MDT_HDL(HABEO_CORPUS| HABEO_REFERO | MUTABOR, MDS_HSM_CT_UNREGISTER,
+TGT_MDT_HDL(HAS_BODY | HAS_REPLY | IS_MUTABLE, MDS_HSM_CT_UNREGISTER,
 	    mdt_hsm_ct_unregister),
-TGT_MDT_HDL(HABEO_CORPUS| HABEO_REFERO, MDS_HSM_STATE_GET,
+TGT_MDT_HDL(HAS_BODY | HAS_REPLY, MDS_HSM_STATE_GET,
 	    mdt_hsm_state_get),
-TGT_MDT_HDL(HABEO_CORPUS| HABEO_REFERO | MUTABOR, MDS_HSM_STATE_SET,
+TGT_MDT_HDL(HAS_BODY | HAS_REPLY | IS_MUTABLE, MDS_HSM_STATE_SET,
 	    mdt_hsm_state_set),
-TGT_MDT_HDL(HABEO_CORPUS| HABEO_REFERO, MDS_HSM_ACTION, mdt_hsm_action),
-TGT_MDT_HDL(HABEO_CORPUS| HABEO_REFERO, MDS_HSM_REQUEST,
+TGT_MDT_HDL(HAS_BODY | HAS_REPLY, MDS_HSM_ACTION, mdt_hsm_action),
+TGT_MDT_HDL(HAS_BODY | HAS_REPLY, MDS_HSM_REQUEST,
 	    mdt_hsm_request),
-TGT_MDT_HDL(HABEO_CLAVIS | HABEO_CORPUS | HABEO_REFERO | MUTABOR,
+TGT_MDT_HDL(HAS_KEY | HAS_BODY | HAS_REPLY | IS_MUTABLE,
 	    MDS_SWAP_LAYOUTS, mdt_swap_layouts),
+TGT_MDT_HDL(IS_MUTABLE, MDS_RMFID, mdt_rmfid),
 };
 
 static struct tgt_handler mdt_io_ops[] = {
-TGT_OST_HDL_HP(HABEO_CORPUS | HABEO_REFERO, OST_BRW_READ, tgt_brw_read,
+TGT_OST_HDL_HP(HAS_BODY | HAS_REPLY, OST_BRW_READ, tgt_brw_read,
 	       mdt_hp_brw),
-TGT_OST_HDL_HP(HABEO_CORPUS | MUTABOR, OST_BRW_WRITE, tgt_brw_write,
+TGT_OST_HDL_HP(HAS_BODY | IS_MUTABLE, OST_BRW_WRITE, tgt_brw_write,
 	       mdt_hp_brw),
-TGT_OST_HDL_HP(HABEO_CORPUS | HABEO_REFERO | MUTABOR,
+TGT_OST_HDL_HP(HAS_BODY | HAS_REPLY | IS_MUTABLE,
 	       OST_PUNCH, mdt_punch_hdl,
 	       mdt_hp_punch),
-TGT_OST_HDL(HABEO_CORPUS | HABEO_REFERO, OST_SYNC, mdt_data_sync),
+TGT_OST_HDL(HAS_BODY | HAS_REPLY, OST_SYNC, mdt_data_sync),
 };
 
 static struct tgt_handler mdt_sec_ctx_ops[] = {
@@ -4901,7 +5218,7 @@ TGT_SEC_HDL_VAR(0, SEC_CTX_FINI, mdt_sec_ctx_handle)
 };
 
 static struct tgt_handler mdt_quota_ops[] = {
-TGT_QUOTA_HDL(HABEO_REFERO, QUOTA_DQACQ, mdt_quota_dqacq),
+TGT_QUOTA_HDL(HAS_REPLY, QUOTA_DQACQ, mdt_quota_dqacq),
 };
 
 static struct tgt_opc_slice mdt_common_slice[] = {
@@ -4981,8 +5298,8 @@ static void mdt_fini(const struct lu_env *env, struct mdt_device *m)
 	ping_evictor_stop();
 
 	/* Remove the HSM /proc entry so the coordinator cannot be
-	 * restarted by a user while it's shutting down. */
-	hsm_cdt_procfs_fini(m);
+	 * restarted by a user while it's shutting down.
+	 */
 	mdt_hsm_cdt_stop(m);
 
 	mdt_llog_ctxt_unclone(env, m, LLOG_AGENT_ORIG_CTXT);
@@ -5002,7 +5319,7 @@ static void mdt_fini(const struct lu_env *env, struct mdt_device *m)
 
 	/* Calling the cleanup functions in the same order as in the mdt_init0
 	 * error path */
-	mdt_procfs_fini(m);
+	mdt_tunables_fini(m);
 
 	target_recovery_fini(obd);
 	upcall_cache_cleanup(m->mdt_identity_cache);
@@ -5087,7 +5404,7 @@ static int mdt_init0(const struct lu_env *env, struct mdt_device *m,
 	obd = class_name2obd(dev);
 	LASSERT(obd != NULL);
 
-	m->mdt_max_mdsize = MAX_MD_SIZE; /* 4 stripes */
+	m->mdt_max_mdsize = MAX_MD_SIZE_OLD;
 
 	m->mdt_opts.mo_evict_tgt_nids = 1;
 	m->mdt_opts.mo_cos = MDT_COS_DEFAULT;
@@ -5107,15 +5424,21 @@ static int mdt_init0(const struct lu_env *env, struct mdt_device *m,
 	}
 
 	/* DoM files get IO lock at open by default */
-	m->mdt_opts.mo_dom_lock = 1;
+	m->mdt_opts.mo_dom_lock = ALWAYS_DOM_LOCK_ON_OPEN;
+	/* DoM files are read at open and data is packed in the reply */
+	m->mdt_opts.mo_dom_read_open = 1;
 
 	m->mdt_squash.rsi_uid = 0;
 	m->mdt_squash.rsi_gid = 0;
 	INIT_LIST_HEAD(&m->mdt_squash.rsi_nosquash_nids);
-	init_rwsem(&m->mdt_squash.rsi_sem);
+	spin_lock_init(&m->mdt_squash.rsi_lock);
 	spin_lock_init(&m->mdt_lock);
-	m->mdt_enable_remote_dir = 0;
+	m->mdt_enable_remote_dir = 1;
+	m->mdt_enable_striped_dir = 1;
+	m->mdt_enable_dir_migration = 1;
 	m->mdt_enable_remote_dir_gid = 0;
+	m->mdt_enable_chprojid_gid = 0;
+	m->mdt_enable_remote_rename = 1;
 
 	atomic_set(&m->mdt_mds_mds_conns, 0);
 	atomic_set(&m->mdt_async_commit_count, 0);
@@ -5247,7 +5570,7 @@ static int mdt_init0(const struct lu_env *env, struct mdt_device *m,
 		GOTO(err_free_hsm, rc);
 	}
 
-	rc = mdt_procfs_init(m, dev);
+	rc = mdt_tunables_init(m, dev);
 	if (rc) {
 		CERROR("Can't init MDT lprocfs, rc %d\n", rc);
 		GOTO(err_recovery, rc);
@@ -5275,9 +5598,8 @@ static int mdt_init0(const struct lu_env *env, struct mdt_device *m,
 	RETURN(0);
 
 err_procfs:
-	mdt_procfs_fini(m);
+	mdt_tunables_fini(m);
 err_recovery:
-	target_recovery_fini(obd);
 	upcall_cache_cleanup(m->mdt_identity_cache);
 	m->mdt_identity_cache = NULL;
 err_free_hsm:
@@ -5288,6 +5610,11 @@ err_los_fini:
 err_fs_cleanup:
 	mdt_fs_cleanup(env, m);
 err_tgt:
+	/* keep recoverable clients */
+	obd->obd_fail = 1;
+	target_recovery_fini(obd);
+	obd_exports_barrier(obd);
+	obd_zombie_barrier();
 	tgt_fini(env, &m->mdt_lut);
 err_free_ns:
 	ldlm_namespace_free(m->mdt_namespace, NULL, 0);
@@ -5329,12 +5656,12 @@ static int mdt_process_config(const struct lu_env *env,
 
 	switch (cfg->lcfg_command) {
 	case LCFG_PARAM: {
-		struct obd_device *obd = d->ld_obd;
-
+		struct obd_device *obd = d->ld_obd;
 		/* For interoperability */
-		struct cfg_interop_param *ptr = NULL;
-		struct lustre_cfg *old_cfg = NULL;
-		char *param = NULL;
+		struct cfg_interop_param *ptr = NULL;
+		struct lustre_cfg *old_cfg = NULL;
+		char *param = NULL;
+		ssize_t count;
 
 		param = lustre_cfg_string(cfg, 1);
 		if (param == NULL) {
@@ -5363,17 +5690,22 @@ static int mdt_process_config(const struct lu_env *env,
 			}
 		}
 
-		rc = class_process_proc_param(PARAM_MDT, obd->obd_vars,
-					      cfg, obd);
-		if (rc > 0 || rc == -ENOSYS) {
+		count = class_modify_config(cfg, PARAM_MDT,
+					    &obd->obd_kset.kobj);
+		if (count < 0) {
+			struct coordinator *cdt = &m->mdt_coordinator;
+
 			/* is it an HSM var ? */
-			rc = class_process_proc_param(PARAM_HSM,
-						      hsm_cdt_get_proc_vars(),
-						      cfg, obd);
-			if (rc > 0 || rc == -ENOSYS)
+			count = class_modify_config(cfg, PARAM_HSM,
+						    &cdt->cdt_hsm_kobj);
+			if (count < 0)
 				/* we don't understand; pass it on */
 				rc = next->ld_ops->ldo_process_config(env,
 								      next,
 								      cfg);
+			else
+				rc = count > 0 ? 0 : count;
+		} else {
+			rc = count > 0 ? 0 : count;
 		}
 
 		if (old_cfg)
@@ -5409,6 +5741,7 @@ static struct lu_object *mdt_object_alloc(const struct lu_env *env,
 		lu_object_add_top(h, o);
 		o->lo_ops = &mdt_obj_ops;
 		spin_lock_init(&mo->mot_write_lock);
+		mutex_init(&mo->mot_som_mutex);
 		mutex_init(&mo->mot_lov_mutex);
 		init_rwsem(&mo->mot_dom_sem);
 		init_rwsem(&mo->mot_open_sem);
@@ -5744,7 +6077,7 @@ static int mdt_ctxt_add_dirty_flag(struct lu_env *env,
 
 static int mdt_export_cleanup(struct obd_export *exp)
 {
-	struct list_head closing_list;
+	LIST_HEAD(closing_list);
 	struct mdt_export_data *med = &exp->exp_mdt_data;
 	struct obd_device *obd = exp->exp_obd;
 	struct mdt_device *mdt;
@@ -5754,7 +6087,6 @@ static int mdt_export_cleanup(struct obd_export *exp)
 	int rc = 0;
 	ENTRY;
 
-	INIT_LIST_HEAD(&closing_list);
 	spin_lock(&med->med_open_lock);
 	while (!list_empty(&med->med_open_head)) {
 		struct list_head *tmp = med->med_open_head.next;
@@ -5762,7 +6094,7 @@ static int mdt_export_cleanup(struct obd_export *exp)
 
 		/* Remove mfd handle so it can't be found again.
 		 * We are consuming the mfd_list reference here. */
-		class_handle_unhash(&mfd->mfd_handle);
+		class_handle_unhash(&mfd->mfd_open_handle);
 		list_move_tail(&mfd->mfd_list, &closing_list);
 	}
 	spin_unlock(&med->med_open_lock);
@@ -5803,7 +6135,7 @@ static int mdt_export_cleanup(struct obd_export *exp)
 			 * archive request into a noop if it's not actually
 			 * dirty. */
-			if (mfd->mfd_mode & MDS_FMODE_WRITE)
+			if (mfd->mfd_open_flags & MDS_FMODE_WRITE)
 				rc = mdt_ctxt_add_dirty_flag(&env, info, mfd);
 
 			/* Don't unlink orphan on failover umount, LU-184 */
@@ -5811,13 +6143,14 @@ static int mdt_export_cleanup(struct obd_export *exp)
 				ma->ma_valid = MA_FLAGS;
 				ma->ma_attr_flags |= MDS_KEEP_ORPHAN;
 			}
-			mdt_mfd_close(info, mfd);
-		}
-	}
-	info->mti_mdt = NULL;
-	/* cleanup client slot early */
-	/* Do not erase record for recoverable client. */
-	if (!(exp->exp_flags & OBD_OPT_FAILOVER) || exp->exp_failed)
+			ma->ma_valid |= MA_FORCE_LOG;
+			mdt_mfd_close(info, mfd);
+		}
+	}
+	info->mti_mdt = NULL;
+	/* cleanup client slot early */
+	/* Do not erase record for recoverable client.
*/ + if (!(exp->exp_flags & OBD_OPT_FAILOVER) || exp->exp_failed) tgt_client_del(&env, exp); lu_env_fini(&env); @@ -5826,14 +6159,14 @@ static int mdt_export_cleanup(struct obd_export *exp) static inline void mdt_enable_slc(struct mdt_device *mdt) { - if (mdt->mdt_lut.lut_sync_lock_cancel == NEVER_SYNC_ON_CANCEL) - mdt->mdt_lut.lut_sync_lock_cancel = BLOCKING_SYNC_ON_CANCEL; + if (mdt->mdt_lut.lut_sync_lock_cancel == SYNC_LOCK_CANCEL_NEVER) + mdt->mdt_lut.lut_sync_lock_cancel = SYNC_LOCK_CANCEL_BLOCKING; } static inline void mdt_disable_slc(struct mdt_device *mdt) { - if (mdt->mdt_lut.lut_sync_lock_cancel == BLOCKING_SYNC_ON_CANCEL) - mdt->mdt_lut.lut_sync_lock_cancel = NEVER_SYNC_ON_CANCEL; + if (mdt->mdt_lut.lut_sync_lock_cancel == SYNC_LOCK_CANCEL_BLOCKING) + mdt->mdt_lut.lut_sync_lock_cancel = SYNC_LOCK_CANCEL_NEVER; } static int mdt_obd_disconnect(struct obd_export *exp) @@ -5992,6 +6325,11 @@ static int mdt_init_export(struct obd_export *exp) exp->exp_connecting = 1; spin_unlock(&exp->exp_lock); + OBD_ALLOC(exp->exp_used_slots, + BITS_TO_LONGS(OBD_MAX_RIF_MAX) * sizeof(long)); + if (exp->exp_used_slots == NULL) + RETURN(-ENOMEM); + /* self-export doesn't need client data and ldlm initialization */ if (unlikely(obd_uuid_equals(&exp->exp_obd->obd_uuid, &exp->exp_client_uuid))) @@ -6010,6 +6348,10 @@ static int mdt_init_export(struct obd_export *exp) err_free: tgt_client_free(exp); err: + OBD_FREE(exp->exp_used_slots, + BITS_TO_LONGS(OBD_MAX_RIF_MAX) * sizeof(long)); + exp->exp_used_slots = NULL; + CERROR("%s: Failed to initialize export: rc = %d\n", exp->exp_obd->obd_name, rc); return rc; @@ -6020,6 +6362,10 @@ static int mdt_destroy_export(struct obd_export *exp) ENTRY; target_destroy_export(exp); + if (exp->exp_used_slots) + OBD_FREE(exp->exp_used_slots, + BITS_TO_LONGS(OBD_MAX_RIF_MAX) * sizeof(long)); + /* destroy can be called from failed obd_setup, so * checking uuid is safer than obd_self_export */ if (unlikely(obd_uuid_equals(&exp->exp_obd->obd_uuid, @@ -6598,7 +6944,7 @@ static int mdt_obd_postrecov(struct obd_device *obd) return rc; } -static struct obd_ops mdt_obd_device_ops = { +static const struct obd_ops mdt_obd_device_ops = { .o_owner = THIS_MODULE, .o_set_info_async = mdt_obd_set_info_async, .o_connect = mdt_obd_connect, @@ -6747,10 +7093,10 @@ static int __init mdt_init(void) { int rc; - CLASSERT(sizeof("0x0123456789ABCDEF:0x01234567:0x01234567") == - FID_NOBRACE_LEN + 1); - CLASSERT(sizeof("[0x0123456789ABCDEF:0x01234567:0x01234567]") == - FID_LEN + 1); + BUILD_BUG_ON(sizeof("0x0123456789ABCDEF:0x01234567:0x01234567") != + FID_NOBRACE_LEN + 1); + BUILD_BUG_ON(sizeof("[0x0123456789ABCDEF:0x01234567:0x01234567]") != + FID_LEN + 1); rc = lu_kmem_init(mdt_caches); if (rc) return rc;