* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2010, 2013, Intel Corporation.
+ * Copyright (c) 2010, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
}
};
-int mdt_get_disposition(struct ldlm_reply *rep, int flag)
+__u64 mdt_get_disposition(struct ldlm_reply *rep, __u64 op_flag)
{
- if (!rep)
- return 0;
- return (rep->lock_policy_res1 & flag);
+ if (!rep)
+ return 0;
+ return rep->lock_policy_res1 & op_flag;
}
void mdt_clear_disposition(struct mdt_thread_info *info,
- struct ldlm_reply *rep, int flag)
+ struct ldlm_reply *rep, __u64 op_flag)
{
if (info) {
- info->mti_opdata &= ~flag;
- tgt_opdata_clear(info->mti_env, flag);
+ info->mti_opdata &= ~op_flag;
+ tgt_opdata_clear(info->mti_env, op_flag);
}
if (rep)
- rep->lock_policy_res1 &= ~flag;
+ rep->lock_policy_res1 &= ~op_flag;
}
void mdt_set_disposition(struct mdt_thread_info *info,
- struct ldlm_reply *rep, int flag)
+ struct ldlm_reply *rep, __u64 op_flag)
{
if (info) {
- info->mti_opdata |= flag;
- tgt_opdata_set(info->mti_env, flag);
+ info->mti_opdata |= op_flag;
+ tgt_opdata_set(info->mti_env, op_flag);
}
if (rep)
- rep->lock_policy_res1 |= flag;
+ rep->lock_policy_res1 |= op_flag;
}
void mdt_lock_reg_init(struct mdt_lock_handle *lh, ldlm_mode_t lm)
repbody->mbo_fid1 = mdt->mdt_md_root_fid;
repbody->mbo_valid |= OBD_MD_FLID;
- if (tsi->tsi_tgt->lut_mds_capa &&
- exp_connect_flags(info->mti_exp) & OBD_CONNECT_MDS_CAPA) {
- struct mdt_object *root;
- struct lustre_capa *capa;
-
- root = mdt_object_find(info->mti_env, mdt, &repbody->mbo_fid1);
- if (IS_ERR(root))
- GOTO(out, rc = PTR_ERR(root));
-
- capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA1);
- LASSERT(capa);
- capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
- rc = mo_capa_get(info->mti_env, mdt_object_child(root), capa,
- 0);
- mdt_object_put(info->mti_env, root);
- if (rc == 0)
- repbody->mbo_valid |= OBD_MD_FLMDSCAPA;
- }
EXIT;
out:
mdt_thread_info_fini(info);
RETURN(rc);
}
-/**
- * Pack SOM attributes into the reply.
- * Call under a DLM UPDATE lock.
- */
-static void mdt_pack_size2body(struct mdt_thread_info *info,
- struct mdt_object *mo)
-{
- struct mdt_body *b;
- struct md_attr *ma = &info->mti_attr;
-
- LASSERT(ma->ma_attr.la_valid & LA_MODE);
- b = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
-
- /* Check if Size-on-MDS is supported, if this is a regular file,
- * if SOM is enabled on the object and if SOM cache exists and valid.
- * Otherwise do not pack Size-on-MDS attributes to the reply. */
- if (!(mdt_conn_flags(info) & OBD_CONNECT_SOM) ||
- !S_ISREG(ma->ma_attr.la_mode) ||
- !mdt_object_is_som_enabled(mo) ||
- !(ma->ma_valid & MA_SOM))
- return;
-
- b->mbo_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
- b->mbo_size = ma->ma_som->msd_size;
- b->mbo_blocks = ma->ma_som->msd_blocks;
-}
-
#ifdef CONFIG_FS_POSIX_ACL
/*
* Pack ACL data into the reply. UIDs/GIDs are mapped and filtered by nodemap.
LASSERT(ma->ma_valid & MA_INODE);
- b->mbo_atime = attr->la_atime;
- b->mbo_mtime = attr->la_mtime;
- b->mbo_ctime = attr->la_ctime;
- b->mbo_mode = attr->la_mode;
- b->mbo_size = attr->la_size;
- b->mbo_blocks = attr->la_blocks;
- b->mbo_uid = nodemap_map_id(nodemap, NODEMAP_UID,
- NODEMAP_FS_TO_CLIENT,
- attr->la_uid);
- b->mbo_gid = nodemap_map_id(nodemap, NODEMAP_GID,
- NODEMAP_FS_TO_CLIENT,
- attr->la_gid);
- b->mbo_flags = attr->la_flags;
- b->mbo_nlink = attr->la_nlink;
- b->mbo_rdev = attr->la_rdev;
-
- /* XXX should pack the reply body according to lu_valid */
- b->mbo_valid |= OBD_MD_FLCTIME | OBD_MD_FLUID |
- OBD_MD_FLGID | OBD_MD_FLTYPE |
- OBD_MD_FLMODE | OBD_MD_FLNLINK | OBD_MD_FLFLAGS |
- OBD_MD_FLATIME | OBD_MD_FLMTIME ;
+ if (attr->la_valid & LA_ATIME) {
+ b->mbo_atime = attr->la_atime;
+ b->mbo_valid |= OBD_MD_FLATIME;
+ }
+ if (attr->la_valid & LA_MTIME) {
+ b->mbo_mtime = attr->la_mtime;
+ b->mbo_valid |= OBD_MD_FLMTIME;
+ }
+ if (attr->la_valid & LA_CTIME) {
+ b->mbo_ctime = attr->la_ctime;
+ b->mbo_valid |= OBD_MD_FLCTIME;
+ }
+ if (attr->la_valid & LA_FLAGS) {
+ b->mbo_flags = attr->la_flags;
+ b->mbo_valid |= OBD_MD_FLFLAGS;
+ }
+ if (attr->la_valid & LA_NLINK) {
+ b->mbo_nlink = attr->la_nlink;
+ b->mbo_valid |= OBD_MD_FLNLINK;
+ }
+ if (attr->la_valid & LA_UID) {
+ b->mbo_uid = nodemap_map_id(nodemap, NODEMAP_UID,
+ NODEMAP_FS_TO_CLIENT,
+ attr->la_uid);
+ b->mbo_valid |= OBD_MD_FLUID;
+ }
+ if (attr->la_valid & LA_GID) {
+ b->mbo_gid = nodemap_map_id(nodemap, NODEMAP_GID,
+ NODEMAP_FS_TO_CLIENT,
+ attr->la_gid);
+ b->mbo_valid |= OBD_MD_FLGID;
+ }
+ b->mbo_mode = attr->la_mode;
+ if (attr->la_valid & LA_MODE)
+ b->mbo_valid |= OBD_MD_FLMODE;
+ if (attr->la_valid & LA_TYPE)
+ b->mbo_valid |= OBD_MD_FLTYPE;
+
+ if (fid != NULL) {
+ b->mbo_fid1 = *fid;
+ b->mbo_valid |= OBD_MD_FLID;
+ CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, valid="LPX64"\n",
+ PFID(fid), b->mbo_nlink, b->mbo_mode, b->mbo_valid);
+ }
+
+ if (info != NULL)
+ mdt_body_reverse_idmap(info, b);
+
+ if (!(attr->la_valid & LA_TYPE))
+ return;
+
+ b->mbo_rdev = attr->la_rdev;
+ b->mbo_size = attr->la_size;
+ b->mbo_blocks = attr->la_blocks;
if (!S_ISREG(attr->la_mode)) {
b->mbo_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV;
b->mbo_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
}
- if (fid) {
- b->mbo_fid1 = *fid;
- b->mbo_valid |= OBD_MD_FLID;
- CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, size="LPU64"\n",
- PFID(fid), b->mbo_nlink,
- b->mbo_mode, b->mbo_size);
- }
-
- if (info)
- mdt_body_reverse_idmap(info, b);
-
if (fid != NULL && (b->mbo_valid & OBD_MD_FLSIZE))
CDEBUG(D_VFSTRACE, DFID": returning size %llu\n",
PFID(fid), (unsigned long long)b->mbo_size);
EXIT;
}
-int mdt_attr_get_eabuf_size(struct mdt_thread_info *info, struct mdt_object *o)
+static int mdt_attr_get_eabuf_size(struct mdt_thread_info *info,
+ struct mdt_object *o)
{
const struct lu_env *env = info->mti_env;
int rc, rc2;
GOTO(out, rc);
}
- if (need & MA_SOM && S_ISREG(mode)) {
- buf->lb_buf = info->mti_xattr_buf;
- buf->lb_len = sizeof(info->mti_xattr_buf);
- CLASSERT(sizeof(struct som_attrs) <=
- sizeof(info->mti_xattr_buf));
- rc2 = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_SOM);
- rc2 = lustre_buf2som(info->mti_xattr_buf, rc2, ma->ma_som);
- if (rc2 == 0)
- ma->ma_valid |= MA_SOM;
- else if (rc2 < 0 && rc2 != -ENODATA)
- GOTO(out, rc = rc2);
- }
-
if (need & MA_HSM && S_ISREG(mode)) {
buf->lb_buf = info->mti_xattr_buf;
buf->lb_len = sizeof(info->mti_xattr_buf);
ma->ma_need |= MA_LOV_DEF;
}
ma->ma_need |= ma_need;
- if (ma->ma_need & MA_SOM)
- ma->ma_som = &info->mti_u.som.data;
rc = mdt_attr_get_complex(info, o, ma);
if (unlikely(rc)) {
}
if (reqbody->mbo_valid & OBD_MD_FLMODEASIZE) {
- repbody->mbo_max_cookiesize = 0;
repbody->mbo_max_mdsize = info->mti_mdt->mdt_max_mdsize;
repbody->mbo_valid |= OBD_MD_FLMODEASIZE;
- CDEBUG(D_INODE, "I am going to change the MAX_MD_SIZE & "
- "MAX_COOKIE to : %d:%d\n", repbody->mbo_max_mdsize,
- repbody->mbo_max_cookiesize);
+ CDEBUG(D_INODE, "changing the max MD size to %u\n",
+ repbody->mbo_max_mdsize);
}
if (exp_connect_rmtclient(info->mti_exp) &&
rc = mdt_pack_acl2body(info, repbody, o, nodemap);
#endif
- if (reqbody->mbo_valid & OBD_MD_FLMDSCAPA &&
- info->mti_mdt->mdt_lut.lut_mds_capa &&
- exp_connect_flags(info->mti_exp) & OBD_CONNECT_MDS_CAPA) {
- struct lustre_capa *capa;
-
- capa = req_capsule_server_get(pill, &RMF_CAPA1);
- LASSERT(capa);
- capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
- rc = mo_capa_get(env, next, capa, 0);
- if (rc)
- RETURN(rc);
- repbody->mbo_valid |= OBD_MD_FLMDSCAPA;
- }
-
out:
if (rc == 0)
mdt_counter_incr(req, LPROC_MDT_GETATTR);
RETURN(rc);
}
-static int mdt_renew_capa(struct mdt_thread_info *info)
-{
- struct mdt_object *obj = info->mti_object;
- struct mdt_body *body;
- struct lustre_capa *capa, *c;
- int rc;
- ENTRY;
-
- /* if object doesn't exist, or server has disabled capability,
- * return directly, client will find body->valid OBD_MD_FLOSSCAPA
- * flag not set.
- */
- if (!obj || !info->mti_mdt->mdt_lut.lut_oss_capa ||
- !(exp_connect_flags(info->mti_exp) & OBD_CONNECT_OSS_CAPA))
- RETURN(0);
-
- body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
- LASSERT(body != NULL);
-
- c = req_capsule_client_get(info->mti_pill, &RMF_CAPA1);
- LASSERT(c);
-
- capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA2);
- LASSERT(capa);
-
- *capa = *c;
- rc = mo_capa_get(info->mti_env, mdt_object_child(obj), capa, 1);
- if (rc == 0)
- body->mbo_valid |= OBD_MD_FLOSSCAPA;
- RETURN(rc);
-}
-
static int mdt_getattr(struct tgt_session_info *tsi)
{
struct mdt_thread_info *info = tsi2mdt_info(tsi);
reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY);
LASSERT(reqbody);
-
- if (reqbody->mbo_valid & OBD_MD_FLOSSCAPA) {
- rc = req_capsule_server_pack(pill);
- if (unlikely(rc))
- RETURN(err_serious(rc));
- rc = mdt_renew_capa(info);
- GOTO(out_shrink, rc);
- }
-
LASSERT(obj != NULL);
LASSERT(lu_object_assert_exists(&obj->mot_obj));
info->mti_cross_ref = !!(reqbody->mbo_valid & OBD_MD_FLCROSSREF);
- /*
- * Don't check capability at all, because rename might getattr for
- * remote obj, and at that time no capability is available.
- */
- mdt_set_capainfo(info, 1, &reqbody->mbo_fid1, BYPASS_CAPA);
rc = mdt_getattr_internal(info, obj, 0);
if (reqbody->mbo_valid & OBD_MD_FLRMTPERM)
mdt_exit_ucred(info);
return rc;
}
+/**
+ * Exchange MOF_LOV_CREATED flags between two objects after a
+ * layout swap. No assumption is made on whether o1 or o2 have
+ * created objects or not.
+ *
+ * \param[in,out] o1 First swap layout object
+ * \param[in,out] o2 Second swap layout object
+ */
+static void mdt_swap_lov_flag(struct mdt_object *o1, struct mdt_object *o2)
+{
+ __u64 o1_flags;
+
+ mutex_lock(&o1->mot_lov_mutex);
+ mutex_lock(&o2->mot_lov_mutex);
+
+ o1_flags = o1->mot_flags;
+ o1->mot_flags = (o1->mot_flags & ~MOF_LOV_CREATED) |
+ (o2->mot_flags & MOF_LOV_CREATED);
+
+ o2->mot_flags = (o2->mot_flags & ~MOF_LOV_CREATED) |
+ (o1_flags & MOF_LOV_CREATED);
+
+ mutex_unlock(&o2->mot_lov_mutex);
+ mutex_unlock(&o1->mot_lov_mutex);
+}
+
static int mdt_swap_layouts(struct tgt_session_info *tsi)
{
struct mdt_thread_info *info;
if (info->mti_dlm_req != NULL)
ldlm_request_cancel(req, info->mti_dlm_req, 0, LATF_SKIP);
- if (req_capsule_get_size(info->mti_pill, &RMF_CAPA1, RCL_CLIENT))
- mdt_set_capainfo(info, 0, &info->mti_body->mbo_fid1,
- req_capsule_client_get(info->mti_pill,
- &RMF_CAPA1));
-
- if (req_capsule_get_size(info->mti_pill, &RMF_CAPA2, RCL_CLIENT))
- mdt_set_capainfo(info, 1, &info->mti_body->mbo_fid2,
- req_capsule_client_get(info->mti_pill,
- &RMF_CAPA2));
-
o1 = info->mti_object;
o = o2 = mdt_object_find(info->mti_env, info->mti_mdt,
&info->mti_body->mbo_fid2);
rc = mo_swap_layouts(info->mti_env, mdt_object_child(o1),
mdt_object_child(o2), msl->msl_flags);
- GOTO(unlock2, rc);
+ if (rc < 0)
+ GOTO(unlock2, rc);
+
+ mdt_swap_lov_flag(o1, o2);
+
unlock2:
mdt_object_unlock(info, o2, lh2, rc);
unlock1:
}
/*
- * UPDATE lock should be taken against parent, and be release before exit;
+ * UPDATE lock should be taken against parent, and be released before exit;
* child_bits lock should be taken against child, and be returned back:
* (1)normal request should release the child lock;
* (2)intent request will grant the lock to client.
RETURN(-ENOENT);
}
- mdt_set_capainfo(info, 0, mdt_object_fid(child), BYPASS_CAPA);
rc = mdt_getattr_internal(info, child, 0);
if (unlikely(rc != 0))
mdt_object_unlock(info, child, lhc, 1);
if (unlikely(IS_ERR(child)))
GOTO(out_parent, rc = PTR_ERR(child));
+ OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout * 2);
rc = mdt_check_resent_lock(info, child, lhc);
if (rc < 0) {
GOTO(out_child, rc);
} else if (rc > 0) {
- OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout*2);
mdt_lock_handle_init(lhc);
mdt_lock_reg_init(lhc, LCK_PR);
try_layout = false;
}
lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
- /* Get MA_SOM attributes if update lock is given. */
- if (lock &&
- lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_UPDATE &&
- S_ISREG(lu_object_attr(&mdt_object_child(child)->mo_lu)))
- ma_need |= MA_SOM;
/* finally, we can get attr for child. */
- mdt_set_capainfo(info, 1, child_fid, BYPASS_CAPA);
rc = mdt_getattr_internal(info, child, ma_need);
if (unlikely(rc != 0)) {
mdt_object_unlock(info, child, lhc, 1);
"Lock res_id: "DLDLMRES", fid: "DFID"\n",
PLDLMRES(lock->l_resource),
PFID(mdt_object_fid(child)));
- if (mdt_object_exists(child) && !mdt_object_remote(child))
- mdt_pack_size2body(info, child);
}
if (lock)
LDLM_LOCK_PUT(lock);
RETURN(err_serious(rc));
switch (oqctl->qc_cmd) {
- case Q_QUOTACHECK:
- case LUSTRE_Q_INVALIDATE:
- case LUSTRE_Q_FINVALIDATE:
- case Q_QUOTAON:
- case Q_QUOTAOFF:
- case Q_INITQUOTA:
- /* deprecated, not used any more */
- RETURN(-EOPNOTSUPP);
/* master quotactl */
case Q_SETINFO:
case Q_SETQUOTA:
struct mdt_lock_handle *lhc)
{
/* the lock might already be gotten in ldlm_handle_enqueue() */
- if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
+ if (unlikely(lustre_handle_is_used(&lhc->mlh_reg_lh))) {
struct ptlrpc_request *req = mdt_info_req(info);
struct ldlm_lock *lock;
LASSERT(mdt_object_remote(o));
- LASSERT(ibits == MDS_INODELOCK_UPDATE);
-
fid_build_reg_res_name(fid, res_id);
memset(einfo, 0, sizeof(*einfo));
* Unlock mdt object.
*
* Immediately release the regular lock and the PDO lock or save the
- * lock in reqeuest and keep them referenced until client ACK or
+ * lock in request and keep them referenced until client ACK or
* transaction commit.
*
* \param info thread info object
RETURN(-EINVAL);
}
- /*
- * Do not get size or any capa fields before we check that request
- * contains capa actually. There are some requests which do not, for
- * instance MDS_IS_SUBDIR.
- */
- if (req_capsule_has_field(pill, &RMF_CAPA1, RCL_CLIENT) &&
- req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
- mdt_set_capainfo(info, 0, &body->mbo_fid1,
- req_capsule_client_get(pill, &RMF_CAPA1));
-
obj = mdt_object_find(env, info->mti_mdt, &body->mbo_fid1);
if (!IS_ERR(obj)) {
- if ((flags & HABEO_CORPUS) &&
- !mdt_object_exists(obj)) {
+ if ((flags & HABEO_CORPUS) && !mdt_object_exists(obj)) {
mdt_object_put(env, obj);
- /* for capability renew ENOENT will be handled in
- * mdt_renew_capa */
- if (body->mbo_valid & OBD_MD_FLOSSCAPA)
- rc = 0;
- else
- rc = -ENOENT;
+ rc = -ENOENT;
} else {
info->mti_object = obj;
rc = 0;
RETURN(rc);
}
-static int mdt_init_capa_ctxt(const struct lu_env *env, struct mdt_device *m)
-{
- struct md_device *next = m->mdt_child;
-
- return next->md_ops->mdo_init_capa_ctxt(env, next,
- m->mdt_lut.lut_mds_capa,
- m->mdt_capa_timeout,
- m->mdt_capa_alg,
- m->mdt_capa_keys);
-}
-
void mdt_lock_handle_init(struct mdt_lock_handle *lh)
{
lh->mlh_type = MDT_NUL_LOCK;
struct mdt_thread_info *tsi2mdt_info(struct tgt_session_info *tsi)
{
struct mdt_thread_info *mti;
- struct lustre_capa *lc;
mti = mdt_th_info(tsi->tsi_env);
LASSERT(mti != NULL);
mdt_thread_info_init(tgt_ses_req(tsi), mti);
if (tsi->tsi_corpus != NULL) {
- struct req_capsule *pill = tsi->tsi_pill;
-
mti->mti_object = mdt_obj(tsi->tsi_corpus);
lu_object_get(tsi->tsi_corpus);
-
- /*
- * XXX: must be part of tgt_mdt_body_unpack but moved here
- * due to mdt_set_capainfo().
- */
- if (req_capsule_has_field(pill, &RMF_CAPA1, RCL_CLIENT) &&
- req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT) > 0) {
- lc = req_capsule_client_get(pill, &RMF_CAPA1);
- mdt_set_capainfo(mti, 0, &tsi->tsi_mdt_body->mbo_fid1,
- lc);
- }
}
mti->mti_body = tsi->tsi_mdt_body;
mti->mti_dlm_req = tsi->tsi_dlm_req;
if (OBD_FAIL_CHECK(OBD_FAIL_TGT_DELAY_CONDITIONAL) &&
cfs_fail_val ==
- tsi2mdt_info(tsi)->mti_mdt->mdt_seq_site.ss_node_id)
- schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
- msecs_to_jiffies(3 * MSEC_PER_SEC));
+ tsi2mdt_info(tsi)->mti_mdt->mdt_seq_site.ss_node_id) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(3 * MSEC_PER_SEC));
+ }
rc = tgt_connect(tsi);
if (rc != 0)
/* the open lock or the lock for cross-ref object should be
* returned to the client */
- if (rc == -MDT_EREMOTE_OPEN || mdt_get_disposition(rep, DISP_OPEN_LOCK)) {
- LASSERT(lustre_handle_is_used(&lhc->mlh_reg_lh));
+ if (lustre_handle_is_used(&lhc->mlh_reg_lh) &&
+ (rc == 0 || rc == -MDT_EREMOTE_OPEN)) {
rep->lock_policy_res2 = 0;
rc = mdt_intent_lock_replace(info, lockp, lhc, flags);
RETURN(rc);
mdt_get_disposition(rep, DISP_LOOKUP_NEG))
rep->lock_policy_res2 = 0;
+ lhc->mlh_reg_lh.cookie = 0ull;
if (rc == -ENOTCONN || rc == -ENODEV ||
rc == -EOVERFLOW) { /**< if VBR failure then return error */
/*
* will detect this, then disconnect, reconnect the import
* immediately, instead of impacting the following the rpc.
*/
- lhc->mlh_reg_lh.cookie = 0ull;
RETURN(rc);
- } else {
- /*
- * For other cases, the error will be returned by intent.
- * and client will retrieve the result from intent.
- */
- /*
- * FIXME: when open lock is finished, that should be
- * checked here.
- */
- if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
- LASSERTF(rc == 0, "Error occurred but lock handle "
- "is still in use, rc = %d\n", rc);
- rep->lock_policy_res2 = 0;
- rc = mdt_intent_lock_replace(info, lockp, lhc, flags);
- RETURN(rc);
- } else {
- lhc->mlh_reg_lh.cookie = 0ull;
- RETURN(ELDLM_LOCK_ABORTED);
- }
}
+ /*
+ * For other cases, the error will be returned by intent, and client
+ * will retrieve the result from intent.
+ */
+ RETURN(ELDLM_LOCK_ABORTED);
}
static int mdt_intent_code(long itcode)
LASSERT(mdt_seq_site(mdt)->ss_node_id != 0);
- if (!likely(fld->lsf_new))
- RETURN(0);
-
rc = lu_env_init(&env, LCT_MD_THREAD);
- if (rc) {
+ if (rc < 0) {
CERROR("%s: cannot init env: rc = %d\n", mdt_obd_name(mdt), rc);
RETURN(rc);
}
- rc = fld_update_from_controller(&env, fld);
- if (rc != 0) {
- CERROR("%s: cannot update controller: rc = %d\n",
- mdt_obd_name(mdt), rc);
+ /* Allocate new sequence now to avoid creating local transaction
+ * in the normal transaction process */
+ rc = seq_server_check_and_alloc_super(&env,
+ mdt_seq_site(mdt)->ss_server_seq);
+ if (rc < 0)
GOTO(out, rc);
+
+ if (fld->lsf_new) {
+ rc = fld_update_from_controller(&env, fld);
+ if (rc != 0) {
+ CERROR("%s: cannot update controller: rc = %d\n",
+ mdt_obd_name(mdt), rc);
+ GOTO(out, rc);
+ }
}
out:
lu_env_fini(&env);
GOTO(out_seq_fini, rc);
if (ss->ss_node_id != 0)
- /* register controler export through lwp */
+ /* register controller export through lwp */
rc = mdt_register_seq_exp(mdt);
EXIT;
lustre_cfg_bufs_reset(bufs, mdt_obd_name(m));
lustre_cfg_bufs_set_string(bufs, 1, NULL);
lcfg = lustre_cfg_new(LCFG_PRE_CLEANUP, bufs);
- if (!lcfg) {
- CERROR("%s: cannot alloc lcfg\n", mdt_obd_name(m));
- return;
- }
+ if (lcfg == NULL)
+ RETURN_EXIT;
+
top->ld_ops->ldo_process_config(env, top, lcfg);
lustre_cfg_free(lcfg);
EXIT;
strcat(flags, "A");
lustre_cfg_bufs_set_string(bufs, 1, flags);
lcfg = lustre_cfg_new(LCFG_CLEANUP, bufs);
- if (!lcfg) {
- CERROR("Cannot alloc lcfg!\n");
- return;
- }
+ if (lcfg == NULL)
+ RETURN_EXIT;
+
LASSERT(top);
top->ld_ops->ldo_process_config(env, top, lcfg);
lustre_cfg_free(lcfg);
lustre_cfg_bufs_set_string(bufs, 3, lprof->lp_dt);
lcfg = lustre_cfg_new(LCFG_ATTACH, bufs);
- if (!lcfg)
+ if (lcfg == NULL)
GOTO(free_bufs, rc = -ENOMEM);
rc = class_attach(lcfg);
lustre_cfg_bufs_set_string(bufs, 3, lprof->lp_dt);
lcfg = lustre_cfg_new(LCFG_SETUP, bufs);
+ if (lcfg == NULL)
+ GOTO(class_detach, rc = -ENOMEM);
rc = class_setup(obd, lcfg);
if (rc)
lustre_cfg_bufs_set_string(bufs, 3, lprof->lp_dt);
lcfg = lustre_cfg_new(LCFG_ATTACH, bufs);
- if (!lcfg)
+ if (lcfg == NULL)
GOTO(cleanup_mem, rc = -ENOMEM);
rc = class_attach(lcfg);
mdt->mdt_bottom->dd_lu_dev.ld_obd->obd_name);
lcfg = lustre_cfg_new(LCFG_SETUP, bufs);
+ if (lcfg == NULL)
+ GOTO(class_detach, rc = -ENOMEM);
rc = class_setup(obd, lcfg);
if (rc)
/* mdt_getxattr() is used from mdt_intent_getxattr(), use this wrapper
* for now. This will be removed along with converting rest of MDT code
* to use tgt_session_info */
-int mdt_tgt_getxattr(struct tgt_session_info *tsi)
+static int mdt_tgt_getxattr(struct tgt_session_info *tsi)
{
struct mdt_thread_info *info = tsi2mdt_info(tsi);
int rc;
TGT_MDT_HDL(0 | HABEO_REFERO, MDS_STATFS, mdt_statfs),
TGT_MDT_HDL(0 | MUTABOR, MDS_REINT, mdt_reint),
TGT_MDT_HDL(HABEO_CORPUS, MDS_CLOSE, mdt_close),
-TGT_MDT_HDL(HABEO_CORPUS, MDS_DONE_WRITING,
- mdt_done_writing),
TGT_MDT_HDL(HABEO_CORPUS| HABEO_REFERO, MDS_READPAGE, mdt_readpage),
TGT_MDT_HDL(HABEO_CORPUS| HABEO_REFERO, MDS_SYNC, mdt_sync),
TGT_MDT_HDL(0, MDS_QUOTACTL, mdt_quotactl),
mdt_seq_fini(env, m);
mdt_fld_fini(env, m);
- next->md_ops->mdo_init_capa_ctxt(env, next, 0, 0, 0, NULL);
- cfs_timer_disarm(&m->mdt_ck_timer);
- mdt_ck_thread_stop(m);
-
/*
* Finish the stack
*/
m->mdt_max_mdsize = MAX_MD_SIZE; /* 4 stripes */
- m->mdt_som_conf = 0;
-
m->mdt_opts.mo_cos = MDT_COS_DEFAULT;
/* default is coordinator off, it is started through conf_param
obd->u.obt.obt_magic = OBT_MAGIC;
}
- spin_lock_init(&m->mdt_ioepoch_lock);
- m->mdt_capa_timeout = CAPA_TIMEOUT;
- m->mdt_capa_alg = CAPA_HMAC_ALG_SHA1;
- m->mdt_ck_timeout = CAPA_KEY_TIMEOUT;
m->mdt_squash.rsi_uid = 0;
m->mdt_squash.rsi_gid = 0;
INIT_LIST_HEAD(&m->mdt_squash.rsi_nosquash_nids);
/* set obd_namespace for compatibility with old code */
obd->obd_namespace = m->mdt_namespace;
- cfs_timer_init(&m->mdt_ck_timer, mdt_ck_timer_callback, m);
-
rc = mdt_hsm_cdt_init(m);
if (rc != 0) {
CERROR("%s: error initializing coordinator, rc %d\n",
GOTO(err_free_ns, rc);
}
- rc = mdt_ck_thread_start(m);
- if (rc)
- GOTO(err_free_hsm, rc);
-
rc = tgt_init(env, &m->mdt_lut, obd, m->mdt_bottom, mdt_common_slice,
OBD_FAIL_MDS_ALL_REQUEST_NET,
OBD_FAIL_MDS_ALL_REPLY_NET);
if (rc)
- GOTO(err_capa, rc);
+ GOTO(err_free_hsm, rc);
rc = mdt_fs_setup(env, m, obd, lsi);
if (rc)
* when the whole stack is complete and ready
* to serve the requests */
- mdt_init_capa_ctxt(env, m);
-
/* Reduce the initial timeout on an MDS because it doesn't need such
* a long timeout as an OST does. Adaptive timeouts will adjust this
* value appropriately. */
mdt_fs_cleanup(env, m);
err_tgt:
tgt_fini(env, &m->mdt_lut);
-err_capa:
- cfs_timer_disarm(&m->mdt_ck_timer);
- mdt_ck_thread_stop(m);
err_free_hsm:
mdt_hsm_cdt_fini(m);
err_free_ns:
lu_object_init(o, h, d);
lu_object_add_top(h, o);
o->lo_ops = &mdt_obj_ops;
- mutex_init(&mo->mot_ioepoch_mutex);
+ spin_lock_init(&mo->mot_write_lock);
mutex_init(&mo->mot_lov_mutex);
init_rwsem(&mo->mot_open_sem);
RETURN(o);
}
static int mdt_object_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *o)
+ lu_printer_t p, const struct lu_object *o)
{
- struct mdt_object *mdto = mdt_obj((struct lu_object *)o);
- return (*p)(env, cookie, LUSTRE_MDT_NAME"-object@%p(ioepoch="LPU64" "
- "flags="LPX64", epochcount=%d, writecount=%d)",
- mdto, mdto->mot_ioepoch, mdto->mot_flags,
- mdto->mot_ioepoch_count, mdto->mot_writecount);
+ struct mdt_object *mdto = mdt_obj((struct lu_object *)o);
+
+ return (*p)(env, cookie,
+ LUSTRE_MDT_NAME"-object@%p(flags=%d, writecount=%d)",
+ mdto, mdto->mot_flags, mdto->mot_write_count);
}
static int mdt_prepare(const struct lu_env *env,
}
LASSERT(!test_bit(MDT_FL_CFGLOG, &mdt->mdt_state));
+
target_recovery_init(&mdt->mdt_lut, tgt_request_handle);
set_bit(MDT_FL_CFGLOG, &mdt->mdt_state);
LASSERT(obd->obd_no_conn);
if (!mdt->mdt_opts.mo_user_xattr)
data->ocd_connect_flags &= ~OBD_CONNECT_XATTR;
- if (!mdt->mdt_som_conf)
- data->ocd_connect_flags &= ~OBD_CONNECT_SOM;
-
if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) {
data->ocd_brw_size = min(data->ocd_brw_size,
(__u32)MD_MAX_BRW_SIZE);
return -EBADE;
}
- if (mdt->mdt_som_conf &&
- !(data->ocd_connect_flags & (OBD_CONNECT_LIGHTWEIGHT |
- OBD_CONNECT_MDS_MDS |
- OBD_CONNECT_SOM))) {
- CWARN("%s: MDS has SOM enabled, but client does not support "
- "it\n", mdt_obd_name(mdt));
- return -EBADE;
- }
-
if (OCD_HAS_FLAG(data, PINGLESS)) {
if (ptlrpc_pinger_suppress_pings()) {
spin_lock(&exp->exp_obd->obd_dev_lock);
* archive request into a noop if it's not actually
* dirty.
*/
- if (mfd->mfd_mode & (FMODE_WRITE|MDS_FMODE_TRUNC))
+ if (mfd->mfd_mode & FMODE_WRITE)
rc = mdt_ctxt_add_dirty_flag(&env, info, mfd);
/* Don't unlink orphan on failover umount, LU-184 */
* at some point we should find a better one
*/
if (!test_bit(MDT_FL_SYNCED, &mdt->mdt_state) && data != NULL &&
- !(data->ocd_connect_flags & OBD_CONNECT_LIGHTWEIGHT)) {
+ !(data->ocd_connect_flags & OBD_CONNECT_LIGHTWEIGHT) &&
+ !(data->ocd_connect_flags & OBD_CONNECT_MDS_MDS)) {
rc = obd_get_info(env, mdt->mdt_child_exp,
sizeof(KEY_OSP_CONNECTED),
- KEY_OSP_CONNECTED, NULL, NULL, NULL);
+ KEY_OSP_CONNECTED, NULL, NULL);
if (rc)
RETURN(-EAGAIN);
set_bit(MDT_FL_SYNCED, &mdt->mdt_state);
RETURN(0);
}
-/** The maximum depth that fid2path() will search.
- * This is limited only because we want to store the fids for
- * historical path lookup purposes.
- */
-#define MAX_PATH_DEPTH 100
-
-/** mdt_path() lookup structure. */
-struct path_lookup_info {
- __u64 pli_recno; /**< history point */
- __u64 pli_currec; /**< current record */
- struct lu_fid pli_fid;
- struct lu_fid pli_fids[MAX_PATH_DEPTH]; /**< path, in fids */
- struct mdt_object *pli_mdt_obj;
- char *pli_path; /**< full path */
- int pli_pathlen;
- int pli_linkno; /**< which hardlink to follow */
- int pli_fidcount; /**< number of \a pli_fids */
-};
-
int mdt_links_read(struct mdt_thread_info *info, struct mdt_object *mdt_obj,
struct linkea_data *ldata)
{
return linkea_init(ldata);
}
+/**
+ * Given an MDT object, try to look up the full path to the object.
+ * Part of the MDT layer implementation of lfs fid2path.
+ *
+ * \param[in] info Per-thread common data shared by MDT level handlers.
+ * \param[in] obj Object to do path lookup of
+ * \param[in,out] fp User-provided struct to store path information
+ *
+ * \retval 0 Lookup successful, path information stored in fp
+ * \retval -EAGAIN Lookup failed, usually because object is being moved
+ * \retval negative errno if there was a problem
+ */
static int mdt_path_current(struct mdt_thread_info *info,
- struct path_lookup_info *pli)
+ struct mdt_object *obj,
+ struct getinfo_fid2path *fp)
{
struct mdt_device *mdt = info->mti_mdt;
struct mdt_object *mdt_obj;
struct lu_buf *buf = &info->mti_big_buf;
char *ptr;
int reclen;
- struct linkea_data ldata = { 0 };
+ struct linkea_data ldata = { NULL };
int rc = 0;
+ bool first = true;
ENTRY;
/* temp buffer for path element, the buffer will be finally freed
RETURN(-ENOMEM);
ldata.ld_buf = buf;
- ptr = pli->pli_path + pli->pli_pathlen - 1;
+ ptr = fp->gf_path + fp->gf_pathlen - 1;
*ptr = 0;
--ptr;
- pli->pli_fidcount = 0;
- pli->pli_fids[0] = *(struct lu_fid *)mdt_object_fid(pli->pli_mdt_obj);
- *tmpfid = pli->pli_fids[0];
+ *tmpfid = fp->gf_fid = *mdt_object_fid(obj);
+
/* root FID only exists on MDT0, and fid2path should also end at MDT0,
* so checking root_fid can only happen on MDT0. */
- while (!lu_fid_eq(&mdt->mdt_md_root_fid,
- &pli->pli_fids[pli->pli_fidcount])) {
+ while (!lu_fid_eq(&mdt->mdt_md_root_fid, &fp->gf_fid)) {
struct lu_buf lmv_buf;
mdt_obj = mdt_object_find(info->mti_env, mdt, tmpfid);
linkea_entry_unpack(lee, &reclen, tmpname, tmpfid);
/* If set, use link #linkno for path lookup, otherwise use
link #0. Only do this for the final path element. */
- if (pli->pli_fidcount == 0 &&
- pli->pli_linkno < leh->leh_reccount) {
+ if (first && fp->gf_linkno < leh->leh_reccount) {
int count;
- for (count = 0; count < pli->pli_linkno; count++) {
+ for (count = 0; count < fp->gf_linkno; count++) {
lee = (struct link_ea_entry *)
((char *)lee + reclen);
linkea_entry_unpack(lee, &reclen, tmpname,
tmpfid);
}
- if (pli->pli_linkno < leh->leh_reccount - 1)
+ if (fp->gf_linkno < leh->leh_reccount - 1)
/* indicate to user there are more links */
- pli->pli_linkno++;
+ fp->gf_linkno++;
}
lmv_buf.lb_buf = info->mti_xattr_buf;
/* For slave stripes, get its master */
if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_STRIPE) {
- pli->pli_fids[pli->pli_fidcount] = *tmpfid;
+ fp->gf_fid = *tmpfid;
continue;
}
} else if (rc < 0 && rc != -ENODATA) {
/* Pack the name in the end of the buffer */
ptr -= tmpname->ln_namelen;
- if (ptr - 1 <= pli->pli_path)
+ if (ptr - 1 <= fp->gf_path)
GOTO(out, rc = -EOVERFLOW);
strncpy(ptr, tmpname->ln_name, tmpname->ln_namelen);
*(--ptr) = '/';
- /* Store the parent fid for historic lookup */
- if (++pli->pli_fidcount >= MAX_PATH_DEPTH)
- GOTO(out, rc = -EOVERFLOW);
- pli->pli_fids[pli->pli_fidcount] = *tmpfid;
+ /* keep the last resolved fid for the client, so the
+ * client can build the remaining path on another MDT
+ * for a remote object */
+ fp->gf_fid = *tmpfid;
+
+ first = false;
}
remote_out:
ptr++; /* skip leading / */
- memmove(pli->pli_path, ptr, pli->pli_path + pli->pli_pathlen - ptr);
+ memmove(fp->gf_path, ptr, fp->gf_path + fp->gf_pathlen - ptr);
- EXIT;
out:
- return rc;
+ RETURN(rc);
}
-/* Returns the full path to this fid, as of changelog record recno. */
+/**
+ * Given an MDT object, use mdt_path_current to get the path.
+ * Essentially a wrapper to retry mdt_path_current a set number of times
+ * if -EAGAIN is returned (usually because an object is being moved).
+ *
+ * Part of the MDT layer implementation of lfs fid2path.
+ *
+ * \param[in] info Per-thread common data shared by mdt level handlers.
+ * \param[in] obj Object to do path lookup of
+ * \param[in,out] fp User-provided struct for arguments and to store path
+ * information
+ *
+ * \retval 0 Lookup successful, path information stored in fp
+ * \retval negative errno if there was a problem
+ */
static int mdt_path(struct mdt_thread_info *info, struct mdt_object *obj,
- char *path, int pathlen, __u64 *recno, int *linkno,
- struct lu_fid *fid)
+ struct getinfo_fid2path *fp)
{
struct mdt_device *mdt = info->mti_mdt;
- struct path_lookup_info *pli;
int tries = 3;
int rc = -EAGAIN;
ENTRY;
- if (pathlen < 3)
+ if (fp->gf_pathlen < 3)
RETURN(-EOVERFLOW);
if (lu_fid_eq(&mdt->mdt_md_root_fid, mdt_object_fid(obj))) {
- path[0] = '\0';
+ fp->gf_path[0] = '\0';
RETURN(0);
}
- OBD_ALLOC_PTR(pli);
- if (pli == NULL)
- RETURN(-ENOMEM);
-
- pli->pli_mdt_obj = obj;
- pli->pli_recno = *recno;
- pli->pli_path = path;
- pli->pli_pathlen = pathlen;
- pli->pli_linkno = *linkno;
-
/* Retry multiple times in case file is being moved */
while (tries-- && rc == -EAGAIN)
- rc = mdt_path_current(info, pli);
-
- /* return the last resolved fids to the client, so the client will
- * build the left path on another MDT for remote object */
- *fid = pli->pli_fids[pli->pli_fidcount];
-
- *recno = pli->pli_currec;
- /* Return next link index to caller */
- *linkno = pli->pli_linkno;
-
- OBD_FREE_PTR(pli);
+ rc = mdt_path_current(info, obj, fp);
RETURN(rc);
}
+/**
+ * Get the full path of the provided FID, as of changelog record recno.
+ *
+ * This checks sanity and looks up object for user provided FID
+ * before calling the actual path lookup code.
+ *
+ * Part of the MDT layer implementation of lfs fid2path.
+ *
+ * \param[in] info Per-thread common data shared by mdt level handlers.
+ * \param[in,out] fp User-provided struct for arguments and to store path
+ * information
+ *
+ * \retval 0 Lookup successful, path information and recno stored in fp
+ * \retval -ENOENT, object does not exist
+ * \retval negative errno if there was a problem
+ */
static int mdt_fid2path(struct mdt_thread_info *info,
struct getinfo_fid2path *fp)
{
}
obj = mdt_object_find(info->mti_env, mdt, &fp->gf_fid);
- if (obj == NULL || IS_ERR(obj)) {
- CDEBUG(D_IOCTL, "no object "DFID": %ld\n", PFID(&fp->gf_fid),
- PTR_ERR(obj));
- RETURN(-EINVAL);
+ if (IS_ERR(obj)) {
+ rc = PTR_ERR(obj);
+ CDEBUG(D_IOCTL, "cannot find "DFID": rc = %d\n",
+ PFID(&fp->gf_fid), rc);
+ RETURN(rc);
}
if (mdt_object_remote(obj))
RETURN(rc);
}
- rc = mdt_path(info, obj, fp->gf_path, fp->gf_pathlen, &fp->gf_recno,
- &fp->gf_linkno, &fp->gf_fid);
+ rc = mdt_path(info, obj, fp);
CDEBUG(D_INFO, "fid "DFID", path %s recno "LPX64" linkno %u\n",
PFID(&fp->gf_fid), fp->gf_path, fp->gf_recno, fp->gf_linkno);
break;
case OBD_IOC_ABORT_RECOVERY:
CERROR("%s: Aborting recovery for device\n", mdt_obd_name(mdt));
+ obd->obd_force_abort_recovery = 1;
target_stop_recovery_thread(obd);
rc = 0;
break;
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Metadata Target ("LUSTRE_MDT_NAME")");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
-cfs_module(mdt, LUSTRE_VERSION_STRING, mdt_mod_init, mdt_mod_exit);
+module_init(mdt_mod_init);
+module_exit(mdt_mod_exit);