RETURN(rc);
}
+/**
+ * moo_file_lock() for a local CMM object: pure pass-through that forwards
+ * the extent/group file lock request to the next layer below via
+ * md_object_next().
+ */
+static int cml_file_lock(const struct lu_env *env, struct md_object *mo,
+ struct lov_mds_md *lmm, struct ldlm_extent *extent,
+ struct lustre_handle *lockh)
+{
+ int rc;
+ ENTRY;
+ rc = mo_file_lock(env, md_object_next(mo), lmm, extent, lockh);
+ RETURN(rc);
+}
+
+/**
+ * moo_file_unlock() for a local CMM object: pass-through to the layer
+ * below via md_object_next().
+ */
+static int cml_file_unlock(const struct lu_env *env, struct md_object *mo,
+ struct lov_mds_md *lmm, struct lustre_handle *lockh)
+{
+ int rc;
+ ENTRY;
+ rc = mo_file_unlock(env, md_object_next(mo), lmm, lockh);
+ RETURN(rc);
+}
+
static int cml_object_sync(const struct lu_env *env, struct md_object *mo)
{
int rc;
.moo_version_get = cml_version_get,
.moo_version_set = cml_version_set,
.moo_path = cml_path,
+ .moo_file_lock = cml_file_lock,
+ .moo_file_unlock = cml_file_unlock,
};
/** @} */
return -EFAULT;
}
+/**
+ * moo_file_lock() for a remote CMM object: file locking cannot be done
+ * through a remote object, so return -EREMOTE (presumably so the caller
+ * redirects to the server actually holding the object -- NOTE(review):
+ * redirect semantics inferred from the errno, confirm against callers).
+ */
+static int cmr_file_lock(const struct lu_env *env, struct md_object *mo,
+ struct lov_mds_md *lmm, struct ldlm_extent *extent,
+ struct lustre_handle *lockh)
+{
+ return -EREMOTE;
+}
+
+/**
+ * moo_file_unlock() for a remote CMM object: not supported remotely,
+ * mirror cmr_file_lock() and return -EREMOTE.
+ */
+static int cmr_file_unlock(const struct lu_env *env, struct md_object *mo,
+ struct lov_mds_md *lmm, struct lustre_handle *lockh)
+{
+ return -EREMOTE;
+}
+
/**
* cmr moo_version_get().
*/
.moo_version_get = cmr_version_get,
.moo_version_set = cmr_version_set,
.moo_path = cmr_path,
+ .moo_file_lock = cmr_file_lock,
+ .moo_file_unlock = cmr_file_unlock,
};
/** @} */
dt_obj_version_t);
int (*moo_path)(const struct lu_env *env, struct md_object *obj,
char *path, int pathlen, __u64 *recno, int *linkno);
+ int (*moo_file_lock)(const struct lu_env *env, struct md_object *obj,
+ struct lov_mds_md *lmm, struct ldlm_extent *extent,
+ struct lustre_handle *lockh);
+ int (*moo_file_unlock)(const struct lu_env *env, struct md_object *obj,
+ struct lov_mds_md *lmm,
+ struct lustre_handle *lockh);
};
/**
return m->mo_ops->moo_version_set(env, m, ver);
}
+/**
+ * Dispatch helper: invoke the object's moo_file_lock() method.
+ * The method must be provided by every md_object_operations instance
+ * (asserted below).
+ */
+static inline int mo_file_lock(const struct lu_env *env, struct md_object *m,
+ struct lov_mds_md *lmm,
+ struct ldlm_extent *extent,
+ struct lustre_handle *lockh)
+{
+ LASSERT(m->mo_ops->moo_file_lock);
+ return m->mo_ops->moo_file_lock(env, m, lmm, extent, lockh);
+}
+
+/**
+ * Dispatch helper: invoke the object's moo_file_unlock() method.
+ * The method must be provided by every md_object_operations instance
+ * (asserted below).
+ */
+static inline int mo_file_unlock(const struct lu_env *env, struct md_object *m,
+ struct lov_mds_md *lmm,
+ struct lustre_handle *lockh)
+{
+ LASSERT(m->mo_ops->moo_file_unlock);
+ return m->mo_ops->moo_file_unlock(env, m, lmm, lockh);
+}
+
static inline int mdo_lookup(const struct lu_env *env,
struct md_object *p,
const struct lu_name *lname,
return -ENOSYS;
}
+/**
+ * moo_file_lock() stub for .lustre special objects: file locking is not
+ * implemented for them, return -ENOSYS.
+ */
+static int dot_file_lock(const struct lu_env *env, struct md_object *obj,
+ struct lov_mds_md *lmm, struct ldlm_extent *extent,
+ struct lustre_handle *lockh)
+{
+ return -ENOSYS;
+}
+
+/**
+ * moo_file_unlock() stub for .lustre special objects: not implemented,
+ * return -ENOSYS to match dot_file_lock().
+ */
+static int dot_file_unlock(const struct lu_env *env, struct md_object *obj,
+ struct lov_mds_md *lmm, struct lustre_handle *lockh)
+{
+ return -ENOSYS;
+}
static struct md_object_operations mdd_dot_lustre_obj_ops = {
 .moo_permission = dot_lustre_mdd_permission,
 .moo_version_get = dot_lustre_mdd_version_get,
 .moo_version_set = dot_lustre_mdd_version_set,
 .moo_path = dot_lustre_mdd_path,
+ /* file locking is not supported on .lustre objects (stubs -> -ENOSYS) */
+ .moo_file_lock = dot_file_lock,
+ .moo_file_unlock = dot_file_unlock,
};
void mdd_lov_create_finish(const struct lu_env *env, struct mdd_device *mdd,
struct lov_mds_md *lmm, int lmm_size,
const struct md_op_spec *spec);
+int mdd_file_lock(const struct lu_env *env, struct md_object *obj,
+ struct lov_mds_md *lmm, struct ldlm_extent *extent,
+ struct lustre_handle *lockh);
+int mdd_file_unlock(const struct lu_env *env, struct md_object *obj,
+ struct lov_mds_md *lmm, struct lustre_handle *lockh);
int mdd_get_md(const struct lu_env *env, struct mdd_object *obj,
void *md, int *md_size, const char *name);
int mdd_get_md_locked(const struct lu_env *env, struct mdd_object *obj,
lmm_size, logcookies, fid, NULL);
RETURN(rc);
}
+
+/**
+ * Blocking AST for the MDS-side group lock.
+ *
+ * \param data the md_attr attached as callback data at enqueue time
+ * (may be NULL when no valid attributes were available).
+ *
+ * LDLM_CB_BLOCKING: a conflicting request arrived; cancel our lock so
+ * the conflicting one can proceed.
+ * LDLM_CB_CANCELING: the lock is being destroyed; this callback owns the
+ * md_attr handed over at enqueue time, so free it (and its SOM data) here.
+ */
+static int grouplock_blocking_ast(struct ldlm_lock *lock,
+ struct ldlm_lock_desc *desc,
+ void *data, int flag)
+{
+ struct md_attr *ma = data;
+ struct lustre_handle lockh;
+ int rc = 0;
+ ENTRY;
+
+ switch (flag)
+ {
+ case LDLM_CB_BLOCKING :
+ /* lock is canceled */
+ CDEBUG(D_DLMTRACE, "Lock %p is canceled\n", lock);
+
+ ldlm_lock2handle(lock, &lockh);
+ rc = ldlm_cli_cancel(&lockh);
+
+ break;
+ case LDLM_CB_CANCELING :
+ CDEBUG(D_DLMTRACE,
+ "Lock %p has been canceled, do cleaning\n",
+ lock);
+
+ /* free attrs allocated in mdd_file_lock(); ma_som first,
+ * then the container */
+ if (ma && ma->ma_som)
+ OBD_FREE_PTR(ma->ma_som);
+ if (ma)
+ OBD_FREE_PTR(ma);
+ break;
+ default:
+ /* no other callback flags are expected here */
+ LBUG();
+ }
+ RETURN(rc);
+}
+
+/**
+ * Glimpse AST for the MDS-side group lock: answer a glimpse request with
+ * size/blocks taken from the md_attr stashed in l_ast_data.
+ *
+ * Preference order: SOM cached attributes (MA_SOM), then plain inode
+ * attributes (MA_INODE); with neither available reply with
+ * -ELDLM_NO_LOCK_DATA so the client falls back to asking elsewhere.
+ */
+static int grouplock_glimpse_ast(struct ldlm_lock *lock, void *data)
+{
+ struct ptlrpc_request *req = data;
+ struct ost_lvb *lvb;
+ int rc;
+ struct md_attr *ma;
+ ENTRY;
+
+ ma = lock->l_ast_data;
+
+ /* reshape the request capsule so the reply can carry an LVB */
+ req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK);
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
+ sizeof(*lvb));
+ rc = req_capsule_server_pack(&req->rq_pill);
+ if (rc) {
+ CERROR("failed pack reply: %d\n", rc);
+ GOTO(out, rc);
+ }
+
+ lvb = req_capsule_server_get(&req->rq_pill, &RMF_DLM_LVB);
+
+ if ((ma) && (ma->ma_valid & MA_SOM)) {
+ lvb->lvb_size = ma->ma_som->msd_size;
+ lvb->lvb_blocks = ma->ma_som->msd_blocks;
+ } else if ((ma) && (ma->ma_valid & MA_INODE)) {
+ lvb->lvb_size = ma->ma_attr.la_size;
+ lvb->lvb_blocks = ma->ma_attr.la_blocks;
+ } else {
+ /* no usable attribute data attached to the lock */
+ lvb->lvb_size = 0;
+ rc = -ELDLM_NO_LOCK_DATA;
+ }
+
+ EXIT;
+out:
+ /* pack a minimal reply when there is no LVB data to return */
+ if (rc == -ELDLM_NO_LOCK_DATA)
+ lustre_pack_reply(req, 1, NULL, NULL);
+
+ req->rq_status = rc;
+ return rc;
+}
+
+/**
+ * mdd layer moo_file_lock(): take an LDLM_EXTENT lock in LCK_GROUP mode
+ * covering \a extent on the file's objects, going through the LOV export.
+ *
+ * \param lmm on-disk striping of the file, unpacked into a lov_stripe_md
+ * for the enqueue.
+ * \param lockh filled with the handle of the acquired lock.
+ *
+ * A md_attr carrying SOM/inode attributes is attached as lock callback
+ * data so grouplock_glimpse_ast() can answer glimpses; on success its
+ * ownership passes to grouplock_blocking_ast() (freed on cancel).
+ *
+ * \retval 0 on success, negative errno on failure.
+ */
+int mdd_file_lock(const struct lu_env *env, struct md_object *obj,
+ struct lov_mds_md *lmm, struct ldlm_extent *extent,
+ struct lustre_handle *lockh)
+{
+ struct ldlm_enqueue_info einfo = { 0 };
+ struct obd_info oinfo = { { { 0 } } };
+ struct obd_device *obd;
+ struct obd_export *lov_exp;
+ struct lov_stripe_md *lsm = NULL;
+ struct md_attr *ma = NULL;
+ int rc;
+ ENTRY;
+
+ obd = mdo2mdd(obj)->mdd_obd_dev;
+ lov_exp = obd->u.mds.mds_lov_exp;
+
+ /* check the unpack result instead of ignoring it: a corrupt lmm
+ * would otherwise leave lsm NULL and break the enqueue below */
+ rc = obd_unpackmd(lov_exp, &lsm, lmm,
+ lov_mds_md_size(lmm->lmm_stripe_count, lmm->lmm_magic));
+ if (rc < 0)
+ RETURN(rc);
+
+ OBD_ALLOC_PTR(ma);
+ if (ma == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ OBD_ALLOC_PTR(ma->ma_som);
+ if (ma->ma_som == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ /* fetch attributes used to serve glimpses while the lock is held */
+ ma->ma_need = MA_SOM | MA_INODE;
+ mo_attr_get(env, obj, ma);
+
+ einfo.ei_type = LDLM_EXTENT;
+ einfo.ei_mode = LCK_GROUP;
+ einfo.ei_cb_bl = grouplock_blocking_ast;
+ einfo.ei_cb_cp = ldlm_completion_ast;
+ einfo.ei_cb_gl = grouplock_glimpse_ast;
+
+ /* attach ma as callback data only if it carries valid attributes;
+ * once attached, the blocking AST owns and frees it */
+ if (ma->ma_valid & (MA_SOM | MA_INODE))
+ einfo.ei_cbdata = ma;
+ else
+ einfo.ei_cbdata = NULL;
+
+ memset(&oinfo.oi_policy, 0, sizeof(oinfo.oi_policy));
+ oinfo.oi_policy.l_extent = *extent;
+ oinfo.oi_lockh = lockh;
+ oinfo.oi_md = lsm;
+ oinfo.oi_flags = 0;
+
+ rc = obd_enqueue(lov_exp, &oinfo, &einfo, NULL);
+ /* ei_cbdata is used as a free flag at exit */
+ if (rc)
+ einfo.ei_cbdata = NULL;
+
+out:
+ /* free the stripe md on every path; freeing it before the out:
+ * label leaked it when the allocations above failed */
+ if (lsm != NULL)
+ obd_unpackmd(lov_exp, &lsm, NULL, 0);
+
+ /* ma is freed if not used as callback data */
+ if ((einfo.ei_cbdata == NULL) && ma && ma->ma_som)
+ OBD_FREE_PTR(ma->ma_som);
+ if ((einfo.ei_cbdata == NULL) && ma)
+ OBD_FREE_PTR(ma);
+
+ RETURN(rc);
+}
+
+/**
+ * mdd layer moo_file_unlock(): cancel the LCK_GROUP lock referenced by
+ * \a lockh on the file's objects, going through the LOV export.
+ *
+ * \param lmm on-disk striping, unpacked so obd_cancel() knows the
+ * objects the lock covers.
+ *
+ * \retval 0 on success, negative errno on failure.
+ */
+int mdd_file_unlock(const struct lu_env *env, struct md_object *obj,
+ struct lov_mds_md *lmm, struct lustre_handle *lockh)
+{
+ struct obd_device *obd;
+ struct obd_export *lov_exp;
+ struct lov_stripe_md *lsm = NULL;
+ int rc;
+ ENTRY;
+
+ LASSERT(lustre_handle_is_used(lockh));
+
+ obd = mdo2mdd(obj)->mdd_obd_dev;
+ lov_exp = obd->u.mds.mds_lov_exp;
+
+ /* check the unpack result instead of ignoring it: a failure would
+ * otherwise pass a NULL lsm to obd_cancel() */
+ rc = obd_unpackmd(lov_exp, &lsm, lmm,
+ lov_mds_md_size(lmm->lmm_stripe_count, lmm->lmm_magic));
+ if (rc < 0)
+ RETURN(rc);
+
+ rc = obd_cancel(lov_exp, lsm, LCK_GROUP, lockh);
+
+ /* release the stripe md unpacked above */
+ obd_unpackmd(lov_exp, &lsm, NULL, 0);
+
+ RETURN(rc);
+}
.moo_version_get = mdd_version_get,
.moo_version_set = mdd_version_set,
.moo_path = mdd_path,
+ .moo_file_lock = mdd_file_lock,
+ .moo_file_unlock = mdd_file_unlock,
};
cfs_spin_unlock(&hdr->coh_lock_guard);
if (conflict) {
- CDEBUG(D_DLMTRACE, "lock %p is confliced with %p, will wait\n",
- lock, conflict);
- lu_ref_add(&conflict->cll_reference, "cancel-wait", lock);
- LASSERT(lock->cll_conflict == NULL);
- lock->cll_conflict = conflict;
- rc = CLO_WAIT;
+ if (lock->cll_descr.cld_mode == CLM_GROUP) {
+ /* we want a group lock, but a previous lock request
+ * conflicts; do not wait, return 0 so the
+ * request is sent to the server
+ */
+ CDEBUG(D_DLMTRACE, "group lock %p is conflicted "
+ "with %p, no wait, send to server\n",
+ lock, conflict);
+ cl_lock_put(env, conflict);
+ rc = 0;
+ } else {
+ CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, "
+ "will wait\n",
+ lock, conflict);
+ LASSERT(lock->cll_conflict == NULL);
+ lu_ref_add(&conflict->cll_reference, "cancel-wait",
+ lock);
+ lock->cll_conflict = conflict;
+ rc = CLO_WAIT;
+ }
}
RETURN(rc);
}