return set;
}
-int mdc_dom_lock_match(const struct lu_env *env, struct obd_export *exp,
- struct ldlm_res_id *res_id, enum ldlm_type type,
- union ldlm_policy_data *policy, enum ldlm_mode mode,
- __u64 *flags, struct osc_object *obj,
- struct lustre_handle *lockh,
- enum ldlm_match_flags match_flags)
+static int mdc_dom_lock_match(const struct lu_env *env, struct obd_export *exp,
+ struct ldlm_res_id *res_id, enum ldlm_type type,
+ union ldlm_policy_data *policy,
+ enum ldlm_mode mode, __u64 *flags,
+ struct osc_object *obj,
+ struct lustre_handle *lockh,
+ enum ldlm_match_flags match_flags)
{
struct obd_device *obd = exp->exp_obd;
__u64 lflags = *flags;
 * Finds an existing lock covering the page with the given index.
 * Copy of osc_obj_dlmlock_at_pgoff() but for a DoM IBITS lock.
*/
-struct ldlm_lock *mdc_dlmlock_at_pgoff(const struct lu_env *env,
- struct osc_object *obj, pgoff_t index,
- enum osc_dap_flags dap_flags)
+static struct ldlm_lock *mdc_dlmlock_at_pgoff(const struct lu_env *env,
+ struct osc_object *obj,
+ pgoff_t index,
+ enum osc_dap_flags dap_flags)
{
struct osc_thread_info *info = osc_env_info(env);
struct ldlm_res_id *resname = &info->oti_resname;
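/*
 * Editorial sketch, not part of the patch: a DoM lookup such as the one
 * above is expected to name the LDLM resource by the file FID and to ask
 * for the MDS_INODELOCK_DOM inodebit; the helper name and the "fid"
 * argument below are illustrative only.
 */
static void example_dom_resname_and_policy(const struct lu_fid *fid,
					   struct ldlm_res_id *resname,
					   union ldlm_policy_data *policy)
{
	fid_build_reg_res_name(fid, resname);		/* resource = file FID */
	policy->l_inodebits.bits = MDS_INODELOCK_DOM;	/* DoM IBITS bit */
}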
RETURN(result);
}
-void mdc_lock_lockless_cancel(const struct lu_env *env,
- const struct cl_lock_slice *slice)
+static void mdc_lock_lockless_cancel(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
{
struct osc_lock *ols = cl2osc_lock(slice);
struct osc_object *osc = cl2osc(slice->cls_obj);
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
/* Destroy pages covered by the extent of the DLM lock */
- result = mdc_lock_flush(env, cl2osc(obj), cl_index(obj, 0),
+ result = mdc_lock_flush(env, cl2osc(obj), 0,
CL_PAGE_EOF, mode, discard);
/* Losing a lock, set KMS to 0.
 * NB: it is assumed that the DOM lock covers all data on the MDT.
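/*
 * Editorial sketch, not part of the patch, assuming the usual cl_object
 * attribute API: once the pages are flushed above, the cached known
 * minimum size is dropped so that a later glimpse refetches it from the
 * MDT; the helper name is illustrative only.
 */
static void example_reset_kms(const struct lu_env *env, struct osc_object *osc)
{
	struct cl_object *obj = osc2cl(osc);
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;

	cl_object_attr_lock(obj);
	attr->cat_kms = 0;				/* forget the cached KMS */
	cl_object_attr_update(env, obj, attr, CAT_KMS);
	cl_object_attr_unlock(obj);
}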
/* Extend the lock extent, otherwise it will cause problems when
 * we decide whether to grant a lockless lock. */
descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
- descr->cld_start = cl_index(descr->cld_obj, 0);
+ descr->cld_start = 0;
descr->cld_end = CL_PAGE_EOF;
/* no lvb update for matched lock */
RETURN(0);
}
-int mdc_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
- osc_enqueue_upcall_f upcall, void *cookie,
- struct lustre_handle *lockh, enum ldlm_mode mode,
- __u64 *flags, int errcode)
+static int mdc_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
+ osc_enqueue_upcall_f upcall, void *cookie,
+ struct lustre_handle *lockh, enum ldlm_mode mode,
+ __u64 *flags, int errcode)
{
struct osc_lock *ols = cookie;
bool glimpse = *flags & LDLM_FL_HAS_INTENT;
RETURN(rc);
}
-int mdc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req,
- void *args, int rc)
+static int mdc_enqueue_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
+ void *args, int rc)
{
struct osc_enqueue_args *aa = args;
struct ldlm_lock *lock;
ldlm_lock_addref(lockh, mode);
/* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
- OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
+ CFS_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
/* Let the CP AST grant the lock first. */
- OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
+ CFS_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
/* Complete the lock acquisition procedure. */
- rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, &einfo, 1, aa->oa_flags,
- aa->oa_lvb, aa->oa_lvb ?
+ rc = ldlm_cli_enqueue_fini(aa->oa_exp, &req->rq_pill, &einfo, 1,
+ aa->oa_flags, aa->oa_lvb, aa->oa_lvb ?
sizeof(*aa->oa_lvb) : 0, lockh, rc, true);
/* Complete mdc stuff. */
rc = mdc_enqueue_fini(aa->oa_exp, req, aa->oa_upcall, aa->oa_cookie,
lockh, mode, aa->oa_flags, rc);
- OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
+ CFS_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
ldlm_lock_decref(lockh, mode);
LDLM_LOCK_PUT(lock);
 * when other sync requests do not get the released lock from a client, the
 * client is excluded from the cluster -- such scenarios make life difficult,
 * so release locks just after they are obtained. */
-int mdc_enqueue_send(const struct lu_env *env, struct obd_export *exp,
- struct ldlm_res_id *res_id, __u64 *flags,
- union ldlm_policy_data *policy, struct ost_lvb *lvb,
- osc_enqueue_upcall_f upcall, void *cookie,
- struct ldlm_enqueue_info *einfo, int async)
+static int mdc_enqueue_send(const struct lu_env *env, struct obd_export *exp,
+ struct ldlm_res_id *res_id, __u64 *flags,
+ union ldlm_policy_data *policy, struct ost_lvb *lvb,
+ osc_enqueue_upcall_f upcall, void *cookie,
+ struct ldlm_enqueue_info *einfo, int async)
{
struct obd_device *obd = exp->exp_obd;
struct lustre_handle lockh = { 0 };
struct ldlm_intent *lit;
enum ldlm_mode mode;
bool glimpse = *flags & LDLM_FL_HAS_INTENT;
- __u64 match_flags = *flags;
+ __u64 search_flags = *flags;
+ __u64 match_flags = 0;
LIST_HEAD(cancels);
int rc, count;
int lvb_size;
if (einfo->ei_mode == LCK_PR)
mode |= LCK_PW;
- match_flags |= LDLM_FL_LVB_READY;
+ search_flags |= LDLM_FL_LVB_READY;
if (glimpse)
- match_flags |= LDLM_FL_BLOCK_GRANTED;
- mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
- einfo->ei_type, policy, mode, &lockh);
+ search_flags |= LDLM_FL_BLOCK_GRANTED;
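+	/* search_flags carries the LDLM_FL_* bits to search with;
+	 * match_flags is the separate enum ldlm_match_flags argument
+	 * passed to ldlm_lock_match_with_skip() below. */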
+ if (mode == LCK_GROUP)
+ match_flags = LDLM_MATCH_GROUP;
+ mode = ldlm_lock_match_with_skip(obd->obd_namespace, search_flags, 0,
+ res_id, einfo->ei_type, policy, mode,
+ &lockh, match_flags);
if (mode) {
struct ldlm_lock *matched;
matched = ldlm_handle2lock(&lockh);
- if (OBD_FAIL_CHECK(OBD_FAIL_MDC_GLIMPSE_DDOS))
+ if (CFS_FAIL_CHECK(OBD_FAIL_MDC_GLIMPSE_DDOS))
ldlm_set_kms_ignore(matched);
if (mdc_set_dom_lock_data(matched, einfo->ei_cbdata)) {
.clo_print = osc_lock_print,
};
-int mdc_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io)
+static int mdc_lock_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_lock *lock, const struct cl_io *io)
{
struct osc_lock *ols;
__u32 enqflags = lock->cll_descr.cld_enq_flags;
ols->ols_flags = flags;
ols->ols_speculative = !!(enqflags & CEF_SPECULATIVE);
- if (lock->cll_descr.cld_mode == CLM_GROUP)
- ols->ols_flags |= LDLM_FL_ATOMIC_CB;
if (ols->ols_flags & LDLM_FL_HAS_INTENT) {
ols->ols_flags |= LDLM_FL_BLOCK_GRANTED;
RETURN(0);
}
-int mdc_io_fsync_start(const struct lu_env *env,
- const struct cl_io_slice *slice)
+static int mdc_io_fsync_start(const struct lu_env *env,
+ const struct cl_io_slice *slice)
{
struct cl_io *io = slice->cis_io;
struct cl_fsync_io *fio = &io->u.ci_fsync;
.cio_extent_release = osc_io_extent_release,
};
-int mdc_io_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io)
+static int mdc_io_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io)
{
struct osc_io *oio = osc_env_io(env);
RETURN(LDLM_ITER_CONTINUE);
}
-int mdc_object_prune(const struct lu_env *env, struct cl_object *obj)
+static int mdc_object_prune(const struct lu_env *env, struct cl_object *obj)
{
struct osc_object *osc = cl2osc(obj);
struct ldlm_res_id *resname = &osc_env_info(env)->oti_resname;
.loo_object_invariant = NULL
};
-struct lu_object *mdc_object_alloc(const struct lu_env *env,
- const struct lu_object_header *unused,
- struct lu_device *dev)
+static struct lu_object *mdc_object_alloc(const struct lu_env *env,
+ const struct lu_object_header *unused,
+ struct lu_device *dev)
{
struct osc_object *osc;
struct lu_object *obj;
struct lustre_cfg *cfg)
{
struct lu_device *d;
- struct osc_device *od;
+ struct osc_device *oc;
struct obd_device *obd;
int rc;
- OBD_ALLOC_PTR(od);
- if (od == NULL)
+ OBD_ALLOC_PTR(oc);
+ if (oc == NULL)
RETURN(ERR_PTR(-ENOMEM));
- cl_device_init(&od->od_cl, t);
- d = osc2lu_dev(od);
+ cl_device_init(&oc->osc_cl, t);
+ d = osc2lu_dev(oc);
d->ld_ops = &mdc_lu_ops;
/* Setup MDC OBD */
osc_device_free(env, d);
RETURN(ERR_PTR(rc));
}
- od->od_exp = obd->obd_self_export;
+ oc->osc_exp = obd->obd_self_export;
+ oc->osc_stats.os_init = ktime_get_real();
RETURN(d);
}
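/*
 * Editorial sketch, not part of the patch, with an illustrative table name:
 * callbacks like mdc_lock_init(), mdc_io_init() and mdc_object_prune() are
 * reached through file-local cl_object_operations vectors, e.g.:
 */
static const struct cl_object_operations example_mdc_clo_ops = {
	.coo_lock_init	= mdc_lock_init,
	.coo_io_init	= mdc_io_init,
	.coo_prune	= mdc_object_prune,
};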