#include <obd_class.h>
#include <lustre_osc.h>
+#include <linux/falloc.h>
#include <uapi/linux/lustre/lustre_param.h>
#include "mdc_internal.h"
einfo->ei_cb_cp = ldlm_completion_ast;
einfo->ei_cb_gl = mdc_ldlm_glimpse_ast;
einfo->ei_cbdata = osc; /* value to be put into ->l_ast_data */
+ einfo->ei_req_slot = 1;
}
static void mdc_lock_lvb_update(const struct lu_env *env,
return set;
}
-int mdc_dom_lock_match(const struct lu_env *env, struct obd_export *exp,
- struct ldlm_res_id *res_id, enum ldlm_type type,
- union ldlm_policy_data *policy, enum ldlm_mode mode,
- __u64 *flags, struct osc_object *obj,
- struct lustre_handle *lockh,
- enum ldlm_match_flags match_flags)
+static int mdc_dom_lock_match(const struct lu_env *env, struct obd_export *exp,
+ struct ldlm_res_id *res_id, enum ldlm_type type,
+ union ldlm_policy_data *policy,
+ enum ldlm_mode mode, __u64 *flags,
+ struct osc_object *obj,
+ struct lustre_handle *lockh,
+ enum ldlm_match_flags match_flags)
{
struct obd_device *obd = exp->exp_obd;
__u64 lflags = *flags;
* Finds an existing lock covering a page with given index.
* Copy of osc_obj_dlmlock_at_pgoff() but for DoM IBITS lock.
*/
-struct ldlm_lock *mdc_dlmlock_at_pgoff(const struct lu_env *env,
- struct osc_object *obj, pgoff_t index,
- enum osc_dap_flags dap_flags)
+static struct ldlm_lock *mdc_dlmlock_at_pgoff(const struct lu_env *env,
+ struct osc_object *obj,
+ pgoff_t index,
+ enum osc_dap_flags dap_flags)
{
struct osc_thread_info *info = osc_env_info(env);
struct ldlm_res_id *resname = &info->oti_resname;
RETURN(result);
}
-void mdc_lock_lockless_cancel(const struct lu_env *env,
- const struct cl_lock_slice *slice)
+static void mdc_lock_lockless_cancel(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
{
struct osc_lock *ols = cl2osc_lock(slice);
struct osc_object *osc = cl2osc(slice->cls_obj);
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
/* Destroy pages covered by the extent of the DLM lock */
- result = mdc_lock_flush(env, cl2osc(obj), cl_index(obj, 0),
+ result = mdc_lock_flush(env, cl2osc(obj), 0,
CL_PAGE_EOF, mode, discard);
/* Losing a lock, set KMS to 0.
* NB: assumed that DOM lock covers whole data on MDT.
/* lock reference taken by ldlm_handle2lock_long() is
* owned by osc_lock and released in osc_lock_detach()
*/
- lu_ref_add(&dlmlock->l_reference, "osc_lock", oscl);
+ lu_ref_add_atomic(&dlmlock->l_reference, "osc_lock", oscl);
oscl->ols_has_ref = 1;
LASSERT(oscl->ols_dlmlock == NULL);
/* extend the lock extent, otherwise it will have a problem when
* we decide whether to grant a lockless lock. */
descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
- descr->cld_start = cl_index(descr->cld_obj, 0);
+ descr->cld_start = 0;
descr->cld_end = CL_PAGE_EOF;
/* no lvb update for matched lock */
RETURN(0);
}
-int mdc_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
- osc_enqueue_upcall_f upcall, void *cookie,
- struct lustre_handle *lockh, enum ldlm_mode mode,
- __u64 *flags, int errcode)
+static int mdc_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
+ osc_enqueue_upcall_f upcall, void *cookie,
+ struct lustre_handle *lockh, enum ldlm_mode mode,
+ __u64 *flags, int errcode)
{
struct osc_lock *ols = cookie;
bool glimpse = *flags & LDLM_FL_HAS_INTENT;
RETURN(rc);
}
-int mdc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req,
- void *args, int rc)
+static int mdc_enqueue_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
+ void *args, int rc)
{
struct osc_enqueue_args *aa = args;
struct ldlm_lock *lock;
ldlm_lock_addref(lockh, mode);
/* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
- OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
+ CFS_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
/* Let CP AST to grant the lock first. */
- OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
+ CFS_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
/* Complete obtaining the lock procedure. */
- rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, &einfo, 1, aa->oa_flags,
- aa->oa_lvb, aa->oa_lvb ?
- sizeof(*aa->oa_lvb) : 0, lockh, rc);
+ rc = ldlm_cli_enqueue_fini(aa->oa_exp, &req->rq_pill, &einfo, 1,
+ aa->oa_flags, aa->oa_lvb, aa->oa_lvb ?
+ sizeof(*aa->oa_lvb) : 0, lockh, rc, true);
/* Complete mdc stuff. */
rc = mdc_enqueue_fini(aa->oa_exp, req, aa->oa_upcall, aa->oa_cookie,
lockh, mode, aa->oa_flags, rc);
- OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
+ CFS_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
ldlm_lock_decref(lockh, mode);
LDLM_LOCK_PUT(lock);
* when other sync requests do not get released lock from a client, the client
is excluded from the cluster -- such scenarios make life difficult, so
* release locks just after they are obtained. */
-int mdc_enqueue_send(const struct lu_env *env, struct obd_export *exp,
- struct ldlm_res_id *res_id, __u64 *flags,
- union ldlm_policy_data *policy, struct ost_lvb *lvb,
- osc_enqueue_upcall_f upcall, void *cookie,
- struct ldlm_enqueue_info *einfo, int async)
+static int mdc_enqueue_send(const struct lu_env *env, struct obd_export *exp,
+ struct ldlm_res_id *res_id, __u64 *flags,
+ union ldlm_policy_data *policy, struct ost_lvb *lvb,
+ osc_enqueue_upcall_f upcall, void *cookie,
+ struct ldlm_enqueue_info *einfo, int async)
{
struct obd_device *obd = exp->exp_obd;
struct lustre_handle lockh = { 0 };
struct ldlm_intent *lit;
enum ldlm_mode mode;
bool glimpse = *flags & LDLM_FL_HAS_INTENT;
- __u64 match_flags = *flags;
+ __u64 search_flags = *flags;
+ __u64 match_flags = 0;
LIST_HEAD(cancels);
int rc, count;
int lvb_size;
if (einfo->ei_mode == LCK_PR)
mode |= LCK_PW;
- match_flags |= LDLM_FL_LVB_READY;
+ search_flags |= LDLM_FL_LVB_READY;
if (glimpse)
- match_flags |= LDLM_FL_BLOCK_GRANTED;
- mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
- einfo->ei_type, policy, mode, &lockh);
+ search_flags |= LDLM_FL_BLOCK_GRANTED;
+ if (mode == LCK_GROUP)
+ match_flags = LDLM_MATCH_GROUP;
+ mode = ldlm_lock_match_with_skip(obd->obd_namespace, search_flags, 0,
+ res_id, einfo->ei_type, policy, mode,
+ &lockh, match_flags);
if (mode) {
struct ldlm_lock *matched;
matched = ldlm_handle2lock(&lockh);
- if (OBD_FAIL_CHECK(OBD_FAIL_MDC_GLIMPSE_DDOS))
+ if (CFS_FAIL_CHECK(OBD_FAIL_MDC_GLIMPSE_DDOS))
ldlm_set_kms_ignore(matched);
if (mdc_set_dom_lock_data(matched, einfo->ei_cbdata)) {
.clo_print = osc_lock_print,
};
-int mdc_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io)
+static int mdc_lock_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_lock *lock, const struct cl_io *io)
{
struct osc_lock *ols;
__u32 enqflags = lock->cll_descr.cld_enq_flags;
ols->ols_flags = flags;
ols->ols_speculative = !!(enqflags & CEF_SPECULATIVE);
- if (lock->cll_descr.cld_mode == CLM_GROUP)
- ols->ols_flags |= LDLM_FL_ATOMIC_CB;
if (ols->ols_flags & LDLM_FL_HAS_INTENT) {
ols->ols_flags |= LDLM_FL_BLOCK_GRANTED;
if (io->ci_type == CIT_WRITE || cl_io_is_mkwrite(io))
osc_lock_set_writer(env, io, obj, ols);
+ else if (io->ci_type == CIT_READ ||
+ (io->ci_type == CIT_FAULT && !io->u.ci_fault.ft_mkwrite))
+ osc_lock_set_reader(env, io, obj, ols);
LDLM_DEBUG_NOLOCK("lock %p, mdc lock %p, flags %llx\n",
lock, ols, ols->ols_flags);
resname = &osc_env_info(env)->oti_resname;
fid_build_reg_res_name(lu_object_fid(osc2lu(osc)), resname);
- res = ldlm_resource_get(osc_export(osc)->exp_obd->obd_namespace,
- NULL, resname, LDLM_IBITS, 0);
- ldlm_resource_dump(D_ERROR, res);
+ res = ldlm_resource_get(osc_export(osc)->
+ exp_obd->obd_namespace,
+ resname, LDLM_IBITS, 0);
+ if (IS_ERR(res))
+ CERROR("No lock resource for "DFID"\n",
+ PFID(lu_object_fid(osc2lu(osc))));
+ else
+ ldlm_resource_dump(D_ERROR, res);
libcfs_debug_dumpstack(NULL);
return -ENOENT;
} else {
&oio->oi_trunc);
if (rc < 0)
return rc;
+ } else if (cl_io_is_fallocate(io) &&
+ io->u.ci_setattr.sa_falloc_mode & FALLOC_FL_PUNCH_HOLE) {
+ rc = osc_punch_start(env, io, obj);
+ if (rc < 0)
+ return rc;
}
- if (cl_io_is_fallocate(io))
- return -EOPNOTSUPP;
-
if (oio->oi_lockless == 0) {
cl_object_attr_lock(obj);
rc = cl_object_attr_get(env, obj, attr);
return rc;
}
- if (!(ia_avalid & ATTR_SIZE))
+ if (!(ia_avalid & ATTR_SIZE) && !cl_io_is_fallocate(io))
return 0;
memset(oa, 0, sizeof(*oa));
oa->o_mtime = attr->cat_mtime;
oa->o_atime = attr->cat_atime;
oa->o_ctime = attr->cat_ctime;
-
- oa->o_size = size;
- oa->o_blocks = OBD_OBJECT_EOF;
oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLATIME |
OBD_MD_FLCTIME | OBD_MD_FLMTIME | OBD_MD_FLSIZE |
OBD_MD_FLBLOCKS;
+
if (oio->oi_lockless) {
oa->o_flags = OBD_FL_SRVLOCK;
oa->o_valid |= OBD_MD_FLFLAGS;
}
init_completion(&cbargs->opc_sync);
+ if (cl_io_is_fallocate(io)) {
+ int falloc_mode = io->u.ci_setattr.sa_falloc_mode;
- rc = osc_punch_send(osc_export(cl2osc(obj)), oa,
- mdc_async_upcall, cbargs);
+ oa->o_size = io->u.ci_setattr.sa_falloc_offset;
+ oa->o_blocks = io->u.ci_setattr.sa_falloc_end;
+ rc = osc_fallocate_base(osc_export(cl2osc(obj)), oa,
+ mdc_async_upcall, cbargs, falloc_mode);
+ } else {
+ oa->o_size = size;
+ oa->o_blocks = OBD_OBJECT_EOF;
+ rc = osc_punch_send(osc_export(cl2osc(obj)), oa,
+ mdc_async_upcall, cbargs);
+ }
cbargs->opc_rpc_sent = rc == 0;
return rc;
}
RETURN(0);
}
-int mdc_io_fsync_start(const struct lu_env *env,
- const struct cl_io_slice *slice)
+static int mdc_io_fsync_start(const struct lu_env *env,
+ const struct cl_io_slice *slice)
{
struct cl_io *io = slice->cis_io;
struct cl_fsync_io *fio = &io->u.ci_fsync;
req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, 0);
req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, 0);
+ req_capsule_set_size(&req->rq_pill, &RMF_FILE_ENCCTX, RCL_SERVER, 0);
ptlrpc_request_set_replen(req);
req->rq_interpret_reply = mdc_data_version_interpret;
.cio_extent_release = osc_io_extent_release,
};
-int mdc_io_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io)
+static int mdc_io_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io)
{
struct osc_io *oio = osc_env_io(env);
RETURN(LDLM_ITER_CONTINUE);
}
-int mdc_object_prune(const struct lu_env *env, struct cl_object *obj)
+static int mdc_object_prune(const struct lu_env *env, struct cl_object *obj)
{
struct osc_object *osc = cl2osc(obj);
struct ldlm_res_id *resname = &osc_env_info(env)->oti_resname;
.loo_object_invariant = NULL
};
-struct lu_object *mdc_object_alloc(const struct lu_env *env,
- const struct lu_object_header *unused,
- struct lu_device *dev)
+static struct lu_object *mdc_object_alloc(const struct lu_env *env,
+ const struct lu_object_header *unused,
+ struct lu_device *dev)
{
struct osc_object *osc;
struct lu_object *obj;
struct lustre_cfg *cfg)
{
struct lu_device *d;
- struct osc_device *od;
+ struct osc_device *oc;
struct obd_device *obd;
int rc;
- OBD_ALLOC_PTR(od);
- if (od == NULL)
+ OBD_ALLOC_PTR(oc);
+ if (oc == NULL)
RETURN(ERR_PTR(-ENOMEM));
- cl_device_init(&od->od_cl, t);
- d = osc2lu_dev(od);
+ cl_device_init(&oc->osc_cl, t);
+ d = osc2lu_dev(oc);
d->ld_ops = &mdc_lu_ops;
/* Setup MDC OBD */
osc_device_free(env, d);
RETURN(ERR_PTR(rc));
}
- od->od_exp = obd->obd_self_export;
+ oc->osc_exp = obd->obd_self_export;
+ oc->osc_stats.os_init = ktime_get_real();
RETURN(d);
}