* GPL HEADER END
*/
/*
- * Copyright (c) 2012, 2017 Intel Corporation.
+ * Copyright (c) 2017, Intel Corporation.
*/
/*
* lustre/mdt/mdt_io.c
up_write(&mo->mot_dom_sem);
}
+/**
+ * Lock prolongation for Data-on-MDT.
+ * This is similar to the OFD code, but for the DOM IBITS lock.
+ */
+static inline time64_t prolong_timeout(struct ptlrpc_request *req)
+{
+ struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
+ time64_t req_timeout;
+
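+ /* adaptive timeouts are disabled, fall back to half the static timeout */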
+ if (AT_OFF)
+ return obd_timeout / 2;
+
+ req_timeout = req->rq_deadline - req->rq_arrival_time.tv_sec;
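+ /* prolong by the larger of the AT service estimate and this
+ * request's own timeout */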
+ return max_t(time64_t, at_est2timeout(at_get(&svcpt->scp_at_estimate)),
+ req_timeout);
+}
+
+static void mdt_dom_resource_prolong(struct ldlm_prolong_args *arg)
+{
+ struct ldlm_resource *res;
+ struct ldlm_lock *lock;
+
+ ENTRY;
+
+ res = ldlm_resource_get(arg->lpa_export->exp_obd->obd_namespace, NULL,
+ &arg->lpa_resid, LDLM_EXTENT, 0);
+ if (IS_ERR(res)) {
+ CDEBUG(D_DLMTRACE,
+ "Failed to get resource for resid %llu/%llu\n",
+ arg->lpa_resid.name[0], arg->lpa_resid.name[1]);
+ RETURN_EXIT;
+ }
+
+ lock_res(res);
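+ /* prolong the first granted DOM lock found on the resource */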
+ list_for_each_entry(lock, &res->lr_granted, l_res_link) {
+ if (ldlm_has_dom(lock)) {
+ LDLM_DEBUG(lock, "DOM lock to prolong");
+ ldlm_lock_prolong_one(lock, arg);
+ break;
+ }
+ }
+ unlock_res(res);
+ ldlm_resource_putref(res);
+
+ EXIT;
+}
+
+static void mdt_prolong_dom_lock(struct tgt_session_info *tsi,
+ struct ldlm_prolong_args *data)
+{
+ struct obdo *oa = &tsi->tsi_ost_body->oa;
+ struct ldlm_lock *lock;
+
+ ENTRY;
+
+ data->lpa_timeout = prolong_timeout(tgt_ses_req(tsi));
+ data->lpa_export = tsi->tsi_exp;
+ data->lpa_resid = tsi->tsi_resid;
+
+ CDEBUG(D_RPCTRACE, "Prolong DOM lock for req %p with x%llu\n",
+ tgt_ses_req(tsi), tgt_ses_req(tsi)->rq_xid);
+
+ if (oa->o_valid & OBD_MD_FLHANDLE) {
+ /* most requests are covered by a single lock, so try the
+ * fast path first. */
+ lock = ldlm_handle2lock(&oa->o_handle);
+ if (lock != NULL) {
+ LASSERT(lock->l_export == data->lpa_export);
+ ldlm_lock_prolong_one(lock, data);
+ lock->l_last_used = ktime_get();
+ LDLM_LOCK_PUT(lock);
+ if (data->lpa_locks_cnt > 0)
+ RETURN_EXIT;
+ }
+ }
+ mdt_dom_resource_prolong(data);
+ EXIT;
+}
+
+static int mdt_rw_hpreq_lock_match(struct ptlrpc_request *req,
+ struct ldlm_lock *lock)
+{
+ struct obd_ioobj *ioo;
+ enum ldlm_mode mode;
+ __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
+
+ ENTRY;
+
+ if (!(lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_DOM))
+ RETURN(0);
+
+ ioo = req_capsule_client_get(&req->rq_pill, &RMF_OBD_IOOBJ);
+ LASSERT(ioo != NULL);
+
+ LASSERT(lock->l_resource != NULL);
+ if (!fid_res_name_eq(&ioo->ioo_oid.oi_fid, &lock->l_resource->lr_name))
+ RETURN(0);
+
+ /* a bulk write can only hold a reference on a PW lock. */
+ mode = LCK_PW;
+ if (opc == OST_READ)
+ /* whereas a bulk read can be protected by either a PR or a
+ * PW lock */
+ mode |= LCK_PR;
+
+ if (!(lock->l_granted_mode & mode))
+ RETURN(0);
+
+ RETURN(1);
+}
+
+static int mdt_rw_hpreq_check(struct ptlrpc_request *req)
+{
+ struct tgt_session_info *tsi;
+ struct obd_ioobj *ioo;
+ struct niobuf_remote *rnb;
+ int opc;
+ struct ldlm_prolong_args pa = { 0 };
+
+ ENTRY;
+
+ /* Don't use tgt_ses_info() to get session info, because lock_match()
+ * can be called while the request has no processing thread yet. */
+ tsi = lu_context_key_get(&req->rq_session, &tgt_session_key);
+
+ /*
+ * Use LASSERT below because malformed RPCs should have
+ * been filtered out in tgt_hpreq_handler().
+ */
+ opc = lustre_msg_get_opc(req->rq_reqmsg);
+ LASSERT(opc == OST_READ || opc == OST_WRITE);
+
+ ioo = req_capsule_client_get(&req->rq_pill, &RMF_OBD_IOOBJ);
+ LASSERT(ioo != NULL);
+
+ rnb = req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE);
+ LASSERT(rnb != NULL);
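+ /* SRVLOCK requests are never given HP ops, see mdt_hp_brw() */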
+ LASSERT(!(rnb->rnb_flags & OBD_BRW_SRVLOCK));
+
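+ /* a write is covered only by a PW lock, a read by either PR or PW */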
+ pa.lpa_mode = LCK_PW;
+ if (opc == OST_READ)
+ pa.lpa_mode |= LCK_PR;
+
+ DEBUG_REQ(D_RPCTRACE, req, "%s %s: refresh rw locks for " DFID,
+ tgt_name(tsi->tsi_tgt), current->comm, PFID(&tsi->tsi_fid));
+
+ mdt_prolong_dom_lock(tsi, &pa);
+
+ if (pa.lpa_blocks_cnt > 0) {
+ CDEBUG(D_DLMTRACE,
+ "%s: refreshed %u locks timeout for req %p",
+ tgt_name(tsi->tsi_tgt), pa.lpa_blocks_cnt, req);
+ RETURN(1);
+ }
+
+ RETURN(pa.lpa_locks_cnt > 0 ? 0 : -ESTALE);
+}
+
+static void mdt_rw_hpreq_fini(struct ptlrpc_request *req)
+{
+ mdt_rw_hpreq_check(req);
+}
+
+static struct ptlrpc_hpreq_ops mdt_hpreq_rw = {
+ .hpreq_lock_match = mdt_rw_hpreq_lock_match,
+ .hpreq_check = mdt_rw_hpreq_check,
+ .hpreq_fini = mdt_rw_hpreq_fini
+};
+
+/**
+ * Assign high priority operations to an IO request.
+ *
+ * Check if the incoming request is a candidate for
+ * high-priority processing. If it is, assign it a high-priority
+ * operations table.
+ *
+ * \param[in] tsi target session environment for this request
+ */
+void mdt_hp_brw(struct tgt_session_info *tsi)
+{
+ struct niobuf_remote *rnb;
+ struct obd_ioobj *ioo;
+
+ ENTRY;
+
+ ioo = req_capsule_client_get(tsi->tsi_pill, &RMF_OBD_IOOBJ);
+ LASSERT(ioo != NULL); /* must exist after request preprocessing */
+ if (ioo->ioo_bufcnt > 0) {
+ rnb = req_capsule_client_get(tsi->tsi_pill, &RMF_NIOBUF_REMOTE);
+ LASSERT(rnb != NULL); /* must exist after preprocessing */
+
+ /* no high priority if server lock is needed */
+ if (rnb->rnb_flags & OBD_BRW_SRVLOCK ||
+ (lustre_msg_get_flags(tgt_ses_req(tsi)->rq_reqmsg) &
+ MSG_REPLAY))
+ return;
+ }
+ tgt_ses_req(tsi)->rq_ops = &mdt_hpreq_rw;
+}
+
+static int mdt_punch_hpreq_lock_match(struct ptlrpc_request *req,
+ struct ldlm_lock *lock)
+{
+ struct tgt_session_info *tsi;
+ struct obdo *oa;
+
+ ENTRY;
+
+ /* Don't use tgt_ses_info() to get session info, because lock_match()
+ * can be called while the request has no processing thread yet. */
+ tsi = lu_context_key_get(&req->rq_session, &tgt_session_key);
+
+ /*
+ * Use LASSERT below because malformed RPCs should have
+ * been filtered out in tgt_hpreq_handler().
+ */
+ LASSERT(tsi->tsi_ost_body != NULL);
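+ /* fast path: check the lock handle from the request first */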
+ if (tsi->tsi_ost_body->oa.o_valid & OBD_MD_FLHANDLE &&
+ tsi->tsi_ost_body->oa.o_handle.cookie == lock->l_handle.h_cookie)
+ RETURN(1);
+
+ oa = &tsi->tsi_ost_body->oa;
+
+ LASSERT(lock->l_resource != NULL);
+ if (!fid_res_name_eq(&oa->o_oi.oi_fid, &lock->l_resource->lr_name))
+ RETURN(0);
+
+ if (!(lock->l_granted_mode & LCK_PW))
+ RETURN(0);
+
+ RETURN(1);
+}
+
+/**
+ * Implementation of ptlrpc_hpreq_ops::hpreq_check for OST_PUNCH request.
+ *
+ * High-priority queue request check for whether the given punch request
+ * (\a req) is blocking an LDLM lock cancel. Also checks whether the request is
+ * covered by an LDLM lock.
+ *
+ * \param[in] req the incoming request
+ *
+ * \retval 1 if \a req is blocking an LDLM lock cancel
+ * \retval 0 if it is not
+ * \retval -ESTALE if lock is not found
+ */
+static int mdt_punch_hpreq_check(struct ptlrpc_request *req)
+{
+ struct tgt_session_info *tsi;
+ struct obdo *oa;
+ struct ldlm_prolong_args pa = { 0 };
+
+ ENTRY;
+
+ /* Don't use tgt_ses_info() to get session info, because lock_match()
+ * can be called while the request has no processing thread yet. */
+ tsi = lu_context_key_get(&req->rq_session, &tgt_session_key);
+ LASSERT(tsi != NULL);
+ oa = &tsi->tsi_ost_body->oa;
+
+ LASSERT(!(oa->o_valid & OBD_MD_FLFLAGS &&
+ oa->o_flags & OBD_FL_SRVLOCK));
+
+ pa.lpa_mode = LCK_PW;
+
+ CDEBUG(D_DLMTRACE, "%s: refresh DOM lock for "DFID"\n",
+ tgt_name(tsi->tsi_tgt), PFID(&tsi->tsi_fid));
+
+ mdt_prolong_dom_lock(tsi, &pa);
+
+ if (pa.lpa_blocks_cnt > 0) {
+ CDEBUG(D_DLMTRACE,
+ "%s: refreshed %u locks timeout for req %p.\n",
+ tgt_name(tsi->tsi_tgt), pa.lpa_blocks_cnt, req);
+ RETURN(1);
+ }
+
+ RETURN(pa.lpa_locks_cnt > 0 ? 0 : -ESTALE);
+}
+
+/**
+ * Implementation of ptlrpc_hpreq_ops::hpreq_fini for OST_PUNCH request.
+ *
+ * Called after the request has been handled. It refreshes the lock
+ * timeout again so that the client has more time to send the lock
+ * cancel RPC.
+ *
+ * \param[in] req request which is being processed.
+ */
+static void mdt_punch_hpreq_fini(struct ptlrpc_request *req)
+{
+ mdt_punch_hpreq_check(req);
+}
+
+static struct ptlrpc_hpreq_ops mdt_hpreq_punch = {
+ .hpreq_lock_match = mdt_punch_hpreq_lock_match,
+ .hpreq_check = mdt_punch_hpreq_check,
+ .hpreq_fini = mdt_punch_hpreq_fini
+};
+
+void mdt_hp_punch(struct tgt_session_info *tsi)
+{
+ LASSERT(tsi->tsi_ost_body != NULL); /* must exist if we are here */
+ /* no high-priority if server lock is needed */
+ if ((tsi->tsi_ost_body->oa.o_valid & OBD_MD_FLFLAGS &&
+ tsi->tsi_ost_body->oa.o_flags & OBD_FL_SRVLOCK) ||
+ tgt_conn_flags(tsi) & OBD_CONNECT_MDS ||
+ lustre_msg_get_flags(tgt_ses_req(tsi)->rq_reqmsg) & MSG_REPLAY)
+ return;
+ tgt_ses_req(tsi)->rq_ops = &mdt_hpreq_punch;
+}
+
static int mdt_preprw_read(const struct lu_env *env, struct obd_export *exp,
struct mdt_device *mdt, struct mdt_object *mo,
struct lu_attr *la, int niocount,
{
struct dt_object *dob;
int i, j, rc, tot_bytes = 0;
+ int maxlnb = *nr_local; /* capacity of the lnb array */
+ int level;
ENTRY;
mdt_dom_read_lock(mo);
- if (!mdt_object_exists(mo))
- GOTO(unlock, rc = -ENOENT);
+ *nr_local = 0;
+ /* The only valid case when a READ can find the object missing or
+ * stale is when the export was just evicted and open files were
+ * closed forcefully on the server while the client's READ was still
+ * in progress.
+ * This should not happen on a healthy export; there the object can't
+ * be missing or dying, because both states mean it was destroyed.
+ */
+ level = exp->exp_failed ? D_INFO : D_ERROR;
+ if (!mdt_object_exists(mo)) {
+ CDEBUG_LIMIT(level,
+ "%s: READ IO to missing obj "DFID": rc = %d\n",
+ exp->exp_obd->obd_name, PFID(mdt_object_fid(mo)),
+ -ENOENT);
+ /* return 0 and continue with an empty commit to skip such a
+ * READ without further BRW errors.
+ */
+ RETURN(0);
+ }
+ if (lu_object_is_dying(&mo->mot_header)) {
+ CDEBUG_LIMIT(level,
+ "%s: READ IO to stale obj "DFID": rc = %d\n",
+ exp->exp_obd->obd_name, PFID(mdt_object_fid(mo)),
+ -ESTALE);
+ /* return 0 and continue with an empty commit to skip such a
+ * READ without further BRW errors.
+ */
+ RETURN(0);
+ }
dob = mdt_obj2dt(mo);
/* parse remote buffers to local buffers and prepare the latter */
- *nr_local = 0;
for (i = 0, j = 0; i < niocount; i++) {
- rc = dt_bufs_get(env, dob, rnb + i, lnb + j, 0);
+ rc = dt_bufs_get(env, dob, rnb + i, lnb + j, maxlnb, 0);
if (unlikely(rc < 0))
GOTO(buf_put, rc);
/* correct index for local buffers to continue with */
j += rc;
+ maxlnb -= rc;
*nr_local += rc;
tot_bytes += rnb[i].rnb_len;
}
RETURN(0);
buf_put:
dt_bufs_put(env, dob, lnb, *nr_local);
-unlock:
mdt_dom_read_unlock(mo);
return rc;
}
{
struct dt_object *dob;
int i, j, k, rc = 0, tot_bytes = 0;
+ int maxlnb = *nr_local; /* capacity of the lnb array */
ENTRY;
tgt_grant_prepare_write(env, exp, oa, rnb, obj->ioo_bufcnt);
mdt_dom_read_lock(mo);
+ *nr_local = 0;
+ /* don't report an error when the export has failed */
if (!mdt_object_exists(mo)) {
- CDEBUG(D_ERROR, "%s: BRW to missing obj "DFID"\n",
- exp->exp_obd->obd_name, PFID(mdt_object_fid(mo)));
- GOTO(unlock, rc = -ENOENT);
+ int level = exp->exp_failed ? D_INFO : D_ERROR;
+
+ rc = -ENOENT;
+ CDEBUG_LIMIT(level,
+ "%s: WRITE IO to missing obj "DFID": rc = %d\n",
+ exp->exp_obd->obd_name, PFID(mdt_object_fid(mo)),
+ rc);
+ /* exit with no data written, note nr_local = 0 above */
+ GOTO(unlock, rc);
+ }
+ if (lu_object_is_dying(&mo->mot_header)) {
+ /* This is a possible race between object destroy followed by a
+ * discard BL AST and client cache flushing. The object stays
+ * referenced until the discard finishes.
+ */
+ CDEBUG(D_INODE, "WRITE IO to stale object "DFID"\n",
+ PFID(mdt_object_fid(mo)));
+ /* Note: continue with no error here to avoid BRW errors, but
+ * skip the transaction in commitrw silently so that no data is
+ * written.
+ */
+ */
}
dob = mdt_obj2dt(mo);
/* parse remote buffers to local buffers and prepare the latter */
- *nr_local = 0;
for (i = 0, j = 0; i < obj->ioo_bufcnt; i++) {
- rc = dt_bufs_get(env, dob, rnb + i, lnb + j, 1);
+ rc = dt_bufs_get(env, dob, rnb + i, lnb + j, maxlnb, 1);
if (unlikely(rc < 0))
GOTO(err, rc);
/* correct index for local buffers to continue with */
lnb[j + k].lnb_rc = -ENOSPC;
}
j += rc;
+ maxlnb -= rc;
*nr_local += rc;
tot_bytes += rnb[i].rnb_len;
}
ENTRY;
- LASSERT(niocount > 0);
-
dob = mdt_obj2dt(mo);
- dt_bufs_put(env, dob, lnb, niocount);
+ if (niocount)
+ dt_bufs_put(env, dob, lnb, niocount);
mdt_dom_read_unlock(mo);
RETURN(rc);
retry:
if (!dt_object_exists(dob))
GOTO(out, rc = -ENOENT);
+ if (lu_object_is_dying(&mo->mot_header)) {
+ /* A commit to a stale object can just be skipped silently. */
+ CDEBUG(D_INODE, "skip commit to stale object "DFID"\n",
+ PFID(mdt_object_fid(mo)));
+ GOTO(out, rc = 0);
+ }
+
+ if (niocount == 0) {
+ rc = -EPROTO;
+ DEBUG_REQ(D_WARNING, tgt_ses_req(tgt_ses_info(env)),
+ "%s: commit with no pages for "DFID": rc = %d\n",
+ exp->exp_obd->obd_name, PFID(mdt_object_fid(mo)), rc);
+ GOTO(out, rc);
+ }
th = dt_trans_create(env, dt);
if (IS_ERR(th))
GOTO(out_stop, rc);
}
+ tgt_vbr_obj_set(env, dob);
rc = dt_trans_start(env, dt, th);
if (rc)
GOTO(out_stop, rc);
__u64 valid;
int rc = 0;
- if (npages == 0) {
- CERROR("%s: no pages to commit\n",
- exp->exp_obd->obd_name);
- rc = -EPROTO;
- }
-
LASSERT(mo);
if (cmd == OBD_BRW_WRITE) {
* doesn't already exist so we can store the reservation handle
* there. */
valid = OBD_MD_FLUID | OBD_MD_FLGID;
- valid |= OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME;
+ if (tgt_fmd_check(exp, mdt_object_fid(mo),
+ mdt_info_req(info)->rq_xid))
+ valid |= OBD_MD_FLATIME | OBD_MD_FLMTIME |
+ OBD_MD_FLCTIME;
la_from_obdo(la, oa, valid);
oa->o_flags = OBD_FL_NO_GRPQUOTA;
}
+ if (lnb[0].lnb_flags & OBD_BRW_OVER_PRJQUOTA) {
+ if (oa->o_valid & OBD_MD_FLFLAGS)
+ oa->o_flags |= OBD_FL_NO_PRJQUOTA;
+ else
+ oa->o_flags = OBD_FL_NO_PRJQUOTA;
+ }
+
oa->o_valid |= OBD_MD_FLFLAGS | OBD_MD_FLUSRQUOTA |
- OBD_MD_FLGRPQUOTA;
+ OBD_MD_FLGRPQUOTA | OBD_MD_FLPRJQUOTA;
}
} else if (cmd == OBD_BRW_READ) {
/* If oa != NULL then mdt_preprw_read updated the inode
} else {
rc = -EPROTO;
}
- /* this put is pair to object_get in ofd_preprw_write */
mdt_thread_info_fini(info);
RETURN(rc);
}
ENTRY;
/* check that we do support OBD_CONNECT_TRUNCLOCK. */
- CLASSERT(OST_CONNECT_SUPPORTED & OBD_CONNECT_TRUNCLOCK);
+ BUILD_BUG_ON(!(OST_CONNECT_SUPPORTED & OBD_CONNECT_TRUNCLOCK));
if ((oa->o_valid & (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS)) !=
(OBD_MD_FLSIZE | OBD_MD_FLBLOCKS))
if (IS_ERR(mo))
GOTO(out_unlock, rc = PTR_ERR(mo));
- mdt_dom_write_lock(mo);
if (!mdt_object_exists(mo))
GOTO(out_put, rc = -ENOENT);
+
+ /* Shouldn't happen on dirs */
+ if (S_ISDIR(lu_object_attr(&mo->mot_obj))) {
+ rc = -EPERM;
+ CERROR("%s: Truncate on dir "DFID": rc = %d\n",
+ exp->exp_obd->obd_name, PFID(&tsi->tsi_fid), rc);
+ GOTO(out_put, rc);
+ }
+
+ mdt_dom_write_lock(mo);
dob = mdt_obj2dt(mo);
la_from_obdo(la, oa, OBD_MD_FLMTIME | OBD_MD_FLATIME | OBD_MD_FLCTIME);
la->la_size = start;
la->la_valid |= LA_SIZE;
+ /* MDT supports FMD for Data-on-MDT needs */
+ if (la->la_valid & (LA_ATIME | LA_MTIME | LA_CTIME))
+ tgt_fmd_update(tsi->tsi_exp, &tsi->tsi_fid,
+ tgt_ses_req(tsi)->rq_xid);
+
rc = mdt_object_punch(tsi->tsi_env, mdt->mdt_bottom, dob,
start, end, la);
mdt_dom_write_unlock(mo);
lu_object_put(tsi->tsi_env, &mo->mot_obj);
out_unlock:
if (srvlock)
- mdt_save_lock(info, &lh, LCK_PW, rc);
+ tgt_extent_unlock(&lh, LCK_PW);
out:
mdt_thread_info_fini(info);
return rc;
/* There can be only one write lock covering data, try to match it. */
policy.l_inodebits.bits = MDS_INODELOCK_DOM;
- mode = ldlm_lock_match(ns, LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK,
+ mode = ldlm_lock_match(ns, LDLM_FL_TEST_LOCK,
&res->lr_name, LDLM_IBITS, &policy,
LCK_PW, &lockh, 0);
if (flags & LDLM_FL_RESENT) {
rc = LDLM_ITER_CONTINUE;
} else {
- __u64 tmpflags = 0;
+ __u64 tmpflags = LDLM_FL_BLOCK_NOWAIT;
enum ldlm_error err;
rc = policy(lock, &tmpflags, LDLM_PROCESS_RESCAN, &err, NULL);
RETURN(rc);
}
-void mdt_dom_discard_data(struct mdt_thread_info *info,
- const struct lu_fid *fid)
-{
- struct mdt_device *mdt = info->mti_mdt;
- union ldlm_policy_data *policy = &info->mti_policy;
- struct ldlm_res_id *res_id = &info->mti_res_id;
- struct lustre_handle dom_lh;
- __u64 flags = LDLM_FL_AST_DISCARD_DATA;
- int rc = 0;
-
- policy->l_inodebits.bits = MDS_INODELOCK_DOM;
- policy->l_inodebits.try_bits = 0;
- fid_build_reg_res_name(fid, res_id);
-
- /* Tell the clients that the object is gone now and that they should
- * throw away any cached pages. */
- rc = ldlm_cli_enqueue_local(mdt->mdt_namespace, res_id, LDLM_IBITS,
- policy, LCK_PW, &flags, ldlm_blocking_ast,
- ldlm_completion_ast, NULL, NULL, 0,
- LVB_T_NONE, NULL, &dom_lh);
-
- /* We only care about the side-effects, just drop the lock. */
- if (rc == ELDLM_OK)
- ldlm_lock_decref(&dom_lh, LCK_PW);
-}
-
/* check if client has already DoM lock for given resource */
bool mdt_dom_client_has_lock(struct mdt_thread_info *info,
const struct lu_fid *fid)
return rc;
}
+/**
+ * MDT request handler for OST_GETATTR RPC.
+ *
+ * This is a data-specific request to get the object and layout versions
+ * under the IO lock. It is reliable only for Data-on-MDT files.
+ *
+ * \param[in] tsi target session environment for this request
+ *
+ * \retval 0 if successful
+ * \retval negative value on error
+ */
+int mdt_data_version_get(struct tgt_session_info *tsi)
+{
+ struct mdt_thread_info *mti = mdt_th_info(tsi->tsi_env);
+ struct mdt_device *mdt = mti->mti_mdt;
+ struct mdt_body *repbody;
+ struct mdt_object *mo = mti->mti_object;
+ struct lov_comp_md_v1 *comp;
+ struct lustre_handle lh = { 0 };
+ __u64 flags = 0;
+ __s64 version;
+ enum ldlm_mode lock_mode = LCK_PR;
+ bool srvlock;
+ int rc;
+
+ ENTRY;
+
+ req_capsule_set_size(tsi->tsi_pill, &RMF_MDT_MD, RCL_SERVER, 0);
+ req_capsule_set_size(tsi->tsi_pill, &RMF_ACL, RCL_SERVER, 0);
+ rc = req_capsule_server_pack(tsi->tsi_pill);
+ if (unlikely(rc != 0))
+ RETURN(err_serious(rc));
+
+ repbody = req_capsule_server_get(tsi->tsi_pill, &RMF_MDT_BODY);
+ if (repbody == NULL)
+ RETURN(-ENOMEM);
+
+ srvlock = tsi->tsi_mdt_body->mbo_valid & OBD_MD_FLFLAGS &&
+ tsi->tsi_mdt_body->mbo_flags & OBD_FL_SRVLOCK;
+
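+ /* the client asked for server-side locking, take the IO lock here */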
+ if (srvlock) {
+ if (unlikely(tsi->tsi_mdt_body->mbo_flags & OBD_FL_FLUSH))
+ lock_mode = LCK_PW;
+
+ fid_build_reg_res_name(&tsi->tsi_fid, &tsi->tsi_resid);
+ rc = tgt_mdt_data_lock(mdt->mdt_namespace, &tsi->tsi_resid,
+ &lh, lock_mode, &flags);
+ if (rc != 0)
+ RETURN(rc);
+ }
+
+ if (!mdt_object_exists(mo))
+ GOTO(out, rc = -ENOENT);
+ if (mdt_object_remote(mo))
+ GOTO(out, rc = -EREMOTE);
+ if (!S_ISREG(lu_object_attr(&mo->mot_obj)))
+ GOTO(out, rc = -EBADF);
+
+ /* Get version first */
+ version = dt_version_get(tsi->tsi_env, mdt_obj2dt(mo));
+ if (version && version != -EOPNOTSUPP) {
+ repbody->mbo_valid |= OBD_MD_FLDATAVERSION;
+ /* re-use mbo_ioepoch to transfer version */
+ repbody->mbo_version = version;
+ }
+
+ /* Read layout to get its version */
+ rc = mdt_big_xattr_get(mti, mo, XATTR_NAME_LOV);
+ if (rc == -ENODATA) /* File has no layout yet */
+ GOTO(out, rc = 0);
+ else if (rc < 0)
+ GOTO(out, rc);
+
+ comp = mti->mti_buf.lb_buf;
+ if (le32_to_cpu(comp->lcm_magic) != LOV_MAGIC_COMP_V1) {
+ CDEBUG(D_INFO, DFID" has no composite layout\n",
+ PFID(&tsi->tsi_fid));
+ GOTO(out, rc = -ESTALE);
+ }
+
+ CDEBUG(D_INODE, DFID": layout version: %u\n",
+ PFID(&tsi->tsi_fid), le32_to_cpu(comp->lcm_layout_gen));
+
+ repbody->mbo_valid |= OBD_MD_LAYOUT_VERSION;
+ /* re-use mbo_rdev for that */
+ repbody->mbo_layout_gen = le32_to_cpu(comp->lcm_layout_gen);
+ rc = 0;
+out:
+ if (srvlock)
+ tgt_mdt_data_unlock(&lh, lock_mode);
+
+ repbody->mbo_valid |= OBD_MD_FLFLAGS;
+ repbody->mbo_flags = OBD_FL_FLUSH;
+ RETURN(rc);
+}
+
+/* read file data to the buffer */
+int mdt_dom_read_on_open(struct mdt_thread_info *mti, struct mdt_device *mdt,
+ struct lustre_handle *lh)
+{
+ const struct lu_env *env = mti->mti_env;
+ struct tgt_session_info *tsi = tgt_ses_info(env);
+ struct req_capsule *pill = tsi->tsi_pill;
+ const struct lu_fid *fid;
+ struct ptlrpc_request *req = tgt_ses_req(tsi);
+ struct mdt_body *mbo;
+ struct dt_device *dt = mdt->mdt_bottom;
+ struct dt_object *mo;
+ void *buf;
+ struct niobuf_remote *rnb = NULL;
+ struct niobuf_local *lnb;
+ int rc;
+ loff_t offset;
+ unsigned int len, copied = 0;
+ int lnbs, nr_local, i;
+ bool dom_lock = false;
+
+ ENTRY;
+
+ if (!req_capsule_field_present(pill, &RMF_NIOBUF_INLINE, RCL_SERVER)) {
+ /* There are no reply buffers for this field, which means that
+ * the client has no support for data in the reply.
+ */
+ RETURN(0);
+ }
+
+ mbo = req_capsule_server_get(pill, &RMF_MDT_BODY);
+
+ if (lustre_handle_is_used(lh)) {
+ struct ldlm_lock *lock;
+
+ lock = ldlm_handle2lock(lh);
+ if (lock) {
+ dom_lock = ldlm_has_dom(lock) && ldlm_has_layout(lock);
+ LDLM_LOCK_PUT(lock);
+ }
+ }
+
+ /* return data along with the open reply only under a DoM lock */
+ if (!dom_lock || !mdt->mdt_opts.mo_dom_read_open)
+ RETURN(0);
+
+ if (!(mbo->mbo_valid & OBD_MD_DOM_SIZE))
+ RETURN(0);
+
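+ /* an empty file, nothing to return */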
+ if (mbo->mbo_dom_size == 0)
+ RETURN(0);
+
+ CDEBUG(D_INFO, "File size %llu, reply sizes %d/%d\n",
+ mbo->mbo_dom_size, req->rq_reqmsg->lm_repsize, req->rq_replen);
+ len = req->rq_reqmsg->lm_repsize - req->rq_replen;
+
+ /* NB: at this moment we have the following sizes:
+ * - req->rq_replen: used data in the reply
+ * - req->rq_reqmsg->lm_repsize: total reply buffer allocated at client
+ *
+ * The ideal case is when the file size fits in the allocated reply
+ * buffer, which means we can return all the data in the reply. We can
+ * also fit more data up to max_reply_size in the total reply size,
+ * but this causes a re-allocation on the client and a resend with a
+ * larger buffer. That is still faster than a separate READ IO.
+ * The third case is when the file is too big to fit even in the
+ * maximum size; in that case we return just the tail, to optimize a
+ * possible append.
+ *
+ * At the moment the following strategy is used:
+ * 1) try to fit into the buffer we have
+ * 2) return just the file tail otherwise.
+ */
+ if (mbo->mbo_dom_size <= len) {
+ /* can fit whole data */
+ len = mbo->mbo_dom_size;
+ offset = 0;
+ } else {
+ int tail, pgbits;
+
+ /* The file tail offset must be aligned with the larger of the
+ * client and server page sizes, so the maximum page size is
+ * used here to align the offset.
+ *
+ * NB: the DOM feature was introduced when the server already
+ * supported pagebits, so ted_pagebits should always be non-zero.
+ * Report an error if it is not, for whatever reason.
+ */
+ if (!req->rq_export->exp_target_data.ted_pagebits) {
+ CERROR("%s: client page bits are not saved on server\n",
+ mdt_obd_name(mdt));
+ RETURN(0);
+ }
+ pgbits = max_t(int, PAGE_SHIFT,
+ req->rq_export->exp_target_data.ted_pagebits);
+ tail = mbo->mbo_dom_size % (1 << pgbits);
+
+ /* no partial tail, or the tail can't fit into the reply */
+ if (tail == 0 || len < tail)
+ RETURN(0);
+
+ len = tail;
+ offset = mbo->mbo_dom_size - len;
+ }
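+ /* the offset is page-aligned: it is either 0 or the file size
+ * rounded down to the larger of the client and server page sizes */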
+ LASSERT((offset % PAGE_SIZE) == 0);
+ rc = req_capsule_server_grow(pill, &RMF_NIOBUF_INLINE,
+ sizeof(*rnb) + len);
+ if (rc != 0) {
+ /* failed to grow data buffer, just exit */
+ GOTO(out, rc = -E2BIG);
+ }
+
+ /* re-take MDT_BODY and NIOBUF_INLINE buffers after the buffer grow */
+ mbo = req_capsule_server_get(pill, &RMF_MDT_BODY);
+ fid = &mbo->mbo_fid1;
+ if (!fid_is_sane(fid))
+ GOTO(out, rc = -EINVAL);
+
+ rnb = req_capsule_server_get(tsi->tsi_pill, &RMF_NIOBUF_INLINE);
+ if (rnb == NULL)
+ GOTO(out, rc = -EPROTO);
+
+ buf = (char *)rnb + sizeof(*rnb);
+ rnb->rnb_len = len;
+ rnb->rnb_offset = offset;
+
+ mo = dt_locate(env, dt, fid);
+ if (IS_ERR(mo))
+ GOTO(out_rnb, rc = PTR_ERR(mo));
+ LASSERT(mo != NULL);
+
+ dt_read_lock(env, mo, 0);
+ if (!dt_object_exists(mo))
+ GOTO(unlock, rc = -ENOENT);
+
+ /* parse remote buffers to local buffers and prepare the latter */
+ lnbs = (len >> PAGE_SHIFT) + 1;
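+ /* one extra slot covers a partial last page */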
+ OBD_ALLOC(lnb, sizeof(*lnb) * lnbs);
+ if (lnb == NULL)
+ GOTO(unlock, rc = -ENOMEM);
+
+ rc = dt_bufs_get(env, mo, rnb, lnb, lnbs, 0);
+ if (unlikely(rc < 0))
+ GOTO(free, rc);
+ LASSERT(rc <= lnbs);
+ nr_local = rc;
+ rc = dt_read_prep(env, mo, lnb, nr_local);
+ if (unlikely(rc))
+ GOTO(buf_put, rc);
+ /* copy data to the buffer finally */
+ for (i = 0; i < nr_local; i++) {
+ char *p = kmap(lnb[i].lnb_page);
+ long off;
+
+ LASSERT(lnb[i].lnb_page_offset == 0);
+ off = lnb[i].lnb_len & ~PAGE_MASK;
+ if (off > 0)
+ memset(p + off, 0, PAGE_SIZE - off);
+
+ memcpy(buf + (i << PAGE_SHIFT), p, lnb[i].lnb_len);
+ kunmap(lnb[i].lnb_page);
+ copied += lnb[i].lnb_len;
+ LASSERT(copied <= len);
+ }
+ CDEBUG(D_INFO, "Read %i (wanted %u) bytes from %llu\n", copied,
+ len, offset);
+ if (copied < len) {
+ CWARN("%s: read %i bytes for "DFID
+ " but wanted %u, is size wrong?\n",
+ tsi->tsi_exp->exp_obd->obd_name, copied,
+ PFID(&tsi->tsi_fid), len);
+ /* Ignore partially copied data */
+ copied = 0;
+ }
+ EXIT;
+buf_put:
+ dt_bufs_put(env, mo, lnb, nr_local);
+free:
+ OBD_FREE(lnb, sizeof(*lnb) * lnbs);
+unlock:
+ dt_read_unlock(env, mo);
+ lu_object_put(env, &mo->do_lu);
+out_rnb:
+ rnb->rnb_len = copied;
+out:
+ /* Don't fail the OPEN request if read-on-open failed, but leave
+ * a message in the log about the error.
+ */
+ if (rc)
+ CDEBUG(D_INFO, "Read-on-open is failed, rc = %d", rc);
+
+ RETURN(0);
+}
+
+/**
+ * Completion AST for DOM discard locks:
+ *
+ * The CP AST on a DOM discard lock is always called right after
+ * enqueue, or from reprocessing if the lock was blocked; in the latter
+ * case l_ast_data is set to the mdt_object, which is kept while there
+ * are pending locks on it.
+ */
+int ldlm_dom_discard_cp_ast(struct ldlm_lock *lock, __u64 flags, void *data)
+{
+ struct mdt_object *mo;
+ struct lustre_handle dom_lh;
+ struct lu_env *env;
+
+ ENTRY;
+
+ /* l_ast_data is set when the lock was not granted immediately in
+ * mdt_dom_discard_data() below but was put on the waiting list, so
+ * this CP callback means we are finished and the corresponding MDT
+ * object should finally be released, as well as the lock itself.
+ */
+ lock_res_and_lock(lock);
+ if (!lock->l_ast_data) {
+ unlock_res_and_lock(lock);
+ RETURN(0);
+ }
+
+ mo = lock->l_ast_data;
+ lock->l_ast_data = NULL;
+ unlock_res_and_lock(lock);
+
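+ /* drop the reference taken at enqueue in mdt_dom_discard_data() */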
+ ldlm_lock2handle(lock, &dom_lh);
+ ldlm_lock_decref(&dom_lh, LCK_PW);
+
+ env = lu_env_find();
+ LASSERT(env);
+ mdt_object_put(env, mo);
+
+ RETURN(0);
+}
+
+void mdt_dom_discard_data(struct mdt_thread_info *info,
+ struct mdt_object *mo)
+{
+ struct ptlrpc_request *req = mdt_info_req(info);
+ struct mdt_device *mdt = mdt_dev(mo->mot_obj.lo_dev);
+ union ldlm_policy_data policy;
+ struct ldlm_res_id res_id;
+ struct lustre_handle dom_lh;
+ struct ldlm_lock *lock;
+ __u64 flags = LDLM_FL_AST_DISCARD_DATA;
+ int rc = 0;
+ bool old_client;
+
+ ENTRY;
+
+ if (req && req_is_replay(req))
+ RETURN_EXIT;
+
+ policy.l_inodebits.bits = MDS_INODELOCK_DOM;
+ policy.l_inodebits.try_bits = 0;
+ fid_build_reg_res_name(mdt_object_fid(mo), &res_id);
+
+ /* Keep the blocking version of discard for old clients to avoid
+ * crashes on non-patched clients. See LU-11359.
+ */
+ old_client = req && !(exp_connect_flags2(req->rq_export) &
+ OBD_CONNECT2_ASYNC_DISCARD);
+
+ /* Tell the clients that the object is gone now and that they should
+ * throw away any cached pages. */
+ rc = ldlm_cli_enqueue_local(info->mti_env, mdt->mdt_namespace, &res_id,
+ LDLM_IBITS, &policy, LCK_PW, &flags,
+ ldlm_blocking_ast, old_client ?
+ ldlm_completion_ast :
+ ldlm_dom_discard_cp_ast,
+ NULL, NULL, 0, LVB_T_NONE, NULL, &dom_lh);
+ if (rc != ELDLM_OK) {
+ CDEBUG(D_DLMTRACE,
+ "Failed to issue discard lock, rc = %d\n", rc);
+ RETURN_EXIT;
+ }
+
+ lock = ldlm_handle2lock(&dom_lh);
+ lock_res_and_lock(lock);
+ /* If the lock is not granted then there are BL ASTs in progress and
+ * the lock will be granted as a result of reprocessing, with the CP
+ * callback notifying about that. The mdt object has to be kept until
+ * then, so it is saved in l_ast_data of the lock. A lock reference is
+ * kept as well to prevent the lock from being cancelled.
+ */
+ if (!is_granted_or_cancelled_nolock(lock)) {
+ mdt_object_get(info->mti_env, mo);
+ lock->l_ast_data = mo;
+ unlock_res_and_lock(lock);
+ } else {
+ unlock_res_and_lock(lock);
+ ldlm_lock_decref_and_cancel(&dom_lh, LCK_PW);
+ }
+ LDLM_LOCK_PUT(lock);
+
+ RETURN_EXIT;
+}