LU-11595 mdt: fix read-on-open for big PAGE_SIZE
[fs/lustre-release.git] / lustre/mdt/mdt_io.c
index caaadf7..3124c8c 100644
@@ -20,7 +20,7 @@
  * GPL HEADER END
  */
 /*
- * Copyright (c) 2012, 2017 Intel Corporation.
+ * Copyright (c) 2017, Intel Corporation.
  */
 /*
  * lustre/mdt/mdt_io.c
@@ -61,6 +61,320 @@ static inline void mdt_dom_write_unlock(struct mdt_object *mo)
        up_write(&mo->mot_dom_sem);
 }
 
+/**
+ * Lock prolongation for Data-on-MDT.
+ * This is similar to OFD code but for DOM ibits lock.
+ */
+static inline time64_t prolong_timeout(struct ptlrpc_request *req)
+{
+       struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
+       time64_t req_timeout;
+
+       if (AT_OFF)
+               return obd_timeout / 2;
+
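+       /* prolong by the larger of the adaptive timeout estimate and the
+        * time budget already granted to this request */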
+       req_timeout = req->rq_deadline - req->rq_arrival_time.tv_sec;
+       return max_t(time64_t, at_est2timeout(at_get(&svcpt->scp_at_estimate)),
+                    req_timeout);
+}
+
+static void mdt_dom_resource_prolong(struct ldlm_prolong_args *arg)
+{
+       struct ldlm_resource *res;
+       struct ldlm_lock *lock;
+
+       ENTRY;
+
+       res = ldlm_resource_get(arg->lpa_export->exp_obd->obd_namespace, NULL,
+                               &arg->lpa_resid, LDLM_EXTENT, 0);
+       if (IS_ERR(res)) {
+               CDEBUG(D_DLMTRACE,
+                      "Failed to get resource for resid %llu/%llu\n",
+                      arg->lpa_resid.name[0], arg->lpa_resid.name[1]);
+               RETURN_EXIT;
+       }
+
+       lock_res(res);
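+       /* find a granted lock with the DOM bit set and prolong it */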
+       list_for_each_entry(lock, &res->lr_granted, l_res_link) {
+               if (ldlm_has_dom(lock)) {
+                       LDLM_DEBUG(lock, "DOM lock to prolong ");
+                       ldlm_lock_prolong_one(lock, arg);
+                       break;
+               }
+       }
+       unlock_res(res);
+       ldlm_resource_putref(res);
+
+       EXIT;
+}
+
+static void mdt_prolong_dom_lock(struct tgt_session_info *tsi,
+                                struct ldlm_prolong_args *data)
+{
+       struct obdo *oa = &tsi->tsi_ost_body->oa;
+       struct ldlm_lock *lock;
+
+       ENTRY;
+
+       data->lpa_timeout = prolong_timeout(tgt_ses_req(tsi));
+       data->lpa_export = tsi->tsi_exp;
+       data->lpa_resid = tsi->tsi_resid;
+
+       CDEBUG(D_RPCTRACE, "Prolong DOM lock for req %p with x%llu\n",
+              tgt_ses_req(tsi), tgt_ses_req(tsi)->rq_xid);
+
+       if (oa->o_valid & OBD_MD_FLHANDLE) {
+               /* In most cases a request is covered by a single lock, so
+                * try the fast path first. */
+               lock = ldlm_handle2lock(&oa->o_handle);
+               if (lock != NULL) {
+                       LASSERT(lock->l_export == data->lpa_export);
+                       ldlm_lock_prolong_one(lock, data);
+                       lock->l_last_used = ktime_get();
+                       LDLM_LOCK_PUT(lock);
+                       if (data->lpa_locks_cnt > 0)
+                               RETURN_EXIT;
+               }
+       }
+       mdt_dom_resource_prolong(data);
+       EXIT;
+}
+
+static int mdt_rw_hpreq_lock_match(struct ptlrpc_request *req,
+                                  struct ldlm_lock *lock)
+{
+       struct obd_ioobj *ioo;
+       enum ldlm_mode mode;
+       __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
+
+       ENTRY;
+
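+       /* only locks carrying the DOM bit protect Data-on-MDT IO */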
+       if (!(lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_DOM))
+               RETURN(0);
+
+       ioo = req_capsule_client_get(&req->rq_pill, &RMF_OBD_IOOBJ);
+       LASSERT(ioo != NULL);
+
+       LASSERT(lock->l_resource != NULL);
+       if (!fid_res_name_eq(&ioo->ioo_oid.oi_fid, &lock->l_resource->lr_name))
+               RETURN(0);
+
+       /* a bulk write can only hold a reference on a PW DoM lock. */
+       mode = LCK_PW;
+       if (opc == OST_READ)
+               /* whereas a bulk read can be protected by either a PR or PW
+                * DoM lock */
+               mode |= LCK_PR;
+
+       if (!(lock->l_granted_mode & mode))
+               RETURN(0);
+
+       RETURN(1);
+}
+
+static int mdt_rw_hpreq_check(struct ptlrpc_request *req)
+{
+       struct tgt_session_info *tsi;
+       struct obd_ioobj *ioo;
+       struct niobuf_remote *rnb;
+       int opc;
+       struct ldlm_prolong_args pa = { 0 };
+
+       ENTRY;
+
+       /* Don't use tgt_ses_info() to get session info, because high-priority
+        * request checks can run before the request has a processing thread. */
+       tsi = lu_context_key_get(&req->rq_session, &tgt_session_key);
+
+       /*
+        * Use LASSERT below because malformed RPCs should have
+        * been filtered out in tgt_hpreq_handler().
+        */
+       opc = lustre_msg_get_opc(req->rq_reqmsg);
+       LASSERT(opc == OST_READ || opc == OST_WRITE);
+
+       ioo = req_capsule_client_get(&req->rq_pill, &RMF_OBD_IOOBJ);
+       LASSERT(ioo != NULL);
+
+       rnb = req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE);
+       LASSERT(rnb != NULL);
+       LASSERT(!(rnb->rnb_flags & OBD_BRW_SRVLOCK));
+
+       pa.lpa_mode = LCK_PW;
+       if (opc == OST_READ)
+               pa.lpa_mode |= LCK_PR;
+
+       DEBUG_REQ(D_RPCTRACE, req, "%s %s: refresh rw locks: "DFID"\n",
+                 tgt_name(tsi->tsi_tgt), current->comm, PFID(&tsi->tsi_fid));
+
+       mdt_prolong_dom_lock(tsi, &pa);
+
+       if (pa.lpa_blocks_cnt > 0) {
+               CDEBUG(D_DLMTRACE,
+                      "%s: refreshed timeout of %u locks for req %p\n",
+                      tgt_name(tsi->tsi_tgt), pa.lpa_blocks_cnt, req);
+               RETURN(1);
+       }
+
+       RETURN(pa.lpa_locks_cnt > 0 ? 0 : -ESTALE);
+}
+
+static void mdt_rw_hpreq_fini(struct ptlrpc_request *req)
+{
+       mdt_rw_hpreq_check(req);
+}
+
+static struct ptlrpc_hpreq_ops mdt_hpreq_rw = {
+       .hpreq_lock_match = mdt_rw_hpreq_lock_match,
+       .hpreq_check = mdt_rw_hpreq_check,
+       .hpreq_fini = mdt_rw_hpreq_fini
+};
+
+/**
+ * Assign high priority operations to an IO request.
+ *
+ * Check if the incoming request is a candidate for
+ * high-priority processing. If it is, assign it a high
+ * priority operations table.
+ *
+ * \param[in] tsi      target session environment for this request
+ */
+void mdt_hp_brw(struct tgt_session_info *tsi)
+{
+       struct niobuf_remote    *rnb;
+       struct obd_ioobj        *ioo;
+
+       ENTRY;
+
+       ioo = req_capsule_client_get(tsi->tsi_pill, &RMF_OBD_IOOBJ);
+       LASSERT(ioo != NULL); /* must exist after request preprocessing */
+       if (ioo->ioo_bufcnt > 0) {
+               rnb = req_capsule_client_get(tsi->tsi_pill, &RMF_NIOBUF_REMOTE);
+               LASSERT(rnb != NULL); /* must exist after preprocessing */
+
+               /* no high priority if a server-side lock is needed or the
+                * request is a replay */
+               if (rnb->rnb_flags & OBD_BRW_SRVLOCK ||
+                   (lustre_msg_get_flags(tgt_ses_req(tsi)->rq_reqmsg) &
+                    MSG_REPLAY))
+                       return;
+       }
+       tgt_ses_req(tsi)->rq_ops = &mdt_hpreq_rw;
+}
+
+static int mdt_punch_hpreq_lock_match(struct ptlrpc_request *req,
+                                     struct ldlm_lock *lock)
+{
+       struct tgt_session_info *tsi;
+       struct obdo *oa;
+
+       ENTRY;
+
+       /* Don't use tgt_ses_info() to get session info, because lock_match()
+        * can be called while the request has no processing thread yet. */
+       tsi = lu_context_key_get(&req->rq_session, &tgt_session_key);
+
+       /*
+        * Use LASSERT below because malformed RPCs should have
+        * been filtered out in tgt_hpreq_handler().
+        */
+       LASSERT(tsi->tsi_ost_body != NULL);
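+       /* fast path: check the lock handle carried in the request first */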
+       if (tsi->tsi_ost_body->oa.o_valid & OBD_MD_FLHANDLE &&
+           tsi->tsi_ost_body->oa.o_handle.cookie == lock->l_handle.h_cookie)
+               RETURN(1);
+
+       oa = &tsi->tsi_ost_body->oa;
+
+       LASSERT(lock->l_resource != NULL);
+       if (!fid_res_name_eq(&oa->o_oi.oi_fid, &lock->l_resource->lr_name))
+               RETURN(0);
+
+       if (!(lock->l_granted_mode & LCK_PW))
+               RETURN(0);
+
+       RETURN(1);
+}
+
+/**
+ * Implementation of ptlrpc_hpreq_ops::hpreq_check for OST_PUNCH request.
+ *
+ * High-priority queue request check for whether the given punch request
+ * (\a req) is blocking an LDLM lock cancel. Also checks whether the request is
+ * covered by an LDLM lock.
+ *
+ * \param[in] req      the incoming request
+ *
+ * \retval             1 if \a req is blocking an LDLM lock cancel
+ * \retval             0 if it is not
+ * \retval             -ESTALE if lock is not found
+ */
+static int mdt_punch_hpreq_check(struct ptlrpc_request *req)
+{
+       struct tgt_session_info *tsi;
+       struct obdo *oa;
+       struct ldlm_prolong_args pa = { 0 };
+
+       ENTRY;
+
+       /* Don't use tgt_ses_info() to get session info, because high-priority
+        * request checks can run before the request has a processing thread. */
+       tsi = lu_context_key_get(&req->rq_session, &tgt_session_key);
+       LASSERT(tsi != NULL);
+       oa = &tsi->tsi_ost_body->oa;
+
+       LASSERT(!(oa->o_valid & OBD_MD_FLFLAGS &&
+                 oa->o_flags & OBD_FL_SRVLOCK));
+
+       pa.lpa_mode = LCK_PW;
+
+       CDEBUG(D_DLMTRACE, "%s: refresh DOM lock for "DFID"\n",
+              tgt_name(tsi->tsi_tgt), PFID(&tsi->tsi_fid));
+
+       mdt_prolong_dom_lock(tsi, &pa);
+
+       if (pa.lpa_blocks_cnt > 0) {
+               CDEBUG(D_DLMTRACE,
+                      "%s: refreshed timeout of %u locks for req %p\n",
+                      tgt_name(tsi->tsi_tgt), pa.lpa_blocks_cnt, req);
+               RETURN(1);
+       }
+
+       RETURN(pa.lpa_locks_cnt > 0 ? 0 : -ESTALE);
+}
+
+/**
+ * Implementation of ptlrpc_hpreq_ops::hpreq_fini for OST_PUNCH request.
+ *
+ * Called after the request has been handled. It refreshes the lock timeout
+ * again so that the client has more time to send a lock cancel RPC.
+ *
+ * \param[in] req      request which is being processed.
+ */
+static void mdt_punch_hpreq_fini(struct ptlrpc_request *req)
+{
+       mdt_punch_hpreq_check(req);
+}
+
+static struct ptlrpc_hpreq_ops mdt_hpreq_punch = {
+       .hpreq_lock_match = mdt_punch_hpreq_lock_match,
+       .hpreq_check = mdt_punch_hpreq_check,
+       .hpreq_fini = mdt_punch_hpreq_fini
+};
+
+void mdt_hp_punch(struct tgt_session_info *tsi)
+{
+       LASSERT(tsi->tsi_ost_body != NULL); /* must exist if we are here */
+       /* no high priority if a server-side lock is needed, the connection
+        * is from another MDS, or the request is a replay */
+       if ((tsi->tsi_ost_body->oa.o_valid & OBD_MD_FLFLAGS &&
+            tsi->tsi_ost_body->oa.o_flags & OBD_FL_SRVLOCK) ||
+           tgt_conn_flags(tsi) & OBD_CONNECT_MDS ||
+           lustre_msg_get_flags(tgt_ses_req(tsi)->rq_reqmsg) & MSG_REPLAY)
+               return;
+       tgt_ses_req(tsi)->rq_ops = &mdt_hpreq_punch;
+}
+
 static int mdt_preprw_read(const struct lu_env *env, struct obd_export *exp,
                           struct mdt_device *mdt, struct mdt_object *mo,
                           struct lu_attr *la, int niocount,
@@ -292,6 +606,7 @@ retry:
                        GOTO(out_stop, rc);
        }
 
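+       /* save the object for Version Based Recovery */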
+       tgt_vbr_obj_set(env, dob);
        rc = dt_trans_start(env, dt, th);
        if (rc)
                GOTO(out_stop, rc);
@@ -417,8 +732,15 @@ int mdt_obd_commitrw(const struct lu_env *env, int cmd, struct obd_export *exp,
                                        oa->o_flags = OBD_FL_NO_GRPQUOTA;
                        }
 
+                       if (lnb[0].lnb_flags & OBD_BRW_OVER_PRJQUOTA) {
+                               if (oa->o_valid & OBD_MD_FLFLAGS)
+                                       oa->o_flags |= OBD_FL_NO_PRJQUOTA;
+                               else
+                                       oa->o_flags = OBD_FL_NO_PRJQUOTA;
+                       }
+
                        oa->o_valid |= OBD_MD_FLFLAGS | OBD_MD_FLUSRQUOTA |
-                                      OBD_MD_FLGRPQUOTA;
+                                      OBD_MD_FLGRPQUOTA | OBD_MD_FLPRJQUOTA;
                }
        } else if (cmd == OBD_BRW_READ) {
                /* If oa != NULL then mdt_preprw_read updated the inode
@@ -550,9 +872,18 @@ int mdt_punch_hdl(struct tgt_session_info *tsi)
        if (IS_ERR(mo))
                GOTO(out_unlock, rc = PTR_ERR(mo));
 
-       mdt_dom_write_lock(mo);
        if (!mdt_object_exists(mo))
                GOTO(out_put, rc = -ENOENT);
+
+       /* Shouldn't happen on dirs */
+       if (S_ISDIR(lu_object_attr(&mo->mot_obj))) {
+               rc = -EPERM;
+               CERROR("%s: Truncate on dir "DFID": rc = %d\n",
+                      exp->exp_obd->obd_name, PFID(&tsi->tsi_fid), rc);
+               GOTO(out_put, rc);
+       }
+
+       mdt_dom_write_lock(mo);
        dob = mdt_obj2dt(mo);
 
        la_from_obdo(la, oa, OBD_MD_FLMTIME | OBD_MD_FLATIME | OBD_MD_FLCTIME);
@@ -573,7 +904,7 @@ out_put:
        lu_object_put(tsi->tsi_env, &mo->mot_obj);
 out_unlock:
        if (srvlock)
-               mdt_save_lock(info, &lh, LCK_PW, rc);
+               tgt_extent_unlock(&lh, LCK_PW);
 out:
        mdt_thread_info_fini(info);
        return rc;
@@ -601,7 +932,7 @@ int mdt_do_glimpse(const struct lu_env *env, struct ldlm_namespace *ns,
 
        /* There can be only one write lock covering data, try to match it. */
        policy.l_inodebits.bits = MDS_INODELOCK_DOM;
-       mode = ldlm_lock_match(ns, LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK,
+       mode = ldlm_lock_match(ns, LDLM_FL_TEST_LOCK,
                               &res->lr_name, LDLM_IBITS, &policy,
                               LCK_PW, &lockh, 0);
 
@@ -664,8 +995,8 @@ static void mdt_lvb2body(struct ldlm_resource *res, struct mdt_body *mb)
 
        lock_res(res);
        res_lvb = res->lr_lvb_data;
-       mb->mbo_size = res_lvb->lvb_size;
-       mb->mbo_blocks = res_lvb->lvb_blocks;
+       mb->mbo_dom_size = res_lvb->lvb_size;
+       mb->mbo_dom_blocks = res_lvb->lvb_blocks;
        mb->mbo_mtime = res_lvb->lvb_mtime;
        mb->mbo_ctime = res_lvb->lvb_ctime;
        mb->mbo_atime = res_lvb->lvb_atime;
@@ -673,7 +1004,7 @@ static void mdt_lvb2body(struct ldlm_resource *res, struct mdt_body *mb)
        CDEBUG(D_DLMTRACE, "size %llu\n", res_lvb->lvb_size);
 
        mb->mbo_valid |= OBD_MD_FLATIME | OBD_MD_FLCTIME | OBD_MD_FLMTIME |
-                        OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
+                        OBD_MD_DOM_SIZE;
        unlock_res(res);
 }
 
@@ -697,23 +1028,14 @@ int mdt_dom_object_size(const struct lu_env *env, struct mdt_device *mdt,
        fid_build_reg_res_name(fid, &resid);
        res = ldlm_resource_get(mdt->mdt_namespace, NULL, &resid,
                                LDLM_IBITS, 1);
-       if (IS_ERR(res) || res->lr_lvb_data == NULL)
+       if (IS_ERR(res))
                RETURN(-ENOENT);
 
-       /* if there is no DOM bit in the lock then glimpse is needed
-        * to return valid size */
-       if (!dom_lock) {
-               rc = mdt_do_glimpse(env, mdt->mdt_namespace, res);
-               if (rc < 0)
-                       GOTO(out, rc);
-       }
-
        /* Update lvbo data if DoM lock returned or if LVB is not yet valid. */
        if (dom_lock || !mdt_dom_lvb_is_valid(res))
-               mdt_dom_lvbo_update(res, NULL, NULL, false);
+               mdt_dom_lvbo_update(env, res, NULL, NULL, false);
 
        mdt_lvb2body(res, mb);
-out:
        ldlm_resource_putref(res);
        RETURN(rc);
 }
@@ -772,10 +1094,10 @@ int mdt_glimpse_enqueue(struct mdt_thread_info *mti, struct ldlm_namespace *ns,
        if (flags & LDLM_FL_RESENT) {
                rc = LDLM_ITER_CONTINUE;
        } else {
-               __u64 tmpflags = 0;
+               __u64 tmpflags = LDLM_FL_BLOCK_NOWAIT;
                enum ldlm_error err;
 
-               rc = policy(lock, &tmpflags, 0, &err, NULL);
+               rc = policy(lock, &tmpflags, LDLM_PROCESS_RESCAN, &err, NULL);
                check_res_locked(res);
        }
        unlock_res(res);
@@ -809,7 +1131,7 @@ int mdt_glimpse_enqueue(struct mdt_thread_info *mti, struct ldlm_namespace *ns,
 fill_mbo:
        /* LVB can be without valid data in case of DOM */
        if (!mdt_dom_lvb_is_valid(res))
-               mdt_dom_lvbo_update(res, lock, NULL, false);
+               mdt_dom_lvbo_update(mti->mti_env, res, lock, NULL, false);
        mdt_lvb2body(res, mbo);
        RETURN(rc);
 }
@@ -902,7 +1224,7 @@ void mdt_dom_discard_data(struct mdt_thread_info *info,
        struct ldlm_res_id *res_id = &info->mti_res_id;
        struct lustre_handle dom_lh;
        __u64 flags = LDLM_FL_AST_DISCARD_DATA;
-       __u64 rc = 0;
+       int rc = 0;
 
        policy->l_inodebits.bits = MDS_INODELOCK_DOM;
        policy->l_inodebits.try_bits = 0;
@@ -910,13 +1232,352 @@ void mdt_dom_discard_data(struct mdt_thread_info *info,
 
        /* Tell the clients that the object is gone now and that they should
         * throw away any cached pages. */
-       rc = ldlm_cli_enqueue_local(mdt->mdt_namespace, res_id, LDLM_IBITS,
-                                   policy, LCK_PW, &flags, ldlm_blocking_ast,
-                                   ldlm_completion_ast, NULL, NULL, 0,
-                                   LVB_T_NONE, NULL, &dom_lh);
+       rc = ldlm_cli_enqueue_local(info->mti_env, mdt->mdt_namespace, res_id,
+                                   LDLM_IBITS, policy, LCK_PW, &flags,
+                                   ldlm_blocking_ast, ldlm_completion_ast,
+                                   NULL, NULL, 0, LVB_T_NONE, NULL, &dom_lh);
 
        /* We only care about the side-effects, just drop the lock. */
        if (rc == ELDLM_OK)
-               ldlm_lock_decref(&dom_lh, LCK_PW);
+               ldlm_lock_decref_and_cancel(&dom_lh, LCK_PW);
+}
+
+/* check if the client already has a DoM lock for the given resource */
+bool mdt_dom_client_has_lock(struct mdt_thread_info *info,
+                            const struct lu_fid *fid)
+{
+       struct mdt_device *mdt = info->mti_mdt;
+       union ldlm_policy_data *policy = &info->mti_policy;
+       struct ldlm_res_id *res_id = &info->mti_res_id;
+       struct lustre_handle lockh;
+       enum ldlm_mode mode;
+       struct ldlm_lock *lock;
+       bool rc;
+
+       policy->l_inodebits.bits = MDS_INODELOCK_DOM;
+       fid_build_reg_res_name(fid, res_id);
+
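+       /* test-only match for a PW DoM lock on this resource; no reference
+        * is taken on the matched lock */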
+       mode = ldlm_lock_match(mdt->mdt_namespace, LDLM_FL_BLOCK_GRANTED |
+                              LDLM_FL_TEST_LOCK, res_id, LDLM_IBITS, policy,
+                              LCK_PW, &lockh, 0);
+
+       /* There is no PW DoM lock on this object; finished. */
+       if (mode == 0)
+               return false;
+
+       lock = ldlm_handle2lock(&lockh);
+       if (lock == NULL)
+               return false;
+
+       /* check if lock from the same client */
+       rc = (lock->l_export->exp_handle.h_cookie ==
+             info->mti_exp->exp_handle.h_cookie);
+       LDLM_LOCK_PUT(lock);
+       return rc;
+}
+
+/**
+ * MDT request handler for OST_GETATTR RPC.
+ *
+ * This is a data-specific request to get the object and layout versions
+ * under an IO lock. It is reliable only for Data-on-MDT files.
+ *
+ * \param[in] tsi target session environment for this request
+ *
+ * \retval 0 if successful
+ * \retval negative value on error
+ */
+int mdt_data_version_get(struct tgt_session_info *tsi)
+{
+       struct mdt_thread_info *mti = mdt_th_info(tsi->tsi_env);
+       struct mdt_device *mdt = mti->mti_mdt;
+       struct mdt_body *repbody;
+       struct mdt_object *mo = mti->mti_object;
+       struct lov_comp_md_v1 *comp;
+       struct lustre_handle lh = { 0 };
+       __u64 flags = 0;
+       __s64 version;
+       enum ldlm_mode lock_mode = LCK_PR;
+       bool srvlock;
+       int rc;
+
+       ENTRY;
+
+       req_capsule_set_size(tsi->tsi_pill, &RMF_MDT_MD, RCL_SERVER, 0);
+       req_capsule_set_size(tsi->tsi_pill, &RMF_ACL, RCL_SERVER, 0);
+       rc = req_capsule_server_pack(tsi->tsi_pill);
+       if (unlikely(rc != 0))
+               RETURN(err_serious(rc));
+
+       repbody = req_capsule_server_get(tsi->tsi_pill, &RMF_MDT_BODY);
+       if (repbody == NULL)
+               RETURN(-ENOMEM);
+
+       srvlock = tsi->tsi_mdt_body->mbo_valid & OBD_MD_FLFLAGS &&
+                 tsi->tsi_mdt_body->mbo_flags & OBD_FL_SRVLOCK;
+
+       if (srvlock) {
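+               /* OBD_FL_FLUSH needs a PW lock so that conflicting client
+                * locks are cancelled and dirty data is flushed first */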
+               if (unlikely(tsi->tsi_mdt_body->mbo_flags & OBD_FL_FLUSH))
+                       lock_mode = LCK_PW;
+
+               fid_build_reg_res_name(&tsi->tsi_fid, &tsi->tsi_resid);
+               rc = tgt_mdt_data_lock(mdt->mdt_namespace, &tsi->tsi_resid,
+                                      &lh, lock_mode, &flags);
+               if (rc != 0)
+                       RETURN(rc);
+       }
+
+       if (!mdt_object_exists(mo))
+               GOTO(out, rc = -ENOENT);
+       if (mdt_object_remote(mo))
+               GOTO(out, rc = -EREMOTE);
+       if (!S_ISREG(lu_object_attr(&mo->mot_obj)))
+               GOTO(out, rc = -EBADF);
+
+       /* Get version first */
+       version = dt_version_get(tsi->tsi_env, mdt_obj2dt(mo));
+       if (version && version != -EOPNOTSUPP) {
+               repbody->mbo_valid |= OBD_MD_FLDATAVERSION;
+               /* re-use mbo_ioepoch to transfer version */
+               repbody->mbo_version = version;
+       }
+
+       /* Read layout to get its version */
+       rc = mdt_big_xattr_get(mti, mo, XATTR_NAME_LOV);
+       if (rc == -ENODATA) /* File has no layout yet */
+               GOTO(out, rc = 0);
+       else if (rc < 0)
+               GOTO(out, rc);
+
+       comp = mti->mti_buf.lb_buf;
+       if (le32_to_cpu(comp->lcm_magic) != LOV_MAGIC_COMP_V1) {
+               CDEBUG(D_INFO, DFID" has no composite layout\n",
+                      PFID(&tsi->tsi_fid));
+               GOTO(out, rc = -ESTALE);
+       }
+
+       CDEBUG(D_INODE, DFID": layout version: %u\n",
+              PFID(&tsi->tsi_fid), le32_to_cpu(comp->lcm_layout_gen));
+
+       repbody->mbo_valid |= OBD_MD_LAYOUT_VERSION;
+       /* re-use mbo_rdev for that */
+       repbody->mbo_layout_gen = le32_to_cpu(comp->lcm_layout_gen);
+       rc = 0;
+out:
+       if (srvlock)
+               tgt_mdt_data_unlock(&lh, lock_mode);
+
+       repbody->mbo_valid |= OBD_MD_FLFLAGS;
+       repbody->mbo_flags = OBD_FL_FLUSH;
+       RETURN(rc);
+}
+
+/* read file data to the buffer */
+int mdt_dom_read_on_open(struct mdt_thread_info *mti, struct mdt_device *mdt,
+                        struct lustre_handle *lh)
+{
+       const struct lu_env *env = mti->mti_env;
+       struct tgt_session_info *tsi = tgt_ses_info(env);
+       struct req_capsule *pill = tsi->tsi_pill;
+       const struct lu_fid *fid;
+       struct ptlrpc_request *req = tgt_ses_req(tsi);
+       struct mdt_body *mbo;
+       struct dt_device *dt = mdt->mdt_bottom;
+       struct dt_object *mo;
+       void *buf;
+       struct niobuf_remote *rnb = NULL;
+       struct niobuf_local *lnb;
+       int rc;
+       int max_reply_len;
+       loff_t offset;
+       unsigned int len, copied = 0;
+       int lnbs, nr_local, i;
+       bool dom_lock = false;
+
+       ENTRY;
+
+       if (!req_capsule_field_present(pill, &RMF_NIOBUF_INLINE, RCL_SERVER)) {
+               /* There is no reply buffer for this field, which means that
+                * the client does not support data in the reply.
+                */
+               RETURN(0);
+       }
+
+       mbo = req_capsule_server_get(pill, &RMF_MDT_BODY);
+
+       if (lustre_handle_is_used(lh)) {
+               struct ldlm_lock *lock;
+
+               lock = ldlm_handle2lock(lh);
+               if (lock) {
+                       dom_lock = ldlm_has_dom(lock) && ldlm_has_layout(lock);
+                       LDLM_LOCK_PUT(lock);
+               }
+       }
+
+       /* return data along with the open reply only under a DoM lock */
+       if (!dom_lock || !mdt->mdt_opts.mo_dom_read_open)
+               RETURN(0);
+
+       if (!(mbo->mbo_valid & OBD_MD_DOM_SIZE))
+               RETURN(0);
+
+       if (mbo->mbo_dom_size == 0)
+               RETURN(0);
+
+       /* check the maximum size available in reply */
+       max_reply_len =
+               req->rq_rqbd->rqbd_svcpt->scp_service->srv_max_reply_size;
+
+       CDEBUG(D_INFO, "File size %llu, reply sizes %d/%d/%d\n",
+              mbo->mbo_dom_size, max_reply_len, req->rq_reqmsg->lm_repsize,
+              req->rq_replen);
+       len = req->rq_reqmsg->lm_repsize - req->rq_replen;
+       max_reply_len -= req->rq_replen;
+
+       /* NB: at this moment we have the following sizes:
+        * - req->rq_replen: used data in reply
+        * - req->rq_reqmsg->lm_repsize: total allocated reply buffer at client
+        * - max_reply_len: maximum reply size allowed by protocol
+        *
+        * The ideal case is when the file data fits in the allocated reply
+        * buffer, so the whole file can be returned in the reply. We can also
+        * fit more data, up to max_reply_len in total reply size, but that
+        * causes a re-allocation and resend with a larger buffer on the
+        * client. This is still faster than a separate READ IO.
+        * The third case is a file too big to fit even in the maximum reply
+        * size; then just the tail is returned to optimize a possible append.
+        *
+        * At the moment the following strategy is used:
+        * 1) try to fit into the buffer we have
+        * 2) respond with bigger buffer so client will re-allocate it and
+        *    resend (up to srv_max_reply_size value).
+        * 3) return just file tail otherwise.
+        */
+       if (mbo->mbo_dom_size <= len) {
+               /* can fit whole data */
+               len = mbo->mbo_dom_size;
+               offset = 0;
+       } else if (mbo->mbo_dom_size <= max_reply_len) {
+               /* It may be worth making this a tunable (on/off), because it
+                * causes a buffer re-allocation and a resend on the client.
+                */
+               len = mbo->mbo_dom_size;
+               offset = 0;
+       } else {
+               int tail, pgbits;
+
+               /* The file tail offset must be aligned to the larger of the
+                * client and server page sizes, so the maximum of the two is
+                * used here to align the offset.
+                *
+                * NB: the DOM feature was introduced after servers started
+                * saving client page bits, so the value should always be
+                * non-zero. Report an error if it is not for some reason.
+                */
+               if (!req->rq_export->exp_target_data.ted_pagebits) {
+                       CERROR("%s: client page bits are not saved on server\n",
+                              mdt_obd_name(mdt));
+                       RETURN(0);
+               }
+               pgbits = max_t(int, PAGE_SHIFT,
+                              req->rq_export->exp_target_data.ted_pagebits);
+               tail = mbo->mbo_dom_size % (1 << pgbits);
+
+               /* no partial tail or tail can't fit in reply */
+               if (tail == 0 || len < tail)
+                       RETURN(0);
+
+               len = tail;
+               offset = mbo->mbo_dom_size - len;
+       }
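+       /* offset is either 0 or mbo_dom_size rounded down to the larger of
+        * the client and server page sizes, so it is PAGE_SIZE aligned */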
+       LASSERT((offset % PAGE_SIZE) == 0);
+       rc = req_capsule_server_grow(pill, &RMF_NIOBUF_INLINE,
+                                    sizeof(*rnb) + len);
+       if (rc != 0) {
+               /* failed to grow data buffer, just exit */
+               GOTO(out, rc = -E2BIG);
+       }
+
+       /* re-take MDT_BODY and NIOBUF_INLINE buffers after the buffer grow */
+       mbo = req_capsule_server_get(pill, &RMF_MDT_BODY);
+       fid = &mbo->mbo_fid1;
+       if (!fid_is_sane(fid))
+               GOTO(out, rc = -EINVAL);
+
+       rnb = req_capsule_server_get(tsi->tsi_pill, &RMF_NIOBUF_INLINE);
+       if (rnb == NULL)
+               GOTO(out, rc = -EPROTO);
+
+       buf = (char *)rnb + sizeof(*rnb);
+       rnb->rnb_len = len;
+       rnb->rnb_offset = offset;
+
+       mo = dt_locate(env, dt, fid);
+       if (IS_ERR(mo))
+               GOTO(out_rnb, rc = PTR_ERR(mo));
+       LASSERT(mo != NULL);
+
+       dt_read_lock(env, mo, 0);
+       if (!dt_object_exists(mo))
+               GOTO(unlock, rc = -ENOENT);
+
+       /* parse remote buffers to local buffers and prepare the latter */
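+       /* one local niobuf per page plus one for a possible partial
+        * tail page */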
+       lnbs = (len >> PAGE_SHIFT) + 1;
+       OBD_ALLOC(lnb, sizeof(*lnb) * lnbs);
+       if (lnb == NULL)
+               GOTO(unlock, rc = -ENOMEM);
+
+       rc = dt_bufs_get(env, mo, rnb, lnb, 0);
+       if (unlikely(rc < 0))
+               GOTO(free, rc);
+       LASSERT(rc <= lnbs);
+       nr_local = rc;
+       rc = dt_read_prep(env, mo, lnb, nr_local);
+       if (unlikely(rc))
+               GOTO(buf_put, rc);
+       /* copy data to the buffer finally */
+       for (i = 0; i < nr_local; i++) {
+               char *p = kmap(lnb[i].lnb_page);
+               long off;
+
+               LASSERT(lnb[i].lnb_page_offset == 0);
+               off = lnb[i].lnb_len & ~PAGE_MASK;
+               if (off > 0)
+                       memset(p + off, 0, PAGE_SIZE - off);
+
+               memcpy(buf + (i << PAGE_SHIFT), p, lnb[i].lnb_len);
+               kunmap(lnb[i].lnb_page);
+               copied += lnb[i].lnb_len;
+               LASSERT(copied <= len);
+       }
+       CDEBUG(D_INFO, "Read %i (wanted %u) bytes from %llu\n", copied,
+              len, offset);
+       if (copied < len) {
+               CWARN("%s: read %i bytes for "DFID
+                     " but wanted %u, is size wrong?\n",
+                     tsi->tsi_exp->exp_obd->obd_name, copied,
+                     PFID(&tsi->tsi_fid), len);
+               /* Ignore partially copied data */
+               copied = 0;
+       }
+       EXIT;
+buf_put:
+       dt_bufs_put(env, mo, lnb, nr_local);
+free:
+       OBD_FREE(lnb, sizeof(*lnb) * lnbs);
+unlock:
+       dt_read_unlock(env, mo);
+       lu_object_put(env, &mo->do_lu);
+out_rnb:
+       rnb->rnb_len = copied;
+out:
+       /* Don't fail the OPEN request if read-on-open failed, but log
+        * a message about the error.
+        */
+       if (rc)
+               CDEBUG(D_INFO, "Read-on-open failed: rc = %d\n", rc);
+
+       RETURN(0);
 }