LU-9683 ptlrpc: fix argument misorder
[fs/lustre-release.git] / lustre/ptlrpc/client.c
index 16a72d3..4409de6 100644
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -27,7 +23,7 @@
  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2015, Intel Corporation.
+ * Copyright (c) 2011, 2016, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -83,31 +79,33 @@ EXPORT_SYMBOL(ptlrpc_init_client);
 /**
  * Return PortalRPC connection for remote uuid \a uuid
  */
-struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
+struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid,
+                                                   lnet_nid_t nid4refnet)
 {
-        struct ptlrpc_connection *c;
-        lnet_nid_t                self;
-        lnet_process_id_t         peer;
-        int                       err;
+       struct ptlrpc_connection *c;
+       lnet_nid_t                self;
+       struct lnet_process_id peer;
+       int                       err;
 
        /* ptlrpc_uuid_to_peer() initializes its 2nd parameter
         * before accessing its values. */
        /* coverity[uninit_use_in_call] */
-        err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
-        if (err != 0) {
-                CNETERR("cannot find peer %s!\n", uuid->uuid);
-                return NULL;
-        }
+       peer.nid = nid4refnet;
+       err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
+       if (err != 0) {
+               CNETERR("cannot find peer %s!\n", uuid->uuid);
+               return NULL;
+       }
 
-        c = ptlrpc_connection_get(peer, self, uuid);
-        if (c) {
-                memcpy(c->c_remote_uuid.uuid,
-                       uuid->uuid, sizeof(c->c_remote_uuid.uuid));
-        }
+       c = ptlrpc_connection_get(peer, self, uuid);
+       if (c) {
+               memcpy(c->c_remote_uuid.uuid,
+                      uuid->uuid, sizeof(c->c_remote_uuid.uuid));
+       }
 
-        CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);
+       CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);
 
-        return c;
+       return c;
 }
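
The new nid4refnet argument seeds peer.nid before ptlrpc_uuid_to_peer() runs, restricting the lookup to the network of that NID. A minimal sketch of a call site, assuming LNET_NID_ANY is the "no preference" sentinel (the surrounding variables are hypothetical):

	struct ptlrpc_connection *conn;

	/* No preferred network: the lookup may use any local interface. */
	conn = ptlrpc_uuid_to_connection(uuid, LNET_NID_ANY);
	if (conn == NULL)
		CERROR("no connection for %s\n", uuid->uuid);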
 
 /**
@@ -128,19 +126,21 @@ struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned nfrags, unsigned max_brw,
                (ptlrpc_is_bulk_desc_kvec(type) &&
                 ops->add_iov_frag != NULL));
 
+       OBD_ALLOC_PTR(desc);
+       if (desc == NULL)
+               return NULL;
        if (type & PTLRPC_BULK_BUF_KIOV) {
-               OBD_ALLOC(desc,
-                         offsetof(struct ptlrpc_bulk_desc,
-                                  bd_u.bd_kiov.bd_vec[nfrags]));
+               OBD_ALLOC_LARGE(GET_KIOV(desc),
+                               nfrags * sizeof(*GET_KIOV(desc)));
+               if (GET_KIOV(desc) == NULL)
+                       goto out;
        } else {
-               OBD_ALLOC(desc,
-                         offsetof(struct ptlrpc_bulk_desc,
-                                  bd_u.bd_kvec.bd_kvec[nfrags]));
+               OBD_ALLOC_LARGE(GET_KVEC(desc),
+                               nfrags * sizeof(*GET_KVEC(desc)));
+               if (GET_KVEC(desc) == NULL)
+                       goto out;
        }
 
-       if (!desc)
-               return NULL;
-
        spin_lock_init(&desc->bd_lock);
        init_waitqueue_head(&desc->bd_waitq);
        desc->bd_max_iov = nfrags;
@@ -154,9 +154,12 @@ struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned nfrags, unsigned max_brw,
        /* PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this
         * node. Negotiated ocd_brw_size will always be <= this number. */
        for (i = 0; i < PTLRPC_BULK_OPS_COUNT; i++)
-               LNetInvalidateHandle(&desc->bd_mds[i]);
+               LNetInvalidateMDHandle(&desc->bd_mds[i]);
 
        return desc;
+out:
+       OBD_FREE_PTR(desc);
+       return NULL;
 }
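
The descriptor and its fragment vector used to be one flexible-array allocation sized with offsetof(); they are now allocated separately so the nfrags-sized vector can go through OBD_ALLOC_LARGE(), which may fall back to vmalloc. The same pattern in self-contained kernel C (names here are hypothetical; kvmalloc_array() plays the OBD_ALLOC_LARGE() role):

	#include <linux/slab.h>
	#include <linux/mm.h>

	struct bulk_desc {
		struct kvec *vec;
		unsigned int max_frags;
	};

	static struct bulk_desc *bulk_desc_alloc(unsigned int nfrags)
	{
		struct bulk_desc *d;

		d = kzalloc(sizeof(*d), GFP_KERNEL);	/* small fixed part */
		if (!d)
			return NULL;

		/* large part: may be vmalloc-backed for big nfrags */
		d->vec = kvmalloc_array(nfrags, sizeof(*d->vec),
					GFP_KERNEL | __GFP_ZERO);
		if (!d->vec) {
			kfree(d);	/* unwind, as the 'out:' label does */
			return NULL;
		}
		d->max_frags = nfrags;
		return d;
	}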
 
 /**
@@ -207,7 +210,7 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
        LASSERT(page != NULL);
        LASSERT(pageoffset >= 0);
        LASSERT(len > 0);
-       LASSERT(pageoffset + len <= PAGE_CACHE_SIZE);
+       LASSERT(pageoffset + len <= PAGE_SIZE);
        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
 
        kiov = &BD_GET_KIOV(desc, desc->bd_iov_count);
@@ -215,7 +218,7 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
        desc->bd_nob += len;
 
        if (pin)
-               page_cache_get(page);
+               get_page(page);
 
        kiov->kiov_page = page;
        kiov->kiov_offset = pageoffset;
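
PAGE_CACHE_SIZE and page_cache_get() were removed from the kernel in v4.6; they had always been one-to-one aliases of the page-size API, so the substitution is mechanical. For reference, the old pagemap.h definitions (from memory of pre-4.6 kernels):

	#define PAGE_CACHE_SIZE          PAGE_SIZE
	#define page_cache_get(page)     get_page(page)
	#define page_cache_release(page) put_page(page)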
@@ -271,13 +274,12 @@ void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
                desc->bd_frag_ops->release_frags(desc);
 
        if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
-               OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc,
-                                       bd_u.bd_kiov.bd_vec[desc->bd_max_iov]));
+               OBD_FREE_LARGE(GET_KIOV(desc),
+                       desc->bd_max_iov * sizeof(*GET_KIOV(desc)));
        else
-               OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc,
-                                       bd_u.bd_kvec.bd_kvec[desc->
-                                               bd_max_iov]));
-
+               OBD_FREE_LARGE(GET_KVEC(desc),
+                       desc->bd_max_iov * sizeof(*GET_KVEC(desc)));
+       OBD_FREE_PTR(desc);
        EXIT;
 }
 EXPORT_SYMBOL(ptlrpc_free_bulk);
@@ -355,7 +357,7 @@ void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
 {
         unsigned int nl, oldnl;
         struct imp_at *at;
-        time_t now = cfs_time_current_sec();
+       time64_t now = ktime_get_real_seconds();
 
         LASSERT(req->rq_import);
 
@@ -369,9 +371,8 @@ void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
                 */
                CDEBUG((lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ?
                       D_ADAPTTO : D_WARNING,
-                      "Reported service time %u > total measured time "
-                      CFS_DURATION_T"\n", service_time,
-                      cfs_time_sub(now, req->rq_sent));
+                      "Reported service time %u > total measured time %lld\n",
+                      service_time, now - req->rq_sent);
                return;
        }
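
This is the y2038-safe timekeeping pattern applied throughout the patch: time_t and cfs_time_current_sec() become time64_t and ktime_get_real_seconds(), wrappers like cfs_time_sub() become plain subtraction printed with %lld, and (in after_reply() below) microsecond intervals come from ktime_us_delta(). Condensed:

	time64_t now = ktime_get_real_seconds();	/* 64-bit seconds */
	CDEBUG(D_INFO, "age %lld\n", now - req->rq_sent);

	ktime_t start = ktime_get_real();		/* ns resolution */
	/* ... work ... */
	s64 usecs = ktime_us_delta(ktime_get_real(), start);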
 
@@ -417,11 +418,11 @@ static int unpack_reply(struct ptlrpc_request *req)
 static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
 __must_hold(&req->rq_lock)
 {
-        struct ptlrpc_request *early_req;
-        time_t                 olddl;
-        int                    rc;
-        ENTRY;
+       struct ptlrpc_request *early_req;
+       time64_t olddl;
+       int rc;
 
+       ENTRY;
         req->rq_early = 0;
        spin_unlock(&req->rq_lock);
 
@@ -462,10 +463,10 @@ __must_hold(&req->rq_lock)
                           ptlrpc_at_get_net_latency(req);
 
        DEBUG_REQ(D_ADAPTTO, req,
-                 "Early reply #%d, new deadline in "CFS_DURATION_T"s "
-                 "("CFS_DURATION_T"s)", req->rq_early_count,
-                 cfs_time_sub(req->rq_deadline, cfs_time_current_sec()),
-                 cfs_time_sub(req->rq_deadline, olddl));
+                 "Early reply #%d, new deadline in %llds (%llds)",
+                 req->rq_early_count,
+                 req->rq_deadline - ktime_get_real_seconds(),
+                 req->rq_deadline - olddl);
 
        RETURN(rc);
 }
@@ -553,10 +554,10 @@ int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
                if (!msg) {
                        ptlrpc_request_cache_free(req);
                        return i;
-                }
-                req->rq_reqbuf = msg;
-                req->rq_reqbuf_len = size;
-                req->rq_pool = pool;
+               }
+               req->rq_reqbuf = msg;
+               req->rq_reqbuf_len = size;
+               req->rq_pool = pool;
                spin_lock(&pool->prp_lock);
                list_add_tail(&req->rq_list, &pool->prp_req_list);
        }
@@ -727,6 +728,8 @@ int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
        request->rq_reply_cbid.cbid_arg = request;
 
        request->rq_reply_deadline = 0;
+       request->rq_bulk_deadline = 0;
+       request->rq_req_deadline = 0;
        request->rq_phase = RQ_PHASE_NEW;
        request->rq_next_phase = RQ_PHASE_UNDEFINED;
 
@@ -738,6 +741,36 @@ int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
        lustre_msg_set_opc(request->rq_reqmsg, opcode);
        ptlrpc_assign_next_xid(request);
 
+       /* Let's set up req/reply/bulk unlink deadlines for this opcode. */
+       if (cfs_fail_val == opcode) {
+               time64_t *fail_t = NULL, *fail2_t = NULL;
+
+               if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK))
+                       fail_t = &request->rq_bulk_deadline;
+               else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK))
+                       fail_t = &request->rq_reply_deadline;
+               else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK))
+                       fail_t = &request->rq_req_deadline;
+               else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BOTH_UNLINK)) {
+                       fail_t = &request->rq_reply_deadline;
+                       fail2_t = &request->rq_bulk_deadline;
+               }
+
+               if (fail_t) {
+                       *fail_t = ktime_get_real_seconds() + LONG_UNLINK;
+
+                       if (fail2_t)
+                               *fail2_t = ktime_get_real_seconds() +
+                                          LONG_UNLINK;
+
+                       /*
+                        * The RPC is infected; let the test change the
+                        * fail_loc
+                        */
+                       msleep(4 * MSEC_PER_SEC);
+               }
+       }
+
        RETURN(0);
 
 out_ctx:
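
The deadlines injected above are test hooks: they stay armed only while the matching OBD_FAIL_PTLRPC_LONG_*_UNLINK fail_loc is set, and ptlrpc_check_set() (further down) zeroes them once the test clears it. A sketch of the test side, with placeholder values rather than anything taken from this patch:

	/* Test driver, schematically:
	 *   fail_loc = OBD_FAIL_PTLRPC_LONG_BULK_UNLINK;
	 *   fail_val = <opcode of the RPC to infect>;
	 *   ...issue the RPC: rq_bulk_deadline is pushed out by
	 *      LONG_UNLINK and the sender msleep()s 4 seconds...
	 *   fail_loc = 0;  -- ptlrpc_check_set() now clears the deadline
	 */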
@@ -1048,6 +1081,9 @@ void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
 {
        LASSERT(list_empty(&req->rq_set_chain));
 
+       if (req->rq_allow_intr)
+               set->set_allow_intr = 1;
+
        /* The set takes over the caller's request reference */
        list_add_tail(&req->rq_set_chain, &set->set_requests);
        req->rq_set = set;
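
Marking the set interruptible now happens here, once, instead of in each caller (the copy in ptlrpc_queue_wait() is removed later in this patch). Usage sketch:

	req->rq_allow_intr = 1;		/* this request may be interrupted */
	ptlrpc_set_add_req(set, req);	/* propagates to set->set_allow_intr */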
@@ -1152,7 +1188,7 @@ static int ptlrpc_import_delay_req(struct obd_import *imp,
                if (atomic_read(&imp->imp_inval_count) != 0) {
                         DEBUG_REQ(D_ERROR, req, "invalidate in flight");
                         *status = -EIO;
-                } else if (imp->imp_dlm_fake || req->rq_no_delay) {
+               } else if (req->rq_no_delay) {
                         *status = -EWOULDBLOCK;
                } else if (req->rq_allow_replay &&
                          (imp->imp_state == LUSTRE_IMP_REPLAY ||
@@ -1220,7 +1256,9 @@ static int ptlrpc_check_status(struct ptlrpc_request *req)
                lnet_nid_t nid = imp->imp_connection->c_peer.nid;
                __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
 
-               if (ptlrpc_console_allow(req))
+               /* -EAGAIN is normal when using POSIX flocks */
+               if (ptlrpc_console_allow(req) &&
+                   !(opc == LDLM_ENQUEUE && err == -EAGAIN))
                        LCONSOLE_ERROR_MSG(0x11, "%s: operation %s to node %s "
                                           "failed: rc = %d\n",
                                           imp->imp_obd->obd_name,
@@ -1256,7 +1294,7 @@ static void ptlrpc_save_versions(struct ptlrpc_request *req)
 
         LASSERT(versions);
         lustre_msg_set_versions(reqmsg, versions);
-        CDEBUG(D_INFO, "Client save versions ["LPX64"/"LPX64"]\n",
+       CDEBUG(D_INFO, "Client save versions [%#llx/%#llx]\n",
                versions[0], versions[1]);
 
         EXIT;
@@ -1272,7 +1310,7 @@ __u64 ptlrpc_known_replied_xid(struct obd_import *imp)
 
        req = list_entry(imp->imp_unreplied_list.next, struct ptlrpc_request,
                         rq_unreplied_list);
-       LASSERTF(req->rq_xid >= 1, "XID:"LPU64"\n", req->rq_xid);
+       LASSERTF(req->rq_xid >= 1, "XID:%llu\n", req->rq_xid);
 
        if (imp->imp_known_replied_xid < req->rq_xid - 1)
                imp->imp_known_replied_xid = req->rq_xid - 1;
@@ -1289,14 +1327,14 @@ __u64 ptlrpc_known_replied_xid(struct obd_import *imp)
  */
 static int after_reply(struct ptlrpc_request *req)
 {
-        struct obd_import *imp = req->rq_import;
-        struct obd_device *obd = req->rq_import->imp_obd;
-        int rc;
-        struct timeval work_start;
-       __u64 committed;
-        long timediff;
-        ENTRY;
+       struct obd_import *imp = req->rq_import;
+       struct obd_device *obd = req->rq_import->imp_obd;
+       ktime_t work_start;
+       u64 committed;
+       s64 timediff;
+       int rc;
 
+       ENTRY;
         LASSERT(obd != NULL);
         /* repbuf must be unlinked */
        LASSERT(!req->rq_receiving_reply && req->rq_reply_unlinked);
@@ -1322,8 +1360,8 @@ static int after_reply(struct ptlrpc_request *req)
                 RETURN(0);
         }
 
-       do_gettimeofday(&work_start);
-       timediff = cfs_timeval_sub(&work_start, &req->rq_sent_tv, NULL);
+       work_start = ktime_get_real();
+       timediff = ktime_us_delta(work_start, req->rq_sent_ns);
 
         /*
          * NB Until this point, the whole of the incoming message,
@@ -1348,7 +1386,7 @@ static int after_reply(struct ptlrpc_request *req)
        /* retry indefinitely on EINPROGRESS */
        if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
            ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
-               time_t  now = cfs_time_current_sec();
+               time64_t now = ktime_get_real_seconds();
 
                DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS");
                spin_lock(&req->rq_lock);
@@ -1499,7 +1537,7 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
                    pool_is_at_full_capacity())
                        RETURN(-ENOMEM);
 
-        if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()) &&
+       if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) &&
             (!req->rq_generation_set ||
              req->rq_import_generation == imp->imp_generation))
                 RETURN (0);
@@ -1519,8 +1557,7 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
                req->rq_waiting = 1;
                spin_unlock(&req->rq_lock);
 
-               DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
-                         "(%s != %s)", lustre_msg_get_status(req->rq_reqmsg),
+               DEBUG_REQ(D_HA, req, "req waiting for recovery: (%s != %s)",
                          ptlrpc_import_state_name(req->rq_send_state),
                          ptlrpc_import_state_name(imp->imp_state));
                LASSERT(list_empty(&req->rq_list));
@@ -1575,7 +1612,7 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
         }
 
        CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc"
-              " %s:%s:%d:"LPU64":%s:%d\n", current_comm(),
+              " %s:%s:%d:%llu:%s:%d\n", current_comm(),
               imp->imp_obd->obd_uuid.uuid,
               lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
               libcfs_nid2str(imp->imp_connection->c_peer.nid),
@@ -1651,8 +1688,14 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                                   rq_set_chain);
                struct obd_import *imp = req->rq_import;
                int unregistered = 0;
+               int async = 1;
                int rc = 0;
 
+               if (req->rq_phase == RQ_PHASE_COMPLETE) {
+                       list_move_tail(&req->rq_set_chain, &comp_reqs);
+                       continue;
+               }
+
                /* This schedule point is mainly for the ptlrpcd caller of this
                 * function.  Most ptlrpc sets are not long-lived and unbounded
                 * in length, but at the least the set used by the ptlrpcd is.
@@ -1669,46 +1712,62 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                        req->rq_status = -EINTR;
                        ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
 
+                       /* Since it is interpreted and we have to wait for
+                        * the reply to be unlinked, use sync mode. */
+                       async = 0;
+
                        GOTO(interpret, req->rq_status);
                }
 
-                if (req->rq_phase == RQ_PHASE_NEW &&
-                    ptlrpc_send_new_req(req)) {
-                        force_timer_recalc = 1;
-                }
+               if (req->rq_phase == RQ_PHASE_NEW && ptlrpc_send_new_req(req))
+                       force_timer_recalc = 1;
 
-                /* delayed send - skip */
-                if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
+               /* delayed send - skip */
+               if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
                        continue;
 
                /* delayed resend - skip */
                if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
-                   req->rq_sent > cfs_time_current_sec())
+                   req->rq_sent > ktime_get_real_seconds())
                        continue;
 
-                if (!(req->rq_phase == RQ_PHASE_RPC ||
-                      req->rq_phase == RQ_PHASE_BULK ||
-                      req->rq_phase == RQ_PHASE_INTERPRET ||
-                      req->rq_phase == RQ_PHASE_UNREGISTERING ||
-                      req->rq_phase == RQ_PHASE_COMPLETE)) {
-                        DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
-                        LBUG();
-                }
+               if (!(req->rq_phase == RQ_PHASE_RPC ||
+                     req->rq_phase == RQ_PHASE_BULK ||
+                     req->rq_phase == RQ_PHASE_INTERPRET ||
+                     req->rq_phase == RQ_PHASE_UNREG_RPC ||
+                     req->rq_phase == RQ_PHASE_UNREG_BULK)) {
+                       DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
+                       LBUG();
+               }
 
-                if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
-                        LASSERT(req->rq_next_phase != req->rq_phase);
-                        LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED);
+               if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
+                   req->rq_phase == RQ_PHASE_UNREG_BULK) {
+                       LASSERT(req->rq_next_phase != req->rq_phase);
+                       LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED);
+
+                       if (req->rq_req_deadline &&
+                           !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK))
+                               req->rq_req_deadline = 0;
+                       if (req->rq_reply_deadline &&
+                           !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK))
+                               req->rq_reply_deadline = 0;
+                       if (req->rq_bulk_deadline &&
+                           !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK))
+                               req->rq_bulk_deadline = 0;
 
-                        /*
-                         * Skip processing until reply is unlinked. We
-                         * can't return to pool before that and we can't
-                         * call interpret before that. We need to make
-                         * sure that all rdma transfers finished and will
-                         * not corrupt any data.
-                         */
-                        if (ptlrpc_client_recv_or_unlink(req) ||
-                            ptlrpc_client_bulk_active(req))
-                                continue;
+                       /*
+                        * Skip processing until reply is unlinked. We
+                        * can't return to pool before that and we can't
+                        * call interpret before that. We need to make
+                        * sure that all rdma transfers finished and will
+                        * not corrupt any data.
+                        */
+                       if (req->rq_phase == RQ_PHASE_UNREG_RPC &&
+                           ptlrpc_client_recv_or_unlink(req))
+                               continue;
+                       if (req->rq_phase == RQ_PHASE_UNREG_BULK &&
+                           ptlrpc_client_bulk_active(req))
+                               continue;
 
                         /*
                          * Turn fail_loc off to prevent it from looping
@@ -1730,11 +1789,6 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                         ptlrpc_rqphase_move(req, req->rq_next_phase);
                 }
 
-                if (req->rq_phase == RQ_PHASE_COMPLETE) {
-                       list_move_tail(&req->rq_set_chain, &comp_reqs);
-                        continue;
-               }
-
                 if (req->rq_phase == RQ_PHASE_INTERPRET)
                         GOTO(interpret, req->rq_status);
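
Two structural changes in this loop: requests already in RQ_PHASE_COMPLETE are now parked on comp_reqs at the top, before any phase handling, and the old catch-all RQ_PHASE_UNREGISTERING is split so each unlink type is gated independently. The mapping, restated:

	/* RQ_PHASE_UNREG_RPC  -- reply unlink, gated by
	 *                       ptlrpc_client_recv_or_unlink(req)
	 * RQ_PHASE_UNREG_BULK -- bulk unlink, gated by
	 *                       ptlrpc_client_bulk_active(req)
	 * Each phase drops its rq_*_deadline once the corresponding
	 * OBD_FAIL_PTLRPC_LONG_*_UNLINK fail_loc is no longer set. */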
 
@@ -1951,27 +2005,27 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                        req->rq_status = -EIO;
                }
 
-                ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
+               ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
 
-        interpret:
-                LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
+       interpret:
+               LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
 
-                /* This moves to "unregistering" phase we need to wait for
-                 * reply unlink. */
-                if (!unregistered && !ptlrpc_unregister_reply(req, 1)) {
-                        /* start async bulk unlink too */
-                        ptlrpc_unregister_bulk(req, 1);
-                        continue;
-                }
+               /* This moves the request to the "unregistering" phase;
+                * we need to wait for the reply to be unlinked. */
+               if (!unregistered && !ptlrpc_unregister_reply(req, async)) {
+                       /* start async bulk unlink too */
+                       ptlrpc_unregister_bulk(req, 1);
+                       continue;
+               }
 
-                if (!ptlrpc_unregister_bulk(req, 1))
-                        continue;
+               if (!ptlrpc_unregister_bulk(req, async))
+                       continue;
 
-                /* When calling interpret receiving already should be
-                 * finished. */
-                LASSERT(!req->rq_receiving_reply);
+               /* By the time interpret is called, receiving should
+                * already be finished. */
+               LASSERT(!req->rq_receiving_reply);
 
-                ptlrpc_req_interpret(env, req, req->rq_status);
+               ptlrpc_req_interpret(env, req, req->rq_status);
 
                if (ptlrpcd_check_work(req)) {
                        atomic_dec(&set->set_remaining);
@@ -1979,13 +2033,15 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                }
                ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
 
-               CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0,
-                       "Completed RPC pname:cluuid:pid:xid:nid:"
-                       "opc %s:%s:%d:"LPU64":%s:%d\n",
-                       current_comm(), imp->imp_obd->obd_uuid.uuid,
-                       lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
-                       libcfs_nid2str(imp->imp_connection->c_peer.nid),
-                       lustre_msg_get_opc(req->rq_reqmsg));
+               if (req->rq_reqmsg != NULL)
+                       CDEBUG(D_RPCTRACE,
+                              "Completed RPC pname:cluuid:pid:xid:nid:"
+                              "opc %s:%s:%d:%llu:%s:%d\n", current_comm(),
+                              imp->imp_obd->obd_uuid.uuid,
+                              lustre_msg_get_status(req->rq_reqmsg),
+                              req->rq_xid,
+                              libcfs_nid2str(imp->imp_connection->c_peer.nid),
+                              lustre_msg_get_opc(req->rq_reqmsg));
 
                spin_lock(&imp->imp_lock);
                /* Request already may be not on sending or delaying list. This
@@ -2048,17 +2104,16 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
        req->rq_timedout = 1;
        spin_unlock(&req->rq_lock);
 
-       DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent "CFS_DURATION_T
-                 "/real "CFS_DURATION_T"]",
+       DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent %lld/real %lld]",
                   req->rq_net_err ? "failed due to network error" :
                      ((req->rq_real_sent == 0 ||
-                       cfs_time_before(req->rq_real_sent, req->rq_sent) ||
-                       cfs_time_aftereq(req->rq_real_sent, req->rq_deadline)) ?
+                      req->rq_real_sent < req->rq_sent ||
+                      req->rq_real_sent >= req->rq_deadline) ?
                       "timed out for sent delay" : "timed out for slow reply"),
-                  req->rq_sent, req->rq_real_sent);
+                 (s64)req->rq_sent, (s64)req->rq_real_sent);
 
-        if (imp != NULL && obd_debug_peer_on_timeout)
-                LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer);
+       if (imp != NULL && obd_debug_peer_on_timeout)
+               LNetDebugPeer(imp->imp_connection->c_peer);
 
         ptlrpc_unregister_reply(req, async_unlink);
         ptlrpc_unregister_bulk(req, async_unlink);
@@ -2111,11 +2166,11 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
  */
 int ptlrpc_expired_set(void *data)
 {
-       struct ptlrpc_request_set       *set = data;
-       struct list_head                *tmp;
-       time_t                          now = cfs_time_current_sec();
-       ENTRY;
+       struct ptlrpc_request_set *set = data;
+       struct list_head *tmp;
+       time64_t now = ktime_get_real_seconds();
 
+       ENTRY;
        LASSERT(set != NULL);
 
        /*
@@ -2180,8 +2235,11 @@ static void ptlrpc_interrupted_set(void *data)
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);
 
+               if (req->rq_intr)
+                       continue;
+
                if (req->rq_phase != RQ_PHASE_RPC &&
-                   req->rq_phase != RQ_PHASE_UNREGISTERING &&
+                   req->rq_phase != RQ_PHASE_UNREG_RPC &&
                    !req->rq_allow_intr)
                        continue;
 
@@ -2194,13 +2252,13 @@ static void ptlrpc_interrupted_set(void *data)
  */
 int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
 {
-       struct list_head        *tmp;
-       time_t                   now = cfs_time_current_sec();
-       int                      timeout = 0;
-       struct ptlrpc_request   *req;
-       int                      deadline;
-       ENTRY;
+       struct list_head *tmp;
+       time64_t now = ktime_get_real_seconds();
+       int timeout = 0;
+       struct ptlrpc_request *req;
+       time64_t deadline;
 
+       ENTRY;
        list_for_each(tmp, &set->set_requests) {
                req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
 
@@ -2274,17 +2332,12 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
                 CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n",
                        set, timeout);
 
-               if (timeout == 0 && !signal_pending(current))
-                        /*
-                         * No requests are in-flight (ether timed out
-                         * or delayed), so we can allow interrupts.
-                         * We still want to block for a limited time,
-                         * so we allow interrupts during the timeout.
-                         */
-                       lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1),
-                                                   ptlrpc_expired_set,
-                                                   ptlrpc_interrupted_set, set);
-               else if (set->set_allow_intr)
+               if ((timeout == 0 && !signal_pending(current)) ||
+                   set->set_allow_intr)
+                       /* No requests are in-flight (either timed out
+                        * or delayed), so we can allow interrupts.
+                        * We still want to block for a limited time,
+                        * so we allow interrupts during the timeout. */
                        lwi = LWI_TIMEOUT_INTR_ALL(
                                        cfs_time_seconds(timeout ? timeout : 1),
                                        ptlrpc_expired_set,
@@ -2452,29 +2505,54 @@ void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
  * Drops one reference count for request \a request.
  * \a locked set indicates that caller holds import imp_lock.
  * Frees the request when the reference count reaches zero.
+ *
+ * \retval 1   the request is freed
+ * \retval 0   others still hold references on the request
  */
 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
 {
-        ENTRY;
-        if (request == NULL)
-                RETURN(1);
+       int count;
+       ENTRY;
 
-        if (request == LP_POISON ||
-            request->rq_reqmsg == LP_POISON) {
-                CERROR("dereferencing freed request (bug 575)\n");
-                LBUG();
-                RETURN(1);
-        }
+       if (!request)
+               RETURN(1);
 
-        DEBUG_REQ(D_INFO, request, "refcount now %u",
+       LASSERT(request != LP_POISON);
+       LASSERT(request->rq_reqmsg != LP_POISON);
+
+       DEBUG_REQ(D_INFO, request, "refcount now %u",
                  atomic_read(&request->rq_refcount) - 1);
 
-       if (atomic_dec_and_test(&request->rq_refcount)) {
-                __ptlrpc_free_req(request, locked);
-                RETURN(1);
-        }
+       spin_lock(&request->rq_lock);
+       count = atomic_dec_return(&request->rq_refcount);
+       LASSERTF(count >= 0, "Invalid ref count %d\n", count);
 
-        RETURN(0);
+       /* For an open RPC, the client does not know the EA size (LOV, ACL,
+        * and so on) until the reply arrives, so it has to reserve a very
+        * large reply buffer. Such a buffer is not released until the RPC
+        * is freed. Since the open RPC is replayable, it has to stay on
+        * the replay list until close. If a lot of files are opened
+        * concurrently, the client may therefore run out of memory.
+        *
+        * In fact, it is unnecessary to keep the reply buffer around for
+        * open replay: the related EAs have already been saved via
+        * mdc_save_lovea() before we get here. So it is safe to free the
+        * reply buffer earlier, before releasing the RPC, to avoid client
+        * OOM. LU-9514 */
+       if (count == 1 && request->rq_early_free_repbuf && request->rq_repbuf) {
+               spin_lock(&request->rq_early_free_lock);
+               sptlrpc_cli_free_repbuf(request);
+               request->rq_repbuf = NULL;
+               request->rq_repbuf_len = 0;
+               request->rq_repdata = NULL;
+               request->rq_reqdata_len = 0;
+               spin_unlock(&request->rq_early_free_lock);
+       }
+       spin_unlock(&request->rq_lock);
+
+       if (!count)
+               __ptlrpc_free_req(request, locked);
+
+       RETURN(!count);
 }
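
The refcount now acts at two thresholds: at 1 (only the replay list still holds the request) the oversized reply buffer is freed early; at 0 the request itself is freed. The shape of the pattern, with hypothetical names:

	int count = atomic_dec_return(&obj->ref);

	if (count == 1)
		obj_release_cache(obj);	/* hypothetical: shed what only the
					 * departed holders needed */
	else if (count == 0)
		obj_free(obj);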
 
 /**
@@ -2512,12 +2590,11 @@ static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
         */
        LASSERT(!in_interrupt());
 
-       /*
-        * Let's setup deadline for reply unlink.
-        */
-        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
-            async && request->rq_reply_deadline == 0)
-                request->rq_reply_deadline = cfs_time_current_sec()+LONG_UNLINK;
+       /* Let's set up a deadline for reply unlink. */
+       if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
+           async && request->rq_reply_deadline == 0 && cfs_fail_val == 0)
+               request->rq_reply_deadline = ktime_get_real_seconds() +
+                                            LONG_UNLINK;
 
         /*
          * Nothing left to do.
@@ -2533,10 +2610,8 @@ static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
         if (!ptlrpc_client_recv_or_unlink(request))
                 RETURN(1);
 
-        /*
-         * Move to "Unregistering" phase as reply was not unlinked yet.
-         */
-        ptlrpc_rqphase_move(request, RQ_PHASE_UNREGISTERING);
+       /* Move to "Unregistering" phase, as the reply was not unlinked yet. */
+       ptlrpc_rqphase_move(request, RQ_PHASE_UNREG_RPC);
 
         /*
          * Do not wait for unlink to finish.
@@ -2628,11 +2703,11 @@ void ptlrpc_free_committed(struct obd_import *imp)
 
         if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
             imp->imp_generation == imp->imp_last_generation_checked) {
-                CDEBUG(D_INFO, "%s: skip recheck: last_committed "LPU64"\n",
+               CDEBUG(D_INFO, "%s: skip recheck: last_committed %llu\n",
                        imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
                RETURN_EXIT;
         }
-        CDEBUG(D_RPCTRACE, "%s: committing for last_committed "LPU64" gen %d\n",
+       CDEBUG(D_RPCTRACE, "%s: committing for last_committed %llu gen %d\n",
                imp->imp_obd->obd_name, imp->imp_peer_committed_transno,
                imp->imp_generation);
 
@@ -2671,7 +2746,7 @@ void ptlrpc_free_committed(struct obd_import *imp)
                        continue;
                }
 
-                DEBUG_REQ(D_INFO, req, "commit (last_committed "LPU64")",
+               DEBUG_REQ(D_INFO, req, "commit (last_committed %llu)",
                           imp->imp_peer_committed_transno);
 free_req:
                ptlrpc_free_request(req);
@@ -2681,13 +2756,18 @@ free_req:
                GOTO(out, 0);
 
        list_for_each_entry_safe(req, saved, &imp->imp_committed_list,
-                                    rq_replay_list) {
+                                rq_replay_list) {
                LASSERT(req->rq_transno != 0);
-               if (req->rq_import_generation < imp->imp_generation) {
-                       DEBUG_REQ(D_RPCTRACE, req, "free stale open request");
-                       ptlrpc_free_request(req);
-               } else if (!req->rq_replay) {
-                       DEBUG_REQ(D_RPCTRACE, req, "free closed open request");
+               if (req->rq_import_generation < imp->imp_generation ||
+                   !req->rq_replay) {
+                       DEBUG_REQ(D_RPCTRACE, req, "free %s open request",
+                                 req->rq_import_generation <
+                                 imp->imp_generation ? "stale" : "closed");
+
+                       if (imp->imp_replay_cursor == &req->rq_replay_list)
+                               imp->imp_replay_cursor =
+                                       req->rq_replay_list.next;
+
                        ptlrpc_free_request(req);
                }
        }
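
Because a request on the committed list may be the one imp_replay_cursor points at, the cursor is advanced before the request is freed; otherwise later replay walks would resume from freed memory. The invariant in isolation (the actual list unlink presumably happens inside ptlrpc_free_request()):

	if (imp->imp_replay_cursor == &req->rq_replay_list)
		imp->imp_replay_cursor = req->rq_replay_list.next;
	ptlrpc_free_request(req);	/* unlinks rq_replay_list */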
@@ -2834,9 +2914,6 @@ int ptlrpc_queue_wait(struct ptlrpc_request *req)
                RETURN(-ENOMEM);
        }
 
-       if (req->rq_allow_intr)
-               set->set_allow_intr = 1;
-
        /* for distributed debugging */
        lustre_msg_set_status(req->rq_reqmsg, current_pid());
 
@@ -2894,7 +2971,7 @@ static int ptlrpc_replay_interpret(const struct lu_env *env,
                 LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
                          lustre_msg_get_transno(req->rq_repmsg) ||
                          lustre_msg_get_transno(req->rq_repmsg) == 0,
-                         LPX64"/"LPX64"\n",
+                        "%#llx/%#llx\n",
                          lustre_msg_get_transno(req->rq_reqmsg),
                          lustre_msg_get_transno(req->rq_repmsg));
         }
@@ -2910,8 +2987,8 @@ static int ptlrpc_replay_interpret(const struct lu_env *env,
         /* transaction number shouldn't be bigger than the latest replayed */
         if (req->rq_transno > lustre_msg_get_transno(req->rq_reqmsg)) {
                 DEBUG_REQ(D_ERROR, req,
-                          "Reported transno "LPU64" is bigger than the "
-                          "replayed one: "LPU64, req->rq_transno,
+                         "Reported transno %llu is bigger than the "
+                         "replayed one: %llu", req->rq_transno,
                           lustre_msg_get_transno(req->rq_reqmsg));
                 GOTO(out, rc = -EINVAL);
         }
@@ -3029,6 +3106,9 @@ int ptlrpc_replay_req(struct ptlrpc_request *req)
         DEBUG_REQ(D_HA, req, "REPLAY");
 
        atomic_inc(&req->rq_import->imp_replay_inflight);
+       spin_lock(&req->rq_lock);
+       req->rq_early_free_repbuf = 0;
+       spin_unlock(&req->rq_lock);
        ptlrpc_request_addref(req);     /* ptlrpcd needs a ref */
 
        ptlrpcd_add_req(req);
@@ -3142,7 +3222,7 @@ static spinlock_t ptlrpc_last_xid_lock;
 #define YEAR_2004 (1ULL << 30)
 void ptlrpc_init_xid(void)
 {
-       time_t now = cfs_time_current_sec();
+       time64_t now = ktime_get_real_seconds();
 
        spin_lock_init(&ptlrpc_last_xid_lock);
        if (now < YEAR_2004) {
@@ -3195,21 +3275,36 @@ void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req)
 
        LASSERT(bd != NULL);
 
-       if (!req->rq_resend) {
-               /* this request has a new xid, just use it as bulk matchbits */
-               req->rq_mbits = req->rq_xid;
-
-       } else { /* needs to generate a new matchbits for resend */
-               __u64   old_mbits = req->rq_mbits;
-
-               if ((bd->bd_import->imp_connect_data.ocd_connect_flags &
-                   OBD_CONNECT_BULK_MBITS) != 0)
+       /* Generate new matchbits for all resend requests, including
+        * resend replay. */
+       if (req->rq_resend) {
+               __u64 old_mbits = req->rq_mbits;
+
+               /* The first resend on -EINPROGRESS generates a new xid, so
+                * we could actually reuse rq_xid as rq_mbits in that case;
+                * however, it is hard to distinguish such a resend from a
+                * 'resend of the -EINPROGRESS resend'. To keep it simple,
+                * we opt to generate new mbits for all resend cases. */
+               if (OCD_HAS_FLAG(&bd->bd_import->imp_connect_data,
+                                BULK_MBITS)) {
                        req->rq_mbits = ptlrpc_next_xid();
-               else /* old version transfers rq_xid to peer as matchbits */
-                       req->rq_mbits = req->rq_xid = ptlrpc_next_xid();
-
-               CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n",
+               } else {
+                       /* Old version transfers rq_xid to peer as
+                        * matchbits. */
+                       spin_lock(&req->rq_import->imp_lock);
+                       list_del_init(&req->rq_unreplied_list);
+                       ptlrpc_assign_next_xid_nolock(req);
+                       spin_unlock(&req->rq_import->imp_lock);
+                       req->rq_mbits = req->rq_xid;
+               }
+               CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
                       old_mbits, req->rq_mbits);
+       } else if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
+               /* Request is being sent for the first time; use the xid
+                * as matchbits. */
+               req->rq_mbits = req->rq_xid;
+       } else {
+               /* Replay request, xid and matchbits have already been
+                * correctly assigned. */
+               return;
        }
 
        /* For multi-bulk RPCs, rq_mbits is the last mbits needed for bulks so
@@ -3217,6 +3312,14 @@ void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req)
         * see LU-1431 */
        req->rq_mbits += ((bd->bd_iov_count + LNET_MAX_IOV - 1) /
                          LNET_MAX_IOV) - 1;
+
+       /* Set rq_xid to rq_mbits to indicate the final bulk to an old
+        * server that does not support OBD_CONNECT_BULK_MBITS. LU-6808.
+        *
+        * It's ok to directly set the rq_xid here, since this xid bump
+        * won't affect the request position in unreplied list. */
+       if (!OCD_HAS_FLAG(&bd->bd_import->imp_connect_data, BULK_MBITS))
+               req->rq_xid = req->rq_mbits;
 }
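
A worked example of the multi-bulk adjustment: rq_mbits must end up naming the last matchbits of the RPC. With LNET_MAX_IOV at its usual value of 256 and, say, bd_iov_count = 600:

	/* (600 + 256 - 1) / 256 - 1 = 2, so three bulk transfers use
	 * matchbits m, m+1, m+2 and rq_mbits finishes at m+2.  Servers
	 * without OBD_CONNECT_BULK_MBITS match on xid instead, hence the
	 * final rq_xid = rq_mbits assignment above. */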
 
 /**
@@ -3267,9 +3370,8 @@ static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
 {
        /* re-initialize the req */
        req->rq_timeout         = obd_timeout;
-       req->rq_sent            = cfs_time_current_sec();
+       req->rq_sent            = ktime_get_real_seconds();
        req->rq_deadline        = req->rq_sent + req->rq_timeout;
-       req->rq_reply_deadline  = req->rq_deadline;
        req->rq_phase           = RQ_PHASE_INTERPRET;
        req->rq_next_phase      = RQ_PHASE_COMPLETE;
        req->rq_xid             = ptlrpc_next_xid();