LU-6684 lfsck: set the lfsck notify as interruptable
diff --git a/lustre/ptlrpc/client.c b/lustre/ptlrpc/client.c
index 091cbcb..bb7fe52 100644
@@ -27,7 +27,7 @@
  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2014, Intel Corporation.
+ * Copyright (c) 2011, 2015, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
 
 #include "ptlrpc_internal.h"
 
+const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops = {
+       .add_kiov_frag  = ptlrpc_prep_bulk_page_pin,
+       .release_frags  = ptlrpc_release_bulk_page_pin,
+};
+EXPORT_SYMBOL(ptlrpc_bulk_kiov_pin_ops);
+
+const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops = {
+       .add_kiov_frag  = ptlrpc_prep_bulk_page_nopin,
+       .release_frags  = ptlrpc_release_bulk_noop,
+};
+EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops);
+
+const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kvec_ops = {
+       .add_iov_frag = ptlrpc_prep_bulk_frag,
+};
+EXPORT_SYMBOL(ptlrpc_bulk_kvec_ops);
+
 static int ptlrpc_send_new_req(struct ptlrpc_request *req);
 static int ptlrpcd_check_work(struct ptlrpc_request *req);
 static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async);
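
For context: with these ops tables in place, page-based callers pick the pin or
nopin variant, kvec-based callers pass ptlrpc_bulk_kvec_ops, and fragments are
added through desc->bd_frag_ops. A minimal client-side sketch, not part of this
patch (the bulk type flags, portal, and page loop are illustrative):

	struct ptlrpc_bulk_desc *desc;
	int i;

	/* sketch: page-backed read sink whose pages are pinned */
	desc = ptlrpc_prep_bulk_imp(req, npages, 1,
				    PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
				    OST_BULK_PORTAL,
				    &ptlrpc_bulk_kiov_pin_ops);
	if (desc == NULL)
		RETURN(-ENOMEM);

	for (i = 0; i < npages; i++)
		desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0,
						 PAGE_CACHE_SIZE);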
@@ -97,23 +114,41 @@ struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
  * Allocate and initialize new bulk descriptor on the sender.
  * Returns pointer to the descriptor or NULL on error.
  */
-struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
-                                        unsigned type, unsigned portal)
+struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned nfrags, unsigned max_brw,
+                                        enum ptlrpc_bulk_op_type type,
+                                        unsigned portal,
+                                        const struct ptlrpc_bulk_frag_ops *ops)
 {
        struct ptlrpc_bulk_desc *desc;
        int i;
 
-       OBD_ALLOC(desc, offsetof(struct ptlrpc_bulk_desc, bd_iov[npages]));
+       /* ensure that only one of KIOV or IOVEC is set but not both */
+       LASSERT((ptlrpc_is_bulk_desc_kiov(type) &&
+                ops->add_kiov_frag != NULL) ||
+               (ptlrpc_is_bulk_desc_kvec(type) &&
+                ops->add_iov_frag != NULL));
+
+       if (type & PTLRPC_BULK_BUF_KIOV) {
+               OBD_ALLOC(desc,
+                         offsetof(struct ptlrpc_bulk_desc,
+                                  bd_u.bd_kiov.bd_vec[nfrags]));
+       } else {
+               OBD_ALLOC(desc,
+                         offsetof(struct ptlrpc_bulk_desc,
+                                  bd_u.bd_kvec.bd_kvec[nfrags]));
+       }
+
        if (!desc)
                return NULL;
 
        spin_lock_init(&desc->bd_lock);
        init_waitqueue_head(&desc->bd_waitq);
-       desc->bd_max_iov = npages;
+       desc->bd_max_iov = nfrags;
        desc->bd_iov_count = 0;
        desc->bd_portal = portal;
        desc->bd_type = type;
        desc->bd_md_count = 0;
+       desc->bd_frag_ops = (struct ptlrpc_bulk_frag_ops *) ops;
        LASSERT(max_brw > 0);
        desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT);
        /* PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this
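
The OBD_ALLOC sizes above use the offsetof idiom to allocate the descriptor
and its embedded fragment array in a single allocation: offsetof(struct S,
arr[n]) is the byte count up to and including n array slots. A standalone
illustration in plain C (hypothetical struct names):

	#include <stddef.h>
	#include <stdlib.h>

	struct frag { void *base; size_t len; };

	struct desc {
		int         max;
		struct frag vec[0];	/* flexible array member */
	};

	/* allocate a desc with room for n fragments in one allocation */
	static struct desc *desc_alloc(int n)
	{
		struct desc *d = calloc(1, offsetof(struct desc, vec[n]));

		if (d != NULL)
			d->max = n;
		return d;
	}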
@@ -126,21 +161,25 @@ struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
 
 /**
  * Prepare bulk descriptor for specified outgoing request \a req that
- * can fit \a npages * pages. \a type is bulk type. \a portal is where
+ * can fit \a nfrags * pages. \a type is bulk type. \a portal is where
 * the bulk is to be sent. Used on client-side.
 * Returns pointer to newly allocated and initialized bulk descriptor or NULL on
  * error.
  */
 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
-                                             unsigned npages, unsigned max_brw,
-                                             unsigned type, unsigned portal)
+                                             unsigned nfrags, unsigned max_brw,
+                                             unsigned int type,
+                                             unsigned portal,
+                                             const struct ptlrpc_bulk_frag_ops
+                                               *ops)
 {
        struct obd_import *imp = req->rq_import;
        struct ptlrpc_bulk_desc *desc;
 
        ENTRY;
-       LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
-       desc = ptlrpc_new_bulk(npages, max_brw, type, portal);
+       LASSERT(ptlrpc_is_bulk_op_passive(type));
+
+       desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops);
        if (desc == NULL)
                RETURN(NULL);
 
@@ -158,60 +197,90 @@ struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
 }
 EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);
 
-/*
- * Add a page \a page to the bulk descriptor \a desc.
- * Data to transfer in the page starts at offset \a pageoffset and
- * amount of data to transfer from the page is \a len
- */
 void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
-                            struct page *page, int pageoffset, int len, int pin)
+                            struct page *page, int pageoffset, int len,
+                            int pin)
 {
+       lnet_kiov_t *kiov;
+
        LASSERT(desc->bd_iov_count < desc->bd_max_iov);
        LASSERT(page != NULL);
        LASSERT(pageoffset >= 0);
        LASSERT(len > 0);
        LASSERT(pageoffset + len <= PAGE_CACHE_SIZE);
+       LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+
+       kiov = &BD_GET_KIOV(desc, desc->bd_iov_count);
 
        desc->bd_nob += len;
 
        if (pin)
                page_cache_get(page);
 
-       ptlrpc_add_bulk_page(desc, page, pageoffset, len);
+       kiov->kiov_page = page;
+       kiov->kiov_offset = pageoffset;
+       kiov->kiov_len = len;
+
+       desc->bd_iov_count++;
 }
 EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
 
-/**
- * Uninitialize and free bulk descriptor \a desc.
- * Works on bulk descriptors both from server and client side.
- */
-void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
+int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc,
+                         void *frag, int len)
+{
+       struct kvec *iovec;
+       ENTRY;
+
+       LASSERT(desc->bd_iov_count < desc->bd_max_iov);
+       LASSERT(frag != NULL);
+       LASSERT(len > 0);
+       LASSERT(ptlrpc_is_bulk_desc_kvec(desc->bd_type));
+
+       iovec = &BD_GET_KVEC(desc, desc->bd_iov_count);
+
+       desc->bd_nob += len;
+
+       iovec->iov_base = frag;
+       iovec->iov_len = len;
+
+       desc->bd_iov_count++;
+
+       RETURN(desc->bd_nob);
+}
+EXPORT_SYMBOL(ptlrpc_prep_bulk_frag);
+
+void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
 {
-       int i;
        ENTRY;
 
        LASSERT(desc != NULL);
        LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
        LASSERT(desc->bd_md_count == 0);         /* network hands off */
        LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
+       LASSERT(desc->bd_frag_ops != NULL);
 
-       sptlrpc_enc_pool_put_pages(desc);
+       if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
+               sptlrpc_enc_pool_put_pages(desc);
 
        if (desc->bd_export)
                class_export_put(desc->bd_export);
        else
                class_import_put(desc->bd_import);
 
-       if (unpin) {
-               for (i = 0; i < desc->bd_iov_count ; i++)
-                       page_cache_release(desc->bd_iov[i].kiov_page);
-       }
+       if (desc->bd_frag_ops->release_frags != NULL)
+               desc->bd_frag_ops->release_frags(desc);
+
+       if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
+               OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc,
+                                       bd_u.bd_kiov.bd_vec[desc->bd_max_iov]));
+       else
+               OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc,
+                                       bd_u.bd_kvec.bd_kvec[desc->bd_max_iov]));
 
-       OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc,
-                               bd_iov[desc->bd_max_iov]));
        EXIT;
 }
-EXPORT_SYMBOL(__ptlrpc_free_bulk);
+EXPORT_SYMBOL(ptlrpc_free_bulk);
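
Taken together: a kvec-type descriptor (created with a PTLRPC_BULK_BUF_KVEC
type and &ptlrpc_bulk_kvec_ops) is filled with raw memory fragments rather
than pages, and since ptlrpc_bulk_kvec_ops leaves release_frags NULL, the
caller keeps ownership of the fragment buffers. A hedged usage sketch (buffer
names are illustrative):

	/* each call returns the accumulated desc->bd_nob */
	rc = ptlrpc_prep_bulk_frag(desc, hdr_buf, hdr_len);
	rc = ptlrpc_prep_bulk_frag(desc, data_buf, data_len);
	/* rc == hdr_len + data_len: total bytes queued for transfer */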
 
 /**
  * Set server timelimit for this req, i.e. how long are we willing to wait
@@ -457,7 +526,7 @@ EXPORT_SYMBOL(ptlrpc_free_rq_pool);
 /**
  * Allocates, initializes and adds \a num_rq requests to the pool \a pool
  */
-void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
+int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
 {
         int i;
         int size = 1;
@@ -479,11 +548,11 @@ void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
                spin_unlock(&pool->prp_lock);
                req = ptlrpc_request_cache_alloc(GFP_NOFS);
                if (!req)
-                       return;
+                       return i;
                OBD_ALLOC_LARGE(msg, size);
                if (!msg) {
                        ptlrpc_request_cache_free(req);
-                       return;
+                       return i;
                 }
                 req->rq_reqbuf = msg;
                 req->rq_reqbuf_len = size;
@@ -492,7 +561,7 @@ void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
                list_add_tail(&req->rq_list, &pool->prp_req_list);
        }
        spin_unlock(&pool->prp_lock);
-       return;
+       return num_rq;
 }
 EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);
 
@@ -506,7 +575,7 @@ EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);
  */
 struct ptlrpc_request_pool *
 ptlrpc_init_rq_pool(int num_rq, int msgsize,
-                   void (*populate_pool)(struct ptlrpc_request_pool *, int))
+                   int (*populate_pool)(struct ptlrpc_request_pool *, int))
 {
        struct ptlrpc_request_pool *pool;
 
@@ -524,11 +593,6 @@ ptlrpc_init_rq_pool(int num_rq, int msgsize,
 
        populate_pool(pool, num_rq);
 
-       if (list_empty(&pool->prp_req_list)) {
-               /* have not allocated a single request for the pool */
-               OBD_FREE(pool, sizeof(struct ptlrpc_request_pool));
-               pool = NULL;
-       }
        return pool;
 }
 EXPORT_SYMBOL(ptlrpc_init_rq_pool);
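
Because the empty-pool check was dropped from ptlrpc_init_rq_pool() and the
populate callback now reports how many requests it actually allocated, a
caller that needs a guaranteed minimum can enforce it itself. A sketch
(num_rq, msgsize and the error path are illustrative):

	pool = ptlrpc_init_rq_pool(num_rq, msgsize, ptlrpc_add_rqs_to_pool);
	if (pool == NULL)
		return -ENOMEM;

	if (list_empty(&pool->prp_req_list)) {
		/* not a single request was allocated for the pool */
		ptlrpc_free_rq_pool(pool);
		return -ENOMEM;
	}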
@@ -587,72 +651,103 @@ static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
        spin_unlock(&pool->prp_lock);
 }
 
-static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
-                                      __u32 version, int opcode,
-                                      int count, __u32 *lengths, char **bufs,
-                                      struct ptlrpc_cli_ctx *ctx)
+void ptlrpc_add_unreplied(struct ptlrpc_request *req)
 {
-        struct obd_import  *imp = request->rq_import;
-        int                 rc;
-        ENTRY;
+       struct obd_import       *imp = req->rq_import;
+       struct list_head        *tmp;
+       struct ptlrpc_request   *iter;
 
-        if (unlikely(ctx))
-                request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
-        else {
-                rc = sptlrpc_req_get_ctx(request);
-                if (rc)
-                        GOTO(out_free, rc);
-        }
+       assert_spin_locked(&imp->imp_lock);
+       LASSERT(list_empty(&req->rq_unreplied_list));
 
-        sptlrpc_req_set_flavor(request, opcode);
+       /* unreplied list is sorted by xid in ascending order */
+       list_for_each_prev(tmp, &imp->imp_unreplied_list) {
+               iter = list_entry(tmp, struct ptlrpc_request,
+                                 rq_unreplied_list);
 
-        rc = lustre_pack_request(request, imp->imp_msg_magic, count,
-                                 lengths, bufs);
-        if (rc) {
-                LASSERT(!request->rq_pool);
-                GOTO(out_ctx, rc);
-        }
+               LASSERT(req->rq_xid != iter->rq_xid);
+               if (req->rq_xid < iter->rq_xid)
+                       continue;
+               list_add(&req->rq_unreplied_list, &iter->rq_unreplied_list);
+               return;
+       }
+       list_add(&req->rq_unreplied_list, &imp->imp_unreplied_list);
+}
 
-        lustre_msg_add_version(request->rq_reqmsg, version);
-        request->rq_send_state = LUSTRE_IMP_FULL;
-        request->rq_type = PTL_RPC_MSG_REQUEST;
+void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req)
+{
+       req->rq_xid = ptlrpc_next_xid();
+       ptlrpc_add_unreplied(req);
+}
+
+static inline void ptlrpc_assign_next_xid(struct ptlrpc_request *req)
+{
+       spin_lock(&req->rq_import->imp_lock);
+       ptlrpc_assign_next_xid_nolock(req);
+       spin_unlock(&req->rq_import->imp_lock);
+}
 
-        request->rq_req_cbid.cbid_fn  = request_out_callback;
-        request->rq_req_cbid.cbid_arg = request;
+int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
+                            __u32 version, int opcode, char **bufs,
+                            struct ptlrpc_cli_ctx *ctx)
+{
+       int count;
+       struct obd_import *imp;
+       __u32 *lengths;
+       int rc;
 
-        request->rq_reply_cbid.cbid_fn  = reply_in_callback;
-        request->rq_reply_cbid.cbid_arg = request;
+       ENTRY;
 
-        request->rq_reply_deadline = 0;
-        request->rq_phase = RQ_PHASE_NEW;
-        request->rq_next_phase = RQ_PHASE_UNDEFINED;
+       count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
+       imp = request->rq_import;
+       lengths = request->rq_pill.rc_area[RCL_CLIENT];
 
-        request->rq_request_portal = imp->imp_client->cli_request_portal;
-        request->rq_reply_portal = imp->imp_client->cli_reply_portal;
+       if (ctx != NULL) {
+               request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
+       } else {
+               rc = sptlrpc_req_get_ctx(request);
+               if (rc)
+                       GOTO(out_free, rc);
+       }
+       sptlrpc_req_set_flavor(request, opcode);
 
-        ptlrpc_at_set_req_timeout(request);
+       rc = lustre_pack_request(request, imp->imp_msg_magic, count,
+                                lengths, bufs);
+       if (rc)
+               GOTO(out_ctx, rc);
+
+       lustre_msg_add_version(request->rq_reqmsg, version);
+       request->rq_send_state = LUSTRE_IMP_FULL;
+       request->rq_type = PTL_RPC_MSG_REQUEST;
+
+       request->rq_req_cbid.cbid_fn  = request_out_callback;
+       request->rq_req_cbid.cbid_arg = request;
+
+       request->rq_reply_cbid.cbid_fn  = reply_in_callback;
+       request->rq_reply_cbid.cbid_arg = request;
+
+       request->rq_reply_deadline = 0;
+       request->rq_phase = RQ_PHASE_NEW;
+       request->rq_next_phase = RQ_PHASE_UNDEFINED;
+
+       request->rq_request_portal = imp->imp_client->cli_request_portal;
+       request->rq_reply_portal = imp->imp_client->cli_reply_portal;
+
+       ptlrpc_at_set_req_timeout(request);
 
-       request->rq_xid = ptlrpc_next_xid();
        lustre_msg_set_opc(request->rq_reqmsg, opcode);
+       ptlrpc_assign_next_xid(request);
 
        RETURN(0);
+
 out_ctx:
+       LASSERT(!request->rq_pool);
        sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
 out_free:
        class_import_put(imp);
-       return rc;
-}
 
-int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
-                             __u32 version, int opcode, char **bufs,
-                             struct ptlrpc_cli_ctx *ctx)
-{
-        int count;
+       return rc;
 
-        count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
-        return __ptlrpc_request_bufs_pack(request, version, opcode, count,
-                                          request->rq_pill.rc_area[RCL_CLIENT],
-                                          bufs, ctx);
 }
 EXPORT_SYMBOL(ptlrpc_request_bufs_pack);
 
@@ -702,11 +797,10 @@ struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
 {
        struct ptlrpc_request *request = NULL;
 
-       if (pool)
-               request = ptlrpc_prep_req_from_pool(pool);
+       request = ptlrpc_request_cache_alloc(GFP_NOFS);
 
-       if (!request)
-               request = ptlrpc_request_cache_alloc(GFP_NOFS);
+       if (!request && pool)
+               request = ptlrpc_prep_req_from_pool(pool);
 
        if (request) {
                ptlrpc_cli_req_init(request);
@@ -809,55 +903,17 @@ struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
 EXPORT_SYMBOL(ptlrpc_request_alloc_pack);
 
 /**
- * Prepare request (fetched from pool \a pool if not NULL) on import \a imp
- * for operation \a opcode. Request would contain \a count buffers.
- * Sizes of buffers are described in array \a lengths and buffers themselves
- * are provided by a pointer \a bufs.
- * Returns prepared request structure pointer or NULL on error.
- */
-struct ptlrpc_request *
-ptlrpc_prep_req_pool(struct obd_import *imp,
-                     __u32 version, int opcode,
-                     int count, __u32 *lengths, char **bufs,
-                     struct ptlrpc_request_pool *pool)
-{
-        struct ptlrpc_request *request;
-        int                    rc;
-
-        request = __ptlrpc_request_alloc(imp, pool);
-        if (!request)
-                return NULL;
-
-        rc = __ptlrpc_request_bufs_pack(request, version, opcode, count,
-                                        lengths, bufs, NULL);
-        if (rc) {
-                ptlrpc_request_free(request);
-                request = NULL;
-        }
-        return request;
-}
-
-/**
- * Same as ptlrpc_prep_req_pool, but without pool
- */
-struct ptlrpc_request *
-ptlrpc_prep_req(struct obd_import *imp, __u32 version, int opcode, int count,
-                __u32 *lengths, char **bufs)
-{
-        return ptlrpc_prep_req_pool(imp, version, opcode, count, lengths, bufs,
-                                    NULL);
-}
-
-/**
- * Allocate and initialize new request set structure.
+ * Allocate and initialize new request set structure on the current CPT.
  * Returns a pointer to the newly allocated set structure or NULL on error.
  */
 struct ptlrpc_request_set *ptlrpc_prep_set(void)
 {
-       struct ptlrpc_request_set *set;
+       struct ptlrpc_request_set       *set;
+       int                             cpt;
 
        ENTRY;
-       OBD_ALLOC(set, sizeof *set);
+       cpt = cfs_cpt_current(cfs_cpt_table, 0);
+       OBD_CPT_ALLOC(set, cfs_cpt_table, cpt, sizeof *set);
        if (!set)
                RETURN(NULL);
        atomic_set(&set->set_refcount, 1);
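
The set is now allocated on the caller's current CPT, keeping it NUMA-local
to the thread that will drive it; the calling convention is unchanged. A
usage sketch:

	set = ptlrpc_prep_set();
	if (set == NULL)
		RETURN(-ENOMEM);
	ptlrpc_set_add_req(set, req);
	rc = ptlrpc_set_wait(set);
	ptlrpc_set_destroy(set);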
@@ -992,6 +1048,9 @@ void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
 {
        LASSERT(list_empty(&req->rq_set_chain));
 
+       if (req->rq_allow_intr)
+               set->set_allow_intr = 1;
+
        /* The set takes over the caller's request reference */
        list_add_tail(&req->rq_set_chain, &set->set_requests);
        req->rq_set = set;
@@ -1206,6 +1265,24 @@ static void ptlrpc_save_versions(struct ptlrpc_request *req)
         EXIT;
 }
 
+__u64 ptlrpc_known_replied_xid(struct obd_import *imp)
+{
+       struct ptlrpc_request *req;
+
+       assert_spin_locked(&imp->imp_lock);
+       if (list_empty(&imp->imp_unreplied_list))
+               return 0;
+
+       req = list_entry(imp->imp_unreplied_list.next, struct ptlrpc_request,
+                        rq_unreplied_list);
+       LASSERTF(req->rq_xid >= 1, "XID:"LPU64"\n", req->rq_xid);
+
+       if (imp->imp_known_replied_xid < req->rq_xid - 1)
+               imp->imp_known_replied_xid = req->rq_xid - 1;
+
+       return req->rq_xid - 1;
+}
+
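
Because the unreplied list is kept in ascending XID order, its head is the
lowest XID still awaiting a reply, so every XID strictly below it is known to
be replied. For example, with unreplied XIDs {7, 9, 10} the function reports
6: XID 8 may already have a reply, but 7 is still outstanding and the value
must bound a contiguous replied prefix. A call sketch (imp_lock must be held,
per the assert):

	spin_lock(&imp->imp_lock);
	known_xid = ptlrpc_known_replied_xid(imp);
	spin_unlock(&imp->imp_lock);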
 /**
  * Callback function called when client receives RPC reply for \a req.
  * Returns 0 on success or error code.
@@ -1219,6 +1296,7 @@ static int after_reply(struct ptlrpc_request *req)
         struct obd_device *obd = req->rq_import->imp_obd;
         int rc;
         struct timeval work_start;
+       __u64 committed;
         long timediff;
         ENTRY;
 
@@ -1281,15 +1359,6 @@ static int after_reply(struct ptlrpc_request *req)
                spin_unlock(&req->rq_lock);
                req->rq_nr_resend++;
 
-               /* allocate new xid to avoid reply reconstruction */
-               if (!req->rq_bulk) {
-                       /* new xid is already allocated for bulk in
-                        * ptlrpc_check_set() */
-                       req->rq_xid = ptlrpc_next_xid();
-                       DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for "
-                                 "resend on EINPROGRESS");
-               }
-
                /* Readjust the timeout for current conditions */
                ptlrpc_at_set_req_timeout(req);
                /* delay resend to give a chance to the server to get ready.
@@ -1301,6 +1370,11 @@ static int after_reply(struct ptlrpc_request *req)
                else
                        req->rq_sent = now + req->rq_nr_resend;
 
+               /* Resend for EINPROGRESS will use a new XID */
+               spin_lock(&imp->imp_lock);
+               list_del_init(&req->rq_unreplied_list);
+               spin_unlock(&imp->imp_lock);
+
                RETURN(0);
        }
 
@@ -1382,10 +1456,9 @@ static int after_reply(struct ptlrpc_request *req)
                 /*
                  * Replay-enabled imports return commit-status information.
                  */
-                if (lustre_msg_get_last_committed(req->rq_repmsg)) {
-                        imp->imp_peer_committed_transno =
-                                lustre_msg_get_last_committed(req->rq_repmsg);
-                }
+               committed = lustre_msg_get_last_committed(req->rq_repmsg);
+               if (likely(committed > imp->imp_peer_committed_transno))
+                       imp->imp_peer_committed_transno = committed;
 
                ptlrpc_free_committed(imp);
 
@@ -1417,10 +1490,18 @@ static int after_reply(struct ptlrpc_request *req)
 static int ptlrpc_send_new_req(struct ptlrpc_request *req)
 {
         struct obd_import     *imp = req->rq_import;
+       __u64                  min_xid = 0;
         int rc;
         ENTRY;
 
         LASSERT(req->rq_phase == RQ_PHASE_NEW);
+
+       /* do not try to go further if there is not enough memory in enc_pool */
+       if (req->rq_sent && req->rq_bulk != NULL)
+               if (req->rq_bulk->bd_iov_count > get_free_pages_in_pool() &&
+                   pool_is_at_full_capacity())
+                       RETURN(-ENOMEM);
+
         if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()) &&
             (!req->rq_generation_set ||
              req->rq_import_generation == imp->imp_generation))
@@ -1430,6 +1511,9 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
 
        spin_lock(&imp->imp_lock);
 
+       LASSERT(req->rq_xid != 0);
+       LASSERT(!list_empty(&req->rq_unreplied_list));
+
        if (!req->rq_generation_set)
                req->rq_import_generation = imp->imp_generation;
 
@@ -1459,8 +1543,25 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
        LASSERT(list_empty(&req->rq_list));
        list_add_tail(&req->rq_list, &imp->imp_sending_list);
        atomic_inc(&req->rq_import->imp_inflight);
+
+       /* Find the known replied XID from the unreplied list. CONNECT
+        * and DISCONNECT requests are skipped to keep the sanity check
+        * on the server side happy, see process_req_last_xid().
+        *
+        * For CONNECT: because replay requests have lower XIDs, the
+        * sanity check would break if CONNECT bumped the exp_last_xid
+        * on the server.
+        *
+        * For DISCONNECT: since the client aborts inflight RPCs before
+        * sending DISCONNECT, DISCONNECT may carry an XID higher than
+        * the inflight RPCs.
+        */
+       if (!ptlrpc_req_is_connect(req) && !ptlrpc_req_is_disconnect(req))
+               min_xid = ptlrpc_known_replied_xid(imp);
        spin_unlock(&imp->imp_lock);
 
+       lustre_msg_set_last_xid(req->rq_reqmsg, min_xid);
+
        lustre_msg_set_status(req->rq_reqmsg, current_pid());
 
         rc = sptlrpc_req_refresh_ctx(req, -1);
@@ -1484,6 +1585,16 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
               lustre_msg_get_opc(req->rq_reqmsg));
 
         rc = ptl_send_rpc(req, 0);
+       if (rc == -ENOMEM) {
+               spin_lock(&imp->imp_lock);
+               if (!list_empty(&req->rq_list)) {
+                       list_del_init(&req->rq_list);
+                       atomic_dec(&req->rq_import->imp_inflight);
+               }
+               spin_unlock(&imp->imp_lock);
+               ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
+               RETURN(rc);
+       }
         if (rc) {
                 DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
                spin_lock(&req->rq_lock);
@@ -1543,8 +1654,14 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                                   rq_set_chain);
                struct obd_import *imp = req->rq_import;
                int unregistered = 0;
+               int async = 1;
                int rc = 0;
 
+               if (req->rq_phase == RQ_PHASE_COMPLETE) {
+                       list_move_tail(&req->rq_set_chain, &comp_reqs);
+                       continue;
+               }
+
                /* This schedule point is mainly for the ptlrpcd caller of this
                 * function.  Most ptlrpc sets are not long-lived and unbounded
                 * in length, but at the least the set used by the ptlrpcd is.
@@ -1553,13 +1670,26 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                 */
                cond_resched();
 
-                if (req->rq_phase == RQ_PHASE_NEW &&
-                    ptlrpc_send_new_req(req)) {
-                        force_timer_recalc = 1;
-                }
+               /* If the caller allows the request to be interrupted and
+                * it has actually been interrupted, then move the request
+                * to the RQ_PHASE_INTERPRET phase regardless of what the
+                * current phase is. */
+               if (unlikely(req->rq_allow_intr && req->rq_intr)) {
+                       req->rq_status = -EINTR;
+                       ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
+
+                       /* Since the request is being interpreted now, we
+                        * have to wait for the reply to be unlinked, so use
+                        * sync mode. */
+                       async = 0;
+
+                       GOTO(interpret, req->rq_status);
+               }
 
-                /* delayed send - skip */
-                if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
+               if (req->rq_phase == RQ_PHASE_NEW && ptlrpc_send_new_req(req))
+                       force_timer_recalc = 1;
+
+               /* delayed send - skip */
+               if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
                        continue;
 
                /* delayed resend - skip */
@@ -1567,11 +1697,10 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                    req->rq_sent > cfs_time_current_sec())
                        continue;
 
-                if (!(req->rq_phase == RQ_PHASE_RPC ||
-                      req->rq_phase == RQ_PHASE_BULK ||
-                      req->rq_phase == RQ_PHASE_INTERPRET ||
-                      req->rq_phase == RQ_PHASE_UNREGISTERING ||
-                      req->rq_phase == RQ_PHASE_COMPLETE)) {
+               if (!(req->rq_phase == RQ_PHASE_RPC ||
+                     req->rq_phase == RQ_PHASE_BULK ||
+                     req->rq_phase == RQ_PHASE_INTERPRET ||
+                     req->rq_phase == RQ_PHASE_UNREGISTERING)) {
                         DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
                         LBUG();
                 }
@@ -1611,11 +1740,6 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                         ptlrpc_rqphase_move(req, req->rq_next_phase);
                 }
 
-                if (req->rq_phase == RQ_PHASE_COMPLETE) {
-                       list_move_tail(&req->rq_set_chain, &comp_reqs);
-                        continue;
-               }
-
                 if (req->rq_phase == RQ_PHASE_INTERPRET)
                         GOTO(interpret, req->rq_status);
 
@@ -1722,20 +1846,10 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                                        spin_lock(&req->rq_lock);
                                        req->rq_resend = 1;
                                        spin_unlock(&req->rq_lock);
-                                        if (req->rq_bulk) {
-                                                __u64 old_xid;
-
-                                                if (!ptlrpc_unregister_bulk(req, 1))
-                                                        continue;
-
-                                                /* ensure previous bulk fails */
-                                                old_xid = req->rq_xid;
-                                                req->rq_xid = ptlrpc_next_xid();
-                                                CDEBUG(D_HA, "resend bulk "
-                                                       "old x"LPU64
-                                                       " new x"LPU64"\n",
-                                                       old_xid, req->rq_xid);
-                                        }
+
+                                       if (req->rq_bulk != NULL &&
+                                           !ptlrpc_unregister_bulk(req, 1))
+                                               continue;
                                 }
                                 /*
                                  * rq_wait_ctx is only touched by ptlrpcd,
@@ -1763,6 +1877,14 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                                }
 
                                rc = ptl_send_rpc(req, 0);
+                               if (rc == -ENOMEM) {
+                                       spin_lock(&imp->imp_lock);
+                                       if (!list_empty(&req->rq_list))
+                                               list_del_init(&req->rq_list);
+                                       spin_unlock(&imp->imp_lock);
+                                       ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
+                                       continue;
+                               }
                                if (rc) {
                                        DEBUG_REQ(D_HA, req,
                                                  "send failed: rc = %d", rc);
@@ -1834,27 +1956,27 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                        req->rq_status = -EIO;
                }
 
-                ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
+               ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
 
-        interpret:
-                LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
+       interpret:
+               LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
 
-                /* This moves to "unregistering" phase we need to wait for
-                 * reply unlink. */
-                if (!unregistered && !ptlrpc_unregister_reply(req, 1)) {
-                        /* start async bulk unlink too */
-                        ptlrpc_unregister_bulk(req, 1);
-                        continue;
-                }
+               /* This moves the request to the "unregistering" phase, and
+                * we need to wait for the reply to be unlinked. */
+               if (!unregistered && !ptlrpc_unregister_reply(req, async)) {
+                       /* start async bulk unlink too */
+                       ptlrpc_unregister_bulk(req, 1);
+                       continue;
+               }
 
-                if (!ptlrpc_unregister_bulk(req, 1))
-                        continue;
+               if (!ptlrpc_unregister_bulk(req, async))
+                       continue;
 
-                /* When calling interpret receiving already should be
-                 * finished. */
-                LASSERT(!req->rq_receiving_reply);
+               /* When calling interpret, receiving should already be
+                * finished. */
+               LASSERT(!req->rq_receiving_reply);
 
-                ptlrpc_req_interpret(env, req, req->rq_status);
+               ptlrpc_req_interpret(env, req, req->rq_status);
 
                if (ptlrpcd_check_work(req)) {
                        atomic_dec(&set->set_remaining);
@@ -1879,6 +2001,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                        list_del_init(&req->rq_list);
                        atomic_dec(&imp->imp_inflight);
                }
+               list_del_init(&req->rq_unreplied_list);
                spin_unlock(&imp->imp_lock);
 
                atomic_dec(&set->set_remaining);
@@ -1939,8 +2062,8 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
                       "timed out for sent delay" : "timed out for slow reply"),
                   req->rq_sent, req->rq_real_sent);
 
-        if (imp != NULL && obd_debug_peer_on_timeout)
-                LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer);
+       if (imp != NULL && obd_debug_peer_on_timeout)
+               LNetDebugPeer(imp->imp_connection->c_peer);
 
         ptlrpc_unregister_reply(req, async_unlink);
         ptlrpc_unregister_bulk(req, async_unlink);
@@ -2062,8 +2185,12 @@ static void ptlrpc_interrupted_set(void *data)
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);
 
+               if (req->rq_intr)
+                       continue;
+
                if (req->rq_phase != RQ_PHASE_RPC &&
-                   req->rq_phase != RQ_PHASE_UNREGISTERING)
+                   req->rq_phase != RQ_PHASE_UNREGISTERING &&
+                   !req->rq_allow_intr)
                        continue;
 
                ptlrpc_mark_interrupted(req);
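
This is the hook the subject line refers to: a caller such as the lfsck
notify path can mark its request interruptible, and ptlrpc_interrupted_set()
will then mark it interrupted even outside the RPC/UNREGISTERING phases. A
minimal sketch (the surrounding set usage is illustrative):

	req->rq_allow_intr = 1;		/* allow fatal signals to interrupt */
	ptlrpc_set_add_req(set, req);	/* raises set->set_allow_intr */
	rc = ptlrpc_set_wait(set);	/* may now return -EINTR */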
@@ -2155,16 +2282,16 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
                 CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n",
                        set, timeout);
 
-               if (timeout == 0 && !signal_pending(current))
-                        /*
-                         * No requests are in-flight (ether timed out
-                         * or delayed), so we can allow interrupts.
-                         * We still want to block for a limited time,
-                         * so we allow interrupts during the timeout.
-                         */
-                       lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1),
-                                                   ptlrpc_expired_set,
-                                                   ptlrpc_interrupted_set, set);
+               if ((timeout == 0 && !signal_pending(current)) ||
+                   set->set_allow_intr)
+                       /* No requests are in-flight (either timed out
+                        * or delayed), so we can allow interrupts.
+                        * We still want to block for a limited time,
+                        * so we allow interrupts during the timeout. */
+                       lwi = LWI_TIMEOUT_INTR_ALL(
+                                       cfs_time_seconds(timeout ? timeout : 1),
+                                       ptlrpc_expired_set,
+                                       ptlrpc_interrupted_set, set);
                 else
                         /*
                          * At least one request is in flight, so no
@@ -2179,7 +2306,8 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
                 /* LU-769 - if we ignored the signal because it was already
                  * pending when we started, we need to handle it now or we risk
                  * it being ignored forever */
-               if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
+               if (rc == -ETIMEDOUT &&
+                   (!lwi.lwi_allow_intr || set->set_allow_intr) &&
                    signal_pending(current)) {
                        sigset_t blocked_sigs =
                                           cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
@@ -2276,6 +2404,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
                if (!locked)
                        spin_lock(&request->rq_import->imp_lock);
                list_del_init(&request->rq_replay_list);
+               list_del_init(&request->rq_unreplied_list);
                if (!locked)
                        spin_unlock(&request->rq_import->imp_lock);
         }
@@ -2295,7 +2424,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
                 request->rq_import = NULL;
         }
        if (request->rq_bulk != NULL)
-               ptlrpc_free_bulk_pin(request->rq_bulk);
+               ptlrpc_free_bulk(request->rq_bulk);
 
         if (request->rq_reqbuf != NULL || request->rq_clrbuf != NULL)
                 sptlrpc_cli_free_reqbuf(request);
@@ -2600,14 +2729,7 @@ void ptlrpc_resend_req(struct ptlrpc_request *req)
         req->rq_resend = 1;
         req->rq_net_err = 0;
         req->rq_timedout = 0;
-        if (req->rq_bulk) {
-                __u64 old_xid = req->rq_xid;
 
-                /* ensure previous bulk fails */
-                req->rq_xid = ptlrpc_next_xid();
-                CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n",
-                       old_xid, req->rq_xid);
-        }
         ptlrpc_client_wake_req(req);
        spin_unlock(&req->rq_lock);
 }
@@ -2662,6 +2784,10 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
 
        lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
 
+       spin_lock(&req->rq_lock);
+       req->rq_resend = 0;
+       spin_unlock(&req->rq_lock);
+
        LASSERT(imp->imp_replayable);
        /* Balanced in ptlrpc_free_committed, usually. */
        ptlrpc_request_addref(req);
@@ -2739,10 +2865,15 @@ static int ptlrpc_replay_interpret(const struct lu_env *env,
        ENTRY;
        atomic_dec(&imp->imp_replay_inflight);
 
-        if (!ptlrpc_client_replied(req)) {
-                CERROR("request replay timed out, restarting recovery\n");
-                GOTO(out, rc = -ETIMEDOUT);
-        }
+       /* Note: if it is a bulk replay (MDS-MDS replay), then even if
+        * the server got the request but the bulk transfer timed out,
+        * let's replay the bulk req again */
+       if (!ptlrpc_client_replied(req) ||
+           (req->rq_bulk != NULL &&
+            lustre_msg_get_status(req->rq_repmsg) == -ETIMEDOUT)) {
+               DEBUG_REQ(D_ERROR, req, "request replay timed out.\n");
+               GOTO(out, rc = -ETIMEDOUT);
+       }
 
         if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR &&
             (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN ||
@@ -2796,6 +2927,49 @@ static int ptlrpc_replay_interpret(const struct lu_env *env,
                 DEBUG_REQ(D_ERROR, req, "status %d, old was %d",
                           lustre_msg_get_status(req->rq_repmsg),
                           aa->praa_old_status);
+
+               /* Note: if the replay fails for MDT-MDT recovery,
+                * abort all of the following requests in the replay,
+                * committed, delayed and sending lists, because MDT-MDT
+                * update requests depend on each other, see LU-7039 */
+               if (imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS) {
+                       struct ptlrpc_request *free_req;
+                       struct ptlrpc_request *tmp;
+
+                       spin_lock(&imp->imp_lock);
+                       list_for_each_entry_safe(free_req, tmp,
+                                                &imp->imp_replay_list,
+                                                rq_replay_list) {
+                               ptlrpc_free_request(free_req);
+                       }
+
+                       list_for_each_entry_safe(free_req, tmp,
+                                                &imp->imp_committed_list,
+                                                rq_replay_list) {
+                               ptlrpc_free_request(free_req);
+                       }
+
+                       list_for_each_entry_safe(free_req, tmp,
+                                               &imp->imp_delayed_list,
+                                               rq_list) {
+                               spin_lock(&free_req->rq_lock);
+                               free_req->rq_err = 1;
+                               free_req->rq_status = -EIO;
+                               ptlrpc_client_wake_req(free_req);
+                               spin_unlock(&free_req->rq_lock);
+                       }
+
+                       list_for_each_entry_safe(free_req, tmp,
+                                               &imp->imp_sending_list,
+                                               rq_list) {
+                               spin_lock(&free_req->rq_lock);
+                               free_req->rq_err = 1;
+                               free_req->rq_status = -EIO;
+                               ptlrpc_client_wake_req(free_req);
+                               spin_unlock(&free_req->rq_lock);
+                       }
+                       spin_unlock(&imp->imp_lock);
+               }
         } else {
                 /* Put it back for re-replay. */
                 lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
@@ -2857,7 +3031,7 @@ int ptlrpc_replay_req(struct ptlrpc_request *req)
        atomic_inc(&req->rq_import->imp_replay_inflight);
        ptlrpc_request_addref(req);     /* ptlrpcd needs a ref */
 
-       ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
+       ptlrpcd_add_req(req);
        RETURN(0);
 }
 
@@ -3009,6 +3183,43 @@ __u64 ptlrpc_next_xid(void)
 }
 
 /**
+ * If the request has a newly allocated XID (a new request or an EINPROGRESS
+ * resend), use that XID as the bulk matchbits; otherwise allocate new
+ * matchbits for the request so that the previous bulk fails, avoiding
+ * problems with lost replies where several transfers from different sending
+ * attempts land in the same buffer.
+ */
+void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req)
+{
+       struct ptlrpc_bulk_desc *bd = req->rq_bulk;
+
+       LASSERT(bd != NULL);
+
+       if (!req->rq_resend) {
+               /* this request has a new xid, just use it as bulk matchbits */
+               req->rq_mbits = req->rq_xid;
+
+       } else { /* need to generate new matchbits for resend */
+               __u64   old_mbits = req->rq_mbits;
+
+               if ((bd->bd_import->imp_connect_data.ocd_connect_flags &
+                   OBD_CONNECT_BULK_MBITS) != 0)
+                       req->rq_mbits = ptlrpc_next_xid();
+               else /* old version transfers rq_xid to peer as matchbits */
+                       req->rq_mbits = req->rq_xid = ptlrpc_next_xid();
+
+               CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n",
+                      old_mbits, req->rq_mbits);
+       }
+
+       /* For multi-bulk RPCs, rq_mbits is the last mbits needed for bulks so
+        * that the server can infer the number of bulks that were prepared,
+        * see LU-1431 */
+       req->rq_mbits += ((bd->bd_iov_count + LNET_MAX_IOV - 1) /
+                         LNET_MAX_IOV) - 1;
+}
+
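
For the multi-bulk adjustment at the end of ptlrpc_set_bulk_mbits(): a
transfer of bd_iov_count fragments needs ceil(bd_iov_count / LNET_MAX_IOV)
bulks, so rq_mbits is advanced by that count minus one to name the last
bulk's matchbits. A worked example in plain C (LNET_MAX_IOV = 256 assumed):

	#include <stdio.h>

	#define LNET_MAX_IOV 256	/* assumed value for illustration */

	int main(void)
	{
		unsigned long long mbits = 1000;	/* hypothetical first mbits */
		int bd_iov_count = 300;

		/* ceil(300 / 256) = 2 bulks; advance to the last one */
		mbits += (bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV - 1;
		printf("last mbits = %llu\n", mbits);	/* prints 1001 */
		return 0;
	}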
+/**
  * Get a glimpse at what next xid value might have been.
  * Returns possible next xid.
  */
@@ -3064,7 +3275,7 @@ static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
        req->rq_xid             = ptlrpc_next_xid();
        req->rq_import_generation = req->rq_import->imp_generation;
 
-       ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+       ptlrpcd_add_req(req);
 }
 
 static int work_interpreter(const struct lu_env *env,