LU-6496 ptlrpc: Fix wrong code indentation in plain_authorize
[fs/lustre-release.git] / lustre / ptlrpc / niobuf.c
index 240b226..c560e72 100644 (file)
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -28,6 +26,8 @@
 /*
  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, 2014, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
  */
 
 #define DEBUG_SUBSYSTEM S_RPC
-#ifndef __KERNEL__
-#include <liblustre.h>
-#endif
 #include <obd_support.h>
 #include <lustre_net.h>
 #include <lustre_lib.h>
 #include <obd.h>
+#include <obd_class.h>
 #include "ptlrpc_internal.h"
 
 /**
@@ -100,75 +98,148 @@ static int ptl_send_buf (lnet_handle_md_t *mdh, void *base, int len,
         RETURN (0);
 }
 
-/**
- * Starts bulk transfer for descriptor \a desc
- * Returns 0 on success or error code.
- */
-int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc)
+static void mdunlink_iterate_helper(lnet_handle_md_t *bd_mds, int count)
 {
-        struct ptlrpc_connection *conn = desc->bd_export->exp_connection;
-        int                       rc;
-        int                       rc2;
-        lnet_md_t                 md;
-        __u64                     xid;
-        ENTRY;
-
-        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_PUT_NET))
-                RETURN(0);
+       int i;
 
-        /* NB no locking required until desc is on the network */
-        LASSERT (!desc->bd_network_rw);
-        LASSERT (desc->bd_type == BULK_PUT_SOURCE ||
-                 desc->bd_type == BULK_GET_SINK);
-        desc->bd_success = 0;
+       for (i = 0; i < count; i++)
+               LNetMDUnlink(bd_mds[i]);
+}
 
-        md.user_ptr = &desc->bd_cbid;
-        md.eq_handle = ptlrpc_eq_h;
-        md.threshold = 2; /* SENT and ACK/REPLY */
-        md.options = PTLRPC_MD_OPTIONS;
-        ptlrpc_fill_bulk_md(&md, desc);
+#ifdef HAVE_SERVER_SUPPORT
+/**
+ * Prepare a bulk descriptor for the specified incoming request \a req that
+ * can fit \a npages pages. \a type is the bulk type and \a portal is where
+ * the bulk is to be sent. Used on the server side after the request has
+ * already been received.
+ * Returns a pointer to the newly allocated, initialized bulk descriptor,
+ * or NULL on error.
+ */
+struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
+                                             unsigned npages, unsigned max_brw,
+                                             unsigned type, unsigned portal)
+{
+       struct obd_export *exp = req->rq_export;
+       struct ptlrpc_bulk_desc *desc;
 
-        LASSERT (desc->bd_cbid.cbid_fn == server_bulk_callback);
-        LASSERT (desc->bd_cbid.cbid_arg == desc);
+       ENTRY;
+       LASSERT(type == BULK_PUT_SOURCE || type == BULK_GET_SINK);
 
-        /* NB total length may be 0 for a read past EOF, so we send a 0
-         * length bulk, since the client expects a bulk event. */
+       desc = ptlrpc_new_bulk(npages, max_brw, type, portal);
+       if (desc == NULL)
+               RETURN(NULL);
 
-        rc = LNetMDBind(md, LNET_UNLINK, &desc->bd_md_h);
-        if (rc != 0) {
-                CERROR("LNetMDBind failed: %d\n", rc);
-                LASSERT (rc == -ENOMEM);
-                RETURN(-ENOMEM);
-        }
+        desc->bd_export = class_export_get(exp);
+        desc->bd_req = req;
 
-        /* Client's bulk and reply matchbits are the same */
-        xid = desc->bd_req->rq_xid;
-        CDEBUG(D_NET, "Transferring %u pages %u bytes via portal %d "
-               "id %s xid "LPX64"\n", desc->bd_iov_count,
-               desc->bd_nob, desc->bd_portal,
-               libcfs_id2str(conn->c_peer), xid);
+        desc->bd_cbid.cbid_fn  = server_bulk_callback;
+        desc->bd_cbid.cbid_arg = desc;
 
-        /* Network is about to get at the memory */
-        desc->bd_network_rw = 1;
+        /* NB we don't assign rq_bulk here; server-side requests are
+         * re-used, and the handler frees the bulk desc explicitly. */
 
-        if (desc->bd_type == BULK_PUT_SOURCE)
-                rc = LNetPut (conn->c_self, desc->bd_md_h, LNET_ACK_REQ,
-                              conn->c_peer, desc->bd_portal, xid, 0, 0);
-        else
-                rc = LNetGet (conn->c_self, desc->bd_md_h,
-                              conn->c_peer, desc->bd_portal, xid, 0);
-
-        if (rc != 0) {
-                /* Can't send, so we unlink the MD bound above.  The UNLINK
-                 * event this creates will signal completion with failure,
-                 * so we return SUCCESS here! */
-                CERROR("Transfer(%s, %d, "LPX64") failed: %d\n",
-                       libcfs_id2str(conn->c_peer), desc->bd_portal, xid, rc);
-                rc2 = LNetMDUnlink(desc->bd_md_h);
-                LASSERT (rc2 == 0);
-        }
+        return desc;
+}
+EXPORT_SYMBOL(ptlrpc_prep_bulk_exp);
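
For context, a minimal sketch of how a server-side handler might drive this
helper, per the comment above. The portal constant, the page-attach step and
the ptlrpc_free_bulk() cleanup named below are illustrative assumptions, not
part of this patch:

        /* Hypothetical server-side read path: the handler owns the
         * descriptor and must free it itself, since rq_bulk is not set. */
        struct ptlrpc_bulk_desc *desc;

        desc = ptlrpc_prep_bulk_exp(req, npages, 1 /* max_brw */,
                                    BULK_PUT_SOURCE, OST_BULK_PORTAL);
        if (desc == NULL)
                RETURN(-ENOMEM);
        /* ... attach the pages holding the read data to desc ... */
        rc = ptlrpc_start_bulk_transfer(desc);
        /* once the transfer completes (or fails), free desc explicitly,
         * e.g. via ptlrpc_free_bulk() */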
 
-        RETURN(0);
+/**
+ * Starts bulk transfer for descriptor \a desc on the server.
+ * Returns 0 on success or error code.
+ */
+int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc)
+{
+       struct obd_export        *exp = desc->bd_export;
+       struct ptlrpc_connection *conn = exp->exp_connection;
+       int                       rc = 0;
+       __u64                     xid;
+       int                       posted_md;
+       int                       total_md;
+       lnet_md_t                 md;
+       ENTRY;
+
+       if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_PUT_NET))
+               RETURN(0);
+
+       /* NB no locking required until desc is on the network */
+       LASSERT(desc->bd_md_count == 0);
+       LASSERT(desc->bd_type == BULK_PUT_SOURCE ||
+               desc->bd_type == BULK_GET_SINK);
+
+       LASSERT(desc->bd_cbid.cbid_fn == server_bulk_callback);
+       LASSERT(desc->bd_cbid.cbid_arg == desc);
+
+       /* NB total length may be 0 for a read past EOF, so we send 0
+        * length bulks, since the client expects bulk events.
+        *
+        * The client may not need all of the bulk XIDs for the RPC.  The RPC
+        * uses the XID of the highest bulk XID needed, and the server masks
+        * off the high bits to get the bulk count for this RPC. LU-1431 */
+       xid = desc->bd_req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1);
+       total_md = desc->bd_req->rq_xid - xid + 1;
+
+       desc->bd_md_count = total_md;
+       desc->bd_failure = 0;
+
+       md.user_ptr = &desc->bd_cbid;
+       md.eq_handle = ptlrpc_eq_h;
+       md.threshold = 2; /* SENT and ACK/REPLY */
+
+       for (posted_md = 0; posted_md < total_md; xid++) {
+               md.options = PTLRPC_MD_OPTIONS;
+
+               /* NB it's assumed that source and sink buffer frags are
+                * page-aligned. Otherwise we'd have to send the client's bulk
+                * sizes over and split the server buffer accordingly */
+               ptlrpc_fill_bulk_md(&md, desc, posted_md);
+               rc = LNetMDBind(md, LNET_UNLINK, &desc->bd_mds[posted_md]);
+               if (rc != 0) {
+                       CERROR("%s: LNetMDBind failed for MD %u: rc = %d\n",
+                              exp->exp_obd->obd_name, posted_md, rc);
+                       LASSERT(rc == -ENOMEM);
+                       if (posted_md == 0) {
+                               desc->bd_md_count = 0;
+                               RETURN(-ENOMEM);
+                       }
+                       break;
+               }
+               /* Network is about to get at the memory */
+               if (desc->bd_type == BULK_PUT_SOURCE)
+                       rc = LNetPut(conn->c_self, desc->bd_mds[posted_md],
+                                    LNET_ACK_REQ, conn->c_peer,
+                                    desc->bd_portal, xid, 0, 0);
+               else
+                       rc = LNetGet(conn->c_self, desc->bd_mds[posted_md],
+                                    conn->c_peer, desc->bd_portal, xid, 0);
+
+               posted_md++;
+               if (rc != 0) {
+                       CERROR("%s: failed bulk transfer with %s:%u x"LPU64": "
+                              "rc = %d\n", exp->exp_obd->obd_name,
+                              libcfs_id2str(conn->c_peer), desc->bd_portal,
+                              xid, rc);
+                       break;
+               }
+       }
+
+       if (rc != 0) {
+               /* Can't send, so we unlink the MD bound above.  The UNLINK
+                * event this creates will signal completion with failure,
+                * so we return SUCCESS here! */
+               spin_lock(&desc->bd_lock);
+               desc->bd_md_count -= total_md - posted_md;
+               spin_unlock(&desc->bd_lock);
+               LASSERT(desc->bd_md_count >= 0);
+
+               mdunlink_iterate_helper(desc->bd_mds, posted_md);
+               RETURN(0);
+       }
+
+       CDEBUG(D_NET, "Transferring %u pages %u bytes via portal %d "
+              "id %s xid "LPX64"-"LPX64"\n", desc->bd_iov_count,
+              desc->bd_nob, desc->bd_portal, libcfs_id2str(conn->c_peer),
+              xid - posted_md, xid - 1);
+
+       RETURN(0);
 }
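
To make the LU-1431 match-bit arithmetic above concrete, a worked sketch
(the values are illustrative; the only assumption is that bd_md_max_brw is
a power of two, which the mask computation requires):

        /* Recover the first bulk XID and the MD count from rq_xid,
         * e.g. rq_xid = 0x1237 with bd_md_max_brw = 8: */
        __u64 xid      = 0x1237ULL & ~((__u64)8 - 1);   /* = 0x1230 */
        int   total_md = 0x1237 - 0x1230 + 1;           /* = 8 MDs  */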
 
 /**
@@ -177,120 +248,166 @@ int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc)
  */
 void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
 {
-        struct l_wait_info       lwi;
-        int                      rc;
-
-        LASSERT(!cfs_in_interrupt());           /* might sleep */
-
-        if (!ptlrpc_server_bulk_active(desc))   /* completed or */
-                return;                         /* never started */
-
-        /* We used to poison the pages with 0xab here because we did not want to
-         * send any meaningful data over the wire for evicted clients (bug 9297)
-         * However, this is no longer safe now that we use the page cache on the
-         * OSS (bug 20560) */
-
-        /* The unlink ensures the callback happens ASAP and is the last
-         * one.  If it fails, it must be because completion just happened,
-         * but we must still l_wait_event() in this case, to give liblustre
-         * a chance to run server_bulk_callback()*/
-
-        LNetMDUnlink(desc->bd_md_h);
-
-        for (;;) {
-                /* Network access will complete in finite time but the HUGE
-                 * timeout lets us CWARN for visibility of sluggish NALs */
-                lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
-                                           cfs_time_seconds(1), NULL, NULL);
-                rc = l_wait_event(desc->bd_waitq,
-                                  !ptlrpc_server_bulk_active(desc), &lwi);
-                if (rc == 0)
-                        return;
-
-                LASSERT(rc == -ETIMEDOUT);
-                CWARN("Unexpectedly long timeout: desc %p\n", desc);
-        }
+       struct l_wait_info       lwi;
+       int                      rc;
+
+       LASSERT(!in_interrupt());           /* might sleep */
+
+       if (!ptlrpc_server_bulk_active(desc))   /* completed or */
+               return;                         /* never started */
+
+       /* We used to poison the pages with 0xab here because we did not want
+        * to send any meaningful data over the wire for evicted clients
+        * (bug 9297).  However, this is no longer safe now that we use the
+        * page cache on the OSS (bug 20560). */
+
+       /* The unlink ensures the callback happens ASAP and is the last
+        * one.  If it fails, it must be because completion just happened,
+        * but we must still l_wait_event() in this case, to give liblustre
+        * a chance to run server_bulk_callback() */
+       mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_count);
+
+       for (;;) {
+               /* Network access will complete in finite time but the HUGE
+                * timeout lets us CWARN for visibility of sluggish NALs */
+               lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
+                                          cfs_time_seconds(1), NULL, NULL);
+               rc = l_wait_event(desc->bd_waitq,
+                                 !ptlrpc_server_bulk_active(desc), &lwi);
+               if (rc == 0)
+                       return;
+
+               LASSERT(rc == -ETIMEDOUT);
+               CWARN("Unexpectedly long timeout: desc %p\n", desc);
+       }
 }
+#endif /* HAVE_SERVER_SUPPORT */
 
 /**
- * Register bulk for later transfer
+ * Register bulk at the sender for later transfer.
  * Returns 0 on success or error code.
  */
 int ptlrpc_register_bulk(struct ptlrpc_request *req)
 {
-        struct ptlrpc_bulk_desc *desc = req->rq_bulk;
-        lnet_process_id_t peer;
-        int rc;
-        int rc2;
-        lnet_handle_me_t  me_h;
-        lnet_md_t         md;
-        ENTRY;
+       struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+       lnet_process_id_t peer;
+       int rc = 0;
+       int rc2;
+       int posted_md;
+       int total_md;
+       __u64 xid;
+       lnet_handle_me_t  me_h;
+       lnet_md_t         md;
+       ENTRY;
 
         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_GET_NET))
                 RETURN(0);
 
-        /* NB no locking required until desc is on the network */
-        LASSERT (desc->bd_nob > 0);
-        LASSERT (!desc->bd_network_rw);
-        LASSERT (desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
-        LASSERT (desc->bd_req != NULL);
-        LASSERT (desc->bd_type == BULK_PUT_SINK ||
-                 desc->bd_type == BULK_GET_SOURCE);
-
-        desc->bd_success = 0;
-
-        peer = desc->bd_import->imp_connection->c_peer;
-
-        md.user_ptr = &desc->bd_cbid;
-        md.eq_handle = ptlrpc_eq_h;
-        md.threshold = 1;                       /* PUT or GET */
-        md.options = PTLRPC_MD_OPTIONS |
-                     ((desc->bd_type == BULK_GET_SOURCE) ?
-                      LNET_MD_OP_GET : LNET_MD_OP_PUT);
-        ptlrpc_fill_bulk_md(&md, desc);
-
-        LASSERT (desc->bd_cbid.cbid_fn == client_bulk_callback);
-        LASSERT (desc->bd_cbid.cbid_arg == desc);
-
-        /* XXX Registering the same xid on retried bulk makes my head
-         * explode trying to understand how the original request's bulk
-         * might interfere with the retried request -eeb
-         * On the other hand replaying with the same xid is fine, since
-         * we are guaranteed old request have completed. -green */
-        LASSERTF(!(desc->bd_registered &&
-                 req->rq_send_state != LUSTRE_IMP_REPLAY) ||
-                 req->rq_xid != desc->bd_last_xid,
-                 "registered: %d  rq_xid: "LPU64" bd_last_xid: "LPU64"\n",
-                 desc->bd_registered, req->rq_xid, desc->bd_last_xid);
-        desc->bd_registered = 1;
-        desc->bd_last_xid = req->rq_xid;
-
-        rc = LNetMEAttach(desc->bd_portal, peer,
-                         req->rq_xid, 0, LNET_UNLINK, LNET_INS_AFTER, &me_h);
-        if (rc != 0) {
-                CERROR("LNetMEAttach failed: %d\n", rc);
-                LASSERT (rc == -ENOMEM);
-                RETURN (-ENOMEM);
-        }
-
-        /* About to let the network at it... */
-        desc->bd_network_rw = 1;
-        rc = LNetMDAttach(me_h, md, LNET_UNLINK, &desc->bd_md_h);
-        if (rc != 0) {
-                CERROR("LNetMDAttach failed: %d\n", rc);
-                LASSERT (rc == -ENOMEM);
-                desc->bd_network_rw = 0;
-                rc2 = LNetMEUnlink (me_h);
-                LASSERT (rc2 == 0);
-                RETURN (-ENOMEM);
-        }
-
-        CDEBUG(D_NET, "Setup bulk %s buffers: %u pages %u bytes, xid "LPU64", "
-               "portal %u\n",
-               desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink",
-               desc->bd_iov_count, desc->bd_nob,
-               req->rq_xid, desc->bd_portal);
-        RETURN(0);
+       /* NB no locking required until desc is on the network */
+       LASSERT(desc->bd_nob > 0);
+       LASSERT(desc->bd_md_count == 0);
+       LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT);
+       LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
+       LASSERT(desc->bd_req != NULL);
+       LASSERT(desc->bd_type == BULK_PUT_SINK ||
+               desc->bd_type == BULK_GET_SOURCE);
+
+       /* clean up the state of the bulk as it will be reused */
+       if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY)
+               desc->bd_nob_transferred = 0;
+       else
+               LASSERT(desc->bd_nob_transferred == 0);
+
+       desc->bd_failure = 0;
+
+       peer = desc->bd_import->imp_connection->c_peer;
+
+       LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback);
+       LASSERT(desc->bd_cbid.cbid_arg == desc);
+
+       /* An XID is only used for a single request from the client.
+        * For retried bulk transfers, a new XID will be allocated in
+        * ptlrpc_check_set() if the request needs to be resent, so the
+        * same RDMA match bits are not reused after an error.
+        *
+        * For multi-bulk RPCs, rq_xid is the last XID needed for bulks. The
+        * first bulk XID is power-of-two aligned before rq_xid. LU-1431 */
+       xid = req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1);
+       LASSERTF(!(desc->bd_registered &&
+                  req->rq_send_state != LUSTRE_IMP_REPLAY) ||
+                xid != desc->bd_last_xid,
+                "registered: %d  rq_xid: "LPU64" bd_last_xid: "LPU64"\n",
+                desc->bd_registered, xid, desc->bd_last_xid);
+
+       total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
+       desc->bd_registered = 1;
+       desc->bd_last_xid = xid;
+       desc->bd_md_count = total_md;
+       md.user_ptr = &desc->bd_cbid;
+       md.eq_handle = ptlrpc_eq_h;
+       md.threshold = 1;                       /* PUT or GET */
+
+       for (posted_md = 0; posted_md < total_md; posted_md++, xid++) {
+               md.options = PTLRPC_MD_OPTIONS |
+                            ((desc->bd_type == BULK_GET_SOURCE) ?
+                             LNET_MD_OP_GET : LNET_MD_OP_PUT);
+               ptlrpc_fill_bulk_md(&md, desc, posted_md);
+
+               rc = LNetMEAttach(desc->bd_portal, peer, xid, 0,
+                                 LNET_UNLINK, LNET_INS_AFTER, &me_h);
+               if (rc != 0) {
+                       CERROR("%s: LNetMEAttach failed x"LPU64"/%d: rc = %d\n",
+                              desc->bd_import->imp_obd->obd_name, xid,
+                              posted_md, rc);
+                       break;
+               }
+
+               /* About to let the network at it... */
+               rc = LNetMDAttach(me_h, md, LNET_UNLINK,
+                                 &desc->bd_mds[posted_md]);
+               if (rc != 0) {
+                       CERROR("%s: LNetMDAttach failed x"LPU64"/%d: rc = %d\n",
+                              desc->bd_import->imp_obd->obd_name, xid,
+                              posted_md, rc);
+                       rc2 = LNetMEUnlink(me_h);
+                       LASSERT(rc2 == 0);
+                       break;
+               }
+       }
+
+       if (rc != 0) {
+               LASSERT(rc == -ENOMEM);
+               spin_lock(&desc->bd_lock);
+               desc->bd_md_count -= total_md - posted_md;
+               spin_unlock(&desc->bd_lock);
+               LASSERT(desc->bd_md_count >= 0);
+               mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
+               req->rq_status = -ENOMEM;
+               RETURN(-ENOMEM);
+       }
+
+       /* Set rq_xid to the matchbits of the final bulk so that the server
+        * can infer the number of bulks that were prepared */
+       req->rq_xid = --xid;
+       LASSERTF(desc->bd_last_xid == (req->rq_xid & PTLRPC_BULK_OPS_MASK),
+                "bd_last_xid = x"LPU64", rq_xid = x"LPU64"\n",
+                desc->bd_last_xid, req->rq_xid);
+
+       spin_lock(&desc->bd_lock);
+       /* Holler if peer manages to touch buffers before he knows the xid */
+       if (desc->bd_md_count != total_md)
+               CWARN("%s: Peer %s touched %d buffers while I registered\n",
+                     desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer),
+                     total_md - desc->bd_md_count);
+       spin_unlock(&desc->bd_lock);
+
+       CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, "
+              "xid x"LPX64"-"LPX64", portal %u\n", desc->bd_md_count,
+              desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink",
+              desc->bd_iov_count, desc->bd_nob,
+              desc->bd_last_xid, req->rq_xid, desc->bd_portal);
+
+       RETURN(0);
 }
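
The MD count above is a ceiling division of the page count by LNET_MAX_IOV;
a small sketch of the same computation (an LNET_MAX_IOV of 256 is assumed
here purely for the example values):

        /* (iov_count + max_iov - 1) / max_iov rounds up:
         * 1 page -> 1 MD, 256 pages -> 1 MD, 257 pages -> 2 MDs */
        static inline int bulk_md_count(int iov_count, int max_iov)
        {
                return (iov_count + max_iov - 1) / max_iov;
        }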
 
 /**
@@ -301,33 +418,31 @@ int ptlrpc_register_bulk(struct ptlrpc_request *req)
  */
 int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
 {
-        struct ptlrpc_bulk_desc *desc = req->rq_bulk;
-        cfs_waitq_t             *wq;
-        struct l_wait_info       lwi;
-        int                      rc;
-        ENTRY;
-
-        LASSERT(!cfs_in_interrupt());     /* might sleep */
+       struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+       struct l_wait_info       lwi;
+       int                      rc;
+       ENTRY;
 
-        /* Let's setup deadline for reply unlink. */
-        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
-            async && req->rq_bulk_deadline == 0)
-                req->rq_bulk_deadline = cfs_time_current_sec() + LONG_UNLINK;
+       LASSERT(!in_interrupt());     /* might sleep */
 
-        if (!ptlrpc_client_bulk_active(req))  /* completed or */
-                RETURN(1);                    /* never registered */
+       /* Let's set up a deadline for the reply unlink. */
+       if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
+           async && req->rq_bulk_deadline == 0)
+               req->rq_bulk_deadline = cfs_time_current_sec() + LONG_UNLINK;
 
-        LASSERT(desc->bd_req == req);  /* bd_req NULL until registered */
+       if (ptlrpc_client_bulk_active(req) == 0)        /* completed or */
+               RETURN(1);                              /* never registered */
 
-        /* the unlink ensures the callback happens ASAP and is the last
-         * one.  If it fails, it must be because completion just happened,
-         * but we must still l_wait_event() in this case to give liblustre
-         * a chance to run client_bulk_callback() */
+       LASSERT(desc->bd_req == req);  /* bd_req NULL until registered */
 
-        LNetMDUnlink(desc->bd_md_h);
+       /* the unlink ensures the callback happens ASAP and is the last
+        * one.  If it fails, it must be because completion just happened,
+        * but we must still l_wait_event() in this case to give liblustre
+        * a chance to run client_bulk_callback() */
+       mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
 
-        if (!ptlrpc_client_bulk_active(req))  /* completed or */
-                RETURN(1);                    /* never registered */
+       if (ptlrpc_client_bulk_active(req) == 0)        /* completed or */
+               RETURN(1);                              /* never registered */
 
         /* Move to "Unregistering" phase as bulk was not unlinked yet. */
         ptlrpc_rqphase_move(req, RQ_PHASE_UNREGISTERING);
@@ -336,12 +451,11 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
         if (async)
                 RETURN(0);
 
-        if (req->rq_set != NULL)
-                wq = &req->rq_set->set_waitq;
-        else
-                wq = &req->rq_reply_waitq;
-
         for (;;) {
+               /* The wq argument is ignored by user-space wait_event macros */
+               wait_queue_head_t *wq = (req->rq_set != NULL) ?
+                                       &req->rq_set->set_waitq :
+                                       &req->rq_reply_waitq;
                 /* Network access will complete in finite time but the HUGE
                  * timeout lets us CWARN for visibility of sluggish NALs */
                 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
@@ -361,7 +475,8 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
 
 static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
 {
-        struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service;
+       struct ptlrpc_service_part      *svcpt = req->rq_rqbd->rqbd_svcpt;
+       struct ptlrpc_service           *svc = svcpt->scp_service;
         int service_time = max_t(int, cfs_time_current_sec() -
                                  req->rq_arrival_time.tv_sec, 1);
 
@@ -373,44 +488,44 @@ static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
                MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))) {
                 /* early replies, errors and recovery requests don't count
                  * toward our service time estimate */
-                int oldse = at_measured(&svc->srv_at_estimate, service_time);
-                if (oldse != 0)
-                        DEBUG_REQ(D_ADAPTTO, req,
-                                  "svc %s changed estimate from %d to %d",
-                                  svc->srv_name, oldse,
-                                  at_get(&svc->srv_at_estimate));
+               int oldse = at_measured(&svcpt->scp_at_estimate, service_time);
+
+               if (oldse != 0) {
+                       DEBUG_REQ(D_ADAPTTO, req,
+                                 "svc %s changed estimate from %d to %d",
+                                 svc->srv_name, oldse,
+                                 at_get(&svcpt->scp_at_estimate));
+               }
         }
         /* Report actual service time for client latency calc */
         lustre_msg_set_service_time(req->rq_repmsg, service_time);
         /* Report service time estimate for future client reqs, but report 0
-         * (to be ignored by client) if it's a error reply during recovery.
+        * (to be ignored by client) if it's an error reply during recovery.
          * (bz15815) */
         if (req->rq_type == PTL_RPC_MSG_ERR &&
             (req->rq_export == NULL || req->rq_export->exp_obd->obd_recovering))
                 lustre_msg_set_timeout(req->rq_repmsg, 0);
         else
                 lustre_msg_set_timeout(req->rq_repmsg,
-                                       at_get(&svc->srv_at_estimate));
-
-        if (req->rq_reqmsg &&
-            !(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
-                CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x "
-                       "req_flags=%#x magic=%d:%x/%x len=%d\n",
-                       flags, lustre_msg_get_flags(req->rq_reqmsg),
-                       lustre_msg_is_v1(req->rq_reqmsg),
-                       lustre_msg_get_magic(req->rq_reqmsg),
-                       lustre_msg_get_magic(req->rq_repmsg), req->rq_replen);
-        }
+                                      at_get(&svcpt->scp_at_estimate));
+
+       if (req->rq_reqmsg &&
+           !(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
+               CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x "
+                      "req_flags=%#x magic=%x/%x len=%d\n",
+                      flags, lustre_msg_get_flags(req->rq_reqmsg),
+                      lustre_msg_get_magic(req->rq_reqmsg),
+                      lustre_msg_get_magic(req->rq_repmsg), req->rq_replen);
+       }
 }
 
 /**
  * Send request reply from request \a req reply buffer.
  * \a flags defines reply types
- * Returns 0 on sucess or error code
+ * Returns 0 on success or error code
  */
 int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
 {
-        struct ptlrpc_service     *svc = req->rq_rqbd->rqbd_service;
         struct ptlrpc_reply_state *rs = req->rq_reply_state;
         struct ptlrpc_connection  *conn;
         int                        rc;
@@ -442,11 +557,22 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
                        req->rq_export->exp_obd->obd_minor);
         }
 
+       /* In order to keep interoperability with clients (< 2.3) that
+        * don't have pb_jobid in ptlrpc_body, we have to shrink the
+        * ptlrpc_body in the reply buffer to ptlrpc_body_v2; otherwise
+        * the reply buffer on the client will overflow.
+        *
+        * XXX Remove this whenever we drop interoperability with such
+        * clients.
+        */
+       req->rq_replen = lustre_shrink_msg(req->rq_repmsg, 0,
+                                          sizeof(struct ptlrpc_body_v2), 1);
+
         if (req->rq_type != PTL_RPC_MSG_ERR)
                 req->rq_type = PTL_RPC_MSG_REPLY;
 
         lustre_msg_set_type(req->rq_repmsg, req->rq_type);
-        lustre_msg_set_status(req->rq_repmsg, req->rq_status);
+       lustre_msg_set_status(req->rq_repmsg,
+                             ptlrpc_status_hton(req->rq_status));
         lustre_msg_set_opc(req->rq_repmsg,
                 req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : 0);
 
@@ -474,7 +600,8 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
         rc = ptl_send_buf (&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
                            (rs->rs_difficult && !rs->rs_no_ack) ?
                            LNET_ACK_REQ : LNET_NOACK_REQ,
-                           &rs->rs_cb_id, conn, svc->srv_rep_portal,
+                          &rs->rs_cb_id, conn,
+                          ptlrpc_req2svc(req)->srv_rep_portal,
                            req->rq_xid, req->rq_reply_off);
 out:
         if (unlikely(rc != 0))
@@ -509,7 +636,10 @@ int ptlrpc_send_error(struct ptlrpc_request *req, int may_be_difficult)
                         RETURN(rc);
         }
 
-        req->rq_type = PTL_RPC_MSG_ERR;
+       if (req->rq_status != -ENOSPC && req->rq_status != -EACCES &&
+           req->rq_status != -EPERM && req->rq_status != -ENOENT &&
+           req->rq_status != -EINPROGRESS && req->rq_status != -EDQUOT)
+               req->rq_type = PTL_RPC_MSG_ERR;
 
         rc = ptlrpc_send_reply(req, may_be_difficult);
         RETURN(rc);
@@ -534,7 +664,8 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
         struct ptlrpc_connection *connection;
         lnet_handle_me_t  reply_me_h;
         lnet_md_t         reply_md;
-        struct obd_device *obd = request->rq_import->imp_obd;
+       struct obd_import *imp = request->rq_import;
+       struct obd_device *obd = imp->imp_obd;
         ENTRY;
 
         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
@@ -546,30 +677,42 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
         /* If this is a re-transmit, we're required to have disengaged
          * cleanly from the previous attempt */
         LASSERT(!request->rq_receiving_reply);
-
-        if (request->rq_import->imp_obd &&
-            request->rq_import->imp_obd->obd_fail) {
-                CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
-                       request->rq_import->imp_obd->obd_name);
-                /* this prevents us from waiting in ptlrpc_queue_wait */
-                request->rq_err = 1;
+       LASSERT(!((lustre_msg_get_flags(request->rq_reqmsg) & MSG_REPLAY) &&
+               (imp->imp_state == LUSTRE_IMP_FULL)));
+
+       if (unlikely(obd != NULL && obd->obd_fail)) {
+               CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
+                       obd->obd_name);
+               /* this prevents us from waiting in ptlrpc_queue_wait */
+               spin_lock(&request->rq_lock);
+               request->rq_err = 1;
+               spin_unlock(&request->rq_lock);
                 request->rq_status = -ENODEV;
                 RETURN(-ENODEV);
         }
 
-        connection = request->rq_import->imp_connection;
-
-        lustre_msg_set_handle(request->rq_reqmsg,
-                              &request->rq_import->imp_remote_handle);
-        lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
-        lustre_msg_set_conn_cnt(request->rq_reqmsg,
-                                request->rq_import->imp_conn_cnt);
-        lustre_msghdr_set_flags(request->rq_reqmsg,
-                                request->rq_import->imp_msghdr_flags);
-
-        if (request->rq_resend)
-                lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);
-
+       connection = imp->imp_connection;
+
+       lustre_msg_set_handle(request->rq_reqmsg,
+                             &imp->imp_remote_handle);
+       lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
+       lustre_msg_set_conn_cnt(request->rq_reqmsg,
+                               imp->imp_conn_cnt);
+       lustre_msghdr_set_flags(request->rq_reqmsg,
+                               imp->imp_msghdr_flags);
+
+       /* When AT is enabled, all requests should have AT_SUPPORT in the
+        * FULL import state when OBD_CONNECT_AT is set */
+       LASSERT(AT_OFF || imp->imp_state != LUSTRE_IMP_FULL ||
+               (imp->imp_msghdr_flags & MSGHDR_AT_SUPPORT) ||
+               !(imp->imp_connect_data.ocd_connect_flags &
+               OBD_CONNECT_AT));
+
+       if (request->rq_resend) {
+               lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);
+               if (request->rq_resend_cb != NULL)
+                       request->rq_resend_cb(request, &request->rq_async_args);
+       }
         if (request->rq_memalloc)
                 mpflag = cfs_memory_pressure_get_and_set();
 
@@ -594,7 +737,9 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
                         if (rc) {
                                 /* this prevents us from looping in
                                  * ptlrpc_queue_wait */
-                                request->rq_err = 1;
+                               spin_lock(&request->rq_lock);
+                               request->rq_err = 1;
+                               spin_unlock(&request->rq_lock);
                                 request->rq_status = rc;
                                 GOTO(cleanup_bulk, rc);
                         }
@@ -613,20 +758,20 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
                 }
         }
 
-        cfs_spin_lock(&request->rq_lock);
-        /* If the MD attach succeeds, there _will_ be a reply_in callback */
-        request->rq_receiving_reply = !noreply;
-        /* We are responsible for unlinking the reply buffer */
-        request->rq_must_unlink = !noreply;
-        /* Clear any flags that may be present from previous sends. */
+       spin_lock(&request->rq_lock);
+       /* We are responsible for unlinking the reply buffer */
+       request->rq_reply_unlinked = noreply;
+       request->rq_receiving_reply = !noreply;
+       /* Clear any flags that may be present from previous sends. */
+       request->rq_req_unlinked = 0;
         request->rq_replied = 0;
         request->rq_err = 0;
         request->rq_timedout = 0;
         request->rq_net_err = 0;
         request->rq_resend = 0;
         request->rq_restart = 0;
-        request->rq_reply_truncate = 0;
-        cfs_spin_unlock(&request->rq_lock);
+       request->rq_reply_truncated = 0;
+       spin_unlock(&request->rq_lock);
 
         if (!noreply) {
                 reply_md.start     = request->rq_repbuf;
@@ -640,17 +785,17 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
                 reply_md.user_ptr  = &request->rq_reply_cbid;
                 reply_md.eq_handle = ptlrpc_eq_h;
 
-                /* We must see the unlink callback to unset rq_must_unlink,
-                   so we can't auto-unlink */
+               /* We must see the unlink callback to set rq_reply_unlinked,
+                * so we can't auto-unlink */
                 rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN,
                                   &request->rq_reply_md_h);
                 if (rc != 0) {
                         CERROR("LNetMDAttach failed: %d\n", rc);
                         LASSERT (rc == -ENOMEM);
-                        cfs_spin_lock(&request->rq_lock);
-                        /* ...but the MD attach didn't succeed... */
-                        request->rq_receiving_reply = 0;
-                        cfs_spin_unlock(&request->rq_lock);
+                       spin_lock(&request->rq_lock);
+                       /* ...but the MD attach didn't succeed... */
+                       request->rq_receiving_reply = 0;
+                       spin_unlock(&request->rq_lock);
                         GOTO(cleanup_me, rc = -ENOMEM);
                 }
 
@@ -662,20 +807,20 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
 
         /* add references on request for request_out_callback */
         ptlrpc_request_addref(request);
-        if (obd->obd_svc_stats != NULL)
-                lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
-                        cfs_atomic_read(&request->rq_import->imp_inflight));
+       if (obd != NULL && obd->obd_svc_stats != NULL)
+               lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
+                       atomic_read(&imp->imp_inflight));
 
-        OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
+       OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
 
-        cfs_gettimeofday(&request->rq_arrival_time);
-        request->rq_sent = cfs_time_current_sec();
-        /* We give the server rq_timeout secs to process the req, and
-           add the network latency for our local timeout. */
+       do_gettimeofday(&request->rq_sent_tv);
+       request->rq_sent = cfs_time_current_sec();
+       /* We give the server rq_timeout secs to process the req, and
+          add the network latency for our local timeout. */
         request->rq_deadline = request->rq_sent + request->rq_timeout +
                 ptlrpc_at_get_net_latency(request);
 
-        ptlrpc_pinger_sending_on_import(request->rq_import);
+       ptlrpc_pinger_sending_on_import(imp);
 
         DEBUG_REQ(D_INFO, request, "send flg=%x",
                   lustre_msg_get_flags(request->rq_reqmsg));
@@ -685,9 +830,10 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
                           connection,
                           request->rq_request_portal,
                           request->rq_xid, 0);
-        if (rc == 0)
-                GOTO(out, rc);
+       if (likely(rc == 0))
+               GOTO(out, rc);
 
+       request->rq_req_unlinked = 1;
         ptlrpc_req_finished(request);
         if (noreply)
                 GOTO(out, rc);
@@ -710,15 +856,16 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
                 cfs_memory_pressure_restore(mpflag);
         return rc;
 }
+EXPORT_SYMBOL(ptl_send_rpc);
 
 /**
  * Register request buffer descriptor for request receiving.
  */
 int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
 {
-        struct ptlrpc_service   *service = rqbd->rqbd_service;
-        static lnet_process_id_t  match_id = {LNET_NID_ANY, LNET_PID_ANY};
-        int                      rc;
+       struct ptlrpc_service     *service = rqbd->rqbd_svcpt->scp_service;
+       static lnet_process_id_t  match_id = {LNET_NID_ANY, LNET_PID_ANY};
+       int                       rc;
         lnet_md_t                 md;
         lnet_handle_me_t          me_h;
 
@@ -728,8 +875,13 @@ int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_RQBD))
                 return (-ENOMEM);
 
-        rc = LNetMEAttach(service->srv_req_portal,
-                          match_id, 0, ~0, LNET_UNLINK, LNET_INS_AFTER, &me_h);
+       /* NB: CPT affinity service should use new LNet flag LNET_INS_LOCAL,
+        * which means buffer can only be attached on local CPT, and LND
+        * threads can find it by grabbing a local lock */
+       rc = LNetMEAttach(service->srv_req_portal,
+                         match_id, 0, ~0, LNET_UNLINK,
+                         rqbd->rqbd_svcpt->scp_cpt >= 0 ?
+                         LNET_INS_LOCAL : LNET_INS_AFTER, &me_h);
         if (rc != 0) {
                 CERROR("LNetMEAttach failed: %d\n", rc);
                 return (-ENOMEM);