LU-13004 ptlrpc: Allow BULK_BUF_KIOV to accept a kvec
[fs/lustre-release.git] lustre/ptlrpc/niobuf.c
index 6cb29ef..4720592 100644
@@ -23,7 +23,7 @@
  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2012, 2016, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -37,6 +37,7 @@
 #include <obd.h>
 #include <obd_class.h>
 #include "ptlrpc_internal.h"
+#include <lnet/lib-lnet.h> /* for CFS_FAIL_PTLRPC_OST_BULK_CB2 */
 
 /**
  * Helper function. Sends \a len bytes from \a base at offset \a offset
@@ -216,13 +217,10 @@ int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc)
                        break;
                }
 
-               /* LU-6441: last md is not sent and desc->bd_md_count == 1 */
-               if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB3,
-                                        CFS_FAIL_ONCE) &&
-                   total_md > 1 && posted_md == total_md - 1) {
-                       posted_md++;
-                       continue;
-               }
+               /* sanity.sh 224c: skip the last md */
+               if (posted_md == desc->bd_md_max_brw - 1)
+                       OBD_FAIL_CHECK_RESET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB3,
+                                            CFS_FAIL_PTLRPC_OST_BULK_CB2);
 
                /* Network is about to get at the memory */
                if (ptlrpc_is_bulk_put_source(desc->bd_type))
@@ -231,7 +229,7 @@ int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc)
                                     desc->bd_portal, mbits, 0, 0);
                else
                        rc = LNetGet(self_nid, desc->bd_mds[posted_md],
-                                    peer_id, desc->bd_portal, mbits, 0);
+                                    peer_id, desc->bd_portal, mbits, 0, false);
 
                posted_md++;
                if (rc != 0) {
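
The hunk above replaces the hard-coded "skip the last MD" hack from LU-6441 with the generic fail-loc check-and-reset pattern: when OBD_FAIL_PTLRPC_CLIENT_BULK_CB3 is armed and the last MD is about to be posted, the fail site is re-armed as CFS_FAIL_PTLRPC_OST_BULK_CB2, so the injected failure fires later in the bulk completion callback instead of suppressing the send here. A minimal standalone sketch of that pattern; my_fail_check(), my_fail_reset() and the numeric codes are hypothetical stand-ins for the real cfs_fail machinery:

    #include <stdio.h>

    static unsigned long fail_loc;            /* global injection site */

    static int my_fail_check(unsigned long id)
    {
            return fail_loc == id;            /* real code also masks flags */
    }

    /* if "id" is armed, re-arm the site with "new_id" so a later hook
     * (here: the bulk completion callback) trips instead of this one */
    static int my_fail_reset(unsigned long id, unsigned long new_id)
    {
            if (!my_fail_check(id))
                    return 0;
            fail_loc = new_id;
            return 1;
    }

    int main(void)
    {
            fail_loc = 0x111;                 /* illustrative values only */
            if (my_fail_reset(0x111, 0x222))
                    printf("re-armed for the callback side: %#lx\n", fail_loc);
            return 0;
    }
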
@@ -336,8 +334,11 @@ int ptlrpc_register_bulk(struct ptlrpc_request *req)
        /* cleanup the state of the bulk for it will be reused */
        if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY)
                desc->bd_nob_transferred = 0;
-       else
-               LASSERT(desc->bd_nob_transferred == 0);
+       else if (desc->bd_nob_transferred != 0)
+               /* If the network failed after the RPC was sent, this
+                * condition can occur.  Rather than asserting (as this code
+                * used to), return an EIO error. */
+               RETURN(-EIO);
 
        desc->bd_failure = 0;
 
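
The -EIO hunk above is a robustness change: a descriptor reused for a resend can legitimately reach this point with bd_nob_transferred != 0 if the network failed partway through the RPC, so an LASSERT here crashes the node on a survivable condition. A small sketch of the idea, where register_bulk_stub() is a hypothetical stand-in for ptlrpc_register_bulk():

    #include <errno.h>
    #include <stdio.h>

    struct bulk_desc { int nob_transferred; };

    static int register_bulk_stub(struct bulk_desc *d, int resend)
    {
            if (resend)
                    d->nob_transferred = 0;   /* descriptor is being reused */
            else if (d->nob_transferred != 0)
                    return -EIO;              /* network raced us: recoverable */
            return 0;
    }

    int main(void)
    {
            struct bulk_desc d = { .nob_transferred = 4096 };
            int rc = register_bulk_stub(&d, 0);

            if (rc)                           /* error, not a panic */
                    printf("register failed: %d, caller may resend\n", rc);
            return 0;
    }
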
@@ -372,8 +373,13 @@ int ptlrpc_register_bulk(struct ptlrpc_request *req)
                              LNET_MD_OP_GET : LNET_MD_OP_PUT);
                ptlrpc_fill_bulk_md(&md, desc, posted_md);
 
-               rc = LNetMEAttach(desc->bd_portal, peer, mbits, 0,
+               if (posted_md > 0 && posted_md + 1 == total_md &&
+                   OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_ATTACH)) {
+                       rc = -ENOMEM;
+               } else {
+                       rc = LNetMEAttach(desc->bd_portal, peer, mbits, 0,
                                  LNET_UNLINK, LNET_INS_AFTER, &me_h);
+               }
                if (rc != 0) {
                        CERROR("%s: LNetMEAttach failed x%llu/%d: rc = %d\n",
                               desc->bd_import->imp_obd->obd_name, mbits,
@@ -402,6 +408,7 @@ int ptlrpc_register_bulk(struct ptlrpc_request *req)
                LASSERT(desc->bd_md_count >= 0);
                mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
                req->rq_status = -ENOMEM;
+               desc->bd_registered = 0;
                RETURN(-ENOMEM);
        }
 
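
The OBD_FAIL_PTLRPC_BULK_ATTACH injection above deliberately fails only the last fragment of a multi-MD bulk (posted_md > 0 && posted_md + 1 == total_md), which is exactly the case where earlier MEs are already attached and must be unwound by mdunlink_iterate_helper(). A self-contained sketch of that targeted injection; attach_stub() and FAIL_ATTACH are hypothetical:

    #include <errno.h>
    #include <stdio.h>

    #define FAIL_ATTACH 1                     /* pretend the fail-loc is armed */

    static int attach_stub(int posted_md, int total_md)
    {
            /* only a multi-MD bulk, and only its final fragment */
            if (posted_md > 0 && posted_md + 1 == total_md && FAIL_ATTACH)
                    return -ENOMEM;
            return 0;
    }

    int main(void)
    {
            int total_md = 3, posted_md, rc = 0;

            for (posted_md = 0; posted_md < total_md; posted_md++) {
                    rc = attach_stub(posted_md, total_md);
                    if (rc)
                            break;
            }
            if (rc)   /* unwind the fragments already attached before failing */
                    printf("attach %d/%d failed: %d, unlinking earlier MDs\n",
                           posted_md, total_md, rc);
            return 0;
    }
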
@@ -468,21 +475,23 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
                wait_queue_head_t *wq = (req->rq_set != NULL) ?
                                        &req->rq_set->set_waitq :
                                        &req->rq_reply_waitq;
-                /* Network access will complete in finite time but the HUGE
-                 * timeout lets us CWARN for visibility of sluggish NALs */
-                lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
-                                           cfs_time_seconds(1), NULL, NULL);
-                rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi);
-                if (rc == 0) {
-                        ptlrpc_rqphase_move(req, req->rq_next_phase);
-                        RETURN(1);
-                }
+               /*
+                * Network access will complete in finite time but the HUGE
+                * timeout lets us CWARN for visibility of sluggish NALs.
+                */
+               lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
+                                          cfs_time_seconds(1), NULL, NULL);
+               rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi);
+               if (rc == 0) {
+                       ptlrpc_rqphase_move(req, req->rq_next_phase);
+                       RETURN(1);
+               }
 
-                LASSERT(rc == -ETIMEDOUT);
-                DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p",
-                          desc);
-        }
-        RETURN(0);
+               LASSERT(rc == -ETIMEDOUT);
+               DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p",
+                         desc);
+       }
+       RETURN(0);
 }
 
 static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
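
The re-indented wait above follows a bounded-wait-with-warning pattern: LNet will complete the unlink in finite time, but waking every second lets the thread CWARN about a sluggish network instead of blocking silently for the whole window. A standalone sketch of the shape of that loop; bulk_active() and the timings (including the 300-second bound) are illustrative stand-ins:

    #include <stdio.h>
    #include <unistd.h>

    #define LONG_UNLINK 300                   /* upper bound, in seconds */

    static int bulk_active(int elapsed)
    {
            return elapsed < 3;               /* pretend the net drains in 3s */
    }

    int main(void)
    {
            int elapsed;

            for (elapsed = 0; elapsed < LONG_UNLINK; elapsed++) {
                    if (!bulk_active(elapsed)) {
                            printf("bulk unlinked after %ds\n", elapsed);
                            return 0;         /* normal completion */
                    }
                    /* periodic warning keeps a stuck unlink visible in logs */
                    fprintf(stderr, "still waiting for unlink (%ds)\n", elapsed);
                    sleep(1);
            }
            fprintf(stderr, "unexpectedly long timeout\n");
            return 1;
    }
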
@@ -586,16 +595,6 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
                        req->rq_export->exp_obd->obd_minor);
         }
 
-       /* In order to keep interoprability with the client (< 2.3) which
-        * doesn't have pb_jobid in ptlrpc_body, We have to shrink the
-        * ptlrpc_body in reply buffer to ptlrpc_body_v2, otherwise, the
-        * reply buffer on client will be overflow.
-        *
-        * XXX Remove this whenver we drop the interoprability with such client.
-        */
-       req->rq_replen = lustre_shrink_msg(req->rq_repmsg, 0,
-                                          sizeof(struct ptlrpc_body_v2), 1);
-
         if (req->rq_type != PTL_RPC_MSG_ERR)
                 req->rq_type = PTL_RPC_MSG_REPLY;
 
@@ -747,8 +746,8 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
                spin_unlock(&imp->imp_lock);
 
                lustre_msg_set_last_xid(request->rq_reqmsg, min_xid);
-               DEBUG_REQ(D_RPCTRACE, request, "Allocating new xid for "
-                         "resend on EINPROGRESS");
+               DEBUG_REQ(D_RPCTRACE, request,
+                         "Allocating new XID for resend on EINPROGRESS");
        }
 
        if (request->rq_bulk != NULL) {
@@ -758,9 +757,9 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
 
        if (list_empty(&request->rq_unreplied_list) ||
            request->rq_xid <= imp->imp_known_replied_xid) {
-               DEBUG_REQ(D_ERROR, request, "xid: %llu, replied: %llu, "
-                         "list_empty:%d\n", request->rq_xid,
-                         imp->imp_known_replied_xid,
+               DEBUG_REQ(D_ERROR, request,
+                         "xid=%llu, replied=%llu, list_empty=%d",
+                         request->rq_xid, imp->imp_known_replied_xid,
                          list_empty(&request->rq_unreplied_list));
                LBUG();
        }
@@ -777,8 +776,8 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
                if (request->rq_resend_cb != NULL)
                        request->rq_resend_cb(request, &request->rq_async_args);
        }
-        if (request->rq_memalloc)
-                mpflag = cfs_memory_pressure_get_and_set();
+       if (request->rq_memalloc)
+               mpflag = cfs_memory_pressure_get_and_set();
 
        rc = sptlrpc_cli_wrap_request(request);
        if (rc)
@@ -788,7 +787,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
        if (request->rq_bulk != NULL) {
                rc = ptlrpc_register_bulk (request);
                if (rc != 0)
-                       GOTO(out, rc);
+                       GOTO(cleanup_bulk, rc);
                /*
                 * All the mds in the request will have the same cpt
                 * encoded in the cookie. So we can just get the first
@@ -810,13 +809,13 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
                                spin_lock(&request->rq_lock);
                                request->rq_err = 1;
                                spin_unlock(&request->rq_lock);
-                                request->rq_status = rc;
-                                GOTO(cleanup_bulk, rc);
-                        }
-                } else {
-                        request->rq_repdata = NULL;
-                        request->rq_repmsg = NULL;
-                }
+                               request->rq_status = rc;
+                               GOTO(cleanup_bulk, rc);
+                       }
+               } else {
+                       request->rq_repdata = NULL;
+                       request->rq_repmsg = NULL;
+               }
 
                 rc = LNetMEAttach(request->rq_reply_portal,/*XXX FIXME bug 249*/
                                   connection->c_peer, request->rq_xid, 0,
@@ -892,7 +891,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
 
        ptlrpc_pinger_sending_on_import(imp);
 
-       DEBUG_REQ(D_INFO, request, "send flg=%x",
+       DEBUG_REQ(D_INFO, request, "send flags=%x",
                  lustre_msg_get_flags(request->rq_reqmsg));
        rc = ptl_send_buf(&request->rq_req_md_h,
                          request->rq_reqbuf, request->rq_reqdata_len,
@@ -909,18 +908,20 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
                 GOTO(out, rc);
 
  cleanup_me:
-        /* MEUnlink is safe; the PUT didn't even get off the ground, and
-         * nobody apart from the PUT's target has the right nid+XID to
-         * access the reply buffer. */
-        rc2 = LNetMEUnlink(reply_me_h);
-        LASSERT (rc2 == 0);
-        /* UNLINKED callback called synchronously */
-        LASSERT(!request->rq_receiving_reply);
+       /* MEUnlink is safe; the PUT didn't even get off the ground, and
+        * nobody apart from the PUT's target has the right nid+XID to
+        * access the reply buffer. */
+       rc2 = LNetMEUnlink(reply_me_h);
+       LASSERT(rc2 == 0);
+       /* UNLINKED callback called synchronously */
+       LASSERT(!request->rq_receiving_reply);
 
  cleanup_bulk:
-        /* We do sync unlink here as there was no real transfer here so
-         * the chance to have long unlink to sluggish net is smaller here. */
+       /* We do a sync unlink here as there was no real transfer, so the
+        * chance of a long unlink due to a sluggish net is smaller. */
         ptlrpc_unregister_bulk(request, 0);
+       if (request->rq_bulk != NULL)
+               request->rq_bulk->bd_registered = 0;
  out:
        if (rc == -ENOMEM) {
                /* set rq_sent so that this request is treated
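
Both new bd_registered = 0 assignments in this patch (in ptlrpc_register_bulk's ENOMEM path and in cleanup_bulk above) address the same hazard: if the flag stays set after a failed or torn-down registration, the resend path will skip re-registering the buffers and send an RPC with no bulk attached. A hedged sketch of the flag's lifecycle, with hypothetical names:

    #include <errno.h>
    #include <stdio.h>

    struct bulk_desc { int bd_registered; };

    /* hypothetical stand-in for ptlrpc_register_bulk() */
    static int register_bulk_stub(struct bulk_desc *d, int inject_enomem)
    {
            if (d->bd_registered)             /* resend with live registration */
                    return 0;
            if (inject_enomem) {
                    d->bd_registered = 0;     /* the fix: never leave it stale */
                    return -ENOMEM;
            }
            d->bd_registered = 1;
            return 0;
    }

    int main(void)
    {
            struct bulk_desc d = { .bd_registered = 0 };

            if (register_bulk_stub(&d, 1) == -ENOMEM)
                    printf("first try failed, flag=%d\n", d.bd_registered);

            /* resend: because the flag was cleared, this registers afresh */
            if (register_bulk_stub(&d, 0) == 0)
                    printf("resend registered bulk, flag=%d\n", d.bd_registered);
            return 0;
    }
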
@@ -941,7 +942,10 @@ EXPORT_SYMBOL(ptl_send_rpc);
 int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
 {
        struct ptlrpc_service *service = rqbd->rqbd_svcpt->scp_service;
-       static struct lnet_process_id match_id = {LNET_NID_ANY, LNET_PID_ANY};
+       static struct lnet_process_id match_id = {
+               .nid = LNET_NID_ANY,
+               .pid = LNET_PID_ANY
+       };
        int rc;
        struct lnet_md md;
        struct lnet_handle_me me_h;
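
Finally, the match_id hunk converts a positional initializer to designated initializers, so each value stays bound to the right field even if struct lnet_process_id is ever reordered or extended. A tiny illustration with a hypothetical struct:

    #include <stdio.h>

    struct pid_stub {
            unsigned long nid;
            int pid;
    };

    int main(void)
    {
            /* positional: silently wrong if the field order ever changes */
            struct pid_stub a = { -1UL, -1 };
            /* designated: each value is tied to its field by name */
            struct pid_stub b = { .nid = -1UL, .pid = -1 };

            printf("a: %lx/%d  b: %lx/%d\n", a.nid, a.pid, b.nid, b.pid);
            return 0;
    }
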