LU-904 ptlrpc: redo io on -EINPROGRESS
fs/lustre-release.git: lustre/ptlrpc/client.c
index e9979df..6b50207 100644
@@ -29,8 +29,7 @@
  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011 Whamcloud, Inc.
- *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -97,7 +96,7 @@ struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
  * Allocate and initialize new bulk descriptor
  * Returns pointer to the descriptor or NULL on error.
  */
-static inline struct ptlrpc_bulk_desc *new_bulk(int npages, int type, int portal)
+struct ptlrpc_bulk_desc *new_bulk(int npages, int type, int portal)
 {
         struct ptlrpc_bulk_desc *desc;
 
@@ -149,39 +148,6 @@ struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
 }
 
 /**
- * Prepare bulk descriptor for specified incoming request \a req that
- * can fit \a npages * pages. \a type is bulk type. \a portal is where
- * the bulk to be sent. Used on server-side after request was already
- * received.
- * Returns pointer to newly allocatrd initialized bulk descriptor or NULL on
- * error.
- */
-struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
-                                              int npages, int type, int portal)
-{
-        struct obd_export *exp = req->rq_export;
-        struct ptlrpc_bulk_desc *desc;
-
-        ENTRY;
-        LASSERT(type == BULK_PUT_SOURCE || type == BULK_GET_SINK);
-
-        desc = new_bulk(npages, type, portal);
-        if (desc == NULL)
-                RETURN(NULL);
-
-        desc->bd_export = class_export_get(exp);
-        desc->bd_req = req;
-
-        desc->bd_cbid.cbid_fn  = server_bulk_callback;
-        desc->bd_cbid.cbid_arg = desc;
-
-        /* NB we don't assign rq_bulk here; server-side requests are
-         * re-used, and the handler frees the bulk desc explicitly. */
-
-        return desc;
-}
-
-/**
  * Add a page \a page to the bulk descriptor \a desc.
  * Data to transfer in the page starts at offset \a pageoffset and
  * amount of data to transfer from the page is \a len
@@ -1101,9 +1067,16 @@ static int ptlrpc_import_delay_req(struct obd_import *imp,
  */
 static int ptlrpc_console_allow(struct ptlrpc_request *req)
 {
-        __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
+        __u32 opc;
         int err;
 
+        /* Fake requests do not carry an rq_reqmsg */
+        if (req->rq_fake)
+                return 0;
+
+        LASSERT(req->rq_reqmsg != NULL);
+        opc = lustre_msg_get_opc(req->rq_reqmsg);
+
         /* Suppress particular reconnect errors which are to be expected.  No
          * errors are suppressed for the initial connection on an import */
         if ((lustre_handle_is_used(&req->rq_import->imp_remote_handle)) &&
@@ -1344,23 +1317,25 @@ static int after_reply(struct ptlrpc_request *req)
  * Helper function to send request \a req over the network for the first time
  * Also adjusts request phase.
  * Returns 0 on success or error code.
- */ 
+ */
 static int ptlrpc_send_new_req(struct ptlrpc_request *req)
 {
-        struct obd_import     *imp;
+        struct obd_import     *imp = req->rq_import;
         int rc;
         ENTRY;
 
         LASSERT(req->rq_phase == RQ_PHASE_NEW);
-        if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()))
+        if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()) &&
+            (!req->rq_generation_set ||
+             req->rq_import_generation == imp->imp_generation))
                 RETURN (0);
 
         ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
 
-        imp = req->rq_import;
         cfs_spin_lock(&imp->imp_lock);
 
-        req->rq_import_generation = imp->imp_generation;
+        if (!req->rq_generation_set)
+                req->rq_import_generation = imp->imp_generation;
 
         if (ptlrpc_import_delay_req(imp, req, &rc)) {
                 cfs_spin_lock(&req->rq_lock);
@@ -1691,7 +1666,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                          * process the reply. Similarly if the RPC returned
                          * an error, and therefore the bulk will never arrive.
                          */
-                        if (req->rq_bulk == NULL || req->rq_status != 0) {
+                        if (req->rq_bulk == NULL || req->rq_status < 0) {
                                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
                                 GOTO(interpret, req->rq_status);
                         }
@@ -1709,7 +1684,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                          * was good after getting the REPLY for her GET or
                          * the ACK for her PUT. */
                         DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
-                        LBUG();
+                        req->rq_status = -EIO;
                 }
 
                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
@@ -2115,7 +2090,6 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
         LASSERTF(cfs_list_empty(&request->rq_set_chain), "req %p\n", request);
         LASSERTF(cfs_list_empty(&request->rq_exp_list), "req %p\n", request);
         LASSERTF(!request->rq_replay, "req %p\n", request);
-        LASSERT(request->rq_cli_ctx || request->rq_fake);
 
         req_capsule_fini(&request->rq_pill);
 
@@ -2651,6 +2625,10 @@ int ptlrpc_replay_req(struct ptlrpc_request *req)
         /* Readjust the timeout for current conditions */
         ptlrpc_at_set_req_timeout(req);
 
+        /* Tell the server our net_latency so it can estimate how long
+         * to wait for the next replay */
+        lustre_msg_set_service_time(req->rq_reqmsg,
+                                    ptlrpc_at_get_net_latency(req));
         DEBUG_REQ(D_HA, req, "REPLAY");
 
         cfs_atomic_inc(&req->rq_import->imp_replay_inflight);
@@ -2809,3 +2787,136 @@ __u64 ptlrpc_sample_next_xid(void)
 #endif
 }
 EXPORT_SYMBOL(ptlrpc_sample_next_xid);
+
+/**
+ * Functions for operating ptlrpc work items.
+ *
+ * A ptlrpc work item is a callback that runs inside ptlrpcd context.
+ * The callback must not sleep, otherwise it will block the ptlrpcd thread.
+ *
+ * 1. After a work item is created, it can be queued many times:
+ *         handler = ptlrpcd_alloc_work();
+ *         ptlrpcd_queue_work();
+ *
+ *    and queued again whenever necessary:
+ *         ptlrpcd_queue_work();
+ *         ptlrpcd_destroy_work();
+ * 2. ptlrpcd_queue_work() may be called concurrently from multiple
+ *    processes, but the work will be queued at most once at any given
+ *    time. Also, as the name implies, there may be a delay before the
+ *    work actually runs in a ptlrpcd thread.
+ */
+struct ptlrpc_work_async_args {
+        __u64   magic;
+        int   (*cb)(const struct lu_env *, void *);
+        void   *cbdata;
+};
+
+#define PTLRPC_WORK_MAGIC 0x6655436b676f4f44ULL /* magic code */
+
+static int work_interpreter(const struct lu_env *env,
+                            struct ptlrpc_request *req, void *data, int rc)
+{
+        struct ptlrpc_work_async_args *arg = data;
+
+        LASSERT(arg->magic == PTLRPC_WORK_MAGIC);
+        LASSERT(arg->cb != NULL);
+
+        return arg->cb(env, arg->cbdata);
+}
+
+/**
+ * Create a work item for ptlrpc.
+ */
+void *ptlrpcd_alloc_work(struct obd_import *imp,
+                         int (*cb)(const struct lu_env *, void *), void *cbdata)
+{
+        struct ptlrpc_request         *req = NULL;
+        struct ptlrpc_work_async_args *args;
+        ENTRY;
+
+        cfs_might_sleep();
+
+        if (cb == NULL)
+                RETURN(ERR_PTR(-EINVAL));
+
+        /* initialization copied from the deprecated fakereq code */
+        OBD_ALLOC_PTR(req);
+        if (req == NULL) {
+                CERROR("ptlrpc: run out of memory!\n");
+                RETURN(ERR_PTR(-ENOMEM));
+        }
+
+        req->rq_send_state = LUSTRE_IMP_FULL;
+        req->rq_type = PTL_RPC_MSG_REQUEST;
+        req->rq_import = class_import_get(imp);
+        req->rq_export = NULL;
+        req->rq_interpret_reply = work_interpreter;
+        /* don't want reply */
+        req->rq_receiving_reply = 0;
+        req->rq_must_unlink = 0;
+        req->rq_no_delay = req->rq_no_resend = 1;
+
+        cfs_spin_lock_init(&req->rq_lock);
+        CFS_INIT_LIST_HEAD(&req->rq_list);
+        CFS_INIT_LIST_HEAD(&req->rq_replay_list);
+        CFS_INIT_LIST_HEAD(&req->rq_set_chain);
+        CFS_INIT_LIST_HEAD(&req->rq_history_list);
+        CFS_INIT_LIST_HEAD(&req->rq_exp_list);
+        cfs_waitq_init(&req->rq_reply_waitq);
+        cfs_waitq_init(&req->rq_set_waitq);
+        cfs_atomic_set(&req->rq_refcount, 1);
+
+        CLASSERT (sizeof(*args) <= sizeof(req->rq_async_args));
+        args = ptlrpc_req_async_args(req);
+        args->magic  = PTLRPC_WORK_MAGIC;
+        args->cb     = cb;
+        args->cbdata = cbdata;
+
+        RETURN(req);
+}
+EXPORT_SYMBOL(ptlrpcd_alloc_work);
+
+void ptlrpcd_destroy_work(void *handler)
+{
+        struct ptlrpc_request *req = handler;
+
+        if (req)
+                ptlrpc_req_finished(req);
+}
+EXPORT_SYMBOL(ptlrpcd_destroy_work);
+
+int ptlrpcd_queue_work(void *handler)
+{
+        struct ptlrpc_request *req = handler;
+
+        /*
+         * Check whether the req is already queued.
+         *
+         * A trick is used here: ptlrpc lacks a reliable way to check
+         * whether a request is being processed, so the refcount of the
+         * req is used for that purpose instead. This is safe because
+         * the caller treats this req as opaque data. - Jinshan
+         */
+        LASSERT(cfs_atomic_read(&req->rq_refcount) > 0);
+        if (cfs_atomic_read(&req->rq_refcount) > 1)
+                return -EBUSY;
+
+        if (cfs_atomic_inc_return(&req->rq_refcount) > 2) { /* race */
+                cfs_atomic_dec(&req->rq_refcount);
+                return -EBUSY;
+        }
+
+        /* re-initialize the req */
+        req->rq_timeout        = obd_timeout;
+        req->rq_sent           = cfs_time_current_sec();
+        req->rq_deadline       = req->rq_sent + req->rq_timeout;
+        req->rq_reply_deadline = req->rq_deadline;
+        req->rq_phase          = RQ_PHASE_INTERPRET;
+        req->rq_next_phase     = RQ_PHASE_COMPLETE;
+        req->rq_xid            = ptlrpc_next_xid();
+        req->rq_import_generation = req->rq_import->imp_generation;
+
+        ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+        return 0;
+}
+EXPORT_SYMBOL(ptlrpcd_queue_work);
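
A minimal usage sketch of the work API added above (not part of the patch): struct my_state, flush_cb() and setup_flush_work() are hypothetical names used only for illustration; the only symbols taken from this change are ptlrpcd_alloc_work(), ptlrpcd_queue_work() and ptlrpcd_destroy_work().

        /* Hypothetical caller-side state for the sketch. */
        struct my_state {
                int   ms_count;         /* stand-in work payload */
                void *ms_handler;       /* saved for ptlrpcd_destroy_work() */
        };

        /* Callback runs in ptlrpcd context, so it must not sleep. */
        static int flush_cb(const struct lu_env *env, void *data)
        {
                struct my_state *state = data;

                state->ms_count++;      /* stand-in for real, non-blocking work */
                return 0;
        }

        static int setup_flush_work(struct obd_import *imp,
                                    struct my_state *state)
        {
                void *handler;
                int   rc;

                /* Allocate once; the same handler can be queued many times. */
                handler = ptlrpcd_alloc_work(imp, flush_cb, state);
                if (IS_ERR(handler))
                        return PTR_ERR(handler);

                /* -EBUSY means the work is already queued, which is harmless:
                 * the pending run will observe the latest state. */
                rc = ptlrpcd_queue_work(handler);
                if (rc != 0 && rc != -EBUSY) {
                        ptlrpcd_destroy_work(handler);
                        return rc;
                }

                /* Destroy with ptlrpcd_destroy_work() when no longer needed. */
                state->ms_handler = handler;
                return 0;
        }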