LU-6496 ptlrpc: Fix wrong code indentation in plain_authorize
diff --git a/lustre/ptlrpc/client.c b/lustre/ptlrpc/client.c
index 2e82e53..091cbcb 100644
@@ -27,7 +27,7 @@
  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -49,6 +49,7 @@
 
 static int ptlrpc_send_new_req(struct ptlrpc_request *req);
 static int ptlrpcd_check_work(struct ptlrpc_request *req);
+static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async);
 
 /**
  * Initialize the passed-in client structure \a cl.
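
ptlrpc_unregister_reply() is made static later in this patch but is called above its definition, hence the forward declaration added here. A minimal illustration of the pattern, with hypothetical names:

static int helper(int x);	/* forward declaration */

static int caller(int x)
{
	/* helper() is used here, above its definition */
	return helper(x) + 1;
}

static int helper(int x)
{
	return x * 2;
}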
@@ -91,7 +92,6 @@ struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
 
         return c;
 }
-EXPORT_SYMBOL(ptlrpc_uuid_to_connection);
 
 /**
  * Allocate and initialize new bulk descriptor on the sender.
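
This is the first of many hunks in the patch that drop EXPORT_SYMBOL() from functions with no callers outside the ptlrpc module. A minimal sketch of the pattern, with a hypothetical function name:

#include <linux/module.h>

/* Before (visible to every kernel module):
 *	int demo_helper(void);
 *	EXPORT_SYMBOL(demo_helper);
 *
 * After: without EXPORT_SYMBOL() the symbol stays module-private, and
 * if all callers live in one file it can also be marked static. */
static int demo_helper(void)
{
	return 0;
}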
@@ -359,27 +359,29 @@ __must_hold(&req->rq_lock)
        rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
        if (rc) {
                spin_lock(&req->rq_lock);
-                RETURN(rc);
-        }
-
-        rc = unpack_reply(early_req);
-        if (rc == 0) {
-                /* Expecting to increase the service time estimate here */
-                ptlrpc_at_adj_service(req,
-                        lustre_msg_get_timeout(early_req->rq_repmsg));
-                ptlrpc_at_adj_net_latency(req,
-                        lustre_msg_get_service_time(early_req->rq_repmsg));
-        }
-
-        sptlrpc_cli_finish_early_reply(early_req);
+               RETURN(rc);
+       }
 
+       rc = unpack_reply(early_req);
        if (rc != 0) {
+               sptlrpc_cli_finish_early_reply(early_req);
                spin_lock(&req->rq_lock);
                RETURN(rc);
        }
 
-       /* Adjust the local timeout for this req */
-       ptlrpc_at_set_req_timeout(req);
+       /* Use the new timeout value only to adjust the local value for this
+        * request; don't include it in at_history. It is not yet clear why
+        * the service time increased and whether it should be counted or
+        * skipped, e.g. this could be a recovery case or a server error; the
+        * real reply will add all new data if it is worth adding. */
+       req->rq_timeout = lustre_msg_get_timeout(early_req->rq_repmsg);
+       lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
+
+       /* Network latency can be adjusted, it is pure network delays */
+       ptlrpc_at_adj_net_latency(req,
+                       lustre_msg_get_service_time(early_req->rq_repmsg));
+
+       sptlrpc_cli_finish_early_reply(early_req);
 
        spin_lock(&req->rq_lock);
        olddl = req->rq_deadline;
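
The rewritten block above applies the server-suggested timeout only to this request and keeps it out of at_history, while the measured network latency still feeds the adaptive-timeout state. For context, a sketch of how the function plausibly continues past the hunk, consistent with mainline Lustre (ptlrpc_at_get_net_latency() is assumed):

	/* continuation (sketch): the adjusted rq_timeout feeds a new
	 * deadline counted from the send time, since the client cannot
	 * know when the request actually reached the server */
	req->rq_deadline = req->rq_sent + req->rq_timeout +
			   ptlrpc_at_get_net_latency(req);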
@@ -399,7 +401,7 @@ __must_hold(&req->rq_lock)
        RETURN(rc);
 }
 
-struct kmem_cache *request_cache;
+static struct kmem_cache *request_cache;
 
 int ptlrpc_request_cache_init(void)
 {
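
With request_cache now static, the slab is reachable only through its helpers. The hunk cuts the init function off at its opening brace; a sketch of the setup it performs, following the usual kmem_cache pattern (hedged, may differ in detail):

#include <linux/slab.h>

static struct kmem_cache *request_cache;

int ptlrpc_request_cache_init(void)
{
	request_cache = kmem_cache_create("ptlrpc_cache",
					  sizeof(struct ptlrpc_request),
					  0, SLAB_HWCACHE_ALIGN, NULL);
	return request_cache == NULL ? -ENOMEM : 0;
}

void ptlrpc_request_cache_fini(void)
{
	kmem_cache_destroy(request_cache);
}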
@@ -834,7 +836,6 @@ ptlrpc_prep_req_pool(struct obd_import *imp,
         }
         return request;
 }
-EXPORT_SYMBOL(ptlrpc_prep_req_pool);
 
 /**
  * Same as ptlrpc_prep_req_pool, but without pool
@@ -846,7 +847,6 @@ ptlrpc_prep_req(struct obd_import *imp, __u32 version, int opcode, int count,
         return ptlrpc_prep_req_pool(imp, version, opcode, count, lengths, bufs,
                                     NULL);
 }
-EXPORT_SYMBOL(ptlrpc_prep_req);
 
 /**
  * Allocate and initialize new request set structure.
@@ -901,7 +901,6 @@ struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
 
        RETURN(set);
 }
-EXPORT_SYMBOL(ptlrpc_prep_fcset);
 
 /**
  * Wind down and free request set structure previously allocated with
@@ -983,7 +982,6 @@ int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
 
        RETURN(0);
 }
-EXPORT_SYMBOL(ptlrpc_set_add_cb);
 
 /**
  * Add a new request to the general purpose request set.
@@ -1045,7 +1043,6 @@ void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
                        wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
        }
 }
-EXPORT_SYMBOL(ptlrpc_set_add_new_req);
 
 /**
  * Based on the current state of the import, determine if the request
@@ -1227,9 +1224,9 @@ static int after_reply(struct ptlrpc_request *req)
 
         LASSERT(obd != NULL);
         /* repbuf must be unlinked */
-       LASSERT(!req->rq_receiving_reply && !req->rq_reply_unlink);
+       LASSERT(!req->rq_receiving_reply && req->rq_reply_unlinked);
 
-        if (req->rq_reply_truncate) {
+       if (req->rq_reply_truncated) {
                 if (ptlrpc_no_resend(req)) {
                         DEBUG_REQ(D_ERROR, req, "reply buffer overflow,"
                                   " expected: %d, actual size: %d",
@@ -1250,6 +1247,9 @@ static int after_reply(struct ptlrpc_request *req)
                 RETURN(0);
         }
 
+       do_gettimeofday(&work_start);
+       timediff = cfs_timeval_sub(&work_start, &req->rq_sent_tv, NULL);
+
         /*
          * NB Until this point, the whole of the incoming message,
          * including buflens, status etc is in the sender's byte order.
@@ -1304,8 +1304,6 @@ static int after_reply(struct ptlrpc_request *req)
                RETURN(0);
        }
 
-       do_gettimeofday(&work_start);
-       timediff = cfs_timeval_sub(&work_start, &req->rq_sent_tv, NULL);
        if (obd->obd_svc_stats != NULL) {
                lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
                                    timediff);
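
Sampling do_gettimeofday() before the sptlrpc unwrap and byte-order swabbing (previous hunk) means PTLRPC_REQWAIT_CNTR now records the network round trip plus server processing time, without local reply handling. A standalone sketch of the timeval arithmetic that cfs_timeval_sub() performs, for illustration only:

#include <stdio.h>
#include <sys/time.h>

/* microseconds elapsed from 'earlier' to 'later' */
static long tv_sub_usec(const struct timeval *later,
			const struct timeval *earlier)
{
	return (later->tv_sec - earlier->tv_sec) * 1000000L +
	       (later->tv_usec - earlier->tv_usec);
}

int main(void)
{
	struct timeval sent, now;

	gettimeofday(&sent, NULL);
	/* ... the request would be in flight here ... */
	gettimeofday(&now, NULL);
	printf("waited %ld usec\n", tv_sub_usec(&now, &sent));
	return 0;
}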
@@ -1656,7 +1654,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
 
                 /* ptlrpc_set_wait->l_wait_event sets lwi_allow_intr
                  * so it sets rq_intr regardless of individual rpc
-                 * timeouts. The synchronous IO waiting path sets 
+                * timeouts. The synchronous IO waiting path sets
                  * rq_intr irrespective of whether ptlrpcd
                  * has seen a timeout.  Our policy is to only interpret
                  * interrupted rpcs after they have timed out, so we
@@ -2036,7 +2034,6 @@ int ptlrpc_expired_set(void *data)
          */
         RETURN(1);
 }
-EXPORT_SYMBOL(ptlrpc_expired_set);
 
 /**
  * Sets rq_intr flag in \a req under spinlock.
@@ -2053,7 +2050,7 @@ EXPORT_SYMBOL(ptlrpc_mark_interrupted);
  * Interrupts (sets interrupted flag) all uncompleted requests in
  * a set \a data. Callback for l_wait_event for interruptible waits.
  */
-void ptlrpc_interrupted_set(void *data)
+static void ptlrpc_interrupted_set(void *data)
 {
        struct ptlrpc_request_set *set = data;
        struct list_head *tmp;
@@ -2072,7 +2069,6 @@ void ptlrpc_interrupted_set(void *data)
                ptlrpc_mark_interrupted(req);
        }
 }
-EXPORT_SYMBOL(ptlrpc_interrupted_set);
 
 /**
  * Get the smallest timeout in the set; this does NOT set a timeout.
@@ -2123,7 +2119,6 @@ int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
         }
         RETURN(timeout);
 }
-EXPORT_SYMBOL(ptlrpc_set_next_timeout);
 
 /**
 * Send all unsent requests from the set and then wait until all
@@ -2160,21 +2155,21 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
                 CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n",
                        set, timeout);
 
-                if (timeout == 0 && !cfs_signal_pending())
+               if (timeout == 0 && !signal_pending(current))
                         /*
                          * No requests are in-flight (either timed out
                          * or delayed), so we can allow interrupts.
                          * We still want to block for a limited time,
                          * so we allow interrupts during the timeout.
                          */
-                        lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1), 
+                       lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1),
                                                    ptlrpc_expired_set,
                                                    ptlrpc_interrupted_set, set);
                 else
                         /*
                          * At least one request is in flight, so no
                          * interrupts are allowed. Wait until all
-                         * complete, or an in-flight req times out. 
+                        * complete, or an in-flight req times out.
                          */
                         lwi = LWI_TIMEOUT(cfs_time_seconds(timeout? timeout : 1),
                                           ptlrpc_expired_set, set);
@@ -2185,7 +2180,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
                  * pending when we started, we need to handle it now or we risk
                  * it being ignored forever */
                if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
-                   cfs_signal_pending()) {
+                   signal_pending(current)) {
                        sigset_t blocked_sigs =
                                           cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
 
@@ -2193,7 +2188,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
                         * like SIGINT or SIGKILL. We still ignore less
                         * important signals since ptlrpc set is not easily
                         * reentrant from userspace again */
-                       if (cfs_signal_pending())
+                       if (signal_pending(current))
                                ptlrpc_interrupted_set(set);
                        cfs_restore_sigs(blocked_sigs);
                }
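
cfs_signal_pending() is replaced throughout by the kernel's own signal_pending(current). The surrounding cfs_block_sigsinv()/cfs_restore_sigs() pair masks everything except the fatal signals while the late interrupt is handled; a plausible sketch of that helper in terms of standard kernel primitives (the real libcfs implementation may differ):

#include <linux/sched.h>
#include <linux/signal.h>

/* Block all signals except those in 'sigs'; return the previous mask
 * so the caller can restore it afterwards. */
static sigset_t block_sigsinv_sketch(unsigned long sigs)
{
	sigset_t old, new;

	siginitsetinv(&new, sigs);	/* everything NOT in 'sigs' */
	sigprocmask(SIG_BLOCK, &new, &old);

	return old;
}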
@@ -2318,14 +2313,13 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
 /**
  * Drop one request reference. Must be called with import imp_lock held.
- * When reference count drops to zero, reuqest is freed.
+ * When reference count drops to zero, request is freed.
  */
 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
 {
        assert_spin_locked(&request->rq_import->imp_lock);
        (void)__ptlrpc_req_finished(request, 1);
 }
-EXPORT_SYMBOL(ptlrpc_req_finished_with_imp_lock);
 
 /**
  * Helper function
@@ -2382,7 +2376,7 @@ EXPORT_SYMBOL(ptlrpc_req_xid);
  * The request owner (i.e. the thread doing the I/O) must call...
  * Returns 0 on success or 1 if unregistering cannot be made.
  */
-int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
+static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
 {
        int                rc;
        struct l_wait_info lwi;
@@ -2446,13 +2440,14 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
                 }
 
                 LASSERT(rc == -ETIMEDOUT);
-                DEBUG_REQ(D_WARNING, request, "Unexpectedly long timeout "
-                         "rvcng=%d unlnk=%d/%d", request->rq_receiving_reply,
-                         request->rq_req_unlink, request->rq_reply_unlink);
+               DEBUG_REQ(D_WARNING, request, "Unexpectedly long timeout "
+                         "receiving_reply=%d req_unlinked=%d reply_unlinked=%d",
+                         request->rq_receiving_reply,
+                         request->rq_req_unlinked,
+                         request->rq_reply_unlinked);
         }
         RETURN(0);
 }
-EXPORT_SYMBOL(ptlrpc_unregister_reply);
 
 static void ptlrpc_free_request(struct ptlrpc_request *req)
 {
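
About the reworded warning above: ptlrpc_unregister_reply() repeats a bounded wait until LNet has unlinked the request buffers, so the message fires once per timeout interval while the unlink is still outstanding. A condensed sketch of the loop shape, hedged, with helper names as used in the surrounding Lustre code:

	for (;;) {
		/* huge overall timeout, but wake periodically so a
		 * sluggish network layer shows up in the logs */
		lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
					   cfs_time_seconds(1), NULL, NULL);
		rc = l_wait_event(*wq,
				  !ptlrpc_client_recv_or_unlink(request),
				  &lwi);
		if (rc == 0) {
			ptlrpc_rqphase_move(request, request->rq_next_phase);
			RETURN(1);
		}
		LASSERT(rc == -ETIMEDOUT);
		DEBUG_REQ(D_WARNING, request,
			  "Unexpectedly long timeout ...");
	}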
@@ -2579,7 +2574,6 @@ void ptlrpc_cleanup_client(struct obd_import *imp)
         ENTRY;
         EXIT;
 }
-EXPORT_SYMBOL(ptlrpc_cleanup_client);
 
 /**
  * Schedule previously sent request for resend.
@@ -2617,7 +2611,6 @@ void ptlrpc_resend_req(struct ptlrpc_request *req)
         ptlrpc_client_wake_req(req);
        spin_unlock(&req->rq_lock);
 }
-EXPORT_SYMBOL(ptlrpc_resend_req);
 
 /* XXX: this function and rq_status are currently unused */
 void ptlrpc_restart_req(struct ptlrpc_request *req)
@@ -2631,7 +2624,6 @@ void ptlrpc_restart_req(struct ptlrpc_request *req)
        ptlrpc_client_wake_req(req);
        spin_unlock(&req->rq_lock);
 }
-EXPORT_SYMBOL(ptlrpc_restart_req);
 
 /**
  * Grab additional reference on a request \a req
@@ -2699,7 +2691,6 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
 
        list_add(&req->rq_replay_list, &imp->imp_replay_list);
 }
-EXPORT_SYMBOL(ptlrpc_retain_replayable_request);
 
 /**
  * Send request and wait until it completes.
@@ -2735,7 +2726,7 @@ EXPORT_SYMBOL(ptlrpc_queue_wait);
 
 /**
  * Callback used for replayed requests reply processing.
- * In case of succesful reply calls registeresd request replay callback.
+ * In case of successful reply calls registered request replay callback.
  * In case of error restart replay process.
  */
 static int ptlrpc_replay_interpret(const struct lu_env *env,
@@ -2869,7 +2860,6 @@ int ptlrpc_replay_req(struct ptlrpc_request *req)
        ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
        RETURN(0);
 }
-EXPORT_SYMBOL(ptlrpc_replay_req);
 
 /**
  * Aborts all in-flight request on import \a imp sending and delayed lists
@@ -2929,7 +2919,6 @@ void ptlrpc_abort_inflight(struct obd_import *imp)
 
        EXIT;
 }
-EXPORT_SYMBOL(ptlrpc_abort_inflight);
 
 /**
  * Abort all uncompleted requests in request set \a set
@@ -3018,7 +3007,6 @@ __u64 ptlrpc_next_xid(void)
 
        return next;
 }
-EXPORT_SYMBOL(ptlrpc_next_xid);
 
 /**
  * Get a glimpse at what next xid value might have been.
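
ptlrpc_next_xid() hands out globally increasing transfer IDs under a spinlock; with the export dropped it is reachable only from inside the module. A sketch consistent with the function body this hunk tails (hedged):

static spinlock_t ptlrpc_last_xid_lock;
static __u64 ptlrpc_last_xid;

__u64 ptlrpc_next_xid(void)
{
	__u64 next;

	spin_lock(&ptlrpc_last_xid_lock);
	next = ++ptlrpc_last_xid;
	spin_unlock(&ptlrpc_last_xid_lock);

	return next;
}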
@@ -3135,8 +3123,6 @@ void *ptlrpcd_alloc_work(struct obd_import *imp,
        req->rq_import = class_import_get(imp);
        req->rq_interpret_reply = work_interpreter;
        /* don't want reply */
-       req->rq_receiving_reply = 0;
-       req->rq_req_unlink = req->rq_reply_unlink = 0;
        req->rq_no_delay = req->rq_no_resend = 1;
        req->rq_pill.rc_fmt = (void *)&worker_format;
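
The deleted assignments suggest these flags are already clear when a work request is handed out, e.g. because the object is allocated zero-filled. A sketch of that allocation pattern (hypothetical helper; the real ptlrpc request allocator may differ):

#include <linux/slab.h>

/* hypothetical: zero-filled slab allocation makes clearing individual
 * flags by hand redundant */
static struct ptlrpc_request *request_alloc_zeroed(gfp_t flags)
{
	return kmem_cache_zalloc(request_cache, flags);
}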