Whamcloud - gitweb
b=16509 port recovery ending tests to master
[fs/lustre-release.git] / lustre / ptlrpc / ptlrpcd.c
index 8338231..78c0546 100644 (file)
@@ -26,7 +26,7 @@
  * GPL HEADER END
  */
 /*
- * Copyright  2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  */
 /*
  * lustre/ptlrpc/ptlrpcd.c
  */
 
+/** \defgroup ptlrpcd PortalRPC daemon
+ *
+ * ptlrpcd is a special thread with its own set where other users might add
+ * requests when they do not want to wait for their completion.
+ * PtlRPCD will take care of sending such requests and then processing their
+ * replies and calling completion callbacks as necessary.
+ * The callbacks are called directly from ptlrpcd context.
+ * It is important to never significantly block (esp. on RPCs!) within such
+ * completion handler or a deadlock might occur where ptlrpcd enters some
+ * callback that attempts to send another RPC and wait for it to return,
+ * during which time ptlrpcd is completely blocked, so e.g. if import
+ * fails, recovery cannot progress because connection requests are also
+ * sent by ptlrpcd.
+ *
+ * @{
+ */
+
 #define DEBUG_SUBSYSTEM S_RPC
 
 #ifdef __KERNEL__
@@ -90,7 +107,7 @@ static struct ptlrpcd_scope_ctl ptlrpcd_scopes[PSCOPE_NR] = {
         }
 };
 
-struct semaphore ptlrpcd_sem;
+cfs_semaphore_t ptlrpcd_sem;
 static int ptlrpcd_users = 0;
 
 void ptlrpcd_wake(struct ptlrpc_request *req)
@@ -102,29 +119,30 @@ void ptlrpcd_wake(struct ptlrpc_request *req)
         cfs_waitq_signal(&rq_set->set_waitq);
 }
 
-/*
+/**
  * Move all request from an existing request set to the ptlrpcd queue.
  * All requests from the set must be in phase RQ_PHASE_NEW.
  */
 void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
 {
-        struct list_head *tmp, *pos;
+        cfs_list_t *tmp, *pos;
 
-        list_for_each_safe(pos, tmp, &set->set_requests) {
+        cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
                 struct ptlrpc_request *req =
-                        list_entry(pos, struct ptlrpc_request, rq_set_chain);
+                        cfs_list_entry(pos, struct ptlrpc_request,
+                                       rq_set_chain);
 
                 LASSERT(req->rq_phase == RQ_PHASE_NEW);
-                list_del_init(&req->rq_set_chain);
+                cfs_list_del_init(&req->rq_set_chain);
                 req->rq_set = NULL;
                 ptlrpcd_add_req(req, PSCOPE_OTHER);
-                set->set_remaining--;
+                cfs_atomic_dec(&set->set_remaining);
         }
-        LASSERT(set->set_remaining == 0);
+        LASSERT(cfs_atomic_read(&set->set_remaining) == 0);
 }
 EXPORT_SYMBOL(ptlrpcd_add_rqset);
 
-/*
+/**
  * Requests that are added to the ptlrpcd queue are sent via
  * ptlrpcd_check->ptlrpc_check_set().
  */
@@ -135,65 +153,97 @@ int ptlrpcd_add_req(struct ptlrpc_request *req, enum ptlrpcd_scope scope)
         int rc;
 
         LASSERT(scope < PSCOPE_NR);
+
+        cfs_spin_lock(&req->rq_lock);
+        if (req->rq_invalid_rqset) {
+                cfs_duration_t timeout;
+                struct l_wait_info lwi;
+
+                req->rq_invalid_rqset = 0;
+                cfs_spin_unlock(&req->rq_lock);
+
+                timeout = cfs_time_seconds(5);
+                lwi = LWI_TIMEOUT(timeout, back_to_sleep, NULL);
+                l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
+        } else if (req->rq_set) {
+                LASSERT(req->rq_phase == RQ_PHASE_NEW);
+                LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);
+
+                /* ptlrpc_check_set will decrease the count */
+                cfs_atomic_inc(&req->rq_set->set_remaining);
+                cfs_spin_unlock(&req->rq_lock);
+
+                cfs_waitq_signal(&req->rq_set->set_waitq);
+        } else {
+                cfs_spin_unlock(&req->rq_lock);
+        }
+
         pt = req->rq_send_state == LUSTRE_IMP_FULL ? PT_NORMAL : PT_RECOVERY;
         pc = &ptlrpcd_scopes[scope].pscope_thread[pt].pt_ctl;
         rc = ptlrpc_set_add_new_req(pc, req);
         /*
          * XXX disable this for CLIO: environment is needed for interpreter.
+         *     add a temporary debug check of rc.
          */
+        LASSERTF(rc == 0, "ptlrpcd_add_req failed (rc = %d)\n", rc);
         if (rc && 0) {
-                ptlrpc_interpterer_t interpreter;
-
-                interpreter = req->rq_interpret_reply;
-
                 /*
                  * Thread is probably in stop now so we need to
                  * kill this rpc as it was not added. Let's call
                  * interpret for it to let know we're killing it
-                 * so that higher levels might free assosiated
+                 * so that higher levels might free associated
                  * resources.
                  */
                 ptlrpc_req_interpret(NULL, req, -EBADR);
                 req->rq_set = NULL;
                 ptlrpc_req_finished(req);
+        } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING) {
+                /*
+                 * The request is for recovery, should be sent ASAP.
+                 */
+                cfs_waitq_signal(&pc->pc_set->set_waitq);
         }
 
         return rc;
 }
 
+/**
+ * Check if there is more work to do on ptlrpcd set.
+ * Returns 1 if yes.
+ */
 static int ptlrpcd_check(const struct lu_env *env, struct ptlrpcd_ctl *pc)
 {
-        struct list_head *tmp, *pos;
+        cfs_list_t *tmp, *pos;
         struct ptlrpc_request *req;
         int rc = 0;
         ENTRY;
 
-        spin_lock(&pc->pc_set->set_new_req_lock);
-        list_for_each_safe(pos, tmp, &pc->pc_set->set_new_requests) {
-                req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
-                list_del_init(&req->rq_set_chain);
+        cfs_spin_lock(&pc->pc_set->set_new_req_lock);
+        cfs_list_for_each_safe(pos, tmp, &pc->pc_set->set_new_requests) {
+                req = cfs_list_entry(pos, struct ptlrpc_request, rq_set_chain);
+                cfs_list_del_init(&req->rq_set_chain);
                 ptlrpc_set_add_req(pc->pc_set, req);
                 /*
                  * Need to calculate its timeout.
                  */
                 rc = 1;
         }
-        spin_unlock(&pc->pc_set->set_new_req_lock);
+        cfs_spin_unlock(&pc->pc_set->set_new_req_lock);
 
-        if (pc->pc_set->set_remaining) {
+        if (cfs_atomic_read(&pc->pc_set->set_remaining)) {
                 rc = rc | ptlrpc_check_set(env, pc->pc_set);
 
                 /*
                  * XXX: our set never completes, so we prune the completed
                  * reqs after each iteration. boy could this be smarter.
                  */
-                list_for_each_safe(pos, tmp, &pc->pc_set->set_requests) {
-                        req = list_entry(pos, struct ptlrpc_request,
+                cfs_list_for_each_safe(pos, tmp, &pc->pc_set->set_requests) {
+                        req = cfs_list_entry(pos, struct ptlrpc_request,
                                          rq_set_chain);
                         if (req->rq_phase != RQ_PHASE_COMPLETE)
                                 continue;
 
-                        list_del_init(&req->rq_set_chain);
+                        cfs_list_del_init(&req->rq_set_chain);
                         req->rq_set = NULL;
                         ptlrpc_req_finished (req);
                 }
@@ -203,19 +253,20 @@ static int ptlrpcd_check(const struct lu_env *env, struct ptlrpcd_ctl *pc)
                 /*
                  * If new requests have been added, make sure to wake up.
                  */
-                spin_lock(&pc->pc_set->set_new_req_lock);
-                rc = !list_empty(&pc->pc_set->set_new_requests);
-                spin_unlock(&pc->pc_set->set_new_req_lock);
+                cfs_spin_lock(&pc->pc_set->set_new_req_lock);
+                rc = !cfs_list_empty(&pc->pc_set->set_new_requests);
+                cfs_spin_unlock(&pc->pc_set->set_new_req_lock);
         }
 
         RETURN(rc);
 }
 
 #ifdef __KERNEL__
-/*
+/**
+ * Main ptlrpcd thread.
  * ptlrpc's code paths like to execute in process context, so we have this
- * thread which spins on a set which contains the io rpcs. llite specifies
- * ptlrpcd's set when it pushes pages down into the oscs.
+ * thread which spins on a set which contains the RPCs and sends them.
  */
 static int ptlrpcd(void *arg)
 {
@@ -235,7 +286,7 @@ static int ptlrpcd(void *arg)
                                      LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
         }
 
-        complete(&pc->pc_starting);
+        cfs_complete(&pc->pc_starting);
 
         if (rc != 0)
                 RETURN(rc);
@@ -279,8 +330,8 @@ static int ptlrpcd(void *arg)
                 /*
                  * Abort inflight rpcs for forced stop case.
                  */
-                if (test_bit(LIOD_STOP, &pc->pc_flags)) {
-                        if (test_bit(LIOD_FORCE, &pc->pc_flags))
+                if (cfs_test_bit(LIOD_STOP, &pc->pc_flags)) {
+                        if (cfs_test_bit(LIOD_FORCE, &pc->pc_flags))
                                 ptlrpc_abort_set(pc->pc_set);
                         exit++;
                 }
@@ -294,19 +345,24 @@ static int ptlrpcd(void *arg)
         /*
          * Wait for inflight requests to drain.
          */
-        if (!list_empty(&pc->pc_set->set_requests))
+        if (!cfs_list_empty(&pc->pc_set->set_requests))
                 ptlrpc_set_wait(pc->pc_set);
         lu_context_fini(&env.le_ctx);
-        complete(&pc->pc_finishing);
+        cfs_complete(&pc->pc_finishing);
 
-        clear_bit(LIOD_START, &pc->pc_flags);
-        clear_bit(LIOD_STOP, &pc->pc_flags);
-        clear_bit(LIOD_FORCE, &pc->pc_flags);
+        cfs_clear_bit(LIOD_START, &pc->pc_flags);
+        cfs_clear_bit(LIOD_STOP, &pc->pc_flags);
+        cfs_clear_bit(LIOD_FORCE, &pc->pc_flags);
         return 0;
 }
 
 #else /* !__KERNEL__ */
 
+/**
+ * In liblustre we do not have separate threads, so this function
+ * is called from time to time from all across the common code to see
+ * whether something needs to be processed on the ptlrpcd set.
+ */
 int ptlrpcd_check_async_rpcs(void *arg)
 {
         struct ptlrpcd_ctl *pc = arg;
@@ -328,7 +384,7 @@ int ptlrpcd_check_async_rpcs(void *arg)
                         /*
                          * XXX: send replay requests.
                          */
-                        if (test_bit(LIOD_RECOVERY, &pc->pc_flags))
+                        if (cfs_test_bit(LIOD_RECOVERY, &pc->pc_flags))
                                 rc = ptlrpcd_check(&pc->pc_env, pc);
                 }
         }
@@ -341,8 +397,8 @@ int ptlrpcd_idle(void *arg)
 {
         struct ptlrpcd_ctl *pc = arg;
 
-        return (list_empty(&pc->pc_set->set_new_requests) &&
-                pc->pc_set->set_remaining == 0);
+        return (cfs_list_empty(&pc->pc_set->set_new_requests) &&
+                cfs_atomic_read(&pc->pc_set->set_remaining) == 0);
 }
 
 #endif
@@ -355,16 +411,16 @@ int ptlrpcd_start(const char *name, struct ptlrpcd_ctl *pc)
         /*
          * Do not allow start second thread for one pc.
          */
-        if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
+        if (cfs_test_and_set_bit(LIOD_START, &pc->pc_flags)) {
                 CERROR("Starting second thread (%s) for same pc %p\n",
                        name, pc);
                 RETURN(-EALREADY);
         }
 
-        init_completion(&pc->pc_starting);
-        init_completion(&pc->pc_finishing);
-        spin_lock_init(&pc->pc_lock);
-        snprintf (pc->pc_name, sizeof (pc->pc_name), name);
+        cfs_init_completion(&pc->pc_starting);
+        cfs_init_completion(&pc->pc_finishing);
+        cfs_spin_lock_init(&pc->pc_lock);
+        strncpy(pc->pc_name, name, sizeof(pc->pc_name) - 1);
         pc->pc_set = ptlrpc_prep_set();
         if (pc->pc_set == NULL)
                 GOTO(out, rc = -ENOMEM);
@@ -387,7 +443,7 @@ int ptlrpcd_start(const char *name, struct ptlrpcd_ctl *pc)
                 GOTO(out, rc);
         }
         rc = 0;
-        wait_for_completion(&pc->pc_starting);
+        cfs_wait_for_completion(&pc->pc_starting);
 #else
         pc->pc_wait_callback =
                 liblustre_register_wait_callback("ptlrpcd_check_async_rpcs",
@@ -398,23 +454,23 @@ int ptlrpcd_start(const char *name, struct ptlrpcd_ctl *pc)
 #endif
 out:
         if (rc)
-                clear_bit(LIOD_START, &pc->pc_flags);
+                cfs_clear_bit(LIOD_START, &pc->pc_flags);
         RETURN(rc);
 }
 
 void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
 {
-        if (!test_bit(LIOD_START, &pc->pc_flags)) {
+        if (!cfs_test_bit(LIOD_START, &pc->pc_flags)) {
                 CERROR("Thread for pc %p was not started\n", pc);
                 return;
         }
 
-        set_bit(LIOD_STOP, &pc->pc_flags);
+        cfs_set_bit(LIOD_STOP, &pc->pc_flags);
         if (force)
-                set_bit(LIOD_FORCE, &pc->pc_flags);
+                cfs_set_bit(LIOD_FORCE, &pc->pc_flags);
         cfs_waitq_signal(&pc->pc_set->set_waitq);
 #ifdef __KERNEL__
-        wait_for_completion(&pc->pc_finishing);
+        cfs_wait_for_completion(&pc->pc_finishing);
 #else
         liblustre_deregister_wait_callback(pc->pc_wait_callback);
         liblustre_deregister_idle_callback(pc->pc_idle_callback);
@@ -436,7 +492,7 @@ void ptlrpcd_fini(void)
 
                         pc = &ptlrpcd_scopes[i].pscope_thread[j].pt_ctl;
 
-                        if (test_bit(LIOD_START, &pc->pc_flags))
+                        if (cfs_test_bit(LIOD_START, &pc->pc_flags))
                                 ptlrpcd_stop(pc, 0);
                 }
         }
@@ -450,7 +506,7 @@ int ptlrpcd_addref(void)
         int j;
         ENTRY;
 
-        mutex_down(&ptlrpcd_sem);
+        cfs_mutex_down(&ptlrpcd_sem);
         if (++ptlrpcd_users == 1) {
                 for (i = 0; rc == 0 && i < PSCOPE_NR; ++i) {
                         for (j = 0; rc == 0 && j < PT_NR; ++j) {
@@ -460,7 +516,7 @@ int ptlrpcd_addref(void)
                                 pt = &ptlrpcd_scopes[i].pscope_thread[j];
                                 pc = &pt->pt_ctl;
                                 if (j == PT_RECOVERY)
-                                        set_bit(LIOD_RECOVERY, &pc->pc_flags);
+                                        cfs_set_bit(LIOD_RECOVERY, &pc->pc_flags);
                                 rc = ptlrpcd_start(pt->pt_name, pc);
                         }
                 }
@@ -469,14 +525,15 @@ int ptlrpcd_addref(void)
                         ptlrpcd_fini();
                 }
         }
-        mutex_up(&ptlrpcd_sem);
+        cfs_mutex_up(&ptlrpcd_sem);
         RETURN(rc);
 }
 
 void ptlrpcd_decref(void)
 {
-        mutex_down(&ptlrpcd_sem);
+        cfs_mutex_down(&ptlrpcd_sem);
         if (--ptlrpcd_users == 0)
                 ptlrpcd_fini();
-        mutex_up(&ptlrpcd_sem);
+        cfs_mutex_up(&ptlrpcd_sem);
 }
+/** @} ptlrpcd */