b=21448 send recovery rpc ASAP
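
The patch below splits ptlrpcd into per-scope thread pairs (PSCOPE_BRW and PSCOPE_OTHER, each with a normal and a recovery thread) and signals the recovery thread's set waitqueue as soon as a request bound for a still-CONNECTING import is queued, so the recovery rpc goes out immediately instead of waiting for the next timeout. The following is a minimal, self-contained C model of that routing decision only; the enums, the names[] table and model_add_req() are illustrative stand-ins, not Lustre code (the real logic is in ptlrpcd_add_req() in the diff).

/*
 * Self-contained model (not Lustre code) of the routing rule this patch
 * introduces: each scope owns a normal thread and a recovery thread, a
 * request whose import is not FULL goes to the recovery thread, and a
 * request queued while its import is still CONNECTING wakes that thread
 * at once so the recovery rpc is sent ASAP.
 */
#include <stdio.h>

enum imp_state { IMP_CONNECTING, IMP_FULL };
enum scope     { SCOPE_BRW, SCOPE_OTHER, SCOPE_NR };
enum pt_sel    { T_NORMAL, T_RECOVERY, T_NR };

static const char *names[SCOPE_NR][T_NR] = {
        [SCOPE_BRW]   = { "ptlrpcd-brw", "ptlrpcd-brw-rcv" },
        [SCOPE_OTHER] = { "ptlrpcd",     "ptlrpcd-rcv"     },
};

static void model_add_req(enum scope s, enum imp_state imp)
{
        /* Only FULL imports use the normal thread; everything else recovers. */
        enum pt_sel t = (imp == IMP_FULL) ? T_NORMAL : T_RECOVERY;

        printf("queued on %s%s\n", names[s][t],
               imp == IMP_CONNECTING ? ", waitqueue signalled immediately" : "");
}

int main(void)
{
        model_add_req(SCOPE_OTHER, IMP_FULL);       /* -> ptlrpcd               */
        model_add_req(SCOPE_BRW,   IMP_CONNECTING); /* -> ptlrpcd-brw-rcv, ASAP */
        return 0;
}
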
diff --git a/lustre/ptlrpc/ptlrpcd.c b/lustre/ptlrpc/ptlrpcd.c
index efde23a..e6bbf18 100644
--- a/lustre/ptlrpc/ptlrpcd.c
+++ b/lustre/ptlrpc/ptlrpcd.c
@@ -1,27 +1,39 @@
 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
  * vim:expandtab:shiftwidth=8:tabstop=8:
  *
- *  Copyright (C) 2001-2003 Cluster File Systems, Inc.
- *   Author Peter Braam <braam@clusterfs.com>
+ * GPL HEADER START
  *
- *   This file is part of the Lustre file system, http://www.lustre.org
- *   Lustre is a trademark of Cluster File Systems, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- *   You may have signed or agreed to another license before downloading
- *   this software.  If so, you are bound by the terms and conditions
- *   of that agreement, and the following does not apply to you.  See the
- *   LICENSE file included with this distribution for more information.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
  *
- *   If you did not agree to a different license, then this copy of Lustre
- *   is open source software; you can redistribute it and/or modify it
- *   under the terms of version 2 of the GNU General Public License as
- *   published by the Free Software Foundation.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
  *
- *   In either case, Lustre is distributed in the hope that it will be
- *   useful, but WITHOUT ANY WARRANTY; without even the implied warranty
- *   of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   license text for more details.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
  *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/ptlrpcd.c
  */
 
 #define DEBUG_SUBSYSTEM S_RPC
 # include <ctype.h>
 #endif
 
-#include <libcfs/kp30.h>
 #include <lustre_net.h>
 # include <lustre_lib.h>
 
 #include <lustre_ha.h>
 #include <obd_class.h>   /* for obd_zombie */
 #include <obd_support.h> /* for OBD_FAIL_CHECK */
+#include <cl_object.h> /* cl_env_{get,put}() */
 #include <lprocfs_status.h>
 
-#define LIOD_STOP 0
-struct ptlrpcd_ctl {
-        unsigned long             pc_flags;
-        spinlock_t                pc_lock;
-        struct completion         pc_starting;
-        struct completion         pc_finishing;
-        struct list_head          pc_req_list;
-        cfs_waitq_t               pc_waitq;
-        struct ptlrpc_request_set *pc_set;
-        char                      pc_name[16];
-#ifndef __KERNEL__
-        int                       pc_recurred;
-        void                     *pc_callback;
-#endif
+enum pscope_thread {
+        PT_NORMAL,
+        PT_RECOVERY,
+        PT_NR
 };
 
-static struct ptlrpcd_ctl ptlrpcd_pc;
-static struct ptlrpcd_ctl ptlrpcd_recovery_pc;
+struct ptlrpcd_scope_ctl {
+        struct ptlrpcd_thread {
+                const char        *pt_name;
+                struct ptlrpcd_ctl pt_ctl;
+        } pscope_thread[PT_NR];
+};
+
+static struct ptlrpcd_scope_ctl ptlrpcd_scopes[PSCOPE_NR] = {
+        [PSCOPE_BRW] = {
+                .pscope_thread = {
+                        [PT_NORMAL] = {
+                                .pt_name = "ptlrpcd-brw"
+                        },
+                        [PT_RECOVERY] = {
+                                .pt_name = "ptlrpcd-brw-rcv"
+                        }
+                }
+        },
+        [PSCOPE_OTHER] = {
+                .pscope_thread = {
+                        [PT_NORMAL] = {
+                                .pt_name = "ptlrpcd"
+                        },
+                        [PT_RECOVERY] = {
+                                .pt_name = "ptlrpcd-rcv"
+                        }
+                }
+        }
+};
 
-struct semaphore ptlrpcd_sem;
+cfs_semaphore_t ptlrpcd_sem;
 static int ptlrpcd_users = 0;
 
 void ptlrpcd_wake(struct ptlrpc_request *req)
 {
-        struct ptlrpcd_ctl *pc = req->rq_ptlrpcd_data;
+        struct ptlrpc_request_set *rq_set = req->rq_set;
 
-        LASSERT(pc != NULL);
+        LASSERT(rq_set != NULL);
 
-        cfs_waitq_signal(&pc->pc_waitq);
+        cfs_waitq_signal(&rq_set->set_waitq);
 }
 
-/* requests that are added to the ptlrpcd queue are sent via
- * ptlrpcd_check->ptlrpc_check_set() */
-void ptlrpcd_add_req(struct ptlrpc_request *req)
+/*
+ * Move all requests from an existing request set to the ptlrpcd queue.
+ * All requests from the set must be in phase RQ_PHASE_NEW.
+ */
+void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
+{
+        cfs_list_t *tmp, *pos;
+
+        cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
+                struct ptlrpc_request *req =
+                        cfs_list_entry(pos, struct ptlrpc_request,
+                                       rq_set_chain);
+
+                LASSERT(req->rq_phase == RQ_PHASE_NEW);
+                cfs_list_del_init(&req->rq_set_chain);
+                req->rq_set = NULL;
+                ptlrpcd_add_req(req, PSCOPE_OTHER);
+                set->set_remaining--;
+        }
+        LASSERT(set->set_remaining == 0);
+}
+EXPORT_SYMBOL(ptlrpcd_add_rqset);
+
+/*
+ * Requests that are added to the ptlrpcd queue are sent via
+ * ptlrpcd_check->ptlrpc_check_set().
+ */
+int ptlrpcd_add_req(struct ptlrpc_request *req, enum ptlrpcd_scope scope)
 {
         struct ptlrpcd_ctl *pc;
+        enum pscope_thread  pt;
+        int rc;
 
-        if (req->rq_send_state == LUSTRE_IMP_FULL)
-                pc = &ptlrpcd_pc;
-        else
-                pc = &ptlrpcd_recovery_pc;
+        LASSERT(scope < PSCOPE_NR);
+        pt = req->rq_send_state == LUSTRE_IMP_FULL ? PT_NORMAL : PT_RECOVERY;
+        pc = &ptlrpcd_scopes[scope].pscope_thread[pt].pt_ctl;
+        rc = ptlrpc_set_add_new_req(pc, req);
+        /*
+         * XXX disable this for CLIO: environment is needed for interpreter.
+         *     Add a temporary debug check on rc.
+         */
+        LASSERTF(rc == 0, "ptlrpcd_add_req failed (rc = %d)\n", rc);
+        if (rc && 0) {
+                /*
+                 * Thread is probably in stop now so we need to
+                 * kill this rpc as it was not added. Let's call
+                 * interpret for it to let it know we're killing it
+                 * so that higher levels might free associated
+                 * resources.
+                 */
+                ptlrpc_req_interpret(NULL, req, -EBADR);
+                req->rq_set = NULL;
+                ptlrpc_req_finished(req);
+        } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING) {
+                /*
+                 * The request is for recovery, should be sent ASAP.
+                 */
+                cfs_waitq_signal(&pc->pc_set->set_waitq);
+        }
 
-        req->rq_ptlrpcd_data = pc;
-        ptlrpc_set_add_new_req(pc->pc_set, req);
-        wake_up(&pc->pc_waitq);
+        return rc;
 }
 
-static int ptlrpcd_check(struct ptlrpcd_ctl *pc)
+static int ptlrpcd_check(const struct lu_env *env, struct ptlrpcd_ctl *pc)
 {
-        struct list_head *tmp, *pos;
+        cfs_list_t *tmp, *pos;
         struct ptlrpc_request *req;
         int rc = 0;
         ENTRY;
 
-        if (test_bit(LIOD_STOP, &pc->pc_flags))
-                RETURN(1);
-
-        obd_zombie_impexp_cull();
-
-        spin_lock(&pc->pc_set->set_new_req_lock);
-        list_for_each_safe(pos, tmp, &pc->pc_set->set_new_requests) {
-                req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
-                list_del_init(&req->rq_set_chain);
+        cfs_spin_lock(&pc->pc_set->set_new_req_lock);
+        cfs_list_for_each_safe(pos, tmp, &pc->pc_set->set_new_requests) {
+                req = cfs_list_entry(pos, struct ptlrpc_request, rq_set_chain);
+                cfs_list_del_init(&req->rq_set_chain);
                 ptlrpc_set_add_req(pc->pc_set, req);
-                rc = 1; /* need to calculate its timeout */
+                /*
+                 * Need to calculate its timeout.
+                 */
+                rc = 1;
         }
-        spin_unlock(&pc->pc_set->set_new_req_lock);
+        cfs_spin_unlock(&pc->pc_set->set_new_req_lock);
 
         if (pc->pc_set->set_remaining) {
-                rc = rc | ptlrpc_check_set(pc->pc_set);
-
-                /* XXX our set never completes, so we prune the completed
-                 * reqs after each iteration. boy could this be smarter. */
-                list_for_each_safe(pos, tmp, &pc->pc_set->set_requests) {
-                        req = list_entry(pos, struct ptlrpc_request,
+                rc = rc | ptlrpc_check_set(env, pc->pc_set);
+
+                /*
+                 * XXX: our set never completes, so we prune the completed
+                 * reqs after each iteration. boy could this be smarter.
+                 */
+                cfs_list_for_each_safe(pos, tmp, &pc->pc_set->set_requests) {
+                        req = cfs_list_entry(pos, struct ptlrpc_request,
                                          rq_set_chain);
                         if (req->rq_phase != RQ_PHASE_COMPLETE)
                                 continue;
 
-                        list_del_init(&req->rq_set_chain);
+                        cfs_list_del_init(&req->rq_set_chain);
                         req->rq_set = NULL;
                         ptlrpc_req_finished (req);
                 }
         }
 
         if (rc == 0) {
-                /* If new requests have been added, make sure to wake up */
-                spin_lock(&pc->pc_set->set_new_req_lock);
-                rc = !list_empty(&pc->pc_set->set_new_requests);
-                spin_unlock(&pc->pc_set->set_new_req_lock);
+                /*
+                 * If new requests have been added, make sure to wake up.
+                 */
+                cfs_spin_lock(&pc->pc_set->set_new_req_lock);
+                rc = !cfs_list_empty(&pc->pc_set->set_new_requests);
+                cfs_spin_unlock(&pc->pc_set->set_new_req_lock);
         }
 
         RETURN(rc);
 }
 
 #ifdef __KERNEL__
-/* ptlrpc's code paths like to execute in process context, so we have this
- * thread which spins on a set which contains the io rpcs.  llite specifies
- * ptlrpcd's set when it pushes pages down into the oscs */
+/*
+ * ptlrpc's code paths like to execute in process context, so we have this
+ * thread which spins on a set which contains the io rpcs. llite specifies
+ * ptlrpcd's set when it pushes pages down into the oscs.
+ */
 static int ptlrpcd(void *arg)
 {
         struct ptlrpcd_ctl *pc = arg;
-        int rc;
+        struct lu_env env = { .le_ses = NULL };
+        int rc, exit = 0;
         ENTRY;
 
-        if ((rc = cfs_daemonize_ctxt(pc->pc_name))) {
-                complete(&pc->pc_starting);
-                return rc;
+        rc = cfs_daemonize_ctxt(pc->pc_name);
+        if (rc == 0) {
+                /*
+                 * XXX So far only "client" ptlrpcd uses an environment. In
+                 * the future, ptlrpcd thread (or a thread-set) has to be given
+                 * an argument, describing its "scope".
+                 */
+                rc = lu_context_init(&env.le_ctx,
+                                     LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
         }
 
-        complete(&pc->pc_starting);
+        cfs_complete(&pc->pc_starting);
 
-        /* this mainloop strongly resembles ptlrpc_set_wait except
-         * that our set never completes.  ptlrpcd_check calls ptlrpc_check_set
-         * when there are requests in the set.  new requests come in
-         * on the set's new_req_list and ptlrpcd_check moves them into
-         * the set. */
-        while (1) {
-                cfs_waitlink_t set_wait;
+        if (rc != 0)
+                RETURN(rc);
+        env.le_ctx.lc_cookie = 0x7;
+
+        /*
+         * This mainloop strongly resembles ptlrpc_set_wait() except that our
+         * set never completes.  ptlrpcd_check() calls ptlrpc_check_set() when
+         * there are requests in the set. New requests come in on the set's
+         * new_req_list and ptlrpcd_check() moves them into the set.
+         */
+        do {
                 struct l_wait_info lwi;
-                cfs_duration_t timeout;
+                int timeout;
+
+                rc = lu_env_refill(&env);
+                if (rc != 0) {
+                        /*
+                         * XXX This is a very awkward situation, because
+                         * execution can neither continue (request
+                         * interpreters assume that env is set up), nor repeat
+                         * the loop (as this potentially results in a tight
+                         * loop of -ENOMEM's).
+                         *
+                         * Fortunately, refill only ever does something when
+                         * new modules are loaded, i.e., early during boot up.
+                         */
+                        CERROR("Failure to refill session: %d\n", rc);
+                        continue;
+                }
 
-                timeout = cfs_time_seconds(ptlrpc_set_next_timeout(pc->pc_set));
-                lwi = LWI_TIMEOUT(timeout, ptlrpc_expired_set, pc->pc_set);
+                timeout = ptlrpc_set_next_timeout(pc->pc_set);
+                lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
+                                  ptlrpc_expired_set, pc->pc_set);
+
+                lu_context_enter(&env.le_ctx);
+                l_wait_event(pc->pc_set->set_waitq,
+                             ptlrpcd_check(&env, pc), &lwi);
+                lu_context_exit(&env.le_ctx);
+
+                /*
+                 * Abort inflight rpcs for forced stop case.
+                 */
+                if (cfs_test_bit(LIOD_STOP, &pc->pc_flags)) {
+                        if (cfs_test_bit(LIOD_FORCE, &pc->pc_flags))
+                                ptlrpc_abort_set(pc->pc_set);
+                        exit++;
+                }
 
-                /* ala the pinger, wait on pc's waitqueue and the set's */
-                cfs_waitlink_init(&set_wait);
-                cfs_waitq_add(&pc->pc_set->set_waitq, &set_wait);
-                cfs_waitq_forward(&set_wait, &pc->pc_waitq);
-                l_wait_event(pc->pc_waitq, ptlrpcd_check(pc), &lwi);
-                cfs_waitq_del(&pc->pc_set->set_waitq, &set_wait);
+                /*
+                 * Let's make one more loop to make sure that ptlrpcd_check()
+                 * copied all raced new rpcs into the set so we can kill them.
+                 */
+        } while (exit < 2);
 
-                if (test_bit(LIOD_STOP, &pc->pc_flags))
-                        break;
-        }
-        /* wait for inflight requests to drain */
-        if (!list_empty(&pc->pc_set->set_requests))
+        /*
+         * Wait for inflight requests to drain.
+         */
+        if (!cfs_list_empty(&pc->pc_set->set_requests))
                 ptlrpc_set_wait(pc->pc_set);
-        complete(&pc->pc_finishing);
+        lu_context_fini(&env.le_ctx);
+        cfs_complete(&pc->pc_finishing);
+
+        cfs_clear_bit(LIOD_START, &pc->pc_flags);
+        cfs_clear_bit(LIOD_STOP, &pc->pc_flags);
+        cfs_clear_bit(LIOD_FORCE, &pc->pc_flags);
         return 0;
 }
 
-static void ptlrpcd_zombie_impexp_notify(void)
-{
-        cfs_waitq_signal(&ptlrpcd_pc.pc_waitq);
-}
-#else
+#else /* !__KERNEL__ */
 
 int ptlrpcd_check_async_rpcs(void *arg)
 {
         struct ptlrpcd_ctl *pc = arg;
-        int                  rc = 0;
+        int                 rc = 0;
 
-        /* single threaded!! */
+        /*
+         * Single threaded!!
+         */
         pc->pc_recurred++;
 
         if (pc->pc_recurred == 1) {
-                rc = ptlrpcd_check(pc);
-                if (!rc)
-                        ptlrpc_expired_set(pc->pc_set);
-                /*XXX send replay requests */
-                if (pc == &ptlrpcd_recovery_pc)
-                        rc = ptlrpcd_check(pc);
+                rc = lu_env_refill(&pc->pc_env);
+                if (rc == 0) {
+                        lu_context_enter(&pc->pc_env.le_ctx);
+                        rc = ptlrpcd_check(&pc->pc_env, pc);
+                        lu_context_exit(&pc->pc_env.le_ctx);
+                        if (!rc)
+                                ptlrpc_expired_set(pc->pc_set);
+                        /*
+                         * XXX: send replay requests.
+                         */
+                        if (cfs_test_bit(LIOD_RECOVERY, &pc->pc_flags))
+                                rc = ptlrpcd_check(&pc->pc_env, pc);
+                }
         }
 
         pc->pc_recurred--;
         return rc;
 }
+
+int ptlrpcd_idle(void *arg)
+{
+        struct ptlrpcd_ctl *pc = arg;
+
+        return (cfs_list_empty(&pc->pc_set->set_new_requests) &&
+                pc->pc_set->set_remaining == 0);
+}
+
 #endif
 
-static int ptlrpcd_start(char *name, struct ptlrpcd_ctl *pc)
+int ptlrpcd_start(const char *name, struct ptlrpcd_ctl *pc)
 {
         int rc;
-
         ENTRY;
-        memset(pc, 0, sizeof(*pc));
-        init_completion(&pc->pc_starting);
-        init_completion(&pc->pc_finishing);
-        cfs_waitq_init(&pc->pc_waitq);
-        pc->pc_flags = 0;
-        spin_lock_init(&pc->pc_lock);
-        CFS_INIT_LIST_HEAD(&pc->pc_req_list);
-        snprintf (pc->pc_name, sizeof (pc->pc_name), name);
 
+        /*
+         * Do not allow starting a second thread for the same pc.
+         */
+        if (cfs_test_and_set_bit(LIOD_START, &pc->pc_flags)) {
+                CERROR("Starting second thread (%s) for same pc %p\n",
+                       name, pc);
+                RETURN(-EALREADY);
+        }
+
+        cfs_init_completion(&pc->pc_starting);
+        cfs_init_completion(&pc->pc_finishing);
+        cfs_spin_lock_init(&pc->pc_lock);
+        strncpy(pc->pc_name, name, sizeof(pc->pc_name) - 1);
         pc->pc_set = ptlrpc_prep_set();
         if (pc->pc_set == NULL)
-                RETURN(-ENOMEM);
+                GOTO(out, rc = -ENOMEM);
+        /*
+         * So far only "client" ptlrpcd uses an environment. In the future,
+         * ptlrpcd thread (or a thread-set) has to be given an argument,
+         * describing its "scope".
+         */
+        rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
+        if (rc != 0) {
+                ptlrpc_set_destroy(pc->pc_set);
+                GOTO(out, rc);
+        }
 
 #ifdef __KERNEL__
-        /* wake ptlrpcd when zombie imports or exports exist */
-        obd_zombie_impexp_notify = ptlrpcd_zombie_impexp_notify;
-        
         rc = cfs_kernel_thread(ptlrpcd, pc, 0);
         if (rc < 0)  {
+                lu_context_fini(&pc->pc_env.le_ctx);
                 ptlrpc_set_destroy(pc->pc_set);
-                RETURN(rc);
+                GOTO(out, rc);
         }
-
-        wait_for_completion(&pc->pc_starting);
+        rc = 0;
+        cfs_wait_for_completion(&pc->pc_starting);
 #else
-        pc->pc_callback =
-                liblustre_register_wait_callback(&ptlrpcd_check_async_rpcs, pc);
-        (void)rc;
+        pc->pc_wait_callback =
+                liblustre_register_wait_callback("ptlrpcd_check_async_rpcs",
+                                                 &ptlrpcd_check_async_rpcs, pc);
+        pc->pc_idle_callback =
+                liblustre_register_idle_callback("ptlrpcd_check_idle_rpcs",
+                                                 &ptlrpcd_idle, pc);
 #endif
-        RETURN(0);
+out:
+        if (rc)
+                cfs_clear_bit(LIOD_START, &pc->pc_flags);
+        RETURN(rc);
 }
 
-static void ptlrpcd_stop(struct ptlrpcd_ctl *pc)
+void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
 {
-        set_bit(LIOD_STOP, &pc->pc_flags);
-        cfs_waitq_signal(&pc->pc_waitq);
+        if (!cfs_test_bit(LIOD_START, &pc->pc_flags)) {
+                CERROR("Thread for pc %p was not started\n", pc);
+                return;
+        }
+
+        cfs_set_bit(LIOD_STOP, &pc->pc_flags);
+        if (force)
+                cfs_set_bit(LIOD_FORCE, &pc->pc_flags);
+        cfs_waitq_signal(&pc->pc_set->set_waitq);
 #ifdef __KERNEL__
-        obd_zombie_impexp_notify = NULL;
-        wait_for_completion(&pc->pc_finishing);
+        cfs_wait_for_completion(&pc->pc_finishing);
 #else
-        liblustre_deregister_wait_callback(pc->pc_callback);
+        liblustre_deregister_wait_callback(pc->pc_wait_callback);
+        liblustre_deregister_idle_callback(pc->pc_idle_callback);
 #endif
+        lu_context_fini(&pc->pc_env.le_ctx);
         ptlrpc_set_destroy(pc->pc_set);
 }
 
-int ptlrpcd_addref(void)
+void ptlrpcd_fini(void)
 {
-        int rc = 0;
+        int i;
+        int j;
+
         ENTRY;
 
-        mutex_down(&ptlrpcd_sem);
-        if (++ptlrpcd_users != 1)
-                GOTO(out, rc);
+        for (i = 0; i < PSCOPE_NR; ++i) {
+                for (j = 0; j < PT_NR; ++j) {
+                        struct ptlrpcd_ctl *pc;
 
-        rc = ptlrpcd_start("ptlrpcd", &ptlrpcd_pc);
-        if (rc) {
-                --ptlrpcd_users;
-                GOTO(out, rc);
+                        pc = &ptlrpcd_scopes[i].pscope_thread[j].pt_ctl;
+
+                        if (cfs_test_bit(LIOD_START, &pc->pc_flags))
+                                ptlrpcd_stop(pc, 0);
+                }
         }
+        EXIT;
+}
 
-        rc = ptlrpcd_start("ptlrpcd-recov", &ptlrpcd_recovery_pc);
-        if (rc) {
-                ptlrpcd_stop(&ptlrpcd_pc);
-                --ptlrpcd_users;
-                GOTO(out, rc);
+int ptlrpcd_addref(void)
+{
+        int rc = 0;
+        int i;
+        int j;
+        ENTRY;
+
+        cfs_mutex_down(&ptlrpcd_sem);
+        if (++ptlrpcd_users == 1) {
+                for (i = 0; rc == 0 && i < PSCOPE_NR; ++i) {
+                        for (j = 0; rc == 0 && j < PT_NR; ++j) {
+                                struct ptlrpcd_thread *pt;
+                                struct ptlrpcd_ctl    *pc;
+
+                                pt = &ptlrpcd_scopes[i].pscope_thread[j];
+                                pc = &pt->pt_ctl;
+                                if (j == PT_RECOVERY)
+                                        cfs_set_bit(LIOD_RECOVERY, &pc->pc_flags);
+                                rc = ptlrpcd_start(pt->pt_name, pc);
+                        }
+                }
+                if (rc != 0) {
+                        --ptlrpcd_users;
+                        ptlrpcd_fini();
+                }
         }
-out:
-        mutex_up(&ptlrpcd_sem);
+        cfs_mutex_up(&ptlrpcd_sem);
         RETURN(rc);
 }
 
 void ptlrpcd_decref(void)
 {
-        mutex_down(&ptlrpcd_sem);
-        if (--ptlrpcd_users == 0) {
-                ptlrpcd_stop(&ptlrpcd_pc);
-                ptlrpcd_stop(&ptlrpcd_recovery_pc);
-        }
-        mutex_up(&ptlrpcd_sem);
+        cfs_mutex_down(&ptlrpcd_sem);
+        if (--ptlrpcd_users == 0)
+                ptlrpcd_fini();
+        cfs_mutex_up(&ptlrpcd_sem);
 }
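
With this change ptlrpcd_add_req() takes an explicit scope and returns a status. A hedged caller-side sketch, not taken from a real caller: "req" stands for an already-prepared struct ptlrpc_request, PSCOPE_OTHER matches what ptlrpcd_add_rqset() in the patch uses, and PSCOPE_BRW is presumably reserved for bulk I/O requests.

        /* Hand a prepared request off to the per-scope ptlrpcd thread;
         * a request whose import is not FULL is routed to the matching
         * "-rcv" thread and, if the import is CONNECTING, sent ASAP. */
        rc = ptlrpcd_add_req(req, PSCOPE_OTHER);
        if (rc != 0)
                CERROR("ptlrpcd_add_req() failed: %d\n", rc);
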