#include <obd_support.h> /* for OBD_FAIL_CHECK */
#include <lprocfs_status.h>
-#define LIOD_STOP 0
-struct ptlrpcd_ctl {
- unsigned long pc_flags;
- spinlock_t pc_lock;
- struct completion pc_starting;
- struct completion pc_finishing;
- struct ptlrpc_request_set *pc_set;
- char pc_name[16];
-#ifndef __KERNEL__
- int pc_recurred;
- void *pc_callback;
- void *pc_wait_callback;
- void *pc_idle_callback;
-#endif
-};
-
static struct ptlrpcd_ctl ptlrpcd_pc;
static struct ptlrpcd_ctl ptlrpcd_recovery_pc;
cfs_waitq_signal(&rq_set->set_waitq);
}
-/* requests that are added to the ptlrpcd queue are sent via
- * ptlrpcd_check->ptlrpc_check_set() */
+/*
+ * Requests that are added to the ptlrpcd queue are sent via
+ * ptlrpcd_check->ptlrpc_check_set().
+ */
void ptlrpcd_add_req(struct ptlrpc_request *req)
{
struct ptlrpcd_ctl *pc;
+ int rc;
if (req->rq_send_state == LUSTRE_IMP_FULL)
pc = &ptlrpcd_pc;
else
pc = &ptlrpcd_recovery_pc;
- ptlrpc_set_add_new_req(pc->pc_set, req);
- cfs_waitq_signal(&pc->pc_set->set_waitq);
+ rc = ptlrpc_set_add_new_req(pc, req);
+ if (rc) {
+ int (*interpreter)(struct ptlrpc_request *,
+ void *, int);
+
+ interpreter = req->rq_interpret_reply;
+
+ /*
+ * Thread is probably in stop now so we need to
+ * kill this rpc as it was not added. Let's call
+ * interpret for it to let it know we're killing it
+ * so that higher levels might free associated
+ * resources.
+ */
+ req->rq_status = -EBADR;
+ interpreter(req, &req->rq_async_args,
+ req->rq_status);
+ req->rq_set = NULL;
+ ptlrpc_req_finished(req);
+ }
}
static int ptlrpcd_check(struct ptlrpcd_ctl *pc)
req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
list_del_init(&req->rq_set_chain);
ptlrpc_set_add_req(pc->pc_set, req);
- rc = 1; /* need to calculate its timeout */
+ /*
+ * Need to calculate its timeout.
+ */
+ rc = 1;
}
spin_unlock(&pc->pc_set->set_new_req_lock);
if (pc->pc_set->set_remaining) {
rc = rc | ptlrpc_check_set(pc->pc_set);
- /* XXX our set never completes, so we prune the completed
- * reqs after each iteration. boy could this be smarter. */
+ /*
+ * XXX: our set never completes, so we prune the completed
+ * reqs after each iteration. Boy, could this be smarter.
+ */
list_for_each_safe(pos, tmp, &pc->pc_set->set_requests) {
req = list_entry(pos, struct ptlrpc_request,
rq_set_chain);
}
if (rc == 0) {
- /* If new requests have been added, make sure to wake up */
+ /*
+ * If new requests have been added, make sure to wake up.
+ */
spin_lock(&pc->pc_set->set_new_req_lock);
rc = !list_empty(&pc->pc_set->set_new_requests);
spin_unlock(&pc->pc_set->set_new_req_lock);
}
#ifdef __KERNEL__
-/* ptlrpc's code paths like to execute in process context, so we have this
- * thread which spins on a set which contains the io rpcs. llite specifies
- * ptlrpcd's set when it pushes pages down into the oscs */
+/*
+ * ptlrpc's code paths like to execute in process context, so we have this
+ * thread which spins on a set which contains the io rpcs. llite specifies
+ * ptlrpcd's set when it pushes pages down into the oscs.
+ */
static int ptlrpcd(void *arg)
{
struct ptlrpcd_ctl *pc = arg;
if ((rc = cfs_daemonize_ctxt(pc->pc_name))) {
complete(&pc->pc_starting);
- return rc;
+ goto out;
}
complete(&pc->pc_starting);
- /* this mainloop strongly resembles ptlrpc_set_wait except
- * that our set never completes. ptlrpcd_check calls ptlrpc_check_set
- * when there are requests in the set. new requests come in
- * on the set's new_req_list and ptlrpcd_check moves them into
- * the set. */
+ /*
+ * This mainloop strongly resembles ptlrpc_set_wait() except that our
+ * set never completes. ptlrpcd_check() calls ptlrpc_check_set() when
+ * there are requests in the set. New requests come in on the set's
+ * new_req_list and ptlrpcd_check() moves them into the set.
+ */
while (1) {
struct l_wait_info lwi;
cfs_duration_t timeout;
l_wait_event(pc->pc_set->set_waitq, ptlrpcd_check(pc), &lwi);
+ /*
+ * Abort inflight rpcs for forced stop case.
+ */
+ if (test_bit(LIOD_STOP_FORCE, &pc->pc_flags))
+ ptlrpc_abort_set(pc->pc_set);
+
if (test_bit(LIOD_STOP, &pc->pc_flags))
break;
}
- /* wait for inflight requests to drain */
+
+ /*
+ * Wait for inflight requests to drain.
+ */
if (!list_empty(&pc->pc_set->set_requests))
ptlrpc_set_wait(pc->pc_set);
+
complete(&pc->pc_finishing);
+out:
+ clear_bit(LIOD_START, &pc->pc_flags);
+ clear_bit(LIOD_STOP, &pc->pc_flags);
return 0;
}
struct ptlrpcd_ctl *pc = arg;
int rc = 0;
- /* single threaded!! */
+ /*
+ * Single threaded!!
+ */
pc->pc_recurred++;
if (pc->pc_recurred == 1) {
rc = ptlrpcd_check(pc);
if (!rc)
ptlrpc_expired_set(pc->pc_set);
- /*XXX send replay requests */
+ /*
+ * XXX: send replay requests.
+ */
if (pc == &ptlrpcd_recovery_pc)
rc = ptlrpcd_check(pc);
}
#endif
-static int ptlrpcd_start(char *name, struct ptlrpcd_ctl *pc)
+int ptlrpcd_start(char *name, struct ptlrpcd_ctl *pc)
{
- int rc;
-
+ int rc = 0;
ENTRY;
- memset(pc, 0, sizeof(*pc));
+
+ /*
+ * Do not allow start second thread for one pc.
+ */
+ if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
+ CERROR("Starting second thread (%s) for same pc %p\n",
+ name, pc);
+ RETURN(-EALREADY);
+ }
+
init_completion(&pc->pc_starting);
init_completion(&pc->pc_finishing);
- pc->pc_flags = 0;
spin_lock_init(&pc->pc_lock);
snprintf (pc->pc_name, sizeof (pc->pc_name), name);
pc->pc_set = ptlrpc_prep_set();
if (pc->pc_set == NULL)
- RETURN(-ENOMEM);
+ GOTO(out, rc = -ENOMEM);
#ifdef __KERNEL__
rc = cfs_kernel_thread(ptlrpcd, pc, 0);
if (rc < 0) {
ptlrpc_set_destroy(pc->pc_set);
- RETURN(rc);
+ GOTO(out, rc);
}
-
+ rc = 0;
wait_for_completion(&pc->pc_starting);
#else
pc->pc_wait_callback =
pc->pc_idle_callback =
liblustre_register_idle_callback("ptlrpcd_check_idle_rpcs",
&ptlrpcd_idle, pc);
- (void)rc;
#endif
- RETURN(0);
+out:
+ if (rc)
+ clear_bit(LIOD_START, &pc->pc_flags);
+ RETURN(rc);
}
-static void ptlrpcd_stop(struct ptlrpcd_ctl *pc)
+void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
+ if (!test_bit(LIOD_START, &pc->pc_flags)) {
+ CERROR("Thread for pc %p was not started\n", pc);
+ return;
+ }
+
set_bit(LIOD_STOP, &pc->pc_flags);
+ if (force)
+ set_bit(LIOD_STOP_FORCE, &pc->pc_flags);
cfs_waitq_signal(&pc->pc_set->set_waitq);
#ifdef __KERNEL__
wait_for_completion(&pc->pc_finishing);
rc = ptlrpcd_start("ptlrpcd-recov", &ptlrpcd_recovery_pc);
if (rc) {
- ptlrpcd_stop(&ptlrpcd_pc);
+ ptlrpcd_stop(&ptlrpcd_pc, 0);
--ptlrpcd_users;
GOTO(out, rc);
}
{
mutex_down(&ptlrpcd_sem);
if (--ptlrpcd_users == 0) {
- ptlrpcd_stop(&ptlrpcd_pc);
- ptlrpcd_stop(&ptlrpcd_recovery_pc);
+ ptlrpcd_stop(&ptlrpcd_pc, 0);
+ ptlrpcd_stop(&ptlrpcd_recovery_pc, 0);
}
mutex_up(&ptlrpcd_sem);
}