*
*/
-#define EXPORT_SYMTAB
#define DEBUG_SUBSYSTEM S_RPC
-#include <linux/kmod.h>
#include <linux/lustre_lite.h>
#include <linux/lustre_ha.h>
+#include <linux/obd_support.h>
-struct recovd_obd *ptlrpc_connmgr;
-
-void connmgr_cli_manage(struct recovd_obd *recovd, struct ptlrpc_client *cli)
+void recovd_conn_manage(struct ptlrpc_connection *conn,
+ struct recovd_obd *recovd, ptlrpc_recovery_cb_t recover)
{
+ struct recovd_data *rd = &conn->c_recovd_data;
ENTRY;
- cli->cli_recovd = recovd;
+
+ rd->rd_recovd = recovd;
+ rd->rd_recover = recover;
+ rd->rd_phase = RD_IDLE;
+ rd->rd_next_phase = RD_TROUBLED;
+
spin_lock(&recovd->recovd_lock);
- list_add(&cli->cli_ha_item, &recovd->recovd_connections_lh);
+ list_add(&rd->rd_managed_chain, &recovd->recovd_managed_items);
spin_unlock(&recovd->recovd_lock);
- EXIT;
-}
-void connmgr_cli_fail(struct ptlrpc_client *cli)
-{
- ENTRY;
- spin_lock(&cli->cli_recovd->recovd_lock);
- cli->cli_recovd->recovd_flags |= SVC_HA_EVENT;
- list_del(&cli->cli_ha_item);
- list_add(&cli->cli_ha_item, &cli->cli_recovd->recovd_troubled_lh);
- spin_unlock(&cli->cli_recovd->recovd_lock);
- wake_up(&cli->cli_recovd->recovd_waitq);
EXIT;
}
-static int connmgr_upcall(void)
+void recovd_conn_fail(struct ptlrpc_connection *conn)
{
- char *argv[2];
- char *envp[3];
-
- argv[0] = "/usr/src/obd/utils/ha_assist.sh";
- argv[1] = NULL;
-
- envp [0] = "HOME=/";
- envp [1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
- envp [2] = NULL;
-
- return call_usermodehelper(argv[0], argv, envp);
-}
-
-static void connmgr_unpack_body(struct ptlrpc_request *req)
-{
- struct connmgr_body *b = lustre_msg_buf(req->rq_repmsg, 0);
- if (b == NULL)
- LBUG();
-
- b->generation = NTOH__u32(b->generation);
-}
-
-int connmgr_connect(struct recovd_obd *recovd, struct ptlrpc_connection *conn)
-{
- struct ptlrpc_request *req;
- struct ptlrpc_client *cl;
- struct connmgr_body *body;
- int rc, size = sizeof(*body);
+ struct recovd_data *rd = &conn->c_recovd_data;
+ struct recovd_obd *recovd = rd->rd_recovd;
ENTRY;
if (!recovd) {
- CERROR("no manager\n");
- LBUG();
- }
- cl = recovd->recovd_client;
-
- req = ptlrpc_prep_req(cl, conn, CONNMGR_CONNECT, 1, &size, NULL);
- if (!req)
- GOTO(out, rc = -ENOMEM);
-
- body = lustre_msg_buf(req->rq_reqmsg, 0);
- body->generation = HTON__u32(conn->c_generation);
- body->conn = (__u64)(unsigned long)conn;
- body->conn_token = conn->c_token;
-
- req->rq_replen = lustre_msg_size(1, &size);
-
- rc = ptlrpc_queue_wait(req);
- rc = ptlrpc_check_status(req, rc);
- if (!rc) {
- connmgr_unpack_body(req);
- body = lustre_msg_buf(req->rq_repmsg, 0);
- CDEBUG(D_NET, "remote generation: %o\n", body->generation);
- conn->c_level = LUSTRE_CONN_CON;
- conn->c_remote_conn = body->conn;
- conn->c_remote_token = body->conn_token;
+ CERROR("no recovd for connection %p\n", conn);
+ EXIT;
+ return;
}
- ptlrpc_free_req(req);
- EXIT;
- out:
- return rc;
-}
-
-static int connmgr_handle_connect(struct ptlrpc_request *req)
-{
- struct connmgr_body *body;
- int rc, size = sizeof(*body);
- ENTRY;
- rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg);
- if (rc) {
- CERROR("connmgr: out of memory\n");
- req->rq_status = -ENOMEM;
- RETURN(0);
+ spin_lock(&recovd->recovd_lock);
+ if (rd->rd_phase != RD_IDLE) {
+ CDEBUG(D_INFO, "connection %p to %s already in recovery\n",
+ conn, conn->c_remote_uuid);
+ /* XXX need to distinguish from failure-in-recovery */
+ spin_unlock(&recovd->recovd_lock);
+ EXIT;
+ return;
}
+
+ CERROR("connection %p to %s failed\n", conn, conn->c_remote_uuid);
+ list_del(&rd->rd_managed_chain);
+ list_add_tail(&rd->rd_managed_chain, &recovd->recovd_troubled_items);
+ rd->rd_phase = RD_TROUBLED;
+ spin_unlock(&recovd->recovd_lock);
- body = lustre_msg_buf(req->rq_reqmsg, 0);
- connmgr_unpack_body(req);
-
- req->rq_connection->c_remote_conn = body->conn;
- req->rq_connection->c_remote_token = body->conn_token;
-
- CERROR("incoming generation %d\n", body->generation);
- body = lustre_msg_buf(req->rq_repmsg, 0);
- body->generation = 4711;
- body->conn = (__u64)(unsigned long)req->rq_connection;
- body->conn_token = req->rq_connection->c_token;
+ wake_up(&recovd->recovd_waitq);
- req->rq_connection->c_level = LUSTRE_CONN_CON;
- RETURN(0);
+ EXIT;
}
-int connmgr_handle(struct obd_device *dev, struct ptlrpc_service *svc,
- struct ptlrpc_request *req)
+/* Acquires recovd_lock itself; callers must NOT hold it (the body takes the
+ * non-recursive spinlock, so entering with it held would deadlock). */
+void recovd_conn_fixed(struct ptlrpc_connection *conn)
{
- int rc;
+ struct recovd_data *rd = &conn->c_recovd_data;
ENTRY;
- rc = lustre_unpack_msg(req->rq_reqmsg, req->rq_reqlen);
- if (rc) {
- CERROR("Invalid request\n");
- GOTO(out, rc);
- }
-
- if (req->rq_reqmsg->type != NTOH__u32(PTL_RPC_MSG_REQUEST)) {
- CERROR("wrong packet type sent %d\n",
- req->rq_reqmsg->type);
- GOTO(out, rc = -EINVAL);
- }
-
- switch (req->rq_reqmsg->opc) {
- case CONNMGR_CONNECT:
- CDEBUG(D_INODE, "connmgr connect\n");
- rc = connmgr_handle_connect(req);
- break;
-
- default:
- rc = ptlrpc_error(svc, req);
- RETURN(rc);
- }
+ spin_lock(&rd->rd_recovd->recovd_lock);
+ list_del(&rd->rd_managed_chain);
+ rd->rd_phase = RD_IDLE;
+ rd->rd_next_phase = RD_TROUBLED;
+ list_add(&rd->rd_managed_chain, &rd->rd_recovd->recovd_managed_items);
+ spin_unlock(&rd->rd_recovd->recovd_lock);
EXIT;
-out:
- if (rc) {
- ptlrpc_error(svc, req);
- } else {
- CDEBUG(D_NET, "sending reply\n");
- ptlrpc_reply(svc, req);
- }
-
- return 0;
}
+
static int recovd_check_event(struct recovd_obd *recovd)
{
int rc = 0;
+ struct list_head *tmp;
+
ENTRY;
spin_lock(&recovd->recovd_lock);
- if (!(recovd->recovd_flags & MGR_WORKING) &&
- !list_empty(&recovd->recovd_troubled_lh)) {
-
- CERROR("connection in trouble - state: WORKING, upcall\n");
- recovd->recovd_flags = MGR_WORKING;
-
- recovd->recovd_waketime = CURRENT_TIME;
- recovd->recovd_timeout = 5 * HZ;
- schedule_timeout(recovd->recovd_timeout);
- }
+ if (recovd->recovd_state == RECOVD_STOPPING)
+ GOTO(out, rc = 1);
- if (recovd->recovd_flags & MGR_WORKING &&
- CURRENT_TIME <= recovd->recovd_waketime + recovd->recovd_timeout) {
- CERROR("WORKING: new event\n");
+ list_for_each(tmp, &recovd->recovd_troubled_items) {
- recovd->recovd_waketime = CURRENT_TIME;
- schedule_timeout(recovd->recovd_timeout);
- }
+ struct recovd_data *rd = list_entry(tmp, struct recovd_data,
+ rd_managed_chain);
- if (recovd->recovd_flags & MGR_STOPPING) {
- CERROR("ha mgr stopping\n");
- rc = 1;
+ if (rd->rd_phase == rd->rd_next_phase ||
+ rd->rd_phase == RD_FAILED)
+ GOTO(out, rc = 1);
}
+ out:
spin_unlock(&recovd->recovd_lock);
RETURN(rc);
}
-static int recovd_handle_event(struct recovd_obd *recovd)
+static void dump_connection_list(struct list_head *head)
{
- spin_lock(&recovd->recovd_lock);
-
- if (!(recovd->recovd_flags & MGR_WORKING) &&
- !list_empty(&recovd->recovd_troubled_lh)) {
-
- CERROR("connection in trouble - state: WORKING, upcall\n");
- recovd->recovd_flags = MGR_WORKING;
-
+ struct list_head *tmp;
- connmgr_upcall();
- recovd->recovd_waketime = CURRENT_TIME;
- recovd->recovd_timeout = 5 * HZ;
- schedule_timeout(recovd->recovd_timeout);
+ list_for_each(tmp, head) {
+ struct ptlrpc_connection *conn =
+ list_entry(tmp, struct ptlrpc_connection,
+ c_recovd_data.rd_managed_chain);
+ CDEBUG(D_NET, " %p = %s\n", conn, conn->c_remote_uuid);
}
+}
- if (recovd->recovd_flags & MGR_WORKING &&
- CURRENT_TIME <= recovd->recovd_waketime + recovd->recovd_timeout) {
- CERROR("WORKING: new event\n");
+static int recovd_handle_event(struct recovd_obd *recovd)
+{
+ struct list_head *tmp, *n;
+ int rc = 0;
+ ENTRY;
- recovd->recovd_waketime = CURRENT_TIME;
- schedule_timeout(recovd->recovd_timeout);
- }
+ spin_lock(&recovd->recovd_lock);
+ CDEBUG(D_NET, "managed: \n");
+ dump_connection_list(&recovd->recovd_managed_items);
+ CDEBUG(D_NET, "troubled: \n");
+ dump_connection_list(&recovd->recovd_troubled_items);
+
+ /*
+ * We use _safe here because one of the callbacks, especially
+ * FAILURE or PREPARED, could move list items around.
+ */
+ list_for_each_safe(tmp, n, &recovd->recovd_troubled_items) {
+ struct recovd_data *rd = list_entry(tmp, struct recovd_data,
+ rd_managed_chain);
+
+ if (rd->rd_phase != RD_FAILED &&
+ rd->rd_phase != rd->rd_next_phase)
+ continue;
+
+ switch (rd->rd_phase) {
+ case RD_FAILED:
+ cb_failed: /* must always reach here with recovd_lock held! */
+ CERROR("recovery FAILED for rd %p (conn %p): %d\n",
+ rd, class_rd2conn(rd), rc);
+
+ spin_unlock(&recovd->recovd_lock);
+ (void)rd->rd_recover(rd, PTLRPC_RECOVD_PHASE_FAILURE);
+ spin_lock(&recovd->recovd_lock);
+ break;
+
+ case RD_TROUBLED:
+ if (!rd->rd_recover) {
+ CERROR("no rd_recover for rd %p (conn %p)\n",
+ rd, class_rd2conn(rd));
+ rc = -EINVAL;
+ break;
+ }
+ CERROR("starting recovery for rd %p (conn %p)\n",
+ rd, class_rd2conn(rd));
+ rd->rd_phase = RD_PREPARING;
+
+ spin_unlock(&recovd->recovd_lock);
+ rc = rd->rd_recover(rd, PTLRPC_RECOVD_PHASE_PREPARE);
+ spin_lock(&recovd->recovd_lock);
+ if (rc)
+ goto cb_failed;
+
+ rd->rd_next_phase = RD_PREPARED;
+ break;
+
+ case RD_PREPARED:
+ rd->rd_phase = RD_RECOVERING;
+
+ CERROR("recovery prepared for rd %p (conn %p)\n",
+ rd, class_rd2conn(rd));
+
+ spin_unlock(&recovd->recovd_lock);
+ rc = rd->rd_recover(rd, PTLRPC_RECOVD_PHASE_RECOVER);
+ spin_lock(&recovd->recovd_lock);
+ if (rc)
+ goto cb_failed;
+
+ rd->rd_next_phase = RD_RECOVERED;
+ break;
+
+ case RD_RECOVERED:
+ rd->rd_phase = RD_IDLE;
+ rd->rd_next_phase = RD_TROUBLED;
+
+ CERROR("recovery complete for rd %p (conn %p)\n",
+ rd, class_rd2conn(rd));
+ break;
+
+ default:
+ break;
+ }
+ }
spin_unlock(&recovd->recovd_lock);
- return 0;
+ RETURN(0);
}
static int recovd_main(void *arg)
spin_unlock_irq(¤t->sigmask_lock);
sprintf(current->comm, "lustre_recovd");
+ unlock_kernel();
- /* Record that the thread is running */
+ /* Signal that the thread is running. */
recovd->recovd_thread = current;
- recovd->recovd_flags = MGR_RUNNING;
+ recovd->recovd_state = RECOVD_READY;
wake_up(&recovd->recovd_ctl_waitq);
- /* And now, loop forever on requests */
+ /* And now, loop forever on requests. */
while (1) {
- wait_event_interruptible(recovd->recovd_waitq,
- recovd_check_event(recovd));
-
- spin_lock(&recovd->recovd_lock);
- if (recovd->recovd_flags & MGR_STOPPING) {
- spin_unlock(&recovd->recovd_lock);
- CERROR("lustre_hamgr quitting\n");
- EXIT;
+ wait_event(recovd->recovd_waitq, recovd_check_event(recovd));
+ if (recovd->recovd_state == RECOVD_STOPPING)
break;
- }
-
recovd_handle_event(recovd);
- spin_unlock(&recovd->recovd_lock);
}
recovd->recovd_thread = NULL;
- recovd->recovd_flags = MGR_STOPPED;
+ recovd->recovd_state = RECOVD_STOPPED;
wake_up(&recovd->recovd_ctl_waitq);
CDEBUG(D_NET, "mgr exiting process %d\n", current->pid);
RETURN(0);
int recovd_setup(struct recovd_obd *recovd)
{
int rc;
+
ENTRY;
- INIT_LIST_HEAD(&recovd->recovd_connections_lh);
- INIT_LIST_HEAD(&recovd->recovd_troubled_lh);
+ INIT_LIST_HEAD(&recovd->recovd_managed_items);
+ INIT_LIST_HEAD(&recovd->recovd_troubled_items);
spin_lock_init(&recovd->recovd_lock);
init_waitqueue_head(&recovd->recovd_waitq);
CERROR("cannot start thread\n");
RETURN(-EINVAL);
}
- wait_event(recovd->recovd_ctl_waitq, recovd->recovd_flags & MGR_RUNNING);
+ wait_event(recovd->recovd_ctl_waitq,
+ recovd->recovd_state == RECOVD_READY);
+
+ ptlrpc_recovd = recovd;
RETURN(0);
}
int recovd_cleanup(struct recovd_obd *recovd)
{
- recovd->recovd_flags = MGR_STOPPING;
-
+ spin_lock(&recovd->recovd_lock);
+ recovd->recovd_state = RECOVD_STOPPING;
wake_up(&recovd->recovd_waitq);
- wait_event_interruptible(recovd->recovd_ctl_waitq,
- (recovd->recovd_flags & MGR_STOPPED));
+ spin_unlock(&recovd->recovd_lock);
+
+ wait_event(recovd->recovd_ctl_waitq,
+ (recovd->recovd_state == RECOVD_STOPPED));
RETURN(0);
}
+
+struct recovd_obd *ptlrpc_recovd;