RETURN(-EINVAL);
}
- if (lcfg->lcfg_inllen1 < 1) {
+
+ if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
CERROR("requires a TARGET UUID\n");
RETURN(-EINVAL);
}
- if (lcfg->lcfg_inllen1 > 37) {
+ if (LUSTRE_CFG_BUFLEN(lcfg, 1) > 37) {
CERROR("client UUID must be less than 38 characters\n");
RETURN(-EINVAL);
}
- if (lcfg->lcfg_inllen2 < 1) {
+ if (LUSTRE_CFG_BUFLEN(lcfg, 2) < 1) {
CERROR("setup requires a SERVER UUID\n");
RETURN(-EINVAL);
}
- if (lcfg->lcfg_inllen2 > 37) {
+ if (LUSTRE_CFG_BUFLEN(lcfg, 2) > 37) {
CERROR("target UUID must be less than 38 characters\n");
RETURN(-EINVAL);
}
sema_init(&cli->cl_sem, 1);
cli->cl_conn_count = 0;
- memcpy(server_uuid.uuid, lcfg->lcfg_inlbuf2,
- min_t(unsigned int, lcfg->lcfg_inllen2, sizeof(server_uuid)));
+ memcpy(server_uuid.uuid, lustre_cfg_buf(lcfg, 2),
+ min_t(unsigned int, LUSTRE_CFG_BUFLEN(lcfg, 2),
+ sizeof(server_uuid)));
cli->cl_dirty = 0;
cli->cl_avail_grant = 0;
imp->imp_generation = 0;
imp->imp_initial_recov = 1;
INIT_LIST_HEAD(&imp->imp_pinger_chain);
- memcpy(imp->imp_target_uuid.uuid, lcfg->lcfg_inlbuf1,
- lcfg->lcfg_inllen1);
+ memcpy(imp->imp_target_uuid.uuid, lustre_cfg_buf(lcfg, 1),
+ LUSTRE_CFG_BUFLEN(lcfg, 1));
class_import_put(imp);
rc = client_import_add_conn(imp, &server_uuid, 1);
cli->cl_max_mds_cookiesize = sizeof(struct llog_cookie);
cli->cl_sandev = to_kdev_t(0);
- if (lcfg->lcfg_inllen3 != 0) {
- if (!strcmp(lcfg->lcfg_inlbuf3, "inactive")) {
+ if (LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) {
+ if (!strcmp(lustre_cfg_string(lcfg, 3), "inactive")) {
CDEBUG(D_HA, "marking %s %s->%s as inactive\n",
name, obddev->obd_name,
imp->imp_target_uuid.uuid);
imp->imp_invalid = 1;
- if (lcfg->lcfg_inllen4 != 0)
- mgmt_name = lcfg->lcfg_inlbuf4;
+ if (LUSTRE_CFG_BUFLEN(lcfg, 4) > 0)
+ mgmt_name = lustre_cfg_string(lcfg, 4);
} else {
- mgmt_name = lcfg->lcfg_inlbuf3;
+ mgmt_name = lustre_cfg_string(lcfg, 3);
}
}
GOTO(err_import, rc = -ENOSYS);
}
- register_f = inter_module_get("mgmtcli_register_for_events");
+ register_f = (mgmtcli_register_for_events_t)symbol_get("mgmtcli_register_for_events");
if (!register_f) {
CERROR("can't i_m_g mgmtcli_register_for_events\n");
GOTO(err_import, rc = -ENOSYS);
}
rc = register_f(mgmt_obd, obddev, &imp->imp_target_uuid);
- inter_module_put("mgmtcli_register_for_events");
+ symbol_put("mgmtcli_register_for_events");
if (!rc)
cli->cl_mgmtcli_obd = mgmt_obd;
if (cli->cl_mgmtcli_obd) {
mgmtcli_deregister_for_events_t dereg_f;
- dereg_f = inter_module_get("mgmtcli_deregister_for_events");
+ dereg_f = (mgmtcli_deregister_for_events_t)symbol_get("mgmtcli_deregister_for_events");
dereg_f(cli->cl_mgmtcli_obd, obddev);
- inter_module_put("mgmtcli_deregister_for_events");
+ symbol_put("mgmtcli_deregister_for_events");
}
/* Here we try to drop the security structure after destroy import,
int client_connect_import(struct lustre_handle *dlm_handle,
struct obd_device *obd,
struct obd_uuid *cluuid,
+ struct obd_connect_data *conn_data,
unsigned long connect_flags)
{
struct client_obd *cli = &obd->u.cli;
GOTO(out_ldlm, rc);
imp->imp_connect_flags = connect_flags;
+ if (conn_data)
+ memcpy(&imp->imp_connect_data, conn_data, sizeof(*conn_data));
+
rc = ptlrpc_connect_import(imp, NULL);
if (rc != 0) {
LASSERT (imp->imp_state == LUSTRE_IMP_DISCON);
struct obd_uuid cluuid;
struct obd_uuid remote_uuid;
struct list_head *p;
+ struct obd_connect_data *conn_data;
+ int conn_data_size = sizeof(*conn_data);
char *str, *tmp;
int rc = 0;
unsigned long flags;
int initial_conn = 0;
char peer_str[PTL_NALFMT_SIZE];
+ const int offset = 1;
ENTRY;
OBD_RACE(OBD_FAIL_TGT_CONN_RACE);
- LASSERT_REQSWAB (req, 0);
- str = lustre_msg_string(req->rq_reqmsg, 0, sizeof(tgtuuid) - 1);
+ LASSERT_REQSWAB (req, offset + 0);
+ str = lustre_msg_string(req->rq_reqmsg, offset + 0,
+ sizeof(tgtuuid) - 1);
if (str == NULL) {
CERROR("bad target UUID for connect\n");
GOTO(out, rc = -EINVAL);
GOTO(out, rc = -ENODEV);
}
- LASSERT_REQSWAB (req, 1);
- str = lustre_msg_string(req->rq_reqmsg, 1, sizeof(cluuid) - 1);
+ LASSERT_REQSWAB (req, offset + 1);
+ str = lustre_msg_string(req->rq_reqmsg, offset + 1, sizeof(cluuid) - 1);
if (str == NULL) {
CERROR("bad client UUID for connect\n");
GOTO(out, rc = -EINVAL);
LBUG();
}
- tmp = lustre_msg_buf(req->rq_reqmsg, 2, sizeof conn);
+ tmp = lustre_msg_buf(req->rq_reqmsg, offset + 2, sizeof conn);
if (tmp == NULL)
GOTO(out, rc = -EPROTO);
memcpy(&conn, tmp, sizeof conn);
- cfp = lustre_msg_buf(req->rq_reqmsg, 3, sizeof(unsigned long));
+ cfp = lustre_msg_buf(req->rq_reqmsg, offset + 3, sizeof(unsigned long));
LASSERT(cfp != NULL);
connect_flags = *cfp;
- rc = lustre_pack_reply(req, 0, NULL, NULL);
+ conn_data = lustre_swab_reqbuf(req, offset + 4, sizeof(*conn_data),
+ lustre_swab_connect);
+ if (!conn_data)
+ GOTO(out, rc = -EPROTO);
+
+ rc = lustre_pack_reply(req, 1, &conn_data_size, NULL);
if (rc)
GOTO(out, rc);
/* Tell the client if we're in recovery. */
/* If this is the first client, start the recovery timer */
- CWARN("%s: connection from %s@%s/%lu %s\n", target->obd_name, cluuid.uuid,
+ CWARN("%s: connection from %s@%s/%lu %st"LPU64"\n", target->obd_name, cluuid.uuid,
ptlrpc_peernid2str(&req->rq_peer, peer_str), *cfp,
- target->obd_recovering ? "(recovering)" : "");
+ target->obd_recovering ? "recovering/" : "",
+ conn_data->transno);
if (target->obd_recovering) {
lustre_msg_add_op_flags(req->rq_repmsg, MSG_CONNECT_RECOVERING);
rc = -EBUSY;
} else {
dont_check_exports:
- rc = obd_connect(&conn, target, &cluuid, connect_flags);
+ rc = obd_connect(&conn, target, &cluuid, conn_data,
+ connect_flags);
}
}
+
+ /* Return only the parts of obd_connect_data that we understand, so the
+ * client knows that we don't understand the rest. */
+ conn_data->ocd_connect_flags &= OBD_CONNECT_SUPPORTED;
+ memcpy(lustre_msg_buf(req->rq_repmsg, 0, sizeof(*conn_data)), conn_data,
+ sizeof(*conn_data));
+
/* Tell the client if we support replayable requests */
if (target->obd_replayable)
lustre_msg_add_op_flags(req->rq_repmsg, MSG_CONNECT_REPLAYABLE);
GOTO(out, rc = 0);
}
- if (target->obd_recovering)
+ spin_lock_bh(&target->obd_processing_task_lock);
+ if (target->obd_recovering && export->exp_connected == 0) {
+ __u64 t = conn_data->transno;
+ export->exp_connected = 1;
+ if ((lustre_msg_get_op_flags(req->rq_reqmsg) & MSG_CONNECT_TRANSNO)
+ && t < target->obd_next_recovery_transno)
+ target->obd_next_recovery_transno = t;
target->obd_connected_clients++;
+ if (target->obd_connected_clients == target->obd_max_recoverable_clients)
+ wake_up(&target->obd_next_transno_waitq);
+ }
+ spin_unlock_bh(&target->obd_processing_task_lock);
- memcpy(&conn, lustre_msg_buf(req->rq_reqmsg, 2, sizeof(conn)),
+ memcpy(&conn, lustre_msg_buf(req->rq_reqmsg, offset + 2, sizeof(conn)),
sizeof(conn));
if (export->exp_imp_reverse != NULL) {
class_import_put(revimp);
- rc = obd_connect_post(export, connect_flags);
+ rc = obd_connect_post(export, initial_conn, connect_flags);
out:
if (rc)
req->rq_status = rc;
static void target_finish_recovery(struct obd_device *obd)
{
- struct list_head *tmp, *n;
int rc;
- CWARN("%s: sending delayed replies to recovered clients\n",
- obd->obd_name);
-
ldlm_reprocess_all_ns(obd->obd_namespace);
/* when recovery finished, cleanup orphans on mds and ost */
CERROR("postrecov failed %d\n", rc);
}
+ obd->obd_recovery_end = LTIME_S(CURRENT_TIME);
+ return;
+}
- list_for_each_safe(tmp, n, &obd->obd_delayed_reply_queue) {
- struct ptlrpc_request *req;
+/* Abort the request-replay stage: drain obd_req_replay_queue, answering
+ * each still-queued replay request with -ENOTCONN (as a PTL_RPC_MSG_ERR
+ * reply) before releasing the saved request copy.  If packing the error
+ * reply fails we skip the reply but still release the request. */
+static void abort_req_replay_queue(struct obd_device *obd)
+{
+ struct ptlrpc_request *req;
+ struct list_head *tmp, *n;
+ int rc;
+
+ list_for_each_safe(tmp, n, &obd->obd_req_replay_queue) {
req = list_entry(tmp, struct ptlrpc_request, rq_list);
list_del(&req->rq_list);
- DEBUG_REQ(D_ERROR, req, "delayed:");
- ptlrpc_reply(req);
+ DEBUG_REQ(D_ERROR, req, "aborted:");
+ req->rq_status = -ENOTCONN;
+ req->rq_type = PTL_RPC_MSG_ERR;
+ rc = lustre_pack_reply(req, 0, NULL, NULL);
+ if (rc == 0) {
+ ptlrpc_reply(req);
+ } else {
+ DEBUG_REQ(D_ERROR, req,
+ "packing failed for abort-reply; skipping");
+ }
target_release_saved_req(req);
}
- obd->obd_recovery_end = LTIME_S(CURRENT_TIME);
- return;
}
-static void abort_recovery_queue(struct obd_device *obd)
+static void abort_lock_replay_queue(struct obd_device *obd)
{
struct ptlrpc_request *req;
struct list_head *tmp, *n;
int rc;
- list_for_each_safe(tmp, n, &obd->obd_recovery_queue) {
+ list_for_each_safe(tmp, n, &obd->obd_lock_replay_queue) {
req = list_entry(tmp, struct ptlrpc_request, rq_list);
list_del(&req->rq_list);
DEBUG_REQ(D_ERROR, req, "aborted:");
target_cancel_recovery_timer(obd);
spin_unlock_bh(&obd->obd_processing_task_lock);
- list_for_each_safe(tmp, n, &obd->obd_delayed_reply_queue) {
+ list_for_each_safe(tmp, n, &obd->obd_req_replay_queue) {
req = list_entry(tmp, struct ptlrpc_request, rq_list);
list_del(&req->rq_list);
- LASSERT (req->rq_reply_state);
- lustre_free_reply_state(req->rq_reply_state);
+ LASSERT (req->rq_reply_state == 0);
target_release_saved_req(req);
}
- list_for_each_safe(tmp, n, &obd->obd_recovery_queue) {
+ list_for_each_safe(tmp, n, &obd->obd_lock_replay_queue) {
+ req = list_entry(tmp, struct ptlrpc_request, rq_list);
+ list_del(&req->rq_list);
+ LASSERT (req->rq_reply_state == 0);
+ target_release_saved_req(req);
+ }
+ list_for_each_safe(tmp, n, &obd->obd_final_req_queue) {
req = list_entry(tmp, struct ptlrpc_request, rq_list);
list_del(&req->rq_list);
LASSERT (req->rq_reply_state == 0);
}
}
+#if 0
static void target_abort_recovery(void *data)
{
struct obd_device *obd = data;
target_finish_recovery(obd);
ptlrpc_run_recovery_over_upcall(obd);
}
+#endif
static void target_recovery_expired(unsigned long castmeharder)
{
struct obd_device *obd = (struct obd_device *)castmeharder;
- CERROR("recovery timed out, aborting\n");
spin_lock_bh(&obd->obd_processing_task_lock);
if (obd->obd_recovering)
obd->obd_abort_recovery = 1;
__u64 next_transno, req_transno;
spin_lock_bh(&obd->obd_processing_task_lock);
- if (!list_empty(&obd->obd_recovery_queue)) {
- req = list_entry(obd->obd_recovery_queue.next,
+ if (!list_empty(&obd->obd_req_replay_queue)) {
+ req = list_entry(obd->obd_req_replay_queue.next,
struct ptlrpc_request, rq_list);
req_transno = req->rq_reqmsg->transno;
} else {
if (obd->obd_abort_recovery) {
CDEBUG(D_HA, "waking for aborted recovery\n");
wake_up = 1;
- } else if (max == completed) {
+ } else if (atomic_read(&obd->obd_req_replay_clients) == 0) {
CDEBUG(D_HA, "waking for completed recovery\n");
wake_up = 1;
} else if (req_transno == next_transno) {
wake_up = 1;
} else if (queue_len + completed == max) {
LASSERT(req->rq_reqmsg->transno >= next_transno);
- CDEBUG(D_ERROR,
+ CDEBUG(req_transno > obd->obd_last_committed ? D_ERROR : D_HA,
"waking for skipped transno (skip: "LPD64
", ql: %d, comp: %d, conn: %d, next: "LPD64")\n",
next_transno, queue_len, completed, max, req_transno);
obd->obd_next_recovery_transno = req_transno;
wake_up = 1;
+ } else if (queue_len == atomic_read(&obd->obd_req_replay_clients)) {
+ /* Some clients have not connected in time, but we need
+ * their requests to continue recovery, so abort it. */
+ CDEBUG(D_ERROR, "abort due to missed clients: queue: %d max: %d\n",
+ queue_len, max);
+ obd->obd_abort_recovery = 1;
+ wake_up = 1;
}
spin_unlock_bh(&obd->obd_processing_task_lock);
spin_lock_bh(&obd->obd_processing_task_lock);
if (obd->obd_abort_recovery) {
req = NULL;
- } else if (!list_empty(&obd->obd_recovery_queue)) {
- req = list_entry(obd->obd_recovery_queue.next,
+ } else if (!list_empty(&obd->obd_req_replay_queue)) {
+ req = list_entry(obd->obd_req_replay_queue.next,
struct ptlrpc_request, rq_list);
list_del_init(&req->rq_list);
obd->obd_requests_queued_for_recovery--;
return req;
}
+/* l_wait_event() condition for the lock-replay stage.  Returns nonzero
+ * (wake the recovery thread) when a lock-replay request is queued, when
+ * every lock-replay client has finished, or when recovery is aborting.
+ * Reads the queue and counters under obd_processing_task_lock.
+ *
+ * Note: the original version also fetched the head request into a local
+ * that was never used; the dead local and its list_entry() are dropped. */
+static int check_for_next_lock(struct obd_device *obd)
+{
+ int wake_up = 0;
+
+ spin_lock_bh(&obd->obd_processing_task_lock);
+ if (!list_empty(&obd->obd_lock_replay_queue)) {
+ CDEBUG(D_HA, "waking for next lock\n");
+ wake_up = 1;
+ } else if (atomic_read(&obd->obd_lock_replay_clients) == 0) {
+ CDEBUG(D_HA, "waking for completed lock replay\n");
+ wake_up = 1;
+ } else if (obd->obd_abort_recovery) {
+ CDEBUG(D_HA, "waking for aborted recovery\n");
+ wake_up = 1;
+ }
+ spin_unlock_bh(&obd->obd_processing_task_lock);
+
+ return wake_up;
+}
+
+/* Dequeue the next lock-replay request for the recovery thread.  Blocks
+ * (via l_wait_event on obd_next_transno_waitq) until a lock request is
+ * queued, all lock-replay clients are done, or recovery is aborted.
+ * Returns the dequeued request, or NULL on completion/abort.  The queue
+ * is manipulated under obd_processing_task_lock. */
+static struct ptlrpc_request *
+target_next_replay_lock(struct obd_device *obd)
+{
+ struct l_wait_info lwi = { 0 };
+ struct ptlrpc_request *req;
+
+ CDEBUG(D_HA, "Waiting for lock\n");
+ l_wait_event(obd->obd_next_transno_waitq,
+ check_for_next_lock(obd), &lwi);
+
+ spin_lock_bh(&obd->obd_processing_task_lock);
+ if (obd->obd_abort_recovery) {
+ /* abort takes priority even if requests are still queued */
+ req = NULL;
+ } else if (!list_empty(&obd->obd_lock_replay_queue)) {
+ req = list_entry(obd->obd_lock_replay_queue.next,
+ struct ptlrpc_request, rq_list);
+ list_del_init(&req->rq_list);
+ } else {
+ req = NULL;
+ }
+ spin_unlock_bh(&obd->obd_processing_task_lock);
+ return req;
+}
+
+/* Pop the next queued recovery-completion ping from obd_final_req_queue,
+ * or return NULL if the queue is empty.  Unlike the replay fetchers this
+ * does not block; the final stage just drains what is already queued. */
+static struct ptlrpc_request *
+target_next_final_ping(struct obd_device *obd)
+{
+ struct ptlrpc_request *req;
+
+ spin_lock_bh(&obd->obd_processing_task_lock);
+ if (!list_empty(&obd->obd_final_req_queue)) {
+ req = list_entry(obd->obd_final_req_queue.next,
+ struct ptlrpc_request, rq_list);
+ list_del_init(&req->rq_list);
+ } else {
+ req = NULL;
+ }
+ spin_unlock_bh(&obd->obd_processing_task_lock);
+ return req;
+}
+
+/* Export predicate (for class_disconnect_stale_exports): nonzero once
+ * @exp has finished request replay. */
+static int req_replay_done(struct obd_export *exp)
+{
+ return exp->exp_req_replay_needed ? 0 : 1;
+}
+
+/* Export predicate (for class_disconnect_stale_exports): nonzero once
+ * @exp has finished lock replay. */
+static int lock_replay_done(struct obd_export *exp)
+{
+ return exp->exp_lock_replay_needed ? 0 : 1;
+}
+
+/* Export predicate (for class_disconnect_stale_exports): nonzero once
+ * @exp has reconnected during recovery. */
+static int connect_done(struct obd_export *exp)
+{
+ return exp->exp_connected ? 1 : 0;
+}
+
+/* l_wait_event() condition for the connect stage: nonzero once recovery
+ * is aborting or every recoverable client has reconnected.
+ * obd_connected_clients is updated by the connect path under
+ * obd_processing_task_lock; it must never exceed the recoverable max. */
+static int check_for_clients(struct obd_device *obd)
+{
+ if (obd->obd_abort_recovery)
+ return 1;
+ LASSERT(obd->obd_connected_clients <= obd->obd_max_recoverable_clients);
+ if (obd->obd_connected_clients == obd->obd_max_recoverable_clients)
+ return 1;
+ return 0;
+}
+
static int target_recovery_thread(void *arg)
{
struct obd_device *obd = arg;
struct ptlrpc_request *req;
struct target_recovery_data *trd = &obd->obd_recovery_data;
+ char peer_str[PTL_NALFMT_SIZE];
+ struct l_wait_info lwi = { 0 };
unsigned long flags;
ENTRY;
obd->obd_recovering = 1;
complete(&trd->trd_starting);
- while (obd->obd_recovering) {
+ /* first of all, we have to know the first transno to replay */
+ obd->obd_abort_recovery = 0;
+ l_wait_event(obd->obd_next_transno_waitq,
+ check_for_clients(obd), &lwi);
+
+ spin_lock_bh(&obd->obd_processing_task_lock);
+ target_cancel_recovery_timer(obd);
+ spin_unlock_bh(&obd->obd_processing_task_lock);
+
+ /* If some clients haven't connected in time, evict them */
+ if (obd->obd_abort_recovery) {
+ int stale;
+ CERROR("some clients haven't connect in time, evict them ...\n");
+ obd->obd_abort_recovery = 0;
+ stale = class_disconnect_stale_exports(obd, connect_done, 0);
+ atomic_sub(stale, &obd->obd_req_replay_clients);
+ atomic_sub(stale, &obd->obd_lock_replay_clients);
+ }
+
+ /* next stage: replay requests */
+ CWARN("1: request replay stage - %d clients from t"LPU64"\n",
+ atomic_read(&obd->obd_req_replay_clients),
+ obd->obd_next_recovery_transno);
+ while ((req = target_next_replay_req(obd))) {
LASSERT(trd->trd_processing_task == current->pid);
- req = target_next_replay_req(obd);
- if (req != NULL) {
- char peer_str[PTL_NALFMT_SIZE];
- DEBUG_REQ(D_HA, req, "processing t"LPD64" from %s: ",
- req->rq_reqmsg->transno,
- ptlrpc_peernid2str(&req->rq_peer, peer_str));
- (void)trd->trd_recovery_handler(req);
- obd->obd_replayed_requests++;
- reset_recovery_timer(obd);
- /* bug 1580: decide how to properly sync() in recovery*/
- //mds_fsync_super(mds->mds_sb);
- ptlrpc_free_clone(req);
- spin_lock_bh(&obd->obd_processing_task_lock);
- obd->obd_next_recovery_transno++;
- spin_unlock_bh(&obd->obd_processing_task_lock);
- } else {
- /* recovery is over */
- spin_lock_bh(&obd->obd_processing_task_lock);
- obd->obd_recovering = 0;
- target_cancel_recovery_timer(obd);
- if (obd->obd_abort_recovery) {
- obd->obd_abort_recovery = 0;
- spin_unlock_bh(&obd->obd_processing_task_lock);
- target_abort_recovery(obd);
- } else {
- LASSERT(obd->obd_recoverable_clients == 0);
- spin_unlock_bh(&obd->obd_processing_task_lock);
- target_finish_recovery(obd);
- }
- }
+ DEBUG_REQ(D_HA, req, "processing t"LPD64" from %s: ",
+ req->rq_reqmsg->transno,
+ ptlrpc_peernid2str(&req->rq_peer, peer_str));
+ (void)trd->trd_recovery_handler(req);
+ obd->obd_replayed_requests++;
+ reset_recovery_timer(obd);
+ /* bug 1580: decide how to properly sync() in recovery*/
+ //mds_fsync_super(mds->mds_sb);
+ ptlrpc_free_clone(req);
+ spin_lock_bh(&obd->obd_processing_task_lock);
+ obd->obd_next_recovery_transno++;
+ spin_unlock_bh(&obd->obd_processing_task_lock);
}
+ spin_lock_bh(&obd->obd_processing_task_lock);
+ target_cancel_recovery_timer(obd);
+ spin_unlock_bh(&obd->obd_processing_task_lock);
+
+ /* If some clients haven't replayed requests in time, evict them */
+ if (obd->obd_abort_recovery) {
+ int stale;
+ CERROR("req replay timed out, aborting ...\n");
+ obd->obd_abort_recovery = 0;
+ stale = class_disconnect_stale_exports(obd, req_replay_done, 0);
+ atomic_sub(stale, &obd->obd_lock_replay_clients);
+ abort_req_replay_queue(obd);
+ }
+
+ /* The second stage: replay locks */
+ CWARN("2: lock replay stage - %d clients\n",
+ atomic_read(&obd->obd_lock_replay_clients));
+ while ((req = target_next_replay_lock(obd))) {
+ LASSERT(trd->trd_processing_task == current->pid);
+ DEBUG_REQ(D_HA, req, "processing lock from %s: ",
+ ptlrpc_peernid2str(&req->rq_peer, peer_str));
+ (void)trd->trd_recovery_handler(req);
+ reset_recovery_timer(obd);
+ ptlrpc_free_clone(req);
+ obd->obd_replayed_locks++;
+ }
+
+ spin_lock_bh(&obd->obd_processing_task_lock);
+ target_cancel_recovery_timer(obd);
+ spin_unlock_bh(&obd->obd_processing_task_lock);
+
+ /* If some clients haven't replayed requests in time, evict them */
+ if (obd->obd_abort_recovery) {
+ int stale;
+ CERROR("lock replay timed out, aborting ...\n");
+ obd->obd_abort_recovery = 0;
+ stale = class_disconnect_stale_exports(obd, lock_replay_done, 0);
+ abort_lock_replay_queue(obd);
+ }
+
+ /* We drop the recovering flag to forward all new requests
+ * to regular mds_handle() from now on */
+ spin_lock_bh(&obd->obd_processing_task_lock);
+ obd->obd_recovering = 0;
+ spin_unlock_bh(&obd->obd_processing_task_lock);
+
+ /* The third stage: reply on final pings */
+ CWARN("3: final stage - process recovery completion pings\n");
+ while ((req = target_next_final_ping(obd))) {
+ LASSERT(trd->trd_processing_task == current->pid);
+ DEBUG_REQ(D_HA, req, "processing final ping from %s: ",
+ ptlrpc_peernid2str(&req->rq_peer, peer_str));
+ (void)trd->trd_recovery_handler(req);
+ ptlrpc_free_clone(req);
+ }
+
+ CWARN("4: recovery completed - %d/%d reqs/locks replayed\n",
+ obd->obd_replayed_requests, obd->obd_replayed_locks);
+ target_finish_recovery(obd);
+
trd->trd_processing_task = 0;
complete(&trd->trd_finishing);
return 0;
init_completion(&trd->trd_finishing);
trd->trd_recovery_handler = handler;
- if (kernel_thread(target_recovery_thread, obd, 0) == 0)
+ if (kernel_thread(target_recovery_thread, obd, 0) > 0) {
wait_for_completion(&trd->trd_starting);
- else
+ LASSERT(obd->obd_recovering != 0);
+ } else
rc = -ECHILD;
return rc;
}
#endif
+/* Process recovery-progress flags carried on an incoming request.
+ * MSG_REQ_REPLAY_DONE: this export finished request replay - drop it
+ * from the req-replay accounting and wake the recovery thread if it was
+ * the last one.  MSG_LOCK_REPLAY_DONE: likewise for lock replay.
+ * Accounting is updated under obd_processing_task_lock; the per-export
+ * flag check makes each transition count only once.  Always returns 0. */
+int target_process_req_flags(struct obd_device *obd, struct ptlrpc_request *req)
+{
+ struct obd_export *exp = req->rq_export;
+ LASSERT(exp != NULL);
+ if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REQ_REPLAY_DONE) {
+ /* client declares he's ready to replay locks */
+ spin_lock_bh(&obd->obd_processing_task_lock);
+ if (exp->exp_req_replay_needed) {
+ LASSERT(atomic_read(&obd->obd_req_replay_clients) > 0);
+ exp->exp_req_replay_needed = 0;
+ atomic_dec(&obd->obd_req_replay_clients);
+ obd->obd_recoverable_clients--;
+ if (atomic_read(&obd->obd_req_replay_clients) == 0) {
+ CDEBUG(D_HA, "all clients have replayed reqs\n");
+ wake_up(&obd->obd_next_transno_waitq);
+ }
+ }
+ spin_unlock_bh(&obd->obd_processing_task_lock);
+ }
+ if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LOCK_REPLAY_DONE) {
+ /* client declares he's ready to complete recovery;
+ * only update the accounting here - the caller queues
+ * the request on the final queue */
+ spin_lock_bh(&obd->obd_processing_task_lock);
+ if (exp->exp_lock_replay_needed) {
+ LASSERT(atomic_read(&obd->obd_lock_replay_clients) > 0);
+ exp->exp_lock_replay_needed = 0;
+ atomic_dec(&obd->obd_lock_replay_clients);
+ if (atomic_read(&obd->obd_lock_replay_clients) == 0) {
+ CDEBUG(D_HA, "all clients have replayed locks\n");
+ wake_up(&obd->obd_next_transno_waitq);
+ }
+ }
+ spin_unlock_bh(&obd->obd_processing_task_lock);
+ }
+
+ return 0;
+}
+
int target_queue_recovery_request(struct ptlrpc_request *req,
struct obd_device *obd)
{
int inserted = 0;
__u64 transno = req->rq_reqmsg->transno;
+ if (obd->obd_recovery_data.trd_processing_task == current->pid) {
+ /* Processing the queue right now, don't re-add. */
+ return 1;
+ }
+
+ target_process_req_flags(obd, req);
+
+ if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LOCK_REPLAY_DONE) {
+ /* client declares he's ready to complete recovery
+ * so, we put the request on the final queue */
+ req = ptlrpc_clone_req(req);
+ if (req == NULL)
+ return -ENOMEM;
+ DEBUG_REQ(D_HA, req, "queue final req");
+ spin_lock_bh(&obd->obd_processing_task_lock);
+ list_add_tail(&req->rq_list, &obd->obd_final_req_queue);
+ spin_unlock_bh(&obd->obd_processing_task_lock);
+ return 0;
+ }
+ if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REQ_REPLAY_DONE) {
+ /* client declares he's ready to replay locks */
+ req = ptlrpc_clone_req(req);
+ if (req == NULL)
+ return -ENOMEM;
+ DEBUG_REQ(D_HA, req, "queue lock replay req");
+ spin_lock_bh(&obd->obd_processing_task_lock);
+ list_add_tail(&req->rq_list, &obd->obd_lock_replay_queue);
+ spin_unlock_bh(&obd->obd_processing_task_lock);
+ wake_up(&obd->obd_next_transno_waitq);
+ return 0;
+ }
+
+
/* CAVEAT EMPTOR: The incoming request message has been swabbed
* (i.e. buflens etc are in my own byte order), but type-dependent
* buffers (eg mds_body, ost_body etc) have NOT been swabbed. */
* handled will pass through here and be processed immediately.
*/
spin_lock_bh(&obd->obd_processing_task_lock);
- if (obd->obd_recovery_data.trd_processing_task == current->pid ||
- transno < obd->obd_next_recovery_transno) {
+ if (transno < obd->obd_next_recovery_transno && check_for_clients(obd)) {
/* Processing the queue right now, don't re-add. */
LASSERT(list_empty(&req->rq_list));
spin_unlock_bh(&obd->obd_processing_task_lock);
spin_lock_bh(&obd->obd_processing_task_lock);
/* XXX O(n^2) */
- list_for_each(tmp, &obd->obd_recovery_queue) {
+ list_for_each(tmp, &obd->obd_req_replay_queue) {
struct ptlrpc_request *reqiter =
list_entry(tmp, struct ptlrpc_request, rq_list);
}
if (!inserted)
- list_add_tail(&req->rq_list, &obd->obd_recovery_queue);
+ list_add_tail(&req->rq_list, &obd->obd_req_replay_queue);
obd->obd_requests_queued_for_recovery++;
wake_up(&obd->obd_next_transno_waitq);
return req->rq_export->exp_obd;
}
-int target_queue_final_reply(struct ptlrpc_request *req, int rc)
-{
- struct obd_device *obd = target_req2obd(req);
-
- LASSERT ((rc == 0) == (req->rq_reply_state != NULL));
-
- if (rc) {
- /* Just like ptlrpc_error, but without the sending. */
- rc = lustre_pack_reply(req, 0, NULL, NULL);
- LASSERT(rc == 0); /* XXX handle this */
- req->rq_type = PTL_RPC_MSG_ERR;
- }
-
- LASSERT (!req->rq_reply_state->rs_difficult);
- LASSERT(list_empty(&req->rq_list));
-
- req = ptlrpc_clone_req(req);
-
- spin_lock_bh(&obd->obd_processing_task_lock);
-
- list_add(&req->rq_list, &obd->obd_delayed_reply_queue);
-
- /* only count the first "replay over" request from each
- export */
- if (req->rq_export->exp_replay_needed) {
- --obd->obd_recoverable_clients;
- req->rq_export->exp_replay_needed = 0;
- CWARN("%s: %d recoverable clients remain\n",
- obd->obd_name, obd->obd_recoverable_clients);
- }
- wake_up(&obd->obd_next_transno_waitq);
- spin_unlock_bh(&obd->obd_processing_task_lock);
- return 1;
-}
-
int
target_send_reply_msg (struct ptlrpc_request *req, int rc, int fail_id)
{