ptlrpc_connection_put(imp->imp_connection);
imp->imp_connection = NULL;
- dlmexp = class_conn2export(&imp->imp_dlm_handle);
- if (dlmexp && dlmexp->exp_connection) {
- LASSERT(dlmexp->exp_connection ==
- imp_conn->oic_conn);
- ptlrpc_connection_put(dlmexp->exp_connection);
- dlmexp->exp_connection = NULL;
- }
- }
+ dlmexp = class_conn2export(&imp->imp_dlm_handle);
+ if (dlmexp && dlmexp->exp_connection) {
+ LASSERT(dlmexp->exp_connection ==
+ imp_conn->oic_conn);
+ ptlrpc_connection_put(dlmexp->exp_connection);
+ dlmexp->exp_connection = NULL;
+ }
+
+ if (dlmexp != NULL)
+ class_export_put(dlmexp);
+ }
list_del(&imp_conn->oic_item);
ptlrpc_connection_put(imp_conn->oic_conn);
#endif
atomic_set(&cli->cl_resends, OSC_DEFAULT_RESENDS);
- /* This value may be reduced at connect time in
- * ptlrpc_connect_interpret() . We initialize it to only
- * 1MB until we know what the performance looks like.
- * In the future this should likely be increased. LU-1431 */
- cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES,
- LNET_MTU >> PAGE_CACHE_SHIFT);
+ /* Set it to possible maximum size. It may be reduced by ocd_brw_size
+ * from OFD after connecting. */
+ cli->cl_max_pages_per_rpc = PTLRPC_MAX_BRW_PAGES;
/* set cl_chunkbits default value to PAGE_CACHE_SHIFT,
* it will be updated at OSC connection time. */
bool is_mdc = false;
ENTRY;
- *exp = NULL;
+ *exp = NULL;
down_write(&cli->cl_sem);
if (cli->cl_conn_count > 0)
GOTO(out_sem, rc = -EALREADY);
- rc = class_connect(&conn, obd, cluuid);
- if (rc)
- GOTO(out_sem, rc);
+ rc = class_connect(&conn, obd, cluuid);
+ if (rc)
+ GOTO(out_sem, rc);
- cli->cl_conn_count++;
- *exp = class_conn2export(&conn);
+ cli->cl_conn_count++;
+ *exp = class_conn2export(&conn);
- LASSERT(obd->obd_namespace);
+ LASSERT(obd->obd_namespace);
- imp->imp_dlm_handle = conn;
- rc = ptlrpc_init_import(imp);
- if (rc != 0)
- GOTO(out_ldlm, rc);
+ imp->imp_dlm_handle = conn;
+ rc = ptlrpc_init_import(imp);
+ if (rc != 0)
+ GOTO(out_ldlm, rc);
- ocd = &imp->imp_connect_data;
- if (data) {
- *ocd = *data;
+ ocd = &imp->imp_connect_data;
+ if (data) {
+ *ocd = *data;
is_mdc = strncmp(imp->imp_obd->obd_type->typ_name,
LUSTRE_MDC_NAME, 3) == 0;
if (is_mdc)
data->ocd_connect_flags |= OBD_CONNECT_MULTIMODRPCS;
- imp->imp_connect_flags_orig = data->ocd_connect_flags;
- }
+ imp->imp_connect_flags_orig = data->ocd_connect_flags;
+ imp->imp_connect_flags2_orig = data->ocd_connect_flags2;
+ }
- rc = ptlrpc_connect_import(imp);
- if (rc != 0) {
- LASSERT (imp->imp_state == LUSTRE_IMP_DISCON);
- GOTO(out_ldlm, rc);
- }
+ rc = ptlrpc_connect_import(imp);
+ if (rc != 0) {
+ if (data && is_mdc)
+ data->ocd_connect_flags &= ~OBD_CONNECT_MULTIMODRPCS;
+ LASSERT(imp->imp_state == LUSTRE_IMP_DISCON);
+ GOTO(out_ldlm, rc);
+ }
LASSERT(*exp != NULL && (*exp)->exp_connection);
- if (data) {
- LASSERTF((ocd->ocd_connect_flags & data->ocd_connect_flags) ==
- ocd->ocd_connect_flags, "old "LPX64", new "LPX64"\n",
- data->ocd_connect_flags, ocd->ocd_connect_flags);
- data->ocd_connect_flags = ocd->ocd_connect_flags;
+ if (data) {
+ LASSERTF((ocd->ocd_connect_flags & data->ocd_connect_flags) ==
+ ocd->ocd_connect_flags, "old %#llx, new %#llx\n",
+ data->ocd_connect_flags, ocd->ocd_connect_flags);
+ data->ocd_connect_flags = ocd->ocd_connect_flags;
/* clear the flag as it was not set and is not known
* by upper layers */
if (is_mdc)
data->ocd_connect_flags &= ~OBD_CONNECT_MULTIMODRPCS;
- }
+ }
- ptlrpc_pinger_add_import(imp);
+ ptlrpc_pinger_add_import(imp);
- EXIT;
+ EXIT;
- if (rc) {
+ if (rc) {
out_ldlm:
- cli->cl_conn_count--;
- class_disconnect(*exp);
- *exp = NULL;
- }
+ cli->cl_conn_count--;
+ class_disconnect(*exp);
+ *exp = NULL;
+ }
out_sem:
up_write(&cli->cl_sem);
ENTRY;
if (!obd) {
- CERROR("invalid export for disconnect: exp %p cookie "LPX64"\n",
+ CERROR("invalid export for disconnect: exp %p cookie %#llx\n",
exp, exp ? exp->exp_handle.h_cookie : -1);
RETURN(-EINVAL);
}
struct obd_export *exp,
struct obd_uuid *cluuid)
{
+ struct obd_device *target;
struct lustre_handle *hdl;
+ cfs_time_t now;
+ cfs_time_t deadline;
+ int timeout;
+ int rc = 0;
ENTRY;
hdl = &exp->exp_imp_reverse->imp_remote_handle;
- if (exp->exp_connection && lustre_handle_is_used(hdl)) {
- struct obd_device *target;
-
- target = exp->exp_obd;
-
- /* Might be a re-connect after a partition. */
- if (!memcmp(&conn->cookie, &hdl->cookie, sizeof conn->cookie)) {
- if (target->obd_recovering) {
- int timeout = cfs_duration_sec(cfs_time_sub(
- cfs_timer_deadline(
- &target->obd_recovery_timer),
- cfs_time_current()));
-
- LCONSOLE_WARN("%s: Client %s (at %s) reconnect"
- "ing, waiting for %d clients in recov"
- "ery for %d:%.02d\n", target->obd_name,
- obd_uuid2str(&exp->exp_client_uuid),
- obd_export_nid2str(exp),
- target->obd_max_recoverable_clients,
- timeout / 60, timeout % 60);
- } else {
- LCONSOLE_WARN("%s: Client %s (at %s) "
- "reconnecting\n", target->obd_name,
- obd_uuid2str(&exp->exp_client_uuid),
- obd_export_nid2str(exp));
- }
+ if (!exp->exp_connection || !lustre_handle_is_used(hdl)) {
+ conn->cookie = exp->exp_handle.h_cookie;
+ CDEBUG(D_HA, "connect export for UUID '%s' at %p,"
+ " cookie %#llx\n", cluuid->uuid, exp, conn->cookie);
+ RETURN(0);
+ }
- conn->cookie = exp->exp_handle.h_cookie;
- /* target_handle_connect() treats EALREADY and
- * -EALREADY differently. EALREADY means we are
- * doing a valid reconnect from the same client. */
- RETURN(EALREADY);
- } else {
- LCONSOLE_WARN("%s: already connected client %s (at %s) "
- "with handle "LPX64". Rejecting client "
- "with the same UUID trying to reconnect "
- "with handle "LPX64"\n", target->obd_name,
- obd_uuid2str(&exp->exp_client_uuid),
- obd_export_nid2str(exp),
- hdl->cookie, conn->cookie);
- memset(conn, 0, sizeof *conn);
- /* target_handle_connect() treats EALREADY and
- * -EALREADY differently. -EALREADY is an error
- * (same UUID, different handle). */
- RETURN(-EALREADY);
- }
- }
+ target = exp->exp_obd;
+
+ /* Might be a re-connect after a partition. */
+ if (memcmp(&conn->cookie, &hdl->cookie, sizeof conn->cookie)) {
+ LCONSOLE_WARN("%s: already connected client %s (at %s) "
+ "with handle %#llx. Rejecting client "
+ "with the same UUID trying to reconnect "
+ "with handle %#llx\n", target->obd_name,
+ obd_uuid2str(&exp->exp_client_uuid),
+ obd_export_nid2str(exp),
+ hdl->cookie, conn->cookie);
+ memset(conn, 0, sizeof *conn);
+ /* target_handle_connect() treats EALREADY and
+ * -EALREADY differently. -EALREADY is an error
+ * (same UUID, different handle). */
+ RETURN(-EALREADY);
+ }
- conn->cookie = exp->exp_handle.h_cookie;
- CDEBUG(D_HA, "connect export for UUID '%s' at %p, cookie "LPX64"\n",
- cluuid->uuid, exp, conn->cookie);
- RETURN(0);
+ if (!target->obd_recovering) {
+ LCONSOLE_WARN("%s: Client %s (at %s) reconnecting\n",
+ target->obd_name, obd_uuid2str(&exp->exp_client_uuid),
+ obd_export_nid2str(exp));
+ GOTO(out_already, rc);
+ }
+
+ now = cfs_time_current();
+ deadline = cfs_timer_deadline(&target->obd_recovery_timer);
+ if (cfs_time_before(now, deadline)) {
+ timeout = cfs_duration_sec(cfs_time_sub(deadline, now));
+ LCONSOLE_WARN("%s: Client %s (at %s) reconnecting,"
+ " waiting for %d clients in recovery for"
+ " %d:%.02d\n", target->obd_name,
+ obd_uuid2str(&exp->exp_client_uuid),
+ obd_export_nid2str(exp),
+ target->obd_max_recoverable_clients,
+ timeout / 60, timeout % 60);
+ } else {
+ timeout = cfs_duration_sec(cfs_time_sub(now, deadline));
+		LCONSOLE_WARN("%s: Recovery already passed deadline"
+			      " %d:%.02d. It is most likely because DNE"
+			      " recovery failed or is stuck; please wait a"
+			      " few more minutes or abort the recovery.\n",
+ target->obd_name, timeout / 60, timeout % 60);
+ }
+
+out_already:
+ conn->cookie = exp->exp_handle.h_cookie;
+ /* target_handle_connect() treats EALREADY and
+ * -EALREADY differently. EALREADY means we are
+ * doing a valid reconnect from the same client. */
+ RETURN(EALREADY);
}
void target_client_add_cb(struct obd_device *obd, __u64 transno, void *cb_data,
class_export_put(export);
export = NULL;
rc = -EALREADY;
- } else if ((mds_conn || lw_client) && export->exp_connection != NULL) {
+ } else if ((mds_conn || lw_client ||
+ data->ocd_connect_flags & OBD_CONNECT_MDS_MDS) &&
+ export->exp_connection != NULL) {
spin_unlock(&export->exp_lock);
- if (req->rq_peer.nid != export->exp_connection->c_peer.nid)
+ if (req->rq_peer.nid != export->exp_connection->c_peer.nid) {
/* MDS or LWP reconnected after failover. */
LCONSOLE_WARN("%s: Received %s connection from "
"%s, removing former export from %s\n",
target->obd_name, mds_conn ? "MDS" : "LWP",
libcfs_nid2str(req->rq_peer.nid),
libcfs_nid2str(export->exp_connection->c_peer.nid));
- else
+ } else {
/* New MDS connection from the same NID. */
LCONSOLE_WARN("%s: Received new %s connection from "
"%s, removing former export from same NID\n",
target->obd_name, mds_conn ? "MDS" : "LWP",
libcfs_nid2str(req->rq_peer.nid));
- class_fail_export(export);
- class_export_put(export);
- export = NULL;
- rc = 0;
+ }
+
+ if (req->rq_peer.nid == export->exp_connection->c_peer.nid &&
+ data->ocd_connect_flags & OBD_CONNECT_MDS_MDS) {
+			/* Because exports between MDTs will always be
+			 * kept, let's not fail such exports if they
+			 * come from the same NID; otherwise it might
+			 * cause eviction between MDTs, which might
+			 * cause namespace inconsistency */
+ spin_lock(&export->exp_lock);
+ export->exp_connecting = 1;
+ spin_unlock(&export->exp_lock);
+ conn.cookie = export->exp_handle.h_cookie;
+ rc = EALREADY;
+ } else {
+ class_fail_export(export);
+ class_export_put(export);
+ export = NULL;
+ rc = 0;
+ }
} else if (export->exp_connection != NULL &&
req->rq_peer.nid != export->exp_connection->c_peer.nid &&
(lustre_msg_get_op_flags(req->rq_reqmsg) &
GOTO(out, rc);
}
- CDEBUG(D_HA, "%s: connection from %s@%s %st"LPU64" exp %p cur %ld last %ld\n",
+ CDEBUG(D_HA, "%s: connection from %s@%s %st%llu exp %p cur %ld last %ld\n",
target->obd_name, cluuid.uuid, libcfs_nid2str(req->rq_peer.nid),
target->obd_recovering ? "recovering/" : "", data->ocd_transno,
export, (long)cfs_time_current_sec(),
}
spin_unlock(&obd->obd_recovery_task_lock);
- if (lut->lut_tdtd != NULL &&
- (!list_empty(&lut->lut_tdtd->tdtd_replay_list) ||
- !list_empty(&lut->lut_tdtd->tdtd_replay_finish_list))) {
- dtrq_list_dump(lut->lut_tdtd, D_ERROR);
- dtrq_list_destroy(lut->lut_tdtd);
- }
-
obd->obd_recovery_end = cfs_time_current_sec();
/* When recovery finished, cleanup orphans on MDS and OST. */
return;
}
obd->obd_recovering = obd->obd_abort_recovery = 0;
- obd->obd_force_abort_recovery = 0;
spin_unlock(&obd->obd_dev_lock);
spin_lock(&obd->obd_recovery_task_lock);
return;
spin_lock(&obd->obd_dev_lock);
- if (!obd->obd_recovering || obd->obd_abort_recovery ||
- obd->obd_force_abort_recovery) {
+ if (!obd->obd_recovering || obd->obd_abort_recovery) {
spin_unlock(&obd->obd_dev_lock);
return;
}
int to;
spin_lock(&obd->obd_dev_lock);
- if (!obd->obd_recovering || obd->obd_abort_recovery ||
- obd->obd_force_abort_recovery) {
+ if (!obd->obd_recovering || obd->obd_abort_recovery) {
spin_unlock(&obd->obd_dev_lock);
return;
}
return (!exp->exp_req_replay_needed ||
atomic_read(&exp->exp_replay_count) > 0);
}
+
+
+static inline int exp_req_replay_healthy_or_from_mdt(struct obd_export *exp)
+{
+ return (exp_connect_flags(exp) & OBD_CONNECT_MDS_MDS) ||
+ exp_req_replay_healthy(exp);
+}
+
/** if export done lock_replay or has replay in queue */
static inline int exp_lock_replay_healthy(struct obd_export *exp)
{
return (exp->exp_in_recovery && !exp->exp_lock_replay_needed);
}
+static inline int exp_finished_or_from_mdt(struct obd_export *exp)
+{
+ return (exp_connect_flags(exp) & OBD_CONNECT_MDS_MDS) ||
+ exp_finished(exp);
+}
+
static int check_for_next_transno(struct lu_target *lut)
{
struct ptlrpc_request *req = NULL;
next_transno = obd->obd_next_recovery_transno;
CDEBUG(D_HA, "max: %d, connected: %d, completed: %d, queue_len: %d, "
- "req_transno: "LPU64", next_transno: "LPU64"\n",
+ "req_transno: %llu, next_transno: %llu\n",
obd->obd_max_recoverable_clients, connected, completed,
queue_len, req_transno, next_transno);
- if (obd->obd_abort_recovery || obd->obd_force_abort_recovery) {
+ if (obd->obd_abort_recovery) {
CDEBUG(D_HA, "waking for aborted recovery\n");
wake_up = 1;
} else if (obd->obd_recovery_expired) {
wake_up = 1;
} else if (tdtd != NULL && req != NULL &&
is_req_replayed_by_update(req)) {
- LASSERTF(req_transno < next_transno, "req_transno "LPU64
- "next_transno"LPU64"\n", req_transno, next_transno);
- CDEBUG(D_HA, "waking for duplicate req ("LPU64")\n",
+ LASSERTF(req_transno < next_transno, "req_transno %llu"
+ "next_transno%llu\n", req_transno, next_transno);
+ CDEBUG(D_HA, "waking for duplicate req (%llu)\n",
req_transno);
wake_up = 1;
} else if (req_transno == next_transno ||
(update_transno != 0 && update_transno <= next_transno)) {
- CDEBUG(D_HA, "waking for next ("LPD64")\n", next_transno);
+ CDEBUG(D_HA, "waking for next (%lld)\n", next_transno);
wake_up = 1;
} else if (queue_len > 0 &&
queue_len == atomic_read(&obd->obd_req_replay_clients)) {
- int d_lvl = D_HA;
/** handle gaps occured due to lost reply or VBR */
LASSERTF(req_transno >= next_transno,
- "req_transno: "LPU64", next_transno: "LPU64"\n",
+ "req_transno: %llu, next_transno: %llu\n",
req_transno, next_transno);
- if (req_transno > obd->obd_last_committed &&
- !obd->obd_version_recov)
- d_lvl = D_ERROR;
- CDEBUG(d_lvl,
+ CDEBUG(D_HA,
"%s: waking for gap in transno, VBR is %s (skip: "
- LPD64", ql: %d, comp: %d, conn: %d, next: "LPD64
- ", next_update "LPD64" last_committed: "LPD64")\n",
+ "%lld, ql: %d, comp: %d, conn: %d, next: %lld"
+ ", next_update %lld last_committed: %lld)\n",
obd->obd_name, obd->obd_version_recov ? "ON" : "OFF",
next_transno, queue_len, completed, connected,
req_transno, update_transno, obd->obd_last_committed);
wake_up = 1;
} else if (OBD_FAIL_CHECK(OBD_FAIL_MDS_RECOVERY_ACCEPTS_GAPS)) {
CDEBUG(D_HA, "accepting transno gaps is explicitly allowed"
- " by fail_lock, waking up ("LPD64")\n", next_transno);
+ " by fail_lock, waking up (%lld)\n", next_transno);
obd->obd_next_recovery_transno = req_transno;
wake_up = 1;
}
} else if (atomic_read(&obd->obd_lock_replay_clients) == 0) {
CDEBUG(D_HA, "waking for completed lock replay\n");
wake_up = 1;
- } else if (obd->obd_abort_recovery || obd->obd_force_abort_recovery) {
+ } else if (obd->obd_abort_recovery) {
CDEBUG(D_HA, "waking for aborted recovery\n");
wake_up = 1;
} else if (obd->obd_recovery_expired) {
int (*health_check)(struct obd_export *))
{
struct obd_device *obd = lut->lut_obd;
+ struct target_distribute_txn_data *tdtd;
repeat:
if ((obd->obd_recovery_start != 0) && (cfs_time_current_sec() >=
(obd->obd_recovery_start + obd->obd_recovery_time_hard))) {
- CWARN("recovery is aborted by hard timeout\n");
- obd->obd_abort_recovery = 1;
+ __u64 next_update_transno = 0;
+
+	/* Only abort the recovery if there are no update recovery
+	 * requests left in the queue */
+ spin_lock(&obd->obd_recovery_task_lock);
+ if (lut->lut_tdtd != NULL) {
+ next_update_transno =
+ distribute_txn_get_next_transno(lut->lut_tdtd);
+
+ tdtd = lut->lut_tdtd;
+			/* If next_update_transno == 0, it is probably
+			 * because the update log retrieval threads have not
+			 * gotten any records yet; wait until those threads
+			 * have stopped */
+ if (next_update_transno == 0) {
+ struct l_wait_info lwi = { 0 };
+
+ l_wait_event(tdtd->tdtd_recovery_threads_waitq,
+ atomic_read(
+ &tdtd->tdtd_recovery_threads_count) == 0,
+ &lwi);
+
+ next_update_transno =
+ distribute_txn_get_next_transno(
+ lut->lut_tdtd);
+ }
+ }
+
+ if (next_update_transno != 0 && !obd->obd_abort_recovery) {
+ obd->obd_next_recovery_transno = next_update_transno;
+ spin_unlock(&obd->obd_recovery_task_lock);
+			/* Disconnect unfinished exports from clients, and
+			 * keep connections from MDTs to make sure the update
+			 * recovery will keep trying until someone manually
+			 * aborts the recovery */
+ class_disconnect_stale_exports(obd,
+ exp_finished_or_from_mdt);
+			/* Abort all replay and lock-replay requests from
+			 * clients */
+ abort_req_replay_queue(obd);
+ abort_lock_replay_queue(obd);
+			CDEBUG(D_HA, "%s: there are still update replay (%#llx"
+			       ") in the queue.\n", obd->obd_name,
+			       next_update_transno);
+ } else {
+ obd->obd_abort_recovery = 1;
+ spin_unlock(&obd->obd_recovery_task_lock);
+ CWARN("%s recovery is aborted by hard timeout\n",
+ obd->obd_name);
+ }
}
while (wait_event_timeout(obd->obd_next_transno_waitq,
msecs_to_jiffies(60 * MSEC_PER_SEC)) == 0)
/* wait indefinitely for event, but don't trigger watchdog */;
- if (obd->obd_abort_recovery || obd->obd_force_abort_recovery) {
+ if (obd->obd_abort_recovery) {
CWARN("recovery is aborted, evict exports in recovery\n");
+ if (lut->lut_tdtd != NULL) {
+ struct l_wait_info lwi = { 0 };
+
+ tdtd = lut->lut_tdtd;
+			/* Wait until all of the update log recovery threads
+			 * have finished */
+ l_wait_event(tdtd->tdtd_recovery_threads_waitq,
+ atomic_read(&tdtd->tdtd_recovery_threads_count) == 0,
+ &lwi);
+ /* Then abort the update recovery list */
+ dtrq_list_dump(lut->lut_tdtd, D_ERROR);
+ dtrq_list_destroy(lut->lut_tdtd);
+ }
+
/** evict exports which didn't finish recovery yet */
class_disconnect_stale_exports(obd, exp_finished);
return 1;
"evict stale exports\n", obd->obd_name);
/** evict cexports with no replay in queue, they are stalled */
class_disconnect_stale_exports(obd, health_check);
+
/** continue with VBR */
spin_lock(&obd->obd_dev_lock);
obd->obd_version_recov = 1;
obd->obd_max_recoverable_clients, obd->obd_abort_recovery,
obd->obd_recovery_expired);
- if (obd->obd_force_abort_recovery)
- return 1;
-
if (!obd->obd_abort_recovery && !obd->obd_recovery_expired) {
LASSERT(clnts <= obd->obd_max_recoverable_clients);
if (clnts + obd->obd_stale_clients <
}
if (lut->lut_tdtd != NULL) {
- if (!lut->lut_tdtd->tdtd_replay_ready) {
+ if (!lut->lut_tdtd->tdtd_replay_ready &&
+ !obd->obd_abort_recovery) {
/* Let's extend recovery timer, in case the recovery
* timer expired, and some clients got evicted */
extend_recovery_timer(obd, obd->obd_recovery_timeout,
true);
+ CDEBUG(D_HA, "%s update recovery is not ready,"
+ " extend recovery %d\n", obd->obd_name,
+ obd->obd_recovery_timeout);
return 0;
} else {
dtrq_list_dump(lut->lut_tdtd, D_HA);
return transno;
}
-__u64 get_next_transno(struct lu_target *lut, int *type)
+
+static __u64 get_next_transno(struct lu_target *lut, int *type)
{
struct obd_device *obd = lut->lut_obd;
struct target_distribute_txn_data *tdtd = lut->lut_tdtd;
struct obd_device *obd,
struct ptlrpc_request *req)
{
- DEBUG_REQ(D_HA, req, "remove t"LPD64" from %s because of duplicate"
+ DEBUG_REQ(D_HA, req, "remove t%lld from %s because of duplicate"
" update records are found.\n",
lustre_msg_get_transno(req->rq_reqmsg),
libcfs_nid2str(req->rq_peer.nid));
__u64 transno;
ENTRY;
- CDEBUG(D_HA, "Waiting for transno "LPD64"\n",
+ CDEBUG(D_HA, "Waiting for transno %lld\n",
obd->obd_next_recovery_transno);
/* Replay all of request and update by transno */
CFS_FAIL_TIMEOUT_MS(OBD_FAIL_TGT_REPLAY_DELAY, 300);
if (target_recovery_overseer(lut, check_for_next_transno,
- exp_req_replay_healthy)) {
+ exp_req_replay_healthy_or_from_mdt)) {
abort_req_replay_queue(obd);
abort_lock_replay_queue(obd);
+ goto abort;
}
spin_lock(&obd->obd_recovery_task_lock);
}
LASSERT(trd->trd_processing_task == current_pid());
- DEBUG_REQ(D_HA, req, "processing t"LPD64" from %s",
+ DEBUG_REQ(D_HA, req, "processing t%lld from %s",
lustre_msg_get_transno(req->rq_reqmsg),
libcfs_nid2str(req->rq_peer.nid));
obd->obd_replayed_requests++;
} else if (type == UPDATE_RECOVERY && transno != 0) {
struct distribute_txn_replay_req *dtrq;
- bool update_transno = false;
+ int rc;
spin_unlock(&obd->obd_recovery_task_lock);
LASSERT(tdtd != NULL);
dtrq = distribute_txn_get_next_req(tdtd);
lu_context_enter(&thread->t_env->le_ctx);
- tdtd->tdtd_replay_handler(env, tdtd, dtrq);
+ rc = tdtd->tdtd_replay_handler(env, tdtd, dtrq);
lu_context_exit(&thread->t_env->le_ctx);
extend_recovery_timer(obd, obd_timeout, true);
- /* Add it to the replay finish list */
- spin_lock(&tdtd->tdtd_replay_list_lock);
- if (dtrq->dtrq_xid != 0) {
- CDEBUG(D_HA, "Move x"LPU64" t"LPU64
+ if (rc == 0 && dtrq->dtrq_xid != 0) {
+ CDEBUG(D_HA, "Move x%llu t%llu"
" to finish list\n", dtrq->dtrq_xid,
dtrq->dtrq_master_transno);
+
+ /* Add it to the replay finish list */
+ spin_lock(&tdtd->tdtd_replay_list_lock);
list_add(&dtrq->dtrq_list,
&tdtd->tdtd_replay_finish_list);
- update_transno = true;
- } else {
- dtrq_destroy(dtrq);
- }
- spin_unlock(&tdtd->tdtd_replay_list_lock);
+ spin_unlock(&tdtd->tdtd_replay_list_lock);
- if (update_transno) {
spin_lock(&obd->obd_recovery_task_lock);
if (transno == obd->obd_next_recovery_transno)
obd->obd_next_recovery_transno++;
obd->obd_next_recovery_transno =
transno + 1;
spin_unlock(&obd->obd_recovery_task_lock);
+ } else {
+ dtrq_destroy(dtrq);
}
} else {
spin_unlock(&obd->obd_recovery_task_lock);
+abort:
LASSERT(list_empty(&obd->obd_req_replay_queue));
LASSERT(atomic_read(&obd->obd_req_replay_clients) == 0);
/** evict exports failed VBR */
/* next stage: replay requests or update */
delta = jiffies;
- CDEBUG(D_INFO, "1: request replay stage - %d clients from t"LPU64"\n",
+ CDEBUG(D_INFO, "1: request replay stage - %d clients from t%llu\n",
atomic_read(&obd->obd_req_replay_clients),
obd->obd_next_recovery_transno);
replay_request_or_update(env, lut, trd, thread);
}
CDEBUG(D_HA, "RECOVERY: service %s, %d recoverable clients, "
- "last_transno "LPU64"\n", obd->obd_name,
+ "last_transno %llu\n", obd->obd_name,
obd->obd_max_recoverable_clients, obd->obd_last_committed);
LASSERT(obd->obd_stopping == 0);
obd->obd_next_recovery_transno = obd->obd_last_committed + 1;
wake_up(&obd->obd_next_transno_waitq);
spin_lock(&obd->obd_recovery_task_lock);
if (obd->obd_recovering) {
+ struct ptlrpc_request *tmp;
+ struct ptlrpc_request *duplicate = NULL;
+
+ if (likely(!req->rq_export->exp_replay_done)) {
+ req->rq_export->exp_replay_done = 1;
+ list_add_tail(&req->rq_list,
+ &obd->obd_final_req_queue);
+ spin_unlock(&obd->obd_recovery_task_lock);
+ RETURN(0);
+ }
+
+ /* XXX O(n), but only happens if final ping is
+ * timed out, probably reorganize the list as
+ * a hash list later */
+ list_for_each_entry_safe(reqiter, tmp,
+ &obd->obd_final_req_queue,
+ rq_list) {
+ if (reqiter->rq_export == req->rq_export) {
+ list_del_init(&reqiter->rq_list);
+ duplicate = reqiter;
+ break;
+ }
+ }
+
list_add_tail(&req->rq_list,
- &obd->obd_final_req_queue);
+ &obd->obd_final_req_queue);
+ req->rq_export->exp_replay_done = 1;
+ spin_unlock(&obd->obd_recovery_task_lock);
+
+ if (duplicate != NULL) {
+ DEBUG_REQ(D_HA, duplicate,
+ "put prev final req\n");
+ target_request_copy_put(duplicate);
+ }
+ RETURN(0);
} else {
spin_unlock(&obd->obd_recovery_task_lock);
target_request_copy_put(req);
RETURN(obd->obd_stopping ? -ENOTCONN : 1);
}
- spin_unlock(&obd->obd_recovery_task_lock);
- RETURN(0);
}
if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REQ_REPLAY_DONE) {
/* client declares he's ready to replay locks */
* Also, a resent, replayed request that has already been
* handled will pass through here and be processed immediately.
*/
- CDEBUG(D_HA, "Next recovery transno: "LPU64
- ", current: "LPU64", replaying\n",
+ CDEBUG(D_HA, "Next recovery transno: %llu"
+ ", current: %llu, replaying\n",
obd->obd_next_recovery_transno, transno);
/* If the request has been replayed by update replay, then sends this
"%d)", exp->exp_obd->obd_no_transno,
req->rq_repmsg == NULL);
- CDEBUG(D_INFO, "last_committed "LPU64", transno "LPU64", xid "LPU64"\n",
+ CDEBUG(D_INFO, "last_committed %llu, transno %llu, xid %llu\n",
exp->exp_last_committed, req->rq_transno, req->rq_xid);
}
rs->rs_opc = lustre_msg_get_opc(req->rq_reqmsg);
spin_lock(&exp->exp_uncommitted_replies_lock);
- CDEBUG(D_NET, "rs transno = "LPU64", last committed = "LPU64"\n",
+ CDEBUG(D_NET, "rs transno = %llu, last committed = %llu\n",
rs->rs_transno, exp->exp_last_committed);
if (rs->rs_transno > exp->exp_last_committed) {
/* not committed already */