name, obddev->obd_name,
cli->cl_target_uuid.uuid);
spin_lock(&imp->imp_lock);
- imp->imp_invalid = 1;
+ imp->imp_deactive = 1;
spin_unlock(&imp->imp_lock);
}
}
+ obddev->obd_namespace = ldlm_namespace_new(obddev, obddev->obd_name,
+ LDLM_NAMESPACE_CLIENT,
+ LDLM_NAMESPACE_GREEDY);
+ if (obddev->obd_namespace == NULL) {
+ CERROR("Unable to create client namespace - %s\n",
+ obddev->obd_name);
+ GOTO(err_import, rc = -ENOMEM);
+ }
+
cli->cl_qchk_stat = CL_NOT_QUOTACHECKED;
RETURN(rc);
int client_obd_cleanup(struct obd_device *obddev)
{
ENTRY;
+
+ ldlm_namespace_free_post(obddev->obd_namespace);
+ obddev->obd_namespace = NULL;
+
ldlm_put_ref();
RETURN(0);
}
struct client_obd *cli = &obd->u.cli;
struct obd_import *imp = cli->cl_import;
struct obd_connect_data *ocd;
- struct ldlm_namespace *to_be_freed = NULL;
struct lustre_handle conn = { 0 };
int rc;
ENTRY;
rc = class_connect(&conn, obd, cluuid);
if (rc)
GOTO(out_sem, rc);
-
+
cli->cl_conn_count++;
*exp = class_conn2export(&conn);
- if (obd->obd_namespace != NULL)
- CERROR("already have namespace!\n");
-
- /*
- * Deadlock case - bug 18380
- */
- up_write(&cli->cl_sem);
- obd->obd_namespace = ldlm_namespace_new(obd, obd->obd_name,
- LDLM_NAMESPACE_CLIENT,
- LDLM_NAMESPACE_GREEDY);
- down_write(&cli->cl_sem);
- if (obd->obd_namespace == NULL)
- GOTO(out_disco, rc = -ENOMEM);
+ LASSERT(obd->obd_namespace);
imp->imp_dlm_handle = conn;
rc = ptlrpc_init_import(imp);
if (rc) {
out_ldlm:
- ldlm_namespace_free_prior(obd->obd_namespace, imp, 0);
- to_be_freed = obd->obd_namespace;
- obd->obd_namespace = NULL;
-out_disco:
cli->cl_conn_count--;
class_disconnect(*exp);
*exp = NULL;
}
out_sem:
up_write(&cli->cl_sem);
- if (to_be_freed)
- ldlm_namespace_free_post(to_be_freed);
return rc;
}
struct client_obd *cli;
struct obd_import *imp;
int rc = 0, err;
- struct ldlm_namespace *to_be_freed = NULL;
ENTRY;
if (!obd) {
imp = cli->cl_import;
down_write(&cli->cl_sem);
+ CDEBUG(D_INFO, "disconnect %s - %d\n", obd->obd_name,
+ cli->cl_conn_count);
+
if (!cli->cl_conn_count) {
CERROR("disconnecting disconnected device (%s)\n",
obd->obd_name);
- GOTO(out_sem, rc = -EINVAL);
+ GOTO(out_disconnect, rc = -EINVAL);
}
cli->cl_conn_count--;
if (cli->cl_conn_count)
- GOTO(out_no_disconnect, rc = 0);
+ GOTO(out_disconnect, rc = 0);
/* Mark import deactivated now, so we don't try to reconnect if any
* of the cleanup RPCs fails (e.g. ldlm cancel, etc). We don't
obd->obd_force ? LDLM_FL_LOCAL_ONLY:0,
NULL);
ldlm_namespace_free_prior(obd->obd_namespace, imp, obd->obd_force);
- to_be_freed = obd->obd_namespace;
}
/*
down_write(&cli->cl_sem);
ptlrpc_invalidate_import(imp);
- /* set obd_namespace to NULL only after invalidate, because we can have
- * some connect requests in flight, and his need store a connect flags
- * in obd_namespace. bug 14260 */
- obd->obd_namespace = NULL;
if (imp->imp_rq_pool) {
ptlrpc_free_rq_pool(imp->imp_rq_pool);
cli->cl_import = NULL;
EXIT;
- out_no_disconnect:
+
+ out_disconnect:
+ /* use server style - class_disconnect should always be called for
+ * o_disconnect */
err = class_disconnect(exp);
if (!rc && err)
rc = err;
- out_sem:
+
up_write(&cli->cl_sem);
- if (to_be_freed)
- ldlm_namespace_free_post(to_be_freed);
RETURN(rc);
}
GOTO(out, rc = -EBUSY);
} else if (req->rq_export != NULL &&
(atomic_read(&export->exp_rpc_count) > 1)) {
+ /* the current connect rpc has increased exp_rpc_count */
CWARN("%s: refuse reconnection from %s@%s to 0x%p/%d\n",
target->obd_name, cluuid.uuid,
libcfs_nid2str(req->rq_peer.nid),
- export, atomic_read(&export->exp_rpc_count));
+ export, atomic_read(&export->exp_rpc_count) - 1);
+ spin_lock(&export->exp_lock);
+ if (req->rq_export->exp_conn_cnt <
+ lustre_msg_get_conn_cnt(req->rq_reqmsg))
+ /* try to abort active requests */
+ req->rq_export->exp_abort_active_req = 1;
+ spin_unlock(&export->exp_lock);
GOTO(out, rc = -EBUSY);
} else if (lustre_msg_get_conn_cnt(req->rq_reqmsg) == 1) {
CERROR("%s: NID %s (%s) reconnected with 1 conn_cnt; "
"%d clients in recovery for "CFS_TIME_T"s\n",
target->obd_name,
libcfs_nid2str(req->rq_peer.nid), cluuid.uuid,
- target->obd_recoverable_clients,
+ atomic_read(&target->obd_lock_replay_clients),
cfs_duration_sec(t));
rc = -EBUSY;
} else {
GOTO(out, rc = -EALREADY);
}
export->exp_conn_cnt = lustre_msg_get_conn_cnt(req->rq_reqmsg);
+ export->exp_abort_active_req = 0;
/* request from liblustre? Don't evict it for not pinging. */
if (lustre_msg_get_op_flags(req->rq_reqmsg) & MSG_CONNECT_LIBCLIENT) {
&& data->ocd_transno < target->obd_next_recovery_transno)
target->obd_next_recovery_transno = data->ocd_transno;
target->obd_connected_clients++;
- /* each connected client is counted as recoverable */
- target->obd_recoverable_clients++;
atomic_inc(&target->obd_req_replay_clients);
atomic_inc(&target->obd_lock_replay_clients);
if (target->obd_connected_clients ==
obd_set_info_async(export, sizeof(KEY_REVIMP_UPD),
KEY_REVIMP_UPD, 0, NULL, NULL);
- /* in some recovery senarios, previous ctx init rpc handled
- * in sptlrpc_target_export_check() might be used to install
- * a reverse ctx in this reverse import, and later OBD_CONNECT
- * using the same gss ctx could reach here and following new
- * reverse import. note all reverse ctx in new/old import are
- * actually based on the same gss ctx. so we invalidate ctx
- * here before destroy import, otherwise flush old import will
- * lead to remote reverse ctx be destroied, thus the reverse
- * ctx of new import will lost its peer.
- * there might be a better way to deal with this???
- */
- sptlrpc_import_inval_all_ctx(export->exp_imp_reverse);
-
client_destroy_import(export->exp_imp_reverse);
}
revimp->imp_remote_handle = conn;
revimp->imp_dlm_fake = 1;
revimp->imp_state = LUSTRE_IMP_FULL;
- /* this is a bit of a layering violation, but much less risk than
- * changing this very complex and race-prone code. bug=16839 */
- if (data->ocd_connect_flags & OBD_CONNECT_MDS)
- obd_set_info_async(export, sizeof(KEY_MDS_CONN), KEY_MDS_CONN,
- 0, NULL, NULL);
/* unknown versions will be caught in
* ptlrpc_handle_server_req_in->lustre_unpack_msg() */
if (exp->exp_imp_reverse != NULL)
client_destroy_import(exp->exp_imp_reverse);
- /* We cancel locks at disconnect time, but this will catch any locks
- * granted in a race with recovery-induced disconnect. */
- if (exp->exp_obd->obd_namespace != NULL)
- ldlm_cancel_locks_for_export(exp);
+ LASSERT(atomic_read(&exp->exp_locks_count) == 0);
+ LASSERT(atomic_read(&exp->exp_rpc_count) == 0);
+ LASSERT(atomic_read(&exp->exp_cb_count) == 0);
+ LASSERT(atomic_read(&exp->exp_replay_count) == 0);
}
/*
orig_req->rq_repmsg = NULL;
orig_req->rq_replen = 0;
}
-
+ /** let export know it has replays to be handled */
+ atomic_inc(©_req->rq_export->exp_replay_count);
return copy_req;
}
ptlrpc_req_drop_rs(req);
sptlrpc_svc_ctx_decref(req);
+ LASSERT(atomic_read(&req->rq_export->exp_replay_count) > 0);
+ atomic_dec(&req->rq_export->exp_replay_count);
class_export_rpc_put(req->rq_export);
list_del_init(&req->rq_list);
/* when recovery finished, cleanup orphans on mds and ost */
if (OBT(obd) && OBP(obd, postrecov)) {
int rc = OBP(obd, postrecov)(obd);
- LCONSOLE_WARN("%s: recovery %s: rc %d\n", obd->obd_name,
- rc < 0 ? "failed" : "complete", rc);
+ if (rc < 0)
+ LCONSOLE_WARN("%s: Post recovery failed, rc %d\n",
+ obd->obd_name, rc);
}
obd->obd_recovery_end = cfs_time_current_sec();
spin_unlock_bh(&obd->obd_processing_task_lock);
return;
}
- CWARN("%s: starting recovery timer\n", obd->obd_name);
+ CDEBUG(D_HA, "%s: starting recovery timer\n", obd->obd_name);
obd->obd_recovery_start = cfs_time_current_sec();
/* minimum */
obd->obd_recovery_timeout = OBD_RECOVERY_FACTOR * obd_timeout;
}
#ifdef __KERNEL__
+
+/** Health checking routines */
+/* NOTE(review): "healthy" here means the export has reached recovery,
+ * i.e. exp_in_recovery is set; used to evict clients that never
+ * (re)connected during the connect-wait stage. */
+static inline int exp_connect_healthy(struct obd_export *exp)
+{
+ return (exp->exp_in_recovery);
+}
+
+/** healthy if the export finished request replay, or still has a
+ * replay request queued (exp_replay_count > 0) */
+static inline int exp_req_replay_healthy(struct obd_export *exp)
+{
+ return (!exp->exp_req_replay_needed ||
+ atomic_read(&exp->exp_replay_count) > 0);
+}
+/** healthy if the export finished lock replay, or still has a replay
+ * request queued; note this uses the same exp_replay_count counter as
+ * the request-replay check above */
+static inline int exp_lock_replay_healthy(struct obd_export *exp)
+{
+ return (!exp->exp_lock_replay_needed ||
+ atomic_read(&exp->exp_replay_count) > 0);
+}
+
+/** healthy if version based recovery (VBR) has not failed for this export */
+static inline int exp_vbr_healthy(struct obd_export *exp)
+{
+ return (!exp->exp_vbr_failed);
+}
+
+/** export has completed recovery: it entered recovery and has no lock
+ * replay left pending */
+static inline int exp_finished(struct obd_export *exp)
+{
+ return (exp->exp_in_recovery && !exp->exp_lock_replay_needed);
+}
+
+/** Checking routines for recovery */
+/**
+ * Return 1 when the "wait for clients to connect" stage may end:
+ * recovery was aborted or expired, or every recoverable client is
+ * either connected or already declared stale.
+ */
+static int check_for_clients(struct obd_device *obd)
+{
+ if (obd->obd_abort_recovery || obd->obd_recovery_expired)
+ return 1;
+ LASSERT(obd->obd_connected_clients <= obd->obd_max_recoverable_clients);
+ if (obd->obd_no_conn == 0 &&
+ obd->obd_connected_clients + obd->obd_stale_clients ==
+ obd->obd_max_recoverable_clients)
+ return 1;
+ return 0;
+}
+
static int check_for_next_transno(struct obd_device *obd)
{
struct ptlrpc_request *req = NULL;
- int wake_up = 0, connected, completed, queue_len, max;
+ int wake_up = 0, connected, completed, queue_len;
__u64 next_transno, req_transno;
ENTRY;
spin_lock_bh(&obd->obd_processing_task_lock);
req_transno = 0;
}
- max = obd->obd_max_recoverable_clients;
connected = obd->obd_connected_clients;
- completed = connected - obd->obd_recoverable_clients;
+ completed = connected - atomic_read(&obd->obd_req_replay_clients);
queue_len = obd->obd_requests_queued_for_recovery;
next_transno = obd->obd_next_recovery_transno;
CDEBUG(D_HA, "max: %d, connected: %d, completed: %d, queue_len: %d, "
"req_transno: "LPU64", next_transno: "LPU64"\n",
- max, connected, completed, queue_len, req_transno, next_transno);
+ obd->obd_max_recoverable_clients, connected, completed,
+ queue_len, req_transno, next_transno);
if (obd->obd_abort_recovery) {
CDEBUG(D_HA, "waking for aborted recovery\n");
wake_up = 1;
+ } else if (obd->obd_recovery_expired) {
+ CDEBUG(D_HA, "waking for expired recovery\n");
+ wake_up = 1;
} else if (atomic_read(&obd->obd_req_replay_clients) == 0) {
CDEBUG(D_HA, "waking for completed recovery\n");
wake_up = 1;
} else if (req_transno == next_transno) {
CDEBUG(D_HA, "waking for next ("LPD64")\n", next_transno);
wake_up = 1;
- } else if (queue_len + completed == max) {
- /* handle gaps occured due to lost reply. It is allowed gaps
- * because all clients are connected and there will be resend
- * for missed transaction */
+ } else if (queue_len == atomic_read(&obd->obd_req_replay_clients)) {
+ int d_lvl = D_HA;
+ /** handle gaps that occurred due to lost reply or VBR */
LASSERTF(req_transno >= next_transno,
"req_transno: "LPU64", next_transno: "LPU64"\n",
req_transno, next_transno);
-
- CDEBUG(req_transno > obd->obd_last_committed ? D_ERROR : D_HA,
- "waking for skipped transno (skip: "LPD64
- ", ql: %d, comp: %d, conn: %d, next: "LPD64")\n",
- next_transno, queue_len, completed, connected, req_transno);
+ if (req_transno > obd->obd_last_committed &&
+ !obd->obd_version_recov)
+ d_lvl = D_ERROR;
+ CDEBUG(d_lvl,
+ "%s: waking for gap in transno, VBR is %s (skip: "
+ LPD64", ql: %d, comp: %d, conn: %d, next: "LPD64
+ ", last_committed: "LPD64")\n",
+ obd->obd_name, obd->obd_version_recov ? "ON" : "OFF",
+ next_transno, queue_len, completed, connected,
+ req_transno, obd->obd_last_committed);
obd->obd_next_recovery_transno = req_transno;
wake_up = 1;
} else if (OBD_FAIL_CHECK(OBD_FAIL_MDS_RECOVERY_ACCEPTS_GAPS)) {
" by fail_lock, waking up ("LPD64")\n", next_transno);
obd->obd_next_recovery_transno = req_transno;
wake_up = 1;
- } else if (queue_len == atomic_read(&obd->obd_req_replay_clients)) {
- /* some clients haven't connected in time, but we can try
- * to replay requests that demand on already committed ones
- * also, we can replay first non-committed transation */
- LASSERT(req_transno != 0);
- if (obd->obd_version_recov ||
- req_transno == obd->obd_last_committed + 1) {
- obd->obd_next_recovery_transno = req_transno;
- } else if (req_transno > obd->obd_last_committed) {
- /* can't continue recovery: have no needed transno */
- obd->obd_abort_recovery = 1;
- CDEBUG(D_ERROR, "abort due to missed clients. max: %d, "
- "connected: %d, completed: %d, queue_len: %d, "
- "req_transno: "LPU64", next_transno: "LPU64"\n",
- max, connected, completed, queue_len,
- req_transno, next_transno);
- }
- wake_up = 1;
}
-
spin_unlock_bh(&obd->obd_processing_task_lock);
return wake_up;
}
-static struct ptlrpc_request *target_next_replay_req(struct obd_device *obd)
-{
- struct l_wait_info lwi = { 0 };
- struct ptlrpc_request *req;
-
- CDEBUG(D_HA, "Waiting for transno "LPD64"\n",
- obd->obd_next_recovery_transno);
- l_wait_event(obd->obd_next_transno_waitq,
- check_for_next_transno(obd), &lwi);
-
- spin_lock_bh(&obd->obd_processing_task_lock);
- if (obd->obd_abort_recovery) {
- req = NULL;
- } else if (!list_empty(&obd->obd_req_replay_queue)) {
- req = list_entry(obd->obd_req_replay_queue.next,
- struct ptlrpc_request, rq_list);
- target_exp_dequeue_req_replay(req);
- list_del_init(&req->rq_list);
- obd->obd_requests_queued_for_recovery--;
- } else {
- req = NULL;
- }
- spin_unlock_bh(&obd->obd_processing_task_lock);
- RETURN(req);
-}
-
static int check_for_next_lock(struct obd_device *obd)
{
- struct ptlrpc_request *req = NULL;
int wake_up = 0;
spin_lock_bh(&obd->obd_processing_task_lock);
if (!list_empty(&obd->obd_lock_replay_queue)) {
- req = list_entry(obd->obd_lock_replay_queue.next,
- struct ptlrpc_request, rq_list);
CDEBUG(D_HA, "waking for next lock\n");
wake_up = 1;
} else if (atomic_read(&obd->obd_lock_replay_clients) == 0) {
} else if (obd->obd_abort_recovery) {
CDEBUG(D_HA, "waking for aborted recovery\n");
wake_up = 1;
+ } else if (obd->obd_recovery_expired) {
+ CDEBUG(D_HA, "waking for expired recovery\n");
+ wake_up = 1;
}
spin_unlock_bh(&obd->obd_processing_task_lock);
return wake_up;
}
+/**
+ * Wait for recovery events and react to them:
+ * - loop until \a check_routine says recovery may proceed,
+ * - on abort: evict every export that did not finish recovery,
+ * - on timeout: evict stale exports (those \a health_check rejects),
+ *   switch to version based recovery (VBR) and wait again.
+ *
+ * \retval nonzero if recovery was aborted, 0 otherwise
+ */
+static int target_recovery_overseer(struct obd_device *obd,
+ int (*check_routine)(struct obd_device *),
+ int (*health_check)(struct obd_export *))
+{
+ int abort = 0, expired = 1;
+
+ do {
+ cfs_wait_event(obd->obd_next_transno_waitq, check_routine(obd));
+ /* snapshot abort/expired and clear the expired flag under the
+ * lock so a concurrent timer expiry is not lost */
+ spin_lock_bh(&obd->obd_processing_task_lock);
+ abort = obd->obd_abort_recovery;
+ expired = obd->obd_recovery_expired;
+ obd->obd_recovery_expired = 0;
+ spin_unlock_bh(&obd->obd_processing_task_lock);
+ if (abort) {
+ CWARN("recovery is aborted, evict exports in recovery\n");
+ /** evict exports which didn't finish recovery yet */
+ class_disconnect_stale_exports(obd, exp_finished);
+ } else if (expired) {
+ /** If some clients died being recovered, evict them */
+ CDEBUG(D_WARNING, "recovery is timed out, evict stale exports\n");
+ /** evict exports with no replay in queue, they are stalled */
+ class_disconnect_stale_exports(obd, health_check);
+ /** continue with VBR */
+ spin_lock_bh(&obd->obd_processing_task_lock);
+ obd->obd_version_recov = 1;
+ spin_unlock_bh(&obd->obd_processing_task_lock);
+ /**
+ * reset timer, recovery will proceed with versions now,
+ * timeout is set just to handle reconnection delays
+ */
+ reset_recovery_timer(obd, RECONNECT_DELAY_MAX * 2, 1);
+ /** Wait for recovery events again, after evicting bad clients */
+ }
+ } while (!abort && expired);
+
+ return abort;
+}
+
+/**
+ * Dequeue the next queued replay request, or return NULL when the
+ * replay queue is empty (request replay finished, or recovery was
+ * aborted/expired — in the abort case both replay queues are flushed
+ * first). On the empty-queue path, exports that failed VBR are evicted.
+ */
+static struct ptlrpc_request *target_next_replay_req(struct obd_device *obd)
+{
+ struct ptlrpc_request *req = NULL;
+ ENTRY;
+
+ CDEBUG(D_HA, "Waiting for transno "LPD64"\n",
+ obd->obd_next_recovery_transno);
+
+ if (target_recovery_overseer(obd, check_for_next_transno,
+ exp_req_replay_healthy)) {
+ abort_req_replay_queue(obd);
+ abort_lock_replay_queue(obd);
+ }
+
+ spin_lock_bh(&obd->obd_processing_task_lock);
+ if (!list_empty(&obd->obd_req_replay_queue)) {
+ req = list_entry(obd->obd_req_replay_queue.next,
+ struct ptlrpc_request, rq_list);
+ list_del_init(&req->rq_list);
+ obd->obd_requests_queued_for_recovery--;
+ spin_unlock_bh(&obd->obd_processing_task_lock);
+ } else {
+ spin_unlock_bh(&obd->obd_processing_task_lock);
+ LASSERT(list_empty(&obd->obd_req_replay_queue));
+ LASSERT(atomic_read(&obd->obd_req_replay_clients) == 0);
+ /** evict exports that failed VBR */
+ class_disconnect_stale_exports(obd, exp_vbr_healthy);
+ }
+ RETURN(req);
+}
+
static struct ptlrpc_request *target_next_replay_lock(struct obd_device *obd)
{
- struct l_wait_info lwi = { 0 };
- struct ptlrpc_request *req;
+ struct ptlrpc_request *req = NULL;
CDEBUG(D_HA, "Waiting for lock\n");
- l_wait_event(obd->obd_next_transno_waitq,
- check_for_next_lock(obd), &lwi);
+ if (target_recovery_overseer(obd, check_for_next_lock,
+ exp_lock_replay_healthy))
+ abort_lock_replay_queue(obd);
spin_lock_bh(&obd->obd_processing_task_lock);
- if (obd->obd_abort_recovery) {
- req = NULL;
- } else if (!list_empty(&obd->obd_lock_replay_queue)) {
+ if (!list_empty(&obd->obd_lock_replay_queue)) {
req = list_entry(obd->obd_lock_replay_queue.next,
struct ptlrpc_request, rq_list);
list_del_init(&req->rq_list);
+ spin_unlock_bh(&obd->obd_processing_task_lock);
} else {
- req = NULL;
+ spin_unlock_bh(&obd->obd_processing_task_lock);
+ LASSERT(list_empty(&obd->obd_lock_replay_queue));
+ LASSERT(atomic_read(&obd->obd_lock_replay_clients) == 0);
+ /** evict exports failed VBR */
+ class_disconnect_stale_exports(obd, exp_vbr_healthy);
}
- spin_unlock_bh(&obd->obd_processing_task_lock);
return req;
}
static struct ptlrpc_request *target_next_final_ping(struct obd_device *obd)
{
- struct ptlrpc_request *req;
+ struct ptlrpc_request *req = NULL;
spin_lock_bh(&obd->obd_processing_task_lock);
if (!list_empty(&obd->obd_final_req_queue)) {
req->rq_export->exp_in_recovery = 0;
spin_unlock(&req->rq_export->exp_lock);
}
- } else {
- req = NULL;
}
spin_unlock_bh(&obd->obd_processing_task_lock);
return req;
}
-static inline int req_vbr_done(struct obd_export *exp)
-{
- return (exp->exp_vbr_failed == 0);
-}
-
-static inline int req_replay_done(struct obd_export *exp)
-{
- return (exp->exp_req_replay_needed == 0);
-}
-
-static inline int lock_replay_done(struct obd_export *exp)
-{
- return (exp->exp_lock_replay_needed == 0);
-}
-
-static inline int connect_done(struct obd_export *exp)
-{
- return (exp->exp_in_recovery != 0);
-}
-
-static int check_for_clients(struct obd_device *obd)
-{
- if (obd->obd_abort_recovery || obd->obd_version_recov)
- return 1;
- LASSERT(obd->obd_connected_clients <= obd->obd_max_recoverable_clients);
- if (obd->obd_no_conn == 0 &&
- obd->obd_connected_clients == obd->obd_max_recoverable_clients)
- return 1;
- return 0;
-}
-
static int handle_recovery_req(struct ptlrpc_thread *thread,
struct ptlrpc_request *req,
svc_handler_t handler)
rc = lu_context_init(&req->rq_session, LCT_SESSION);
if (rc) {
CERROR("Failure to initialize session: %d\n", rc);
- return rc;
+ GOTO(free_clone, rc);
}
+ /**
+ * export can be evicted during recovery, no need to handle replays for
+ * it after that, discard such request silently
+ */
+ if (req->rq_export->exp_disconnected)
+ GOTO(free_clone, rc);
+
req->rq_session.lc_thread = thread;
lu_context_enter(&req->rq_session);
req->rq_svc_thread = thread;
lu_context_exit(&req->rq_session);
lu_context_fini(&req->rq_session);
/* don't reset timer for final stage */
- if (!req_replay_done(req->rq_export) ||
- !lock_replay_done(req->rq_export))
+ if (!exp_finished(req->rq_export))
reset_recovery_timer(class_exp2obd(req->rq_export),
- AT_OFF ? obd_timeout :
+ AT_OFF ? obd_timeout :
at_get(&req->rq_rqbd->rqbd_service->srv_at_estimate), 1);
+
+ /**
+ * bz18031: increase next_recovery_transno before ptlrpc_free_clone()
+ * drops the exp_rpc reference
+ */
+ if (req->rq_export->exp_req_replay_needed) {
+ spin_lock_bh(&req->rq_export->exp_obd->obd_processing_task_lock);
+ req->rq_export->exp_obd->obd_next_recovery_transno++;
+ spin_unlock_bh(&req->rq_export->exp_obd->obd_processing_task_lock);
+ target_exp_dequeue_req_replay(req);
+ }
+free_clone:
ptlrpc_free_clone(req);
RETURN(0);
}
struct obd_device *obd = lut->lut_obd;
struct ptlrpc_request *req;
struct target_recovery_data *trd = &obd->obd_recovery_data;
- struct l_wait_info lwi = { 0 };
unsigned long delta;
unsigned long flags;
struct lu_env env;
complete(&trd->trd_starting);
/* first of all, we have to know the first transno to replay */
- obd->obd_abort_recovery = 0;
- l_wait_event(obd->obd_next_transno_waitq,
- check_for_clients(obd), &lwi);
-
- /* If some clients haven't connected in time, evict them */
- if (obd->obd_connected_clients < obd->obd_max_recoverable_clients) {
- CWARN("Some clients haven't connect in time (%d/%d),"
- "evict them\n", obd->obd_connected_clients,
- obd->obd_max_recoverable_clients);
- class_disconnect_stale_exports(obd, connect_done,
- exp_flags_from_obd(obd) |
- OBD_OPT_ABORT_RECOV);
- /**
- * if recovery proceeds with versions then some clients may be
- * timed out waiting for others and trying to reconnect.
- * Extend timer for such reconnect cases.
- */
- if (obd->obd_version_recov)
- reset_recovery_timer(obd, RECONNECT_DELAY_MAX * 2, 1);
+ if (target_recovery_overseer(obd, check_for_clients,
+ exp_connect_healthy)) {
+ abort_req_replay_queue(obd);
+ abort_lock_replay_queue(obd);
}
/* next stage: replay requests */
handle_recovery_req(thread, req,
trd->trd_recovery_handler);
obd->obd_replayed_requests++;
- spin_lock_bh(&obd->obd_processing_task_lock);
- obd->obd_next_recovery_transno++;
- spin_unlock_bh(&obd->obd_processing_task_lock);
}
- /* If some clients haven't replayed requests in time, evict them */
- if (obd->obd_abort_recovery) {
- CDEBUG(D_WARNING, "req replay is aborted\n");
- class_disconnect_stale_exports(obd, req_replay_done,
- exp_flags_from_obd(obd) |
- OBD_OPT_ABORT_RECOV);
- abort_req_replay_queue(obd);
- }
- LASSERT(list_empty(&obd->obd_req_replay_queue));
-
- /* The second stage: replay locks */
+ /**
+ * The second stage: replay locks
+ */
CDEBUG(D_INFO, "2: lock replay stage - %d clients\n",
atomic_read(&obd->obd_lock_replay_clients));
while ((req = target_next_replay_lock(obd))) {
obd->obd_replayed_locks++;
}
- /* If some clients haven't replayed requests in time, evict them */
- if (obd->obd_abort_recovery) {
- int stale;
- CERROR("lock replay is aborted\n");
- stale = class_disconnect_stale_exports(obd, lock_replay_done,
- exp_flags_from_obd(obd) |
- OBD_OPT_ABORT_RECOV);
- abort_lock_replay_queue(obd);
- }
- LASSERT(list_empty(&obd->obd_lock_replay_queue));
-
- /* The third stage: reply on final pings */
+ /**
+ * The third stage: reply on final pings, at this moment all clients
+ * must have request in final queue
+ */
CDEBUG(D_INFO, "3: final stage - process recovery completion pings\n");
- /** evict exports failed VBR */
- class_disconnect_stale_exports(obd, req_vbr_done,
- exp_flags_from_obd(obd) |
- OBD_OPT_ABORT_RECOV);
/** Update server last boot epoch */
lut_boot_epoch_update(lut);
/* We drop recoverying flag to forward all new requests
delta = (jiffies - delta) / HZ;
CDEBUG(D_INFO,"4: recovery completed in %lus - %d/%d reqs/locks\n",
delta, obd->obd_replayed_requests, obd->obd_replayed_locks);
- LASSERT(atomic_read(&obd->obd_req_replay_clients) == 0);
- LASSERT(atomic_read(&obd->obd_lock_replay_clients) == 0);
if (delta > obd_timeout * OBD_RECOVERY_FACTOR) {
CWARN("too long recovery - read logs\n");
libcfs_debug_dumplog();
static void target_recovery_expired(unsigned long castmeharder)
{
struct obd_device *obd = (struct obd_device *)castmeharder;
- LCONSOLE_WARN("%s: recovery timed out; %d clients never reconnected "
- "after %lds (%d clients did)\n",
- obd->obd_name, obd->obd_recoverable_clients,
- cfs_time_current_sec()- obd->obd_recovery_start,
- obd->obd_connected_clients);
+ CDEBUG(D_HA, "%s: recovery timed out; %d clients are still in recovery"
+ " after %lds (%d clients connected)\n",
+ obd->obd_name, atomic_read(&obd->obd_lock_replay_clients),
+ cfs_time_current_sec()- obd->obd_recovery_start,
+ obd->obd_connected_clients);
spin_lock_bh(&obd->obd_processing_task_lock);
- obd->obd_version_recov = 1;
- CDEBUG(D_INFO, "VBR is used for %d clients from t"LPU64"\n",
- atomic_read(&obd->obd_req_replay_clients),
- obd->obd_next_recovery_transno);
+ obd->obd_recovery_expired = 1;
cfs_waitq_signal(&obd->obd_next_transno_waitq);
spin_unlock_bh(&obd->obd_processing_task_lock);
}
void target_recovery_init(struct lu_target *lut, svc_handler_t handler)
{
struct obd_device *obd = lut->lut_obd;
- if (obd->obd_max_recoverable_clients == 0)
+ if (obd->obd_max_recoverable_clients == 0) {
+ /** Update server last boot epoch */
+ lut_boot_epoch_update(lut);
return;
+ }
CWARN("RECOVERY: service %s, %d recoverable clients, "
"last_transno "LPU64"\n", obd->obd_name,
exp->exp_req_replay_needed = 0;
spin_unlock(&exp->exp_lock);
atomic_dec(&obd->obd_req_replay_clients);
- LASSERT(obd->obd_recoverable_clients > 0);
- obd->obd_recoverable_clients--;
- if (atomic_read(&obd->obd_req_replay_clients) == 0)
- CDEBUG(D_HA, "all clients have replayed reqs\n");
}
spin_unlock_bh(&obd->obd_processing_task_lock);
}
exp->exp_lock_replay_needed = 0;
spin_unlock(&exp->exp_lock);
atomic_dec(&obd->obd_lock_replay_clients);
- if (atomic_read(&obd->obd_lock_replay_clients) == 0)
- CDEBUG(D_HA, "all clients have replayed locks\n");
}
spin_unlock_bh(&obd->obd_processing_task_lock);
}
RETURN(0);
}
-struct obd_device * target_req2obd(struct ptlrpc_request *req)
-{
- return req->rq_export->exp_obd;
-}
-
-static inline struct ldlm_pool *ldlm_exp2pl(struct obd_export *exp)
-{
- LASSERT(exp != NULL);
- return &exp->exp_obd->obd_namespace->ns_pool;
-}
-
/**
* Packs current SLV and Limit into \a req.
*/
rs->rs_xid = req->rq_xid;
rs->rs_transno = req->rq_transno;
rs->rs_export = exp;
+ rs->rs_opc = lustre_msg_get_opc(rs->rs_msg);
spin_lock(&exp->exp_uncommitted_replies_lock);
CDEBUG(D_NET, "rs transno = "LPU64", last committed = "LPU64"\n",
LASSERT(req->rq_export);
- OBD_ALLOC(qdata, sizeof(struct qunit_data));
- if (!qdata)
- RETURN(-ENOMEM);
- rc = quota_get_qdata(req, qdata, QUOTA_REQUEST, QUOTA_EXPORT);
- if (rc < 0) {
+ qdata = quota_get_qdata(req, QUOTA_REQUEST, QUOTA_EXPORT);
+ if (IS_ERR(qdata)) {
+ rc = PTR_ERR(qdata);
CDEBUG(D_ERROR, "Can't unpack qunit_data(rc: %d)\n", rc);
+ req->rq_status = rc;
GOTO(out, rc);
}
if (!obd->obd_observer || !obd->obd_observer->obd_observer) {
CERROR("Can't find the observer, it is recovering\n");
req->rq_status = -EAGAIN;
- GOTO(send_reply, rc = -EAGAIN);
+ GOTO(out, rc);
}
master_obd = obd->obd_observer->obd_observer;
CDEBUG(D_QUOTA, "quota_type not processed yet, return "
"-EAGAIN\n");
req->rq_status = -EAGAIN;
- rc = ptlrpc_reply(req);
GOTO(out, rc);
}
CDEBUG(D_QUOTA, "quota_ctxt is not ready yet, return "
"-EAGAIN\n");
req->rq_status = -EAGAIN;
- rc = ptlrpc_reply(req);
GOTO(out, rc);
}
up_read(&obt->obt_rwsem);
if (rc && rc != -EDQUOT)
CDEBUG(rc == -EBUSY ? D_QUOTA : D_ERROR,
- "dqacq failed! (rc:%d)\n", rc);
+ "dqacq/dqrel failed! (rc:%d)\n", rc);
req->rq_status = rc;
- /* there are three forms of qunit(historic causes), so we need to
- * adjust the same form to different forms slaves needed */
rc = quota_copy_qdata(req, qdata, QUOTA_REPLY, QUOTA_EXPORT);
if (rc < 0) {
- CDEBUG(D_ERROR, "Can't pack qunit_data(rc: %d)\n", rc);
+ CERROR("Can't pack qunit_data(rc: %d)\n", rc);
GOTO(out, rc);
}
/* Block the quota req. b=14840 */
OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_BLOCK_QUOTA_REQ, obd_timeout);
-send_reply:
- rc = ptlrpc_reply(req);
+ EXIT;
+
out:
- OBD_FREE(qdata, sizeof(struct qunit_data));
- RETURN(rc);
+ rc = ptlrpc_reply(req);
+ return rc;
#else
return 0;
#endif /* !__KERNEL__ */