/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
- */
-/*
- * Copyright (c) 2011 Whamcloud, Inc.
+ *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
imp->imp_connect_flags_orig = data->ocd_connect_flags;
}
- rc = ptlrpc_connect_import(imp, NULL);
+ rc = ptlrpc_connect_import(imp);
if (rc != 0) {
LASSERT (imp->imp_state == LUSTRE_IMP_DISCON);
GOTO(out_ldlm, rc);
}
EXPORT_SYMBOL(target_client_add_cb);
+#ifdef __KERNEL__
static void
-target_start_and_reset_recovery_timer(struct obd_device *obd,
- struct ptlrpc_request *req,
- int new_client);
+check_and_start_recovery_timer(struct obd_device *obd,
+ struct ptlrpc_request *req, int new_client);
+#else
+static inline void
+check_and_start_recovery_timer(struct obd_device *obd,
+ struct ptlrpc_request *req, int new_client)
+{
+}
+#endif
int target_handle_connect(struct ptlrpc_request *req)
{
if (!target || target->obd_stopping || !target->obd_set_up) {
LCONSOLE_ERROR_MSG(0x137, "UUID '%s' is not available "
- " for connect (%s)\n", str,
+ "for connect (%s)\n", str,
!target ? "no target" :
(target->obd_stopping ? "stopping" :
"not set up"));
cfs_spin_lock(&export->exp_lock);
export->exp_connecting = 1;
cfs_spin_unlock(&export->exp_lock);
- class_export_put(export);
LASSERT(export->exp_obd == target);
rc = target_handle_reconnect(&conn, export, &cluuid);
GOTO(out, rc);
}
- CWARN("%s: connection from %s@%s %st"LPU64" exp %p cur %ld last %ld\n",
+ CDEBUG(D_HA, "%s: connection from %s@%s %st"LPU64" exp %p cur %ld last %ld\n",
target->obd_name, cluuid.uuid, libcfs_nid2str(req->rq_peer.nid),
target->obd_recovering ? "recovering/" : "", data->ocd_transno,
export, (long)cfs_time_current_sec(),
export ? (long)export->exp_last_request_time : 0);
- /* If this is the first time a client connects,
- * reset the recovery timer */
+ /* If this is the first time a client connects, reset the recovery
+ * timer */
if (rc == 0 && target->obd_recovering)
- target_start_and_reset_recovery_timer(target, req, !export);
+ check_and_start_recovery_timer(target, req, export == NULL);
/* We want to handle EALREADY but *not* -EALREADY from
* target_handle_reconnect(), return reconnection state in a flag */
rc = obd_connect(req->rq_svc_thread->t_env,
&export, target, &cluuid, data,
client_nid);
- if (rc == 0)
+ if (rc == 0) {
conn.cookie = export->exp_handle.h_cookie;
+ /* LU-1092 reconnect put export refcount in the
+ * end, connect needs take one here too. */
+ class_export_get(export);
+ }
}
} else {
rc = obd_reconnect(req->rq_svc_thread->t_env,
}
if (rc)
GOTO(out, rc);
+
+ LASSERT(target->u.obt.obt_magic == OBT_MAGIC);
+ data->ocd_instance = target->u.obt.obt_instance;
+
/* Return only the parts of obd_connect_data that we understand, so the
* client knows that we don't understand the rest. */
if (data) {
&export->exp_connection->c_peer.nid,
&export->exp_nid_hash);
}
-
- cfs_spin_lock(&target->obd_recovery_task_lock);
+ /**
+ class_disconnect->class_export_recovery_cleanup() race
+ */
if (target->obd_recovering && !export->exp_in_recovery) {
+ int has_transno;
+ __u64 transno = data->ocd_transno;
+
cfs_spin_lock(&export->exp_lock);
export->exp_in_recovery = 1;
export->exp_req_replay_needed = 1;
export->exp_lock_replay_needed = 1;
cfs_spin_unlock(&export->exp_lock);
- if ((lustre_msg_get_op_flags(req->rq_reqmsg) & MSG_CONNECT_TRANSNO)
- && (data->ocd_transno == 0))
+
+ has_transno = !!(lustre_msg_get_op_flags(req->rq_reqmsg) &
+ MSG_CONNECT_TRANSNO);
+ if (has_transno && transno == 0)
CWARN("Connect with zero transno!\n");
- if ((lustre_msg_get_op_flags(req->rq_reqmsg) & MSG_CONNECT_TRANSNO)
- && data->ocd_transno < target->obd_next_recovery_transno &&
- data->ocd_transno > target->obd_last_committed)
- target->obd_next_recovery_transno = data->ocd_transno;
- target->obd_connected_clients++;
+ if (has_transno && transno > 0 &&
+ transno < target->obd_next_recovery_transno &&
+ transno > target->obd_last_committed) {
+ /* another way is to use cmpxchg() so it will be
+ * lock free */
+ cfs_spin_lock(&target->obd_recovery_task_lock);
+ if (transno < target->obd_next_recovery_transno)
+ target->obd_next_recovery_transno = transno;
+ cfs_spin_unlock(&target->obd_recovery_task_lock);
+ }
+
cfs_atomic_inc(&target->obd_req_replay_clients);
cfs_atomic_inc(&target->obd_lock_replay_clients);
- if (target->obd_connected_clients ==
+ if (cfs_atomic_inc_return(&target->obd_connected_clients) ==
target->obd_max_recoverable_clients)
cfs_waitq_signal(&target->obd_next_transno_waitq);
}
- cfs_spin_unlock(&target->obd_recovery_task_lock);
/* Tell the client we're in recovery, when client is involved in it. */
if (target->obd_recovering)
cfs_spin_lock(&export->exp_lock);
export->exp_connecting = 0;
cfs_spin_unlock(&export->exp_lock);
+
+ class_export_put(export);
}
if (targref)
class_decref(targref, __FUNCTION__, cfs_current());
#ifdef __KERNEL__
static void target_finish_recovery(struct obd_device *obd)
{
+ time_t elapsed_time = max_t(time_t, 1, cfs_time_current_sec() -
+ obd->obd_recovery_start);
ENTRY;
- LCONSOLE_INFO("%s: sending delayed replies to recovered clients\n",
- obd->obd_name);
+
+ LCONSOLE_INFO("%s: Recovery over after %d:%.02d, of %d clients "
+ "%d recovered and %d %s evicted.\n", obd->obd_name,
+ (int)elapsed_time / 60, (int)elapsed_time % 60,
+ obd->obd_max_recoverable_clients,
+ cfs_atomic_read(&obd->obd_connected_clients),
+ obd->obd_stale_clients,
+ obd->obd_stale_clients == 1 ? "was" : "were");
ldlm_reprocess_all_ns(obd->obd_namespace);
cfs_spin_lock(&obd->obd_recovery_task_lock);
target_request_copy_put(req);
}
}
-#endif
/* Called from a cleanup function if the device is being cleaned up
forcefully. The exports should all have been disconnected already,
cfs_timer_disarm(&obd->obd_recovery_timer);
}
-/* extend = 1 means require at least "duration" seconds left in the timer,
- extend = 0 means set the total duration (start_recovery_timer) */
-static void reset_recovery_timer(struct obd_device *obd, int duration,
- int extend)
+static void target_start_recovery_timer(struct obd_device *obd)
{
- cfs_time_t now = cfs_time_current_sec();
- cfs_duration_t left;
+ if (obd->obd_recovery_start != 0)
+ return;
- cfs_spin_lock(&obd->obd_recovery_task_lock);
+ cfs_spin_lock(&obd->obd_dev_lock);
if (!obd->obd_recovering || obd->obd_abort_recovery) {
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
return;
}
- left = cfs_time_sub(obd->obd_recovery_end, now);
+ LASSERT(obd->obd_recovery_timeout != 0);
- if (extend && (duration > left))
- obd->obd_recovery_timeout += duration - left;
- else if (!extend && (duration > obd->obd_recovery_timeout))
- /* Track the client's largest expected replay time */
- obd->obd_recovery_timeout = duration;
+ if (obd->obd_recovery_start != 0) {
+ cfs_spin_unlock(&obd->obd_dev_lock);
+ return;
+ }
- /* Hard limit of obd_recovery_time_hard which should not happen */
- if (obd->obd_recovery_timeout > obd->obd_recovery_time_hard)
- obd->obd_recovery_timeout = obd->obd_recovery_time_hard;
+ cfs_timer_arm(&obd->obd_recovery_timer,
+ cfs_time_shift(obd->obd_recovery_timeout));
+ obd->obd_recovery_start = cfs_time_current_sec();
+ cfs_spin_unlock(&obd->obd_dev_lock);
- obd->obd_recovery_end = obd->obd_recovery_start +
- obd->obd_recovery_timeout;
- if (!cfs_timer_is_armed(&obd->obd_recovery_timer) ||
- cfs_time_before(now, obd->obd_recovery_end)) {
- left = cfs_time_sub(obd->obd_recovery_end, now);
- cfs_timer_arm(&obd->obd_recovery_timer, cfs_time_shift(left));
- }
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- CDEBUG(D_HA, "%s: recovery timer will expire in %u seconds\n",
- obd->obd_name, (unsigned)left);
+ LCONSOLE_WARN("%s: Will be in recovery for at least %d:%.02d, "
+ "or until %d client%s reconnect%s\n",
+ obd->obd_name,
+ obd->obd_recovery_timeout / 60,
+ obd->obd_recovery_timeout % 60,
+ obd->obd_max_recoverable_clients,
+ (obd->obd_max_recoverable_clients == 1) ? "" : "s",
+ (obd->obd_max_recoverable_clients == 1) ? "s": "");
}
-static void check_and_start_recovery_timer(struct obd_device *obd)
+/**
+ * extend recovery window.
+ *
+ * if @extend is true, extend recovery window to have @drt remaining at least;
+ * otherwise, make sure the recovery timeout value is not less than @drt.
+ */
+static void extend_recovery_timer(struct obd_device *obd, int drt, bool extend)
{
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- if (cfs_timer_is_armed(&obd->obd_recovery_timer)) {
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
+ cfs_time_t now;
+ cfs_time_t end;
+ cfs_duration_t left;
+ int to;
+
+ cfs_spin_lock(&obd->obd_dev_lock);
+ if (!obd->obd_recovering || obd->obd_abort_recovery) {
+ cfs_spin_unlock(&obd->obd_dev_lock);
return;
}
- CDEBUG(D_HA, "%s: starting recovery timer\n", obd->obd_name);
- obd->obd_recovery_start = cfs_time_current_sec();
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- reset_recovery_timer(obd, obd->obd_recovery_timeout, 0);
+ LASSERT(obd->obd_recovery_start != 0);
+
+ now = cfs_time_current_sec();
+ to = obd->obd_recovery_timeout;
+ end = obd->obd_recovery_start + to;
+ left = cfs_time_sub(end, now);
+
+ if (extend && (drt > left)) {
+ to += drt - left;
+ } else if (!extend && (drt > to)) {
+ to = drt;
+ /* reduce drt by already passed time */
+ drt -= obd->obd_recovery_timeout - left;
+ }
+
+ if (to > obd->obd_recovery_time_hard)
+ to = obd->obd_recovery_time_hard;
+ if (obd->obd_recovery_timeout < to) {
+ obd->obd_recovery_timeout = to;
+ cfs_timer_arm(&obd->obd_recovery_timer,
+ cfs_time_shift(drt));
+ }
+ cfs_spin_unlock(&obd->obd_dev_lock);
+
+ CDEBUG(D_HA, "%s: recovery timer will expire in %u seconds\n",
+ obd->obd_name, (unsigned)drt);
}
/* Reset the timer with each new client connection */
*/
static void
-target_start_and_reset_recovery_timer(struct obd_device *obd,
- struct ptlrpc_request *req,
- int new_client)
+check_and_start_recovery_timer(struct obd_device *obd,
+ struct ptlrpc_request *req,
+ int new_client)
{
int service_time = lustre_msg_get_service_time(req->rq_reqmsg);
+ struct obd_device_target *obt = &obd->u.obt;
+ struct lustre_sb_info *lsi;
if (!new_client && service_time)
/* Teach server about old server's estimates, as first guess
at_measured(&req->rq_rqbd->rqbd_service->srv_at_estimate,
service_time);
- check_and_start_recovery_timer(obd);
+ target_start_recovery_timer(obd);
/* convert the service time to rpc timeout,
* reuse service_time to limit stack usage */
/* We expect other clients to timeout within service_time, then try
* to reconnect, then try the failover server. The max delay between
* connect attempts is SWITCH_MAX + SWITCH_INC + INITIAL */
- service_time += 2 * (CONNECTION_SWITCH_MAX + CONNECTION_SWITCH_INC +
- INITIAL_CONNECT_TIMEOUT);
+ service_time += 2 * INITIAL_CONNECT_TIMEOUT;
+
+ LASSERT(obt->obt_magic == OBT_MAGIC);
+ lsi = s2lsi(obt->obt_sb);
+ if (!(lsi->lsi_flags | LSI_IR_CAPABLE))
+ service_time += 2 * (CONNECTION_SWITCH_MAX +
+ CONNECTION_SWITCH_INC);
if (service_time > obd->obd_recovery_timeout && !new_client)
- reset_recovery_timer(obd, service_time, 0);
+ extend_recovery_timer(obd, service_time, false);
}
-#ifdef __KERNEL__
-
/** Health checking routines */
static inline int exp_connect_healthy(struct obd_export *exp)
{
/** Checking routines for recovery */
static int check_for_clients(struct obd_device *obd)
{
+ unsigned int clnts = cfs_atomic_read(&obd->obd_connected_clients);
+
if (obd->obd_abort_recovery || obd->obd_recovery_expired)
return 1;
- LASSERT(obd->obd_connected_clients <= obd->obd_max_recoverable_clients);
+ LASSERT(clnts <= obd->obd_max_recoverable_clients);
if (obd->obd_no_conn == 0 &&
- obd->obd_connected_clients + obd->obd_stale_clients ==
- obd->obd_max_recoverable_clients)
+ clnts + obd->obd_stale_clients == obd->obd_max_recoverable_clients)
return 1;
return 0;
}
req_transno = 0;
}
- connected = obd->obd_connected_clients;
+ connected = cfs_atomic_read(&obd->obd_connected_clients);
completed = connected - cfs_atomic_read(&obd->obd_req_replay_clients);
queue_len = obd->obd_requests_queued_for_recovery;
next_transno = obd->obd_next_recovery_transno;
* reset timer, recovery will proceed with versions now,
* timeout is set just to handle reconnection delays
*/
- reset_recovery_timer(obd, RECONNECT_DELAY_MAX, 1);
+ extend_recovery_timer(obd, RECONNECT_DELAY_MAX, true);
/** Wait for recovery events again, after evicting bad clients */
goto repeat;
}
lu_context_fini(&req->rq_recov_session);
/* don't reset timer for final stage */
if (!exp_finished(req->rq_export)) {
+ int to = obd_timeout;
+
/**
* Add request timeout to the recovery time so next request from
* this client may come in recovery time
*/
- reset_recovery_timer(class_exp2obd(req->rq_export),
- AT_OFF ? obd_timeout :
- lustre_msg_get_timeout(req->rq_reqmsg), 1);
+ if (!AT_OFF) {
+ struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service;
+ /* If the server sent early reply for this request,
+ * the client will recalculate the timeout according to
+ * current server estimate service time, so we will
+ * use the maximum timeout here for waiting the client
+ * sending the next req */
+ to = max((int)at_est2timeout(
+ at_get(&svc->srv_at_estimate)),
+ (int)lustre_msg_get_timeout(req->rq_reqmsg));
+ /* Add net_latency (see ptlrpc_replay_req) */
+ to += lustre_msg_get_service_time(req->rq_reqmsg);
+ }
+ extend_recovery_timer(class_exp2obd(req->rq_export), to, true);
}
reqcopy_put:
RETURN(rc);
struct target_recovery_data *trd = &obd->obd_recovery_data;
unsigned long delta;
unsigned long flags;
- struct lu_env env;
- struct ptlrpc_thread fake_svc_thread, *thread = &fake_svc_thread;
+ struct lu_env *env;
+ struct ptlrpc_thread *thread = NULL;
int rc = 0;
ENTRY;
RECALC_SIGPENDING;
SIGNAL_MASK_UNLOCK(current, flags);
- rc = lu_context_init(&env.le_ctx, LCT_MD_THREAD);
- if (rc)
+ OBD_ALLOC_PTR(thread);
+ if (thread == NULL)
+ RETURN(-ENOMEM);
+
+ OBD_ALLOC_PTR(env);
+ if (env == NULL) {
+ OBD_FREE_PTR(thread);
+ RETURN(-ENOMEM);
+ }
+
+ rc = lu_context_init(&env->le_ctx, LCT_MD_THREAD);
+ if (rc) {
+ OBD_FREE_PTR(thread);
+ OBD_FREE_PTR(env);
RETURN(rc);
+ }
- thread->t_env = &env;
+ thread->t_env = env;
thread->t_id = -1; /* force filter_iobuf_get/put to use local buffers */
- env.le_ctx.lc_thread = thread;
+ env->le_ctx.lc_thread = thread;
thread->t_data = NULL;
+ thread->t_watchdog = NULL;
- CERROR("%s: started recovery thread pid %d\n", obd->obd_name,
+ CDEBUG(D_HA, "%s: started recovery thread pid %d\n", obd->obd_name,
cfs_curproc_pid());
trd->trd_processing_task = cfs_curproc_pid();
target_finish_recovery(obd);
- lu_context_fini(&env.le_ctx);
+ lu_context_fini(&env->le_ctx);
trd->trd_processing_task = 0;
cfs_complete(&trd->trd_finishing);
+
+ OBD_FREE_PTR(thread);
+ OBD_FREE_PTR(env);
RETURN(rc);
}
" after %lds (%d clients connected)\n",
obd->obd_name, cfs_atomic_read(&obd->obd_lock_replay_clients),
cfs_time_current_sec()- obd->obd_recovery_start,
- obd->obd_connected_clients);
+ cfs_atomic_read(&obd->obd_connected_clients));
obd->obd_recovery_expired = 1;
cfs_waitq_signal(&obd->obd_next_transno_waitq);
obd->obd_recovery_start = 0;
obd->obd_recovery_end = 0;
- /* both values can be get from mount data already */
- if (obd->obd_recovery_timeout == 0)
- obd->obd_recovery_timeout = OBD_RECOVERY_TIME_SOFT;
- if (obd->obd_recovery_time_hard == 0)
- obd->obd_recovery_time_hard = OBD_RECOVERY_TIME_HARD;
cfs_timer_init(&obd->obd_recovery_timer, target_recovery_expired, obd);
target_start_recovery_thread(lut, handler);
}
/* CAVEAT EMPTOR: The incoming request message has been swabbed
* (i.e. buflens etc are in my own byte order), but type-dependent
- * buffers (eg mds_body, ost_body etc) have NOT been swabbed. */
+ * buffers (eg mdt_body, ost_body etc) have NOT been swabbed. */
if (!transno) {
CFS_INIT_LIST_HEAD(&req->rq_list);
{
int netrc;
struct ptlrpc_reply_state *rs;
- struct obd_device *obd;
struct obd_export *exp;
struct ptlrpc_service *svc;
ENTRY;
LASSERT (cfs_list_empty(&rs->rs_exp_list));
exp = class_export_get (req->rq_export);
- obd = exp->exp_obd;
/* disable reply scheduling while I'm setting up */
rs->rs_scheduled = 1;