* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2010, 2016, Intel Corporation.
+ * Copyright (c) 2010, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_LDLM
+#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <libcfs/libcfs.h>
#include <obd.h>
sizeof(server_uuid)));
cli->cl_dirty_pages = 0;
+ cli->cl_dirty_max_pages = 0;
cli->cl_avail_grant = 0;
/* FIXME: Should limit this for the sum of all cl_dirty_max_pages. */
/* cl_dirty_max_pages may be changed at connect time in
spin_lock_init(&cli->cl_lru_list_lock);
atomic_long_set(&cli->cl_unstable_count, 0);
INIT_LIST_HEAD(&cli->cl_shrink_list);
+ INIT_LIST_HEAD(&cli->cl_grant_chain);
+
+ INIT_LIST_HEAD(&cli->cl_flight_waiters);
+ cli->cl_rpcs_in_flight = 0;
init_waitqueue_head(&cli->cl_destroy_waitq);
atomic_set(&cli->cl_destroy_in_flight, 0);
+
+ cli->cl_supp_cksum_types = OBD_CKSUM_CRC32;
#ifdef ENABLE_CHECKSUM
/* Turn on checksumming by default. */
cli->cl_checksum = 1;
* Set cl_chksum* to CRC32 for now to avoid returning screwed info
* through procfs.
*/
- cli->cl_cksum_type = cli->cl_supp_cksum_types = OBD_CKSUM_CRC32;
+ cli->cl_cksum_type = cli->cl_supp_cksum_types;
#endif
atomic_set(&cli->cl_resends, OSC_DEFAULT_RESENDS);
* from OFD after connecting. */
cli->cl_max_pages_per_rpc = PTLRPC_MAX_BRW_PAGES;
+ cli->cl_max_short_io_bytes = OBD_MAX_SHORT_IO_BYTES;
+
/* set cl_chunkbits default value to PAGE_SHIFT,
* it will be updated at OSC connection time. */
cli->cl_chunkbits = PAGE_SHIFT;
cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_MAX;
else
cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_DEFAULT;
- }
+ }
spin_lock_init(&cli->cl_mod_rpcs_lock);
spin_lock_init(&cli->cl_mod_rpcs_hist.oh_lock);
{
struct obd_device *target;
struct lustre_handle *hdl;
- cfs_time_t now;
- cfs_time_t deadline;
- int timeout;
+ ktime_t remaining;
+ s64 timeout;
int rc = 0;
- ENTRY;
+ ENTRY;
hdl = &exp->exp_imp_reverse->imp_remote_handle;
if (!exp->exp_connection || !lustre_handle_is_used(hdl)) {
conn->cookie = exp->exp_handle.h_cookie;
GOTO(out_already, rc);
}
- now = cfs_time_current();
- deadline = target->obd_recovery_timer.expires;
- if (cfs_time_before(now, deadline)) {
- struct target_distribute_txn_data *tdtd =
- class_exp2tgt(exp)->lut_tdtd;
+ remaining = hrtimer_expires_remaining(&target->obd_recovery_timer);
+ timeout = ktime_divns(remaining, NSEC_PER_SEC);
+ if (timeout > 0) {
+ LCONSOLE_WARN("%s: Client %s (at %s) reconnected, waiting for %d clients in recovery for %lld:%.02lld\n",
+ target->obd_name,
+ obd_uuid2str(&exp->exp_client_uuid),
+ obd_export_nid2str(exp),
+ target->obd_max_recoverable_clients,
+ timeout / 60, timeout % 60);
+ } else {
+ struct target_distribute_txn_data *tdtd;
int size = 0;
int count = 0;
char *buf = NULL;
- timeout = cfs_duration_sec(cfs_time_sub(deadline, now));
+ tdtd = class_exp2tgt(exp)->lut_tdtd;
if (tdtd && tdtd->tdtd_show_update_logs_retrievers)
buf = tdtd->tdtd_show_update_logs_retrievers(
tdtd->tdtd_show_retrievers_cbdata,
&size, &count);
if (count > 0)
- LCONSOLE_WARN("%s: Recovery already passed deadline "
- "%d:%.02d. It is due to DNE recovery "
- "failed/stuck on the %d MDT(s):%s. "
- "Please wait until all MDTs recovered "
- "or abort the recovery by force.\n",
- target->obd_name, timeout / 60,
- timeout % 60, count,
- buf ? buf : "unknown (not enough RAM)");
+ LCONSOLE_WARN("%s: Client %s (at %s) reconnecting, waiting for %d MDTs (%s) in recovery for %lld:%.02lld. Please wait until all MDTs recovered or you may force MDT eviction via 'lctl --device %s abort_recovery'.\n",
+ target->obd_name,
+ obd_uuid2str(&exp->exp_client_uuid),
+ obd_export_nid2str(exp), count,
+ buf ? buf : "unknown (not enough RAM)",
+ (abs(timeout) + target->obd_recovery_timeout) / 60,
+ (abs(timeout) + target->obd_recovery_timeout) % 60,
+ target->obd_name);
else
- LCONSOLE_WARN("%s: Recovery already passed deadline "
- "%d:%.02d. If you do not want to wait "
- "more, please abort the recovery by "
- "force.\n", target->obd_name,
- timeout / 60, timeout % 60);
+ LCONSOLE_WARN("%s: Recovery already passed deadline %lld:%.02lld. If you do not want to wait more, you may force target eviction via 'lctl --device %s abort_recovery'.\n",
+ target->obd_name, abs(timeout) / 60,
+ abs(timeout) % 60, target->obd_name);
if (buf != NULL)
OBD_FREE(buf, size);
- } else {
- timeout = cfs_duration_sec(cfs_time_sub(now, deadline));
- LCONSOLE_WARN("%s: Recovery already passed deadline"
- " %d:%.02d, It is most likely due to DNE"
- " recovery is failed or stuck, please wait a"
- " few more minutes or abort the recovery.\n",
- target->obd_name, timeout / 60, timeout % 60);
}
out_already:
* reconnect case */
struct lustre_handle conn;
struct lustre_handle *tmp;
- struct obd_uuid tgtuuid;
struct obd_uuid cluuid;
char *str;
int rc = 0;
bool mds_conn = false, lw_client = false, initial_conn = false;
bool mds_mds_conn = false;
bool new_mds_mds_conn = false;
- bool target_referenced = false;
struct obd_connect_data *data, *tmpdata;
int size, tmpsize;
lnet_nid_t *client_nid = NULL;
GOTO(out, rc = -EINVAL);
}
- obd_str2uuid(&tgtuuid, str);
- target = class_uuid2obd(&tgtuuid);
- if (!target)
- target = class_name2obd(str);
-
+ target = class_dev_by_str(str);
if (!target) {
deuuidify(str, NULL, &target_start, &target_len);
LCONSOLE_ERROR_MSG(0x137, "%s: not available for connect "
}
spin_lock(&target->obd_dev_lock);
+
+ target->obd_conn_inprogress++;
+
if (target->obd_stopping || !target->obd_set_up) {
spin_unlock(&target->obd_dev_lock);
GOTO(out, rc = -EAGAIN);
}
- /* Make sure the target isn't cleaned up while we're here. Yes,
- * there's still a race between the above check and our incref here.
- * Really, class_uuid2obd should take the ref. */
- class_incref(target, __func__, current);
- target_referenced = true;
-
- target->obd_conn_inprogress++;
spin_unlock(&target->obd_dev_lock);
str = req_capsule_client_get(&req->rq_pill, &RMF_CLUUID);
*/
if (!(data->ocd_connect_flags & OBD_CONNECT_FULL20))
GOTO(out, rc = -EPROTO);
-#endif
+ /* Don't allow liblustre clients to connect.
+ * - testing was disabled in v2_2_50_0-61-g6a75d65
+ * - building was disabled in v2_5_58_0-28-g7277179
+ * - client code was deleted in v2_6_50_0-101-gcdfbc72,
+ * - clients were refused connect for version difference > 0.0.1.32 */
if (lustre_msg_get_op_flags(req->rq_reqmsg) & MSG_CONNECT_LIBCLIENT) {
- if (data->ocd_version < LUSTRE_VERSION_CODE -
- LUSTRE_VERSION_ALLOWED_OFFSET ||
- data->ocd_version > LUSTRE_VERSION_CODE +
- LUSTRE_VERSION_ALLOWED_OFFSET) {
- DEBUG_REQ(D_WARNING, req, "Refusing %s (%d.%d.%d.%d) "
- "libclient connection attempt",
- data->ocd_version < LUSTRE_VERSION_CODE ?
- "old" : "new",
- OBD_OCD_VERSION_MAJOR(data->ocd_version),
- OBD_OCD_VERSION_MINOR(data->ocd_version),
- OBD_OCD_VERSION_PATCH(data->ocd_version),
- OBD_OCD_VERSION_FIX(data->ocd_version));
- data = req_capsule_server_sized_get(&req->rq_pill,
- &RMF_CONNECT_DATA,
- offsetof(typeof(*data), ocd_version) +
- sizeof(data->ocd_version));
- if (data) {
- data->ocd_connect_flags = OBD_CONNECT_VERSION;
- data->ocd_version = LUSTRE_VERSION_CODE;
- }
- GOTO(out, rc = -EPROTO);
- }
+ DEBUG_REQ(D_WARNING, req, "Refusing libclient connection");
+ GOTO(out, rc = -EPROTO);
}
+#endif
/* Note: lw_client is needed in MDS-MDS failover during update log
* processing, so we needs to allow lw_client to be connected at
*
* Via check OBD_CONNECT_FID, we can distinguish whether
* the OBD_CONNECT_MDS_MDS/OBD_CONNECT_MNE_SWAB is from
- * MGC or MDT. */
+ * MGC or MDT, since MGC does not use OBD_CONNECT_FID.
+ */
if (!lw_client &&
(data->ocd_connect_flags & OBD_CONNECT_MDS_MDS) &&
(data->ocd_connect_flags & OBD_CONNECT_FID) &&
* cause namespace inconsistency */
spin_lock(&export->exp_lock);
export->exp_connecting = 1;
+ export->exp_conn_cnt = 0;
spin_unlock(&export->exp_lock);
conn.cookie = export->exp_handle.h_cookie;
rc = EALREADY;
target->obd_name, cluuid.uuid,
libcfs_nid2str(req->rq_peer.nid),
atomic_read(&export->exp_refcount));
- GOTO(out, rc = -EBUSY);
- } else if (lustre_msg_get_conn_cnt(req->rq_reqmsg) == 1) {
- if (!strstr(cluuid.uuid, "mdt"))
- LCONSOLE_WARN("%s: Rejecting reconnect from the "
- "known client %s (at %s) because it "
- "is indicating it is a new client",
- target->obd_name, cluuid.uuid,
- libcfs_nid2str(req->rq_peer.nid));
- GOTO(out, rc = -EALREADY);
- } else {
- OBD_FAIL_TIMEOUT(OBD_FAIL_TGT_DELAY_RECONNECT, 2 * obd_timeout);
- }
+ GOTO(out, rc = -EBUSY);
+ } else if (lustre_msg_get_conn_cnt(req->rq_reqmsg) == 1 &&
+ rc != EALREADY) {
+ if (!strstr(cluuid.uuid, "mdt"))
+ LCONSOLE_WARN("%s: Rejecting reconnect from the "
+ "known client %s (at %s) because it "
+ "is indicating it is a new client",
+ target->obd_name, cluuid.uuid,
+ libcfs_nid2str(req->rq_peer.nid));
+ GOTO(out, rc = -EALREADY);
+ } else {
+ OBD_FAIL_TIMEOUT(OBD_FAIL_TGT_DELAY_RECONNECT, 2 * obd_timeout);
+ }
if (rc < 0) {
GOTO(out, rc);
/* allow "new" MDT to be connected during recovery, since we
* need retrieve recovery update records from it */
if (target->obd_recovering && !lw_client && !mds_mds_conn) {
- cfs_time_t t;
- int c; /* connected */
- int i; /* in progress */
- int k; /* known */
- int s; /* stale/evicted */
-
- c = atomic_read(&target->obd_connected_clients);
- i = atomic_read(&target->obd_lock_replay_clients);
- k = target->obd_max_recoverable_clients;
- s = target->obd_stale_clients;
- t = target->obd_recovery_timer.expires;
- t = cfs_time_sub(t, cfs_time_current());
- t = cfs_duration_sec(t);
- LCONSOLE_WARN("%s: Denying connection for new client %s"
- "(at %s), waiting for %d known clients "
- "(%d recovered, %d in progress, and %d "
- "evicted) to recover in %d:%.02d\n",
+ struct hrtimer *timer = &target->obd_recovery_timer;
+ ktime_t remaining;
+ s64 timeout, left;
+ int in_progress;
+ int connected;
+ int known;
+ int stale;
+ char *msg;
+
+ connected = atomic_read(&target->obd_connected_clients);
+ in_progress = atomic_read(&target->obd_lock_replay_clients);
+ known = target->obd_max_recoverable_clients;
+ stale = target->obd_stale_clients;
+ remaining = hrtimer_expires_remaining(timer);
+ left = ktime_divns(remaining, NSEC_PER_SEC);
+ if (ktime_to_ns(remaining) > 0) {
+ msg = "to recover in";
+ timeout = left;
+ } else {
+ msg = "already passed deadline";
+ timeout = -left;
+ }
+
+ LCONSOLE_WARN("%s: Denying connection for new client %s (at %s), waiting for %d known clients (%d recovered, %d in progress, and %d evicted) %s %lld:%.02lld\n",
target->obd_name, cluuid.uuid,
- libcfs_nid2str(req->rq_peer.nid), k,
- c - i, i, s, (int)t / 60,
- (int)t % 60);
+ libcfs_nid2str(req->rq_peer.nid), known,
+ connected - in_progress, in_progress,
+ stale, msg, timeout / 60, timeout % 60);
rc = -EBUSY;
} else {
dont_check_exports:
spin_unlock(&export->exp_lock);
CDEBUG(D_RPCTRACE, "%s: %s already connected at greater "
"or equal conn_cnt: %d >= %d\n",
- cluuid.uuid, libcfs_nid2str(req->rq_peer.nid),
- export->exp_conn_cnt,
- lustre_msg_get_conn_cnt(req->rq_reqmsg));
+ cluuid.uuid, libcfs_nid2str(req->rq_peer.nid),
+ export->exp_conn_cnt,
+ lustre_msg_get_conn_cnt(req->rq_reqmsg));
- GOTO(out, rc = -EALREADY);
- }
- LASSERT(lustre_msg_get_conn_cnt(req->rq_reqmsg) > 0);
- export->exp_conn_cnt = lustre_msg_get_conn_cnt(req->rq_reqmsg);
-
- /* Don't evict liblustre clients for not pinging. */
- if (lustre_msg_get_op_flags(req->rq_reqmsg) & MSG_CONNECT_LIBCLIENT) {
- export->exp_libclient = 1;
- spin_unlock(&export->exp_lock);
-
- spin_lock(&target->obd_dev_lock);
- list_del_init(&export->exp_obd_chain_timed);
- spin_unlock(&target->obd_dev_lock);
- } else {
- spin_unlock(&export->exp_lock);
+ GOTO(out, rc = -EALREADY);
}
+ LASSERT(lustre_msg_get_conn_cnt(req->rq_reqmsg) > 0);
+ export->exp_conn_cnt = lustre_msg_get_conn_cnt(req->rq_reqmsg);
+ spin_unlock(&export->exp_lock);
- if (export->exp_connection != NULL) {
+ if (export->exp_connection != NULL) {
/* Check to see if connection came from another NID. */
- if ((export->exp_connection->c_peer.nid != req->rq_peer.nid) &&
+ if ((export->exp_connection->c_peer.nid != req->rq_peer.nid) &&
!hlist_unhashed(&export->exp_nid_hash))
- cfs_hash_del(export->exp_obd->obd_nid_hash,
- &export->exp_connection->c_peer.nid,
- &export->exp_nid_hash);
+ cfs_hash_del(export->exp_obd->obd_nid_hash,
+ &export->exp_connection->c_peer.nid,
+ &export->exp_nid_hash);
- ptlrpc_connection_put(export->exp_connection);
- }
+ ptlrpc_connection_put(export->exp_connection);
+ }
export->exp_connection = ptlrpc_connection_get(req->rq_peer,
req->rq_self,
class_export_put(export);
}
- if (target_referenced == true && target != NULL) {
+ if (target != NULL) {
spin_lock(&target->obd_dev_lock);
target->obd_conn_inprogress--;
spin_unlock(&target->obd_dev_lock);
-
- class_decref(target, __func__, current);
+ class_decref(target, "find", current);
}
req->rq_status = rc;
RETURN(rc);
obd->obd_recovery_end = ktime_get_real_seconds();
/* When recovery finished, cleanup orphans on MDS and OST. */
- if (OBT(obd) && OBP(obd, postrecov)) {
- int rc = OBP(obd, postrecov)(obd);
- if (rc < 0)
- LCONSOLE_WARN("%s: Post recovery failed, rc %d\n",
- obd->obd_name, rc);
- }
+ if (obd->obd_type && OBP(obd, postrecov)) {
+ int rc = OBP(obd, postrecov)(obd);
+
+ if (rc < 0)
+ LCONSOLE_WARN("%s: Post recovery failed, rc %d\n",
+ obd->obd_name, rc);
+ }
EXIT;
}
/* obd_recovery_task_lock should be held */
void target_cancel_recovery_timer(struct obd_device *obd)
{
- CDEBUG(D_HA, "%s: cancel recovery timer\n", obd->obd_name);
- del_timer(&obd->obd_recovery_timer);
+ CDEBUG(D_HA, "%s: cancel recovery timer\n", obd->obd_name);
+ hrtimer_cancel(&obd->obd_recovery_timer);
}
static void target_start_recovery_timer(struct obd_device *obd)
{
+ ktime_t delay;
+
if (obd->obd_recovery_start != 0)
return;
return;
}
- mod_timer(&obd->obd_recovery_timer,
- cfs_time_shift(obd->obd_recovery_timeout));
+ delay = ktime_set(obd->obd_recovery_timeout, 0);
+ hrtimer_start(&obd->obd_recovery_timer, delay, HRTIMER_MODE_REL);
obd->obd_recovery_start = ktime_get_real_seconds();
spin_unlock(&obd->obd_dev_lock);
* if @extend is true, extend recovery window to have @drt remaining at least;
* otherwise, make sure the recovery timeout value is not less than @drt.
*/
-static void extend_recovery_timer(struct obd_device *obd, int drt,
+static void extend_recovery_timer(struct obd_device *obd, time_t drt,
bool extend)
{
- time64_t now;
- time64_t end;
- time64_t left;
- time64_t to;
+ ktime_t left_ns;
+ time_t left;
+ time_t to;
spin_lock(&obd->obd_dev_lock);
if (!obd->obd_recovering || obd->obd_abort_recovery) {
}
LASSERT(obd->obd_recovery_start != 0);
- now = ktime_get_real_seconds();
to = obd->obd_recovery_timeout;
- end = obd->obd_recovery_start + to;
- left = end - now;
-
- if (extend && (drt > left)) {
- to += drt - left;
- } else if (!extend && (drt > to)) {
- to = drt;
- }
+ left_ns = hrtimer_expires_remaining(&obd->obd_recovery_timer);
+ left = ktime_divns(left_ns, NSEC_PER_SEC);
+ if (extend && (drt > left))
+ to += drt - left;
+ else if (!extend && (drt > to))
+ to = drt;
if (to > obd->obd_recovery_time_hard) {
to = obd->obd_recovery_time_hard;
- CWARN("%s: extended recovery timer reaching hard limit: %lld, extend: %d\n",
+ CWARN("%s: extended recovery timer reaching hard limit: %ld, extend: %d\n",
obd->obd_name, to, extend);
}
if (obd->obd_recovery_timeout < to) {
- obd->obd_recovery_timeout = to;
- end = obd->obd_recovery_start + to;
- mod_timer(&obd->obd_recovery_timer,
- cfs_time_shift(end - now));
- }
+ ktime_t now = ktime_get_real();
+ ktime_t end;
+
+ obd->obd_recovery_timeout = to;
+ end = ktime_set(obd->obd_recovery_start + to, 0);
+ left_ns = ktime_sub(end, now);
+ hrtimer_forward_now(&obd->obd_recovery_timer, left_ns);
+ /* convert to seconds: the CDEBUG below reports "%ld seconds" */
+ left = ktime_divns(left_ns, NSEC_PER_SEC);
+ }
spin_unlock(&obd->obd_dev_lock);
- CDEBUG(D_HA, "%s: recovery timer will expire in %lld seconds\n",
- obd->obd_name, (s64)(end - now));
+ CDEBUG(D_HA, "%s: recovery timer will expire in %ld seconds\n",
+ obd->obd_name, left);
}
/* Reset the timer with each new client connection */
* be extended to make sure the client could be reconnected, in the
* process, the timeout from the new client should be ignored.
*/
-
static void
check_and_start_recovery_timer(struct obd_device *obd,
- struct ptlrpc_request *req,
- int new_client)
+ struct ptlrpc_request *req,
+ int new_client)
{
- int service_time = lustre_msg_get_service_time(req->rq_reqmsg);
- struct obd_device_target *obt = &obd->u.obt;
+ time_t service_time = lustre_msg_get_service_time(req->rq_reqmsg);
+ struct obd_device_target *obt = &obd->u.obt;
- if (!new_client && service_time)
- /* Teach server about old server's estimates, as first guess
- * at how long new requests will take. */
+ if (!new_client && service_time)
+ /* Teach server about old server's estimates, as first guess
+ * at how long new requests will take.
+ */
at_measured(&req->rq_rqbd->rqbd_svcpt->scp_at_estimate,
- service_time);
+ service_time);
- target_start_recovery_timer(obd);
+ target_start_recovery_timer(obd);
/* Convert the service time to RPC timeout,
- * and reuse service_time to limit stack usage. */
+ * and reuse service_time to limit stack usage.
+ */
service_time = at_est2timeout(service_time);
if (OBD_FAIL_CHECK(OBD_FAIL_TGT_SLUGGISH_NET) &&
{
struct obd_device *obd = lut->lut_obd;
struct target_distribute_txn_data *tdtd;
+ time64_t last = 0;
+ time64_t now;
repeat:
+ if (obd->obd_recovering && obd->obd_recovery_start == 0) {
+ now = ktime_get_seconds();
+ if (now - last > 600) {
+ LCONSOLE_INFO("%s: in recovery but waiting for "
+ "the first client to connect\n",
+ obd->obd_name);
+ last = now;
+ }
+ }
if (obd->obd_recovery_start != 0 && ktime_get_real_seconds() >=
(obd->obd_recovery_start + obd->obd_recovery_time_hard)) {
__u64 next_update_transno = 0;
(void)handler(req);
lu_context_exit(&thread->t_env->le_ctx);
- /* don't reset timer for final stage */
- if (!exp_finished(req->rq_export)) {
- int to = obd_timeout;
+ /* don't reset timer for final stage */
+ if (!exp_finished(req->rq_export)) {
+ time_t to = obd_timeout;
- /**
- * Add request timeout to the recovery time so next request from
- * this client may come in recovery time
- */
- if (!AT_OFF) {
+ /**
+ * Add request timeout to the recovery time so next request from
+ * this client may come in recovery time
+ */
+ if (!AT_OFF) {
struct ptlrpc_service_part *svcpt;
svcpt = req->rq_rqbd->rqbd_svcpt;
* the client will recalculate the timeout according to
* current server estimate service time, so we will
* use the maxium timeout here for waiting the client
- * sending the next req */
- to = max((int)at_est2timeout(
- at_get(&svcpt->scp_at_estimate)),
- (int)lustre_msg_get_timeout(req->rq_reqmsg));
+ * sending the next req
+ */
+ to = max_t(time_t, at_est2timeout(at_get(&svcpt->scp_at_estimate)),
+ lustre_msg_get_timeout(req->rq_reqmsg));
/* Add 2 net_latency, one for balance rq_deadline
* (see ptl_send_rpc), one for resend the req to server,
* Note: client will pack net_latency in replay req
RETURN(rc);
}
- thread->t_env = env;
- thread->t_id = -1; /* force filter_iobuf_get/put to use local buffers */
- env->le_ctx.lc_thread = thread;
+ thread->t_env = env;
+ thread->t_id = -1; /* force filter_iobuf_get/put to use local buffers */
+ env->le_ctx.lc_thread = thread;
tgt_io_thread_init(thread); /* init thread_big_cache for IO requests */
- thread->t_watchdog = NULL;
CDEBUG(D_HA, "%s: started recovery thread pid %d\n", obd->obd_name,
current_pid());
}
EXPORT_SYMBOL(target_recovery_fini);
-static void target_recovery_expired(unsigned long castmeharder)
+static enum hrtimer_restart target_recovery_expired(struct hrtimer *timer)
{
- struct obd_device *obd = (struct obd_device *)castmeharder;
- CDEBUG(D_HA, "%s: recovery timed out; %d clients are still in recovery"
- " after %llus (%d clients connected)\n",
+ struct obd_device *obd = container_of(timer, struct obd_device,
+ obd_recovery_timer);
+
+ CDEBUG(D_HA,
+ "%s: recovery timed out; %d clients are still in recovery after %llu seconds (%d clients connected)\n",
obd->obd_name, atomic_read(&obd->obd_lock_replay_clients),
- (s64)(ktime_get_real_seconds() - obd->obd_recovery_start),
+ ktime_get_real_seconds() - obd->obd_recovery_start,
atomic_read(&obd->obd_connected_clients));
obd->obd_recovery_expired = 1;
wake_up(&obd->obd_next_transno_waitq);
+ return HRTIMER_NORESTART;
}
void target_recovery_init(struct lu_target *lut, svc_handler_t handler)
CDEBUG(D_HA, "RECOVERY: service %s, %d recoverable clients, "
"last_transno %llu\n", obd->obd_name,
obd->obd_max_recoverable_clients, obd->obd_last_committed);
- LASSERT(obd->obd_stopping == 0);
- obd->obd_next_recovery_transno = obd->obd_last_committed + 1;
- obd->obd_recovery_start = 0;
- obd->obd_recovery_end = 0;
-
- setup_timer(&obd->obd_recovery_timer, target_recovery_expired,
- (unsigned long)obd);
+ LASSERT(obd->obd_stopping == 0);
+ obd->obd_next_recovery_transno = obd->obd_last_committed + 1;
+ obd->obd_recovery_start = 0;
+ obd->obd_recovery_end = 0;
+
+ hrtimer_init(&obd->obd_recovery_timer, CLOCK_REALTIME,
+ HRTIMER_MODE_REL);
+ obd->obd_recovery_timer.function = &target_recovery_expired;
target_start_recovery_thread(lut, handler);
}
EXPORT_SYMBOL(target_recovery_init);
target_process_req_flags(obd, req);
if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LOCK_REPLAY_DONE) {
+ if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_TGT_RECOVERY_REQ_RACE))) {
+ if (cfs_fail_val == 1) {
+ cfs_race_state = 1;
+ cfs_fail_val = 0;
+ wake_up(&cfs_race_waitq);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
+ }
+ }
+
/* client declares he's ready to complete recovery
* so, we put the request on th final queue */
target_request_copy_get(req);
RETURN(0);
}
-int target_handle_ping(struct ptlrpc_request *req)
-{
- obd_ping(req->rq_svc_thread->t_env, req->rq_export);
- return req_capsule_server_pack(&req->rq_pill);
-}
-
void target_committed_to_req(struct ptlrpc_request *req)
{
struct obd_export *exp = req->rq_export;
int target_bulk_io(struct obd_export *exp, struct ptlrpc_bulk_desc *desc,
struct l_wait_info *lwi)
{
- struct ptlrpc_request *req = desc->bd_req;
- time_t start = cfs_time_current_sec();
- time_t deadline;
- int rc = 0;
+ struct ptlrpc_request *req = desc->bd_req;
+ time64_t start = ktime_get_real_seconds();
+ time64_t deadline;
+ int rc = 0;
ENTRY;
deadline = req->rq_deadline;
do {
- long timeoutl = deadline - cfs_time_current_sec();
- cfs_duration_t timeout = timeoutl <= 0 ?
- CFS_TICK : cfs_time_seconds(timeoutl);
- time_t rq_deadline;
+ time64_t timeoutl = deadline - ktime_get_real_seconds();
+ long timeout_jiffies = timeoutl <= 0 ?
+ 1 : cfs_time_seconds(timeoutl);
+ time64_t rq_deadline;
- *lwi = LWI_TIMEOUT_INTERVAL(timeout, cfs_time_seconds(1),
+ *lwi = LWI_TIMEOUT_INTERVAL(timeout_jiffies,
+ cfs_time_seconds(1),
target_bulk_timeout, desc);
rc = l_wait_event(desc->bd_waitq,
!ptlrpc_server_bulk_active(desc) ||
lwi);
LASSERT(rc == 0 || rc == -ETIMEDOUT);
/* Wait again if we changed rq_deadline. */
- rq_deadline = ACCESS_ONCE(req->rq_deadline);
+ rq_deadline = READ_ONCE(req->rq_deadline);
deadline = start + bulk_timeout;
if (deadline > rq_deadline)
deadline = rq_deadline;
- } while ((rc == -ETIMEDOUT) &&
- (deadline > cfs_time_current_sec()));
+ } while (rc == -ETIMEDOUT &&
+ deadline > ktime_get_real_seconds());
if (rc == -ETIMEDOUT) {
- DEBUG_REQ(D_ERROR, req, "timeout on bulk %s after %ld%+lds",
+ DEBUG_REQ(D_ERROR, req, "timeout on bulk %s after %lld%+llds",
bulk2type(req), deadline - start,
- cfs_time_current_sec() - deadline);
+ ktime_get_real_seconds() - deadline);
ptlrpc_abort_bulk(desc);
} else if (exp->exp_failed) {
DEBUG_REQ(D_ERROR, req, "Eviction on bulk %s",