* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2010, 2014, Intel Corporation.
+ * Copyright (c) 2010, 2015, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
struct obd_uuid server_uuid;
int rq_portal, rp_portal, connect_op;
char *name = obddev->obd_type->typ_name;
- ldlm_ns_type_t ns_type = LDLM_NS_TYPE_UNKNOWN;
- int rc;
+ enum ldlm_ns_type ns_type = LDLM_NS_TYPE_UNKNOWN;
char *cli_name = lustre_cfg_buf(lcfg, 0);
+ int rc;
ENTRY;
- /* In a more perfect world, we would hang a ptlrpc_client off of
- * obd_type and just use the values from there. */
+ /* In a more perfect world, we would hang a ptlrpc_client off of
+ * obd_type and just use the values from there. */
if (!strcmp(name, LUSTRE_OSC_NAME)) {
rq_portal = OST_REQUEST_PORTAL;
rp_portal = OSC_REPLY_PORTAL;
INIT_LIST_HEAD(&cli->cl_lru_list);
spin_lock_init(&cli->cl_lru_list_lock);
atomic_long_set(&cli->cl_unstable_count, 0);
+ INIT_LIST_HEAD(&cli->cl_shrink_list);
init_waitqueue_head(&cli->cl_destroy_waitq);
atomic_set(&cli->cl_destroy_in_flight, 0);
struct obd_export *exp,
struct obd_uuid *cluuid)
{
- ENTRY;
+ struct obd_device *target;
+ struct lustre_handle *hdl;
+ cfs_time_t now;
+ cfs_time_t deadline;
+ int timeout;
+ int rc = 0;
+ ENTRY;
- if (exp->exp_connection && exp->exp_imp_reverse) {
- struct lustre_handle *hdl;
- struct obd_device *target;
-
- hdl = &exp->exp_imp_reverse->imp_remote_handle;
- target = exp->exp_obd;
-
- /* Might be a re-connect after a partition. */
- if (!memcmp(&conn->cookie, &hdl->cookie, sizeof conn->cookie)) {
- if (target->obd_recovering) {
- int timeout = cfs_duration_sec(cfs_time_sub(
- cfs_timer_deadline(
- &target->obd_recovery_timer),
- cfs_time_current()));
-
- LCONSOLE_WARN("%s: Client %s (at %s) reconnect"
- "ing, waiting for %d clients in recov"
- "ery for %d:%.02d\n", target->obd_name,
- obd_uuid2str(&exp->exp_client_uuid),
- obd_export_nid2str(exp),
- target->obd_max_recoverable_clients,
- timeout / 60, timeout % 60);
- } else {
- LCONSOLE_WARN("%s: Client %s (at %s) "
- "reconnecting\n", target->obd_name,
- obd_uuid2str(&exp->exp_client_uuid),
- obd_export_nid2str(exp));
- }
+ hdl = &exp->exp_imp_reverse->imp_remote_handle;
+ if (!exp->exp_connection || !lustre_handle_is_used(hdl)) {
+ conn->cookie = exp->exp_handle.h_cookie;
+ CDEBUG(D_HA, "connect export for UUID '%s' at %p,"
+ " cookie "LPX64"\n", cluuid->uuid, exp, conn->cookie);
+ RETURN(0);
+ }
- conn->cookie = exp->exp_handle.h_cookie;
- /* target_handle_connect() treats EALREADY and
- * -EALREADY differently. EALREADY means we are
- * doing a valid reconnect from the same client. */
- RETURN(EALREADY);
- } else {
- LCONSOLE_WARN("%s: already connected client %s (at %s) "
- "with handle "LPX64". Rejecting client "
- "with the same UUID trying to reconnect "
- "with handle "LPX64"\n", target->obd_name,
- obd_uuid2str(&exp->exp_client_uuid),
- obd_export_nid2str(exp),
- hdl->cookie, conn->cookie);
- memset(conn, 0, sizeof *conn);
- /* target_handle_connect() treats EALREADY and
- * -EALREADY differently. -EALREADY is an error
- * (same UUID, different handle). */
- RETURN(-EALREADY);
- }
- }
+ target = exp->exp_obd;
+
+ /* Might be a re-connect after a partition. */
+ if (memcmp(&conn->cookie, &hdl->cookie, sizeof conn->cookie)) {
+ LCONSOLE_WARN("%s: already connected client %s (at %s) "
+ "with handle "LPX64". Rejecting client "
+ "with the same UUID trying to reconnect "
+ "with handle "LPX64"\n", target->obd_name,
+ obd_uuid2str(&exp->exp_client_uuid),
+ obd_export_nid2str(exp),
+ hdl->cookie, conn->cookie);
+ memset(conn, 0, sizeof *conn);
+ /* target_handle_connect() treats EALREADY and
+ * -EALREADY differently. -EALREADY is an error
+ * (same UUID, different handle). */
+ RETURN(-EALREADY);
+ }
- conn->cookie = exp->exp_handle.h_cookie;
- CDEBUG(D_HA, "connect export for UUID '%s' at %p, cookie "LPX64"\n",
- cluuid->uuid, exp, conn->cookie);
- RETURN(0);
+ if (!target->obd_recovering) {
+ LCONSOLE_WARN("%s: Client %s (at %s) reconnecting\n",
+ target->obd_name, obd_uuid2str(&exp->exp_client_uuid),
+ obd_export_nid2str(exp));
+ GOTO(out_already, rc);
+ }
+
+ now = cfs_time_current();
+ deadline = cfs_timer_deadline(&target->obd_recovery_timer);
+ if (cfs_time_before(now, deadline)) {
+ timeout = cfs_duration_sec(cfs_time_sub(deadline, now));
+ LCONSOLE_WARN("%s: Client %s (at %s) reconnecting,"
+ " waiting for %d clients in recovery for"
+ " %d:%.02d\n", target->obd_name,
+ obd_uuid2str(&exp->exp_client_uuid),
+ obd_export_nid2str(exp),
+ target->obd_max_recoverable_clients,
+ timeout / 60, timeout % 60);
+ } else {
+ timeout = cfs_duration_sec(cfs_time_sub(now, deadline));
+ LCONSOLE_WARN("%s: Recovery already passed deadline"
+ " %d:%.02d, It is most likely due to DNE"
+ " recovery is failed or stuck, please wait a"
+ " few more minutes or abort the recovery.\n",
+ target->obd_name, timeout / 60, timeout % 60);
+ }
+
+out_already:
+ conn->cookie = exp->exp_handle.h_cookie;
+ /* target_handle_connect() treats EALREADY and
+ * -EALREADY differently. EALREADY means we are
+ * doing a valid reconnect from the same client. */
+ RETURN(EALREADY);
}
void target_client_add_cb(struct obd_device *obd, __u64 transno, void *cb_data,
check_and_start_recovery_timer(struct obd_device *obd,
struct ptlrpc_request *req, int new_client);
+/**
+ * Update flags for a reverse import during the reconnect process.
+ *
+ * Refreshes the message-header flags of \a revimp from the client's
+ * connect data carried by \a req, then re-adapts the import's security
+ * flavor to the one used by the incoming request.
+ *
+ * \param[in] revimp reverse import to update
+ * \param[in] req client (re)connect request holding the connect data
+ *
+ * \retval 0 on success
+ * \retval negative rc from sptlrpc_import_sec_adapt() on failure
+ */
+static int rev_import_flags_update(struct obd_import *revimp,
+ struct ptlrpc_request *req)
+{
+ int rc;
+ struct obd_connect_data *data;
+
+ data = req_capsule_client_get(&req->rq_pill, &RMF_CONNECT_DATA);
+
+ /* propagate the client's OBD_CONNECT_AT setting to the reverse
+ * import's message-header flags */
+ if (data->ocd_connect_flags & OBD_CONNECT_AT)
+ revimp->imp_msghdr_flags |= MSGHDR_AT_SUPPORT;
+ else
+ revimp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
+
+ /* always set on the reverse import, regardless of connect data */
+ revimp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18;
+
+ rc = sptlrpc_import_sec_adapt(revimp, req->rq_svc_ctx, &req->rq_flvr);
+ if (rc) {
+ CERROR("%s: cannot get reverse import %s security: rc = %d\n",
+ revimp->imp_client->cli_name,
+ libcfs_id2str(req->rq_peer), rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * Allocate and publish a new reverse import for an export.
+ *
+ * \param[in] export export to attach the reverse import to; must not
+ * already have a reverse import (asserted below)
+ *
+ * \retval -ENOMEM in case the import cannot be allocated
+ * \retval 0 if the reverse import is correctly initialized
+ **/
+int rev_import_init(struct obd_export *export)
+{
+ struct obd_device *obd = export->exp_obd;
+ struct obd_import *revimp;
+
+ LASSERT(export->exp_imp_reverse == NULL);
+
+ revimp = class_new_import(obd);
+ if (revimp == NULL)
+ return -ENOMEM;
+
+ /* remote handle is filled in later, from the (re)connect request */
+ revimp->imp_remote_handle.cookie = 0ULL;
+ revimp->imp_client = &obd->obd_ldlm_client;
+ revimp->imp_dlm_fake = 1;
+
+ /* it is safe to connect import in new state as no sends possible */
+ spin_lock(&export->exp_lock);
+ export->exp_imp_reverse = revimp;
+ spin_unlock(&export->exp_lock);
+ /* NOTE(review): assumes class_new_import() returned a referenced
+ * import and the export's exp_imp_reverse pointer keeps it alive
+ * after this put -- confirm against class_new_import() */
+ class_import_put(revimp);
+
+ return 0;
+}
+EXPORT_SYMBOL(rev_import_init);
+
+/**
+ * Handle reconnect for the reverse import of an export.
+ *
+ * Re-points the reverse import at the client's current connection and at
+ * the remote handle taken from the reconnect request, updates its flags
+ * and security flavor, then resends pending RPCs via the new connection.
+ *
+ * \param exp export whose reverse import is being reconnected
+ * \param req client reconnect request
+ *
+ * \retval negative rc in case the security flavor can't be changed
+ * \retval 0 in case of no problems
+ */
+static int rev_import_reconnect(struct obd_export *exp,
+ struct ptlrpc_request *req)
+{
+ struct obd_import *revimp = exp->exp_imp_reverse;
+ struct lustre_handle *lh;
+ int rc;
+
+ /* avoid sending a request until import flags are changed */
+ ptlrpc_import_enter_resend(revimp);
+
+ /* release the reference on the stale connection, if any */
+ if (revimp->imp_connection != NULL)
+ ptlrpc_connection_put(revimp->imp_connection);
+
+ /*
+ * A client coming back from recovery does not have a handle, so we
+ * must take it from the request. This may let a wrong client connect
+ * during recovery, as we only trust the client UUID.
+ */
+ lh = req_capsule_client_get(&req->rq_pill, &RMF_CONN);
+ revimp->imp_remote_handle = *lh;
+
+ /* unknown versions will be caught in
+ * ptlrpc_handle_server_req_in->lustre_unpack_msg() */
+ revimp->imp_msg_magic = req->rq_reqmsg->lm_magic;
+
+ revimp->imp_connection = ptlrpc_connection_addref(exp->exp_connection);
+
+ rc = rev_import_flags_update(revimp, req);
+ if (rc != 0) {
+ /* it is safe to still be in RECOVERY phase as we are not able
+ * to setup correct security flavor so requests are not able to
+ * be delivered correctly */
+ return rc;
+ }
+
+ /* resend all rpc's via new connection */
+ return ptlrpc_import_recovery_state_machine(revimp);
+}
+
int target_handle_connect(struct ptlrpc_request *req)
{
- struct obd_device *target = NULL, *targref = NULL;
- struct obd_export *export = NULL;
- struct obd_import *revimp;
- struct obd_import *tmp_imp = NULL;
- struct lustre_handle conn;
- struct lustre_handle *tmp;
+ struct obd_device *target = NULL;
+ struct obd_export *export = NULL;
+ /* connect handle - filled from target_handle_reconnect in
+ * reconnect case */
+ struct lustre_handle conn;
+ struct lustre_handle *tmp;
struct obd_uuid tgtuuid;
struct obd_uuid cluuid;
- struct obd_uuid remote_uuid;
char *str;
int rc = 0;
char *target_start;
bool mds_conn = false, lw_client = false;
bool mds_mds_conn = false;
bool new_mds_mds_conn = false;
+ bool target_referenced = false;
struct obd_connect_data *data, *tmpdata;
int size, tmpsize;
lnet_nid_t *client_nid = NULL;
/* Make sure the target isn't cleaned up while we're here. Yes,
* there's still a race between the above check and our incref here.
* Really, class_uuid2obd should take the ref. */
- targref = class_incref(target, __FUNCTION__, current);
+ class_incref(target, __func__, current);
+ target_referenced = true;
target->obd_conn_inprogress++;
spin_unlock(&target->obd_dev_lock);
obd_str2uuid(&cluuid, str);
- /* XXX Extract a nettype and format accordingly. */
- switch (sizeof(lnet_nid_t)) {
- /* NB the casts only avoid compiler warnings. */
- case 8:
- snprintf(remote_uuid.uuid, sizeof remote_uuid,
- "NET_"LPX64"_UUID", (__u64)req->rq_peer.nid);
- break;
- case 4:
- snprintf(remote_uuid.uuid, sizeof remote_uuid,
- "NET_%x_UUID", (__u32)req->rq_peer.nid);
- break;
- default:
- LBUG();
- }
-
tmp = req_capsule_client_get(&req->rq_pill, &RMF_CONN);
if (tmp == NULL)
GOTO(out, rc = -EPROTO);
if (mds_conn && OBD_FAIL_CHECK(OBD_FAIL_TGT_RCVG_FLAG))
lustre_msg_add_op_flags(req->rq_repmsg,
MSG_CONNECT_RECOVERING);
- if (rc == 0)
+ if (rc == 0) {
conn.cookie = export->exp_handle.h_cookie;
+ rc = rev_import_init(export);
+ }
if (mds_mds_conn)
new_mds_mds_conn = true;
memcpy(tmpdata, data, min(tmpsize, size));
}
- /* If all else goes well, this is our RPC return code. */
- req->rq_status = 0;
-
- lustre_msg_set_handle(req->rq_repmsg, &conn);
-
/* If the client and the server are the same node, we will already
* have an export that really points to the client's DLM export,
* because we have a shared handles table.
ptlrpc_connection_put(export->exp_connection);
}
- export->exp_connection = ptlrpc_connection_get(req->rq_peer,
- req->rq_self,
- &remote_uuid);
- if (hlist_unhashed(&export->exp_nid_hash)) {
- cfs_hash_add(export->exp_obd->obd_nid_hash,
- &export->exp_connection->c_peer.nid,
- &export->exp_nid_hash);
- }
+ export->exp_connection = ptlrpc_connection_get(req->rq_peer,
+ req->rq_self,
+ &cluuid);
+ if (hlist_unhashed(&export->exp_nid_hash))
+ cfs_hash_add(export->exp_obd->obd_nid_hash,
+ &export->exp_connection->c_peer.nid,
+ &export->exp_nid_hash);
+
+ lustre_msg_set_handle(req->rq_repmsg, &conn);
+
+ rc = rev_import_reconnect(export, req);
+ if (rc != 0)
+ GOTO(out, rc);
if (target->obd_recovering && !export->exp_in_recovery && !lw_client) {
int has_transno;
if (target->obd_recovering && !lw_client)
lustre_msg_add_op_flags(req->rq_repmsg, MSG_CONNECT_RECOVERING);
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_CONN);
- conn = *tmp;
-
- /* Return -ENOTCONN in case of errors to let client reconnect. */
- revimp = class_new_import(target);
- if (revimp == NULL) {
- CERROR("fail to alloc new reverse import.\n");
- GOTO(out, rc = -ENOTCONN);
- }
-
- spin_lock(&export->exp_lock);
- if (export->exp_imp_reverse != NULL)
- /* destroyed import can be still referenced in ctxt */
- tmp_imp = export->exp_imp_reverse;
- export->exp_imp_reverse = revimp;
- spin_unlock(&export->exp_lock);
-
- revimp->imp_connection = ptlrpc_connection_addref(export->exp_connection);
- revimp->imp_client = &export->exp_obd->obd_ldlm_client;
- revimp->imp_remote_handle = conn;
- revimp->imp_dlm_fake = 1;
- revimp->imp_state = LUSTRE_IMP_FULL;
-
- /* Unknown versions will be caught in
- * ptlrpc_handle_server_req_in->lustre_unpack_msg(). */
- revimp->imp_msg_magic = req->rq_reqmsg->lm_magic;
-
- if (data->ocd_connect_flags & OBD_CONNECT_AT)
- revimp->imp_msghdr_flags |= MSGHDR_AT_SUPPORT;
- else
- revimp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
-
- revimp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18;
-
- rc = sptlrpc_import_sec_adapt(revimp, req->rq_svc_ctx, &req->rq_flvr);
- if (rc) {
- CERROR("Failed to get sec for reverse import: %d\n", rc);
- spin_lock(&export->exp_lock);
- export->exp_imp_reverse = NULL;
- spin_unlock(&export->exp_lock);
- class_destroy_import(revimp);
- }
-
- class_import_put(revimp);
-
out:
- if (tmp_imp != NULL)
- client_destroy_import(tmp_imp);
if (export) {
spin_lock(&export->exp_lock);
export->exp_connecting = 0;
class_export_put(export);
}
- if (targref) {
+ if (target_referenced == true && target != NULL) {
spin_lock(&target->obd_dev_lock);
target->obd_conn_inprogress--;
spin_unlock(&target->obd_dev_lock);
- class_decref(targref, __func__, current);
+ class_decref(target, __func__, current);
}
- if (rc)
- req->rq_status = rc;
+ req->rq_status = rc;
RETURN(rc);
}
}
spin_unlock(&obd->obd_recovery_task_lock);
- if (lut->lut_tdtd != NULL &&
- !list_empty(&lut->lut_tdtd->tdtd_replay_list))
- dtrq_list_dump(lut->lut_tdtd, D_ERROR);
-
obd->obd_recovery_end = cfs_time_current_sec();
/* When recovery finished, cleanup orphans on MDS and OST. */
{
struct ptlrpc_request *req, *n;
struct list_head clean_list;
- ENTRY;
INIT_LIST_HEAD(&clean_list);
spin_lock(&obd->obd_dev_lock);
return;
}
obd->obd_recovering = obd->obd_abort_recovery = 0;
- obd->obd_force_abort_recovery = 0;
spin_unlock(&obd->obd_dev_lock);
spin_lock(&obd->obd_recovery_task_lock);
return;
spin_lock(&obd->obd_dev_lock);
- if (!obd->obd_recovering || obd->obd_abort_recovery ||
- obd->obd_force_abort_recovery) {
+ if (!obd->obd_recovering || obd->obd_abort_recovery) {
spin_unlock(&obd->obd_dev_lock);
return;
}
int to;
spin_lock(&obd->obd_dev_lock);
- if (!obd->obd_recovering || obd->obd_abort_recovery ||
- obd->obd_force_abort_recovery) {
+ if (!obd->obd_recovering || obd->obd_abort_recovery) {
spin_unlock(&obd->obd_dev_lock);
return;
}
return (!exp->exp_req_replay_needed ||
atomic_read(&exp->exp_replay_count) > 0);
}
+
+
+/**
+ * True if \a exp connected from another MDT (OBD_CONNECT_MDS_MDS set) or
+ * its request replay is healthy per exp_req_replay_healthy().
+ */
+static inline int exp_req_replay_healthy_or_from_mdt(struct obd_export *exp)
+{
+ return (exp_connect_flags(exp) & OBD_CONNECT_MDS_MDS) ||
+ exp_req_replay_healthy(exp);
+}
+
/** True if the export is in recovery and no longer needs lock replay.
 * NOTE(review): the previous comment also mentioned "has replay in
 * queue", which the code below does not check -- confirm intent. */
static inline int exp_lock_replay_healthy(struct obd_export *exp)
{
 return (exp->exp_in_recovery && !exp->exp_lock_replay_needed);
}
+/**
+ * True if \a exp connected from another MDT (OBD_CONNECT_MDS_MDS set) or
+ * has finished recovery per exp_finished().
+ */
+static inline int exp_finished_or_from_mdt(struct obd_export *exp)
+{
+ return (exp_connect_flags(exp) & OBD_CONNECT_MDS_MDS) ||
+ exp_finished(exp);
+}
+
static int check_for_next_transno(struct lu_target *lut)
{
struct ptlrpc_request *req = NULL;
struct obd_device *obd = lut->lut_obd;
+ struct target_distribute_txn_data *tdtd = lut->lut_tdtd;
int wake_up = 0, connected, completed, queue_len;
__u64 req_transno = 0;
__u64 update_transno = 0;
req_transno = lustre_msg_get_transno(req->rq_reqmsg);
}
- if (lut->lut_tdtd != NULL) {
- struct target_distribute_txn_data *tdtd;
-
- tdtd = lut->lut_tdtd;
- update_transno = distribute_txn_get_next_transno(lut->lut_tdtd);
- }
+ if (tdtd != NULL)
+ update_transno = distribute_txn_get_next_transno(tdtd);
connected = atomic_read(&obd->obd_connected_clients);
completed = connected - atomic_read(&obd->obd_req_replay_clients);
obd->obd_max_recoverable_clients, connected, completed,
queue_len, req_transno, next_transno);
- if (obd->obd_abort_recovery || obd->obd_force_abort_recovery) {
+ if (obd->obd_abort_recovery) {
CDEBUG(D_HA, "waking for aborted recovery\n");
wake_up = 1;
} else if (obd->obd_recovery_expired) {
CDEBUG(D_HA, "waking for expired recovery\n");
wake_up = 1;
+ } else if (tdtd != NULL && req != NULL &&
+ is_req_replayed_by_update(req)) {
+ LASSERTF(req_transno < next_transno, "req_transno "LPU64
+ "next_transno"LPU64"\n", req_transno, next_transno);
+ CDEBUG(D_HA, "waking for duplicate req ("LPU64")\n",
+ req_transno);
+ wake_up = 1;
} else if (req_transno == next_transno ||
(update_transno != 0 && update_transno <= next_transno)) {
CDEBUG(D_HA, "waking for next ("LPD64")\n", next_transno);
} else if (atomic_read(&obd->obd_lock_replay_clients) == 0) {
CDEBUG(D_HA, "waking for completed lock replay\n");
wake_up = 1;
- } else if (obd->obd_abort_recovery || obd->obd_force_abort_recovery) {
+ } else if (obd->obd_abort_recovery) {
CDEBUG(D_HA, "waking for aborted recovery\n");
wake_up = 1;
} else if (obd->obd_recovery_expired) {
int (*health_check)(struct obd_export *))
{
struct obd_device *obd = lut->lut_obd;
+ struct target_distribute_txn_data *tdtd;
repeat:
if ((obd->obd_recovery_start != 0) && (cfs_time_current_sec() >=
(obd->obd_recovery_start + obd->obd_recovery_time_hard))) {
- CWARN("recovery is aborted by hard timeout\n");
- obd->obd_abort_recovery = 1;
+ __u64 next_update_transno = 0;
+
+ /* Only abort the recovery if there are no update recovery
+ * left in the queue */
+ spin_lock(&obd->obd_recovery_task_lock);
+ if (lut->lut_tdtd != NULL) {
+ next_update_transno =
+ distribute_txn_get_next_transno(lut->lut_tdtd);
+
+ tdtd = lut->lut_tdtd;
+ /* If next_update_transno == 0, it probably because
+ * updatelog retrieve threads did not get any records
+ * yet, let's wait those threads stopped */
+ if (next_update_transno == 0) {
+ struct l_wait_info lwi = { 0 };
+
+ l_wait_event(tdtd->tdtd_recovery_threads_waitq,
+ atomic_read(
+ &tdtd->tdtd_recovery_threads_count) == 0,
+ &lwi);
+
+ next_update_transno =
+ distribute_txn_get_next_transno(
+ lut->lut_tdtd);
+ }
+ }
+
+ if (next_update_transno != 0 && !obd->obd_abort_recovery) {
+ obd->obd_next_recovery_transno = next_update_transno;
+ spin_unlock(&obd->obd_recovery_task_lock);
+ /* Disconnect unfinished exports from clients, and
+ * keep connection from MDT to make sure the update
+ * recovery will still keep trying until some one
+ * manually abort the recovery */
+ class_disconnect_stale_exports(obd,
+ exp_finished_or_from_mdt);
+ /* Abort all of replay and replay lock req from
+ * clients */
+ abort_req_replay_queue(obd);
+ abort_lock_replay_queue(obd);
+ CDEBUG(D_HA, "%s: there are still update replay ("LPX64
+ ")in the queue.\n", obd->obd_name,
+ next_update_transno);
+ } else {
+ obd->obd_abort_recovery = 1;
+ spin_unlock(&obd->obd_recovery_task_lock);
+ CWARN("%s recovery is aborted by hard timeout\n",
+ obd->obd_name);
+ }
}
while (wait_event_timeout(obd->obd_next_transno_waitq,
msecs_to_jiffies(60 * MSEC_PER_SEC)) == 0)
/* wait indefinitely for event, but don't trigger watchdog */;
- if (obd->obd_abort_recovery || obd->obd_force_abort_recovery) {
+ if (obd->obd_abort_recovery) {
CWARN("recovery is aborted, evict exports in recovery\n");
+ if (lut->lut_tdtd != NULL) {
+ struct l_wait_info lwi = { 0 };
+
+ tdtd = lut->lut_tdtd;
+ /* Let's wait all of the update log recovery thread
+ * finished */
+ l_wait_event(tdtd->tdtd_recovery_threads_waitq,
+ atomic_read(&tdtd->tdtd_recovery_threads_count) == 0,
+ &lwi);
+ /* Then abort the update recovery list */
+ dtrq_list_dump(lut->lut_tdtd, D_ERROR);
+ dtrq_list_destroy(lut->lut_tdtd);
+ }
+
/** evict exports which didn't finish recovery yet */
class_disconnect_stale_exports(obd, exp_finished);
return 1;
"evict stale exports\n", obd->obd_name);
/** evict cexports with no replay in queue, they are stalled */
class_disconnect_stale_exports(obd, health_check);
+
/** continue with VBR */
spin_lock(&obd->obd_dev_lock);
obd->obd_version_recov = 1;
obd->obd_max_recoverable_clients, obd->obd_abort_recovery,
obd->obd_recovery_expired);
- if (obd->obd_force_abort_recovery)
- return 1;
-
if (!obd->obd_abort_recovery && !obd->obd_recovery_expired) {
LASSERT(clnts <= obd->obd_max_recoverable_clients);
if (clnts + obd->obd_stale_clients <
}
if (lut->lut_tdtd != NULL) {
- if (!lut->lut_tdtd->tdtd_replay_ready) {
+ if (!lut->lut_tdtd->tdtd_replay_ready &&
+ !obd->obd_abort_recovery) {
/* Let's extend recovery timer, in case the recovery
* timer expired, and some clients got evicted */
extend_recovery_timer(obd, obd->obd_recovery_timeout,
true);
+ CDEBUG(D_HA, "%s update recovery is not ready,"
+ " extend recovery %d\n", obd->obd_name,
+ obd->obd_recovery_timeout);
return 0;
} else {
dtrq_list_dump(lut->lut_tdtd, D_HA);
return transno;
}
-__u64 get_next_transno(struct lu_target *lut, int *type)
+
+static __u64 get_next_transno(struct lu_target *lut, int *type)
{
struct obd_device *obd = lut->lut_obd;
struct target_distribute_txn_data *tdtd = lut->lut_tdtd;
obd->obd_replayed_requests++;
}
-/**
- * Update last_rcvd of the update
- *
- * Because update recovery might update the last_rcvd by updates, i.e.
- * it will not update the last_rcvd information in memory, so we need
- * refresh these information in memory after update recovery.
- *
- * \param[in] obd obd_device under recoverying.
- * \param[in] dtrq the update replay requests being replayed.
- */
-static void target_update_lcd(struct lu_env *env, struct lu_target *lut,
- struct distribute_txn_replay_req *dtrq)
-{
- struct obd_device *obd = lut->lut_obd;
- struct obd_export *export;
- struct tg_export_data *ted;
- struct distribute_txn_replay_req_sub *dtrqs;
- struct seq_server_site *site;
- struct update_records *ur;
- const struct lu_fid *fid;
- struct update_ops *ops;
- struct update_params *params;
- struct update_op *op;
- __u32 mdt_index;
- unsigned int i;
- struct lsd_client_data *lcd = NULL;
-
- /* if Updates has been executed(committed) on the recovery target,
- * i.e. the updates is not being executed on the target, so we do
- * not need update it in memory */
- site = lu_site2seq(obd->obd_lu_dev->ld_site);
- mdt_index = site->ss_node_id;
- dtrqs = dtrq_sub_lookup(dtrq, mdt_index);
- if (dtrqs != NULL)
- return;
-
- if (dtrq->dtrq_lur == NULL)
- return;
-
- /* Find the update last_rcvd record */
- fid = lu_object_fid(&lut->lut_last_rcvd->do_lu);
- ur = &dtrq->dtrq_lur->lur_update_rec;
- ops = &ur->ur_ops;
- params = update_records_get_params(ur);
- for (i = 0, op = &ops->uops_op[0]; i < ur->ur_update_count;
- i++, op = update_op_next_op(op)) {
- __u64 pos;
- __u16 size;
- void *buf;
-
- if (!lu_fid_eq(&op->uop_fid, fid))
- continue;
-
- if (op->uop_type != OUT_WRITE)
- continue;
-
- buf = update_params_get_param_buf(params, op->uop_params_off[1],
- ur->ur_param_count, NULL);
- if (buf == NULL)
- continue;
-
- pos = le64_to_cpu(*(__u64 *)buf);
- if (pos == 0)
- continue;
-
- buf = update_params_get_param_buf(params, op->uop_params_off[0],
- ur->ur_param_count, &size);
- if (buf == NULL)
- continue;
-
- if (size != sizeof(*lcd))
- continue;
- lcd = buf;
- }
-
- if (lcd == NULL || lcd->lcd_uuid[0] == '\0')
- return;
-
- /* locate the export then update the exp_target_data if needed */
- export = cfs_hash_lookup(obd->obd_uuid_hash, lcd->lcd_uuid);
- if (export == NULL)
- return;
-
- ted = &export->exp_target_data;
- if (lcd->lcd_last_xid > ted->ted_lcd->lcd_last_xid) {
- CDEBUG(D_HA, "%s update xid from "LPU64" to "LPU64"\n",
- lut->lut_obd->obd_name, ted->ted_lcd->lcd_last_xid,
- lcd->lcd_last_xid);
- ted->ted_lcd->lcd_last_xid = lcd->lcd_last_xid;
- ted->ted_lcd->lcd_last_result = lcd->lcd_last_result;
- }
- class_export_put(export);
-}
-
static void replay_request_or_update(struct lu_env *env,
struct lu_target *lut,
struct target_recovery_data *trd,
CFS_FAIL_TIMEOUT_MS(OBD_FAIL_TGT_REPLAY_DELAY, 300);
if (target_recovery_overseer(lut, check_for_next_transno,
- exp_req_replay_healthy)) {
+ exp_req_replay_healthy_or_from_mdt)) {
abort_req_replay_queue(obd);
abort_lock_replay_queue(obd);
+ goto abort;
}
spin_lock(&obd->obd_recovery_task_lock);
transno = get_next_transno(lut, &type);
- if (type == REQUEST_RECOVERY && tdtd != NULL &&
- transno == tdtd->tdtd_last_update_transno) {
+ if (type == REQUEST_RECOVERY && transno != 0) {
/* Drop replay request from client side, if the
* replay has been executed by update with the
* same transno */
req = list_entry(obd->obd_req_replay_queue.next,
struct ptlrpc_request, rq_list);
+
list_del_init(&req->rq_list);
obd->obd_requests_queued_for_recovery--;
spin_unlock(&obd->obd_recovery_task_lock);
- drop_duplicate_replay_req(env, obd, req);
- } else if (type == REQUEST_RECOVERY && transno != 0) {
- req = list_entry(obd->obd_req_replay_queue.next,
- struct ptlrpc_request, rq_list);
- list_del_init(&req->rq_list);
- obd->obd_requests_queued_for_recovery--;
- spin_unlock(&obd->obd_recovery_task_lock);
+
+ /* Let's check if the request has been redone by
+ * update replay */
+ if (is_req_replayed_by_update(req)) {
+ struct distribute_txn_replay_req *dtrq;
+
+ dtrq = distribute_txn_lookup_finish_list(tdtd,
+ req->rq_xid);
+ LASSERT(dtrq != NULL);
+ spin_lock(&tdtd->tdtd_replay_list_lock);
+ list_del_init(&dtrq->dtrq_list);
+ spin_unlock(&tdtd->tdtd_replay_list_lock);
+ dtrq_destroy(dtrq);
+
+ drop_duplicate_replay_req(env, obd, req);
+
+ continue;
+ }
+
LASSERT(trd->trd_processing_task == current_pid());
DEBUG_REQ(D_HA, req, "processing t"LPD64" from %s",
lustre_msg_get_transno(req->rq_reqmsg),
obd->obd_replayed_requests++;
} else if (type == UPDATE_RECOVERY && transno != 0) {
struct distribute_txn_replay_req *dtrq;
+ int rc;
spin_unlock(&obd->obd_recovery_task_lock);
LASSERT(tdtd != NULL);
dtrq = distribute_txn_get_next_req(tdtd);
lu_context_enter(&thread->t_env->le_ctx);
- tdtd->tdtd_replay_handler(env, tdtd, dtrq);
+ rc = tdtd->tdtd_replay_handler(env, tdtd, dtrq);
lu_context_exit(&thread->t_env->le_ctx);
extend_recovery_timer(obd, obd_timeout, true);
- LASSERT(tdtd->tdtd_last_update_transno <= transno);
- tdtd->tdtd_last_update_transno = transno;
- spin_lock(&obd->obd_recovery_task_lock);
- if (transno > obd->obd_next_recovery_transno)
- obd->obd_next_recovery_transno = transno;
- spin_unlock(&obd->obd_recovery_task_lock);
- target_update_lcd(env, lut, dtrq);
- dtrq_destroy(dtrq);
+
+ if (rc == 0 && dtrq->dtrq_xid != 0) {
+ CDEBUG(D_HA, "Move x"LPU64" t"LPU64
+ " to finish list\n", dtrq->dtrq_xid,
+ dtrq->dtrq_master_transno);
+
+ /* Add it to the replay finish list */
+ spin_lock(&tdtd->tdtd_replay_list_lock);
+ list_add(&dtrq->dtrq_list,
+ &tdtd->tdtd_replay_finish_list);
+ spin_unlock(&tdtd->tdtd_replay_list_lock);
+
+ spin_lock(&obd->obd_recovery_task_lock);
+ if (transno == obd->obd_next_recovery_transno)
+ obd->obd_next_recovery_transno++;
+ else if (transno >
+ obd->obd_next_recovery_transno)
+ obd->obd_next_recovery_transno =
+ transno + 1;
+ spin_unlock(&obd->obd_recovery_task_lock);
+ } else {
+ dtrq_destroy(dtrq);
+ }
} else {
spin_unlock(&obd->obd_recovery_task_lock);
+abort:
LASSERT(list_empty(&obd->obd_req_replay_queue));
LASSERT(atomic_read(&obd->obd_req_replay_clients) == 0);
/** evict exports failed VBR */
struct obd_device *obd = lut->lut_obd;
int rc = 0;
struct target_recovery_data *trd = &obd->obd_recovery_data;
+ int index;
memset(trd, 0, sizeof(*trd));
init_completion(&trd->trd_starting);
init_completion(&trd->trd_finishing);
trd->trd_recovery_handler = handler;
+ rc = server_name2index(obd->obd_name, &index, NULL);
+ if (rc < 0)
+ return rc;
+
if (!IS_ERR(kthread_run(target_recovery_thread,
- lut, "tgt_recov"))) {
+ lut, "tgt_recover_%d", index))) {
wait_for_completion(&trd->trd_starting);
LASSERT(obd->obd_recovering != 0);
} else {
CDEBUG(D_HA, "Next recovery transno: "LPU64
", current: "LPU64", replaying\n",
obd->obd_next_recovery_transno, transno);
+
+ /* If the request has been replayed by update replay, then sends this
+ * request to the recovery thread (replay_request_or_update()), where
+ * it will be handled */
spin_lock(&obd->obd_recovery_task_lock);
- if (transno < obd->obd_next_recovery_transno) {
+ if (transno < obd->obd_next_recovery_transno &&
+ !is_req_replayed_by_update(req)) {
/* Processing the queue right now, don't re-add. */
LASSERT(list_empty(&req->rq_list));
spin_unlock(&obd->obd_recovery_task_lock);
DEBUG_REQ(D_ERROR, req, "dropping reply");
return -ECOMM;
}
- if (unlikely(lustre_msg_get_opc(req->rq_reqmsg) == MDS_REINT &&
- OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_MULTI_NET_REP)))
+ /* We can have a null rq_reqmsg in the event of bad signature or
+ * no context when unwrapping */
+ if (req->rq_reqmsg &&
+ unlikely(lustre_msg_get_opc(req->rq_reqmsg) == MDS_REINT &&
+ OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_MULTI_NET_REP)))
return -ECOMM;
if (unlikely(rc)) {
EXIT;
}
-ldlm_mode_t lck_compat_array[] = {
+enum ldlm_mode lck_compat_array[] = {
[LCK_EX] = LCK_COMPAT_EX,
[LCK_PW] = LCK_COMPAT_PW,
[LCK_PR] = LCK_COMPAT_PR,
* Rather arbitrary mapping from LDLM error codes to errno values. This should
* not escape to the user level.
*/
-int ldlm_error2errno(ldlm_error_t error)
+int ldlm_error2errno(enum ldlm_error error)
{
- int result;
+ int result;
- switch (error) {
- case ELDLM_OK:
+ switch (error) {
+ case ELDLM_OK:
case ELDLM_LOCK_MATCHED:
result = 0;
break;
case ELDLM_BAD_NAMESPACE:
result = -EBADF;
break;
- default:
- if (((int)error) < 0) /* cast to signed type */
- result = error; /* as ldlm_error_t can be unsigned */
- else {
- CERROR("Invalid DLM result code: %d\n", error);
- result = -EPROTO;
- }
- }
- return result;
+ default:
+ if (((int)error) < 0) { /* cast to signed type */
+ result = error; /* as ldlm_error can be unsigned */
+ } else {
+ CERROR("Invalid DLM result code: %d\n", error);
+ result = -EPROTO;
+ }
+ }
+ return result;
}
EXPORT_SYMBOL(ldlm_error2errno);
/**
- * Dual to ldlm_error2errno(): maps errno values back to ldlm_error_t.
+ * Dual to ldlm_error2errno(): maps errno values back to enum ldlm_error.
*/
-ldlm_error_t ldlm_errno2error(int err_no)
+enum ldlm_error ldlm_errno2error(int err_no)
{
int error;