/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
* vim:expandtab:shiftwidth=8:tabstop=8:
*
- * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
- * Author: Mike Shaver <shaver@clusterfs.com>
+ * GPL HEADER START
*
- * This file is part of the Lustre file system, http://www.lustre.org
- * Lustre is a trademark of Cluster File Systems, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * You may have signed or agreed to another license before downloading
- * this software. If so, you are bound by the terms and conditions
- * of that agreement, and the following does not apply to you. See the
- * LICENSE file included with this distribution for more information.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * If you did not agree to a different license, then this copy of Lustre
- * is open source software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * In either case, Lustre is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * license text for more details.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/import.c
+ *
+ * Author: Mike Shaver <shaver@clusterfs.com>
*/
#define DEBUG_SUBSYSTEM S_RPC
int pcaa_initial_connect;
};
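+/* Record each import state transition in a small ring buffer
+ * (IMP_STATE_HIST_LEN entries) together with its timestamp, so the most
+ * recent transitions can be inspected later, e.g. when debugging
+ * recovery problems. */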
+static void __import_set_state(struct obd_import *imp,
+ enum lustre_imp_state state)
+{
+ imp->imp_state = state;
+ imp->imp_state_hist[imp->imp_state_hist_idx].ish_state = state;
+ imp->imp_state_hist[imp->imp_state_hist_idx].ish_time =
+ cfs_time_current_sec();
+ imp->imp_state_hist_idx = (imp->imp_state_hist_idx + 1) %
+ IMP_STATE_HIST_LEN;
+}
+
/* A CLOSED import should remain so. */
#define IMPORT_SET_STATE_NOLOCK(imp, state) \
do { \
imp, obd2cli_tgt(imp->imp_obd), \
ptlrpc_import_state_name(imp->imp_state), \
ptlrpc_import_state_name(state)); \
- imp->imp_state = state; \
+ __import_set_state(imp, state); \
} \
} while(0)
#define IMPORT_SET_STATE(imp, state) \
do { \
- spin_lock(&imp->imp_lock); \
+ cfs_spin_lock(&imp->imp_lock); \
IMPORT_SET_STATE_NOLOCK(imp, state); \
- spin_unlock(&imp->imp_lock); \
+ cfs_spin_unlock(&imp->imp_lock); \
} while(0)
-static int ptlrpc_connect_interpret(struct ptlrpc_request *request,
+static int ptlrpc_connect_interpret(const struct lu_env *env,
+ struct ptlrpc_request *request,
void * data, int rc);
int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
* though. */
int ptlrpc_init_import(struct obd_import *imp)
{
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_generation++;
imp->imp_state = LUSTRE_IMP_NEW;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
return 0;
}
{
int rc = 0;
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
if (imp->imp_state == LUSTRE_IMP_FULL &&
(conn_cnt == 0 || conn_cnt == imp->imp_conn_cnt)) {
libcfs_nid2str(imp->imp_connection->c_peer.nid));
} else {
LCONSOLE_ERROR_MSG(0x166, "%s: Connection to service "
- "%.*s via nid %s was lost; in progress"
- "operations using this service will"
- "fail.\n",
- imp->imp_obd->obd_name,
- target_len, target_start,
- libcfs_nid2str(imp->imp_connection->c_peer.nid));
+ "%.*s via nid %s was lost; in progress "
+ "operations using this service will fail.\n",
+ imp->imp_obd->obd_name,
+ target_len, target_start,
+ libcfs_nid2str(imp->imp_connection->c_peer.nid));
}
ptlrpc_deactivate_timeouts(imp);
IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
- spin_unlock(&imp->imp_lock);
-
+ cfs_spin_unlock(&imp->imp_lock);
+
if (obd_dump_on_timeout)
libcfs_debug_dumplog();
obd_import_event(imp->imp_obd, imp, IMP_EVENT_DISCON);
rc = 1;
} else {
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
CDEBUG(D_HA, "%s: import %p already %s (conn %u, was %u): %s\n",
imp->imp_client->cli_name, imp,
(imp->imp_state == LUSTRE_IMP_FULL &&
CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd));
imp->imp_invalid = 1;
imp->imp_generation++;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
ptlrpc_abort_inflight(imp);
obd_import_event(imp->imp_obd, imp, IMP_EVENT_INACTIVE);
*/
void ptlrpc_deactivate_import(struct obd_import *imp)
{
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
ptlrpc_deactivate_and_unlock_import(imp);
}
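+/* Estimate how many more seconds a request may legitimately remain in
+ * flight: 0 if it is not in an active phase, has already timed out, or
+ * its deadline has passed; otherwise the time left until rq_deadline
+ * (rq_sent for requests still in the NEW phase). */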
+static unsigned int
+ptlrpc_inflight_deadline(struct ptlrpc_request *req, time_t now)
+{
+ long dl;
+
+ if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
+ (req->rq_phase == RQ_PHASE_BULK) ||
+ (req->rq_phase == RQ_PHASE_NEW)))
+ return 0;
+
+ if (req->rq_timedout)
+ return 0;
+
+ if (req->rq_phase == RQ_PHASE_NEW)
+ dl = req->rq_sent;
+ else
+ dl = req->rq_deadline;
+
+ if (dl <= now)
+ return 0;
+
+ return dl - now;
+}
+
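+/* Walk the sending list under imp_lock and return the largest remaining
+ * deadline among in-flight requests; ptlrpc_invalidate_import() uses this
+ * to size its wait for them to error out. */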
+static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp)
+{
+ time_t now = cfs_time_current_sec();
+ cfs_list_t *tmp, *n;
+ struct ptlrpc_request *req;
+ unsigned int timeout = 0;
+
+ cfs_spin_lock(&imp->imp_lock);
+ cfs_list_for_each_safe(tmp, n, &imp->imp_sending_list) {
+ req = cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
+ timeout = max(ptlrpc_inflight_deadline(req, now), timeout);
+ }
+ cfs_spin_unlock(&imp->imp_lock);
+ return timeout;
+}
+
/*
* This function will invalidate the import, if necessary, then block
* for all the RPC completions, and finally notify the obd to
*/
void ptlrpc_invalidate_import(struct obd_import *imp)
{
- struct list_head *tmp, *n;
+ cfs_list_t *tmp, *n;
struct ptlrpc_request *req;
struct l_wait_info lwi;
+ unsigned int timeout;
int rc;
- atomic_inc(&imp->imp_inval_count);
+ cfs_atomic_inc(&imp->imp_inval_count);
/*
* If this is an invalid MGC connection, then don't bother
* waiting for imp_inflight to drop to 0.
*/
- if (imp->imp_invalid && imp->imp_recon_bk && !imp->imp_obd->obd_no_recov)
+ if (imp->imp_invalid && imp->imp_recon_bk &&
+ !imp->imp_obd->obd_no_recov)
goto out;
if (!imp->imp_invalid || imp->imp_obd->obd_no_recov)
LASSERT(imp->imp_invalid);
- /* wait for all requests to error out and call completion callbacks.
- Cap it at obd_timeout -- these should all have been locally
- cancelled by ptlrpc_abort_inflight. */
- lwi = LWI_TIMEOUT_INTERVAL(
- cfs_timeout_cap(cfs_time_seconds(obd_timeout)),
- cfs_time_seconds(1), NULL, NULL);
- rc = l_wait_event(imp->imp_recovery_waitq,
- (atomic_read(&imp->imp_inflight) == 0), &lwi);
-
- if (rc) {
- CERROR("%s: rc = %d waiting for callback (%d != 0)\n",
- obd2cli_tgt(imp->imp_obd), rc,
- atomic_read(&imp->imp_inflight));
- spin_lock(&imp->imp_lock);
- list_for_each_safe(tmp, n, &imp->imp_sending_list) {
- req = list_entry(tmp, struct ptlrpc_request, rq_list);
- DEBUG_REQ(D_ERROR, req, "still on sending list");
- }
- list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
- req = list_entry(tmp, struct ptlrpc_request, rq_list);
- DEBUG_REQ(D_ERROR, req, "still on delayed list");
+ /* Wait forever until inflight == 0. We cannot do this any other
+ * way: in some cases we must wait for a very long reply unlink,
+ * and until that completes there is no guarantee that an RDMA
+ * transfer is not still in progress. */
+ do {
+ /* Calculate the maximum timeout for waiting on RPCs to
+ * error out; fall back to obd_timeout if no in-flight
+ * deadline was found. */
+ if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
+ timeout = ptlrpc_inflight_timeout(imp);
+ timeout += timeout / 3;
+
+ if (timeout == 0)
+ timeout = obd_timeout;
+ } else {
+ /* decrease the interval to widen the fault-injection race window */
+ timeout = 1;
}
- spin_unlock(&imp->imp_lock);
- LASSERT(atomic_read(&imp->imp_inflight) == 0);
- }
+ CDEBUG(D_RPCTRACE, "Sleeping %d sec for inflight to error out\n",
+ timeout);
+
+ /* Wait for all requests to error out and call completion
+ * callbacks. Cap the wait at the timeout computed above --
+ * these should all have been locally cancelled by
+ * ptlrpc_abort_inflight. */
+ lwi = LWI_TIMEOUT_INTERVAL(
+ cfs_timeout_cap(cfs_time_seconds(timeout)),
+ (timeout > 1) ? cfs_time_seconds(1) : cfs_time_seconds(1) / 2,
+ NULL, NULL);
+ rc = l_wait_event(imp->imp_recovery_waitq,
+ (cfs_atomic_read(&imp->imp_inflight) == 0),
+ &lwi);
+ if (rc) {
+ const char *cli_tgt = obd2cli_tgt(imp->imp_obd);
+
+ CERROR("%s: rc = %d waiting for callback (%d != 0)\n",
+ cli_tgt, rc,
+ cfs_atomic_read(&imp->imp_inflight));
+
+ cfs_spin_lock(&imp->imp_lock);
+ if (cfs_atomic_read(&imp->imp_inflight) == 0) {
+ int count = cfs_atomic_read(&imp->imp_unregistering);
+
+ /* Only "unregistering" rpcs can survive on the
+ * sending or delayed lists (they may be waiting
+ * for a long reply unlink on a sluggish net).
+ * Check this: if there is no inflight request
+ * but unregistering != 0, it is a bug. */
+ LASSERTF(count == 0, "Some RPCs are still "
+ "unregistering: %d\n", count);
+
+ /* Save one loop iteration as soon as inflight has
+ * dropped to zero; no new inflight requests are
+ * possible at this point. */
+ rc = 0;
+ } else {
+ cfs_list_for_each_safe(tmp, n,
+ &imp->imp_sending_list) {
+ req = cfs_list_entry(tmp,
+ struct ptlrpc_request,
+ rq_list);
+ DEBUG_REQ(D_ERROR, req,
+ "still on sending list");
+ }
+ cfs_list_for_each_safe(tmp, n,
+ &imp->imp_delayed_list) {
+ req = cfs_list_entry(tmp,
+ struct ptlrpc_request,
+ rq_list);
+ DEBUG_REQ(D_ERROR, req,
+ "still on delayed list");
+ }
+
+ CERROR("%s: RPCs in \"%s\" phase found (%d). "
+ "Network is sluggish? Waiting them "
+ "to error out.\n", cli_tgt,
+ ptlrpc_phase2str(RQ_PHASE_UNREGISTERING),
+ cfs_atomic_read(&imp->
+ imp_unregistering));
+ }
+ cfs_spin_unlock(&imp->imp_lock);
+ }
+ } while (rc != 0);
+
+ /*
+ * Additionally check that no new RPCs were added to the
+ * import while it was in the "invalidate" state.
+ */
+ LASSERT(cfs_atomic_read(&imp->imp_inflight) == 0);
out:
obd_import_event(imp->imp_obd, imp, IMP_EVENT_INVALIDATE);
sptlrpc_import_flush_all_ctx(imp);
- atomic_dec(&imp->imp_inval_count);
- cfs_waitq_signal(&imp->imp_recovery_waitq);
+ cfs_atomic_dec(&imp->imp_inval_count);
+ cfs_waitq_broadcast(&imp->imp_recovery_waitq);
}
/* unset imp_invalid */
{
struct obd_device *obd = imp->imp_obd;
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_invalid = 0;
ptlrpc_activate_timeouts(imp);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
obd_import_event(obd, imp, IMP_EVENT_ACTIVE);
}
CDEBUG(D_HA, "%s: waking up pinger\n",
obd2cli_tgt(imp->imp_obd));
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_force_verify = 1;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
ptlrpc_pinger_wake_up();
}
int ptlrpc_reconnect_import(struct obd_import *imp)
{
-
- ptlrpc_set_import_discon(imp, 0);
+ ptlrpc_set_import_discon(imp, 0);
/* Force a new connect attempt */
ptlrpc_invalidate_import(imp);
/* Do a fresh connect next time by zeroing the handle */
ptlrpc_disconnect_import(imp, 1);
/* Wait for all invalidate calls to finish */
- if (atomic_read(&imp->imp_inval_count) > 0) {
+ if (cfs_atomic_read(&imp->imp_inval_count) > 0) {
int rc;
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
rc = l_wait_event(imp->imp_recovery_waitq,
- (atomic_read(&imp->imp_inval_count) == 0),
+ (cfs_atomic_read(&imp->imp_inval_count) == 0),
&lwi);
if (rc)
- CERROR("Interrupted, inval=%d\n",
- atomic_read(&imp->imp_inval_count));
+ CERROR("Interrupted, inval=%d\n",
+ cfs_atomic_read(&imp->imp_inval_count));
}
/* Allow reconnect attempts */
int tried_all = 1;
ENTRY;
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
- if (list_empty(&imp->imp_conn_list)) {
+ if (cfs_list_empty(&imp->imp_conn_list)) {
CERROR("%s: no connections available\n",
imp->imp_obd->obd_name);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
RETURN(-EINVAL);
}
- list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
+ cfs_list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
CDEBUG(D_HA, "%s: connect to NID %s last attempt "LPU64"\n",
imp->imp_obd->obd_name,
libcfs_nid2str(conn->oic_conn->c_peer.nid),
continue;
}
- /* If we have not tried this connection since the
+ /* If we have not tried this connection since
the last successful attempt, go with this one */
if ((conn->oic_last_attempt == 0) ||
cfs_time_beforeq_64(conn->oic_last_attempt,
}
/* if not found, simply choose the current one */
- if (!imp_conn) {
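+ /* imp_force_reconnect is set in ptlrpc_connect_interpret() when a
+ * reconnect hit a busy export; in that case stay on the current
+ * connection instead of switching to another one. */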
+ if (!imp_conn || imp->imp_force_reconnect) {
LASSERT(imp->imp_conn_current);
imp_conn = imp->imp_conn_current;
tried_all = 0;
!imp->imp_recon_bk /* not retrying */) {
if (at_get(&imp->imp_at.iat_net_latency) <
CONNECTION_SWITCH_MAX) {
- at_add(&imp->imp_at.iat_net_latency,
- at_get(&imp->imp_at.iat_net_latency) +
- CONNECTION_SWITCH_INC);
+ at_measured(&imp->imp_at.iat_net_latency,
+ at_get(&imp->imp_at.iat_net_latency) +
+ CONNECTION_SWITCH_INC);
}
LASSERT(imp_conn->oic_last_attempt);
CWARN("%s: tried all connections, increasing latency to %ds\n",
/* switch connection, don't mind if it's same as the current one */
if (imp->imp_connection)
- ptlrpc_put_connection(imp->imp_connection);
+ ptlrpc_connection_put(imp->imp_connection);
imp->imp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
dlmexp = class_conn2export(&imp->imp_dlm_handle);
LASSERT(dlmexp != NULL);
if (dlmexp->exp_connection)
- ptlrpc_put_connection(dlmexp->exp_connection);
+ ptlrpc_connection_put(dlmexp->exp_connection);
dlmexp->exp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
class_export_put(dlmexp);
if (imp->imp_conn_current != imp_conn) {
if (imp->imp_conn_current)
- LCONSOLE_INFO("Changing connection for %s to %s/%s\n",
- imp->imp_obd->obd_name,
- imp_conn->oic_uuid.uuid,
- libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
+ CDEBUG(D_HA, "Changing connection for %s to %s/%s\n",
+ imp->imp_obd->obd_name, imp_conn->oic_uuid.uuid,
+ libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
imp->imp_conn_current = imp_conn;
}
imp->imp_obd->obd_name, imp, imp_conn->oic_uuid.uuid,
libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
RETURN(0);
}
/*
* must be called under imp_lock
*/
-int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno)
+static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno)
{
struct ptlrpc_request *req;
- struct list_head *tmp;
+ cfs_list_t *tmp;
- if (list_empty(&imp->imp_replay_list))
+ if (cfs_list_empty(&imp->imp_replay_list))
return 0;
tmp = imp->imp_replay_list.next;
- req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
+ req = cfs_list_entry(tmp, struct ptlrpc_request, rq_replay_list);
*transno = req->rq_transno;
if (req->rq_transno == 0) {
DEBUG_REQ(D_ERROR, req, "zero transno in replay");
int rc;
ENTRY;
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
if (imp->imp_state == LUSTRE_IMP_CLOSED) {
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
CERROR("can't connect to a closed import\n");
RETURN(-EINVAL);
} else if (imp->imp_state == LUSTRE_IMP_FULL) {
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
CERROR("already connected\n");
RETURN(0);
} else if (imp->imp_state == LUSTRE_IMP_CONNECTING) {
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
CERROR("already connecting\n");
RETURN(-EALREADY);
}
else
committed_before_reconnect = imp->imp_peer_committed_transno;
- set_transno = ptlrpc_first_transno(imp, &imp->imp_connect_data.ocd_transno);
- spin_unlock(&imp->imp_lock);
+ set_transno = ptlrpc_first_transno(imp,
+ &imp->imp_connect_data.ocd_transno);
+ cfs_spin_unlock(&imp->imp_lock);
if (new_uuid) {
struct obd_uuid uuid;
if (imp->imp_recon_bk) {
CDEBUG(D_HA, "Last reconnection attempt (%d) for %s\n",
imp->imp_conn_cnt, obd2cli_tgt(imp->imp_obd));
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_last_recon = 1;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
}
}
imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
rc = obd_reconnect(NULL, imp->imp_obd->obd_self_export, obd,
- &obd->obd_uuid, &imp->imp_connect_data);
+ &obd->obd_uuid, &imp->imp_connect_data, NULL);
if (rc)
GOTO(out, rc);
GOTO(out, rc);
}
+ /* Report the rpc service time to the server so that it knows how long
+ * to wait for clients to join recovery */
+ lustre_msg_set_service_time(request->rq_reqmsg,
+ at_timeout2est(request->rq_timeout));
+
+ /* rq_timeout is the amount of time we give the server to process
+ * the connect request. import_select_connection will increase the
+ * net latency on repeated reconnect attempts to cover slow networks.
+ * We override/ignore the server rpc completion estimate here,
+ * which may be large if this is a reconnect attempt. */
+ request->rq_timeout = INITIAL_CONNECT_TIMEOUT;
+ lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout);
+
#ifndef __KERNEL__
lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_LIBCLIENT);
#endif
lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_NEXT_VER);
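+ /* Connect requests are neither resent nor delayed: a failure is
+ * reported quickly, and any retry is driven by the import state
+ * machine rather than by the RPC resend path. */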
+ request->rq_no_resend = request->rq_no_delay = 1;
request->rq_send_state = LUSTRE_IMP_CONNECTING;
/* Allow a slightly larger reply for future growth compatibility */
req_capsule_set_size(&request->rq_pill, &RMF_CONNECT_DATA, RCL_SERVER,
aa->pcaa_initial_connect = initial_connect;
if (aa->pcaa_initial_connect) {
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_replayable = 1;
- spin_unlock(&imp->imp_lock);
- lustre_msg_add_op_flags(request->rq_reqmsg,
+ cfs_spin_unlock(&imp->imp_lock);
+ lustre_msg_add_op_flags(request->rq_reqmsg,
MSG_CONNECT_INITIAL);
- if (AT_OFF)
- /* AT will use INITIAL_CONNECT_TIMEOUT the first
- time, adaptive after that. */
- request->rq_timeout = INITIAL_CONNECT_TIMEOUT;
}
if (set_transno)
- lustre_msg_add_op_flags(request->rq_reqmsg,
+ lustre_msg_add_op_flags(request->rq_reqmsg,
MSG_CONNECT_TRANSNO);
- DEBUG_REQ(D_RPCTRACE, request, "(re)connect request");
- ptlrpcd_add_req(request);
+ DEBUG_REQ(D_RPCTRACE, request, "(re)connect request (timeout %d)",
+ request->rq_timeout);
+ ptlrpcd_add_req(request, PSCOPE_OTHER);
rc = 0;
out:
if (rc != 0) {
ENTRY;
- spin_lock(&imp->imp_lock);
- if (list_empty(&imp->imp_conn_list))
+ cfs_spin_lock(&imp->imp_lock);
+ if (cfs_list_empty(&imp->imp_conn_list))
GOTO(unlock, 0);
#ifdef __KERNEL__
- imp_conn = list_entry(imp->imp_conn_list.prev,
- struct obd_import_conn,
- oic_item);
+ imp_conn = cfs_list_entry(imp->imp_conn_list.prev,
+ struct obd_import_conn,
+ oic_item);
/* XXX: When the failover node is the primary node, it is possible
- * to have two identical connections in imp_conn_list. We must
+ * to have two identical connections in imp_conn_list. We must
* compare not conn's pointers but NIDs, otherwise we can defeat
* connection throttling. (See bug 14774.) */
- if (imp->imp_conn_current->oic_conn->c_self !=
- imp_conn->oic_conn->c_self) {
+ if (imp->imp_conn_current->oic_conn->c_peer.nid !=
+ imp_conn->oic_conn->c_peer.nid) {
ptlrpc_ping_import_soon(imp);
wake_pinger = 1;
}
#else
- /* liblustre has no pinger thead, so we wakup pinger anyway */
+ /* liblustre has no pinger thread, so we wake up the pinger anyway */
wake_pinger = 1;
-#endif
+#endif
unlock:
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
if (wake_pinger)
ptlrpc_pinger_wake_up();
EXIT;
}
-static int ptlrpc_connect_interpret(struct ptlrpc_request *request,
- void * data, int rc)
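+/* A reconnect that failed with -EBUSY or -EAGAIN hit a busy export;
+ * the caller then sets imp_force_reconnect so the same target is
+ * retried instead of selecting a new connection. */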
+static int ptlrpc_busy_reconnect(int rc)
+{
+ return (rc == -EBUSY) || (rc == -EAGAIN);
+}
+
+static int ptlrpc_connect_interpret(const struct lu_env *env,
+ struct ptlrpc_request *request,
+ void *data, int rc)
{
struct ptlrpc_connect_async_args *aa = data;
struct obd_import *imp = request->rq_import;
struct client_obd *cli = &imp->imp_obd->u.cli;
struct lustre_handle old_hdl;
+ __u64 old_connect_flags;
int msg_flags;
ENTRY;
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
if (imp->imp_state == LUSTRE_IMP_CLOSED) {
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
RETURN(0);
}
- spin_unlock(&imp->imp_lock);
- if (rc)
+ if (rc) {
+ /* If this reconnect hit a busy export, there is no need to
+ * select a new target for the next connect attempt. */
+ imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc);
+ cfs_spin_unlock(&imp->imp_lock);
GOTO(out, rc);
+ }
LASSERT(imp->imp_conn_current);
msg_flags = lustre_msg_get_op_flags(request->rq_repmsg);
/* All imports are pingable */
- spin_lock(&imp->imp_lock);
imp->imp_pingable = 1;
+ imp->imp_force_reconnect = 0;
if (aa->pcaa_initial_connect) {
if (msg_flags & MSG_CONNECT_REPLAYABLE) {
imp->imp_replayable = 1;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
CDEBUG(D_HA, "connected to replayable target: %s\n",
obd2cli_tgt(imp->imp_obd));
} else {
imp->imp_replayable = 0;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
}
/* if applies, adjust the imp->imp_msg_magic here
imp->imp_remote_handle =
*lustre_msg_get_handle(request->rq_repmsg);
- IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL);
- spin_lock(&imp->imp_lock);
- if (imp->imp_invalid) {
- spin_unlock(&imp->imp_lock);
- ptlrpc_activate_import(imp);
+ /* Initial connects are allowed for clients with non-random
+ * uuids when servers are in recovery. Simply signal the
+ * server that replay is complete and wait in REPLAY_WAIT. */
+ if (msg_flags & MSG_CONNECT_RECOVERING) {
+ CDEBUG(D_HA, "connect to %s during recovery\n",
+ obd2cli_tgt(imp->imp_obd));
+ IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS);
} else {
- spin_unlock(&imp->imp_lock);
+ IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL);
+ ptlrpc_activate_import(imp);
}
GOTO(finish, rc = 0);
} else {
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
}
/* Determine what recovery state to move the import to. */
if (memcmp(&imp->imp_remote_handle,
lustre_msg_get_handle(request->rq_repmsg),
sizeof(imp->imp_remote_handle))) {
+ int level = msg_flags & MSG_CONNECT_RECOVERING ?
+ D_HA : D_WARNING;
+
+ /* Bug 16611/14775: if the server handle has changed, some
+ * sort of disconnection happened. If the server is not in
+ * recovery, it has already erased all of our state because
+ * of a previous eviction. If it is in recovery, we are safe
+ * to participate, since we can re-establish all of our state
+ * with the server again. */
+ CDEBUG(level, "%s@%s changed server handle from "
+ LPX64" to "LPX64"%s\n",
+ obd2cli_tgt(imp->imp_obd),
+ imp->imp_connection->c_remote_uuid.uuid,
+ imp->imp_remote_handle.cookie,
+ lustre_msg_get_handle(request->rq_repmsg)->
+ cookie,
+ (MSG_CONNECT_RECOVERING & msg_flags) ?
+ " but is still in recovery" : "");
- CWARN("%s@%s changed server handle from "
- LPX64" to "LPX64" - evicting.\n",
- obd2cli_tgt(imp->imp_obd),
- imp->imp_connection->c_remote_uuid.uuid,
- imp->imp_remote_handle.cookie,
- lustre_msg_get_handle(request->rq_repmsg)->
- cookie);
imp->imp_remote_handle =
*lustre_msg_get_handle(request->rq_repmsg);
- IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
- GOTO(finish, rc = 0);
+ if (!(MSG_CONNECT_RECOVERING & msg_flags)) {
+ IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
+ GOTO(finish, rc = 0);
+ }
+
} else {
CDEBUG(D_HA, "reconnected to %s@%s after partition\n",
obd2cli_tgt(imp->imp_obd),
imp->imp_obd->obd_name,
obd2cli_tgt(imp->imp_obd));
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_resend_replay = 1;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
} else {
aa->pcaa_peer_committed) {
CERROR("%s went back in time (transno "LPD64
" was previously committed, server now claims "LPD64
- ")! See https://bugzilla.clusterfs.com/"
- "long_list.cgi?buglist=9646\n",
+ ")! See https://bugzilla.lustre.org/show_bug.cgi?"
+ "id=9646\n",
obd2cli_tgt(imp->imp_obd), aa->pcaa_peer_committed,
lustre_msg_get_last_committed(request->rq_repmsg));
}
ocd = req_capsule_server_sized_get(&request->rq_pill,
&RMF_CONNECT_DATA, ret);
- spin_lock(&imp->imp_lock);
- list_del(&imp->imp_conn_current->oic_item);
- list_add(&imp->imp_conn_current->oic_item, &imp->imp_conn_list);
+ cfs_spin_lock(&imp->imp_lock);
+ cfs_list_del(&imp->imp_conn_current->oic_item);
+ cfs_list_add(&imp->imp_conn_current->oic_item,
+ &imp->imp_conn_list);
imp->imp_last_success_conn =
imp->imp_conn_current->oic_last_attempt;
if (ocd == NULL) {
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
CERROR("Wrong connect data from server\n");
rc = -EPROTO;
GOTO(out, rc);
imp->imp_connect_data = *ocd;
exp = class_conn2export(&imp->imp_dlm_handle);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
/* check that server granted subset of flags we asked for. */
LASSERTF((ocd->ocd_connect_flags &
imp->imp_connect_flags_orig, ocd->ocd_connect_flags);
if (!exp) {
- /* This could happen if export is cleaned during the
+ /* This could happen if export is cleaned during the
connect attempt */
- CERROR("Missing export for %s\n",
+ CERROR("Missing export for %s\n",
imp->imp_obd->obd_name);
GOTO(out, rc = -ENODEV);
}
+ old_connect_flags = exp->exp_connect_flags;
exp->exp_connect_flags = ocd->ocd_connect_flags;
- imp->imp_obd->obd_self_export->exp_connect_flags =
+ imp->imp_obd->obd_self_export->exp_connect_flags =
ocd->ocd_connect_flags;
class_export_put(exp);
}
if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) {
- cli->cl_max_pages_per_rpc =
+ cli->cl_max_pages_per_rpc =
ocd->ocd_brw_size >> CFS_PAGE_SHIFT;
}
- imp->imp_obd->obd_namespace->ns_connect_flags =
- ocd->ocd_connect_flags;
- imp->imp_obd->obd_namespace->ns_orig_connect_flags =
- ocd->ocd_connect_flags;
+ /* Reset ns_connect_flags only on the initial connect or when
+ * the server-granted flags actually changed. They may be
+ * modified while the filesystem is in use, and resetting them
+ * on every reconnect would lose settings made earlier, such
+ * as disabling lru_resize. */
+ if (old_connect_flags != exp->exp_connect_flags ||
+ aa->pcaa_initial_connect) {
+ CDEBUG(D_HA, "%s: Resetting ns_connect_flags to server "
+ "flags: "LPX64"\n", imp->imp_obd->obd_name,
+ ocd->ocd_connect_flags);
+ imp->imp_obd->obd_namespace->ns_connect_flags =
+ ocd->ocd_connect_flags;
+ imp->imp_obd->obd_namespace->ns_orig_connect_flags =
+ ocd->ocd_connect_flags;
+ }
if ((ocd->ocd_connect_flags & OBD_CONNECT_AT) &&
(imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
out:
if (rc != 0) {
IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
if (aa->pcaa_initial_connect && !imp->imp_initial_recov &&
(request->rq_import_generation == imp->imp_generation))
ptlrpc_deactivate_and_unlock_import(imp);
else
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
if ((imp->imp_recon_bk && imp->imp_last_recon) ||
(rc == -EACCES)) {
obd2cli_tgt(imp->imp_obd),
(char *)imp->imp_connection->c_remote_uuid.uuid, rc);
}
-
- spin_lock(&imp->imp_lock);
+
+ cfs_spin_lock(&imp->imp_lock);
imp->imp_last_recon = 0;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
- cfs_waitq_signal(&imp->imp_recovery_waitq);
+ cfs_waitq_broadcast(&imp->imp_recovery_waitq);
RETURN(rc);
}
-static int completed_replay_interpret(struct ptlrpc_request *req,
- void * data, int rc)
+static int completed_replay_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
+ void * data, int rc)
{
ENTRY;
- atomic_dec(&req->rq_import->imp_replay_inflight);
- if (req->rq_status == 0) {
+ cfs_atomic_dec(&req->rq_import->imp_replay_inflight);
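+ /* If version based recovery (VBR) has marked the import as failed
+ * (imp_vbr_failed), recovery cannot complete; fall through below
+ * and reconnect instead. */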
+ if (req->rq_status == 0 &&
+ !req->rq_import->imp_vbr_failed) {
ptlrpc_import_recovery_state_machine(req->rq_import);
} else {
- CDEBUG(D_HA, "%s: LAST_REPLAY message error: %d, "
- "reconnecting\n",
- req->rq_import->imp_obd->obd_name, req->rq_status);
+ if (req->rq_import->imp_vbr_failed) {
+ CDEBUG(D_WARNING,
+ "%s: version recovery fails, reconnecting\n",
+ req->rq_import->imp_obd->obd_name);
+ } else {
+ CDEBUG(D_HA, "%s: LAST_REPLAY message error: %d, "
+ "reconnecting\n",
+ req->rq_import->imp_obd->obd_name,
+ req->rq_status);
+ }
ptlrpc_connect_import(req->rq_import, NULL);
}
struct ptlrpc_request *req;
ENTRY;
- LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
- atomic_inc(&imp->imp_replay_inflight);
+ LASSERT(cfs_atomic_read(&imp->imp_replay_inflight) == 0);
+ cfs_atomic_inc(&imp->imp_replay_inflight);
req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING, LUSTRE_OBD_VERSION,
OBD_PING);
if (req == NULL) {
- atomic_dec(&imp->imp_replay_inflight);
+ cfs_atomic_dec(&imp->imp_replay_inflight);
RETURN(-ENOMEM);
}
ptlrpc_request_set_replen(req);
req->rq_send_state = LUSTRE_IMP_REPLAY_WAIT;
- lustre_msg_add_flags(req->rq_reqmsg,
+ lustre_msg_add_flags(req->rq_reqmsg,
MSG_LOCK_REPLAY_DONE | MSG_REQ_REPLAY_DONE);
- req->rq_timeout *= 3;
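+ /* The timeout used to be tripled unconditionally; with adaptive
+ * timeouts enabled the estimate is derived from measured service
+ * times, so the extra margin is only kept for the AT_OFF case. */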
+ if (AT_OFF)
+ req->rq_timeout *= 3;
req->rq_interpret_reply = completed_replay_interpret;
- ptlrpcd_add_req(req);
+ ptlrpcd_add_req(req, PSCOPE_OTHER);
RETURN(0);
}
ENTRY;
- ptlrpc_daemonize("ll_imp_inval");
-
+ cfs_daemonize_ctxt("ll_imp_inval");
+
CDEBUG(D_HA, "thread invalidate import %s to %s@%s\n",
imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
imp->imp_connection->c_remote_uuid.uuid);
IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
ptlrpc_import_recovery_state_machine(imp);
+ class_import_put(imp);
RETURN(0);
}
#endif
CDEBUG(D_HA, "evicted from %s@%s; invalidating\n",
obd2cli_tgt(imp->imp_obd),
imp->imp_connection->c_remote_uuid.uuid);
+ /* reset vbr_failed flag upon eviction */
+ cfs_spin_lock(&imp->imp_lock);
+ imp->imp_vbr_failed = 0;
+ cfs_spin_unlock(&imp->imp_lock);
#ifdef __KERNEL__
+ /* Bug 17802: XXX client_disconnect_export vs connect request
+ * race. If the client is evicted at this point, the invalidate
+ * thread would otherwise start without a reference to the
+ * import, and the import could be freed at the same time. */
+ class_import_get(imp);
rc = cfs_kernel_thread(ptlrpc_invalidate_import_thread, imp,
CLONE_VM | CLONE_FILES);
- if (rc < 0)
+ if (rc < 0) {
+ class_import_put(imp);
CERROR("error starting invalidate thread: %d\n", rc);
- else
+ } else {
rc = 0;
+ }
RETURN(rc);
#else
ptlrpc_invalidate_import(imp);
obd2cli_tgt(imp->imp_obd));
rc = ptlrpc_replay_next(imp, &inflight);
if (inflight == 0 &&
- atomic_read(&imp->imp_replay_inflight) == 0) {
+ cfs_atomic_read(&imp->imp_replay_inflight) == 0) {
IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS);
rc = ldlm_replay_locks(imp);
if (rc)
}
if (imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS) {
- if (atomic_read(&imp->imp_replay_inflight) == 0) {
+ if (cfs_atomic_read(&imp->imp_replay_inflight) == 0) {
IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_WAIT);
rc = signal_completed_replay(imp);
if (rc)
}
if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT) {
- if (atomic_read(&imp->imp_replay_inflight) == 0) {
+ if (cfs_atomic_read(&imp->imp_replay_inflight) == 0) {
IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
}
}
}
if (imp->imp_state == LUSTRE_IMP_FULL) {
- cfs_waitq_signal(&imp->imp_recovery_waitq);
+ cfs_waitq_broadcast(&imp->imp_recovery_waitq);
ptlrpc_wake_delayed(imp);
}
timeout = cfs_time_seconds(
at_get(&imp->imp_at.iat_service_estimate[idx]));
}
-
- lwi = LWI_TIMEOUT_INTR(cfs_timeout_cap(timeout),
+
+ lwi = LWI_TIMEOUT_INTR(cfs_timeout_cap(timeout),
back_to_sleep, LWI_ON_SIGNAL_NOOP, NULL);
rc = l_wait_event(imp->imp_recovery_waitq,
!ptlrpc_import_in_recovery(imp), &lwi);
}
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
if (imp->imp_state != LUSTRE_IMP_FULL)
GOTO(out, 0);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_DISCONNECT,
LUSTRE_OBD_VERSION, rq_opc);
}
set_state:
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
out:
- if (noclose)
+ if (noclose)
IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
else
IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
memset(&imp->imp_remote_handle, 0, sizeof(imp->imp_remote_handle));
- imp->imp_conn_cnt = 0;
/* Try all connections in the future - bz 12758 */
imp->imp_last_recon = 0;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
RETURN(rc);
}
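+/* Force the import into the CLOSED state, bump its generation and
+ * abort any requests still in flight. */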
+void ptlrpc_cleanup_imp(struct obd_import *imp)
+{
+ ENTRY;
+
+ cfs_spin_lock(&imp->imp_lock);
+ IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
+ imp->imp_generation++;
+ cfs_spin_unlock(&imp->imp_lock);
+ ptlrpc_abort_inflight(imp);
+
+ EXIT;
+}
/* Adaptive Timeout utils */
extern unsigned int at_min, at_max, at_history;
This gives us a max of the last binlimit*AT_BINS secs without the storage,
but still smoothing out a return to normalcy from a slow response.
(E.g. remember the maximum latency in each minute of the last 4 minutes.) */
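+/* For instance, if at_history were 240 s with AT_BINS == 4, binlimit
+ * below would be max(240/4, 1) = 60 s and at_current would track the
+ * maximum value observed over the last four one-minute bins, matching
+ * the example above. (Illustrative numbers, not necessarily the
+ * defaults.) */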
-int at_add(struct adaptive_timeout *at, unsigned int val)
+int at_measured(struct adaptive_timeout *at, unsigned int val)
{
unsigned int old = at->at_current;
time_t now = cfs_time_current_sec();
time_t binlimit = max_t(time_t, at_history / AT_BINS, 1);
LASSERT(at);
-#if 0
- CDEBUG(D_INFO, "add %u to %p time=%lu v=%u (%u %u %u %u)\n",
+ CDEBUG(D_OTHER, "add %u to %p time=%lu v=%u (%u %u %u %u)\n",
val, at, now - at->at_binstart, at->at_current,
at->at_hist[0], at->at_hist[1], at->at_hist[2], at->at_hist[3]);
-#endif
+
if (val == 0)
/* 0's don't count, because we never want our timeout to
drop to 0, and because 0 could mean an error */
return 0;
- spin_lock(&at->at_lock);
+ cfs_spin_lock(&at->at_lock);
if (unlikely(at->at_binstart == 0)) {
/* Special case to remove default from history */
at->at_current = min(at->at_current, at_max);
at->at_current = max(at->at_current, at_min);
-#if 0
if (at->at_current != old)
- CDEBUG(D_ADAPTTO, "AT %p change: old=%u new=%u delta=%d "
+ CDEBUG(D_OTHER, "AT %p change: old=%u new=%u delta=%d "
"(val=%u) hist %u %u %u %u\n", at,
old, at->at_current, at->at_current - old, val,
at->at_hist[0], at->at_hist[1], at->at_hist[2],
at->at_hist[3]);
-#endif
/* if we changed, report the old value */
old = (at->at_current != old) ? old : 0;
- spin_unlock(&at->at_lock);
+ cfs_spin_unlock(&at->at_lock);
return old;
}
}
/* Not found in list, add it under a lock */
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
/* Check unused under lock */
for (; i < IMP_AT_MAX_PORTALS; i++) {
at->iat_portal[i] = portal;
out:
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
return i;
}
-