X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fptlrpc%2Fimport.c;h=8ca1dec6853694a26915390ff7161fc01648af42;hb=93d20d171c20491a96e5e85d7442a002f300619d;hp=43dda4028866f11bc2fbcee037ad32fa379a8884;hpb=98fc84a0f0ddcd49d249d375243969269f351c84;p=fs%2Flustre-release.git diff --git a/lustre/ptlrpc/import.c b/lustre/ptlrpc/import.c index 43dda40..8ca1dec 100644 --- a/lustre/ptlrpc/import.c +++ b/lustre/ptlrpc/import.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -27,7 +23,7 @@ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2011, Whamcloud, Inc. + * Copyright (c) 2011, 2017, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -39,10 +35,8 @@ */ #define DEBUG_SUBSYSTEM S_RPC -#ifndef __KERNEL__ -# include -#endif +#include #include #include #include @@ -66,10 +60,22 @@ struct ptlrpc_connect_async_args { static void __import_set_state(struct obd_import *imp, enum lustre_imp_state state) { + switch (state) { + case LUSTRE_IMP_CLOSED: + case LUSTRE_IMP_NEW: + case LUSTRE_IMP_DISCON: + case LUSTRE_IMP_CONNECTING: + break; + case LUSTRE_IMP_REPLAY_WAIT: + imp->imp_replay_state = LUSTRE_IMP_REPLAY_LOCKS; + break; + default: + imp->imp_replay_state = LUSTRE_IMP_REPLAY; + } imp->imp_state = state; imp->imp_state_hist[imp->imp_state_hist_idx].ish_state = state; imp->imp_state_hist[imp->imp_state_hist_idx].ish_time = - cfs_time_current_sec(); + ktime_get_real_seconds(); imp->imp_state_hist_idx = (imp->imp_state_hist_idx + 1) % IMP_STATE_HIST_LEN; } @@ -86,13 +92,19 @@ do { \ } \ } while(0) -#define IMPORT_SET_STATE(imp, state) \ -do { \ - cfs_spin_lock(&imp->imp_lock); \ - IMPORT_SET_STATE_NOLOCK(imp, state); \ - cfs_spin_unlock(&imp->imp_lock); \ +#define IMPORT_SET_STATE(imp, state) \ +do { \ + spin_lock(&imp->imp_lock); \ + IMPORT_SET_STATE_NOLOCK(imp, state); \ + spin_unlock(&imp->imp_lock); \ } while(0) +void ptlrpc_import_enter_resend(struct obd_import *imp) +{ + IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER); +} +EXPORT_SYMBOL(ptlrpc_import_enter_resend); + static int ptlrpc_connect_interpret(const struct lu_env *env, struct ptlrpc_request *request, @@ -106,14 +118,14 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp); * though. 
*/ int ptlrpc_init_import(struct obd_import *imp) { - cfs_spin_lock(&imp->imp_lock); + spin_lock(&imp->imp_lock); - imp->imp_generation++; - imp->imp_state = LUSTRE_IMP_NEW; + imp->imp_generation++; + imp->imp_state = LUSTRE_IMP_NEW; - cfs_spin_unlock(&imp->imp_lock); + spin_unlock(&imp->imp_lock); - return 0; + return 0; } EXPORT_SYMBOL(ptlrpc_init_import); @@ -132,7 +144,6 @@ void deuuidify(char *uuid, const char *prefix, char **uuid_start, int *uuid_len) UUID_STR, strlen(UUID_STR))) *uuid_len -= strlen(UUID_STR); } -EXPORT_SYMBOL(deuuidify); /** * Returns true if import was FULL, false if import was already not @@ -147,9 +158,9 @@ EXPORT_SYMBOL(deuuidify); */ int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt) { - int rc = 0; + int rc = 0; - cfs_spin_lock(&imp->imp_lock); + spin_lock(&imp->imp_lock); if (imp->imp_state == LUSTRE_IMP_FULL && (conn_cnt == 0 || conn_cnt == imp->imp_conn_cnt)) { @@ -164,26 +175,24 @@ int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt) "lost; in progress operations using this " "service will wait for recovery to complete\n", imp->imp_obd->obd_name, target_len, target_start, - libcfs_nid2str(imp->imp_connection->c_peer.nid)); - } else { - LCONSOLE_ERROR_MSG(0x166, "%s: Connection to " - "%.*s (at %s) was lost; in progress " - "operations using this service will fail\n", - imp->imp_obd->obd_name, - target_len, target_start, - libcfs_nid2str(imp->imp_connection->c_peer.nid)); - } - ptlrpc_deactivate_timeouts(imp); - IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON); - cfs_spin_unlock(&imp->imp_lock); + obd_import_nid2str(imp)); + } else { + LCONSOLE_ERROR_MSG(0x166, "%s: Connection to " + "%.*s (at %s) was lost; in progress " + "operations using this service will fail\n", + imp->imp_obd->obd_name, target_len, target_start, + obd_import_nid2str(imp)); + } + IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON); + spin_unlock(&imp->imp_lock); - if (obd_dump_on_timeout) - libcfs_debug_dumplog(); + if (obd_dump_on_timeout) + libcfs_debug_dumplog(); - obd_import_event(imp->imp_obd, imp, IMP_EVENT_DISCON); - rc = 1; - } else { - cfs_spin_unlock(&imp->imp_lock); + obd_import_event(imp->imp_obd, imp, IMP_EVENT_DISCON); + rc = 1; + } else { + spin_unlock(&imp->imp_lock); CDEBUG(D_HA, "%s: import %p already %s (conn %u, was %u): %s\n", imp->imp_client->cli_name, imp, (imp->imp_state == LUSTRE_IMP_FULL && @@ -198,18 +207,18 @@ int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt) /* Must be called with imp_lock held! 
*/ static void ptlrpc_deactivate_and_unlock_import(struct obd_import *imp) { - ENTRY; - LASSERT_SPIN_LOCKED(&imp->imp_lock); + ENTRY; + assert_spin_locked(&imp->imp_lock); - CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd)); - imp->imp_invalid = 1; - imp->imp_generation++; - cfs_spin_unlock(&imp->imp_lock); + CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd)); + imp->imp_invalid = 1; + imp->imp_generation++; + spin_unlock(&imp->imp_lock); - ptlrpc_abort_inflight(imp); - obd_import_event(imp->imp_obd, imp, IMP_EVENT_INACTIVE); + ptlrpc_abort_inflight(imp); + obd_import_event(imp->imp_obd, imp, IMP_EVENT_INACTIVE); - EXIT; + EXIT; } /* @@ -218,15 +227,15 @@ static void ptlrpc_deactivate_and_unlock_import(struct obd_import *imp) */ void ptlrpc_deactivate_import(struct obd_import *imp) { - cfs_spin_lock(&imp->imp_lock); - ptlrpc_deactivate_and_unlock_import(imp); + spin_lock(&imp->imp_lock); + ptlrpc_deactivate_and_unlock_import(imp); } EXPORT_SYMBOL(ptlrpc_deactivate_import); -static unsigned int -ptlrpc_inflight_deadline(struct ptlrpc_request *req, time_t now) +static time64_t ptlrpc_inflight_deadline(struct ptlrpc_request *req, + time64_t now) { - long dl; + time64_t dl; if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) || (req->rq_phase == RQ_PHASE_BULK) || @@ -247,20 +256,20 @@ ptlrpc_inflight_deadline(struct ptlrpc_request *req, time_t now) return dl - now; } -static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp) +static time64_t ptlrpc_inflight_timeout(struct obd_import *imp) { - time_t now = cfs_time_current_sec(); - cfs_list_t *tmp, *n; - struct ptlrpc_request *req; - unsigned int timeout = 0; - - cfs_spin_lock(&imp->imp_lock); - cfs_list_for_each_safe(tmp, n, &imp->imp_sending_list) { - req = cfs_list_entry(tmp, struct ptlrpc_request, rq_list); - timeout = max(ptlrpc_inflight_deadline(req, now), timeout); - } - cfs_spin_unlock(&imp->imp_lock); - return timeout; + time64_t now = ktime_get_real_seconds(); + struct list_head *tmp, *n; + struct ptlrpc_request *req; + time64_t timeout = 0; + + spin_lock(&imp->imp_lock); + list_for_each_safe(tmp, n, &imp->imp_sending_list) { + req = list_entry(tmp, struct ptlrpc_request, rq_list); + timeout = max(ptlrpc_inflight_deadline(req, now), timeout); + } + spin_unlock(&imp->imp_lock); + return timeout; } /** @@ -271,130 +280,151 @@ static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp) */ void ptlrpc_invalidate_import(struct obd_import *imp) { - cfs_list_t *tmp, *n; - struct ptlrpc_request *req; - struct l_wait_info lwi; - unsigned int timeout; - int rc; + struct list_head *tmp, *n; + struct ptlrpc_request *req; + struct l_wait_info lwi; + time64_t timeout; + int rc; - cfs_atomic_inc(&imp->imp_inval_count); + atomic_inc(&imp->imp_inval_count); - if (!imp->imp_invalid || imp->imp_obd->obd_no_recov) - ptlrpc_deactivate_import(imp); + if (!imp->imp_invalid || imp->imp_obd->obd_no_recov) + ptlrpc_deactivate_import(imp); - LASSERT(imp->imp_invalid); + CFS_FAIL_TIMEOUT(OBD_FAIL_MGS_CONNECT_NET, 3 * cfs_fail_val / 2); + LASSERT(imp->imp_invalid); /* Wait forever until inflight == 0. We really can't do it another * way because in some cases we need to wait for very long reply * unlink. We can't do anything before that because there is really * no guarantee that some rdma transfer is not in progress right now. */ do { + long timeout_jiffies; + /* Calculate max timeout for waiting on rpcs to error * out. Use obd_timeout if calculated value is smaller - * than it. 
*/ - if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) { - timeout = ptlrpc_inflight_timeout(imp); - timeout += timeout / 3; - - if (timeout == 0) - timeout = obd_timeout; - } else { - /* decrease the interval to increase race condition */ - timeout = 1; - } - - CDEBUG(D_RPCTRACE,"Sleeping %d sec for inflight to error out\n", - timeout); + * than it. + */ + if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) { + timeout = ptlrpc_inflight_timeout(imp); + timeout += div_u64(timeout, 3); + + if (timeout == 0) + timeout = obd_timeout; + } else { + /* decrease the interval to increase race condition */ + timeout = 1; + } - /* Wait for all requests to error out and call completion - * callbacks. Cap it at obd_timeout -- these should all - * have been locally cancelled by ptlrpc_abort_inflight. */ - lwi = LWI_TIMEOUT_INTERVAL( - cfs_timeout_cap(cfs_time_seconds(timeout)), - (timeout > 1)?cfs_time_seconds(1):cfs_time_seconds(1)/2, - NULL, NULL); - rc = l_wait_event(imp->imp_recovery_waitq, - (cfs_atomic_read(&imp->imp_inflight) == 0), - &lwi); - if (rc) { - const char *cli_tgt = obd2cli_tgt(imp->imp_obd); - - CERROR("%s: rc = %d waiting for callback (%d != 0)\n", - cli_tgt, rc, - cfs_atomic_read(&imp->imp_inflight)); - - cfs_spin_lock(&imp->imp_lock); - if (cfs_atomic_read(&imp->imp_inflight) == 0) { - int count = cfs_atomic_read(&imp->imp_unregistering); - - /* We know that "unregistering" rpcs only can - * survive in sending or delaying lists (they - * maybe waiting for long reply unlink in - * sluggish nets). Let's check this. If there - * is no inflight and unregistering != 0, this - * is bug. */ - LASSERTF(count == 0, "Some RPCs are still " - "unregistering: %d\n", count); - - /* Let's save one loop as soon as inflight have - * dropped to zero. No new inflights possible at - * this point. */ - rc = 0; - } else { - cfs_list_for_each_safe(tmp, n, - &imp->imp_sending_list) { - req = cfs_list_entry(tmp, - struct ptlrpc_request, - rq_list); - DEBUG_REQ(D_ERROR, req, - "still on sending list"); - } - cfs_list_for_each_safe(tmp, n, - &imp->imp_delayed_list) { - req = cfs_list_entry(tmp, - struct ptlrpc_request, - rq_list); - DEBUG_REQ(D_ERROR, req, - "still on delayed list"); - } - - CERROR("%s: RPCs in \"%s\" phase found (%d). " - "Network is sluggish? Waiting them " - "to error out.\n", cli_tgt, - ptlrpc_phase2str(RQ_PHASE_UNREGISTERING), - cfs_atomic_read(&imp-> - imp_unregistering)); - } - cfs_spin_unlock(&imp->imp_lock); - } - } while (rc != 0); - - /* - * Let's additionally check that no new rpcs added to import in - * "invalidate" state. - */ - LASSERT(cfs_atomic_read(&imp->imp_inflight) == 0); - obd_import_event(imp->imp_obd, imp, IMP_EVENT_INVALIDATE); - sptlrpc_import_flush_all_ctx(imp); - - cfs_atomic_dec(&imp->imp_inval_count); - cfs_waitq_broadcast(&imp->imp_recovery_waitq); + CDEBUG(D_RPCTRACE, "Sleeping %llds for inflight to error out\n", + timeout); + + /* Wait for all requests to error out and call completion + * callbacks. Cap it at obd_timeout -- these should all + * have been locally cancelled by ptlrpc_abort_inflight. + */ + timeout_jiffies = max_t(long, cfs_time_seconds(timeout), 1); + lwi = LWI_TIMEOUT_INTERVAL(timeout_jiffies, + (timeout > 1) ? 
cfs_time_seconds(1) : + cfs_time_seconds(1) / 2, + NULL, NULL); + rc = l_wait_event(imp->imp_recovery_waitq, + (atomic_read(&imp->imp_inflight) == 0), + &lwi); + if (rc) { + const char *cli_tgt = obd2cli_tgt(imp->imp_obd); + + CERROR("%s: rc = %d waiting for callback (%d != 0)\n", + cli_tgt, rc, atomic_read(&imp->imp_inflight)); + + spin_lock(&imp->imp_lock); + if (atomic_read(&imp->imp_inflight) == 0) { + int count = atomic_read(&imp->imp_unregistering); + + /* We know that "unregistering" rpcs only can + * survive in sending or delaying lists (they + * maybe waiting for long reply unlink in + * sluggish nets). Let's check this. If there + * is no inflight and unregistering != 0, this + * is bug. */ + LASSERTF(count == 0, "Some RPCs are still " + "unregistering: %d\n", count); + + /* Let's save one loop as soon as inflight have + * dropped to zero. No new inflights possible at + * this point. */ + rc = 0; + } else { + list_for_each_safe(tmp, n, + &imp->imp_sending_list) { + req = list_entry(tmp, + struct ptlrpc_request, + rq_list); + DEBUG_REQ(D_ERROR, req, + "still on sending list"); + } + list_for_each_safe(tmp, n, + &imp->imp_delayed_list) { + req = list_entry(tmp, + struct ptlrpc_request, + rq_list); + DEBUG_REQ(D_ERROR, req, + "still on delayed list"); + } + + CERROR("%s: Unregistering RPCs found (%d). " + "Network is sluggish? Waiting them " + "to error out.\n", cli_tgt, + atomic_read(&imp->imp_unregistering)); + } + spin_unlock(&imp->imp_lock); + } + } while (rc != 0); + + /* + * Let's additionally check that no new rpcs added to import in + * "invalidate" state. + */ + LASSERT(atomic_read(&imp->imp_inflight) == 0); + obd_import_event(imp->imp_obd, imp, IMP_EVENT_INVALIDATE); + sptlrpc_import_flush_all_ctx(imp); + + atomic_dec(&imp->imp_inval_count); + wake_up_all(&imp->imp_recovery_waitq); } EXPORT_SYMBOL(ptlrpc_invalidate_import); /* unset imp_invalid */ void ptlrpc_activate_import(struct obd_import *imp) { - struct obd_device *obd = imp->imp_obd; + struct obd_device *obd = imp->imp_obd; + + spin_lock(&imp->imp_lock); + if (imp->imp_deactive != 0) { + spin_unlock(&imp->imp_lock); + return; + } - cfs_spin_lock(&imp->imp_lock); - imp->imp_invalid = 0; - ptlrpc_activate_timeouts(imp); - cfs_spin_unlock(&imp->imp_lock); - obd_import_event(obd, imp, IMP_EVENT_ACTIVE); + imp->imp_invalid = 0; + spin_unlock(&imp->imp_lock); + obd_import_event(obd, imp, IMP_EVENT_ACTIVE); } EXPORT_SYMBOL(ptlrpc_activate_import); +void ptlrpc_pinger_force(struct obd_import *imp) +{ + CDEBUG(D_HA, "%s: waking up pinger s:%s\n", obd2cli_tgt(imp->imp_obd), + ptlrpc_import_state_name(imp->imp_state)); + + spin_lock(&imp->imp_lock); + imp->imp_force_verify = 1; + spin_unlock(&imp->imp_lock); + + if (imp->imp_state != LUSTRE_IMP_CONNECTING) + ptlrpc_pinger_wake_up(); +} +EXPORT_SYMBOL(ptlrpc_pinger_force); + void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt) { ENTRY; @@ -411,45 +441,56 @@ void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt) ptlrpc_deactivate_import(imp); } - CDEBUG(D_HA, "%s: waking up pinger\n", - obd2cli_tgt(imp->imp_obd)); - - cfs_spin_lock(&imp->imp_lock); - imp->imp_force_verify = 1; - cfs_spin_unlock(&imp->imp_lock); - - ptlrpc_pinger_wake_up(); - } - EXIT; + ptlrpc_pinger_force(imp); + } + EXIT; } -EXPORT_SYMBOL(ptlrpc_fail_import); int ptlrpc_reconnect_import(struct obd_import *imp) { - ptlrpc_set_import_discon(imp, 0); - /* Force a new connect attempt */ - ptlrpc_invalidate_import(imp); - /* Do a fresh connect next time by zeroing the handle */ - 
ptlrpc_disconnect_import(imp, 1); - /* Wait for all invalidate calls to finish */ - if (cfs_atomic_read(&imp->imp_inval_count) > 0) { - int rc; - struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL); - rc = l_wait_event(imp->imp_recovery_waitq, - (cfs_atomic_read(&imp->imp_inval_count) == 0), - &lwi); - if (rc) - CERROR("Interrupted, inval=%d\n", - cfs_atomic_read(&imp->imp_inval_count)); - } +#ifdef ENABLE_PINGER + long timeout_jiffies = cfs_time_seconds(obd_timeout); + struct l_wait_info lwi; + int rc; + + ptlrpc_pinger_force(imp); + + CDEBUG(D_HA, "%s: recovery started, waiting %u seconds\n", + obd2cli_tgt(imp->imp_obd), obd_timeout); + + lwi = LWI_TIMEOUT(timeout_jiffies, NULL, NULL); + rc = l_wait_event(imp->imp_recovery_waitq, + !ptlrpc_import_in_recovery(imp), &lwi); + CDEBUG(D_HA, "%s: recovery finished s:%s\n", obd2cli_tgt(imp->imp_obd), + ptlrpc_import_state_name(imp->imp_state)); + return rc; +#else + ptlrpc_set_import_discon(imp, 0); + /* Force a new connect attempt */ + ptlrpc_invalidate_import(imp); + /* Do a fresh connect next time by zeroing the handle */ + ptlrpc_disconnect_import(imp, 1); + /* Wait for all invalidate calls to finish */ + if (atomic_read(&imp->imp_inval_count) > 0) { + struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL); + int rc; + + rc = l_wait_event(imp->imp_recovery_waitq, + (atomic_read(&imp->imp_inval_count) == 0), + &lwi); + if (rc) + CERROR("Interrupted, inval=%d\n", + atomic_read(&imp->imp_inval_count)); + } - /* Allow reconnect attempts */ - imp->imp_obd->obd_no_recov = 0; - /* Remove 'invalid' flag */ - ptlrpc_activate_import(imp); - /* Attempt a new connect */ - ptlrpc_recover_import(imp, NULL, 0); - return 0; + /* Allow reconnect attempts */ + imp->imp_obd->obd_no_recov = 0; + /* Remove 'invalid' flag */ + ptlrpc_activate_import(imp); + /* Attempt a new connect */ + ptlrpc_recover_import(imp, NULL, 0); + return 0; +#endif } EXPORT_SYMBOL(ptlrpc_reconnect_import); @@ -466,17 +507,17 @@ static int import_select_connection(struct obd_import *imp) int target_len, tried_all = 1; ENTRY; - cfs_spin_lock(&imp->imp_lock); + spin_lock(&imp->imp_lock); - if (cfs_list_empty(&imp->imp_conn_list)) { - CERROR("%s: no connections available\n", - imp->imp_obd->obd_name); - cfs_spin_unlock(&imp->imp_lock); - RETURN(-EINVAL); - } + if (list_empty(&imp->imp_conn_list)) { + CERROR("%s: no connections available\n", + imp->imp_obd->obd_name); + spin_unlock(&imp->imp_lock); + RETURN(-EINVAL); + } - cfs_list_for_each_entry(conn, &imp->imp_conn_list, oic_item) { - CDEBUG(D_HA, "%s: connect to NID %s last attempt "LPU64"\n", + list_for_each_entry(conn, &imp->imp_conn_list, oic_item) { + CDEBUG(D_HA, "%s: connect to NID %s last attempt %lld\n", imp->imp_obd->obd_name, libcfs_nid2str(conn->oic_conn->c_peer.nid), conn->oic_last_attempt); @@ -484,8 +525,7 @@ static int import_select_connection(struct obd_import *imp) /* If we have not tried this connection since the last successful attempt, go with this one */ if ((conn->oic_last_attempt == 0) || - cfs_time_beforeq_64(conn->oic_last_attempt, - imp->imp_last_success_conn)) { + conn->oic_last_attempt <= imp->imp_last_success_conn) { imp_conn = conn; tried_all = 0; break; @@ -496,8 +536,7 @@ static int import_select_connection(struct obd_import *imp) least recently used */ if (!imp_conn) imp_conn = conn; - else if (cfs_time_before_64(conn->oic_last_attempt, - imp_conn->oic_last_attempt)) + else if (imp_conn->oic_last_attempt > conn->oic_last_attempt) imp_conn = conn; } @@ -526,7 +565,7 @@ static int 
import_select_connection(struct obd_import *imp) "to %ds\n", imp->imp_obd->obd_name, at_get(at)); } - imp_conn->oic_last_attempt = cfs_time_current_64(); + imp_conn->oic_last_attempt = ktime_get_seconds(); /* switch connection, don't mind if it's same as the current one */ if (imp->imp_connection) @@ -559,9 +598,9 @@ static int import_select_connection(struct obd_import *imp) imp->imp_obd->obd_name, imp, imp_conn->oic_uuid.uuid, libcfs_nid2str(imp_conn->oic_conn->c_peer.nid)); - cfs_spin_unlock(&imp->imp_lock); + spin_unlock(&imp->imp_lock); - RETURN(0); + RETURN(0); } /* @@ -569,20 +608,32 @@ static int import_select_connection(struct obd_import *imp) */ static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno) { - struct ptlrpc_request *req; - cfs_list_t *tmp; - - if (cfs_list_empty(&imp->imp_replay_list)) - return 0; - tmp = imp->imp_replay_list.next; - req = cfs_list_entry(tmp, struct ptlrpc_request, rq_replay_list); - *transno = req->rq_transno; - if (req->rq_transno == 0) { - DEBUG_REQ(D_ERROR, req, "zero transno in replay"); - LBUG(); - } - - return 1; + struct ptlrpc_request *req; + struct list_head *tmp; + + /* The requests in committed_list always have smaller transnos than + * the requests in replay_list */ + if (!list_empty(&imp->imp_committed_list)) { + tmp = imp->imp_committed_list.next; + req = list_entry(tmp, struct ptlrpc_request, rq_replay_list); + *transno = req->rq_transno; + if (req->rq_transno == 0) { + DEBUG_REQ(D_ERROR, req, "zero transno in committed_list"); + LBUG(); + } + return 1; + } + if (!list_empty(&imp->imp_replay_list)) { + tmp = imp->imp_replay_list.next; + req = list_entry(tmp, struct ptlrpc_request, rq_replay_list); + *transno = req->rq_transno; + if (req->rq_transno == 0) { + DEBUG_REQ(D_ERROR, req, "zero transno in replay_list"); + LBUG(); + } + return 1; + } + return 0; } /** @@ -593,152 +644,145 @@ static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno) */ int ptlrpc_connect_import(struct obd_import *imp) { - struct obd_device *obd = imp->imp_obd; - int initial_connect = 0; - int set_transno = 0; - __u64 committed_before_reconnect = 0; - struct ptlrpc_request *request; - char *bufs[] = { NULL, - obd2cli_tgt(imp->imp_obd), - obd->obd_uuid.uuid, - (char *)&imp->imp_dlm_handle, - (char *)&imp->imp_connect_data }; - struct ptlrpc_connect_async_args *aa; - int rc; - ENTRY; + struct obd_device *obd = imp->imp_obd; + int initial_connect = 0; + int set_transno = 0; + __u64 committed_before_reconnect = 0; + struct ptlrpc_request *request; + char *bufs[] = { NULL, + obd2cli_tgt(imp->imp_obd), + obd->obd_uuid.uuid, + (char *)&imp->imp_dlm_handle, + (char *)&imp->imp_connect_data }; + struct ptlrpc_connect_async_args *aa; + int rc; + ENTRY; + + spin_lock(&imp->imp_lock); + if (imp->imp_state == LUSTRE_IMP_CLOSED) { + spin_unlock(&imp->imp_lock); + CERROR("can't connect to a closed import\n"); + RETURN(-EINVAL); + } else if (imp->imp_state == LUSTRE_IMP_FULL) { + spin_unlock(&imp->imp_lock); + CERROR("already connected\n"); + RETURN(0); + } else if (imp->imp_state == LUSTRE_IMP_CONNECTING || + imp->imp_connected) { + spin_unlock(&imp->imp_lock); + CERROR("already connecting\n"); + RETURN(-EALREADY); + } - cfs_spin_lock(&imp->imp_lock); - if (imp->imp_state == LUSTRE_IMP_CLOSED) { - cfs_spin_unlock(&imp->imp_lock); - CERROR("can't connect to a closed import\n"); - RETURN(-EINVAL); - } else if (imp->imp_state == LUSTRE_IMP_FULL) { - cfs_spin_unlock(&imp->imp_lock); - CERROR("already connected\n"); - RETURN(0); - } else if 
(imp->imp_state == LUSTRE_IMP_CONNECTING) { - cfs_spin_unlock(&imp->imp_lock); - CERROR("already connecting\n"); - RETURN(-EALREADY); - } + IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CONNECTING); - IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CONNECTING); + imp->imp_conn_cnt++; + imp->imp_resend_replay = 0; - imp->imp_conn_cnt++; - imp->imp_resend_replay = 0; + if (!lustre_handle_is_used(&imp->imp_remote_handle)) + initial_connect = 1; + else + committed_before_reconnect = imp->imp_peer_committed_transno; - if (!lustre_handle_is_used(&imp->imp_remote_handle)) - initial_connect = 1; - else - committed_before_reconnect = imp->imp_peer_committed_transno; - - set_transno = ptlrpc_first_transno(imp, - &imp->imp_connect_data.ocd_transno); - cfs_spin_unlock(&imp->imp_lock); - - rc = import_select_connection(imp); - if (rc) - GOTO(out, rc); - - rc = sptlrpc_import_sec_adapt(imp, NULL, 0); - if (rc) - GOTO(out, rc); - - /* Reset connect flags to the originally requested flags, in case - * the server is updated on-the-fly we will get the new features. */ - imp->imp_connect_data.ocd_connect_flags = imp->imp_connect_flags_orig; - imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT; - imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18; - - rc = obd_reconnect(NULL, imp->imp_obd->obd_self_export, obd, - &obd->obd_uuid, &imp->imp_connect_data, NULL); - if (rc) - GOTO(out, rc); - - request = ptlrpc_request_alloc(imp, &RQF_MDS_CONNECT); - if (request == NULL) - GOTO(out, rc = -ENOMEM); - - rc = ptlrpc_request_bufs_pack(request, LUSTRE_OBD_VERSION, - imp->imp_connect_op, bufs, NULL); - if (rc) { - ptlrpc_request_free(request); - GOTO(out, rc); - } + set_transno = ptlrpc_first_transno(imp, + &imp->imp_connect_data.ocd_transno); + spin_unlock(&imp->imp_lock); - /* Report the rpc service time to the server so that it knows how long - * to wait for clients to join recovery */ - lustre_msg_set_service_time(request->rq_reqmsg, - at_timeout2est(request->rq_timeout)); - - /* The amount of time we give the server to process the connect req. - * import_select_connection will increase the net latency on - * repeated reconnect attempts to cover slow networks. 
- * We override/ignore the server rpc completion estimate here, - * which may be large if this is a reconnect attempt */ - request->rq_timeout = INITIAL_CONNECT_TIMEOUT; - lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout); - -#ifndef __KERNEL__ - lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_LIBCLIENT); -#endif - lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_NEXT_VER); - - request->rq_no_resend = request->rq_no_delay = 1; - request->rq_send_state = LUSTRE_IMP_CONNECTING; - /* Allow a slightly larger reply for future growth compatibility */ - req_capsule_set_size(&request->rq_pill, &RMF_CONNECT_DATA, RCL_SERVER, - sizeof(struct obd_connect_data)+16*sizeof(__u64)); - ptlrpc_request_set_replen(request); - request->rq_interpret_reply = ptlrpc_connect_interpret; - - CLASSERT(sizeof (*aa) <= sizeof (request->rq_async_args)); - aa = ptlrpc_req_async_args(request); - memset(aa, 0, sizeof *aa); - - aa->pcaa_peer_committed = committed_before_reconnect; - aa->pcaa_initial_connect = initial_connect; - - if (aa->pcaa_initial_connect) { - cfs_spin_lock(&imp->imp_lock); - imp->imp_replayable = 1; - cfs_spin_unlock(&imp->imp_lock); - lustre_msg_add_op_flags(request->rq_reqmsg, - MSG_CONNECT_INITIAL); - } + rc = import_select_connection(imp); + if (rc) + GOTO(out, rc); + + rc = sptlrpc_import_sec_adapt(imp, NULL, NULL); + if (rc) + GOTO(out, rc); + + /* Reset connect flags to the originally requested flags, in case + * the server is updated on-the-fly we will get the new features. */ + imp->imp_connect_data.ocd_connect_flags = imp->imp_connect_flags_orig; + imp->imp_connect_data.ocd_connect_flags2 = imp->imp_connect_flags2_orig; + /* Reset ocd_version each time so the server knows the exact versions */ + imp->imp_connect_data.ocd_version = LUSTRE_VERSION_CODE; + imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT; + imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18; + + rc = obd_reconnect(NULL, imp->imp_obd->obd_self_export, obd, + &obd->obd_uuid, &imp->imp_connect_data, NULL); + if (rc) + GOTO(out, rc); - if (set_transno) - lustre_msg_add_op_flags(request->rq_reqmsg, - MSG_CONNECT_TRANSNO); + request = ptlrpc_request_alloc(imp, &RQF_MDS_CONNECT); + if (request == NULL) + GOTO(out, rc = -ENOMEM); - DEBUG_REQ(D_RPCTRACE, request, "(re)connect request (timeout %d)", - request->rq_timeout); - ptlrpcd_add_req(request, PDL_POLICY_ROUND, -1); - rc = 0; + rc = ptlrpc_request_bufs_pack(request, LUSTRE_OBD_VERSION, + imp->imp_connect_op, bufs, NULL); + if (rc) { + ptlrpc_request_free(request); + GOTO(out, rc); + } + + /* Report the rpc service time to the server so that it knows how long + * to wait for clients to join recovery */ + lustre_msg_set_service_time(request->rq_reqmsg, + at_timeout2est(request->rq_timeout)); + + /* The amount of time we give the server to process the connect req. + * import_select_connection will increase the net latency on + * repeated reconnect attempts to cover slow networks. 
+ * We override/ignore the server rpc completion estimate here, + * which may be large if this is a reconnect attempt */ + request->rq_timeout = INITIAL_CONNECT_TIMEOUT; + lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout); + + request->rq_no_resend = request->rq_no_delay = 1; + request->rq_send_state = LUSTRE_IMP_CONNECTING; + /* Allow a slightly larger reply for future growth compatibility */ + req_capsule_set_size(&request->rq_pill, &RMF_CONNECT_DATA, RCL_SERVER, + sizeof(struct obd_connect_data)+16*sizeof(__u64)); + ptlrpc_request_set_replen(request); + request->rq_interpret_reply = ptlrpc_connect_interpret; + + CLASSERT(sizeof(*aa) <= sizeof(request->rq_async_args)); + aa = ptlrpc_req_async_args(request); + memset(aa, 0, sizeof *aa); + + aa->pcaa_peer_committed = committed_before_reconnect; + aa->pcaa_initial_connect = initial_connect; + + if (aa->pcaa_initial_connect) { + spin_lock(&imp->imp_lock); + imp->imp_replayable = 1; + spin_unlock(&imp->imp_lock); + lustre_msg_add_op_flags(request->rq_reqmsg, + MSG_CONNECT_INITIAL); + } + + if (set_transno) + lustre_msg_add_op_flags(request->rq_reqmsg, + MSG_CONNECT_TRANSNO); + + DEBUG_REQ(D_RPCTRACE, request, "(re)connect request (timeout %ld)", + request->rq_timeout); + ptlrpcd_add_req(request); + rc = 0; out: - if (rc != 0) { - IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON); - } + if (rc != 0) + IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON); - RETURN(rc); + RETURN(rc); } EXPORT_SYMBOL(ptlrpc_connect_import); static void ptlrpc_maybe_ping_import_soon(struct obd_import *imp) { -#ifdef __KERNEL__ - int force_verify; + int force_verify; - cfs_spin_lock(&imp->imp_lock); - force_verify = imp->imp_force_verify != 0; - cfs_spin_unlock(&imp->imp_lock); + spin_lock(&imp->imp_lock); + force_verify = imp->imp_force_verify != 0; + spin_unlock(&imp->imp_lock); - if (force_verify) - ptlrpc_pinger_wake_up(); -#else - /* liblustre has no pinger thread, so we wakeup pinger anyway */ - ptlrpc_pinger_wake_up(); -#endif + if (force_verify) + ptlrpc_pinger_wake_up(); } static int ptlrpc_busy_reconnect(int rc) @@ -746,6 +790,156 @@ static int ptlrpc_busy_reconnect(int rc) return (rc == -EBUSY) || (rc == -EAGAIN); } +static int ptlrpc_connect_set_flags(struct obd_import *imp, + struct obd_connect_data *ocd, + __u64 old_connect_flags, + struct obd_export *exp, int init_connect) +{ + static bool warned; + struct client_obd *cli = &imp->imp_obd->u.cli; + + spin_lock(&imp->imp_lock); + list_del(&imp->imp_conn_current->oic_item); + list_add(&imp->imp_conn_current->oic_item, + &imp->imp_conn_list); + imp->imp_last_success_conn = + imp->imp_conn_current->oic_last_attempt; + + spin_unlock(&imp->imp_lock); + + if (!warned && (ocd->ocd_connect_flags & OBD_CONNECT_VERSION) && + (ocd->ocd_version > LUSTRE_VERSION_CODE + + LUSTRE_VERSION_OFFSET_WARN || + ocd->ocd_version < LUSTRE_VERSION_CODE - + LUSTRE_VERSION_OFFSET_WARN)) { + /* Sigh, some compilers do not like #ifdef in the middle + of macro arguments */ + const char *older = "older than client. " + "Consider upgrading server"; + const char *newer = "newer than client. " + "Consider upgrading client"; + + LCONSOLE_WARN("Server %s version (%d.%d.%d.%d) " + "is much %s (%s)\n", + obd2cli_tgt(imp->imp_obd), + OBD_OCD_VERSION_MAJOR(ocd->ocd_version), + OBD_OCD_VERSION_MINOR(ocd->ocd_version), + OBD_OCD_VERSION_PATCH(ocd->ocd_version), + OBD_OCD_VERSION_FIX(ocd->ocd_version), + ocd->ocd_version > LUSTRE_VERSION_CODE ? 
+ newer : older, LUSTRE_VERSION_STRING); + warned = true; + } + + if (ocd->ocd_connect_flags & OBD_CONNECT_CKSUM) { + /* We sent to the server ocd_cksum_types with bits set + * for algorithms we understand. The server masked off + * the checksum types it doesn't support */ + if ((ocd->ocd_cksum_types & + obd_cksum_types_supported_client()) == 0) { + LCONSOLE_ERROR("The negotiation of the checksum " + "alogrithm to use with server %s " + "failed (%x/%x)\n", + obd2cli_tgt(imp->imp_obd), + ocd->ocd_cksum_types, + obd_cksum_types_supported_client()); + return -EPROTO; + } else { + cli->cl_supp_cksum_types = ocd->ocd_cksum_types; + } + } else { + /* The server does not support OBD_CONNECT_CKSUM. + * Enforce ADLER for backward compatibility*/ + cli->cl_supp_cksum_types = OBD_CKSUM_ADLER; + } + cli->cl_cksum_type = obd_cksum_type_select(imp->imp_obd->obd_name, + cli->cl_supp_cksum_types); + + if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) + cli->cl_max_pages_per_rpc = + min(ocd->ocd_brw_size >> PAGE_SHIFT, + cli->cl_max_pages_per_rpc); + else if (imp->imp_connect_op == MDS_CONNECT || + imp->imp_connect_op == MGS_CONNECT) + cli->cl_max_pages_per_rpc = 1; + + LASSERT((cli->cl_max_pages_per_rpc <= PTLRPC_MAX_BRW_PAGES) && + (cli->cl_max_pages_per_rpc > 0)); + + client_adjust_max_dirty(cli); + + /* Update client max modify RPCs in flight with value returned + * by the server */ + if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS) + cli->cl_max_mod_rpcs_in_flight = min( + cli->cl_max_mod_rpcs_in_flight, + ocd->ocd_maxmodrpcs); + else + cli->cl_max_mod_rpcs_in_flight = 1; + + /* Reset ns_connect_flags only for initial connect. It might be + * changed in while using FS and if we reset it in reconnect + * this leads to losing user settings done before such as + * disable lru_resize, etc. */ + if (old_connect_flags != exp_connect_flags(exp) || init_connect) { + CDEBUG(D_HA, "%s: Resetting ns_connect_flags to server " + "flags: %#llx\n", imp->imp_obd->obd_name, + ocd->ocd_connect_flags); + imp->imp_obd->obd_namespace->ns_connect_flags = + ocd->ocd_connect_flags; + imp->imp_obd->obd_namespace->ns_orig_connect_flags = + ocd->ocd_connect_flags; + } + + if (ocd->ocd_connect_flags & OBD_CONNECT_AT) + /* We need a per-message support flag, because + * a. we don't know if the incoming connect reply + * supports AT or not (in reply_in_callback) + * until we unpack it. + * b. failovered server means export and flags are gone + * (in ptlrpc_send_reply). + * Can only be set when we know AT is supported at + * both ends */ + imp->imp_msghdr_flags |= MSGHDR_AT_SUPPORT; + else + imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT; + + imp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18; + + return 0; +} + +/** + * Add all replay requests back to unreplied list before start replay, + * so that we can make sure the known replied XID is always increased + * only even if when replaying requests. + */ +static void ptlrpc_prepare_replay(struct obd_import *imp) +{ + struct ptlrpc_request *req; + + if (imp->imp_state != LUSTRE_IMP_REPLAY || + imp->imp_resend_replay) + return; + + /* If the server was restart during repaly, the requests may + * have been added to the unreplied list in former replay. 
*/ + spin_lock(&imp->imp_lock); + + list_for_each_entry(req, &imp->imp_committed_list, rq_replay_list) { + if (list_empty(&req->rq_unreplied_list)) + ptlrpc_add_unreplied(req); + } + + list_for_each_entry(req, &imp->imp_replay_list, rq_replay_list) { + if (list_empty(&req->rq_unreplied_list)) + ptlrpc_add_unreplied(req); + } + + imp->imp_known_replied_xid = ptlrpc_known_replied_xid(imp); + spin_unlock(&imp->imp_lock); +} + /** * interpret_reply callback for connect RPCs. * Looks into returned status of connect operation and decides @@ -758,34 +952,54 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, { struct ptlrpc_connect_async_args *aa = data; struct obd_import *imp = request->rq_import; - struct client_obd *cli = &imp->imp_obd->u.cli; struct lustre_handle old_hdl; __u64 old_connect_flags; int msg_flags; struct obd_connect_data *ocd; - struct obd_export *exp; + struct obd_export *exp = NULL; int ret; - ENTRY; + ENTRY; - cfs_spin_lock(&imp->imp_lock); - if (imp->imp_state == LUSTRE_IMP_CLOSED) { - cfs_spin_unlock(&imp->imp_lock); - RETURN(0); - } + spin_lock(&imp->imp_lock); + if (imp->imp_state == LUSTRE_IMP_CLOSED) { + imp->imp_connect_tried = 1; + spin_unlock(&imp->imp_lock); + RETURN(0); + } - if (rc) { - /* if this reconnect to busy export - not need select new target - * for connecting*/ - imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc); - cfs_spin_unlock(&imp->imp_lock); - ptlrpc_maybe_ping_import_soon(imp); - GOTO(out, rc); - } - cfs_spin_unlock(&imp->imp_lock); + if (rc) { + struct ptlrpc_request *free_req; + struct ptlrpc_request *tmp; + + /* abort all delayed requests initiated connection */ + list_for_each_entry_safe(free_req, tmp, &imp->imp_delayed_list, + rq_list) { + spin_lock(&free_req->rq_lock); + if (free_req->rq_no_resend) { + free_req->rq_err = 1; + free_req->rq_status = -EIO; + ptlrpc_client_wake_req(free_req); + } + spin_unlock(&free_req->rq_lock); + } - LASSERT(imp->imp_conn_current); + /* if this reconnect to busy export - not need select new target + * for connecting*/ + imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc); + spin_unlock(&imp->imp_lock); + ptlrpc_maybe_ping_import_soon(imp); + GOTO(out, rc); + } + + /* LU-7558: indicate that we are interpretting connect reply, + * pltrpc_connect_import() will not try to reconnect until + * interpret will finish. */ + imp->imp_connected = 1; + spin_unlock(&imp->imp_lock); - msg_flags = lustre_msg_get_op_flags(request->rq_repmsg); + LASSERT(imp->imp_conn_current); + + msg_flags = lustre_msg_get_op_flags(request->rq_repmsg); ret = req_capsule_get_size(&request->rq_pill, &RMF_CONNECT_DATA, RCL_SERVER); @@ -800,7 +1014,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, GOTO(out, rc); } - cfs_spin_lock(&imp->imp_lock); + spin_lock(&imp->imp_lock); /* All imports are pingable */ imp->imp_pingable = 1; @@ -813,44 +1027,93 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, imp->imp_obd->obd_name, ocd->ocd_instance); exp = class_conn2export(&imp->imp_dlm_handle); - cfs_spin_unlock(&imp->imp_lock); + spin_unlock(&imp->imp_lock); + + if (!exp) { + /* This could happen if export is cleaned during the + connect attempt */ + CERROR("%s: missing export after connect\n", + imp->imp_obd->obd_name); + GOTO(out, rc = -ENODEV); + } /* check that server granted subset of flags we asked for. 
*/ if ((ocd->ocd_connect_flags & imp->imp_connect_flags_orig) != ocd->ocd_connect_flags) { - CERROR("%s: Server didn't granted asked subset of flags: " - "asked="LPX64" grranted="LPX64"\n", - imp->imp_obd->obd_name,imp->imp_connect_flags_orig, + CERROR("%s: Server didn't grant requested subset of flags: " + "asked=%#llx granted=%#llx\n", + imp->imp_obd->obd_name, imp->imp_connect_flags_orig, ocd->ocd_connect_flags); GOTO(out, rc = -EPROTO); } - if (!exp) { - /* This could happen if export is cleaned during the - connect attempt */ - CERROR("%s: missing export after connect\n", - imp->imp_obd->obd_name); - GOTO(out, rc = -ENODEV); + if ((ocd->ocd_connect_flags2 & imp->imp_connect_flags2_orig) != + ocd->ocd_connect_flags2) { + CERROR("%s: Server didn't grant requested subset of flags2: " + "asked=%#llx granted=%#llx\n", + imp->imp_obd->obd_name, imp->imp_connect_flags2_orig, + ocd->ocd_connect_flags2); + GOTO(out, rc = -EPROTO); + } + + if (!(imp->imp_connect_flags_orig & OBD_CONNECT_LIGHTWEIGHT) && + (imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS) && + (imp->imp_connect_flags_orig & OBD_CONNECT_FID) && + (ocd->ocd_connect_flags & OBD_CONNECT_VERSION)) { + __u32 major = OBD_OCD_VERSION_MAJOR(ocd->ocd_version); + __u32 minor = OBD_OCD_VERSION_MINOR(ocd->ocd_version); + __u32 patch = OBD_OCD_VERSION_PATCH(ocd->ocd_version); + + /* We do not support the MDT-MDT interoperations with + * different version MDT because of protocol changes. */ + if (unlikely(major != LUSTRE_MAJOR || + minor != LUSTRE_MINOR || + abs(patch - LUSTRE_PATCH) > 3)) { + LCONSOLE_WARN("%s: import %p (%u.%u.%u.%u) tried the " + "connection to different version MDT " + "(%d.%d.%d.%d) %s\n", + imp->imp_obd->obd_name, imp, LUSTRE_MAJOR, + LUSTRE_MINOR, LUSTRE_PATCH, LUSTRE_FIX, + major, minor, patch, + OBD_OCD_VERSION_FIX(ocd->ocd_version), + imp->imp_connection->c_remote_uuid.uuid); + + GOTO(out, rc = -EPROTO); + } } - old_connect_flags = exp->exp_connect_flags; - exp->exp_connect_flags = ocd->ocd_connect_flags; - imp->imp_obd->obd_self_export->exp_connect_flags = - ocd->ocd_connect_flags; + + old_connect_flags = exp_connect_flags(exp); + exp->exp_connect_data = *ocd; + imp->imp_obd->obd_self_export->exp_connect_data = *ocd; + + /* The net statistics after (re-)connect is not valid anymore, + * because may reflect other routing, etc. 
*/ + at_reinit(&imp->imp_at.iat_net_latency, 0, 0); + ptlrpc_at_adj_net_latency(request, + lustre_msg_get_service_time(request->rq_repmsg)); + + /* Import flags should be updated before waking import at FULL state */ + rc = ptlrpc_connect_set_flags(imp, ocd, old_connect_flags, exp, + aa->pcaa_initial_connect); class_export_put(exp); + exp = NULL; + + if (rc != 0) + GOTO(out, rc); obd_import_event(imp->imp_obd, imp, IMP_EVENT_OCD); - if (aa->pcaa_initial_connect) { - cfs_spin_lock(&imp->imp_lock); - if (msg_flags & MSG_CONNECT_REPLAYABLE) { - imp->imp_replayable = 1; - cfs_spin_unlock(&imp->imp_lock); - CDEBUG(D_HA, "connected to replayable target: %s\n", - obd2cli_tgt(imp->imp_obd)); - } else { - imp->imp_replayable = 0; - cfs_spin_unlock(&imp->imp_lock); - } + if (aa->pcaa_initial_connect) { + spin_lock(&imp->imp_lock); + if (msg_flags & MSG_CONNECT_REPLAYABLE) { + imp->imp_replayable = 1; + spin_unlock(&imp->imp_lock); + CDEBUG(D_HA, "connected to replayable target: %s\n", + obd2cli_tgt(imp->imp_obd)); + } else { + imp->imp_replayable = 0; + spin_unlock(&imp->imp_lock); + } /* if applies, adjust the imp->imp_msg_magic here * according to reply flags */ @@ -866,8 +1129,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, obd2cli_tgt(imp->imp_obd)); IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS); } else { - IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL); - ptlrpc_activate_import(imp); + IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL); + ptlrpc_activate_import(imp); } GOTO(finish, rc = 0); @@ -879,7 +1142,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, if (!memcmp(&old_hdl, lustre_msg_get_handle(request->rq_repmsg), sizeof (old_hdl))) { LCONSOLE_WARN("Reconnect to %s (at @%s) failed due " - "bad handle "LPX64"\n", + "bad handle %#llx\n", obd2cli_tgt(imp->imp_obd), imp->imp_connection->c_remote_uuid.uuid, imp->imp_dlm_handle.cookie); @@ -901,7 +1164,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, * with server again */ if ((MSG_CONNECT_RECOVERING & msg_flags)) { CDEBUG(level,"%s@%s changed server handle from " - LPX64" to "LPX64 + "%#llx to %#llx" " but is still in recovery\n", obd2cli_tgt(imp->imp_obd), imp->imp_connection->c_remote_uuid.uuid, @@ -911,7 +1174,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, } else { LCONSOLE_WARN("Evicted from %s (at %s) " "after server handle changed from " - LPX64" to "LPX64"\n", + "%#llx to %#llx\n", obd2cli_tgt(imp->imp_obd), imp->imp_connection-> \ c_remote_uuid.uuid, @@ -944,11 +1207,11 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd)); - cfs_spin_lock(&imp->imp_lock); - imp->imp_resend_replay = 1; - cfs_spin_unlock(&imp->imp_lock); + spin_lock(&imp->imp_lock); + imp->imp_resend_replay = 1; + spin_unlock(&imp->imp_lock); - IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY); + IMPORT_SET_STATE(imp, imp->imp_replay_state); } else { IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER); } @@ -957,6 +1220,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, imp->imp_remote_handle = *lustre_msg_get_handle(request->rq_repmsg); imp->imp_last_replay_transno = 0; + imp->imp_replay_cursor = &imp->imp_committed_list; IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY); } else { DEBUG_REQ(D_HA, request, "%s: evicting (reconnect/recover flags" @@ -975,8 +1239,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, if (lustre_msg_get_last_committed(request->rq_repmsg) > 0 && lustre_msg_get_last_committed(request->rq_repmsg) < aa->pcaa_peer_committed) { - 
CERROR("%s went back in time (transno "LPD64 - " was previously committed, server now claims "LPD64 + CERROR("%s went back in time (transno %lld" + " was previously committed, server now claims %lld" ")! See https://bugzilla.lustre.org/show_bug.cgi?" "id=9646\n", obd2cli_tgt(imp->imp_obd), aa->pcaa_peer_committed, @@ -984,153 +1248,30 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, } finish: - rc = ptlrpc_import_recovery_state_machine(imp); - if (rc != 0) { - if (rc == -ENOTCONN) { - CDEBUG(D_HA, "evicted/aborted by %s@%s during recovery;" - "invalidating and reconnecting\n", - obd2cli_tgt(imp->imp_obd), - imp->imp_connection->c_remote_uuid.uuid); - ptlrpc_connect_import(imp); - RETURN(0); - } - } else { - - cfs_spin_lock(&imp->imp_lock); - cfs_list_del(&imp->imp_conn_current->oic_item); - cfs_list_add(&imp->imp_conn_current->oic_item, - &imp->imp_conn_list); - imp->imp_last_success_conn = - imp->imp_conn_current->oic_last_attempt; - - cfs_spin_unlock(&imp->imp_lock); - - if (!ocd->ocd_ibits_known && - ocd->ocd_connect_flags & OBD_CONNECT_IBITS) - CERROR("Inodebits aware server returned zero compatible" - " bits?\n"); - - if ((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) && - (ocd->ocd_version > LUSTRE_VERSION_CODE + - LUSTRE_VERSION_OFFSET_WARN || - ocd->ocd_version < LUSTRE_VERSION_CODE - - LUSTRE_VERSION_OFFSET_WARN)) { - /* Sigh, some compilers do not like #ifdef in the middle - of macro arguments */ -#ifdef __KERNEL__ - const char *older = "older. Consider upgrading server " - "or downgrading client"; -#else - const char *older = "older. Consider recompiling this " - "application"; -#endif - const char *newer = "newer than client version. " - "Consider upgrading client"; - - LCONSOLE_WARN("Server %s version (%d.%d.%d.%d) " - "is much %s (%s)\n", - obd2cli_tgt(imp->imp_obd), - OBD_OCD_VERSION_MAJOR(ocd->ocd_version), - OBD_OCD_VERSION_MINOR(ocd->ocd_version), - OBD_OCD_VERSION_PATCH(ocd->ocd_version), - OBD_OCD_VERSION_FIX(ocd->ocd_version), - ocd->ocd_version > LUSTRE_VERSION_CODE ? - newer : older, LUSTRE_VERSION_STRING); - } - -#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 6, 50, 0) - /* Check if server has LU-1252 fix applied to not always swab - * the IR MNE entries. Do this only once per connection. This - * fixup is version-limited, because we don't want to carry the - * OBD_CONNECT_MNE_SWAB flag around forever, just so long as we - * need interop with unpatched 2.2 servers. For newer servers, - * the client will do MNE swabbing only as needed. 
LU-1644 */ - if (unlikely((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) && - !(ocd->ocd_connect_flags & OBD_CONNECT_MNE_SWAB) && - OBD_OCD_VERSION_MAJOR(ocd->ocd_version) == 2 && - OBD_OCD_VERSION_MINOR(ocd->ocd_version) == 2 && - OBD_OCD_VERSION_PATCH(ocd->ocd_version) < 55 && - strcmp(imp->imp_obd->obd_type->typ_name, - LUSTRE_MGC_NAME) == 0)) - imp->imp_need_mne_swab = 1; - else /* clear if server was upgraded since last connect */ - imp->imp_need_mne_swab = 0; -#else -#warning "LU-1644: Remove old OBD_CONNECT_MNE_SWAB fixup and exp_need_mne_swab" -#endif + ptlrpc_prepare_replay(imp); + rc = ptlrpc_import_recovery_state_machine(imp); + if (rc == -ENOTCONN) { + CDEBUG(D_HA, "evicted/aborted by %s@%s during recovery;" + "invalidating and reconnecting\n", + obd2cli_tgt(imp->imp_obd), + imp->imp_connection->c_remote_uuid.uuid); + ptlrpc_connect_import(imp); + spin_lock(&imp->imp_lock); + imp->imp_connected = 0; + imp->imp_connect_tried = 1; + spin_unlock(&imp->imp_lock); + RETURN(0); + } - if (ocd->ocd_connect_flags & OBD_CONNECT_CKSUM) { - /* We sent to the server ocd_cksum_types with bits set - * for algorithms we understand. The server masked off - * the checksum types it doesn't support */ - if ((ocd->ocd_cksum_types & - cksum_types_supported_client()) == 0) { - LCONSOLE_WARN("The negotiation of the checksum " - "alogrithm to use with server %s " - "failed (%x/%x), disabling " - "checksums\n", - obd2cli_tgt(imp->imp_obd), - ocd->ocd_cksum_types, - cksum_types_supported_client()); - cli->cl_checksum = 0; - cli->cl_supp_cksum_types = OBD_CKSUM_ADLER; - } else { - cli->cl_supp_cksum_types = ocd->ocd_cksum_types; - } - } else { - /* The server does not support OBD_CONNECT_CKSUM. - * Enforce ADLER for backward compatibility*/ - cli->cl_supp_cksum_types = OBD_CKSUM_ADLER; - } - cli->cl_cksum_type =cksum_type_select(cli->cl_supp_cksum_types); - - if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) - cli->cl_max_pages_per_rpc = - ocd->ocd_brw_size >> CFS_PAGE_SHIFT; - else if (imp->imp_connect_op == MDS_CONNECT || - imp->imp_connect_op == MGS_CONNECT) - cli->cl_max_pages_per_rpc = 1; - - /* Reset ns_connect_flags only for initial connect. It might be - * changed in while using FS and if we reset it in reconnect - * this leads to losing user settings done before such as - * disable lru_resize, etc. */ - if (old_connect_flags != exp->exp_connect_flags || - aa->pcaa_initial_connect) { - CDEBUG(D_HA, "%s: Resetting ns_connect_flags to server " - "flags: "LPX64"\n", imp->imp_obd->obd_name, - ocd->ocd_connect_flags); - imp->imp_obd->obd_namespace->ns_connect_flags = - ocd->ocd_connect_flags; - imp->imp_obd->obd_namespace->ns_orig_connect_flags = - ocd->ocd_connect_flags; - } +out: + spin_lock(&imp->imp_lock); + imp->imp_connected = 0; + imp->imp_connect_tried = 1; + spin_unlock(&imp->imp_lock); - if ((ocd->ocd_connect_flags & OBD_CONNECT_AT) && - (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2)) - /* We need a per-message support flag, because - a. we don't know if the incoming connect reply - supports AT or not (in reply_in_callback) - until we unpack it. - b. failovered server means export and flags are gone - (in ptlrpc_send_reply). 
- Can only be set when we know AT is supported at - both ends */ - imp->imp_msghdr_flags |= MSGHDR_AT_SUPPORT; - else - imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT; - - if ((ocd->ocd_connect_flags & OBD_CONNECT_FULL20) && - (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2)) - imp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18; - else - imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18; - - LASSERT((cli->cl_max_pages_per_rpc <= PTLRPC_MAX_BRW_PAGES) && - (cli->cl_max_pages_per_rpc > 0)); - } + if (exp != NULL) + class_export_put(exp); -out: if (rc != 0) { IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON); if (rc == -EACCES) { @@ -1149,14 +1290,15 @@ out: if (request->rq_repmsg == NULL) RETURN(-EPROTO); - ocd = req_capsule_server_get(&request->rq_pill, - &RMF_CONNECT_DATA); - if (ocd && - (ocd->ocd_connect_flags & OBD_CONNECT_VERSION) && - (ocd->ocd_version != LUSTRE_VERSION_CODE)) { - /* Actually servers are only supposed to refuse - connection from liblustre clients, so we should - never see this from VFS context */ + ocd = req_capsule_server_get(&request->rq_pill, + &RMF_CONNECT_DATA); + /* Servers are not supposed to refuse connections from + * clients based on version, only connection feature + * flags. We should never see this from llite, but it + * may be useful for debugging in the future. */ + if (ocd && + (ocd->ocd_connect_flags & OBD_CONNECT_VERSION) && + (ocd->ocd_version != LUSTRE_VERSION_CODE)) { LCONSOLE_ERROR_MSG(0x16a, "Server %s version " "(%d.%d.%d.%d)" " refused connection from this client " @@ -1174,15 +1316,15 @@ out: RETURN(-EPROTO); } - ptlrpc_maybe_ping_import_soon(imp); + ptlrpc_maybe_ping_import_soon(imp); - CDEBUG(D_HA, "recovery of %s on %s failed (%d)\n", - obd2cli_tgt(imp->imp_obd), - (char *)imp->imp_connection->c_remote_uuid.uuid, rc); - } + CDEBUG(D_HA, "recovery of %s on %s failed (%d)\n", + obd2cli_tgt(imp->imp_obd), + (char *)imp->imp_connection->c_remote_uuid.uuid, rc); + } - cfs_waitq_broadcast(&imp->imp_recovery_waitq); - RETURN(rc); + wake_up_all(&imp->imp_recovery_waitq); + RETURN(rc); } /** @@ -1193,26 +1335,26 @@ static int completed_replay_interpret(const struct lu_env *env, struct ptlrpc_request *req, void * data, int rc) { - ENTRY; - cfs_atomic_dec(&req->rq_import->imp_replay_inflight); - if (req->rq_status == 0 && - !req->rq_import->imp_vbr_failed) { - ptlrpc_import_recovery_state_machine(req->rq_import); - } else { - if (req->rq_import->imp_vbr_failed) { - CDEBUG(D_WARNING, - "%s: version recovery fails, reconnecting\n", - req->rq_import->imp_obd->obd_name); - } else { - CDEBUG(D_HA, "%s: LAST_REPLAY message error: %d, " - "reconnecting\n", - req->rq_import->imp_obd->obd_name, - req->rq_status); - } - ptlrpc_connect_import(req->rq_import); - } + ENTRY; + atomic_dec(&req->rq_import->imp_replay_inflight); + if (req->rq_status == 0 && + !req->rq_import->imp_vbr_failed) { + ptlrpc_import_recovery_state_machine(req->rq_import); + } else { + if (req->rq_import->imp_vbr_failed) { + CDEBUG(D_WARNING, + "%s: version recovery fails, reconnecting\n", + req->rq_import->imp_obd->obd_name); + } else { + CDEBUG(D_HA, "%s: LAST_REPLAY message error: %d, " + "reconnecting\n", + req->rq_import->imp_obd->obd_name, + req->rq_status); + } + ptlrpc_connect_import(req->rq_import); + } - RETURN(0); + RETURN(0); } /** @@ -1221,35 +1363,34 @@ static int completed_replay_interpret(const struct lu_env *env, */ static int signal_completed_replay(struct obd_import *imp) { - struct ptlrpc_request *req; - ENTRY; + struct ptlrpc_request *req; + ENTRY; - if 
(unlikely(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_FINISH_REPLAY))) - RETURN(0); + if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_FINISH_REPLAY))) + RETURN(0); - LASSERT(cfs_atomic_read(&imp->imp_replay_inflight) == 0); - cfs_atomic_inc(&imp->imp_replay_inflight); + LASSERT(atomic_read(&imp->imp_replay_inflight) == 0); + atomic_inc(&imp->imp_replay_inflight); - req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING, LUSTRE_OBD_VERSION, - OBD_PING); - if (req == NULL) { - cfs_atomic_dec(&imp->imp_replay_inflight); - RETURN(-ENOMEM); - } + req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING, LUSTRE_OBD_VERSION, + OBD_PING); + if (req == NULL) { + atomic_dec(&imp->imp_replay_inflight); + RETURN(-ENOMEM); + } - ptlrpc_request_set_replen(req); - req->rq_send_state = LUSTRE_IMP_REPLAY_WAIT; - lustre_msg_add_flags(req->rq_reqmsg, - MSG_LOCK_REPLAY_DONE | MSG_REQ_REPLAY_DONE); - if (AT_OFF) - req->rq_timeout *= 3; - req->rq_interpret_reply = completed_replay_interpret; + ptlrpc_request_set_replen(req); + req->rq_send_state = LUSTRE_IMP_REPLAY_WAIT; + lustre_msg_add_flags(req->rq_reqmsg, + MSG_LOCK_REPLAY_DONE | MSG_REQ_REPLAY_DONE); + if (AT_OFF) + req->rq_timeout *= 3; + req->rq_interpret_reply = completed_replay_interpret; - ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1); - RETURN(0); + ptlrpcd_add_req(req); + RETURN(0); } -#ifdef __KERNEL__ /** * In kernel code all import invalidation happens in its own * separate thread, so that whatever application happened to encounter @@ -1261,7 +1402,7 @@ static int ptlrpc_invalidate_import_thread(void *data) ENTRY; - cfs_daemonize_ctxt("ll_imp_inval"); + unshare_fs_struct(); CDEBUG(D_HA, "thread invalidate import %s to %s@%s\n", imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd), @@ -1280,7 +1421,6 @@ static int ptlrpc_invalidate_import_thread(void *data) class_import_put(imp); RETURN(0); } -#endif /** * This is the state machine for client-side recovery on import. @@ -1316,75 +1456,72 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp) /* Don't care about MGC eviction */ if (strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_MGC_NAME) != 0) { - LCONSOLE_ERROR_MSG(0x167, "This client was evicted by " - "%.*s; in progress operations using " - "this service will fail.\n", - target_len, target_start); + LCONSOLE_ERROR_MSG(0x167, "%s: This client was evicted " + "by %.*s; in progress operations " + "using this service will fail.\n", + imp->imp_obd->obd_name, target_len, + target_start); + LASSERTF(!obd_lbug_on_eviction, "LBUG upon eviction"); } CDEBUG(D_HA, "evicted from %s@%s; invalidating\n", obd2cli_tgt(imp->imp_obd), imp->imp_connection->c_remote_uuid.uuid); /* reset vbr_failed flag upon eviction */ - cfs_spin_lock(&imp->imp_lock); - imp->imp_vbr_failed = 0; - cfs_spin_unlock(&imp->imp_lock); - -#ifdef __KERNEL__ - /* bug 17802: XXX client_disconnect_export vs connect request - * race. if client will evicted at this time, we start - * invalidate thread without reference to import and import can - * be freed at same time. 
*/ - class_import_get(imp); - rc = cfs_create_thread(ptlrpc_invalidate_import_thread, imp, - CFS_DAEMON_FLAGS); - if (rc < 0) { - class_import_put(imp); - CERROR("error starting invalidate thread: %d\n", rc); - } else { - rc = 0; - } - RETURN(rc); -#else - ptlrpc_invalidate_import(imp); - - IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER); -#endif - } - - if (imp->imp_state == LUSTRE_IMP_REPLAY) { - CDEBUG(D_HA, "replay requested by %s\n", - obd2cli_tgt(imp->imp_obd)); - rc = ptlrpc_replay_next(imp, &inflight); - if (inflight == 0 && - cfs_atomic_read(&imp->imp_replay_inflight) == 0) { - IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS); - rc = ldlm_replay_locks(imp); - if (rc) - GOTO(out, rc); - } - rc = 0; + spin_lock(&imp->imp_lock); + imp->imp_vbr_failed = 0; + spin_unlock(&imp->imp_lock); + + { + struct task_struct *task; + /* bug 17802: XXX client_disconnect_export vs connect request + * race. if client is evicted at this time then we start + * invalidate thread without reference to import and import can + * be freed at same time. */ + class_import_get(imp); + task = kthread_run(ptlrpc_invalidate_import_thread, imp, + "ll_imp_inval"); + if (IS_ERR(task)) { + class_import_put(imp); + CERROR("error starting invalidate thread: %d\n", rc); + rc = PTR_ERR(task); + } else { + rc = 0; + } + RETURN(rc); + } } - if (imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS) { - if (cfs_atomic_read(&imp->imp_replay_inflight) == 0) { - IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_WAIT); - rc = signal_completed_replay(imp); - if (rc) - GOTO(out, rc); - } + if (imp->imp_state == LUSTRE_IMP_REPLAY) { + CDEBUG(D_HA, "replay requested by %s\n", + obd2cli_tgt(imp->imp_obd)); + rc = ptlrpc_replay_next(imp, &inflight); + if (inflight == 0 && + atomic_read(&imp->imp_replay_inflight) == 0) { + IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS); + rc = ldlm_replay_locks(imp); + if (rc) + GOTO(out, rc); + } + rc = 0; + } - } + if (imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS) { + if (atomic_read(&imp->imp_replay_inflight) == 0) { + IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_WAIT); + rc = signal_completed_replay(imp); + if (rc) + GOTO(out, rc); + } + } - if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT) { - if (cfs_atomic_read(&imp->imp_replay_inflight) == 0) { - IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER); - } - } + if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT) { + if (atomic_read(&imp->imp_replay_inflight) == 0) { + IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER); + } + } if (imp->imp_state == LUSTRE_IMP_RECOVER) { - CDEBUG(D_HA, "reconnected to %s@%s\n", - obd2cli_tgt(imp->imp_obd), - imp->imp_connection->c_remote_uuid.uuid); + struct ptlrpc_connection *conn = imp->imp_connection; rc = ptlrpc_resend(imp); if (rc) @@ -1392,137 +1529,241 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp) IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL); ptlrpc_activate_import(imp); - deuuidify(obd2cli_tgt(imp->imp_obd), NULL, - &target_start, &target_len); - LCONSOLE_INFO("%s: Connection restored to %.*s (at %s)\n", - imp->imp_obd->obd_name, - target_len, target_start, - libcfs_nid2str(imp->imp_connection->c_peer.nid)); + LCONSOLE_INFO("%s: Connection restored to %s (at %s)\n", + imp->imp_obd->obd_name, + obd_uuid2str(&conn->c_remote_uuid), + obd_import_nid2str(imp)); } - if (imp->imp_state == LUSTRE_IMP_FULL) { - cfs_waitq_broadcast(&imp->imp_recovery_waitq); - ptlrpc_wake_delayed(imp); - } + if (imp->imp_state == LUSTRE_IMP_FULL) { + wake_up_all(&imp->imp_recovery_waitq); + ptlrpc_wake_delayed(imp); + } out: - RETURN(rc); + RETURN(rc); } -int 
ptlrpc_disconnect_import(struct obd_import *imp, int noclose) +static struct ptlrpc_request *ptlrpc_disconnect_prep_req(struct obd_import *imp) { - struct ptlrpc_request *req; - int rq_opc, rc = 0; - int nowait = imp->imp_obd->obd_force; - ENTRY; + struct ptlrpc_request *req; + int rq_opc, rc = 0; + ENTRY; + + switch (imp->imp_connect_op) { + case OST_CONNECT: + rq_opc = OST_DISCONNECT; + break; + case MDS_CONNECT: + rq_opc = MDS_DISCONNECT; + break; + case MGS_CONNECT: + rq_opc = MGS_DISCONNECT; + break; + default: + rc = -EINVAL; + CERROR("%s: don't know how to disconnect from %s " + "(connect_op %d): rc = %d\n", + imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd), + imp->imp_connect_op, rc); + RETURN(ERR_PTR(rc)); + } - if (nowait) - GOTO(set_state, rc); - - switch (imp->imp_connect_op) { - case OST_CONNECT: rq_opc = OST_DISCONNECT; break; - case MDS_CONNECT: rq_opc = MDS_DISCONNECT; break; - case MGS_CONNECT: rq_opc = MGS_DISCONNECT; break; - default: - CERROR("don't know how to disconnect from %s (connect_op %d)\n", - obd2cli_tgt(imp->imp_obd), imp->imp_connect_op); - RETURN(-EINVAL); - } + req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_DISCONNECT, + LUSTRE_OBD_VERSION, rq_opc); + if (req == NULL) + RETURN(NULL); - if (ptlrpc_import_in_recovery(imp)) { - struct l_wait_info lwi; - cfs_duration_t timeout; + /* We are disconnecting, do not retry a failed DISCONNECT rpc if + * it fails. We can get through the above with a down server + * if the client doesn't know the server is gone yet. */ + req->rq_no_resend = 1; + /* We want client umounts to happen quickly, no matter the + server state... */ + req->rq_timeout = min_t(int, req->rq_timeout, + INITIAL_CONNECT_TIMEOUT); - if (AT_OFF) { - if (imp->imp_server_timeout) - timeout = cfs_time_seconds(obd_timeout / 2); - else - timeout = cfs_time_seconds(obd_timeout); - } else { - int idx = import_at_get_index(imp, - imp->imp_client->cli_request_portal); - timeout = cfs_time_seconds( - at_get(&imp->imp_at.iat_service_estimate[idx])); + IMPORT_SET_STATE(imp, LUSTRE_IMP_CONNECTING); + req->rq_send_state = LUSTRE_IMP_CONNECTING; + ptlrpc_request_set_replen(req); + + RETURN(req); +} + +int ptlrpc_disconnect_import(struct obd_import *imp, int noclose) +{ + struct ptlrpc_request *req; + int rc = 0; + ENTRY; + + if (imp->imp_obd->obd_force) + GOTO(set_state, rc); + + /* probably the import has been disconnected already being idle */ + spin_lock(&imp->imp_lock); + if (imp->imp_state == LUSTRE_IMP_IDLE) + GOTO(out, rc); + spin_unlock(&imp->imp_lock); + + if (ptlrpc_import_in_recovery(imp)) { + struct l_wait_info lwi; + long timeout_jiffies; + time64_t timeout; + + if (AT_OFF) { + if (imp->imp_server_timeout) + timeout = obd_timeout >> 1; + else + timeout = obd_timeout; + } else { + u32 req_portal; + int idx; + + req_portal = imp->imp_client->cli_request_portal; + idx = import_at_get_index(imp, req_portal); + timeout = at_get(&imp->imp_at.iat_service_estimate[idx]); } - lwi = LWI_TIMEOUT_INTR(cfs_timeout_cap(timeout), + timeout_jiffies = cfs_time_seconds(timeout); + lwi = LWI_TIMEOUT_INTR(max_t(long, timeout_jiffies, 1), back_to_sleep, LWI_ON_SIGNAL_NOOP, NULL); rc = l_wait_event(imp->imp_recovery_waitq, !ptlrpc_import_in_recovery(imp), &lwi); } - cfs_spin_lock(&imp->imp_lock); - if (imp->imp_state != LUSTRE_IMP_FULL) - GOTO(out, 0); - - cfs_spin_unlock(&imp->imp_lock); - - req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_DISCONNECT, - LUSTRE_OBD_VERSION, rq_opc); - if (req) { - /* We are disconnecting, do not retry a failed DISCONNECT rpc if - * it fails. 
We can get through the above with a down server - * if the client doesn't know the server is gone yet. */ - req->rq_no_resend = 1; - - /* We want client umounts to happen quickly, no matter the - server state... */ - req->rq_timeout = min_t(int, req->rq_timeout, - INITIAL_CONNECT_TIMEOUT); - - IMPORT_SET_STATE(imp, LUSTRE_IMP_CONNECTING); - req->rq_send_state = LUSTRE_IMP_CONNECTING; - ptlrpc_request_set_replen(req); - rc = ptlrpc_queue_wait(req); - ptlrpc_req_finished(req); - } + spin_lock(&imp->imp_lock); + if (imp->imp_state != LUSTRE_IMP_FULL) + GOTO(out, rc); + spin_unlock(&imp->imp_lock); + + req = ptlrpc_disconnect_prep_req(imp); + if (IS_ERR(req)) + GOTO(set_state, rc = PTR_ERR(req)); + rc = ptlrpc_queue_wait(req); + ptlrpc_req_finished(req); set_state: - cfs_spin_lock(&imp->imp_lock); + spin_lock(&imp->imp_lock); out: - if (noclose) - IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON); - else - IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED); - memset(&imp->imp_remote_handle, 0, sizeof(imp->imp_remote_handle)); - cfs_spin_unlock(&imp->imp_lock); - - RETURN(rc); + if (noclose) + IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON); + else + IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED); + memset(&imp->imp_remote_handle, 0, sizeof(imp->imp_remote_handle)); + spin_unlock(&imp->imp_lock); + + if (rc == -ETIMEDOUT || rc == -ENOTCONN || rc == -ESHUTDOWN) + rc = 0; + RETURN(rc); } EXPORT_SYMBOL(ptlrpc_disconnect_import); +static int ptlrpc_disconnect_idle_interpret(const struct lu_env *env, + struct ptlrpc_request *req, + void *data, int rc) +{ + struct obd_import *imp = req->rq_import; + int connect = 0; + + DEBUG_REQ(D_HA, req, "inflight=%d, refcount=%d: rc = %d\n", + atomic_read(&imp->imp_inflight), + atomic_read(&imp->imp_refcount), rc); + + spin_lock(&imp->imp_lock); + /* DISCONNECT reply can be late and another connection can just + * be initiated. so we have to abort disconnection. 
*/ + if (req->rq_import_generation == imp->imp_generation && + imp->imp_state != LUSTRE_IMP_CLOSED) { + LASSERTF(imp->imp_state == LUSTRE_IMP_CONNECTING, + "%s\n", ptlrpc_import_state_name(imp->imp_state)); + imp->imp_state = LUSTRE_IMP_IDLE; + memset(&imp->imp_remote_handle, 0, + sizeof(imp->imp_remote_handle)); + /* take our DISCONNECT into account */ + if (atomic_read(&imp->imp_inflight) > 1) { + imp->imp_generation++; + imp->imp_initiated_at = imp->imp_generation; + IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_NEW); + connect = 1; + } + } + spin_unlock(&imp->imp_lock); + + if (connect) { + rc = ptlrpc_connect_import(imp); + if (rc >= 0) + ptlrpc_pinger_add_import(imp); + } + + return 0; +} + +int ptlrpc_disconnect_and_idle_import(struct obd_import *imp) +{ + struct ptlrpc_request *req; + ENTRY; + + if (imp->imp_obd->obd_force) + RETURN(0); + + if (ptlrpc_import_in_recovery(imp)) + RETURN(0); + + spin_lock(&imp->imp_lock); + if (imp->imp_state != LUSTRE_IMP_FULL) { + spin_unlock(&imp->imp_lock); + RETURN(0); + } + spin_unlock(&imp->imp_lock); + + req = ptlrpc_disconnect_prep_req(imp); + if (IS_ERR(req)) + RETURN(PTR_ERR(req)); + + CDEBUG_LIMIT(imp->imp_idle_debug, "%s: disconnect after %llus idle\n", + imp->imp_obd->obd_name, + ktime_get_real_seconds() - imp->imp_last_reply_time); + req->rq_interpret_reply = ptlrpc_disconnect_idle_interpret; + ptlrpcd_add_req(req); + + RETURN(0); +} +EXPORT_SYMBOL(ptlrpc_disconnect_and_idle_import); + void ptlrpc_cleanup_imp(struct obd_import *imp) { - ENTRY; + ENTRY; - cfs_spin_lock(&imp->imp_lock); - IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED); - imp->imp_generation++; - cfs_spin_unlock(&imp->imp_lock); - ptlrpc_abort_inflight(imp); + spin_lock(&imp->imp_lock); + IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED); + imp->imp_generation++; + spin_unlock(&imp->imp_lock); + ptlrpc_abort_inflight(imp); - EXIT; + EXIT; } -EXPORT_SYMBOL(ptlrpc_cleanup_imp); /* Adaptive Timeout utils */ extern unsigned int at_min, at_max, at_history; -/* Bin into timeslices using AT_BINS bins. - This gives us a max of the last binlimit*AT_BINS secs without the storage, - but still smoothing out a return to normalcy from a slow response. - (E.g. remember the maximum latency in each minute of the last 4 minutes.) */ +/* Update at_current with the specified value (bounded by at_min and at_max), + * as well as the AT history "bins". + * - Bin into timeslices using AT_BINS bins. + * - This gives us a max of the last at_history seconds without the storage, + * but still smoothing out a return to normalcy from a slow response. + * - (E.g. remember the maximum latency in each minute of the last 4 minutes.) 
+ */ int at_measured(struct adaptive_timeout *at, unsigned int val) { unsigned int old = at->at_current; - time_t now = cfs_time_current_sec(); - time_t binlimit = max_t(time_t, at_history / AT_BINS, 1); + time64_t now = ktime_get_real_seconds(); + long binlimit = max_t(long, at_history / AT_BINS, 1); LASSERT(at); CDEBUG(D_OTHER, "add %u to %p time=%lu v=%u (%u %u %u %u)\n", - val, at, now - at->at_binstart, at->at_current, + val, at, (long)(now - at->at_binstart), at->at_current, at->at_hist[0], at->at_hist[1], at->at_hist[2], at->at_hist[3]); if (val == 0) @@ -1530,7 +1771,7 @@ int at_measured(struct adaptive_timeout *at, unsigned int val) drop to 0, and because 0 could mean an error */ return 0; - cfs_spin_lock(&at->at_lock); + spin_lock(&at->at_lock); if (unlikely(at->at_binstart == 0)) { /* Special case to remove default from history */ @@ -1546,8 +1787,9 @@ int at_measured(struct adaptive_timeout *at, unsigned int val) } else { int i, shift; unsigned int maxv = val; - /* move bins over */ - shift = (now - at->at_binstart) / binlimit; + + /* move bins over */ + shift = (u32)(now - at->at_binstart) / binlimit; LASSERT(shift > 0); for(i = AT_BINS - 1; i >= 0; i--) { if (i >= shift) { @@ -1586,7 +1828,7 @@ int at_measured(struct adaptive_timeout *at, unsigned int val) /* if we changed, report the old value */ old = (at->at_current != old) ? old : 0; - cfs_spin_unlock(&at->at_lock); + spin_unlock(&at->at_lock); return old; } @@ -1605,7 +1847,7 @@ int import_at_get_index(struct obd_import *imp, int portal) } /* Not found in list, add it under a lock */ - cfs_spin_lock(&imp->imp_lock); + spin_lock(&imp->imp_lock); /* Check unused under lock */ for (; i < IMP_AT_MAX_PORTALS; i++) { @@ -1621,6 +1863,6 @@ int import_at_get_index(struct obd_import *imp, int portal) at->iat_portal[i] = portal; out: - cfs_spin_unlock(&imp->imp_lock); - return i; + spin_unlock(&imp->imp_lock); + return i; }
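
The recovery path in ptlrpc_import_recovery_state_machine() above walks the import through a fixed sequence of states, each stage advancing only once the previous one has no replay RPCs left in flight. The standalone sketch below illustrates that ordering under simplified assumptions: the enum, names and helpers are stand-ins rather than the LUSTRE_IMP_* definitions, and the EVICTED case glosses over the invalidation kthread that the patch starts before recovery proceeds.

/* Standalone sketch of the recovery state progression implemented by
 * ptlrpc_import_recovery_state_machine().  The enum and helpers here are
 * simplified stand-ins, not the real Lustre definitions. */
#include <stdio.h>

enum imp_state {
        IMP_EVICTED,
        IMP_REPLAY,
        IMP_REPLAY_LOCKS,
        IMP_REPLAY_WAIT,
        IMP_RECOVER,
        IMP_FULL,
};

static const char *state_name(enum imp_state s)
{
        static const char *names[] = {
                "EVICTED", "REPLAY", "REPLAY_LOCKS",
                "REPLAY_WAIT", "RECOVER", "FULL",
        };
        return names[s];
}

/* One pass of the simplified machine: each stage only advances once the
 * previous stage has no replay RPCs in flight, mirroring the
 * atomic_read(&imp->imp_replay_inflight) == 0 checks in the real code. */
static enum imp_state advance(enum imp_state s, int replay_inflight)
{
        switch (s) {
        case IMP_EVICTED:
                /* the real code invalidates the import in a kthread first */
                return IMP_RECOVER;
        case IMP_REPLAY:
                return replay_inflight ? IMP_REPLAY : IMP_REPLAY_LOCKS;
        case IMP_REPLAY_LOCKS:
                return replay_inflight ? IMP_REPLAY_LOCKS : IMP_REPLAY_WAIT;
        case IMP_REPLAY_WAIT:
                return replay_inflight ? IMP_REPLAY_WAIT : IMP_RECOVER;
        case IMP_RECOVER:
                /* resend delayed requests, then activate the import */
                return IMP_FULL;
        default:
                return IMP_FULL;
        }
}

int main(void)
{
        enum imp_state s = IMP_REPLAY;

        while (s != IMP_FULL) {
                printf("%s -> ", state_name(s));
                s = advance(s, 0);
        }
        printf("%s\n", state_name(s));
        return 0;
}

Compiled on its own, this prints REPLAY -> REPLAY_LOCKS -> REPLAY_WAIT -> RECOVER -> FULL, the same progression the hunks above enforce via IMPORT_SET_STATE().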
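
Before sending the DISCONNECT request, ptlrpc_disconnect_import() above waits for any recovery still in progress, and the length of that wait depends on whether adaptive timeouts are enabled. The sketch below only illustrates that selection; OBD_TIMEOUT and service_estimate() are invented stand-ins for the obd_timeout tunable and for at_get() on the per-portal service estimate.

/* Standalone sketch of how ptlrpc_disconnect_import() picks how long to
 * wait for recovery before sending the DISCONNECT.  Constants and the
 * service_estimate() stand-in are assumptions for illustration only. */
#include <stdio.h>

#define OBD_TIMEOUT 100   /* assumed value of the obd_timeout tunable */

/* stand-in for at_get(&imp->imp_at.iat_service_estimate[idx]) */
static int service_estimate(int portal_index)
{
        return 15 + portal_index;   /* pretend per-portal history exists */
}

static long recovery_wait_seconds(int adaptive_timeouts_off,
                                  int server_timeout, int portal_index)
{
        long timeout;

        if (adaptive_timeouts_off)
                /* fixed timeouts: wait obd_timeout, or half of it when the
                 * server side also enforces a timeout */
                timeout = server_timeout ? OBD_TIMEOUT / 2 : OBD_TIMEOUT;
        else
                /* adaptive timeouts: use the measured service estimate for
                 * this import's request portal */
                timeout = service_estimate(portal_index);

        return timeout > 0 ? timeout : 1;   /* never wait for zero time */
}

int main(void)
{
        printf("AT off, no server timeout: %lds\n",
               recovery_wait_seconds(1, 0, 0));
        printf("AT on,  portal index 2:    %lds\n",
               recovery_wait_seconds(0, 0, 2));
        return 0;
}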
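
ptlrpc_disconnect_idle_interpret() above has to cope with a DISCONNECT reply racing against new work on the import: if anything besides the DISCONNECT itself is still in flight when the reply arrives, the import bumps its generation and reconnects instead of staying idle. The sketch below shows just that decision under simplified assumptions; it drops the imp_lock, the LUSTRE_IMP_CLOSED check and the actual reconnect call, and struct fake_import is a hypothetical stand-in for struct obd_import.

/* Standalone sketch of the race handling in
 * ptlrpc_disconnect_idle_interpret(): anything beyond the DISCONNECT
 * itself in flight means the import must reconnect rather than go idle. */
#include <stdio.h>

struct fake_import {           /* hypothetical, for illustration only */
        int generation;        /* bumped when a new connect cycle starts */
        int inflight;          /* in-flight RPCs, including the DISCONNECT */
        const char *state;
};

/* Returns 1 if a reconnect must be kicked off, 0 if the import stays idle. */
static int idle_reply_arrived(struct fake_import *imp, int reply_generation)
{
        /* A late DISCONNECT reply from a previous generation is ignored. */
        if (reply_generation != imp->generation)
                return 0;

        imp->state = "IDLE";

        /* Our own DISCONNECT accounts for one in-flight request; anything
         * beyond that means new work raced in while we were disconnecting. */
        if (imp->inflight > 1) {
                imp->generation++;
                imp->state = "NEW";
                return 1;
        }
        return 0;
}

int main(void)
{
        struct fake_import imp = { .generation = 3, .inflight = 2,
                                   .state = "CONNECTING" };

        if (idle_reply_arrived(&imp, 3))
                printf("raced: reconnect, generation now %d, state %s\n",
                       imp.generation, imp.state);
        else
                printf("quiet: import stays %s\n", imp.state);
        return 0;
}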
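
The comment added above at_measured() describes the adaptive-timeout history as AT_BINS per-timeslice maxima covering roughly the last at_history seconds. The sketch below reimplements only that bin bookkeeping so the aging behaviour is easy to see in isolation; the constants, the clamping policy, and the omission of the real code's growth margin and locking are all simplifying assumptions.

/* Standalone sketch of the AT bin bookkeeping: AT_BINS maxima, each
 * covering at_history/AT_BINS seconds, so the estimate remembers roughly
 * the last at_history seconds without storing every sample. */
#include <stdio.h>
#include <time.h>

#define AT_BINS    4
#define AT_HISTORY 240          /* seconds of history to remember */
#define AT_MIN     5
#define AT_MAX     600

struct at_sketch {
        unsigned int bins[AT_BINS];   /* per-slice maxima, bins[0] is newest */
        time_t binstart;              /* start of the newest slice */
        unsigned int current;
};

static void at_add(struct at_sketch *at, time_t now, unsigned int val)
{
        time_t binlimit = AT_HISTORY / AT_BINS;
        unsigned int maxv = 0;
        int i;

        if (at->binstart == 0) {
                at->binstart = now;                 /* first sample */
        } else if (now - at->binstart >= binlimit) {
                /* Time moved past the newest slice: age the bins. */
                long shift = (now - at->binstart) / binlimit;

                for (i = AT_BINS - 1; i >= 0; i--)
                        at->bins[i] = (i >= shift) ? at->bins[i - shift] : 0;
                at->binstart += shift * binlimit;
        }

        if (val > at->bins[0])
                at->bins[0] = val;

        for (i = 0; i < AT_BINS; i++)
                if (at->bins[i] > maxv)
                        maxv = at->bins[i];

        if (maxv < AT_MIN)
                maxv = AT_MIN;
        if (maxv > AT_MAX)
                maxv = AT_MAX;
        at->current = maxv;
}

int main(void)
{
        struct at_sketch at = { { 0 }, 0, 0 };
        time_t t0 = time(NULL);

        at_add(&at, t0, 25);        /* slow reply */
        at_add(&at, t0 + 70, 7);    /* a minute later, fast again */
        at_add(&at, t0 + 250, 7);   /* old maximum has aged out by now */
        printf("current estimate: %u\n", at.current);
        return 0;
}

Run on its own, the final estimate drops back to 7 once the earlier spike of 25 has aged out of the four bins, which is the smoothing behaviour the updated comment describes.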