X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fptlrpc%2Fimport.c;h=7c9a57c87b3bb43b849a731f9e1600c1e5f46359;hp=fad264b64475b88009298c24d758fae41316b45b;hb=389fde827be2ee6fb4ee08e955d773a2a16e70c6;hpb=b8e3553aa2b4f978a620f2ae0a71c0a415d6cfe4 diff --git a/lustre/ptlrpc/import.c b/lustre/ptlrpc/import.c index fad264b..7c9a57c 100644 --- a/lustre/ptlrpc/import.c +++ b/lustre/ptlrpc/import.c @@ -26,8 +26,10 @@ * GPL HEADER END */ /* - * Copyright 2008 Sun Microsystems, Inc. All rights reserved + * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2011, Whamcloud, Inc. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -59,6 +61,10 @@ struct ptlrpc_connect_async_args { int pcaa_initial_connect; }; +/** + * Updates import \a imp current state to provided \a state value + * Helper function. Must be called under imp_lock. + */ static void __import_set_state(struct obd_import *imp, enum lustre_imp_state state) { @@ -84,9 +90,9 @@ do { \ #define IMPORT_SET_STATE(imp, state) \ do { \ - spin_lock(&imp->imp_lock); \ + cfs_spin_lock(&imp->imp_lock); \ IMPORT_SET_STATE_NOLOCK(imp, state); \ - spin_unlock(&imp->imp_lock); \ + cfs_spin_unlock(&imp->imp_lock); \ } while(0) @@ -102,20 +108,19 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp); * though. */ int ptlrpc_init_import(struct obd_import *imp) { - spin_lock(&imp->imp_lock); + cfs_spin_lock(&imp->imp_lock); imp->imp_generation++; imp->imp_state = LUSTRE_IMP_NEW; - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); return 0; } EXPORT_SYMBOL(ptlrpc_init_import); #define UUID_STR "_UUID" -static void deuuidify(char *uuid, const char *prefix, char **uuid_start, - int *uuid_len) +void deuuidify(char *uuid, const char *prefix, char **uuid_start, int *uuid_len) { *uuid_start = !prefix || strncmp(uuid, prefix, strlen(prefix)) ? uuid : uuid + strlen(prefix); @@ -129,8 +134,10 @@ static void deuuidify(char *uuid, const char *prefix, char **uuid_start, UUID_STR, strlen(UUID_STR))) *uuid_len -= strlen(UUID_STR); } +EXPORT_SYMBOL(deuuidify); -/* Returns true if import was FULL, false if import was already not +/** + * Returns true if import was FULL, false if import was already not * connected. 
* @imp - import to be disconnected * @conn_cnt - connection count (epoch) of the request that timed out @@ -144,7 +151,7 @@ int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt) { int rc = 0; - spin_lock(&imp->imp_lock); + cfs_spin_lock(&imp->imp_lock); if (imp->imp_state == LUSTRE_IMP_FULL && (conn_cnt == 0 || conn_cnt == imp->imp_conn_cnt)) { @@ -155,22 +162,22 @@ int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt) &target_start, &target_len); if (imp->imp_replayable) { - LCONSOLE_WARN("%s: Connection to service %.*s via nid " - "%s was lost; in progress operations using this " - "service will wait for recovery to complete.\n", + LCONSOLE_WARN("%s: Connection to %.*s (at %s) was " + "lost; in progress operations using this " + "service will wait for recovery to complete\n", imp->imp_obd->obd_name, target_len, target_start, libcfs_nid2str(imp->imp_connection->c_peer.nid)); } else { - LCONSOLE_ERROR_MSG(0x166, "%s: Connection to service " - "%.*s via nid %s was lost; in progress " - "operations using this service will fail.\n", + LCONSOLE_ERROR_MSG(0x166, "%s: Connection to " + "%.*s (at %s) was lost; in progress " + "operations using this service will fail\n", imp->imp_obd->obd_name, target_len, target_start, libcfs_nid2str(imp->imp_connection->c_peer.nid)); } ptlrpc_deactivate_timeouts(imp); IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON); - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); if (obd_dump_on_timeout) libcfs_debug_dumplog(); @@ -178,7 +185,7 @@ int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt) obd_import_event(imp->imp_obd, imp, IMP_EVENT_DISCON); rc = 1; } else { - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); CDEBUG(D_HA, "%s: import %p already %s (conn %u, was %u): %s\n", imp->imp_client->cli_name, imp, (imp->imp_state == LUSTRE_IMP_FULL && @@ -199,7 +206,7 @@ static void ptlrpc_deactivate_and_unlock_import(struct obd_import *imp) CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd)); imp->imp_invalid = 1; imp->imp_generation++; - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); ptlrpc_abort_inflight(imp); obd_import_event(imp->imp_obd, imp, IMP_EVENT_INACTIVE); @@ -213,7 +220,7 @@ static void ptlrpc_deactivate_and_unlock_import(struct obd_import *imp) */ void ptlrpc_deactivate_import(struct obd_import *imp) { - spin_lock(&imp->imp_lock); + cfs_spin_lock(&imp->imp_lock); ptlrpc_deactivate_and_unlock_import(imp); } @@ -244,20 +251,20 @@ ptlrpc_inflight_deadline(struct ptlrpc_request *req, time_t now) static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp) { time_t now = cfs_time_current_sec(); - struct list_head *tmp, *n; + cfs_list_t *tmp, *n; struct ptlrpc_request *req; unsigned int timeout = 0; - spin_lock(&imp->imp_lock); - list_for_each_safe(tmp, n, &imp->imp_sending_list) { - req = list_entry(tmp, struct ptlrpc_request, rq_list); + cfs_spin_lock(&imp->imp_lock); + cfs_list_for_each_safe(tmp, n, &imp->imp_sending_list) { + req = cfs_list_entry(tmp, struct ptlrpc_request, rq_list); timeout = max(ptlrpc_inflight_deadline(req, now), timeout); } - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); return timeout; } -/* +/** * This function will invalidate the import, if necessary, then block * for all the RPC completions, and finally notify the obd to * invalidate its state (ie cancel locks, clear pending requests, @@ -265,20 +272,13 @@ static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp) */ void 
ptlrpc_invalidate_import(struct obd_import *imp) { - struct list_head *tmp, *n; + cfs_list_t *tmp, *n; struct ptlrpc_request *req; struct l_wait_info lwi; unsigned int timeout; int rc; - atomic_inc(&imp->imp_inval_count); - - /* - * If this is an invalid MGC connection, then don't bother - * waiting for imp_inflight to drop to 0. - */ - if (imp->imp_invalid && imp->imp_recon_bk &&!imp->imp_obd->obd_no_recov) - goto out; + cfs_atomic_inc(&imp->imp_inval_count); if (!imp->imp_invalid || imp->imp_obd->obd_no_recov) ptlrpc_deactivate_import(imp); @@ -315,16 +315,18 @@ void ptlrpc_invalidate_import(struct obd_import *imp) (timeout > 1)?cfs_time_seconds(1):cfs_time_seconds(1)/2, NULL, NULL); rc = l_wait_event(imp->imp_recovery_waitq, - (atomic_read(&imp->imp_inflight) == 0), &lwi); + (cfs_atomic_read(&imp->imp_inflight) == 0), + &lwi); if (rc) { const char *cli_tgt = obd2cli_tgt(imp->imp_obd); CERROR("%s: rc = %d waiting for callback (%d != 0)\n", - cli_tgt, rc, atomic_read(&imp->imp_inflight)); + cli_tgt, rc, + cfs_atomic_read(&imp->imp_inflight)); - spin_lock(&imp->imp_lock); - if (atomic_read(&imp->imp_inflight) == 0) { - int count = atomic_read(&imp->imp_unregistering); + cfs_spin_lock(&imp->imp_lock); + if (cfs_atomic_read(&imp->imp_inflight) == 0) { + int count = cfs_atomic_read(&imp->imp_unregistering); /* We know that "unregistering" rpcs only can * survive in sending or delaying lists (they @@ -340,19 +342,19 @@ void ptlrpc_invalidate_import(struct obd_import *imp) * this point. */ rc = 0; } else { - list_for_each_safe(tmp, n, - &imp->imp_sending_list) { - req = list_entry(tmp, - struct ptlrpc_request, - rq_list); + cfs_list_for_each_safe(tmp, n, + &imp->imp_sending_list) { + req = cfs_list_entry(tmp, + struct ptlrpc_request, + rq_list); DEBUG_REQ(D_ERROR, req, "still on sending list"); } - list_for_each_safe(tmp, n, - &imp->imp_delayed_list) { - req = list_entry(tmp, - struct ptlrpc_request, - rq_list); + cfs_list_for_each_safe(tmp, n, + &imp->imp_delayed_list) { + req = cfs_list_entry(tmp, + struct ptlrpc_request, + rq_list); DEBUG_REQ(D_ERROR, req, "still on delayed list"); } @@ -361,9 +363,10 @@ void ptlrpc_invalidate_import(struct obd_import *imp) "Network is sluggish? Waiting them " "to error out.\n", cli_tgt, ptlrpc_phase2str(RQ_PHASE_UNREGISTERING), - atomic_read(&imp->imp_unregistering)); + cfs_atomic_read(&imp-> + imp_unregistering)); } - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); } } while (rc != 0); @@ -371,12 +374,11 @@ void ptlrpc_invalidate_import(struct obd_import *imp) * Let's additionally check that no new rpcs added to import in * "invalidate" state. 
*/ - LASSERT(atomic_read(&imp->imp_inflight) == 0); -out: + LASSERT(cfs_atomic_read(&imp->imp_inflight) == 0); obd_import_event(imp->imp_obd, imp, IMP_EVENT_INVALIDATE); sptlrpc_import_flush_all_ctx(imp); - atomic_dec(&imp->imp_inval_count); + cfs_atomic_dec(&imp->imp_inval_count); cfs_waitq_broadcast(&imp->imp_recovery_waitq); } @@ -385,10 +387,10 @@ void ptlrpc_activate_import(struct obd_import *imp) { struct obd_device *obd = imp->imp_obd; - spin_lock(&imp->imp_lock); + cfs_spin_lock(&imp->imp_lock); imp->imp_invalid = 0; ptlrpc_activate_timeouts(imp); - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); obd_import_event(obd, imp, IMP_EVENT_ACTIVE); } @@ -411,9 +413,9 @@ void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt) CDEBUG(D_HA, "%s: waking up pinger\n", obd2cli_tgt(imp->imp_obd)); - spin_lock(&imp->imp_lock); + cfs_spin_lock(&imp->imp_lock); imp->imp_force_verify = 1; - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); ptlrpc_pinger_wake_up(); } @@ -428,15 +430,15 @@ int ptlrpc_reconnect_import(struct obd_import *imp) /* Do a fresh connect next time by zeroing the handle */ ptlrpc_disconnect_import(imp, 1); /* Wait for all invalidate calls to finish */ - if (atomic_read(&imp->imp_inval_count) > 0) { + if (cfs_atomic_read(&imp->imp_inval_count) > 0) { int rc; struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL); rc = l_wait_event(imp->imp_recovery_waitq, - (atomic_read(&imp->imp_inval_count) == 0), + (cfs_atomic_read(&imp->imp_inval_count) == 0), &lwi); if (rc) CERROR("Interrupted, inval=%d\n", - atomic_read(&imp->imp_inval_count)); + cfs_atomic_read(&imp->imp_inval_count)); } /* Allow reconnect attempts */ @@ -444,39 +446,38 @@ int ptlrpc_reconnect_import(struct obd_import *imp) /* Remove 'invalid' flag */ ptlrpc_activate_import(imp); /* Attempt a new connect */ - ptlrpc_recover_import(imp, NULL); + ptlrpc_recover_import(imp, NULL, 0); return 0; } - EXPORT_SYMBOL(ptlrpc_reconnect_import); +/** + * Connection on import \a imp is changed to another one (if more than one is + * present). We typically chose connection that we have not tried to connect to + * the longest + */ static int import_select_connection(struct obd_import *imp) { struct obd_import_conn *imp_conn = NULL, *conn; struct obd_export *dlmexp; - int tried_all = 1; + char *target_start; + int target_len, tried_all = 1; ENTRY; - spin_lock(&imp->imp_lock); + cfs_spin_lock(&imp->imp_lock); - if (list_empty(&imp->imp_conn_list)) { + if (cfs_list_empty(&imp->imp_conn_list)) { CERROR("%s: no connections available\n", imp->imp_obd->obd_name); - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); RETURN(-EINVAL); } - list_for_each_entry(conn, &imp->imp_conn_list, oic_item) { + cfs_list_for_each_entry(conn, &imp->imp_conn_list, oic_item) { CDEBUG(D_HA, "%s: connect to NID %s last attempt "LPU64"\n", imp->imp_obd->obd_name, libcfs_nid2str(conn->oic_conn->c_peer.nid), conn->oic_last_attempt); - /* Don't thrash connections */ - if (cfs_time_before_64(cfs_time_current_64(), - conn->oic_last_attempt + - cfs_time_seconds(CONNECTION_SWITCH_MIN))) { - continue; - } /* If we have not tried this connection since the last successful attempt, go with this one */ @@ -511,18 +512,17 @@ static int import_select_connection(struct obd_import *imp) we do finally connect. (FIXME: really we should wait for all network state associated with the last connection attempt to drain before trying to reconnect on it.) 
*/ - if (tried_all && (imp->imp_conn_list.next == &imp_conn->oic_item) && - !imp->imp_recon_bk /* not retrying */) { + if (tried_all && (imp->imp_conn_list.next == &imp_conn->oic_item)) { if (at_get(&imp->imp_at.iat_net_latency) < CONNECTION_SWITCH_MAX) { - at_add(&imp->imp_at.iat_net_latency, - at_get(&imp->imp_at.iat_net_latency) + - CONNECTION_SWITCH_INC); + at_measured(&imp->imp_at.iat_net_latency, + at_get(&imp->imp_at.iat_net_latency) + + CONNECTION_SWITCH_INC); } LASSERT(imp_conn->oic_last_attempt); - CWARN("%s: tried all connections, increasing latency to %ds\n", - imp->imp_obd->obd_name, - at_get(&imp->imp_at.iat_net_latency)); + CDEBUG(D_HA, "%s: tried all connections, increasing latency " + "to %ds\n", imp->imp_obd->obd_name, + at_get(&imp->imp_at.iat_net_latency)); } imp_conn->oic_last_attempt = cfs_time_current_64(); @@ -540,10 +540,17 @@ static int import_select_connection(struct obd_import *imp) class_export_put(dlmexp); if (imp->imp_conn_current != imp_conn) { - if (imp->imp_conn_current) - CDEBUG(D_HA, "Changing connection for %s to %s/%s\n", - imp->imp_obd->obd_name, imp_conn->oic_uuid.uuid, + if (imp->imp_conn_current) { + deuuidify(obd2cli_tgt(imp->imp_obd), NULL, + &target_start, &target_len); + + CDEBUG(D_HA, "%s: Connection changing to" + " %.*s (at %s)\n", + imp->imp_obd->obd_name, + target_len, target_start, libcfs_nid2str(imp_conn->oic_conn->c_peer.nid)); + } + imp->imp_conn_current = imp_conn; } @@ -551,7 +558,7 @@ static int import_select_connection(struct obd_import *imp) imp->imp_obd->obd_name, imp, imp_conn->oic_uuid.uuid, libcfs_nid2str(imp_conn->oic_conn->c_peer.nid)); - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); RETURN(0); } @@ -562,12 +569,12 @@ static int import_select_connection(struct obd_import *imp) static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno) { struct ptlrpc_request *req; - struct list_head *tmp; + cfs_list_t *tmp; - if (list_empty(&imp->imp_replay_list)) + if (cfs_list_empty(&imp->imp_replay_list)) return 0; tmp = imp->imp_replay_list.next; - req = list_entry(tmp, struct ptlrpc_request, rq_replay_list); + req = cfs_list_entry(tmp, struct ptlrpc_request, rq_replay_list); *transno = req->rq_transno; if (req->rq_transno == 0) { DEBUG_REQ(D_ERROR, req, "zero transno in replay"); @@ -577,7 +584,13 @@ static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno) return 1; } -int ptlrpc_connect_import(struct obd_import *imp, char *new_uuid) +/** + * Attempt to (re)connect import \a imp. This includes all preparations, + * initializing CONNECT RPC request and passing it to ptlrpcd for + * actual sending. + * Returns 0 on success or error code. 
+ */ +int ptlrpc_connect_import(struct obd_import *imp) { struct obd_device *obd = imp->imp_obd; int initial_connect = 0; @@ -593,17 +606,17 @@ int ptlrpc_connect_import(struct obd_import *imp, char *new_uuid) int rc; ENTRY; - spin_lock(&imp->imp_lock); + cfs_spin_lock(&imp->imp_lock); if (imp->imp_state == LUSTRE_IMP_CLOSED) { - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); CERROR("can't connect to a closed import\n"); RETURN(-EINVAL); } else if (imp->imp_state == LUSTRE_IMP_FULL) { - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); CERROR("already connected\n"); RETURN(0); } else if (imp->imp_state == LUSTRE_IMP_CONNECTING) { - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); CERROR("already connecting\n"); RETURN(-EALREADY); } @@ -620,42 +633,12 @@ int ptlrpc_connect_import(struct obd_import *imp, char *new_uuid) set_transno = ptlrpc_first_transno(imp, &imp->imp_connect_data.ocd_transno); - spin_unlock(&imp->imp_lock); - - if (new_uuid) { - struct obd_uuid uuid; - - obd_str2uuid(&uuid, new_uuid); - rc = import_set_conn_priority(imp, &uuid); - if (rc) - GOTO(out, rc); - } + cfs_spin_unlock(&imp->imp_lock); rc = import_select_connection(imp); if (rc) GOTO(out, rc); - /* last in connection list */ - if (imp->imp_conn_current->oic_item.next == &imp->imp_conn_list) { - if (imp->imp_initial_recov_bk && initial_connect) { - CDEBUG(D_HA, "Last connection attempt (%d) for %s\n", - imp->imp_conn_cnt, obd2cli_tgt(imp->imp_obd)); - /* Don't retry if connect fails */ - rc = 0; - obd_set_info_async(obd->obd_self_export, - sizeof(KEY_INIT_RECOV), - KEY_INIT_RECOV, - sizeof(rc), &rc, NULL); - } - if (imp->imp_recon_bk) { - CDEBUG(D_HA, "Last reconnection attempt (%d) for %s\n", - imp->imp_conn_cnt, obd2cli_tgt(imp->imp_obd)); - spin_lock(&imp->imp_lock); - imp->imp_last_recon = 1; - spin_unlock(&imp->imp_lock); - } - } - rc = sptlrpc_import_sec_adapt(imp, NULL, 0); if (rc) GOTO(out, rc); @@ -664,6 +647,7 @@ int ptlrpc_connect_import(struct obd_import *imp, char *new_uuid) * the server is updated on-the-fly we will get the new features. 
*/ imp->imp_connect_data.ocd_connect_flags = imp->imp_connect_flags_orig; imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT; + imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18; rc = obd_reconnect(NULL, imp->imp_obd->obd_self_export, obd, &obd->obd_uuid, &imp->imp_connect_data, NULL); @@ -715,9 +699,9 @@ int ptlrpc_connect_import(struct obd_import *imp, char *new_uuid) aa->pcaa_initial_connect = initial_connect; if (aa->pcaa_initial_connect) { - spin_lock(&imp->imp_lock); + cfs_spin_lock(&imp->imp_lock); imp->imp_replayable = 1; - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_INITIAL); } @@ -726,8 +710,9 @@ int ptlrpc_connect_import(struct obd_import *imp, char *new_uuid) lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_TRANSNO); - DEBUG_REQ(D_RPCTRACE, request, "(re)connect request"); - ptlrpcd_add_req(request, PSCOPE_OTHER); + DEBUG_REQ(D_RPCTRACE, request, "(re)connect request (timeout %d)", + request->rq_timeout); + ptlrpcd_add_req(request, PDL_POLICY_ROUND, -1); rc = 0; out: if (rc != 0) { @@ -741,42 +726,18 @@ EXPORT_SYMBOL(ptlrpc_connect_import); static void ptlrpc_maybe_ping_import_soon(struct obd_import *imp) { #ifdef __KERNEL__ - struct obd_import_conn *imp_conn; -#endif - int wake_pinger = 0; - - ENTRY; + int force_verify; - spin_lock(&imp->imp_lock); - if (list_empty(&imp->imp_conn_list)) - GOTO(unlock, 0); + cfs_spin_lock(&imp->imp_lock); + force_verify = imp->imp_force_verify != 0; + cfs_spin_unlock(&imp->imp_lock); -#ifdef __KERNEL__ - imp_conn = list_entry(imp->imp_conn_list.prev, - struct obd_import_conn, - oic_item); - - /* XXX: When the failover node is the primary node, it is possible - * to have two identical connections in imp_conn_list. We must - * compare not conn's pointers but NIDs, otherwise we can defeat - * connection throttling. (See bug 14774.) */ - if (imp->imp_conn_current->oic_conn->c_peer.nid != - imp_conn->oic_conn->c_peer.nid) { - ptlrpc_ping_import_soon(imp); - wake_pinger = 1; - } + if (force_verify) + ptlrpc_pinger_wake_up(); #else /* liblustre has no pinger thread, so we wakeup pinger anyway */ - wake_pinger = 1; + ptlrpc_pinger_wake_up(); #endif - - unlock: - spin_unlock(&imp->imp_lock); - - if (wake_pinger) - ptlrpc_pinger_wake_up(); - - EXIT; } static int ptlrpc_busy_reconnect(int rc) @@ -784,7 +745,12 @@ static int ptlrpc_busy_reconnect(int rc) return (rc == -EBUSY) || (rc == -EAGAIN); } - +/** + * interpret_reply callback for connect RPCs. + * Looks into returned status of connect operation and decides + * what to do with the import - i.e enter recovery, promote it to + * full state for normal operations of disconnect it due to an error. 
+ */ static int ptlrpc_connect_interpret(const struct lu_env *env, struct ptlrpc_request *request, void *data, int rc) @@ -797,18 +763,17 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, int msg_flags; ENTRY; - spin_lock(&imp->imp_lock); + cfs_spin_lock(&imp->imp_lock); if (imp->imp_state == LUSTRE_IMP_CLOSED) { - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); RETURN(0); } if (rc) { /* if this reconnect to busy export - not need select new target * for connecting*/ - if (ptlrpc_busy_reconnect(rc)) - imp->imp_force_reconnect = 1; - spin_unlock(&imp->imp_lock); + imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc); + cfs_spin_unlock(&imp->imp_lock); GOTO(out, rc); } @@ -819,16 +784,17 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, /* All imports are pingable */ imp->imp_pingable = 1; imp->imp_force_reconnect = 0; + imp->imp_force_verify = 0; if (aa->pcaa_initial_connect) { if (msg_flags & MSG_CONNECT_REPLAYABLE) { imp->imp_replayable = 1; - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); CDEBUG(D_HA, "connected to replayable target: %s\n", obd2cli_tgt(imp->imp_obd)); } else { imp->imp_replayable = 0; - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); } /* if applies, adjust the imp->imp_msg_magic here @@ -851,7 +817,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, GOTO(finish, rc = 0); } else { - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); } /* Determine what recovery state to move the import to. */ @@ -859,10 +825,11 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, memset(&old_hdl, 0, sizeof(old_hdl)); if (!memcmp(&old_hdl, lustre_msg_get_handle(request->rq_repmsg), sizeof (old_hdl))) { - CERROR("%s@%s didn't like our handle "LPX64 - ", failed\n", obd2cli_tgt(imp->imp_obd), - imp->imp_connection->c_remote_uuid.uuid, - imp->imp_dlm_handle.cookie); + LCONSOLE_WARN("Reconnect to %s (at @%s) failed due " + "bad handle "LPX64"\n", + obd2cli_tgt(imp->imp_obd), + imp->imp_connection->c_remote_uuid.uuid, + imp->imp_dlm_handle.cookie); GOTO(out, rc = -ENOTCONN); } @@ -879,15 +846,27 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, * eviction. If it is in recovery - we are safe to * participate since we can reestablish all of our state * with server again */ - CDEBUG(level,"%s@%s changed server handle from " - LPX64" to "LPX64"%s\n", - obd2cli_tgt(imp->imp_obd), - imp->imp_connection->c_remote_uuid.uuid, - imp->imp_remote_handle.cookie, - lustre_msg_get_handle(request->rq_repmsg)-> - cookie, - (MSG_CONNECT_RECOVERING & msg_flags) ? 
- " but is still in recovery" : ""); + if ((MSG_CONNECT_RECOVERING & msg_flags)) { + CDEBUG(level,"%s@%s changed server handle from " + LPX64" to "LPX64 + " but is still in recovery\n", + obd2cli_tgt(imp->imp_obd), + imp->imp_connection->c_remote_uuid.uuid, + imp->imp_remote_handle.cookie, + lustre_msg_get_handle( + request->rq_repmsg)->cookie); + } else { + LCONSOLE_WARN("Evicted from %s (at %s) " + "after server handle changed from " + LPX64" to "LPX64"\n", + obd2cli_tgt(imp->imp_obd), + imp->imp_connection-> \ + c_remote_uuid.uuid, + imp->imp_remote_handle.cookie, + lustre_msg_get_handle( + request->rq_repmsg)->cookie); + } + imp->imp_remote_handle = *lustre_msg_get_handle(request->rq_repmsg); @@ -912,9 +891,9 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd)); - spin_lock(&imp->imp_lock); + cfs_spin_lock(&imp->imp_lock); imp->imp_resend_replay = 1; - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY); } else { @@ -940,7 +919,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, "after reconnect. We should LBUG right here.\n"); } - if (lustre_msg_get_last_committed(request->rq_repmsg) < + if (lustre_msg_get_last_committed(request->rq_repmsg) > 0 && + lustre_msg_get_last_committed(request->rq_repmsg) < aa->pcaa_peer_committed) { CERROR("%s went back in time (transno "LPD64 " was previously committed, server now claims "LPD64 @@ -958,7 +938,7 @@ finish: "invalidating and reconnecting\n", obd2cli_tgt(imp->imp_obd), imp->imp_connection->c_remote_uuid.uuid); - ptlrpc_connect_import(imp, NULL); + ptlrpc_connect_import(imp); RETURN(0); } } else { @@ -971,23 +951,26 @@ finish: ocd = req_capsule_server_sized_get(&request->rq_pill, &RMF_CONNECT_DATA, ret); - spin_lock(&imp->imp_lock); - list_del(&imp->imp_conn_current->oic_item); - list_add(&imp->imp_conn_current->oic_item, &imp->imp_conn_list); + cfs_spin_lock(&imp->imp_lock); + cfs_list_del(&imp->imp_conn_current->oic_item); + cfs_list_add(&imp->imp_conn_current->oic_item, + &imp->imp_conn_list); imp->imp_last_success_conn = imp->imp_conn_current->oic_last_attempt; if (ocd == NULL) { - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); CERROR("Wrong connect data from server\n"); rc = -EPROTO; GOTO(out, rc); } imp->imp_connect_data = *ocd; + CDEBUG(D_HA, "obd %s to target with inst %u\n", + imp->imp_obd->obd_name, ocd->ocd_instance); exp = class_conn2export(&imp->imp_dlm_handle); - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); /* check that server granted subset of flags we asked for. */ LASSERTF((ocd->ocd_connect_flags & @@ -1023,13 +1006,14 @@ finish: /* Sigh, some compilers do not like #ifdef in the middle of macro arguments */ #ifdef __KERNEL__ - const char *older = - "older. Consider upgrading this client"; + const char *older = "older. Consider upgrading server " + "or downgrading client"; #else - const char *older = - "older. Consider recompiling this application"; + const char *older = "older. Consider recompiling this " + "application"; #endif - const char *newer = "newer than client version"; + const char *newer = "newer than client version. " + "Consider upgrading client"; LCONSOLE_WARN("Server %s version (%d.%d.%d.%d) " "is much %s (%s)\n", @@ -1046,38 +1030,32 @@ finish: /* We sent to the server ocd_cksum_types with bits set * for algorithms we understand. 
The server masked off * the checksum types it doesn't support */ - if ((ocd->ocd_cksum_types & OBD_CKSUM_ALL) == 0) { + if ((ocd->ocd_cksum_types & cksum_types_supported()) == 0) { LCONSOLE_WARN("The negotiation of the checksum " "alogrithm to use with server %s " "failed (%x/%x), disabling " "checksums\n", obd2cli_tgt(imp->imp_obd), ocd->ocd_cksum_types, - OBD_CKSUM_ALL); + cksum_types_supported()); cli->cl_checksum = 0; cli->cl_supp_cksum_types = OBD_CKSUM_CRC32; - cli->cl_cksum_type = OBD_CKSUM_CRC32; } else { cli->cl_supp_cksum_types = ocd->ocd_cksum_types; - - if (ocd->ocd_cksum_types & OSC_DEFAULT_CKSUM) - cli->cl_cksum_type = OSC_DEFAULT_CKSUM; - else if (ocd->ocd_cksum_types & OBD_CKSUM_ADLER) - cli->cl_cksum_type = OBD_CKSUM_ADLER; - else - cli->cl_cksum_type = OBD_CKSUM_CRC32; } } else { /* The server does not support OBD_CONNECT_CKSUM. * Enforce CRC32 for backward compatibility*/ cli->cl_supp_cksum_types = OBD_CKSUM_CRC32; - cli->cl_cksum_type = OBD_CKSUM_CRC32; } + cli->cl_cksum_type =cksum_type_select(cli->cl_supp_cksum_types); - if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) { + if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) cli->cl_max_pages_per_rpc = ocd->ocd_brw_size >> CFS_PAGE_SHIFT; - } + else if (imp->imp_connect_op == MDS_CONNECT || + imp->imp_connect_op == MGS_CONNECT) + cli->cl_max_pages_per_rpc = 1; /* Reset ns_connect_flags only for initial connect. It might be * changed in while using FS and if we reset it in reconnect @@ -1108,6 +1086,12 @@ finish: else imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT; + if ((ocd->ocd_connect_flags & OBD_CONNECT_FULL20) && + (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2)) + imp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18; + else + imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18; + LASSERT((cli->cl_max_pages_per_rpc <= PTLRPC_MAX_BRW_PAGES) && (cli->cl_max_pages_per_rpc > 0)); } @@ -1115,15 +1099,7 @@ finish: out: if (rc != 0) { IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON); - spin_lock(&imp->imp_lock); - if (aa->pcaa_initial_connect && !imp->imp_initial_recov && - (request->rq_import_generation == imp->imp_generation)) - ptlrpc_deactivate_and_unlock_import(imp); - else - spin_unlock(&imp->imp_lock); - - if ((imp->imp_recon_bk && imp->imp_last_recon) || - (rc == -EACCES)) { + if (rc == -EACCES) { /* * Give up trying to reconnect * EACCES means client has no permission for connection @@ -1171,20 +1147,20 @@ out: (char *)imp->imp_connection->c_remote_uuid.uuid, rc); } - spin_lock(&imp->imp_lock); - imp->imp_last_recon = 0; - spin_unlock(&imp->imp_lock); - cfs_waitq_broadcast(&imp->imp_recovery_waitq); RETURN(rc); } +/** + * interpret callback for "completed replay" RPCs. 
+ * \see signal_completed_replay + */ static int completed_replay_interpret(const struct lu_env *env, struct ptlrpc_request *req, void * data, int rc) { ENTRY; - atomic_dec(&req->rq_import->imp_replay_inflight); + cfs_atomic_dec(&req->rq_import->imp_replay_inflight); if (req->rq_status == 0 && !req->rq_import->imp_vbr_failed) { ptlrpc_import_recovery_state_machine(req->rq_import); @@ -1193,33 +1169,37 @@ static int completed_replay_interpret(const struct lu_env *env, CDEBUG(D_WARNING, "%s: version recovery fails, reconnecting\n", req->rq_import->imp_obd->obd_name); - spin_lock(&req->rq_import->imp_lock); - req->rq_import->imp_vbr_failed = 0; - spin_unlock(&req->rq_import->imp_lock); } else { CDEBUG(D_HA, "%s: LAST_REPLAY message error: %d, " "reconnecting\n", req->rq_import->imp_obd->obd_name, req->rq_status); } - ptlrpc_connect_import(req->rq_import, NULL); + ptlrpc_connect_import(req->rq_import); } RETURN(0); } +/** + * Let server know that we have no requests to replay anymore. + * Achieved by just sending a PING request + */ static int signal_completed_replay(struct obd_import *imp) { struct ptlrpc_request *req; ENTRY; - LASSERT(atomic_read(&imp->imp_replay_inflight) == 0); - atomic_inc(&imp->imp_replay_inflight); + if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_FINISH_REPLAY))) + RETURN(0); + + LASSERT(cfs_atomic_read(&imp->imp_replay_inflight) == 0); + cfs_atomic_inc(&imp->imp_replay_inflight); req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING, LUSTRE_OBD_VERSION, OBD_PING); if (req == NULL) { - atomic_dec(&imp->imp_replay_inflight); + cfs_atomic_dec(&imp->imp_replay_inflight); RETURN(-ENOMEM); } @@ -1227,14 +1207,20 @@ static int signal_completed_replay(struct obd_import *imp) req->rq_send_state = LUSTRE_IMP_REPLAY_WAIT; lustre_msg_add_flags(req->rq_reqmsg, MSG_LOCK_REPLAY_DONE | MSG_REQ_REPLAY_DONE); - req->rq_timeout *= 3; + if (AT_OFF) + req->rq_timeout *= 3; req->rq_interpret_reply = completed_replay_interpret; - ptlrpcd_add_req(req, PSCOPE_OTHER); + ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1); RETURN(0); } #ifdef __KERNEL__ +/** + * In kernel code all import invalidation happens in its own + * separate thread, so that whatever application happened to encounter + * a problem could still be killed or otherwise continue + */ static int ptlrpc_invalidate_import_thread(void *data) { struct obd_import *imp = data; @@ -1262,6 +1248,26 @@ static int ptlrpc_invalidate_import_thread(void *data) } #endif +/** + * This is the state machine for client-side recovery on import. + * + * Typicaly we have two possibly paths. If we came to server and it is not + * in recovery, we just enter IMP_EVICTED state, invalidate our import + * state and reconnect from scratch. + * If we came to server that is in recovery, we enter IMP_REPLAY import state. + * We go through our list of requests to replay and send them to server one by + * one. + * After sending all request from the list we change import state to + * IMP_REPLAY_LOCKS and re-request all the locks we believe we have from server + * and also all the locks we don't yet have and wait for server to grant us. + * After that we send a special "replay completed" request and change import + * state to IMP_REPLAY_WAIT. + * Upon receiving reply to that "replay completed" RPC we enter IMP_RECOVER + * state and resend all requests from sending list. + * After that we promote import to FULL state and send all delayed requests + * and import is fully operational after that. 
+ * + */ int ptlrpc_import_recovery_state_machine(struct obd_import *imp) { int rc = 0; @@ -1284,6 +1290,10 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp) CDEBUG(D_HA, "evicted from %s@%s; invalidating\n", obd2cli_tgt(imp->imp_obd), imp->imp_connection->c_remote_uuid.uuid); + /* reset vbr_failed flag upon eviction */ + cfs_spin_lock(&imp->imp_lock); + imp->imp_vbr_failed = 0; + cfs_spin_unlock(&imp->imp_lock); #ifdef __KERNEL__ /* bug 17802: XXX client_disconnect_export vs connect request @@ -1291,8 +1301,8 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp) * invalidate thread without reference to import and import can * be freed at same time. */ class_import_get(imp); - rc = cfs_kernel_thread(ptlrpc_invalidate_import_thread, imp, - CLONE_VM | CLONE_FILES); + rc = cfs_create_thread(ptlrpc_invalidate_import_thread, imp, + CFS_DAEMON_FLAGS); if (rc < 0) { class_import_put(imp); CERROR("error starting invalidate thread: %d\n", rc); @@ -1312,7 +1322,7 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp) obd2cli_tgt(imp->imp_obd)); rc = ptlrpc_replay_next(imp, &inflight); if (inflight == 0 && - atomic_read(&imp->imp_replay_inflight) == 0) { + cfs_atomic_read(&imp->imp_replay_inflight) == 0) { IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS); rc = ldlm_replay_locks(imp); if (rc) @@ -1322,7 +1332,7 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp) } if (imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS) { - if (atomic_read(&imp->imp_replay_inflight) == 0) { + if (cfs_atomic_read(&imp->imp_replay_inflight) == 0) { IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_WAIT); rc = signal_completed_replay(imp); if (rc) @@ -1332,7 +1342,7 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp) } if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT) { - if (atomic_read(&imp->imp_replay_inflight) == 0) { + if (cfs_atomic_read(&imp->imp_replay_inflight) == 0) { IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER); } } @@ -1350,8 +1360,8 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp) deuuidify(obd2cli_tgt(imp->imp_obd), NULL, &target_start, &target_len); - LCONSOLE_INFO("%s: Connection restored to service %.*s " - "using nid %s.\n", imp->imp_obd->obd_name, + LCONSOLE_INFO("%s: Connection restored to %.*s (at %s)\n", + imp->imp_obd->obd_name, target_len, target_start, libcfs_nid2str(imp->imp_connection->c_peer.nid)); } @@ -1365,11 +1375,6 @@ out: RETURN(rc); } -static int back_to_sleep(void *unused) -{ - return 0; -} - int ptlrpc_disconnect_import(struct obd_import *imp, int noclose) { struct ptlrpc_request *req; @@ -1414,11 +1419,11 @@ int ptlrpc_disconnect_import(struct obd_import *imp, int noclose) } - spin_lock(&imp->imp_lock); + cfs_spin_lock(&imp->imp_lock); if (imp->imp_state != LUSTRE_IMP_FULL) GOTO(out, 0); - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_DISCONNECT, LUSTRE_OBD_VERSION, rq_opc); @@ -1448,16 +1453,14 @@ int ptlrpc_disconnect_import(struct obd_import *imp, int noclose) } set_state: - spin_lock(&imp->imp_lock); + cfs_spin_lock(&imp->imp_lock); out: if (noclose) IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON); else IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED); memset(&imp->imp_remote_handle, 0, sizeof(imp->imp_remote_handle)); - /* Try all connections in the future - bz 12758 */ - imp->imp_last_recon = 0; - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); RETURN(rc); } @@ -1466,10 +1469,10 @@ void ptlrpc_cleanup_imp(struct obd_import *imp) { 
ENTRY; - spin_lock(&imp->imp_lock); + cfs_spin_lock(&imp->imp_lock); IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED); imp->imp_generation++; - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); ptlrpc_abort_inflight(imp); EXIT; @@ -1482,7 +1485,7 @@ extern unsigned int at_min, at_max, at_history; This gives us a max of the last binlimit*AT_BINS secs without the storage, but still smoothing out a return to normalcy from a slow response. (E.g. remember the maximum latency in each minute of the last 4 minutes.) */ -int at_add(struct adaptive_timeout *at, unsigned int val) +int at_measured(struct adaptive_timeout *at, unsigned int val) { unsigned int old = at->at_current; time_t now = cfs_time_current_sec(); @@ -1498,7 +1501,7 @@ int at_add(struct adaptive_timeout *at, unsigned int val) drop to 0, and because 0 could mean an error */ return 0; - spin_lock(&at->at_lock); + cfs_spin_lock(&at->at_lock); if (unlikely(at->at_binstart == 0)) { /* Special case to remove default from history */ @@ -1554,7 +1557,7 @@ int at_add(struct adaptive_timeout *at, unsigned int val) /* if we changed, report the old value */ old = (at->at_current != old) ? old : 0; - spin_unlock(&at->at_lock); + cfs_spin_unlock(&at->at_lock); return old; } @@ -1573,7 +1576,7 @@ int import_at_get_index(struct obd_import *imp, int portal) } /* Not found in list, add it under a lock */ - spin_lock(&imp->imp_lock); + cfs_spin_lock(&imp->imp_lock); /* Check unused under lock */ for (; i < IMP_AT_MAX_PORTALS; i++) { @@ -1589,6 +1592,6 @@ int import_at_get_index(struct obd_import *imp, int portal) at->iat_portal[i] = portal; out: - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); return i; }
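The at_add() -> at_measured() rename in the last hunks touches the adaptive-timeout code whose comment describes keeping a per-bin maximum: the estimate covers roughly the last binlimit*AT_BINS seconds without storing every sample, while still decaying back to normal after one slow response. Below is a minimal standalone sketch of that binned rolling-maximum idea; the names (sample_window, window_update, NBINS, BINLIMIT) are illustrative only and are not the Lustre adaptive_timeout API or its locking.

    #include <time.h>

    #define NBINS    4    /* number of history bins (Lustre's AT_BINS is 4) */
    #define BINLIMIT 60   /* seconds of history covered by one bin */

    struct sample_window {
            time_t       binstart;     /* start time of the newest bin */
            unsigned int hist[NBINS];  /* per-bin maxima, hist[0] is newest */
            unsigned int current;      /* running maximum over all bins */
    };

    /*
     * Record a new measurement and return the smoothed maximum.  Bins older
     * than NBINS * BINLIMIT seconds are shifted out as time advances, so the
     * result tracks roughly the last NBINS * BINLIMIT seconds of samples.
     */
    static unsigned int window_update(struct sample_window *w, unsigned int val)
    {
            time_t now = time(NULL);
            long   shift;
            int    i;

            if (w->binstart == 0)
                    w->binstart = now;              /* first sample ever */

            shift = (long)(now - w->binstart) / BINLIMIT;
            if (shift >= NBINS) {
                    for (i = 0; i < NBINS; i++)     /* whole window expired */
                            w->hist[i] = 0;
                    w->binstart = now;
            } else if (shift > 0) {
                    for (i = NBINS - 1; i >= shift; i--)  /* age the history */
                            w->hist[i] = w->hist[i - shift];
                    for (i = 0; i < shift; i++)
                            w->hist[i] = 0;
                    w->binstart += (time_t)shift * BINLIMIT;
            }

            if (val > w->hist[0])                   /* update the newest bin */
                    w->hist[0] = val;

            w->current = 0;                         /* recompute window max */
            for (i = 0; i < NBINS; i++)
                    if (w->hist[i] > w->current)
                            w->current = w->hist[i];

            return w->current;
    }

The connect path uses the same idea when all connections have been tried: it feeds an inflated latency sample into the window (at_measured on iat_net_latency) instead of overwriting the estimate directly.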
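The checksum hunk in ptlrpc_connect_interpret() replaces the fixed CRC32/Adler preference ladder with cksum_type_select() applied to whatever types the server left set in ocd_cksum_types. A small standalone sketch of that negotiate-then-select pattern follows; the flag values and helper names here are illustrative and are not the real OBD_CKSUM_* definitions or cksum_type_select() itself.

    #include <stdio.h>

    /* Illustrative capability bits; Lustre's actual OBD_CKSUM_* values differ. */
    enum cksum_flag {
            CK_CRC32  = 1 << 0,
            CK_ADLER  = 1 << 1,
            CK_CRC32C = 1 << 2,
    };

    /* Pick the preferred algorithm from a mask of mutually supported types,
     * falling back to CRC32 when nothing better is available. */
    static enum cksum_flag cksum_select(unsigned int supported)
    {
            if (supported & CK_CRC32C)
                    return CK_CRC32C;
            if (supported & CK_ADLER)
                    return CK_ADLER;
            return CK_CRC32;
    }

    int main(void)
    {
            unsigned int client = CK_CRC32 | CK_ADLER | CK_CRC32C;
            unsigned int server = CK_CRC32 | CK_ADLER;  /* from connect reply */
            unsigned int common = client & server;

            if (common == 0)
                    printf("negotiation failed, disabling checksums\n");
            else
                    printf("selected type: %#x\n", cksum_select(common));
            return 0;
    }

As in the patch, an empty intersection disables checksums entirely rather than guessing an algorithm the server cannot verify.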