4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2013, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/ptlrpc/import.c
38 * Author: Mike Shaver <shaver@clusterfs.com>
41 #define DEBUG_SUBSYSTEM S_RPC
#ifndef __KERNEL__
# include <liblustre.h>
#endif
46 #include <obd_support.h>
47 #include <lustre_ha.h>
48 #include <lustre_net.h>
49 #include <lustre_import.h>
50 #include <lustre_export.h>
52 #include <obd_cksum.h>
53 #include <obd_class.h>
55 #include "ptlrpc_internal.h"
struct ptlrpc_connect_async_args {
        __u64 pcaa_peer_committed;
        int pcaa_initial_connect;
};
/**
 * Update the current state of import \a imp to the provided \a state value.
 * Helper function. Must be called under imp_lock.
 */
static void __import_set_state(struct obd_import *imp,
                               enum lustre_imp_state state)
{
        imp->imp_state = state;
        imp->imp_state_hist[imp->imp_state_hist_idx].ish_state = state;
        imp->imp_state_hist[imp->imp_state_hist_idx].ish_time =
                cfs_time_current_sec();
        imp->imp_state_hist_idx = (imp->imp_state_hist_idx + 1) %
                IMP_STATE_HIST_LEN;
}
/* A CLOSED import should remain so. */
#define IMPORT_SET_STATE_NOLOCK(imp, state)                                    \
do {                                                                           \
        if (imp->imp_state != LUSTRE_IMP_CLOSED) {                             \
                CDEBUG(D_HA, "%p %s: changing import state from %s to %s\n",   \
                       imp, obd2cli_tgt(imp->imp_obd),                         \
                       ptlrpc_import_state_name(imp->imp_state),               \
                       ptlrpc_import_state_name(state));                       \
                __import_set_state(imp, state);                                \
        }                                                                      \
} while (0)
#define IMPORT_SET_STATE(imp, state)                    \
do {                                                    \
        spin_lock(&imp->imp_lock);                      \
        IMPORT_SET_STATE_NOLOCK(imp, state);            \
        spin_unlock(&imp->imp_lock);                    \
} while (0)
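/*
 * Typical usage, as seen throughout this file:
 *
 *	IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
 *
 * IMPORT_SET_STATE() takes imp_lock itself; the _NOLOCK variant is for
 * callers that already hold imp_lock.
 */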
static int ptlrpc_connect_interpret(const struct lu_env *env,
                                    struct ptlrpc_request *request,
                                    void *data, int rc);

int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
/* Only this function is allowed to change the import state when it is
 * CLOSED. I would rather refcount the import and free it after
 * disconnection like we do with exports. To do that, the client_obd
 * will need to save the peer info somewhere other than in the import. */
int ptlrpc_init_import(struct obd_import *imp)
{
        spin_lock(&imp->imp_lock);

        imp->imp_generation++;
        imp->imp_state = LUSTRE_IMP_NEW;

        spin_unlock(&imp->imp_lock);

        RETURN(0);
}
EXPORT_SYMBOL(ptlrpc_init_import);
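/*
 * deuuidify() below strips an optional prefix and the trailing "_UUID"
 * suffix from a target UUID string. It only returns a pointer and length
 * into the caller's buffer; nothing is copied or modified. For example
 * (an illustrative target name): "lustre-OST0000_UUID" would yield
 * "lustre-OST0000".
 */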
#define UUID_STR "_UUID"

void deuuidify(char *uuid, const char *prefix, char **uuid_start, int *uuid_len)
{
        *uuid_start = !prefix || strncmp(uuid, prefix, strlen(prefix))
                ? uuid : uuid + strlen(prefix);

        *uuid_len = strlen(*uuid_start);

        if (*uuid_len < strlen(UUID_STR))
                return;

        if (!strncmp(*uuid_start + *uuid_len - strlen(UUID_STR),
                     UUID_STR, strlen(UUID_STR)))
                *uuid_len -= strlen(UUID_STR);
}
EXPORT_SYMBOL(deuuidify);
/**
 * Returns true if import was FULL, false if import was already not
 * connected.
 * @imp - import to be disconnected
 * @conn_cnt - connection count (epoch) of the request that timed out
 *             and caused the disconnection. In some cases, multiple
 *             inflight requests can fail to a single target (e.g. OST
 *             bulk requests) and if one has already caused a reconnection
 *             (increasing the import->conn_cnt) the older failure should
 *             not also cause a reconnection. If zero it forces a reconnect.
 */
148 int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt)
152 spin_lock(&imp->imp_lock);
154 if (imp->imp_state == LUSTRE_IMP_FULL &&
155 (conn_cnt == 0 || conn_cnt == imp->imp_conn_cnt)) {
159 deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
160 &target_start, &target_len);
162 if (imp->imp_replayable) {
163 LCONSOLE_WARN("%s: Connection to %.*s (at %s) was "
164 "lost; in progress operations using this "
165 "service will wait for recovery to complete\n",
166 imp->imp_obd->obd_name, target_len, target_start,
167 libcfs_nid2str(imp->imp_connection->c_peer.nid));
169 LCONSOLE_ERROR_MSG(0x166, "%s: Connection to "
170 "%.*s (at %s) was lost; in progress "
171 "operations using this service will fail\n",
172 imp->imp_obd->obd_name,
173 target_len, target_start,
174 libcfs_nid2str(imp->imp_connection->c_peer.nid));
176 IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
177 spin_unlock(&imp->imp_lock);
179 if (obd_dump_on_timeout)
180 libcfs_debug_dumplog();
182 obd_import_event(imp->imp_obd, imp, IMP_EVENT_DISCON);
185 spin_unlock(&imp->imp_lock);
186 CDEBUG(D_HA, "%s: import %p already %s (conn %u, was %u): %s\n",
187 imp->imp_client->cli_name, imp,
188 (imp->imp_state == LUSTRE_IMP_FULL &&
189 imp->imp_conn_cnt > conn_cnt) ?
190 "reconnected" : "not connected", imp->imp_conn_cnt,
191 conn_cnt, ptlrpc_import_state_name(imp->imp_state));
197 /* Must be called with imp_lock held! */
198 static void ptlrpc_deactivate_and_unlock_import(struct obd_import *imp)
201 LASSERT(spin_is_locked(&imp->imp_lock));
203 CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd));
204 imp->imp_invalid = 1;
205 imp->imp_generation++;
206 spin_unlock(&imp->imp_lock);
208 ptlrpc_abort_inflight(imp);
209 obd_import_event(imp->imp_obd, imp, IMP_EVENT_INACTIVE);
/*
 * This acts as a barrier; all existing requests are rejected, and
 * no new requests will be accepted until the import is valid again.
 */
218 void ptlrpc_deactivate_import(struct obd_import *imp)
220 spin_lock(&imp->imp_lock);
221 ptlrpc_deactivate_and_unlock_import(imp);
223 EXPORT_SYMBOL(ptlrpc_deactivate_import);
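/*
 * The two helpers below estimate how long we may still have to wait for
 * requests that are already on the wire: ptlrpc_inflight_deadline() gives
 * (roughly) the time remaining before one request reaches its deadline, or
 * 0 if it is not actually in flight or has already timed out, and
 * ptlrpc_inflight_timeout() takes the maximum over the import's sending
 * list. ptlrpc_invalidate_import() uses this to size its wait.
 */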
226 ptlrpc_inflight_deadline(struct ptlrpc_request *req, time_t now)
230 if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
231 (req->rq_phase == RQ_PHASE_BULK) ||
232 (req->rq_phase == RQ_PHASE_NEW)))
235 if (req->rq_timedout)
238 if (req->rq_phase == RQ_PHASE_NEW)
241 dl = req->rq_deadline;
249 static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp)
251 time_t now = cfs_time_current_sec();
253 struct ptlrpc_request *req;
254 unsigned int timeout = 0;
256 spin_lock(&imp->imp_lock);
257 cfs_list_for_each_safe(tmp, n, &imp->imp_sending_list) {
258 req = cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
259 timeout = max(ptlrpc_inflight_deadline(req, now), timeout);
261 spin_unlock(&imp->imp_lock);
/**
 * This function will invalidate the import, if necessary, then block
 * for all the RPC completions, and finally notify the obd to
 * invalidate its state (i.e. cancel locks, clear pending requests,
 * etc.).
 */
271 void ptlrpc_invalidate_import(struct obd_import *imp)
274 struct ptlrpc_request *req;
275 struct l_wait_info lwi;
276 unsigned int timeout;
279 cfs_atomic_inc(&imp->imp_inval_count);
281 if (!imp->imp_invalid || imp->imp_obd->obd_no_recov)
282 ptlrpc_deactivate_import(imp);
284 LASSERT(imp->imp_invalid);
286 /* Wait forever until inflight == 0. We really can't do it another
287 * way because in some cases we need to wait for very long reply
288 * unlink. We can't do anything before that because there is really
289 * no guarantee that some rdma transfer is not in progress right now. */
291 /* Calculate max timeout for waiting on rpcs to error
292 * out. Use obd_timeout if calculated value is smaller
294 if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
295 timeout = ptlrpc_inflight_timeout(imp);
296 timeout += timeout / 3;
299 timeout = obd_timeout;
301 /* decrease the interval to increase race condition */
305 CDEBUG(D_RPCTRACE,"Sleeping %d sec for inflight to error out\n",
308 /* Wait for all requests to error out and call completion
309 * callbacks. Cap it at obd_timeout -- these should all
310 * have been locally cancelled by ptlrpc_abort_inflight. */
311 lwi = LWI_TIMEOUT_INTERVAL(
312 cfs_timeout_cap(cfs_time_seconds(timeout)),
313 (timeout > 1)?cfs_time_seconds(1):cfs_time_seconds(1)/2,
315 rc = l_wait_event(imp->imp_recovery_waitq,
316 (cfs_atomic_read(&imp->imp_inflight) == 0),
319 const char *cli_tgt = obd2cli_tgt(imp->imp_obd);
321 CERROR("%s: rc = %d waiting for callback (%d != 0)\n",
323 cfs_atomic_read(&imp->imp_inflight));
325 spin_lock(&imp->imp_lock);
326 if (cfs_atomic_read(&imp->imp_inflight) == 0) {
327 int count = cfs_atomic_read(&imp->imp_unregistering);
                        /* We know that "unregistering" rpcs can only
                         * survive in the sending or delaying lists (they
                         * may be waiting for a long reply unlink in
                         * sluggish nets). Let's check this. If there
                         * is no inflight and unregistering != 0, this
                         * is a bug. */
                        LASSERTF(count == 0, "Some RPCs are still "
                                 "unregistering: %d\n", count);
                        /* Let's save one loop as soon as inflight has
                         * dropped to zero. No new inflights are possible at
                         * this point. */
343 cfs_list_for_each_safe(tmp, n,
344 &imp->imp_sending_list) {
345 req = cfs_list_entry(tmp,
346 struct ptlrpc_request,
348 DEBUG_REQ(D_ERROR, req,
349 "still on sending list");
351 cfs_list_for_each_safe(tmp, n,
352 &imp->imp_delayed_list) {
353 req = cfs_list_entry(tmp,
354 struct ptlrpc_request,
356 DEBUG_REQ(D_ERROR, req,
357 "still on delayed list");
360 CERROR("%s: RPCs in \"%s\" phase found (%d). "
361 "Network is sluggish? Waiting them "
362 "to error out.\n", cli_tgt,
363 ptlrpc_phase2str(RQ_PHASE_UNREGISTERING),
364 cfs_atomic_read(&imp->
367 spin_unlock(&imp->imp_lock);
        /*
         * Let's additionally check that no new rpcs are added to the
         * import while it is in the "invalidate" state.
         */
375 LASSERT(cfs_atomic_read(&imp->imp_inflight) == 0);
376 obd_import_event(imp->imp_obd, imp, IMP_EVENT_INVALIDATE);
377 sptlrpc_import_flush_all_ctx(imp);
379 cfs_atomic_dec(&imp->imp_inval_count);
380 wake_up_all(&imp->imp_recovery_waitq);
382 EXPORT_SYMBOL(ptlrpc_invalidate_import);
384 /* unset imp_invalid */
385 void ptlrpc_activate_import(struct obd_import *imp)
387 struct obd_device *obd = imp->imp_obd;
389 spin_lock(&imp->imp_lock);
390 imp->imp_invalid = 0;
391 spin_unlock(&imp->imp_lock);
392 obd_import_event(obd, imp, IMP_EVENT_ACTIVE);
394 EXPORT_SYMBOL(ptlrpc_activate_import);
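/*
 * Mark the import as failed at connection epoch \a conn_cnt (see
 * ptlrpc_set_import_discon() above). If the import really moved from FULL
 * to DISCON, a non-replayable import is auto-deactivated, and the pinger
 * is woken with imp_force_verify set so that a new connection attempt is
 * made soon.
 */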
396 void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt)
400 LASSERT(!imp->imp_dlm_fake);
402 if (ptlrpc_set_import_discon(imp, conn_cnt)) {
403 if (!imp->imp_replayable) {
404 CDEBUG(D_HA, "import %s@%s for %s not replayable, "
405 "auto-deactivating\n",
406 obd2cli_tgt(imp->imp_obd),
407 imp->imp_connection->c_remote_uuid.uuid,
408 imp->imp_obd->obd_name);
409 ptlrpc_deactivate_import(imp);
412 CDEBUG(D_HA, "%s: waking up pinger\n",
413 obd2cli_tgt(imp->imp_obd));
415 spin_lock(&imp->imp_lock);
416 imp->imp_force_verify = 1;
417 spin_unlock(&imp->imp_lock);
419 ptlrpc_pinger_wake_up();
423 EXPORT_SYMBOL(ptlrpc_fail_import);
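/*
 * ptlrpc_reconnect_import() below forces a complete reconnect cycle by
 * hand: mark the import disconnected, invalidate it, send a DISCONNECT,
 * wait for any concurrent invalidation to finish, then clear the invalid
 * flag and kick off a fresh connect attempt.
 */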
425 int ptlrpc_reconnect_import(struct obd_import *imp)
427 ptlrpc_set_import_discon(imp, 0);
428 /* Force a new connect attempt */
429 ptlrpc_invalidate_import(imp);
430 /* Do a fresh connect next time by zeroing the handle */
431 ptlrpc_disconnect_import(imp, 1);
432 /* Wait for all invalidate calls to finish */
433 if (cfs_atomic_read(&imp->imp_inval_count) > 0) {
435 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
436 rc = l_wait_event(imp->imp_recovery_waitq,
437 (cfs_atomic_read(&imp->imp_inval_count) == 0),
440 CERROR("Interrupted, inval=%d\n",
441 cfs_atomic_read(&imp->imp_inval_count));
444 /* Allow reconnect attempts */
445 imp->imp_obd->obd_no_recov = 0;
446 /* Remove 'invalid' flag */
447 ptlrpc_activate_import(imp);
448 /* Attempt a new connect */
449 ptlrpc_recover_import(imp, NULL, 0);
452 EXPORT_SYMBOL(ptlrpc_reconnect_import);
/**
 * Connection on import \a imp is changed to another one (if more than one is
 * present). We typically choose the connection that we have not tried to
 * connect to for the longest time.
 */
459 static int import_select_connection(struct obd_import *imp)
461 struct obd_import_conn *imp_conn = NULL, *conn;
462 struct obd_export *dlmexp;
464 int target_len, tried_all = 1;
467 spin_lock(&imp->imp_lock);
469 if (cfs_list_empty(&imp->imp_conn_list)) {
470 CERROR("%s: no connections available\n",
471 imp->imp_obd->obd_name);
472 spin_unlock(&imp->imp_lock);
476 cfs_list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
477 CDEBUG(D_HA, "%s: connect to NID %s last attempt "LPU64"\n",
478 imp->imp_obd->obd_name,
479 libcfs_nid2str(conn->oic_conn->c_peer.nid),
480 conn->oic_last_attempt);
482 /* If we have not tried this connection since
483 the last successful attempt, go with this one */
484 if ((conn->oic_last_attempt == 0) ||
485 cfs_time_beforeq_64(conn->oic_last_attempt,
486 imp->imp_last_success_conn)) {
                /* If all of the connections have already been tried
                 * since the last successful connection, just choose the
                 * least recently used. */
497 else if (cfs_time_before_64(conn->oic_last_attempt,
498 imp_conn->oic_last_attempt))
502 /* if not found, simply choose the current one */
503 if (!imp_conn || imp->imp_force_reconnect) {
504 LASSERT(imp->imp_conn_current);
505 imp_conn = imp->imp_conn_current;
508 LASSERT(imp_conn->oic_conn);
510 /* If we've tried everything, and we're back to the beginning of the
511 list, increase our timeout and try again. It will be reset when
512 we do finally connect. (FIXME: really we should wait for all network
513 state associated with the last connection attempt to drain before
514 trying to reconnect on it.) */
515 if (tried_all && (imp->imp_conn_list.next == &imp_conn->oic_item)) {
516 struct adaptive_timeout *at = &imp->imp_at.iat_net_latency;
517 if (at_get(at) < CONNECTION_SWITCH_MAX) {
518 at_measured(at, at_get(at) + CONNECTION_SWITCH_INC);
519 if (at_get(at) > CONNECTION_SWITCH_MAX)
520 at_reset(at, CONNECTION_SWITCH_MAX);
522 LASSERT(imp_conn->oic_last_attempt);
523 CDEBUG(D_HA, "%s: tried all connections, increasing latency "
524 "to %ds\n", imp->imp_obd->obd_name, at_get(at));
527 imp_conn->oic_last_attempt = cfs_time_current_64();
529 /* switch connection, don't mind if it's same as the current one */
530 if (imp->imp_connection)
531 ptlrpc_connection_put(imp->imp_connection);
532 imp->imp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
534 dlmexp = class_conn2export(&imp->imp_dlm_handle);
535 LASSERT(dlmexp != NULL);
536 if (dlmexp->exp_connection)
537 ptlrpc_connection_put(dlmexp->exp_connection);
538 dlmexp->exp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
539 class_export_put(dlmexp);
541 if (imp->imp_conn_current != imp_conn) {
542 if (imp->imp_conn_current) {
543 deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
544 &target_start, &target_len);
546 CDEBUG(D_HA, "%s: Connection changing to"
548 imp->imp_obd->obd_name,
549 target_len, target_start,
550 libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
553 imp->imp_conn_current = imp_conn;
556 CDEBUG(D_HA, "%s: import %p using connection %s/%s\n",
557 imp->imp_obd->obd_name, imp, imp_conn->oic_uuid.uuid,
558 libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
560 spin_unlock(&imp->imp_lock);
/*
 * must be called under imp_lock
 */
568 static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno)
570 struct ptlrpc_request *req;
573 /* The requests in committed_list always have smaller transnos than
574 * the requests in replay_list */
575 if (!cfs_list_empty(&imp->imp_committed_list)) {
576 tmp = imp->imp_committed_list.next;
577 req = cfs_list_entry(tmp, struct ptlrpc_request, rq_replay_list);
578 *transno = req->rq_transno;
579 if (req->rq_transno == 0) {
580 DEBUG_REQ(D_ERROR, req, "zero transno in committed_list");
585 if (!cfs_list_empty(&imp->imp_replay_list)) {
586 tmp = imp->imp_replay_list.next;
587 req = cfs_list_entry(tmp, struct ptlrpc_request, rq_replay_list);
588 *transno = req->rq_transno;
589 if (req->rq_transno == 0) {
590 DEBUG_REQ(D_ERROR, req, "zero transno in replay_list");
/**
 * Attempt to (re)connect import \a imp. This includes all preparations,
 * initializing CONNECT RPC request and passing it to ptlrpcd for
 * actual processing.
 * Returns 0 on success or error code.
 */
604 int ptlrpc_connect_import(struct obd_import *imp)
606 struct obd_device *obd = imp->imp_obd;
607 int initial_connect = 0;
609 __u64 committed_before_reconnect = 0;
610 struct ptlrpc_request *request;
        char *bufs[] = { NULL,
                         obd2cli_tgt(imp->imp_obd),
                         obd->obd_uuid.uuid,
                         (char *)&imp->imp_dlm_handle,
                         (char *)&imp->imp_connect_data };
616 struct ptlrpc_connect_async_args *aa;
620 spin_lock(&imp->imp_lock);
621 if (imp->imp_state == LUSTRE_IMP_CLOSED) {
622 spin_unlock(&imp->imp_lock);
623 CERROR("can't connect to a closed import\n");
625 } else if (imp->imp_state == LUSTRE_IMP_FULL) {
626 spin_unlock(&imp->imp_lock);
627 CERROR("already connected\n");
629 } else if (imp->imp_state == LUSTRE_IMP_CONNECTING) {
630 spin_unlock(&imp->imp_lock);
631 CERROR("already connecting\n");
635 IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CONNECTING);
638 imp->imp_resend_replay = 0;
        if (!lustre_handle_is_used(&imp->imp_remote_handle))
                initial_connect = 1;
        else
                committed_before_reconnect = imp->imp_peer_committed_transno;
645 set_transno = ptlrpc_first_transno(imp,
646 &imp->imp_connect_data.ocd_transno);
647 spin_unlock(&imp->imp_lock);
649 rc = import_select_connection(imp);
653 rc = sptlrpc_import_sec_adapt(imp, NULL, 0);
        /* Reset connect flags to the originally requested flags, so that
         * if the server was updated on-the-fly we will get the new
         * features. */
659 imp->imp_connect_data.ocd_connect_flags = imp->imp_connect_flags_orig;
660 /* Reset ocd_version each time so the server knows the exact versions */
661 imp->imp_connect_data.ocd_version = LUSTRE_VERSION_CODE;
662 imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
663 imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18;
665 rc = obd_reconnect(NULL, imp->imp_obd->obd_self_export, obd,
666 &obd->obd_uuid, &imp->imp_connect_data, NULL);
        request = ptlrpc_request_alloc(imp, &RQF_MDS_CONNECT);
        if (request == NULL)
                GOTO(out, rc = -ENOMEM);
        rc = ptlrpc_request_bufs_pack(request, LUSTRE_OBD_VERSION,
                                      imp->imp_connect_op, bufs, NULL);
        if (rc) {
                ptlrpc_request_free(request);
                GOTO(out, rc);
        }
681 /* Report the rpc service time to the server so that it knows how long
682 * to wait for clients to join recovery */
683 lustre_msg_set_service_time(request->rq_reqmsg,
684 at_timeout2est(request->rq_timeout));
686 /* The amount of time we give the server to process the connect req.
687 * import_select_connection will increase the net latency on
688 * repeated reconnect attempts to cover slow networks.
689 * We override/ignore the server rpc completion estimate here,
690 * which may be large if this is a reconnect attempt */
691 request->rq_timeout = INITIAL_CONNECT_TIMEOUT;
692 lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout);
#ifndef __KERNEL__
        lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_LIBCLIENT);
#endif
697 lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_NEXT_VER);
699 request->rq_no_resend = request->rq_no_delay = 1;
700 request->rq_send_state = LUSTRE_IMP_CONNECTING;
701 /* Allow a slightly larger reply for future growth compatibility */
702 req_capsule_set_size(&request->rq_pill, &RMF_CONNECT_DATA, RCL_SERVER,
703 sizeof(struct obd_connect_data)+16*sizeof(__u64));
704 ptlrpc_request_set_replen(request);
705 request->rq_interpret_reply = ptlrpc_connect_interpret;
707 CLASSERT(sizeof (*aa) <= sizeof (request->rq_async_args));
708 aa = ptlrpc_req_async_args(request);
709 memset(aa, 0, sizeof *aa);
711 aa->pcaa_peer_committed = committed_before_reconnect;
712 aa->pcaa_initial_connect = initial_connect;
714 if (aa->pcaa_initial_connect) {
715 spin_lock(&imp->imp_lock);
716 imp->imp_replayable = 1;
717 spin_unlock(&imp->imp_lock);
718 lustre_msg_add_op_flags(request->rq_reqmsg,
719 MSG_CONNECT_INITIAL);
723 lustre_msg_add_op_flags(request->rq_reqmsg,
724 MSG_CONNECT_TRANSNO);
726 DEBUG_REQ(D_RPCTRACE, request, "(re)connect request (timeout %d)",
727 request->rq_timeout);
728 ptlrpcd_add_req(request, PDL_POLICY_ROUND, -1);
732 IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
737 EXPORT_SYMBOL(ptlrpc_connect_import);
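/*
 * Ask the pinger to look at this import again soon rather than waiting for
 * the next regular ping interval; used after failed or busy connect
 * attempts (see the callers in ptlrpc_connect_interpret()).
 */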
739 static void ptlrpc_maybe_ping_import_soon(struct obd_import *imp)
744 spin_lock(&imp->imp_lock);
745 force_verify = imp->imp_force_verify != 0;
746 spin_unlock(&imp->imp_lock);
749 ptlrpc_pinger_wake_up();
751 /* liblustre has no pinger thread, so we wakeup pinger anyway */
752 ptlrpc_pinger_wake_up();
756 static int ptlrpc_busy_reconnect(int rc)
758 return (rc == -EBUSY) || (rc == -EAGAIN);
/**
 * interpret_reply callback for connect RPCs.
 * Looks into returned status of connect operation and decides
 * what to do with the import - i.e. enter recovery, promote it to
 * full state for normal operations, or disconnect it due to an error.
 */
static int ptlrpc_connect_interpret(const struct lu_env *env,
                                    struct ptlrpc_request *request,
                                    void *data, int rc)
{
        struct ptlrpc_connect_async_args *aa = data;
772 struct obd_import *imp = request->rq_import;
773 struct client_obd *cli = &imp->imp_obd->u.cli;
774 struct lustre_handle old_hdl;
775 __u64 old_connect_flags;
777 struct obd_connect_data *ocd;
778 struct obd_export *exp;
782 spin_lock(&imp->imp_lock);
783 if (imp->imp_state == LUSTRE_IMP_CLOSED) {
784 imp->imp_connect_tried = 1;
785 spin_unlock(&imp->imp_lock);
                /* If we reconnected to a busy export, there is no need to
                 * select a new target for the next connect attempt. */
                imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc);
793 spin_unlock(&imp->imp_lock);
794 ptlrpc_maybe_ping_import_soon(imp);
797 spin_unlock(&imp->imp_lock);
799 LASSERT(imp->imp_conn_current);
801 msg_flags = lustre_msg_get_op_flags(request->rq_repmsg);
803 ret = req_capsule_get_size(&request->rq_pill, &RMF_CONNECT_DATA,
805 /* server replied obd_connect_data is always bigger */
        ocd = req_capsule_server_sized_get(&request->rq_pill,
                                           &RMF_CONNECT_DATA, ret);
        if (ocd == NULL) {
                CERROR("%s: no connect data from server\n",
                       imp->imp_obd->obd_name);
                rc = -EPROTO;
                GOTO(out, rc);
        }
816 spin_lock(&imp->imp_lock);
818 /* All imports are pingable */
819 imp->imp_pingable = 1;
820 imp->imp_force_reconnect = 0;
821 imp->imp_force_verify = 0;
823 imp->imp_connect_data = *ocd;
825 CDEBUG(D_HA, "%s: connect to target with instance %u\n",
826 imp->imp_obd->obd_name, ocd->ocd_instance);
827 exp = class_conn2export(&imp->imp_dlm_handle);
829 spin_unlock(&imp->imp_lock);
        /* Check that the server granted a subset of the flags we asked for. */
        if ((ocd->ocd_connect_flags & imp->imp_connect_flags_orig) !=
            ocd->ocd_connect_flags) {
                CERROR("%s: Server did not grant the requested subset of flags: "
                       "asked="LPX64" granted="LPX64"\n",
                       imp->imp_obd->obd_name, imp->imp_connect_flags_orig,
                       ocd->ocd_connect_flags);
                GOTO(out, rc = -EPROTO);
        }
        if (exp == NULL) {
                /* This could happen if the export is cleaned up during the
                 * connect attempt. */
                CERROR("%s: missing export after connect\n",
                       imp->imp_obd->obd_name);
                GOTO(out, rc = -ENODEV);
        }
848 old_connect_flags = exp_connect_flags(exp);
849 exp->exp_connect_data = *ocd;
850 imp->imp_obd->obd_self_export->exp_connect_data = *ocd;
851 class_export_put(exp);
853 obd_import_event(imp->imp_obd, imp, IMP_EVENT_OCD);
855 if (aa->pcaa_initial_connect) {
856 spin_lock(&imp->imp_lock);
857 if (msg_flags & MSG_CONNECT_REPLAYABLE) {
858 imp->imp_replayable = 1;
859 spin_unlock(&imp->imp_lock);
860 CDEBUG(D_HA, "connected to replayable target: %s\n",
861 obd2cli_tgt(imp->imp_obd));
863 imp->imp_replayable = 0;
864 spin_unlock(&imp->imp_lock);
        /* If applicable, adjust the imp->imp_msg_magic here
         * according to the reply flags. */
870 imp->imp_remote_handle =
871 *lustre_msg_get_handle(request->rq_repmsg);
                /* Initial connects are allowed for clients with non-random
                 * uuids when servers are in recovery. Simply signal the
                 * server that replay is complete and wait in REPLAY_WAIT. */
876 if (msg_flags & MSG_CONNECT_RECOVERING) {
877 CDEBUG(D_HA, "connect to %s during recovery\n",
878 obd2cli_tgt(imp->imp_obd));
879 IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS);
881 IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL);
882 ptlrpc_activate_import(imp);
885 GOTO(finish, rc = 0);
888 /* Determine what recovery state to move the import to. */
889 if (MSG_CONNECT_RECONNECT & msg_flags) {
890 memset(&old_hdl, 0, sizeof(old_hdl));
891 if (!memcmp(&old_hdl, lustre_msg_get_handle(request->rq_repmsg),
893 LCONSOLE_WARN("Reconnect to %s (at @%s) failed due "
894 "bad handle "LPX64"\n",
895 obd2cli_tgt(imp->imp_obd),
896 imp->imp_connection->c_remote_uuid.uuid,
897 imp->imp_dlm_handle.cookie);
898 GOTO(out, rc = -ENOTCONN);
901 if (memcmp(&imp->imp_remote_handle,
902 lustre_msg_get_handle(request->rq_repmsg),
903 sizeof(imp->imp_remote_handle))) {
                        int level = msg_flags & MSG_CONNECT_RECOVERING ?
                                    D_HA : D_WARNING;

                        /* Bug 16611/14775: if the server handle has changed,
                         * that means some sort of disconnection happened.
                         * If the server is not in recovery, that also means it
                         * already erased all of our state because of a previous
                         * eviction. If it is in recovery, we are safe to
                         * participate since we can re-establish all of our
                         * state with the server again. */
914 if ((MSG_CONNECT_RECOVERING & msg_flags)) {
915 CDEBUG(level,"%s@%s changed server handle from "
917 " but is still in recovery\n",
918 obd2cli_tgt(imp->imp_obd),
919 imp->imp_connection->c_remote_uuid.uuid,
920 imp->imp_remote_handle.cookie,
921 lustre_msg_get_handle(
922 request->rq_repmsg)->cookie);
924 LCONSOLE_WARN("Evicted from %s (at %s) "
925 "after server handle changed from "
926 LPX64" to "LPX64"\n",
927 obd2cli_tgt(imp->imp_obd),
928 imp->imp_connection-> \
930 imp->imp_remote_handle.cookie,
931 lustre_msg_get_handle(
932 request->rq_repmsg)->cookie);
936 imp->imp_remote_handle =
937 *lustre_msg_get_handle(request->rq_repmsg);
939 if (!(MSG_CONNECT_RECOVERING & msg_flags)) {
940 IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
941 GOTO(finish, rc = 0);
945 CDEBUG(D_HA, "reconnected to %s@%s after partition\n",
946 obd2cli_tgt(imp->imp_obd),
947 imp->imp_connection->c_remote_uuid.uuid);
950 if (imp->imp_invalid) {
951 CDEBUG(D_HA, "%s: reconnected but import is invalid; "
952 "marking evicted\n", imp->imp_obd->obd_name);
953 IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
954 } else if (MSG_CONNECT_RECOVERING & msg_flags) {
955 CDEBUG(D_HA, "%s: reconnected to %s during replay\n",
956 imp->imp_obd->obd_name,
957 obd2cli_tgt(imp->imp_obd));
959 spin_lock(&imp->imp_lock);
960 imp->imp_resend_replay = 1;
961 spin_unlock(&imp->imp_lock);
963 IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
965 IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
967 } else if ((MSG_CONNECT_RECOVERING & msg_flags) && !imp->imp_invalid) {
968 LASSERT(imp->imp_replayable);
969 imp->imp_remote_handle =
970 *lustre_msg_get_handle(request->rq_repmsg);
971 imp->imp_last_replay_transno = 0;
972 IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
974 DEBUG_REQ(D_HA, request, "%s: evicting (reconnect/recover flags"
975 " not set: %x)", imp->imp_obd->obd_name, msg_flags);
976 imp->imp_remote_handle =
977 *lustre_msg_get_handle(request->rq_repmsg);
978 IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
981 /* Sanity checks for a reconnected import. */
982 if (!(imp->imp_replayable) != !(msg_flags & MSG_CONNECT_REPLAYABLE)) {
983 CERROR("imp_replayable flag does not match server "
984 "after reconnect. We should LBUG right here.\n");
987 if (lustre_msg_get_last_committed(request->rq_repmsg) > 0 &&
988 lustre_msg_get_last_committed(request->rq_repmsg) <
989 aa->pcaa_peer_committed) {
990 CERROR("%s went back in time (transno "LPD64
991 " was previously committed, server now claims "LPD64
992 ")! See https://bugzilla.lustre.org/show_bug.cgi?"
994 obd2cli_tgt(imp->imp_obd), aa->pcaa_peer_committed,
995 lustre_msg_get_last_committed(request->rq_repmsg));
999 rc = ptlrpc_import_recovery_state_machine(imp);
1001 if (rc == -ENOTCONN) {
1002 CDEBUG(D_HA, "evicted/aborted by %s@%s during recovery;"
1003 "invalidating and reconnecting\n",
1004 obd2cli_tgt(imp->imp_obd),
1005 imp->imp_connection->c_remote_uuid.uuid);
1006 ptlrpc_connect_import(imp);
1007 imp->imp_connect_tried = 1;
1013 spin_lock(&imp->imp_lock);
1014 cfs_list_del(&imp->imp_conn_current->oic_item);
1015 cfs_list_add(&imp->imp_conn_current->oic_item,
1016 &imp->imp_conn_list);
1017 imp->imp_last_success_conn =
1018 imp->imp_conn_current->oic_last_attempt;
1020 spin_unlock(&imp->imp_lock);
        if (!ocd->ocd_ibits_known &&
            ocd->ocd_connect_flags & OBD_CONNECT_IBITS)
                CERROR("Inodebits aware server returned zero compatible"
                       " bits?\n");
1027 if (!warned && (ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
1028 (ocd->ocd_version > LUSTRE_VERSION_CODE +
1029 LUSTRE_VERSION_OFFSET_WARN ||
1030 ocd->ocd_version < LUSTRE_VERSION_CODE -
1031 LUSTRE_VERSION_OFFSET_WARN)) {
                /* Sigh, some compilers do not like #ifdef in the middle
                   of macro arguments */
                const char *older = "older than client. "
                                    "Consider upgrading server";
#ifndef __KERNEL__
                const char *newer = "newer than client. "
                                    "Consider recompiling application";
#else
                const char *newer = "newer than client. "
                                    "Consider upgrading client";
#endif
1044 LCONSOLE_WARN("Server %s version (%d.%d.%d.%d) "
1045 "is much %s (%s)\n",
1046 obd2cli_tgt(imp->imp_obd),
1047 OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
1048 OBD_OCD_VERSION_MINOR(ocd->ocd_version),
1049 OBD_OCD_VERSION_PATCH(ocd->ocd_version),
1050 OBD_OCD_VERSION_FIX(ocd->ocd_version),
1051 ocd->ocd_version > LUSTRE_VERSION_CODE ?
1052 newer : older, LUSTRE_VERSION_STRING);
1056 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0)
1057 /* Check if server has LU-1252 fix applied to not always swab
1058 * the IR MNE entries. Do this only once per connection. This
1059 * fixup is version-limited, because we don't want to carry the
1060 * OBD_CONNECT_MNE_SWAB flag around forever, just so long as we
1061 * need interop with unpatched 2.2 servers. For newer servers,
1062 * the client will do MNE swabbing only as needed. LU-1644 */
1063 if (unlikely((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
1064 !(ocd->ocd_connect_flags & OBD_CONNECT_MNE_SWAB) &&
1065 OBD_OCD_VERSION_MAJOR(ocd->ocd_version) == 2 &&
1066 OBD_OCD_VERSION_MINOR(ocd->ocd_version) == 2 &&
1067 OBD_OCD_VERSION_PATCH(ocd->ocd_version) < 55 &&
1068 strcmp(imp->imp_obd->obd_type->typ_name,
1069 LUSTRE_MGC_NAME) == 0))
1070 imp->imp_need_mne_swab = 1;
1071 else /* clear if server was upgraded since last connect */
1072 imp->imp_need_mne_swab = 0;
#else
#warning "LU-1644: Remove old OBD_CONNECT_MNE_SWAB fixup and imp_need_mne_swab"
#endif
1077 if (ocd->ocd_connect_flags & OBD_CONNECT_CKSUM) {
1078 /* We sent to the server ocd_cksum_types with bits set
1079 * for algorithms we understand. The server masked off
1080 * the checksum types it doesn't support */
1081 if ((ocd->ocd_cksum_types &
1082 cksum_types_supported_client()) == 0) {
1083 LCONSOLE_WARN("The negotiation of the checksum "
1084 "alogrithm to use with server %s "
1085 "failed (%x/%x), disabling "
1087 obd2cli_tgt(imp->imp_obd),
1088 ocd->ocd_cksum_types,
1089 cksum_types_supported_client());
1090 cli->cl_checksum = 0;
1091 cli->cl_supp_cksum_types = OBD_CKSUM_ADLER;
1093 cli->cl_supp_cksum_types = ocd->ocd_cksum_types;
1096 /* The server does not support OBD_CONNECT_CKSUM.
1097 * Enforce ADLER for backward compatibility*/
1098 cli->cl_supp_cksum_types = OBD_CKSUM_ADLER;
1100 cli->cl_cksum_type =cksum_type_select(cli->cl_supp_cksum_types);
1102 if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
1103 cli->cl_max_pages_per_rpc =
1104 min(ocd->ocd_brw_size >> PAGE_CACHE_SHIFT,
1105 cli->cl_max_pages_per_rpc);
1106 else if (imp->imp_connect_op == MDS_CONNECT ||
1107 imp->imp_connect_op == MGS_CONNECT)
1108 cli->cl_max_pages_per_rpc = 1;
        /* Reset ns_connect_flags only for initial connect. It might be
         * changed while the filesystem is in use, and resetting it on
         * reconnect would lose user settings made in the meantime, such
         * as disabling lru_resize. */
1114 if (old_connect_flags != exp_connect_flags(exp) ||
1115 aa->pcaa_initial_connect) {
1116 CDEBUG(D_HA, "%s: Resetting ns_connect_flags to server "
1117 "flags: "LPX64"\n", imp->imp_obd->obd_name,
1118 ocd->ocd_connect_flags);
1119 imp->imp_obd->obd_namespace->ns_connect_flags =
1120 ocd->ocd_connect_flags;
1121 imp->imp_obd->obd_namespace->ns_orig_connect_flags =
1122 ocd->ocd_connect_flags;
1125 if ((ocd->ocd_connect_flags & OBD_CONNECT_AT) &&
1126 (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
                /* We need a per-message support flag, because
                   a. we don't know if the incoming connect reply
                      supports AT or not (in reply_in_callback)
                      until we unpack it;
                   b. a failed-over server means the export and flags are
                      gone (in ptlrpc_send_reply).
                   It can only be set when we know AT is supported at
                   both ends. */
1135 imp->imp_msghdr_flags |= MSGHDR_AT_SUPPORT;
1137 imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
1139 if ((ocd->ocd_connect_flags & OBD_CONNECT_FULL20) &&
1140 (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
1141 imp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18;
1143 imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18;
1145 LASSERT((cli->cl_max_pages_per_rpc <= PTLRPC_MAX_BRW_PAGES) &&
1146 (cli->cl_max_pages_per_rpc > 0));
1150 imp->imp_connect_tried = 1;
1153 IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
1154 if (rc == -EACCES) {
                /*
                 * Give up trying to reconnect;
                 * EACCES means the client has no permission to connect.
                 */
1159 imp->imp_obd->obd_no_recov = 1;
1160 ptlrpc_deactivate_import(imp);
1163 if (rc == -EPROTO) {
1164 struct obd_connect_data *ocd;
1166 /* reply message might not be ready */
1167 if (request->rq_repmsg == NULL)
1170 ocd = req_capsule_server_get(&request->rq_pill,
1173 (ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
1174 (ocd->ocd_version != LUSTRE_VERSION_CODE)) {
1175 /* Actually servers are only supposed to refuse
1176 connection from liblustre clients, so we should
1177 never see this from VFS context */
                        LCONSOLE_ERROR_MSG(0x16a, "Server %s version "
                                           "(%d.%d.%d.%d)"
                                           " refused connection from this client "
1181 "with an incompatible version (%s). "
1182 "Client must be recompiled\n",
1183 obd2cli_tgt(imp->imp_obd),
1184 OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
1185 OBD_OCD_VERSION_MINOR(ocd->ocd_version),
1186 OBD_OCD_VERSION_PATCH(ocd->ocd_version),
1187 OBD_OCD_VERSION_FIX(ocd->ocd_version),
1188 LUSTRE_VERSION_STRING);
1189 ptlrpc_deactivate_import(imp);
1190 IMPORT_SET_STATE(imp, LUSTRE_IMP_CLOSED);
1195 ptlrpc_maybe_ping_import_soon(imp);
1197 CDEBUG(D_HA, "recovery of %s on %s failed (%d)\n",
1198 obd2cli_tgt(imp->imp_obd),
1199 (char *)imp->imp_connection->c_remote_uuid.uuid, rc);
1202 wake_up_all(&imp->imp_recovery_waitq);
/**
 * interpret callback for "completed replay" RPCs.
 * \see signal_completed_replay
 */
1210 static int completed_replay_interpret(const struct lu_env *env,
1211 struct ptlrpc_request *req,
1212 void * data, int rc)
1215 cfs_atomic_dec(&req->rq_import->imp_replay_inflight);
1216 if (req->rq_status == 0 &&
1217 !req->rq_import->imp_vbr_failed) {
1218 ptlrpc_import_recovery_state_machine(req->rq_import);
        } else {
                if (req->rq_import->imp_vbr_failed) {
                        CDEBUG(D_WARNING,
                               "%s: version recovery fails, reconnecting\n",
                               req->rq_import->imp_obd->obd_name);
                } else {
                        CDEBUG(D_HA, "%s: LAST_REPLAY message error: %d, "
                               "reconnecting\n",
                               req->rq_import->imp_obd->obd_name,
                               req->rq_status);
                }
                ptlrpc_connect_import(req->rq_import);
/**
 * Let server know that we have no requests to replay anymore.
 * Achieved by just sending a PING request.
 */
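/*
 * The PING below is sent in LUSTRE_IMP_REPLAY_WAIT state with the
 * MSG_REQ_REPLAY_DONE and MSG_LOCK_REPLAY_DONE flags set, so the target
 * can account this client as having finished both request and lock replay.
 */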
1240 static int signal_completed_replay(struct obd_import *imp)
1242 struct ptlrpc_request *req;
1245 if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_FINISH_REPLAY)))
1248 LASSERT(cfs_atomic_read(&imp->imp_replay_inflight) == 0);
1249 cfs_atomic_inc(&imp->imp_replay_inflight);
1251 req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING, LUSTRE_OBD_VERSION,
1254 cfs_atomic_dec(&imp->imp_replay_inflight);
1258 ptlrpc_request_set_replen(req);
1259 req->rq_send_state = LUSTRE_IMP_REPLAY_WAIT;
1260 lustre_msg_add_flags(req->rq_reqmsg,
1261 MSG_LOCK_REPLAY_DONE | MSG_REQ_REPLAY_DONE);
1263 req->rq_timeout *= 3;
1264 req->rq_interpret_reply = completed_replay_interpret;
1266 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
/**
 * In kernel code all import invalidation happens in its own
 * separate thread, so that whatever application happened to encounter
 * a problem could still be killed or otherwise continue.
 */
1276 static int ptlrpc_invalidate_import_thread(void *data)
1278 struct obd_import *imp = data;
1282 unshare_fs_struct();
1284 CDEBUG(D_HA, "thread invalidate import %s to %s@%s\n",
1285 imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
1286 imp->imp_connection->c_remote_uuid.uuid);
1288 ptlrpc_invalidate_import(imp);
1290 if (obd_dump_on_eviction) {
1291 CERROR("dump the log upon eviction\n");
1292 libcfs_debug_dumplog();
1295 IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
1296 ptlrpc_import_recovery_state_machine(imp);
1298 class_import_put(imp);
/**
 * This is the state machine for client-side recovery on import.
 *
 * Typically we have two possible paths. If we reach a server that is not
 * in recovery, we just enter the IMP_EVICTED state, invalidate our import
 * state and reconnect from scratch.
 * If we reach a server that is in recovery, we enter the IMP_REPLAY import
 * state. We go through our list of requests to replay and send them to the
 * server one by one.
 * After sending all requests from the list we change the import state to
 * IMP_REPLAY_LOCKS and re-request all the locks we believe we have from the
 * server, and also all the locks we don't yet have, and wait for the server
 * to grant them.
 * After that we send a special "replay completed" request and change the
 * import state to IMP_REPLAY_WAIT.
 * Upon receiving the reply to that "replay completed" RPC we enter the
 * IMP_RECOVER state and resend all requests from the sending list.
 * After that we promote the import to FULL state and send all delayed
 * requests, and the import is fully operational.
 */
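/*
 * In short, the usual state sequences (summarizing the description above):
 *
 *   CONNECTING -> FULL                                  (clean connect)
 *   CONNECTING -> EVICTED -> RECOVER -> FULL            (our state was lost)
 *   CONNECTING -> REPLAY -> REPLAY_LOCKS -> REPLAY_WAIT
 *              -> RECOVER -> FULL                       (server in recovery)
 */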
1323 int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
1331 if (imp->imp_state == LUSTRE_IMP_EVICTED) {
1332 deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
1333 &target_start, &target_len);
1334 /* Don't care about MGC eviction */
1335 if (strcmp(imp->imp_obd->obd_type->typ_name,
1336 LUSTRE_MGC_NAME) != 0) {
1337 LCONSOLE_ERROR_MSG(0x167, "%s: This client was evicted "
1338 "by %.*s; in progress operations "
1339 "using this service will fail.\n",
1340 imp->imp_obd->obd_name, target_len,
1343 CDEBUG(D_HA, "evicted from %s@%s; invalidating\n",
1344 obd2cli_tgt(imp->imp_obd),
1345 imp->imp_connection->c_remote_uuid.uuid);
1346 /* reset vbr_failed flag upon eviction */
1347 spin_lock(&imp->imp_lock);
1348 imp->imp_vbr_failed = 0;
1349 spin_unlock(&imp->imp_lock);
1353 struct task_struct *task;
                /* Bug 17802: XXX client_disconnect_export vs connect request
                 * race. If the client is evicted at this time, we could start
                 * the invalidate thread without a reference to the import,
                 * and the import could be freed at the same time. */
1358 class_import_get(imp);
1359 task = kthread_run(ptlrpc_invalidate_import_thread, imp,
1362 class_import_put(imp);
1363 CERROR("error starting invalidate thread: %d\n", rc);
1371 ptlrpc_invalidate_import(imp);
1373 IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
1377 if (imp->imp_state == LUSTRE_IMP_REPLAY) {
1378 CDEBUG(D_HA, "replay requested by %s\n",
1379 obd2cli_tgt(imp->imp_obd));
1380 rc = ptlrpc_replay_next(imp, &inflight);
1381 if (inflight == 0 &&
1382 cfs_atomic_read(&imp->imp_replay_inflight) == 0) {
1383 IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS);
1384 rc = ldlm_replay_locks(imp);
1391 if (imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS) {
1392 if (cfs_atomic_read(&imp->imp_replay_inflight) == 0) {
1393 IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_WAIT);
1394 rc = signal_completed_replay(imp);
1401 if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT) {
1402 if (cfs_atomic_read(&imp->imp_replay_inflight) == 0) {
1403 IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
1407 if (imp->imp_state == LUSTRE_IMP_RECOVER) {
1408 CDEBUG(D_HA, "reconnected to %s@%s\n",
1409 obd2cli_tgt(imp->imp_obd),
1410 imp->imp_connection->c_remote_uuid.uuid);
1412 rc = ptlrpc_resend(imp);
1415 IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL);
1416 ptlrpc_activate_import(imp);
1418 deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
1419 &target_start, &target_len);
1420 LCONSOLE_INFO("%s: Connection restored to %.*s (at %s)\n",
1421 imp->imp_obd->obd_name,
1422 target_len, target_start,
1423 libcfs_nid2str(imp->imp_connection->c_peer.nid));
1426 if (imp->imp_state == LUSTRE_IMP_FULL) {
1427 wake_up_all(&imp->imp_recovery_waitq);
1428 ptlrpc_wake_delayed(imp);
1435 int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
1437 struct ptlrpc_request *req;
1439 int nowait = imp->imp_obd->obd_force;
1443 GOTO(set_state, rc);
1445 switch (imp->imp_connect_op) {
1446 case OST_CONNECT: rq_opc = OST_DISCONNECT; break;
1447 case MDS_CONNECT: rq_opc = MDS_DISCONNECT; break;
1448 case MGS_CONNECT: rq_opc = MGS_DISCONNECT; break;
1450 CERROR("don't know how to disconnect from %s (connect_op %d)\n",
1451 obd2cli_tgt(imp->imp_obd), imp->imp_connect_op);
1455 if (ptlrpc_import_in_recovery(imp)) {
1456 struct l_wait_info lwi;
1457 cfs_duration_t timeout;
1461 if (imp->imp_server_timeout)
1462 timeout = cfs_time_seconds(obd_timeout / 2);
1464 timeout = cfs_time_seconds(obd_timeout);
1466 int idx = import_at_get_index(imp,
1467 imp->imp_client->cli_request_portal);
1468 timeout = cfs_time_seconds(
1469 at_get(&imp->imp_at.iat_service_estimate[idx]));
1472 lwi = LWI_TIMEOUT_INTR(cfs_timeout_cap(timeout),
1473 back_to_sleep, LWI_ON_SIGNAL_NOOP, NULL);
1474 rc = l_wait_event(imp->imp_recovery_waitq,
1475 !ptlrpc_import_in_recovery(imp), &lwi);
1479 spin_lock(&imp->imp_lock);
1480 if (imp->imp_state != LUSTRE_IMP_FULL)
1483 spin_unlock(&imp->imp_lock);
1485 req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_DISCONNECT,
1486 LUSTRE_OBD_VERSION, rq_opc);
        /* We are disconnecting, do not retry the DISCONNECT rpc if
         * it fails. We can get through the above with a down server
         * if the client doesn't know the server is gone yet. */
1491 req->rq_no_resend = 1;
        /* We want client umounts to happen quickly, no matter the
         * server state. */
1495 req->rq_timeout = min_t(int, req->rq_timeout,
1496 INITIAL_CONNECT_TIMEOUT);
1498 IMPORT_SET_STATE(imp, LUSTRE_IMP_CONNECTING);
1499 req->rq_send_state = LUSTRE_IMP_CONNECTING;
1500 ptlrpc_request_set_replen(req);
1501 rc = ptlrpc_queue_wait(req);
1502 ptlrpc_req_finished(req);
1506 spin_lock(&imp->imp_lock);
1509 IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
1511 IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
1512 memset(&imp->imp_remote_handle, 0, sizeof(imp->imp_remote_handle));
1513 spin_unlock(&imp->imp_lock);
1517 EXPORT_SYMBOL(ptlrpc_disconnect_import);
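/*
 * Final teardown of an import: move it to LUSTRE_IMP_CLOSED (the one state
 * that IMPORT_SET_STATE_NOLOCK() never leaves), bump the generation and
 * abort whatever is still in flight.
 */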
1519 void ptlrpc_cleanup_imp(struct obd_import *imp)
1523 spin_lock(&imp->imp_lock);
1524 IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
1525 imp->imp_generation++;
1526 spin_unlock(&imp->imp_lock);
1527 ptlrpc_abort_inflight(imp);
1531 EXPORT_SYMBOL(ptlrpc_cleanup_imp);
1533 /* Adaptive Timeout utils */
1534 extern unsigned int at_min, at_max, at_history;
1536 /* Bin into timeslices using AT_BINS bins.
1537 This gives us a max of the last binlimit*AT_BINS secs without the storage,
1538 but still smoothing out a return to normalcy from a slow response.
1539 (E.g. remember the maximum latency in each minute of the last 4 minutes.) */
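/*
 * Worked example (illustrative numbers only): with at_history = 600 and
 * AT_BINS = 4, binlimit below is max(600 / 4, 1) = 150 seconds, so the
 * adaptive value tracks the maximum measurement seen over the last four
 * 150-second bins, i.e. roughly the last 10 minutes.
 */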
int at_measured(struct adaptive_timeout *at, unsigned int val)
{
        unsigned int old = at->at_current;
        time_t now = cfs_time_current_sec();
        time_t binlimit = max_t(time_t, at_history / AT_BINS, 1);
        int i, shift;
1547 CDEBUG(D_OTHER, "add %u to %p time=%lu v=%u (%u %u %u %u)\n",
1548 val, at, now - at->at_binstart, at->at_current,
1549 at->at_hist[0], at->at_hist[1], at->at_hist[2], at->at_hist[3]);
1552 /* 0's don't count, because we never want our timeout to
1553 drop to 0, and because 0 could mean an error */
1556 spin_lock(&at->at_lock);
1558 if (unlikely(at->at_binstart == 0)) {
1559 /* Special case to remove default from history */
1560 at->at_current = val;
1561 at->at_worst_ever = val;
1562 at->at_worst_time = now;
1563 at->at_hist[0] = val;
1564 at->at_binstart = now;
1565 } else if (now - at->at_binstart < binlimit ) {
1567 at->at_hist[0] = max(val, at->at_hist[0]);
1568 at->at_current = max(val, at->at_current);
        } else {
                unsigned int maxv = val;

                /* move bins over */
                shift = (now - at->at_binstart) / binlimit;
                for (i = AT_BINS - 1; i >= 0; i--) {
                        if (i >= shift) {
                                at->at_hist[i] = at->at_hist[i - shift];
                                maxv = max(maxv, at->at_hist[i]);
                        } else {
                                at->at_hist[i] = 0;
                        }
                }
                at->at_hist[0] = val;
1584 at->at_current = maxv;
1585 at->at_binstart += shift * binlimit;
1588 if (at->at_current > at->at_worst_ever) {
1589 at->at_worst_ever = at->at_current;
1590 at->at_worst_time = now;
        if (at->at_flags & AT_FLG_NOHIST)
                /* Only keep the last reported val; keeping the rest of the
                 * history for proc only */
                at->at_current = val;
1599 at->at_current = min(at->at_current, at_max);
1600 at->at_current = max(at->at_current, at_min);
1602 if (at->at_current != old)
1603 CDEBUG(D_OTHER, "AT %p change: old=%u new=%u delta=%d "
1604 "(val=%u) hist %u %u %u %u\n", at,
1605 old, at->at_current, at->at_current - old, val,
                       at->at_hist[0], at->at_hist[1], at->at_hist[2],
                       at->at_hist[3]);
1609 /* if we changed, report the old value */
1610 old = (at->at_current != old) ? old : 0;
1612 spin_unlock(&at->at_lock);
1616 /* Find the imp_at index for a given portal; assign if space available */
int import_at_get_index(struct obd_import *imp, int portal)
{
        struct imp_at *at = &imp->imp_at;
        int i;

        for (i = 0; i < IMP_AT_MAX_PORTALS; i++) {
                if (at->iat_portal[i] == portal)
                        return i;
                if (at->iat_portal[i] == 0) /* unused */
                        break;
        }

        /* Not found in list, add it under a lock */
        spin_lock(&imp->imp_lock);

        /* Check unused under lock */
        for (; i < IMP_AT_MAX_PORTALS; i++) {
                if (at->iat_portal[i] == portal)
                        goto out;
                if (at->iat_portal[i] == 0) /* unused */
                        break;
        }

        /* Not enough portals? */
        LASSERT(i < IMP_AT_MAX_PORTALS);

        at->iat_portal[i] = portal;
out:
        spin_unlock(&imp->imp_lock);
        return i;
}