1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/ptlrpc/import.c
38 * Author: Mike Shaver <shaver@clusterfs.com>
41 #define DEBUG_SUBSYSTEM S_RPC
43 # include <liblustre.h>
46 #include <obd_support.h>
47 #include <lustre_ha.h>
48 #include <lustre_net.h>
49 #include <lustre_import.h>
50 #include <lustre_export.h>
52 #include <obd_class.h>
54 #include "ptlrpc_internal.h"
/* Per-request context saved across an asynchronous CONNECT RPC and read
 * back in ptlrpc_connect_interpret().  (Excerpt: closing brace of the
 * struct is not visible in this view.) */
56 struct ptlrpc_connect_async_args {
/* peer's last-committed transno captured before the (re)connect */
57 __u64 pcaa_peer_committed;
/* non-zero when this is the first connect, not a reconnect */
58 int pcaa_initial_connect;
/* Record a new import state and append it to the import's circular state
 * history ring (imp_state_hist) with a timestamp.  Callers hold imp_lock —
 * see IMPORT_SET_STATE_NOLOCK/IMPORT_SET_STATE below.
 * NOTE(review): excerpt — the modulus operand (history size) and the
 * function's closing lines are not visible here. */
61 static void __import_set_state(struct obd_import *imp,
62 enum lustre_imp_state state)
64 imp->imp_state = state;
65 imp->imp_state_hist[imp->imp_state_hist_idx].ish_state = state;
66 imp->imp_state_hist[imp->imp_state_hist_idx].ish_time =
67 cfs_time_current_sec();
/* advance ring index, wrapping at the history array size */
68 imp->imp_state_hist_idx = (imp->imp_state_hist_idx + 1) %
72 /* A CLOSED import should remain so. */
/* Change import state while already holding imp_lock; refuses to leave
 * LUSTRE_IMP_CLOSED.  Logs each transition at D_HA level.
 * NOTE(review): excerpt — the do { ... } while (0) wrapper lines of this
 * macro are not visible in this view. */
73 #define IMPORT_SET_STATE_NOLOCK(imp, state) \
75 if (imp->imp_state != LUSTRE_IMP_CLOSED) { \
76 CDEBUG(D_HA, "%p %s: changing import state from %s to %s\n", \
77 imp, obd2cli_tgt(imp->imp_obd), \
78 ptlrpc_import_state_name(imp->imp_state), \
79 ptlrpc_import_state_name(state)); \
80 __import_set_state(imp, state); \
/* Locking wrapper: take imp_lock around IMPORT_SET_STATE_NOLOCK so the
 * state change and history update are atomic w.r.t. other import users. */
84 #define IMPORT_SET_STATE(imp, state) \
86 spin_lock(&imp->imp_lock); \
87 IMPORT_SET_STATE_NOLOCK(imp, state); \
88 spin_unlock(&imp->imp_lock); \
92 static int ptlrpc_connect_interpret(struct ptlrpc_request *request,
94 int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
96 /* Only this function is allowed to change the import state when it is
97 * CLOSED. I would rather refcount the import and free it after
98 * disconnection like we do with exports. To do that, the client_obd
99 * will need to save the peer info somewhere other than in the import,
/* (Re)initialize an import: bump its generation and force the state back
 * to LUSTRE_IMP_NEW under imp_lock.  NOTE(review): excerpt — the return
 * statement and closing brace are not visible here. */
101 int ptlrpc_init_import(struct obd_import *imp)
103 spin_lock(&imp->imp_lock);
105 imp->imp_generation++;
/* direct assignment (not IMPORT_SET_STATE_NOLOCK) so even a CLOSED
 * import is reset — per the comment above, this is the one exception */
106 imp->imp_state = LUSTRE_IMP_NEW;
108 spin_unlock(&imp->imp_lock);
112 EXPORT_SYMBOL(ptlrpc_init_import);
114 #define UUID_STR "_UUID"
/* Strip an optional prefix and a trailing "_UUID" suffix from a uuid
 * string, for prettier console messages.  Returns, via out-parameters, a
 * pointer into the original buffer (*uuid_start) and the trimmed length
 * (*uuid_len); the string itself is not modified. */
115 static void deuuidify(char *uuid, const char *prefix, char **uuid_start,
/* skip the prefix only when it actually matches the front of uuid */
118 *uuid_start = !prefix || strncmp(uuid, prefix, strlen(prefix))
119 ? uuid : uuid + strlen(prefix);
121 *uuid_len = strlen(*uuid_start);
/* too short to even hold "_UUID" — nothing more to trim */
123 if (*uuid_len < strlen(UUID_STR))
/* drop the "_UUID" suffix from the reported length if present */
126 if (!strncmp(*uuid_start + *uuid_len - strlen(UUID_STR),
127 UUID_STR, strlen(UUID_STR)))
128 *uuid_len -= strlen(UUID_STR);
131 /* Returns true if import was FULL, false if import was already not
133 * @imp - import to be disconnected
134 * @conn_cnt - connection count (epoch) of the request that timed out
135 * and caused the disconnection. In some cases, multiple
136 * inflight requests can fail to a single target (e.g. OST
137 * bulk requests) and if one has already caused a reconnection
138 * (increasing the import->conn_cnt) the older failure should
139 * not also cause a reconnection. If zero it forces a reconnect.
/* Move a FULL import to DISCON and notify the console + obd layer.
 * NOTE(review): excerpt — local variable declarations, the else keyword
 * between the two console branches, and the return paths are not visible
 * in this view. */
141 int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt)
145 spin_lock(&imp->imp_lock);
/* only act if still FULL and this failure is from the current epoch
 * (conn_cnt == 0 forces the transition regardless of epoch) */
147 if (imp->imp_state == LUSTRE_IMP_FULL &&
148 (conn_cnt == 0 || conn_cnt == imp->imp_conn_cnt)) {
152 deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
153 &target_start, &target_len);
/* replayable imports recover; others will fail inflight ops */
154 if (imp->imp_replayable) {
155 LCONSOLE_WARN("%s: Connection to service %.*s via nid "
156 "%s was lost; in progress operations using this "
157 "service will wait for recovery to complete.\n",
158 imp->imp_obd->obd_name, target_len, target_start,
159 libcfs_nid2str(imp->imp_connection->c_peer.nid));
161 LCONSOLE_ERROR_MSG(0x166, "%s: Connection to service "
162 "%.*s via nid %s was lost; in progress "
163 "operations using this service will fail.\n",
164 imp->imp_obd->obd_name, target_len, target_start,
165 libcfs_nid2str(imp->imp_connection->c_peer.nid));
167 IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
168 spin_unlock(&imp->imp_lock);
170 if (obd_dump_on_timeout)
171 libcfs_debug_dumplog();
/* tell the obd layer the import lost its connection */
173 obd_import_event(imp->imp_obd, imp, IMP_EVENT_DISCON);
/* already not FULL, or a newer epoch already reconnected: no-op */
176 spin_unlock(&imp->imp_lock);
177 CDEBUG(D_HA, "%s: import %p already %s (conn %u, was %u): %s\n",
178 imp->imp_client->cli_name, imp,
179 (imp->imp_state == LUSTRE_IMP_FULL &&
180 imp->imp_conn_cnt > conn_cnt) ?
181 "reconnected" : "not connected", imp->imp_conn_cnt,
182 conn_cnt, ptlrpc_import_state_name(imp->imp_state));
188 /* Must be called with imp_lock held! */
/* Mark the import invalid (new requests rejected), bump its generation,
 * then — after dropping imp_lock — abort all inflight RPCs and fire the
 * IMP_EVENT_INACTIVE obd event.  Drops the lock the caller acquired. */
189 static void ptlrpc_deactivate_and_unlock_import(struct obd_import *imp)
192 LASSERT_SPIN_LOCKED(&imp->imp_lock);
194 CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd));
195 imp->imp_invalid = 1;
196 imp->imp_generation++;
197 spin_unlock(&imp->imp_lock);
/* must be outside the spinlock: these can block / take other locks */
199 ptlrpc_abort_inflight(imp);
200 obd_import_event(imp->imp_obd, imp, IMP_EVENT_INACTIVE);
/* Public locking wrapper for ptlrpc_deactivate_and_unlock_import(). */
204 * This acts as a barrier; all existing requests are rejected, and
205 * no new requests will be accepted until the import is valid again.
207 void ptlrpc_deactivate_import(struct obd_import *imp)
209 spin_lock(&imp->imp_lock);
/* helper drops imp_lock before aborting inflight RPCs */
210 ptlrpc_deactivate_and_unlock_import(imp);
/* Compute how long (relative to @now) an inflight request may still take
 * before it errors out; used by ptlrpc_inflight_timeout() to size the
 * invalidate wait.  NOTE(review): excerpt — the return type line, the
 * early-return values, and the tail computation after rq_deadline are not
 * visible in this view. */
214 ptlrpc_inflight_deadline(struct ptlrpc_request *req, time_t now)
/* only requests actually on the wire (RPC-not-waiting, BULK, or NEW)
 * contribute a deadline */
218 if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
219 (req->rq_phase == RQ_PHASE_BULK) ||
220 (req->rq_phase == RQ_PHASE_NEW)))
/* already timed out: no further waiting needed for this one */
223 if (req->rq_timedout)
226 if (req->rq_phase == RQ_PHASE_NEW)
229 dl = req->rq_deadline;
/* Return the maximum remaining deadline (seconds) over all requests on
 * the import's sending list, holding imp_lock while walking the list.
 * NOTE(review): excerpt — the return statement is not visible here. */
237 static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp)
239 time_t now = cfs_time_current_sec();
240 struct list_head *tmp, *n;
241 struct ptlrpc_request *req;
242 unsigned int timeout = 0;
244 spin_lock(&imp->imp_lock);
245 list_for_each_safe(tmp, n, &imp->imp_sending_list) {
246 req = list_entry(tmp, struct ptlrpc_request, rq_list);
/* keep the worst-case (largest) per-request deadline */
247 timeout = max(ptlrpc_inflight_deadline(req, now), timeout);
249 spin_unlock(&imp->imp_lock);
254 * This function will invalidate the import, if necessary, then block
255 * for all the RPC completions, and finally notify the obd to
256 * invalidate its state (ie cancel locks, clear pending requests,
/* NOTE(review): excerpt — several lines (rc declaration, early-out
 * branches, the retry loop structure, and some closing braces) are not
 * visible in this view; the flow below is therefore partial. */
259 void ptlrpc_invalidate_import(struct obd_import *imp)
261 struct list_head *tmp, *n;
262 struct ptlrpc_request *req;
263 struct l_wait_info lwi;
264 unsigned int timeout;
/* advertise an invalidate in progress; ptlrpc_reconnect_import()
 * waits on this counter */
267 atomic_inc(&imp->imp_inval_count);
270 * If this is an invalid MGC connection, then don't bother
271 * waiting for imp_inflight to drop to 0.
273 if (imp->imp_invalid && imp->imp_recon_bk &&!imp->imp_obd->obd_no_recov)
276 if (!imp->imp_invalid || imp->imp_obd->obd_no_recov)
277 ptlrpc_deactivate_import(imp);
279 LASSERT(imp->imp_invalid);
281 /* Wait forever until inflight == 0. We really can't do it another
282 * way because in some cases we need to wait for very long reply
283 * unlink. We can't do anything before that because there is really
284 * no guarantee that some rdma transfer is not in progress right now. */
286 /* Calculate max timeout for waiting on rpcs to error
287 * out. Use obd_timeout if calculated value is smaller
289 timeout = ptlrpc_inflight_timeout(imp);
/* add a 33% safety margin on top of the computed deadline */
290 timeout += timeout / 3;
293 timeout = obd_timeout;
295 CDEBUG(D_RPCTRACE,"Sleeping %d sec for inflight to error out\n",
298 /* Wait for all requests to error out and call completion
299 * callbacks. Cap it at obd_timeout -- these should all
300 * have been locally cancelled by ptlrpc_abort_inflight. */
301 lwi = LWI_TIMEOUT_INTERVAL(
302 cfs_timeout_cap(cfs_time_seconds(timeout)),
303 cfs_time_seconds(1), NULL, NULL);
304 rc = l_wait_event(imp->imp_recovery_waitq,
305 (atomic_read(&imp->imp_inflight) == 0), &lwi);
/* wait failed or timed out: diagnose what is still outstanding */
307 const char *cli_tgt = obd2cli_tgt(imp->imp_obd);
309 CERROR("%s: rc = %d waiting for callback (%d != 0)\n",
310 cli_tgt, rc, atomic_read(&imp->imp_inflight));
312 spin_lock(&imp->imp_lock);
313 list_for_each_safe(tmp, n, &imp->imp_sending_list) {
314 req = list_entry(tmp, struct ptlrpc_request,
316 DEBUG_REQ(D_ERROR, req,"still on sending list");
318 list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
319 req = list_entry(tmp, struct ptlrpc_request,
321 DEBUG_REQ(D_ERROR, req,"still on delayed list");
324 if (atomic_read(&imp->imp_unregistering) == 0) {
325 /* We know that only "unregistering" rpcs may
326 * still survive in sending or delaying lists
327 * (They are waiting for long reply unlink in
328 * sluggish nets). Let's check this. If there
329 * is no unregistering and inflight != 0 this
331 LASSERT(atomic_read(&imp->imp_inflight) == 0);
333 /* Let's save one loop as soon as inflight have
334 * dropped to zero. No new inflights possible at
338 CERROR("%s: RPCs in \"%s\" phase found (%d). "
339 "Network is sluggish? Waiting them "
340 "to error out.\n", cli_tgt,
341 ptlrpc_phase2str(RQ_PHASE_UNREGISTERING),
342 atomic_read(&imp->imp_unregistering));
344 spin_unlock(&imp->imp_lock);
348 /* Let's additionally check that no new rpcs added to import in
349 * "invalidate" state. */
350 LASSERT(atomic_read(&imp->imp_inflight) == 0);
/* all RPCs drained: let the obd layer flush its state (locks etc.) */
353 obd_import_event(imp->imp_obd, imp, IMP_EVENT_INVALIDATE);
355 atomic_dec(&imp->imp_inval_count);
/* wake anyone waiting for imp_inval_count to reach zero */
356 cfs_waitq_broadcast(&imp->imp_recovery_waitq);
359 /* unset imp_invalid */
/* Re-enable an import: clear imp_invalid under imp_lock, then notify the
 * obd layer via IMP_EVENT_ACTIVE (outside the lock). */
360 void ptlrpc_activate_import(struct obd_import *imp)
362 struct obd_device *obd = imp->imp_obd;
364 spin_lock(&imp->imp_lock);
365 imp->imp_invalid = 0;
366 spin_unlock(&imp->imp_lock);
368 obd_import_event(obd, imp, IMP_EVENT_ACTIVE);
/* Handle a connection failure at epoch @conn_cnt: move the import to
 * DISCON (if it was FULL), then either deactivate it (non-replayable) or
 * kick the pinger to drive recovery.  NOTE(review): excerpt — the else
 * keyword between the two branches and the closing braces are not
 * visible in this view. */
371 void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt)
/* dlm_fake imports are never failed this way */
375 LASSERT(!imp->imp_dlm_fake);
377 if (ptlrpc_set_import_discon(imp, conn_cnt)) {
378 if (!imp->imp_replayable) {
379 CDEBUG(D_HA, "import %s@%s for %s not replayable, "
380 "auto-deactivating\n",
381 obd2cli_tgt(imp->imp_obd),
382 imp->imp_connection->c_remote_uuid.uuid,
383 imp->imp_obd->obd_name);
384 ptlrpc_deactivate_import(imp);
387 CDEBUG(D_HA, "%s: waking up pinger\n",
388 obd2cli_tgt(imp->imp_obd));
/* force_verify makes the pinger re-check this import now */
390 spin_lock(&imp->imp_lock);
391 imp->imp_force_verify = 1;
392 spin_unlock(&imp->imp_lock);
394 ptlrpc_pinger_wake_up();
/* Force a full disconnect/reconnect cycle on an import (used by the MGC,
 * per the comment below).  NOTE(review): excerpt — rc declaration, the
 * rc check after l_wait_event, and the return path are not visible. */
399 int ptlrpc_reconnect_import(struct obd_import *imp)
402 ptlrpc_set_import_discon(imp, 0);
403 /* Force a new connect attempt */
404 ptlrpc_invalidate_import(imp);
405 /* Do a fresh connect next time by zeroing the handle */
406 ptlrpc_disconnect_import(imp, 1);
407 /* Wait for all invalidate calls to finish */
408 if (atomic_read(&imp->imp_inval_count) > 0) {
/* interruptible wait: a signal aborts with the CERROR below */
410 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
411 rc = l_wait_event(imp->imp_recovery_waitq,
412 (atomic_read(&imp->imp_inval_count) == 0),
415 CERROR("Interrupted, inval=%d\n",
416 atomic_read(&imp->imp_inval_count));
420 * Allow reconnect attempts. Note: Currently, the function is
421 * only called by MGC. So assume this is a recoverable import,
422 * and force import to be recoverable. fix this if you need to
425 imp->imp_obd->obd_no_recov = 0;
426 /* Remove 'invalid' flag */
427 ptlrpc_activate_import(imp);
428 /* Attempt a new connect */
429 ptlrpc_recover_import(imp, NULL);
433 EXPORT_SYMBOL(ptlrpc_reconnect_import);
/* Pick which connection (NID) on imp_conn_list to use for the next
 * connect attempt, update the import and its DLM export to point at it,
 * and grow the AT net-latency estimate when every candidate has been
 * tried.  NOTE(review): excerpt — declarations of tried_all, the loop
 * braces, several continue/else lines, and the return are not visible in
 * this view. */
435 static int import_select_connection(struct obd_import *imp)
437 struct obd_import_conn *imp_conn = NULL, *conn;
438 struct obd_export *dlmexp;
442 spin_lock(&imp->imp_lock);
444 if (list_empty(&imp->imp_conn_list)) {
445 CERROR("%s: no connections available\n",
446 imp->imp_obd->obd_name);
447 spin_unlock(&imp->imp_lock);
451 list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
452 CDEBUG(D_HA, "%s: connect to NID %s last attempt "LPU64"\n",
453 imp->imp_obd->obd_name,
454 libcfs_nid2str(conn->oic_conn->c_peer.nid),
455 conn->oic_last_attempt);
457 /* Don't thrash connections */
458 if (cfs_time_before_64(cfs_time_current_64(),
459 conn->oic_last_attempt +
460 cfs_time_seconds(CONNECTION_SWITCH_MIN))) {
464 /* If we have not tried this connection since the
465 the last successful attempt, go with this one */
466 if ((conn->oic_last_attempt == 0) ||
467 cfs_time_beforeq_64(conn->oic_last_attempt,
468 imp->imp_last_success_conn)) {
474 /* If all of the connections have already been tried
475 since the last successful connection; just choose the
476 least recently used */
479 else if (cfs_time_before_64(conn->oic_last_attempt,
480 imp_conn->oic_last_attempt))
484 /* if not found, simply choose the current one */
486 LASSERT(imp->imp_conn_current);
487 imp_conn = imp->imp_conn_current;
490 LASSERT(imp_conn->oic_conn);
492 /* If we've tried everything, and we're back to the beginning of the
493 list, increase our timeout and try again. It will be reset when
494 we do finally connect. (FIXME: really we should wait for all network
495 state associated with the last connection attempt to drain before
496 trying to reconnect on it.) */
497 if (tried_all && (imp->imp_conn_list.next == &imp_conn->oic_item) &&
498 !imp->imp_recon_bk /* not retrying */) {
/* cap the adaptive-timeout latency bump at CONNECTION_SWITCH_MAX */
499 if (at_get(&imp->imp_at.iat_net_latency) <
500 CONNECTION_SWITCH_MAX) {
501 at_add(&imp->imp_at.iat_net_latency,
502 MIN(at_get(&imp->imp_at.iat_net_latency) +
503 CONNECTION_SWITCH_INC, CONNECTION_SWITCH_MAX));
505 LASSERT(imp_conn->oic_last_attempt);
506 CWARN("%s: tried all connections, increasing latency to %ds\n",
507 imp->imp_obd->obd_name,
508 at_get(&imp->imp_at.iat_net_latency));
511 imp_conn->oic_last_attempt = cfs_time_current_64();
513 /* switch connection, don't mind if it's same as the current one */
514 if (imp->imp_connection)
515 ptlrpc_connection_put(imp->imp_connection);
516 imp->imp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
/* keep the DLM export's connection in sync with the import's */
518 dlmexp = class_conn2export(&imp->imp_dlm_handle);
519 LASSERT(dlmexp != NULL);
520 if (dlmexp->exp_connection)
521 ptlrpc_connection_put(dlmexp->exp_connection);
522 dlmexp->exp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
523 class_export_put(dlmexp);
525 if (imp->imp_conn_current != imp_conn) {
526 if (imp->imp_conn_current)
527 CDEBUG(D_HA, "Changing connection for %s to %s/%s\n",
528 imp->imp_obd->obd_name, imp_conn->oic_uuid.uuid,
529 libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
530 imp->imp_conn_current = imp_conn;
533 CDEBUG(D_HA, "%s: import %p using connection %s/%s\n",
534 imp->imp_obd->obd_name, imp, imp_conn->oic_uuid.uuid,
535 libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
537 spin_unlock(&imp->imp_lock);
543 * must be called under imp lock
/* Fetch the transno of the first request on the replay list into
 * *transno; complains if that transno is zero.  NOTE(review): excerpt —
 * the return statements/values are not visible in this view. */
545 static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno)
547 struct ptlrpc_request *req;
548 struct list_head *tmp;
550 if (list_empty(&imp->imp_replay_list))
/* head of the replay list is the oldest request to replay */
552 tmp = imp->imp_replay_list.next;
553 req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
554 *transno = req->rq_transno;
555 if (req->rq_transno == 0) {
556 DEBUG_REQ(D_ERROR, req, "zero transno in replay");
/* Build and queue an asynchronous CONNECT RPC for @imp, optionally after
 * switching to @new_uuid as the preferred connection.  The reply is
 * handled by ptlrpc_connect_interpret().  NOTE(review): excerpt — rc /
 * set_transno declarations, several GOTO/return lines, the initial_connect
 * assignment, and closing braces are not visible in this view. */
563 int ptlrpc_connect_import(struct obd_import *imp, char *new_uuid)
565 struct obd_device *obd = imp->imp_obd;
567 int initial_connect = 0;
569 __u64 committed_before_reconnect = 0;
570 struct ptlrpc_request *request;
/* request buffers: ptlrpc body, target uuid, client uuid, dlm handle,
 * connect data */
571 __u32 size[] = { sizeof(struct ptlrpc_body),
572 sizeof(imp->imp_obd->u.cli.cl_target_uuid),
573 sizeof(obd->obd_uuid),
574 sizeof(imp->imp_dlm_handle),
575 sizeof(imp->imp_connect_data) };
576 char *tmp[] = { NULL,
577 obd2cli_tgt(imp->imp_obd),
579 (char *)&imp->imp_dlm_handle,
580 (char *)&imp->imp_connect_data };
581 struct ptlrpc_connect_async_args *aa;
/* refuse to connect in CLOSED/FULL/CONNECTING states */
584 spin_lock(&imp->imp_lock);
585 if (imp->imp_state == LUSTRE_IMP_CLOSED) {
586 spin_unlock(&imp->imp_lock);
587 CERROR("can't connect to a closed import\n");
589 } else if (imp->imp_state == LUSTRE_IMP_FULL) {
590 spin_unlock(&imp->imp_lock);
591 CERROR("already connected\n");
593 } else if (imp->imp_state == LUSTRE_IMP_CONNECTING) {
594 spin_unlock(&imp->imp_lock);
595 CERROR("already connecting\n");
599 IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CONNECTING);
602 imp->imp_resend_replay = 0;
/* an unused remote handle means this is the first connect */
604 if (!lustre_handle_is_used(&imp->imp_remote_handle))
607 committed_before_reconnect = imp->imp_peer_committed_transno;
609 set_transno = ptlrpc_first_transno(imp,
610 &imp->imp_connect_data.ocd_transno);
612 spin_unlock(&imp->imp_lock);
/* caller-supplied uuid overrides connection priority */
615 struct obd_uuid uuid;
617 obd_str2uuid(&uuid, new_uuid);
618 rc = import_set_conn_priority(imp, &uuid);
623 rc = import_select_connection(imp);
627 /* last in connection list */
628 if (imp->imp_conn_current->oic_item.next == &imp->imp_conn_list) {
629 if (imp->imp_initial_recov_bk && initial_connect) {
630 CDEBUG(D_HA, "Last connection attempt (%d) for %s\n",
631 imp->imp_conn_cnt, obd2cli_tgt(imp->imp_obd));
632 /* Don't retry if connect fails */
634 obd_set_info_async(obd->obd_self_export,
635 sizeof(KEY_INIT_RECOV),
637 sizeof(rc), &rc, NULL);
639 if (imp->imp_recon_bk) {
640 CDEBUG(D_HA, "Last reconnection attempt (%d) for %s\n",
641 imp->imp_conn_cnt, obd2cli_tgt(imp->imp_obd));
642 spin_lock(&imp->imp_lock);
643 imp->imp_last_recon = 1;
644 spin_unlock(&imp->imp_lock);
648 /* Reset connect flags to the originally requested flags, in case
649 * the server is updated on-the-fly we will get the new features. */
650 imp->imp_connect_data.ocd_connect_flags = imp->imp_connect_flags_orig;
651 imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
653 rc = obd_reconnect(imp->imp_obd->obd_self_export, obd,
654 &obd->obd_uuid, &imp->imp_connect_data, NULL);
658 request = ptlrpc_prep_req(imp, LUSTRE_OBD_VERSION, imp->imp_connect_op,
661 GOTO(out, rc = -ENOMEM);
663 /* Report the rpc service time to the server so that it knows how long
664 * to wait for clients to join recovery */
665 lustre_msg_set_service_time(request->rq_reqmsg,
666 at_timeout2est(request->rq_timeout));
668 /* The amount of time we give the server to process the connect req.
669 * import_select_connection will increase the net latency on
670 * repeated reconnect attempts to cover slow networks.
671 * We override/ignore the server rpc completion estimate here,
672 * which may be large if this is a reconnect attempt */
673 request->rq_timeout = INITIAL_CONNECT_TIMEOUT;
674 lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout);
677 lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_LIBCLIENT);
/* advertise v2 capability when still speaking v1 */
679 if (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V1)
680 lustre_msg_add_op_flags(request->rq_reqmsg,
681 MSG_CONNECT_NEXT_VER);
683 request->rq_no_resend = request->rq_no_delay = 1;
684 request->rq_send_state = LUSTRE_IMP_CONNECTING;
685 /* Allow a slightly larger reply for future growth compatibility */
686 size[REPLY_REC_OFF] = sizeof(struct obd_connect_data) +
688 ptlrpc_req_set_repsize(request, 2, size);
689 request->rq_interpret_reply = ptlrpc_connect_interpret;
/* stash context for the interpret callback in rq_async_args */
691 CLASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
692 aa = ptlrpc_req_async_args(request);
693 memset(aa, 0, sizeof *aa);
695 aa->pcaa_peer_committed = committed_before_reconnect;
696 aa->pcaa_initial_connect = initial_connect;
697 if (aa->pcaa_initial_connect) {
698 spin_lock(&imp->imp_lock);
699 imp->imp_replayable = 1;
700 spin_unlock(&imp->imp_lock);
701 lustre_msg_add_op_flags(request->rq_reqmsg,
702 MSG_CONNECT_INITIAL);
706 lustre_msg_add_op_flags(request->rq_reqmsg,
707 MSG_CONNECT_TRANSNO);
709 DEBUG_REQ(D_RPCTRACE, request, "%sconnect request %d",
710 aa->pcaa_initial_connect ? "initial " : "re",
/* hand the request to ptlrpcd for async processing */
712 ptlrpcd_add_req(request);
/* error path: fall back to DISCON state */
716 IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
721 EXPORT_SYMBOL(ptlrpc_connect_import);
/* After a failed connect, schedule an early ping if the next candidate
 * connection has a different NID than the current one (so alternates are
 * tried promptly without defeating throttling).  NOTE(review): excerpt —
 * several lines (empty-list handling, the oic_item member argument of
 * list_entry, closing braces) are not visible in this view. */
723 static void ptlrpc_maybe_ping_import_soon(struct obd_import *imp)
726 struct obd_import_conn *imp_conn;
732 spin_lock(&imp->imp_lock);
733 if (list_empty(&imp->imp_conn_list))
/* last entry = the connection least recently tried */
737 imp_conn = list_entry(imp->imp_conn_list.prev,
738 struct obd_import_conn,
741 /* XXX: When the failover node is the primary node, it is possible
742 * to have two identical connections in imp_conn_list. We must
743 * compare not conn's pointers but NIDs, otherwise we can defeat
744 * connection throttling. (See bug 14774.) */
745 if (imp->imp_conn_current->oic_conn->c_peer.nid !=
746 imp_conn->oic_conn->c_peer.nid) {
747 ptlrpc_ping_import_soon(imp);
752 /* liblustre has no pinger thead, so we wakup pinger anyway */
756 spin_unlock(&imp->imp_lock);
759 ptlrpc_pinger_wake_up();
/* Reply callback for the CONNECT RPC queued by ptlrpc_connect_import():
 * decides the next import state (FULL / REPLAY / REPLAY_LOCKS / RECOVER /
 * EVICTED / DISCON), absorbs the server's obd_connect_data, and drives
 * the recovery state machine.  NOTE(review): excerpt — many lines (rc
 * handling, several else/closing-brace lines, the #ifdef around version
 * warnings, and parts of the error path) are not visible in this view;
 * the flow annotations below are partial. */
764 static int ptlrpc_connect_interpret(struct ptlrpc_request *request,
767 struct ptlrpc_connect_async_args *aa = data;
768 struct obd_import *imp = request->rq_import;
769 struct client_obd *cli = &imp->imp_obd->u.cli;
770 struct lustre_handle old_hdl;
771 __u64 old_connect_flags;
/* a CLOSED import means teardown raced with us: bail out */
775 spin_lock(&imp->imp_lock);
776 if (imp->imp_state == LUSTRE_IMP_CLOSED) {
777 spin_unlock(&imp->imp_lock);
780 spin_unlock(&imp->imp_lock);
785 LASSERT(imp->imp_conn_current);
787 msg_flags = lustre_msg_get_op_flags(request->rq_repmsg);
789 /* All imports are pingable */
790 spin_lock(&imp->imp_lock);
791 imp->imp_pingable = 1;
/* ---- first-ever connect ---- */
793 if (aa->pcaa_initial_connect) {
794 if (msg_flags & MSG_CONNECT_REPLAYABLE) {
795 imp->imp_replayable = 1;
796 spin_unlock(&imp->imp_lock);
797 CDEBUG(D_HA, "connected to replayable target: %s\n",
798 obd2cli_tgt(imp->imp_obd));
800 imp->imp_replayable = 0;
801 spin_unlock(&imp->imp_lock);
/* upgrade to msg v2 when server advertises NEXT_VER or already v2 */
804 if ((request->rq_reqmsg->lm_magic == LUSTRE_MSG_MAGIC_V1 &&
805 msg_flags & MSG_CONNECT_NEXT_VER) ||
806 request->rq_reqmsg->lm_magic == LUSTRE_MSG_MAGIC_V2) {
807 imp->imp_msg_magic = LUSTRE_MSG_MAGIC_V2;
808 CDEBUG(D_RPCTRACE, "connect to %s with lustre_msg_v2\n",
809 obd2cli_tgt(imp->imp_obd));
811 CDEBUG(D_RPCTRACE, "connect to %s with lustre_msg_v1\n",
812 obd2cli_tgt(imp->imp_obd));
815 imp->imp_remote_handle =
816 *lustre_msg_get_handle(request->rq_repmsg);
818 /* Initial connects are allowed for clients with non-random
819 * uuids when servers are in recovery. Simply signal the
820 * servers replay is complete and wait in REPLAY_WAIT. */
821 if (msg_flags & MSG_CONNECT_RECOVERING) {
822 CDEBUG(D_HA, "connect to %s during recovery\n",
823 obd2cli_tgt(imp->imp_obd));
824 IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS);
826 IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL);
827 ptlrpc_activate_import(imp);
829 GOTO(finish, rc = 0);
831 spin_unlock(&imp->imp_lock);
834 /* Determine what recovery state to move the import to. */
/* ---- server says this is a reconnect of an existing session ---- */
835 if (MSG_CONNECT_RECONNECT & msg_flags) {
836 memset(&old_hdl, 0, sizeof(old_hdl));
/* server returned a zero handle: it rejected ours */
837 if (!memcmp(&old_hdl, lustre_msg_get_handle(request->rq_repmsg),
839 CERROR("%s@%s didn't like our handle "LPX64
840 ", failed\n", obd2cli_tgt(imp->imp_obd),
841 imp->imp_connection->c_remote_uuid.uuid,
842 imp->imp_dlm_handle.cookie);
843 GOTO(out, rc = -ENOTCONN);
846 if (memcmp(&imp->imp_remote_handle,
847 lustre_msg_get_handle(request->rq_repmsg),
848 sizeof(imp->imp_remote_handle))) {
849 int level = msg_flags & MSG_CONNECT_RECOVERING ?
852 /* Bug 16611/14775: if server handle have changed,
853 * that means some sort of disconnection happened.
854 * If the server is not in recovery, that also means it
855 * already erased all of our state because of previous
856 * eviction. If it is in recovery - we are safe to
857 * participate since we can reestablish all of our state
858 * with server again */
859 CDEBUG(level,"%s@%s changed server handle from "
860 LPX64" to "LPX64"%s\n",
861 obd2cli_tgt(imp->imp_obd),
862 imp->imp_connection->c_remote_uuid.uuid,
863 imp->imp_remote_handle.cookie,
864 lustre_msg_get_handle(request->rq_repmsg)->
866 (MSG_CONNECT_RECOVERING & msg_flags) ?
867 " but is still in recovery" : "");
869 imp->imp_remote_handle =
870 *lustre_msg_get_handle(request->rq_repmsg);
/* handle changed + server not recovering = we were evicted */
872 if (!(MSG_CONNECT_RECOVERING & msg_flags)) {
873 IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
874 GOTO(finish, rc = 0);
878 CDEBUG(D_HA, "reconnected to %s@%s after partition\n",
879 obd2cli_tgt(imp->imp_obd),
880 imp->imp_connection->c_remote_uuid.uuid);
883 if (imp->imp_invalid) {
884 CDEBUG(D_HA, "%s: reconnected but import is invalid; "
885 "marking evicted\n", imp->imp_obd->obd_name);
886 IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
887 } else if (MSG_CONNECT_RECOVERING & msg_flags) {
888 CDEBUG(D_HA, "%s: reconnected to %s during replay\n",
889 imp->imp_obd->obd_name,
890 obd2cli_tgt(imp->imp_obd));
892 spin_lock(&imp->imp_lock);
893 imp->imp_resend_replay = 1;
894 /* VBR: delayed connection */
895 if (MSG_CONNECT_DELAYED & msg_flags) {
896 imp->imp_delayed_recovery = 1;
897 imp->imp_no_lock_replay = 1;
899 spin_unlock(&imp->imp_lock);
901 IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
903 IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
/* ---- fresh connect while server is recovering ---- */
905 } else if ((MSG_CONNECT_RECOVERING & msg_flags) && !imp->imp_invalid) {
906 LASSERT(imp->imp_replayable);
907 imp->imp_remote_handle =
908 *lustre_msg_get_handle(request->rq_repmsg);
909 imp->imp_last_replay_transno = 0;
910 /* VBR: delayed connection */
911 if (MSG_CONNECT_DELAYED & msg_flags) {
912 spin_lock(&imp->imp_lock);
913 imp->imp_delayed_recovery = 1;
914 imp->imp_no_lock_replay = 1;
915 spin_unlock(&imp->imp_lock);
917 IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
/* ---- neither RECONNECT nor RECOVERING: treated as eviction ---- */
919 DEBUG_REQ(D_HA, request, "evicting (not initial connect and "
920 "flags reconnect/recovering not set: %x)",msg_flags);
921 imp->imp_remote_handle =
922 *lustre_msg_get_handle(request->rq_repmsg);
923 IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
926 /* Sanity checks for a reconnected import. */
927 if (!(imp->imp_replayable) != !(msg_flags & MSG_CONNECT_REPLAYABLE)) {
928 CERROR("imp_replayable flag does not match server "
929 "after reconnect. We should LBUG right here.\n");
/* a shrinking last_committed means the server lost committed state */
932 if (lustre_msg_get_last_committed(request->rq_repmsg) <
933 aa->pcaa_peer_committed) {
934 CERROR("%s went back in time (transno "LPD64
935 " was previously committed, server now claims "LPD64
936 ")! See https://bugzilla.lustre.org/show_bug.cgi?"
938 obd2cli_tgt(imp->imp_obd), aa->pcaa_peer_committed,
939 lustre_msg_get_last_committed(request->rq_repmsg));
/* drive recovery forward from the state chosen above */
943 rc = ptlrpc_import_recovery_state_machine(imp);
945 if (rc == -ENOTCONN) {
946 CDEBUG(D_HA, "evicted/aborted by %s@%s during recovery;"
947 "invalidating and reconnecting\n",
948 obd2cli_tgt(imp->imp_obd),
949 imp->imp_connection->c_remote_uuid.uuid);
950 ptlrpc_connect_import(imp, NULL);
/* ---- finish: absorb the server's connect data ---- */
954 struct obd_connect_data *ocd;
955 struct obd_export *exp;
957 ocd = lustre_swab_repbuf(request, REPLY_REC_OFF, sizeof(*ocd),
958 lustre_swab_connect);
959 spin_lock(&imp->imp_lock);
/* move the winning connection to the front of the list and remember
 * when it last succeeded */
960 list_del(&imp->imp_conn_current->oic_item);
961 list_add(&imp->imp_conn_current->oic_item, &imp->imp_conn_list);
962 imp->imp_last_success_conn =
963 imp->imp_conn_current->oic_last_attempt;
966 spin_unlock(&imp->imp_lock);
967 CERROR("Wrong connect data from server\n");
972 imp->imp_connect_data = *ocd;
974 exp = class_conn2export(&imp->imp_dlm_handle);
975 spin_unlock(&imp->imp_lock);
977 /* check that server granted subset of flags we asked for. */
978 LASSERTF((ocd->ocd_connect_flags &
979 imp->imp_connect_flags_orig) ==
980 ocd->ocd_connect_flags, LPX64" != "LPX64,
981 imp->imp_connect_flags_orig, ocd->ocd_connect_flags);
984 /* This could happen if export is cleaned during the
986 CERROR("Missing export for %s\n",
987 imp->imp_obd->obd_name);
988 GOTO(out, rc = -ENODEV);
990 old_connect_flags = exp->exp_connect_flags;
991 exp->exp_connect_flags = ocd->ocd_connect_flags;
992 imp->imp_obd->obd_self_export->exp_connect_flags =
993 ocd->ocd_connect_flags;
994 class_export_put(exp);
996 obd_import_event(imp->imp_obd, imp, IMP_EVENT_OCD);
998 if (!ocd->ocd_ibits_known &&
999 ocd->ocd_connect_flags & OBD_CONNECT_IBITS)
1000 CERROR("Inodebits aware server returned zero compatible"
/* warn when client/server versions differ by too much */
1003 if ((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
1004 (ocd->ocd_version > LUSTRE_VERSION_CODE +
1005 LUSTRE_VERSION_OFFSET_WARN ||
1006 ocd->ocd_version < LUSTRE_VERSION_CODE -
1007 LUSTRE_VERSION_OFFSET_WARN)) {
1008 /* Sigh, some compilers do not like #ifdef in the middle
1009 of macro arguments */
1012 "older. Consider upgrading this client";
1015 "older. Consider recompiling this application";
1017 const char *newer = "newer than client version";
1019 LCONSOLE_WARN("Server %s version (%d.%d.%d.%d) "
1020 "is much %s (%s)\n",
1021 obd2cli_tgt(imp->imp_obd),
1022 OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
1023 OBD_OCD_VERSION_MINOR(ocd->ocd_version),
1024 OBD_OCD_VERSION_PATCH(ocd->ocd_version),
1025 OBD_OCD_VERSION_FIX(ocd->ocd_version),
1026 ocd->ocd_version > LUSTRE_VERSION_CODE ?
1027 newer : older, LUSTRE_VERSION_STRING);
/* negotiate checksum algorithm from the server's supported mask */
1030 if (ocd->ocd_connect_flags & OBD_CONNECT_CKSUM) {
1031 /* We sent to the server ocd_cksum_types with bits set
1032 * for algorithms we understand. The server masked off
1033 * the checksum types it doesn't support */
1034 if ((ocd->ocd_cksum_types & OBD_CKSUM_ALL) == 0) {
1035 LCONSOLE_WARN("The negotiation of the checksum "
1036 "alogrithm to use with server %s "
1037 "failed (%x/%x), disabling "
1039 obd2cli_tgt(imp->imp_obd),
1040 ocd->ocd_cksum_types,
1042 cli->cl_checksum = 0;
1043 cli->cl_supp_cksum_types = OBD_CKSUM_CRC32;
1044 cli->cl_cksum_type = OBD_CKSUM_CRC32;
1046 cli->cl_supp_cksum_types = ocd->ocd_cksum_types;
1048 if (ocd->ocd_cksum_types & OSC_DEFAULT_CKSUM)
1049 cli->cl_cksum_type = OSC_DEFAULT_CKSUM;
1050 else if (ocd->ocd_cksum_types & OBD_CKSUM_ADLER)
1051 cli->cl_cksum_type = OBD_CKSUM_ADLER;
1053 cli->cl_cksum_type = OBD_CKSUM_CRC32;
1056 /* The server does not support OBD_CONNECT_CKSUM.
1057 * Enforce CRC32 for backward compatibility*/
1058 cli->cl_supp_cksum_types = OBD_CKSUM_CRC32;
1059 cli->cl_cksum_type = OBD_CKSUM_CRC32;
1062 if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) {
1063 cli->cl_max_pages_per_rpc =
1064 ocd->ocd_brw_size >> CFS_PAGE_SHIFT;
1067 /* Reset ns_connect_flags only for initial connect. It might be
1068 * changed in while using FS and if we reset it in reconnect
1069 * this leads to lossing user settings done before such as
1070 * disable lru_resize, etc. */
1071 if (old_connect_flags != exp->exp_connect_flags ||
1072 aa->pcaa_initial_connect) {
1073 CDEBUG(D_HA, "%s: Resetting ns_connect_flags to server "
1074 "flags: "LPX64"\n", imp->imp_obd->obd_name,
1075 ocd->ocd_connect_flags);
1076 imp->imp_obd->obd_namespace->ns_connect_flags =
1077 ocd->ocd_connect_flags;
1078 imp->imp_obd->obd_namespace->ns_orig_connect_flags =
1079 ocd->ocd_connect_flags;
1082 if ((ocd->ocd_connect_flags & OBD_CONNECT_AT) &&
1083 (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
1084 /* We need a per-message support flag, because
1085 a. we don't know if the incoming connect reply
1086 supports AT or not (in reply_in_callback)
1088 b. failovered server means export and flags are gone
1089 (in ptlrpc_send_reply).
1090 Can only be set when we know AT is supported at
1092 imp->imp_msghdr_flags |= MSGHDR_AT_SUPPORT;
1094 imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
1096 LASSERT((cli->cl_max_pages_per_rpc <= PTLRPC_MAX_BRW_PAGES) &&
1097 (cli->cl_max_pages_per_rpc > 0));
/* ---- error path ---- */
1102 IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
1103 spin_lock(&imp->imp_lock);
/* deactivate only if no newer connect superseded this one */
1104 if (aa->pcaa_initial_connect && !imp->imp_initial_recov &&
1105 (request->rq_import_generation == imp->imp_generation))
1106 ptlrpc_deactivate_and_unlock_import(imp);
1108 spin_unlock(&imp->imp_lock);
1110 if (imp->imp_recon_bk && imp->imp_last_recon) {
1111 /* Give up trying to reconnect */
1112 imp->imp_obd->obd_no_recov = 1;
1113 ptlrpc_deactivate_import(imp);
1116 if (rc == -EPROTO) {
1117 struct obd_connect_data *ocd;
1118 ocd = lustre_swab_repbuf(request, REPLY_REC_OFF,
1120 lustre_swab_connect);
1122 (ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
1123 (ocd->ocd_version != LUSTRE_VERSION_CODE)) {
1124 /* Actually servers are only supposed to refuse
1125 connection from liblustre clients, so we should
1126 never see this from VFS context */
1127 LCONSOLE_ERROR_MSG(0x16a, "Server %s version "
1129 " refused connection from this client "
1130 "with an incompatible version (%s). "
1131 "Client must be recompiled\n",
1132 obd2cli_tgt(imp->imp_obd),
1133 OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
1134 OBD_OCD_VERSION_MINOR(ocd->ocd_version),
1135 OBD_OCD_VERSION_PATCH(ocd->ocd_version),
1136 OBD_OCD_VERSION_FIX(ocd->ocd_version),
1137 LUSTRE_VERSION_STRING);
1138 ptlrpc_deactivate_import(imp);
/* version mismatch is terminal: close the import */
1139 IMPORT_SET_STATE(imp, LUSTRE_IMP_CLOSED);
1144 ptlrpc_maybe_ping_import_soon(imp);
1146 CDEBUG(D_HA, "recovery of %s on %s failed (%d)\n",
1147 obd2cli_tgt(imp->imp_obd),
1148 (char *)imp->imp_connection->c_remote_uuid.uuid, rc);
1151 spin_lock(&imp->imp_lock);
1152 imp->imp_last_recon = 0;
1153 spin_unlock(&imp->imp_lock);
/* wake any waiters blocked on recovery progress */
1155 cfs_waitq_broadcast(&imp->imp_recovery_waitq);
/* Reply interpreter for the "completed replay" LAST_REPLAY ping sent by
 * signal_completed_replay() below.  Drops the replay-inflight counter and
 * either advances the import recovery state machine (clean completion) or
 * forces a fresh connect attempt on failure.
 * NOTE(review): this listing is elided -- braces, the LCONSOLE/CERROR call
 * bodies and the final RETURN are not visible here. */
1159 static int completed_replay_interpret(struct ptlrpc_request *req,
1160                                     void * data, int rc)
/* one fewer LAST_REPLAY ping outstanding on this import */
1163         atomic_dec(&req->rq_import->imp_replay_inflight);
1164         if (req->rq_status == 0 &&
1165             !req->rq_import->imp_vbr_failed) {
/* success and no version-based-recovery failure: move recovery forward */
1166                 ptlrpc_import_recovery_state_machine(req->rq_import);
1168                 if (req->rq_import->imp_vbr_failed) {
1170                                "%s: version recovery fails, reconnecting\n",
1171                                req->rq_import->imp_obd->obd_name);
/* clear the VBR failure flag under imp_lock before retrying the connect */
1172                         spin_lock(&req->rq_import->imp_lock);
1173                         req->rq_import->imp_vbr_failed = 0;
1174                         spin_unlock(&req->rq_import->imp_lock);
1176                         CDEBUG(D_HA, "%s: LAST_REPLAY message error: %d, "
1178                                req->rq_import->imp_obd->obd_name,
/* in the failure paths, reconnect the import */
1181                 ptlrpc_connect_import(req->rq_import, NULL);
/* Tell the server that request/lock replay is finished: send an async
 * OBD_PING flagged MSG_LOCK_REPLAY_DONE | MSG_REQ_REPLAY_DONE (the
 * "LAST_REPLAY" message) via ptlrpcd.  Completion is handled by
 * completed_replay_interpret() above, which decrements
 * imp_replay_inflight again.
 * NOTE(review): elided listing -- the NULL check on req and RETURNs are
 * not visible here. */
1186 static int signal_completed_replay(struct obd_import *imp)
1188         struct ptlrpc_request *req;
/* there must be no replay pings already in flight; we account exactly one */
1191         LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
1192         atomic_inc(&imp->imp_replay_inflight);
1194         req = ptlrpc_prep_req(imp, LUSTRE_OBD_VERSION, OBD_PING, 1, NULL, NULL);
/* allocation failed: undo the inflight accounting */
1196                 atomic_dec(&imp->imp_replay_inflight);
1200         ptlrpc_req_set_repsize(req, 1, NULL);
/* only sendable once the import reaches REPLAY_WAIT */
1201         req->rq_send_state = LUSTRE_IMP_REPLAY_WAIT;
1202         lustre_msg_add_flags(req->rq_reqmsg,
1203                              MSG_LOCK_REPLAY_DONE |
1204                              MSG_REQ_REPLAY_DONE |
1207         if (imp->imp_delayed_recovery)
1208                 lustre_msg_add_flags(req->rq_reqmsg, MSG_DELAY_REPLAY);
/* give the server extra time to process the end-of-replay barrier */
1209         req->rq_timeout *= 3;
1210         req->rq_interpret_reply = completed_replay_interpret;
/* hand off to ptlrpcd: this call does not block waiting for the reply */
1212         ptlrpcd_add_req(req);
/* Kernel-thread entry point ("ll_imp_inval") that invalidates an evicted
 * import outside the caller's context, optionally dumps the debug log,
 * then restarts recovery.  The spawner takes an extra import reference
 * (see ptlrpc_import_recovery_state_machine) which is dropped here. */
1217 static int ptlrpc_invalidate_import_thread(void *data)
1219         struct obd_import *imp = data;
1223         ptlrpc_daemonize("ll_imp_inval");
1225         CDEBUG(D_HA, "thread invalidate import %s to %s@%s\n",
1226                imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
1227                imp->imp_connection->c_remote_uuid.uuid);
/* discard all outstanding state for the evicted import */
1229         ptlrpc_invalidate_import(imp);
1231         if (obd_dump_on_eviction) {
1232                 CERROR("dump the log upon eviction\n");
1233                 libcfs_debug_dumplog();
/* invalidation done: resume the recovery state machine at RECOVER */
1236         IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
1237         ptlrpc_import_recovery_state_machine(imp);
/* balances the class_import_get() done by the thread spawner */
1239         class_import_put(imp);
/* Drive an import through its recovery states:
 * EVICTED -> (invalidate, possibly in a separate thread) -> RECOVER
 * REPLAY -> REPLAY_LOCKS -> REPLAY_WAIT -> RECOVER -> FULL.
 * Each if-block below handles one state and may advance imp_state so the
 * next block runs in the same invocation.
 * NOTE(review): elided listing -- local declarations, braces and RETURNs
 * are not all visible here. */
1244 int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
1252         if (imp->imp_state == LUSTRE_IMP_EVICTED) {
1253                 deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
1254                           &target_start, &target_len);
1255                 /* Don't care about MGC eviction */
1256                 if (strcmp(imp->imp_obd->obd_type->typ_name,
1257                            LUSTRE_MGC_NAME) != 0) {
1258                         LCONSOLE_ERROR_MSG(0x167, "This client was evicted by "
1259                                            "%.*s; in progress operations using "
1260                                            "this service will fail.\n",
1261                                            target_len, target_start);
1263                 CDEBUG(D_HA, "evicted from %s@%s; invalidating\n",
1264                        obd2cli_tgt(imp->imp_obd),
1265                        imp->imp_connection->c_remote_uuid.uuid);
1268                 /* bug 17802: XXX client_disconnect_export vs connect request
1269                  * race. If the client is evicted at this time, we would start
1270                  * the invalidate thread without a reference to the import and
1271                  * the import could be freed at the same time. */
1272                 class_import_get(imp);
1273                 rc = cfs_kernel_thread(ptlrpc_invalidate_import_thread, imp,
1274                                        CLONE_VM | CLONE_FILES);
/* thread creation failed: drop the reference taken for the thread and
 * fall back to invalidating synchronously below */
1276                         class_import_put(imp);
1277                         CERROR("error starting invalidate thread: %d\n", rc);
1283                 ptlrpc_invalidate_import(imp);
1285                 IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
/* REPLAY: re-send committed-but-unreplied requests one at a time */
1289         if (imp->imp_state == LUSTRE_IMP_REPLAY) {
1290                 CDEBUG(D_HA, "replay requested by %s\n",
1291                        obd2cli_tgt(imp->imp_obd));
1292                 rc = ptlrpc_replay_next(imp, &inflight);
1293                 if (inflight == 0 &&
1294                     atomic_read(&imp->imp_replay_inflight) == 0) {
1295                         IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS);
1296                         rc = ldlm_replay_locks(imp);
/* REPLAY_LOCKS: once lock replay drains, announce completion */
1303         if (imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS) {
1304                 if (atomic_read(&imp->imp_replay_inflight) == 0) {
1305                         IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_WAIT);
1306                         rc = signal_completed_replay(imp);
/* REPLAY_WAIT: the LAST_REPLAY ping has been answered when inflight hits 0 */
1313         if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT) {
1314                 if (atomic_read(&imp->imp_replay_inflight) == 0) {
1315                         IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
/* RECOVER: resend delayed requests, then reactivate the import */
1319         if (imp->imp_state == LUSTRE_IMP_RECOVER) {
1320                 CDEBUG(D_HA, "reconnected to %s@%s\n",
1321                        obd2cli_tgt(imp->imp_obd),
1322                        imp->imp_connection->c_remote_uuid.uuid);
1324                 rc = ptlrpc_resend(imp);
1327                 IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL);
1328                 ptlrpc_activate_import(imp);
1330                 deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
1331                           &target_start, &target_len);
1332                 LCONSOLE_INFO("%s: Connection restored to service %.*s "
1333                               "using nid %s.\n", imp->imp_obd->obd_name,
1334                               target_len, target_start,
1335                               libcfs_nid2str(imp->imp_connection->c_peer.nid));
/* FULL: wake everyone blocked waiting for recovery to finish */
1338         if (imp->imp_state == LUSTRE_IMP_FULL) {
1339                 cfs_waitq_broadcast(&imp->imp_recovery_waitq);
1340                 ptlrpc_wake_delayed(imp);
1347 static int back_to_sleep(void *unused)
/* Cleanly disconnect an import from its target.  Picks the matching
 * *_DISCONNECT opcode for the connect operation, optionally waits for
 * in-progress recovery to settle, sends a single no-resend DISCONNECT
 * RPC, then moves the import to DISCON (noclose) or CLOSED and zeroes
 * the remote handle.
 * NOTE(review): elided listing -- some declarations, braces, labels
 * (e.g. set_state) and RETURNs are not visible here. */
1352 int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
1354         struct ptlrpc_request *req;
/* obd_force set => forced cleanup: don't wait around for recovery */
1356         int nowait = imp->imp_obd->obd_force;
1360                 GOTO(set_state, rc);
/* map the connect opcode used at setup time to its disconnect twin */
1362         switch (imp->imp_connect_op) {
1363         case OST_CONNECT: rq_opc = OST_DISCONNECT; break;
1364         case MDS_CONNECT: rq_opc = MDS_DISCONNECT; break;
1365         case MGS_CONNECT: rq_opc = MGS_DISCONNECT; break;
1367                 CERROR("don't know how to disconnect from %s (connect_op %d)\n",
1368                        obd2cli_tgt(imp->imp_obd), imp->imp_connect_op);
1372         if (ptlrpc_import_in_recovery(imp)) {
1373                 struct l_wait_info lwi;
1374                 cfs_duration_t timeout;
/* without AT use the static obd_timeout; with AT use the per-portal
 * service estimate instead */
1377                         timeout = cfs_time_seconds(obd_timeout);
1379                         int idx = import_at_get_index(imp,
1380                                 imp->imp_client->cli_request_portal);
1381                         timeout = cfs_time_seconds(
1382                                 at_get(&imp->imp_at.iat_service_estimate[idx]));
1384                 lwi = LWI_TIMEOUT_INTR(cfs_timeout_cap(timeout),
1385                                        back_to_sleep, LWI_ON_SIGNAL_NOOP, NULL);
1386                 rc = l_wait_event(imp->imp_recovery_waitq,
1387                                   !ptlrpc_import_in_recovery(imp), &lwi);
1390         spin_lock(&imp->imp_lock);
1391         if (imp->imp_state != LUSTRE_IMP_FULL)
1394         spin_unlock(&imp->imp_lock);
1396         req = ptlrpc_prep_req(imp, LUSTRE_OBD_VERSION, rq_opc, 1, NULL, NULL);
1398                 /* We are disconnecting, do not retry a failed DISCONNECT rpc if
1399                  * it fails. We can get through the above with a down server
1400                  * if the client doesn't know the server is gone yet. */
1401                 req->rq_no_resend = 1;
1404                 /* We want client umounts to happen quickly, no matter the
                     server state. */
1406                 req->rq_timeout = min_t(int, req->rq_timeout,
1407                                         INITIAL_CONNECT_TIMEOUT);
1409                 /* ... but we always want liblustre clients to nicely
1410                    disconnect, so only use the adaptive value. */
1412                 req->rq_timeout = obd_timeout / 3;
1415                 IMPORT_SET_STATE(imp, LUSTRE_IMP_CONNECTING);
1416                 req->rq_send_state =  LUSTRE_IMP_CONNECTING;
1417                 ptlrpc_req_set_repsize(req, 1, NULL);
1418                 rc = ptlrpc_queue_wait(req);
1419                 ptlrpc_req_finished(req);
1423         spin_lock(&imp->imp_lock);
/* noclose keeps the import reusable (DISCON); otherwise close for good */
1426                 IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
1428                 IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
/* forget the server-side connection handle */
1429         memset(&imp->imp_remote_handle, 0, sizeof(imp->imp_remote_handle));
1430         /* Try all connections in the future - bz 12758 */
1431         imp->imp_last_recon = 0;
1432         spin_unlock(&imp->imp_lock);
1437 /* Sets maximal number of RPCs possible originating from other side of this
1438 import (server) to us and number of async RPC replies that we are not waiting
for anymore. */
1440 void ptlrpc_import_setasync(struct obd_import *imp, int count)
/* simply forwards the limit to LNet for this import's peer */
1442         LNetSetAsync(imp->imp_connection->c_peer, count);
/* Final teardown of an import: under imp_lock mark it CLOSED and bump
 * imp_generation (so requests from the old generation can be told apart),
 * then abort all in-flight requests. */
1445 void ptlrpc_cleanup_imp(struct obd_import *imp)
1449         spin_lock(&imp->imp_lock);
1450         IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
1451         imp->imp_generation++;
1452         spin_unlock(&imp->imp_lock);
/* must run after the state change so aborted requests see CLOSED */
1453         ptlrpc_abort_inflight(imp);
1458 /* Adaptive Timeout utils */
1459 extern unsigned int at_min, at_max, at_history;
1461 /* Bin into timeslices using AT_BINS bins.
1462 This gives us a max of the last binlimit*AT_BINS secs without the storage,
1463 but still smoothing out a return to normalcy from a slow response.
1464 (E.g. remember the maximum latency in each minute of the last 4 minutes.) */
/* Record a new measured value @val into the adaptive-timeout state @at.
 * History is kept in AT_BINS time bins of ~at_history/AT_BINS seconds
 * each; at_current tracks the max over the retained bins, clamped to
 * [at_min, at_max].  Returns the previous at_current if the estimate
 * changed, 0 otherwise (see the final assignment to `old`).
 * NOTE(review): elided listing -- some declarations (shift, i), braces
 * and the RETURN are not visible here. */
1465 int at_add(struct adaptive_timeout *at, unsigned int val)
1467         unsigned int old = at->at_current;
1468         time_t now = cfs_time_current_sec();
1469         time_t binlimit = max_t(time_t, at_history / AT_BINS, 1);
1472         CDEBUG(D_OTHER, "add %u to %p time=%lu v=%u (%u %u %u %u)\n",
1473                val, at, now - at->at_binstart, at->at_current,
1474                at->at_hist[0], at->at_hist[1], at->at_hist[2], at->at_hist[3]);
1477         /* 0's don't count, because we never want our timeout to
1478            drop to 0, and because 0 could mean an error */
1481         spin_lock(&at->at_lock);
1483         if (unlikely(at->at_binstart == 0)) {
1484                 /* Special case to remove default from history */
1485                 at->at_current = val;
1486                 at->at_worst_ever = val;
1487                 at->at_worst_time = now;
1488                 at->at_hist[0] = val;
1489                 at->at_binstart = now;
1490         } else if (now - at->at_binstart < binlimit ) {
/* still inside the current bin: just fold the sample in */
1492                 at->at_hist[0] = max(val, at->at_hist[0]);
1493                 at->at_current = max(val, at->at_current);
1496                 unsigned int maxv = val;
1497                 /* move bins over */
1498                 shift = (now - at->at_binstart) / binlimit;
1500                 for(i = AT_BINS - 1; i >= 0; i--) {
/* slide history right by `shift` bins, recomputing the running max of
 * whatever survives the shift */
1502                                 at->at_hist[i] = at->at_hist[i - shift];
1503                                 maxv = max(maxv, at->at_hist[i]);
1508                 at->at_hist[0] = val;
1509                 at->at_current = maxv;
1510                 at->at_binstart += shift * binlimit;
/* track the all-time worst observation and when it happened */
1513         if (at->at_current > at->at_worst_ever) {
1514                 at->at_worst_ever = at->at_current;
1515                 at->at_worst_time = now;
1518         if (at->at_flags & AT_FLG_NOHIST)
1519                 /* Only keep last reported val; keeping the rest of the history
                     around is counterproductive in this mode. */
1521                 at->at_current = val;
/* clamp the estimate to the administrator-configured bounds */
1524         at->at_current =  min(at->at_current, at_max);
1525         at->at_current =  max(at->at_current, at_min);
1527         if (at->at_current != old)
1528                 CDEBUG(D_OTHER, "AT %p change: old=%u new=%u delta=%d "
1529                        "(val=%u) hist %u %u %u %u\n", at,
1530                        old, at->at_current, at->at_current - old, val,
1531                        at->at_hist[0], at->at_hist[1], at->at_hist[2],
1534         /* if we changed, report the old value */
1535         old = (at->at_current != old) ? old : 0;
1537         spin_unlock(&at->at_lock);
1541 /* Find the imp_at index for a given portal; assign if space available */
1542 int import_at_get_index(struct obd_import *imp, int portal)
1544         struct imp_at *at = &imp->imp_at;
/* First pass is lockless: the common case is that the portal is already
 * registered (a zero entry marks the end of the assigned slots). */
1547         for (i = 0; i < IMP_AT_MAX_PORTALS; i++) {
1548                 if (at->iat_portal[i] == portal)
1550                 if (at->iat_portal[i] == 0)
1555         /* Not found in list, add it under a lock */
1556         spin_lock(&imp->imp_lock);
/* Re-scan from where the lockless pass stopped: another thread may have
 * claimed a slot (possibly for this same portal) in the meantime. */
1558         /* Check unused under lock */
1559         for (; i < IMP_AT_MAX_PORTALS; i++) {
1560                 if (at->iat_portal[i] == portal)
1562                 if (at->iat_portal[i] == 0)
1567         /* Not enough portals? */
1568         LASSERT(i < IMP_AT_MAX_PORTALS);
/* claim the free slot for this portal while still holding imp_lock */
1570         at->iat_portal[i] = portal;
1572         spin_unlock(&imp->imp_lock);
/* NOTE(review): the trailing return of the found/assigned index is not
 * visible in this chunk. */