lustre-release.git: lustre/ptlrpc/import.c (commit 29533524e30dd6980ef53c6d191f168e2635b29e)
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
30  * Use is subject to license terms.
31  *
32  * Copyright (c) 2011, Whamcloud, Inc.
33  */
34 /*
35  * This file is part of Lustre, http://www.lustre.org/
36  * Lustre is a trademark of Sun Microsystems, Inc.
37  *
38  * lustre/ptlrpc/import.c
39  *
40  * Author: Mike Shaver <shaver@clusterfs.com>
41  */
42
43 #define DEBUG_SUBSYSTEM S_RPC
44 #ifndef __KERNEL__
45 # include <liblustre.h>
46 #endif
47
48 #include <obd_support.h>
49 #include <lustre_ha.h>
50 #include <lustre_net.h>
51 #include <lustre_import.h>
52 #include <lustre_export.h>
53 #include <obd.h>
54 #include <obd_cksum.h>
55 #include <obd_class.h>
56
57 #include "ptlrpc_internal.h"
58
59 struct ptlrpc_connect_async_args {
60         __u64 pcaa_peer_committed;
61         int pcaa_initial_connect;
62 };
63
64 /**
65  * Updates the current state of import \a imp to the provided \a state value.
66  * Helper function. Must be called under imp_lock.
67  */
68 static void __import_set_state(struct obd_import *imp,
69                                enum lustre_imp_state state)
70 {
71         imp->imp_state = state;
72         imp->imp_state_hist[imp->imp_state_hist_idx].ish_state = state;
73         imp->imp_state_hist[imp->imp_state_hist_idx].ish_time =
74                 cfs_time_current_sec();
75         imp->imp_state_hist_idx = (imp->imp_state_hist_idx + 1) %
76                 IMP_STATE_HIST_LEN;
77 }
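/*
 * Illustrative sketch of how the state history above could be inspected:
 * imp_state_hist is a fixed-size ring buffer and imp_state_hist_idx points
 * at the slot that will be overwritten next, so once the buffer has wrapped
 * the oldest entry sits at imp_state_hist_idx.  Dumping it oldest-to-newest
 * could look roughly like this:
 *
 *      int i, k;
 *      for (i = 0; i < IMP_STATE_HIST_LEN; i++) {
 *              k = (imp->imp_state_hist_idx + i) % IMP_STATE_HIST_LEN;
 *              CDEBUG(D_HA, "%s at %ld\n",
 *                     ptlrpc_import_state_name(imp->imp_state_hist[k].ish_state),
 *                     (long)imp->imp_state_hist[k].ish_time);
 *      }
 */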
78
79 /* A CLOSED import should remain so. */
80 #define IMPORT_SET_STATE_NOLOCK(imp, state)                                    \
81 do {                                                                           \
82         if (imp->imp_state != LUSTRE_IMP_CLOSED) {                             \
83                CDEBUG(D_HA, "%p %s: changing import state from %s to %s\n",    \
84                       imp, obd2cli_tgt(imp->imp_obd),                          \
85                       ptlrpc_import_state_name(imp->imp_state),                \
86                       ptlrpc_import_state_name(state));                        \
87                __import_set_state(imp, state);                                 \
88         }                                                                      \
89 } while(0)
90
91 #define IMPORT_SET_STATE(imp, state)            \
92 do {                                            \
93         cfs_spin_lock(&imp->imp_lock);          \
94         IMPORT_SET_STATE_NOLOCK(imp, state);    \
95         cfs_spin_unlock(&imp->imp_lock);        \
96 } while(0)
97
98
99 static int ptlrpc_connect_interpret(const struct lu_env *env,
100                                     struct ptlrpc_request *request,
101                                     void * data, int rc);
102 int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
103
104 /* Only this function is allowed to change the import state when it is
105  * CLOSED. I would rather refcount the import and free it after
106  * disconnection like we do with exports. To do that, the client_obd
107  * will need to save the peer info somewhere other than in the import,
108  * though. */
109 int ptlrpc_init_import(struct obd_import *imp)
110 {
111         cfs_spin_lock(&imp->imp_lock);
112
113         imp->imp_generation++;
114         imp->imp_state = LUSTRE_IMP_NEW;
115
116         cfs_spin_unlock(&imp->imp_lock);
117
118         return 0;
119 }
120 EXPORT_SYMBOL(ptlrpc_init_import);
121
122 #define UUID_STR "_UUID"
123 static void deuuidify(char *uuid, const char *prefix, char **uuid_start,
124                       int *uuid_len)
125 {
126         *uuid_start = !prefix || strncmp(uuid, prefix, strlen(prefix))
127                 ? uuid : uuid + strlen(prefix);
128
129         *uuid_len = strlen(*uuid_start);
130
131         if (*uuid_len < strlen(UUID_STR))
132                 return;
133
134         if (!strncmp(*uuid_start + *uuid_len - strlen(UUID_STR),
135                     UUID_STR, strlen(UUID_STR)))
136                 *uuid_len -= strlen(UUID_STR);
137 }
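/*
 * Example (illustrative): for uuid = "lustre-OST0000_UUID" and a NULL
 * prefix, *uuid_start points at the whole string and *uuid_len is trimmed
 * to 14, so "%.*s" consumers such as ptlrpc_set_import_discon() below
 * print just "lustre-OST0000".
 */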
138
139 /**
140  * Returns true if import was FULL, false if import was already not
141  * connected.
142  * @imp - import to be disconnected
143  * @conn_cnt - connection count (epoch) of the request that timed out
144  *             and caused the disconnection.  In some cases, multiple
145  *             inflight requests can fail to a single target (e.g. OST
146  *             bulk requests) and if one has already caused a reconnection
147  *             (increasing the import->conn_cnt) the older failure should
148  *             not also cause a reconnection.  If zero it forces a reconnect.
149  */
150 int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt)
151 {
152         int rc = 0;
153
154         cfs_spin_lock(&imp->imp_lock);
155
156         if (imp->imp_state == LUSTRE_IMP_FULL &&
157             (conn_cnt == 0 || conn_cnt == imp->imp_conn_cnt)) {
158                 char *target_start;
159                 int   target_len;
160
161                 deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
162                           &target_start, &target_len);
163
164                 if (imp->imp_replayable) {
165                         LCONSOLE_WARN("%s: Connection to service %.*s via nid "
166                                "%s was lost; in progress operations using this "
167                                "service will wait for recovery to complete.\n",
168                                imp->imp_obd->obd_name, target_len, target_start,
169                                libcfs_nid2str(imp->imp_connection->c_peer.nid));
170                 } else {
171                         LCONSOLE_ERROR_MSG(0x166, "%s: Connection to service "
172                                "%.*s via nid %s was lost; in progress "
173                                "operations using this service will fail.\n",
174                                imp->imp_obd->obd_name,
175                                target_len, target_start,
176                                libcfs_nid2str(imp->imp_connection->c_peer.nid));
177                 }
178                 ptlrpc_deactivate_timeouts(imp);
179                 IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
180                 cfs_spin_unlock(&imp->imp_lock);
181
182                 if (obd_dump_on_timeout)
183                         libcfs_debug_dumplog();
184
185                 obd_import_event(imp->imp_obd, imp, IMP_EVENT_DISCON);
186                 rc = 1;
187         } else {
188                 cfs_spin_unlock(&imp->imp_lock);
189                 CDEBUG(D_HA, "%s: import %p already %s (conn %u, was %u): %s\n",
190                        imp->imp_client->cli_name, imp,
191                        (imp->imp_state == LUSTRE_IMP_FULL &&
192                         imp->imp_conn_cnt > conn_cnt) ?
193                        "reconnected" : "not connected", imp->imp_conn_cnt,
194                        conn_cnt, ptlrpc_import_state_name(imp->imp_state));
195         }
196
197         return rc;
198 }
199
200 /* Must be called with imp_lock held! */
201 static void ptlrpc_deactivate_and_unlock_import(struct obd_import *imp)
202 {
203         ENTRY;
204         LASSERT_SPIN_LOCKED(&imp->imp_lock);
205
206         CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd));
207         imp->imp_invalid = 1;
208         imp->imp_generation++;
209         cfs_spin_unlock(&imp->imp_lock);
210
211         ptlrpc_abort_inflight(imp);
212         obd_import_event(imp->imp_obd, imp, IMP_EVENT_INACTIVE);
213
214         EXIT;
215 }
216
217 /*
218  * This acts as a barrier; all existing requests are rejected, and
219  * no new requests will be accepted until the import is valid again.
220  */
221 void ptlrpc_deactivate_import(struct obd_import *imp)
222 {
223         cfs_spin_lock(&imp->imp_lock);
224         ptlrpc_deactivate_and_unlock_import(imp);
225 }
226
227 static unsigned int
228 ptlrpc_inflight_deadline(struct ptlrpc_request *req, time_t now)
229 {
230         long dl;
231
232         if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
233               (req->rq_phase == RQ_PHASE_BULK) ||
234               (req->rq_phase == RQ_PHASE_NEW)))
235                 return 0;
236
237         if (req->rq_timedout)
238                 return 0;
239
240         if (req->rq_phase == RQ_PHASE_NEW)
241                 dl = req->rq_sent;
242         else
243                 dl = req->rq_deadline;
244
245         if (dl <= now)
246                 return 0;
247
248         return dl - now;
249 }
250
251 static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp)
252 {
253         time_t now = cfs_time_current_sec();
254         cfs_list_t *tmp, *n;
255         struct ptlrpc_request *req;
256         unsigned int timeout = 0;
257
258         cfs_spin_lock(&imp->imp_lock);
259         cfs_list_for_each_safe(tmp, n, &imp->imp_sending_list) {
260                 req = cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
261                 timeout = max(ptlrpc_inflight_deadline(req, now), timeout);
262         }
263         cfs_spin_unlock(&imp->imp_lock);
264         return timeout;
265 }
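/*
 * Worked example (illustrative): if the furthest deadline on the sending
 * list is 30 seconds away, ptlrpc_inflight_timeout() returns 30 and
 * ptlrpc_invalidate_import() below adds another third (30 + 10 = 40
 * seconds); only if the result is zero does it fall back to obd_timeout.
 */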
266
267 /**
268  * This function will invalidate the import, if necessary, then block
269  * for all the RPC completions, and finally notify the obd to
270  * invalidate its state (i.e. cancel locks, clear pending requests,
271  * etc).
272  */
273 void ptlrpc_invalidate_import(struct obd_import *imp)
274 {
275         cfs_list_t *tmp, *n;
276         struct ptlrpc_request *req;
277         struct l_wait_info lwi;
278         unsigned int timeout;
279         int rc;
280
281         cfs_atomic_inc(&imp->imp_inval_count);
282
283         if (!imp->imp_invalid || imp->imp_obd->obd_no_recov)
284                 ptlrpc_deactivate_import(imp);
285
286         LASSERT(imp->imp_invalid);
287
288         /* Wait forever until inflight == 0. We really can't do it any other
289          * way because in some cases we need to wait for a very long reply
290          * unlink. We can't do anything before that because there is really
291          * no guarantee that some RDMA transfer is not in progress right now. */
292         do {
293                 /* Calculate the max timeout for waiting on RPCs to error
294                  * out. Use obd_timeout if the calculated value is smaller
295                  * than it. */
296                 if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
297                         timeout = ptlrpc_inflight_timeout(imp);
298                         timeout += timeout / 3;
299
300                         if (timeout == 0)
301                                 timeout = obd_timeout;
302                 } else {
303                         /* decrease the interval to widen the race window */
304                         timeout = 1;
305                 }
306
307                 CDEBUG(D_RPCTRACE,"Sleeping %d sec for inflight to error out\n",
308                        timeout);
309
310                 /* Wait for all requests to error out and call completion
311                  * callbacks. Cap it at obd_timeout -- these should all
312                  * have been locally cancelled by ptlrpc_abort_inflight. */
313                 lwi = LWI_TIMEOUT_INTERVAL(
314                         cfs_timeout_cap(cfs_time_seconds(timeout)),
315                         (timeout > 1)?cfs_time_seconds(1):cfs_time_seconds(1)/2,
316                         NULL, NULL);
317                 rc = l_wait_event(imp->imp_recovery_waitq,
318                                   (cfs_atomic_read(&imp->imp_inflight) == 0),
319                                   &lwi);
320                 if (rc) {
321                         const char *cli_tgt = obd2cli_tgt(imp->imp_obd);
322
323                         CERROR("%s: rc = %d waiting for callback (%d != 0)\n",
324                                cli_tgt, rc,
325                                cfs_atomic_read(&imp->imp_inflight));
326
327                         cfs_spin_lock(&imp->imp_lock);
328                         if (cfs_atomic_read(&imp->imp_inflight) == 0) {
329                                 int count = cfs_atomic_read(&imp->imp_unregistering);
330
331                                 /* We know that "unregistering" RPCs can only
332                                  * survive on the sending or delayed lists (they
333                                  * may be waiting for a long reply unlink on
334                                  * sluggish nets). Let's check this. If there
335                                  * are no inflight RPCs and unregistering != 0,
336                                  * this is a bug. */
337                                 LASSERTF(count == 0, "Some RPCs are still "
338                                          "unregistering: %d\n", count);
339
340                                 /* Let's save one loop as soon as inflight has
341                                  * dropped to zero. No new inflights are possible
342                                  * at this point. */
343                                 rc = 0;
344                         } else {
345                                 cfs_list_for_each_safe(tmp, n,
346                                                        &imp->imp_sending_list) {
347                                         req = cfs_list_entry(tmp,
348                                                              struct ptlrpc_request,
349                                                              rq_list);
350                                         DEBUG_REQ(D_ERROR, req,
351                                                   "still on sending list");
352                                 }
353                                 cfs_list_for_each_safe(tmp, n,
354                                                        &imp->imp_delayed_list) {
355                                         req = cfs_list_entry(tmp,
356                                                              struct ptlrpc_request,
357                                                              rq_list);
358                                         DEBUG_REQ(D_ERROR, req,
359                                                   "still on delayed list");
360                                 }
361
362                                 CERROR("%s: RPCs in \"%s\" phase found (%d). "
363                                        "Network is sluggish? Waiting for them "
364                                        "to error out.\n", cli_tgt,
365                                        ptlrpc_phase2str(RQ_PHASE_UNREGISTERING),
366                                        cfs_atomic_read(&imp->
367                                                        imp_unregistering));
368                         }
369                         cfs_spin_unlock(&imp->imp_lock);
370                 }
371         } while (rc != 0);
372
373         /*
374          * Let's additionally check that no new RPCs were added to the import
375          * while it was in the "invalidate" state.
376          */
377         LASSERT(cfs_atomic_read(&imp->imp_inflight) == 0);
378         obd_import_event(imp->imp_obd, imp, IMP_EVENT_INVALIDATE);
379         sptlrpc_import_flush_all_ctx(imp);
380
381         cfs_atomic_dec(&imp->imp_inval_count);
382         cfs_waitq_broadcast(&imp->imp_recovery_waitq);
383 }
384
385 /* unset imp_invalid */
386 void ptlrpc_activate_import(struct obd_import *imp)
387 {
388         struct obd_device *obd = imp->imp_obd;
389
390         cfs_spin_lock(&imp->imp_lock);
391         imp->imp_invalid = 0;
392         ptlrpc_activate_timeouts(imp);
393         cfs_spin_unlock(&imp->imp_lock);
394         obd_import_event(obd, imp, IMP_EVENT_ACTIVE);
395 }
396
397 void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt)
398 {
399         ENTRY;
400
401         LASSERT(!imp->imp_dlm_fake);
402
403         if (ptlrpc_set_import_discon(imp, conn_cnt)) {
404                 if (!imp->imp_replayable) {
405                         CDEBUG(D_HA, "import %s@%s for %s not replayable, "
406                                "auto-deactivating\n",
407                                obd2cli_tgt(imp->imp_obd),
408                                imp->imp_connection->c_remote_uuid.uuid,
409                                imp->imp_obd->obd_name);
410                         ptlrpc_deactivate_import(imp);
411                 }
412
413                 CDEBUG(D_HA, "%s: waking up pinger\n",
414                        obd2cli_tgt(imp->imp_obd));
415
416                 cfs_spin_lock(&imp->imp_lock);
417                 imp->imp_force_verify = 1;
418                 cfs_spin_unlock(&imp->imp_lock);
419
420                 ptlrpc_pinger_wake_up();
421         }
422         EXIT;
423 }
424
425 int ptlrpc_reconnect_import(struct obd_import *imp)
426 {
427         ptlrpc_set_import_discon(imp, 0);
428         /* Force a new connect attempt */
429         ptlrpc_invalidate_import(imp);
430         /* Do a fresh connect next time by zeroing the handle */
431         ptlrpc_disconnect_import(imp, 1);
432         /* Wait for all invalidate calls to finish */
433         if (cfs_atomic_read(&imp->imp_inval_count) > 0) {
434                 int rc;
435                 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
436                 rc = l_wait_event(imp->imp_recovery_waitq,
437                                   (cfs_atomic_read(&imp->imp_inval_count) == 0),
438                                   &lwi);
439                 if (rc)
440                         CERROR("Interrupted, inval=%d\n",
441                                cfs_atomic_read(&imp->imp_inval_count));
442         }
443
444         /* Allow reconnect attempts */
445         imp->imp_obd->obd_no_recov = 0;
446         /* Remove 'invalid' flag */
447         ptlrpc_activate_import(imp);
448         /* Attempt a new connect */
449         ptlrpc_recover_import(imp, NULL, 0);
450         return 0;
451 }
452 EXPORT_SYMBOL(ptlrpc_reconnect_import);
453
454 /**
455  * Connection on import \a imp is changed to another one (if more than one is
456  * present). We typically choose the connection that we have not tried to
457  * connect to for the longest time.
458  */
459 static int import_select_connection(struct obd_import *imp)
460 {
461         struct obd_import_conn *imp_conn = NULL, *conn;
462         struct obd_export *dlmexp;
463         int tried_all = 1;
464         ENTRY;
465
466         cfs_spin_lock(&imp->imp_lock);
467
468         if (cfs_list_empty(&imp->imp_conn_list)) {
469                 CERROR("%s: no connections available\n",
470                         imp->imp_obd->obd_name);
471                 cfs_spin_unlock(&imp->imp_lock);
472                 RETURN(-EINVAL);
473         }
474
475         cfs_list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
476                 CDEBUG(D_HA, "%s: connect to NID %s last attempt "LPU64"\n",
477                        imp->imp_obd->obd_name,
478                        libcfs_nid2str(conn->oic_conn->c_peer.nid),
479                        conn->oic_last_attempt);
480
481                 /* If we have not tried this connection since
482                    the last successful attempt, go with this one */
483                 if ((conn->oic_last_attempt == 0) ||
484                     cfs_time_beforeq_64(conn->oic_last_attempt,
485                                        imp->imp_last_success_conn)) {
486                         imp_conn = conn;
487                         tried_all = 0;
488                         break;
489                 }
490
491                 /* If all of the connections have already been tried
492                    since the last successful connection, just choose the
493                    least recently used one */
494                 if (!imp_conn)
495                         imp_conn = conn;
496                 else if (cfs_time_before_64(conn->oic_last_attempt,
497                                             imp_conn->oic_last_attempt))
498                         imp_conn = conn;
499         }
500
501         /* if not found, simply choose the current one */
502         if (!imp_conn || imp->imp_force_reconnect) {
503                 LASSERT(imp->imp_conn_current);
504                 imp_conn = imp->imp_conn_current;
505                 tried_all = 0;
506         }
507         LASSERT(imp_conn->oic_conn);
508
509         /* If we've tried everything, and we're back to the beginning of the
510            list, increase our timeout and try again. It will be reset when
511            we do finally connect. (FIXME: really we should wait for all network
512            state associated with the last connection attempt to drain before
513            trying to reconnect on it.) */
514         if (tried_all && (imp->imp_conn_list.next == &imp_conn->oic_item)) {
515                 if (at_get(&imp->imp_at.iat_net_latency) <
516                     CONNECTION_SWITCH_MAX) {
517                         at_measured(&imp->imp_at.iat_net_latency,
518                                     at_get(&imp->imp_at.iat_net_latency) +
519                                     CONNECTION_SWITCH_INC);
520                 }
521                 LASSERT(imp_conn->oic_last_attempt);
522                 CWARN("%s: tried all connections, increasing latency to %ds\n",
523                       imp->imp_obd->obd_name,
524                       at_get(&imp->imp_at.iat_net_latency));
525         }
526
527         imp_conn->oic_last_attempt = cfs_time_current_64();
528
529         /* switch connection; don't mind if it's the same as the current one */
530         if (imp->imp_connection)
531                 ptlrpc_connection_put(imp->imp_connection);
532         imp->imp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
533
534         dlmexp = class_conn2export(&imp->imp_dlm_handle);
535         LASSERT(dlmexp != NULL);
536         if (dlmexp->exp_connection)
537                 ptlrpc_connection_put(dlmexp->exp_connection);
538         dlmexp->exp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
539         class_export_put(dlmexp);
540
541         if (imp->imp_conn_current != imp_conn) {
542                 if (imp->imp_conn_current)
543                         CDEBUG(D_HA, "Changing connection for %s to %s/%s\n",
544                                imp->imp_obd->obd_name, imp_conn->oic_uuid.uuid,
545                                libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
546                 imp->imp_conn_current = imp_conn;
547         }
548
549         CDEBUG(D_HA, "%s: import %p using connection %s/%s\n",
550                imp->imp_obd->obd_name, imp, imp_conn->oic_uuid.uuid,
551                libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
552
553         cfs_spin_unlock(&imp->imp_lock);
554
555         RETURN(0);
556 }
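/*
 * Selection policy in brief (illustrative): a connection whose
 * oic_last_attempt is zero or not newer than imp_last_success_conn is
 * taken immediately; if every entry has been tried since the last
 * successful connect, the least recently attempted one is used instead,
 * and once the whole list has been cycled the AT net latency is bumped
 * by CONNECTION_SWITCH_INC up to CONNECTION_SWITCH_MAX.
 */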
557
558 /*
559  * must be called under imp_lock
560  */
561 static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno)
562 {
563         struct ptlrpc_request *req;
564         cfs_list_t *tmp;
565
566         if (cfs_list_empty(&imp->imp_replay_list))
567                 return 0;
568         tmp = imp->imp_replay_list.next;
569         req = cfs_list_entry(tmp, struct ptlrpc_request, rq_replay_list);
570         *transno = req->rq_transno;
571         if (req->rq_transno == 0) {
572                 DEBUG_REQ(D_ERROR, req, "zero transno in replay");
573                 LBUG();
574         }
575
576         return 1;
577 }
578
579 /**
580  * Attempt to (re)connect import \a imp. This includes all preparations,
581  * initializing CONNECT RPC request and passing it to ptlrpcd for
582  * actual sending.
583  * Returns 0 on success or error code.
584  */
585 int ptlrpc_connect_import(struct obd_import *imp)
586 {
587         struct obd_device *obd = imp->imp_obd;
588         int initial_connect = 0;
589         int set_transno = 0;
590         __u64 committed_before_reconnect = 0;
591         struct ptlrpc_request *request;
592         char *bufs[] = { NULL,
593                          obd2cli_tgt(imp->imp_obd),
594                          obd->obd_uuid.uuid,
595                          (char *)&imp->imp_dlm_handle,
596                          (char *)&imp->imp_connect_data };
597         struct ptlrpc_connect_async_args *aa;
598         int rc;
599         ENTRY;
600
601         cfs_spin_lock(&imp->imp_lock);
602         if (imp->imp_state == LUSTRE_IMP_CLOSED) {
603                 cfs_spin_unlock(&imp->imp_lock);
604                 CERROR("can't connect to a closed import\n");
605                 RETURN(-EINVAL);
606         } else if (imp->imp_state == LUSTRE_IMP_FULL) {
607                 cfs_spin_unlock(&imp->imp_lock);
608                 CERROR("already connected\n");
609                 RETURN(0);
610         } else if (imp->imp_state == LUSTRE_IMP_CONNECTING) {
611                 cfs_spin_unlock(&imp->imp_lock);
612                 CERROR("already connecting\n");
613                 RETURN(-EALREADY);
614         }
615
616         IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CONNECTING);
617
618         imp->imp_conn_cnt++;
619         imp->imp_resend_replay = 0;
620
621         if (!lustre_handle_is_used(&imp->imp_remote_handle))
622                 initial_connect = 1;
623         else
624                 committed_before_reconnect = imp->imp_peer_committed_transno;
625
626         set_transno = ptlrpc_first_transno(imp,
627                                            &imp->imp_connect_data.ocd_transno);
628         cfs_spin_unlock(&imp->imp_lock);
629
630         rc = import_select_connection(imp);
631         if (rc)
632                 GOTO(out, rc);
633
634         rc = sptlrpc_import_sec_adapt(imp, NULL, 0);
635         if (rc)
636                 GOTO(out, rc);
637
638         /* Reset the connect flags to the originally requested flags, so that
639          * if the server is updated on the fly we will get the new features. */
640         imp->imp_connect_data.ocd_connect_flags = imp->imp_connect_flags_orig;
641         imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
642         imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18;
643
644         rc = obd_reconnect(NULL, imp->imp_obd->obd_self_export, obd,
645                            &obd->obd_uuid, &imp->imp_connect_data, NULL);
646         if (rc)
647                 GOTO(out, rc);
648
649         request = ptlrpc_request_alloc(imp, &RQF_MDS_CONNECT);
650         if (request == NULL)
651                 GOTO(out, rc = -ENOMEM);
652
653         rc = ptlrpc_request_bufs_pack(request, LUSTRE_OBD_VERSION,
654                                       imp->imp_connect_op, bufs, NULL);
655         if (rc) {
656                 ptlrpc_request_free(request);
657                 GOTO(out, rc);
658         }
659
660         /* Report the rpc service time to the server so that it knows how long
661          * to wait for clients to join recovery */
662         lustre_msg_set_service_time(request->rq_reqmsg,
663                                     at_timeout2est(request->rq_timeout));
664
665         /* The amount of time we give the server to process the connect req.
666          * import_select_connection will increase the net latency on
667          * repeated reconnect attempts to cover slow networks.
668          * We override/ignore the server rpc completion estimate here,
669          * which may be large if this is a reconnect attempt */
670         request->rq_timeout = INITIAL_CONNECT_TIMEOUT;
671         lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout);
672
673 #ifndef __KERNEL__
674         lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_LIBCLIENT);
675 #endif
676         lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_NEXT_VER);
677
678         request->rq_no_resend = request->rq_no_delay = 1;
679         request->rq_send_state = LUSTRE_IMP_CONNECTING;
680         /* Allow a slightly larger reply for future growth compatibility */
681         req_capsule_set_size(&request->rq_pill, &RMF_CONNECT_DATA, RCL_SERVER,
682                              sizeof(struct obd_connect_data)+16*sizeof(__u64));
683         ptlrpc_request_set_replen(request);
684         request->rq_interpret_reply = ptlrpc_connect_interpret;
685
686         CLASSERT(sizeof (*aa) <= sizeof (request->rq_async_args));
687         aa = ptlrpc_req_async_args(request);
688         memset(aa, 0, sizeof *aa);
689
690         aa->pcaa_peer_committed = committed_before_reconnect;
691         aa->pcaa_initial_connect = initial_connect;
692
693         if (aa->pcaa_initial_connect) {
694                 cfs_spin_lock(&imp->imp_lock);
695                 imp->imp_replayable = 1;
696                 cfs_spin_unlock(&imp->imp_lock);
697                 lustre_msg_add_op_flags(request->rq_reqmsg,
698                                         MSG_CONNECT_INITIAL);
699         }
700
701         if (set_transno)
702                 lustre_msg_add_op_flags(request->rq_reqmsg,
703                                         MSG_CONNECT_TRANSNO);
704
705         DEBUG_REQ(D_RPCTRACE, request, "(re)connect request (timeout %d)",
706                   request->rq_timeout);
707         ptlrpcd_add_req(request, PDL_POLICY_ROUND, -1);
708         rc = 0;
709 out:
710         if (rc != 0) {
711                 IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
712         }
713
714         RETURN(rc);
715 }
716 EXPORT_SYMBOL(ptlrpc_connect_import);
717
718 static void ptlrpc_maybe_ping_import_soon(struct obd_import *imp)
719 {
720 #ifdef __KERNEL__
721         int force_verify;
722
723         cfs_spin_lock(&imp->imp_lock);
724         force_verify = imp->imp_force_verify != 0;
725         cfs_spin_unlock(&imp->imp_lock);
726
727         if (force_verify)
728                 ptlrpc_pinger_wake_up();
729 #else
730         /* liblustre has no pinger thread, so we wake up the pinger here anyway */
731         ptlrpc_pinger_wake_up();
732 #endif
733 }
734
735 static int ptlrpc_busy_reconnect(int rc)
736 {
737         return (rc == -EBUSY) || (rc == -EAGAIN);
738 }
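/*
 * A connect rc of -EBUSY or -EAGAIN means the export is busy rather than
 * gone: ptlrpc_connect_interpret() below stores this in
 * imp_force_reconnect, so import_select_connection() keeps the current
 * connection instead of switching to a new target.
 */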
739
740 /**
741  * interpret_reply callback for connect RPCs.
742  * Looks into the returned status of the connect operation and decides
743  * what to do with the import - i.e. enter recovery, promote it to
744  * full state for normal operation, or disconnect it due to an error.
745  */
746 static int ptlrpc_connect_interpret(const struct lu_env *env,
747                                     struct ptlrpc_request *request,
748                                     void *data, int rc)
749 {
750         struct ptlrpc_connect_async_args *aa = data;
751         struct obd_import *imp = request->rq_import;
752         struct client_obd *cli = &imp->imp_obd->u.cli;
753         struct lustre_handle old_hdl;
754         __u64 old_connect_flags;
755         int msg_flags;
756         ENTRY;
757
758         cfs_spin_lock(&imp->imp_lock);
759         if (imp->imp_state == LUSTRE_IMP_CLOSED) {
760                 cfs_spin_unlock(&imp->imp_lock);
761                 RETURN(0);
762         }
763
764         if (rc) {
765                 /* If this is a reconnect to a busy export, there is no need to
766                  * select a new target for connecting. */
767                 imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc);
768                 cfs_spin_unlock(&imp->imp_lock);
769                 GOTO(out, rc);
770         }
771
772         LASSERT(imp->imp_conn_current);
773
774         msg_flags = lustre_msg_get_op_flags(request->rq_repmsg);
775
776         /* All imports are pingable */
777         imp->imp_pingable = 1;
778         imp->imp_force_reconnect = 0;
779         imp->imp_force_verify = 0;
780
781         if (aa->pcaa_initial_connect) {
782                 if (msg_flags & MSG_CONNECT_REPLAYABLE) {
783                         imp->imp_replayable = 1;
784                         cfs_spin_unlock(&imp->imp_lock);
785                         CDEBUG(D_HA, "connected to replayable target: %s\n",
786                                obd2cli_tgt(imp->imp_obd));
787                 } else {
788                         imp->imp_replayable = 0;
789                         cfs_spin_unlock(&imp->imp_lock);
790                 }
791
792                 /* if applicable, adjust the imp->imp_msg_magic here
793                  * according to the reply flags */
794
795                 imp->imp_remote_handle =
796                                 *lustre_msg_get_handle(request->rq_repmsg);
797
798                 /* Initial connects are allowed for clients with non-random
799                  * uuids when servers are in recovery.  Simply signal the
800                  * server that replay is complete and wait in REPLAY_WAIT. */
801                 if (msg_flags & MSG_CONNECT_RECOVERING) {
802                         CDEBUG(D_HA, "connect to %s during recovery\n",
803                                obd2cli_tgt(imp->imp_obd));
804                         IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS);
805                 } else {
806                         IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL);
807                         ptlrpc_activate_import(imp);
808                 }
809
810                 GOTO(finish, rc = 0);
811         } else {
812                 cfs_spin_unlock(&imp->imp_lock);
813         }
814
815         /* Determine what recovery state to move the import to. */
816         if (MSG_CONNECT_RECONNECT & msg_flags) {
817                 memset(&old_hdl, 0, sizeof(old_hdl));
818                 if (!memcmp(&old_hdl, lustre_msg_get_handle(request->rq_repmsg),
819                             sizeof (old_hdl))) {
820                         CERROR("%s@%s didn't like our handle "LPX64
821                                ", failed\n", obd2cli_tgt(imp->imp_obd),
822                                imp->imp_connection->c_remote_uuid.uuid,
823                                imp->imp_dlm_handle.cookie);
824                         GOTO(out, rc = -ENOTCONN);
825                 }
826
827                 if (memcmp(&imp->imp_remote_handle,
828                            lustre_msg_get_handle(request->rq_repmsg),
829                            sizeof(imp->imp_remote_handle))) {
830                         int level = msg_flags & MSG_CONNECT_RECOVERING ?
831                                 D_HA : D_WARNING;
832
833                         /* Bug 16611/14775: if the server handle has changed,
834                          * that means some sort of disconnection happened.
835                          * If the server is not in recovery, that also means it
836                          * already erased all of our state because of a previous
837                          * eviction. If it is in recovery, we are safe to
838                          * participate since we can re-establish all of our state
839                          * with the server again. */
840                         CDEBUG(level,"%s@%s changed server handle from "
841                                      LPX64" to "LPX64"%s\n",
842                                      obd2cli_tgt(imp->imp_obd),
843                                      imp->imp_connection->c_remote_uuid.uuid,
844                                      imp->imp_remote_handle.cookie,
845                                      lustre_msg_get_handle(request->rq_repmsg)->
846                                                                         cookie,
847                                      (MSG_CONNECT_RECOVERING & msg_flags) ?
848                                          " but is still in recovery" : "");
849
850                         imp->imp_remote_handle =
851                                      *lustre_msg_get_handle(request->rq_repmsg);
852
853                         if (!(MSG_CONNECT_RECOVERING & msg_flags)) {
854                                 IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
855                                 GOTO(finish, rc = 0);
856                         }
857
858                 } else {
859                         CDEBUG(D_HA, "reconnected to %s@%s after partition\n",
860                                obd2cli_tgt(imp->imp_obd),
861                                imp->imp_connection->c_remote_uuid.uuid);
862                 }
863
864                 if (imp->imp_invalid) {
865                         CDEBUG(D_HA, "%s: reconnected but import is invalid; "
866                                "marking evicted\n", imp->imp_obd->obd_name);
867                         IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
868                 } else if (MSG_CONNECT_RECOVERING & msg_flags) {
869                         CDEBUG(D_HA, "%s: reconnected to %s during replay\n",
870                                imp->imp_obd->obd_name,
871                                obd2cli_tgt(imp->imp_obd));
872
873                         cfs_spin_lock(&imp->imp_lock);
874                         imp->imp_resend_replay = 1;
875                         cfs_spin_unlock(&imp->imp_lock);
876
877                         IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
878                 } else {
879                         IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
880                 }
881         } else if ((MSG_CONNECT_RECOVERING & msg_flags) && !imp->imp_invalid) {
882                 LASSERT(imp->imp_replayable);
883                 imp->imp_remote_handle =
884                                 *lustre_msg_get_handle(request->rq_repmsg);
885                 imp->imp_last_replay_transno = 0;
886                 IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
887         } else {
888                 DEBUG_REQ(D_HA, request, "%s: evicting (reconnect/recover flags"
889                           " not set: %x)", imp->imp_obd->obd_name, msg_flags);
890                 imp->imp_remote_handle =
891                                 *lustre_msg_get_handle(request->rq_repmsg);
892                 IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
893         }
894
895         /* Sanity checks for a reconnected import. */
896         if (!(imp->imp_replayable) != !(msg_flags & MSG_CONNECT_REPLAYABLE)) {
897                 CERROR("imp_replayable flag does not match server "
898                        "after reconnect. We should LBUG right here.\n");
899         }
900
901         if (lustre_msg_get_last_committed(request->rq_repmsg) > 0 &&
902             lustre_msg_get_last_committed(request->rq_repmsg) <
903             aa->pcaa_peer_committed) {
904                 CERROR("%s went back in time (transno "LPD64
905                        " was previously committed, server now claims "LPD64
906                        ")!  See https://bugzilla.lustre.org/show_bug.cgi?"
907                        "id=9646\n",
908                        obd2cli_tgt(imp->imp_obd), aa->pcaa_peer_committed,
909                        lustre_msg_get_last_committed(request->rq_repmsg));
910         }
911
912 finish:
913         rc = ptlrpc_import_recovery_state_machine(imp);
914         if (rc != 0) {
915                 if (rc == -ENOTCONN) {
916                         CDEBUG(D_HA, "evicted/aborted by %s@%s during recovery; "
917                                "invalidating and reconnecting\n",
918                                obd2cli_tgt(imp->imp_obd),
919                                imp->imp_connection->c_remote_uuid.uuid);
920                         ptlrpc_connect_import(imp);
921                         RETURN(0);
922                 }
923         } else {
924                 struct obd_connect_data *ocd;
925                 struct obd_export *exp;
926                 int ret;
927                 ret = req_capsule_get_size(&request->rq_pill, &RMF_CONNECT_DATA,
928                                            RCL_SERVER);
929                 /* the obd_connect_data replied by the server is always bigger */
930                 ocd = req_capsule_server_sized_get(&request->rq_pill,
931                                                    &RMF_CONNECT_DATA, ret);
932
933                 cfs_spin_lock(&imp->imp_lock);
934                 cfs_list_del(&imp->imp_conn_current->oic_item);
935                 cfs_list_add(&imp->imp_conn_current->oic_item,
936                              &imp->imp_conn_list);
937                 imp->imp_last_success_conn =
938                         imp->imp_conn_current->oic_last_attempt;
939
940                 if (ocd == NULL) {
941                         cfs_spin_unlock(&imp->imp_lock);
942                         CERROR("Wrong connect data from server\n");
943                         rc = -EPROTO;
944                         GOTO(out, rc);
945                 }
946
947                 imp->imp_connect_data = *ocd;
948                 CDEBUG(D_HA, "obd %s to target with inst %u\n",
949                        imp->imp_obd->obd_name, ocd->ocd_instance);
950
951                 exp = class_conn2export(&imp->imp_dlm_handle);
952                 cfs_spin_unlock(&imp->imp_lock);
953
954                 /* check that the server granted a subset of the flags we asked for. */
955                 LASSERTF((ocd->ocd_connect_flags &
956                           imp->imp_connect_flags_orig) ==
957                          ocd->ocd_connect_flags, LPX64" != "LPX64,
958                          imp->imp_connect_flags_orig, ocd->ocd_connect_flags);
959
960                 if (!exp) {
961                         /* This could happen if the export is cleaned up during
962                            the connect attempt */
963                         CERROR("Missing export for %s\n",
964                                imp->imp_obd->obd_name);
965                         GOTO(out, rc = -ENODEV);
966                 }
967                 old_connect_flags = exp->exp_connect_flags;
968                 exp->exp_connect_flags = ocd->ocd_connect_flags;
969                 imp->imp_obd->obd_self_export->exp_connect_flags =
970                                                         ocd->ocd_connect_flags;
971                 class_export_put(exp);
972
973                 obd_import_event(imp->imp_obd, imp, IMP_EVENT_OCD);
974
975                 if (!ocd->ocd_ibits_known &&
976                     ocd->ocd_connect_flags & OBD_CONNECT_IBITS)
977                         CERROR("Inodebits aware server returned zero compatible"
978                                " bits?\n");
979
980                 if ((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
981                     (ocd->ocd_version > LUSTRE_VERSION_CODE +
982                                         LUSTRE_VERSION_OFFSET_WARN ||
983                      ocd->ocd_version < LUSTRE_VERSION_CODE -
984                                         LUSTRE_VERSION_OFFSET_WARN)) {
985                         /* Sigh, some compilers do not like #ifdef in the middle
986                            of macro arguments */
987 #ifdef __KERNEL__
988                         const char *older = "older. Consider upgrading server "
989                                             "or downgrading client";
990 #else
991                         const char *older = "older. Consider recompiling this "
992                                             "application";
993 #endif
994                         const char *newer = "newer than client version. "
995                                             "Consider upgrading client";
996
997                         LCONSOLE_WARN("Server %s version (%d.%d.%d.%d) "
998                                       "is much %s (%s)\n",
999                                       obd2cli_tgt(imp->imp_obd),
1000                                       OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
1001                                       OBD_OCD_VERSION_MINOR(ocd->ocd_version),
1002                                       OBD_OCD_VERSION_PATCH(ocd->ocd_version),
1003                                       OBD_OCD_VERSION_FIX(ocd->ocd_version),
1004                                       ocd->ocd_version > LUSTRE_VERSION_CODE ?
1005                                       newer : older, LUSTRE_VERSION_STRING);
1006                 }
1007
1008                 if (ocd->ocd_connect_flags & OBD_CONNECT_CKSUM) {
1009                         /* We sent the server ocd_cksum_types with bits set
1010                          * for the algorithms we understand. The server masked
1011                          * off the checksum types it doesn't support. */
1012                         if ((ocd->ocd_cksum_types & cksum_types_supported()) == 0) {
1013                                 LCONSOLE_WARN("The negotiation of the checksum "
1014                                               "algorithm to use with server %s "
1015                                               "failed (%x/%x), disabling "
1016                                               "checksums\n",
1017                                               obd2cli_tgt(imp->imp_obd),
1018                                               ocd->ocd_cksum_types,
1019                                               cksum_types_supported());
1020                                 cli->cl_checksum = 0;
1021                                 cli->cl_supp_cksum_types = OBD_CKSUM_CRC32;
1022                         } else {
1023                                 cli->cl_supp_cksum_types = ocd->ocd_cksum_types;
1024                         }
1025                 } else {
1026                         /* The server does not support OBD_CONNECT_CKSUM.
1027                          * Enforce CRC32 for backward compatibility. */
1028                         cli->cl_supp_cksum_types = OBD_CKSUM_CRC32;
1029                 }
1030                 cli->cl_cksum_type = cksum_type_select(cli->cl_supp_cksum_types);
1031
1032                 if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
1033                         cli->cl_max_pages_per_rpc =
1034                                 ocd->ocd_brw_size >> CFS_PAGE_SHIFT;
1035                 else if (imp->imp_connect_op == MDS_CONNECT ||
1036                          imp->imp_connect_op == MGS_CONNECT)
1037                         cli->cl_max_pages_per_rpc = 1;
1038
1039                 /* Reset ns_connect_flags only for the initial connect. It might
1040                  * be changed while the filesystem is in use, and resetting it on
1041                  * reconnect would lose user settings made earlier, such as
1042                  * disabling lru_resize, etc. */
1043                 if (old_connect_flags != exp->exp_connect_flags ||
1044                     aa->pcaa_initial_connect) {
1045                         CDEBUG(D_HA, "%s: Resetting ns_connect_flags to server "
1046                                "flags: "LPX64"\n", imp->imp_obd->obd_name,
1047                               ocd->ocd_connect_flags);
1048                         imp->imp_obd->obd_namespace->ns_connect_flags =
1049                                 ocd->ocd_connect_flags;
1050                         imp->imp_obd->obd_namespace->ns_orig_connect_flags =
1051                                 ocd->ocd_connect_flags;
1052                 }
1053
1054                 if ((ocd->ocd_connect_flags & OBD_CONNECT_AT) &&
1055                     (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
1056                         /* We need a per-message support flag, because
1057                            a. we don't know if the incoming connect reply
1058                               supports AT or not (in reply_in_callback)
1059                               until we unpack it.
1060                            b. failovered server means export and flags are gone
1061                               (in ptlrpc_send_reply).
1062                            Can only be set when we know AT is supported at
1063                            both ends */
1064                         imp->imp_msghdr_flags |= MSGHDR_AT_SUPPORT;
1065                 else
1066                         imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
1067
1068                 if ((ocd->ocd_connect_flags & OBD_CONNECT_FULL20) &&
1069                     (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
1070                         imp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18;
1071                 else
1072                         imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18;
1073
1074                 LASSERT((cli->cl_max_pages_per_rpc <= PTLRPC_MAX_BRW_PAGES) &&
1075                         (cli->cl_max_pages_per_rpc > 0));
1076         }
1077
1078 out:
1079         if (rc != 0) {
1080                 IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
1081                 if (rc == -EACCES) {
1082                         /*
1083                          * Give up trying to reconnect
1084                          * EACCES means client has no permission for connection
1085                          */
1086                         imp->imp_obd->obd_no_recov = 1;
1087                         ptlrpc_deactivate_import(imp);
1088                 }
1089
1090                 if (rc == -EPROTO) {
1091                         struct obd_connect_data *ocd;
1092
1093                         /* reply message might not be ready */
1094                         if (request->rq_repmsg == NULL)
1095                                 RETURN(-EPROTO);
1096
1097                         ocd = req_capsule_server_get(&request->rq_pill,
1098                                                      &RMF_CONNECT_DATA);
1099                         if (ocd &&
1100                             (ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
1101                             (ocd->ocd_version != LUSTRE_VERSION_CODE)) {
1102                            /* Actually servers are only supposed to refuse
1103                               connection from liblustre clients, so we should
1104                               never see this from VFS context */
1105                                 LCONSOLE_ERROR_MSG(0x16a, "Server %s version "
1106                                         "(%d.%d.%d.%d)"
1107                                         " refused connection from this client "
1108                                         "with an incompatible version (%s).  "
1109                                         "Client must be recompiled\n",
1110                                         obd2cli_tgt(imp->imp_obd),
1111                                         OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
1112                                         OBD_OCD_VERSION_MINOR(ocd->ocd_version),
1113                                         OBD_OCD_VERSION_PATCH(ocd->ocd_version),
1114                                         OBD_OCD_VERSION_FIX(ocd->ocd_version),
1115                                         LUSTRE_VERSION_STRING);
1116                                 ptlrpc_deactivate_import(imp);
1117                                 IMPORT_SET_STATE(imp, LUSTRE_IMP_CLOSED);
1118                         }
1119                         RETURN(-EPROTO);
1120                 }
1121
1122                 ptlrpc_maybe_ping_import_soon(imp);
1123
1124                 CDEBUG(D_HA, "recovery of %s on %s failed (%d)\n",
1125                        obd2cli_tgt(imp->imp_obd),
1126                        (char *)imp->imp_connection->c_remote_uuid.uuid, rc);
1127         }
1128
1129         cfs_waitq_broadcast(&imp->imp_recovery_waitq);
1130         RETURN(rc);
1131 }
1132
1133 /**
1134  * interpret callback for "completed replay" RPCs.
1135  * \see signal_completed_replay
1136  */
1137 static int completed_replay_interpret(const struct lu_env *env,
1138                                       struct ptlrpc_request *req,
1139                                       void * data, int rc)
1140 {
1141         ENTRY;
1142         cfs_atomic_dec(&req->rq_import->imp_replay_inflight);
1143         if (req->rq_status == 0 &&
1144             !req->rq_import->imp_vbr_failed) {
1145                 ptlrpc_import_recovery_state_machine(req->rq_import);
1146         } else {
1147                 if (req->rq_import->imp_vbr_failed) {
1148                         CDEBUG(D_WARNING,
1149                                "%s: version recovery fails, reconnecting\n",
1150                                req->rq_import->imp_obd->obd_name);
1151                 } else {
1152                         CDEBUG(D_HA, "%s: LAST_REPLAY message error: %d, "
1153                                      "reconnecting\n",
1154                                req->rq_import->imp_obd->obd_name,
1155                                req->rq_status);
1156                 }
1157                 ptlrpc_connect_import(req->rq_import);
1158         }
1159
1160         RETURN(0);
1161 }
1162
1163 /**
1164  * Let server know that we have no requests to replay anymore.
1165  * Achieved by just sending a PING request
1166  */
1167 static int signal_completed_replay(struct obd_import *imp)
1168 {
1169         struct ptlrpc_request *req;
1170         ENTRY;
1171
1172         if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_FINISH_REPLAY)))
1173                 RETURN(0);
1174
1175         LASSERT(cfs_atomic_read(&imp->imp_replay_inflight) == 0);
1176         cfs_atomic_inc(&imp->imp_replay_inflight);
1177
1178         req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING, LUSTRE_OBD_VERSION,
1179                                         OBD_PING);
1180         if (req == NULL) {
1181                 cfs_atomic_dec(&imp->imp_replay_inflight);
1182                 RETURN(-ENOMEM);
1183         }
1184
1185         ptlrpc_request_set_replen(req);
1186         req->rq_send_state = LUSTRE_IMP_REPLAY_WAIT;
1187         lustre_msg_add_flags(req->rq_reqmsg,
1188                              MSG_LOCK_REPLAY_DONE | MSG_REQ_REPLAY_DONE);
1189         if (AT_OFF)
1190                 req->rq_timeout *= 3;
1191         req->rq_interpret_reply = completed_replay_interpret;
1192
1193         ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
1194         RETURN(0);
1195 }
1196
1197 #ifdef __KERNEL__
1198 /**
1199  * In kernel code all import invalidation happens in its own
1200  * separate thread, so that whatever application happened to encounter
1201  * a problem can still be killed or otherwise continue.
1202  */
1203 static int ptlrpc_invalidate_import_thread(void *data)
1204 {
1205         struct obd_import *imp = data;
1206
1207         ENTRY;
1208
1209         cfs_daemonize_ctxt("ll_imp_inval");
1210
1211         CDEBUG(D_HA, "thread invalidate import %s to %s@%s\n",
1212                imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
1213                imp->imp_connection->c_remote_uuid.uuid);
1214
1215         ptlrpc_invalidate_import(imp);
1216
1217         if (obd_dump_on_eviction) {
1218                 CERROR("dump the log upon eviction\n");
1219                 libcfs_debug_dumplog();
1220         }
1221
1222         IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
1223         ptlrpc_import_recovery_state_machine(imp);
1224
1225         class_import_put(imp);
1226         RETURN(0);
1227 }
1228 #endif
1229
1230 /**
1231  * This is the state machine for client-side recovery on import.
1232  *
1233  * Typically we have two possible paths. If we came to a server that is not
1234  * in recovery, we just enter the IMP_EVICTED state, invalidate our import
1235  * state and reconnect from scratch.
1236  * If we came to a server that is in recovery, we enter the IMP_REPLAY import
1237  * state. We go through our list of requests to replay and send them to the
1238  * server one by one.
1239  * After sending all requests from the list we change the import state to
1240  * IMP_REPLAY_LOCKS and re-request all the locks we believe we have from the
1241  * server, as well as all the locks we don't yet have, and wait for the server
1242  * to grant them. After that we send a special "replay completed" request and
1243  * change the import state to IMP_REPLAY_WAIT.
1244  * Upon receiving the reply to that "replay completed" RPC we enter the
1245  * IMP_RECOVER state and resend all requests from the sending list.
1246  * After that we promote the import to the FULL state and send all delayed
1247  * requests; the import is fully operational after that.
1248  *
1249  */
1250 int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
1251 {
1252         int rc = 0;
1253         int inflight;
1254         char *target_start;
1255         int target_len;
1256
1257         ENTRY;
1258         if (imp->imp_state == LUSTRE_IMP_EVICTED) {
1259                 deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
1260                           &target_start, &target_len);
1261                 /* Don't care about MGC eviction */
1262                 if (strcmp(imp->imp_obd->obd_type->typ_name,
1263                            LUSTRE_MGC_NAME) != 0) {
1264                         LCONSOLE_ERROR_MSG(0x167, "This client was evicted by "
1265                                            "%.*s; in progress operations using "
1266                                            "this service will fail.\n",
1267                                            target_len, target_start);
1268                 }
1269                 CDEBUG(D_HA, "evicted from %s@%s; invalidating\n",
1270                        obd2cli_tgt(imp->imp_obd),
1271                        imp->imp_connection->c_remote_uuid.uuid);
1272                 /* reset vbr_failed flag upon eviction */
1273                 cfs_spin_lock(&imp->imp_lock);
1274                 imp->imp_vbr_failed = 0;
1275                 cfs_spin_unlock(&imp->imp_lock);
1276
1277 #ifdef __KERNEL__
1278                 /* bug 17802:  XXX client_disconnect_export vs connect request
1279                  * race.  If the client is evicted at this point, we would start
1280                  * the invalidate thread without a reference to the import, and
1281                  * the import could be freed at the same time. */
1282                 class_import_get(imp);
1283                 rc = cfs_create_thread(ptlrpc_invalidate_import_thread, imp,
1284                                        CFS_DAEMON_FLAGS);
1285                 if (rc < 0) {
1286                         class_import_put(imp);
1287                         CERROR("error starting invalidate thread: %d\n", rc);
1288                 } else {
1289                         rc = 0;
1290                 }
1291                 RETURN(rc);
1292 #else
1293                 ptlrpc_invalidate_import(imp);
1294
1295                 IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
1296 #endif
1297         }
1298
1299         if (imp->imp_state == LUSTRE_IMP_REPLAY) {
1300                 CDEBUG(D_HA, "replay requested by %s\n",
1301                        obd2cli_tgt(imp->imp_obd));
1302                 rc = ptlrpc_replay_next(imp, &inflight);
1303                 if (inflight == 0 &&
1304                     cfs_atomic_read(&imp->imp_replay_inflight) == 0) {
1305                         IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS);
1306                         rc = ldlm_replay_locks(imp);
1307                         if (rc)
1308                                 GOTO(out, rc);
1309                 }
1310                 rc = 0;
1311         }
1312
1313         if (imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS) {
1314                 if (cfs_atomic_read(&imp->imp_replay_inflight) == 0) {
1315                         IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_WAIT);
1316                         rc = signal_completed_replay(imp);
1317                         if (rc)
1318                                 GOTO(out, rc);
1319                 }
1320
1321         }
1322
1323         if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT) {
1324                 if (cfs_atomic_read(&imp->imp_replay_inflight) == 0) {
1325                         IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
1326                 }
1327         }
1328
1329         if (imp->imp_state == LUSTRE_IMP_RECOVER) {
1330                 CDEBUG(D_HA, "reconnected to %s@%s\n",
1331                        obd2cli_tgt(imp->imp_obd),
1332                        imp->imp_connection->c_remote_uuid.uuid);
1333
1334                 rc = ptlrpc_resend(imp);
1335                 if (rc)
1336                         GOTO(out, rc);
1337                 IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL);
1338                 ptlrpc_activate_import(imp);
1339
1340                 deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
1341                           &target_start, &target_len);
1342                 LCONSOLE_INFO("%s: Connection restored to service %.*s "
1343                               "using nid %s.\n", imp->imp_obd->obd_name,
1344                               target_len, target_start,
1345                               libcfs_nid2str(imp->imp_connection->c_peer.nid));
1346         }
1347
1348         if (imp->imp_state == LUSTRE_IMP_FULL) {
1349                 cfs_waitq_broadcast(&imp->imp_recovery_waitq);
1350                 ptlrpc_wake_delayed(imp);
1351         }
1352
1353 out:
1354         RETURN(rc);
1355 }
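
/*
 * Condensed view of the transitions driven by the state machine above
 * (for reference only; see the code for the exact conditions):
 *
 *   EVICTED      -> invalidate the import (in its own thread in the kernel),
 *                   then RECOVER
 *   REPLAY       -> replay saved requests; once none remain in flight,
 *                   REPLAY_LOCKS and ldlm_replay_locks()
 *   REPLAY_LOCKS -> once lock replay completes, REPLAY_WAIT and
 *                   signal_completed_replay()
 *   REPLAY_WAIT  -> once the "replay completed" reply arrives, RECOVER
 *   RECOVER      -> ptlrpc_resend(), then FULL and ptlrpc_activate_import()
 *   FULL         -> wake imp_recovery_waitq and any delayed requests
 */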
1356
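/**
 * Send a DISCONNECT RPC matching this import's connect opcode and update the
 * import state: with \a noclose set the import is left in LUSTRE_IMP_DISCON
 * so it may reconnect later, otherwise it goes to LUSTRE_IMP_CLOSED.  If
 * obd_force is set on the obd, the RPC is skipped and only the state changes.
 */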
1357 int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
1358 {
1359         struct ptlrpc_request *req;
1360         int rq_opc, rc = 0;
1361         int nowait = imp->imp_obd->obd_force;
1362         ENTRY;
1363
1364         if (nowait)
1365                 GOTO(set_state, rc);
1366
1367         switch (imp->imp_connect_op) {
1368         case OST_CONNECT: rq_opc = OST_DISCONNECT; break;
1369         case MDS_CONNECT: rq_opc = MDS_DISCONNECT; break;
1370         case MGS_CONNECT: rq_opc = MGS_DISCONNECT; break;
1371         default:
1372                 CERROR("don't know how to disconnect from %s (connect_op %d)\n",
1373                        obd2cli_tgt(imp->imp_obd), imp->imp_connect_op);
1374                 RETURN(-EINVAL);
1375         }
1376
1377         if (ptlrpc_import_in_recovery(imp)) {
1378                 struct l_wait_info lwi;
1379                 cfs_duration_t timeout;
1380
1381
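                /* Wait for the recovery in progress to finish before sending
                 * the DISCONNECT; bound the wait by the static obd_timeout
                 * when adaptive timeouts are off, or by the current AT
                 * service estimate for the request portal otherwise. */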
1382                 if (AT_OFF) {
1383                         if (imp->imp_server_timeout)
1384                                 timeout = cfs_time_seconds(obd_timeout / 2);
1385                         else
1386                                 timeout = cfs_time_seconds(obd_timeout);
1387                 } else {
1388                         int idx = import_at_get_index(imp,
1389                                 imp->imp_client->cli_request_portal);
1390                         timeout = cfs_time_seconds(
1391                                 at_get(&imp->imp_at.iat_service_estimate[idx]));
1392                 }
1393
1394                 lwi = LWI_TIMEOUT_INTR(cfs_timeout_cap(timeout),
1395                                        back_to_sleep, LWI_ON_SIGNAL_NOOP, NULL);
1396                 rc = l_wait_event(imp->imp_recovery_waitq,
1397                                   !ptlrpc_import_in_recovery(imp), &lwi);
1398
1399         }
1400
1401         cfs_spin_lock(&imp->imp_lock);
1402         if (imp->imp_state != LUSTRE_IMP_FULL)
1403                 GOTO(out, 0);
1404
1405         cfs_spin_unlock(&imp->imp_lock);
1406
1407         req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_DISCONNECT,
1408                                         LUSTRE_OBD_VERSION, rq_opc);
1409         if (req) {
1410                 /* We are disconnecting, so do not retry the DISCONNECT RPC
1411                  * if it fails.  We can get this far with a down server if
1412                  * the client doesn't know the server is gone yet. */
1413                 req->rq_no_resend = 1;
1414
1415 #ifndef CRAY_XT3
1416                 /* We want client umounts to happen quickly, no matter the
1417                    server state... */
1418                 req->rq_timeout = min_t(int, req->rq_timeout,
1419                                         INITIAL_CONNECT_TIMEOUT);
1420 #else
1421                 /* ... but we always want liblustre clients to disconnect
1422                    cleanly, so do not cap the adaptive value. */
1423                 if (AT_OFF)
1424                         req->rq_timeout = obd_timeout / 3;
1425 #endif
1426
1427                 IMPORT_SET_STATE(imp, LUSTRE_IMP_CONNECTING);
1428                 req->rq_send_state = LUSTRE_IMP_CONNECTING;
1429                 ptlrpc_request_set_replen(req);
1430                 rc = ptlrpc_queue_wait(req);
1431                 ptlrpc_req_finished(req);
1432         }
1433
1434 set_state:
1435         cfs_spin_lock(&imp->imp_lock);
1436 out:
1437         if (noclose)
1438                 IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
1439         else
1440                 IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
1441         memset(&imp->imp_remote_handle, 0, sizeof(imp->imp_remote_handle));
1442         cfs_spin_unlock(&imp->imp_lock);
1443
1444         RETURN(rc);
1445 }
1446
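/**
 * Mark import \a imp CLOSED, bump its generation and abort any requests
 * still in flight.
 */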
1447 void ptlrpc_cleanup_imp(struct obd_import *imp)
1448 {
1449         ENTRY;
1450
1451         cfs_spin_lock(&imp->imp_lock);
1452         IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
1453         imp->imp_generation++;
1454         cfs_spin_unlock(&imp->imp_lock);
1455         ptlrpc_abort_inflight(imp);
1456
1457         EXIT;
1458 }
1459
1460 /* Adaptive Timeout utils */
1461 extern unsigned int at_min, at_max, at_history;
1462
1463 /* Bin measurements into timeslices using AT_BINS bins.  This gives us the
1464    max over the last binlimit*AT_BINS secs without storing every sample, while
1465    still smoothing the return to normalcy after a slow response.  (E.g. keep
1466    the maximum latency seen in each minute of the last 4 minutes.) */
1467 int at_measured(struct adaptive_timeout *at, unsigned int val)
1468 {
1469         unsigned int old = at->at_current;
1470         time_t now = cfs_time_current_sec();
1471         time_t binlimit = max_t(time_t, at_history / AT_BINS, 1);
1472
1473         LASSERT(at);
1474         CDEBUG(D_OTHER, "add %u to %p time=%lu v=%u (%u %u %u %u)\n",
1475                val, at, now - at->at_binstart, at->at_current,
1476                at->at_hist[0], at->at_hist[1], at->at_hist[2], at->at_hist[3]);
1477
1478         if (val == 0)
1479                 /* 0's don't count, because we never want our timeout to
1480                    drop to 0, and because 0 could mean an error */
1481                 return 0;
1482
1483         cfs_spin_lock(&at->at_lock);
1484
1485         if (unlikely(at->at_binstart == 0)) {
1486                 /* Special case to remove default from history */
1487                 at->at_current = val;
1488                 at->at_worst_ever = val;
1489                 at->at_worst_time = now;
1490                 at->at_hist[0] = val;
1491                 at->at_binstart = now;
1492         } else if (now - at->at_binstart < binlimit) {
1493                 /* in bin 0 */
1494                 at->at_hist[0] = max(val, at->at_hist[0]);
1495                 at->at_current = max(val, at->at_current);
1496         } else {
1497                 int i, shift;
1498                 unsigned int maxv = val;
1499                 /* move bins over */
1500                 shift = (now - at->at_binstart) / binlimit;
1501                 LASSERT(shift > 0);
1502                 for (i = AT_BINS - 1; i >= 0; i--) {
1503                         if (i >= shift) {
1504                                 at->at_hist[i] = at->at_hist[i - shift];
1505                                 maxv = max(maxv, at->at_hist[i]);
1506                         } else {
1507                                 at->at_hist[i] = 0;
1508                         }
1509                 }
1510                 at->at_hist[0] = val;
1511                 at->at_current = maxv;
1512                 at->at_binstart += shift * binlimit;
1513         }
1514
1515         if (at->at_current > at->at_worst_ever) {
1516                 at->at_worst_ever = at->at_current;
1517                 at->at_worst_time = now;
1518         }
1519
1520         if (at->at_flags & AT_FLG_NOHIST)
1521                 /* Only keep the last reported value; the rest of the
1522                    history is kept for /proc display only */
1523                 at->at_current = val;
1524
1525         if (at_max > 0)
1526                 at->at_current = min(at->at_current, at_max);
1527         at->at_current = max(at->at_current, at_min);
1528
1529         if (at->at_current != old)
1530                 CDEBUG(D_OTHER, "AT %p change: old=%u new=%u delta=%d "
1531                        "(val=%u) hist %u %u %u %u\n", at,
1532                        old, at->at_current, at->at_current - old, val,
1533                        at->at_hist[0], at->at_hist[1], at->at_hist[2],
1534                        at->at_hist[3]);
1535
1536         /* if the value changed, return the old one to the caller, else 0 */
1537         old = (at->at_current != old) ? old : 0;
1538
1539         cfs_spin_unlock(&at->at_lock);
1540         return old;
1541 }
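
/*
 * Worked example of the binning above, using the assumed values from the
 * "4 minutes" comment before at_measured(): with at_history = 240 and
 * AT_BINS = 4, binlimit is 60 seconds, so at_hist[] holds the largest value
 * seen in each of the last four one-minute windows and at_current is the
 * maximum across them, clamped to [at_min, at_max].  A single slow reply
 * therefore stops inflating the estimate once its bin ages out, roughly
 * four minutes later.
 */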
1542
1543 /* Find the imp_at index for a given portal; assign if space available */
1544 int import_at_get_index(struct obd_import *imp, int portal)
1545 {
1546         struct imp_at *at = &imp->imp_at;
1547         int i;
1548
1549         for (i = 0; i < IMP_AT_MAX_PORTALS; i++) {
1550                 if (at->iat_portal[i] == portal)
1551                         return i;
1552                 if (at->iat_portal[i] == 0)
1553                         /* unused */
1554                         break;
1555         }
1556
1557         /* Not found in list, add it under a lock */
1558         cfs_spin_lock(&imp->imp_lock);
1559
1560         /* Re-check from the same slot now that we hold the lock */
1561         for (; i < IMP_AT_MAX_PORTALS; i++) {
1562                 if (at->iat_portal[i] == portal)
1563                         goto out;
1564                 if (at->iat_portal[i] == 0)
1565                         /* unused */
1566                         break;
1567         }
1568
1569         /* Out of free slots; IMP_AT_MAX_PORTALS needs to be larger */
1570         LASSERT(i < IMP_AT_MAX_PORTALS);
1571
1572         at->iat_portal[i] = portal;
1573 out:
1574         cfs_spin_unlock(&imp->imp_lock);
1575         return i;
1576 }
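
/*
 * Usage sketch: the per-portal AT service estimate is typically looked up
 * the way ptlrpc_disconnect_import() does above, e.g.
 *
 *      idx = import_at_get_index(imp, imp->imp_client->cli_request_portal);
 *      timeout = cfs_time_seconds(at_get(&imp->imp_at.iat_service_estimate[idx]));
 */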