1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ptlrpc/import.c
37  *
38  * Author: Mike Shaver <shaver@clusterfs.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_RPC
42 #ifndef __KERNEL__
43 # include <liblustre.h>
44 #endif
45
46 #include <obd_support.h>
47 #include <lustre_ha.h>
48 #include <lustre_net.h>
49 #include <lustre_import.h>
50 #include <lustre_export.h>
51 #include <obd.h>
52 #include <obd_cksum.h>
53 #include <obd_class.h>
54
55 #include "ptlrpc_internal.h"
56
57 struct ptlrpc_connect_async_args {
58          __u64 pcaa_peer_committed;
59         int pcaa_initial_connect;
60 };
61
62 /**
63  * Update the current state of import \a imp to the provided \a state value.
64  * Helper function; must be called under imp_lock.
65  */
66 static void __import_set_state(struct obd_import *imp,
67                                enum lustre_imp_state state)
68 {
69         imp->imp_state = state;
70         imp->imp_state_hist[imp->imp_state_hist_idx].ish_state = state;
71         imp->imp_state_hist[imp->imp_state_hist_idx].ish_time =
72                 cfs_time_current_sec();
73         imp->imp_state_hist_idx = (imp->imp_state_hist_idx + 1) %
74                 IMP_STATE_HIST_LEN;
75 }
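/*
 * Note: imp_state_hist acts as a small ring buffer holding the last
 * IMP_STATE_HIST_LEN transitions.  As a worked example, if
 * IMP_STATE_HIST_LEN were 16, the 17th state change would wrap around and
 * overwrite slot 0, because imp_state_hist_idx is advanced modulo
 * IMP_STATE_HIST_LEN above.
 */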
76
77 /* A CLOSED import should remain so. */
78 #define IMPORT_SET_STATE_NOLOCK(imp, state)                                    \
79 do {                                                                           \
80         if (imp->imp_state != LUSTRE_IMP_CLOSED) {                             \
81                CDEBUG(D_HA, "%p %s: changing import state from %s to %s\n",    \
82                       imp, obd2cli_tgt(imp->imp_obd),                          \
83                       ptlrpc_import_state_name(imp->imp_state),                \
84                       ptlrpc_import_state_name(state));                        \
85                __import_set_state(imp, state);                                 \
86         }                                                                      \
87 } while(0)
88
89 #define IMPORT_SET_STATE(imp, state)                                    \
90 do {                                                                    \
91         spin_lock(&imp->imp_lock);                                      \
92         IMPORT_SET_STATE_NOLOCK(imp, state);                            \
93         spin_unlock(&imp->imp_lock);                                    \
94 } while(0)
95
96
97 static int ptlrpc_connect_interpret(const struct lu_env *env,
98                                     struct ptlrpc_request *request,
99                                     void * data, int rc);
100 int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
101
102 /* Only this function is allowed to change the import state when it is
103  * CLOSED. I would rather refcount the import and free it after
104  * disconnection like we do with exports. To do that, the client_obd
105  * will need to save the peer info somewhere other than in the import,
106  * though. */
107 int ptlrpc_init_import(struct obd_import *imp)
108 {
109         spin_lock(&imp->imp_lock);
110
111         imp->imp_generation++;
112         imp->imp_state =  LUSTRE_IMP_NEW;
113
114         spin_unlock(&imp->imp_lock);
115
116         return 0;
117 }
118 EXPORT_SYMBOL(ptlrpc_init_import);
119
120 #define UUID_STR "_UUID"
121 void deuuidify(char *uuid, const char *prefix, char **uuid_start, int *uuid_len)
122 {
123         *uuid_start = !prefix || strncmp(uuid, prefix, strlen(prefix))
124                 ? uuid : uuid + strlen(prefix);
125
126         *uuid_len = strlen(*uuid_start);
127
128         if (*uuid_len < strlen(UUID_STR))
129                 return;
130
131         if (!strncmp(*uuid_start + *uuid_len - strlen(UUID_STR),
132                     UUID_STR, strlen(UUID_STR)))
133                 *uuid_len -= strlen(UUID_STR);
134 }
135 EXPORT_SYMBOL(deuuidify);
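/*
 * A minimal usage sketch for deuuidify(); the UUID strings below are
 * hypothetical:
 *
 *      char *start;
 *      int   len;
 *
 *      deuuidify("lustre-OST0001_UUID", NULL, &start, &len);
 *      // start points at "lustre-OST0001_UUID", len == 14, so a "%.*s"
 *      // format prints "lustre-OST0001" with the trailing "_UUID" stripped
 *
 *      deuuidify("lustre-OST0001_UUID", "lustre-", &start, &len);
 *      // start points at "OST0001_UUID", len == 7 ("OST0001")
 */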
136
137 /**
138  * Returns true if import was FULL, false if import was already not
139  * connected.
140  * @imp - import to be disconnected
141  * @conn_cnt - connection count (epoch) of the request that timed out
142  *             and caused the disconnection.  In some cases, multiple
143  *             inflight requests can fail to a single target (e.g. OST
144  *             bulk requests) and if one has already caused a reconnection
145  *             (increasing the import->conn_cnt) the older failure should
146  *             not also cause a reconnection.  If zero it forces a reconnect.
147  */
148 int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt)
149 {
150         int rc = 0;
151
152         spin_lock(&imp->imp_lock);
153
154         if (imp->imp_state == LUSTRE_IMP_FULL &&
155             (conn_cnt == 0 || conn_cnt == imp->imp_conn_cnt)) {
156                 char *target_start;
157                 int   target_len;
158
159                 deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
160                           &target_start, &target_len);
161
162                 if (imp->imp_replayable) {
163                         LCONSOLE_WARN("%s: Connection to %.*s (at %s) was "
164                                "lost; in progress operations using this "
165                                "service will wait for recovery to complete\n",
166                                imp->imp_obd->obd_name, target_len, target_start,
167                                libcfs_nid2str(imp->imp_connection->c_peer.nid));
168                 } else {
169                         LCONSOLE_ERROR_MSG(0x166, "%s: Connection to "
170                                "%.*s (at %s) was lost; in progress "
171                                "operations using this service will fail\n",
172                                imp->imp_obd->obd_name,
173                                target_len, target_start,
174                                libcfs_nid2str(imp->imp_connection->c_peer.nid));
175                 }
176                 ptlrpc_deactivate_timeouts(imp);
177                 IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
178                 spin_unlock(&imp->imp_lock);
179
180                 if (obd_dump_on_timeout)
181                         libcfs_debug_dumplog();
182
183                 obd_import_event(imp->imp_obd, imp, IMP_EVENT_DISCON);
184                 rc = 1;
185         } else {
186                 spin_unlock(&imp->imp_lock);
187                 CDEBUG(D_HA, "%s: import %p already %s (conn %u, was %u): %s\n",
188                        imp->imp_client->cli_name, imp,
189                        (imp->imp_state == LUSTRE_IMP_FULL &&
190                         imp->imp_conn_cnt > conn_cnt) ?
191                        "reconnected" : "not connected", imp->imp_conn_cnt,
192                        conn_cnt, ptlrpc_import_state_name(imp->imp_state));
193         }
194
195         return rc;
196 }
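/*
 * Worked example of the conn_cnt (epoch) check in ptlrpc_set_import_discon(),
 * with hypothetical numbers: suppose two bulk RPCs were sent while
 * imp_conn_cnt was 5 and both time out.  The first timeout calls
 * ptlrpc_set_import_discon(imp, 5), which matches the epoch, moves the import
 * to LUSTRE_IMP_DISCON and returns 1; the reconnect that follows bumps
 * imp_conn_cnt to 6.  When the second, older failure later calls
 * ptlrpc_set_import_discon(imp, 5), the epoch no longer matches (and the
 * import is no longer FULL), so it is a no-op.  Passing conn_cnt == 0 skips
 * the epoch check and forces the disconnect.
 */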
197
198 /* Must be called with imp_lock held! */
199 static void ptlrpc_deactivate_and_unlock_import(struct obd_import *imp)
200 {
201         ENTRY;
202         LASSERT_SPIN_LOCKED(&imp->imp_lock);
203
204         CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd));
205         imp->imp_invalid = 1;
206         imp->imp_generation++;
207         spin_unlock(&imp->imp_lock);
208
209         ptlrpc_abort_inflight(imp);
210         obd_import_event(imp->imp_obd, imp, IMP_EVENT_INACTIVE);
211
212         EXIT;
213 }
214
215 /*
216  * This acts as a barrier; all existing requests are rejected, and
217  * no new requests will be accepted until the import is valid again.
218  */
219 void ptlrpc_deactivate_import(struct obd_import *imp)
220 {
221         spin_lock(&imp->imp_lock);
222         ptlrpc_deactivate_and_unlock_import(imp);
223 }
224 EXPORT_SYMBOL(ptlrpc_deactivate_import);
225
226 static unsigned int
227 ptlrpc_inflight_deadline(struct ptlrpc_request *req, time_t now)
228 {
229         long dl;
230
231         if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
232               (req->rq_phase == RQ_PHASE_BULK) ||
233               (req->rq_phase == RQ_PHASE_NEW)))
234                 return 0;
235
236         if (req->rq_timedout)
237                 return 0;
238
239         if (req->rq_phase == RQ_PHASE_NEW)
240                 dl = req->rq_sent;
241         else
242                 dl = req->rq_deadline;
243
244         if (dl <= now)
245                 return 0;
246
247         return dl - now;
248 }
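/*
 * A small worked example for ptlrpc_inflight_deadline(): for a request in the
 * RPC phase that is not waiting, with now == 1000 and rq_deadline == 1030
 * (hypothetical values), it returns 30, the number of seconds left before the
 * request is expected to time out; requests that already timed out, or whose
 * deadline has passed, contribute 0 to the maximum computed below.
 */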
249
250 static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp)
251 {
252         time_t now = cfs_time_current_sec();
253         cfs_list_t *tmp, *n;
254         struct ptlrpc_request *req;
255         unsigned int timeout = 0;
256
257         spin_lock(&imp->imp_lock);
258         cfs_list_for_each_safe(tmp, n, &imp->imp_sending_list) {
259                 req = cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
260                 timeout = max(ptlrpc_inflight_deadline(req, now), timeout);
261         }
262         spin_unlock(&imp->imp_lock);
263         return timeout;
264 }
265
266 /**
267  * This function will invalidate the import, if necessary, then block
268  * for all the RPC completions, and finally notify the obd to
269  * invalidate its state (ie cancel locks, clear pending requests,
270  * etc).
271  */
272 void ptlrpc_invalidate_import(struct obd_import *imp)
273 {
274         cfs_list_t *tmp, *n;
275         struct ptlrpc_request *req;
276         struct l_wait_info lwi;
277         unsigned int timeout;
278         int rc;
279
280         cfs_atomic_inc(&imp->imp_inval_count);
281
282         if (!imp->imp_invalid || imp->imp_obd->obd_no_recov)
283                 ptlrpc_deactivate_import(imp);
284
285         LASSERT(imp->imp_invalid);
286
287         /* Wait forever until inflight == 0. We really can't do it any other
288          * way because in some cases we need to wait a very long time for
289          * reply unlink. We can't do anything before that because there is
290          * no guarantee that an RDMA transfer is not still in progress. */
291         do {
292                 /* Calculate max timeout for waiting on rpcs to error
293                  * out. Use obd_timeout if calculated value is smaller
294                  * than it. */
295                 if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
296                         timeout = ptlrpc_inflight_timeout(imp);
297                         timeout += timeout / 3;
298
299                         if (timeout == 0)
300                                 timeout = obd_timeout;
301                 } else {
302                         /* decrease the interval to make the race easier to hit */
303                         timeout = 1;
304                 }
305
306                 CDEBUG(D_RPCTRACE, "Sleeping %d sec for inflight to error out\n",
307                        timeout);
308
309                 /* Wait for all requests to error out and call completion
310                  * callbacks. Cap it at obd_timeout -- these should all
311                  * have been locally cancelled by ptlrpc_abort_inflight. */
312                 lwi = LWI_TIMEOUT_INTERVAL(
313                         cfs_timeout_cap(cfs_time_seconds(timeout)),
314                         (timeout > 1)?cfs_time_seconds(1):cfs_time_seconds(1)/2,
315                         NULL, NULL);
316                 rc = l_wait_event(imp->imp_recovery_waitq,
317                                   (cfs_atomic_read(&imp->imp_inflight) == 0),
318                                   &lwi);
319                 if (rc) {
320                         const char *cli_tgt = obd2cli_tgt(imp->imp_obd);
321
322                         CERROR("%s: rc = %d waiting for callback (%d != 0)\n",
323                                cli_tgt, rc,
324                                cfs_atomic_read(&imp->imp_inflight));
325
326                         spin_lock(&imp->imp_lock);
327                         if (cfs_atomic_read(&imp->imp_inflight) == 0) {
328                                 int count = cfs_atomic_read(&imp->imp_unregistering);
329
330                                 /* We know that "unregistering" RPCs can
331                                  * only survive in the sending or delayed
332                                  * lists (they may be waiting for a long
333                                  * reply unlink on sluggish nets). Check
334                                  * this: no inflight RPCs while
335                                  * unregistering != 0 would be a bug. */
336                                 LASSERTF(count == 0, "Some RPCs are still "
337                                          "unregistering: %d\n", count);
338
339                                 /* Let's save one loop as soon as inflight
340                                  * has dropped to zero. No new inflight
341                                  * requests are possible at this point. */
342                                 rc = 0;
343                         } else {
344                                 cfs_list_for_each_safe(tmp, n,
345                                                        &imp->imp_sending_list) {
346                                         req = cfs_list_entry(tmp,
347                                                              struct ptlrpc_request,
348                                                              rq_list);
349                                         DEBUG_REQ(D_ERROR, req,
350                                                   "still on sending list");
351                                 }
352                                 cfs_list_for_each_safe(tmp, n,
353                                                        &imp->imp_delayed_list) {
354                                         req = cfs_list_entry(tmp,
355                                                              struct ptlrpc_request,
356                                                              rq_list);
357                                         DEBUG_REQ(D_ERROR, req,
358                                                   "still on delayed list");
359                                 }
360
361                                 CERROR("%s: RPCs in \"%s\" phase found (%d). "
362                                        "Network is sluggish? Waiting for "
363                                        "them to error out.\n", cli_tgt,
364                                        ptlrpc_phase2str(RQ_PHASE_UNREGISTERING),
365                                        cfs_atomic_read(&imp->
366                                                        imp_unregistering));
367                         }
368                         spin_unlock(&imp->imp_lock);
369                 }
370         } while (rc != 0);
371
372         /*
373          * Additionally check that no new RPCs were added to the import
374          * while it was in the "invalidate" state.
375          */
376         LASSERT(cfs_atomic_read(&imp->imp_inflight) == 0);
377         obd_import_event(imp->imp_obd, imp, IMP_EVENT_INVALIDATE);
378         sptlrpc_import_flush_all_ctx(imp);
379
380         cfs_atomic_dec(&imp->imp_inval_count);
381         cfs_waitq_broadcast(&imp->imp_recovery_waitq);
382 }
383 EXPORT_SYMBOL(ptlrpc_invalidate_import);
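/*
 * Worked example of the wait-loop timeout above, using hypothetical numbers:
 * if the longest remaining deadline among in-flight requests is 30 seconds,
 * ptlrpc_inflight_timeout() returns 30 and the loop sleeps 30 + 30/3 = 40
 * seconds (capped via cfs_timeout_cap()) before re-checking imp_inflight;
 * if no request has a future deadline, the wait falls back to obd_timeout.
 */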
384
385 /* unset imp_invalid */
386 void ptlrpc_activate_import(struct obd_import *imp)
387 {
388         struct obd_device *obd = imp->imp_obd;
389
390         spin_lock(&imp->imp_lock);
391         imp->imp_invalid = 0;
392         ptlrpc_activate_timeouts(imp);
393         spin_unlock(&imp->imp_lock);
394         obd_import_event(obd, imp, IMP_EVENT_ACTIVE);
395 }
396 EXPORT_SYMBOL(ptlrpc_activate_import);
397
398 void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt)
399 {
400         ENTRY;
401
402         LASSERT(!imp->imp_dlm_fake);
403
404         if (ptlrpc_set_import_discon(imp, conn_cnt)) {
405                 if (!imp->imp_replayable) {
406                         CDEBUG(D_HA, "import %s@%s for %s not replayable, "
407                                "auto-deactivating\n",
408                                obd2cli_tgt(imp->imp_obd),
409                                imp->imp_connection->c_remote_uuid.uuid,
410                                imp->imp_obd->obd_name);
411                         ptlrpc_deactivate_import(imp);
412                 }
413
414                 CDEBUG(D_HA, "%s: waking up pinger\n",
415                        obd2cli_tgt(imp->imp_obd));
416
417                 spin_lock(&imp->imp_lock);
418                 imp->imp_force_verify = 1;
419                 spin_unlock(&imp->imp_lock);
420
421                 ptlrpc_pinger_wake_up();
422         }
423         EXIT;
424 }
425 EXPORT_SYMBOL(ptlrpc_fail_import);
426
427 int ptlrpc_reconnect_import(struct obd_import *imp)
428 {
429         ptlrpc_set_import_discon(imp, 0);
430         /* Force a new connect attempt */
431         ptlrpc_invalidate_import(imp);
432         /* Do a fresh connect next time by zeroing the handle */
433         ptlrpc_disconnect_import(imp, 1);
434         /* Wait for all invalidate calls to finish */
435         if (cfs_atomic_read(&imp->imp_inval_count) > 0) {
436                 int rc;
437                 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
438                 rc = l_wait_event(imp->imp_recovery_waitq,
439                                   (cfs_atomic_read(&imp->imp_inval_count) == 0),
440                                   &lwi);
441                 if (rc)
442                         CERROR("Interrupted, inval=%d\n",
443                                cfs_atomic_read(&imp->imp_inval_count));
444         }
445
446         /* Allow reconnect attempts */
447         imp->imp_obd->obd_no_recov = 0;
448         /* Remove 'invalid' flag */
449         ptlrpc_activate_import(imp);
450         /* Attempt a new connect */
451         ptlrpc_recover_import(imp, NULL, 0);
452         return 0;
453 }
454 EXPORT_SYMBOL(ptlrpc_reconnect_import);
455
456 /**
457  * Connection on import \a imp is changed to another one (if more than one is
458  * present). We typically choose the connection that we have not tried to
459  * connect to for the longest time.
460  */
461 static int import_select_connection(struct obd_import *imp)
462 {
463         struct obd_import_conn *imp_conn = NULL, *conn;
464         struct obd_export *dlmexp;
465         char *target_start;
466         int target_len, tried_all = 1;
467         ENTRY;
468
469         spin_lock(&imp->imp_lock);
470
471         if (cfs_list_empty(&imp->imp_conn_list)) {
472                 CERROR("%s: no connections available\n",
473                        imp->imp_obd->obd_name);
474                 spin_unlock(&imp->imp_lock);
475                 RETURN(-EINVAL);
476         }
477
478         cfs_list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
479                 CDEBUG(D_HA, "%s: connect to NID %s last attempt "LPU64"\n",
480                        imp->imp_obd->obd_name,
481                        libcfs_nid2str(conn->oic_conn->c_peer.nid),
482                        conn->oic_last_attempt);
483
484                 /* If we have not tried this connection since
485                    the last successful attempt, go with this one */
486                 if ((conn->oic_last_attempt == 0) ||
487                     cfs_time_beforeq_64(conn->oic_last_attempt,
488                                        imp->imp_last_success_conn)) {
489                         imp_conn = conn;
490                         tried_all = 0;
491                         break;
492                 }
493
494                 /* If all of the connections have already been tried
495                    since the last successful connection, just choose the
496                    least recently used */
497                 if (!imp_conn)
498                         imp_conn = conn;
499                 else if (cfs_time_before_64(conn->oic_last_attempt,
500                                             imp_conn->oic_last_attempt))
501                         imp_conn = conn;
502         }
503
504         /* if not found, simply choose the current one */
505         if (!imp_conn || imp->imp_force_reconnect) {
506                 LASSERT(imp->imp_conn_current);
507                 imp_conn = imp->imp_conn_current;
508                 tried_all = 0;
509         }
510         LASSERT(imp_conn->oic_conn);
511
512         /* If we've tried everything, and we're back to the beginning of the
513            list, increase our timeout and try again. It will be reset when
514            we do finally connect. (FIXME: really we should wait for all network
515            state associated with the last connection attempt to drain before
516            trying to reconnect on it.) */
517         if (tried_all && (imp->imp_conn_list.next == &imp_conn->oic_item)) {
518                 struct adaptive_timeout *at = &imp->imp_at.iat_net_latency;
519                 if (at_get(at) < CONNECTION_SWITCH_MAX) {
520                         at_measured(at, at_get(at) + CONNECTION_SWITCH_INC);
521                         if (at_get(at) > CONNECTION_SWITCH_MAX)
522                                 at_reset(at, CONNECTION_SWITCH_MAX);
523                 }
524                 LASSERT(imp_conn->oic_last_attempt);
525                 CDEBUG(D_HA, "%s: tried all connections, increasing latency "
526                         "to %ds\n", imp->imp_obd->obd_name, at_get(at));
527         }
528
529         imp_conn->oic_last_attempt = cfs_time_current_64();
530
531         /* switch connection, even if it is the same as the current one */
532         if (imp->imp_connection)
533                 ptlrpc_connection_put(imp->imp_connection);
534         imp->imp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
535
536         dlmexp = class_conn2export(&imp->imp_dlm_handle);
537         LASSERT(dlmexp != NULL);
538         if (dlmexp->exp_connection)
539                 ptlrpc_connection_put(dlmexp->exp_connection);
540         dlmexp->exp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
541         class_export_put(dlmexp);
542
543         if (imp->imp_conn_current != imp_conn) {
544                 if (imp->imp_conn_current) {
545                         deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
546                                   &target_start, &target_len);
547
548                         CDEBUG(D_HA, "%s: Connection changing to"
549                                " %.*s (at %s)\n",
550                                imp->imp_obd->obd_name,
551                                target_len, target_start,
552                                libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
553                 }
554
555                 imp->imp_conn_current = imp_conn;
556         }
557
558         CDEBUG(D_HA, "%s: import %p using connection %s/%s\n",
559                imp->imp_obd->obd_name, imp, imp_conn->oic_uuid.uuid,
560                libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
561
562         spin_unlock(&imp->imp_lock);
563
564         RETURN(0);
565 }
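/*
 * A sketch of the selection policy in import_select_connection(), with
 * hypothetical timestamps: assume imp_last_success_conn == 100 and two
 * configured connections, connA (oic_last_attempt == 90) and connB
 * (oic_last_attempt == 120).  connA was last tried before the last successful
 * connect, so it is picked first.  If every connection has been tried since
 * the last success, the least recently tried one is chosen instead, and once
 * the whole list has been cycled the iat_net_latency AT value is increased by
 * CONNECTION_SWITCH_INC (up to CONNECTION_SWITCH_MAX) to slow down further
 * attempts.
 */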
566
567 /*
568  * must be called under imp_lock
569  */
570 static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno)
571 {
572         struct ptlrpc_request *req;
573         cfs_list_t *tmp;
574
575         if (cfs_list_empty(&imp->imp_replay_list))
576                 return 0;
577         tmp = imp->imp_replay_list.next;
578         req = cfs_list_entry(tmp, struct ptlrpc_request, rq_replay_list);
579         *transno = req->rq_transno;
580         if (req->rq_transno == 0) {
581                 DEBUG_REQ(D_ERROR, req, "zero transno in replay");
582                 LBUG();
583         }
584
585         return 1;
586 }
587
588 /**
589  * Attempt to (re)connect import \a imp. This includes all preparations,
590  * initializing the CONNECT RPC request and passing it to ptlrpcd for
591  * actual sending.
592  * Returns 0 on success or an error code.
593  */
594 int ptlrpc_connect_import(struct obd_import *imp)
595 {
596         struct obd_device *obd = imp->imp_obd;
597         int initial_connect = 0;
598         int set_transno = 0;
599         __u64 committed_before_reconnect = 0;
600         struct ptlrpc_request *request;
601         char *bufs[] = { NULL,
602                          obd2cli_tgt(imp->imp_obd),
603                          obd->obd_uuid.uuid,
604                          (char *)&imp->imp_dlm_handle,
605                          (char *)&imp->imp_connect_data };
606         struct ptlrpc_connect_async_args *aa;
607         int rc;
608         ENTRY;
609
610         spin_lock(&imp->imp_lock);
611         if (imp->imp_state == LUSTRE_IMP_CLOSED) {
612                 spin_unlock(&imp->imp_lock);
613                 CERROR("can't connect to a closed import\n");
614                 RETURN(-EINVAL);
615         } else if (imp->imp_state == LUSTRE_IMP_FULL) {
616                 spin_unlock(&imp->imp_lock);
617                 CERROR("already connected\n");
618                 RETURN(0);
619         } else if (imp->imp_state == LUSTRE_IMP_CONNECTING) {
620                 spin_unlock(&imp->imp_lock);
621                 CERROR("already connecting\n");
622                 RETURN(-EALREADY);
623         }
624
625         IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CONNECTING);
626
627         imp->imp_conn_cnt++;
628         imp->imp_resend_replay = 0;
629
630         if (!lustre_handle_is_used(&imp->imp_remote_handle))
631                 initial_connect = 1;
632         else
633                 committed_before_reconnect = imp->imp_peer_committed_transno;
634
635         set_transno = ptlrpc_first_transno(imp,
636                                            &imp->imp_connect_data.ocd_transno);
637         spin_unlock(&imp->imp_lock);
638
639         rc = import_select_connection(imp);
640         if (rc)
641                 GOTO(out, rc);
642
643         rc = sptlrpc_import_sec_adapt(imp, NULL, 0);
644         if (rc)
645                 GOTO(out, rc);
646
647         /* Reset connect flags to the originally requested flags, so that if
648          * the server is updated on the fly we will get the new features. */
649         imp->imp_connect_data.ocd_connect_flags = imp->imp_connect_flags_orig;
650         /* Reset ocd_version each time so the server knows the exact version */
651         imp->imp_connect_data.ocd_version = LUSTRE_VERSION_CODE;
652         imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
653         imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18;
654
655         rc = obd_reconnect(NULL, imp->imp_obd->obd_self_export, obd,
656                            &obd->obd_uuid, &imp->imp_connect_data, NULL);
657         if (rc)
658                 GOTO(out, rc);
659
660         request = ptlrpc_request_alloc(imp, &RQF_MDS_CONNECT);
661         if (request == NULL)
662                 GOTO(out, rc = -ENOMEM);
663
664         rc = ptlrpc_request_bufs_pack(request, LUSTRE_OBD_VERSION,
665                                       imp->imp_connect_op, bufs, NULL);
666         if (rc) {
667                 ptlrpc_request_free(request);
668                 GOTO(out, rc);
669         }
670
671         /* Report the rpc service time to the server so that it knows how long
672          * to wait for clients to join recovery */
673         lustre_msg_set_service_time(request->rq_reqmsg,
674                                     at_timeout2est(request->rq_timeout));
675
676         /* The amount of time we give the server to process the connect req.
677          * import_select_connection will increase the net latency on
678          * repeated reconnect attempts to cover slow networks.
679          * We override/ignore the server rpc completion estimate here,
680          * which may be large if this is a reconnect attempt */
681         request->rq_timeout = INITIAL_CONNECT_TIMEOUT;
682         lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout);
683
684 #ifndef __KERNEL__
685         lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_LIBCLIENT);
686 #endif
687         lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_NEXT_VER);
688
689         request->rq_no_resend = request->rq_no_delay = 1;
690         request->rq_send_state = LUSTRE_IMP_CONNECTING;
691         /* Allow a slightly larger reply for future growth compatibility */
692         req_capsule_set_size(&request->rq_pill, &RMF_CONNECT_DATA, RCL_SERVER,
693                              sizeof(struct obd_connect_data)+16*sizeof(__u64));
694         ptlrpc_request_set_replen(request);
695         request->rq_interpret_reply = ptlrpc_connect_interpret;
696
697         CLASSERT(sizeof (*aa) <= sizeof (request->rq_async_args));
698         aa = ptlrpc_req_async_args(request);
699         memset(aa, 0, sizeof *aa);
700
701         aa->pcaa_peer_committed = committed_before_reconnect;
702         aa->pcaa_initial_connect = initial_connect;
703
704         if (aa->pcaa_initial_connect) {
705                 spin_lock(&imp->imp_lock);
706                 imp->imp_replayable = 1;
707                 spin_unlock(&imp->imp_lock);
708                 lustre_msg_add_op_flags(request->rq_reqmsg,
709                                         MSG_CONNECT_INITIAL);
710         }
711
712         if (set_transno)
713                 lustre_msg_add_op_flags(request->rq_reqmsg,
714                                         MSG_CONNECT_TRANSNO);
715
716         DEBUG_REQ(D_RPCTRACE, request, "(re)connect request (timeout %d)",
717                   request->rq_timeout);
718         ptlrpcd_add_req(request, PDL_POLICY_ROUND, -1);
719         rc = 0;
720 out:
721         if (rc != 0) {
722                 IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
723         }
724
725         RETURN(rc);
726 }
727 EXPORT_SYMBOL(ptlrpc_connect_import);
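/*
 * A note on the CONNECT request built above, as read from the code: the
 * bufs[] array supplies the request buffers packed by
 * ptlrpc_request_bufs_pack() -- the leading NULL appears to stand in for the
 * ptlrpc body, followed by the target UUID string, the client UUID string,
 * the DLM connection handle and the obd_connect_data -- while the reply
 * buffer is sized as sizeof(struct obd_connect_data) + 16 * sizeof(__u64) so
 * a newer server can return a larger connect data structure without
 * truncation.
 */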
728
729 static void ptlrpc_maybe_ping_import_soon(struct obd_import *imp)
730 {
731 #ifdef __KERNEL__
732         int force_verify;
733
734         spin_lock(&imp->imp_lock);
735         force_verify = imp->imp_force_verify != 0;
736         spin_unlock(&imp->imp_lock);
737
738         if (force_verify)
739                 ptlrpc_pinger_wake_up();
740 #else
741         /* liblustre has no pinger thread, so always wake up the pinger */
742         ptlrpc_pinger_wake_up();
743 #endif
744 }
745
746 static int ptlrpc_busy_reconnect(int rc)
747 {
748         return (rc == -EBUSY) || (rc == -EAGAIN);
749 }
750
751 /**
752  * interpret_reply callback for connect RPCs.
753  * Looks into the returned status of the connect operation and decides
754  * what to do with the import - i.e. enter recovery, promote it to
755  * full state for normal operations, or disconnect it due to an error.
756  */
757 static int ptlrpc_connect_interpret(const struct lu_env *env,
758                                     struct ptlrpc_request *request,
759                                     void *data, int rc)
760 {
761         struct ptlrpc_connect_async_args *aa = data;
762         struct obd_import *imp = request->rq_import;
763         struct client_obd *cli = &imp->imp_obd->u.cli;
764         struct lustre_handle old_hdl;
765         __u64 old_connect_flags;
766         int msg_flags;
767         struct obd_connect_data *ocd;
768         struct obd_export *exp;
769         int ret;
770         ENTRY;
771
772         spin_lock(&imp->imp_lock);
773         if (imp->imp_state == LUSTRE_IMP_CLOSED) {
774                 spin_unlock(&imp->imp_lock);
775                 RETURN(0);
776         }
777
778         if (rc) {
779                 /* if this is a reconnect to a busy export, there is no need
780                  * to select a new target for connecting */
781                 imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc);
782                 spin_unlock(&imp->imp_lock);
783                 ptlrpc_maybe_ping_import_soon(imp);
784                 GOTO(out, rc);
785         }
786         spin_unlock(&imp->imp_lock);
787
788         LASSERT(imp->imp_conn_current);
789
790         msg_flags = lustre_msg_get_op_flags(request->rq_repmsg);
791
792         ret = req_capsule_get_size(&request->rq_pill, &RMF_CONNECT_DATA,
793                                    RCL_SERVER);
794         /* server replied obd_connect_data is always bigger */
795         ocd = req_capsule_server_sized_get(&request->rq_pill,
796                                            &RMF_CONNECT_DATA, ret);
797
798         if (ocd == NULL) {
799                 CERROR("%s: no connect data from server\n",
800                        imp->imp_obd->obd_name);
801                 rc = -EPROTO;
802                 GOTO(out, rc);
803         }
804
805         spin_lock(&imp->imp_lock);
806
807         /* All imports are pingable */
808         imp->imp_pingable = 1;
809         imp->imp_force_reconnect = 0;
810         imp->imp_force_verify = 0;
811
812         imp->imp_connect_data = *ocd;
813
814         CDEBUG(D_HA, "%s: connect to target with instance %u\n",
815                imp->imp_obd->obd_name, ocd->ocd_instance);
816         exp = class_conn2export(&imp->imp_dlm_handle);
817
818         spin_unlock(&imp->imp_lock);
819
820         /* Check that the server granted a subset of the requested flags. */
821         if ((ocd->ocd_connect_flags & imp->imp_connect_flags_orig) !=
822             ocd->ocd_connect_flags) {
823                 CERROR("%s: Server did not grant requested subset of flags: "
824                        "asked="LPX64" granted="LPX64"\n",
825                        imp->imp_obd->obd_name, imp->imp_connect_flags_orig,
826                        ocd->ocd_connect_flags);
827                 GOTO(out, rc = -EPROTO);
828         }
829
830         if (!exp) {
831                 /* This could happen if the export is cleaned up during
832                    the connect attempt */
833                 CERROR("%s: missing export after connect\n",
834                        imp->imp_obd->obd_name);
835                 GOTO(out, rc = -ENODEV);
836         }
837         old_connect_flags = exp_connect_flags(exp);
838         exp->exp_connect_data = *ocd;
839         imp->imp_obd->obd_self_export->exp_connect_data = *ocd;
840         class_export_put(exp);
841
842         obd_import_event(imp->imp_obd, imp, IMP_EVENT_OCD);
843
844         if (aa->pcaa_initial_connect) {
845                 spin_lock(&imp->imp_lock);
846                 if (msg_flags & MSG_CONNECT_REPLAYABLE) {
847                         imp->imp_replayable = 1;
848                         spin_unlock(&imp->imp_lock);
849                         CDEBUG(D_HA, "connected to replayable target: %s\n",
850                                obd2cli_tgt(imp->imp_obd));
851                 } else {
852                         imp->imp_replayable = 0;
853                         spin_unlock(&imp->imp_lock);
854                 }
855
856                 /* if applicable, adjust the imp->imp_msg_magic here
857                  * according to the reply flags */
858
859                 imp->imp_remote_handle =
860                                 *lustre_msg_get_handle(request->rq_repmsg);
861
862                 /* Initial connects are allowed for clients with non-random
863                  * UUIDs when servers are in recovery.  Simply signal the
864                  * server that replay is complete and wait in REPLAY_WAIT. */
865                 if (msg_flags & MSG_CONNECT_RECOVERING) {
866                         CDEBUG(D_HA, "connect to %s during recovery\n",
867                                obd2cli_tgt(imp->imp_obd));
868                         IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS);
869                 } else {
870                         IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL);
871                         ptlrpc_activate_import(imp);
872                 }
873
874                 GOTO(finish, rc = 0);
875         }
876
877         /* Determine what recovery state to move the import to. */
878         if (MSG_CONNECT_RECONNECT & msg_flags) {
879                 memset(&old_hdl, 0, sizeof(old_hdl));
880                 if (!memcmp(&old_hdl, lustre_msg_get_handle(request->rq_repmsg),
881                             sizeof (old_hdl))) {
882                         LCONSOLE_WARN("Reconnect to %s (at %s) failed due "
883                                       "to bad handle "LPX64"\n",
884                                       obd2cli_tgt(imp->imp_obd),
885                                       imp->imp_connection->c_remote_uuid.uuid,
886                                       imp->imp_dlm_handle.cookie);
887                         GOTO(out, rc = -ENOTCONN);
888                 }
889
890                 if (memcmp(&imp->imp_remote_handle,
891                            lustre_msg_get_handle(request->rq_repmsg),
892                            sizeof(imp->imp_remote_handle))) {
893                         int level = msg_flags & MSG_CONNECT_RECOVERING ?
894                                 D_HA : D_WARNING;
895
896                         /* Bug 16611/14775: if the server handle has changed,
897                          * that means some sort of disconnection happened.
898                          * If the server is not in recovery, that also means it
899                          * already erased all of our state because of a previous
900                          * eviction. If it is in recovery, we are safe to
901                          * participate since we can reestablish all of our state
902                          * with the server again */
903                         if ((MSG_CONNECT_RECOVERING & msg_flags)) {
904                                 CDEBUG(level,"%s@%s changed server handle from "
905                                        LPX64" to "LPX64
906                                        " but is still in recovery\n",
907                                        obd2cli_tgt(imp->imp_obd),
908                                        imp->imp_connection->c_remote_uuid.uuid,
909                                        imp->imp_remote_handle.cookie,
910                                        lustre_msg_get_handle(
911                                        request->rq_repmsg)->cookie);
912                         } else {
913                                 LCONSOLE_WARN("Evicted from %s (at %s) "
914                                               "after server handle changed from "
915                                               LPX64" to "LPX64"\n",
916                                               obd2cli_tgt(imp->imp_obd),
917                                               imp->imp_connection-> \
918                                               c_remote_uuid.uuid,
919                                               imp->imp_remote_handle.cookie,
920                                               lustre_msg_get_handle(
921                                               request->rq_repmsg)->cookie);
922                         }
923
924
925                         imp->imp_remote_handle =
926                                      *lustre_msg_get_handle(request->rq_repmsg);
927
928                         if (!(MSG_CONNECT_RECOVERING & msg_flags)) {
929                                 IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
930                                 GOTO(finish, rc = 0);
931                         }
932
933                 } else {
934                         CDEBUG(D_HA, "reconnected to %s@%s after partition\n",
935                                obd2cli_tgt(imp->imp_obd),
936                                imp->imp_connection->c_remote_uuid.uuid);
937                 }
938
939                 if (imp->imp_invalid) {
940                         CDEBUG(D_HA, "%s: reconnected but import is invalid; "
941                                "marking evicted\n", imp->imp_obd->obd_name);
942                         IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
943                 } else if (MSG_CONNECT_RECOVERING & msg_flags) {
944                         CDEBUG(D_HA, "%s: reconnected to %s during replay\n",
945                                imp->imp_obd->obd_name,
946                                obd2cli_tgt(imp->imp_obd));
947
948                         spin_lock(&imp->imp_lock);
949                         imp->imp_resend_replay = 1;
950                         spin_unlock(&imp->imp_lock);
951
952                         IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
953                 } else {
954                         IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
955                 }
956         } else if ((MSG_CONNECT_RECOVERING & msg_flags) && !imp->imp_invalid) {
957                 LASSERT(imp->imp_replayable);
958                 imp->imp_remote_handle =
959                                 *lustre_msg_get_handle(request->rq_repmsg);
960                 imp->imp_last_replay_transno = 0;
961                 IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
962         } else {
963                 DEBUG_REQ(D_HA, request, "%s: evicting (reconnect/recover flags"
964                           " not set: %x)", imp->imp_obd->obd_name, msg_flags);
965                 imp->imp_remote_handle =
966                                 *lustre_msg_get_handle(request->rq_repmsg);
967                 IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
968         }
969
970         /* Sanity checks for a reconnected import. */
971         if (!(imp->imp_replayable) != !(msg_flags & MSG_CONNECT_REPLAYABLE)) {
972                 CERROR("imp_replayable flag does not match server "
973                        "after reconnect. We should LBUG right here.\n");
974         }
975
976         if (lustre_msg_get_last_committed(request->rq_repmsg) > 0 &&
977             lustre_msg_get_last_committed(request->rq_repmsg) <
978             aa->pcaa_peer_committed) {
979                 CERROR("%s went back in time (transno "LPD64
980                        " was previously committed, server now claims "LPD64
981                        ")!  See https://bugzilla.lustre.org/show_bug.cgi?"
982                        "id=9646\n",
983                        obd2cli_tgt(imp->imp_obd), aa->pcaa_peer_committed,
984                        lustre_msg_get_last_committed(request->rq_repmsg));
985         }
986
987 finish:
988         rc = ptlrpc_import_recovery_state_machine(imp);
989         if (rc != 0) {
990                 if (rc == -ENOTCONN) {
991                         CDEBUG(D_HA, "evicted/aborted by %s@%s during recovery; "
992                                "invalidating and reconnecting\n",
993                                obd2cli_tgt(imp->imp_obd),
994                                imp->imp_connection->c_remote_uuid.uuid);
995                         ptlrpc_connect_import(imp);
996                         RETURN(0);
997                 }
998         } else {
999
1000                 spin_lock(&imp->imp_lock);
1001                 cfs_list_del(&imp->imp_conn_current->oic_item);
1002                 cfs_list_add(&imp->imp_conn_current->oic_item,
1003                              &imp->imp_conn_list);
1004                 imp->imp_last_success_conn =
1005                         imp->imp_conn_current->oic_last_attempt;
1006
1007                 spin_unlock(&imp->imp_lock);
1008
1009                 if (!ocd->ocd_ibits_known &&
1010                     ocd->ocd_connect_flags & OBD_CONNECT_IBITS)
1011                         CERROR("Inodebits aware server returned zero compatible"
1012                                " bits?\n");
1013
1014                 if ((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
1015                     (ocd->ocd_version > LUSTRE_VERSION_CODE +
1016                                         LUSTRE_VERSION_OFFSET_WARN ||
1017                      ocd->ocd_version < LUSTRE_VERSION_CODE -
1018                                         LUSTRE_VERSION_OFFSET_WARN)) {
1019                         /* Sigh, some compilers do not like #ifdef in the middle
1020                            of macro arguments */
1021 #ifdef __KERNEL__
1022                         const char *older = "older. Consider upgrading server "
1023                                             "or downgrading client";
1024 #else
1025                         const char *older = "older. Consider recompiling this "
1026                                             "application";
1027 #endif
1028                         const char *newer = "newer than client version. "
1029                                             "Consider upgrading client";
1030
1031                         LCONSOLE_WARN("Server %s version (%d.%d.%d.%d) "
1032                                       "is much %s (%s)\n",
1033                                       obd2cli_tgt(imp->imp_obd),
1034                                       OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
1035                                       OBD_OCD_VERSION_MINOR(ocd->ocd_version),
1036                                       OBD_OCD_VERSION_PATCH(ocd->ocd_version),
1037                                       OBD_OCD_VERSION_FIX(ocd->ocd_version),
1038                                       ocd->ocd_version > LUSTRE_VERSION_CODE ?
1039                                       newer : older, LUSTRE_VERSION_STRING);
1040                 }
1041
1042 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0)
1043                 /* Check if server has LU-1252 fix applied to not always swab
1044                  * the IR MNE entries. Do this only once per connection.  This
1045                  * fixup is version-limited, because we don't want to carry the
1046                  * OBD_CONNECT_MNE_SWAB flag around forever, just so long as we
1047                  * need interop with unpatched 2.2 servers.  For newer servers,
1048                  * the client will do MNE swabbing only as needed.  LU-1644 */
1049                 if (unlikely((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
1050                              !(ocd->ocd_connect_flags & OBD_CONNECT_MNE_SWAB) &&
1051                              OBD_OCD_VERSION_MAJOR(ocd->ocd_version) == 2 &&
1052                              OBD_OCD_VERSION_MINOR(ocd->ocd_version) == 2 &&
1053                              OBD_OCD_VERSION_PATCH(ocd->ocd_version) < 55 &&
1054                              strcmp(imp->imp_obd->obd_type->typ_name,
1055                                     LUSTRE_MGC_NAME) == 0))
1056                         imp->imp_need_mne_swab = 1;
1057                 else /* clear if server was upgraded since last connect */
1058                         imp->imp_need_mne_swab = 0;
1059 #else
1060 #warning "LU-1644: Remove old OBD_CONNECT_MNE_SWAB fixup and imp_need_mne_swab"
1061 #endif
1062
1063                 if (ocd->ocd_connect_flags & OBD_CONNECT_CKSUM) {
1064                         /* We sent to the server ocd_cksum_types with bits set
1065                          * for algorithms we understand. The server masked off
1066                          * the checksum types it doesn't support */
1067                         if ((ocd->ocd_cksum_types &
1068                              cksum_types_supported_client()) == 0) {
1069                                 LCONSOLE_WARN("The negotiation of the checksum "
1070                                               "algorithm to use with server %s "
1071                                               "failed (%x/%x), disabling "
1072                                               "checksums\n",
1073                                               obd2cli_tgt(imp->imp_obd),
1074                                               ocd->ocd_cksum_types,
1075                                               cksum_types_supported_client());
1076                                 cli->cl_checksum = 0;
1077                                 cli->cl_supp_cksum_types = OBD_CKSUM_ADLER;
1078                         } else {
1079                                 cli->cl_supp_cksum_types = ocd->ocd_cksum_types;
1080                         }
1081                 } else {
1082                         /* The server does not support OBD_CONNECT_CKSUM.
1083                          * Enforce ADLER for backward compatibility */
1084                         cli->cl_supp_cksum_types = OBD_CKSUM_ADLER;
1085                 }
1086                 cli->cl_cksum_type = cksum_type_select(cli->cl_supp_cksum_types);
1087
1088                 if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
1089                         cli->cl_max_pages_per_rpc =
1090                                 min(ocd->ocd_brw_size >> CFS_PAGE_SHIFT,
1091                                     cli->cl_max_pages_per_rpc);
1092                 else if (imp->imp_connect_op == MDS_CONNECT ||
1093                          imp->imp_connect_op == MGS_CONNECT)
1094                         cli->cl_max_pages_per_rpc = 1;
1095
1096                 /* Reset ns_connect_flags only for the initial connect. It
1097                  * might be changed while the FS is in use, and resetting it
1098                  * on reconnect would lose user settings made earlier, such
1099                  * as disabling lru_resize, etc. */
1100                 if (old_connect_flags != exp_connect_flags(exp) ||
1101                     aa->pcaa_initial_connect) {
1102                         CDEBUG(D_HA, "%s: Resetting ns_connect_flags to server "
1103                                "flags: "LPX64"\n", imp->imp_obd->obd_name,
1104                               ocd->ocd_connect_flags);
1105                         imp->imp_obd->obd_namespace->ns_connect_flags =
1106                                 ocd->ocd_connect_flags;
1107                         imp->imp_obd->obd_namespace->ns_orig_connect_flags =
1108                                 ocd->ocd_connect_flags;
1109                 }
1110
1111                 if ((ocd->ocd_connect_flags & OBD_CONNECT_AT) &&
1112                     (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
1113                         /* We need a per-message support flag, because
1114                            a. we don't know if the incoming connect reply
1115                               supports AT or not (in reply_in_callback)
1116                               until we unpack it.
1117                            b. a failed-over server means export and flags are gone
1118                               (in ptlrpc_send_reply).
1119                            Can only be set when we know AT is supported at
1120                            both ends */
1121                         imp->imp_msghdr_flags |= MSGHDR_AT_SUPPORT;
1122                 else
1123                         imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
1124
1125                 if ((ocd->ocd_connect_flags & OBD_CONNECT_FULL20) &&
1126                     (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
1127                         imp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18;
1128                 else
1129                         imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18;
1130
1131                 LASSERT((cli->cl_max_pages_per_rpc <= PTLRPC_MAX_BRW_PAGES) &&
1132                         (cli->cl_max_pages_per_rpc > 0));
1133         }
1134
1135 out:
1136         if (rc != 0) {
1137                 IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
1138                 if (rc == -EACCES) {
1139                         /*
1140                          * Give up trying to reconnect:
1141                          * EACCES means the client has no permission to connect.
1142                          */
1143                         imp->imp_obd->obd_no_recov = 1;
1144                         ptlrpc_deactivate_import(imp);
1145                 }
1146
1147                 if (rc == -EPROTO) {
1148                         struct obd_connect_data *ocd;
1149
1150                         /* reply message might not be ready */
1151                         if (request->rq_repmsg == NULL)
1152                                 RETURN(-EPROTO);
1153
1154                         ocd = req_capsule_server_get(&request->rq_pill,
1155                                                      &RMF_CONNECT_DATA);
1156                         if (ocd &&
1157                             (ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
1158                             (ocd->ocd_version != LUSTRE_VERSION_CODE)) {
1159                            /* Actually servers are only supposed to refuse
1160                               connection from liblustre clients, so we should
1161                               never see this from VFS context */
1162                                 LCONSOLE_ERROR_MSG(0x16a, "Server %s version "
1163                                         "(%d.%d.%d.%d)"
1164                                         " refused connection from this client "
1165                                         "with an incompatible version (%s).  "
1166                                         "Client must be recompiled\n",
1167                                         obd2cli_tgt(imp->imp_obd),
1168                                         OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
1169                                         OBD_OCD_VERSION_MINOR(ocd->ocd_version),
1170                                         OBD_OCD_VERSION_PATCH(ocd->ocd_version),
1171                                         OBD_OCD_VERSION_FIX(ocd->ocd_version),
1172                                         LUSTRE_VERSION_STRING);
1173                                 ptlrpc_deactivate_import(imp);
1174                                 IMPORT_SET_STATE(imp, LUSTRE_IMP_CLOSED);
1175                         }
1176                         RETURN(-EPROTO);
1177                 }
1178
1179                 ptlrpc_maybe_ping_import_soon(imp);
1180
1181                 CDEBUG(D_HA, "recovery of %s on %s failed (%d)\n",
1182                        obd2cli_tgt(imp->imp_obd),
1183                        (char *)imp->imp_connection->c_remote_uuid.uuid, rc);
1184         }
1185
1186         cfs_waitq_broadcast(&imp->imp_recovery_waitq);
1187         RETURN(rc);
1188 }
1189
1190 /**
1191  * interpret callback for "completed replay" RPCs.
1192  * \see signal_completed_replay
1193  */
1194 static int completed_replay_interpret(const struct lu_env *env,
1195                                       struct ptlrpc_request *req,
1196                                       void * data, int rc)
1197 {
1198         ENTRY;
1199         cfs_atomic_dec(&req->rq_import->imp_replay_inflight);
1200         if (req->rq_status == 0 &&
1201             !req->rq_import->imp_vbr_failed) {
1202                 ptlrpc_import_recovery_state_machine(req->rq_import);
1203         } else {
1204                 if (req->rq_import->imp_vbr_failed) {
1205                         CDEBUG(D_WARNING,
1206                                "%s: version recovery fails, reconnecting\n",
1207                                req->rq_import->imp_obd->obd_name);
1208                 } else {
1209                         CDEBUG(D_HA, "%s: LAST_REPLAY message error: %d, "
1210                                      "reconnecting\n",
1211                                req->rq_import->imp_obd->obd_name,
1212                                req->rq_status);
1213                 }
1214                 ptlrpc_connect_import(req->rq_import);
1215         }
1216
1217         RETURN(0);
1218 }
1219
1220 /**
1221  * Let server know that we have no requests to replay anymore.
1222  * Achieved by just sending a PING request
1223  */
1224 static int signal_completed_replay(struct obd_import *imp)
1225 {
1226         struct ptlrpc_request *req;
1227         ENTRY;
1228
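             /* Fault-injection hook for recovery testing: skip sending the
              * final replay barrier. */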
1229         if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_FINISH_REPLAY)))
1230                 RETURN(0);
1231
1232         LASSERT(cfs_atomic_read(&imp->imp_replay_inflight) == 0);
1233         cfs_atomic_inc(&imp->imp_replay_inflight);
1234
1235         req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING, LUSTRE_OBD_VERSION,
1236                                         OBD_PING);
1237         if (req == NULL) {
1238                 cfs_atomic_dec(&imp->imp_replay_inflight);
1239                 RETURN(-ENOMEM);
1240         }
1241
1242         ptlrpc_request_set_replen(req);
1243         req->rq_send_state = LUSTRE_IMP_REPLAY_WAIT;
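             /* Mark the PING so the server sees both request and lock replay
              * as finished on this client. */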
1244         lustre_msg_add_flags(req->rq_reqmsg,
1245                              MSG_LOCK_REPLAY_DONE | MSG_REQ_REPLAY_DONE);
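             /* Without adaptive timeouts there is no server feedback to grow
              * the timeout, so allow extra time for this final barrier. */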
1246         if (AT_OFF)
1247                 req->rq_timeout *= 3;
1248         req->rq_interpret_reply = completed_replay_interpret;
1249
1250         ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
1251         RETURN(0);
1252 }
1253
1254 #ifdef __KERNEL__
1255 /**
1256  * In kernel code all import invalidation happens in its own
1257  * separate thread, so that any application that happened to hit
1258  * the problem can still be killed or otherwise continue.
1259  */
1260 static int ptlrpc_invalidate_import_thread(void *data)
1261 {
1262         struct obd_import *imp = data;
1263
1264         ENTRY;
1265
1266         cfs_daemonize_ctxt("ll_imp_inval");
1267
1268         CDEBUG(D_HA, "thread invalidate import %s to %s@%s\n",
1269                imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
1270                imp->imp_connection->c_remote_uuid.uuid);
1271
1272         ptlrpc_invalidate_import(imp);
1273
1274         if (obd_dump_on_eviction) {
1275                 CERROR("dump the log upon eviction\n");
1276                 libcfs_debug_dumplog();
1277         }
1278
1279         IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
1280         ptlrpc_import_recovery_state_machine(imp);
1281
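             /* Drop the reference taken in ptlrpc_import_recovery_state_machine()
              * before this thread was spawned. */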
1282         class_import_put(imp);
1283         RETURN(0);
1284 }
1285 #endif
1286
1287 /**
1288  * This is the state machine for client-side recovery on import.
1289  *
1290  * Typically there are two possible paths. If we reach a server that is not
1291  * in recovery, we just enter the IMP_EVICTED state, invalidate our import
1292  * state and reconnect from scratch.
1293  * If we reach a server that is in recovery, we enter the IMP_REPLAY import
1294  * state and go through our list of requests to replay, sending them to the
1295  * server one by one.
1296  * After sending all requests from the list we change the import state to
1297  * IMP_REPLAY_LOCKS and re-request from the server all the locks we believe
1298  * we hold as well as those not yet granted, then wait for the grants.
1299  * After that we send a special "replay completed" request and change the
1300  * import state to IMP_REPLAY_WAIT.
1301  * Upon receiving the reply to that RPC we enter the IMP_RECOVER state and
1302  * resend all requests from the sending list.
1303  * Finally we promote the import to the FULL state and send all delayed
1304  * requests; the import is then fully operational.
1305  *
1306  */
1307 int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
1308 {
1309         int rc = 0;
1310         int inflight;
1311         char *target_start;
1312         int target_len;
1313
1314         ENTRY;
1315         if (imp->imp_state == LUSTRE_IMP_EVICTED) {
1316                 deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
1317                           &target_start, &target_len);
1318                 /* Don't care about MGC eviction */
1319                 if (strcmp(imp->imp_obd->obd_type->typ_name,
1320                            LUSTRE_MGC_NAME) != 0) {
1321                         LCONSOLE_ERROR_MSG(0x167, "%s: This client was evicted "
1322                                            "by %.*s; in progress operations "
1323                                            "using this service will fail.\n",
1324                                            imp->imp_obd->obd_name, target_len,
1325                                            target_start);
1326                 }
1327                 CDEBUG(D_HA, "evicted from %s@%s; invalidating\n",
1328                        obd2cli_tgt(imp->imp_obd),
1329                        imp->imp_connection->c_remote_uuid.uuid);
1330                 /* reset vbr_failed flag upon eviction */
1331                 spin_lock(&imp->imp_lock);
1332                 imp->imp_vbr_failed = 0;
1333                 spin_unlock(&imp->imp_lock);
1334
1335 #ifdef __KERNEL__
1336                 /* bug 17802:  XXX client_disconnect_export vs connect request
1337                  * race.  If the client is evicted here, we would start the
1338                  * invalidate thread without a reference to the import, and
1339                  * the import could be freed at the same time. */
1340                 class_import_get(imp);
1341                 rc = cfs_create_thread(ptlrpc_invalidate_import_thread, imp,
1342                                        CFS_DAEMON_FLAGS);
1343                 if (rc < 0) {
1344                         class_import_put(imp);
1345                         CERROR("error starting invalidate thread: %d\n", rc);
1346                 } else {
1347                         rc = 0;
1348                 }
1349                 RETURN(rc);
1350 #else
1351                 ptlrpc_invalidate_import(imp);
1352
1353                 IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
1354 #endif
1355         }
1356
1357         if (imp->imp_state == LUSTRE_IMP_REPLAY) {
1358                 CDEBUG(D_HA, "replay requested by %s\n",
1359                        obd2cli_tgt(imp->imp_obd));
1360                 rc = ptlrpc_replay_next(imp, &inflight);
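                     /* Once no replay requests remain in flight, move on to
                      * lock replay. */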
1361                 if (inflight == 0 &&
1362                     cfs_atomic_read(&imp->imp_replay_inflight) == 0) {
1363                         IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS);
1364                         rc = ldlm_replay_locks(imp);
1365                         if (rc)
1366                                 GOTO(out, rc);
1367                 }
1368                 rc = 0;
1369         }
1370
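             /* When lock replay has drained, send the final "replay completed"
              * barrier to the server. */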
1371         if (imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS) {
1372                 if (cfs_atomic_read(&imp->imp_replay_inflight) == 0) {
1373                         IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_WAIT);
1374                         rc = signal_completed_replay(imp);
1375                         if (rc)
1376                                 GOTO(out, rc);
1377                 }
1378
1379         }
1380
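             /* The final barrier has been answered once imp_replay_inflight
              * drops back to zero; only then move on to RECOVER. */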
1381         if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT) {
1382                 if (cfs_atomic_read(&imp->imp_replay_inflight) == 0) {
1383                         IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
1384                 }
1385         }
1386
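             /* The connection is usable again: resend outstanding requests
              * from the sending list, then declare the import FULL. */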
1387         if (imp->imp_state == LUSTRE_IMP_RECOVER) {
1388                 CDEBUG(D_HA, "reconnected to %s@%s\n",
1389                        obd2cli_tgt(imp->imp_obd),
1390                        imp->imp_connection->c_remote_uuid.uuid);
1391
1392                 rc = ptlrpc_resend(imp);
1393                 if (rc)
1394                         GOTO(out, rc);
1395                 IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL);
1396                 ptlrpc_activate_import(imp);
1397
1398                 deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
1399                           &target_start, &target_len);
1400                 LCONSOLE_INFO("%s: Connection restored to %.*s (at %s)\n",
1401                               imp->imp_obd->obd_name,
1402                               target_len, target_start,
1403                               libcfs_nid2str(imp->imp_connection->c_peer.nid));
1404         }
1405
1406         if (imp->imp_state == LUSTRE_IMP_FULL) {
1407                 cfs_waitq_broadcast(&imp->imp_recovery_waitq);
1408                 ptlrpc_wake_delayed(imp);
1409         }
1410
1411 out:
1412         RETURN(rc);
1413 }
1414
1415 int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
1416 {
1417         struct ptlrpc_request *req;
1418         int rq_opc, rc = 0;
1419         int nowait = imp->imp_obd->obd_force;
1420         ENTRY;
1421
1422         if (nowait)
1423                 GOTO(set_state, rc);
1424
1425         switch (imp->imp_connect_op) {
1426         case OST_CONNECT: rq_opc = OST_DISCONNECT; break;
1427         case MDS_CONNECT: rq_opc = MDS_DISCONNECT; break;
1428         case MGS_CONNECT: rq_opc = MGS_DISCONNECT; break;
1429         default:
1430                 CERROR("don't know how to disconnect from %s (connect_op %d)\n",
1431                        obd2cli_tgt(imp->imp_obd), imp->imp_connect_op);
1432                 RETURN(-EINVAL);
1433         }
1434
1435         if (ptlrpc_import_in_recovery(imp)) {
1436                 struct l_wait_info lwi;
1437                 cfs_duration_t timeout;
1438
1439
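                     /* Bound how long we wait for recovery to finish: with AT
                      * off fall back to a static timeout based on obd_timeout,
                      * otherwise use the current adaptive service estimate for
                      * this portal. */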
1440                 if (AT_OFF) {
1441                         if (imp->imp_server_timeout)
1442                                 timeout = cfs_time_seconds(obd_timeout / 2);
1443                         else
1444                                 timeout = cfs_time_seconds(obd_timeout);
1445                 } else {
1446                         int idx = import_at_get_index(imp,
1447                                 imp->imp_client->cli_request_portal);
1448                         timeout = cfs_time_seconds(
1449                                 at_get(&imp->imp_at.iat_service_estimate[idx]));
1450                 }
1451
1452                 lwi = LWI_TIMEOUT_INTR(cfs_timeout_cap(timeout),
1453                                        back_to_sleep, LWI_ON_SIGNAL_NOOP, NULL);
1454                 rc = l_wait_event(imp->imp_recovery_waitq,
1455                                   !ptlrpc_import_in_recovery(imp), &lwi);
1456
1457         }
1458
1459         spin_lock(&imp->imp_lock);
1460         if (imp->imp_state != LUSTRE_IMP_FULL)
1461                 GOTO(out, 0);
1462
1463         spin_unlock(&imp->imp_lock);
1464
1465         req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_DISCONNECT,
1466                                         LUSTRE_OBD_VERSION, rq_opc);
1467         if (req) {
1468                 /* We are disconnecting; do not retry the DISCONNECT RPC if
1469                  * it fails.  We can get this far with the server down if the
1470                  * client doesn't yet know the server is gone. */
1471                 req->rq_no_resend = 1;
1472
1473                 /* We want client umounts to happen quickly, no matter the
1474                    server state... */
1475                 req->rq_timeout = min_t(int, req->rq_timeout,
1476                                         INITIAL_CONNECT_TIMEOUT);
1477
1478                 IMPORT_SET_STATE(imp, LUSTRE_IMP_CONNECTING);
1479                 req->rq_send_state = LUSTRE_IMP_CONNECTING;
1480                 ptlrpc_request_set_replen(req);
1481                 rc = ptlrpc_queue_wait(req);
1482                 ptlrpc_req_finished(req);
1483         }
1484
1485 set_state:
1486         spin_lock(&imp->imp_lock);
1487 out:
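             /* imp_lock is held here: either taken just above at set_state or
              * still held from the state check that jumped directly to out. */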
1488         if (noclose)
1489                 IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
1490         else
1491                 IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
1492         memset(&imp->imp_remote_handle, 0, sizeof(imp->imp_remote_handle));
1493         spin_unlock(&imp->imp_lock);
1494
1495         RETURN(rc);
1496 }
1497 EXPORT_SYMBOL(ptlrpc_disconnect_import);
1498
1499 void ptlrpc_cleanup_imp(struct obd_import *imp)
1500 {
1501         ENTRY;
1502
1503         spin_lock(&imp->imp_lock);
1504         IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
1505         imp->imp_generation++;
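             /* Everything still in flight belongs to the old generation and
              * is aborted below. */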
1506         spin_unlock(&imp->imp_lock);
1507         ptlrpc_abort_inflight(imp);
1508
1509         EXIT;
1510 }
1511 EXPORT_SYMBOL(ptlrpc_cleanup_imp);
1512
1513 /* Adaptive Timeout utils */
1514 extern unsigned int at_min, at_max, at_history;
1515
1516 /* Bin measurements into timeslices using AT_BINS bins.  This gives us the
1517    maximum over the last binlimit*AT_BINS secs without storing every sample,
1518    while still smoothing out the return to normal after a slow response.
1519    (E.g. remember the maximum latency in each minute of the last 4 minutes.) */
1520 int at_measured(struct adaptive_timeout *at, unsigned int val)
1521 {
1522         unsigned int old;
1523         time_t now = cfs_time_current_sec();
1524         time_t binlimit = max_t(time_t, at_history / AT_BINS, 1);
1525
1526         LASSERT(at);
             old = at->at_current;
1527         CDEBUG(D_OTHER, "add %u to %p time=%lu v=%u (%u %u %u %u)\n",
1528                val, at, now - at->at_binstart, at->at_current,
1529                at->at_hist[0], at->at_hist[1], at->at_hist[2], at->at_hist[3]);
1530
1531         if (val == 0)
1532                 /* 0's don't count, because we never want our timeout to
1533                    drop to 0, and because 0 could mean an error */
1534                 return 0;
1535
1536         spin_lock(&at->at_lock);
1537
1538         if (unlikely(at->at_binstart == 0)) {
1539                 /* Special case to remove default from history */
1540                 at->at_current = val;
1541                 at->at_worst_ever = val;
1542                 at->at_worst_time = now;
1543                 at->at_hist[0] = val;
1544                 at->at_binstart = now;
1545         } else if (now - at->at_binstart < binlimit) {
1546                 /* in bin 0 */
1547                 at->at_hist[0] = max(val, at->at_hist[0]);
1548                 at->at_current = max(val, at->at_current);
1549         } else {
1550                 int i, shift;
1551                 unsigned int maxv = val;
1552                 /* move bins over */
1553                 shift = (now - at->at_binstart) / binlimit;
1554                 LASSERT(shift > 0);
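                     /* e.g. if two whole binlimit periods have elapsed, each
                      * recorded maximum moves two slots toward the old end of
                      * the array and the two oldest bins are dropped. */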
1555                 for(i = AT_BINS - 1; i >= 0; i--) {
1556                         if (i >= shift) {
1557                                 at->at_hist[i] = at->at_hist[i - shift];
1558                                 maxv = max(maxv, at->at_hist[i]);
1559                         } else {
1560                                 at->at_hist[i] = 0;
1561                         }
1562                 }
1563                 at->at_hist[0] = val;
1564                 at->at_current = maxv;
1565                 at->at_binstart += shift * binlimit;
1566         }
1567
1568         if (at->at_current > at->at_worst_ever) {
1569                 at->at_worst_ever = at->at_current;
1570                 at->at_worst_time = now;
1571         }
1572
1573         if (at->at_flags & AT_FLG_NOHIST)
1574                 /* Only keep the last reported val; the rest of the history
1575                    is kept for proc display only */
1576                 at->at_current = val;
1577
1578         if (at_max > 0)
1579                 at->at_current = min(at->at_current, at_max);
1580         at->at_current = max(at->at_current, at_min);
1581
1582         if (at->at_current != old)
1583                 CDEBUG(D_OTHER, "AT %p change: old=%u new=%u delta=%d "
1584                        "(val=%u) hist %u %u %u %u\n", at,
1585                        old, at->at_current, at->at_current - old, val,
1586                        at->at_hist[0], at->at_hist[1], at->at_hist[2],
1587                        at->at_hist[3]);
1588
1589         /* if we changed, report the old value */
1590         old = (at->at_current != old) ? old : 0;
1591
1592         spin_unlock(&at->at_lock);
1593         return old;
1594 }
1595
1596 /* Find the imp_at index for a given portal; assign if space available */
1597 int import_at_get_index(struct obd_import *imp, int portal)
1598 {
1599         struct imp_at *at = &imp->imp_at;
1600         int i;
1601
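             /* First pass is lockless: if the portal is already registered we
              * can return its index without taking imp_lock. */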
1602         for (i = 0; i < IMP_AT_MAX_PORTALS; i++) {
1603                 if (at->iat_portal[i] == portal)
1604                         return i;
1605                 if (at->iat_portal[i] == 0)
1606                         /* unused */
1607                         break;
1608         }
1609
1610         /* Not found in list, add it under a lock */
1611         spin_lock(&imp->imp_lock);
1612
1613         /* Re-check for this portal, or the first unused slot, under the lock */
1614         for (; i < IMP_AT_MAX_PORTALS; i++) {
1615                 if (at->iat_portal[i] == portal)
1616                         goto out;
1617                 if (at->iat_portal[i] == 0)
1618                         /* unused */
1619                         break;
1620         }
1621
1622         /* Ran out of portal slots; this should never happen */
1623         LASSERT(i < IMP_AT_MAX_PORTALS);
1624
1625         at->iat_portal[i] = portal;
1626 out:
1627         spin_unlock(&imp->imp_lock);
1628         return i;
1629 }