/* lustre/ptlrpc/recover.c — from fs/lustre-release.git (Whamcloud gitweb);
 * change context: LU-5092 nodemap: save nodemaps to targets for caching */
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2015, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ptlrpc/recover.c
37  *
38  * Author: Mike Shaver <shaver@clusterfs.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_RPC
42 #include <libcfs/libcfs.h>
43 #include <obd_support.h>
44 #include <lustre_ha.h>
45 #include <lustre_net.h>
46 #include <lustre_import.h>
47 #include <lustre_export.h>
48 #include <obd.h>
49 #include <obd_class.h>
50 #include <libcfs/list.h>
51
52 #include "ptlrpc_internal.h"
53
54 /**
55  * Start recovery on disconnected import.
56  * This is done by just attempting a connect
57  */
58 void ptlrpc_initiate_recovery(struct obd_import *imp)
59 {
60         ENTRY;
61
62         CDEBUG(D_HA, "%s: starting recovery\n", obd2cli_tgt(imp->imp_obd));
63         ptlrpc_connect_import(imp);
64
65         EXIT;
66 }
67
/**
 * Identify what request from replay list needs to be replayed next
 * (based on what we have already replayed) and send it to server.
 *
 * Committed open requests on \a imp_committed_list are replayed first
 * (tracked with \a imp_replay_cursor to avoid rescanning the list),
 * then uncommitted requests on \a imp_replay_list in transno order.
 *
 * \param[in]  imp      import being recovered
 * \param[out] inflight set to 1 if a replay request was sent, 0 otherwise
 *
 * \retval 0          on success (including "nothing left to replay")
 * \retval negative   error from ptlrpc_replay_req()
 */
int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
{
        int rc = 0;
        struct list_head *tmp, *pos;
        struct ptlrpc_request *req = NULL;
        __u64 last_transno;
        ENTRY;

        *inflight = 0;

        /* It might have committed some after we last spoke, so make sure we
         * get rid of them now.
         */
        spin_lock(&imp->imp_lock);
        imp->imp_last_transno_checked = 0;
        ptlrpc_free_committed(imp);
        /* Everything at or below last_transno has already been replayed. */
        last_transno = imp->imp_last_replay_transno;
        spin_unlock(&imp->imp_lock);

        CDEBUG(D_HA, "import %p from %s committed "LPU64" last "LPU64"\n",
               imp, obd2cli_tgt(imp->imp_obd),
               imp->imp_peer_committed_transno, last_transno);

        /* Do I need to hold a lock across this iteration?  We shouldn't be
         * racing with any additions to the list, because we're in recovery
         * and are therefore not processing additional requests to add.  Calls
         * to ptlrpc_free_committed might commit requests, but nothing "newer"
         * than the one we're replaying (it can't be committed until it's
         * replayed, and we're doing that here).  l_f_e_safe protects against
         * problems with the current request being committed, in the unlikely
         * event of that race.  So, in conclusion, I think that it's safe to
         * perform this list-walk without the imp_lock held.
         *
         * But, the {mdc,osc}_replay_open callbacks both iterate
         * request lists, and have comments saying they assume the
         * imp_lock is being held by ptlrpc_replay, but it's not. it's
         * just a little race...
         */

        /* Replay all the committed open requests on committed_list first */
        if (!list_empty(&imp->imp_committed_list)) {
                tmp = imp->imp_committed_list.prev;
                req = list_entry(tmp, struct ptlrpc_request,
                                     rq_replay_list);

                /* The last request on committed_list hasn't been replayed */
                if (req->rq_transno > last_transno) {
                        /* Since the imp_committed_list is immutable before
                         * all of it's requests being replayed, it's safe to
                         * use a cursor to accelerate the search */
                        /* Only advance past the cursor's request when it has
                         * already been replayed; on a resend-replay the
                         * cursor's own request must be sent again. */
                        if (!imp->imp_resend_replay ||
                            imp->imp_replay_cursor == &imp->imp_committed_list)
                                imp->imp_replay_cursor =
                                        imp->imp_replay_cursor->next;

                        /* Walk forward to the first not-yet-replayed
                         * (transno > last_transno) request. */
                        while (imp->imp_replay_cursor !=
                               &imp->imp_committed_list) {
                                req = list_entry(imp->imp_replay_cursor,
                                                     struct ptlrpc_request,
                                                     rq_replay_list);
                                if (req->rq_transno > last_transno)
                                        break;

                                req = NULL;
                                imp->imp_replay_cursor =
                                        imp->imp_replay_cursor->next;
                        }
                } else {
                        /* All requests on committed_list have been replayed */
                        imp->imp_replay_cursor = &imp->imp_committed_list;
                        req = NULL;
                }
        }

        /* All the requests in committed list have been replayed, let's replay
         * the imp_replay_list */
        if (req == NULL) {
                list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
                        req = list_entry(tmp, struct ptlrpc_request,
                                             rq_replay_list);

                        if (req->rq_transno > last_transno)
                                break;
                        req = NULL;
                }
        }

        /* If need to resend the last sent transno (because a reconnect
         * has occurred), then stop on the matching req and send it again.
         * If, however, the last sent transno has been committed then we
         * continue replay from the next request. */
        if (req != NULL && imp->imp_resend_replay)
                lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);

        spin_lock(&imp->imp_lock);
        /* The resend replay request may have been removed from the
         * unreplied list. */
        if (req != NULL && imp->imp_resend_replay &&
            list_empty(&req->rq_unreplied_list))
                ptlrpc_add_unreplied(req);

        imp->imp_resend_replay = 0;
        spin_unlock(&imp->imp_lock);

        if (req != NULL) {
                /* The request should have been added back in unreplied list
                 * by ptlrpc_prepare_replay(). */
                LASSERT(!list_empty(&req->rq_unreplied_list));

                rc = ptlrpc_replay_req(req);
                if (rc) {
                        CERROR("recovery replay error %d for req "
                               LPU64"\n", rc, req->rq_xid);
                        RETURN(rc);
                }
                *inflight = 1;
        }
        RETURN(rc);
}
191
192 /**
193  * Schedule resending of request on sending_list. This is done after
194  * we completed replaying of requests and locks.
195  */
196 int ptlrpc_resend(struct obd_import *imp)
197 {
198         struct ptlrpc_request *req, *next;
199
200         ENTRY;
201
202         /* As long as we're in recovery, nothing should be added to the sending
203          * list, so we don't need to hold the lock during this iteration and
204          * resend process.
205          */
206         /* Well... what if lctl recover is called twice at the same time?
207          */
208         spin_lock(&imp->imp_lock);
209         if (imp->imp_state != LUSTRE_IMP_RECOVER) {
210                 spin_unlock(&imp->imp_lock);
211                 RETURN(-1);
212         }
213
214         list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) {
215                 LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON,
216                          "req %p bad\n", req);
217                 LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
218
219                 /* If the request is allowed to be sent during replay and it
220                  * is not timeout yet, then it does not need to be resent. */
221                 if (!ptlrpc_no_resend(req) &&
222                     (req->rq_timedout || !req->rq_allow_replay))
223                         ptlrpc_resend_req(req);
224         }
225         spin_unlock(&imp->imp_lock);
226
227         RETURN(0);
228 }
229
230 /**
231  * Go through all requests in delayed list and wake their threads
232  * for resending
233  */
234 void ptlrpc_wake_delayed(struct obd_import *imp)
235 {
236         struct list_head *tmp, *pos;
237         struct ptlrpc_request *req;
238
239         spin_lock(&imp->imp_lock);
240         list_for_each_safe(tmp, pos, &imp->imp_delayed_list) {
241                 req = list_entry(tmp, struct ptlrpc_request, rq_list);
242
243                 DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set);
244                 ptlrpc_client_wake_req(req);
245         }
246         spin_unlock(&imp->imp_lock);
247 }
248
249 void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
250 {
251         struct obd_import *imp = failed_req->rq_import;
252         ENTRY;
253
254         CDEBUG(D_HA, "import %s of %s@%s abruptly disconnected: reconnecting\n",
255                imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
256                imp->imp_connection->c_remote_uuid.uuid);
257
258         if (ptlrpc_set_import_discon(imp,
259                               lustre_msg_get_conn_cnt(failed_req->rq_reqmsg))) {
260                 if (!imp->imp_replayable) {
261                         CDEBUG(D_HA, "import %s@%s for %s not replayable, "
262                                "auto-deactivating\n",
263                                obd2cli_tgt(imp->imp_obd),
264                                imp->imp_connection->c_remote_uuid.uuid,
265                                imp->imp_obd->obd_name);
266                         ptlrpc_deactivate_import(imp);
267                 }
268                 /* to control recovery via lctl {disable|enable}_recovery */
269                 if (imp->imp_deactive == 0)
270                         ptlrpc_connect_import(imp);
271         }
272
273         /* Wait for recovery to complete and resend. If evicted, then
274            this request will be errored out later.*/
275         spin_lock(&failed_req->rq_lock);
276         if (!failed_req->rq_no_resend)
277                 failed_req->rq_resend = 1;
278         spin_unlock(&failed_req->rq_lock);
279
280         EXIT;
281 }
282
283 /**
284  * Administratively active/deactive a client. 
285  * This should only be called by the ioctl interface, currently
286  *  - the lctl deactivate and activate commands
287  *  - echo 0/1 >> /proc/osc/XXX/active
288  *  - client umount -f (ll_umount_begin)
289  */
290 int ptlrpc_set_import_active(struct obd_import *imp, int active)
291 {
292         struct obd_device *obd = imp->imp_obd;
293         int rc = 0;
294
295         ENTRY;
296         LASSERT(obd);
297
298         /* When deactivating, mark import invalid, and abort in-flight
299          * requests. */
300         if (!active) {
301                 LCONSOLE_WARN("setting import %s INACTIVE by administrator "
302                               "request\n", obd2cli_tgt(imp->imp_obd));
303
304                 /* set before invalidate to avoid messages about imp_inval
305                  * set without imp_deactive in ptlrpc_import_delay_req */
306                 spin_lock(&imp->imp_lock);
307                 imp->imp_deactive = 1;
308                 spin_unlock(&imp->imp_lock);
309
310                 obd_import_event(imp->imp_obd, imp, IMP_EVENT_DEACTIVATE);
311
312                 ptlrpc_invalidate_import(imp);
313         }
314
315         /* When activating, mark import valid, and attempt recovery */
316         if (active) {
317                 CDEBUG(D_HA, "setting import %s VALID\n",
318                        obd2cli_tgt(imp->imp_obd));
319
320                 spin_lock(&imp->imp_lock);
321                 imp->imp_deactive = 0;
322                 spin_unlock(&imp->imp_lock);
323                 obd_import_event(imp->imp_obd, imp, IMP_EVENT_ACTIVATE);
324
325                 rc = ptlrpc_recover_import(imp, NULL, 0);
326         }
327
328         RETURN(rc);
329 }
330 EXPORT_SYMBOL(ptlrpc_set_import_active);
331
332 /* Attempt to reconnect an import */
333 int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async)
334 {
335         int rc = 0;
336         ENTRY;
337
338         spin_lock(&imp->imp_lock);
339         if (imp->imp_state == LUSTRE_IMP_NEW || imp->imp_deactive ||
340             atomic_read(&imp->imp_inval_count))
341                 rc = -EINVAL;
342         spin_unlock(&imp->imp_lock);
343         if (rc)
344                 GOTO(out, rc);
345
346         /* force import to be disconnected. */
347         ptlrpc_set_import_discon(imp, 0);
348
349         if (new_uuid) {
350                 struct obd_uuid uuid;
351
352                 /* intruct import to use new uuid */
353                 obd_str2uuid(&uuid, new_uuid);
354                 rc = import_set_conn_priority(imp, &uuid);
355                 if (rc)
356                         GOTO(out, rc);
357         }
358
359         /* Check if reconnect is already in progress */
360         spin_lock(&imp->imp_lock);
361         if (imp->imp_state != LUSTRE_IMP_DISCON) {
362                 imp->imp_force_verify = 1;
363                 rc = -EALREADY;
364         }
365         spin_unlock(&imp->imp_lock);
366         if (rc)
367                 GOTO(out, rc);
368
369         rc = ptlrpc_connect_import(imp);
370         if (rc)
371                 GOTO(out, rc);
372
373         if (!async) {
374                 struct l_wait_info lwi;
375                 int secs = cfs_time_seconds(obd_timeout);
376
377                 CDEBUG(D_HA, "%s: recovery started, waiting %u seconds\n",
378                        obd2cli_tgt(imp->imp_obd), secs);
379
380                 lwi = LWI_TIMEOUT(secs, NULL, NULL);
381                 rc = l_wait_event(imp->imp_recovery_waitq,
382                                   !ptlrpc_import_in_recovery(imp), &lwi);
383                 CDEBUG(D_HA, "%s: recovery finished\n",
384                        obd2cli_tgt(imp->imp_obd));
385         }
386         EXIT;
387
388 out:
389         return rc;
390 }
391 EXPORT_SYMBOL(ptlrpc_recover_import);
392
393 int ptlrpc_import_in_recovery(struct obd_import *imp)
394 {
395         int in_recovery = 1;
396
397         spin_lock(&imp->imp_lock);
398         if (imp->imp_state == LUSTRE_IMP_FULL ||
399             imp->imp_state == LUSTRE_IMP_CLOSED ||
400             imp->imp_state == LUSTRE_IMP_DISCON ||
401             imp->imp_obd->obd_no_recov)
402                 in_recovery = 0;
403         spin_unlock(&imp->imp_lock);
404
405         return in_recovery;
406 }