/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/recover.c
 *
 * Author: Mike Shaver <shaver@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_RPC
#include <linux/list.h>
#include <libcfs/libcfs.h>
#include <obd_support.h>
#include <lustre_ha.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_export.h>
#include <obd.h>
#include <obd_class.h>

#include "ptlrpc_internal.h"

/**
 * Start recovery on a disconnected import.
 * This is done by just attempting a connect.
 */
void ptlrpc_initiate_recovery(struct obd_import *imp)
{
        ENTRY;

        CDEBUG(D_HA, "%s: starting recovery\n", obd2cli_tgt(imp->imp_obd));
        ptlrpc_connect_import(imp);

        EXIT;
}

/**
 * Identify which request from the replay list needs to be replayed next
 * (based on what we have already replayed) and send it to the server.
 */
int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
{
        int rc = 0;
        struct list_head *tmp, *pos;
        struct ptlrpc_request *req = NULL;
        __u64 last_transno;
        ENTRY;

        *inflight = 0;

        /* The server might have committed some requests after we last
         * spoke, so make sure we get rid of them now.
         */
        spin_lock(&imp->imp_lock);
        imp->imp_last_transno_checked = 0;
        ptlrpc_free_committed(imp);
        last_transno = imp->imp_last_replay_transno;
        spin_unlock(&imp->imp_lock);

        CDEBUG(D_HA, "import %p from %s committed %llu last %llu\n",
               imp, obd2cli_tgt(imp->imp_obd),
               imp->imp_peer_committed_transno, last_transno);

        /* Do I need to hold a lock across this iteration?  We shouldn't be
         * racing with any additions to the list, because we're in recovery
         * and are therefore not processing additional requests to add.  Calls
         * to ptlrpc_free_committed might commit requests, but nothing "newer"
         * than the one we're replaying (it can't be committed until it's
         * replayed, and we're doing that here).  list_for_each_safe protects
         * against problems with the current request being committed, in the
         * unlikely event of that race.  So, in conclusion, I think that it's
         * safe to perform this list-walk without the imp_lock held.
         *
         * But, the {mdc,osc}_replay_open callbacks both iterate request
         * lists, and have comments saying they assume the imp_lock is being
         * held by ptlrpc_replay, but it's not.  It's just a little race...
         */

        /* Replay all the committed open requests on committed_list first */
        if (!list_empty(&imp->imp_committed_list)) {
                tmp = imp->imp_committed_list.prev;
                req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);

                /* The last request on committed_list hasn't been replayed */
                if (req->rq_transno > last_transno) {
                        /* Since the imp_committed_list is immutable before
                         * all of its requests have been replayed, it's safe
                         * to use a cursor to accelerate the search. */
                        if (!imp->imp_resend_replay ||
                            imp->imp_replay_cursor == &imp->imp_committed_list)
                                imp->imp_replay_cursor =
                                        imp->imp_replay_cursor->next;

                        while (imp->imp_replay_cursor !=
                               &imp->imp_committed_list) {
                                req = list_entry(imp->imp_replay_cursor,
                                                 struct ptlrpc_request,
                                                 rq_replay_list);
                                if (req->rq_transno > last_transno)
                                        break;

                                req = NULL;
                                imp->imp_replay_cursor =
                                        imp->imp_replay_cursor->next;
                        }
                } else {
                        /* All requests on committed_list have been replayed */
                        imp->imp_replay_cursor = &imp->imp_committed_list;
                        req = NULL;
                }
        }

        /* All the requests in the committed list have been replayed, so
         * replay the imp_replay_list. */
        if (req == NULL) {
                list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
                        req = list_entry(tmp, struct ptlrpc_request,
                                         rq_replay_list);

                        if (req->rq_transno > last_transno)
                                break;
                        req = NULL;
                }
        }

        /* If we need to resend the last sent transno (because a reconnect
         * has occurred), then stop on the matching req and send it again.
         * If, however, the last sent transno has been committed then we
         * continue replay from the next request. */
        if (req != NULL && imp->imp_resend_replay)
                lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);

        spin_lock(&imp->imp_lock);
        /* The resend replay request may have been removed from the
         * unreplied list. */
        if (req != NULL && imp->imp_resend_replay &&
            list_empty(&req->rq_unreplied_list)) {
                ptlrpc_add_unreplied(req);
                imp->imp_known_replied_xid = ptlrpc_known_replied_xid(imp);
        }

        imp->imp_resend_replay = 0;
        spin_unlock(&imp->imp_lock);

        if (req != NULL) {
                /* The request should have been added back to the unreplied
                 * list by ptlrpc_prepare_replay(). */
                LASSERT(!list_empty(&req->rq_unreplied_list));

                rc = ptlrpc_replay_req(req);
                if (rc) {
                        CERROR("recovery replay error %d for req %llu\n",
                               rc, req->rq_xid);
                        RETURN(rc);
                }
                *inflight = 1;
        }
        RETURN(rc);
}
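
/*
 * Illustrative sketch (not part of the original source): how a caller,
 * such as the import recovery state machine, might drive a single replay
 * step with ptlrpc_replay_next().  Requests are replayed strictly one at
 * a time: *inflight is set while a replay RPC is in the air, and its
 * reply triggers the next step.  The helper name below is hypothetical.
 */
static int example_replay_step(struct obd_import *imp)
{
        int inflight = 0;
        int rc;

        rc = ptlrpc_replay_next(imp, &inflight);
        if (rc == 0 && inflight == 0) {
                /* Nothing was sent and nothing failed: every saved
                 * request has been replayed, so recovery can advance
                 * to the lock-replay phase. */
        }
        return rc;
}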

/**
 * Schedule resending of requests on the sending_list.  This is done after
 * we have completed replaying of requests and locks.
 */
int ptlrpc_resend(struct obd_import *imp)
{
        struct ptlrpc_request *req, *next;

        ENTRY;

        /* As long as we're in recovery, nothing should be added to the
         * sending list, so we don't need to hold the lock during this
         * iteration and resend process.
         */
        /* Well... what if lctl recover is called twice at the same time?
         */
        spin_lock(&imp->imp_lock);
        if (imp->imp_state != LUSTRE_IMP_RECOVER) {
                spin_unlock(&imp->imp_lock);
                RETURN(-1);
        }

        list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) {
                LASSERTF((long)req > PAGE_SIZE && req != LP_POISON,
                         "req %p bad\n", req);
                LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);

                /* If the request is allowed to be sent during replay and it
                 * has not timed out yet, then it does not need to be
                 * resent. */
                if (!ptlrpc_no_resend(req) &&
                    (req->rq_timedout || !req->rq_allow_replay))
                        ptlrpc_resend_req(req);
        }
        spin_unlock(&imp->imp_lock);

        OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT, 2);
        RETURN(0);
}

/**
 * Go through all requests in the delayed list and wake their threads
 * for resending.
 */
void ptlrpc_wake_delayed(struct obd_import *imp)
{
        struct list_head *tmp, *pos;
        struct ptlrpc_request *req;

        spin_lock(&imp->imp_lock);
        list_for_each_safe(tmp, pos, &imp->imp_delayed_list) {
                req = list_entry(tmp, struct ptlrpc_request, rq_list);

                DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set);
                ptlrpc_client_wake_req(req);
        }
        spin_unlock(&imp->imp_lock);
}
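
/*
 * Illustrative sketch (not part of the original source): how the two
 * helpers above are used together once the connection is restored.  In
 * the real client the import state machine performs these steps in the
 * LUSTRE_IMP_RECOVER state; the driver function here is hypothetical.
 */
static int example_finish_recovery(struct obd_import *imp)
{
        int rc;

        /* Resend RPCs that were in flight when the connection broke;
         * returns -1 if the import has already left LUSTRE_IMP_RECOVER. */
        rc = ptlrpc_resend(imp);
        if (rc)
                return rc;

        /* Wake requests that were queued while the import was down so
         * their waiters can send them over the restored connection. */
        ptlrpc_wake_delayed(imp);
        return 0;
}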

void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
{
        struct obd_import *imp = failed_req->rq_import;
        ENTRY;

        CDEBUG(D_HA, "import %s of %s@%s abruptly disconnected: reconnecting\n",
               imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
               imp->imp_connection->c_remote_uuid.uuid);

        if (ptlrpc_set_import_discon(imp,
                              lustre_msg_get_conn_cnt(failed_req->rq_reqmsg))) {
                if (!imp->imp_replayable) {
                        CDEBUG(D_HA, "import %s@%s for %s not replayable, "
                               "auto-deactivating\n",
                               obd2cli_tgt(imp->imp_obd),
                               imp->imp_connection->c_remote_uuid.uuid,
                               imp->imp_obd->obd_name);
                        ptlrpc_deactivate_import(imp);
                }
                /* to control recovery via lctl {disable|enable}_recovery */
                if (imp->imp_deactive == 0)
                        ptlrpc_connect_import(imp);
        }

        /* Wait for recovery to complete and resend.  If evicted, then
         * this request will be errored out later. */
        spin_lock(&failed_req->rq_lock);
        if (!failed_req->rq_no_resend)
                failed_req->rq_resend = 1;
        spin_unlock(&failed_req->rq_lock);

        EXIT;
}

/**
 * Administratively activate or deactivate a client.
 * This should only be called by the ioctl interface, currently:
 *  - the lctl deactivate and activate commands
 *  - echo 0/1 >> /proc/osc/XXX/active
 *  - client umount -f (ll_umount_begin)
 */
int ptlrpc_set_import_active(struct obd_import *imp, int active)
{
        struct obd_device *obd = imp->imp_obd;
        int rc = 0;

        ENTRY;
        LASSERT(obd);

        /* When deactivating, mark the import invalid, and abort in-flight
         * requests. */
        if (!active) {
                LCONSOLE_WARN("setting import %s INACTIVE by administrator "
                              "request\n", obd2cli_tgt(imp->imp_obd));

                /* set before invalidate to avoid messages about imp_inval
                 * set without imp_deactive in ptlrpc_import_delay_req */
                spin_lock(&imp->imp_lock);
                imp->imp_deactive = 1;
                spin_unlock(&imp->imp_lock);

                obd_import_event(imp->imp_obd, imp, IMP_EVENT_DEACTIVATE);

                ptlrpc_invalidate_import(imp);
        }

        /* When activating, mark the import valid, and attempt recovery */
        if (active) {
                CDEBUG(D_HA, "setting import %s VALID\n",
                       obd2cli_tgt(imp->imp_obd));

                spin_lock(&imp->imp_lock);
                imp->imp_deactive = 0;
                spin_unlock(&imp->imp_lock);
                obd_import_event(imp->imp_obd, imp, IMP_EVENT_ACTIVATE);

                rc = ptlrpc_recover_import(imp, NULL, 0);
        }

        RETURN(rc);
}
EXPORT_SYMBOL(ptlrpc_set_import_active);
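
/*
 * Illustrative sketch (not part of the original source): roughly how a
 * proc "active" write handler might use ptlrpc_set_import_active().  The
 * handler name and the no-op short cut are hypothetical.
 */
static int example_active_store(struct obd_device *obd, int val)
{
        struct obd_import *imp = obd->u.cli.cl_import;

        /* Only toggle when the state actually changes, so repeated
         * writes of the same value stay cheap no-ops. */
        if (imp->imp_deactive == !val)
                return 0;

        return ptlrpc_set_import_active(imp, val);
}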

/* Attempt to reconnect an import */
int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async)
{
        int rc = 0;
        ENTRY;

        spin_lock(&imp->imp_lock);
        if (imp->imp_state == LUSTRE_IMP_NEW || imp->imp_deactive ||
            atomic_read(&imp->imp_inval_count))
                rc = -EINVAL;
        spin_unlock(&imp->imp_lock);
        if (rc)
                GOTO(out, rc);

        /* force the import to be disconnected */
        ptlrpc_set_import_discon(imp, 0);

        if (new_uuid) {
                struct obd_uuid uuid;

                /* instruct the import to use the new uuid */
                obd_str2uuid(&uuid, new_uuid);
                rc = import_set_conn_priority(imp, &uuid);
                if (rc)
                        GOTO(out, rc);
        }

        /* Check if a reconnect is already in progress */
        spin_lock(&imp->imp_lock);
        if (imp->imp_state != LUSTRE_IMP_DISCON) {
                imp->imp_force_verify = 1;
                rc = -EALREADY;
        }
        spin_unlock(&imp->imp_lock);
        if (rc)
                GOTO(out, rc);

        rc = ptlrpc_connect_import(imp);
        if (rc)
                GOTO(out, rc);

        if (!async) {
                struct l_wait_info lwi;
                long timeout = cfs_time_seconds(obd_timeout);

                CDEBUG(D_HA, "%s: recovery started, waiting %u seconds\n",
                       obd2cli_tgt(imp->imp_obd), obd_timeout);

                lwi = LWI_TIMEOUT(timeout, NULL, NULL);
                rc = l_wait_event(imp->imp_recovery_waitq,
                                  !ptlrpc_import_in_recovery(imp), &lwi);
                CDEBUG(D_HA, "%s: recovery finished\n",
                       obd2cli_tgt(imp->imp_obd));
        }
        EXIT;

out:
        return rc;
}
EXPORT_SYMBOL(ptlrpc_recover_import);
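
/*
 * Illustrative sketch (not part of the original source): how an ioctl
 * path such as "lctl recover" might invoke ptlrpc_recover_import().
 * Passing async = 0 blocks the caller until recovery finishes or the
 * obd_timeout wait expires; a NULL new_uuid keeps the currently
 * configured connections.  The wrapper name is hypothetical.
 */
static int example_client_recover(struct obd_import *imp)
{
        /* Synchronous recovery: fails with -EALREADY if a reconnect is
         * already in progress, or -EINVAL if the import is new,
         * deactivated, or being invalidated. */
        return ptlrpc_recover_import(imp, NULL, 0);
}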

int ptlrpc_import_in_recovery(struct obd_import *imp)
{
        int in_recovery = 1;

        spin_lock(&imp->imp_lock);
        if (imp->imp_state == LUSTRE_IMP_FULL ||
            imp->imp_state == LUSTRE_IMP_CLOSED ||
            imp->imp_state == LUSTRE_IMP_DISCON ||
            imp->imp_obd->obd_no_recov)
                in_recovery = 0;
        spin_unlock(&imp->imp_lock);

        return in_recovery;
}