Source: Whamcloud gitweb — fs/lustre-release.git, file lustre/ptlrpc/recover.c
Patch context: LU-2613 recovery: free open/close request promptly
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ptlrpc/recover.c
37  *
38  * Author: Mike Shaver <shaver@clusterfs.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_RPC
42 #ifdef __KERNEL__
43 # include <libcfs/libcfs.h>
44 #else
45 # include <liblustre.h>
46 #endif
47
48 #include <obd_support.h>
49 #include <lustre_ha.h>
50 #include <lustre_net.h>
51 #include <lustre_import.h>
52 #include <lustre_export.h>
53 #include <obd.h>
54 #include <obd_ost.h>
55 #include <obd_class.h>
56 #include <obd_lov.h> /* for IOC_LOV_SET_OSC_ACTIVE */
57 #include <libcfs/list.h>
58
59 #include "ptlrpc_internal.h"
60
61 /**
62  * Start recovery on disconnected import.
63  * This is done by just attempting a connect
64  */
65 void ptlrpc_initiate_recovery(struct obd_import *imp)
66 {
67         ENTRY;
68
69         CDEBUG(D_HA, "%s: starting recovery\n", obd2cli_tgt(imp->imp_obd));
70         ptlrpc_connect_import(imp);
71
72         EXIT;
73 }
74
75 /**
76  * Identify what request from replay list needs to be replayed next
77  * (based on what we have already replayed) and send it to server.
78  */
79 int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
80 {
81         int rc = 0;
82         cfs_list_t *tmp, *pos;
83         struct ptlrpc_request *req = NULL;
84         __u64 last_transno;
85         ENTRY;
86
87         *inflight = 0;
88
89         /* It might have committed some after we last spoke, so make sure we
90          * get rid of them now.
91          */
92         spin_lock(&imp->imp_lock);
93         imp->imp_last_transno_checked = 0;
94         ptlrpc_free_committed(imp);
95         last_transno = imp->imp_last_replay_transno;
96         spin_unlock(&imp->imp_lock);
97
98         CDEBUG(D_HA, "import %p from %s committed "LPU64" last "LPU64"\n",
99                imp, obd2cli_tgt(imp->imp_obd),
100                imp->imp_peer_committed_transno, last_transno);
101
102         /* Do I need to hold a lock across this iteration?  We shouldn't be
103          * racing with any additions to the list, because we're in recovery
104          * and are therefore not processing additional requests to add.  Calls
105          * to ptlrpc_free_committed might commit requests, but nothing "newer"
106          * than the one we're replaying (it can't be committed until it's
107          * replayed, and we're doing that here).  l_f_e_safe protects against
108          * problems with the current request being committed, in the unlikely
109          * event of that race.  So, in conclusion, I think that it's safe to
110          * perform this list-walk without the imp_lock held.
111          *
112          * But, the {mdc,osc}_replay_open callbacks both iterate
113          * request lists, and have comments saying they assume the
114          * imp_lock is being held by ptlrpc_replay, but it's not. it's
115          * just a little race...
116          */
117
118         /* Replay all the committed open requests on committed_list first */
119         if (!cfs_list_empty(&imp->imp_committed_list)) {
120                 tmp = imp->imp_committed_list.prev;
121                 req = cfs_list_entry(tmp, struct ptlrpc_request,
122                                      rq_replay_list);
123
124                 /* The last request on committed_list hasn't been replayed */
125                 if (req->rq_transno > last_transno) {
126                         /* Since the imp_committed_list is immutable before
127                          * all of it's requests being replayed, it's safe to
128                          * use a cursor to accelerate the search */
129                         imp->imp_replay_cursor = imp->imp_replay_cursor->next;
130
131                         while (imp->imp_replay_cursor !=
132                                &imp->imp_committed_list) {
133                                 req = cfs_list_entry(imp->imp_replay_cursor,
134                                                      struct ptlrpc_request,
135                                                      rq_replay_list);
136                                 if (req->rq_transno > last_transno)
137                                         break;
138
139                                 req = NULL;
140                                 imp->imp_replay_cursor =
141                                         imp->imp_replay_cursor->next;
142                         }
143                 } else {
144                         /* All requests on committed_list have been replayed */
145                         imp->imp_replay_cursor = &imp->imp_committed_list;
146                         req = NULL;
147                 }
148         }
149
150         /* All the requests in committed list have been replayed, let's replay
151          * the imp_replay_list */
152         if (req == NULL) {
153                 cfs_list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
154                         req = cfs_list_entry(tmp, struct ptlrpc_request,
155                                              rq_replay_list);
156
157                         if (req->rq_transno > last_transno)
158                                 break;
159                         req = NULL;
160                 }
161         }
162
163         /* If need to resend the last sent transno (because a reconnect
164          * has occurred), then stop on the matching req and send it again.
165          * If, however, the last sent transno has been committed then we 
166          * continue replay from the next request. */
167         if (req != NULL && imp->imp_resend_replay)
168                 lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
169
170         spin_lock(&imp->imp_lock);
171         imp->imp_resend_replay = 0;
172         spin_unlock(&imp->imp_lock);
173
174         if (req != NULL) {
175                 rc = ptlrpc_replay_req(req);
176                 if (rc) {
177                         CERROR("recovery replay error %d for req "
178                                LPU64"\n", rc, req->rq_xid);
179                         RETURN(rc);
180                 }
181                 *inflight = 1;
182         }
183         RETURN(rc);
184 }
185
186 /**
187  * Schedule resending of request on sending_list. This is done after
188  * we completed replaying of requests and locks.
189  */
190 int ptlrpc_resend(struct obd_import *imp)
191 {
192         struct ptlrpc_request *req, *next;
193
194         ENTRY;
195
196         /* As long as we're in recovery, nothing should be added to the sending
197          * list, so we don't need to hold the lock during this iteration and
198          * resend process.
199          */
200         /* Well... what if lctl recover is called twice at the same time?
201          */
202         spin_lock(&imp->imp_lock);
203         if (imp->imp_state != LUSTRE_IMP_RECOVER) {
204                 spin_unlock(&imp->imp_lock);
205                 RETURN(-1);
206         }
207
208         cfs_list_for_each_entry_safe(req, next, &imp->imp_sending_list,
209                                      rq_list) {
210                 LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON,
211                          "req %p bad\n", req);
212                 LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
213                 if (!ptlrpc_no_resend(req))
214                         ptlrpc_resend_req(req);
215         }
216         spin_unlock(&imp->imp_lock);
217
218         RETURN(0);
219 }
220 EXPORT_SYMBOL(ptlrpc_resend);
221
222 /**
223  * Go through all requests in delayed list and wake their threads
224  * for resending
225  */
226 void ptlrpc_wake_delayed(struct obd_import *imp)
227 {
228         cfs_list_t *tmp, *pos;
229         struct ptlrpc_request *req;
230
231         spin_lock(&imp->imp_lock);
232         cfs_list_for_each_safe(tmp, pos, &imp->imp_delayed_list) {
233                 req = cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
234
235                 DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set);
236                 ptlrpc_client_wake_req(req);
237         }
238         spin_unlock(&imp->imp_lock);
239 }
240 EXPORT_SYMBOL(ptlrpc_wake_delayed);
241
/**
 * Handle abrupt disconnection detected through failed request
 * \a failed_req: move the import to disconnected state (and either
 * deactivate it or start reconnecting), then mark the request for
 * resend once recovery completes.
 */
void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
{
        struct obd_import *imp = failed_req->rq_import;
        ENTRY;

        CDEBUG(D_HA, "import %s of %s@%s abruptly disconnected: reconnecting\n",
               imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
               imp->imp_connection->c_remote_uuid.uuid);

        /* only act if the conn_cnt from the failed request moved the import
         * to DISCON (i.e. this is not a stale failure from an older
         * connection) */
        if (ptlrpc_set_import_discon(imp,
                              lustre_msg_get_conn_cnt(failed_req->rq_reqmsg))) {
                if (!imp->imp_replayable) {
                        CDEBUG(D_HA, "import %s@%s for %s not replayable, "
                               "auto-deactivating\n",
                               obd2cli_tgt(imp->imp_obd),
                               imp->imp_connection->c_remote_uuid.uuid,
                               imp->imp_obd->obd_name);
                        ptlrpc_deactivate_import(imp);
                }
                /* to control recovery via lctl {disable|enable}_recovery */
                if (imp->imp_deactive == 0)
                        ptlrpc_connect_import(imp);
        }

        /* Wait for recovery to complete and resend. If evicted, then
           this request will be errored out later.*/
        spin_lock(&failed_req->rq_lock);
        if (!failed_req->rq_no_resend)
                failed_req->rq_resend = 1;
        spin_unlock(&failed_req->rq_lock);

        EXIT;
}
275
276 /**
277  * Administratively active/deactive a client. 
278  * This should only be called by the ioctl interface, currently
279  *  - the lctl deactivate and activate commands
280  *  - echo 0/1 >> /proc/osc/XXX/active
281  *  - client umount -f (ll_umount_begin)
282  */
283 int ptlrpc_set_import_active(struct obd_import *imp, int active)
284 {
285         struct obd_device *obd = imp->imp_obd;
286         int rc = 0;
287
288         ENTRY;
289         LASSERT(obd);
290
291         /* When deactivating, mark import invalid, and abort in-flight
292          * requests. */
293         if (!active) {
294                 LCONSOLE_WARN("setting import %s INACTIVE by administrator "
295                               "request\n", obd2cli_tgt(imp->imp_obd));
296
297                 /* set before invalidate to avoid messages about imp_inval
298                  * set without imp_deactive in ptlrpc_import_delay_req */
299                 spin_lock(&imp->imp_lock);
300                 imp->imp_deactive = 1;
301                 spin_unlock(&imp->imp_lock);
302
303                 obd_import_event(imp->imp_obd, imp, IMP_EVENT_DEACTIVATE);
304
305                 ptlrpc_invalidate_import(imp);
306         }
307
308         /* When activating, mark import valid, and attempt recovery */
309         if (active) {
310                 CDEBUG(D_HA, "setting import %s VALID\n",
311                        obd2cli_tgt(imp->imp_obd));
312
313                 spin_lock(&imp->imp_lock);
314                 imp->imp_deactive = 0;
315                 spin_unlock(&imp->imp_lock);
316                 obd_import_event(imp->imp_obd, imp, IMP_EVENT_ACTIVATE);
317
318                 rc = ptlrpc_recover_import(imp, NULL, 0);
319         }
320
321         RETURN(rc);
322 }
323 EXPORT_SYMBOL(ptlrpc_set_import_active);
324
325 /* Attempt to reconnect an import */
326 int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async)
327 {
328         int rc = 0;
329         ENTRY;
330
331         spin_lock(&imp->imp_lock);
332         if (imp->imp_state == LUSTRE_IMP_NEW || imp->imp_deactive ||
333             cfs_atomic_read(&imp->imp_inval_count))
334                 rc = -EINVAL;
335         spin_unlock(&imp->imp_lock);
336         if (rc)
337                 GOTO(out, rc);
338
339         /* force import to be disconnected. */
340         ptlrpc_set_import_discon(imp, 0);
341
342         if (new_uuid) {
343                 struct obd_uuid uuid;
344
345                 /* intruct import to use new uuid */
346                 obd_str2uuid(&uuid, new_uuid);
347                 rc = import_set_conn_priority(imp, &uuid);
348                 if (rc)
349                         GOTO(out, rc);
350         }
351
352         /* Check if reconnect is already in progress */
353         spin_lock(&imp->imp_lock);
354         if (imp->imp_state != LUSTRE_IMP_DISCON) {
355                 imp->imp_force_verify = 1;
356                 rc = -EALREADY;
357         }
358         spin_unlock(&imp->imp_lock);
359         if (rc)
360                 GOTO(out, rc);
361
362         rc = ptlrpc_connect_import(imp);
363         if (rc)
364                 GOTO(out, rc);
365
366         if (!async) {
367                 struct l_wait_info lwi;
368                 int secs = cfs_time_seconds(obd_timeout);
369
370                 CDEBUG(D_HA, "%s: recovery started, waiting %u seconds\n",
371                        obd2cli_tgt(imp->imp_obd), secs);
372
373                 lwi = LWI_TIMEOUT(secs, NULL, NULL);
374                 rc = l_wait_event(imp->imp_recovery_waitq,
375                                   !ptlrpc_import_in_recovery(imp), &lwi);
376                 CDEBUG(D_HA, "%s: recovery finished\n",
377                        obd2cli_tgt(imp->imp_obd));
378         }
379         EXIT;
380
381 out:
382         return rc;
383 }
384 EXPORT_SYMBOL(ptlrpc_recover_import);
385
386 int ptlrpc_import_in_recovery(struct obd_import *imp)
387 {
388         int in_recovery = 1;
389         spin_lock(&imp->imp_lock);
390         if (imp->imp_state == LUSTRE_IMP_FULL ||
391             imp->imp_state == LUSTRE_IMP_CLOSED ||
392             imp->imp_state == LUSTRE_IMP_DISCON)
393                 in_recovery = 0;
394         spin_unlock(&imp->imp_lock);
395         return in_recovery;
396 }