4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2012, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/ptlrpc/recover.c
38 * Author: Mike Shaver <shaver@clusterfs.com>
41 #define DEBUG_SUBSYSTEM S_RPC
43 # include <libcfs/libcfs.h>
45 # include <liblustre.h>
48 #include <obd_support.h>
49 #include <lustre_ha.h>
50 #include <lustre_net.h>
51 #include <lustre_import.h>
52 #include <lustre_export.h>
55 #include <obd_class.h>
56 #include <obd_lov.h> /* for IOC_LOV_SET_OSC_ACTIVE */
57 #include <libcfs/list.h>
59 #include "ptlrpc_internal.h"
62 * Start recovery on disconnected import.
63 * This is done by just attempting a connect
65 void ptlrpc_initiate_recovery(struct obd_import *imp)
/* Log under the D_HA (high-availability) debug mask, then hand off to
 * ptlrpc_connect_import(); the connect path drives the rest of the
 * import recovery state machine — nothing else is needed here. */
69 CDEBUG(D_HA, "%s: starting recovery\n", obd2cli_tgt(imp->imp_obd));
70 ptlrpc_connect_import(imp);
76 * Identify what request from replay list needs to be replayed next
77 * (based on what we have already replayed) and send it to server.
79 int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
/* NOTE(review): this extract omits several interior lines — the
 * declarations of 'rc' and 'last_transno', the loop 'break's, and the
 * closing braces are not visible here.  'inflight' is presumably set
 * to the number of replays now in flight on an elided line — confirm. */
82 cfs_list_t *tmp, *pos;
83 struct ptlrpc_request *req = NULL;
89 /* It might have committed some after we last spoke, so make sure we
90 * get rid of them now.
92 spin_lock(&imp->imp_lock);
/* Zero the cached check point so ptlrpc_free_committed() re-scans the
 * whole list rather than trusting a stale high-water mark. */
93 imp->imp_last_transno_checked = 0;
94 ptlrpc_free_committed(imp);
95 last_transno = imp->imp_last_replay_transno;
96 spin_unlock(&imp->imp_lock);
98 CDEBUG(D_HA, "import %p from %s committed "LPU64" last "LPU64"\n",
99 imp, obd2cli_tgt(imp->imp_obd),
100 imp->imp_peer_committed_transno, last_transno);
102 /* Do I need to hold a lock across this iteration? We shouldn't be
103 * racing with any additions to the list, because we're in recovery
104 * and are therefore not processing additional requests to add. Calls
105 * to ptlrpc_free_committed might commit requests, but nothing "newer"
106 * than the one we're replaying (it can't be committed until it's
107 * replayed, and we're doing that here). l_f_e_safe protects against
108 * problems with the current request being committed, in the unlikely
109 * event of that race. So, in conclusion, I think that it's safe to
110 * perform this list-walk without the imp_lock held.
112 * But, the {mdc,osc}_replay_open callbacks both iterate
113 * request lists, and have comments saying they assume the
114 * imp_lock is being held by ptlrpc_replay, but it's not. it's
115 * just a little race...
118 /* Replay all the committed open requests on committed_list first */
119 if (!cfs_list_empty(&imp->imp_committed_list)) {
/* Peek at the tail (highest transno) of committed_list to decide
 * quickly whether anything on it still needs replaying at all. */
120 tmp = imp->imp_committed_list.prev;
121 req = cfs_list_entry(tmp, struct ptlrpc_request,
124 /* The last request on committed_list hasn't been replayed */
125 if (req->rq_transno > last_transno) {
126 /* Since the imp_committed_list is immutable before
127 * all of it's requests being replayed, it's safe to
128 * use a cursor to accelerate the search */
129 imp->imp_replay_cursor = imp->imp_replay_cursor->next;
/* Advance the cursor until we find the first request past
 * last_transno; an elided 'break' presumably exits here. */
131 while (imp->imp_replay_cursor !=
132 &imp->imp_committed_list) {
133 req = cfs_list_entry(imp->imp_replay_cursor,
134 struct ptlrpc_request,
136 if (req->rq_transno > last_transno)
140 imp->imp_replay_cursor =
141 imp->imp_replay_cursor->next;
144 /* All requests on committed_list have been replayed */
145 imp->imp_replay_cursor = &imp->imp_committed_list;
150 /* All the requests in committed list have been replayed, let's replay
151 * the imp_replay_list */
153 cfs_list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
154 req = cfs_list_entry(tmp, struct ptlrpc_request,
/* First request strictly beyond last_transno is the next to replay. */
157 if (req->rq_transno > last_transno)
163 /* If need to resend the last sent transno (because a reconnect
164 * has occurred), then stop on the matching req and send it again.
165 * If, however, the last sent transno has been committed then we
166 * continue replay from the next request. */
167 if (req != NULL && imp->imp_resend_replay)
168 lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
/* Clear the one-shot resend flag under imp_lock regardless of whether
 * a request was found. */
170 spin_lock(&imp->imp_lock);
171 imp->imp_resend_replay = 0;
172 spin_unlock(&imp->imp_lock);
/* Send the selected request; a non-zero rc presumably means the replay
 * could not be queued — confirm against ptlrpc_replay_req(). */
175 rc = ptlrpc_replay_req(req);
177 CERROR("recovery replay error %d for req "
178 LPU64"\n", rc, req->rq_xid);
187 * Schedule resending of request on sending_list. This is done after
188 * we completed replaying of requests and locks.
190 int ptlrpc_resend(struct obd_import *imp)
192 struct ptlrpc_request *req, *next;
196 /* As long as we're in recovery, nothing should be added to the sending
197 * list, so we don't need to hold the lock during this iteration and
200 /* Well... what if lctl recover is called twice at the same time?
/* Only resend while the import is actually in RECOVER state; bail out
 * (an elided early return presumably follows the unlock) otherwise. */
202 spin_lock(&imp->imp_lock);
203 if (imp->imp_state != LUSTRE_IMP_RECOVER) {
204 spin_unlock(&imp->imp_lock);
208 cfs_list_for_each_entry_safe(req, next, &imp->imp_sending_list,
/* Sanity-check the request pointer against known poison patterns
 * before dereferencing it — catches use-after-free of list entries. */
210 LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON,
211 "req %p bad\n", req);
212 LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
/* Requests flagged no-resend (see ptlrpc_no_resend) are skipped. */
213 if (!ptlrpc_no_resend(req))
214 ptlrpc_resend_req(req);
216 spin_unlock(&imp->imp_lock);
220 EXPORT_SYMBOL(ptlrpc_resend);
223 * Go through all requests in delayed list and wake their threads
226 void ptlrpc_wake_delayed(struct obd_import *imp)
228 cfs_list_t *tmp, *pos;
229 struct ptlrpc_request *req;
/* Walk imp_delayed_list under imp_lock and wake the waiter of every
 * delayed request; the safe iterator tolerates entry removal by the
 * woken side. */
231 spin_lock(&imp->imp_lock);
232 cfs_list_for_each_safe(tmp, pos, &imp->imp_delayed_list) {
233 req = cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
235 DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set);
236 ptlrpc_client_wake_req(req);
238 spin_unlock(&imp->imp_lock);
240 EXPORT_SYMBOL(ptlrpc_wake_delayed);
/* Handle a request that failed because its connection dropped: move the
 * import into DISCON state and either deactivate it (non-replayable
 * imports) or kick off a reconnect, then flag the failed request for
 * resend once recovery completes. */
242 void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
244 struct obd_import *imp = failed_req->rq_import;
247 CDEBUG(D_HA, "import %s of %s@%s abruptly disconnected: reconnecting\n",
248 imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
249 imp->imp_connection->c_remote_uuid.uuid);
/* ptlrpc_set_import_discon() presumably returns non-zero only when this
 * call is the one that transitioned the import — so the recovery
 * actions below run once per disconnect, not per failed request. */
251 if (ptlrpc_set_import_discon(imp,
252 lustre_msg_get_conn_cnt(failed_req->rq_reqmsg))) {
253 if (!imp->imp_replayable) {
254 CDEBUG(D_HA, "import %s@%s for %s not replayable, "
255 "auto-deactivating\n",
256 obd2cli_tgt(imp->imp_obd),
257 imp->imp_connection->c_remote_uuid.uuid,
258 imp->imp_obd->obd_name);
259 ptlrpc_deactivate_import(imp);
261 /* to control recovery via lctl {disable|enable}_recovery */
262 if (imp->imp_deactive == 0)
263 ptlrpc_connect_import(imp);
266 /* Wait for recovery to complete and resend. If evicted, then
267 this request will be errored out later.*/
268 spin_lock(&failed_req->rq_lock);
269 if (!failed_req->rq_no_resend)
270 failed_req->rq_resend = 1;
271 spin_unlock(&failed_req->rq_lock);
277 * Administratively active/deactive a client.
278 * This should only be called by the ioctl interface, currently
279 * - the lctl deactivate and activate commands
280 * - echo 0/1 >> /proc/osc/XXX/active
281 * - client umount -f (ll_umount_begin)
283 int ptlrpc_set_import_active(struct obd_import *imp, int active)
285 struct obd_device *obd = imp->imp_obd;
291 /* When deactivating, mark import invalid, and abort in-flight
294 LCONSOLE_WARN("setting import %s INACTIVE by administrator "
295 "request\n", obd2cli_tgt(imp->imp_obd));
297 /* set before invalidate to avoid messages about imp_inval
298 * set without imp_deactive in ptlrpc_import_delay_req */
299 spin_lock(&imp->imp_lock);
300 imp->imp_deactive = 1;
301 spin_unlock(&imp->imp_lock);
/* Notify the layered OBD (e.g. osc) of the state change before
 * invalidating, so upper layers see DEACTIVATE first. */
303 obd_import_event(imp->imp_obd, imp, IMP_EVENT_DEACTIVATE);
305 ptlrpc_invalidate_import(imp);
308 /* When activating, mark import valid, and attempt recovery */
310 CDEBUG(D_HA, "setting import %s VALID\n",
311 obd2cli_tgt(imp->imp_obd));
313 spin_lock(&imp->imp_lock);
314 imp->imp_deactive = 0;
315 spin_unlock(&imp->imp_lock);
316 obd_import_event(imp->imp_obd, imp, IMP_EVENT_ACTIVATE);
/* Synchronous recovery attempt (async = 0); rc is presumably returned
 * to the ioctl caller on an elided line — confirm. */
318 rc = ptlrpc_recover_import(imp, NULL, 0);
323 EXPORT_SYMBOL(ptlrpc_set_import_active);
325 /* Attempt to reconnect an import: optionally switch it to a new target
 * UUID, force it into DISCON state, initiate a connect, and — unless
 * 'async' is set — wait up to obd_timeout for recovery to finish.
 * Returns 0 on success; error paths fall on lines elided from this
 * extract. */
326 int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async)
/* Refuse to recover an import that was never connected (NEW), was
 * administratively deactivated, or is mid-invalidation. */
331 spin_lock(&imp->imp_lock);
332 if (imp->imp_state == LUSTRE_IMP_NEW || imp->imp_deactive ||
333 cfs_atomic_read(&imp->imp_inval_count))
335 spin_unlock(&imp->imp_lock);
339 /* force import to be disconnected. */
340 ptlrpc_set_import_discon(imp, 0);
343 struct obd_uuid uuid;
345 /* instruct import to use new uuid */
346 obd_str2uuid(&uuid, new_uuid);
347 rc = import_set_conn_priority(imp, &uuid);
352 /* Check if reconnect is already in progress */
353 spin_lock(&imp->imp_lock);
354 if (imp->imp_state != LUSTRE_IMP_DISCON) {
/* Someone else moved the state on already; just ask the existing
 * connect attempt to re-verify the connection. */
355 imp->imp_force_verify = 1;
358 spin_unlock(&imp->imp_lock);
362 rc = ptlrpc_connect_import(imp);
367 struct l_wait_info lwi;
368 int secs = cfs_time_seconds(obd_timeout);
/* NOTE(review): 'secs' is the cfs_time_seconds() conversion of
 * obd_timeout (i.e. likely jiffies, the unit LWI_TIMEOUT wants), yet
 * the message prints it as "seconds" — confirm the intended unit. */
370 CDEBUG(D_HA, "%s: recovery started, waiting %u seconds\n",
371 obd2cli_tgt(imp->imp_obd), secs);
373 lwi = LWI_TIMEOUT(secs, NULL, NULL);
374 rc = l_wait_event(imp->imp_recovery_waitq,
375 !ptlrpc_import_in_recovery(imp), &lwi);
376 CDEBUG(D_HA, "%s: recovery finished\n",
377 obd2cli_tgt(imp->imp_obd));
384 EXPORT_SYMBOL(ptlrpc_recover_import);
386 int ptlrpc_import_in_recovery(struct obd_import *imp)
389 spin_lock(&imp->imp_lock);
390 if (imp->imp_state == LUSTRE_IMP_FULL ||
391 imp->imp_state == LUSTRE_IMP_CLOSED ||
392 imp->imp_state == LUSTRE_IMP_DISCON)
394 spin_unlock(&imp->imp_lock);