/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lustre/ptlrpc/recover.c
 *
 * Author: Mike Shaver <shaver@clusterfs.com>
 */
36 #define DEBUG_SUBSYSTEM S_RPC
37 #include <linux/list.h>
38 #include <libcfs/libcfs.h>
39 #include <obd_support.h>
40 #include <lustre_ha.h>
41 #include <lustre_net.h>
42 #include <lustre_import.h>
43 #include <lustre_export.h>
45 #include <obd_class.h>
47 #include "ptlrpc_internal.h"
50 * Identify what request from replay list needs to be replayed next
51 * (based on what we have already replayed) and send it to server.
53 int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
56 struct ptlrpc_request *req = NULL;
62 /* It might have committed some after we last spoke, so make sure we
63 * get rid of them now.
65 spin_lock(&imp->imp_lock);
66 imp->imp_last_transno_checked = 0;
67 ptlrpc_free_committed(imp);
68 last_transno = imp->imp_last_replay_transno;
70 CDEBUG(D_HA, "import %p from %s committed %llu last %llu\n",
71 imp, obd2cli_tgt(imp->imp_obd),
72 imp->imp_peer_committed_transno, last_transno);
74 /* Replay all the committed open requests on committed_list first */
75 if (!list_empty(&imp->imp_committed_list)) {
76 req = list_last_entry(&imp->imp_committed_list,
77 struct ptlrpc_request, rq_replay_list);
79 /* The last request on committed_list hasn't been replayed */
80 if (req->rq_transno > last_transno) {
81 if (!imp->imp_resend_replay ||
82 imp->imp_replay_cursor == &imp->imp_committed_list)
83 imp->imp_replay_cursor =
84 imp->imp_replay_cursor->next;
86 while (imp->imp_replay_cursor !=
87 &imp->imp_committed_list) {
88 req = list_entry(imp->imp_replay_cursor,
89 struct ptlrpc_request,
91 if (req->rq_transno > last_transno)
95 LASSERT(!list_empty(imp->imp_replay_cursor));
96 imp->imp_replay_cursor =
97 imp->imp_replay_cursor->next;
100 /* All requests on committed_list have been replayed */
101 imp->imp_replay_cursor = &imp->imp_committed_list;
106 /* All the requests in committed list have been replayed, let's replay
107 * the imp_replay_list */
109 struct ptlrpc_request *tmp;
111 list_for_each_entry(tmp, &imp->imp_replay_list,
113 if (tmp->rq_transno > last_transno) {
120 /* If need to resend the last sent transno (because a reconnect
121 * has occurred), then stop on the matching req and send it again.
122 * If, however, the last sent transno has been committed then we
123 * continue replay from the next request. */
124 if (req != NULL && imp->imp_resend_replay)
125 lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
127 /* ptlrpc_prepare_replay() may fail to add the reqeust into unreplied
128 * list if the request hasn't been added to replay list then. Another
129 * exception is that resend replay could have been removed from the
131 if (req != NULL && list_empty(&req->rq_unreplied_list)) {
132 DEBUG_REQ(D_HA, req, "resend_replay=%d, last_transno=%llu",
133 imp->imp_resend_replay, last_transno);
134 ptlrpc_add_unreplied(req);
135 imp->imp_known_replied_xid = ptlrpc_known_replied_xid(imp);
138 imp->imp_resend_replay = 0;
139 spin_unlock(&imp->imp_lock);
142 LASSERT(!list_empty(&req->rq_unreplied_list));
144 rc = ptlrpc_replay_req(req);
146 CERROR("recovery replay error %d for req %llu\n",
156 * Schedule resending of request on sending_list. This is done after
157 * we completed replaying of requests and locks.
159 int ptlrpc_resend(struct obd_import *imp)
161 struct ptlrpc_request *req;
165 /* As long as we're in recovery, nothing should be added to the sending
166 * list, so we don't need to hold the lock during this iteration and
169 /* Well... what if lctl recover is called twice at the same time?
171 spin_lock(&imp->imp_lock);
172 if (imp->imp_state != LUSTRE_IMP_RECOVER) {
173 spin_unlock(&imp->imp_lock);
177 list_for_each_entry(req, &imp->imp_sending_list, rq_list) {
178 LASSERTF((long)req > PAGE_SIZE && req != LP_POISON,
179 "req %px bad\n", req);
180 LASSERTF(req->rq_type != LI_POISON, "req %px freed\n", req);
182 /* If the request is allowed to be sent during replay and it
183 * is not timeout yet, then it does not need to be resent. */
184 if (!ptlrpc_no_resend(req) &&
185 (req->rq_timedout || !req->rq_allow_replay))
186 ptlrpc_resend_req(req);
188 spin_unlock(&imp->imp_lock);
190 CFS_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT, 2);
195 * Go through all requests in delayed list and wake their threads
198 void ptlrpc_wake_delayed(struct obd_import *imp)
200 struct ptlrpc_request *req;
202 spin_lock(&imp->imp_lock);
203 list_for_each_entry(req, &imp->imp_delayed_list, rq_list) {
204 DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set);
205 ptlrpc_client_wake_req(req);
207 spin_unlock(&imp->imp_lock);
210 void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
212 struct obd_import *imp = failed_req->rq_import;
213 int conn = lustre_msg_get_conn_cnt(failed_req->rq_reqmsg);
216 CDEBUG(D_HA, "import %s of %s@%s abruptly disconnected: reconnecting\n",
217 imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
218 imp->imp_connection->c_remote_uuid.uuid);
220 if (ptlrpc_set_import_discon(imp, conn, true)) {
221 /* to control recovery via lctl {disable|enable}_recovery */
222 if (imp->imp_deactive == 0)
223 ptlrpc_connect_import(imp);
226 /* Wait for recovery to complete and resend. If evicted, then
227 this request will be errored out later.*/
228 spin_lock(&failed_req->rq_lock);
229 if (!failed_req->rq_no_resend)
230 failed_req->rq_resend = 1;
231 spin_unlock(&failed_req->rq_lock);
237 * Administratively active/deactive a client.
238 * This should only be called by the ioctl interface, currently
239 * - the lctl deactivate and activate commands
240 * - echo 0/1 >> /proc/osc/XXX/active
241 * - client umount -f (ll_umount_begin)
243 int ptlrpc_set_import_active(struct obd_import *imp, int active)
245 struct obd_device *obd = imp->imp_obd;
251 /* When deactivating, mark import invalid, and abort in-flight
254 LCONSOLE_WARN("setting import %s INACTIVE by administrator "
255 "request\n", obd2cli_tgt(imp->imp_obd));
257 /* set before invalidate to avoid messages about imp_inval
258 * set without imp_deactive in ptlrpc_import_delay_req */
259 spin_lock(&imp->imp_lock);
260 imp->imp_deactive = 1;
261 spin_unlock(&imp->imp_lock);
263 obd_import_event(imp->imp_obd, imp, IMP_EVENT_DEACTIVATE);
265 ptlrpc_invalidate_import(imp);
268 /* When activating, mark import valid, and attempt recovery */
270 CDEBUG(D_HA, "setting import %s VALID\n",
271 obd2cli_tgt(imp->imp_obd));
273 spin_lock(&imp->imp_lock);
274 imp->imp_deactive = 0;
275 spin_unlock(&imp->imp_lock);
276 obd_import_event(imp->imp_obd, imp, IMP_EVENT_ACTIVATE);
278 rc = ptlrpc_recover_import(imp, NULL, 0);
283 EXPORT_SYMBOL(ptlrpc_set_import_active);
285 bool ptlrpc_import_in_recovery_disconnect(struct obd_import *imp,
286 bool disconnect_is_recovery)
288 bool in_recovery = true;
290 spin_lock(&imp->imp_lock);
291 if (imp->imp_state < LUSTRE_IMP_DISCON ||
292 (!disconnect_is_recovery && imp->imp_state == LUSTRE_IMP_DISCON) ||
293 imp->imp_state >= LUSTRE_IMP_FULL ||
294 imp->imp_obd->obd_no_recov)
296 spin_unlock(&imp->imp_lock);
/* Attempt to reconnect an import */
/* NOTE(review): this function appears to have lost several physical lines
 * in extraction (the opening brace, the declaration of 'rc', the bodies of
 * some error branches, the 'if (new_uuid)' guard, the wait timeout argument,
 * and the final return) — reconcile against the canonical source before
 * building. The comments below only describe what the visible code shows. */
int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async)
	/* Refuse recovery while the import was never connected, is
	 * administratively deactivated, or is mid-invalidation. */
	spin_lock(&imp->imp_lock);
	if (imp->imp_state == LUSTRE_IMP_NEW || imp->imp_deactive ||
	    atomic_read(&imp->imp_inval_count))
	spin_unlock(&imp->imp_lock);

	/* force import to be disconnected. */
	ptlrpc_set_import_discon(imp, 0, false);

		struct obd_uuid uuid;

		/* instruct import to use new uuid */
		obd_str2uuid(&uuid, new_uuid);
		rc = import_set_conn_priority(imp, &uuid);

	/* Check if reconnect is already in progress */
	spin_lock(&imp->imp_lock);
	if (imp->imp_state != LUSTRE_IMP_DISCON) {
		/* presumably asks the ongoing connect to verify the
		 * connection instead of starting a second one — confirm
		 * against the import state machine */
		imp->imp_force_verify = 1;
	spin_unlock(&imp->imp_lock);

	CFS_RACE(OBD_FAIL_PTLRPC_CONNECT_RACE);
	rc = ptlrpc_connect_import(imp);

	/* Unless the caller asked for asynchronous recovery, block until the
	 * import leaves recovery (LUSTRE_IMP_DISCON counts as in-recovery
	 * here) or the wait times out. */
	if (!async && (rc == -EALREADY || rc == 0)) {
		long timeout = cfs_time_seconds(obd_timeout);

		CDEBUG(D_HA, "%s: recovery started, waiting %u seconds\n",
		       obd2cli_tgt(imp->imp_obd), obd_timeout);

		rc = wait_event_idle_timeout(imp->imp_recovery_waitq,
			!ptlrpc_import_in_recovery_disconnect(imp, true),
		CDEBUG(D_HA, "%s: recovery finished %s, rc = %d\n",
		       obd2cli_tgt(imp->imp_obd),
		       ptlrpc_import_state_name(imp->imp_state), rc);
EXPORT_SYMBOL(ptlrpc_recover_import);
366 bool ptlrpc_import_in_recovery(struct obd_import *imp)
368 return ptlrpc_import_in_recovery_disconnect(imp, false);