*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2014, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
#define DEBUG_SUBSYSTEM S_RPC
+#include <linux/list.h>
#include <libcfs/libcfs.h>
#include <obd_support.h>
#include <lustre_ha.h>
#include <lustre_export.h>
#include <obd.h>
#include <obd_class.h>
-#include <libcfs/list.h>
#include "ptlrpc_internal.h"
imp->imp_last_transno_checked = 0;
ptlrpc_free_committed(imp);
last_transno = imp->imp_last_replay_transno;
- spin_unlock(&imp->imp_lock);
- CDEBUG(D_HA, "import %p from %s committed "LPU64" last "LPU64"\n",
- imp, obd2cli_tgt(imp->imp_obd),
- imp->imp_peer_committed_transno, last_transno);
-
- /* Do I need to hold a lock across this iteration? We shouldn't be
- * racing with any additions to the list, because we're in recovery
- * and are therefore not processing additional requests to add. Calls
- * to ptlrpc_free_committed might commit requests, but nothing "newer"
- * than the one we're replaying (it can't be committed until it's
- * replayed, and we're doing that here). l_f_e_safe protects against
- * problems with the current request being committed, in the unlikely
- * event of that race. So, in conclusion, I think that it's safe to
- * perform this list-walk without the imp_lock held.
- *
- * But, the {mdc,osc}_replay_open callbacks both iterate
- * request lists, and have comments saying they assume the
- * imp_lock is being held by ptlrpc_replay, but it's not. it's
- * just a little race...
- */
+ CDEBUG(D_HA, "import %p from %s committed %llu last %llu\n",
+ imp, obd2cli_tgt(imp->imp_obd),
+ imp->imp_peer_committed_transno, last_transno);
/* Replay all the committed open requests on committed_list first */
if (!list_empty(&imp->imp_committed_list)) {
/* The last request on committed_list hasn't been replayed */
if (req->rq_transno > last_transno) {
- /* Since the imp_committed_list is immutable before
- * all of it's requests being replayed, it's safe to
- * use a cursor to accelerate the search */
if (!imp->imp_resend_replay ||
imp->imp_replay_cursor == &imp->imp_committed_list)
imp->imp_replay_cursor =
break;
req = NULL;
+ LASSERT(!list_empty(imp->imp_replay_cursor));
imp->imp_replay_cursor =
imp->imp_replay_cursor->next;
}
/* If need to resend the last sent transno (because a reconnect
* has occurred), then stop on the matching req and send it again.
- * If, however, the last sent transno has been committed then we
+ * If, however, the last sent transno has been committed then we
* continue replay from the next request. */
if (req != NULL && imp->imp_resend_replay)
lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
- spin_lock(&imp->imp_lock);
+ * ptlrpc_prepare_replay() may fail to add the request into unreplied
+ * list if the request hasn't been added to replay list yet. Another
+ * exception is that resend replay could have been removed from the
+ * unreplied list. */
+ if (req != NULL && list_empty(&req->rq_unreplied_list)) {
+ DEBUG_REQ(D_HA, req, "resend_replay: %d, last_transno: %llu\n",
+ imp->imp_resend_replay, last_transno);
+ ptlrpc_add_unreplied(req);
+ imp->imp_known_replied_xid = ptlrpc_known_replied_xid(imp);
+ }
+
imp->imp_resend_replay = 0;
spin_unlock(&imp->imp_lock);
- if (req != NULL) {
- rc = ptlrpc_replay_req(req);
- if (rc) {
- CERROR("recovery replay error %d for req "
- LPU64"\n", rc, req->rq_xid);
- RETURN(rc);
- }
- *inflight = 1;
- }
- RETURN(rc);
+ if (req != NULL) {
+ LASSERT(!list_empty(&req->rq_unreplied_list));
+
+ rc = ptlrpc_replay_req(req);
+ if (rc) {
+ CERROR("recovery replay error %d for req "
+ "%llu\n", rc, req->rq_xid);
+ RETURN(rc);
+ }
+ *inflight = 1;
+ }
+ RETURN(rc);
}
/**
}
list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) {
- LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON,
+ LASSERTF((long)req > PAGE_SIZE && req != LP_POISON,
"req %p bad\n", req);
LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
}
spin_unlock(&imp->imp_lock);
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT, 2);
RETURN(0);
}
if (rc)
GOTO(out, rc);
+ OBD_RACE(OBD_FAIL_PTLRPC_CONNECT_RACE);
+
rc = ptlrpc_connect_import(imp);
if (rc)
GOTO(out, rc);
if (!async) {
struct l_wait_info lwi;
- int secs = cfs_time_seconds(obd_timeout);
+ long secs = cfs_time_seconds(obd_timeout);
- CDEBUG(D_HA, "%s: recovery started, waiting %u seconds\n",
+ CDEBUG(D_HA, "%s: recovery started, waiting %lu seconds\n",
obd2cli_tgt(imp->imp_obd), secs);
lwi = LWI_TIMEOUT(secs, NULL, NULL);
int in_recovery = 1;
spin_lock(&imp->imp_lock);
- if (imp->imp_state == LUSTRE_IMP_FULL ||
- imp->imp_state == LUSTRE_IMP_CLOSED ||
- imp->imp_state == LUSTRE_IMP_DISCON ||
+ if (imp->imp_state <= LUSTRE_IMP_DISCON ||
+ imp->imp_state >= LUSTRE_IMP_FULL ||
imp->imp_obd->obd_no_recov)
in_recovery = 0;
spin_unlock(&imp->imp_lock);