*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2016, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
#define DEBUG_SUBSYSTEM S_RPC
-#ifdef __KERNEL__
-# include <libcfs/libcfs.h>
-#else
-# include <liblustre.h>
-#endif
-
+#include <linux/list.h>
+#include <libcfs/libcfs.h>
#include <obd_support.h>
#include <lustre_ha.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_export.h>
#include <obd.h>
-#include <obd_ost.h>
#include <obd_class.h>
-#include <obd_lov.h> /* for IOC_LOV_SET_OSC_ACTIVE */
-#include <libcfs/list.h>
#include "ptlrpc_internal.h"
int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
{
int rc = 0;
- cfs_list_t *tmp, *pos;
+ struct list_head *tmp, *pos;
struct ptlrpc_request *req = NULL;
__u64 last_transno;
ENTRY;
imp->imp_last_transno_checked = 0;
ptlrpc_free_committed(imp);
last_transno = imp->imp_last_replay_transno;
- spin_unlock(&imp->imp_lock);
- CDEBUG(D_HA, "import %p from %s committed "LPU64" last "LPU64"\n",
- imp, obd2cli_tgt(imp->imp_obd),
- imp->imp_peer_committed_transno, last_transno);
-
- /* Do I need to hold a lock across this iteration? We shouldn't be
- * racing with any additions to the list, because we're in recovery
- * and are therefore not processing additional requests to add. Calls
- * to ptlrpc_free_committed might commit requests, but nothing "newer"
- * than the one we're replaying (it can't be committed until it's
- * replayed, and we're doing that here). l_f_e_safe protects against
- * problems with the current request being committed, in the unlikely
- * event of that race. So, in conclusion, I think that it's safe to
- * perform this list-walk without the imp_lock held.
- *
- * But, the {mdc,osc}_replay_open callbacks both iterate
- * request lists, and have comments saying they assume the
- * imp_lock is being held by ptlrpc_replay, but it's not. it's
- * just a little race...
- */
+ CDEBUG(D_HA, "import %p from %s committed %llu last %llu\n",
+ imp, obd2cli_tgt(imp->imp_obd),
+ imp->imp_peer_committed_transno, last_transno);
/* Replay all the committed open requests on committed_list first */
- if (!cfs_list_empty(&imp->imp_committed_list)) {
+ if (!list_empty(&imp->imp_committed_list)) {
tmp = imp->imp_committed_list.prev;
- req = cfs_list_entry(tmp, struct ptlrpc_request,
+ req = list_entry(tmp, struct ptlrpc_request,
rq_replay_list);
/* The last request on committed_list hasn't been replayed */
if (req->rq_transno > last_transno) {
- /* Since the imp_committed_list is immutable before
- * all of it's requests being replayed, it's safe to
- * use a cursor to accelerate the search */
- imp->imp_replay_cursor = imp->imp_replay_cursor->next;
+ if (!imp->imp_resend_replay ||
+ imp->imp_replay_cursor == &imp->imp_committed_list)
+ imp->imp_replay_cursor =
+ imp->imp_replay_cursor->next;
while (imp->imp_replay_cursor !=
&imp->imp_committed_list) {
- req = cfs_list_entry(imp->imp_replay_cursor,
+ req = list_entry(imp->imp_replay_cursor,
struct ptlrpc_request,
rq_replay_list);
if (req->rq_transno > last_transno)
break;
req = NULL;
+ LASSERT(!list_empty(imp->imp_replay_cursor));
imp->imp_replay_cursor =
imp->imp_replay_cursor->next;
}
/* All the requests in committed list have been replayed, let's replay
* the imp_replay_list */
if (req == NULL) {
- cfs_list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
- req = cfs_list_entry(tmp, struct ptlrpc_request,
+ list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
+ req = list_entry(tmp, struct ptlrpc_request,
rq_replay_list);
if (req->rq_transno > last_transno)
/* If need to resend the last sent transno (because a reconnect
* has occurred), then stop on the matching req and send it again.
- * If, however, the last sent transno has been committed then we
+ * If, however, the last sent transno has been committed then we
* continue replay from the next request. */
if (req != NULL && imp->imp_resend_replay)
lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
- spin_lock(&imp->imp_lock);
+ /* ptlrpc_prepare_replay() may fail to add the reqeust into unreplied
+ * list if the request hasn't been added to replay list then. Another
+ * exception is that resend replay could have been removed from the
+ * unreplied list. */
+ if (req != NULL && list_empty(&req->rq_unreplied_list)) {
+ DEBUG_REQ(D_HA, req, "resend_replay: %d, last_transno: %llu\n",
+ imp->imp_resend_replay, last_transno);
+ ptlrpc_add_unreplied(req);
+ imp->imp_known_replied_xid = ptlrpc_known_replied_xid(imp);
+ }
+
imp->imp_resend_replay = 0;
spin_unlock(&imp->imp_lock);
- if (req != NULL) {
- rc = ptlrpc_replay_req(req);
- if (rc) {
- CERROR("recovery replay error %d for req "
- LPU64"\n", rc, req->rq_xid);
- RETURN(rc);
- }
- *inflight = 1;
- }
- RETURN(rc);
+ if (req != NULL) {
+ LASSERT(!list_empty(&req->rq_unreplied_list));
+
+ rc = ptlrpc_replay_req(req);
+ if (rc) {
+ CERROR("recovery replay error %d for req "
+ "%llu\n", rc, req->rq_xid);
+ RETURN(rc);
+ }
+ *inflight = 1;
+ }
+ RETURN(rc);
}
/**
RETURN(-1);
}
- cfs_list_for_each_entry_safe(req, next, &imp->imp_sending_list,
- rq_list) {
- LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON,
- "req %p bad\n", req);
- LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
- if (!ptlrpc_no_resend(req))
- ptlrpc_resend_req(req);
- }
+ list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) {
+ LASSERTF((long)req > PAGE_SIZE && req != LP_POISON,
+ "req %p bad\n", req);
+ LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
+
+ /* If the request is allowed to be sent during replay and it
+ * is not timeout yet, then it does not need to be resent. */
+ if (!ptlrpc_no_resend(req) &&
+ (req->rq_timedout || !req->rq_allow_replay))
+ ptlrpc_resend_req(req);
+ }
spin_unlock(&imp->imp_lock);
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT, 2);
RETURN(0);
}
-EXPORT_SYMBOL(ptlrpc_resend);
/**
 * Go through all requests in delayed list and wake their threads
 * for resending.
 *
 * Walks \a imp->imp_delayed_list under imp_lock and wakes the thread
 * (or request set) waiting on each request so it can re-evaluate the
 * import state.
 *
 * \param[in] imp  import whose delayed requests should be woken
 */
void ptlrpc_wake_delayed(struct obd_import *imp)
{
	struct list_head *tmp, *pos;
	struct ptlrpc_request *req;

	spin_lock(&imp->imp_lock);
	/* Use the _safe iterator: once woken, a request's thread may pull
	 * it off the list as soon as imp_lock is released. */
	list_for_each_safe(tmp, pos, &imp->imp_delayed_list) {
		req = list_entry(tmp, struct ptlrpc_request, rq_list);

		DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set);
		ptlrpc_client_wake_req(req);
	}
	spin_unlock(&imp->imp_lock);
}
-EXPORT_SYMBOL(ptlrpc_wake_delayed);
void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
{
spin_lock(&imp->imp_lock);
if (imp->imp_state == LUSTRE_IMP_NEW || imp->imp_deactive ||
- cfs_atomic_read(&imp->imp_inval_count))
+ atomic_read(&imp->imp_inval_count))
rc = -EINVAL;
spin_unlock(&imp->imp_lock);
if (rc)
/**
 * Check whether import is in recovery.
 *
 * An import is considered "in recovery" unless it is fully connected
 * (LUSTRE_IMP_FULL), permanently closed (LUSTRE_IMP_CLOSED), idle in
 * disconnected state (LUSTRE_IMP_DISCON), or recovery has been
 * administratively disabled on the device (obd_no_recov).
 *
 * \param[in] imp  import to check
 *
 * \retval 1  the import is currently recovering
 * \retval 0  the import is not in recovery
 */
int ptlrpc_import_in_recovery(struct obd_import *imp)
{
	int in_recovery = 1;

	spin_lock(&imp->imp_lock);
	if (imp->imp_state == LUSTRE_IMP_FULL ||
	    imp->imp_state == LUSTRE_IMP_CLOSED ||
	    imp->imp_state == LUSTRE_IMP_DISCON ||
	    imp->imp_obd->obd_no_recov)
		in_recovery = 0;
	spin_unlock(&imp->imp_lock);

	return in_recovery;
}