CFS_INIT_LIST_HEAD(&request->rq_mod_list);
CFS_INIT_LIST_HEAD(&request->rq_ctx_chain);
CFS_INIT_LIST_HEAD(&request->rq_set_chain);
+ CFS_INIT_LIST_HEAD(&request->rq_history_list);
cfs_waitq_init(&request->rq_reply_waitq);
request->rq_xid = ptlrpc_next_xid();
atomic_set(&request->rq_refcount, 1);
request->rq_pill.rc_area[RCL_CLIENT],
bufs, ctx);
}
+EXPORT_SYMBOL(ptlrpc_request_bufs_pack);
int ptlrpc_request_pack(struct ptlrpc_request *request,
__u32 version, int opcode)
DEBUG_REQ(D_ERROR, req, "IMP_CLOSED ");
*status = -EIO;
} else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
- imp->imp_state == LUSTRE_IMP_CONNECTING) {
+ imp->imp_state == LUSTRE_IMP_CONNECTING) {
/* allow CONNECT even if import is invalid */ ;
- } else if (imp->imp_invalid) {
- /* if it is mgc, wait for recovry. b=13464 */
- if (imp->imp_recon_bk && !imp->imp_obd->obd_no_recov)
- delay = 1;
+ if (atomic_read(&imp->imp_inval_count) != 0) {
+ DEBUG_REQ(D_ERROR, req, "invalidate in flight");
+ *status = -EIO;
+ }
+
+ } else if ((imp->imp_invalid && (!imp->imp_recon_bk)) ||
+ imp->imp_obd->obd_no_recov) {
/* If the import has been invalidated (such as by an OST
- * failure) the request must fail with -ESHUTDOWN. This
- * indicates the requests should be discarded; an -EIO
+ * failure), and if the import (MGC) has tried all of its connection
+ * list (Bug 13464), the request must fail with -ESHUTDOWN.
+ * This indicates the requests should be discarded; an -EIO
* may result in a resend of the request. */
if (!imp->imp_deactive)
- DEBUG_REQ(D_ERROR, req, "IMP_INVALID");
+ DEBUG_REQ(D_ERROR, req, "IMP_INVALID");
*status = -ESHUTDOWN; /* bz 12940 */
} else if (req->rq_import_generation != imp->imp_generation) {
DEBUG_REQ(D_ERROR, req, "req wrong generation:");
*status = -EIO;
} else if (req->rq_send_state != imp->imp_state) {
- if (imp->imp_obd->obd_no_recov || imp->imp_dlm_fake ||
- req->rq_no_delay)
+ /* invalidate in progress - any requests should be dropped */
+ if (atomic_read(&imp->imp_inval_count) != 0) {
+ DEBUG_REQ(D_ERROR, req, "invalidate in flight");
+ *status = -EIO;
+ } else if (imp->imp_dlm_fake || req->rq_no_delay) {
*status = -EWOULDBLOCK;
- else
+ } else {
delay = 1;
+ }
}
RETURN(delay);
ENTRY;
LASSERT(req->rq_phase == RQ_PHASE_NEW);
- if (req->rq_sent && (req->rq_sent > CURRENT_SECONDS))
+ if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()))
RETURN (0);
req->rq_phase = RQ_PHASE_RPC;
RETURN(1);
} else {
/* here begins timeout counting */
- req->rq_sent = CURRENT_SECONDS;
+ req->rq_sent = cfs_time_current_sec();
req->rq_wait_ctx = 1;
RETURN(0);
}
}
if (!req->rq_wait_ctx) {
/* begins timeout counting */
- req->rq_sent = CURRENT_SECONDS;
+ req->rq_sent = cfs_time_current_sec();
req->rq_wait_ctx = 1;
}
continue;
libcfs_nid2str(imp->imp_connection->c_peer.nid),
lustre_msg_get_opc(req->rq_reqmsg));
- set->set_remaining--;
-
atomic_dec(&imp->imp_inflight);
+ set->set_remaining--;
cfs_waitq_signal(&imp->imp_recovery_waitq);
}
int rc = 0;
ENTRY;
- DEBUG_REQ(D_ERROR|D_NETERROR, req, "%s (sent at %lu, %lus ago)",
+ DEBUG_REQ(D_ERROR|D_NETERROR, req, "%s (sent at %lu, "CFS_DURATION_T"s ago)",
req->rq_net_err ? "network error" : "timeout",
- (long)req->rq_sent, CURRENT_SECONDS - req->rq_sent);
+ (long)req->rq_sent, cfs_time_current_sec() - req->rq_sent);
if (imp != NULL && obd_debug_peer_on_timeout)
LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer);
{
struct ptlrpc_request_set *set = data;
struct list_head *tmp;
- time_t now = CURRENT_SECONDS;
+ time_t now = cfs_time_current_sec();
ENTRY;
LASSERT(set != NULL);
int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
{
struct list_head *tmp;
- time_t now = CURRENT_SECONDS;
+ time_t now = cfs_time_current_sec();
time_t deadline;
int timeout = 0;
struct ptlrpc_request *req;
req->rq_phase = RQ_PHASE_RPC;
spin_lock(&imp->imp_lock);
-restart:
req->rq_import_generation = imp->imp_generation;
+restart:
if (ptlrpc_import_delay_req(imp, req, &rc)) {
list_del(&req->rq_list);