X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fptlrpc%2Fclient.c;h=78022824ba4695b51002e0d6795ab4e0600bb14c;hp=69259bbc436501c241c86e1baa70d1217438a4c5;hb=fb4073bc3cfbf1a7ad17b03270e986098a2869ee;hpb=20d7fb85fb4f2e6af876995228908d6b8bf7eaa4 diff --git a/lustre/ptlrpc/client.c b/lustre/ptlrpc/client.c index 69259bb..7802282 100644 --- a/lustre/ptlrpc/client.c +++ b/lustre/ptlrpc/client.c @@ -90,7 +90,7 @@ static inline struct ptlrpc_bulk_desc *new_bulk(int npages, int type, int portal if (!desc) return NULL; - spin_lock_init(&desc->bd_lock); + cfs_spin_lock_init(&desc->bd_lock); cfs_waitq_init(&desc->bd_waitq); desc->bd_max_iov = npages; desc->bd_iov_count = 0; @@ -101,8 +101,8 @@ static inline struct ptlrpc_bulk_desc *new_bulk(int npages, int type, int portal return desc; } -struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp (struct ptlrpc_request *req, - int npages, int type, int portal) +struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req, + int npages, int type, int portal) { struct obd_import *imp = req->rq_import; struct ptlrpc_bulk_desc *desc; @@ -198,16 +198,14 @@ void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req) if (AT_OFF) { /* non-AT settings */ req->rq_timeout = req->rq_import->imp_server_timeout ? - obd_timeout / 2 : obd_timeout; - lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout); - return; + obd_timeout / 2 : obd_timeout; + } else { + at = &req->rq_import->imp_at; + idx = import_at_get_index(req->rq_import, + req->rq_request_portal); + serv_est = at_get(&at->iat_service_estimate[idx]); + req->rq_timeout = at_est2timeout(serv_est); } - - at = &req->rq_import->imp_at; - idx = import_at_get_index(req->rq_import, - req->rq_request_portal); - serv_est = at_get(&at->iat_service_estimate[idx]); - req->rq_timeout = at_est2timeout(serv_est); /* We could get even fancier here, using history to predict increased loading... 
*/ @@ -224,17 +222,13 @@ static void ptlrpc_at_adj_service(struct ptlrpc_request *req, unsigned int oldse; struct imp_at *at; - /* do estimate only if is not in recovery */ - if (!(req->rq_send_state & (LUSTRE_IMP_FULL | LUSTRE_IMP_CONNECTING))) - return; - LASSERT(req->rq_import); at = &req->rq_import->imp_at; idx = import_at_get_index(req->rq_import, req->rq_request_portal); /* max service estimates are tracked on the server side, so just keep minimal history here */ - oldse = at_add(&at->iat_service_estimate[idx], serv_est); + oldse = at_measured(&at->iat_service_estimate[idx], serv_est); if (oldse != 0) CDEBUG(D_ADAPTTO, "The RPC service estimate for %s ptl %d " "has changed from %d to %d\n", @@ -266,7 +260,7 @@ static void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req, CFS_DURATION_T"\n", service_time, cfs_time_sub(now, req->rq_sent)); - oldnl = at_add(&at->iat_net_latency, nl); + oldnl = at_measured(&at->iat_net_latency, nl); if (oldnl != 0) CDEBUG(D_ADAPTTO, "The network latency for %s (nid %s) " "has changed from %d to %d\n", @@ -280,13 +274,12 @@ static int unpack_reply(struct ptlrpc_request *req) { int rc; - /* Clear reply swab mask; we may have already swabbed an early reply */ - req->rq_rep_swab_mask = 0; - - rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen); - if (rc) { - DEBUG_REQ(D_ERROR, req, "unpack_rep failed: %d", rc); - return(-EPROTO); + if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) { + rc = ptlrpc_unpack_rep_msg(req, req->rq_replen); + if (rc) { + DEBUG_REQ(D_ERROR, req, "unpack_rep failed: %d", rc); + return(-EPROTO); + } } rc = lustre_unpack_rep_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF); @@ -309,11 +302,11 @@ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req) ENTRY; req->rq_early = 0; - spin_unlock(&req->rq_lock); + cfs_spin_unlock(&req->rq_lock); rc = sptlrpc_cli_unwrap_early_reply(req, &early_req); if (rc) { - spin_lock(&req->rq_lock); + cfs_spin_lock(&req->rq_lock); RETURN(rc); } @@ -328,7 +321,7 @@ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req) sptlrpc_cli_finish_early_reply(early_req); - spin_lock(&req->rq_lock); + cfs_spin_lock(&req->rq_lock); if (rc == 0) { /* Adjust the local timeout for this req */ @@ -353,20 +346,21 @@ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req) void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool) { - struct list_head *l, *tmp; + cfs_list_t *l, *tmp; struct ptlrpc_request *req; - if (!pool) - return; + LASSERT(pool != NULL); - list_for_each_safe(l, tmp, &pool->prp_req_list) { - req = list_entry(l, struct ptlrpc_request, rq_list); - list_del(&req->rq_list); + cfs_spin_lock(&pool->prp_lock); + cfs_list_for_each_safe(l, tmp, &pool->prp_req_list) { + req = cfs_list_entry(l, struct ptlrpc_request, rq_list); + cfs_list_del(&req->rq_list); LASSERT(req->rq_reqbuf); LASSERT(req->rq_reqbuf_len == pool->prp_rq_size); OBD_FREE(req->rq_reqbuf, pool->prp_rq_size); OBD_FREE(req, sizeof(*req)); } + cfs_spin_unlock(&pool->prp_lock); OBD_FREE(pool, sizeof(*pool)); } @@ -375,20 +369,21 @@ void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq) int i; int size = 1; - while (size < pool->prp_rq_size + SPTLRPC_MAX_PAYLOAD) + while (size < pool->prp_rq_size) size <<= 1; - LASSERTF(list_empty(&pool->prp_req_list) || size == pool->prp_rq_size, + LASSERTF(cfs_list_empty(&pool->prp_req_list) || + size == pool->prp_rq_size, "Trying to change pool size with nonempty pool " "from %d to %d bytes\n", pool->prp_rq_size, size); - spin_lock(&pool->prp_lock); + 
cfs_spin_lock(&pool->prp_lock); pool->prp_rq_size = size; for (i = 0; i < num_rq; i++) { struct ptlrpc_request *req; struct lustre_msg *msg; - spin_unlock(&pool->prp_lock); + cfs_spin_unlock(&pool->prp_lock); OBD_ALLOC(req, sizeof(struct ptlrpc_request)); if (!req) return; @@ -400,10 +395,10 @@ void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq) req->rq_reqbuf = msg; req->rq_reqbuf_len = size; req->rq_pool = pool; - spin_lock(&pool->prp_lock); - list_add_tail(&req->rq_list, &pool->prp_req_list); + cfs_spin_lock(&pool->prp_lock); + cfs_list_add_tail(&req->rq_list, &pool->prp_req_list); } - spin_unlock(&pool->prp_lock); + cfs_spin_unlock(&pool->prp_lock); return; } @@ -420,14 +415,14 @@ ptlrpc_init_rq_pool(int num_rq, int msgsize, /* Request next power of two for the allocation, because internally kernel would do exactly this */ - spin_lock_init(&pool->prp_lock); + cfs_spin_lock_init(&pool->prp_lock); CFS_INIT_LIST_HEAD(&pool->prp_req_list); - pool->prp_rq_size = msgsize; + pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD; pool->prp_populate = populate_pool; populate_pool(pool, num_rq); - if (list_empty(&pool->prp_req_list)) { + if (cfs_list_empty(&pool->prp_req_list)) { /* have not allocated a single request for the pool */ OBD_FREE(pool, sizeof (struct ptlrpc_request_pool)); pool = NULL; @@ -444,21 +439,21 @@ ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool) if (!pool) return NULL; - spin_lock(&pool->prp_lock); + cfs_spin_lock(&pool->prp_lock); /* See if we have anything in a pool, and bail out if nothing, * in writeout path, where this matters, this is safe to do, because * nothing is lost in this case, and when some in-flight requests * complete, this code will be called again. */ - if (unlikely(list_empty(&pool->prp_req_list))) { - spin_unlock(&pool->prp_lock); + if (unlikely(cfs_list_empty(&pool->prp_req_list))) { + cfs_spin_unlock(&pool->prp_lock); return NULL; } - request = list_entry(pool->prp_req_list.next, struct ptlrpc_request, - rq_list); - list_del(&request->rq_list); - spin_unlock(&pool->prp_lock); + request = cfs_list_entry(pool->prp_req_list.next, struct ptlrpc_request, + rq_list); + cfs_list_del_init(&request->rq_list); + cfs_spin_unlock(&pool->prp_lock); LASSERT(request->rq_reqbuf); LASSERT(request->rq_pool); @@ -476,11 +471,11 @@ static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request) { struct ptlrpc_request_pool *pool = request->rq_pool; - spin_lock(&pool->prp_lock); - LASSERT(list_empty(&request->rq_list)); + cfs_spin_lock(&pool->prp_lock); + LASSERT(cfs_list_empty(&request->rq_list)); LASSERT(!request->rq_receiving_reply); - list_add_tail(&request->rq_list, &pool->prp_req_list); - spin_unlock(&pool->prp_lock); + cfs_list_add_tail(&request->rq_list, &pool->prp_req_list); + cfs_spin_unlock(&pool->prp_lock); } static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request, @@ -529,17 +524,18 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request, ptlrpc_at_set_req_timeout(request); - spin_lock_init(&request->rq_lock); + cfs_spin_lock_init(&request->rq_lock); CFS_INIT_LIST_HEAD(&request->rq_list); CFS_INIT_LIST_HEAD(&request->rq_timed_list); CFS_INIT_LIST_HEAD(&request->rq_replay_list); - CFS_INIT_LIST_HEAD(&request->rq_mod_list); CFS_INIT_LIST_HEAD(&request->rq_ctx_chain); CFS_INIT_LIST_HEAD(&request->rq_set_chain); CFS_INIT_LIST_HEAD(&request->rq_history_list); + CFS_INIT_LIST_HEAD(&request->rq_exp_list); cfs_waitq_init(&request->rq_reply_waitq); + cfs_waitq_init(&request->rq_set_waitq); 
request->rq_xid = ptlrpc_next_xid(); - atomic_set(&request->rq_refcount, 1); + cfs_atomic_set(&request->rq_refcount, 1); lustre_msg_set_opc(request->rq_reqmsg, opcode); @@ -681,6 +677,68 @@ ptlrpc_prep_req(struct obd_import *imp, __u32 version, int opcode, int count, NULL); } +struct ptlrpc_request *ptlrpc_prep_fakereq(struct obd_import *imp, + unsigned int timeout, + ptlrpc_interpterer_t interpreter) +{ + struct ptlrpc_request *request = NULL; + ENTRY; + + OBD_ALLOC(request, sizeof(*request)); + if (!request) { + CERROR("request allocation out of memory\n"); + RETURN(NULL); + } + + request->rq_send_state = LUSTRE_IMP_FULL; + request->rq_type = PTL_RPC_MSG_REQUEST; + request->rq_import = class_import_get(imp); + request->rq_export = NULL; + request->rq_import_generation = imp->imp_generation; + + request->rq_timeout = timeout; + request->rq_sent = cfs_time_current_sec(); + request->rq_deadline = request->rq_sent + timeout; + request->rq_reply_deadline = request->rq_deadline; + request->rq_interpret_reply = interpreter; + request->rq_phase = RQ_PHASE_RPC; + request->rq_next_phase = RQ_PHASE_INTERPRET; + /* don't want reply */ + request->rq_receiving_reply = 0; + request->rq_must_unlink = 0; + request->rq_no_delay = request->rq_no_resend = 1; + request->rq_fake = 1; + + cfs_spin_lock_init(&request->rq_lock); + CFS_INIT_LIST_HEAD(&request->rq_list); + CFS_INIT_LIST_HEAD(&request->rq_replay_list); + CFS_INIT_LIST_HEAD(&request->rq_set_chain); + CFS_INIT_LIST_HEAD(&request->rq_history_list); + CFS_INIT_LIST_HEAD(&request->rq_exp_list); + cfs_waitq_init(&request->rq_reply_waitq); + cfs_waitq_init(&request->rq_set_waitq); + + request->rq_xid = ptlrpc_next_xid(); + cfs_atomic_set(&request->rq_refcount, 1); + + RETURN(request); +} + +void ptlrpc_fakereq_finished(struct ptlrpc_request *req) +{ + /* if we kill request before timeout - need adjust counter */ + if (req->rq_phase == RQ_PHASE_RPC) { + struct ptlrpc_request_set *set = req->rq_set; + + if (set) + cfs_atomic_dec(&set->set_remaining); + } + + ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE); + cfs_list_del_init(&req->rq_list); +} + + struct ptlrpc_request_set *ptlrpc_prep_set(void) { struct ptlrpc_request_set *set; @@ -691,8 +749,8 @@ struct ptlrpc_request_set *ptlrpc_prep_set(void) RETURN(NULL); CFS_INIT_LIST_HEAD(&set->set_requests); cfs_waitq_init(&set->set_waitq); - set->set_remaining = 0; - spin_lock_init(&set->set_new_req_lock); + cfs_atomic_set(&set->set_remaining, 0); + cfs_spin_lock_init(&set->set_new_req_lock); CFS_INIT_LIST_HEAD(&set->set_new_requests); CFS_INIT_LIST_HEAD(&set->set_cblist); @@ -702,52 +760,46 @@ struct ptlrpc_request_set *ptlrpc_prep_set(void) /* Finish with this set; opposite of prep_set. */ void ptlrpc_set_destroy(struct ptlrpc_request_set *set) { - struct list_head *tmp; - struct list_head *next; + cfs_list_t *tmp; + cfs_list_t *next; int expected_phase; int n = 0; ENTRY; /* Requests on the set should either all be completed, or all be new */ - expected_phase = (set->set_remaining == 0) ? + expected_phase = (cfs_atomic_read(&set->set_remaining) == 0) ? 
RQ_PHASE_COMPLETE : RQ_PHASE_NEW; - list_for_each (tmp, &set->set_requests) { + cfs_list_for_each (tmp, &set->set_requests) { struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, rq_set_chain); + cfs_list_entry(tmp, struct ptlrpc_request, + rq_set_chain); LASSERT(req->rq_phase == expected_phase); n++; } - LASSERT(set->set_remaining == 0 || set->set_remaining == n); + LASSERTF(cfs_atomic_read(&set->set_remaining) == 0 || + cfs_atomic_read(&set->set_remaining) == n, "%d / %d\n", + cfs_atomic_read(&set->set_remaining), n); - list_for_each_safe(tmp, next, &set->set_requests) { + cfs_list_for_each_safe(tmp, next, &set->set_requests) { struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, rq_set_chain); - list_del_init(&req->rq_set_chain); + cfs_list_entry(tmp, struct ptlrpc_request, + rq_set_chain); + cfs_list_del_init(&req->rq_set_chain); LASSERT(req->rq_phase == expected_phase); if (req->rq_phase == RQ_PHASE_NEW) { - - if (req->rq_interpret_reply != NULL) { - ptlrpc_interpterer_t interpreter = - req->rq_interpret_reply; - - /* higher level (i.e. LOV) failed; - * let the sub reqs clean up */ - req->rq_status = -EBADR; - interpreter(NULL, req, &req->rq_async_args, - req->rq_status); - } - set->set_remaining--; + ptlrpc_req_interpret(NULL, req, -EBADR); + cfs_atomic_dec(&set->set_remaining); } req->rq_set = NULL; ptlrpc_req_finished (req); } - LASSERT(set->set_remaining == 0); + LASSERT(cfs_atomic_read(&set->set_remaining) == 0); OBD_FREE(set, sizeof(*set)); EXIT; @@ -764,7 +816,7 @@ int ptlrpc_set_add_cb(struct ptlrpc_request_set *set, cbdata->psc_interpret = fn; cbdata->psc_data = data; - list_add_tail(&cbdata->psc_item, &set->set_cblist); + cfs_list_add_tail(&cbdata->psc_item, &set->set_cblist); RETURN(0); } @@ -773,11 +825,10 @@ void ptlrpc_set_add_req(struct ptlrpc_request_set *set, struct ptlrpc_request *req) { /* The set takes over the caller's request reference */ - list_add_tail(&req->rq_set_chain, &set->set_requests); + cfs_list_add_tail(&req->rq_set_chain, &set->set_requests); req->rq_set = set; - set->set_remaining++; - - atomic_inc(&req->rq_import->imp_inflight); + cfs_atomic_inc(&set->set_remaining); + req->rq_queued_time = cfs_time_current(); /* Where is the best place to set this? */ } /** @@ -793,21 +844,17 @@ int ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc, * Let caller know that we stopped and will not handle this request. * It needs to take care itself of request. */ - if (test_bit(LIOD_STOP, &pc->pc_flags)) + if (cfs_test_bit(LIOD_STOP, &pc->pc_flags)) return -EALREADY; - spin_lock(&set->set_new_req_lock); + cfs_spin_lock(&set->set_new_req_lock); /* * The set takes over the caller's request reference. */ - list_add_tail(&req->rq_set_chain, &set->set_new_requests); + cfs_list_add_tail(&req->rq_set_chain, &set->set_new_requests); req->rq_set = set; - spin_unlock(&set->set_new_req_lock); + cfs_spin_unlock(&set->set_new_req_lock); - /* - * Let thead know that we added something and better it to wake up - * and process. 
- */ cfs_waitq_signal(&set->set_waitq); return 0; } @@ -840,20 +887,20 @@ static int ptlrpc_import_delay_req(struct obd_import *imp, } else if (imp->imp_state == LUSTRE_IMP_CLOSED) { DEBUG_REQ(D_ERROR, req, "IMP_CLOSED "); *status = -EIO; + } else if (imp->imp_obd->obd_no_recov) { + *status = -ESHUTDOWN; + } else if (ptlrpc_send_limit_expired(req)) { + /* probably doesn't need to be a D_ERROR after initial testing */ + DEBUG_REQ(D_ERROR, req, "send limit expired "); + *status = -EIO; } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING && imp->imp_state == LUSTRE_IMP_CONNECTING) { /* allow CONNECT even if import is invalid */ ; - if (atomic_read(&imp->imp_inval_count) != 0) { + if (cfs_atomic_read(&imp->imp_inval_count) != 0) { DEBUG_REQ(D_ERROR, req, "invalidate in flight"); *status = -EIO; } - } else if ((imp->imp_invalid && (!imp->imp_recon_bk)) || - imp->imp_obd->obd_no_recov) { - /* If the import has been invalidated (such as by an OST - * failure), and if the import(MGC) tried all of its connection - * list (Bug 13464), the request must fail with -ESHUTDOWN. - * This indicates the requests should be discarded; an -EIO - * may result in a resend of the request. */ + } else if (imp->imp_invalid) { if (!imp->imp_deactive) DEBUG_REQ(D_ERROR, req, "IMP_INVALID"); *status = -ESHUTDOWN; /* bz 12940 */ @@ -862,7 +909,7 @@ static int ptlrpc_import_delay_req(struct obd_import *imp, *status = -EIO; } else if (req->rq_send_state != imp->imp_state) { /* invalidate in progress - any requests should be drop */ - if (atomic_read(&imp->imp_inval_count) != 0) { + if (cfs_atomic_read(&imp->imp_inval_count) != 0) { DEBUG_REQ(D_ERROR, req, "invalidate in flight"); *status = -EIO; } else if (imp->imp_dlm_fake || req->rq_no_delay) { @@ -875,43 +922,28 @@ static int ptlrpc_import_delay_req(struct obd_import *imp, RETURN(delay); } -static int ptlrpc_check_reply(struct ptlrpc_request *req) +/* Conditionally suppress specific console messages */ +static int ptlrpc_console_allow(struct ptlrpc_request *req) { - int rc = 0; - ENTRY; - - /* serialise with network callback */ - spin_lock(&req->rq_lock); - - if (ptlrpc_client_replied(req)) - GOTO(out, rc = 1); - - if (req->rq_net_err && !req->rq_timedout) { - spin_unlock(&req->rq_lock); - rc = ptlrpc_expire_one_request(req, 0); - spin_lock(&req->rq_lock); - GOTO(out, rc); - } - - if (req->rq_err) - GOTO(out, rc = 1); + __u32 opc = lustre_msg_get_opc(req->rq_reqmsg); + int err; - if (req->rq_resend) - GOTO(out, rc = 1); + /* Suppress particular reconnect errors which are to be expected. 
No + * errors are suppressed for the initial connection on an import */ + if ((lustre_handle_is_used(&req->rq_import->imp_remote_handle)) && + (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT)) { - if (req->rq_restart) - GOTO(out, rc = 1); + /* Suppress timed out reconnect requests */ + if (req->rq_timedout) + return 0; - if (ptlrpc_client_early(req)) { - ptlrpc_at_recv_early_reply(req); - GOTO(out, rc = 0); /* keep waiting */ + /* Suppress unavailable/again reconnect requests */ + err = lustre_msg_get_status(req->rq_repmsg); + if (err == -ENODEV || err == -EAGAIN) + return 0; } - EXIT; - out: - spin_unlock(&req->rq_lock); - DEBUG_REQ(D_NET, req, "rc = %d for", rc); - return rc; + return 1; } static int ptlrpc_check_status(struct ptlrpc_request *req) @@ -937,10 +969,46 @@ static int ptlrpc_check_status(struct ptlrpc_request *req) DEBUG_REQ(D_INFO, req, "status is %d", err); } + if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) { + struct obd_import *imp = req->rq_import; + __u32 opc = lustre_msg_get_opc(req->rq_reqmsg); + + if (ptlrpc_console_allow(req)) + LCONSOLE_ERROR_MSG(0x011,"an error occurred while " + "communicating with %s. The %s " + "operation failed with %d\n", + libcfs_nid2str( + imp->imp_connection->c_peer.nid), + ll_opcode2str(opc), err); + + RETURN(err < 0 ? err : -EINVAL); + } + RETURN(err); } /** + * save pre-versions for replay + */ +static void ptlrpc_save_versions(struct ptlrpc_request *req) +{ + struct lustre_msg *repmsg = req->rq_repmsg; + struct lustre_msg *reqmsg = req->rq_reqmsg; + __u64 *versions = lustre_msg_get_versions(repmsg); + ENTRY; + + if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) + return; + + LASSERT(versions); + lustre_msg_set_versions(reqmsg, versions); + CDEBUG(D_INFO, "Client save versions ["LPX64"/"LPX64"]\n", + versions[0], versions[1]); + + EXIT; +} + +/** * Callback function called when client receives RPC reply for \a req. */ static int after_reply(struct ptlrpc_request *req) @@ -952,15 +1020,33 @@ static int after_reply(struct ptlrpc_request *req) long timediff; ENTRY; - LASSERT(!req->rq_receiving_reply); - LASSERT(obd); - LASSERT(req->rq_nob_received <= req->rq_repbuf_len); + LASSERT(obd != NULL); + /* repbuf must be unlinked */ + LASSERT(!req->rq_receiving_reply && !req->rq_must_unlink); + + if (req->rq_reply_truncate) { + if (ptlrpc_no_resend(req)) { + DEBUG_REQ(D_ERROR, req, "reply buffer overflow," + " expected: %d, actual size: %d", + req->rq_nob_received, req->rq_repbuf_len); + RETURN(-EOVERFLOW); + } + + sptlrpc_cli_free_repbuf(req); + /* Pass the required reply buffer size (include + * space for early reply). + * NB: no need to roundup because alloc_repbuf + * will roundup it */ + req->rq_replen = req->rq_nob_received; + req->rq_nob_received = 0; + req->rq_resend = 1; + RETURN(0); + } /* * NB Until this point, the whole of the incoming message, * including buflens, status etc is in the sender's byte order. 
*/ - rc = sptlrpc_cli_unwrap_reply(req); if (rc) { DEBUG_REQ(D_ERROR, req, "unwrap reply failed (%d):", rc); @@ -977,11 +1063,13 @@ static int after_reply(struct ptlrpc_request *req) if (rc) RETURN(rc); - do_gettimeofday(&work_start); + cfs_gettimeofday(&work_start); timediff = cfs_timeval_sub(&work_start, &req->rq_arrival_time, NULL); - if (obd->obd_svc_stats != NULL) + if (obd->obd_svc_stats != NULL) { lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR, timediff); + ptlrpc_lprocfs_rpc_sent(req, timediff); + } if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY && lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) { @@ -990,7 +1078,8 @@ static int after_reply(struct ptlrpc_request *req) RETURN(-EPROTO); } - OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, obd_fail_val); + if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING) + OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, obd_fail_val); ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg)); ptlrpc_at_adj_net_latency(req, lustre_msg_get_service_time(req->rq_repmsg)); @@ -1023,11 +1112,13 @@ static int after_reply(struct ptlrpc_request *req) /* * Store transno in reqmsg for replay. */ - req->rq_transno = lustre_msg_get_transno(req->rq_repmsg); - lustre_msg_set_transno(req->rq_reqmsg, req->rq_transno); + if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) { + req->rq_transno = lustre_msg_get_transno(req->rq_repmsg); + lustre_msg_set_transno(req->rq_reqmsg, req->rq_transno); + } - if (req->rq_import->imp_replayable) { - spin_lock(&imp->imp_lock); + if (imp->imp_replayable) { + cfs_spin_lock(&imp->imp_lock); /* * No point in adding already-committed requests to the replay * list, we will just remove them immediately. b=9829 @@ -1035,12 +1126,14 @@ static int after_reply(struct ptlrpc_request *req) if (req->rq_transno != 0 && (req->rq_transno > lustre_msg_get_last_committed(req->rq_repmsg) || - req->rq_replay)) + req->rq_replay)) { + /** version recovery */ + ptlrpc_save_versions(req); ptlrpc_retain_replayable_request(req, imp); - else if (req->rq_commit_cb != NULL) { - spin_unlock(&imp->imp_lock); + } else if (req->rq_commit_cb != NULL) { + cfs_spin_unlock(&imp->imp_lock); req->rq_commit_cb(req); - spin_lock(&imp->imp_lock); + cfs_spin_lock(&imp->imp_lock); } /* @@ -1051,7 +1144,7 @@ static int after_reply(struct ptlrpc_request *req) lustre_msg_get_last_committed(req->rq_repmsg); } ptlrpc_free_committed(imp); - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); } RETURN(rc); @@ -1070,38 +1163,37 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req) ptlrpc_rqphase_move(req, RQ_PHASE_RPC); imp = req->rq_import; - spin_lock(&imp->imp_lock); + cfs_spin_lock(&imp->imp_lock); req->rq_import_generation = imp->imp_generation; if (ptlrpc_import_delay_req(imp, req, &rc)) { - spin_lock (&req->rq_lock); + cfs_spin_lock(&req->rq_lock); req->rq_waiting = 1; - spin_unlock (&req->rq_lock); + cfs_spin_unlock(&req->rq_lock); DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: " - "(%s != %s)", - lustre_msg_get_status(req->rq_reqmsg) , + "(%s != %s)", lustre_msg_get_status(req->rq_reqmsg), ptlrpc_import_state_name(req->rq_send_state), ptlrpc_import_state_name(imp->imp_state)); - LASSERT(list_empty (&req->rq_list)); - - list_add_tail(&req->rq_list, &imp->imp_delayed_list); - spin_unlock(&imp->imp_lock); + LASSERT(cfs_list_empty(&req->rq_list)); + cfs_list_add_tail(&req->rq_list, &imp->imp_delayed_list); + cfs_atomic_inc(&req->rq_import->imp_inflight); + cfs_spin_unlock(&imp->imp_lock); RETURN(0); } if 
(rc != 0) { - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); req->rq_status = rc; ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); RETURN(rc); } - /* XXX this is the same as ptlrpc_queue_wait */ - LASSERT(list_empty(&req->rq_list)); - list_add_tail(&req->rq_list, &imp->imp_sending_list); - spin_unlock(&imp->imp_lock); + LASSERT(cfs_list_empty(&req->rq_list)); + cfs_list_add_tail(&req->rq_list, &imp->imp_sending_list); + cfs_atomic_inc(&req->rq_import->imp_inflight); + cfs_spin_unlock(&imp->imp_lock); lustre_msg_set_status(req->rq_reqmsg, cfs_curproc_pid()); @@ -1135,17 +1227,19 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req) /* this sends any unsent RPCs in @set and returns TRUE if all are sent */ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) { - struct list_head *tmp; + cfs_list_t *tmp; int force_timer_recalc = 0; ENTRY; - if (set->set_remaining == 0) + if (cfs_atomic_read(&set->set_remaining) == 0) RETURN(1); - list_for_each(tmp, &set->set_requests) { + cfs_list_for_each(tmp, &set->set_requests) { struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, rq_set_chain); + cfs_list_entry(tmp, struct ptlrpc_request, + rq_set_chain); struct obd_import *imp = req->rq_import; + int unregistered = 0; int rc = 0; if (req->rq_phase == RQ_PHASE_NEW && @@ -1170,26 +1264,33 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) LASSERT(req->rq_next_phase != req->rq_phase); LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED); - /* + /* * Skip processing until reply is unlinked. We * can't return to pool before that and we can't * call interpret before that. We need to make * sure that all rdma transfers finished and will - * not corrupt any data. + * not corrupt any data. */ - if (ptlrpc_client_recv_or_unlink(req)) + if (ptlrpc_client_recv_or_unlink(req) || + ptlrpc_client_bulk_active(req)) continue; - - /* + + /* * Turn fail_loc off to prevent it from looping - * forever. + * forever. */ - OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_UNLINK | - OBD_FAIL_ONCE); + if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) { + OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK, + OBD_FAIL_ONCE); + } + if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) { + OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK, + OBD_FAIL_ONCE); + } - /* - * Move to next phase if reply was successfully - * unlinked. + /* + * Move to next phase if reply was successfully + * unlinked. */ ptlrpc_rqphase_move(req, req->rq_next_phase); } @@ -1200,32 +1301,39 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) if (req->rq_phase == RQ_PHASE_INTERPRET) GOTO(interpret, req->rq_status); - /* - * Note that this also will start async reply unlink. + /* + * Note that this also will start async reply unlink. */ if (req->rq_net_err && !req->rq_timedout) { ptlrpc_expire_one_request(req, 1); - /* - * Check if we still need to wait for unlink. + /* + * Check if we still need to wait for unlink. 
*/ - if (ptlrpc_client_recv_or_unlink(req)) + if (ptlrpc_client_recv_or_unlink(req) || + ptlrpc_client_bulk_active(req)) continue; } if (req->rq_err) { + cfs_spin_lock(&req->rq_lock); req->rq_replied = 0; + cfs_spin_unlock(&req->rq_lock); if (req->rq_status == 0) req->rq_status = -EIO; ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); GOTO(interpret, req->rq_status); } - /* ptlrpc_queue_wait->l_wait_event guarantees that rq_intr - * will only be set after rq_timedout, but the oig waiting - * path sets rq_intr irrespective of whether ptlrpcd has - * seen a timeout. our policy is to only interpret - * interrupted rpcs after they have timed out */ + /* ptlrpc_set_wait->l_wait_event sets lwi_allow_intr + * so it sets rq_intr regardless of individual rpc + * timeouts. The synchronous IO waiting path sets + * rq_intr irrespective of whether ptlrpcd + * has seen a timeout. Our policy is to only interpret + * interrupted rpcs after they have timed out, so we + * need to enforce that here. + */ + if (req->rq_intr && (req->rq_timedout || req->rq_waiting || req->rq_wait_ctx)) { req->rq_status = -EINTR; @@ -1241,48 +1349,57 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) if (!ptlrpc_unregister_reply(req, 1)) continue; - spin_lock(&imp->imp_lock); - + cfs_spin_lock(&imp->imp_lock); if (ptlrpc_import_delay_req(imp, req, &status)){ - spin_unlock(&imp->imp_lock); + /* put on delay list - only if we wait + * recovery finished - before send */ + cfs_list_del_init(&req->rq_list); + cfs_list_add_tail(&req->rq_list, + &imp-> \ + imp_delayed_list); + cfs_spin_unlock(&imp->imp_lock); continue; } if (status != 0) { req->rq_status = status; - ptlrpc_rqphase_move(req, + ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); GOTO(interpret, req->rq_status); } - if (req->rq_no_resend && !req->rq_wait_ctx) { + if (ptlrpc_no_resend(req) && !req->rq_wait_ctx) { req->rq_status = -ENOTCONN; - ptlrpc_rqphase_move(req, + ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); GOTO(interpret, req->rq_status); } - list_del_init(&req->rq_list); - list_add_tail(&req->rq_list, + cfs_list_del_init(&req->rq_list); + cfs_list_add_tail(&req->rq_list, &imp->imp_sending_list); - spin_unlock(&imp->imp_lock); + cfs_spin_unlock(&imp->imp_lock); + cfs_spin_lock(&req->rq_lock); req->rq_waiting = 0; + cfs_spin_unlock(&req->rq_lock); - if (req->rq_timedout||req->rq_resend) { - /* This is re-sending anyways, + if (req->rq_timedout || req->rq_resend) { + /* This is re-sending anyways, * let's mark req as resend. 
*/ + cfs_spin_lock(&req->rq_lock); req->rq_resend = 1; - lustre_msg_add_flags(req->rq_reqmsg, - MSG_RESENT); + cfs_spin_unlock(&req->rq_lock); if (req->rq_bulk) { - __u64 old_xid = req->rq_xid; + __u64 old_xid; - ptlrpc_unregister_bulk(req); + if (!ptlrpc_unregister_bulk(req, 1)) + continue; /* ensure previous bulk fails */ + old_xid = req->rq_xid; req->rq_xid = ptlrpc_next_xid(); CDEBUG(D_HA, "resend bulk " "old x"LPU64 @@ -1298,14 +1415,21 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) if (status) { if (req->rq_err) { req->rq_status = status; + cfs_spin_lock(&req->rq_lock); + req->rq_wait_ctx = 0; + cfs_spin_unlock(&req->rq_lock); force_timer_recalc = 1; } else { + cfs_spin_lock(&req->rq_lock); req->rq_wait_ctx = 1; + cfs_spin_unlock(&req->rq_lock); } continue; } else { + cfs_spin_lock(&req->rq_lock); req->rq_wait_ctx = 0; + cfs_spin_unlock(&req->rq_lock); } rc = ptl_send_rpc(req, 0); @@ -1313,46 +1437,45 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) DEBUG_REQ(D_HA, req, "send failed (%d)", rc); force_timer_recalc = 1; + cfs_spin_lock(&req->rq_lock); req->rq_net_err = 1; + cfs_spin_unlock(&req->rq_lock); } /* need to reset the timeout */ force_timer_recalc = 1; } - spin_lock(&req->rq_lock); + cfs_spin_lock(&req->rq_lock); if (ptlrpc_client_early(req)) { ptlrpc_at_recv_early_reply(req); - spin_unlock(&req->rq_lock); + cfs_spin_unlock(&req->rq_lock); continue; } /* Still waiting for a reply? */ if (ptlrpc_client_recv(req)) { - spin_unlock(&req->rq_lock); + cfs_spin_unlock(&req->rq_lock); continue; } /* Did we actually receive a reply? */ if (!ptlrpc_client_replied(req)) { - spin_unlock(&req->rq_lock); + cfs_spin_unlock(&req->rq_lock); continue; } - spin_unlock(&req->rq_lock); + cfs_spin_unlock(&req->rq_lock); + + /* unlink from net because we are going to + * swab in-place of reply buffer */ + unregistered = ptlrpc_unregister_reply(req, 1); + if (!unregistered) + continue; req->rq_status = after_reply(req); - if (req->rq_resend) { - /* Add this req to the delayed list so - it can be errored if the import is - evicted after recovery. */ - spin_lock(&imp->imp_lock); - list_del_init(&req->rq_list); - list_add_tail(&req->rq_list, - &imp->imp_delayed_list); - spin_unlock(&imp->imp_lock); + if (req->rq_resend) continue; - } /* If there is no bulk associated with this request, * then we're done and should let the interpreter @@ -1368,12 +1491,12 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) } LASSERT(req->rq_phase == RQ_PHASE_BULK); - if (ptlrpc_bulk_active(req->rq_bulk)) + if (ptlrpc_client_bulk_active(req)) continue; if (!req->rq_bulk->bd_success) { /* The RPC reply arrived OK, but the bulk screwed - * up! Dead wierd since the server told us the RPC + * up! Dead weird since the server told us the RPC * was good after getting the REPLY for her GET or * the ACK for her PUT. */ DEBUG_REQ(D_ERROR, req, "bulk transfer failed"); @@ -1387,44 +1510,45 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) /* This moves to "unregistering" phase we need to wait for * reply unlink. */ - if (!ptlrpc_unregister_reply(req, 1)) + if (!unregistered && !ptlrpc_unregister_reply(req, 1)) continue; - if (req->rq_bulk != NULL) - ptlrpc_unregister_bulk(req); + if (!ptlrpc_unregister_bulk(req, 1)) + continue; /* When calling interpret receiving already should be * finished. 
*/ LASSERT(!req->rq_receiving_reply); - if (req->rq_interpret_reply != NULL) { - ptlrpc_interpterer_t interpreter = - req->rq_interpret_reply; - req->rq_status = interpreter(NULL, req, - &req->rq_async_args, - req->rq_status); - } + ptlrpc_req_interpret(env, req, req->rq_status); + ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE); CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:nid:" "opc %s:%s:%d:"LPU64":%s:%d\n", cfs_curproc_comm(), imp->imp_obd->obd_uuid.uuid, - lustre_msg_get_status(req->rq_reqmsg), req->rq_xid, + req->rq_reqmsg ? lustre_msg_get_status(req->rq_reqmsg):-1, + req->rq_xid, libcfs_nid2str(imp->imp_connection->c_peer.nid), - lustre_msg_get_opc(req->rq_reqmsg)); - - spin_lock(&imp->imp_lock); - if (!list_empty(&req->rq_list)) - list_del_init(&req->rq_list); - atomic_dec(&imp->imp_inflight); - spin_unlock(&imp->imp_lock); + req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : -1); + + cfs_spin_lock(&imp->imp_lock); + /* Request already may be not on sending or delaying list. This + * may happen in the case of marking it erroneous for the case + * ptlrpc_import_delay_req(req, status) find it impossible to + * allow sending this rpc and returns *status != 0. */ + if (!cfs_list_empty(&req->rq_list)) { + cfs_list_del_init(&req->rq_list); + cfs_atomic_dec(&imp->imp_inflight); + } + cfs_spin_unlock(&imp->imp_lock); - set->set_remaining--; - cfs_waitq_signal(&imp->imp_recovery_waitq); + cfs_atomic_dec(&set->set_remaining); + cfs_waitq_broadcast(&imp->imp_recovery_waitq); } /* If we hit an error, we want to recover promptly. */ - RETURN(set->set_remaining == 0 || force_timer_recalc); + RETURN(cfs_atomic_read(&set->set_remaining) == 0 || force_timer_recalc); } /* Return 1 if we should give up, else 0 */ @@ -1434,42 +1558,38 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink) int rc = 0; ENTRY; - DEBUG_REQ(D_ERROR|D_NETERROR, req, - "%s (sent at "CFS_TIME_T", "CFS_DURATION_T"s ago)", - req->rq_net_err ? "network error" : "timeout", - req->rq_sent, cfs_time_sub(cfs_time_current_sec(), - req->rq_sent)); + cfs_spin_lock(&req->rq_lock); + req->rq_timedout = 1; + cfs_spin_unlock(&req->rq_lock); - if (imp) { - LCONSOLE_WARN("Request x"LPU64" sent from %s to NID %s " - CFS_DURATION_T"s ago has timed out " - "(limit "CFS_DURATION_T"s).\n", req->rq_xid, - req->rq_import->imp_obd->obd_name, - libcfs_nid2str(imp->imp_connection->c_peer.nid), - cfs_time_sub(cfs_time_current_sec(), req->rq_sent), - cfs_time_sub(req->rq_deadline, req->rq_sent)); - } + DEBUG_REQ(req->rq_fake ? D_INFO : D_WARNING, req, + "Request x"LPU64" sent from %s to NID %s "CFS_DURATION_T"s " + "ago has %s ("CFS_DURATION_T"s prior to deadline).\n", + req->rq_xid, imp ? imp->imp_obd->obd_name : "", + imp ? libcfs_nid2str(imp->imp_connection->c_peer.nid) : "", + cfs_time_sub(cfs_time_current_sec(), req->rq_sent), + req->rq_net_err ? 
"failed due to network error" : "timed out", + cfs_time_sub(req->rq_deadline, req->rq_sent)); if (imp != NULL && obd_debug_peer_on_timeout) LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer); - spin_lock(&req->rq_lock); - req->rq_timedout = 1; - spin_unlock(&req->rq_lock); - ptlrpc_unregister_reply(req, async_unlink); + ptlrpc_unregister_bulk(req, async_unlink); if (obd_dump_on_timeout) libcfs_debug_dumplog(); - if (req->rq_bulk != NULL) - ptlrpc_unregister_bulk (req); - if (imp == NULL) { DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?"); RETURN(1); } + if (req->rq_fake) + RETURN(1); + + cfs_atomic_inc(&imp->imp_timeouts); + /* The DLM server doesn't want recovery run on its imports. */ if (imp->imp_dlm_fake) RETURN(1); @@ -1482,16 +1602,16 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink) DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)", ptlrpc_import_state_name(req->rq_send_state), ptlrpc_import_state_name(imp->imp_state)); - spin_lock(&req->rq_lock); + cfs_spin_lock(&req->rq_lock); req->rq_status = -ETIMEDOUT; req->rq_err = 1; - spin_unlock(&req->rq_lock); + cfs_spin_unlock(&req->rq_lock); RETURN(1); } /* if a request can't be resent we can't wait for an answer after the timeout */ - if (req->rq_no_resend) { + if (ptlrpc_no_resend(req)) { DEBUG_REQ(D_RPCTRACE, req, "TIMEOUT-NORESEND:"); rc = 1; } @@ -1504,26 +1624,30 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink) int ptlrpc_expired_set(void *data) { struct ptlrpc_request_set *set = data; - struct list_head *tmp; + cfs_list_t *tmp; time_t now = cfs_time_current_sec(); ENTRY; LASSERT(set != NULL); - /* - * A timeout expired. See which reqs it applies to... + /* + * A timeout expired. See which reqs it applies to... */ - list_for_each (tmp, &set->set_requests) { + cfs_list_for_each (tmp, &set->set_requests) { struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, rq_set_chain); + cfs_list_entry(tmp, struct ptlrpc_request, + rq_set_chain); + + /* don't expire request waiting for context */ + if (req->rq_wait_ctx) + continue; /* Request in-flight? */ - if (!((req->rq_phase & - (RQ_PHASE_RPC | RQ_PHASE_UNREGISTERING) && + if (!((req->rq_phase == RQ_PHASE_RPC && !req->rq_waiting && !req->rq_resend) || (req->rq_phase == RQ_PHASE_BULK))) continue; - + if (req->rq_timedout || /* already dealt with */ req->rq_deadline > now) /* not expired */ continue; @@ -1533,8 +1657,8 @@ int ptlrpc_expired_set(void *data) ptlrpc_expire_one_request(req, 1); } - /* - * When waiting for a whole set, we always to break out of the + /* + * When waiting for a whole set, we always break out of the * sleep so we can recalculate the timeout, or enable interrupts * if everyone's timed out. 
*/ @@ -1543,24 +1667,25 @@ int ptlrpc_expired_set(void *data) void ptlrpc_mark_interrupted(struct ptlrpc_request *req) { - spin_lock(&req->rq_lock); + cfs_spin_lock(&req->rq_lock); req->rq_intr = 1; - spin_unlock(&req->rq_lock); + cfs_spin_unlock(&req->rq_lock); } void ptlrpc_interrupted_set(void *data) { struct ptlrpc_request_set *set = data; - struct list_head *tmp; + cfs_list_t *tmp; LASSERT(set != NULL); CERROR("INTERRUPTED SET %p\n", set); - list_for_each(tmp, &set->set_requests) { + cfs_list_for_each(tmp, &set->set_requests) { struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, rq_set_chain); + cfs_list_entry(tmp, struct ptlrpc_request, + rq_set_chain); - if (req->rq_phase != RQ_PHASE_RPC && + if (req->rq_phase != RQ_PHASE_RPC && req->rq_phase != RQ_PHASE_UNREGISTERING) continue; @@ -1568,12 +1693,12 @@ void ptlrpc_interrupted_set(void *data) } } -/** - * Get the smallest timeout in the set; this does NOT set a timeout. +/** + * Get the smallest timeout in the set; this does NOT set a timeout. */ int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set) { - struct list_head *tmp; + cfs_list_t *tmp; time_t now = cfs_time_current_sec(); int timeout = 0; struct ptlrpc_request *req; @@ -1582,36 +1707,25 @@ int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set) SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */ - list_for_each(tmp, &set->set_requests) { - req = list_entry(tmp, struct ptlrpc_request, rq_set_chain); + cfs_list_for_each(tmp, &set->set_requests) { + req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain); - /* - * Request in-flight? + /* + * Request in-flight? */ - if (!(((req->rq_phase & - (RQ_PHASE_RPC | RQ_PHASE_UNREGISTERING)) && - !req->rq_waiting) || + if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) || (req->rq_phase == RQ_PHASE_BULK) || (req->rq_phase == RQ_PHASE_NEW))) continue; - /* - * Check those waiting for long reply unlink every one - * second. - */ - if (req->rq_phase == RQ_PHASE_UNREGISTERING) { - timeout = 1; - break; - } - - /* - * Already timed out. + /* + * Already timed out. */ if (req->rq_timedout) continue; - /* - * Waiting for ctx. + /* + * Waiting for ctx. */ if (req->rq_wait_ctx) continue; @@ -1631,17 +1745,17 @@ int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set) int ptlrpc_set_wait(struct ptlrpc_request_set *set) { - struct list_head *tmp; + cfs_list_t *tmp; struct ptlrpc_request *req; struct l_wait_info lwi; int rc, timeout; ENTRY; - if (list_empty(&set->set_requests)) + if (cfs_list_empty(&set->set_requests)) RETURN(0); - list_for_each(tmp, &set->set_requests) { - req = list_entry(tmp, struct ptlrpc_request, rq_set_chain); + cfs_list_for_each(tmp, &set->set_requests) { + req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain); if (req->rq_phase == RQ_PHASE_NEW) (void)ptlrpc_send_new_req(req); } @@ -1653,28 +1767,53 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) * req times out */ CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n", set, timeout); - lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout ? timeout : 1), - ptlrpc_expired_set, - ptlrpc_interrupted_set, set); - rc = l_wait_event(set->set_waitq, - ptlrpc_check_set(NULL, set), &lwi); + + if (timeout == 0 && !cfs_signal_pending()) + /* + * No requests are in-flight (ether timed out + * or delayed), so we can allow interrupts. + * We still want to block for a limited time, + * so we allow interrupts during the timeout. 
+ */ + lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1), + ptlrpc_expired_set, + ptlrpc_interrupted_set, set); + else + /* + * At least one request is in flight, so no + * interrupts are allowed. Wait until all + * complete, or an in-flight req times out. + */ + lwi = LWI_TIMEOUT(cfs_time_seconds(timeout? timeout : 1), + ptlrpc_expired_set, set); + + rc = l_wait_event(set->set_waitq, ptlrpc_check_set(NULL, set), &lwi); LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT); /* -EINTR => all requests have been flagged rq_intr so next * check completes. - * -ETIMEOUTD => someone timed out. When all reqs have + * -ETIMEDOUT => someone timed out. When all reqs have * timed out, signals are enabled allowing completion with * EINTR. * I don't really care if we go once more round the loop in * the error cases -eeb. */ - } while (rc != 0 || set->set_remaining != 0); + if (rc == 0 && cfs_atomic_read(&set->set_remaining) == 0) { + cfs_list_for_each(tmp, &set->set_requests) { + req = cfs_list_entry(tmp, struct ptlrpc_request, + rq_set_chain); + cfs_spin_lock(&req->rq_lock); + req->rq_invalid_rqset = 1; + cfs_spin_unlock(&req->rq_lock); + } + } + } while (rc != 0 || cfs_atomic_read(&set->set_remaining) != 0); - LASSERT(set->set_remaining == 0); + LASSERT(cfs_atomic_read(&set->set_remaining) == 0); rc = 0; - list_for_each(tmp, &set->set_requests) { - req = list_entry(tmp, struct ptlrpc_request, rq_set_chain); + cfs_list_for_each(tmp, &set->set_requests) { + req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain); LASSERT(req->rq_phase == RQ_PHASE_COMPLETE); if (req->rq_status != 0) @@ -1689,9 +1828,9 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) struct ptlrpc_set_cbdata *cbdata, *n; int err; - list_for_each_entry_safe(cbdata, n, + cfs_list_for_each_entry_safe(cbdata, n, &set->set_cblist, psc_item) { - list_del_init(&cbdata->psc_item); + cfs_list_del_init(&cbdata->psc_item); err = cbdata->psc_interpret(set, cbdata->psc_data, rc); if (err && !rc) rc = err; @@ -1712,10 +1851,11 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) LASSERTF(!request->rq_receiving_reply, "req %p\n", request); LASSERTF(request->rq_rqbd == NULL, "req %p\n",request);/* client-side */ - LASSERTF(list_empty(&request->rq_list), "req %p\n", request); - LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request); + LASSERTF(cfs_list_empty(&request->rq_list), "req %p\n", request); + LASSERTF(cfs_list_empty(&request->rq_set_chain), "req %p\n", request); + LASSERTF(cfs_list_empty(&request->rq_exp_list), "req %p\n", request); LASSERTF(!request->rq_replay, "req %p\n", request); - LASSERT(request->rq_cli_ctx); + LASSERT(request->rq_cli_ctx || request->rq_fake); req_capsule_fini(&request->rq_pill); @@ -1723,15 +1863,14 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) * request->rq_reqmsg to NULL while osc_close is dereferencing it. 
*/ if (request->rq_import != NULL) { if (!locked) - spin_lock(&request->rq_import->imp_lock); - list_del_init(&request->rq_mod_list); - list_del_init(&request->rq_replay_list); + cfs_spin_lock(&request->rq_import->imp_lock); + cfs_list_del_init(&request->rq_replay_list); if (!locked) - spin_unlock(&request->rq_import->imp_lock); + cfs_spin_unlock(&request->rq_import->imp_lock); } - LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request); + LASSERTF(cfs_list_empty(&request->rq_replay_list), "req %p\n", request); - if (atomic_read(&request->rq_refcount) != 0) { + if (cfs_atomic_read(&request->rq_refcount) != 0) { DEBUG_REQ(D_ERROR, request, "freeing request with nonzero refcount"); LBUG(); @@ -1753,7 +1892,8 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) if (request->rq_reqbuf != NULL || request->rq_clrbuf != NULL) sptlrpc_cli_free_reqbuf(request); - sptlrpc_req_put_ctx(request, !locked); + if (request->rq_cli_ctx) + sptlrpc_req_put_ctx(request, !locked); if (request->rq_pool) __ptlrpc_free_req_to_pool(request); @@ -1783,9 +1923,9 @@ static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked) } DEBUG_REQ(D_INFO, request, "refcount now %u", - atomic_read(&request->rq_refcount) - 1); + cfs_atomic_read(&request->rq_refcount) - 1); - if (atomic_dec_and_test(&request->rq_refcount)) { + if (cfs_atomic_dec_and_test(&request->rq_refcount)) { __ptlrpc_free_req(request, locked); RETURN(1); } @@ -1815,47 +1955,47 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) cfs_waitq_t *wq; struct l_wait_info lwi; - /* - * Might sleep. + /* + * Might sleep. */ - LASSERT(!in_interrupt()); + LASSERT(!cfs_in_interrupt()); - /* - * Let's setup deadline for reply unlink. + /* + * Let's setup deadline for reply unlink. */ - if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_UNLINK) && + if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) && async && request->rq_reply_deadline == 0) request->rq_reply_deadline = cfs_time_current_sec()+LONG_UNLINK; - /* - * Nothing left to do. + /* + * Nothing left to do. */ if (!ptlrpc_client_recv_or_unlink(request)) RETURN(1); LNetMDUnlink(request->rq_reply_md_h); - /* - * Let's check it once again. + /* + * Let's check it once again. */ if (!ptlrpc_client_recv_or_unlink(request)) RETURN(1); - /* - * Move to "Unregistering" phase as reply was not unlinked yet. + /* + * Move to "Unregistering" phase as reply was not unlinked yet. */ ptlrpc_rqphase_move(request, RQ_PHASE_UNREGISTERING); - /* - * Do not wait for unlink to finish. + /* + * Do not wait for unlink to finish. */ if (async) RETURN(0); - /* + /* * We have to l_wait_event() whatever the result, to give liblustre * a chance to run reply_in_callback(), and to make sure we've - * unlinked before returning a req to the pool. + * unlinked before returning a req to the pool. 
*/ if (request->rq_set != NULL) wq = &request->rq_set->set_waitq; @@ -1865,14 +2005,15 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) for (;;) { /* Network access will complete in finite time but the HUGE * timeout lets us CWARN for visibility of sluggish NALs */ - lwi = LWI_TIMEOUT(cfs_time_seconds(LONG_UNLINK), NULL, NULL); + lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK), + cfs_time_seconds(1), NULL, NULL); rc = l_wait_event(*wq, !ptlrpc_client_recv_or_unlink(request), &lwi); if (rc == 0) { ptlrpc_rqphase_move(request, request->rq_next_phase); RETURN(1); } - + LASSERT(rc == -ETIMEDOUT); DEBUG_REQ(D_WARNING, request, "Unexpectedly long timeout " "rvcng=%d unlnk=%d", request->rq_receiving_reply, @@ -1884,7 +2025,7 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) /* caller must hold imp->imp_lock */ void ptlrpc_free_committed(struct obd_import *imp) { - struct list_head *tmp, *saved; + cfs_list_t *tmp, *saved; struct ptlrpc_request *req; struct ptlrpc_request *last_req = NULL; /* temporary fire escape */ ENTRY; @@ -1901,20 +2042,24 @@ void ptlrpc_free_committed(struct obd_import *imp) EXIT; return; } - CDEBUG(D_RPCTRACE, "%s: committing for last_committed "LPU64" gen %d\n", imp->imp_obd->obd_name, imp->imp_peer_committed_transno, imp->imp_generation); imp->imp_last_transno_checked = imp->imp_peer_committed_transno; imp->imp_last_generation_checked = imp->imp_generation; - list_for_each_safe(tmp, saved, &imp->imp_replay_list) { - req = list_entry(tmp, struct ptlrpc_request, rq_replay_list); + cfs_list_for_each_safe(tmp, saved, &imp->imp_replay_list) { + req = cfs_list_entry(tmp, struct ptlrpc_request, + rq_replay_list); /* XXX ok to remove when 1357 resolved - rread 05/29/03 */ LASSERT(req != last_req); last_req = req; + if (req->rq_transno == 0) { + DEBUG_REQ(D_EMERG, req, "zero transno during replay"); + LBUG(); + } if (req->rq_import_generation < imp->imp_generation) { DEBUG_REQ(D_RPCTRACE, req, "free request with old gen"); GOTO(free_req, 0); @@ -1934,12 +2079,12 @@ void ptlrpc_free_committed(struct obd_import *imp) DEBUG_REQ(D_RPCTRACE, req, "commit (last_committed "LPU64")", imp->imp_peer_committed_transno); free_req: - spin_lock(&req->rq_lock); + cfs_spin_lock(&req->rq_lock); req->rq_replay = 0; - spin_unlock(&req->rq_lock); + cfs_spin_unlock(&req->rq_lock); if (req->rq_commit_cb != NULL) req->rq_commit_cb(req); - list_del_init(&req->rq_replay_list); + cfs_list_del_init(&req->rq_replay_list); __ptlrpc_req_finished(req, 1); } @@ -1960,7 +2105,7 @@ void ptlrpc_resend_req(struct ptlrpc_request *req) lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 }); req->rq_status = -EAGAIN; - spin_lock(&req->rq_lock); + cfs_spin_lock(&req->rq_lock); req->rq_resend = 1; req->rq_net_err = 0; req->rq_timedout = 0; @@ -1973,7 +2118,7 @@ void ptlrpc_resend_req(struct ptlrpc_request *req) old_xid, req->rq_xid); } ptlrpc_client_wake_req(req); - spin_unlock(&req->rq_lock); + cfs_spin_unlock(&req->rq_lock); } /* XXX: this function and rq_status are currently unused */ @@ -1982,62 +2127,38 @@ void ptlrpc_restart_req(struct ptlrpc_request *req) DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request"); req->rq_status = -ERESTARTSYS; - spin_lock(&req->rq_lock); + cfs_spin_lock(&req->rq_lock); req->rq_restart = 1; req->rq_timedout = 0; ptlrpc_client_wake_req(req); - spin_unlock(&req->rq_lock); -} - -static int expired_request(void *data) -{ - struct ptlrpc_request *req = data; - ENTRY; - - /* - * Some failure can suspend regular 
timeouts. - */ - if (ptlrpc_check_suspend()) - RETURN(1); - - /* - * Deadline may have changed with an early reply. - */ - if (req->rq_deadline > cfs_time_current_sec()) - RETURN(1); - - RETURN(ptlrpc_expire_one_request(req, 0)); -} - -static void interrupted_request(void *data) -{ - struct ptlrpc_request *req = data; - DEBUG_REQ(D_HA, req, "request interrupted"); - spin_lock(&req->rq_lock); - req->rq_intr = 1; - spin_unlock(&req->rq_lock); + cfs_spin_unlock(&req->rq_lock); } struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req) { ENTRY; - atomic_inc(&req->rq_refcount); + cfs_atomic_inc(&req->rq_refcount); RETURN(req); } void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, struct obd_import *imp) { - struct list_head *tmp; + cfs_list_t *tmp; LASSERT_SPIN_LOCKED(&imp->imp_lock); + if (req->rq_transno == 0) { + DEBUG_REQ(D_EMERG, req, "saving request with zero transno"); + LBUG(); + } + /* clear this for new requests that were resent as well as resent replayed requests. */ lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT); /* don't re-add requests that have been replayed */ - if (!list_empty(&req->rq_replay_list)) + if (!cfs_list_empty(&req->rq_replay_list)) return; lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY); @@ -2045,9 +2166,10 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, LASSERT(imp->imp_replayable); /* Balanced in ptlrpc_free_committed, usually. */ ptlrpc_request_addref(req); - list_for_each_prev(tmp, &imp->imp_replay_list) { + cfs_list_for_each_prev(tmp, &imp->imp_replay_list) { struct ptlrpc_request *iter = - list_entry(tmp, struct ptlrpc_request, rq_replay_list); + cfs_list_entry(tmp, struct ptlrpc_request, + rq_replay_list); /* We may have duplicate transnos if we create and then * open a file, or for closes retained if to match creating @@ -2064,237 +2186,37 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, continue; } - list_add(&req->rq_replay_list, &iter->rq_replay_list); + cfs_list_add(&req->rq_replay_list, &iter->rq_replay_list); return; } - list_add_tail(&req->rq_replay_list, &imp->imp_replay_list); + cfs_list_add(&req->rq_replay_list, &imp->imp_replay_list); } int ptlrpc_queue_wait(struct ptlrpc_request *req) { - int rc = 0; - int brc; - struct l_wait_info lwi; - struct obd_import *imp = req->rq_import; - cfs_duration_t timeout = CFS_TICK; - long timeoutl; + struct ptlrpc_request_set *set; + int rc; ENTRY; LASSERT(req->rq_set == NULL); LASSERT(!req->rq_receiving_reply); - atomic_inc(&imp->imp_inflight); - - /* for distributed debugging */ - lustre_msg_set_status(req->rq_reqmsg, cfs_curproc_pid()); - LASSERT(imp->imp_obd != NULL); - CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc " - "%s:%s:%d:"LPU64":%s:%d\n", cfs_curproc_comm(), - imp->imp_obd->obd_uuid.uuid, - lustre_msg_get_status(req->rq_reqmsg), req->rq_xid, - libcfs_nid2str(imp->imp_connection->c_peer.nid), - lustre_msg_get_opc(req->rq_reqmsg)); - - /* Mark phase here for a little debug help */ - ptlrpc_rqphase_move(req, RQ_PHASE_RPC); - - spin_lock(&imp->imp_lock); - req->rq_import_generation = imp->imp_generation; -restart: - if (ptlrpc_import_delay_req(imp, req, &rc)) { - list_del(&req->rq_list); - - list_add_tail(&req->rq_list, &imp->imp_delayed_list); - spin_unlock(&imp->imp_lock); - - DEBUG_REQ(D_HA, req, "\"%s\" waiting for recovery: (%s != %s)", - cfs_curproc_comm(), - ptlrpc_import_state_name(req->rq_send_state), - ptlrpc_import_state_name(imp->imp_state)); - lwi = LWI_INTR(interrupted_request, req); - rc = 
l_wait_event(req->rq_reply_waitq, - (req->rq_send_state == imp->imp_state || - req->rq_err || req->rq_intr), - &lwi); - DEBUG_REQ(D_HA, req, "\"%s\" awake: (%s == %s or %d/%d == 1)", - cfs_curproc_comm(), - ptlrpc_import_state_name(imp->imp_state), - ptlrpc_import_state_name(req->rq_send_state), - req->rq_err, req->rq_intr); - - spin_lock(&imp->imp_lock); - list_del_init(&req->rq_list); - - if (req->rq_err) { - /* rq_status was set locally */ - rc = -EIO; - } - else if (req->rq_intr) { - rc = -EINTR; - } - else if (req->rq_no_resend) { - spin_unlock(&imp->imp_lock); - GOTO(out, rc = -ETIMEDOUT); - } - else { - GOTO(restart, rc); - } - } - - if (rc != 0) { - list_del_init(&req->rq_list); - spin_unlock(&imp->imp_lock); - req->rq_status = rc; // XXX this ok? - GOTO(out, rc); - } - - if (req->rq_resend) { - lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT); - - if (req->rq_bulk != NULL) { - ptlrpc_unregister_bulk (req); - /* bulk requests are supposed to be - * idempotent, so we are free to bump the xid - * here, which we need to do before - * registering the bulk again (bug 6371). - * print the old xid first for sanity. - */ - DEBUG_REQ(D_HA, req, "bumping xid for bulk: "); - req->rq_xid = ptlrpc_next_xid(); - } - - DEBUG_REQ(D_HA, req, "resending: "); - } - - /* XXX this is the same as ptlrpc_set_wait */ - LASSERT(list_empty(&req->rq_list)); - list_add_tail(&req->rq_list, &imp->imp_sending_list); - spin_unlock(&imp->imp_lock); - - rc = sptlrpc_req_refresh_ctx(req, 0); - if (rc) { - if (req->rq_err) { - /* we got fatal ctx refresh error, directly jump out - * thus we can pass back the actual error code. - */ - spin_lock(&imp->imp_lock); - list_del_init(&req->rq_list); - spin_unlock(&imp->imp_lock); - - CERROR("Failed to refresh ctx of req %p: %d\n", req, rc); - GOTO(out, rc); - } - /* simulating we got error during send rpc */ - goto after_send; - } - - rc = ptl_send_rpc(req, 0); - if (rc) - DEBUG_REQ(D_HA, req, "send failed (%d); recovering", rc); - -repeat: - timeoutl = req->rq_deadline - cfs_time_current_sec(); - timeout = (timeoutl <= 0 || rc) ? CFS_TICK : - cfs_time_seconds(timeoutl); - DEBUG_REQ(D_NET, req, - "-- sleeping for "CFS_DURATION_T" ticks", timeout); - lwi = LWI_TIMEOUT_INTR(timeout, expired_request, interrupted_request, - req); - brc = l_wait_event(req->rq_reply_waitq, ptlrpc_check_reply(req), &lwi); - if (brc == -ETIMEDOUT && ((req->rq_deadline > cfs_time_current_sec()) || - ptlrpc_check_and_wait_suspend(req))) - goto repeat; - -after_send: - CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:nid:opc " - "%s:%s:%d:"LPU64":%s:%d\n", cfs_curproc_comm(), - imp->imp_obd->obd_uuid.uuid, - lustre_msg_get_status(req->rq_reqmsg), req->rq_xid, - libcfs_nid2str(imp->imp_connection->c_peer.nid), - lustre_msg_get_opc(req->rq_reqmsg)); - - spin_lock(&imp->imp_lock); - list_del_init(&req->rq_list); - spin_unlock(&imp->imp_lock); - - /* If the reply was received normally, this just grabs the spinlock - * (ensuring the reply callback has returned), sees that - * req->rq_receiving_reply is clear and returns. */ - ptlrpc_unregister_reply(req, 0); - - if (req->rq_err) { - DEBUG_REQ(D_RPCTRACE, req, "err rc=%d status=%d", - rc, req->rq_status); - GOTO(out, rc = rc ? rc : -EIO); - } - - if (req->rq_intr) { - /* Should only be interrupted if we timed out. */ - if (!req->rq_timedout) - DEBUG_REQ(D_ERROR, req, - "rq_intr set but rq_timedout not"); - GOTO(out, rc = -EINTR); - } - - /* Resend if we need to */ - if (req->rq_resend) { - /* ...unless we were specifically told otherwise. 
-                /* ...unless we were specifically told otherwise. */
-                if (req->rq_no_resend)
-                        GOTO(out, rc = -ETIMEDOUT);
-                spin_lock(&imp->imp_lock);
-                goto restart;
-        }

-        if (req->rq_timedout) {                 /* non-recoverable timeout */
-                GOTO(out, rc = -ETIMEDOUT);
-        }

-        if (!ptlrpc_client_replied(req)) {
-                /* How can this be? -eeb */
-                DEBUG_REQ(D_ERROR, req, "!rq_replied: ");
-                LBUG();
-                GOTO(out, rc = req->rq_status);
-        }

-        rc = after_reply(req);
-        /* NB may return +ve success rc */
-        if (req->rq_resend) {
-                spin_lock(&imp->imp_lock);
-                goto restart;
+        set = ptlrpc_prep_set();
+        if (set == NULL) {
+                CERROR("Unable to allocate ptlrpc set.");
+                RETURN(-ENOMEM);
         }

- out:
-        if (req->rq_bulk != NULL) {
-                if (rc >= 0) {
-                        /* success so far.  Note that anything going wrong
-                         * with bulk now, is EXTREMELY strange, since the
-                         * server must have believed that the bulk
-                         * tranferred OK before she replied with success to
-                         * me. */
-                        lwi = LWI_TIMEOUT(timeout, NULL, NULL);
-                        brc = l_wait_event(req->rq_reply_waitq,
-                                           !ptlrpc_bulk_active(req->rq_bulk),
-                                           &lwi);
-                        LASSERT(brc == 0 || brc == -ETIMEDOUT);
-                        if (brc != 0) {
-                                LASSERT(brc == -ETIMEDOUT);
-                                DEBUG_REQ(D_ERROR, req, "bulk timed out");
-                                rc = brc;
-                        } else if (!req->rq_bulk->bd_success) {
-                                DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
-                                rc = -EIO;
-                        }
-                }
-                if (rc < 0)
-                        ptlrpc_unregister_bulk (req);
-        }

+        /* for distributed debugging */
         lustre_msg_set_status(req->rq_reqmsg, cfs_curproc_pid());

-        LASSERT(!req->rq_receiving_reply);
-        ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
+        /* add a ref for the set (see comment in ptlrpc_set_add_req) */
+        ptlrpc_request_addref(req);
+        ptlrpc_set_add_req(set, req);
+        rc = ptlrpc_set_wait(set);
+        ptlrpc_set_destroy(set);

-        atomic_dec(&imp->imp_inflight);
-        cfs_waitq_signal(&imp->imp_recovery_waitq);
         RETURN(rc);
 }

@@ -2305,13 +2227,13 @@ struct ptlrpc_replay_async_args {

 static int ptlrpc_replay_interpret(const struct lu_env *env,
                                    struct ptlrpc_request *req,
-                                    void * data, int rc)
+                                   void * data, int rc)
 {
         struct ptlrpc_replay_async_args *aa = data;
         struct obd_import *imp = req->rq_import;
         ENTRY;

-        atomic_dec(&imp->imp_replay_inflight);
+        cfs_atomic_dec(&imp->imp_replay_inflight);

         if (!ptlrpc_client_replied(req)) {
                 CERROR("request replay timed out, restarting recovery\n");
@@ -2323,9 +2245,31 @@ static int ptlrpc_replay_interpret(const struct lu_env *env,
             lustre_msg_get_status(req->rq_repmsg) == -ENODEV))
                 GOTO(out, rc = lustre_msg_get_status(req->rq_repmsg));

-        /* The transno had better not change over replay. */
-        LASSERT(lustre_msg_get_transno(req->rq_reqmsg) ==
-                lustre_msg_get_transno(req->rq_repmsg));
+        /** VBR: check version failure */
+        if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
+                /** replay failed due to version mismatch */
+                DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n");
+                cfs_spin_lock(&imp->imp_lock);
+                imp->imp_vbr_failed = 1;
+                imp->imp_no_lock_replay = 1;
+                cfs_spin_unlock(&imp->imp_lock);
+        } else {
+                /** The transno had better not change over replay. */
+                LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
+                         lustre_msg_get_transno(req->rq_repmsg) ||
+                         lustre_msg_get_transno(req->rq_repmsg) == 0,
+                         LPX64"/"LPX64"\n",
+                         lustre_msg_get_transno(req->rq_reqmsg),
+                         lustre_msg_get_transno(req->rq_repmsg));
+        }
+
+        cfs_spin_lock(&imp->imp_lock);
+        /** if replaying by version then a gap occurred on the server, so do not trust the locks */
+        if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY)
+                imp->imp_no_lock_replay = 1;
+        imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
+        cfs_spin_unlock(&imp->imp_lock);
+        LASSERT(imp->imp_last_replay_transno);

         DEBUG_REQ(D_HA, req, "got rep");

@@ -2348,10 +2292,10 @@ static int ptlrpc_replay_interpret(const struct lu_env *env,
          * imp_last_replay_transno shouldn't be set to 0 anyway */
         if (req->rq_transno > 0) {
-                spin_lock(&imp->imp_lock);
+                cfs_spin_lock(&imp->imp_lock);
                 LASSERT(req->rq_transno <= imp->imp_last_replay_transno);
                 imp->imp_last_replay_transno = req->rq_transno;
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
         } else
                 CERROR("Transno is 0 during replay!\n");

         /* continue with recovery */
@@ -2393,56 +2337,56 @@ int ptlrpc_replay_req(struct ptlrpc_request *req)

         DEBUG_REQ(D_HA, req, "REPLAY");

-        atomic_inc(&req->rq_import->imp_replay_inflight);
+        cfs_atomic_inc(&req->rq_import->imp_replay_inflight);
         ptlrpc_request_addref(req); /* ptlrpcd needs a ref */

-        ptlrpcd_add_req(req);
+        ptlrpcd_add_req(req, PSCOPE_OTHER);
         RETURN(0);
 }

 void ptlrpc_abort_inflight(struct obd_import *imp)
 {
-        struct list_head *tmp, *n;
+        cfs_list_t *tmp, *n;
         ENTRY;

         /* Make sure that no new requests get processed for this import.
          * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
          * this flag and then putting requests on sending_list or delayed_list. */
-        spin_lock(&imp->imp_lock);
+        cfs_spin_lock(&imp->imp_lock);

         /* XXX locking?  Maybe we should remove each request with the list
          * locked?  Also, how do we know if the requests on the list are
          * being freed at this time?
          */
-        list_for_each_safe(tmp, n, &imp->imp_sending_list) {
+        cfs_list_for_each_safe(tmp, n, &imp->imp_sending_list) {
                 struct ptlrpc_request *req =
-                        list_entry(tmp, struct ptlrpc_request, rq_list);
+                        cfs_list_entry(tmp, struct ptlrpc_request, rq_list);

                 DEBUG_REQ(D_RPCTRACE, req, "inflight");

-                spin_lock (&req->rq_lock);
+                cfs_spin_lock (&req->rq_lock);
                 if (req->rq_import_generation < imp->imp_generation) {
                         req->rq_err = 1;
                         req->rq_status = -EINTR;
                         ptlrpc_client_wake_req(req);
                 }
-                spin_unlock (&req->rq_lock);
+                cfs_spin_unlock (&req->rq_lock);
         }

-        list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
+        cfs_list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
                 struct ptlrpc_request *req =
-                        list_entry(tmp, struct ptlrpc_request, rq_list);
+                        cfs_list_entry(tmp, struct ptlrpc_request, rq_list);

                 DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");

-                spin_lock (&req->rq_lock);
+                cfs_spin_lock (&req->rq_lock);
                 if (req->rq_import_generation < imp->imp_generation) {
                         req->rq_err = 1;
                         req->rq_status = -EINTR;
                         ptlrpc_client_wake_req(req);
                 }
-                spin_unlock (&req->rq_lock);
+                cfs_spin_unlock (&req->rq_lock);
         }

         /* Last chance to free reqs left on the replay list, but we
@@ -2450,36 +2394,37 @@ void ptlrpc_abort_inflight(struct obd_import *imp)
         if (imp->imp_replayable)
                 ptlrpc_free_committed(imp);

-        spin_unlock(&imp->imp_lock);
+        cfs_spin_unlock(&imp->imp_lock);

         EXIT;
 }

 void ptlrpc_abort_set(struct ptlrpc_request_set *set)
 {
-        struct list_head *tmp, *n;
+        cfs_list_t *tmp, *pos;

         LASSERT(set != NULL);

-        list_for_each_safe(tmp, n, &set->set_requests) {
+        cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
                 struct ptlrpc_request *req =
-                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+                        cfs_list_entry(pos, struct ptlrpc_request,
+                                       rq_set_chain);

-                spin_lock (&req->rq_lock);
+                cfs_spin_lock(&req->rq_lock);
                 if (req->rq_phase != RQ_PHASE_RPC) {
-                        spin_unlock (&req->rq_lock);
+                        cfs_spin_unlock(&req->rq_lock);
                         continue;
                 }

                 req->rq_err = 1;
                 req->rq_status = -EINTR;
                 ptlrpc_client_wake_req(req);
-                spin_unlock (&req->rq_lock);
+                cfs_spin_unlock(&req->rq_lock);
         }
 }

 static __u64 ptlrpc_last_xid;
-static spinlock_t ptlrpc_last_xid_lock;
+static cfs_spinlock_t ptlrpc_last_xid_lock;

 /* Initialize the XID for the node.  This is common among all requests on
  * this node, and only requires the property that it is monotonically
  * increasing.
@@ -2489,7 +2434,7 @@ static spinlock_t ptlrpc_last_xid_lock;
  * NOT want to have an XID per target or similar.
  *
  * To avoid an unlikely collision between match bits after a client reboot
- * (which would cause old to be delivered into the wrong buffer) we initialize
+ * (which would deliver old data into the wrong RDMA buffer) initialize
  * the XID based on the current time, assuming a maximum RPC rate of 1M RPC/s.
  * If the time is clearly incorrect, we instead use a 62-bit random number.
 * In the worst case the random number will overflow 1M RPCs per second in
@@ -2500,22 +2445,22 @@ void ptlrpc_init_xid(void)
 {
         time_t now = cfs_time_current_sec();

-        spin_lock_init(&ptlrpc_last_xid_lock);
+        cfs_spin_lock_init(&ptlrpc_last_xid_lock);
         if (now < YEAR_2004) {
                 ll_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
                 ptlrpc_last_xid >>= 2;
                 ptlrpc_last_xid |= (1ULL << 61);
         } else {
-                ptlrpc_last_xid = (now << 20);
+                ptlrpc_last_xid = (__u64)now << 20;
         }
 }

 __u64 ptlrpc_next_xid(void)
 {
         __u64 tmp;
-        spin_lock(&ptlrpc_last_xid_lock);
+        cfs_spin_lock(&ptlrpc_last_xid_lock);
         tmp = ++ptlrpc_last_xid;
-        spin_unlock(&ptlrpc_last_xid_lock);
+        cfs_spin_unlock(&ptlrpc_last_xid_lock);
         return tmp;
 }

@@ -2524,9 +2469,9 @@ __u64 ptlrpc_sample_next_xid(void)
 #if BITS_PER_LONG == 32
         /* need to avoid possible word tearing on 32-bit systems */
         __u64 tmp;
-        spin_lock(&ptlrpc_last_xid_lock);
+        cfs_spin_lock(&ptlrpc_last_xid_lock);
         tmp = ptlrpc_last_xid + 1;
-        spin_unlock(&ptlrpc_last_xid_lock);
+        cfs_spin_unlock(&ptlrpc_last_xid_lock);
         return tmp;
 #else
         /* No need to lock, since returned value is racy anyways */
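The BITS_PER_LONG == 32 branch above takes the XID spinlock only to avoid word tearing: on a 32-bit CPU a 64-bit load is performed as two 32-bit loads, so an unlocked reader could observe one half of a concurrent update made by ptlrpc_next_xid(). A minimal stand-alone sketch of the same locked-read pattern, using hypothetical userspace names (last_xid, xid_lock, sample_next_xid) rather than the Lustre symbols:

    /* Sketch only: illustrates the locking pattern above, not Lustre code. */
    #include <stdint.h>
    #include <pthread.h>

    static uint64_t last_xid;
    static pthread_mutex_t xid_lock = PTHREAD_MUTEX_INITIALIZER;

    uint64_t next_xid(void)
    {
            uint64_t tmp;

            pthread_mutex_lock(&xid_lock);
            tmp = ++last_xid;       /* both 32-bit halves updated under the lock */
            pthread_mutex_unlock(&xid_lock);
            return tmp;
    }

    uint64_t sample_next_xid(void)
    {
            uint64_t tmp;

            /* On 32-bit targets a bare 64-bit read could tear, so take the
             * same lock the writer holds; on 64-bit an aligned load would
             * already be a single access. */
            pthread_mutex_lock(&xid_lock);
            tmp = last_xid + 1;
            pthread_mutex_unlock(&xid_lock);
            return tmp;
    }

The kernel code uses a cfs_spinlock_t for the same purpose; the point is only that readers must serialize with the writer on 32-bit platforms, while 64-bit platforms can read the value unlocked and accept a slightly stale result.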