/** Implementation of client-side PortalRPC interfaces */
#define DEBUG_SUBSYSTEM S_RPC
-#ifndef __KERNEL__
-#include <errno.h>
-#include <signal.h>
-#include <liblustre.h>
-#endif
#include <obd_support.h>
#include <obd_class.h>
/* Adjust expected network latency */
-static void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
- unsigned int service_time)
+void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
+ unsigned int service_time)
{
unsigned int nl, oldnl;
struct imp_at *at;
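	/* nl: freshly sampled network latency, i.e. round-trip time minus
	 * the server-reported service_time; oldnl: the previous estimate
	 * cached in the import's imp_at state. The elided body likely feeds
	 * nl into the adaptive-timeout history. */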
* If anything goes wrong just ignore it - same as if it never happened
*/
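/* The __must_hold() sparse annotation added below records that the caller
 * must hold req->rq_lock when calling this function. */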
static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
+__must_hold(&req->rq_lock)
{
struct ptlrpc_request *early_req;
time_t olddl;
kmem_cache_destroy(request_cache);
}
-struct ptlrpc_request *ptlrpc_request_cache_alloc(int flags)
+struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags)
{
struct ptlrpc_request *req;
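	/* req comes from the request_cache slab torn down above; taking
	 * gfp_t instead of int lets sparse type-check the GFP flags
	 * (likely the motivation for this signature change). */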
	} else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
		/* pings may safely race with umount */
		DEBUG_REQ(req->rq_no_resend ? D_HA : D_ERROR, req,
			  "IMP_CLOSED ");
*status = -EIO;
} else if (ptlrpc_send_limit_expired(req)) {
- /* probably doesn't need to be a D_ERROR after initial testing */
- DEBUG_REQ(D_ERROR, req, "send limit expired ");
- *status = -EIO;
+		/* probably doesn't need to be a D_ERROR after initial testing */
+ DEBUG_REQ(D_HA, req, "send limit expired ");
+ *status = -ETIMEDOUT;
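+		/* D_HA/-ETIMEDOUT instead of D_ERROR/-EIO: an expired send
+		 * limit is a timeout, not an I/O failure, so callers can
+		 * (presumably) tell the two conditions apart. */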
} else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
imp->imp_state == LUSTRE_IMP_CONNECTING) {
/* allow CONNECT even if import is invalid */ ;
time_t now = cfs_time_current_sec();
DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS");
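+	/* rq_resend is a bit in a bitfield shared with other rq_* flags in
+	 * struct ptlrpc_request, so (presumably) it must only be written
+	 * under rq_lock to avoid clobbering neighbouring bits. */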
+ spin_lock(&req->rq_lock);
req->rq_resend = 1;
+ spin_unlock(&req->rq_lock);
req->rq_nr_resend++;
/* allocate new xid to avoid reply reconstruction */
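	/* -EINPROGRESS means the server never executed the request, so a
	 * fresh xid presumably makes the resend look like a brand-new
	 * request instead of triggering reply reconstruction for the old one. */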
int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
{
struct list_head *tmp, *next;
- int force_timer_recalc = 0;
- ENTRY;
+ struct list_head comp_reqs;
+ int force_timer_recalc = 0;
+ ENTRY;
if (atomic_read(&set->set_remaining) == 0)
- RETURN(1);
+ RETURN(1);
+ INIT_LIST_HEAD(&comp_reqs);
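+	/* comp_reqs collects requests that are already in RQ_PHASE_COMPLETE
+	 * during the scan below; they are spliced back to the head of
+	 * set->set_requests once the loop finishes. */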
list_for_each_safe(tmp, next, &set->set_requests) {
struct ptlrpc_request *req =
			list_entry(tmp, struct ptlrpc_request,
				   rq_set_chain);
ptlrpc_rqphase_move(req, req->rq_next_phase);
}
- if (req->rq_phase == RQ_PHASE_COMPLETE)
+ if (req->rq_phase == RQ_PHASE_COMPLETE) {
+ list_move_tail(&req->rq_set_chain, &comp_reqs);
continue;
+ }
if (req->rq_phase == RQ_PHASE_INTERPRET)
GOTO(interpret, req->rq_status);
if (req->rq_status != 0)
set->set_rc = req->rq_status;
ptlrpc_req_finished(req);
+ } else {
+ list_move_tail(&req->rq_set_chain, &comp_reqs);
}
}
+	/* Move completed requests to the head of the list so they are
+	 * easier for the caller to find. */
+ list_splice(&comp_reqs, &set->set_requests);
+
/* If we hit an error, we want to recover promptly. */
RETURN(atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
}
* unlinked before returning a req to the pool.
*/
for (;;) {
-#ifdef __KERNEL__
/* The wq argument is ignored by user-space wait_event macros */
wait_queue_head_t *wq = (request->rq_set != NULL) ?
&request->rq_set->set_waitq :
&request->rq_reply_waitq;
-#endif
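	/* Requests in a set are woken through the set's shared waitq;
	 * standalone requests wait on their own rq_reply_waitq. */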
/* Network access will complete in finite time but the HUGE
* timeout lets us CWARN for visibility of sluggish NALs */
lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),