if (!desc)
return NULL;
- cfs_spin_lock_init(&desc->bd_lock);
+ spin_lock_init(&desc->bd_lock);
cfs_waitq_init(&desc->bd_waitq);
desc->bd_max_iov = npages;
desc->bd_iov_count = 0;
ENTRY;
req->rq_early = 0;
- cfs_spin_unlock(&req->rq_lock);
+ spin_unlock(&req->rq_lock);
- rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
- if (rc) {
- cfs_spin_lock(&req->rq_lock);
+ rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
+ if (rc) {
+ spin_lock(&req->rq_lock);
RETURN(rc);
}
sptlrpc_cli_finish_early_reply(early_req);
- cfs_spin_lock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
if (rc == 0) {
/* Adjust the local timeout for this req */
*/
void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
{
- cfs_list_t *l, *tmp;
- struct ptlrpc_request *req;
+ cfs_list_t *l, *tmp;
+ struct ptlrpc_request *req;
- LASSERT(pool != NULL);
+ LASSERT(pool != NULL);
- cfs_spin_lock(&pool->prp_lock);
- cfs_list_for_each_safe(l, tmp, &pool->prp_req_list) {
- req = cfs_list_entry(l, struct ptlrpc_request, rq_list);
- cfs_list_del(&req->rq_list);
- LASSERT(req->rq_reqbuf);
- LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
- OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
- OBD_FREE(req, sizeof(*req));
- }
- cfs_spin_unlock(&pool->prp_lock);
- OBD_FREE(pool, sizeof(*pool));
+ spin_lock(&pool->prp_lock);
+ cfs_list_for_each_safe(l, tmp, &pool->prp_req_list) {
+ req = cfs_list_entry(l, struct ptlrpc_request, rq_list);
+ cfs_list_del(&req->rq_list);
+ LASSERT(req->rq_reqbuf);
+ LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
+ OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
+ OBD_FREE(req, sizeof(*req));
+ }
+ spin_unlock(&pool->prp_lock);
+ OBD_FREE(pool, sizeof(*pool));
}
EXPORT_SYMBOL(ptlrpc_free_rq_pool);
"Trying to change pool size with nonempty pool "
"from %d to %d bytes\n", pool->prp_rq_size, size);
- cfs_spin_lock(&pool->prp_lock);
- pool->prp_rq_size = size;
- for (i = 0; i < num_rq; i++) {
- struct ptlrpc_request *req;
- struct lustre_msg *msg;
+ spin_lock(&pool->prp_lock);
+ pool->prp_rq_size = size;
+ for (i = 0; i < num_rq; i++) {
+ struct ptlrpc_request *req;
+ struct lustre_msg *msg;
- cfs_spin_unlock(&pool->prp_lock);
+ spin_unlock(&pool->prp_lock);
OBD_ALLOC(req, sizeof(struct ptlrpc_request));
if (!req)
return;
req->rq_reqbuf = msg;
req->rq_reqbuf_len = size;
req->rq_pool = pool;
- cfs_spin_lock(&pool->prp_lock);
- cfs_list_add_tail(&req->rq_list, &pool->prp_req_list);
- }
- cfs_spin_unlock(&pool->prp_lock);
- return;
+ spin_lock(&pool->prp_lock);
+ cfs_list_add_tail(&req->rq_list, &pool->prp_req_list);
+ }
+ spin_unlock(&pool->prp_lock);
+ return;
}
EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);
/* Request next power of two for the allocation, because internally
kernel would do exactly this */
- cfs_spin_lock_init(&pool->prp_lock);
+ spin_lock_init(&pool->prp_lock);
CFS_INIT_LIST_HEAD(&pool->prp_req_list);
pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
pool->prp_populate = populate_pool;
if (!pool)
return NULL;
- cfs_spin_lock(&pool->prp_lock);
+ spin_lock(&pool->prp_lock);
- /* See if we have anything in a pool, and bail out if nothing,
- * in writeout path, where this matters, this is safe to do, because
- * nothing is lost in this case, and when some in-flight requests
- * complete, this code will be called again. */
- if (unlikely(cfs_list_empty(&pool->prp_req_list))) {
- cfs_spin_unlock(&pool->prp_lock);
- return NULL;
- }
+ /* See if we have anything in the pool, and bail out if it is empty.
+ * In the writeout path, where this matters, this is safe to do:
+ * nothing is lost in that case, and when some in-flight requests
+ * complete, this code will be called again. */
+ if (unlikely(cfs_list_empty(&pool->prp_req_list))) {
+ spin_unlock(&pool->prp_lock);
+ return NULL;
+ }
- request = cfs_list_entry(pool->prp_req_list.next, struct ptlrpc_request,
- rq_list);
- cfs_list_del_init(&request->rq_list);
- cfs_spin_unlock(&pool->prp_lock);
+ request = cfs_list_entry(pool->prp_req_list.next, struct ptlrpc_request,
+ rq_list);
+ cfs_list_del_init(&request->rq_list);
+ spin_unlock(&pool->prp_lock);
LASSERT(request->rq_reqbuf);
LASSERT(request->rq_pool);
*/
static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
{
- struct ptlrpc_request_pool *pool = request->rq_pool;
+ struct ptlrpc_request_pool *pool = request->rq_pool;
- cfs_spin_lock(&pool->prp_lock);
- LASSERT(cfs_list_empty(&request->rq_list));
- LASSERT(!request->rq_receiving_reply);
- cfs_list_add_tail(&request->rq_list, &pool->prp_req_list);
- cfs_spin_unlock(&pool->prp_lock);
+ spin_lock(&pool->prp_lock);
+ LASSERT(cfs_list_empty(&request->rq_list));
+ LASSERT(!request->rq_receiving_reply);
+ cfs_list_add_tail(&request->rq_list, &pool->prp_req_list);
+ spin_unlock(&pool->prp_lock);
}
static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
ptlrpc_at_set_req_timeout(request);
- cfs_spin_lock_init(&request->rq_lock);
+ spin_lock_init(&request->rq_lock);
CFS_INIT_LIST_HEAD(&request->rq_list);
CFS_INIT_LIST_HEAD(&request->rq_timed_list);
CFS_INIT_LIST_HEAD(&request->rq_replay_list);
cfs_waitq_init(&set->set_waitq);
cfs_atomic_set(&set->set_new_count, 0);
cfs_atomic_set(&set->set_remaining, 0);
- cfs_spin_lock_init(&set->set_new_req_lock);
+ spin_lock_init(&set->set_new_req_lock);
CFS_INIT_LIST_HEAD(&set->set_new_requests);
CFS_INIT_LIST_HEAD(&set->set_cblist);
set->set_max_inflight = UINT_MAX;
cfs_atomic_dec(&set->set_remaining);
}
- cfs_spin_lock(&req->rq_lock);
- req->rq_set = NULL;
- req->rq_invalid_rqset = 0;
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_set = NULL;
+ req->rq_invalid_rqset = 0;
+ spin_unlock(&req->rq_lock);
ptlrpc_req_finished (req);
}
int count, i;
LASSERT(req->rq_set == NULL);
- LASSERT(cfs_test_bit(LIOD_STOP, &pc->pc_flags) == 0);
+ LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0);
- cfs_spin_lock(&set->set_new_req_lock);
- /*
- * The set takes over the caller's request reference.
- */
- req->rq_set = set;
- req->rq_queued_time = cfs_time_current();
- cfs_list_add_tail(&req->rq_set_chain, &set->set_new_requests);
- count = cfs_atomic_inc_return(&set->set_new_count);
- cfs_spin_unlock(&set->set_new_req_lock);
+ spin_lock(&set->set_new_req_lock);
+ /*
+ * The set takes over the caller's request reference.
+ */
+ req->rq_set = set;
+ req->rq_queued_time = cfs_time_current();
+ cfs_list_add_tail(&req->rq_set_chain, &set->set_new_requests);
+ count = cfs_atomic_inc_return(&set->set_new_count);
+ spin_unlock(&set->set_new_req_lock);
/* Only need to call wakeup once for the first entry. */
if (count == 1) {
}
if (imp->imp_replayable) {
- cfs_spin_lock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
/*
* No point in adding already-committed requests to the replay
* list, we will just remove them immediately. b=9829
ptlrpc_save_versions(req);
ptlrpc_retain_replayable_request(req, imp);
} else if (req->rq_commit_cb != NULL) {
- cfs_spin_unlock(&imp->imp_lock);
- req->rq_commit_cb(req);
- cfs_spin_lock(&imp->imp_lock);
+ spin_unlock(&imp->imp_lock);
+ req->rq_commit_cb(req);
+ spin_lock(&imp->imp_lock);
}
/*
if (req->rq_transno > imp->imp_peer_committed_transno)
ptlrpc_pinger_commit_expected(imp);
- cfs_spin_unlock(&imp->imp_lock);
- }
+ spin_unlock(&imp->imp_lock);
+ }
- RETURN(rc);
+ RETURN(rc);
}
/**
ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
- cfs_spin_lock(&imp->imp_lock);
-
- if (!req->rq_generation_set)
- req->rq_import_generation = imp->imp_generation;
-
- if (ptlrpc_import_delay_req(imp, req, &rc)) {
- cfs_spin_lock(&req->rq_lock);
- req->rq_waiting = 1;
- cfs_spin_unlock(&req->rq_lock);
-
- DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
- "(%s != %s)", lustre_msg_get_status(req->rq_reqmsg),
- ptlrpc_import_state_name(req->rq_send_state),
- ptlrpc_import_state_name(imp->imp_state));
- LASSERT(cfs_list_empty(&req->rq_list));
- cfs_list_add_tail(&req->rq_list, &imp->imp_delayed_list);
- cfs_atomic_inc(&req->rq_import->imp_inflight);
- cfs_spin_unlock(&imp->imp_lock);
- RETURN(0);
- }
+ spin_lock(&imp->imp_lock);
+
+ if (!req->rq_generation_set)
+ req->rq_import_generation = imp->imp_generation;
+
+ if (ptlrpc_import_delay_req(imp, req, &rc)) {
+ spin_lock(&req->rq_lock);
+ req->rq_waiting = 1;
+ spin_unlock(&req->rq_lock);
+
+ DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
+ "(%s != %s)", lustre_msg_get_status(req->rq_reqmsg),
+ ptlrpc_import_state_name(req->rq_send_state),
+ ptlrpc_import_state_name(imp->imp_state));
+ LASSERT(cfs_list_empty(&req->rq_list));
+ cfs_list_add_tail(&req->rq_list, &imp->imp_delayed_list);
+ cfs_atomic_inc(&req->rq_import->imp_inflight);
+ spin_unlock(&imp->imp_lock);
+ RETURN(0);
+ }
- if (rc != 0) {
- cfs_spin_unlock(&imp->imp_lock);
- req->rq_status = rc;
- ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
- RETURN(rc);
- }
+ if (rc != 0) {
+ spin_unlock(&imp->imp_lock);
+ req->rq_status = rc;
+ ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
+ RETURN(rc);
+ }
- LASSERT(cfs_list_empty(&req->rq_list));
- cfs_list_add_tail(&req->rq_list, &imp->imp_sending_list);
- cfs_atomic_inc(&req->rq_import->imp_inflight);
- cfs_spin_unlock(&imp->imp_lock);
+ LASSERT(cfs_list_empty(&req->rq_list));
+ cfs_list_add_tail(&req->rq_list, &imp->imp_sending_list);
+ cfs_atomic_inc(&req->rq_import->imp_inflight);
+ spin_unlock(&imp->imp_lock);
lustre_msg_set_status(req->rq_reqmsg, cfs_curproc_pid());
}
if (req->rq_err) {
- cfs_spin_lock(&req->rq_lock);
- req->rq_replied = 0;
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_replied = 0;
+ spin_unlock(&req->rq_lock);
if (req->rq_status == 0)
req->rq_status = -EIO;
ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
if (!ptlrpc_unregister_reply(req, 1))
continue;
- cfs_spin_lock(&imp->imp_lock);
- if (ptlrpc_import_delay_req(imp, req, &status)){
- /* put on delay list - only if we wait
- * recovery finished - before send */
- cfs_list_del_init(&req->rq_list);
- cfs_list_add_tail(&req->rq_list,
- &imp-> \
- imp_delayed_list);
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ if (ptlrpc_import_delay_req(imp, req, &status)){
+ /* put on delay list - only if we wait
+ * recovery finished - before send */
+ cfs_list_del_init(&req->rq_list);
+ cfs_list_add_tail(&req->rq_list,
+ &imp->imp_delayed_list);
+ spin_unlock(&imp->imp_lock);
continue;
}
req->rq_status = status;
ptlrpc_rqphase_move(req,
RQ_PHASE_INTERPRET);
- cfs_spin_unlock(&imp->imp_lock);
- GOTO(interpret, req->rq_status);
- }
- if (ptlrpc_no_resend(req) && !req->rq_wait_ctx) {
- req->rq_status = -ENOTCONN;
- ptlrpc_rqphase_move(req,
- RQ_PHASE_INTERPRET);
- cfs_spin_unlock(&imp->imp_lock);
- GOTO(interpret, req->rq_status);
- }
-
- cfs_list_del_init(&req->rq_list);
- cfs_list_add_tail(&req->rq_list,
- &imp->imp_sending_list);
-
- cfs_spin_unlock(&imp->imp_lock);
-
- cfs_spin_lock(&req->rq_lock);
- req->rq_waiting = 0;
- cfs_spin_unlock(&req->rq_lock);
-
- if (req->rq_timedout || req->rq_resend) {
- /* This is re-sending anyways,
- * let's mark req as resend. */
- cfs_spin_lock(&req->rq_lock);
- req->rq_resend = 1;
- cfs_spin_unlock(&req->rq_lock);
+ spin_unlock(&imp->imp_lock);
+ GOTO(interpret, req->rq_status);
+ }
+ if (ptlrpc_no_resend(req) &&
+ !req->rq_wait_ctx) {
+ req->rq_status = -ENOTCONN;
+ ptlrpc_rqphase_move(req,
+ RQ_PHASE_INTERPRET);
+ spin_unlock(&imp->imp_lock);
+ GOTO(interpret, req->rq_status);
+ }
+
+ cfs_list_del_init(&req->rq_list);
+ cfs_list_add_tail(&req->rq_list,
+ &imp->imp_sending_list);
+
+ spin_unlock(&imp->imp_lock);
+
+ spin_lock(&req->rq_lock);
+ req->rq_waiting = 0;
+ spin_unlock(&req->rq_lock);
+
+ if (req->rq_timedout || req->rq_resend) {
+ /* This is re-sending anyways,
+ * let's mark req as resend. */
+ spin_lock(&req->rq_lock);
+ req->rq_resend = 1;
+ spin_unlock(&req->rq_lock);
if (req->rq_bulk) {
__u64 old_xid;
if (status) {
if (req->rq_err) {
req->rq_status = status;
- cfs_spin_lock(&req->rq_lock);
- req->rq_wait_ctx = 0;
- cfs_spin_unlock(&req->rq_lock);
- force_timer_recalc = 1;
- } else {
- cfs_spin_lock(&req->rq_lock);
- req->rq_wait_ctx = 1;
- cfs_spin_unlock(&req->rq_lock);
- }
-
- continue;
- } else {
- cfs_spin_lock(&req->rq_lock);
- req->rq_wait_ctx = 0;
- cfs_spin_unlock(&req->rq_lock);
- }
- rc = ptl_send_rpc(req, 0);
- if (rc) {
- DEBUG_REQ(D_HA, req, "send failed (%d)",
- rc);
- force_timer_recalc = 1;
- cfs_spin_lock(&req->rq_lock);
- req->rq_net_err = 1;
- cfs_spin_unlock(&req->rq_lock);
- }
- /* need to reset the timeout */
- force_timer_recalc = 1;
- }
+ spin_lock(&req->rq_lock);
+ req->rq_wait_ctx = 0;
+ spin_unlock(&req->rq_lock);
+ force_timer_recalc = 1;
+ } else {
+ spin_lock(&req->rq_lock);
+ req->rq_wait_ctx = 1;
+ spin_unlock(&req->rq_lock);
+ }
+
+ continue;
+ } else {
+ spin_lock(&req->rq_lock);
+ req->rq_wait_ctx = 0;
+ spin_unlock(&req->rq_lock);
+ }
+
+ rc = ptl_send_rpc(req, 0);
+ if (rc) {
+ DEBUG_REQ(D_HA, req,
+ "send failed: rc = %d", rc);
+ force_timer_recalc = 1;
+ spin_lock(&req->rq_lock);
+ req->rq_net_err = 1;
+ spin_unlock(&req->rq_lock);
+ }
+ /* need to reset the timeout */
+ force_timer_recalc = 1;
+ }
- cfs_spin_lock(&req->rq_lock);
- if (ptlrpc_client_early(req)) {
- ptlrpc_at_recv_early_reply(req);
- cfs_spin_unlock(&req->rq_lock);
- continue;
- }
- /* Still waiting for a reply? */
- if (ptlrpc_client_recv(req)) {
- cfs_spin_unlock(&req->rq_lock);
- continue;
- }
- /* Did we actually receive a reply? */
- if (!ptlrpc_client_replied(req)) {
- cfs_spin_unlock(&req->rq_lock);
- continue;
- }
-
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ if (ptlrpc_client_early(req)) {
+ ptlrpc_at_recv_early_reply(req);
+ spin_unlock(&req->rq_lock);
+ continue;
+ }
+ /* Still waiting for a reply? */
+ if (ptlrpc_client_recv(req)) {
+ spin_unlock(&req->rq_lock);
+ continue;
+ }
+ /* Did we actually receive a reply? */
+ if (!ptlrpc_client_replied(req)) {
+ spin_unlock(&req->rq_lock);
+ continue;
+ }
+
+ spin_unlock(&req->rq_lock);
/* unlink from net because we are going to
* swab in-place of reply buffer */
libcfs_nid2str(imp->imp_connection->c_peer.nid),
lustre_msg_get_opc(req->rq_reqmsg));
- cfs_spin_lock(&imp->imp_lock);
- /* Request already may be not on sending or delaying list. This
- * may happen in the case of marking it erroneous for the case
- * ptlrpc_import_delay_req(req, status) find it impossible to
- * allow sending this rpc and returns *status != 0. */
- if (!cfs_list_empty(&req->rq_list)) {
- cfs_list_del_init(&req->rq_list);
- cfs_atomic_dec(&imp->imp_inflight);
- }
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ /* The request may no longer be on the sending or delayed list.
+ * This can happen when it was marked erroneous because
+ * ptlrpc_import_delay_req(req, status) found it impossible to
+ * allow sending this rpc and returned *status != 0. */
+ if (!cfs_list_empty(&req->rq_list)) {
+ cfs_list_del_init(&req->rq_list);
+ cfs_atomic_dec(&imp->imp_inflight);
+ }
+ spin_unlock(&imp->imp_lock);
cfs_atomic_dec(&set->set_remaining);
cfs_waitq_broadcast(&imp->imp_recovery_waitq);
/* free the request that has just been completed
* in order not to pollute set->set_requests */
cfs_list_del_init(&req->rq_set_chain);
- cfs_spin_lock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
req->rq_set = NULL;
req->rq_invalid_rqset = 0;
- cfs_spin_unlock(&req->rq_lock);
+ spin_unlock(&req->rq_lock);
/* record rq_status to compute the final status later */
if (req->rq_status != 0)
*/
int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
{
- struct obd_import *imp = req->rq_import;
- int rc = 0;
- ENTRY;
+ struct obd_import *imp = req->rq_import;
+ int rc = 0;
+ ENTRY;
- cfs_spin_lock(&req->rq_lock);
- req->rq_timedout = 1;
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_timedout = 1;
+ spin_unlock(&req->rq_lock);
DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent "CFS_DURATION_T
"/real "CFS_DURATION_T"]",
DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
ptlrpc_import_state_name(req->rq_send_state),
ptlrpc_import_state_name(imp->imp_state));
- cfs_spin_lock(&req->rq_lock);
- req->rq_status = -ETIMEDOUT;
- req->rq_err = 1;
- cfs_spin_unlock(&req->rq_lock);
- RETURN(1);
+ spin_lock(&req->rq_lock);
+ req->rq_status = -ETIMEDOUT;
+ req->rq_err = 1;
+ spin_unlock(&req->rq_lock);
+ RETURN(1);
}
/* if a request can't be resent we can't wait for an answer after
*/
void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
{
- cfs_spin_lock(&req->rq_lock);
- req->rq_intr = 1;
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_intr = 1;
+ spin_unlock(&req->rq_lock);
}
EXPORT_SYMBOL(ptlrpc_mark_interrupted);
cfs_list_for_each(tmp, &set->set_requests) {
req = cfs_list_entry(tmp, struct ptlrpc_request,
rq_set_chain);
- cfs_spin_lock(&req->rq_lock);
- req->rq_invalid_rqset = 1;
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_invalid_rqset = 1;
+ spin_unlock(&req->rq_lock);
}
}
} while (rc != 0 || cfs_atomic_read(&set->set_remaining) != 0);
/* We must take it off the imp_replay_list first. Otherwise, we'll set
* request->rq_reqmsg to NULL while osc_close is dereferencing it. */
if (request->rq_import != NULL) {
- if (!locked)
- cfs_spin_lock(&request->rq_import->imp_lock);
- cfs_list_del_init(&request->rq_replay_list);
- if (!locked)
- cfs_spin_unlock(&request->rq_import->imp_lock);
+ if (!locked)
+ spin_lock(&request->rq_import->imp_lock);
+ cfs_list_del_init(&request->rq_replay_list);
+ if (!locked)
+ spin_unlock(&request->rq_import->imp_lock);
}
LASSERTF(cfs_list_empty(&request->rq_replay_list), "req %p\n", request);
DEBUG_REQ(D_INFO, req, "commit (last_committed "LPU64")",
imp->imp_peer_committed_transno);
free_req:
- cfs_spin_lock(&req->rq_lock);
- req->rq_replay = 0;
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_replay = 0;
+ spin_unlock(&req->rq_lock);
if (req->rq_commit_cb != NULL)
req->rq_commit_cb(req);
cfs_list_del_init(&req->rq_replay_list);
lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 });
req->rq_status = -EAGAIN;
- cfs_spin_lock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
req->rq_resend = 1;
req->rq_net_err = 0;
req->rq_timedout = 0;
old_xid, req->rq_xid);
}
ptlrpc_client_wake_req(req);
- cfs_spin_unlock(&req->rq_lock);
+ spin_unlock(&req->rq_lock);
}
EXPORT_SYMBOL(ptlrpc_resend_req);
/* XXX: this function and rq_status are currently unused */
void ptlrpc_restart_req(struct ptlrpc_request *req)
{
- DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
- req->rq_status = -ERESTARTSYS;
+ DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
+ req->rq_status = -ERESTARTSYS;
- cfs_spin_lock(&req->rq_lock);
- req->rq_restart = 1;
- req->rq_timedout = 0;
- ptlrpc_client_wake_req(req);
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_restart = 1;
+ req->rq_timedout = 0;
+ ptlrpc_client_wake_req(req);
+ spin_unlock(&req->rq_lock);
}
EXPORT_SYMBOL(ptlrpc_restart_req);
/** VBR: check version failure */
if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
- /** replay was failed due to version mismatch */
- DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n");
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_vbr_failed = 1;
- imp->imp_no_lock_replay = 1;
- cfs_spin_unlock(&imp->imp_lock);
- lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
+ /** replay failed due to a version mismatch */
+ DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n");
+ spin_lock(&imp->imp_lock);
+ imp->imp_vbr_failed = 1;
+ imp->imp_no_lock_replay = 1;
+ spin_unlock(&imp->imp_lock);
+ lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
} else {
/** The transno had better not change over replay. */
LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
lustre_msg_get_transno(req->rq_repmsg),
LPX64"/"LPX64"\n",
lustre_msg_get_transno(req->rq_reqmsg),
lustre_msg_get_transno(req->rq_repmsg));
}
- cfs_spin_lock(&imp->imp_lock);
- /** if replays by version then gap was occur on server, no trust to locks */
- if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY)
- imp->imp_no_lock_replay = 1;
- imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ /** if replaying by version, a gap occurred on the server; do not trust locks */
+ if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY)
+ imp->imp_no_lock_replay = 1;
+ imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
+ spin_unlock(&imp->imp_lock);
LASSERT(imp->imp_last_replay_transno);
/* transaction number shouldn't be bigger than the latest replayed */
* ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
* this flag and then putting requests on sending_list or delayed_list.
*/
- cfs_spin_lock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
/* XXX locking? Maybe we should remove each request with the list
* locked? Also, how do we know if the requests on the list are
DEBUG_REQ(D_RPCTRACE, req, "inflight");
- cfs_spin_lock (&req->rq_lock);
- if (req->rq_import_generation < imp->imp_generation) {
- req->rq_err = 1;
+ spin_lock(&req->rq_lock);
+ if (req->rq_import_generation < imp->imp_generation) {
+ req->rq_err = 1;
req->rq_status = -EIO;
- ptlrpc_client_wake_req(req);
- }
- cfs_spin_unlock (&req->rq_lock);
- }
+ ptlrpc_client_wake_req(req);
+ }
+ spin_unlock(&req->rq_lock);
+ }
- cfs_list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
- struct ptlrpc_request *req =
- cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
+ cfs_list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
+ struct ptlrpc_request *req =
+ cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
- DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
+ DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
- cfs_spin_lock (&req->rq_lock);
- if (req->rq_import_generation < imp->imp_generation) {
- req->rq_err = 1;
+ spin_lock(&req->rq_lock);
+ if (req->rq_import_generation < imp->imp_generation) {
+ req->rq_err = 1;
req->rq_status = -EIO;
- ptlrpc_client_wake_req(req);
- }
- cfs_spin_unlock (&req->rq_lock);
- }
+ ptlrpc_client_wake_req(req);
+ }
+ spin_unlock(&req->rq_lock);
+ }
- /* Last chance to free reqs left on the replay list, but we
- * will still leak reqs that haven't committed. */
- if (imp->imp_replayable)
- ptlrpc_free_committed(imp);
+ /* Last chance to free reqs left on the replay list, but we
+ * will still leak reqs that haven't committed. */
+ if (imp->imp_replayable)
+ ptlrpc_free_committed(imp);
- cfs_spin_unlock(&imp->imp_lock);
+ spin_unlock(&imp->imp_lock);
- EXIT;
+ EXIT;
}
EXPORT_SYMBOL(ptlrpc_abort_inflight);
cfs_list_entry(pos, struct ptlrpc_request,
rq_set_chain);
- cfs_spin_lock(&req->rq_lock);
- if (req->rq_phase != RQ_PHASE_RPC) {
- cfs_spin_unlock(&req->rq_lock);
- continue;
- }
+ spin_lock(&req->rq_lock);
+ if (req->rq_phase != RQ_PHASE_RPC) {
+ spin_unlock(&req->rq_lock);
+ continue;
+ }
- req->rq_err = 1;
- req->rq_status = -EINTR;
- ptlrpc_client_wake_req(req);
- cfs_spin_unlock(&req->rq_lock);
- }
+ req->rq_err = 1;
+ req->rq_status = -EINTR;
+ ptlrpc_client_wake_req(req);
+ spin_unlock(&req->rq_lock);
+ }
}
static __u64 ptlrpc_last_xid;
-static cfs_spinlock_t ptlrpc_last_xid_lock;
+static spinlock_t ptlrpc_last_xid_lock;
/**
* Initialize the XID for the node. This is common among all requests on
{
time_t now = cfs_time_current_sec();
- cfs_spin_lock_init(&ptlrpc_last_xid_lock);
+ spin_lock_init(&ptlrpc_last_xid_lock);
if (now < YEAR_2004) {
cfs_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
ptlrpc_last_xid >>= 2;
*/
__u64 ptlrpc_next_xid(void)
{
- __u64 tmp;
- cfs_spin_lock(&ptlrpc_last_xid_lock);
- tmp = ++ptlrpc_last_xid;
- cfs_spin_unlock(&ptlrpc_last_xid_lock);
- return tmp;
+ __u64 tmp;
+ spin_lock(&ptlrpc_last_xid_lock);
+ tmp = ++ptlrpc_last_xid;
+ spin_unlock(&ptlrpc_last_xid_lock);
+ return tmp;
}
EXPORT_SYMBOL(ptlrpc_next_xid);
__u64 ptlrpc_sample_next_xid(void)
{
#if BITS_PER_LONG == 32
- /* need to avoid possible word tearing on 32-bit systems */
- __u64 tmp;
- cfs_spin_lock(&ptlrpc_last_xid_lock);
- tmp = ptlrpc_last_xid + 1;
- cfs_spin_unlock(&ptlrpc_last_xid_lock);
- return tmp;
+ /* need to avoid possible word tearing on 32-bit systems */
+ __u64 tmp;
+ spin_lock(&ptlrpc_last_xid_lock);
+ tmp = ptlrpc_last_xid + 1;
+ spin_unlock(&ptlrpc_last_xid_lock);
+ return tmp;
#else
- /* No need to lock, since returned value is racy anyways */
- return ptlrpc_last_xid + 1;
+ /* No need to lock, since returned value is racy anyways */
+ return ptlrpc_last_xid + 1;
#endif
}
EXPORT_SYMBOL(ptlrpc_sample_next_xid);
req->rq_must_unlink = 0;
req->rq_no_delay = req->rq_no_resend = 1;
- cfs_spin_lock_init(&req->rq_lock);
+ spin_lock_init(&req->rq_lock);
CFS_INIT_LIST_HEAD(&req->rq_list);
CFS_INIT_LIST_HEAD(&req->rq_replay_list);
CFS_INIT_LIST_HEAD(&req->rq_set_chain);