init_waitqueue_head(&request->rq_reply_waitq);
init_waitqueue_head(&request->rq_set_waitq);
request->rq_xid = ptlrpc_next_xid();
- cfs_atomic_set(&request->rq_refcount, 1);
+ atomic_set(&request->rq_refcount, 1);
lustre_msg_set_opc(request->rq_reqmsg, opcode);
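/*
 * The conversion pattern used throughout this patch: the libcfs
 * cfs_atomic_* wrappers were one-to-one aliases for the native Linux
 * atomic_t primitives, so each call site maps directly:
 *
 *	cfs_atomic_set(&v, n)        ->  atomic_set(&v, n)
 *	cfs_atomic_read(&v)          ->  atomic_read(&v)
 *	cfs_atomic_inc(&v)           ->  atomic_inc(&v)
 *	cfs_atomic_dec(&v)           ->  atomic_dec(&v)
 *	cfs_atomic_inc_return(&v)    ->  atomic_inc_return(&v)
 *	cfs_atomic_dec_and_test(&v)  ->  atomic_dec_and_test(&v)
 */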
OBD_ALLOC(set, sizeof *set);
if (!set)
RETURN(NULL);
- cfs_atomic_set(&set->set_refcount, 1);
+ atomic_set(&set->set_refcount, 1);
CFS_INIT_LIST_HEAD(&set->set_requests);
init_waitqueue_head(&set->set_waitq);
- cfs_atomic_set(&set->set_new_count, 0);
- cfs_atomic_set(&set->set_remaining, 0);
+ atomic_set(&set->set_new_count, 0);
+ atomic_set(&set->set_remaining, 0);
spin_lock_init(&set->set_new_req_lock);
CFS_INIT_LIST_HEAD(&set->set_new_requests);
CFS_INIT_LIST_HEAD(&set->set_cblist);
ENTRY;
/* Requests on the set should either all be completed, or all be new */
- expected_phase = (cfs_atomic_read(&set->set_remaining) == 0) ?
+ expected_phase = (atomic_read(&set->set_remaining) == 0) ?
RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
cfs_list_for_each(tmp, &set->set_requests) {
        struct ptlrpc_request *req =
                cfs_list_entry(tmp, struct ptlrpc_request,
                               rq_set_chain);
        n++;
}
- LASSERTF(cfs_atomic_read(&set->set_remaining) == 0 ||
- cfs_atomic_read(&set->set_remaining) == n, "%d / %d\n",
- cfs_atomic_read(&set->set_remaining), n);
+ LASSERTF(atomic_read(&set->set_remaining) == 0 ||
+ atomic_read(&set->set_remaining) == n, "%d / %d\n",
+ atomic_read(&set->set_remaining), n);
cfs_list_for_each_safe(tmp, next, &set->set_requests) {
struct ptlrpc_request *req =
        cfs_list_entry(tmp, struct ptlrpc_request,
                       rq_set_chain);
cfs_list_del_init(&req->rq_set_chain);
- LASSERT(req->rq_phase == expected_phase);
+ LASSERT(req->rq_phase == expected_phase);
- if (req->rq_phase == RQ_PHASE_NEW) {
- ptlrpc_req_interpret(NULL, req, -EBADR);
- cfs_atomic_dec(&set->set_remaining);
- }
+ if (req->rq_phase == RQ_PHASE_NEW) {
+ ptlrpc_req_interpret(NULL, req, -EBADR);
+ atomic_dec(&set->set_remaining);
+ }
spin_lock(&req->rq_lock);
req->rq_set = NULL;
spin_unlock(&req->rq_lock);
ptlrpc_req_finished(req);
}
- LASSERT(cfs_atomic_read(&set->set_remaining) == 0);
+ LASSERT(atomic_read(&set->set_remaining) == 0);
- ptlrpc_reqset_put(set);
- EXIT;
+ ptlrpc_reqset_put(set);
+ EXIT;
}
EXPORT_SYMBOL(ptlrpc_set_destroy);
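/*
 * A minimal usage sketch (not part of this patch) of the set life
 * cycle that ptlrpc_set_destroy() ends, assuming the existing
 * ptlrpc_prep_set()/ptlrpc_set_add_req()/ptlrpc_set_wait() entry
 * points; error handling is reduced to the allocation check.
 */
static int example_set_roundtrip(struct ptlrpc_request *req)
{
	struct ptlrpc_request_set *set;
	int rc;

	set = ptlrpc_prep_set();	/* set_refcount starts at 1 */
	if (set == NULL)
		return -ENOMEM;

	ptlrpc_set_add_req(set, req);	/* set takes over the req reference */
	rc = ptlrpc_set_wait(set);	/* send everything and wait */
	ptlrpc_set_destroy(set);	/* drops the final set reference */
	return rc;
}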
/* The set takes over the caller's request reference */
cfs_list_add_tail(&req->rq_set_chain, &set->set_requests);
req->rq_set = set;
- cfs_atomic_inc(&set->set_remaining);
+ atomic_inc(&set->set_remaining);
req->rq_queued_time = cfs_time_current();
if (req->rq_reqmsg != NULL)
req->rq_set = set;
req->rq_queued_time = cfs_time_current();
cfs_list_add_tail(&req->rq_set_chain, &set->set_new_requests);
- count = cfs_atomic_inc_return(&set->set_new_count);
+ count = atomic_inc_return(&set->set_new_count);
spin_unlock(&set->set_new_req_lock);
/* Only need to call wakeup once for the first entry. */
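/*
 * A sketch of the wakeup-once logic the comment above refers to: only
 * the transition from empty to non-empty needs to wake the consumer,
 * which is why the new count is taken with atomic_inc_return().
 */
if (count == 1)
	wake_up(&set->set_waitq);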
/* probably doesn't need to be a D_ERROR after initial testing */
DEBUG_REQ(D_ERROR, req, "send limit expired ");
*status = -EIO;
- } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
- imp->imp_state == LUSTRE_IMP_CONNECTING) {
- /* allow CONNECT even if import is invalid */ ;
- if (cfs_atomic_read(&imp->imp_inval_count) != 0) {
- DEBUG_REQ(D_ERROR, req, "invalidate in flight");
- *status = -EIO;
- }
+ } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
+ imp->imp_state == LUSTRE_IMP_CONNECTING) {
+ /* allow CONNECT even if import is invalid */ ;
+ if (atomic_read(&imp->imp_inval_count) != 0) {
+ DEBUG_REQ(D_ERROR, req, "invalidate in flight");
+ *status = -EIO;
+ }
} else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) {
if (!imp->imp_deactive)
DEBUG_REQ(D_NET, req, "IMP_INVALID");
*status = -EIO;
} else if (req->rq_send_state != imp->imp_state) {
/* invalidate in progress - any requests should be dropped */
- if (cfs_atomic_read(&imp->imp_inval_count) != 0) {
+ if (atomic_read(&imp->imp_inval_count) != 0) {
DEBUG_REQ(D_ERROR, req, "invalidate in flight");
*status = -EIO;
} else if (imp->imp_dlm_fake || req->rq_no_delay) {
ptlrpc_import_state_name(imp->imp_state));
LASSERT(cfs_list_empty(&req->rq_list));
cfs_list_add_tail(&req->rq_list, &imp->imp_delayed_list);
- cfs_atomic_inc(&req->rq_import->imp_inflight);
+ atomic_inc(&req->rq_import->imp_inflight);
spin_unlock(&imp->imp_lock);
RETURN(0);
}
LASSERT(cfs_list_empty(&req->rq_list));
cfs_list_add_tail(&req->rq_list, &imp->imp_sending_list);
- cfs_atomic_inc(&req->rq_import->imp_inflight);
+ atomic_inc(&req->rq_import->imp_inflight);
spin_unlock(&imp->imp_lock);
lustre_msg_set_status(req->rq_reqmsg, current_pid());
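/*
 * Note on the pairing above: imp_inflight is incremented whenever a
 * request is queued on imp_delayed_list or imp_sending_list, and the
 * matching atomic_dec(&imp->imp_inflight) happens in ptlrpc_check_set()
 * when the request leaves rq_list. Converting both sides in one patch
 * keeps the counter consistent.
 */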
LASSERT(set->set_producer != NULL);
- remaining = cfs_atomic_read(&set->set_remaining);
+ remaining = atomic_read(&set->set_remaining);
/* populate the ->set_requests list with requests until we
* reach the maximum number of RPCs in flight for this set */
- while (cfs_atomic_read(&set->set_remaining) < set->set_max_inflight) {
+ while (atomic_read(&set->set_remaining) < set->set_max_inflight) {
rc = set->set_producer(set, set->set_producer_arg);
if (rc == -ENOENT) {
/* no more RPC to produce */
}
}
- RETURN((cfs_atomic_read(&set->set_remaining) - remaining));
+ RETURN((atomic_read(&set->set_remaining) - remaining));
}
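/*
 * A minimal sketch of a set_producer callback for the populate loop
 * above (example_next_req() is a hypothetical helper, not a Lustre
 * API): returning -ENOENT tells the loop there is nothing left to
 * produce.
 */
static int example_producer(struct ptlrpc_request_set *set, void *arg)
{
	struct ptlrpc_request *req = example_next_req(arg);

	if (req == NULL)
		return -ENOENT;	/* no more RPCs to produce */

	ptlrpc_set_add_req(set, req);
	return 0;
}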
/**
int force_timer_recalc = 0;
ENTRY;
- if (cfs_atomic_read(&set->set_remaining) == 0)
+ if (atomic_read(&set->set_remaining) == 0)
RETURN(1);
cfs_list_for_each_safe(tmp, next, &set->set_requests) {
* allow sending this rpc and returns *status != 0. */
if (!cfs_list_empty(&req->rq_list)) {
cfs_list_del_init(&req->rq_list);
- cfs_atomic_dec(&imp->imp_inflight);
+ atomic_dec(&imp->imp_inflight);
}
spin_unlock(&imp->imp_lock);
- cfs_atomic_dec(&set->set_remaining);
+ atomic_dec(&set->set_remaining);
wake_up_all(&imp->imp_recovery_waitq);
if (set->set_producer) {
set->set_rc = req->rq_status;
ptlrpc_req_finished(req);
}
- }
+ }
- /* If we hit an error, we want to recover promptly. */
- RETURN(cfs_atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
+ /* If we hit an error, we want to recover promptly. */
+ RETURN(atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
}
EXPORT_SYMBOL(ptlrpc_check_set);
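/*
 * A sketch of how ptlrpc_check_set() is driven (assuming the existing
 * l_wait_event() pattern in ptlrpc_set_wait(); lwi setup elided): the
 * set waitqueue is polled until every request reaches its final phase.
 */
rc = l_wait_event(set->set_waitq, ptlrpc_check_set(NULL, set), &lwi);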
RETURN(1);
}
- cfs_atomic_inc(&imp->imp_timeouts);
+ atomic_inc(&imp->imp_timeouts);
/* The DLM server doesn't want recovery run on its imports. */
if (imp->imp_dlm_fake)
* EINTR.
* I don't really care if we go once more round the loop in
* the error cases -eeb. */
- if (rc == 0 && cfs_atomic_read(&set->set_remaining) == 0) {
+ if (rc == 0 && atomic_read(&set->set_remaining) == 0) {
cfs_list_for_each(tmp, &set->set_requests) {
req = cfs_list_entry(tmp, struct ptlrpc_request,
rq_set_chain);
spin_lock(&req->rq_lock);
req->rq_invalid_rqset = 1;
spin_unlock(&req->rq_lock);
- }
- }
- } while (rc != 0 || cfs_atomic_read(&set->set_remaining) != 0);
+ }
+ }
+ } while (rc != 0 || atomic_read(&set->set_remaining) != 0);
- LASSERT(cfs_atomic_read(&set->set_remaining) == 0);
+ LASSERT(atomic_read(&set->set_remaining) == 0);
rc = set->set_rc; /* rq_status of already freed requests if any */
cfs_list_for_each(tmp, &set->set_requests) {
}
LASSERTF(cfs_list_empty(&request->rq_replay_list), "req %p\n", request);
- if (cfs_atomic_read(&request->rq_refcount) != 0) {
- DEBUG_REQ(D_ERROR, request,
- "freeing request with nonzero refcount");
- LBUG();
- }
+ if (atomic_read(&request->rq_refcount) != 0) {
+ DEBUG_REQ(D_ERROR, request,
+ "freeing request with nonzero refcount");
+ LBUG();
+ }
if (request->rq_repbuf != NULL)
sptlrpc_cli_free_repbuf(request);
}
DEBUG_REQ(D_INFO, request, "refcount now %u",
- cfs_atomic_read(&request->rq_refcount) - 1);
+ atomic_read(&request->rq_refcount) - 1);
- if (cfs_atomic_dec_and_test(&request->rq_refcount)) {
+ if (atomic_dec_and_test(&request->rq_refcount)) {
__ptlrpc_free_req(request, locked);
RETURN(1);
}
*/
struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
{
- ENTRY;
- cfs_atomic_inc(&req->rq_refcount);
- RETURN(req);
+ ENTRY;
+ atomic_inc(&req->rq_refcount);
+ RETURN(req);
}
EXPORT_SYMBOL(ptlrpc_request_addref);
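/*
 * A minimal sketch of the refcount discipline: every reference taken
 * with ptlrpc_request_addref() must be balanced by ptlrpc_req_finished(),
 * and the request is freed once rq_refcount reaches zero.
 */
static void example_hold_and_release(struct ptlrpc_request *req)
{
	struct ptlrpc_request *held = ptlrpc_request_addref(req);

	/* ... use held across a context that may outlive the caller ... */

	ptlrpc_req_finished(held);	/* balances the addref above */
}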
* In case of error, restart the replay process.
*/
static int ptlrpc_replay_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- void * data, int rc)
+ struct ptlrpc_request *req,
+ void *data, int rc)
{
- struct ptlrpc_replay_async_args *aa = data;
- struct obd_import *imp = req->rq_import;
+ struct ptlrpc_replay_async_args *aa = data;
+ struct obd_import *imp = req->rq_import;
- ENTRY;
- cfs_atomic_dec(&imp->imp_replay_inflight);
+ ENTRY;
+ atomic_dec(&imp->imp_replay_inflight);
if (!ptlrpc_client_replied(req)) {
CERROR("request replay timed out, restarting recovery\n");
ptlrpc_at_get_net_latency(req));
DEBUG_REQ(D_HA, req, "REPLAY");
- cfs_atomic_inc(&req->rq_import->imp_replay_inflight);
- ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
+ atomic_inc(&req->rq_import->imp_replay_inflight);
+ ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
- ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
- RETURN(0);
+ ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
+ RETURN(0);
}
EXPORT_SYMBOL(ptlrpc_replay_req);
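/*
 * Note on the replay accounting above: imp_replay_inflight is
 * incremented in ptlrpc_replay_req() before the request is handed to
 * ptlrpcd, and decremented in ptlrpc_replay_interpret() once the reply
 * (or timeout) is processed, letting recovery detect when all replayed
 * requests have completed.
 */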