struct nrs_crrn_client *cli = cfs_hlist_entry(hnode,
struct nrs_crrn_client,
cc_hnode);
- cfs_atomic_inc(&cli->cc_ref);
+ atomic_inc(&cli->cc_ref);
}
static void nrs_crrn_hop_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
struct nrs_crrn_client *cli = cfs_hlist_entry(hnode,
struct nrs_crrn_client,
cc_hnode);
- cfs_atomic_dec(&cli->cc_ref);
+ atomic_dec(&cli->cc_ref);
}
static void nrs_crrn_hop_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
struct nrs_crrn_client *cli = cfs_hlist_entry(hnode,
struct nrs_crrn_client,
cc_hnode);
- LASSERTF(cfs_atomic_read(&cli->cc_ref) == 0,
+ LASSERTF(atomic_read(&cli->cc_ref) == 0,
"Busy CRR-N object from client with NID %s, with %d refs\n",
- libcfs_nid2str(cli->cc_nid), cfs_atomic_read(&cli->cc_ref));
+ libcfs_nid2str(cli->cc_nid), atomic_read(&cli->cc_ref));
OBD_FREE_PTR(cli);
}
* with the default max_rpcs_in_flight value, as we are scheduling over
* NIDs, and there may be more than one mount point per client.
*/
- net->cn_quantum = OSC_MAX_RIF_DEFAULT;
+ net->cn_quantum = OBD_MAX_RIF_DEFAULT;
/**
* Set to 1 so that the test inside nrs_crrn_req_add() can evaluate to
* true.
* \param[in] opc the opcode
* \param[in,out] arg used for passing parameters and information
*
- * \pre spin_is_locked(&policy->pol_nrs->->nrs_lock)
- * \post spin_is_locked(&policy->pol_nrs->->nrs_lock)
+ * \pre assert_spin_locked(&policy->pol_nrs->nrs_lock)
+ * \post assert_spin_locked(&policy->pol_nrs->nrs_lock)
*
* \retval 0 operation carried out successfully
* \retval -ve error
int nrs_crrn_ctl(struct ptlrpc_nrs_policy *policy, enum ptlrpc_nrs_ctl opc,
void *arg)
{
- LASSERT(spin_is_locked(&policy->pol_nrs->nrs_lock));
+ assert_spin_locked(&policy->pol_nrs->nrs_lock);
switch((enum nrs_ctl_crr)opc) {
default:
cli->cc_nid = req->rq_peer.nid;
- cfs_atomic_set(&cli->cc_ref, 1);
+ atomic_set(&cli->cc_ref, 1);
tmp = cfs_hash_findadd_unique(net->cn_cli_hash, &cli->cc_nid,
&cli->cc_hnode);
if (tmp != cli) {