mxlnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
struct task_struct *task;
- int i = (int) ((long) arg);
+ int i = (int) ((long) arg);
- cfs_atomic_inc(&kmxlnd_data.kmx_nthreads);
+ atomic_inc(&kmxlnd_data.kmx_nthreads);
init_completion(&kmxlnd_data.kmx_completions[i]);
task = kthread_run(fn, arg, name);
if (IS_ERR(task)) {
CERROR("kthread_run() failed with %ld\n", PTR_ERR(task));
- cfs_atomic_dec(&kmxlnd_data.kmx_nthreads);
+ atomic_dec(&kmxlnd_data.kmx_nthreads);
return PTR_ERR(task);
}
return 0;
}
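Throughout this patch the conversion is a mechanical, semantics-preserving rename from the libcfs atomic wrappers (cfs_atomic_t, cfs_atomic_inc(), ...) to the native Linux atomic API. A minimal sketch of the converted refcount idiom, using hypothetical names (demo_obj, demo_get, demo_put), not code from this patch:

#include <linux/atomic.h>
#include <linux/slab.h>

struct demo_obj {
	atomic_t	ref;			/* was cfs_atomic_t */
};

static struct demo_obj *demo_alloc(void)
{
	struct demo_obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (o != NULL)
		atomic_set(&o->ref, 1);		/* was cfs_atomic_set(); 1 ref for caller */
	return o;
}

static void demo_get(struct demo_obj *o)
{
	atomic_inc(&o->ref);			/* was cfs_atomic_inc() */
}

static void demo_put(struct demo_obj *o)
{
	if (atomic_dec_and_test(&o->ref))	/* was cfs_atomic_dec_and_test() */
		kfree(o);			/* free on last reference */
}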
void
mxlnd_thread_stop(long id)
{
- int i = (int) id;
- cfs_atomic_dec (&kmxlnd_data.kmx_nthreads);
+ int i = (int) id;
+ atomic_dec (&kmxlnd_data.kmx_nthreads);
complete(&kmxlnd_data.kmx_completions[i]);
}
void
mxlnd_shutdown (lnet_ni_t *ni)
{
- int i = 0;
- int nthreads = MXLND_NDAEMONS
- + *kmxlnd_tunables.kmx_n_waitd;
+ int i = 0;
+ int nthreads = MXLND_NDAEMONS + *kmxlnd_tunables.kmx_n_waitd;
- LASSERT (ni == kmxlnd_data.kmx_ni);
- LASSERT (ni->ni_data == &kmxlnd_data);
- CDEBUG(D_NET, "in shutdown()\n");
+ LASSERT (ni == kmxlnd_data.kmx_ni);
+ LASSERT (ni->ni_data == &kmxlnd_data);
+ CDEBUG(D_NET, "in shutdown()\n");
- CDEBUG(D_MALLOC, "before MXLND cleanup: libcfs_kmemory %d "
- "kmx_mem_used %ld\n", cfs_atomic_read(&libcfs_kmemory),
- kmxlnd_data.kmx_mem_used);
+ CDEBUG(D_MALLOC, "before MXLND cleanup: libcfs_kmemory %d "
+ "kmx_mem_used %ld\n", atomic_read(&libcfs_kmemory),
+ kmxlnd_data.kmx_mem_used);
- CDEBUG(D_NET, "setting shutdown = 1\n");
- cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
+ CDEBUG(D_NET, "setting shutdown = 1\n");
+ atomic_set(&kmxlnd_data.kmx_shutdown, 1);
- switch (kmxlnd_data.kmx_init) {
+ switch (kmxlnd_data.kmx_init) {
case MXLND_INIT_ALL:
case MXLND_INIT_THREADS:
- CDEBUG(D_NET, "waiting on threads\n");
- /* wait for threads to complete */
- for (i = 0; i < nthreads; i++) {
+ CDEBUG(D_NET, "waiting on threads\n");
+ /* wait for threads to complete */
+ for (i = 0; i < nthreads; i++) {
wait_for_completion(&kmxlnd_data.kmx_completions[i]);
- }
- LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
+ }
+ LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
CDEBUG(D_NET, "freeing completions\n");
MXLND_FREE(kmxlnd_data.kmx_completions,
nthreads * sizeof(struct completion));
}
CDEBUG(D_NET, "shutdown complete\n");
- CDEBUG(D_MALLOC, "after MXLND cleanup: libcfs_kmemory %d "
- "kmx_mem_used %ld\n", cfs_atomic_read(&libcfs_kmemory),
- kmxlnd_data.kmx_mem_used);
+ CDEBUG(D_MALLOC, "after MXLND cleanup: libcfs_kmemory %d "
+ "kmx_mem_used %ld\n", atomic_read(&libcfs_kmemory),
+ kmxlnd_data.kmx_mem_used);
kmxlnd_data.kmx_init = MXLND_INIT_NOTHING;
module_put(THIS_MODULE);
CERROR("Only 1 instance supported\n");
return -EPERM;
}
- CDEBUG(D_MALLOC, "before MXLND startup: libcfs_kmemory %d "
- "kmx_mem_used %ld\n", cfs_atomic_read(&libcfs_kmemory),
- kmxlnd_data.kmx_mem_used);
+ CDEBUG(D_MALLOC, "before MXLND startup: libcfs_kmemory %d "
+ "kmx_mem_used %ld\n", atomic_read(&libcfs_kmemory),
+ kmxlnd_data.kmx_mem_used);
ni->ni_maxtxcredits = MXLND_TX_MSGS();
ni->ni_peertxcredits = *kmxlnd_tunables.kmx_peercredits;
if (ret < 0) {
CERROR("Starting mxlnd_request_waitd[%d] "
"failed with %d\n", i, ret);
- cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
- mx_wakeup(kmxlnd_data.kmx_endpt);
- for (--i; i >= 0; i--) {
+ atomic_set(&kmxlnd_data.kmx_shutdown, 1);
+ mx_wakeup(kmxlnd_data.kmx_endpt);
+ for (--i; i >= 0; i--) {
wait_for_completion(&kmxlnd_data.kmx_completions[i]);
- }
- LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
- MXLND_FREE(kmxlnd_data.kmx_completions,
+ }
+ LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
+ MXLND_FREE(kmxlnd_data.kmx_completions,
nthreads * sizeof(struct completion));
- goto failed;
- }
- }
+ goto failed;
+ }
+ }
ret = mxlnd_thread_start(mxlnd_tx_queued, (void *)((long)i++),
"mxlnd_tx_queued");
- if (ret < 0) {
- CERROR("Starting mxlnd_tx_queued failed with %d\n", ret);
- cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
- mx_wakeup(kmxlnd_data.kmx_endpt);
- for (--i; i >= 0; i--) {
+ if (ret < 0) {
+ CERROR("Starting mxlnd_tx_queued failed with %d\n", ret);
+ atomic_set(&kmxlnd_data.kmx_shutdown, 1);
+ mx_wakeup(kmxlnd_data.kmx_endpt);
+ for (--i; i >= 0; i--) {
wait_for_completion(&kmxlnd_data.kmx_completions[i]);
- }
- LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
- MXLND_FREE(kmxlnd_data.kmx_completions,
+ }
+ LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
+ MXLND_FREE(kmxlnd_data.kmx_completions,
nthreads * sizeof(struct completion));
- goto failed;
- }
+ goto failed;
+ }
ret = mxlnd_thread_start(mxlnd_timeoutd, (void *)((long)i++),
"mxlnd_timeoutd");
- if (ret < 0) {
- CERROR("Starting mxlnd_timeoutd failed with %d\n", ret);
- cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
- mx_wakeup(kmxlnd_data.kmx_endpt);
+ if (ret < 0) {
+ CERROR("Starting mxlnd_timeoutd failed with %d\n", ret);
+ atomic_set(&kmxlnd_data.kmx_shutdown, 1);
+ mx_wakeup(kmxlnd_data.kmx_endpt);
up(&kmxlnd_data.kmx_tx_queue_sem);
- for (--i; i >= 0; i--) {
+ for (--i; i >= 0; i--) {
wait_for_completion(&kmxlnd_data.kmx_completions[i]);
- }
- LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
- MXLND_FREE(kmxlnd_data.kmx_completions,
+ }
+ LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
+ MXLND_FREE(kmxlnd_data.kmx_completions,
nthreads * sizeof(struct completion));
- goto failed;
- }
+ goto failed;
+ }
ret = mxlnd_thread_start(mxlnd_connd, (void *)((long)i++),
"mxlnd_connd");
- if (ret < 0) {
- CERROR("Starting mxlnd_connd failed with %d\n", ret);
- cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
- mx_wakeup(kmxlnd_data.kmx_endpt);
+ if (ret < 0) {
+ CERROR("Starting mxlnd_connd failed with %d\n", ret);
+ atomic_set(&kmxlnd_data.kmx_shutdown, 1);
+ mx_wakeup(kmxlnd_data.kmx_endpt);
up(&kmxlnd_data.kmx_tx_queue_sem);
- for (--i; i >= 0; i--) {
+ for (--i; i >= 0; i--) {
wait_for_completion(&kmxlnd_data.kmx_completions[i]);
- }
- LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
- MXLND_FREE(kmxlnd_data.kmx_completions,
+ }
+ LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
+ MXLND_FREE(kmxlnd_data.kmx_completions,
nthreads * sizeof(struct completion));
- goto failed;
- }
+ goto failed;
+ }
kmxlnd_data.kmx_init = MXLND_INIT_THREADS;
/*****************************************************/
/* global interface state */
typedef struct kmx_data
{
- int kmx_init; /* initialization state */
- cfs_atomic_t kmx_shutdown; /* shutting down? */
- cfs_atomic_t kmx_nthreads; /* number of threads */
+ int kmx_init; /* initialization state */
+ atomic_t kmx_shutdown; /* shutting down? */
+ atomic_t kmx_nthreads; /* number of threads */
struct completion *kmx_completions; /* array of completion struct */
lnet_ni_t *kmx_ni; /* the LND instance */
u64 kmx_incarnation; /* my incarnation value */
cfs_list_t kmx_conn_reqs; /* list of connection reqs */
spinlock_t kmx_conn_lock; /* connection list lock */
struct semaphore kmx_conn_sem; /* connection request list */
- cfs_list_t kmx_conn_zombies; /* list of zombie connections */
- cfs_list_t kmx_orphan_msgs; /* list of txs to cancel */
+ cfs_list_t kmx_conn_zombies; /* list of zombie connections */
+ cfs_list_t kmx_orphan_msgs; /* list of txs to cancel */
- /* list of all known peers */
- cfs_list_t kmx_peers[MXLND_HASH_SIZE];
- cfs_atomic_t kmx_npeers; /* number of peers */
+ /* list of all known peers */
+ cfs_list_t kmx_peers[MXLND_HASH_SIZE];
+ atomic_t kmx_npeers; /* number of peers */
- kmx_pages_t *kmx_tx_pages; /* tx msg pages */
+ kmx_pages_t *kmx_tx_pages; /* tx msg pages */
- struct kmx_ctx *kmx_txs; /* all tx descriptors */
- cfs_list_t kmx_tx_idle; /* list of idle tx */
+ struct kmx_ctx *kmx_txs; /* all tx descriptors */
+ cfs_list_t kmx_tx_idle; /* list of idle tx */
spinlock_t kmx_tx_idle_lock; /* lock for idle tx list */
s32 kmx_tx_used; /* txs in use */
u64 kmx_tx_next_cookie; /* unique id for tx */
cfs_list_t mxk_zombie; /* for placing on zombies list */
u64 mxk_incarnation; /* connection's incarnation value */
u32 mxk_sid; /* peer's MX session id */
- cfs_atomic_t mxk_refcount; /* reference counting */
+ atomic_t mxk_refcount; /* reference counting */
int mxk_status; /* can we send messages? MXLND_CONN_* */
mx_endpoint_addr_t mxk_epa; /* peer's endpoint address */
cfs_list_t mxp_list; /* for placing on kmx_peers */
lnet_nid_t mxp_nid; /* peer's LNET NID */
lnet_ni_t *mxp_ni; /* LNET interface */
- cfs_atomic_t mxp_refcount; /* reference counts */
+ atomic_t mxp_refcount; /* reference counts */
cfs_list_t mxp_conns; /* list of connections */
kmx_conn_t *mxp_conn; /* current connection */
#define mxlnd_peer_addref(peer) \
do { \
- LASSERT(peer != NULL); \
- LASSERT(cfs_atomic_read(&(peer)->mxp_refcount) > 0); \
- cfs_atomic_inc(&(peer)->mxp_refcount); \
+ LASSERT(peer != NULL); \
+ LASSERT(atomic_read(&(peer)->mxp_refcount) > 0); \
+ atomic_inc(&(peer)->mxp_refcount); \
} while (0)
#define mxlnd_peer_decref(peer) \
do { \
- LASSERT(cfs_atomic_read(&(peer)->mxp_refcount) > 0); \
- if (cfs_atomic_dec_and_test(&(peer)->mxp_refcount)) \
- mxlnd_peer_free(peer); \
+ LASSERT(atomic_read(&(peer)->mxp_refcount) > 0); \
+ if (atomic_dec_and_test(&(peer)->mxp_refcount)) \
+ mxlnd_peer_free(peer); \
} while (0)
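A usage sketch for these reference macros (illustration only; the lookup helper named here is hypothetical): take the reference while holding the global lock so the peer stays valid after the unlock, and drop it when done.

/* Illustration, not part of the patch: addref under the lock, decref after use. */
static kmx_peer_t *demo_get_peer(lnet_nid_t nid)
{
	kmx_peer_t *peer;

	read_lock(&kmxlnd_data.kmx_global_lock);
	peer = demo_find_peer_locked(nid);	/* hypothetical hash lookup */
	if (peer != NULL)
		mxlnd_peer_addref(peer);	/* pin the peer past the unlock */
	read_unlock(&kmxlnd_data.kmx_global_lock);

	return peer;	/* caller releases with mxlnd_peer_decref() */
}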
#define mxlnd_conn_addref(conn) \
do { \
- LASSERT(conn != NULL); \
- LASSERT(cfs_atomic_read(&(conn)->mxk_refcount) > 0); \
- cfs_atomic_inc(&(conn)->mxk_refcount); \
+ LASSERT(conn != NULL); \
+ LASSERT(atomic_read(&(conn)->mxk_refcount) > 0); \
+ atomic_inc(&(conn)->mxk_refcount); \
} while (0)
#define mxlnd_conn_decref(conn) \
do { \
LASSERT(conn != NULL); \
- LASSERT(cfs_atomic_read(&(conn)->mxk_refcount) > 0); \
- if (cfs_atomic_dec_and_test(&(conn)->mxk_refcount)) { \
+ LASSERT(atomic_read(&(conn)->mxk_refcount) > 0); \
+ if (atomic_dec_and_test(&(conn)->mxk_refcount)) { \
spin_lock(&kmxlnd_data.kmx_conn_lock); \
LASSERT((conn)->mxk_status == MXLND_CONN_DISCONNECT); \
CDEBUG(D_NET, "adding conn %p to zombies\n", (conn)); \
mxlnd_sleep(msecs_to_jiffies(20));
}
- if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown) != 1) {
- unsigned long last_msg = 0;
+ if (atomic_read(&kmxlnd_data.kmx_shutdown) != 1) {
+ unsigned long last_msg = 0;
/* notify LNET that we are giving up on this peer */
if (cfs_time_after(conn->mxk_last_rx, conn->mxk_last_tx))
memset(conn->mxk_rxs, 0, MXLND_RX_MSGS() * sizeof(kmx_ctx_t));
- conn->mxk_peer = peer;
- CFS_INIT_LIST_HEAD(&conn->mxk_list);
- CFS_INIT_LIST_HEAD(&conn->mxk_zombie);
- cfs_atomic_set(&conn->mxk_refcount, 2); /* ref for owning peer
- and one for the caller */
- if (peer->mxp_nid == kmxlnd_data.kmx_ni->ni_nid) {
- u64 nic_id = 0ULL;
+ conn->mxk_peer = peer;
+ CFS_INIT_LIST_HEAD(&conn->mxk_list);
+ CFS_INIT_LIST_HEAD(&conn->mxk_zombie);
+ atomic_set(&conn->mxk_refcount, 2); /* ref for owning peer
+ and one for the caller */
+ if (peer->mxp_nid == kmxlnd_data.kmx_ni->ni_nid) {
+ u64 nic_id = 0ULL;
u32 ep_id = 0;
/* this is localhost, set the epa and status as up */
void
mxlnd_peer_free(kmx_peer_t *peer)
{
- CDEBUG(D_NET, "freeing peer 0x%p %s\n", peer, libcfs_nid2str(peer->mxp_nid));
+ CDEBUG(D_NET, "freeing peer 0x%p %s\n", peer, libcfs_nid2str(peer->mxp_nid));
- LASSERT (cfs_atomic_read(&peer->mxp_refcount) == 0);
+ LASSERT (atomic_read(&peer->mxp_refcount) == 0);
- if (!cfs_list_empty(&peer->mxp_list)) {
- /* assume we are locked */
- cfs_list_del_init(&peer->mxp_list);
- }
+ if (!cfs_list_empty(&peer->mxp_list)) {
+ /* assume we are locked */
+ cfs_list_del_init(&peer->mxp_list);
+ }
- MXLND_FREE(peer, sizeof (*peer));
- cfs_atomic_dec(&kmxlnd_data.kmx_npeers);
- return;
+ MXLND_FREE(peer, sizeof (*peer));
+ atomic_dec(&kmxlnd_data.kmx_npeers);
+ return;
}
static int
memset(peer, 0, sizeof(*peer));
- CFS_INIT_LIST_HEAD(&peer->mxp_list);
- peer->mxp_nid = nid;
- /* peer->mxp_ni unused - may be used for multi-rail */
- cfs_atomic_set(&peer->mxp_refcount, 1); /* ref for kmx_peers list */
+ CFS_INIT_LIST_HEAD(&peer->mxp_list);
+ peer->mxp_nid = nid;
+ /* peer->mxp_ni unused - may be used for multi-rail */
+ atomic_set(&peer->mxp_refcount, 1); /* ref for kmx_peers list */
peer->mxp_board = board;
peer->mxp_ep_id = ep_id;
mxlnd_peer_decref(peer);
peer = old;
} else {
- /* no other peer, use this one */
- cfs_list_add_tail(&peer->mxp_list,
- &kmxlnd_data.kmx_peers[hash]);
- cfs_atomic_inc(&kmxlnd_data.kmx_npeers);
- mxlnd_peer_addref(peer);
- mxlnd_conn_decref(peer->mxp_conn); /* drop ref from peer_alloc */
+ /* no other peer, use this one */
+ cfs_list_add_tail(&peer->mxp_list,
+ &kmxlnd_data.kmx_peers[hash]);
+ atomic_inc(&kmxlnd_data.kmx_npeers);
+ mxlnd_peer_addref(peer);
+ mxlnd_conn_decref(peer->mxp_conn); /* drop ref from peer_alloc */
}
write_unlock(g_lock);
for (i = 0; i < MXLND_HASH_SIZE; i++) {
cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i],
mxp_list) {
- if (index-- == 0) {
- *nidp = peer->mxp_nid;
- *count = cfs_atomic_read(&peer->mxp_refcount);
- ret = 0;
- break;
- }
+ if (index-- == 0) {
+ *nidp = peer->mxp_nid;
+ *count = atomic_read(&peer->mxp_refcount);
+ ret = 0;
+ break;
+ }
}
}
read_unlock(&kmxlnd_data.kmx_global_lock);
spinlock_t *tx_q_lock = &kmxlnd_data.kmx_tx_queue_lock;
rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
- while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
+ while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) {
ret = down_interruptible(&kmxlnd_data.kmx_tx_queue_sem);
- if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown))
+ if (atomic_read(&kmxlnd_data.kmx_shutdown))
break;
if (ret != 0) /* Should we check for -EINTR? */
continue;
}
}
- if (found == 0) {
- cfs_list_add_tail(&peer->mxp_list,
- &kmxlnd_data.kmx_peers[hash]);
- cfs_atomic_inc(&kmxlnd_data.kmx_npeers);
- } else {
+ if (found == 0) {
+ cfs_list_add_tail(&peer->mxp_list,
+ &kmxlnd_data.kmx_peers[hash]);
+ atomic_inc(&kmxlnd_data.kmx_npeers);
+ } else {
tx->mxc_peer = old;
tx->mxc_conn = old->mxp_conn;
LASSERT(old->mxp_conn != NULL);
CDEBUG(D_NET, "%s starting\n", name);
- while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
- u8 msg_type = 0;
+ while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) {
+ u8 msg_type = 0;
mxret = MX_SUCCESS;
result = 0;
mxret = mx_wait_any(kmxlnd_data.kmx_endpt, MXLND_WAIT_TIMEOUT,
0ULL, 0ULL, &status, &result);
#endif
- if (unlikely(cfs_atomic_read(&kmxlnd_data.kmx_shutdown)))
- break;
+ if (unlikely(atomic_read(&kmxlnd_data.kmx_shutdown)))
+ break;
if (result != 1) {
/* nothing completed... */
cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i],
mxp_list) {
- if (unlikely(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
+ if (unlikely(atomic_read(&kmxlnd_data.kmx_shutdown))) {
read_unlock(g_lock);
- return next;
- }
+ return next;
+ }
conn = peer->mxp_conn;
if (conn) {
peer = existing_peer;
mxlnd_conn_addref(peer->mxp_conn);
conn = peer->mxp_conn;
- } else {
- cfs_list_add_tail(&peer->mxp_list,
- &kmxlnd_data.kmx_peers[hash]);
- cfs_atomic_inc(&kmxlnd_data.kmx_npeers);
- }
+ } else {
+ cfs_list_add_tail(&peer->mxp_list,
+ &kmxlnd_data.kmx_peers[hash]);
+ atomic_inc(&kmxlnd_data.kmx_npeers);
+ }
write_unlock(g_lock);
} else {
ret = mxlnd_conn_alloc(&conn, peer); /* adds 2nd ref */
int
mxlnd_connd(void *arg)
{
- long id = (long) arg;
+ long id = (long) arg;
- CDEBUG(D_NET, "connd starting\n");
+ CDEBUG(D_NET, "connd starting\n");
- while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
- int ret = 0;
- kmx_connparams_t *cp = NULL;
+ while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) {
+ int ret = 0;
+ kmx_connparams_t *cp = NULL;
spinlock_t *g_conn_lock = &kmxlnd_data.kmx_conn_lock;
cfs_list_t *conn_reqs = &kmxlnd_data.kmx_conn_reqs;
ret = down_interruptible(&kmxlnd_data.kmx_conn_sem);
- if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown))
- break;
+ if (atomic_read(&kmxlnd_data.kmx_shutdown))
+ break;
if (ret != 0)
continue;
CDEBUG(D_NET, "timeoutd starting\n");
- while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
+ while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) {
now = jiffies;
/* if the next timeout has not arrived, go back to sleep */
* not against the removal of temp */
cfs_list_for_each_entry_safe(peer, temp, peers,
mxp_list) {
- if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown))
+ if (atomic_read(&kmxlnd_data.kmx_shutdown))
break;
mxlnd_peer_addref(peer); /* add ref... */
conn = peer->mxp_conn;
peer->ibp_nid = nid;
peer->ibp_error = 0;
peer->ibp_last_alive = 0;
- cfs_atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
+ atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
CFS_INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */
CFS_INIT_LIST_HEAD(&peer->ibp_conns);
LASSERT (net->ibn_shutdown == 0);
/* npeers only grows with the global lock held */
- cfs_atomic_inc(&net->ibn_npeers);
+ atomic_inc(&net->ibn_npeers);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
kib_net_t *net = peer->ibp_ni->ni_data;
LASSERT (net != NULL);
- LASSERT (cfs_atomic_read(&peer->ibp_refcount) == 0);
+ LASSERT (atomic_read(&peer->ibp_refcount) == 0);
LASSERT (!kiblnd_peer_active(peer));
LASSERT (peer->ibp_connecting == 0);
LASSERT (peer->ibp_accepting == 0);
* they are destroyed, so we can be assured that _all_ state to do
* with this peer has been cleaned up when its refcount drops to
* zero. */
- cfs_atomic_dec(&net->ibn_npeers);
+ atomic_dec(&net->ibn_npeers);
}
kib_peer_t *
CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n",
peer, libcfs_nid2str(nid),
- cfs_atomic_read(&peer->ibp_refcount),
+ atomic_read(&peer->ibp_refcount),
peer->ibp_version);
return peer;
}
continue;
*nidp = peer->ibp_nid;
- *count = cfs_atomic_read(&peer->ibp_refcount);
+ *count = atomic_read(&peer->ibp_refcount);
read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
flags);
spin_lock(&conn->ibc_lock);
CDEBUG(D_CONSOLE, "conn[%d] %p [version %x] -> %s: \n",
- cfs_atomic_read(&conn->ibc_refcount), conn,
+ atomic_read(&conn->ibc_refcount), conn,
conn->ibc_version, libcfs_nid2str(conn->ibc_peer->ibp_nid));
CDEBUG(D_CONSOLE, " state %d nposted %d/%d cred %d o_cred %d r_cred %d\n",
conn->ibc_state, conn->ibc_noops_posted,
LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
/* 1 ref for caller and each rxmsg */
- cfs_atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version));
+ atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version));
conn->ibc_nrx = IBLND_RX_MSGS(version);
/* post receives */
conn->ibc_state = state;
/* 1 more conn */
- cfs_atomic_inc(&net->ibn_nconns);
+ atomic_inc(&net->ibn_nconns);
return conn;
failed_2:
int rc;
LASSERT (!in_interrupt());
- LASSERT (cfs_atomic_read(&conn->ibc_refcount) == 0);
+ LASSERT (atomic_read(&conn->ibc_refcount) == 0);
LASSERT (cfs_list_empty(&conn->ibc_early_rxs));
LASSERT (cfs_list_empty(&conn->ibc_tx_noops));
LASSERT (cfs_list_empty(&conn->ibc_tx_queue));
kiblnd_peer_decref(peer);
rdma_destroy_id(cmid);
- cfs_atomic_dec(&net->ibn_nconns);
+ atomic_dec(&net->ibn_nconns);
}
LIBCFS_FREE(conn, sizeof(*conn));
LASSERT (cfs_list_empty(&kiblnd_data.kib_devs));
CDEBUG(D_MALLOC, "before LND base cleanup: kmem %d\n",
- cfs_atomic_read(&libcfs_kmemory));
+ atomic_read(&libcfs_kmemory));
switch (kiblnd_data.kib_init) {
default:
wake_up_all(&kiblnd_data.kib_failover_waitq);
i = 2;
- while (cfs_atomic_read(&kiblnd_data.kib_nthreads) != 0) {
+ while (atomic_read(&kiblnd_data.kib_nthreads) != 0) {
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"Waiting for %d threads to terminate\n",
- cfs_atomic_read(&kiblnd_data.kib_nthreads));
+ atomic_read(&kiblnd_data.kib_nthreads));
cfs_pause(cfs_time_seconds(1));
}
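/* Aside (editorial note, not part of the patch): in the drain loops above,
 * (i & -i) isolates the lowest set bit of i, so ((i & (-i)) == i) is true
 * exactly when i is a power of two; the loops use it to escalate from
 * D_NET to D_WARNING only on iterations 2, 4, 8, 16, and so on, an
 * exponential backoff of the "waiting for threads" warning. */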
cfs_percpt_free(kiblnd_data.kib_scheds);
CDEBUG(D_MALLOC, "after LND base cleanup: kmem %d\n",
- cfs_atomic_read(&libcfs_kmemory));
+ atomic_read(&libcfs_kmemory));
kiblnd_data.kib_init = IBLND_INIT_NOTHING;
module_put(THIS_MODULE);
goto out;
CDEBUG(D_MALLOC, "before LND net cleanup: kmem %d\n",
- cfs_atomic_read(&libcfs_kmemory));
+ atomic_read(&libcfs_kmemory));
write_lock_irqsave(g_lock, flags);
net->ibn_shutdown = 1;
/* Wait for all peer state to clean up */
i = 2;
- while (cfs_atomic_read(&net->ibn_npeers) != 0) {
+ while (atomic_read(&net->ibn_npeers) != 0) {
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
"%s: waiting for %d peers to disconnect\n",
libcfs_nid2str(ni->ni_nid),
- cfs_atomic_read(&net->ibn_npeers));
+ atomic_read(&net->ibn_npeers));
cfs_pause(cfs_time_seconds(1));
}
/* fall through */
case IBLND_INIT_NOTHING:
- LASSERT (cfs_atomic_read(&net->ibn_nconns) == 0);
+ LASSERT (atomic_read(&net->ibn_nconns) == 0);
if (net->ibn_dev != NULL &&
net->ibn_dev->ibd_nnets == 0)
}
CDEBUG(D_MALLOC, "after LND net cleanup: kmem %d\n",
- cfs_atomic_read(&libcfs_kmemory));
+ atomic_read(&libcfs_kmemory));
net->ibn_init = IBLND_INIT_NOTHING;
ni->ni_data = NULL;
typedef struct kib_hca_dev
{
- struct rdma_cm_id *ibh_cmid; /* listener cmid */
- struct ib_device *ibh_ibdev; /* IB device */
- int ibh_page_shift; /* page shift of current HCA */
- int ibh_page_size; /* page size of current HCA */
- __u64 ibh_page_mask; /* page mask of current HCA */
- int ibh_mr_shift; /* bits shift of max MR size */
- __u64 ibh_mr_size; /* size of MR */
- int ibh_nmrs; /* # of global MRs */
- struct ib_mr **ibh_mrs; /* global MR */
- struct ib_pd *ibh_pd; /* PD */
- kib_dev_t *ibh_dev; /* owner */
- cfs_atomic_t ibh_ref; /* refcount */
+ struct rdma_cm_id *ibh_cmid; /* listener cmid */
+ struct ib_device *ibh_ibdev; /* IB device */
+ int ibh_page_shift; /* page shift of current HCA */
+ int ibh_page_size; /* page size of current HCA */
+ __u64 ibh_page_mask; /* page mask of current HCA */
+ int ibh_mr_shift; /* bits shift of max MR size */
+ __u64 ibh_mr_size; /* size of MR */
+ int ibh_nmrs; /* # of global MRs */
+ struct ib_mr **ibh_mrs; /* global MR */
+ struct ib_pd *ibh_pd; /* PD */
+ kib_dev_t *ibh_dev; /* owner */
+ atomic_t ibh_ref; /* refcount */
} kib_hca_dev_t;
/** # of seconds to keep pool alive */
typedef struct kib_net
{
- cfs_list_t ibn_list; /* chain on kib_dev_t::ibd_nets */
- __u64 ibn_incarnation; /* my epoch */
- int ibn_init; /* initialisation state */
- int ibn_shutdown; /* shutting down? */
+ cfs_list_t ibn_list; /* chain on kib_dev_t::ibd_nets */
+ __u64 ibn_incarnation; /* my epoch */
+ int ibn_init; /* initialisation state */
+ int ibn_shutdown; /* shutting down? */
- cfs_atomic_t ibn_npeers; /* # peers extant */
- cfs_atomic_t ibn_nconns; /* # connections extant */
+ atomic_t ibn_npeers; /* # peers extant */
+ atomic_t ibn_nconns; /* # connections extant */
kib_tx_poolset_t **ibn_tx_ps; /* tx pool-set */
kib_fmr_poolset_t **ibn_fmr_ps; /* fmr pool-set */
cfs_list_t kib_failed_devs;
/* schedulers sleep here */
wait_queue_head_t kib_failover_waitq;
- cfs_atomic_t kib_nthreads; /* # live threads */
+ atomic_t kib_nthreads; /* # live threads */
/* stabilize net/dev/peer/conn ops */
rwlock_t kib_global_lock;
/* hash table of all my known peers */
cfs_list_t ibc_sched_list; /* schedule for attention */
__u16 ibc_version; /* version of connection */
__u64 ibc_incarnation; /* which instance of the peer */
- cfs_atomic_t ibc_refcount; /* # users */
+ atomic_t ibc_refcount; /* # users */
int ibc_state; /* what's happening */
int ibc_nsends_posted; /* # uncompleted sends */
int ibc_noops_posted; /* # uncompleted NOOPs */
cfs_list_t ibp_list; /* stash on global peer list */
lnet_nid_t ibp_nid; /* who's on the other end(s) */
lnet_ni_t *ibp_ni; /* LNet interface */
- cfs_atomic_t ibp_refcount; /* # users */
+ atomic_t ibp_refcount; /* # users */
cfs_list_t ibp_conns; /* all active connections */
cfs_list_t ibp_tx_queue; /* msgs waiting for a conn */
__u16 ibp_version; /* version of peer */
static inline void
kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
{
- LASSERT (cfs_atomic_read(&hdev->ibh_ref) > 0);
- cfs_atomic_inc(&hdev->ibh_ref);
+ LASSERT (atomic_read(&hdev->ibh_ref) > 0);
+ atomic_inc(&hdev->ibh_ref);
}
static inline void
kiblnd_hdev_decref(kib_hca_dev_t *hdev)
{
- LASSERT (cfs_atomic_read(&hdev->ibh_ref) > 0);
- if (cfs_atomic_dec_and_test(&hdev->ibh_ref))
- kiblnd_hdev_destroy(hdev);
+ LASSERT (atomic_read(&hdev->ibh_ref) > 0);
+ if (atomic_dec_and_test(&hdev->ibh_ref))
+ kiblnd_hdev_destroy(hdev);
}
static inline int
#define kiblnd_conn_addref(conn) \
do { \
CDEBUG(D_NET, "conn[%p] (%d)++\n", \
- (conn), cfs_atomic_read(&(conn)->ibc_refcount)); \
- cfs_atomic_inc(&(conn)->ibc_refcount); \
+ (conn), atomic_read(&(conn)->ibc_refcount)); \
+ atomic_inc(&(conn)->ibc_refcount); \
} while (0)
#define kiblnd_conn_decref(conn) \
unsigned long flags; \
\
CDEBUG(D_NET, "conn[%p] (%d)--\n", \
- (conn), cfs_atomic_read(&(conn)->ibc_refcount)); \
+ (conn), atomic_read(&(conn)->ibc_refcount)); \
LASSERT_ATOMIC_POS(&(conn)->ibc_refcount); \
- if (cfs_atomic_dec_and_test(&(conn)->ibc_refcount)) { \
+ if (atomic_dec_and_test(&(conn)->ibc_refcount)) { \
spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); \
cfs_list_add_tail(&(conn)->ibc_list, \
&kiblnd_data.kib_connd_zombies); \
#define kiblnd_peer_addref(peer) \
do { \
- CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n", \
- (peer), libcfs_nid2str((peer)->ibp_nid), \
- cfs_atomic_read (&(peer)->ibp_refcount)); \
- cfs_atomic_inc(&(peer)->ibp_refcount); \
+ CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n", \
+ (peer), libcfs_nid2str((peer)->ibp_nid), \
+ atomic_read (&(peer)->ibp_refcount)); \
+ atomic_inc(&(peer)->ibp_refcount); \
} while (0)
#define kiblnd_peer_decref(peer) \
do { \
- CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n", \
- (peer), libcfs_nid2str((peer)->ibp_nid), \
- cfs_atomic_read (&(peer)->ibp_refcount)); \
- LASSERT_ATOMIC_POS(&(peer)->ibp_refcount); \
- if (cfs_atomic_dec_and_test(&(peer)->ibp_refcount)) \
- kiblnd_destroy_peer(peer); \
+ CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n", \
+ (peer), libcfs_nid2str((peer)->ibp_nid), \
+ atomic_read (&(peer)->ibp_refcount)); \
+ LASSERT_ATOMIC_POS(&(peer)->ibp_refcount); \
+ if (atomic_dec_and_test(&(peer)->ibp_refcount)) \
+ kiblnd_destroy_peer(peer); \
} while (0)
static inline cfs_list_t *
if (IS_ERR(task))
return PTR_ERR(task);
- cfs_atomic_inc(&kiblnd_data.kib_nthreads);
+ atomic_inc(&kiblnd_data.kib_nthreads);
return 0;
}
void
kiblnd_thread_fini (void)
{
- cfs_atomic_dec (&kiblnd_data.kib_nthreads);
+ atomic_dec (&kiblnd_data.kib_nthreads);
}
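These two helpers bracket every LND thread's lifetime: kiblnd_thread_start() bumps kib_nthreads once kthread_run() succeeds, and each thread calls kiblnd_thread_fini() on exit so the shutdown drain loop above can see the count reach zero. A hedged sketch of a thread body under that convention (kiblnd_demo_thread and the kib_shutdown flag are assumptions here, modeled on the kra_shutdown/kqn_shuttingdown flags elsewhere in this patch):

static int kiblnd_demo_thread(void *arg)
{
	/* service loop; kib_shutdown is an assumed flag, per the other LNDs */
	while (!kiblnd_data.kib_shutdown) {
		/* ... poll CQs, schedule connections ... */
		cond_resched();
	}

	kiblnd_thread_fini();	/* drop kib_nthreads so shutdown can finish */
	return 0;
}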
void
/**********************************************************************/
/* wait for sends that have allocated a tx desc to launch or give up */
- while (cfs_atomic_read (&kqswnal_data.kqn_pending_txs) != 0) {
+ while (atomic_read (&kqswnal_data.kqn_pending_txs) != 0) {
CDEBUG(D_NET, "waiting for %d pending sends\n",
- cfs_atomic_read (&kqswnal_data.kqn_pending_txs));
+ atomic_read (&kqswnal_data.kqn_pending_txs));
cfs_pause(cfs_time_seconds(1));
}
kqswnal_data.kqn_shuttingdown = 2;
wake_up_all (&kqswnal_data.kqn_sched_waitq);
- while (cfs_atomic_read (&kqswnal_data.kqn_nthreads) != 0) {
+ while (atomic_read (&kqswnal_data.kqn_nthreads) != 0) {
CDEBUG(D_NET, "waiting for %d threads to terminate\n",
- cfs_atomic_read (&kqswnal_data.kqn_nthreads));
+ atomic_read (&kqswnal_data.kqn_nthreads));
cfs_pause(cfs_time_seconds(1));
}
/* resets flags, pointers to NULL etc */
memset(&kqswnal_data, 0, sizeof (kqswnal_data));
- CDEBUG (D_MALLOC, "done kmem %d\n", cfs_atomic_read(&libcfs_kmemory));
+ CDEBUG (D_MALLOC, "done kmem %d\n", atomic_read(&libcfs_kmemory));
module_put(THIS_MODULE);
}
*kqswnal_tunables.kqn_credits);
}
- CDEBUG (D_MALLOC, "start kmem %d\n", cfs_atomic_read(&libcfs_kmemory));
+ CDEBUG (D_MALLOC, "start kmem %d\n", atomic_read(&libcfs_kmemory));
/* ensure all pointers NULL etc */
memset (&kqswnal_data, 0, sizeof (kqswnal_data));
int krx_nob; /* Number Of Bytes received into buffer */
int krx_rpc_reply_needed:1; /* peer waiting for EKC RPC reply */
int krx_state; /* what this RX is doing */
- cfs_atomic_t krx_refcount; /* how to tell when rpc is done */
+ atomic_t krx_refcount; /* how to tell when rpc is done */
#if KQSW_CKSUM
__u32 krx_cksum; /* checksum */
#endif
{
char kqn_init; /* what's been initialised */
char kqn_shuttingdown;/* I'm trying to shut down */
- cfs_atomic_t kqn_nthreads; /* # threads running */
+ atomic_t kqn_nthreads; /* # threads running */
lnet_ni_t *kqn_ni; /* _the_ instance of me */
kqswnal_rx_t *kqn_rxds; /* stack of all the receive descriptors */
cfs_list_t kqn_idletxds; /* transmit descriptors free to use */
cfs_list_t kqn_activetxds; /* transmit descriptors being used */
spinlock_t kqn_idletxd_lock; /* serialise idle txd access */
- cfs_atomic_t kqn_pending_txs; /* # transmits being prepped */
+ atomic_t kqn_pending_txs; /* # transmits being prepped */
spinlock_t kqn_sched_lock; /* serialise packet schedulers */
wait_queue_head_t kqn_sched_waitq;/* scheduler blocks here */
static inline void kqswnal_rx_decref (kqswnal_rx_t *krx)
{
- LASSERT (cfs_atomic_read (&krx->krx_refcount) > 0);
- if (cfs_atomic_dec_and_test (&krx->krx_refcount))
+ LASSERT (atomic_read (&krx->krx_refcount) > 0);
+ if (atomic_dec_and_test (&krx->krx_refcount))
kqswnal_rx_done(krx);
}
cfs_list_add (&ktx->ktx_list, &kqswnal_data.kqn_activetxds);
ktx->ktx_launcher = current->pid;
- cfs_atomic_inc(&kqswnal_data.kqn_pending_txs);
+ atomic_inc(&kqswnal_data.kqn_pending_txs);
spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);
ktx->ktx_args[0] = krx;
ktx->ktx_args[1] = lntmsg;
- LASSERT (cfs_atomic_read(&krx->krx_refcount) > 0);
+ LASSERT (atomic_read(&krx->krx_refcount) > 0);
/* Take an extra ref for the completion callback */
- cfs_atomic_inc(&krx->krx_refcount);
+ atomic_inc(&krx->krx_refcount);
/* Map on the rail the RPC prefers */
ktx->ktx_rail = ep_rcvr_prefrail(krx->krx_eprx,
kqswnal_put_idle_tx (ktx);
}
- cfs_atomic_dec(&kqswnal_data.kqn_pending_txs);
+ atomic_dec(&kqswnal_data.kqn_pending_txs);
return (rc);
}
}
- cfs_atomic_dec(&kqswnal_data.kqn_pending_txs);
+ atomic_dec(&kqswnal_data.kqn_pending_txs);
return (rc == 0 ? 0 : -EIO);
}
void
kqswnal_requeue_rx (kqswnal_rx_t *krx)
{
- LASSERT (cfs_atomic_read(&krx->krx_refcount) == 0);
+ LASSERT (atomic_read(&krx->krx_refcount) == 0);
LASSERT (!krx->krx_rpc_reply_needed);
krx->krx_state = KRX_POSTED;
{
int rc;
- LASSERT (cfs_atomic_read(&krx->krx_refcount) == 0);
+ LASSERT (atomic_read(&krx->krx_refcount) == 0);
if (krx->krx_rpc_reply_needed) {
/* We've not completed the peer's RPC yet... */
int nob;
int rc;
- LASSERT (cfs_atomic_read(&krx->krx_refcount) == 1);
+ LASSERT (atomic_read(&krx->krx_refcount) == 1);
if (krx->krx_nob < offsetof(kqswnal_msg_t, kqm_u)) {
CERROR("Short message %d received from %s\n",
/* Default to failure if an RPC reply is requested but not handled */
krx->krx_rpc_reply.msg.status = -EPROTO;
- cfs_atomic_set (&krx->krx_refcount, 1);
+ atomic_set (&krx->krx_refcount, 1);
if (status != EP_SUCCESS) {
/* receives complete with failure when receiver is removed */
if (IS_ERR(task))
return PTR_ERR(task);
- cfs_atomic_inc(&kqswnal_data.kqn_nthreads);
+ atomic_inc(&kqswnal_data.kqn_nthreads);
return 0;
}
void
kqswnal_thread_fini (void)
{
- cfs_atomic_dec (&kqswnal_data.kqn_nthreads);
+ atomic_dec (&kqswnal_data.kqn_nthreads);
}
int
libcfs_nid2str(ktx->ktx_nid), rc);
kqswnal_tx_done (ktx, rc);
}
- cfs_atomic_dec (&kqswnal_data.kqn_pending_txs);
+ atomic_dec (&kqswnal_data.kqn_pending_txs);
did_something = 1;
spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
return -ENOMEM;
memset(conn, 0, sizeof(*conn));
- cfs_atomic_set(&conn->rac_refcount, 1);
+ atomic_set(&conn->rac_refcount, 1);
CFS_INIT_LIST_HEAD(&conn->rac_list);
CFS_INIT_LIST_HEAD(&conn->rac_hashlist);
CFS_INIT_LIST_HEAD(&conn->rac_schedlist);
return -ENETDOWN;
}
- cfs_atomic_inc(&kranal_data.kra_nconns);
+ atomic_inc(&kranal_data.kra_nconns);
*connp = conn;
return 0;
}
LASSERT (cfs_list_empty(&conn->rac_list));
LASSERT (cfs_list_empty(&conn->rac_hashlist));
LASSERT (cfs_list_empty(&conn->rac_schedlist));
- LASSERT (cfs_atomic_read(&conn->rac_refcount) == 0);
+ LASSERT (atomic_read(&conn->rac_refcount) == 0);
LASSERT (cfs_list_empty(&conn->rac_fmaq));
LASSERT (cfs_list_empty(&conn->rac_rdmaq));
LASSERT (cfs_list_empty(&conn->rac_replyq));
kranal_peer_decref(conn->rac_peer);
LIBCFS_FREE(conn, sizeof(*conn));
- cfs_atomic_dec(&kranal_data.kra_nconns);
+ atomic_dec(&kranal_data.kra_nconns);
}
void
memset(peer, 0, sizeof(*peer)); /* zero flags etc */
peer->rap_nid = nid;
- cfs_atomic_set(&peer->rap_refcount, 1); /* 1 ref for caller */
+ atomic_set(&peer->rap_refcount, 1); /* 1 ref for caller */
CFS_INIT_LIST_HEAD(&peer->rap_list);
CFS_INIT_LIST_HEAD(&peer->rap_connd_list);
return -ESHUTDOWN;
}
- cfs_atomic_inc(&kranal_data.kra_npeers);
+ atomic_inc(&kranal_data.kra_npeers);
write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
CDEBUG(D_NET, "peer %s %p deleted\n",
libcfs_nid2str(peer->rap_nid), peer);
- LASSERT (cfs_atomic_read(&peer->rap_refcount) == 0);
+ LASSERT (atomic_read(&peer->rap_refcount) == 0);
LASSERT (peer->rap_persistence == 0);
LASSERT (!kranal_peer_active(peer));
LASSERT (!peer->rap_connecting);
* they are destroyed, so we can be assured that _all_ state to do
* with this peer has been cleaned up when its refcount drops to
* zero. */
- cfs_atomic_dec(&kranal_data.kra_npeers);
+ atomic_dec(&kranal_data.kra_npeers);
}
kra_peer_t *
CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
peer, libcfs_nid2str(nid),
- cfs_atomic_read(&peer->rap_refcount));
+ atomic_read(&peer->rap_refcount));
return peer;
}
return NULL;
rac_list);
CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
libcfs_nid2str(conn->rac_peer->rap_nid),
- cfs_atomic_read(&conn->rac_refcount));
- cfs_atomic_inc(&conn->rac_refcount);
+ atomic_read(&conn->rac_refcount));
+ atomic_inc(&conn->rac_refcount);
read_unlock(&kranal_data.kra_global_lock);
return conn;
}
unsigned long flags;
CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
- cfs_atomic_read(&libcfs_kmemory));
+ atomic_read(&libcfs_kmemory));
LASSERT (ni == kranal_data.kra_ni);
LASSERT (ni->ni_data == &kranal_data);
/* Wait for all peers to be freed */
i = 2;
- while (cfs_atomic_read(&kranal_data.kra_npeers) != 0) {
+ while (atomic_read(&kranal_data.kra_npeers) != 0) {
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n */
"waiting for %d peers to close down\n",
- cfs_atomic_read(&kranal_data.kra_npeers));
+ atomic_read(&kranal_data.kra_npeers));
cfs_pause(cfs_time_seconds(1));
}
/* fall through */
* while there are still active connds, but these will be temporary
* since peer creation always fails after the listener has started to
* shut down. */
- LASSERT (cfs_atomic_read(&kranal_data.kra_npeers) == 0);
+ LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);
/* Flag threads to terminate */
kranal_data.kra_shutdown = 1;
/* Wait for threads to exit */
i = 2;
- while (cfs_atomic_read(&kranal_data.kra_nthreads) != 0) {
+ while (atomic_read(&kranal_data.kra_nthreads) != 0) {
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"Waiting for %d threads to terminate\n",
- cfs_atomic_read(&kranal_data.kra_nthreads));
+ atomic_read(&kranal_data.kra_nthreads));
cfs_pause(cfs_time_seconds(1));
}
- LASSERT (cfs_atomic_read(&kranal_data.kra_npeers) == 0);
+ LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);
if (kranal_data.kra_peers != NULL) {
for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
LASSERT (cfs_list_empty(&kranal_data.kra_peers[i]));
kranal_data.kra_peer_hash_size);
}
- LASSERT (cfs_atomic_read(&kranal_data.kra_nconns) == 0);
+ LASSERT (atomic_read(&kranal_data.kra_nconns) == 0);
if (kranal_data.kra_conns != NULL) {
for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
LASSERT (cfs_list_empty(&kranal_data.kra_conns[i]));
kranal_free_txdescs(&kranal_data.kra_idle_txs);
CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
- cfs_atomic_read(&libcfs_kmemory));
+ atomic_read(&libcfs_kmemory));
kranal_data.kra_init = RANAL_INIT_NOTHING;
module_put(THIS_MODULE);
kranal_startup (lnet_ni_t *ni)
{
struct timeval tv;
- int pkmem = cfs_atomic_read(&libcfs_kmemory);
+ int pkmem = atomic_read(&libcfs_kmemory);
int rc;
int i;
kra_device_t *dev;
{
int kra_init; /* initialisation state */
int kra_shutdown; /* shut down? */
- cfs_atomic_t kra_nthreads; /* # live threads */
+ atomic_t kra_nthreads; /* # live threads */
lnet_ni_t *kra_ni; /* _the_ nal instance */
kra_device_t kra_devices[RANAL_MAXDEVS]; /* device/ptag/cq */
cfs_list_t *kra_peers; /* hash table of all my known peers */
int kra_peer_hash_size; /* size of kra_peers */
- cfs_atomic_t kra_npeers; /* # peers extant */
+ atomic_t kra_npeers; /* # peers extant */
int kra_nonewpeers; /* prevent new peers */
cfs_list_t *kra_conns; /* conns hashed by cqid */
__u64 kra_peerstamp; /* when I started up */
__u64 kra_connstamp; /* conn stamp generator */
int kra_next_cqid; /* cqid generator */
- cfs_atomic_t kra_nconns; /* # connections extant */
+ atomic_t kra_nconns; /* # connections extant */
long kra_new_min_timeout; /* minimum timeout on any new conn */
wait_queue_head_t kra_reaper_waitq; /* reaper sleeps here */
__u32 rac_cqid; /* my completion callback id (non-unique) */
__u32 rac_tx_seq; /* tx msg sequence number */
__u32 rac_rx_seq; /* rx msg sequence number */
- cfs_atomic_t rac_refcount; /* # users */
+ atomic_t rac_refcount; /* # users */
unsigned int rac_close_sent; /* I've sent CLOSE */
unsigned int rac_close_recvd; /* I've received CLOSE */
unsigned int rac_state; /* connection state */
lnet_nid_t rap_nid; /* who's on the other end(s) */
__u32 rap_ip; /* IP address of peer */
int rap_port; /* port on which peer listens */
- cfs_atomic_t rap_refcount; /* # users */
+ atomic_t rap_refcount; /* # users */
int rap_persistence; /* "known" peer refs */
int rap_connecting; /* connection forming */
unsigned long rap_reconnect_time; /* get_seconds() when reconnect OK */
kranal_peer_addref(kra_peer_t *peer)
{
CDEBUG(D_NET, "%p->%s\n", peer, libcfs_nid2str(peer->rap_nid));
- LASSERT(cfs_atomic_read(&peer->rap_refcount) > 0);
- cfs_atomic_inc(&peer->rap_refcount);
+ LASSERT(atomic_read(&peer->rap_refcount) > 0);
+ atomic_inc(&peer->rap_refcount);
}
static inline void
kranal_peer_decref(kra_peer_t *peer)
{
CDEBUG(D_NET, "%p->%s\n", peer, libcfs_nid2str(peer->rap_nid));
- LASSERT(cfs_atomic_read(&peer->rap_refcount) > 0);
- if (cfs_atomic_dec_and_test(&peer->rap_refcount))
+ LASSERT(atomic_read(&peer->rap_refcount) > 0);
+ if (atomic_dec_and_test(&peer->rap_refcount))
kranal_destroy_peer(peer);
}
{
CDEBUG(D_NET, "%p->%s\n", conn,
libcfs_nid2str(conn->rac_peer->rap_nid));
- LASSERT(cfs_atomic_read(&conn->rac_refcount) > 0);
- cfs_atomic_inc(&conn->rac_refcount);
+ LASSERT(atomic_read(&conn->rac_refcount) > 0);
+ atomic_inc(&conn->rac_refcount);
}
static inline void
{
CDEBUG(D_NET, "%p->%s\n", conn,
libcfs_nid2str(conn->rac_peer->rap_nid));
- LASSERT(cfs_atomic_read(&conn->rac_refcount) > 0);
- if (cfs_atomic_dec_and_test(&conn->rac_refcount))
+ LASSERT(atomic_read(&conn->rac_refcount) > 0);
+ if (atomic_dec_and_test(&conn->rac_refcount))
kranal_destroy_conn(conn);
}
struct task_struct *task = cfs_thread_run(fn, arg, name);
if (!IS_ERR(task))
- cfs_atomic_inc(&kranal_data.kra_nthreads);
+ atomic_inc(&kranal_data.kra_nthreads);
return PTR_ERR(task);
}
void
kranal_thread_fini (void)
{
- cfs_atomic_dec(&kranal_data.kra_nthreads);
+ atomic_dec(&kranal_data.kra_nthreads);
}
int
ksock_route_t *
ksocknal_create_route (__u32 ipaddr, int port)
{
- ksock_route_t *route;
+ ksock_route_t *route;
- LIBCFS_ALLOC (route, sizeof (*route));
- if (route == NULL)
- return (NULL);
+ LIBCFS_ALLOC (route, sizeof (*route));
+ if (route == NULL)
+ return (NULL);
- cfs_atomic_set (&route->ksnr_refcount, 1);
- route->ksnr_peer = NULL;
- route->ksnr_retry_interval = 0; /* OK to connect at any time */
- route->ksnr_ipaddr = ipaddr;
+ atomic_set (&route->ksnr_refcount, 1);
+ route->ksnr_peer = NULL;
+ route->ksnr_retry_interval = 0; /* OK to connect at any time */
+ route->ksnr_ipaddr = ipaddr;
route->ksnr_port = port;
route->ksnr_scheduled = 0;
route->ksnr_connecting = 0;
void
ksocknal_destroy_route (ksock_route_t *route)
{
- LASSERT (cfs_atomic_read(&route->ksnr_refcount) == 0);
+ LASSERT (atomic_read(&route->ksnr_refcount) == 0);
- if (route->ksnr_peer != NULL)
- ksocknal_peer_decref(route->ksnr_peer);
+ if (route->ksnr_peer != NULL)
+ ksocknal_peer_decref(route->ksnr_peer);
- LIBCFS_FREE (route, sizeof (*route));
+ LIBCFS_FREE (route, sizeof (*route));
}
int
memset (peer, 0, sizeof (*peer)); /* NULL pointers/clear flags etc */
- peer->ksnp_ni = ni;
- peer->ksnp_id = id;
- cfs_atomic_set (&peer->ksnp_refcount, 1); /* 1 ref for caller */
- peer->ksnp_closing = 0;
- peer->ksnp_accepting = 0;
- peer->ksnp_proto = NULL;
- peer->ksnp_last_alive = 0;
- peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
+ peer->ksnp_ni = ni;
+ peer->ksnp_id = id;
+ atomic_set (&peer->ksnp_refcount, 1); /* 1 ref for caller */
+ peer->ksnp_closing = 0;
+ peer->ksnp_accepting = 0;
+ peer->ksnp_proto = NULL;
+ peer->ksnp_last_alive = 0;
+ peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
CFS_INIT_LIST_HEAD (&peer->ksnp_conns);
CFS_INIT_LIST_HEAD (&peer->ksnp_routes);
void
ksocknal_destroy_peer (ksock_peer_t *peer)
{
- ksock_net_t *net = peer->ksnp_ni->ni_data;
+ ksock_net_t *net = peer->ksnp_ni->ni_data;
- CDEBUG (D_NET, "peer %s %p deleted\n",
- libcfs_id2str(peer->ksnp_id), peer);
+ CDEBUG (D_NET, "peer %s %p deleted\n",
+ libcfs_id2str(peer->ksnp_id), peer);
- LASSERT (cfs_atomic_read (&peer->ksnp_refcount) == 0);
- LASSERT (peer->ksnp_accepting == 0);
- LASSERT (cfs_list_empty (&peer->ksnp_conns));
- LASSERT (cfs_list_empty (&peer->ksnp_routes));
- LASSERT (cfs_list_empty (&peer->ksnp_tx_queue));
- LASSERT (cfs_list_empty (&peer->ksnp_zc_req_list));
+ LASSERT (atomic_read (&peer->ksnp_refcount) == 0);
+ LASSERT (peer->ksnp_accepting == 0);
+ LASSERT (cfs_list_empty (&peer->ksnp_conns));
+ LASSERT (cfs_list_empty (&peer->ksnp_routes));
+ LASSERT (cfs_list_empty (&peer->ksnp_tx_queue));
+ LASSERT (cfs_list_empty (&peer->ksnp_zc_req_list));
LIBCFS_FREE (peer, sizeof (*peer));
peer->ksnp_id.pid != id.pid)
continue;
- CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
- peer, libcfs_id2str(id),
- cfs_atomic_read(&peer->ksnp_refcount));
- return (peer);
- }
- return (NULL);
+ CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
+ peer, libcfs_id2str(id),
+ atomic_read(&peer->ksnp_refcount));
+ return (peer);
+ }
+ return (NULL);
}
ksock_peer_t *
conn->ksnc_peer = NULL;
conn->ksnc_route = NULL;
conn->ksnc_sock = sock;
- /* 2 ref, 1 for conn, another extra ref prevents socket
- * being closed before establishment of connection */
- cfs_atomic_set (&conn->ksnc_sock_refcount, 2);
- conn->ksnc_type = type;
- ksocknal_lib_save_callback(sock, conn);
- cfs_atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
-
- conn->ksnc_rx_ready = 0;
- conn->ksnc_rx_scheduled = 0;
-
- CFS_INIT_LIST_HEAD (&conn->ksnc_tx_queue);
- conn->ksnc_tx_ready = 0;
- conn->ksnc_tx_scheduled = 0;
- conn->ksnc_tx_carrier = NULL;
- cfs_atomic_set (&conn->ksnc_tx_nob, 0);
-
- LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t,
- kshm_ips[LNET_MAX_INTERFACES]));
+ /* 2 ref, 1 for conn, another extra ref prevents socket
+ * being closed before establishment of connection */
+ atomic_set (&conn->ksnc_sock_refcount, 2);
+ conn->ksnc_type = type;
+ ksocknal_lib_save_callback(sock, conn);
+ atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
+
+ conn->ksnc_rx_ready = 0;
+ conn->ksnc_rx_scheduled = 0;
+
+ CFS_INIT_LIST_HEAD (&conn->ksnc_tx_queue);
+ conn->ksnc_tx_ready = 0;
+ conn->ksnc_tx_scheduled = 0;
+ conn->ksnc_tx_carrier = NULL;
+ atomic_set (&conn->ksnc_tx_nob, 0);
+
+ LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t,
+ kshm_ips[LNET_MAX_INTERFACES]));
if (hello == NULL) {
rc = -ENOMEM;
goto failed_1;
{
/* Queue the conn for the reaper to destroy */
- LASSERT(cfs_atomic_read(&conn->ksnc_conn_refcount) == 0);
+ LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
cfs_list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
void
ksocknal_destroy_conn (ksock_conn_t *conn)
{
- cfs_time_t last_rcv;
+ cfs_time_t last_rcv;
- /* Final coup-de-grace of the reaper */
- CDEBUG (D_NET, "connection %p\n", conn);
+ /* Final coup-de-grace of the reaper */
+ CDEBUG (D_NET, "connection %p\n", conn);
- LASSERT (cfs_atomic_read (&conn->ksnc_conn_refcount) == 0);
- LASSERT (cfs_atomic_read (&conn->ksnc_sock_refcount) == 0);
- LASSERT (conn->ksnc_sock == NULL);
- LASSERT (conn->ksnc_route == NULL);
- LASSERT (!conn->ksnc_tx_scheduled);
- LASSERT (!conn->ksnc_rx_scheduled);
- LASSERT (cfs_list_empty(&conn->ksnc_tx_queue));
+ LASSERT (atomic_read (&conn->ksnc_conn_refcount) == 0);
+ LASSERT (atomic_read (&conn->ksnc_sock_refcount) == 0);
+ LASSERT (conn->ksnc_sock == NULL);
+ LASSERT (conn->ksnc_route == NULL);
+ LASSERT (!conn->ksnc_tx_scheduled);
+ LASSERT (!conn->ksnc_rx_scheduled);
+ LASSERT (cfs_list_empty(&conn->ksnc_tx_queue));
/* complete current receive if any */
switch (conn->ksnc_rx_state) {
void
ksocknal_free_buffers (void)
{
- LASSERT (cfs_atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
+ LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
if (ksocknal_data.ksnd_sched_info != NULL) {
struct ksock_sched_info *info;
int i;
int j;
- CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
- cfs_atomic_read (&libcfs_kmemory));
- LASSERT (ksocknal_data.ksnd_nnets == 0);
+ CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
+ atomic_read (&libcfs_kmemory));
+ LASSERT (ksocknal_data.ksnd_nnets == 0);
switch (ksocknal_data.ksnd_init) {
default:
}
CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
- cfs_atomic_read (&libcfs_kmemory));
+ atomic_read (&libcfs_kmemory));
module_put(THIS_MODULE);
}
ksock_route_t *route;
ksock_conn_t *conn;
- CWARN ("Active peer on shutdown: %s, ref %d, scnt %d, "
- "closing %d, accepting %d, err %d, zcookie "LPU64", "
- "txq %d, zc_req %d\n", libcfs_id2str(peer->ksnp_id),
- cfs_atomic_read(&peer->ksnp_refcount),
- peer->ksnp_sharecount, peer->ksnp_closing,
- peer->ksnp_accepting, peer->ksnp_error,
- peer->ksnp_zc_next_cookie,
- !cfs_list_empty(&peer->ksnp_tx_queue),
- !cfs_list_empty(&peer->ksnp_zc_req_list));
-
- cfs_list_for_each (tmp, &peer->ksnp_routes) {
- route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
- CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
- "del %d\n", cfs_atomic_read(&route->ksnr_refcount),
- route->ksnr_scheduled, route->ksnr_connecting,
- route->ksnr_connected, route->ksnr_deleted);
- }
+ CWARN ("Active peer on shutdown: %s, ref %d, scnt %d, "
+ "closing %d, accepting %d, err %d, zcookie "LPU64", "
+ "txq %d, zc_req %d\n", libcfs_id2str(peer->ksnp_id),
+ atomic_read(&peer->ksnp_refcount),
+ peer->ksnp_sharecount, peer->ksnp_closing,
+ peer->ksnp_accepting, peer->ksnp_error,
+ peer->ksnp_zc_next_cookie,
+ !cfs_list_empty(&peer->ksnp_tx_queue),
+ !cfs_list_empty(&peer->ksnp_zc_req_list));
+
+ cfs_list_for_each (tmp, &peer->ksnp_routes) {
+ route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
+ CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
+ "del %d\n", atomic_read(&route->ksnr_refcount),
+ route->ksnr_scheduled, route->ksnr_connecting,
+ route->ksnr_connected, route->ksnr_deleted);
+ }
- cfs_list_for_each (tmp, &peer->ksnp_conns) {
- conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
- CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
- cfs_atomic_read(&conn->ksnc_conn_refcount),
- cfs_atomic_read(&conn->ksnc_sock_refcount),
- conn->ksnc_type, conn->ksnc_closing);
- }
- }
+ cfs_list_for_each (tmp, &peer->ksnp_conns) {
+ conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
+ CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
+ atomic_read(&conn->ksnc_conn_refcount),
+ atomic_read(&conn->ksnc_sock_refcount),
+ conn->ksnc_type, conn->ksnc_closing);
+ }
+ }
read_unlock(&ksocknal_data.ksnd_global_lock);
- return;
+ return;
}
void
/* schedulers information */
struct ksock_sched_info **ksnd_sched_info;
- cfs_atomic_t ksnd_nactive_txs; /* #active txs */
+ atomic_t ksnd_nactive_txs; /* #active txs */
cfs_list_t ksnd_deathrow_conns; /* conns to close: reaper_lock*/
cfs_list_t ksnd_zombie_conns; /* conns to free: reaper_lock */
typedef struct /* transmit packet */
{
- cfs_list_t tx_list; /* queue on conn for transmission etc */
- cfs_list_t tx_zc_list; /* queue on peer for ZC request */
- cfs_atomic_t tx_refcount; /* tx reference count */
- int tx_nob; /* # packet bytes */
- int tx_resid; /* residual bytes */
+ cfs_list_t tx_list; /* queue on conn for transmission etc */
+ cfs_list_t tx_zc_list; /* queue on peer for ZC request */
+ atomic_t tx_refcount; /* tx reference count */
+ int tx_nob; /* # packet bytes */
+ int tx_resid; /* residual bytes */
int tx_niov; /* # packet iovec frags */
struct iovec *tx_iov; /* packet iovec frags */
int tx_nkiov; /* # packet page frags */
struct ksock_peer *ksnc_peer; /* owning peer */
struct ksock_route *ksnc_route; /* owning route */
cfs_list_t ksnc_list; /* stash on peer's conn list */
- cfs_socket_t *ksnc_sock; /* actual socket */
- void *ksnc_saved_data_ready; /* socket's original data_ready() callback */
- void *ksnc_saved_write_space; /* socket's original write_space() callback */
- cfs_atomic_t ksnc_conn_refcount; /* conn refcount */
- cfs_atomic_t ksnc_sock_refcount; /* sock refcount */
- ksock_sched_t *ksnc_scheduler; /* who schedules this connection */
- __u32 ksnc_myipaddr; /* my IP */
+ cfs_socket_t *ksnc_sock; /* actual socket */
+ void *ksnc_saved_data_ready; /* socket's original data_ready() callback */
+ void *ksnc_saved_write_space; /* socket's original write_space() callback */
+ atomic_t ksnc_conn_refcount; /* conn refcount */
+ atomic_t ksnc_sock_refcount; /* sock refcount */
+ ksock_sched_t *ksnc_scheduler; /* who schedules this connection */
+ __u32 ksnc_myipaddr; /* my IP */
__u32 ksnc_ipaddr; /* peer's IP */
int ksnc_port; /* peer's port */
signed int ksnc_type:3; /* type of connection, should be signed value */
cfs_list_t ksnc_tx_list; /* where I enq waiting for output space */
cfs_list_t ksnc_tx_queue; /* packets waiting to be sent */
ksock_tx_t *ksnc_tx_carrier; /* next TX that can carry a LNet message or ZC-ACK */
- cfs_time_t ksnc_tx_deadline; /* when (in jiffies) tx times out */
- int ksnc_tx_bufnob; /* send buffer marker */
- cfs_atomic_t ksnc_tx_nob; /* # bytes queued */
- int ksnc_tx_ready; /* write space */
- int ksnc_tx_scheduled; /* being progressed */
- cfs_time_t ksnc_tx_last_post; /* time stamp of the last posted TX */
+ cfs_time_t ksnc_tx_deadline; /* when (in jiffies) tx times out */
+ int ksnc_tx_bufnob; /* send buffer marker */
+ atomic_t ksnc_tx_nob; /* # bytes queued */
+ int ksnc_tx_ready; /* write space */
+ int ksnc_tx_scheduled; /* being progressed */
+ cfs_time_t ksnc_tx_last_post; /* time stamp of the last posted TX */
} ksock_conn_t;
typedef struct ksock_route
{
- cfs_list_t ksnr_list; /* chain on peer route list */
- cfs_list_t ksnr_connd_list; /* chain on ksnr_connd_routes */
- struct ksock_peer *ksnr_peer; /* owning peer */
- cfs_atomic_t ksnr_refcount; /* # users */
- cfs_time_t ksnr_timeout; /* when (in jiffies) reconnection can happen next */
- cfs_duration_t ksnr_retry_interval; /* how long between retries */
+ cfs_list_t ksnr_list; /* chain on peer route list */
+ cfs_list_t ksnr_connd_list; /* chain on ksnr_connd_routes */
+ struct ksock_peer *ksnr_peer; /* owning peer */
+ atomic_t ksnr_refcount; /* # users */
+ cfs_time_t ksnr_timeout; /* when (in jiffies) reconnection can happen next */
+ cfs_duration_t ksnr_retry_interval; /* how long between retries */
__u32 ksnr_myipaddr; /* my IP */
__u32 ksnr_ipaddr; /* IP address to connect to */
int ksnr_port; /* port to connect to */
typedef struct ksock_peer
{
- cfs_list_t ksnp_list; /* stash on global peer list */
- cfs_time_t ksnp_last_alive; /* when (in jiffies) I was last alive */
- lnet_process_id_t ksnp_id; /* who's on the other end(s) */
- cfs_atomic_t ksnp_refcount; /* # users */
- int ksnp_sharecount; /* lconf usage counter */
- int ksnp_closing; /* being closed */
+ cfs_list_t ksnp_list; /* stash on global peer list */
+ cfs_time_t ksnp_last_alive; /* when (in jiffies) I was last alive */
+ lnet_process_id_t ksnp_id; /* who's on the other end(s) */
+ atomic_t ksnp_refcount; /* # users */
+ int ksnp_sharecount; /* lconf usage counter */
+ int ksnp_closing; /* being closed */
int ksnp_accepting;/* # passive connections pending */
int ksnp_error; /* errno on closing last conn */
__u64 ksnp_zc_next_cookie;/* ZC completion cookie */
static inline void
ksocknal_conn_addref (ksock_conn_t *conn)
{
- LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) > 0);
- cfs_atomic_inc(&conn->ksnc_conn_refcount);
+ LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+ atomic_inc(&conn->ksnc_conn_refcount);
}
extern void ksocknal_queue_zombie_conn (ksock_conn_t *conn);
static inline void
ksocknal_conn_decref (ksock_conn_t *conn)
{
- LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) > 0);
- if (cfs_atomic_dec_and_test(&conn->ksnc_conn_refcount))
- ksocknal_queue_zombie_conn(conn);
+ LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+ if (atomic_dec_and_test(&conn->ksnc_conn_refcount))
+ ksocknal_queue_zombie_conn(conn);
}
static inline int
ksocknal_connsock_addref (ksock_conn_t *conn)
{
- int rc = -ESHUTDOWN;
+ int rc = -ESHUTDOWN;
read_lock(&ksocknal_data.ksnd_global_lock);
if (!conn->ksnc_closing) {
- LASSERT(cfs_atomic_read(&conn->ksnc_sock_refcount) > 0);
- cfs_atomic_inc(&conn->ksnc_sock_refcount);
+ LASSERT(atomic_read(&conn->ksnc_sock_refcount) > 0);
+ atomic_inc(&conn->ksnc_sock_refcount);
rc = 0;
}
read_unlock(&ksocknal_data.ksnd_global_lock);
- return (rc);
+ return (rc);
}
static inline void
ksocknal_connsock_decref (ksock_conn_t *conn)
{
- LASSERT (cfs_atomic_read(&conn->ksnc_sock_refcount) > 0);
- if (cfs_atomic_dec_and_test(&conn->ksnc_sock_refcount)) {
- LASSERT (conn->ksnc_closing);
- libcfs_sock_release(conn->ksnc_sock);
- conn->ksnc_sock = NULL;
- ksocknal_finalize_zcreq(conn);
- }
+ LASSERT (atomic_read(&conn->ksnc_sock_refcount) > 0);
+ if (atomic_dec_and_test(&conn->ksnc_sock_refcount)) {
+ LASSERT (conn->ksnc_closing);
+ libcfs_sock_release(conn->ksnc_sock);
+ conn->ksnc_sock = NULL;
+ ksocknal_finalize_zcreq(conn);
+ }
}
static inline void
ksocknal_tx_addref (ksock_tx_t *tx)
{
- LASSERT (cfs_atomic_read(&tx->tx_refcount) > 0);
- cfs_atomic_inc(&tx->tx_refcount);
+ LASSERT (atomic_read(&tx->tx_refcount) > 0);
+ atomic_inc(&tx->tx_refcount);
}
extern void ksocknal_tx_prep (ksock_conn_t *, ksock_tx_t *tx);
static inline void
ksocknal_tx_decref (ksock_tx_t *tx)
{
- LASSERT (cfs_atomic_read(&tx->tx_refcount) > 0);
- if (cfs_atomic_dec_and_test(&tx->tx_refcount))
- ksocknal_tx_done(NULL, tx);
+ LASSERT (atomic_read(&tx->tx_refcount) > 0);
+ if (atomic_dec_and_test(&tx->tx_refcount))
+ ksocknal_tx_done(NULL, tx);
}
static inline void
ksocknal_route_addref (ksock_route_t *route)
{
- LASSERT (cfs_atomic_read(&route->ksnr_refcount) > 0);
- cfs_atomic_inc(&route->ksnr_refcount);
+ LASSERT (atomic_read(&route->ksnr_refcount) > 0);
+ atomic_inc(&route->ksnr_refcount);
}
extern void ksocknal_destroy_route (ksock_route_t *route);
static inline void
ksocknal_route_decref (ksock_route_t *route)
{
- LASSERT (cfs_atomic_read (&route->ksnr_refcount) > 0);
- if (cfs_atomic_dec_and_test(&route->ksnr_refcount))
- ksocknal_destroy_route (route);
+ LASSERT (atomic_read (&route->ksnr_refcount) > 0);
+ if (atomic_dec_and_test(&route->ksnr_refcount))
+ ksocknal_destroy_route (route);
}
static inline void
ksocknal_peer_addref (ksock_peer_t *peer)
{
- LASSERT (cfs_atomic_read (&peer->ksnp_refcount) > 0);
- cfs_atomic_inc(&peer->ksnp_refcount);
+ LASSERT (atomic_read (&peer->ksnp_refcount) > 0);
+ atomic_inc(&peer->ksnp_refcount);
}
extern void ksocknal_destroy_peer (ksock_peer_t *peer);
static inline void
ksocknal_peer_decref (ksock_peer_t *peer)
{
- LASSERT (cfs_atomic_read (&peer->ksnp_refcount) > 0);
- if (cfs_atomic_dec_and_test(&peer->ksnp_refcount))
- ksocknal_destroy_peer (peer);
+ LASSERT (atomic_read (&peer->ksnp_refcount) > 0);
+ if (atomic_dec_and_test(&peer->ksnp_refcount))
+ ksocknal_destroy_peer (peer);
}
int ksocknal_startup (lnet_ni_t *ni);
if (tx == NULL)
return NULL;
- cfs_atomic_set(&tx->tx_refcount, 1);
- tx->tx_zc_aborted = 0;
- tx->tx_zc_capable = 0;
- tx->tx_zc_checked = 0;
- tx->tx_desc_size = size;
+ atomic_set(&tx->tx_refcount, 1);
+ tx->tx_zc_aborted = 0;
+ tx->tx_zc_capable = 0;
+ tx->tx_zc_checked = 0;
+ tx->tx_desc_size = size;
- cfs_atomic_inc(&ksocknal_data.ksnd_nactive_txs);
+ atomic_inc(&ksocknal_data.ksnd_nactive_txs);
- return tx;
+ return tx;
}
ksock_tx_t *
void
ksocknal_free_tx (ksock_tx_t *tx)
{
- cfs_atomic_dec(&ksocknal_data.ksnd_nactive_txs);
+ atomic_dec(&ksocknal_data.ksnd_nactive_txs);
if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
/* it's a noop tx */
}
/* socket's wmem_queued now includes 'rc' bytes */
- cfs_atomic_sub (rc, &conn->ksnc_tx_nob);
+ atomic_sub (rc, &conn->ksnc_tx_nob);
rc = 0;
} while (tx->tx_resid != 0);
cfs_list_del (&tx->tx_list);
- LASSERT (cfs_atomic_read(&tx->tx_refcount) == 1);
+ LASSERT (atomic_read(&tx->tx_refcount) == 1);
ksocknal_tx_done (ni, tx);
}
}
counter++; /* exponential backoff warnings */
if ((counter & (-counter)) == counter)
CWARN("%u ENOMEM tx %p (%u allocated)\n",
- counter, conn, cfs_atomic_read(&libcfs_kmemory));
+ counter, conn, atomic_read(&libcfs_kmemory));
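The throttle above exploits a two's-complement identity: counter & -counter isolates the least-significant set bit, so the equality holds exactly when counter is a power of two, and the ENOMEM warning fires at counts 1, 2, 4, 8, ... rather than on every failure. A standalone sketch (illustration only; warn_this_time is hypothetical):

/* Illustration only: (v & -v) keeps only the lowest set bit, so the
 * test passes just for powers of two (and zero), spacing warnings at
 * exponentially growing intervals. */
static inline int
warn_this_time(unsigned int counter)
{
	return (counter & (-counter)) == counter;
}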
/* Queue on ksnd_enomem_conns for retry after a timeout */
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
cfs_list_for_each (tmp, &peer->ksnp_conns) {
ksock_conn_t *c = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
- int nob = cfs_atomic_read(&c->ksnc_tx_nob) +
+ int nob = atomic_read(&c->ksnc_tx_nob) +
libcfs_sock_wmem_queued(c->ksnc_sock);
int rc;
{
conn->ksnc_proto->pro_pack(tx);
- cfs_atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
+ atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
ksocknal_conn_addref(conn); /* +1 ref for tx */
tx->tx_conn = conn;
}
}
if (ztx != NULL) {
- cfs_atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
+ atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
cfs_list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
}
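Taken together, these atomic_add()/atomic_sub() calls keep ksnc_tx_nob equal to the bytes queued on a connection but not yet written to its socket; the per-peer loop earlier adds the socket's own wmem_queued to rank a peer's connections by total backlog. That metric, restated as a helper (illustration only; ksocknal_conn_backlog is hypothetical):

/* Illustration only: the backlog figure compared when choosing among a
 * peer's connections: untransmitted queued bytes plus whatever the
 * kernel socket still buffers. */
static inline int
ksocknal_conn_backlog(ksock_conn_t *c)
{
	return atomic_read(&c->ksnc_tx_nob) +
	       libcfs_sock_wmem_queued(c->ksnc_sock);
}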
lnet_process_id_t *id;
int rc;
- LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) > 0);
+ LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
/* NB: sched lock NOT held */
/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
CERROR ("BRW RPC to %s failed with %d\n",
libcfs_id2str(rpc->crpc_dest), rpc->crpc_status);
if (!tsi->tsi_stopping) /* rpc could have been aborted */
- cfs_atomic_inc(&sn->sn_brw_errors);
+ atomic_inc(&sn->sn_brw_errors);
goto out;
}
libcfs_id2str(rpc->crpc_dest), reply->brw_status);
if (reply->brw_status != 0) {
- cfs_atomic_inc(&sn->sn_brw_errors);
+ atomic_inc(&sn->sn_brw_errors);
rpc->crpc_status = -(int)reply->brw_status;
goto out;
}
if (brw_check_bulk(&rpc->crpc_bulk, reqst->brw_flags, magic) != 0) {
CERROR ("Bulk data from %s is corrupted!\n",
libcfs_id2str(rpc->crpc_dest));
- cfs_atomic_inc(&sn->sn_brw_errors);
+ atomic_inc(&sn->sn_brw_errors);
rpc->crpc_status = -EBADMSG;
}
}
/* wake up the transaction thread if I'm the last RPC in the transaction */
- if (cfs_atomic_dec_and_test(&crpc->crp_trans->tas_remaining))
+ if (atomic_dec_and_test(&crpc->crp_trans->tas_remaining))
wake_up(&crpc->crp_trans->tas_waitq);
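tas_remaining is a countdown of RPCs still outstanding in a transaction: posting increments it, each completion decrements it, and whichever RPC brings it to zero wakes the transaction thread parked on tas_waitq. Both halves of the idiom, sketched as standalone helpers (illustration only; the helper names are hypothetical and the real waiter adds timeouts and console locking):

/* Illustration only: the countdown idiom behind tas_remaining. */
static inline void
trans_rpc_posted(lstcon_rpc_trans_t *trans)
{
	atomic_inc(&trans->tas_remaining);	/* one more RPC in flight */
}

static inline void
trans_rpc_done(lstcon_rpc_trans_t *trans)
{
	if (atomic_dec_and_test(&trans->tas_remaining))
		wake_up(&trans->tas_waitq);	/* last RPC wakes the waiter */
}

static inline void
trans_wait(lstcon_rpc_trans_t *trans)
{
	wait_event(trans->tas_waitq,
		   atomic_read(&trans->tas_remaining) == 0);
}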
spin_unlock(&rpc->crpc_lock);
crpc->crp_embedded = embedded;
CFS_INIT_LIST_HEAD(&crpc->crp_link);
- cfs_atomic_inc(&console_session.ses_rpc_counter);
+ atomic_inc(&console_session.ses_rpc_counter);
return 0;
}
}
/* RPC is not alive now */
- cfs_atomic_dec(&console_session.ses_rpc_counter);
+ atomic_dec(&console_session.ses_rpc_counter);
}
void
LASSERT (trans != NULL);
- cfs_atomic_inc(&trans->tas_remaining);
+ atomic_inc(&trans->tas_remaining);
crpc->crp_posted = 1;
sfw_post_rpc(crpc->crp_rpc);
cfs_list_add_tail(&trans->tas_link, &console_session.ses_trans_list);
CFS_INIT_LIST_HEAD(&trans->tas_rpcs_list);
- cfs_atomic_set(&trans->tas_remaining, 0);
+ atomic_set(&trans->tas_remaining, 0);
init_waitqueue_head(&trans->tas_waitq);
spin_lock(&console_session.ses_rpc_lock);
!cfs_list_empty(&trans->tas_olink)) /* Not an end session RPC */
return 1;
- return (cfs_atomic_read(&trans->tas_remaining) == 0) ? 1: 0;
+ return (atomic_read(&trans->tas_remaining) == 0) ? 1: 0;
}
int
spin_unlock(&rpc->crpc_lock);
- cfs_atomic_dec(&trans->tas_remaining);
+ atomic_dec(&trans->tas_remaining);
}
- LASSERT (cfs_atomic_read(&trans->tas_remaining) == 0);
+ LASSERT (atomic_read(&trans->tas_remaining) == 0);
cfs_list_del(&trans->tas_link);
if (!cfs_list_empty(&trans->tas_olink))
int rc;
LASSERT (cfs_list_empty(&console_session.ses_rpc_freelist));
- LASSERT (cfs_atomic_read(&console_session.ses_rpc_counter) == 0);
+ LASSERT (atomic_read(&console_session.ses_rpc_counter) == 0);
rc = lstcon_rpc_trans_prep(NULL, LST_TRANS_SESPING,
&console_session.ses_ping);
spin_lock(&console_session.ses_rpc_lock);
- lst_wait_until((cfs_atomic_read(&console_session.ses_rpc_counter) == 0),
+ lst_wait_until((atomic_read(&console_session.ses_rpc_counter) == 0),
console_session.ses_rpc_lock,
"Network is not accessable or target is down, "
"waiting for %d console RPCs to being recycled\n",
- cfs_atomic_read(&console_session.ses_rpc_counter));
+ atomic_read(&console_session.ses_rpc_counter));
cfs_list_add(&zlist, &console_session.ses_rpc_freelist);
cfs_list_del_init(&console_session.ses_rpc_freelist);
console_session.ses_ping = NULL;
spin_lock_init(&console_session.ses_rpc_lock);
- cfs_atomic_set(&console_session.ses_rpc_counter, 0);
+ atomic_set(&console_session.ses_rpc_counter, 0);
CFS_INIT_LIST_HEAD(&console_session.ses_rpc_freelist);
return 0;
lstcon_rpc_module_fini(void)
{
LASSERT (cfs_list_empty(&console_session.ses_rpc_freelist));
- LASSERT (cfs_atomic_read(&console_session.ses_rpc_counter) == 0);
+ LASSERT (atomic_read(&console_session.ses_rpc_counter) == 0);
}
#endif
/* test features mask */
unsigned tas_features;
wait_queue_head_t tas_waitq; /* wait queue head */
- cfs_atomic_t tas_remaining; /* # of un-scheduled rpcs */
+ atomic_t tas_remaining; /* # of un-scheduled rpcs */
cfs_list_t tas_rpcs_list; /* queued requests */
} lstcon_rpc_trans_t;
cfs_list_t *ses_ndl_hash; /* hash table of nodes */
spinlock_t ses_rpc_lock; /* serialize */
- cfs_atomic_t ses_rpc_counter;/* # of initialized RPCs */
- cfs_list_t ses_rpc_freelist; /* idle console rpc */
+ atomic_t ses_rpc_counter; /* # of initialized RPCs */
+ cfs_list_t ses_rpc_freelist; /* idle console rpc */
} lstcon_session_t; /* session descriptor */
extern lstcon_session_t console_session;
__swab64s(&(lc).route_length); \
} while (0)
-#define sfw_test_active(t) (cfs_atomic_read(&(t)->tsi_nactive) != 0)
-#define sfw_batch_active(b) (cfs_atomic_read(&(b)->bat_nactive) != 0)
+#define sfw_test_active(t) (atomic_read(&(t)->tsi_nactive) != 0)
+#define sfw_batch_active(b) (atomic_read(&(b)->bat_nactive) != 0)
struct smoketest_framework {
- cfs_list_t fw_zombie_rpcs; /* RPCs to be recycled */
- cfs_list_t fw_zombie_sessions; /* stopping sessions */
- cfs_list_t fw_tests; /* registered test cases */
- cfs_atomic_t fw_nzombies; /* # zombie sessions */
+ cfs_list_t fw_zombie_rpcs; /* RPCs to be recycled */
+ cfs_list_t fw_zombie_sessions; /* stopping sessions */
+ cfs_list_t fw_tests; /* registered test cases */
+ atomic_t fw_nzombies; /* # zombie sessions */
spinlock_t fw_lock; /* serialise */
sfw_session_t *fw_session; /* _the_ session */
int fw_shuttingdown; /* shutdown in progress */
LASSERT (!sn->sn_timer_active);
sfw_data.fw_session = NULL;
- cfs_atomic_inc(&sfw_data.fw_nzombies);
+ atomic_inc(&sfw_data.fw_nzombies);
cfs_list_add(&sn->sn_list, &sfw_data.fw_zombie_sessions);
spin_unlock(&sfw_data.fw_lock);
memset(sn, 0, sizeof(sfw_session_t));
CFS_INIT_LIST_HEAD(&sn->sn_list);
CFS_INIT_LIST_HEAD(&sn->sn_batches);
- cfs_atomic_set(&sn->sn_refcount, 1); /* +1 for caller */
- cfs_atomic_set(&sn->sn_brw_errors, 0);
- cfs_atomic_set(&sn->sn_ping_errors, 0);
+ atomic_set(&sn->sn_refcount, 1); /* +1 for caller */
+ atomic_set(&sn->sn_brw_errors, 0);
+ atomic_set(&sn->sn_ping_errors, 0);
strlcpy(&sn->sn_name[0], name, sizeof(sn->sn_name));
sn->sn_timer_active = 0;
{
LASSERT (rpc->crpc_bulk.bk_niov == 0);
LASSERT (cfs_list_empty(&rpc->crpc_list));
- LASSERT (cfs_atomic_read(&rpc->crpc_refcount) == 0);
+ LASSERT (atomic_read(&rpc->crpc_refcount) == 0);
#ifndef __KERNEL__
LASSERT (rpc->crpc_bulk.bk_pages == NULL);
#endif
bat->bat_error = 0;
bat->bat_session = sn;
bat->bat_id = bid;
- cfs_atomic_set(&bat->bat_nactive, 0);
+ atomic_set(&bat->bat_nactive, 0);
CFS_INIT_LIST_HEAD(&bat->bat_tests);
cfs_list_add_tail(&bat->bat_list, &sn->sn_batches);
sn->sn_started), &tv);
cnt->running_ms = (__u32)(tv.tv_sec * 1000 + tv.tv_usec / 1000);
- cnt->brw_errors = cfs_atomic_read(&sn->sn_brw_errors);
- cnt->ping_errors = cfs_atomic_read(&sn->sn_ping_errors);
- cnt->zombie_sessions = cfs_atomic_read(&sfw_data.fw_nzombies);
+ cnt->brw_errors = atomic_read(&sn->sn_brw_errors);
+ cnt->ping_errors = atomic_read(&sn->sn_ping_errors);
+ cnt->zombie_sessions = atomic_read(&sfw_data.fw_nzombies);
cnt->active_batches = 0;
cfs_list_for_each_entry_typed (bat, &sn->sn_batches,
sfw_batch_t, bat_list) {
- if (cfs_atomic_read(&bat->bat_nactive) > 0)
+ if (atomic_read(&bat->bat_nactive) > 0)
cnt->active_batches++;
}
reply->mksn_timeout = sn->sn_timeout;
if (sfw_sid_equal(request->mksn_sid, sn->sn_id)) {
- cfs_atomic_inc(&sn->sn_refcount);
+ atomic_inc(&sn->sn_refcount);
return 0;
}
return 0;
}
- if (!cfs_atomic_dec_and_test(&sn->sn_refcount)) {
+ if (!atomic_dec_and_test(&sn->sn_refcount)) {
reply->rmsn_status = 0;
return 0;
}
}
LIBCFS_FREE(sn, sizeof(*sn));
- cfs_atomic_dec(&sfw_data.fw_nzombies);
+ atomic_dec(&sfw_data.fw_nzombies);
return;
}
memset(tsi, 0, sizeof(*tsi));
spin_lock_init(&tsi->tsi_lock);
- cfs_atomic_set(&tsi->tsi_nactive, 0);
+ atomic_set(&tsi->tsi_nactive, 0);
CFS_INIT_LIST_HEAD(&tsi->tsi_units);
CFS_INIT_LIST_HEAD(&tsi->tsi_free_rpcs);
CFS_INIT_LIST_HEAD(&tsi->tsi_active_rpcs);
LASSERT (sfw_test_active(tsi));
- if (!cfs_atomic_dec_and_test(&tsi->tsi_nactive))
+ if (!atomic_dec_and_test(&tsi->tsi_nactive))
return;
/* the test instance is done */
spin_lock(&sfw_data.fw_lock);
- if (!cfs_atomic_dec_and_test(&tsb->bat_nactive) ||/* tsb still active */
+ if (!atomic_dec_and_test(&tsb->bat_nactive) || /* tsb still active */
sn == sfw_data.fw_session) { /* sn also active */
spin_unlock(&sfw_data.fw_lock);
return;
if (sfw_batch_active(tsb)) {
CDEBUG(D_NET, "Batch already active: "LPU64" (%d)\n",
- tsb->bat_id.bat_id, cfs_atomic_read(&tsb->bat_nactive));
+ tsb->bat_id.bat_id, atomic_read(&tsb->bat_nactive));
return 0;
}
LASSERT (!tsi->tsi_stopping);
LASSERT (!sfw_test_active(tsi));
- cfs_atomic_inc(&tsb->bat_nactive);
+ atomic_inc(&tsb->bat_nactive);
cfs_list_for_each_entry_typed (tsu, &tsi->tsi_units,
sfw_test_unit_t, tsu_list) {
- cfs_atomic_inc(&tsi->tsi_nactive);
+ atomic_inc(&tsi->tsi_nactive);
tsu->tsu_loop = tsi->tsi_loop;
wi = &tsu->tsu_worker;
swi_init_workitem(wi, tsu, sfw_run_test,
return -EINVAL;
if (testidx == 0) {
- reply->bar_active = cfs_atomic_read(&tsb->bat_nactive);
+ reply->bar_active = atomic_read(&tsb->bat_nactive);
return 0;
}
if (testidx-- > 1)
continue;
- reply->bar_active = cfs_atomic_read(&tsi->tsi_nactive);
+ reply->bar_active = atomic_read(&tsi->tsi_nactive);
return 0;
}
void
sfw_abort_rpc (srpc_client_rpc_t *rpc)
{
- LASSERT(cfs_atomic_read(&rpc->crpc_refcount) > 0);
+ LASSERT(atomic_read(&rpc->crpc_refcount) > 0);
LASSERT(rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
spin_lock(&rpc->crpc_lock);
sfw_data.fw_session = NULL;
sfw_data.fw_active_srpc = NULL;
spin_lock_init(&sfw_data.fw_lock);
- cfs_atomic_set(&sfw_data.fw_nzombies, 0);
+ atomic_set(&sfw_data.fw_nzombies, 0);
CFS_INIT_LIST_HEAD(&sfw_data.fw_tests);
CFS_INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
CFS_INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
"waiting for session timer to explode.\n");
sfw_deactivate_session();
- lst_wait_until(cfs_atomic_read(&sfw_data.fw_nzombies) == 0,
+ lst_wait_until(atomic_read(&sfw_data.fw_nzombies) == 0,
sfw_data.fw_lock,
"waiting for %d zombie sessions to die.\n",
- cfs_atomic_read(&sfw_data.fw_nzombies));
+ atomic_read(&sfw_data.fw_nzombies));
spin_unlock(&sfw_data.fw_lock);
LASSERT (sn != NULL);
LASSERT (tsi->tsi_is_client);
- errors = cfs_atomic_read(&sn->sn_ping_errors);
+ errors = atomic_read(&sn->sn_ping_errors);
if (errors)
CWARN ("%d pings have failed.\n", errors);
else
if (rpc->crpc_status != 0) {
if (!tsi->tsi_stopping) /* rpc could have been aborted */
- cfs_atomic_inc(&sn->sn_ping_errors);
+ atomic_inc(&sn->sn_ping_errors);
CERROR ("Unable to ping %s (%d): %d\n",
libcfs_id2str(rpc->crpc_dest),
reqst->pnr_seq, rpc->crpc_status);
if (reply->pnr_magic != LST_PING_TEST_MAGIC) {
rpc->crpc_status = -EBADMSG;
- cfs_atomic_inc(&sn->sn_ping_errors);
+ atomic_inc(&sn->sn_ping_errors);
CERROR ("Bad magic %u from %s, %u expected.\n",
reply->pnr_magic, libcfs_id2str(rpc->crpc_dest),
LST_PING_TEST_MAGIC);
if (reply->pnr_seq != reqst->pnr_seq) {
rpc->crpc_status = -EBADMSG;
- cfs_atomic_inc(&sn->sn_ping_errors);
+ atomic_inc(&sn->sn_ping_errors);
CERROR ("Bad seq %u from %s, %u expected.\n",
reply->pnr_seq, libcfs_id2str(rpc->crpc_dest),
reqst->pnr_seq);
cfs_list_t crpc_list; /* chain on user's lists */
spinlock_t crpc_lock; /* serialize */
int crpc_service;
- cfs_atomic_t crpc_refcount;
+ atomic_t crpc_refcount;
int crpc_timeout; /* # seconds to wait for reply */
stt_timer_t crpc_timer;
swi_workitem_t crpc_wi;
do { \
CDEBUG(D_NET, "RPC[%p] -> %s (%d)++\n", \
(rpc), libcfs_id2str((rpc)->crpc_dest), \
- cfs_atomic_read(&(rpc)->crpc_refcount)); \
- LASSERT(cfs_atomic_read(&(rpc)->crpc_refcount) > 0); \
- cfs_atomic_inc(&(rpc)->crpc_refcount); \
+ atomic_read(&(rpc)->crpc_refcount)); \
+ LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \
+ atomic_inc(&(rpc)->crpc_refcount); \
} while (0)
#define srpc_client_rpc_decref(rpc) \
do { \
CDEBUG(D_NET, "RPC[%p] -> %s (%d)--\n", \
(rpc), libcfs_id2str((rpc)->crpc_dest), \
- cfs_atomic_read(&(rpc)->crpc_refcount)); \
- LASSERT(cfs_atomic_read(&(rpc)->crpc_refcount) > 0); \
- if (cfs_atomic_dec_and_test(&(rpc)->crpc_refcount)) \
+ atomic_read(&(rpc)->crpc_refcount)); \
+ LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \
+ if (atomic_dec_and_test(&(rpc)->crpc_refcount)) \
srpc_destroy_client_rpc(rpc); \
} while (0)
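Like the inline helpers earlier, these macros give take/drop semantics, with the CDEBUG line tracing each count change; the final decref frees the RPC through srpc_destroy_client_rpc(). A typical lifetime (illustration only; example_rpc_lifetime is hypothetical):

/* Illustration only: rpc arrives carrying the creator's count of 1,
 * set with atomic_set(&rpc->crpc_refcount, 1) at creation. */
static void
example_rpc_lifetime(srpc_client_rpc_t *rpc)
{
	srpc_client_rpc_addref(rpc);	/* share rpc with a second user */
	srpc_client_rpc_decref(rpc);	/* second user is finished */
	srpc_client_rpc_decref(rpc);	/* creator's ref: count hits zero and
					 * srpc_destroy_client_rpc() runs */
}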
stt_timer_t sn_timer;
cfs_list_t sn_batches; /* list of batches */
char sn_name[LST_NAME_SIZE];
- cfs_atomic_t sn_refcount;
- cfs_atomic_t sn_brw_errors;
- cfs_atomic_t sn_ping_errors;
+ atomic_t sn_refcount;
+ atomic_t sn_brw_errors;
+ atomic_t sn_ping_errors;
cfs_time_t sn_started;
} sfw_session_t;
lst_bid_t bat_id; /* batch id */
int bat_error; /* error code of batch */
sfw_session_t *bat_session; /* batch's session */
- cfs_atomic_t bat_nactive; /* # of active tests */
+ atomic_t bat_nactive; /* # of active tests */
cfs_list_t bat_tests; /* test instances */
} sfw_batch_t;
/* status of test instance */
spinlock_t tsi_lock; /* serialize */
unsigned int tsi_stopping:1; /* test is stopping */
- cfs_atomic_t tsi_nactive; /* # of active test unit */
- cfs_list_t tsi_units; /* test units */
- cfs_list_t tsi_free_rpcs; /* free rpcs */
- cfs_list_t tsi_active_rpcs; /* active rpcs */
+ atomic_t tsi_nactive; /* # of active test unit */
+ cfs_list_t tsi_units; /* test units */
+ cfs_list_t tsi_free_rpcs; /* free rpcs */
+ cfs_list_t tsi_active_rpcs; /* active rpcs */
union {
test_ping_req_t ping; /* ping parameter */
static inline void
srpc_destroy_client_rpc (srpc_client_rpc_t *rpc)
{
- LASSERT (rpc != NULL);
- LASSERT (!srpc_event_pending(rpc));
- LASSERT (cfs_atomic_read(&rpc->crpc_refcount) == 0);
+ LASSERT (rpc != NULL);
+ LASSERT (!srpc_event_pending(rpc));
+ LASSERT (atomic_read(&rpc->crpc_refcount) == 0);
#ifndef __KERNEL__
- LASSERT (rpc->crpc_bulk.bk_pages == NULL);
+ LASSERT (rpc->crpc_bulk.bk_pages == NULL);
#endif
- if (rpc->crpc_fini == NULL) {
- LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
- } else {
- (*rpc->crpc_fini) (rpc);
- }
+ if (rpc->crpc_fini == NULL) {
+ LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
+ } else {
+ (*rpc->crpc_fini) (rpc);
+ }
- return;
+ return;
}
static inline void
swi_init_workitem(&rpc->crpc_wi, rpc, srpc_send_rpc,
lst_sched_test[lnet_cpt_of_nid(peer.nid)]);
spin_lock_init(&rpc->crpc_lock);
- cfs_atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */
+ atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */
- rpc->crpc_dest = peer;
- rpc->crpc_priv = priv;
+ rpc->crpc_dest = peer;
+ rpc->crpc_priv = priv;
rpc->crpc_service = service;
rpc->crpc_bulk.bk_len = bulklen;
rpc->crpc_bulk.bk_niov = nbulkiov;