if (tx == NULL)
return NULL;
- cfs_atomic_set(&tx->tx_refcount, 1);
- tx->tx_zc_aborted = 0;
- tx->tx_zc_capable = 0;
- tx->tx_zc_checked = 0;
- tx->tx_desc_size = size;
+ atomic_set(&tx->tx_refcount, 1);
+ tx->tx_zc_aborted = 0;
+ tx->tx_zc_capable = 0;
+ tx->tx_zc_checked = 0;
+ tx->tx_desc_size = size;
- cfs_atomic_inc(&ksocknal_data.ksnd_nactive_txs);
+ atomic_inc(&ksocknal_data.ksnd_nactive_txs);
- return tx;
+ return tx;
}
ksock_tx_t *
void
ksocknal_free_tx (ksock_tx_t *tx)
{
- cfs_atomic_dec(&ksocknal_data.ksnd_nactive_txs);
+ atomic_dec(&ksocknal_data.ksnd_nactive_txs);
if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
/* it's a noop tx */
}
/* socket's wmem_queued now includes 'rc' bytes */
- cfs_atomic_sub (rc, &conn->ksnc_tx_nob);
+ atomic_sub (rc, &conn->ksnc_tx_nob);
rc = 0;
} while (tx->tx_resid != 0);
cfs_list_del (&tx->tx_list);
- LASSERT (cfs_atomic_read(&tx->tx_refcount) == 1);
+ LASSERT (atomic_read(&tx->tx_refcount) == 1);
ksocknal_tx_done (ni, tx);
}
}
counter++; /* exponential backoff warnings */
if ((counter & (-counter)) == counter)
CWARN("%u ENOMEM tx %p (%u allocated)\n",
- counter, conn, cfs_atomic_read(&libcfs_kmemory));
+ counter, conn, atomic_read(&libcfs_kmemory));
/* Queue on ksnd_enomem_conns for retry after a timeout */
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
cfs_list_for_each (tmp, &peer->ksnp_conns) {
ksock_conn_t *c = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
- int nob = cfs_atomic_read(&c->ksnc_tx_nob) +
+ int nob = atomic_read(&c->ksnc_tx_nob) +
libcfs_sock_wmem_queued(c->ksnc_sock);
int rc;
{
conn->ksnc_proto->pro_pack(tx);
- cfs_atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
+ atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
ksocknal_conn_addref(conn); /* +1 ref for tx */
tx->tx_conn = conn;
}
}
if (ztx != NULL) {
- cfs_atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
+ atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
cfs_list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
}
lnet_process_id_t *id;
int rc;
- LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) > 0);
+ LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
/* NB: sched lock NOT held */
/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */