int decref_flag = 0;
int killall_flag = 0;
void *rx_lnetmsg = NULL;
- CFS_LIST_HEAD (zombie_txs);
+ struct list_head zombie_txs = LIST_HEAD_INIT(zombie_txs);
if (peer == NULL) /* nothing to tear */
return;
}
/* we cannot finalize txs right now (bug #18844) */
- cfs_list_splice_init(&conn->uc_tx_list, &zombie_txs);
+ list_splice_init(&conn->uc_tx_list, &zombie_txs);
peer->up_conns[idx] = NULL;
conn->uc_peer = NULL;
for (i = 0; i < N_CONN_TYPES; i++)
LASSERT (peer->up_conns[i] == NULL);
- cfs_list_del(&peer->up_list);
+ list_del(&peer->up_list);
if (peer->up_errored &&
(peer->up_peerid.pid & LNET_PID_USERFLAG) == 0)
conn->uc_state = UC_RECEIVING_HELLO;
conn->uc_pt_idx = usocklnd_ip2pt_idx(peer_ip);
conn->uc_ni = ni;
- CFS_INIT_LIST_HEAD (&conn->uc_tx_list);
- CFS_INIT_LIST_HEAD (&conn->uc_zcack_list);
+ INIT_LIST_HEAD(&conn->uc_tx_list);
+ INIT_LIST_HEAD(&conn->uc_zcack_list);
pthread_mutex_init(&conn->uc_lock, NULL);
mt_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
conn->uc_peer = peer;
usocklnd_peer_addref(peer);
- CFS_INIT_LIST_HEAD (&conn->uc_tx_list);
- CFS_INIT_LIST_HEAD (&conn->uc_zcack_list);
+ INIT_LIST_HEAD(&conn->uc_tx_list);
+ INIT_LIST_HEAD(&conn->uc_zcack_list);
pthread_mutex_init(&conn->uc_lock, NULL);
mt_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
}
void
-usocklnd_destroy_txlist(lnet_ni_t *ni, cfs_list_t *txlist)
+usocklnd_destroy_txlist(lnet_ni_t *ni, struct list_head *txlist)
{
usock_tx_t *tx;
- while (!cfs_list_empty(txlist)) {
- tx = cfs_list_entry(txlist->next, usock_tx_t, tx_list);
- cfs_list_del(&tx->tx_list);
+ while (!list_empty(txlist)) {
+ tx = list_entry(txlist->next, usock_tx_t, tx_list);
+ list_del(&tx->tx_list);
usocklnd_destroy_tx(ni, tx);
}
}
/*
 * Drain a zero-copy-ACK list: unlink each queued usock_zc_ack_t and free it.
 * NOTE(review): the '-'/'+' prefixed lines below are unresolved diff markers
 * from the cfs_list_* -> list_head API migration; the '+' lines are the
 * intended code. The function's closing brace lies beyond this fragment —
 * confirm against the full file before resolving.
 */
void
-usocklnd_destroy_zcack_list(cfs_list_t *zcack_list)
+usocklnd_destroy_zcack_list(struct list_head *zcack_list)
{
usock_zc_ack_t *zcack;
- while (!cfs_list_empty(zcack_list)) {
- zcack = cfs_list_entry(zcack_list->next, usock_zc_ack_t,
+ while (!list_empty(zcack_list)) {
+ zcack = list_entry(zcack_list->next, usock_zc_ack_t,
zc_list);
- cfs_list_del(&zcack->zc_list);
+ list_del(&zcack->zc_list);
LIBCFS_FREE (zcack, sizeof(*zcack));
}
lnet_finalize(conn->uc_peer->up_ni, conn->uc_rx_lnetmsg, -EIO);
}
- if (!cfs_list_empty(&conn->uc_tx_list)) {
+ if (!list_empty(&conn->uc_tx_list)) {
LASSERT (conn->uc_peer != NULL);
usocklnd_destroy_txlist(conn->uc_peer->up_ni, &conn->uc_tx_list);
}
usock_peer_t *
usocklnd_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id)
{
- cfs_list_t *peer_list = usocklnd_nid2peerlist(id.nid);
- cfs_list_t *tmp;
+ struct list_head *peer_list = usocklnd_nid2peerlist(id.nid);
+ struct list_head *tmp;
usock_peer_t *peer;
- cfs_list_for_each (tmp, peer_list) {
+ list_for_each(tmp, peer_list) {
- peer = cfs_list_entry (tmp, usock_peer_t, up_list);
+ peer = list_entry(tmp, usock_peer_t, up_list);
if (peer->up_ni != ni)
continue;
/* peer table will take 1 of my refs on peer */
usocklnd_peer_addref(peer);
- cfs_list_add_tail (&peer->up_list,
+ list_add_tail(&peer->up_list,
usocklnd_nid2peerlist(id.nid));
} else {
usocklnd_peer_decref(peer); /* should destroy peer */
usocklnd_enqueue_zcack(usock_conn_t *conn, usock_zc_ack_t *zc_ack)
{
if (conn->uc_state == UC_READY &&
- cfs_list_empty(&conn->uc_tx_list) &&
- cfs_list_empty(&conn->uc_zcack_list) &&
+ list_empty(&conn->uc_tx_list) &&
+ list_empty(&conn->uc_zcack_list) &&
!conn->uc_sending) {
int rc = usocklnd_add_pollrequest(conn, POLL_TX_SET_REQUEST,
POLLOUT);
return rc;
}
- cfs_list_add_tail(&zc_ack->zc_list, &conn->uc_zcack_list);
+ list_add_tail(&zc_ack->zc_list, &conn->uc_zcack_list);
return 0;
}
int *send_immediately)
{
if (conn->uc_state == UC_READY &&
- cfs_list_empty(&conn->uc_tx_list) &&
- cfs_list_empty(&conn->uc_zcack_list) &&
+ list_empty(&conn->uc_tx_list) &&
+ list_empty(&conn->uc_zcack_list) &&
!conn->uc_sending) {
conn->uc_sending = 1;
*send_immediately = 1;
}
*send_immediately = 0;
- cfs_list_add_tail(&tx->tx_list, &conn->uc_tx_list);
+ list_add_tail(&tx->tx_list, &conn->uc_tx_list);
}
/* Safely create new conn if needed. Save result in *connp.