CFS_INIT_LIST_HEAD (&peer->ksnp_routes);
CFS_INIT_LIST_HEAD (&peer->ksnp_tx_queue);
CFS_INIT_LIST_HEAD (&peer->ksnp_zc_req_list);
- cfs_spin_lock_init(&peer->ksnp_lock);
+ spin_lock_init(&peer->ksnp_lock);
- cfs_spin_lock_bh (&net->ksnn_lock);
+ spin_lock_bh(&net->ksnn_lock);
- if (net->ksnn_shutdown) {
- cfs_spin_unlock_bh (&net->ksnn_lock);
+ if (net->ksnn_shutdown) {
+ spin_unlock_bh(&net->ksnn_lock);
- LIBCFS_FREE(peer, sizeof(*peer));
- CERROR("Can't create peer: network shutdown\n");
- return -ESHUTDOWN;
- }
+ LIBCFS_FREE(peer, sizeof(*peer));
+ CERROR("Can't create peer: network shutdown\n");
+ return -ESHUTDOWN;
+ }
- net->ksnn_npeers++;
+ net->ksnn_npeers++;
- cfs_spin_unlock_bh (&net->ksnn_lock);
+ spin_unlock_bh(&net->ksnn_lock);
- *peerp = peer;
- return 0;
+ *peerp = peer;
+ return 0;
}
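/*
 * A minimal sketch of the pattern the hunk above converts, with
 * hypothetical my_net/my_peer types (illustrations, not the socklnd
 * API): allocate outside the lock, then take the BH-disabling spinlock
 * only to test the shutdown flag and bump the peer count.
 */
#include <linux/spinlock.h>
#include <linux/slab.h>

struct my_net  { spinlock_t lock; int shutdown; int npeers; };
struct my_peer { int state; };

static int my_create_peer(struct my_net *net, struct my_peer **peerp)
{
	struct my_peer *peer = kzalloc(sizeof(*peer), GFP_KERNEL);

	if (peer == NULL)
		return -ENOMEM;

	spin_lock_bh(&net->lock);	/* serialise with the shutdown path */
	if (net->shutdown) {		/* too late: refuse new peers */
		spin_unlock_bh(&net->lock);
		kfree(peer);
		return -ESHUTDOWN;
	}
	net->npeers++;			/* counted while the lock is held */
	spin_unlock_bh(&net->lock);

	*peerp = peer;
	return 0;
}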
void
* until they are destroyed, so we can be assured that _all_ state to
* do with this peer has been cleaned up when its refcount drops to
* zero. */
- cfs_spin_lock_bh (&net->ksnn_lock);
- net->ksnn_npeers--;
- cfs_spin_unlock_bh (&net->ksnn_lock);
+ spin_lock_bh(&net->ksnn_lock);
+ net->ksnn_npeers--;
+ spin_unlock_bh(&net->ksnn_lock);
}
ksock_peer_t *
{
ksock_peer_t *peer;
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
- peer = ksocknal_find_peer_locked (ni, id);
- if (peer != NULL) /* +1 ref for caller? */
- ksocknal_peer_addref(peer);
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
+ peer = ksocknal_find_peer_locked(ni, id);
+ if (peer != NULL) /* +1 ref for caller? */
+ ksocknal_peer_addref(peer);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
return (peer);
}
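/*
 * A minimal sketch of the read-side lookup above, with hypothetical
 * names (my_peer, my_find_peer are illustrations, not socklnd API).
 * The caller's reference must be taken while the read lock is still
 * held; otherwise the peer could be unlinked and freed between the
 * lookup and the addref.
 */
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/atomic.h>

struct my_peer { struct list_head list; atomic_t refcount; int id; };

static DEFINE_RWLOCK(my_table_lock);
static LIST_HEAD(my_peer_list);

static struct my_peer *my_find_peer(int id)
{
	struct my_peer *peer;

	read_lock(&my_table_lock);
	list_for_each_entry(peer, &my_peer_list, list) {
		if (peer->id == id) {
			atomic_inc(&peer->refcount);	/* +1 ref for caller */
			read_unlock(&my_table_lock);
			return peer;
		}
	}
	read_unlock(&my_table_lock);
	return NULL;
}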
int j;
int rc = -ENOENT;
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
}
}
out:
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
return (rc);
}
return (-ENOMEM);
}
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
/* always called with a ref on ni, so shutdown can't have started */
LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
route2->ksnr_share_count++;
}
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
return (0);
}
int i;
int rc = -ENOENT;
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
if (id.nid != LNET_NID_ANY)
lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
}
}
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_txlist_done(ni, &zombies, 1);
cfs_list_t *ctmp;
int i;
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
cfs_list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
conn = cfs_list_entry (ctmp, ksock_conn_t,
ksnc_list);
ksocknal_conn_addref(conn);
- cfs_read_unlock (&ksocknal_data. \
-                  ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
return (conn);
}
}
}
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
return (NULL);
}
int i;
int nip;
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
nip = net->ksnn_ninterfaces;
LASSERT (nip <= LNET_MAX_INTERFACES);
/* Only offer interfaces for additional connections if I have
* more than one. */
if (nip < 2) {
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
return 0;
}
LASSERT (ipaddrs[i] != 0);
}
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
return (nip);
}
int
ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
{
- cfs_rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
+ rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
ksock_net_t *net = peer->ksnp_ni->ni_data;
ksock_interface_t *iface;
ksock_interface_t *best_iface;
/* Also note that I'm not going to return more than n_peerips
* interfaces, even if I have more myself */
- cfs_write_lock_bh (global_lock);
+ write_lock_bh(global_lock);
LASSERT (n_peerips <= LNET_MAX_INTERFACES);
LASSERT (net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
/* Overwrite input peer IP addresses */
memcpy(peerips, peer->ksnp_passive_ips, n_ips * sizeof(*peerips));
- cfs_write_unlock_bh (global_lock);
+ write_unlock_bh(global_lock);
return (n_ips);
}
__u32 *peer_ipaddrs, int npeer_ipaddrs)
{
ksock_route_t *newroute = NULL;
- cfs_rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
+ rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
lnet_ni_t *ni = peer->ksnp_ni;
ksock_net_t *net = ni->ni_data;
cfs_list_t *rtmp;
* expecting to be dealing with small numbers of interfaces, so the
* O(n**3)-ness here shouldn't matter */
- cfs_write_lock_bh (global_lock);
+ write_lock_bh(global_lock);
if (net->ksnn_ninterfaces < 2) {
/* Only create additional connections
* if I have > 1 interface */
- cfs_write_unlock_bh (global_lock);
+ write_unlock_bh(global_lock);
return;
}
if (newroute != NULL) {
newroute->ksnr_ipaddr = peer_ipaddrs[i];
} else {
- cfs_write_unlock_bh (global_lock);
+ write_unlock_bh(global_lock);
newroute = ksocknal_create_route(peer_ipaddrs[i], port);
if (newroute == NULL)
return;
- cfs_write_lock_bh (global_lock);
+ write_lock_bh(global_lock);
}
if (peer->ksnp_closing) {
newroute = NULL;
}
- cfs_write_unlock_bh (global_lock);
+ write_unlock_bh(global_lock);
if (newroute != NULL)
ksocknal_route_decref(newroute);
}
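/*
 * The route-creation hunk above drops the write lock around
 * ksocknal_create_route() and retakes it afterwards, then rechecks
 * ksnp_closing: allocation can sleep, and sleeping under a spinning
 * lock is forbidden, but any state observed before the drop may have
 * changed by the time the lock is reacquired. A sketch of that shape
 * with hypothetical names:
 */
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_route { u32 addr; int port; };

static DEFINE_RWLOCK(my_global_lock);
static bool my_peer_closing;

static struct my_route *my_add_route(u32 addr, int port)
{
	struct my_route *route;

	write_lock_bh(&my_global_lock);
	/* ... scan for an existing route under the lock ... */
	write_unlock_bh(&my_global_lock);	/* drop: kzalloc may sleep */

	route = kzalloc(sizeof(*route), GFP_KERNEL);
	if (route == NULL)
		return NULL;
	route->addr = addr;
	route->port = port;

	write_lock_bh(&my_global_lock);
	if (my_peer_closing) {			/* revalidate after the gap */
		write_unlock_bh(&my_global_lock);
		kfree(route);
		return NULL;
	}
	/* ... link the route in while the lock is held ... */
	write_unlock_bh(&my_global_lock);
	return route;
}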
cr->ksncr_ni = ni;
cr->ksncr_sock = sock;
- cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+ spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
cfs_list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
- cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+ spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
return 0;
}
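/*
 * A sketch of the connd hand-off above, hypothetical names: the request
 * is queued under a BH-safe spinlock and the wait queue is signalled so
 * a sleeping daemon thread wakes to consume it (on Linux,
 * cfs_waitq_signal maps to wake_up()).
 */
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>

struct my_connreq { struct list_head list; };

static DEFINE_SPINLOCK(my_connd_lock);
static LIST_HEAD(my_connd_reqs);
static DECLARE_WAIT_QUEUE_HEAD(my_connd_waitq);

static void my_queue_connreq(struct my_connreq *cr)
{
	spin_lock_bh(&my_connd_lock);
	list_add_tail(&cr->list, &my_connd_reqs);	/* FIFO order */
	wake_up(&my_connd_waitq);			/* rouse a connd */
	spin_unlock_bh(&my_connd_lock);
}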
ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
cfs_socket_t *sock, int type)
{
- cfs_rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
+ rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
CFS_LIST_HEAD (zombies);
lnet_process_id_t peerid;
cfs_list_t *tmp;
hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
peerid = peer->ksnp_id;
- cfs_write_lock_bh(global_lock);
+ write_lock_bh(global_lock);
conn->ksnc_proto = peer->ksnp_proto;
- cfs_write_unlock_bh(global_lock);
+ write_unlock_bh(global_lock);
if (conn->ksnc_proto == NULL) {
conn->ksnc_proto = &ksocknal_protocol_v3x;
if (active) {
ksocknal_peer_addref(peer);
- cfs_write_lock_bh (global_lock);
+ write_lock_bh(global_lock);
} else {
rc = ksocknal_create_peer(&peer, ni, peerid);
if (rc != 0)
goto failed_1;
- cfs_write_lock_bh (global_lock);
+ write_lock_bh(global_lock);
/* called with a ref on ni, so shutdown can't have started */
LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
ksocknal_queue_tx_locked (tx, conn);
}
- cfs_write_unlock_bh (global_lock);
+ write_unlock_bh(global_lock);
/* We've now got a new connection. Any errors from here on are just
* like "normal" comms errors and we close the connection normally.
if (rc == 0)
rc = ksocknal_lib_setup_sock(sock);
- cfs_write_lock_bh(global_lock);
+ write_lock_bh(global_lock);
/* NB my callbacks block while I hold ksnd_global_lock */
ksocknal_lib_set_callback(sock, conn);
if (!active)
peer->ksnp_accepting--;
- cfs_write_unlock_bh(global_lock);
+ write_unlock_bh(global_lock);
if (rc != 0) {
- cfs_write_lock_bh(global_lock);
+ write_lock_bh(global_lock);
if (!conn->ksnc_closing) {
/* could be closed by another thread */
ksocknal_close_conn_locked(conn, rc);
}
- cfs_write_unlock_bh(global_lock);
+ write_unlock_bh(global_lock);
} else if (ksocknal_connsock_addref(conn) == 0) {
/* Allow I/O to proceed. */
ksocknal_read_callback(conn);
ksocknal_unlink_peer_locked(peer);
}
- cfs_write_unlock_bh (global_lock);
+ write_unlock_bh(global_lock);
if (warn != NULL) {
if (rc < 0)
ksocknal_send_hello(ni, conn, peerid.nid, hello);
}
- cfs_write_lock_bh(global_lock);
+ write_lock_bh(global_lock);
peer->ksnp_accepting--;
- cfs_write_unlock_bh(global_lock);
+ write_unlock_bh(global_lock);
}
ksocknal_txlist_done(ni, &zombies, 1);
tx_list)
ksocknal_tx_prep(conn, tx);
- cfs_spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
- cfs_list_splice_init(&peer->ksnp_tx_queue,
- &conn->ksnc_tx_queue);
- cfs_spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
+ spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
+ cfs_list_splice_init(&peer->ksnp_tx_queue,
+ &conn->ksnc_tx_queue);
+ spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
}
peer->ksnp_proto = NULL; /* renegotiate protocol version */
}
}
- cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+ spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
- cfs_list_add_tail (&conn->ksnc_list,
- &ksocknal_data.ksnd_deathrow_conns);
- cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq);
+ cfs_list_add_tail(&conn->ksnc_list,
+ &ksocknal_data.ksnd_deathrow_conns);
+ cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);
- cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+ spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
}
void
* tell LNET I think the peer is dead if it's to another kernel and
* there are no connections or connection attempts in existence. */
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
cfs_list_empty(&peer->ksnp_conns) &&
last_alive = peer->ksnp_last_alive;
}
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
if (notify)
lnet_notify (peer->ksnp_ni, peer->ksnp_id.nid, 0,
* abort all buffered data */
LASSERT (conn->ksnc_sock == NULL);
- cfs_spin_lock(&peer->ksnp_lock);
+ spin_lock(&peer->ksnp_lock);
cfs_list_for_each_entry_safe_typed(tx, tmp, &peer->ksnp_zc_req_list,
ksock_tx_t, tx_zc_list) {
cfs_list_add(&tx->tx_zc_list, &zlist);
}
- cfs_spin_unlock(&peer->ksnp_lock);
+ spin_unlock(&peer->ksnp_lock);
while (!cfs_list_empty(&zlist)) {
tx = cfs_list_entry(zlist.next, ksock_tx_t, tx_zc_list);
LASSERT(conn->ksnc_closing);
/* wake up the scheduler to "send" all remaining packets to /dev/null */
- cfs_spin_lock_bh (&sched->kss_lock);
+ spin_lock_bh(&sched->kss_lock);
/* a closing conn is always ready to tx */
conn->ksnc_tx_ready = 1;
cfs_waitq_signal (&sched->kss_waitq);
}
- cfs_spin_unlock_bh (&sched->kss_lock);
+ spin_unlock_bh(&sched->kss_lock);
- /* serialise with callbacks */
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ /* serialise with callbacks */
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
peer->ksnp_error = 0; /* avoid multiple notifications */
}
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
if (failed)
ksocknal_peer_failed(peer);
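/*
 * Why the acquisitions above use the _bh variants: the socket
 * data-ready/write-space callbacks run in softirq (BH) context and take
 * ksnd_global_lock themselves ("NB my callbacks block while I hold
 * ksnd_global_lock"), so process context must disable BHs while holding
 * the lock, or a callback could interrupt the holder on the same CPU
 * and spin forever. A sketch with a hypothetical lock:
 */
#include <linux/spinlock.h>

static DEFINE_RWLOCK(my_lock);

static void my_bh_callback(void)		/* softirq context */
{
	read_lock(&my_lock);			/* plain variant is safe:
						 * BHs don't nest here */
	/* ... inspect connection state ... */
	read_unlock(&my_lock);
}

static void my_process_path(void)		/* process context */
{
	write_lock_bh(&my_lock);		/* _bh: keeps my_bh_callback
						 * from preempting us while
						 * we hold the lock */
	/* ... reset callbacks, tear down state ... */
	write_unlock_bh(&my_lock);
}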
void
ksocknal_queue_zombie_conn (ksock_conn_t *conn)
{
- /* Queue the conn for the reaper to destroy */
+ /* Queue the conn for the reaper to destroy */
- LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) == 0);
- cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+ LASSERT(cfs_atomic_read(&conn->ksnc_conn_refcount) == 0);
+ spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
- cfs_list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
- cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);
+ cfs_list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
+ cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);
- cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+ spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
}
void
__u32 ipaddr = conn->ksnc_ipaddr;
int count;
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
count = ksocknal_close_peer_conns_locked (peer, ipaddr, why);
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
return (count);
}
int i;
int count = 0;
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
if (id.nid != LNET_NID_ANY)
lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
}
}
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
/* wildcards always succeed */
if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
cfs_time_t last_alive = 0;
cfs_time_t now = cfs_time_current();
ksock_peer_t *peer = NULL;
- cfs_rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
+ rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
lnet_process_id_t id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID};
- cfs_read_lock(glock);
+ read_lock(glock);
peer = ksocknal_find_peer_locked(ni, id);
if (peer != NULL) {
connect = 0;
}
- cfs_read_unlock(glock);
+ read_unlock(glock);
if (last_alive != 0)
*when = last_alive;
ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
- cfs_write_lock_bh(glock);
+ write_lock_bh(glock);
peer = ksocknal_find_peer_locked(ni, id);
if (peer != NULL)
ksocknal_launch_all_connections_locked(peer);
- cfs_write_unlock_bh(glock);
+ write_unlock_bh(glock);
return;
}
ksock_conn_t *conn;
for (index = 0; ; index++) {
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
i = 0;
conn = NULL;
}
}
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
if (conn == NULL)
break;
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
for (j = 0; ; j++) {
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
index = 0;
peer = NULL;
}
}
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
if (peer != NULL) {
rc = 0;
netmask == 0)
return (-EINVAL);
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
iface = ksocknal_ip2iface(ni, ipaddress);
if (iface != NULL) {
/* NB only new connections will pay attention to the new interface! */
}
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
return (rc);
}
int i;
int j;
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
for (i = 0; i < net->ksnn_ninterfaces; i++) {
this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
}
}
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
return (rc);
}
ksock_net_t *net = ni->ni_data;
ksock_interface_t *iface;
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
rc = -ENOENT;
data->ioc_u32[3] = iface->ksni_nroutes;
}
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
return rc;
}
sizeof (cfs_list_t) *
ksocknal_data.ksnd_peer_hash_size);
- cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
+ spin_lock(&ksocknal_data.ksnd_tx_lock);
- if (!cfs_list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
- cfs_list_t zlist;
- ksock_tx_t *tx;
+ if (!cfs_list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
+ cfs_list_t zlist;
+ ksock_tx_t *tx;
- cfs_list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
- cfs_list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
- cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
+ cfs_list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
+ cfs_list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
+ spin_unlock(&ksocknal_data.ksnd_tx_lock);
- while(!cfs_list_empty(&zlist)) {
- tx = cfs_list_entry(zlist.next, ksock_tx_t, tx_list);
- cfs_list_del(&tx->tx_list);
- LIBCFS_FREE(tx, tx->tx_desc_size);
- }
- } else {
- cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
- }
+ while (!cfs_list_empty(&zlist)) {
+ tx = cfs_list_entry(zlist.next, ksock_tx_t, tx_list);
+ cfs_list_del(&tx->tx_list);
+ LIBCFS_FREE(tx, tx->tx_desc_size);
+ }
+ } else {
+ spin_unlock(&ksocknal_data.ksnd_tx_lock);
+ }
}
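/*
 * The idle-noop-tx drain above is the classic "detach under the lock,
 * free outside it" idiom: only the list splice happens inside the
 * critical section, so freeing never extends lock hold time. A sketch
 * with hypothetical names; list_splice_init() is the idiomatic native
 * equivalent of the cfs_list_add/cfs_list_del_init pair above.
 */
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

struct my_tx { struct list_head list; };

static DEFINE_SPINLOCK(my_tx_lock);
static LIST_HEAD(my_idle_txs);

static void my_drain_idle_txs(void)
{
	LIST_HEAD(zlist);
	struct my_tx *tx, *tmp;

	spin_lock(&my_tx_lock);
	list_splice_init(&my_idle_txs, &zlist);	/* detach everything */
	spin_unlock(&my_tx_lock);

	list_for_each_entry_safe(tx, tmp, &zlist, list) {
		list_del(&tx->list);
		kfree(tx);			/* no lock held here */
	}
}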
void
}
i = 4;
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
while (ksocknal_data.ksnd_nthreads != 0) {
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"waiting for %d threads to terminate\n",
ksocknal_data.ksnd_nthreads);
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
cfs_pause(cfs_time_seconds(1));
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
}
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
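/*
 * The ((i & (-i)) == i) test in the wait loop above is true exactly
 * when i is a power of two (i & -i isolates the lowest set bit), so the
 * "waiting for %d threads" message is emitted at exponentially growing
 * intervals instead of once per second. Standalone helper illustrating
 * the test:
 */
#include <linux/types.h>

static inline bool my_log_this_pass(unsigned int i)
{
	/* lowest set bit equals i itself <=> i is a power of two:
	 * true for i = 1, 2, 4, 8, 16, ... */
	return (i & -i) == i;
}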
ksocknal_free_buffers();
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
CFS_INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
- cfs_rwlock_init(&ksocknal_data.ksnd_global_lock);
+ rwlock_init(&ksocknal_data.ksnd_global_lock);
CFS_INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
- cfs_spin_lock_init (&ksocknal_data.ksnd_reaper_lock);
+ spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_enomem_conns);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_zombie_conns);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_deathrow_conns);
cfs_waitq_init(&ksocknal_data.ksnd_reaper_waitq);
- cfs_spin_lock_init (&ksocknal_data.ksnd_connd_lock);
+ spin_lock_init(&ksocknal_data.ksnd_connd_lock);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_connreqs);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_routes);
cfs_waitq_init(&ksocknal_data.ksnd_connd_waitq);
- cfs_spin_lock_init (&ksocknal_data.ksnd_tx_lock);
+ spin_lock_init(&ksocknal_data.ksnd_tx_lock);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_idle_noop_txs);
/* NB memset above zeros whole of ksocknal_data */
sched = &info->ksi_scheds[nthrs - 1];
sched->kss_info = info;
- cfs_spin_lock_init(&sched->kss_lock);
+ spin_lock_init(&sched->kss_lock);
CFS_INIT_LIST_HEAD(&sched->kss_rx_conns);
CFS_INIT_LIST_HEAD(&sched->kss_tx_conns);
CFS_INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
}
for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
- cfs_spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
- ksocknal_data.ksnd_connd_starting++;
- cfs_spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
-
- rc = ksocknal_thread_start (ksocknal_connd,
- (void *)((ulong_ptr_t)i));
- if (rc != 0) {
- cfs_spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
- ksocknal_data.ksnd_connd_starting--;
- cfs_spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
+ spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
+ ksocknal_data.ksnd_connd_starting++;
+ spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
+
+ rc = ksocknal_thread_start(ksocknal_connd,
+ (void *)((ulong_ptr_t)i));
+ if (rc != 0) {
+ spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
+ ksocknal_data.ksnd_connd_starting--;
+ spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
CERROR("Can't spawn socknal connd: %d\n", rc);
goto failed;
}
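/*
 * Sketch of the connd-spawn bookkeeping above, hypothetical names: the
 * starting counter is bumped under the lock before the thread is
 * spawned and rolled back under the same lock if the spawn fails, so
 * anyone sampling the counter never waits for a thread that will never
 * arrive.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_connd_lock);
static int my_connd_starting;

static int my_spawn_connds(int n, int (*start)(int idx))
{
	int i, rc;

	for (i = 0; i < n; i++) {
		spin_lock_bh(&my_connd_lock);
		my_connd_starting++;		/* optimistic: thread is coming */
		spin_unlock_bh(&my_connd_lock);

		rc = start(i);
		if (rc != 0) {
			spin_lock_bh(&my_connd_lock);
			my_connd_starting--;	/* roll back the failed spawn */
			spin_unlock_bh(&my_connd_lock);
			return rc;
		}
	}
	return 0;
}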
void
ksocknal_debug_peerhash (lnet_ni_t *ni)
{
- ksock_peer_t *peer = NULL;
- cfs_list_t *tmp;
- int i;
+ ksock_peer_t *peer = NULL;
+ cfs_list_t *tmp;
+ int i;
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
cfs_list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
}
}
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
return;
}
LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
LASSERT(ksocknal_data.ksnd_nnets > 0);
- cfs_spin_lock_bh (&net->ksnn_lock);
- net->ksnn_shutdown = 1; /* prevent new peers */
- cfs_spin_unlock_bh (&net->ksnn_lock);
+ spin_lock_bh(&net->ksnn_lock);
+ net->ksnn_shutdown = 1; /* prevent new peers */
+ spin_unlock_bh(&net->ksnn_lock);
- /* Delete all peers */
- ksocknal_del_peer(ni, anyid, 0);
+ /* Delete all peers */
+ ksocknal_del_peer(ni, anyid, 0);
- /* Wait for all peer state to clean up */
- i = 2;
- cfs_spin_lock_bh (&net->ksnn_lock);
- while (net->ksnn_npeers != 0) {
- cfs_spin_unlock_bh (&net->ksnn_lock);
+ /* Wait for all peer state to clean up */
+ i = 2;
+ spin_lock_bh(&net->ksnn_lock);
+ while (net->ksnn_npeers != 0) {
+ spin_unlock_bh(&net->ksnn_lock);
- i++;
- CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
- "waiting for %d peers to disconnect\n",
- net->ksnn_npeers);
- cfs_pause(cfs_time_seconds(1));
+ i++;
+ CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
+ "waiting for %d peers to disconnect\n",
+ net->ksnn_npeers);
+ cfs_pause(cfs_time_seconds(1));
- ksocknal_debug_peerhash(ni);
+ ksocknal_debug_peerhash(ni);
- cfs_spin_lock_bh (&net->ksnn_lock);
- }
- cfs_spin_unlock_bh (&net->ksnn_lock);
+ spin_lock_bh(&net->ksnn_lock);
+ }
+ spin_unlock_bh(&net->ksnn_lock);
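/*
 * The shutdown loop above must release ksnn_lock before cfs_pause():
 * cfs_pause() sleeps, and sleeping while holding a spinlock (with BHs
 * disabled, no less) would deadlock or trip the scheduler's atomic
 * checks. Drop-sleep-retake sketch, hypothetical names, with msleep()
 * standing in for cfs_pause():
 */
#include <linux/spinlock.h>
#include <linux/delay.h>

struct my_net { spinlock_t lock; int npeers; };

static void my_wait_for_peers(struct my_net *net)
{
	spin_lock_bh(&net->lock);
	while (net->npeers != 0) {
		spin_unlock_bh(&net->lock);	/* never sleep under a spinlock */
		msleep(1000);			/* give peers time to go away */
		spin_lock_bh(&net->lock);	/* retake, then recheck */
	}
	spin_unlock_bh(&net->lock);
}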
for (i = 0; i < net->ksnn_ninterfaces; i++) {
LASSERT (net->ksnn_interfaces[i].ksni_npeers == 0);
if (net == NULL)
goto fail_0;
- cfs_spin_lock_init(&net->ksnn_lock);
+ spin_lock_init(&net->ksnn_lock);
net->ksnn_incarnation = ksocknal_new_incarnation();
ni->ni_data = net;
ni->ni_peertimeout = *ksocknal_tunables.ksnd_peertimeout;