LASSERT(id.pid != LNET_PID_ANY);
LASSERT(!in_interrupt());
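+ /* ksnn_npeers now doubles as the shutdown flag: ksocknal_shutdown()
+ * biases it negative with SOCKNAL_SHUTDOWN_BIAS (defined elsewhere in
+ * this patch), so the increment below fails once shutdown has begun
+ * and no new peer_ni can be created.
+ */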
+ if (!atomic_inc_unless_negative(&net->ksnn_npeers)) {
+ CERROR("Can't create peer_ni: network shutdown\n");
+ return ERR_PTR(-ESHUTDOWN);
+ }
+
LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
- if (peer_ni == NULL)
+ if (!peer_ni) {
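+ /* undo the count taken above */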
+ atomic_dec(&net->ksnn_npeers);
return ERR_PTR(-ENOMEM);
+ }
peer_ni->ksnp_ni = ni;
peer_ni->ksnp_id = id;
INIT_LIST_HEAD(&peer_ni->ksnp_zc_req_list);
spin_lock_init(&peer_ni->ksnp_lock);
- spin_lock_bh(&net->ksnn_lock);
-
- if (net->ksnn_shutdown) {
- spin_unlock_bh(&net->ksnn_lock);
-
- LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
- CERROR("Can't create peer_ni: network shutdown\n");
- return ERR_PTR(-ESHUTDOWN);
- }
-
- net->ksnn_npeers++;
-
- spin_unlock_bh(&net->ksnn_lock);
-
return peer_ni;
}
LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
- /* NB a peer_ni's connections and routes keep a reference on their peer_ni
- * until they are destroyed, so we can be assured that _all_ state to
- * do with this peer_ni has been cleaned up when its refcount drops to
- * zero. */
- spin_lock_bh(&net->ksnn_lock);
- net->ksnn_npeers--;
- spin_unlock_bh(&net->ksnn_lock);
+ /* NB a peer_ni's connections and routes keep a reference on their
+ * peer_ni until they are destroyed, so we can be assured that _all_
+ * state to do with this peer_ni has been cleaned up when its refcount
+ * drops to zero.
+ */
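+ /* This may drop ksnn_npeers back to the shutdown bias, allowing a
+ * waiting ksocknal_shutdown() to proceed.
+ */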
+ atomic_dec(&net->ksnn_npeers);
}
struct ksock_peer_ni *
write_lock_bh(&ksocknal_data.ksnd_global_lock);
/* always called with a ref on ni, so shutdown can't have started */
- LASSERT(((struct ksock_net *) ni->ni_data)->ksnn_shutdown == 0);
+ LASSERT(atomic_read(&((struct ksock_net *)ni->ni_data)->ksnn_npeers)
+ >= 0);
peer2 = ksocknal_find_peer_locked(ni, id);
if (peer2 != NULL) {
write_lock_bh(global_lock);
/* called with a ref on ni, so shutdown can't have started */
- LASSERT(((struct ksock_net *) ni->ni_data)->ksnn_shutdown == 0);
+ LASSERT(atomic_read(&((struct ksock_net *)ni->ni_data)->ksnn_npeers)
+ >= 0);
peer2 = ksocknal_find_peer_locked(ni, peerid);
if (peer2 == NULL) {
};
int i;
- LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
- LASSERT(ksocknal_data.ksnd_nnets > 0);
+ LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
+ LASSERT(ksocknal_data.ksnd_nnets > 0);
- spin_lock_bh(&net->ksnn_lock);
- net->ksnn_shutdown = 1; /* prevent new peers */
- spin_unlock_bh(&net->ksnn_lock);
+ /* prevent new peers */
+ atomic_add(SOCKNAL_SHUTDOWN_BIAS, &net->ksnn_npeers);
/* Delete all peers */
ksocknal_del_peer(ni, anyid, 0);
/* Wait for all peer_ni state to clean up */
i = 2;
- spin_lock_bh(&net->ksnn_lock);
- while (net->ksnn_npeers != 0) {
- spin_unlock_bh(&net->ksnn_lock);
-
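+ /* ksnn_npeers was biased negative above; the count returns to
+ * exactly SOCKNAL_SHUTDOWN_BIAS once the last peer_ni is freed.
+ */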
+ while (atomic_read(&net->ksnn_npeers) > SOCKNAL_SHUTDOWN_BIAS) {
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"waiting for %d peers to disconnect\n",
- net->ksnn_npeers);
+ atomic_read(&net->ksnn_npeers) - SOCKNAL_SHUTDOWN_BIAS);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(cfs_time_seconds(1));
ksocknal_debug_peerhash(ni);
-
- spin_lock_bh(&net->ksnn_lock);
}
- spin_unlock_bh(&net->ksnn_lock);
- for (i = 0; i < net->ksnn_ninterfaces; i++) {
- LASSERT (net->ksnn_interfaces[i].ksni_npeers == 0);
- LASSERT (net->ksnn_interfaces[i].ksni_nroutes == 0);
- }
+ for (i = 0; i < net->ksnn_ninterfaces; i++) {
+ LASSERT(net->ksnn_interfaces[i].ksni_npeers == 0);
+ LASSERT(net->ksnn_interfaces[i].ksni_nroutes == 0);
+ }
list_del(&net->ksnn_list);
LIBCFS_FREE(net, sizeof(*net));
return rc;
}
- LIBCFS_ALLOC(net, sizeof(*net));
- if (net == NULL)
- goto fail_0;
+ LIBCFS_ALLOC(net, sizeof(*net));
+ if (net == NULL)
+ goto fail_0;
- spin_lock_init(&net->ksnn_lock);
net->ksnn_incarnation = ktime_get_real_ns();
ni->ni_data = net;
net_tunables = &ni->ni_net->net_tunables;