+ksock_route_t *
+ksocknal_create_route (__u32 ipaddr, int port, int buffer_size,
+ int nonagle, int irq_affinity, int eager)
+{
+ ksock_route_t *route;
+
+ PORTAL_ALLOC (route, sizeof (*route));
+ if (route == NULL)
+ return (NULL);
+
+ atomic_set (&route->ksnr_refcount, 1); /* 1 ref for caller */
+ route->ksnr_sharecount = 0;
+ route->ksnr_peer = NULL;
+ route->ksnr_timeout = jiffies;
+ route->ksnr_retry_interval = SOCKNAL_MIN_RECONNECT_INTERVAL;
+ route->ksnr_ipaddr = ipaddr;
+ route->ksnr_port = port;
+ route->ksnr_buffer_size = buffer_size;
+ route->ksnr_irq_affinity = irq_affinity;
+ route->ksnr_nonagel = nonagle; /* NB field spelling follows the struct declaration */
+ route->ksnr_eager = eager;
+ route->ksnr_connecting = 0;
+ route->ksnr_connected = 0;
+ route->ksnr_deleted = 0;
+ route->ksnr_conn_count = 0;
+
+ return (route);
+}
+
+void
+ksocknal_destroy_route (ksock_route_t *route)
+{
+ LASSERT (route->ksnr_sharecount == 0);
+
+ if (route->ksnr_peer != NULL)
+ ksocknal_put_peer (route->ksnr_peer);
+
+ PORTAL_FREE (route, sizeof (*route));
+}
+
+void
+ksocknal_put_route (ksock_route_t *route)
+{
+ CDEBUG (D_OTHER, "putting route[%p] (%d)\n",
+ route, atomic_read (&route->ksnr_refcount));
+
+ LASSERT (atomic_read (&route->ksnr_refcount) > 0);
+ if (!atomic_dec_and_test (&route->ksnr_refcount))
+ return;
+
+ ksocknal_destroy_route (route);
+}
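+
+/* Illustrative route lifecycle (a sketch, not part of this patch): a
+ * route is born with one reference owned by its creator, and every
+ * lookup that hands a route out takes another, so each holder drops
+ * exactly what it took:
+ *
+ *   ksock_route_t *route = ksocknal_get_route_by_idx (i);
+ *   if (route != NULL) {
+ *           ... inspect route->ksnr_ipaddr, route->ksnr_port ...
+ *           ksocknal_put_route (route);
+ *   }
+ */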
+
+ksock_peer_t *
+ksocknal_create_peer (ptl_nid_t nid)
+{
+ ksock_peer_t *peer;
+
+ LASSERT (nid != PTL_NID_ANY);
+
+ PORTAL_ALLOC (peer, sizeof (*peer));
+ if (peer == NULL)
+ return (NULL);
+
+ memset (peer, 0, sizeof (*peer));
+
+ peer->ksnp_nid = nid;
+ atomic_set (&peer->ksnp_refcount, 1); /* 1 ref for caller */
+ peer->ksnp_closing = 0;
+ INIT_LIST_HEAD (&peer->ksnp_conns);
+ INIT_LIST_HEAD (&peer->ksnp_routes);
+ INIT_LIST_HEAD (&peer->ksnp_tx_queue);
+
+ /* Can't unload while peers exist; ensures all I/O has terminated
+ * before unload attempts */
+ PORTAL_MODULE_USE;
+ atomic_inc (&ksocknal_data.ksnd_npeers);
+ return (peer);
+}
+
+void
+ksocknal_destroy_peer (ksock_peer_t *peer)
+{
+ CDEBUG (D_NET, "peer "LPX64" %p deleted\n", peer->ksnp_nid, peer);
+
+ LASSERT (atomic_read (&peer->ksnp_refcount) == 0);
+ LASSERT (list_empty (&peer->ksnp_conns));
+ LASSERT (list_empty (&peer->ksnp_routes));
+ LASSERT (list_empty (&peer->ksnp_tx_queue));
+
+ PORTAL_FREE (peer, sizeof (*peer));
+
+ /* NB a peer's connections and autoconnect routes keep a reference
+ * on their peer until they are destroyed, so we can be assured
+ * that _all_ state to do with this peer has been cleaned up when
+ * its refcount drops to zero. */
+ atomic_dec (&ksocknal_data.ksnd_npeers);
+ PORTAL_MODULE_UNUSE;
+}
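+
+/* The PORTAL_MODULE_USE taken in ksocknal_create_peer() is balanced in
+ * ksocknal_destroy_peer() above; ksnd_npeers presumably lets module
+ * cleanup wait until every peer (and hence all outstanding I/O) is
+ * gone before the unload completes. */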
+
+void
+ksocknal_put_peer (ksock_peer_t *peer)
+{
+ CDEBUG (D_OTHER, "putting peer[%p] -> "LPX64" (%d)\n",
+ peer, peer->ksnp_nid,
+ atomic_read (&peer->ksnp_refcount));
+
+ LASSERT (atomic_read (&peer->ksnp_refcount) > 0);
+ if (!atomic_dec_and_test (&peer->ksnp_refcount))
+ return;
+
+ ksocknal_destroy_peer (peer);
+}
+
+ksock_peer_t *
+ksocknal_find_peer_locked (ptl_nid_t nid)
+{
+ struct list_head *peer_list = ksocknal_nid2peerlist (nid);
+ struct list_head *tmp;
+ ksock_peer_t *peer;
+
+ list_for_each (tmp, peer_list) {
+ peer = list_entry (tmp, ksock_peer_t, ksnp_list);
+
+ LASSERT (!peer->ksnp_closing);
+ LASSERT (!(list_empty (&peer->ksnp_routes) &&
+ list_empty (&peer->ksnp_conns)));
+
+ if (peer->ksnp_nid != nid)
+ continue;
+
+ CDEBUG(D_NET, "got peer [%p] -> "LPX64" (%d)\n",
+ peer, nid, atomic_read (&peer->ksnp_refcount));
+ return (peer);
+ }
+ return (NULL);
+}
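+
+/* NB: ksocknal_find_peer_locked() returns its peer without taking a
+ * reference; the caller must hold ksnd_global_lock, which is what
+ * keeps the peer alive.  ksocknal_get_peer() below is the unlocked
+ * wrapper that does take a ref on the caller's behalf. */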
+
+ksock_peer_t *
+ksocknal_get_peer (ptl_nid_t nid)
+{
+ ksock_peer_t *peer;
+
+ read_lock (&ksocknal_data.ksnd_global_lock);
+ peer = ksocknal_find_peer_locked (nid);
+ if (peer != NULL) /* +1 ref for caller */
+ atomic_inc (&peer->ksnp_refcount);
+ read_unlock (&ksocknal_data.ksnd_global_lock);
+
+ return (peer);
+}
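+
+/* Illustrative peer lifecycle (a sketch, not part of this patch):
+ *
+ *   ksock_peer_t *peer = ksocknal_get_peer (nid);
+ *   if (peer != NULL) {
+ *           ... use peer (taking ksnd_global_lock as needed) ...
+ *           ksocknal_put_peer (peer);
+ *   }
+ *
+ * The peer is only destroyed once the peer table, every conn and every
+ * route have dropped their refs too. */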
+
+void
+ksocknal_unlink_peer_locked (ksock_peer_t *peer)
+{
+ LASSERT (!peer->ksnp_closing);
+ peer->ksnp_closing = 1;
+ list_del (&peer->ksnp_list);
+ /* lose peerlist's ref */
+ ksocknal_put_peer (peer);
+}
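+
+/* NB: ksocknal_unlink_peer_locked() must be called with
+ * ksnd_global_lock held for writing; it drops the peer table's ref, so
+ * the peer is destroyed as soon as its remaining conns and routes
+ * release theirs. */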
+
+ksock_route_t *
+ksocknal_get_route_by_idx (int index)
+{
+ ksock_peer_t *peer;
+ struct list_head *ptmp;
+ ksock_route_t *route;
+ struct list_head *rtmp;
+ int i;
+
+ read_lock (&ksocknal_data.ksnd_global_lock);
+
+ for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
+ list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
+ peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
+
+ LASSERT (!(list_empty (&peer->ksnp_routes) &&
+ list_empty (&peer->ksnp_conns)));
+
+ list_for_each (rtmp, &peer->ksnp_routes) {
+ if (index-- > 0)
+ continue;
+
+ route = list_entry (rtmp, ksock_route_t, ksnr_list);
+ atomic_inc (&route->ksnr_refcount);
+ read_unlock (&ksocknal_data.ksnd_global_lock);
+ return (route);
+ }
+ }
+ }
+
+ read_unlock (&ksocknal_data.ksnd_global_lock);
+ return (NULL);
+}
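+
+/* NB: ksocknal_get_route_by_idx() walks every route of every peer
+ * under the global read lock, so each call is O(number of routes);
+ * presumably fine for its intended use of enumerating the route table
+ * one entry per call.  The returned route carries +1 ref which the
+ * caller drops with ksocknal_put_route(). */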
+
+int
+ksocknal_add_route (ptl_nid_t nid, __u32 ipaddr, int port, int bufnob,
+ int nonagle, int bind_irq, int share, int eager)
+{
+ unsigned long flags;
+ ksock_peer_t *peer;
+ ksock_peer_t *peer2;
+ ksock_route_t *route;
+ struct list_head *rtmp;
+ ksock_route_t *route2;
+
+ if (nid == PTL_NID_ANY)
+ return (-EINVAL);
+
+ /* Have a brand new peer ready in case we need it; it is discarded
+ * below if one already exists... */
+ peer = ksocknal_create_peer (nid);
+ if (peer == NULL)
+ return (-ENOMEM);
+
+ route = ksocknal_create_route (ipaddr, port, bufnob,
+ nonagle, bind_irq, eager);
+ if (route == NULL) {
+ ksocknal_put_peer (peer);
+ return (-ENOMEM);
+ }
+
+ write_lock_irqsave (&ksocknal_data.ksnd_global_lock, flags);
+
+ peer2 = ksocknal_find_peer_locked (nid);
+ if (peer2 != NULL) {
+ ksocknal_put_peer (peer);
+ peer = peer2;
+ } else {
+ /* peer table takes existing ref on peer */
+ list_add (&peer->ksnp_list,
+ ksocknal_nid2peerlist (nid));
+ }
+
+ route2 = NULL;
+ if (share) {
+ /* check for existing route to this NID via this ipaddr */
+ list_for_each (rtmp, &peer->ksnp_routes) {
+ route2 = list_entry (rtmp, ksock_route_t, ksnr_list);
+
+ if (route2->ksnr_ipaddr == ipaddr)
+ break;
+
+ route2 = NULL;
+ }
+ }
+
+ if (route2 != NULL) {
+ ksocknal_put_route (route);
+ route = route2;
+ } else {
+ /* route takes a ref on peer */
+ route->ksnr_peer = peer;
+ atomic_inc (&peer->ksnp_refcount);
+ /* peer's route list takes existing ref on route */
+ list_add_tail (&route->ksnr_list, &peer->ksnp_routes);
+ }
+
+ route->ksnr_sharecount++;
+
+ write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags);
+
+ return (0);
+}
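+
+/* Illustrative sharing semantics (a sketch, not part of this patch;
+ * argument values other than 'share' are placeholders):
+ *
+ *   ksocknal_add_route (nid, ip, port, 0, 0, 0, 1, 0);
+ *       ...creates peer and route, ksnr_sharecount == 1
+ *   ksocknal_add_route (nid, ip, port, 0, 0, 0, 1, 0);
+ *       ...reuses the route, ksnr_sharecount == 2
+ *
+ * A matching number of shared deletes is then needed before the route
+ * is actually removed; an unshared delete removes it regardless. */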
+
+void
+ksocknal_del_route_locked (ksock_route_t *route, int share, int keep_conn)
+{
+ ksock_peer_t *peer = route->ksnr_peer;
+ ksock_conn_t *conn;
+ struct list_head *ctmp;
+ struct list_head *cnxt;
+
+ if (!share) {
+ route->ksnr_sharecount = 0;
+ } else {
+ route->ksnr_sharecount--;
+ if (route->ksnr_sharecount != 0)
+ return;
+ }
+
+ list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) {
+ conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
+
+ if (conn->ksnc_route != route)
+ continue;
+
+ if (!keep_conn) {
+ ksocknal_close_conn_locked (conn, 0);
+ continue;
+ }
+
+ /* keeping the conn; just dissociate it and the route... */
+ conn->ksnc_route = NULL;
+ ksocknal_put_route (route); /* drop conn's ref on route */
+ }
+
+ route->ksnr_deleted = 1;
+ list_del (&route->ksnr_list);
+ ksocknal_put_route (route); /* drop peer's ref */
+
+ if (list_empty (&peer->ksnp_routes) &&
+ list_empty (&peer->ksnp_conns)) {
+ /* I've just removed the last autoconnect route of a peer
+ * with no active connections */
+ ksocknal_unlink_peer_locked (peer);
+ }
+}
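+
+/* NB: ksocknal_del_route_locked() runs under ksnd_global_lock held for
+ * writing.  With 'keep_conn' the route's conns survive but forget
+ * their route (each dropping its route ref); otherwise they are closed
+ * here.  Deleting a peer's last route while it has no conns also
+ * unlinks the peer itself. */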
+