Whamcloud - gitweb
LU-1346 gnilnd: remove libcfs abstractions
[fs/lustre-release.git] / lnet / klnds / ralnd / ralnd.c
index a59757d..91ff1b1 100644 (file)
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/*
+ * GPL HEADER START
  *
- * Copyright (C) 2004 Cluster File Systems, Inc.
- *   Author: Eric Barton <eric@bartonsoftware.com>
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- *   This file is part of Lustre, http://www.lustre.org.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
  *
- *   Lustre is free software; you can redistribute it and/or
- *   modify it under the terms of version 2 of the GNU General Public
- *   License as published by the Free Software Foundation.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
  *
- *   Lustre is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
  *
- *   You should have received a copy of the GNU General Public License
- *   along with Lustre; if not, write to the Free Software
- *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
  *
+ * GPL HEADER END
  */
-#include "ranal.h"
-
-
-nal_t                   kranal_api;
-ptl_handle_ni_t         kranal_ni;
-kra_data_t              kranal_data;
-kra_tunables_t          kranal_tunables;
-
-#ifdef CONFIG_SYSCTL
-#define RANAL_SYSCTL_TIMEOUT           1
-#define RANAL_SYSCTL_LISTENER_TIMEOUT  2
-#define RANAL_SYSCTL_BACKLOG           3
-#define RANAL_SYSCTL_PORT              4
-#define RANAL_SYSCTL_MAX_IMMEDIATE     5
-
-#define RANAL_SYSCTL                   202
-
-static ctl_table kranal_ctl_table[] = {
-        {RANAL_SYSCTL_TIMEOUT, "timeout", 
-         &kranal_tunables.kra_timeout, sizeof(int),
-         0644, NULL, &proc_dointvec},
-        {RANAL_SYSCTL_LISTENER_TIMEOUT, "listener_timeout", 
-         &kranal_tunables.kra_listener_timeout, sizeof(int),
-         0644, NULL, &proc_dointvec},
-       {RANAL_SYSCTL_BACKLOG, "backlog",
-        &kranal_tunables.kra_backlog, sizeof(int),
-        0644, NULL, kranal_listener_procint},
-       {RANAL_SYSCTL_PORT, "port",
-        &kranal_tunables.kra_port, sizeof(int),
-        0644, NULL, kranal_listener_procint},
-        {RANAL_SYSCTL_MAX_IMMEDIATE, "max_immediate", 
-         &kranal_tunables.kra_max_immediate, sizeof(int),
-         0644, NULL, &proc_dointvec},
-        { 0 }
+/*
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/klnds/ralnd/ralnd.c
+ *
+ * Author: Eric Barton <eric@bartonsoftware.com>
+ */
+#include "ralnd.h"
+
+static int        kranal_devids[RANAL_MAXDEVS] = {RAPK_MAIN_DEVICE_ID,
+                                                  RAPK_EXPANSION_DEVICE_ID};
+
+lnd_t the_kralnd = {
+        .lnd_type       = RALND,
+        .lnd_startup    = kranal_startup,
+        .lnd_shutdown   = kranal_shutdown,
+        .lnd_ctl        = kranal_ctl,
+        .lnd_send       = kranal_send,
+        .lnd_recv       = kranal_recv,
+        .lnd_eager_recv = kranal_eager_recv,
+        .lnd_accept     = kranal_accept,
 };
 
-static ctl_table kranal_top_ctl_table[] = {
-        {RANAL_SYSCTL, "ranal", NULL, 0, 0555, kranal_ctl_table},
-        { 0 }
-};
-#endif
+kra_data_t              kranal_data;
 
-int
-kranal_sock_write (struct socket *sock, void *buffer, int nob)
+void
+kranal_pack_connreq(kra_connreq_t *connreq, kra_conn_t *conn, lnet_nid_t dstnid)
 {
-        int           rc;
-        mm_segment_t  oldmm = get_fs();
-       struct iovec  iov = {
-               .iov_base = buffer,
-               .iov_len  = nob
-       };
-       struct msghdr msg = {
-               .msg_name       = NULL,
-               .msg_namelen    = 0,
-               .msg_iov        = &iov,
-               .msg_iovlen     = 1,
-               .msg_control    = NULL,
-               .msg_controllen = 0,
-               .msg_flags      = MSG_DONTWAIT
-       };
-
-       /* We've set up the socket's send buffer to be large enough for
-        * everything we send, so a single non-blocking send should
-        * complete without error. */
-
-       set_fs(KERNEL_DS);
-       rc = sock_sendmsg(sock, &msg, iov.iov_len);
-       set_fs(oldmm);
-
-       return rc;
-}
+        RAP_RETURN   rrc;
 
-int
-kranal_sock_read (struct socket *sock, void *buffer, int nob, int timeout)
-{
-        int            rc;
-        mm_segment_t   oldmm = get_fs();
-       long           ticks = timeout * HZ;
-       unsigned long  then;
-       struct timeval tv;
-
-       LASSERT (nob > 0);
-       LASSERT (ticks > 0);
-
-        for (;;) {
-                struct iovec  iov = {
-                        .iov_base = buffer,
-                        .iov_len  = nob
-                };
-                struct msghdr msg = {
-                        .msg_name       = NULL,
-                        .msg_namelen    = 0,
-                        .msg_iov        = &iov,
-                        .msg_iovlen     = 1,
-                        .msg_control    = NULL,
-                        .msg_controllen = 0,
-                        .msg_flags      = 0
-                };
-
-               /* Set receive timeout to remaining time */
-               tv = (struct timeval) {
-                       .tv_sec = ticks / HZ,
-                       .tv_usec = ((ticks % HZ) * 1000000) / HZ
-               };
-               set_fs(KERNEL_DS);
-               rc = sock_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
-                                    (char *)&tv, sizeof(tv));
-               set_fs(oldmm);
-               if (rc != 0) {
-                       CERROR("Can't set socket recv timeout %d: %d\n",
-                              timeout, rc);
-                       return rc;
-               }
-
-                set_fs(KERNEL_DS);
-               then = jiffies;
-                rc = sock_recvmsg(sock, &msg, iov.iov_len, 0);
-               ticks -= jiffies - then;
-                set_fs(oldmm);
-
-                if (rc < 0)
-                        return rc;
+        memset(connreq, 0, sizeof(*connreq));
 
-                if (rc == 0)
-                        return -ECONNABORTED;
+        connreq->racr_magic     = RANAL_MSG_MAGIC;
+        connreq->racr_version   = RANAL_MSG_VERSION;
 
-                buffer = ((char *)buffer) + rc;
-                nob -= rc;
+        if (conn == NULL)                       /* prepping a "stub" reply */
+                return;
 
-               if (nob == 0)
-                       return 0;
+        connreq->racr_devid     = conn->rac_device->rad_id;
+        connreq->racr_srcnid    = kranal_data.kra_ni->ni_nid;
+        connreq->racr_dstnid    = dstnid;
+        connreq->racr_peerstamp = kranal_data.kra_peerstamp;
+        connreq->racr_connstamp = conn->rac_my_connstamp;
+        connreq->racr_timeout   = conn->rac_timeout;
 
-               if (ticks <= 0)
-                       return -ETIMEDOUT;
-        }
+        rrc = RapkGetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
+        LASSERT(rrc == RAP_SUCCESS);
 }
 
 int
-kranal_create_sock(struct socket **sockp)
+kranal_recv_connreq(struct socket *sock, kra_connreq_t *connreq, int active)
 {
-       struct socket       *sock;
-       int                  rc;
-        struct timeval       tv;
-       int                  option;
-        mm_segment_t         oldmm = get_fs();
-
-       rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
-       if (rc != 0) {
-               CERROR("Can't create socket: %d\n", rc);
-               return rc;
-       }
-
-       /* Ensure sending connection info doesn't block */
-       option = 2 * sizeof(kra_connreq_t);
-       set_fs(KERNEL_DS);
-       rc = sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
-                            (char *)&option, sizeof(option));
-       set_fs(oldmm);
-       if (rc != 0) {
-               CERROR("Can't set send buffer %d: %d\n", option, rc);
-               goto failed;
-       }
-
-       option = 1;
-       set_fs(KERNEL_DS);
-       rc = sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
-                            (char *)&option, sizeof(option));
-       set_fs(oldmm);
-       if (rc != 0) {
-               CERROR("Can't set SO_REUSEADDR: %d\n", rc);
-               goto failed;
-       }
+        int         timeout = active ? *kranal_tunables.kra_timeout :
+                                        lnet_acceptor_timeout();
+        int         swab;
+        int         rc;
 
-       *sockp = sock;
-       return 0;
+        /* return 0 on success, -ve on error, +ve to tell the peer I'm "old" */
 
- failed:
-       sock_release(sock);
-       return rc;
-}
-
-void
-kranal_pause(int ticks)
-{
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       schedule_timeout(ticks);
-}
+        rc = libcfs_sock_read(sock, &connreq->racr_magic, 
+                              sizeof(connreq->racr_magic), timeout);
+        if (rc != 0) {
+                CERROR("Read(magic) failed(1): %d\n", rc);
+                return -EIO;
+        }
 
-void
-kranal_pack_connreq(kra_connreq_t *connreq, kra_conn_t *conn)
-{
-        memset(connreq, 0, sizeof(*connreq));
+        if (connreq->racr_magic != RANAL_MSG_MAGIC &&
+            connreq->racr_magic != __swab32(RANAL_MSG_MAGIC)) {
+                /* Unexpected magic! */
+                if (!active &&
+                    (connreq->racr_magic == LNET_PROTO_MAGIC ||
+                     connreq->racr_magic == __swab32(LNET_PROTO_MAGIC))) {
+                        /* future protocol version compatibility!
+                         * When LNET unifies protocols over all LNDs, the first
+                         * thing sent will be a version query.  +ve rc means I
+                         * reply with my current magic/version */
+                        return EPROTO;
+                }
 
-        connreq->racr_magic       = RANAL_MSG_MAGIC;
-        connreq->racr_version     = RANAL_MSG_VERSION;
-        connreq->racr_devid       = conn->rac_device->rad_id;
-        connreq->racr_nid         = kranal_lib.libnal_ni.ni_pid.nid;
-        connreq->racr_timeout     = conn->rac_timeout;
-        connreq->racr_incarnation = conn->rac_my_incarnation;
+                CERROR("Unexpected magic %08x (%s)\n",
+                       connreq->racr_magic, active ? "active" : "passive");
+                return -EPROTO;
+        }
 
-        rrc = RapkGetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
-        LASSERT(rrc == RAP_SUCCESS);
-}
+        swab = (connreq->racr_magic == __swab32(RANAL_MSG_MAGIC));
 
-int
-kranal_recv_connreq(struct sock *sock, kra_connreq_t *connreq, int timeout)
-{
-        int         i;
-       int         rc;
+        rc = libcfs_sock_read(sock, &connreq->racr_version,
+                              sizeof(connreq->racr_version), timeout);
+        if (rc != 0) {
+                CERROR("Read(version) failed: %d\n", rc);
+                return -EIO;
+        }
 
-       rc = kranal_sock_read(newsock, connreq, sizeof(*connreq), timeout);
-       if (rc != 0) {
-               CERROR("Read failed: %d\n", rc);
-               return rc;
-       }
+        if (swab)
+                __swab16s(&connreq->racr_version);
+        
+        if (connreq->racr_version != RANAL_MSG_VERSION) {
+                if (active) {
+                        CERROR("Unexpected version %d\n", connreq->racr_version);
+                        return -EPROTO;
+                }
+                /* If this is a future version of the ralnd protocol, and I'm
+                 * passive (accepted the connection), tell my peer I'm "old"
+                 * (+ve rc) */
+                return EPROTO;
+        }
 
-       if (connreq->racr_magic != RANAL_MSG_MAGIC) {
-               if (__swab32(connreq->racr_magic) != RANAL_MSG_MAGIC) {
-                       CERROR("Unexpected magic %08x\n", connreq->racr_magic);
-                       return -EPROTO;
-               }
+        rc = libcfs_sock_read(sock, &connreq->racr_devid,
+                              sizeof(*connreq) -
+                              offsetof(kra_connreq_t, racr_devid),
+                              timeout);
+        if (rc != 0) {
+                CERROR("Read(body) failed: %d\n", rc);
+                return -EIO;
+        }
 
-               __swab32s(&connreq->racr_magic);
-               __swab16s(&connreq->racr_version);
+        if (swab) {
+                __swab32s(&connreq->racr_magic);
+                __swab16s(&connreq->racr_version);
                 __swab16s(&connreq->racr_devid);
-               __swab64s(&connreq->racr_nid);
-               __swab64s(&connreq->racr_incarnation);
+                __swab64s(&connreq->racr_srcnid);
+                __swab64s(&connreq->racr_dstnid);
+                __swab64s(&connreq->racr_peerstamp);
+                __swab64s(&connreq->racr_connstamp);
                 __swab32s(&connreq->racr_timeout);
 
-               __swab32s(&connreq->racr_riparams.FmaDomainHndl);
-               __swab32s(&connreq->racr_riparams.RcvCqHndl);
-               __swab32s(&connreq->racr_riparams.PTag);
+                __swab32s(&connreq->racr_riparams.HostId);
+                __swab32s(&connreq->racr_riparams.FmaDomainHndl);
+                __swab32s(&connreq->racr_riparams.PTag);
                 __swab32s(&connreq->racr_riparams.CompletionCookie);
-       }
-
-       if (connreq->racr_version != RANAL_MSG_VERSION) {
-               CERROR("Unexpected version %d\n", connreq->racr_version);
-               return -EPROTO;
-       }
+        }
 
-        if (connreq->racr_nid == PTL_NID_ANY) {
-                CERROR("Received PTL_NID_ANY\n");
+        if (connreq->racr_srcnid == LNET_NID_ANY ||
+            connreq->racr_dstnid == LNET_NID_ANY) {
+                CERROR("Received LNET_NID_ANY\n");
                 return -EPROTO;
         }
 
@@ -270,47 +173,106 @@ kranal_recv_connreq(struct sock *sock, kra_connreq_t *connreq, int timeout)
                        connreq->racr_timeout, RANAL_MIN_TIMEOUT);
                 return -EPROTO;
         }
-        
-        for (i = 0; i < kranal_data.kra_ndevs; i++)
-                if (connreq->racr_devid == 
-                    kranal_data.kra_devices[i]->rad_id)
-                        break;
 
-        if (i == kranal_data.kra_ndevs) {
-                CERROR("Can't match device %d\n", connreq->racr_devid);
-                return -ENODEV;
+        return 0;
+}
+
+int
+kranal_close_stale_conns_locked (kra_peer_t *peer, kra_conn_t *newconn)
+{
+        kra_conn_t         *conn;
+        cfs_list_t         *ctmp;
+        cfs_list_t         *cnxt;
+        int                 loopback;
+        int                 count = 0;
+
+        loopback = peer->rap_nid == kranal_data.kra_ni->ni_nid;
+
+        cfs_list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
+                conn = cfs_list_entry(ctmp, kra_conn_t, rac_list);
+
+                if (conn == newconn)
+                        continue;
+
+                if (conn->rac_peerstamp != newconn->rac_peerstamp) {
+                        CDEBUG(D_NET, "Closing stale conn nid: %s "
+                               " peerstamp:"LPX64"("LPX64")\n", 
+                               libcfs_nid2str(peer->rap_nid),
+                               conn->rac_peerstamp, newconn->rac_peerstamp);
+                        LASSERT (conn->rac_peerstamp < newconn->rac_peerstamp);
+                        count++;
+                        kranal_close_conn_locked(conn, -ESTALE);
+                        continue;
+                }
+
+                if (conn->rac_device != newconn->rac_device)
+                        continue;
+
+                if (loopback &&
+                    newconn->rac_my_connstamp == conn->rac_peer_connstamp &&
+                    newconn->rac_peer_connstamp == conn->rac_my_connstamp)
+                        continue;
+
+                LASSERT (conn->rac_peer_connstamp < newconn->rac_peer_connstamp);
+
+                CDEBUG(D_NET, "Closing stale conn nid: %s"
+                       " connstamp:"LPX64"("LPX64")\n", 
+                       libcfs_nid2str(peer->rap_nid),
+                       conn->rac_peer_connstamp, newconn->rac_peer_connstamp);
+
+                count++;
+                kranal_close_conn_locked(conn, -ESTALE);
         }
 
-       return 0;
+        return count;
 }
 
 int
-kranal_conn_isdup_locked(kranal_peer_t *peer, __u64 incarnation)
+kranal_conn_isdup_locked(kra_peer_t *peer, kra_conn_t *newconn)
 {
-       kra_conn_t       *conn;
-       struct list_head *tmp;
-        int               loopback = 0;
+        kra_conn_t       *conn;
+        cfs_list_t       *tmp;
+        int               loopback;
 
-       list_for_each(tmp, &peer->rap_conns) {
-               conn = list_entry(tmp, kra_conn_t, rac_list);
+        loopback = peer->rap_nid == kranal_data.kra_ni->ni_nid;
 
-                if (conn->rac_incarnation < incarnation) {
-                        /* Conns with an older incarnation get culled later */
+        cfs_list_for_each(tmp, &peer->rap_conns) {
+                conn = cfs_list_entry(tmp, kra_conn_t, rac_list);
+
+                /* 'newconn' is from an earlier version of 'peer'!!! */
+                if (newconn->rac_peerstamp < conn->rac_peerstamp)
+                        return 1;
+
+                /* 'conn' is from an earlier version of 'peer': it will be
+                 * removed when we cull stale conns later on... */
+                if (newconn->rac_peerstamp > conn->rac_peerstamp)
                         continue;
-                }
 
-                if (!loopback &&
-                    conn->rac_incarnation == incarnation &&
-                    peer->rap_nid == kranal_lib.libnal_ni.ni_pid.nid) {
-                        /* loopback creates 2 conns */
-                        loopback = 1;
+                /* Different devices are OK */
+                if (conn->rac_device != newconn->rac_device)
                         continue;
-                }
 
-                return 1;
-       }
+                /* It's me connecting to myself */
+                if (loopback &&
+                    newconn->rac_my_connstamp == conn->rac_peer_connstamp &&
+                    newconn->rac_peer_connstamp == conn->rac_my_connstamp)
+                        continue;
 
-       return 0;
+                /* 'newconn' is an earlier connection from 'peer'!!! */
+                if (newconn->rac_peer_connstamp < conn->rac_peer_connstamp)
+                        return 2;
+
+                /* 'conn' is an earlier connection from 'peer': it will be
+                 * removed when we cull stale conns later on... */
+                if (newconn->rac_peer_connstamp > conn->rac_peer_connstamp)
+                        continue;
+
+                /* 'newconn' has the SAME connection stamp; 'peer' isn't
+                 * playing the game... */
+                return 3;
+        }
+
+        return 0;
 }
 
 void
@@ -318,86 +280,73 @@ kranal_set_conn_uniqueness (kra_conn_t *conn)
 {
         unsigned long  flags;
 
-        write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+       write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
-        conn->rac_my_incarnation = kranal_data.kra_next_incarnation++;
+        conn->rac_my_connstamp = kranal_data.kra_connstamp++;
 
         do {    /* allocate a unique cqid */
                 conn->rac_cqid = kranal_data.kra_next_cqid++;
-        } while (kranal_cqid2conn_locked(conn->rac_cqid) != NULL)
-        
+        } while (kranal_cqid2conn_locked(conn->rac_cqid) != NULL);
 
-        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+       write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 }
 
 int
-kranal_alloc_conn(kra_conn_t **connp, kra_device_t *dev)
+kranal_create_conn(kra_conn_t **connp, kra_device_t *dev)
 {
-       kra_conn_t    *conn;
+        kra_conn_t    *conn;
         RAP_RETURN     rrc;
 
-        LASSERT (!in_interrupt());
-       PORTAL_ALLOC(conn, sizeof(*conn));
+        LASSERT (!cfs_in_interrupt());
+        LIBCFS_ALLOC(conn, sizeof(*conn));
 
-       if (conn == NULL)
-               return -ENOMEM;
+        if (conn == NULL)
+                return -ENOMEM;
 
-       memset(conn, 0, sizeof(*conn));
-        conn->rac_cqid = cqid;
-       atomic_set(&conn->rac_refcount, 1);
-       INIT_LIST_HEAD(&conn->rac_list);
-       INIT_LIST_HEAD(&conn->rac_hashlist);
-       INIT_LIST_HEAD(&conn->rac_fmaq);
-       INIT_LIST_HEAD(&conn->rac_rdmaq);
-       INIT_LIST_HEAD(&conn->rac_replyq);
+        memset(conn, 0, sizeof(*conn));
+        cfs_atomic_set(&conn->rac_refcount, 1);
+        CFS_INIT_LIST_HEAD(&conn->rac_list);
+        CFS_INIT_LIST_HEAD(&conn->rac_hashlist);
+        CFS_INIT_LIST_HEAD(&conn->rac_schedlist);
+        CFS_INIT_LIST_HEAD(&conn->rac_fmaq);
+        CFS_INIT_LIST_HEAD(&conn->rac_rdmaq);
+        CFS_INIT_LIST_HEAD(&conn->rac_replyq);
        spin_lock_init(&conn->rac_lock);
 
-        conn->rac_timeout = MAX(kranal_tunables.kra_timeout, RANAL_MIN_TIMEOUT);
+        kranal_set_conn_uniqueness(conn);
+
+        conn->rac_device = dev;
+        conn->rac_timeout = MAX(*kranal_tunables.kra_timeout, RANAL_MIN_TIMEOUT);
         kranal_update_reaper_timeout(conn->rac_timeout);
 
-        rrc = RapkCreateRi(dev->rad_handle, cqid, dev->rad_ptag,
-                           dev->rad_rdma_cq, dev->rad_fma_cq,
+        rrc = RapkCreateRi(dev->rad_handle, conn->rac_cqid,
                            &conn->rac_rihandle);
         if (rrc != RAP_SUCCESS) {
                 CERROR("RapkCreateRi failed: %d\n", rrc);
-                PORTAL_FREE(conn, sizeof(*conn));
+                LIBCFS_FREE(conn, sizeof(*conn));
                 return -ENETDOWN;
         }
 
-        atomic_inc(&kranal_data.kra_nconns);
-       *connp = conn;
-       return 0;
+        cfs_atomic_inc(&kranal_data.kra_nconns);
+        *connp = conn;
+        return 0;
 }
 
 void
-__kranal_conn_decref(kra_conn_t *conn) 
+kranal_destroy_conn(kra_conn_t *conn)
 {
-        kra_tx_t          *tx;
         RAP_RETURN         rrc;
 
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
         LASSERT (!conn->rac_scheduled);
-        LASSERT (list_empty(&conn->rac_list));
-        LASSERT (list_empty(&conn->rac_hashlist));
-        LASSERT (atomic_read(&conn->rac_refcount) == 0);
+        LASSERT (cfs_list_empty(&conn->rac_list));
+        LASSERT (cfs_list_empty(&conn->rac_hashlist));
+        LASSERT (cfs_list_empty(&conn->rac_schedlist));
+        LASSERT (cfs_atomic_read(&conn->rac_refcount) == 0);
+        LASSERT (cfs_list_empty(&conn->rac_fmaq));
+        LASSERT (cfs_list_empty(&conn->rac_rdmaq));
+        LASSERT (cfs_list_empty(&conn->rac_replyq));
 
-        while (!list_empty(&conn->rac_fmaq)) {
-                tx = list_entry(conn->rac_fmaq.next, kra_tx_t, tx_list);
-                
-                list_del(&tx->tx_list);
-                kranal_tx_done(tx, -ECONNABORTED);
-        }
-        
-        /* We may not destroy this connection while it has RDMAs outstanding */
-        LASSERT (list_empty(&conn->rac_rdmaq));
-
-        while (!list_empty(&conn->rac_replyq)) {
-                tx = list_entry(conn->rac_replyq.next, kra_tx_t, tx_list);
-                
-                list_del(&tx->tx_list);
-                kranal_tx_done(tx, -ECONNABORTED);
-        }
-        
         rrc = RapkDestroyRi(conn->rac_device->rad_handle,
                             conn->rac_rihandle);
         LASSERT (rrc == RAP_SUCCESS);
@@ -405,25 +354,27 @@ __kranal_conn_decref(kra_conn_t *conn)
         if (conn->rac_peer != NULL)
                 kranal_peer_decref(conn->rac_peer);
 
-       PORTAL_FREE(conn, sizeof(*conn));
-        atomic_dec(&kranal_data.kra_nconns);
+        LIBCFS_FREE(conn, sizeof(*conn));
+        cfs_atomic_dec(&kranal_data.kra_nconns);
 }
 
 void
 kranal_terminate_conn_locked (kra_conn_t *conn)
 {
-        kra_peer_t     *peer = conn->rac_peer;
-
-        LASSERT (!in_interrupt());
-        LASSERT (conn->rac_closing);
-        LASSERT (!list_empty(&conn->rac_hashlist));
-        LASSERT (list_empty(&conn->rac_list));
+        LASSERT (!cfs_in_interrupt());
+        LASSERT (conn->rac_state == RANAL_CONN_CLOSING);
+        LASSERT (!cfs_list_empty(&conn->rac_hashlist));
+        LASSERT (cfs_list_empty(&conn->rac_list));
 
-        /* Remove from conn hash table (no new callbacks) */
-        list_del_init(&conn->rac_hashlist);
+        /* Remove from conn hash table: no new callbacks */
+        cfs_list_del_init(&conn->rac_hashlist);
         kranal_conn_decref(conn);
 
-        /* Conn is now just waiting for remaining refs to go */
+        conn->rac_state = RANAL_CONN_CLOSED;
+
+        /* schedule to clear out all uncompleted comms in context of dev's
+         * scheduler */
+        kranal_schedule_conn(conn);
 }
 
 void
@@ -431,24 +382,32 @@ kranal_close_conn_locked (kra_conn_t *conn, int error)
 {
         kra_peer_t        *peer = conn->rac_peer;
 
-        CDEBUG(error == 0 ? D_NET : D_ERROR,
-              "closing conn to "LPX64": error %d\n", peer->rap_nid, error);
+        CDEBUG_LIMIT(error == 0 ? D_NET : D_NETERROR,
+                     "closing conn to %s: error %d\n",
+                     libcfs_nid2str(peer->rap_nid), error);
 
-        LASSERT (!in_interrupt());
-        LASSERT (!conn->rac_closing);
-        LASSERT (!list_empty(&conn->rac_hashlist));
-        LASSERT (!list_empty(&conn->rac_list));
+        LASSERT (!cfs_in_interrupt());
+        LASSERT (conn->rac_state == RANAL_CONN_ESTABLISHED);
+        LASSERT (!cfs_list_empty(&conn->rac_hashlist));
+        LASSERT (!cfs_list_empty(&conn->rac_list));
 
-        list_del_init(&conn->rac_list);
+        cfs_list_del_init(&conn->rac_list);
 
-        if (list_empty(&peer->rap_conns) &&
+        if (cfs_list_empty(&peer->rap_conns) &&
             peer->rap_persistence == 0) {
                 /* Non-persistent peer with no more conns... */
                 kranal_unlink_peer_locked(peer);
         }
 
-        conn->rac_closing = 1;
-        kranal_schedule_conn(conn);
+        /* Reset RX timeout to ensure we wait for an incoming CLOSE for the
+         * full timeout.  If we get a CLOSE we know the peer has stopped all
+         * RDMA.  Otherwise if we wait for the full timeout we can also be sure
+         * all RDMA has stopped. */
+        conn->rac_last_rx = jiffies;
+        cfs_mb();
+
+        conn->rac_state = RANAL_CONN_CLOSING;
+        kranal_schedule_conn(conn);             /* schedule sending CLOSE */
 
         kranal_conn_decref(conn);               /* lose peer's ref */
 }
@@ -457,186 +416,177 @@ void
 kranal_close_conn (kra_conn_t *conn, int error)
 {
         unsigned long    flags;
-        
 
-        write_lock_irqsave(&kranal_data.kra_global_lock, flags);
-        
-        if (!conn->rac_closing)
+
+       write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+
+        if (conn->rac_state == RANAL_CONN_ESTABLISHED)
                 kranal_close_conn_locked(conn, error);
-        
-        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+
+       write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+}
+
+int
+kranal_set_conn_params(kra_conn_t *conn, kra_connreq_t *connreq,
+                       __u32 peer_ip, int peer_port)
+{
+       kra_device_t  *dev = conn->rac_device;
+       unsigned long  flags;
+       RAP_RETURN     rrc;
+
+       /* CAVEAT EMPTOR: we're really overloading rac_last_tx + rac_keepalive
+        * to do RapkCompleteSync() timekeeping (see kibnal_scheduler). */
+       conn->rac_last_tx = jiffies;
+       conn->rac_keepalive = 0;
+
+       rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
+       if (rrc != RAP_SUCCESS) {
+               CERROR("Error setting riparams from %u.%u.%u.%u/%d: %d\n",
+                      HIPQUAD(peer_ip), peer_port, rrc);
+               return -ECONNABORTED;
+       }
+
+       /* Schedule conn on rad_new_conns */
+       kranal_conn_addref(conn);
+       spin_lock_irqsave(&dev->rad_lock, flags);
+       cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns);
+       wake_up(&dev->rad_waitq);
+       spin_unlock_irqrestore(&dev->rad_lock, flags);
+
+       rrc = RapkWaitToConnect(conn->rac_rihandle);
+       if (rrc != RAP_SUCCESS) {
+               CERROR("Error waiting to connect to %u.%u.%u.%u/%d: %d\n",
+                      HIPQUAD(peer_ip), peer_port, rrc);
+               return -ECONNABORTED;
+       }
+
+       /* Scheduler doesn't touch conn apart from to deschedule and decref it
+        * after RapkCompleteSync() return success, so conn is all mine */
+
+       conn->rac_peerstamp = connreq->racr_peerstamp;
+       conn->rac_peer_connstamp = connreq->racr_connstamp;
+       conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq->racr_timeout);
+       kranal_update_reaper_timeout(conn->rac_keepalive);
+       return 0;
 }
 
 int
-kranal_passive_conn_handshake (struct socket *sock, 
-                               ptl_nid_t **peer_nidp, kra_conn_t **connp)
+kranal_passive_conn_handshake (struct socket *sock, lnet_nid_t *src_nidp,
+                               lnet_nid_t *dst_nidp, kra_conn_t **connp)
 {
-       struct sockaddr_in   addr;
-       __u32                peer_ip;
+        __u32                peer_ip;
         unsigned int         peer_port;
-       kra_connreq_t        connreq;
-       ptl_nid_t            peer_nid;
+        kra_connreq_t        rx_connreq;
+        kra_connreq_t        tx_connreq;
         kra_conn_t          *conn;
         kra_device_t        *dev;
-       RAP_RETURN           rrc;
-       int                  rc;
+        int                  rc;
         int                  i;
 
-       rc = sock->ops->getname(newsock, (struct sockaddr *)addr, &len, 2);
+        rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
         if (rc != 0) {
                 CERROR("Can't get peer's IP: %d\n", rc);
                 return rc;
         }
 
-        peer_ip = ntohl(sin.sin_addr.s_addr);
-        peer_port = ntohs(sin.sin_port);
+        rc = kranal_recv_connreq(sock, &rx_connreq, 0);
 
-        if (peer_port >= 1024) {
-                CERROR("Refusing unprivileged connection from %u.%u.%u.%u/%d\n",
-                       HIPQUAD(peer_ip), peer_port);
-                return -ECONNREFUSED;
-        }
-
-        rc = kranal_recv_connreq(sock, &connreq, 
-                                 kranal_data.kra_listener_timeout);
-        if (rc != 0) {
-                CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n", 
+        if (rc < 0) {
+                CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n",
                        HIPQUAD(peer_ip), peer_port, rc);
                 return rc;
         }
 
-        peer_nid = connreq.racr_nid;
-        LASSERT (peer_nid != PTL_NID_ANY);
+        if (rc > 0) {
+                /* Request from "new" peer: send reply with my MAGIC/VERSION to
+                 * tell her I'm old... */
+                kranal_pack_connreq(&tx_connreq, NULL, LNET_NID_ANY);
+
+                rc = libcfs_sock_write(sock, &tx_connreq, sizeof(tx_connreq),
+                                       lnet_acceptor_timeout());
+                if (rc != 0)
+                        CERROR("Can't tx stub connreq to %u.%u.%u.%u/%d: %d\n",
+                               HIPQUAD(peer_ip), peer_port, rc);
+
+                return -EPROTO;
+        }
 
         for (i = 0;;i++) {
-                LASSERT(i < kranal_data.kra_ndevs);
+                if (i == kranal_data.kra_ndevs) {
+                        CERROR("Can't match dev %d from %u.%u.%u.%u/%d\n",
+                               rx_connreq.racr_devid, HIPQUAD(peer_ip), peer_port);
+                        return -ENODEV;
+                }
                 dev = &kranal_data.kra_devices[i];
-                if (dev->rad_id == connreq->racr_devid)
+                if (dev->rad_id == rx_connreq.racr_devid)
                         break;
         }
 
-        rc = kranal_alloc_conn(&conn, dev,(__u32)(peer_nid & 0xffffffff));
+        rc = kranal_create_conn(&conn, dev);
         if (rc != 0)
                 return rc;
 
-        conn->rac_peer_incarnation = connreq.racr_incarnation;
-        conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq.racr_timeout);
-        kranal_update_reaper_timeout(conn->rac_keepalive);
-        
-        rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
-        if (rrc != RAP_SUCCESS) {
-                CERROR("Can't set riparams for "LPX64": %d\n", peer_nid, rrc);
+        kranal_pack_connreq(&tx_connreq, conn, rx_connreq.racr_srcnid);
+
+        rc = libcfs_sock_write(sock, &tx_connreq, sizeof(tx_connreq),
+                               lnet_acceptor_timeout());
+        if (rc != 0) {
+                CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n",
+                       HIPQUAD(peer_ip), peer_port, rc);
                 kranal_conn_decref(conn);
-                return -EPROTO;
+                return rc;
         }
 
-        kranal_pack_connreq(&connreq, conn);
-
-        rc = kranal_sock_write(sock, &connreq, sizeof(connreq));
+        rc = kranal_set_conn_params(conn, &rx_connreq, peer_ip, peer_port);
         if (rc != 0) {
-                CERROR("Can't tx connreq to %u.%u.%u.%u/%p: %d\n", 
-                       HIPQUAD(peer_ip), peer_port, rc);
                 kranal_conn_decref(conn);
                 return rc;
         }
 
         *connp = conn;
-        *peer_nidp = peer_nid;
+        *src_nidp = rx_connreq.racr_srcnid;
+        *dst_nidp = rx_connreq.racr_dstnid;
         return 0;
 }
 
 int
-ranal_connect_sock(kra_peer_t *peer, struct socket **sockp)
-{
-        struct sockaddr_in  locaddr;
-        struct sockaddr_in  srvaddr;
-        struct socket      *sock;
-        unsigned int        port;
-        int                 rc;
-        int                 option;
-        mm_segment_t        oldmm = get_fs();
-        struct timeval      tv;
-
-        for (port = 1023; port >= 512; port--) {
-
-                memset(&locaddr, 0, sizeof(locaddr)); 
-                locaddr.sin_family      = AF_INET; 
-                locaddr.sin_port        = htons(port);
-                locaddr.sin_addr.s_addr = htonl(INADDR_ANY);
-
-                memset (&srvaddr, 0, sizeof (srvaddr));
-                srvaddr.sin_family      = AF_INET;
-                srvaddr.sin_port        = htons (peer->rap_port);
-                srvaddr.sin_addr.s_addr = htonl (peer->rap_ip);
-
-                rc = kranal_create_sock(&sock);
-                if (rc != 0)
-                        return rc;
-
-                rc = sock->ops->bind(sock,
-                                     (struct sockaddr *)&locaddr, sizeof(locaddr));
-                if (rc != 0) {
-                        sock_release(sock);
-                        
-                        if (rc == -EADDRINUSE) {
-                                CDEBUG(D_NET, "Port %d already in use\n", port);
-                                continue;
-                        }
-
-                        CERROR("Can't bind to reserved port %d: %d\n", port, rc);
-                        return rc;
-                }
-
-                rc = sock->ops->connect(sock,
-                                        (struct sockaddr *)&srvaddr, sizeof(srvaddr),
-                                        0);
-                if (rc == 0) {
-                        *sockp = sock;
-                        return 0;
-                }
-                
-                sock_release(sock);
-
-                if (rc != -EADDRNOTAVAIL) {
-                        CERROR("Can't connect port %d to %u.%u.%u.%u/%d: %d\n",
-                               port, HIPQUAD(peer->rap_ip), peer->rap_port, rc);
-                        return rc;
-                }
-                
-                CDEBUG(D_NET, "Port %d not available for %u.%u.%u.%u/%d\n", 
-                       port, HIPQUAD(peer->rap_ip), peer->rap_port);
-        }
-}
-
-
-int
-kranal_active_conn_handshake(kra_peer_t *peer, kra_conn_t **connp)
+kranal_active_conn_handshake(kra_peer_t *peer,
+                             lnet_nid_t *dst_nidp, kra_conn_t **connp)
 {
-       kra_connreq_t       connreq;
+        kra_connreq_t       connreq;
         kra_conn_t         *conn;
         kra_device_t       *dev;
         struct socket      *sock;
-        __u32               id32;
-       RAP_RETURN          rrc;
-       int                 rc;
+        int                 rc;
+        unsigned int        idx;
 
-        id32 = (peer_nid & 0xffffffff);
-        dev = &kranal_data.kra_devices[id32 % kranal_data.kra_ndevs];
+        /* spread connections over all devices using both peer NIDs to ensure
+         * all nids use all devices */
+        idx = peer->rap_nid + kranal_data.kra_ni->ni_nid;
+        dev = &kranal_data.kra_devices[idx % kranal_data.kra_ndevs];
 
-        rc = kranal_alloc_conn(&conn, dev, id32);
+        rc = kranal_create_conn(&conn, dev);
         if (rc != 0)
                 return rc;
 
-        kranal_pack_connreq(&connreq, conn);
-        
-        memset(&dstaddr, 0, sizeof(addr));
-        dstaddr.sin_family      = AF_INET;
-        dstaddr.sin_port        = htons(peer->rap_port);
-        dstaddr.sin_addr.s_addr = htonl(peer->rap_ip);
+        kranal_pack_connreq(&connreq, conn, peer->rap_nid);
 
-        memset(&srcaddr, 0, sizeof(addr));
+        if (the_lnet.ln_testprotocompat != 0) {
+                /* single-shot proto test */
+                LNET_LOCK();
+                if ((the_lnet.ln_testprotocompat & 1) != 0) {
+                        connreq.racr_version++;
+                        the_lnet.ln_testprotocompat &= ~1;
+                }
+                if ((the_lnet.ln_testprotocompat & 2) != 0) {
+                        connreq.racr_magic = LNET_PROTO_MAGIC;
+                        the_lnet.ln_testprotocompat &= ~2;
+                }
+                LNET_UNLOCK();
+        }
 
-        rc = ranal_connect_sock(peer, &sock);
+        rc = lnet_connect(&sock, peer->rap_nid,
+                         0, peer->rap_ip, peer->rap_port);
         if (rc != 0)
                 goto failed_0;
 
@@ -644,172 +594,205 @@ kranal_active_conn_handshake(kra_peer_t *peer, kra_conn_t **connp)
          * immediately after accepting a connection, so we connect and then
          * send immediately. */
 
-        rc = kranal_sock_write(sock, &connreq, sizeof(connreq));
+        rc = libcfs_sock_write(sock, &connreq, sizeof(connreq),
+                               lnet_acceptor_timeout());
         if (rc != 0) {
-                CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n", 
+                CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n",
                        HIPQUAD(peer->rap_ip), peer->rap_port, rc);
-                goto failed_1;
+                goto failed_2;
         }
 
-        rc = kranal_recv_connreq(sock, &connreq, kranal_data.kra_timeout);
+        rc = kranal_recv_connreq(sock, &connreq, 1);
         if (rc != 0) {
-                CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n", 
+                CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n",
                        HIPQUAD(peer->rap_ip), peer->rap_port, rc);
-                goto failed_1;
+                goto failed_2;
         }
 
-        sock_release(sock);
+        libcfs_sock_release(sock);
         rc = -EPROTO;
 
-        if (connreq.racr_nid != peer->rap_nid) {
-                CERROR("Unexpected nid from %u.%u.%u.%u/%d: "
-                       "received "LPX64" expected "LPX64"\n",
-                       HIPQUAD(peer->rap_ip), peer->rap_port, 
-                       connreq.racr_nid, peer->rap_nid);
-                goto failed_0;
+        if (connreq.racr_srcnid != peer->rap_nid) {
+                CERROR("Unexpected srcnid from %u.%u.%u.%u/%d: "
+                       "received %s expected %s\n",
+                       HIPQUAD(peer->rap_ip), peer->rap_port,
+                       libcfs_nid2str(connreq.racr_srcnid), 
+                       libcfs_nid2str(peer->rap_nid));
+                goto failed_1;
         }
 
         if (connreq.racr_devid != dev->rad_id) {
                 CERROR("Unexpected device id from %u.%u.%u.%u/%d: "
                        "received %d expected %d\n",
-                       HIPQUAD(peer->rap_ip), peer->rap_port, 
+                       HIPQUAD(peer->rap_ip), peer->rap_port,
                        connreq.racr_devid, dev->rad_id);
-                goto failed_0;
+                goto failed_1;
         }
 
-        conn->rac_peer_incarnation = connreq.racr_incarnation; 
-        conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq.racr_timeout);
-        kranal_update_reaper_timeout(conn->rac_keepalive);
-
-        rc = -ENETDOWN;
-        rrc = RapkSetRiParams(conn->rac_rihandle,
-                              &connreq->racr_riparams);
-        if (rrc != RAP_SUCCESS) {
-                CERROR("Can't set riparams for "LPX64": %d\n",
-                       peer_nid, rrc);
-                goto failed_0;
-        }
+        rc = kranal_set_conn_params(conn, &connreq,
+                                    peer->rap_ip, peer->rap_port);
+        if (rc != 0)
+                goto failed_1;
 
         *connp = conn;
-       return 0;
+        *dst_nidp = connreq.racr_dstnid;
+        return 0;
 
+ failed_2:
+        libcfs_sock_release(sock);
  failed_1:
-        release_sock(sock);
+        lnet_connect_console_error(rc, peer->rap_nid,
+                                  peer->rap_ip, peer->rap_port);
  failed_0:
         kranal_conn_decref(conn);
         return rc;
 }
 
 int
-kranal_conn_handshake (struct socket *sock, kranal_peer_t *peer)
+kranal_conn_handshake (struct socket *sock, kra_peer_t *peer)
 {
-        kranal_peer_t     *peer2;
-       ptl_nid_t          peer_nid;
-       unsigned long      flags;
-        unsigned long      timeout;
-       kra_conn_t        *conn;
-       int                rc;
+        kra_peer_t        *peer2;
+        kra_tx_t          *tx;
+        lnet_nid_t         peer_nid;
+        lnet_nid_t         dst_nid;
+        unsigned long      flags;
+        kra_conn_t        *conn;
+        int                rc;
         int                nstale;
+        int                new_peer = 0;
 
-        if (sock != NULL) {
-                /* passive: listener accepted sock */
-                LASSERT (peer == NULL);
+        if (sock == NULL) {
+                /* active: connd wants to connect to 'peer' */
+                LASSERT (peer != NULL);
+                LASSERT (peer->rap_connecting);
 
-                rc = kranal_passive_conn_handshake(sock, &peer_nid, &conn);
+                rc = kranal_active_conn_handshake(peer, &dst_nid, &conn);
                 if (rc != 0)
                         return rc;
 
-               /* assume this is a new peer */
-               peer = kranal_create_peer(peer_nid);
-               if (peer == NULL) {
-                       CERROR("Can't allocate peer for "LPX64"\n", peer_nid);
-                        kranal_conn_decref(conn);
-                        return -ENOMEM;
-               }
-
                write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
-               peer2 = kranal_find_peer_locked(peer_nid);
-               if (peer2 == NULL) {
-                       /* peer table takes my initial ref on peer */
-                       list_add_tail(&peer->rap_list,
-                                     kranal_nid2peerlist(peer_nid));
-               } else {
-                       /* peer_nid already in the peer table */
-                       kranal_peer_decref(peer);
-                       peer = peer2;
-               }
-                /* NB I may now have a non-persistent peer in the peer
-                 * table with no connections: I can't drop the global lock
-                 * until I've given it a connection or removed it, and when
-                 * I do 'peer' can disappear under me. */
+                if (!kranal_peer_active(peer)) {
+                        /* raced with peer getting unlinked */
+                       write_unlock_irqrestore(&kranal_data. \
+                                                    kra_global_lock,
+                                                    flags);
+                        kranal_conn_decref(conn);
+                        return -ESTALE;
+                }
+
+                peer_nid = peer->rap_nid;
         } else {
-                /* active: connd wants to connect to peer */
-                LASSERT (peer != NULL);
-                LASSERT (peer->rap_connecting);
-                
-                rc = kranal_active_conn_handshake(peer, &conn);
+                /* passive: listener accepted 'sock' */
+                LASSERT (peer == NULL);
+
+                rc = kranal_passive_conn_handshake(sock, &peer_nid,
+                                                   &dst_nid, &conn);
                 if (rc != 0)
                         return rc;
 
+                /* assume this is a new peer */
+                rc = kranal_create_peer(&peer, peer_nid);
+                if (rc != 0) {
+                        CERROR("Can't create conn for %s\n", 
+                               libcfs_nid2str(peer_nid));
+                        kranal_conn_decref(conn);
+                        return -ENOMEM;
+                }
+
                write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
-               if (!kranal_peer_active(peer)) {
-                       /* raced with peer getting unlinked */
-                        write_unlock_irqrestore(&kranal_data.kra_global_lock, 
-                                                flags);
-                        kranal_conn_decref(conn);
-                       return ESTALE;
-               }
-       }
+                peer2 = kranal_find_peer_locked(peer_nid);
+                if (peer2 == NULL) {
+                        new_peer = 1;
+                } else {
+                        /* peer_nid already in the peer table */
+                        kranal_peer_decref(peer);
+                        peer = peer2;
+                }
+        }
 
-       LASSERT (kranal_peer_active(peer));     /* peer is in the peer table */
-        peer_nid = peer->rap_nid;
-
-        /* Refuse to duplicate an existing connection (both sides might try
-         * to connect at once).  NB we return success!  We _do_ have a
-         * connection (so we don't need to remove the peer from the peer
-         * table) and we _don't_ have any blocked txs to complete */
-       if (kranal_conn_isdup_locked(peer, conn->rac_incarnation)) {
-                LASSERT (!list_empty(&peer->rap_conns));
-                LASSERT (list_empty(&peer->rap_tx_queue));
-                write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
-               CWARN("Not creating duplicate connection to "LPX64"\n",
-                      peer_nid);
-                kranal_conn_decref(conn);
-                return 0;
-       }
+        LASSERT ((!new_peer) != (!kranal_peer_active(peer)));
+
+        /* Refuse connection if peer thinks we are a different NID.  We check
+         * this while holding the global lock, to synch with connection
+         * destruction on NID change. */
+        if (kranal_data.kra_ni->ni_nid != dst_nid) {
+               write_unlock_irqrestore(&kranal_data.kra_global_lock,
+                                            flags);
+
+                CERROR("Stale/bad connection with %s: dst_nid %s, expected %s\n",
+                       libcfs_nid2str(peer_nid), libcfs_nid2str(dst_nid), 
+                       libcfs_nid2str(kranal_data.kra_ni->ni_nid));
+                rc = -ESTALE;
+                goto failed;
+        }
+
+        /* Refuse to duplicate an existing connection (both sides might try to
+         * connect at once).  NB we return success!  We _are_ connected so we
+         * _don't_ have any blocked txs to complete with failure. */
+        rc = kranal_conn_isdup_locked(peer, conn);
+        if (rc != 0) {
+                LASSERT (!cfs_list_empty(&peer->rap_conns));
+                LASSERT (cfs_list_empty(&peer->rap_tx_queue));
+               write_unlock_irqrestore(&kranal_data.kra_global_lock,
+                                            flags);
+                CWARN("Not creating duplicate connection to %s: %d\n",
+                      libcfs_nid2str(peer_nid), rc);
+                rc = 0;
+                goto failed;
+        }
+
+        if (new_peer) {
+                /* peer table takes my ref on the new peer */
+                cfs_list_add_tail(&peer->rap_list,
+                                  kranal_nid2peerlist(peer_nid));
+        }
+
+        /* initialise timestamps before reaper looks at them */
+        conn->rac_last_tx = conn->rac_last_rx = jiffies;
 
-       kranal_peer_addref(peer);               /* +1 ref for conn */
-       conn->rac_peer = peer;
-       list_add_tail(&conn->rac_list, &peer->rap_conns);
+        kranal_peer_addref(peer);               /* +1 ref for conn */
+        conn->rac_peer = peer;
+        cfs_list_add_tail(&conn->rac_list, &peer->rap_conns);
 
         kranal_conn_addref(conn);               /* +1 ref for conn table */
-        list_add_tail(&conn->rac_hashlist,
-                      kranal_cqid2connlist(conn->rac_cqid));
+        cfs_list_add_tail(&conn->rac_hashlist,
+                          kranal_cqid2connlist(conn->rac_cqid));
 
         /* Schedule all packets blocking for a connection */
-        while (!list_empty(&peer->rap_tx_queue)) {
-                tx = list_entry(&peer->rap_tx_queue.next,
-                                kra_tx_t, tx_list);
+        while (!cfs_list_empty(&peer->rap_tx_queue)) {
+                tx = cfs_list_entry(peer->rap_tx_queue.next,
+                                    kra_tx_t, tx_list);
 
-                list_del(&tx->tx_list);
-                kranal_queue_tx_locked(tx, conn);
+                cfs_list_del(&tx->tx_list);
+                kranal_post_fma(conn, tx);
         }
 
-       nstale = kranal_close_stale_conns_locked(peer, conn->rac_incarnation);
+        nstale = kranal_close_stale_conns_locked(peer, conn);
 
        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 
         /* CAVEAT EMPTOR: passive peer can disappear NOW */
 
         if (nstale != 0)
-                CWARN("Closed %d stale conns to "LPX64"\n", nstale, peer_nid);
+                CWARN("Closed %d stale conns to %s\n", nstale, 
+                      libcfs_nid2str(peer_nid));
+
+        CWARN("New connection to %s on devid[%d] = %d\n",
+               libcfs_nid2str(peer_nid), 
+               conn->rac_device->rad_idx, conn->rac_device->rad_id);
 
         /* Ensure conn gets checked.  Transmits may have been queued and an
          * FMA event may have happened before it got in the cq hash table */
         kranal_schedule_conn(conn);
-       return 0;
+        return 0;
+
+ failed:
+        if (new_peer)
+                kranal_peer_decref(peer);
+        kranal_conn_decref(conn);
+        return rc;
 }
 
 void
@@ -817,14 +800,20 @@ kranal_connect (kra_peer_t *peer)
 {
         kra_tx_t          *tx;
         unsigned long      flags;
-        struct list_head   zombies;
+        cfs_list_t         zombies;
         int                rc;
 
         LASSERT (peer->rap_connecting);
 
+        CDEBUG(D_NET, "About to handshake %s\n", 
+               libcfs_nid2str(peer->rap_nid));
+
         rc = kranal_conn_handshake(NULL, peer);
 
-        write_lock_irqqsave(&kranal_data.kra_global_lock, flags);
+        CDEBUG(D_NET, "Done handshake %s:%d \n", 
+               libcfs_nid2str(peer->rap_nid), rc);
+
+       write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
         LASSERT (peer->rap_connecting);
         peer->rap_connecting = 0;
@@ -832,346 +821,185 @@ kranal_connect (kra_peer_t *peer)
         if (rc == 0) {
                 /* kranal_conn_handshake() queues blocked txs immediately on
                  * success to avoid messages jumping the queue */
-                LASSERT (list_empty(&peer->rap_tx_queue));
+                LASSERT (cfs_list_empty(&peer->rap_tx_queue));
 
-                /* reset reconnection timeouts */
-                peer->rap_reconnect_interval = RANAL_MIN_RECONNECT_INTERVAL;
-                peer->rap_reconnect_time = CURRENT_TIME;
+                peer->rap_reconnect_interval = 0; /* OK to reconnect at any time */
 
-                write_unlock_irqrestore(&kranal-data.kra_global_lock, flags);
+               write_unlock_irqrestore(&kranal_data.kra_global_lock,
+                                            flags);
                 return;
         }
 
-        LASSERT (peer->rap_reconnect_interval != 0);
-        peer->rap_reconnect_time = CURRENT_TIME + peer->rap_reconnect_interval;
-        peer->rap_reconnect_interval = MAX(RANAL_MAX_RECONNECT_INTERVAL,
-                                           1 * peer->rap_reconnect_interval);
+        peer->rap_reconnect_interval *= 2;
+        peer->rap_reconnect_interval =
+                MAX(peer->rap_reconnect_interval,
+                    *kranal_tunables.kra_min_reconnect_interval);
+        peer->rap_reconnect_interval =
+                MIN(peer->rap_reconnect_interval,
+                    *kranal_tunables.kra_max_reconnect_interval);
+
+       peer->rap_reconnect_time = jiffies + peer->rap_reconnect_interval * HZ;
 
         /* Grab all blocked packets while we have the global lock */
-        list_add(&zombies, &peer->rap_tx_queue);
-        list_del_init(&peer->rap_tx_queue);
+        cfs_list_add(&zombies, &peer->rap_tx_queue);
+        cfs_list_del_init(&peer->rap_tx_queue);
 
-        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+       write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 
-        if (list_empty(&zombies))
+        if (cfs_list_empty(&zombies))
                 return;
 
-        CWARN("Dropping packets for "LPX64": connection failed\n",
-              peer->rap_nid);
+        CNETERR("Dropping packets for %s: connection failed\n",
+                libcfs_nid2str(peer->rap_nid));
 
         do {
-                tx = list_entry(zombies.next, kra_tx_t, tx_list);
+                tx = cfs_list_entry(zombies.next, kra_tx_t, tx_list);
 
-                list_del(&tx->tx_list);
+                cfs_list_del(&tx->tx_list);
                 kranal_tx_done(tx, -EHOSTUNREACH);
 
-        } while (!list_empty(&zombies));
+        } while (!cfs_list_empty(&zombies));
 }
 
-int
-kranal_listener(void *arg)
+void
+kranal_free_acceptsock (kra_acceptsock_t *ras)
 {
-       struct sockaddr_in addr;
-       wait_queue_t       wait;
-       struct socket     *sock;
-       struct socket     *newsock;
-       int                port;
-       int                backlog;
-       int                timeout;
-       kra_connreq_t     *connreqs;
-       char               name[16];
-
-       /* Parent thread holds kra_nid_mutex, and is, or is about to
-        * block on kra_listener_signal */
-
-       port = kra_tunables.kra_port;
-       snprintf(name, "kranal_lstn%03d", port);
-       kportal_daemonize(name);
-       kportal_blockallsigs();
-
-       init_waitqueue_entry(&wait, current);
-
-       rc = -ENOMEM;
-       PORTAL_ALLOC(connreqs, 2 * sizeof(*connreqs));
-       if (connreqs == NULL)
-               goto out_0;
-
-       rc = kranal_create_sock(&sock, port);
-       if (rc != 0)
-               goto out_1;
-
-        memset(&addr, 0, sizeof(addr));
-        addr.sin_family      = AF_INET;
-        addr.sin_port        = htons(port);
-        addr.sin_addr.s_addr = INADDR_ANY
-
-       rc = sock->ops->bind(sock, &addr, sizeof(addr));
-       if (rc != 0) {
-               CERROR("Can't bind to port %d\n", port);
-               goto out_2;
-       }
-
-       rc = sock->ops->listen(sock, kra_tunalbes.kra_backlog);
-       if (rc != 0) {
-               CERROR("Can't set listen backlog %d: %d\n", backlog, rc);
-               goto out_2;
-       }
-
-       LASSERT (kranal_data.kra_listener_sock == NULL);
-       kranal_data.kra_listener_sock = sock;
-
-       /* unblock waiting parent */
-       LASSERT (kranal_data.kra_listener_shutdown == 0);
-       up(&kranal_data.kra_listener_signal);
-
-       /* Wake me any time something happens on my socket */
-       add_wait_queue(sock->sk->sk_sleep, &wait);
-
-       while (kranal_data.kra_listener_shutdown == 0) {
-
-               newsock = sock_alloc();
-               if (newsock == NULL) {
-                       CERROR("Can't allocate new socket for accept\n");
-                       kranal_pause(HZ);
-                       continue;
-               }
-
-               set_current_state(TASK_INTERRUPTIBLE);
-
-               rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
-
-               if (rc == -EAGAIN &&
-                   kranal_data.kra_listener_shutdown == 0)
-                       schedule();
-
-               set_current_state(TASK_RUNNING);
-
-               if (rc != 0) {
-                       sock_release(newsock);
-                       if (rc != -EAGAIN) {
-                               CERROR("Accept failed: %d\n", rc);
-                               kranal_pause(HZ);
-                       }
-                       continue;
-               } 
-
-                kranal_conn_handshake(newsock, NULL);
-                sock_release(newsock);
-       }
-
-       rc = 0;
-       remove_wait_queue(sock->sk->sk_sleep, &wait);
- out_2:
-       sock_release(sock);
-       kranal_data.kra_listener_sock = NULL;
- out_1:
-       PORTAL_FREE(connreqs, 2 * sizeof(*connreqs));
- out_0:
-       /* set completion status and unblock thread waiting for me 
-        * (parent on startup failure, executioner on normal shutdown) */
-       kranal_data.kra_listener_shutdown = rc;
-       up(&kranal_data.kra_listener_signal);
-
-       return 0;
+        libcfs_sock_release(ras->ras_sock);
+        LIBCFS_FREE(ras, sizeof(*ras));
 }
 
 int
-kranal_start_listener ()
+kranal_accept (lnet_ni_t *ni, struct socket *sock)
 {
-       long           pid;
-       int            rc;
-
-        CDEBUG(D_WARNING, "Starting listener\n");
+       kra_acceptsock_t  *ras;
+       int                rc;
+       __u32              peer_ip;
+       int                peer_port;
+       unsigned long      flags;
 
-       /* Called holding kra_nid_mutex: listener stopped */
-       LASSERT (kranal_data.kra_listener_sock == NULL);
+       rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
+       LASSERT (rc == 0);                      /* we succeeded before */
 
-       kranal_data.kra_listener_shutdown == 0;
-       pid = kernel_thread(kranal_listener, sock, 0);
-       if (pid < 0) {
-               CERROR("Can't spawn listener: %ld\n", pid);
-               return (int)pid;
+       LIBCFS_ALLOC(ras, sizeof(*ras));
+       if (ras == NULL) {
+               CERROR("ENOMEM allocating connection request from "
+                      "%u.%u.%u.%u\n", HIPQUAD(peer_ip));
+               return -ENOMEM;
        }
 
-       /* Block until listener has started up. */
-       down(&kranal_data.kra_listener_signal);
-
-       rc = kranal_data.kra_listener_shutdown;
-       LASSERT ((rc != 0) == (kranal_data.kra_listener_sock == NULL));
-
-        CDEBUG(D_WARNING, "Listener %ld started OK\n", pid);
-       return rc;
-}
-
-void
-kranal_stop_listener()
-{
-        CDEBUG(D_WARNING, "Stopping listener\n");
-
-       /* Called holding kra_nid_mutex: listener running */
-       LASSERT (kranal_data.kra_listener_sock != NULL);
-
-       kranal_data.kra_listener_shutdown = 1;
-       wake_up_all(kranal_data->kra_listener_sock->sk->sk_sleep);
-
-       /* Block until listener has torn down. */
-       down(&kranal_data.kra_listener_signal);
-
-       LASSERT (kranal_data.kra_listener_sock == NULL);
-        CDEBUG(D_WARNING, "Listener stopped\n");
-}
-
-int 
-kranal_listener_procint(ctl_table *table, int write, struct file *filp,
-                       void *buffer, size_t *lenp)
-{
-       int   *tunable = (int *)table->data;
-       int    old_val;
-       int    rc;
-
-       down(&kranal_data.kra_nid_mutex);
-
-       LASSERT (tunable == &kranal_data.kra_port ||
-                tunable == &kranal_data.kra_backlog);
-       old_val = *tunable;
-
-       rc = proc_dointvec(table, write, filp, buffer, lenp);
-
-       if (write &&
-           (*tunable != old_val ||
-            kranal_data.kra_listener_sock == NULL)) {
+       ras->ras_sock = sock;
 
-               if (kranal_data.kra_listener_sock != NULL)
-                       kranal_stop_listener();
+       spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
 
-               rc = kranal_start_listener();
+       cfs_list_add_tail(&ras->ras_list, &kranal_data.kra_connd_acceptq);
+       wake_up(&kranal_data.kra_connd_waitq);
 
-               if (rc != 0) {
-                       *tunable = old_val;
-                       kranal_start_listener();
-               }
-       }
-
-       up(&kranal_data.kra_nid_mutex);
-       return rc;
+       spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+       return 0;
 }
 
 int
-kranal_set_mynid(ptl_nid_t nid)
+kranal_create_peer (kra_peer_t **peerp, lnet_nid_t nid)
 {
-        lib_ni_t      *ni = &kranal_lib.libnal_ni;
-        int            rc;
-
-        CDEBUG(D_NET, "setting mynid to "LPX64" (old nid="LPX64")\n",
-               nid, ni->ni_pid.nid);
-
-        down(&kranal_data.kra_nid_mutex);
-
-        if (nid == ni->ni_pid.nid) {
-                /* no change of NID */
-                up(&kranal_data.kra_nid_mutex);
-                return 0;
-        }
-
-       if (kranal_data.kra_listener_sock != NULL)
-               kranal_stop_listener();
+        kra_peer_t    *peer;
+        unsigned long  flags;
 
-        ni->ni_pid.nid = nid;
+        LASSERT (nid != LNET_NID_ANY);
 
-        /* Delete all existing peers and their connections after new
-         * NID/incarnation set to ensure no old connections in our brave
-         * new world. */
-        kranal_del_peer(PTL_NID_ANY, 0);
+        LIBCFS_ALLOC(peer, sizeof(*peer));
+        if (peer == NULL)
+                return -ENOMEM;
 
-        if (nid != PTL_NID_ANY)
-                rc = kranal_start_listener();
+        memset(peer, 0, sizeof(*peer));         /* zero flags etc */
 
-        up(&kranal_data.kra_nid_mutex);
-        return rc;
-}
+        peer->rap_nid = nid;
+        cfs_atomic_set(&peer->rap_refcount, 1);     /* 1 ref for caller */
 
-kra_peer_t *
-kranal_create_peer (ptl_nid_t nid)
-{
-        kra_peer_t *peer;
+        CFS_INIT_LIST_HEAD(&peer->rap_list);
+        CFS_INIT_LIST_HEAD(&peer->rap_connd_list);
+        CFS_INIT_LIST_HEAD(&peer->rap_conns);
+        CFS_INIT_LIST_HEAD(&peer->rap_tx_queue);
 
-        LASSERT (nid != PTL_NID_ANY);
+        peer->rap_reconnect_interval = 0;       /* OK to connect at any time */
 
-        PORTAL_ALLOC(peer, sizeof(*peer));
-        if (peer == NULL)
-                return NULL;
+       write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
-        memset(peer, 0, sizeof(*peer));         /* zero flags etc */
+        if (kranal_data.kra_nonewpeers) {
+                /* shutdown has started already */
+               write_unlock_irqrestore(&kranal_data.kra_global_lock,
+                                            flags);
 
-        peer->rap_nid = nid;
-        atomic_set(&peer->rap_refcount, 1);     /* 1 ref for caller */
+                LIBCFS_FREE(peer, sizeof(*peer));
+                CERROR("Can't create peer: network shutdown\n");
+                return -ESHUTDOWN;
+        }
 
-        INIT_LIST_HEAD(&peer->rap_list);        /* not in the peer table yet */
-        INIT_LIST_HEAD(&peer->rap_conns);
-        INIT_LIST_HEAD(&peer->rap_tx_queue);
+        cfs_atomic_inc(&kranal_data.kra_npeers);
 
-        peer->rap_reconnect_time = CURRENT_TIME;
-        peer->rap_reconnect_interval = RANAL_MIN_RECONNECT_INTERVAL;
+       write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 
-        atomic_inc(&kranal_data.kra_npeers);
-        return peer;
+        *peerp = peer;
+        return 0;
 }
 
 void
-__kranal_peer_decref (kra_peer_t *peer)
+kranal_destroy_peer (kra_peer_t *peer)
 {
-        CDEBUG(D_NET, "peer "LPX64" %p deleted\n", peer->rap_nid, peer);
+        CDEBUG(D_NET, "peer %s %p deleted\n", 
+               libcfs_nid2str(peer->rap_nid), peer);
 
-        LASSERT (atomic_read(&peer->rap_refcount) == 0);
+        LASSERT (cfs_atomic_read(&peer->rap_refcount) == 0);
         LASSERT (peer->rap_persistence == 0);
         LASSERT (!kranal_peer_active(peer));
-        LASSERT (peer->rap_connecting == 0);
-        LASSERT (list_empty(&peer->rap_conns));
-        LASSERT (list_empty(&peer->rap_tx_queue));
+        LASSERT (!peer->rap_connecting);
+        LASSERT (cfs_list_empty(&peer->rap_conns));
+        LASSERT (cfs_list_empty(&peer->rap_tx_queue));
+        LASSERT (cfs_list_empty(&peer->rap_connd_list));
 
-        PORTAL_FREE(peer, sizeof(*peer));
+        LIBCFS_FREE(peer, sizeof(*peer));
 
         /* NB a peer's connections keep a reference on their peer until
          * they are destroyed, so we can be assured that _all_ state to do
          * with this peer has been cleaned up when its refcount drops to
          * zero. */
-        atomic_dec(&kranal_data.kra_npeers);
+        cfs_atomic_dec(&kranal_data.kra_npeers);
 }
 
 kra_peer_t *
-kranal_find_peer_locked (ptl_nid_t nid)
+kranal_find_peer_locked (lnet_nid_t nid)
 {
-        struct list_head *peer_list = kranal_nid2peerlist(nid);
-        struct list_head *tmp;
+        cfs_list_t       *peer_list = kranal_nid2peerlist(nid);
+        cfs_list_t       *tmp;
         kra_peer_t       *peer;
 
-        list_for_each (tmp, peer_list) {
+        cfs_list_for_each (tmp, peer_list) {
 
-                peer = list_entry(tmp, kra_peer_t, rap_list);
+                peer = cfs_list_entry(tmp, kra_peer_t, rap_list);
 
                 LASSERT (peer->rap_persistence > 0 ||     /* persistent peer */
-                         !list_empty(&peer->rap_conns));  /* active conn */
+                         !cfs_list_empty(&peer->rap_conns));  /* active conn */
 
                 if (peer->rap_nid != nid)
                         continue;
 
-                CDEBUG(D_NET, "got peer [%p] -> "LPX64" (%d)\n",
-                       peer, nid, atomic_read(&peer->rap_refcount));
+                CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
+                       peer, libcfs_nid2str(nid), 
+                       cfs_atomic_read(&peer->rap_refcount));
                 return peer;
         }
         return NULL;
 }
 
 kra_peer_t *
-kranal_find_peer (ptl_nid_t nid)
+kranal_find_peer (lnet_nid_t nid)
 {
         kra_peer_t     *peer;
 
-        read_lock(&kranal_data.kra_global_lock);
+       read_lock(&kranal_data.kra_global_lock);
         peer = kranal_find_peer_locked(nid);
         if (peer != NULL)                       /* +1 ref for caller? */
                 kranal_peer_addref(peer);
-        read_unlock(&kranal_data.kra_global_lock);
+       read_unlock(&kranal_data.kra_global_lock);
 
         return peer;
 }
@@ -1180,71 +1008,74 @@ void
 kranal_unlink_peer_locked (kra_peer_t *peer)
 {
         LASSERT (peer->rap_persistence == 0);
-        LASSERT (list_empty(&peer->rap_conns));
+        LASSERT (cfs_list_empty(&peer->rap_conns));
 
         LASSERT (kranal_peer_active(peer));
-        list_del_init(&peer->rap_list);
+        cfs_list_del_init(&peer->rap_list);
 
         /* lose peerlist's ref */
         kranal_peer_decref(peer);
 }
 
 int
-kranal_get_peer_info (int index, ptl_nid_t *nidp, int *portp, int *persistencep)
+kranal_get_peer_info (int index, lnet_nid_t *nidp, __u32 *ipp, int *portp,
+                      int *persistencep)
 {
         kra_peer_t        *peer;
-        struct list_head  *ptmp;
+        cfs_list_t        *ptmp;
         int                i;
 
-        read_lock(&kranal_data.kra_global_lock);
+       read_lock(&kranal_data.kra_global_lock);
 
         for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
 
-                list_for_each(ptmp, &kranal_data.kra_peers[i]) {
+                cfs_list_for_each(ptmp, &kranal_data.kra_peers[i]) {
 
-                        peer = list_entry(ptmp, kra_peer_t, rap_list);
+                        peer = cfs_list_entry(ptmp, kra_peer_t, rap_list);
                         LASSERT (peer->rap_persistence > 0 ||
-                                 !list_empty(&peer->rap_conns));
+                                 !cfs_list_empty(&peer->rap_conns));
 
                         if (index-- > 0)
                                 continue;
 
                         *nidp = peer->rap_nid;
+                        *ipp = peer->rap_ip;
                         *portp = peer->rap_port;
                         *persistencep = peer->rap_persistence;
 
-                        read_unlock(&kranal_data.kra_global_lock);
+                       read_unlock(&kranal_data.kra_global_lock);
                         return 0;
                 }
         }
 
-        read_unlock(&kranal_data.kra_global_lock);
+       read_unlock(&kranal_data.kra_global_lock);
         return -ENOENT;
 }
 
 int
-kranal_add_persistent_peer (ptl_nid_t nid, __u32 ip, int port)
+kranal_add_persistent_peer (lnet_nid_t nid, __u32 ip, int port)
 {
         unsigned long      flags;
         kra_peer_t        *peer;
         kra_peer_t        *peer2;
+        int                rc;
 
-        if (nid == PTL_NID_ANY)
+        if (nid == LNET_NID_ANY)
                 return -EINVAL;
 
-        peer = kranal_create_peer(nid);
-        if (peer == NULL)
-                return -ENOMEM;
+        rc = kranal_create_peer(&peer, nid);
+        if (rc != 0)
+                return rc;
 
-        write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+       write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
         peer2 = kranal_find_peer_locked(nid);
         if (peer2 != NULL) {
-                kranal_put_peer(peer);
+                kranal_peer_decref(peer);
                 peer = peer2;
         } else {
                 /* peer table takes existing ref on peer */
-                list_add_tail(&peer->rap_list,
+                cfs_list_add_tail(&peer->rap_list,
                               kranal_nid2peerlist(nid));
         }
 
@@ -1252,30 +1083,24 @@ kranal_add_persistent_peer (ptl_nid_t nid, __u32 ip, int port)
         peer->rap_port = port;
         peer->rap_persistence++;
 
-        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+       write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
         return 0;
 }
 
 void
-kranal_del_peer_locked (kra_peer_t *peer, int single_share)
+kranal_del_peer_locked (kra_peer_t *peer)
 {
-        struct list_head *ctmp;
-        struct list_head *cnxt;
+        cfs_list_t       *ctmp;
+        cfs_list_t       *cnxt;
         kra_conn_t       *conn;
 
-        if (!single_share)
-                peer->rap_persistence = 0;
-        else if (peer->rap_persistence > 0)
-                peer->rap_persistence--;
-
-        if (peer->rap_persistence != 0)
-                return;
+        peer->rap_persistence = 0;
 
-        if (list_empty(&peer->rap_conns)) {
+        if (cfs_list_empty(&peer->rap_conns)) {
                 kranal_unlink_peer_locked(peer);
         } else {
-                list_for_each_safe(ctmp, cnxt, &peer->rap_conns) {
-                        conn = list_entry(ctmp, kra_conn_t, rac_list);
+                cfs_list_for_each_safe(ctmp, cnxt, &peer->rap_conns) {
+                        conn = cfs_list_entry(ctmp, kra_conn_t, rac_list);
 
                         kranal_close_conn_locked(conn, 0);
                 }
@@ -1284,20 +1109,20 @@ kranal_del_peer_locked (kra_peer_t *peer, int single_share)
 }
 
 int
-kranal_del_peer (ptl_nid_t nid, int single_share)
+kranal_del_peer (lnet_nid_t nid)
 {
         unsigned long      flags;
-        struct list_head  *ptmp;
-        struct list_head  *pnxt;
+        cfs_list_t        *ptmp;
+        cfs_list_t        *pnxt;
         kra_peer_t        *peer;
         int                lo;
         int                hi;
         int                i;
         int                rc = -ENOENT;
 
-        write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+       write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
-        if (nid != PTL_NID_ANY)
+        if (nid != LNET_NID_ANY)
                 lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
         else {
                 lo = 0;
@@ -1305,23 +1130,20 @@ kranal_del_peer (ptl_nid_t nid, int single_share)
         }
 
         for (i = lo; i <= hi; i++) {
-                list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
-                        peer = list_entry(ptmp, kra_peer_t, rap_list);
+                cfs_list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
+                        peer = cfs_list_entry(ptmp, kra_peer_t, rap_list);
                         LASSERT (peer->rap_persistence > 0 ||
-                                 !list_empty(&peer->rap_conns));
+                                 !cfs_list_empty(&peer->rap_conns));
 
-                        if (!(nid == PTL_NID_ANY || peer->rap_nid == nid))
+                        if (!(nid == LNET_NID_ANY || peer->rap_nid == nid))
                                 continue;
 
-                        kranal_del_peer_locked(peer, single_share);
+                        kranal_del_peer_locked(peer);
                         rc = 0;         /* matched something */
-
-                        if (single_share)
-                                goto out;
                 }
         }
- out:
-        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+
+       write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 
         return rc;
 }
@@ -1330,36 +1152,37 @@ kra_conn_t *
 kranal_get_conn_by_idx (int index)
 {
         kra_peer_t        *peer;
-        struct list_head  *ptmp;
+        cfs_list_t        *ptmp;
         kra_conn_t        *conn;
-        struct list_head  *ctmp;
+        cfs_list_t        *ctmp;
         int                i;
 
-        read_lock (&kranal_data.kra_global_lock);
+       read_lock(&kranal_data.kra_global_lock);
 
         for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
-                list_for_each (ptmp, &kranal_data.kra_peers[i]) {
+                cfs_list_for_each (ptmp, &kranal_data.kra_peers[i]) {
 
-                        peer = list_entry(ptmp, kra_peer_t, rap_list);
+                        peer = cfs_list_entry(ptmp, kra_peer_t, rap_list);
                         LASSERT (peer->rap_persistence > 0 ||
-                                 !list_empty(&peer->rap_conns));
+                                 !cfs_list_empty(&peer->rap_conns));
 
-                        list_for_each (ctmp, &peer->rap_conns) {
+                        cfs_list_for_each (ctmp, &peer->rap_conns) {
                                 if (index-- > 0)
                                         continue;
 
-                                conn = list_entry(ctmp, kra_conn_t, rac_list);
-                                CDEBUG(D_NET, "++conn[%p] -> "LPX64" (%d)\n",
-                                       conn, conn->rac_peer->rap_nid,
-                                       atomic_read(&conn->rac_refcount));
-                                atomic_inc(&conn->rac_refcount);
-                                read_unlock(&kranal_data.kra_global_lock);
+                                conn = cfs_list_entry(ctmp, kra_conn_t,
+                                                      rac_list);
+                                CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
+                                       libcfs_nid2str(conn->rac_peer->rap_nid),
+                                       cfs_atomic_read(&conn->rac_refcount));
+                                cfs_atomic_inc(&conn->rac_refcount);
+                               read_unlock(&kranal_data.kra_global_lock);
                                 return conn;
                         }
                 }
         }
 
-        read_unlock(&kranal_data.kra_global_lock);
+       read_unlock(&kranal_data.kra_global_lock);
         return NULL;
 }
 
@@ -1367,12 +1190,12 @@ int
 kranal_close_peer_conns_locked (kra_peer_t *peer, int why)
 {
         kra_conn_t         *conn;
-        struct list_head   *ctmp;
-        struct list_head   *cnxt;
+        cfs_list_t         *ctmp;
+        cfs_list_t         *cnxt;
         int                 count = 0;
 
-        list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
-                conn = list_entry(ctmp, kra_conn_t, rac_list);
+        cfs_list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
+                conn = cfs_list_entry(ctmp, kra_conn_t, rac_list);
 
                 count++;
                 kranal_close_conn_locked(conn, why);
@@ -1382,45 +1205,20 @@ kranal_close_peer_conns_locked (kra_peer_t *peer, int why)
 }
 
 int
-kranal_close_stale_conns_locked (kra_peer_t *peer, __u64 incarnation)
-{
-        kra_conn_t         *conn;
-        struct list_head   *ctmp;
-        struct list_head   *cnxt;
-        int                 count = 0;
-
-        list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
-                conn = list_entry(ctmp, kra_conn_t, rac_list);
-
-                if (conn->rac_incarnation == incarnation)
-                        continue;
-
-                CDEBUG(D_NET, "Closing stale conn nid:"LPX64" incarnation:"LPX64"("LPX64")\n",
-                       peer->rap_nid, conn->rac_incarnation, incarnation);
-                LASSERT (conn->rac_incarnation < incarnation);
-
-                count++;
-                kranal_close_conn_locked(conn, -ESTALE);
-        }
-
-        return count;
-}
-
-int
-kranal_close_matching_conns (ptl_nid_t nid)
+kranal_close_matching_conns (lnet_nid_t nid)
 {
         unsigned long       flags;
         kra_peer_t         *peer;
-        struct list_head   *ptmp;
-        struct list_head   *pnxt;
+        cfs_list_t         *ptmp;
+        cfs_list_t         *pnxt;
         int                 lo;
         int                 hi;
         int                 i;
         int                 count = 0;
 
-        write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+       write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
-        if (nid != PTL_NID_ANY)
+        if (nid != LNET_NID_ANY)
                 lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
         else {
                 lo = 0;
@@ -1428,88 +1226,88 @@ kranal_close_matching_conns (ptl_nid_t nid)
         }
 
         for (i = lo; i <= hi; i++) {
-                list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
+                cfs_list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
 
-                        peer = list_entry(ptmp, kra_peer_t, rap_list);
+                        peer = cfs_list_entry(ptmp, kra_peer_t, rap_list);
                         LASSERT (peer->rap_persistence > 0 ||
-                                 !list_empty(&peer->rap_conns));
+                                 !cfs_list_empty(&peer->rap_conns));
 
-                        if (!(nid == PTL_NID_ANY || nid == peer->rap_nid))
+                        if (!(nid == LNET_NID_ANY || nid == peer->rap_nid))
                                 continue;
 
                         count += kranal_close_peer_conns_locked(peer, 0);
                 }
         }
 
-        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+       write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 
         /* wildcards always succeed */
-        if (nid == PTL_NID_ANY)
+        if (nid == LNET_NID_ANY)
                 return 0;
 
         return (count == 0) ? -ENOENT : 0;
 }
 
 int
-kranal_cmd(struct portals_cfg *pcfg, void * private)
+kranal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
 {
-        int rc = -EINVAL;
+        struct libcfs_ioctl_data *data = arg;
+        int                       rc = -EINVAL;
 
-        LASSERT (pcfg != NULL);
+        LASSERT (ni == kranal_data.kra_ni);
 
-        switch(pcfg->pcfg_command) {
-        case NAL_CMD_GET_PEER: {
-                ptl_nid_t   nid = 0;
+        switch(cmd) {
+        case IOC_LIBCFS_GET_PEER: {
+                lnet_nid_t   nid = 0;
                 __u32       ip = 0;
                 int         port = 0;
                 int         share_count = 0;
 
-                rc = kranal_get_peer_info(pcfg->pcfg_count,
+                rc = kranal_get_peer_info(data->ioc_count,
                                           &nid, &ip, &port, &share_count);
-                pcfg->pcfg_nid   = nid;
-                pcfg->pcfg_size  = 0;
-                pcfg->pcfg_id    = ip;
-                pcfg->pcfg_misc  = port;
-                pcfg->pcfg_count = 0;
-                pcfg->pcfg_wait  = share_count;
+                data->ioc_nid    = nid;
+                data->ioc_count  = share_count;
+                data->ioc_u32[0] = ip;
+                data->ioc_u32[1] = port;
                 break;
         }
-        case NAL_CMD_ADD_PEER: {
-                rc = kranal_add_persistent_peer(pcfg->pcfg_nid,
-                                                pcfg->pcfg_id, /* IP */
-                                                pcfg->pcfg_misc); /* port */
+        case IOC_LIBCFS_ADD_PEER: {
+                rc = kranal_add_persistent_peer(data->ioc_nid,
+                                                data->ioc_u32[0], /* IP */
+                                                data->ioc_u32[1]); /* port */
                 break;
         }
-        case NAL_CMD_DEL_PEER: {
-                rc = kranal_del_peer(pcfg->pcfg_nid, 
-                                     /* flags == single_share */
-                                     pcfg->pcfg_flags != 0);
+        case IOC_LIBCFS_DEL_PEER: {
+                rc = kranal_del_peer(data->ioc_nid);
                 break;
         }
-        case NAL_CMD_GET_CONN: {
-                kra_conn_t *conn = kranal_get_conn_by_idx(pcfg->pcfg_count);
+        case IOC_LIBCFS_GET_CONN: {
+                kra_conn_t *conn = kranal_get_conn_by_idx(data->ioc_count);
 
                 if (conn == NULL)
                         rc = -ENOENT;
                 else {
                         rc = 0;
-                        pcfg->pcfg_nid   = conn->rac_peer->rap_nid;
-                        pcfg->pcfg_id    = 0;
-                        pcfg->pcfg_misc  = 0;
-                        pcfg->pcfg_flags = 0;
-                        kranal_put_conn(conn);
+                        data->ioc_nid    = conn->rac_peer->rap_nid;
+                        data->ioc_u32[0] = conn->rac_device->rad_id;
+                        kranal_conn_decref(conn);
                 }
                 break;
         }
-        case NAL_CMD_CLOSE_CONNECTION: {
-                rc = kranal_close_matching_conns(pcfg->pcfg_nid);
+        case IOC_LIBCFS_CLOSE_CONNECTION: {
+                rc = kranal_close_matching_conns(data->ioc_nid);
                 break;
         }
-        case NAL_CMD_REGISTER_MYNID: {
-                if (pcfg->pcfg_nid == PTL_NID_ANY)
+        case IOC_LIBCFS_REGISTER_MYNID: {
+                /* Ignore if this is a noop */
+                if (data->ioc_nid == ni->ni_nid) {
+                        rc = 0;
+                } else {
+                        CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
+                               libcfs_nid2str(data->ioc_nid),
+                               libcfs_nid2str(ni->ni_nid));
                         rc = -EINVAL;
-                else
-                        rc = kranal_set_mynid(pcfg->pcfg_nid);
+                }
                 break;
         }
         }
@@ -1518,55 +1316,51 @@ kranal_cmd(struct portals_cfg *pcfg, void * private)
 }
 
 void
-kranal_free_txdescs(struct list_head *freelist)
+kranal_free_txdescs(cfs_list_t *freelist)
 {
         kra_tx_t    *tx;
 
-        while (!list_empty(freelist)) {
-                tx = list_entry(freelist->next, kra_tx_t, tx_list);
+        while (!cfs_list_empty(freelist)) {
+                tx = cfs_list_entry(freelist->next, kra_tx_t, tx_list);
 
-                list_del(&tx->tx_list);
-                PORTAL_FREE(tx->tx_phys, PTL_MD_MAX_IOV * sizeof(*tx->tx_phys));
-                PORTAL_FREE(tx, sizeof(*tx));
+                cfs_list_del(&tx->tx_list);
+                LIBCFS_FREE(tx->tx_phys, LNET_MAX_IOV * sizeof(*tx->tx_phys));
+                LIBCFS_FREE(tx, sizeof(*tx));
         }
 }
 
 int
-kranal_alloc_txdescs(struct list_head *freelist, int n)
+kranal_alloc_txdescs(cfs_list_t *freelist, int n)
 {
-        int            isnblk = (freelist == &kranal_data.kra_idle_nblk_txs);
         int            i;
         kra_tx_t      *tx;
 
-        LASSERT (freelist == &kranal_data.kra_idle_txs ||
-                 freelist == &kranal_data.kra_idle_nblk_txs);
-        LASSERT (list_empty(freelist));
+        LASSERT (freelist == &kranal_data.kra_idle_txs);
+        LASSERT (cfs_list_empty(freelist));
 
         for (i = 0; i < n; i++) {
 
-                PORTAL_ALLOC(tx, sizeof(*tx));
+                LIBCFS_ALLOC(tx, sizeof(*tx));
                 if (tx == NULL) {
-                        CERROR("Can't allocate %stx[%d]\n", 
-                               isnblk ? "nblk ", i);
-                        kranal_free_txdescs();
+                        CERROR("Can't allocate tx[%d]\n", i);
+                        kranal_free_txdescs(freelist);
                         return -ENOMEM;
                 }
 
-                PORTAL_ALLOC(tx->tx_phys,
-                             PLT_MD_MAX_IOV * sizeof(*tx->tx_phys));
+                LIBCFS_ALLOC(tx->tx_phys,
+                             LNET_MAX_IOV * sizeof(*tx->tx_phys));
                 if (tx->tx_phys == NULL) {
-                        CERROR("Can't allocate %stx[%d]->tx_phys\n", 
-                               isnblk ? "nblk ", i);
+                        CERROR("Can't allocate tx[%d]->tx_phys\n", i);
 
-                        PORTAL_FREE(tx, sizeof(*tx));
+                        LIBCFS_FREE(tx, sizeof(*tx));
                         kranal_free_txdescs(freelist);
                         return -ENOMEM;
                 }
 
-                tx->tx_isnblk = isnblk
                 tx->tx_buftype = RANAL_BUF_NONE;
+                tx->tx_msg.ram_type = RANAL_MSG_NONE;
 
-                list_add(&tx->tx_list, freelist);
+                cfs_list_add(&tx->tx_list, freelist);
         }
 
         return 0;
@@ -1575,54 +1369,45 @@ kranal_alloc_txdescs(struct list_head *freelist, int n)
 int
 kranal_device_init(int id, kra_device_t *dev)
 {
-        const int         total_ntx = RANAL_NTX + RANAL_NTX_NBLK;
+        int               total_ntx = *kranal_tunables.kra_ntx;
         RAP_RETURN        rrc;
 
         dev->rad_id = id;
-        rrc = RapkGetDeviceByIndex(id, NULL, kranal_device_callback,
+        rrc = RapkGetDeviceByIndex(id, kranal_device_callback,
                                    &dev->rad_handle);
         if (rrc != RAP_SUCCESS) {
-                CERROR("Can't get Rapidarray Device %d: %d\n", idx, rrc);
+                CERROR("Can't get Rapidarray Device %d: %d\n", id, rrc);
                 goto failed_0;
         }
 
         rrc = RapkReserveRdma(dev->rad_handle, total_ntx);
         if (rrc != RAP_SUCCESS) {
                 CERROR("Can't reserve %d RDMA descriptors"
-                       " for device[%d]: %d\n", total_ntx, i, rrc);
+                       " for device %d: %d\n", total_ntx, id, rrc);
                 goto failed_1;
         }
 
-        rrc = RapkCreatePtag(dev->rad_handle,
-                             &dev->rad_ptag);
+        rrc = RapkCreateCQ(dev->rad_handle, total_ntx, RAP_CQTYPE_SEND,
+                           &dev->rad_rdma_cqh);
         if (rrc != RAP_SUCCESS) {
-                CERROR("Can't create ptag"
-                       " for device[%d]: %d\n", i, rrc);
+                CERROR("Can't create rdma cq size %d for device %d: %d\n",
+                       total_ntx, id, rrc);
                 goto failed_1;
         }
 
-        rrc = RapkCreateCQ(dev->rad_handle, total_ntx, dev->rad_ptag,
-                           &dev->rad_rdma_cq);
+        rrc = RapkCreateCQ(dev->rad_handle, 
+                           *kranal_tunables.kra_fma_cq_size, 
+                           RAP_CQTYPE_RECV, &dev->rad_fma_cqh);
         if (rrc != RAP_SUCCESS) {
-                CERROR("Can't create rdma cq size %d"
-                       " for device[%d]: %d\n", total_ntx, i, rrc);
+                CERROR("Can't create fma cq size %d for device %d: %d\n", 
+                       *kranal_tunables.kra_fma_cq_size, id, rrc);
                 goto failed_2;
         }
 
-        rrc = RapkCreateCQ(dev->rad_handle, RANAL_FMA_CQ_SIZE,
-                           dev->rad_ptag, &dev->rad_fma_cq);
-        if (rrc != RAP_SUCCESS) {
-                CERROR("Can't create fma cq size %d"
-                       " for device[%d]: %d\n", RANAL_RX_CQ_SIZE, i, rrc);
-                goto failed_3;
-        }
-
         return 0;
 
- failed_3:
-        RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cq, dev->rad_ptag);
  failed_2:
-        RapkDestroyPtag(dev->rad_handle, dev->rad_ptag);
+        RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cqh);
  failed_1:
         RapkReleaseDevice(dev->rad_handle);
  failed_0:
@@ -1632,29 +1417,30 @@ kranal_device_init(int id, kra_device_t *dev)
 void
 kranal_device_fini(kra_device_t *dev)
 {
-        RapkDestroyCQ(dev->rad_handle, dev->rad_rx_cq, dev->rad_ptag);
-        RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cq, dev->rad_ptag);
-        RapkDestroyPtag(dev->rad_handle, dev->rad_ptag);
+        LASSERT (cfs_list_empty(&dev->rad_ready_conns));
+        LASSERT (cfs_list_empty(&dev->rad_new_conns));
+        LASSERT (dev->rad_nphysmap == 0);
+        LASSERT (dev->rad_nppphysmap == 0);
+        LASSERT (dev->rad_nvirtmap == 0);
+        LASSERT (dev->rad_nobvirtmap == 0);
+
+        LASSERT(dev->rad_scheduler == NULL);
+        RapkDestroyCQ(dev->rad_handle, dev->rad_fma_cqh);
+        RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cqh);
         RapkReleaseDevice(dev->rad_handle);
 }
 
 void
-kranal_api_shutdown (nal_t *nal)
+kranal_shutdown (lnet_ni_t *ni)
 {
         int           i;
-        int           rc;
         unsigned long flags;
-        
-        if (nal->nal_refct != 0) {
-                /* This module got the first ref */
-                PORTAL_MODULE_UNUSE;
-                return;
-        }
 
         CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
-               atomic_read(&portal_kmemory));
+               cfs_atomic_read(&libcfs_kmemory));
 
-        LASSERT (nal == &kranal_api);
+        LASSERT (ni == kranal_data.kra_ni);
+        LASSERT (ni->ni_data == &kranal_data);
 
         switch (kranal_data.kra_init) {
         default:
@@ -1662,84 +1448,100 @@ kranal_api_shutdown (nal_t *nal)
                 LBUG();
 
         case RANAL_INIT_ALL:
-                /* stop calls to nal_cmd */
-                libcfs_nal_cmd_unregister(OPENRANAL);
-                /* No new persistent peers */
+                /* Prevent new peers from being created */
+               write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+                kranal_data.kra_nonewpeers = 1;
+               write_unlock_irqrestore(&kranal_data.kra_global_lock,
+                                            flags);
 
-                /* resetting my NID to unadvertises me, removes my
-                 * listener and nukes all current peers */
-                kranal_set_mynid(PTL_NID_ANY);
-                /* no new peers or conns */
+                /* Remove all existing peers from the peer table */
+                kranal_del_peer(LNET_NID_ANY);
 
-                /* Wait for all peer/conn state to clean up */
+                /* Wait for pending conn reqs to be handled */
                 i = 2;
-                while (atomic_read(&kranal_data.kra_nconns) != 0 ||
-                       atomic_read(&kranal_data.kra_npeers) != 0) {
+               spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+                while (!cfs_list_empty(&kranal_data.kra_connd_acceptq)) {
+                       spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
+                                                   flags);
                         i++;
-                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
-                               "waiting for %d peers and %d conns to close down\n",
-                               atomic_read(&kranal_data.kra_npeers),
-                               atomic_read(&kranal_data.kra_nconns));
-                       kranal_pause(HZ);
+                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n */
+                               "waiting for conn reqs to clean up\n");
+                        cfs_pause(cfs_time_seconds(1));
+
+                       spin_lock_irqsave(&kranal_data.kra_connd_lock,
+                                              flags);
                 }
-                /* fall through */
+               spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
 
-        case RANAL_INIT_LIB:
-                lib_fini(&kranal_lib);
+                /* Wait for all peers to be freed */
+                i = 2;
+                while (cfs_atomic_read(&kranal_data.kra_npeers) != 0) {
+                        i++;
+                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n */
+                               "waiting for %d peers to close down\n",
+                               cfs_atomic_read(&kranal_data.kra_npeers));
+                        cfs_pause(cfs_time_seconds(1));
+                }
                 /* fall through */
 
         case RANAL_INIT_DATA:
                 break;
         }
 
-        /* flag threads to terminate; wake and wait for them to die */
+        /* Peer state all cleaned up BEFORE setting shutdown, so threads don't
+         * have to worry about shutdown races.  NB connections may be created
+         * while there are still active connds, but these will be temporary
+         * since peer creation always fails after the listener has started to
+         * shut down. */
+        LASSERT (cfs_atomic_read(&kranal_data.kra_npeers) == 0);
+        
+        /* Flag threads to terminate */
         kranal_data.kra_shutdown = 1;
 
-        for (i = 0; i < kranal_data.kra_ndevs; i++) {
-                kra_device_t *dev = &kranal_data.kra_devices[i];
-
-                LASSERT (list_empty(&dev->rad_connq));
+       for (i = 0; i < kranal_data.kra_ndevs; i++) {
+               kra_device_t *dev = &kranal_data.kra_devices[i];
 
-                spin_lock_irqsave(&dev->rad_lock, flags);
-                wake_up(&dev->rad_waitq);
-                spin_unlock_irqrestore(&dev->rad_lock, flags);
-        }
+               spin_lock_irqsave(&dev->rad_lock, flags);
+               wake_up(&dev->rad_waitq);
+               spin_unlock_irqrestore(&dev->rad_lock, flags);
+       }
 
-        spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
-        wake_up_all(&kranal_data.kra_reaper_waitq);
-        spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
+       spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
+       wake_up_all(&kranal_data.kra_reaper_waitq);
+       spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
 
-        LASSERT (list_empty(&kranal_data.kra_connd_peers));
-        spin_lock_irqsave(&kranal_data.kra_connd_lock, flags); 
-        wake_up_all(&kranal_data.kra_connd_waitq);
-        spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags); 
+       LASSERT (cfs_list_empty(&kranal_data.kra_connd_peers));
+       spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+       wake_up_all(&kranal_data.kra_connd_waitq);
+       spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
 
+        /* Wait for threads to exit */
         i = 2;
-        while (atomic_read(&kranal_data.kra_nthreads) != 0) {
+        while (cfs_atomic_read(&kranal_data.kra_nthreads) != 0) {
                 i++;
                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                        "Waiting for %d threads to terminate\n",
-                       atomic_read(&kranal_data.kra_nthreads));
-                kranal_pause(HZ);
+                       cfs_atomic_read(&kranal_data.kra_nthreads));
+                cfs_pause(cfs_time_seconds(1));
         }
 
-        LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);
+        LASSERT (cfs_atomic_read(&kranal_data.kra_npeers) == 0);
         if (kranal_data.kra_peers != NULL) {
                 for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
-                        LASSERT (list_empty(&kranal_data.kra_peers[i]));
+                        LASSERT (cfs_list_empty(&kranal_data.kra_peers[i]));
 
-                PORTAL_FREE(kranal_data.kra_peers,
-                            sizeof (struct list_head) * 
+                LIBCFS_FREE(kranal_data.kra_peers,
+                            sizeof (cfs_list_t) *
                             kranal_data.kra_peer_hash_size);
         }
 
-        LASSERT (atomic_read(&kranal_data.kra_nconns) == 0);
+        LASSERT (cfs_atomic_read(&kranal_data.kra_nconns) == 0);
         if (kranal_data.kra_conns != NULL) {
                 for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
-                        LASSERT (list_empty(&kranal_data.kra_conns[i]));
+                        LASSERT (cfs_list_empty(&kranal_data.kra_conns[i]));
 
-                PORTAL_FREE(kranal_data.kra_conns,
-                            sizeof (struct list_head) * 
+                LIBCFS_FREE(kranal_data.kra_conns,
+                            sizeof (cfs_list_t) *
                             kranal_data.kra_conn_hash_size);
         }
 
@@ -1747,130 +1549,125 @@ kranal_api_shutdown (nal_t *nal)
                 kranal_device_fini(&kranal_data.kra_devices[i]);
 
         kranal_free_txdescs(&kranal_data.kra_idle_txs);
-        kranal_free_txdescs(&kranal_data.kra_idle_nblk_txs);
 
         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
-               atomic_read(&portal_kmemory));
-        printk(KERN_INFO "Lustre: RapidArray NAL unloaded (final mem %d)\n",
-               atomic_read(&portal_kmemory));
+               cfs_atomic_read(&libcfs_kmemory));
 
-        kranal_data.kra_init = RANAL_INIT_NOTHING;
+       kranal_data.kra_init = RANAL_INIT_NOTHING;
+       module_put(THIS_MODULE);
 }
 
 int
-kranal_api_startup (nal_t *nal, ptl_pid_t requested_pid,
-                    ptl_ni_limits_t *requested_limits,
-                    ptl_ni_limits_t *actual_limits)
+kranal_startup (lnet_ni_t *ni)
 {
-        static int        device_ids[] = {RAPK_MAIN_DEVICE_ID,
-                                          RAPK_EXPANSION_DEVICE_ID};
         struct timeval    tv;
-        ptl_process_id_t  process_id;
-        int               pkmem = atomic_read(&portal_kmemory);
+        int               pkmem = cfs_atomic_read(&libcfs_kmemory);
         int               rc;
         int               i;
         kra_device_t     *dev;
+       char              name[16];
 
-        LASSERT (nal == &kranal_api);
+        LASSERT (ni->ni_lnd == &the_kralnd);
 
-        if (nal->nal_refct != 0) {
-                if (actual_limits != NULL)
-                        *actual_limits = kranal_lib.libnal_ni.ni_actual_limits;
-                /* This module got the first ref */
-                PORTAL_MODULE_USE;
-                return PTL_OK;
+        /* Only 1 instance supported */
+        if (kranal_data.kra_init != RANAL_INIT_NOTHING) {
+                CERROR ("Only 1 instance supported\n");
+                return -EPERM;
         }
 
-        LASSERT (kranal_data.kra_init == RANAL_INIT_NOTHING);
+        if (lnet_set_ip_niaddr(ni) != 0) {
+                CERROR ("Can't determine my NID\n");
+                return -EPERM;
+        }
 
+        if (*kranal_tunables.kra_credits > *kranal_tunables.kra_ntx) {
+                CERROR ("Can't set credits(%d) > ntx(%d)\n",
+                        *kranal_tunables.kra_credits,
+                        *kranal_tunables.kra_ntx);
+                return -EINVAL;
+        }
+        
         memset(&kranal_data, 0, sizeof(kranal_data)); /* zero pointers, flags etc */
 
-        /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
-         * a unique (for all time) incarnation so we can uniquely identify
-         * the sender.  The incarnation is an incrementing counter
-         * initialised with seconds + microseconds at startup time.  So we
-         * rely on NOT creating connections more frequently on average than
-         * 1MHz to ensure we don't use old incarnations when we reboot. */
-        do_gettimeofday(&tv);
-        kranal_data.kra_next_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
+        ni->ni_maxtxcredits = *kranal_tunables.kra_credits;
+        ni->ni_peertxcredits = *kranal_tunables.kra_peercredits;
 
-        init_MUTEX(&kranal_data.kra_nid_mutex);
-        init_MUTEX_LOCKED(&kranal_data.kra_listener_signal);
+        ni->ni_data = &kranal_data;
+        kranal_data.kra_ni = ni;
 
-        rwlock_init(&kranal_data.kra_global_lock);
+       /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
+        * a unique (for all time) connstamp so we can uniquely identify
+        * the sender.  The connstamp is an incrementing counter
+        * initialised with seconds + microseconds at startup time.  So we
+        * rely on NOT creating connections more frequently on average than
+        * 1MHz to ensure we don't use old connstamps when we reboot. */
+       do_gettimeofday(&tv);
+       kranal_data.kra_connstamp =
+       kranal_data.kra_peerstamp = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
 
-        for (i = 0; i < RANAL_MAXDEVS; i++ ) {
-                kra_device_t  *dev = &kranal_data.kra_devices[i];
+       rwlock_init(&kranal_data.kra_global_lock);
 
-                dev->rad_idx = i;
-                INIT_LIST_HEAD(&dev->rad_connq);
-                init_waitqueue_head(&dev->rad_waitq);
-                spin_lock_init(&dev->rad_lock);
-        }
+       for (i = 0; i < RANAL_MAXDEVS; i++ ) {
+               kra_device_t  *dev = &kranal_data.kra_devices[i];
+
+               dev->rad_idx = i;
+               CFS_INIT_LIST_HEAD(&dev->rad_ready_conns);
+               CFS_INIT_LIST_HEAD(&dev->rad_new_conns);
+               init_waitqueue_head(&dev->rad_waitq);
+               spin_lock_init(&dev->rad_lock);
+       }
 
-        init_waitqueue_head(&kranal_data.kra_reaper_waitq);
-        spin_lock_init(&kranal_data.kra_reaper_lock);
+       kranal_data.kra_new_min_timeout = MAX_SCHEDULE_TIMEOUT;
+       init_waitqueue_head(&kranal_data.kra_reaper_waitq);
+       spin_lock_init(&kranal_data.kra_reaper_lock);
 
-        INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
-        init_waitqueue_head(&kranal_data.kra_connd_waitq);
-        spin_lock_init(&kranal_data.kra_connd_lock);
+       CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
+       CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
+       init_waitqueue_head(&kranal_data.kra_connd_waitq);
+       spin_lock_init(&kranal_data.kra_connd_lock);
 
-        INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
-        INIT_LIST_HEAD(&kranal_data.kra_idle_nblk_txs);
-        init_waitqueue_head(&kranal_data.kra_idle_tx_waitq);
-        spin_lock_init(&kranal_data.kra_tx_lock);
+        CFS_INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
+       spin_lock_init(&kranal_data.kra_tx_lock);
+
+       /* OK to call kranal_api_shutdown() to cleanup now */
+       kranal_data.kra_init = RANAL_INIT_DATA;
+       try_module_get(THIS_MODULE);
 
-        /* OK to call kranal_api_shutdown() to cleanup now */
-        kranal_data.kra_init = RANAL_INIT_DATA;
-        
         kranal_data.kra_peer_hash_size = RANAL_PEER_HASH_SIZE;
-        PORTAL_ALLOC(kranal_data.kra_peers,
-                     sizeof(struct list_head) * kranal_data.kra_peer_hash_size);
+        LIBCFS_ALLOC(kranal_data.kra_peers,
+                     sizeof(cfs_list_t) *
+                            kranal_data.kra_peer_hash_size);
         if (kranal_data.kra_peers == NULL)
                 goto failed;
 
         for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
-                INIT_LIST_HEAD(&kranal_data.kra_peers[i]);
+                CFS_INIT_LIST_HEAD(&kranal_data.kra_peers[i]);
 
         kranal_data.kra_conn_hash_size = RANAL_PEER_HASH_SIZE;
-        PORTAL_ALLOC(kranal_data.kra_conns,
-                     sizeof(struct list_head) * kranal_data.kra_conn_hash_size);
+        LIBCFS_ALLOC(kranal_data.kra_conns,
+                     sizeof(cfs_list_t) *
+                            kranal_data.kra_conn_hash_size);
         if (kranal_data.kra_conns == NULL)
                 goto failed;
 
         for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
-                INIT_LIST_HEAD(&kranal_data.kra_conns[i]);
+                CFS_INIT_LIST_HEAD(&kranal_data.kra_conns[i]);
 
-        rc = kranal_alloc_txdescs(&kranal_data.kra_idle_txs, RANAL_NTX);
+        rc = kranal_alloc_txdescs(&kranal_data.kra_idle_txs, 
+                                  *kranal_tunables.kra_ntx);
         if (rc != 0)
                 goto failed;
 
-        rc = kranal_alloc_txdescs(&kranal_data.kra_idle_nblk_txs,RANAL_NTX_NBLK);
-        if (rc != 0)
-                goto failed;
-
-        process_id.pid = requested_pid;
-        process_id.nid = PTL_NID_ANY;           /* don't know my NID yet */
-
-        rc = lib_init(&kranal_lib, nal, process_id,
-                      requested_limits, actual_limits);
-        if (rc != PTL_OK) {
-                CERROR("lib_init failed: error %d\n", rc);
-                goto failed;
-        }
-
-        /* lib interface initialised */
-        kranal_data.kra_init = RANAL_INIT_LIB;
-        /*****************************************************/
-
-        rc = kranal_thread_start(kranal_reaper, NULL);
+       rc = kranal_thread_start(kranal_reaper, NULL, "kranal_reaper");
         if (rc != 0) {
                 CERROR("Can't spawn ranal reaper: %d\n", rc);
                 goto failed;
         }
 
-        for (i = 0; i < RANAL_N_CONND; i++) {
-                rc = kranal_thread_start(kranal_connd, (void *)i);
+        for (i = 0; i < *kranal_tunables.kra_n_connd; i++) {
+               snprintf(name, sizeof(name), "kranal_connd_%02d", i);
+               rc = kranal_thread_start(kranal_connd,
+                                        (void *)(unsigned long)i, name);
                 if (rc != 0) {
                         CERROR("Can't spawn ranal connd[%d]: %d\n",
                                i, rc);
@@ -1878,15 +1675,26 @@ kranal_api_startup (nal_t *nal, ptl_pid_t requested_pid,
                 }
         }
 
-        LASSERT(kranal_data.kra_ndevs == 0);
-        for (i = 0; i < sizeof(device_ids)/sizeof(device_ids[0]); i++) {
+        LASSERT (kranal_data.kra_ndevs == 0);
+
+        /* Use all available RapidArray devices */
+        for (i = 0; i < RANAL_MAXDEVS; i++) {
                 dev = &kranal_data.kra_devices[kranal_data.kra_ndevs];
 
-                rc = kranal_device_init(device_ids[i], dev);
+                rc = kranal_device_init(kranal_devids[i], dev);
                 if (rc == 0)
                         kranal_data.kra_ndevs++;
+        }
 
-                rc = kranal_thread_start(kranal_scheduler, dev);
+        if (kranal_data.kra_ndevs == 0) {
+                CERROR("Can't initialise any RapidArray devices\n");
+                goto failed;
+        }
+        
+        for (i = 0; i < kranal_data.kra_ndevs; i++) {
+                dev = &kranal_data.kra_devices[i];
+               snprintf(name, sizeof(name), "kranal_sd_%02d", dev->rad_idx);
+               rc = kranal_thread_start(kranal_scheduler, dev, name);
                 if (rc != 0) {
                         CERROR("Can't spawn ranal scheduler[%d]: %d\n",
                                i, rc);
@@ -1894,40 +1702,23 @@ kranal_api_startup (nal_t *nal, ptl_pid_t requested_pid,
                 }
         }
 
-        if (kranal_data.kra_ndevs == 0)
-                goto failed;
-
-        rc = libcfs_nal_cmd_register(OPENRANAL, &kranal_cmd, NULL);
-        if (rc != 0) {
-                CERROR("Can't initialise command interface (rc = %d)\n", rc);
-                goto failed;
-        }
-
         /* flag everything initialised */
         kranal_data.kra_init = RANAL_INIT_ALL;
         /*****************************************************/
 
-        CDEBUG(D_MALLOC, "initial kmem %d\n", atomic_read(&portal_kmemory));
-        printk(KERN_INFO "Lustre: RapidArray NAL loaded "
-               "(initial mem %d)\n", pkmem);
-
-        return PTL_OK;
+        CDEBUG(D_MALLOC, "initial kmem %d\n", pkmem);
+        return 0;
 
  failed:
-        kranal_api_shutdown(&kranal_api);    
-        return PTL_FAIL;
+        kranal_shutdown(ni);
+        return -ENETDOWN;
 }
 
 void __exit
 kranal_module_fini (void)
 {
-#ifdef CONFIG_SYSCTL
-        if (kranal_tunables.kra_sysctl != NULL)
-                unregister_sysctl_table(kranal_tunables.kra_sysctl);
-#endif
-        PtlNIFini(kranal_ni);
-
-        ptl_unregister_nal(OPENRANAL);
+        lnet_unregister_lnd(&the_kralnd);
+        kranal_tunables_fini();
 }
 
 int __init
@@ -1935,43 +1726,17 @@ kranal_module_init (void)
 {
         int    rc;
 
-        /* the following must be sizeof(int) for
-         * proc_dointvec/kranal_listener_procint() */
-        LASSERT (sizeof(kranal_tunables.kra_timeout) == sizeof(int));
-        LASSERT (sizeof(kranal_tunables.kra_listener_timeout) == sizeof(int));
-        LASSERT (sizeof(kranal_tunables.kra_backlog) == sizeof(int));
-        LASSERT (sizeof(kranal_tunables.kra_port) == sizeof(int));
-        LASSERT (sizeof(kranal_tunables.kra_max_immediate) == sizeof(int));
-
-        kranal_api.nal_ni_init = kranal_api_startup;
-        kranal_api.nal_ni_fini = kranal_api_shutdown;
-
-        /* Initialise dynamic tunables to defaults once only */
-        kranal_tunables.kra_timeout = RANAL_TIMEOUT;
-
-        rc = ptl_register_nal(OPENRANAL, &kranal_api);
-        if (rc != PTL_OK) {
-                CERROR("Can't register RANAL: %d\n", rc);
-                return -ENOMEM;               /* or something... */
-        }
+        rc = kranal_tunables_init();
+        if (rc != 0)
+                return rc;
 
-        /* Pure gateways want the NAL started up at module load time... */
-        rc = PtlNIInit(OPENRANAL, LUSTRE_SRV_PTL_PID, NULL, NULL, &kranal_ni);
-        if (rc != PTL_OK && rc != PTL_IFACE_DUP) {
-                ptl_unregister_nal(OPENRANAL);
-                return -ENODEV;
-        }
+        lnet_register_lnd(&the_kralnd);
 
-#ifdef CONFIG_SYSCTL
-        /* Press on regardless even if registering sysctl doesn't work */
-        kranal_tunables.kra_sysctl = 
-                register_sysctl_table(kranal_top_ctl_table, 0);
-#endif
         return 0;
 }
 
-MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
-MODULE_DESCRIPTION("Kernel RapidArray NAL v0.01");
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Kernel RapidArray LND v0.01");
 MODULE_LICENSE("GPL");
 
 module_init(kranal_module_init);