LU-7734 lnet: fix lnet_peer_table_cleanup_locked()
diff --git a/lnet/lnet/peer.c b/lnet/lnet/peer.c
index 6a6f56b..ec26f37 100644
 #include <lnet/lib-lnet.h>
 #include <lnet/lib-dlc.h>
 
+static void
+lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
+{
+       if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
+               list_del_init(&lpni->lpni_on_remote_peer_ni_list);
+               lnet_peer_ni_decref_locked(lpni);
+       }
+}
+
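+/*
+ * A new local net has been added. Walk the remote peer_ni list: any
+ * peer_ni that now lives on a local net is attached to it, given its
+ * credits, and taken off the remote list.
+ */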
+void
+lnet_peer_net_added(struct lnet_net *net)
+{
+       struct lnet_peer_ni *lpni, *tmp;
+
+       list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
+                                lpni_on_remote_peer_ni_list) {
+
+               if (LNET_NIDNET(lpni->lpni_nid) == net->net_id) {
+                       lpni->lpni_net = net;
+
+                       spin_lock(&lpni->lpni_lock);
+                       lpni->lpni_txcredits =
+                               lpni->lpni_net->net_tunables.lct_peer_tx_credits;
+                       lpni->lpni_mintxcredits = lpni->lpni_txcredits;
+                       lpni->lpni_rtrcredits =
+                               lnet_peer_buffer_credits(lpni->lpni_net);
+                       lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
+                       spin_unlock(&lpni->lpni_lock);
+
+                       lnet_peer_remove_from_remote_list(lpni);
+               }
+       }
+}
+
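+/* Free the per-CPT peer hash tables; all peer_nis must be gone by now. */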
+static void
+lnet_peer_tables_destroy(void)
+{
+       struct lnet_peer_table  *ptable;
+       struct list_head        *hash;
+       int                     i;
+       int                     j;
+
+       if (!the_lnet.ln_peer_tables)
+               return;
+
+       cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
+               hash = ptable->pt_hash;
+               if (!hash) /* not initialized */
+                       break;
+
+               LASSERT(list_empty(&ptable->pt_zombie_list));
+
+               ptable->pt_hash = NULL;
+               for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
+                       LASSERT(list_empty(&hash[j]));
+
+               LIBCFS_FREE(hash, LNET_PEER_HASH_SIZE * sizeof(*hash));
+       }
+
+       cfs_percpt_free(the_lnet.ln_peer_tables);
+       the_lnet.ln_peer_tables = NULL;
+}
+
 int
 lnet_peer_tables_create(void)
 {
@@ -61,6 +124,9 @@ lnet_peer_tables_create(void)
                        return -ENOMEM;
                }
 
+               spin_lock_init(&ptable->pt_zombie_lock);
+               INIT_LIST_HEAD(&ptable->pt_zombie_list);
+
                for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
                        INIT_LIST_HEAD(&hash[j]);
                ptable->pt_hash = hash; /* sign of initialization */
@@ -69,61 +135,238 @@ lnet_peer_tables_create(void)
        return 0;
 }
 
-void
-lnet_peer_tables_destroy(void)
+static struct lnet_peer_ni *
+lnet_peer_ni_alloc(lnet_nid_t nid)
 {
-       struct lnet_peer_table  *ptable;
-       struct list_head        *hash;
-       int                     i;
-       int                     j;
+       struct lnet_peer_ni *lpni;
+       struct lnet_net *net;
+       int cpt;
+
+       cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
+
+       LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
+       if (!lpni)
+               return NULL;
+
+       INIT_LIST_HEAD(&lpni->lpni_txq);
+       INIT_LIST_HEAD(&lpni->lpni_rtrq);
+       INIT_LIST_HEAD(&lpni->lpni_routes);
+       INIT_LIST_HEAD(&lpni->lpni_hashlist);
+       INIT_LIST_HEAD(&lpni->lpni_on_peer_net_list);
+       INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
+
+       spin_lock_init(&lpni->lpni_lock);
+
+       lpni->lpni_alive = !lnet_peers_start_down(); /* 1 bit!! */
+       lpni->lpni_last_alive = cfs_time_current(); /* assumes alive */
+       lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
+       lpni->lpni_nid = nid;
+       lpni->lpni_cpt = cpt;
+       lnet_set_peer_ni_health_locked(lpni, true);
+
+       net = lnet_get_net_locked(LNET_NIDNET(nid));
+       lpni->lpni_net = net;
+       if (net) {
+               lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
+               lpni->lpni_mintxcredits = lpni->lpni_txcredits;
+               lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
+               lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
+       } else {
+               /*
+                * This peer_ni is not on a local network, so we
+                * cannot add the credits here. In case the net is
+                * added later, add the peer_ni to the remote peer ni
+                * list so it can be easily found and revisited.
+                */
+               /* FIXME: per-net implementation instead? */
+               atomic_inc(&lpni->lpni_refcount);
+               list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
+                             &the_lnet.ln_remote_peer_ni_list);
+       }
+
+       /* TODO: update flags */
+
+       return lpni;
+}
+
+static struct lnet_peer_net *
+lnet_peer_net_alloc(__u32 net_id)
+{
+       struct lnet_peer_net *lpn;
 
-       if (the_lnet.ln_peer_tables == NULL)
+       LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
+       if (!lpn)
+               return NULL;
+
+       INIT_LIST_HEAD(&lpn->lpn_on_peer_list);
+       INIT_LIST_HEAD(&lpn->lpn_peer_nis);
+       lpn->lpn_net_id = net_id;
+
+       return lpn;
+}
+
+static struct lnet_peer *
+lnet_peer_alloc(lnet_nid_t nid)
+{
+       struct lnet_peer *lp;
+
+       LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
+       if (!lp)
+               return NULL;
+
+       INIT_LIST_HEAD(&lp->lp_on_lnet_peer_list);
+       INIT_LIST_HEAD(&lp->lp_peer_nets);
+       lp->lp_primary_nid = nid;
+
+       /* TODO: update flags */
+
+       return lp;
+}
+
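+/*
+ * Unlink lpni from its peer_net. If that leaves the peer_net empty it is
+ * freed, and if the peer then has no peer_nets left it is freed as well.
+ */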
+static void
+lnet_try_destroy_peer_hierarchy_locked(struct lnet_peer_ni *lpni)
+{
+       struct lnet_peer_net *peer_net;
+       struct lnet_peer *peer;
+
+       /* TODO: could the below situation happen? accessing an already
+        * destroyed peer? */
+       if (lpni->lpni_peer_net == NULL ||
+           lpni->lpni_peer_net->lpn_peer == NULL)
                return;
 
-       cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
-               hash = ptable->pt_hash;
-               if (hash == NULL) /* not intialized */
-                       break;
+       peer_net = lpni->lpni_peer_net;
+       peer = lpni->lpni_peer_net->lpn_peer;
 
-               ptable->pt_hash = NULL;
-               for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
-                       LASSERT(list_empty(&hash[j]));
+       list_del_init(&lpni->lpni_on_peer_net_list);
+       lpni->lpni_peer_net = NULL;
 
-               LIBCFS_FREE(hash, LNET_PEER_HASH_SIZE * sizeof(*hash));
+       /* if peer_net is empty, then remove it from the peer */
+       if (list_empty(&peer_net->lpn_peer_nis)) {
+               list_del_init(&peer_net->lpn_on_peer_list);
+               peer_net->lpn_peer = NULL;
+               LIBCFS_FREE(peer_net, sizeof(*peer_net));
+
+               /* if the peer is empty then remove it from the
+                * the_lnet.ln_peers */
+               if (list_empty(&peer->lp_peer_nets)) {
+                       list_del_init(&peer->lp_on_lnet_peer_list);
+                       LIBCFS_FREE(peer, sizeof(*peer));
+               }
        }
+}
 
-       cfs_percpt_free(the_lnet.ln_peer_tables);
-       the_lnet.ln_peer_tables = NULL;
+/* called with lnet_net_lock LNET_LOCK_EX held */
+static void
+lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni)
+{
+       struct lnet_peer_table *ptable = NULL;
+
+       lnet_peer_remove_from_remote_list(lpni);
+
+       /* remove peer ni from the hash list. */
+       list_del_init(&lpni->lpni_hashlist);
+
+       /* decrement the ref count on the peer table */
+       ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
+       LASSERT(atomic_read(&ptable->pt_number) > 0);
+       atomic_dec(&ptable->pt_number);
+
+       /*
+        * The peer_ni can no longer be found with a lookup. But there
+        * can be current users, so keep track of it on the zombie
+        * list until the reference count has gone to zero.
+        *
+        * The last reference may be lost in a place where the
+        * lnet_net_lock locks only a single cpt, and that cpt may not
+        * be lpni->lpni_cpt. So the zombie list of this peer_table
+        * has its own lock.
+        */
+       spin_lock(&ptable->pt_zombie_lock);
+       list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
+       ptable->pt_zombies++;
+       spin_unlock(&ptable->pt_zombie_lock);
+
+       /* no need to keep this peer on the hierarchy anymore */
+       lnet_try_destroy_peer_hierarchy_locked(lpni);
+
+       /* decrement reference on peer */
+       lnet_peer_ni_decref_locked(lpni);
+}
+
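+/*
+ * Called during LNet shutdown: delete any peer_nis still on the remote
+ * list and free the peer tables.
+ */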
+void lnet_peer_uninit(void)
+{
+       struct lnet_peer_ni *lpni, *tmp;
+
+       lnet_net_lock(LNET_LOCK_EX);
+
+       /* remove all peer_nis from the remote peer and the hash list */
+       list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
+                                lpni_on_remote_peer_ni_list)
+               lnet_peer_ni_del_locked(lpni);
+
+       lnet_peer_tables_destroy();
+
+       lnet_net_unlock(LNET_LOCK_EX);
 }
 
 static void
-lnet_peer_table_cleanup_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable)
+lnet_peer_del_locked(struct lnet_peer *peer)
+{
+       struct lnet_peer_ni *lpni = NULL, *lpni2;
+
+       lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
+       while (lpni != NULL) {
+               lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
+               lnet_peer_ni_del_locked(lpni);
+               lpni = lpni2;
+       }
+}
+
+static void
+lnet_peer_table_cleanup_locked(struct lnet_net *net,
+                              struct lnet_peer_table *ptable)
 {
        int                      i;
-       struct lnet_peer_ni     *lp;
-       struct lnet_peer_ni     *tmp;
+       struct lnet_peer_ni     *next;
+       struct lnet_peer_ni     *lpni;
+       struct lnet_peer        *peer;
 
        for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
-               list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
+               list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
                                         lpni_hashlist) {
-                       if (ni != NULL && ni->ni_net != lp->lpni_net)
+                       if (net != NULL && net != lpni->lpni_net)
+                               continue;
+
+                       peer = lpni->lpni_peer_net->lpn_peer;
+                       if (peer->lp_primary_nid != lpni->lpni_nid) {
+                               lnet_peer_ni_del_locked(lpni);
                                continue;
-                       list_del_init(&lp->lpni_hashlist);
-                       /* Lose hash table's ref */
-                       ptable->pt_zombies++;
-                       lnet_peer_ni_decref_locked(lp);
+                       }
+                       /*
+                        * Removing the primary NID implies removing
+                        * the entire peer. Advance next beyond any
+                        * peer_ni that belongs to the same peer.
+                        */
+                       list_for_each_entry_from(next, &ptable->pt_hash[i],
+                                                lpni_hashlist) {
+                               if (next->lpni_peer_net->lpn_peer != peer)
+                                       break;
+                       }
+                       lnet_peer_del_locked(peer);
                }
        }
 }
 
 static void
-lnet_peer_table_finalize_wait_locked(struct lnet_peer_table *ptable,
-                                    int cpt_locked)
+lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
 {
-       int     i;
+       int     i = 3;
 
-       for (i = 3; ptable->pt_zombies != 0; i++) {
-               lnet_net_unlock(cpt_locked);
+       spin_lock(&ptable->pt_zombie_lock);
+       while (ptable->pt_zombies) {
+               spin_unlock(&ptable->pt_zombie_lock);
 
                if (IS_PO2(i)) {
                        CDEBUG(D_WARNING,
@@ -132,13 +375,14 @@ lnet_peer_table_finalize_wait_locked(struct lnet_peer_table *ptable,
                }
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(cfs_time_seconds(1) >> 1);
-               lnet_net_lock(cpt_locked);
+               spin_lock(&ptable->pt_zombie_lock);
        }
+       spin_unlock(&ptable->pt_zombie_lock);
 }
 
 static void
-lnet_peer_table_del_rtrs_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable,
-                               int cpt_locked)
+lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
+                               struct lnet_peer_table *ptable)
 {
        struct lnet_peer_ni     *lp;
        struct lnet_peer_ni     *tmp;
@@ -148,7 +392,7 @@ lnet_peer_table_del_rtrs_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable,
        for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
                list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
                                         lpni_hashlist) {
-                       if (ni->ni_net != lp->lpni_net)
+                       if (net != lp->lpni_net)
                                continue;
 
                        if (lp->lpni_rtr_refcount == 0)
@@ -156,41 +400,37 @@ lnet_peer_table_del_rtrs_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable,
 
                        lpni_nid = lp->lpni_nid;
 
-                       lnet_net_unlock(cpt_locked);
+                       lnet_net_unlock(LNET_LOCK_EX);
                        lnet_del_route(LNET_NIDNET(LNET_NID_ANY), lpni_nid);
-                       lnet_net_lock(cpt_locked);
+                       lnet_net_lock(LNET_LOCK_EX);
                }
        }
 }
 
 void
-lnet_peer_tables_cleanup(lnet_ni_t *ni)
+lnet_peer_tables_cleanup(struct lnet_net *net)
 {
        int                             i;
        struct lnet_peer_table          *ptable;
 
-       LASSERT(the_lnet.ln_shutdown || ni != NULL);
+       LASSERT(the_lnet.ln_shutdown || net != NULL);
        /* If just deleting the peers for a NI, get rid of any routes these
         * peers are gateways for. */
        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                lnet_net_lock(LNET_LOCK_EX);
-               lnet_peer_table_del_rtrs_locked(ni, ptable, i);
+               lnet_peer_table_del_rtrs_locked(net, ptable);
                lnet_net_unlock(LNET_LOCK_EX);
        }
 
        /* Start the cleanup process */
        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                lnet_net_lock(LNET_LOCK_EX);
-               lnet_peer_table_cleanup_locked(ni, ptable);
+               lnet_peer_table_cleanup_locked(net, ptable);
                lnet_net_unlock(LNET_LOCK_EX);
        }
 
-       /* Wait until all peers have been destroyed. */
-       cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
-               lnet_net_lock(LNET_LOCK_EX);
-               lnet_peer_table_finalize_wait_locked(ptable, i);
-               lnet_net_unlock(LNET_LOCK_EX);
-       }
+       cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
+               lnet_peer_ni_finalize_wait(ptable);
 }
 
 static struct lnet_peer_ni *
@@ -213,10 +453,13 @@ lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
 }
 
 struct lnet_peer_ni *
-lnet_find_peer_ni_locked(lnet_nid_t nid, int cpt)
+lnet_find_peer_ni_locked(lnet_nid_t nid)
 {
        struct lnet_peer_ni *lpni;
        struct lnet_peer_table *ptable;
+       int cpt;
+
+       cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
 
        ptable = the_lnet.ln_peer_tables[cpt];
        lpni = lnet_get_peer_ni_locked(ptable, nid);
@@ -224,72 +467,461 @@ lnet_find_peer_ni_locked(lnet_nid_t nid, int cpt)
        return lpni;
 }
 
-static void
-lnet_try_destroy_peer_hierarchy_locked(struct lnet_peer_ni *lpni)
+struct lnet_peer *
+lnet_find_or_create_peer_locked(lnet_nid_t dst_nid, int cpt)
+{
+       struct lnet_peer_ni *lpni;
+       struct lnet_peer *lp;
+
+       lpni = lnet_find_peer_ni_locked(dst_nid);
+       if (!lpni) {
+               lpni = lnet_nid2peerni_locked(dst_nid, cpt);
+               if (IS_ERR(lpni))
+                       return ERR_CAST(lpni);
+       }
+
+       lp = lpni->lpni_peer_net->lpn_peer;
+       lnet_peer_ni_decref_locked(lpni);
+
+       return lp;
+}
+
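+/*
+ * Walk the global peer list and return the idx'th peer_ni, reporting its
+ * peer_net and peer through lpn and lp.
+ */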
+struct lnet_peer_ni *
+lnet_get_peer_ni_idx_locked(int idx, struct lnet_peer_net **lpn,
+                           struct lnet_peer **lp)
+{
+       struct lnet_peer_ni     *lpni;
+
+       list_for_each_entry((*lp), &the_lnet.ln_peers, lp_on_lnet_peer_list) {
+               list_for_each_entry((*lpn), &((*lp)->lp_peer_nets), lpn_on_peer_list) {
+                       list_for_each_entry(lpni, &((*lpn)->lpn_peer_nis),
+                                           lpni_on_peer_net_list)
+                               if (idx-- == 0)
+                                       return lpni;
+               }
+       }
+
+       return NULL;
+}
+
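+/*
+ * Iterate over the peer_nis of a peer, or of a single peer_net if one is
+ * given. Pass prev == NULL to start; NULL is returned once the iteration
+ * is complete.
+ */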
+struct lnet_peer_ni *
+lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
+                            struct lnet_peer_net *peer_net,
+                            struct lnet_peer_ni *prev)
+{
+       struct lnet_peer_ni *lpni;
+       struct lnet_peer_net *net = peer_net;
+
+       if (!prev) {
+               if (!net)
+                       net = list_entry(peer->lp_peer_nets.next,
+                                        struct lnet_peer_net,
+                                        lpn_on_peer_list);
+               lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
+                                 lpni_on_peer_net_list);
+
+               return lpni;
+       }
+
+       if (prev->lpni_on_peer_net_list.next ==
+           &prev->lpni_peer_net->lpn_peer_nis) {
+               /*
+                * If we've reached the end of the peer_ni list and a peer
+                * net was specified, then there are no more peer NIs in
+                * that net.
+                */
+               if (net)
+                       return NULL;
+
+               /*
+                * We've reached the end of this net's peer_ni list; move
+                * on to the next net.
+                */
+               if (prev->lpni_peer_net->lpn_on_peer_list.next ==
+                   &peer->lp_peer_nets)
+                       /* no more nets and no more NIs. */
+                       return NULL;
+
+               /* get the next net */
+               net = list_entry(prev->lpni_peer_net->lpn_on_peer_list.next,
+                                struct lnet_peer_net,
+                                lpn_on_peer_list);
+               /* get the ni on it */
+               lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
+                                 lpni_on_peer_net_list);
+
+               return lpni;
+       }
+
+       /* there are more nis left */
+       lpni = list_entry(prev->lpni_on_peer_net_list.next,
+                         struct lnet_peer_ni, lpni_on_peer_net_list);
+
+       return lpni;
+}
+
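+/* Return true if ni is one of the peer_ni's preferred local NIs. */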
+bool
+lnet_peer_is_ni_pref_locked(struct lnet_peer_ni *lpni, struct lnet_ni *ni)
+{
+       int i;
+
+       for (i = 0; i < lpni->lpni_pref_nnids; i++) {
+               if (lpni->lpni_pref_nids[i] == ni->ni_nid)
+                       return true;
+       }
+       return false;
+}
+
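+/*
+ * Return the primary NID of the peer owning nid, or nid itself if no
+ * peer_ni is found for it.
+ */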
+lnet_nid_t
+lnet_peer_primary_nid(lnet_nid_t nid)
+{
+       struct lnet_peer_ni *lpni;
+       lnet_nid_t primary_nid = nid;
+       int cpt;
+
+       cpt = lnet_net_lock_current();
+       lpni = lnet_find_peer_ni_locked(nid);
+       if (lpni) {
+               primary_nid = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
+               lnet_peer_ni_decref_locked(lpni);
+       }
+       lnet_net_unlock(cpt);
+
+       return primary_nid;
+}
+
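+/* Find the peer_net with the given net_id on this peer, or NULL. */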
+struct lnet_peer_net *
+lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
 {
        struct lnet_peer_net *peer_net;
+       list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_on_peer_list) {
+               if (peer_net->lpn_net_id == net_id)
+                       return peer_net;
+       }
+       return NULL;
+}
+
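+/*
+ * Create whatever is missing of the peer/peer_net/peer_ni hierarchy for
+ * nid and link the pieces together under lnet_net_lock(LNET_LOCK_EX).
+ * Anything allocated here but never linked is freed on error.
+ */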
+static int
+lnet_peer_setup_hierarchy(struct lnet_peer *lp, struct lnet_peer_ni *lpni,
+                         lnet_nid_t nid)
+{
+       struct lnet_peer_net *lpn = NULL;
+       struct lnet_peer_table *ptable;
+       __u32 net_id = LNET_NIDNET(nid);
+
+       /*
+        * Create the peer_ni, peer_net, and peer if they don't exist
+        * yet.
+        */
+       if (lp) {
+               lpn = lnet_peer_get_net_locked(lp, net_id);
+       } else {
+               lp = lnet_peer_alloc(nid);
+               if (!lp)
+                       goto out_enomem;
+       }
+
+       if (!lpn) {
+               lpn = lnet_peer_net_alloc(net_id);
+               if (!lpn)
+                       goto out_maybe_free_lp;
+       }
+
+       if (!lpni) {
+               lpni = lnet_peer_ni_alloc(nid);
+               if (!lpni)
+                       goto out_maybe_free_lpn;
+       }
+
+       /* Install the new peer_ni */
+       lnet_net_lock(LNET_LOCK_EX);
+       /* Add peer_ni to global peer table hash, if necessary. */
+       if (list_empty(&lpni->lpni_hashlist)) {
+               ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
+               list_add_tail(&lpni->lpni_hashlist,
+                             &ptable->pt_hash[lnet_nid2peerhash(nid)]);
+               ptable->pt_version++;
+               atomic_inc(&ptable->pt_number);
+               atomic_inc(&lpni->lpni_refcount);
+       }
+
+       /* Detach the peer_ni from an existing peer, if necessary. */
+       if (lpni->lpni_peer_net && lpni->lpni_peer_net->lpn_peer != lp)
+               lnet_try_destroy_peer_hierarchy_locked(lpni);
+
+       /* Add peer_ni to peer_net */
+       lpni->lpni_peer_net = lpn;
+       list_add_tail(&lpni->lpni_on_peer_net_list, &lpn->lpn_peer_nis);
+
+       /* Add peer_net to peer */
+       if (!lpn->lpn_peer) {
+               lpn->lpn_peer = lp;
+               list_add_tail(&lpn->lpn_on_peer_list, &lp->lp_peer_nets);
+       }
+
+       /* Add peer to global peer list */
+       if (list_empty(&lp->lp_on_lnet_peer_list))
+               list_add_tail(&lp->lp_on_lnet_peer_list, &the_lnet.ln_peers);
+       lnet_net_unlock(LNET_LOCK_EX);
+
+       return 0;
+
+out_maybe_free_lpn:
+       if (list_empty(&lpn->lpn_on_peer_list))
+               LIBCFS_FREE(lpn, sizeof(*lpn));
+out_maybe_free_lp:
+       if (list_empty(&lp->lp_on_lnet_peer_list))
+               LIBCFS_FREE(lp, sizeof(*lp));
+out_enomem:
+       return -ENOMEM;
+}
+
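+/* Add nid as the primary NID of an MR peer, creating the peer if needed. */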
+static int
+lnet_add_prim_lpni(lnet_nid_t nid)
+{
+       int rc;
        struct lnet_peer *peer;
+       struct lnet_peer_ni *lpni;
 
-       /* TODO: could the below situation happen? accessing an already
-        * destroyed peer? */
-       if (lpni->lpni_peer_net == NULL ||
-           lpni->lpni_peer_net->lpn_peer == NULL)
-               return;
+       LASSERT(nid != LNET_NID_ANY);
+
+       /*
+        * Look up the NID and its peer:
+        *  if the peer doesn't exist, create it.
+        *  if this is a non-MR peer, change its state to MR and exit.
+        *  if this is an MR peer and it's a primary NI: no-op.
+        *  if this is an MR peer and it's not a primary NI: operation
+        *     not allowed.
+        *
+        * Adding and deleting peer NIs is serialized through the
+        * ln_api_mutex, so we can safely look up peers while it is held.
+        * Only when we need to change the ptable do we take the
+        * lnet_net_lock() exclusively.
+        */
+       lpni = lnet_find_peer_ni_locked(nid);
+       if (!lpni) {
+               rc = lnet_peer_setup_hierarchy(NULL, NULL, nid);
+               if (rc != 0)
+                       return rc;
+               lpni = lnet_find_peer_ni_locked(nid);
+       }
+
+       LASSERT(lpni);
+
+       lnet_peer_ni_decref_locked(lpni);
 
-       peer_net = lpni->lpni_peer_net;
        peer = lpni->lpni_peer_net->lpn_peer;
 
-       list_del_init(&lpni->lpni_on_peer_net_list);
-       lpni->lpni_peer_net = NULL;
+       /*
+        * If we found an lpni with the same NID as the one we're trying
+        * to create, then we're trying to create an lpni that already
+        * exists and belongs to a different peer.
+        */
+       if (peer->lp_primary_nid != nid)
+               return -EEXIST;
 
-       /* if peer_net is empty, then remove it from the peer */
-       if (list_empty(&peer_net->lpn_peer_nis)) {
-               list_del_init(&peer_net->lpn_on_peer_list);
-               peer_net->lpn_peer = NULL;
-               LIBCFS_FREE(peer_net, sizeof(*peer_net));
+       /*
+        * If the lpni we found is not multi-rail, either because it was
+        * created earlier as a non-MR lpni or because we just created it,
+        * mark its peer as MR capable with this NID as the primary.
+        *
+        * TODO: update flags if necessary
+        */
+       if (!peer->lp_multi_rail && peer->lp_primary_nid == nid)
+               peer->lp_multi_rail = true;
 
-               /* if the peer is empty then remove it from the
-                * the_lnet.ln_peers */
-               if (list_empty(&peer->lp_peer_nets)) {
-                       list_del_init(&peer->lp_on_lnet_peer_list);
-                       LIBCFS_FREE(peer, sizeof(*peer));
+       return 0;
+}
+
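+/*
+ * Attach nid as a peer_ni of the peer whose primary NID is key_nid. The
+ * primary NID must already have been created.
+ */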
+static int
+lnet_add_peer_ni_to_prim_lpni(lnet_nid_t key_nid, lnet_nid_t nid)
+{
+       struct lnet_peer *peer, *primary_peer;
+       struct lnet_peer_ni *lpni = NULL, *klpni = NULL;
+
+       LASSERT(key_nid != LNET_NID_ANY && nid != LNET_NID_ANY);
+
+       /*
+        * key nid must be created by this point. If not then this
+        * operation is not permitted
+        */
+       klpni = lnet_find_peer_ni_locked(key_nid);
+       if (!klpni)
+               return -ENOENT;
+
+       lnet_peer_ni_decref_locked(klpni);
+
+       primary_peer = klpni->lpni_peer_net->lpn_peer;
+
+       lpni = lnet_find_peer_ni_locked(nid);
+       if (lpni) {
+               lnet_peer_ni_decref_locked(lpni);
+
+               peer = lpni->lpni_peer_net->lpn_peer;
+               /*
+                * lpni already exists in the system but it belongs to
+                * a different peer, so we can't re-add it.
+                */
+               if (peer->lp_primary_nid != key_nid && peer->lp_multi_rail) {
+                       CERROR("Cannot add NID %s owned by peer %s to peer %s\n",
+                              libcfs_nid2str(lpni->lpni_nid),
+                              libcfs_nid2str(peer->lp_primary_nid),
+                              libcfs_nid2str(key_nid));
+                       return -EEXIST;
+               } else if (peer->lp_primary_nid == key_nid) {
+                       /*
+                        * found a peer_ni that is already part of the
+                        * peer. This is a no-op operation.
+                        */
+                       return 0;
                }
+
+               /*
+                * TODO: else if (peer->lp_primary_nid != key_nid &&
+                *                !peer->lp_multi_rail)
+                * peer is not an MR peer and it will be moved in the next
+                * step to klpni, so update its flags accordingly.
+                * lnet_move_peer_ni()
+                */
+
+               /*
+                * TODO: call lnet_update_peer() from here to update the
+                * flags. This is the case when the lpni you're trying to
+                * add is already part of the peer. This could've been
+                * added by the DD previously, so go ahead and do any
+                * updates to the state if necessary
+                */
+
        }
+
+       /*
+        * When we get here we have either found an existing lpni, which
+        * we can switch to the new peer, or we need to create one and
+        * add it to the new peer.
+        */
+       return lnet_peer_setup_hierarchy(primary_peer, lpni, nid);
 }
 
+/*
+ * lpni creation initiated due to traffic either sending or receiving.
+ */
 static int
-lnet_build_peer_hierarchy(struct lnet_peer_ni *lpni)
+lnet_peer_ni_traffic_add(lnet_nid_t nid)
 {
-       struct lnet_peer *peer;
-       struct lnet_peer_net *peer_net;
-       __u32 lpni_net = LNET_NIDNET(lpni->lpni_nid);
+       struct lnet_peer_ni *lpni;
+       int rc = 0;
+
+       if (nid == LNET_NID_ANY)
+               return -EINVAL;
+
+       /* No need for lnet_net_lock here: the ln_api_mutex is held */
+       lpni = lnet_find_peer_ni_locked(nid);
+       if (lpni) {
+               /*
+                * TODO: lnet_update_primary_nid() but not all of it
+                * only indicate if we're converting this to MR capable
+                * Can happen due to DD
+                */
+               lnet_peer_ni_decref_locked(lpni);
+       } else {
+               rc = lnet_peer_setup_hierarchy(NULL, NULL, nid);
+       }
 
-       peer = NULL;
-       peer_net = NULL;
+       return rc;
 
-       LIBCFS_ALLOC(peer, sizeof(*peer));
-       if (peer == NULL)
-               return -ENOMEM;
+}
 
-       LIBCFS_ALLOC(peer_net, sizeof(*peer_net));
-       if (peer_net == NULL) {
-               LIBCFS_FREE(peer, sizeof(*peer));
-               return -ENOMEM;
+static int
+lnet_peer_ni_add_non_mr(lnet_nid_t nid)
+{
+       struct lnet_peer_ni *lpni;
+
+       lpni = lnet_find_peer_ni_locked(nid);
+       if (lpni) {
+               CERROR("Cannot add %s as non-mr when it already exists\n",
+                      libcfs_nid2str(nid));
+               lnet_peer_ni_decref_locked(lpni);
+               return -EEXIST;
        }
 
-       INIT_LIST_HEAD(&peer->lp_on_lnet_peer_list);
-       INIT_LIST_HEAD(&peer->lp_peer_nets);
-       INIT_LIST_HEAD(&peer_net->lpn_on_peer_list);
-       INIT_LIST_HEAD(&peer_net->lpn_peer_nis);
+       return lnet_peer_setup_hierarchy(NULL, NULL, nid);
+}
 
-       /* build the hierarchy */
-       peer_net->lpn_net_id = lpni_net;
-       peer_net->lpn_peer = peer;
-       lpni->lpni_peer_net = peer_net;
-       peer->lp_primary_nid = lpni->lpni_nid;
-       list_add_tail(&peer_net->lpn_on_peer_list, &peer->lp_peer_nets);
-       list_add_tail(&lpni->lpni_on_peer_net_list, &peer_net->lpn_peer_nis);
-       list_add_tail(&peer->lp_on_lnet_peer_list, &the_lnet.ln_peers);
+/*
+ * This API handles the following combinations:
+ *     Create a primary NI if only the key_nid is provided.
+ *     Create or add an lpni to a primary NI; the primary NI must
+ *     already have been created.
+ *     Create a non-MR peer.
+ */
+int
+lnet_add_peer_ni_to_peer(lnet_nid_t key_nid, lnet_nid_t nid, bool mr)
+{
+       /*
+        * Caller trying to setup an MR like peer hierarchy but
+        * specifying it to be non-MR. This is not allowed.
+        */
+       if (key_nid != LNET_NID_ANY &&
+           nid != LNET_NID_ANY && !mr)
+               return -EPERM;
+
+       /* Add the primary NID of a peer */
+       if (key_nid != LNET_NID_ANY &&
+           nid == LNET_NID_ANY && mr)
+               return lnet_add_prim_lpni(key_nid);
+
+       /* Add a NID to an existing peer */
+       if (key_nid != LNET_NID_ANY &&
+           nid != LNET_NID_ANY && mr)
+               return lnet_add_peer_ni_to_prim_lpni(key_nid, nid);
+
+       /* Add a non-MR peer NI */
+       if (((key_nid != LNET_NID_ANY &&
+             nid == LNET_NID_ANY) ||
+            (key_nid == LNET_NID_ANY &&
+             nid != LNET_NID_ANY)) && !mr)
+               return lnet_peer_ni_add_non_mr(key_nid != LNET_NID_ANY ?
+                                                        key_nid : nid);
+
+       return 0;
+}
+
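+/*
+ * Delete the peer_ni for nid (or key_nid if nid is not given) from its
+ * peer. Deleting the primary NID deletes the entire peer.
+ */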
+int
+lnet_del_peer_ni_from_peer(lnet_nid_t key_nid, lnet_nid_t nid)
+{
+       lnet_nid_t local_nid;
+       struct lnet_peer *peer;
+       struct lnet_peer_ni *lpni;
+
+       if (key_nid == LNET_NID_ANY)
+               return -EINVAL;
+
+       local_nid = (nid != LNET_NID_ANY) ? nid : key_nid;
+
+       lpni = lnet_find_peer_ni_locked(local_nid);
+       if (!lpni)
+               return -EINVAL;
+       lnet_peer_ni_decref_locked(lpni);
+
+       peer = lpni->lpni_peer_net->lpn_peer;
+       LASSERT(peer != NULL);
+
+       if (peer->lp_primary_nid == lpni->lpni_nid) {
+               /*
+                * deleting the primary ni is equivalent to deleting the
+                * entire peer
+                */
+               lnet_net_lock(LNET_LOCK_EX);
+               lnet_peer_del_locked(peer);
+               lnet_net_unlock(LNET_LOCK_EX);
+
+               return 0;
+       }
+
+       lnet_net_lock(LNET_LOCK_EX);
+       lnet_peer_ni_del_locked(lpni);
+       lnet_net_unlock(LNET_LOCK_EX);
 
        return 0;
 }
@@ -302,138 +934,70 @@ lnet_destroy_peer_ni_locked(struct lnet_peer_ni *lpni)
        LASSERT(atomic_read(&lpni->lpni_refcount) == 0);
        LASSERT(lpni->lpni_rtr_refcount == 0);
        LASSERT(list_empty(&lpni->lpni_txq));
-       LASSERT(list_empty(&lpni->lpni_hashlist));
        LASSERT(lpni->lpni_txqnob == 0);
-       LASSERT(lpni->lpni_peer_net != NULL);
-       LASSERT(lpni->lpni_peer_net->lpn_peer != NULL);
-
-       ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
-       LASSERT(ptable->pt_number > 0);
-       ptable->pt_number--;
 
        lpni->lpni_net = NULL;
 
-       lnet_try_destroy_peer_hierarchy_locked(lpni);
+       /* remove the peer ni from the zombie list */
+       ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
+       spin_lock(&ptable->pt_zombie_lock);
+       list_del_init(&lpni->lpni_hashlist);
+       ptable->pt_zombies--;
+       spin_unlock(&ptable->pt_zombie_lock);
 
        LIBCFS_FREE(lpni, sizeof(*lpni));
-
-       LASSERT(ptable->pt_zombies > 0);
-       ptable->pt_zombies--;
 }
 
-int
-lnet_nid2peerni_locked(struct lnet_peer_ni **lpnip, lnet_nid_t nid, int cpt)
+struct lnet_peer_ni *
+lnet_nid2peerni_locked(lnet_nid_t nid, int cpt)
 {
        struct lnet_peer_table  *ptable;
        struct lnet_peer_ni     *lpni = NULL;
-       struct lnet_peer_ni     *lpni2;
        int                     cpt2;
-       int                     rc = 0;
+       int                     rc;
 
-       *lpnip = NULL;
        if (the_lnet.ln_shutdown) /* it's shutting down */
-               return -ESHUTDOWN;
+               return ERR_PTR(-ESHUTDOWN);
 
        /*
         * calculate cpt2 with the standard hash function
-        * This cpt2 becomes the slot where we'll find or create the peer.
+        * This cpt2 is the slot where we'll find or create the peer.
         */
        cpt2 = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
-
-       /*
-        * Any changes to the peer tables happen under exclusive write
-        * lock. Any reads to the peer tables can be done via a standard
-        * CPT read lock.
-        */
-       if (cpt != LNET_LOCK_EX) {
-               lnet_net_unlock(cpt);
-               lnet_net_lock(LNET_LOCK_EX);
-       }
-
        ptable = the_lnet.ln_peer_tables[cpt2];
        lpni = lnet_get_peer_ni_locked(ptable, nid);
-       if (lpni != NULL) {
-               *lpnip = lpni;
-               if (cpt != LNET_LOCK_EX) {
-                       lnet_net_unlock(LNET_LOCK_EX);
-                       lnet_net_lock(cpt);
-               }
-               return 0;
-       }
+       if (lpni)
+               return lpni;
 
+       /* Slow path: serialized using the ln_api_mutex. */
+       lnet_net_unlock(cpt);
+       mutex_lock(&the_lnet.ln_api_mutex);
        /*
-        * take extra refcount in case another thread has shutdown LNet
-        * and destroyed locks and peer-table before I finish the allocation
+        * Shutdown is only set while the ln_api_mutex is held, so a
+        * single check here is sufficient.
+        *
+        * lnet_peer_ni_traffic_add() also handles the case where we've
+        * raced and a different thread added the NID.
         */
-       ptable->pt_number++;
-       lnet_net_unlock(LNET_LOCK_EX);
-
-       LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt2, sizeof(*lpni));
-
-       if (lpni == NULL) {
-               rc = -ENOMEM;
-               lnet_net_lock(cpt);
-               goto out;
-       }
-
-       INIT_LIST_HEAD(&lpni->lpni_txq);
-       INIT_LIST_HEAD(&lpni->lpni_rtrq);
-       INIT_LIST_HEAD(&lpni->lpni_routes);
-
-       lpni->lpni_alive = !lnet_peers_start_down(); /* 1 bit!! */
-       lpni->lpni_last_alive = cfs_time_current(); /* assumes alive */
-       lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
-       lpni->lpni_nid = nid;
-       lpni->lpni_cpt = cpt2;
-       atomic_set(&lpni->lpni_refcount, 2);    /* 1 for caller; 1 for hash */
-
-       rc = lnet_build_peer_hierarchy(lpni);
-       if (rc != 0)
-               goto out;
-
-       lnet_net_lock(LNET_LOCK_EX);
-
        if (the_lnet.ln_shutdown) {
-               rc = -ESHUTDOWN;
-               goto out;
+               lpni = ERR_PTR(-ESHUTDOWN);
+               goto out_mutex_unlock;
        }
 
-       lpni2 = lnet_get_peer_ni_locked(ptable, nid);
-       if (lpni2 != NULL) {
-               *lpnip = lpni2;
-               goto out;
+       rc = lnet_peer_ni_traffic_add(nid);
+       if (rc) {
+               lpni = ERR_PTR(rc);
+               goto out_mutex_unlock;
        }
 
-       lpni->lpni_net = lnet_get_net_locked(LNET_NIDNET(lpni->lpni_nid));
-       lpni->lpni_txcredits    =
-       lpni->lpni_mintxcredits =
-               lpni->lpni_net->net_tunables.lct_peer_tx_credits;
-       lpni->lpni_rtrcredits    =
-       lpni->lpni_minrtrcredits =
-               lnet_peer_buffer_credits(lpni->lpni_net);
-
-       list_add_tail(&lpni->lpni_hashlist,
-                       &ptable->pt_hash[lnet_nid2peerhash(nid)]);
-       ptable->pt_version++;
-       *lpnip = lpni;
+       lpni = lnet_get_peer_ni_locked(ptable, nid);
+       LASSERT(lpni);
 
-       if (cpt != LNET_LOCK_EX) {
-               lnet_net_unlock(LNET_LOCK_EX);
-               lnet_net_lock(cpt);
-       }
+out_mutex_unlock:
+       mutex_unlock(&the_lnet.ln_api_mutex);
+       lnet_net_lock(cpt);
 
-       return 0;
-out:
-       if (lpni != NULL) {
-               lnet_try_destroy_peer_hierarchy_locked(lpni);
-               LIBCFS_FREE(lpni, sizeof(*lpni));
-       }
-       ptable->pt_number--;
-       if (cpt != LNET_LOCK_EX) {
-               lnet_net_unlock(LNET_LOCK_EX);
-               lnet_net_lock(cpt);
-       }
-       return rc;
+       return lpni;
 }
 
 void
@@ -441,14 +1005,13 @@ lnet_debug_peer(lnet_nid_t nid)
 {
        char                    *aliveness = "NA";
        struct lnet_peer_ni     *lp;
-       int                     rc;
        int                     cpt;
 
        cpt = lnet_cpt_of_nid(nid, NULL);
        lnet_net_lock(cpt);
 
-       rc = lnet_nid2peerni_locked(&lp, nid, cpt);
-       if (rc != 0) {
+       lp = lnet_nid2peerni_locked(nid, cpt);
+       if (IS_ERR(lp)) {
                lnet_net_unlock(cpt);
                CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
                return;
@@ -468,12 +1031,12 @@ lnet_debug_peer(lnet_nid_t nid)
        lnet_net_unlock(cpt);
 }
 
-int lnet_get_peer_info(__u32 peer_index, __u64 *nid,
-                      char aliveness[LNET_MAX_STR_LEN],
-                      __u32 *cpt_iter, __u32 *refcount,
-                      __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
-                      __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
-                      __u32 *peer_tx_qnob)
+int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
+                         char aliveness[LNET_MAX_STR_LEN],
+                         __u32 *cpt_iter, __u32 *refcount,
+                         __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
+                         __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
+                         __u32 *peer_tx_qnob)
 {
        struct lnet_peer_table          *peer_table;
        struct lnet_peer_ni             *lp;
@@ -530,3 +1093,40 @@ int lnet_get_peer_info(__u32 peer_index, __u64 *nid,
 
        return found ? 0 : -ENOENT;
 }
+
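+/*
+ * Fill in identity, credit and traffic statistics for the idx'th peer_ni
+ * in the global peer list.
+ */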
+int lnet_get_peer_info(__u32 idx, lnet_nid_t *primary_nid, lnet_nid_t *nid,
+                      bool *mr, struct lnet_peer_ni_credit_info *peer_ni_info,
+                      struct lnet_ioctl_element_stats *peer_ni_stats)
+{
+       struct lnet_peer_ni *lpni = NULL;
+       struct lnet_peer_net *lpn = NULL;
+       struct lnet_peer *lp = NULL;
+
+       lpni = lnet_get_peer_ni_idx_locked(idx, &lpn, &lp);
+
+       if (!lpni)
+               return -ENOENT;
+
+       *primary_nid = lp->lp_primary_nid;
+       *mr = lp->lp_multi_rail;
+       *nid = lpni->lpni_nid;
+       snprintf(peer_ni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
+       if (lnet_isrouter(lpni) ||
+               lnet_peer_aliveness_enabled(lpni))
+               snprintf(peer_ni_info->cr_aliveness, LNET_MAX_STR_LEN,
+                        lpni->lpni_alive ? "up" : "down");
+
+       peer_ni_info->cr_refcount = atomic_read(&lpni->lpni_refcount);
+       peer_ni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
+               lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
+       peer_ni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
+       peer_ni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
+       peer_ni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
+       peer_ni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
+
+       peer_ni_stats->send_count = atomic_read(&lpni->lpni_stats.send_count);
+       peer_ni_stats->recv_count = atomic_read(&lpni->lpni_stats.recv_count);
+       peer_ni_stats->drop_count = atomic_read(&lpni->lpni_stats.drop_count);
+
+       return 0;
+}