/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_LNET
#include <lnet/lib-lnet.h>
+#include <lnet/lib-dlc.h>
int
-lnet_peer_table_create(void)
+lnet_peer_tables_create(void)
{
struct lnet_peer_table *ptable;
- cfs_list_t *hash;
+ struct list_head *hash;
+ int i;
int j;
- LIBCFS_ALLOC(ptable, sizeof(*ptable));
- if (ptable == NULL) {
+ the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
+ sizeof(*ptable));
+ if (the_lnet.ln_peer_tables == NULL) {
CERROR("Failed to allocate cpu-partition peer tables\n");
return -ENOMEM;
}
- the_lnet.ln_peer_table = ptable;
-
- do { /* we will have per CPT peer-tables iterate them by then */
- CFS_INIT_LIST_HEAD(&ptable->pt_deathrow);
+ cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
+ INIT_LIST_HEAD(&ptable->pt_deathrow);
- LIBCFS_ALLOC(hash, LNET_PEER_HASH_SIZE * sizeof(*hash));
+ LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
+ LNET_PEER_HASH_SIZE * sizeof(*hash));
if (hash == NULL) {
CERROR("Failed to create peer hash table\n");
- lnet_peer_table_destroy();
+ lnet_peer_tables_destroy();
return -ENOMEM;
}
for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
- CFS_INIT_LIST_HEAD(&hash[j]);
+ INIT_LIST_HEAD(&hash[j]);
ptable->pt_hash = hash; /* sign of initialization */
- } while (0);
+ }
return 0;
}
void
-lnet_peer_table_destroy(void)
+lnet_peer_tables_destroy(void)
{
struct lnet_peer_table *ptable;
- cfs_list_t *hash;
+ struct list_head *hash;
+ int i;
int j;
- if (the_lnet.ln_peer_table == NULL)
+ if (the_lnet.ln_peer_tables == NULL)
return;
- ptable = the_lnet.ln_peer_table;
-
- do { /* we will have per CPT peer-tables iterate them by then */
+ cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
hash = ptable->pt_hash;
if (hash == NULL) /* not intialized */
break;
- LASSERT(cfs_list_empty(&ptable->pt_deathrow));
+ LASSERT(list_empty(&ptable->pt_deathrow));
ptable->pt_hash = NULL;
for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
- LASSERT(cfs_list_empty(&hash[j]));
+ LASSERT(list_empty(&hash[j]));
LIBCFS_FREE(hash, LNET_PEER_HASH_SIZE * sizeof(*hash));
- } while (0);
+ }
- LIBCFS_FREE(ptable, sizeof(*ptable));
- the_lnet.ln_peer_table = NULL;
+ cfs_percpt_free(the_lnet.ln_peer_tables);
+ the_lnet.ln_peer_tables = NULL;
}
-void
-lnet_peer_table_cleanup(void)
+static void
+lnet_peer_table_cleanup_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable)
{
- struct lnet_peer_table *ptable;
- int j;
-
- LASSERT(the_lnet.ln_shutdown); /* i.e. no new peers */
- ptable = the_lnet.ln_peer_table;
+ int i;
+ lnet_peer_t *lp;
+ lnet_peer_t *tmp;
+
+ for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
+ list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
+ lp_hashlist) {
+ if (ni != NULL && ni != lp->lp_ni)
+ continue;
+ list_del_init(&lp->lp_hashlist);
+ /* Lose hash table's ref */
+ ptable->pt_zombies++;
+ lnet_peer_decref_locked(lp);
+ }
+ }
+}
- do { /* we will have per CPT peer-tables iterate them by then */
- LNET_LOCK();
+static void
+lnet_peer_table_deathrow_wait_locked(struct lnet_peer_table *ptable,
+ int cpt_locked)
+{
+ int i;
- for (j = 0; j < LNET_PEER_HASH_SIZE; j++) {
- cfs_list_t *peers = &ptable->pt_hash[j];
+ for (i = 3; ptable->pt_zombies != 0; i++) {
+ lnet_net_unlock(cpt_locked);
- while (!cfs_list_empty(peers)) {
- lnet_peer_t *lp = cfs_list_entry(peers->next,
- lnet_peer_t,
- lp_hashlist);
- cfs_list_del_init(&lp->lp_hashlist);
- /* lose hash table's ref */
- lnet_peer_decref_locked(lp);
- }
+ if (IS_PO2(i)) {
+ CDEBUG(D_WARNING,
+ "Waiting for %d zombies on peer table\n",
+ ptable->pt_zombies);
}
+ cfs_pause(cfs_time_seconds(1) >> 1);
+ lnet_net_lock(cpt_locked);
+ }
+}
- LNET_UNLOCK();
- } while (0);
+static void
+lnet_peer_table_del_rtrs_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable,
+ int cpt_locked)
+{
+ lnet_peer_t *lp;
+ lnet_peer_t *tmp;
+ lnet_nid_t lp_nid;
+ int i;
- do { /* we will have per CPT peer-tables iterate them by then */
- CFS_LIST_HEAD (deathrow);
- lnet_peer_t *lp;
+ for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
+ list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
+ lp_hashlist) {
+ if (ni != lp->lp_ni)
+ continue;
- LNET_LOCK();
+ if (lp->lp_rtr_refcount == 0)
+ continue;
- for (j = 3; ptable->pt_number != 0; j++) {
- LNET_UNLOCK();
+ lp_nid = lp->lp_nid;
- if ((j & (j - 1)) == 0) {
- CDEBUG(D_WARNING,
- "Waiting for %d peers on peer table\n",
- ptable->pt_number);
- }
- cfs_pause(cfs_time_seconds(1) / 2);
- LNET_LOCK();
+ lnet_net_unlock(cpt_locked);
+ lnet_del_route(LNET_NIDNET(LNET_NID_ANY), lp_nid);
+ lnet_net_lock(cpt_locked);
}
- cfs_list_splice_init(&ptable->pt_deathrow, &deathrow);
+ }
+}
- LNET_UNLOCK();
+void
+lnet_peer_tables_cleanup(lnet_ni_t *ni)
+{
+ int i;
+ struct lnet_peer_table *ptable;
+ lnet_peer_t *lp;
+ struct list_head deathrow;
+
+ INIT_LIST_HEAD(&deathrow);
+
+ LASSERT(the_lnet.ln_shutdown || ni != NULL);
+ /* If just deleting the peers for a NI, get rid of any routes these
+ * peers are gateways for. */
+ cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
+ lnet_net_lock(i);
+ lnet_peer_table_del_rtrs_locked(ni, ptable, i);
+ lnet_net_unlock(i);
+ }
- while (!cfs_list_empty(&deathrow)) {
- lp = cfs_list_entry(deathrow.next,
- lnet_peer_t, lp_hashlist);
- cfs_list_del(&lp->lp_hashlist);
- LIBCFS_FREE(lp, sizeof(*lp));
- }
- } while (0);
+ /* Start the process of moving the applicable peers to
+ * deathrow. */
+ cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
+ lnet_net_lock(i);
+ lnet_peer_table_cleanup_locked(ni, ptable);
+ lnet_net_unlock(i);
+ }
+
+ /* Cleanup all entries on deathrow. */
+ cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
+ lnet_net_lock(i);
+ lnet_peer_table_deathrow_wait_locked(ptable, i);
+ list_splice_init(&ptable->pt_deathrow, &deathrow);
+ lnet_net_unlock(i);
+ }
+
+ while (!list_empty(&deathrow)) {
+ lp = list_entry(deathrow.next, lnet_peer_t, lp_hashlist);
+ list_del(&lp->lp_hashlist);
+ LIBCFS_FREE(lp, sizeof(*lp));
+ }
}
void
lnet_destroy_peer_locked(lnet_peer_t *lp)
{
- struct lnet_peer_table *ptable = the_lnet.ln_peer_table;
+ struct lnet_peer_table *ptable;
LASSERT(lp->lp_refcount == 0);
LASSERT(lp->lp_rtr_refcount == 0);
- LASSERT(cfs_list_empty(&lp->lp_txq));
- LASSERT(cfs_list_empty(&lp->lp_hashlist));
+ LASSERT(list_empty(&lp->lp_txq));
+ LASSERT(list_empty(&lp->lp_hashlist));
LASSERT(lp->lp_txqnob == 0);
+ ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
LASSERT(ptable->pt_number > 0);
ptable->pt_number--;
- lnet_ni_decref_locked(lp->lp_ni);
+ lnet_ni_decref_locked(lp->lp_ni, lp->lp_cpt);
lp->lp_ni = NULL;
- cfs_list_add(&lp->lp_hashlist, &ptable->pt_deathrow);
+ list_add(&lp->lp_hashlist, &ptable->pt_deathrow);
+ LASSERT(ptable->pt_zombies > 0);
+ ptable->pt_zombies--;
}
lnet_peer_t *
-lnet_find_peer_locked(lnet_nid_t nid)
+lnet_find_peer_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
{
- cfs_list_t *peers;
- lnet_peer_t *lp;
+ struct list_head *peers;
+ lnet_peer_t *lp;
- if (the_lnet.ln_shutdown)
- return NULL;
+ LASSERT(!the_lnet.ln_shutdown);
- peers = &the_lnet.ln_peer_table->pt_hash[lnet_nid2peerhash(nid)];
- cfs_list_for_each_entry(lp, peers, lp_hashlist) {
+ peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
+ list_for_each_entry(lp, peers, lp_hashlist) {
if (lp->lp_nid == nid) {
lnet_peer_addref_locked(lp);
return lp;
}
int
-lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid)
+lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt)
{
- struct lnet_peer_table *ptable = the_lnet.ln_peer_table;
+ struct lnet_peer_table *ptable;
lnet_peer_t *lp = NULL;
lnet_peer_t *lp2;
+ int cpt2;
+ int rc = 0;
- lp = lnet_find_peer_locked(nid);
- if (lp != NULL) {
- *lpp = lp;
- return 0;
- }
+ *lpp = NULL;
+ if (the_lnet.ln_shutdown) /* it's shutting down */
+ return -ESHUTDOWN;
+
+ /* cpt can be LNET_LOCK_EX if it's called from router functions */
+ cpt2 = cpt != LNET_LOCK_EX ? cpt : lnet_cpt_of_nid_locked(nid);
+
+ ptable = the_lnet.ln_peer_tables[cpt2];
+ lp = lnet_find_peer_locked(ptable, nid);
+ if (lp != NULL) {
+ *lpp = lp;
+ return 0;
+ }
- if (!cfs_list_empty(&ptable->pt_deathrow)) {
- lp = cfs_list_entry(ptable->pt_deathrow.next,
- lnet_peer_t, lp_hashlist);
- cfs_list_del(&lp->lp_hashlist);
+ if (!list_empty(&ptable->pt_deathrow)) {
+ lp = list_entry(ptable->pt_deathrow.next,
+ lnet_peer_t, lp_hashlist);
+ list_del(&lp->lp_hashlist);
}
- LNET_UNLOCK();
+	/*
+	 * Bump pt_number as an extra reference in case another thread
+	 * shuts down LNet and destroys the locks and peer table before
+	 * the allocation below finishes.
+	 */
+ ptable->pt_number++;
+ lnet_net_unlock(cpt);
if (lp != NULL)
memset(lp, 0, sizeof(*lp));
else
- LIBCFS_ALLOC(lp, sizeof(*lp));
+ LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), cpt2, sizeof(*lp));
if (lp == NULL) {
- *lpp = NULL;
- LNET_LOCK();
- return -ENOMEM;
- }
+ rc = -ENOMEM;
+ lnet_net_lock(cpt);
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&lp->lp_txq);
+ INIT_LIST_HEAD(&lp->lp_rtrq);
+ INIT_LIST_HEAD(&lp->lp_routes);
- memset(lp, 0, sizeof(*lp)); /* zero counters etc */
-
- CFS_INIT_LIST_HEAD(&lp->lp_txq);
- CFS_INIT_LIST_HEAD(&lp->lp_rtrq);
-
lp->lp_notify = 0;
lp->lp_notifylnd = 0;
lp->lp_notifying = 0;
lp->lp_last_alive = cfs_time_current(); /* assumes alive */
lp->lp_last_query = 0; /* haven't asked NI yet */
lp->lp_ping_timestamp = 0;
- lp->lp_nid = nid;
- lp->lp_refcount = 2; /* 1 for caller; 1 for hash */
- lp->lp_rtr_refcount = 0;
-
- LNET_LOCK();
+ lp->lp_ping_feats = LNET_PING_FEAT_INVAL;
+ lp->lp_nid = nid;
+ lp->lp_cpt = cpt2;
+ lp->lp_refcount = 2; /* 1 for caller; 1 for hash */
+ lp->lp_rtr_refcount = 0;
- lp2 = lnet_find_peer_locked(nid);
- if (lp2 != NULL) {
- cfs_list_add(&lp->lp_hashlist, &ptable->pt_deathrow);
+ lnet_net_lock(cpt);
- if (the_lnet.ln_shutdown) {
- lnet_peer_decref_locked(lp2);
- *lpp = NULL;
- return -ESHUTDOWN;
- }
-
- *lpp = lp2;
- return 0;
- }
-
- lp->lp_ni = lnet_net2ni_locked(LNET_NIDNET(nid));
- if (lp->lp_ni == NULL) {
- cfs_list_add(&lp->lp_hashlist, &ptable->pt_deathrow);
+ if (the_lnet.ln_shutdown) {
+ rc = -ESHUTDOWN;
+ goto out;
+ }
- *lpp = NULL;
- return the_lnet.ln_shutdown ? -ESHUTDOWN : -EHOSTUNREACH;
- }
+ lp2 = lnet_find_peer_locked(ptable, nid);
+ if (lp2 != NULL) {
+ *lpp = lp2;
+ goto out;
+ }
- lp->lp_txcredits =
- lp->lp_mintxcredits = lp->lp_ni->ni_peertxcredits;
- lp->lp_rtrcredits =
- lp->lp_minrtrcredits = lnet_peer_buffer_credits(lp->lp_ni);
+ lp->lp_ni = lnet_net2ni_locked(LNET_NIDNET(nid), cpt2);
+ if (lp->lp_ni == NULL) {
+ rc = -EHOSTUNREACH;
+ goto out;
+ }
- /* can't add peers after shutdown starts */
- LASSERT (!the_lnet.ln_shutdown);
+ lp->lp_txcredits =
+ lp->lp_mintxcredits = lp->lp_ni->ni_peertxcredits;
+ lp->lp_rtrcredits =
+ lp->lp_minrtrcredits = lnet_peer_buffer_credits(lp->lp_ni);
- cfs_list_add_tail(&lp->lp_hashlist,
- &ptable->pt_hash[lnet_nid2peerhash(nid)]);
+ list_add_tail(&lp->lp_hashlist,
+ &ptable->pt_hash[lnet_nid2peerhash(nid)]);
ptable->pt_version++;
- ptable->pt_number++;
-
*lpp = lp;
+
return 0;
+out:
+ if (lp != NULL)
+ list_add(&lp->lp_hashlist, &ptable->pt_deathrow);
+ ptable->pt_number--;
+ return rc;
}
void
lnet_debug_peer(lnet_nid_t nid)
{
- char *aliveness = "NA";
- int rc;
- lnet_peer_t *lp;
+ char *aliveness = "NA";
+ lnet_peer_t *lp;
+ int rc;
+ int cpt;
- LNET_LOCK();
+ cpt = lnet_cpt_of_nid(nid);
+ lnet_net_lock(cpt);
- rc = lnet_nid2peer_locked(&lp, nid);
- if (rc != 0) {
- LNET_UNLOCK();
+ rc = lnet_nid2peer_locked(&lp, nid, cpt);
+ if (rc != 0) {
+ lnet_net_unlock(cpt);
CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
return;
}
lnet_peer_decref_locked(lp);
- LNET_UNLOCK();
+ lnet_net_unlock(cpt);
+}
+
+int lnet_get_peer_info(__u32 peer_index, __u64 *nid,
+		       char aliveness[LNET_MAX_STR_LEN],
+		       __u32 *cpt_iter, __u32 *refcount,
+		       __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
+		       __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
+		       __u32 *peer_tx_qnob)
+{
+	struct lnet_peer_table *peer_table;
+	lnet_peer_t *lp;
+	int j;
+	int lncpt;
+	bool found = false;
+
+	/* get the number of CPTs */
+	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
+
+	/* if the cpt number to be examined is >= the number of cpts in
+	 * the system then indicate that there are no more cpts to examine
+	 */
+	if (*cpt_iter >= lncpt)
+		return -ENOENT;
+
+	/* get the current table */
+	peer_table = the_lnet.ln_peer_tables[*cpt_iter];
+	/* if the ptable is NULL then there are no more cpts to examine */
+	if (peer_table == NULL)
+		return -ENOENT;
+
+	lnet_net_lock(*cpt_iter);
+
+	for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
+		struct list_head *peers = &peer_table->pt_hash[j];
+
+		list_for_each_entry(lp, peers, lp_hashlist) {
+			/* walk past the first peer_index entries */
+			if (peer_index-- > 0)
+				continue;
+
+			snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
+			if (lnet_isrouter(lp) ||
+			    lnet_peer_aliveness_enabled(lp))
+				snprintf(aliveness, LNET_MAX_STR_LEN,
+					 lp->lp_alive ? "up" : "down");
+
+			*nid = lp->lp_nid;
+			*refcount = lp->lp_refcount;
+			*ni_peer_tx_credits = lp->lp_ni->ni_peertxcredits;
+			*peer_tx_credits = lp->lp_txcredits;
+			*peer_rtr_credits = lp->lp_rtrcredits;
+			/* NOTE(review): fills min-rtr-credits from
+			 * lp_mintxcredits — confirm lp_minrtrcredits
+			 * was not intended */
+			*peer_min_rtr_credits = lp->lp_mintxcredits;
+			*peer_tx_qnob = lp->lp_txqnob;
+
+			found = true;
+			break;
+		}
+
+	}
+	lnet_net_unlock(*cpt_iter);
+
+	*cpt_iter = lncpt;
+
+	return found ? 0 : -ENOENT;
}