From fa8b4e6357c53ea457ef6624b0b19bece0b0fdde Mon Sep 17 00:00:00 2001 From: Amir Shehata Date: Thu, 26 May 2016 15:42:39 -0700 Subject: [PATCH] LU-7734 lnet: peer/peer_ni handling adjustments A peer can be added by specifying a list of NIDs. The first NID shall be used as the primary NID; the rest of the NIDs will be added under the primary NID. A peer can also be added by explicitly specifying the key NID and then adding a set of other NIDs, all done through one API call. If a key NID already exists but it is not an MR NI, then adding that key NID from DLC shall convert that NI to an MR NI. If a key NID already exists and it is an MR NI, then re-adding the key NID shall have no effect. If a key NID already exists as part of another peer, then adding that NID as part of another peer or as primary shall fail. If a NID is being added to a peer NI and that NID is a non-MR NID, then that NID is moved under the peer and is made MR capable. If a NID is being added to a peer and that NID is an MR NID and part of another peer, then the operation shall fail. If a NID is being added to a peer and it is already part of that peer, then the operation is a no-op. Moreover, the code is structured to consider the addition of Dynamic Discovery in later patches. 
Signed-off-by: Amir Shehata Change-Id: I71f740192a31ae00f83014ca3e9e06b61ae4ecd5 Reviewed-on: http://review.whamcloud.com/20531 --- lnet/include/lnet/lib-lnet.h | 9 +- lnet/include/lnet/lib-types.h | 12 +- lnet/lnet/api-ni.c | 76 ++- lnet/lnet/lib-move.c | 36 +- lnet/lnet/peer.c | 910 +++++++++++++++++++--------------- lnet/lnet/router.c | 8 +- lnet/utils/lnetconfig/liblnetconfig.c | 294 +++++++---- lnet/utils/lnetconfig/liblnetconfig.h | 33 +- lnet/utils/lnetctl.c | 95 ++-- 9 files changed, 888 insertions(+), 585 deletions(-) diff --git a/lnet/include/lnet/lib-lnet.h b/lnet/include/lnet/lib-lnet.h index 89efd70..b99598a 100644 --- a/lnet/include/lnet/lib-lnet.h +++ b/lnet/include/lnet/lib-lnet.h @@ -801,13 +801,12 @@ inline __u32 lnet_get_numa_range(void); struct lnet_peer_ni *lnet_get_next_peer_ni_locked(struct lnet_peer *peer, struct lnet_peer_net *peer_net, struct lnet_peer_ni *prev); -int lnet_find_or_create_peer_locked(lnet_nid_t dst_nid, int cpt, - struct lnet_peer **peer); -int lnet_nid2peerni_locked(struct lnet_peer_ni **lpp, lnet_nid_t nid, int cpt); +struct lnet_peer *lnet_find_or_create_peer_locked(lnet_nid_t dst_nid, int cpt); +struct lnet_peer_ni *lnet_nid2peerni_locked(lnet_nid_t nid, int cpt); struct lnet_peer_ni *lnet_find_peer_ni_locked(lnet_nid_t nid); void lnet_peer_net_added(struct lnet_net *net); lnet_nid_t lnet_peer_primary_nid(lnet_nid_t nid); -void lnet_peer_tables_cleanup(lnet_ni_t *ni); +void lnet_peer_tables_cleanup(struct lnet_net *net); void lnet_peer_uninit(void); int lnet_peer_tables_create(void); void lnet_debug_peer(lnet_nid_t nid); @@ -818,7 +817,7 @@ bool lnet_peer_is_ni_pref_locked(struct lnet_peer_ni *lpni, int lnet_add_peer_ni_to_peer(lnet_nid_t key_nid, lnet_nid_t nid, bool mr); int lnet_del_peer_ni_from_peer(lnet_nid_t key_nid, lnet_nid_t nid); int lnet_get_peer_info(__u32 idx, lnet_nid_t *primary_nid, lnet_nid_t *nid, - struct lnet_peer_ni_credit_info *peer_ni_info, + bool *mr, struct lnet_peer_ni_credit_info *peer_ni_info, 
struct lnet_ioctl_element_stats *peer_ni_stats); int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid, char alivness[LNET_MAX_STR_LEN], diff --git a/lnet/include/lnet/lib-types.h b/lnet/include/lnet/lib-types.h index 77b3564..2e4820d 100644 --- a/lnet/include/lnet/lib-types.h +++ b/lnet/include/lnet/lib-types.h @@ -302,9 +302,9 @@ struct lnet_net { /* chain on the ln_nets */ struct list_head net_list; - /* net ID, which is compoed of + /* net ID, which is composed of * (net_type << 16) | net_num. - * net_type can be one of the enumarated types defined in + * net_type can be one of the enumerated types defined in * lnet/include/lnet/nidstr.h */ __u32 net_id; @@ -548,11 +548,11 @@ struct lnet_peer_net { /* peer hash table */ struct lnet_peer_table { int pt_version; /* /proc validity stamp */ - int pt_number; /* # peers extant */ - int pt_zombies; /* # zombies to go to deathrow - * (and not there yet) */ - struct list_head pt_deathrow; /* zombie peers */ + atomic_t pt_number; /* # peers extant */ struct list_head *pt_hash; /* NID->peer hash */ + struct list_head pt_zombie_list; /* zombie peers */ + int pt_zombies; /* # zombie peers */ + spinlock_t pt_zombie_lock; /* protect list and count */ }; /* peer aliveness is enabled only on routers for peers in a network where the diff --git a/lnet/lnet/api-ni.c b/lnet/lnet/api-ni.c index 3be384e..6a213b3 100644 --- a/lnet/lnet/api-ni.c +++ b/lnet/lnet/api-ni.c @@ -1298,9 +1298,6 @@ lnet_shutdown_lndni(struct lnet_ni *ni) for (i = 0; i < the_lnet.ln_nportals; i++) lnet_clear_lazy_portal(ni, i, "Shutting down NI"); - /* Do peer table cleanup for this ni */ - lnet_peer_tables_cleanup(ni); - lnet_net_lock(LNET_LOCK_EX); lnet_clear_zombies_nis_locked(net); lnet_net_unlock(LNET_LOCK_EX); @@ -1325,6 +1322,12 @@ lnet_shutdown_lndnet(struct lnet_net *net) lnet_net_lock(LNET_LOCK_EX); } + lnet_net_unlock(LNET_LOCK_EX); + + /* Do peer table cleanup for this net */ + lnet_peer_tables_cleanup(net); + + lnet_net_lock(LNET_LOCK_EX); /* * 
decrement ref count on lnd only when the entire network goes * away @@ -2658,13 +2661,16 @@ LNetCtl(unsigned int cmd, void *arg) if (config->cfg_hdr.ioc_len < sizeof(*config)) return -EINVAL; - return lnet_get_route(config->cfg_count, - &config->cfg_net, - &config->cfg_config_u.cfg_route.rtr_hop, - &config->cfg_nid, - &config->cfg_config_u.cfg_route.rtr_flags, - &config->cfg_config_u.cfg_route. + mutex_lock(&the_lnet.ln_api_mutex); + rc = lnet_get_route(config->cfg_count, + &config->cfg_net, + &config->cfg_config_u.cfg_route.rtr_hop, + &config->cfg_nid, + &config->cfg_config_u.cfg_route.rtr_flags, + &config->cfg_config_u.cfg_route. rtr_priority); + mutex_unlock(&the_lnet.ln_api_mutex); + return rc; case IOC_LIBCFS_GET_LOCAL_NI: { struct lnet_ioctl_config_ni *cfg_ni; @@ -2686,7 +2692,10 @@ LNetCtl(unsigned int cmd, void *arg) tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni) - sizeof(*stats); - return lnet_get_ni_config(cfg_ni, tun, stats, tun_size); + mutex_lock(&the_lnet.ln_api_mutex); + rc = lnet_get_ni_config(cfg_ni, tun, stats, tun_size); + mutex_unlock(&the_lnet.ln_api_mutex); + return rc; } case IOC_LIBCFS_GET_NET: { @@ -2697,7 +2706,10 @@ LNetCtl(unsigned int cmd, void *arg) if (config->cfg_hdr.ioc_len < total) return -EINVAL; - return lnet_get_net_config(config); + mutex_lock(&the_lnet.ln_api_mutex); + rc = lnet_get_net_config(config); + mutex_unlock(&the_lnet.ln_api_mutex); + return rc; } case IOC_LIBCFS_GET_LNET_STATS: @@ -2707,7 +2719,9 @@ LNetCtl(unsigned int cmd, void *arg) if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats)) return -EINVAL; + mutex_lock(&the_lnet.ln_api_mutex); lnet_counters_get(&lnet_stats->st_cntrs); + mutex_unlock(&the_lnet.ln_api_mutex); return 0; } @@ -2748,7 +2762,9 @@ LNetCtl(unsigned int cmd, void *arg) numa = arg; if (numa->nr_hdr.ioc_len != sizeof(*numa)) return -EINVAL; + mutex_lock(&the_lnet.ln_api_mutex); lnet_numa_range = numa->nr_range; + mutex_unlock(&the_lnet.ln_api_mutex); return 0; } @@ -2771,7 +2787,11 @@ 
LNetCtl(unsigned int cmd, void *arg) return -EINVAL; pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk; - return lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg); + + mutex_lock(&the_lnet.ln_api_mutex); + rc = lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg); + mutex_unlock(&the_lnet.ln_api_mutex); + return rc; } case IOC_LIBCFS_ADD_PEER_NI: { @@ -2780,9 +2800,13 @@ LNetCtl(unsigned int cmd, void *arg) if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg)) return -EINVAL; - return lnet_add_peer_ni_to_peer(cfg->prcfg_key_nid, - cfg->prcfg_cfg_nid, - cfg->prcfg_mr); + mutex_lock(&the_lnet.ln_api_mutex); + lnet_incr_dlc_seq(); + rc = lnet_add_peer_ni_to_peer(cfg->prcfg_key_nid, + cfg->prcfg_cfg_nid, + cfg->prcfg_mr); + mutex_unlock(&the_lnet.ln_api_mutex); + return rc; } case IOC_LIBCFS_DEL_PEER_NI: { @@ -2791,8 +2815,12 @@ LNetCtl(unsigned int cmd, void *arg) if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg)) return -EINVAL; - return lnet_del_peer_ni_from_peer(cfg->prcfg_key_nid, - cfg->prcfg_cfg_nid); + mutex_lock(&the_lnet.ln_api_mutex); + lnet_incr_dlc_seq(); + rc = lnet_del_peer_ni_from_peer(cfg->prcfg_key_nid, + cfg->prcfg_cfg_nid); + mutex_unlock(&the_lnet.ln_api_mutex); + return rc; } case IOC_LIBCFS_GET_PEER_INFO: { @@ -2801,7 +2829,8 @@ LNetCtl(unsigned int cmd, void *arg) if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info)) return -EINVAL; - return lnet_get_peer_ni_info( + mutex_lock(&the_lnet.ln_api_mutex); + rc = lnet_get_peer_ni_info( peer_info->pr_count, &peer_info->pr_nid, peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness, @@ -2812,6 +2841,8 @@ LNetCtl(unsigned int cmd, void *arg) &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits, &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_rtr_credits, &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob); + mutex_unlock(&the_lnet.ln_api_mutex); + return rc; } case IOC_LIBCFS_GET_PEER_NI: { @@ -2828,9 +2859,12 @@ LNetCtl(unsigned int cmd, void *arg) lpni_stats = (struct lnet_ioctl_element_stats *) (cfg->prcfg_bulk + 
sizeof(*lpni_cri)); - return lnet_get_peer_info(cfg->prcfg_idx, &cfg->prcfg_key_nid, - &cfg->prcfg_cfg_nid, lpni_cri, - lpni_stats); + mutex_lock(&the_lnet.ln_api_mutex); + rc = lnet_get_peer_info(cfg->prcfg_idx, &cfg->prcfg_key_nid, + &cfg->prcfg_cfg_nid, &cfg->prcfg_mr, + lpni_cri, lpni_stats); + mutex_unlock(&the_lnet.ln_api_mutex); + return rc; } case IOC_LIBCFS_NOTIFY_ROUTER: { diff --git a/lnet/lnet/lib-move.c b/lnet/lnet/lib-move.c index 938e536..ee167ea 100644 --- a/lnet/lnet/lib-move.c +++ b/lnet/lnet/lib-move.c @@ -1336,10 +1336,10 @@ again: lpni = NULL; seq = lnet_get_dlc_seq_locked(); - rc = lnet_find_or_create_peer_locked(dst_nid, cpt, &peer); - if (rc != 0) { + peer = lnet_find_or_create_peer_locked(dst_nid, cpt); + if (IS_ERR(peer)) { lnet_net_unlock(cpt); - return rc; + return PTR_ERR(peer); } /* If peer is not healthy then can not send anything to it */ @@ -1547,13 +1547,6 @@ set_ni: } } /* - * Now that we selected the NI to use increment its sequence - * number so the Round Robin algorithm will detect that it has - * been used and pick the next NI. - */ - best_ni->ni_seq++; - - /* * if the peer is not MR capable, then we should always send to it * using the first NI in the NET we determined. */ @@ -1567,6 +1560,13 @@ set_ni: return -EINVAL; } + /* + * Now that we selected the NI to use increment its sequence + * number so the Round Robin algorithm will detect that it has + * been used and pick the next NI. 
+ */ + best_ni->ni_seq++; + if (routing) goto send; @@ -1633,7 +1633,7 @@ pick_peer: } CDEBUG(D_NET, "Best route to %s via %s for %s %d\n", - libcfs_nid2str(lpni->lpni_nid), + libcfs_nid2str(dst_nid), libcfs_nid2str(best_gw->lpni_nid), lnet_msgtyp2str(msg->msg_type), msg->msg_len); @@ -2247,8 +2247,9 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid, lnet_pid_t dest_pid; lnet_nid_t dest_nid; lnet_nid_t src_nid; - __u32 payload_length; - __u32 type; + struct lnet_peer_ni *lpni; + __u32 payload_length; + __u32 type; LASSERT (!in_interrupt ()); @@ -2410,19 +2411,20 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid, msg->msg_initiator = lnet_peer_primary_nid(src_nid); lnet_net_lock(cpt); - rc = lnet_nid2peerni_locked(&msg->msg_rxpeer, from_nid, cpt); - if (rc != 0) { + lpni = lnet_nid2peerni_locked(from_nid, cpt); + if (IS_ERR(lpni)) { lnet_net_unlock(cpt); CERROR("%s, src %s: Dropping %s " - "(error %d looking up sender)\n", + "(error %ld looking up sender)\n", libcfs_nid2str(from_nid), libcfs_nid2str(src_nid), - lnet_msgtyp2str(type), rc); + lnet_msgtyp2str(type), PTR_ERR(lpni)); lnet_msg_free(msg); if (rc == -ESHUTDOWN) /* We are shutting down. 
Don't do anything more */ return 0; goto drop; } + msg->msg_rxpeer = lpni; msg->msg_rxni = ni; lnet_ni_addref_locked(ni, cpt); diff --git a/lnet/lnet/peer.c b/lnet/lnet/peer.c index 86b56ae..56584fc 100644 --- a/lnet/lnet/peer.c +++ b/lnet/lnet/peer.c @@ -84,6 +84,8 @@ lnet_peer_tables_destroy(void) if (!hash) /* not intialized */ break; + LASSERT(list_empty(&ptable->pt_zombie_list)); + ptable->pt_hash = NULL; for (j = 0; j < LNET_PEER_HASH_SIZE; j++) LASSERT(list_empty(&hash[j])); @@ -119,6 +121,9 @@ lnet_peer_tables_create(void) return -ENOMEM; } + spin_lock_init(&ptable->pt_zombie_lock); + INIT_LIST_HEAD(&ptable->pt_zombie_list); + for (j = 0; j < LNET_PEER_HASH_SIZE; j++) INIT_LIST_HEAD(&hash[j]); ptable->pt_hash = hash; /* sign of initialization */ @@ -127,57 +132,230 @@ lnet_peer_tables_create(void) return 0; } -void lnet_peer_uninit() +static struct lnet_peer_ni * +lnet_peer_ni_alloc(lnet_nid_t nid) { + struct lnet_peer_ni *lpni; + struct lnet_net *net; int cpt; - struct lnet_peer_ni *lpni, *tmp; - struct lnet_peer_table *ptable = NULL; - /* remove all peer_nis from the remote peer and he hash list */ - list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list, - lpni_on_remote_peer_ni_list) { - list_del_init(&lpni->lpni_on_remote_peer_ni_list); - lnet_peer_ni_decref_locked(lpni); + cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER); + + LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni)); + if (!lpni) + return NULL; - cpt = lnet_cpt_of_nid_locked(lpni->lpni_nid, NULL); - ptable = the_lnet.ln_peer_tables[cpt]; - ptable->pt_zombies++; + INIT_LIST_HEAD(&lpni->lpni_txq); + INIT_LIST_HEAD(&lpni->lpni_rtrq); + INIT_LIST_HEAD(&lpni->lpni_routes); + INIT_LIST_HEAD(&lpni->lpni_hashlist); + INIT_LIST_HEAD(&lpni->lpni_on_peer_net_list); + INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list); - list_del_init(&lpni->lpni_hashlist); - lnet_peer_ni_decref_locked(lpni); + lpni->lpni_alive = !lnet_peers_start_down(); /* 1 bit!! 
*/ + lpni->lpni_last_alive = cfs_time_current(); /* assumes alive */ + lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL; + lpni->lpni_nid = nid; + lpni->lpni_cpt = cpt; + lnet_set_peer_ni_health_locked(lpni, true); + + net = lnet_get_net_locked(LNET_NIDNET(nid)); + lpni->lpni_net = net; + if (net) { + lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits; + lpni->lpni_mintxcredits = lpni->lpni_txcredits; + lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net); + lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits; + } else { + /* + * This peer_ni is not on a local network, so we + * cannot add the credits here. In case the net is + * added later, add the peer_ni to the remote peer ni + * list so it can be easily found and revisited. + */ + /* FIXME: per-net implementation instead? */ + atomic_inc(&lpni->lpni_refcount); + list_add_tail(&lpni->lpni_on_remote_peer_ni_list, + &the_lnet.ln_remote_peer_ni_list); } + /* TODO: update flags */ + + return lpni; +} + +static struct lnet_peer_net * +lnet_peer_net_alloc(__u32 net_id) +{ + struct lnet_peer_net *lpn; + + LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn)); + if (!lpn) + return NULL; + + INIT_LIST_HEAD(&lpn->lpn_on_peer_list); + INIT_LIST_HEAD(&lpn->lpn_peer_nis); + lpn->lpn_net_id = net_id; + + return lpn; +} + +static struct lnet_peer * +lnet_peer_alloc(lnet_nid_t nid) +{ + struct lnet_peer *lp; + + LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp)); + if (!lp) + return NULL; + + INIT_LIST_HEAD(&lp->lp_on_lnet_peer_list); + INIT_LIST_HEAD(&lp->lp_peer_nets); + lp->lp_primary_nid = nid; + + /* TODO: update flags */ + + return lp; +} + + +static void +lnet_try_destroy_peer_hierarchy_locked(struct lnet_peer_ni *lpni) +{ + struct lnet_peer_net *peer_net; + struct lnet_peer *peer; + + /* TODO: could the below situation happen? accessing an already + * destroyed peer? 
*/ + if (lpni->lpni_peer_net == NULL || + lpni->lpni_peer_net->lpn_peer == NULL) + return; + + peer_net = lpni->lpni_peer_net; + peer = lpni->lpni_peer_net->lpn_peer; + + list_del_init(&lpni->lpni_on_peer_net_list); + lpni->lpni_peer_net = NULL; + + /* if peer_net is empty, then remove it from the peer */ + if (list_empty(&peer_net->lpn_peer_nis)) { + list_del_init(&peer_net->lpn_on_peer_list); + peer_net->lpn_peer = NULL; + LIBCFS_FREE(peer_net, sizeof(*peer_net)); + + /* if the peer is empty then remove it from the + * the_lnet.ln_peers */ + if (list_empty(&peer->lp_peer_nets)) { + list_del_init(&peer->lp_on_lnet_peer_list); + LIBCFS_FREE(peer, sizeof(*peer)); + } + } +} + +/* called with lnet_net_lock LNET_LOCK_EX held */ +static void +lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni) +{ + struct lnet_peer_table *ptable = NULL; + + lnet_peer_remove_from_remote_list(lpni); + + /* remove peer ni from the hash list. */ + list_del_init(&lpni->lpni_hashlist); + + /* decrement the ref count on the peer table */ + ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt]; + LASSERT(atomic_read(&ptable->pt_number) > 0); + atomic_dec(&ptable->pt_number); + + /* + * The peer_ni can no longer be found with a lookup. But there + * can be current users, so keep track of it on the zombie + * list until the reference count has gone to zero. + * + * The last reference may be lost in a place where the + * lnet_net_lock locks only a single cpt, and that cpt may not + * be lpni->lpni_cpt. So the zombie list of this peer_table + * has its own lock. 
+ */ + spin_lock(&ptable->pt_zombie_lock); + list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list); + ptable->pt_zombies++; + spin_unlock(&ptable->pt_zombie_lock); + + /* no need to keep this peer on the hierarchy anymore */ + lnet_try_destroy_peer_hierarchy_locked(lpni); + + /* decrement reference on peer */ + lnet_peer_ni_decref_locked(lpni); +} + +void lnet_peer_uninit() +{ + struct lnet_peer_ni *lpni, *tmp; + + lnet_net_lock(LNET_LOCK_EX); + + /* remove all peer_nis from the remote peer and the hash list */ + list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list, + lpni_on_remote_peer_ni_list) + lnet_peer_ni_del_locked(lpni); + lnet_peer_tables_destroy(); + + lnet_net_unlock(LNET_LOCK_EX); +} + +static void +lnet_peer_del_locked(struct lnet_peer *peer) +{ + struct lnet_peer_ni *lpni = NULL, *lpni2; + + lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni); + while (lpni != NULL) { + lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni); + lnet_peer_ni_del_locked(lpni); + lpni = lpni2; + } } static void -lnet_peer_table_cleanup_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable) +lnet_peer_table_cleanup_locked(struct lnet_net *net, + struct lnet_peer_table *ptable) { int i; - struct lnet_peer_ni *lp; + struct lnet_peer_ni *lpni; struct lnet_peer_ni *tmp; + struct lnet_peer *peer; for (i = 0; i < LNET_PEER_HASH_SIZE; i++) { - list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i], + list_for_each_entry_safe(lpni, tmp, &ptable->pt_hash[i], lpni_hashlist) { - if (ni != NULL && ni->ni_net != lp->lpni_net) + if (net != NULL && net != lpni->lpni_net) continue; - list_del_init(&lp->lpni_hashlist); - /* Lose hash table's ref */ - ptable->pt_zombies++; - lnet_peer_ni_decref_locked(lp); + + /* + * check if by removing this peer ni we should be + * removing the entire peer. 
+ */ + peer = lpni->lpni_peer_net->lpn_peer; + + if (peer->lp_primary_nid == lpni->lpni_nid) + lnet_peer_del_locked(peer); + else + lnet_peer_ni_del_locked(lpni); } } } static void -lnet_peer_table_finalize_wait_locked(struct lnet_peer_table *ptable, - int cpt_locked) +lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable) { - int i; + int i = 3; - for (i = 3; ptable->pt_zombies != 0; i++) { - lnet_net_unlock(cpt_locked); + spin_lock(&ptable->pt_zombie_lock); + while (ptable->pt_zombies) { + spin_unlock(&ptable->pt_zombie_lock); if (IS_PO2(i)) { CDEBUG(D_WARNING, @@ -186,13 +364,14 @@ lnet_peer_table_finalize_wait_locked(struct lnet_peer_table *ptable, } set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(cfs_time_seconds(1) >> 1); - lnet_net_lock(cpt_locked); + spin_lock(&ptable->pt_zombie_lock); } + spin_unlock(&ptable->pt_zombie_lock); } static void -lnet_peer_table_del_rtrs_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable, - int cpt_locked) +lnet_peer_table_del_rtrs_locked(struct lnet_net *net, + struct lnet_peer_table *ptable) { struct lnet_peer_ni *lp; struct lnet_peer_ni *tmp; @@ -202,7 +381,7 @@ lnet_peer_table_del_rtrs_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable, for (i = 0; i < LNET_PEER_HASH_SIZE; i++) { list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i], lpni_hashlist) { - if (ni->ni_net != lp->lpni_net) + if (net != lp->lpni_net) continue; if (lp->lpni_rtr_refcount == 0) @@ -210,41 +389,37 @@ lnet_peer_table_del_rtrs_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable, lpni_nid = lp->lpni_nid; - lnet_net_unlock(cpt_locked); + lnet_net_unlock(LNET_LOCK_EX); lnet_del_route(LNET_NIDNET(LNET_NID_ANY), lpni_nid); - lnet_net_lock(cpt_locked); + lnet_net_lock(LNET_LOCK_EX); } } } void -lnet_peer_tables_cleanup(lnet_ni_t *ni) +lnet_peer_tables_cleanup(struct lnet_net *net) { int i; struct lnet_peer_table *ptable; - LASSERT(the_lnet.ln_shutdown || ni != NULL); + LASSERT(the_lnet.ln_shutdown || net != NULL); /* If just deleting the 
peers for a NI, get rid of any routes these * peers are gateways for. */ cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) { lnet_net_lock(LNET_LOCK_EX); - lnet_peer_table_del_rtrs_locked(ni, ptable, i); + lnet_peer_table_del_rtrs_locked(net, ptable); lnet_net_unlock(LNET_LOCK_EX); } /* Start the cleanup process */ cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) { lnet_net_lock(LNET_LOCK_EX); - lnet_peer_table_cleanup_locked(ni, ptable); + lnet_peer_table_cleanup_locked(net, ptable); lnet_net_unlock(LNET_LOCK_EX); } - /* Wait until all peers have been destroyed. */ - cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) { - lnet_net_lock(LNET_LOCK_EX); - lnet_peer_table_finalize_wait_locked(ptable, i); - lnet_net_unlock(LNET_LOCK_EX); - } + cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) + lnet_peer_ni_finalize_wait(ptable); } static struct lnet_peer_ni * @@ -281,23 +456,23 @@ lnet_find_peer_ni_locked(lnet_nid_t nid) return lpni; } -int -lnet_find_or_create_peer_locked(lnet_nid_t dst_nid, int cpt, struct lnet_peer **peer) +struct lnet_peer * +lnet_find_or_create_peer_locked(lnet_nid_t dst_nid, int cpt) { struct lnet_peer_ni *lpni; + struct lnet_peer *lp; lpni = lnet_find_peer_ni_locked(dst_nid); if (!lpni) { - int rc; - rc = lnet_nid2peerni_locked(&lpni, dst_nid, cpt); - if (rc != 0) - return rc; + lpni = lnet_nid2peerni_locked(dst_nid, cpt); + if (IS_ERR(lpni)) + return ERR_CAST(lpni); } - *peer = lpni->lpni_peer_net->lpn_peer; + lp = lpni->lpni_peer_net->lpn_peer; lnet_peer_ni_decref_locked(lpni); - return 0; + return lp; } struct lnet_peer_ni * @@ -404,268 +579,318 @@ lnet_peer_primary_nid(lnet_nid_t nid) return primary_nid; } -static void -lnet_try_destroy_peer_hierarchy_locked(struct lnet_peer_ni *lpni) +struct lnet_peer_net * +lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id) { struct lnet_peer_net *peer_net; - struct lnet_peer *peer; + list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_on_peer_list) { + if 
(peer_net->lpn_net_id == net_id) + return peer_net; + } + return NULL; +} - /* TODO: could the below situation happen? accessing an already - * destroyed peer? */ - if (lpni->lpni_peer_net == NULL || - lpni->lpni_peer_net->lpn_peer == NULL) - return; +static int +lnet_peer_setup_hierarchy(struct lnet_peer *lp, struct lnet_peer_ni *lpni, + lnet_nid_t nid) +{ + struct lnet_peer_net *lpn = NULL; + struct lnet_peer_table *ptable; + __u32 net_id = LNET_NIDNET(nid); - peer_net = lpni->lpni_peer_net; - peer = lpni->lpni_peer_net->lpn_peer; + /* + * Create the peer_ni, peer_net, and peer if they don't exist + * yet. + */ + if (lp) { + lpn = lnet_peer_get_net_locked(lp, net_id); + } else { + lp = lnet_peer_alloc(nid); + if (!lp) + goto out_enomem; + } - list_del_init(&lpni->lpni_on_peer_net_list); - lpni->lpni_peer_net = NULL; + if (!lpn) { + lpn = lnet_peer_net_alloc(net_id); + if (!lpn) + goto out_maybe_free_lp; + } - /* if peer_net is empty, then remove it from the peer */ - if (list_empty(&peer_net->lpn_peer_nis)) { - list_del_init(&peer_net->lpn_on_peer_list); - peer_net->lpn_peer = NULL; - LIBCFS_FREE(peer_net, sizeof(*peer_net)); + if (!lpni) { + lpni = lnet_peer_ni_alloc(nid); + if (!lpni) + goto out_maybe_free_lpn; + } - /* if the peer is empty then remove it from the - * the_lnet.ln_peers */ - if (list_empty(&peer->lp_peer_nets)) { - list_del_init(&peer->lp_on_lnet_peer_list); - LIBCFS_FREE(peer, sizeof(*peer)); - } + /* Install the new peer_ni */ + lnet_net_lock(LNET_LOCK_EX); + /* Add peer_ni to global peer table hash, if necessary. */ + if (list_empty(&lpni->lpni_hashlist)) { + ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt]; + list_add_tail(&lpni->lpni_hashlist, + &ptable->pt_hash[lnet_nid2peerhash(nid)]); + ptable->pt_version++; + atomic_inc(&ptable->pt_number); + atomic_inc(&lpni->lpni_refcount); + } + + /* Detach the peer_ni from an existing peer, if necessary. 
*/ + if (lpni->lpni_peer_net && lpni->lpni_peer_net->lpn_peer != lp) + lnet_try_destroy_peer_hierarchy_locked(lpni); + + /* Add peer_ni to peer_net */ + lpni->lpni_peer_net = lpn; + list_add_tail(&lpni->lpni_on_peer_net_list, &lpn->lpn_peer_nis); + + /* Add peer_net to peer */ + if (!lpn->lpn_peer) { + lpn->lpn_peer = lp; + list_add_tail(&lpn->lpn_on_peer_list, &lp->lp_peer_nets); } + + /* Add peer to global peer list */ + if (list_empty(&lp->lp_on_lnet_peer_list)) + list_add_tail(&lp->lp_on_lnet_peer_list, &the_lnet.ln_peers); + lnet_net_unlock(LNET_LOCK_EX); + + return 0; + +out_maybe_free_lpn: + if (list_empty(&lpn->lpn_on_peer_list)) + LIBCFS_FREE(lpn, sizeof(*lpn)); +out_maybe_free_lp: + if (list_empty(&lp->lp_on_lnet_peer_list)) + LIBCFS_FREE(lp, sizeof(*lp)); +out_enomem: + return -ENOMEM; } static int -lnet_build_peer_hierarchy(struct lnet_peer_ni *lpni) +lnet_add_prim_lpni(lnet_nid_t nid) { + int rc; struct lnet_peer *peer; - struct lnet_peer_net *peer_net; - __u32 lpni_net = LNET_NIDNET(lpni->lpni_nid); - - peer = NULL; - peer_net = NULL; + struct lnet_peer_ni *lpni; - LIBCFS_ALLOC(peer, sizeof(*peer)); - if (peer == NULL) - return -ENOMEM; + LASSERT(nid != LNET_NID_ANY); - LIBCFS_ALLOC(peer_net, sizeof(*peer_net)); - if (peer_net == NULL) { - LIBCFS_FREE(peer, sizeof(*peer)); - return -ENOMEM; + /* + * lookup the NID and its peer + * if the peer doesn't exist, create it. + * if this is a non-MR peer then change its state to MR and exit. + * if this is an MR peer and it's a primary NI: NO-OP. + * if this is an MR peer and it's not a primary NI. Operation not + * allowed. + * + * The adding and deleting of peer nis is being serialized through + * the api_mutex. So we can look up peers with the mutex locked + * safely. 
Only when we need to change the ptable, do we need to + * exclusively lock the lnet_net_lock() + */ + lpni = lnet_find_peer_ni_locked(nid); + if (!lpni) { + rc = lnet_peer_setup_hierarchy(NULL, NULL, nid); + if (rc != 0) + return rc; + lpni = lnet_find_peer_ni_locked(nid); } - INIT_LIST_HEAD(&peer->lp_on_lnet_peer_list); - INIT_LIST_HEAD(&peer->lp_peer_nets); - INIT_LIST_HEAD(&peer_net->lpn_on_peer_list); - INIT_LIST_HEAD(&peer_net->lpn_peer_nis); + LASSERT(lpni); - /* build the hierarchy */ - peer_net->lpn_net_id = lpni_net; - peer_net->lpn_peer = peer; - lpni->lpni_peer_net = peer_net; - peer->lp_primary_nid = lpni->lpni_nid; - peer->lp_multi_rail = false; - list_add_tail(&peer_net->lpn_on_peer_list, &peer->lp_peer_nets); - list_add_tail(&lpni->lpni_on_peer_net_list, &peer_net->lpn_peer_nis); - list_add_tail(&peer->lp_on_lnet_peer_list, &the_lnet.ln_peers); + lnet_peer_ni_decref_locked(lpni); - return 0; -} + peer = lpni->lpni_peer_net->lpn_peer; -struct lnet_peer_net * -lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id) -{ - struct lnet_peer_net *peer_net; - list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_on_peer_list) { - if (peer_net->lpn_net_id == net_id) - return peer_net; - } - return NULL; + /* + * If we found a lpni with the same nid as the NID we're trying to + * create, then we're trying to create an already existing lpni + * that belongs to a different peer + */ + if (peer->lp_primary_nid != nid) + return -EEXIST; + + /* + * if we found an lpni that is not a multi-rail, which could occur + * if lpni is already created as a non-mr lpni or we just created + * it, then make sure you indicate that this lpni is a primary mr + * capable peer. + * + * TODO: update flags if necessary + */ + if (!peer->lp_multi_rail && peer->lp_primary_nid == nid) + peer->lp_multi_rail = true; + + return rc; } -/* - * given the key nid find the peer to add the new peer NID to. 
If the key - * nid is NULL, then create a new peer, but first make sure that the NID - * is unique - */ -int -lnet_add_peer_ni_to_peer(lnet_nid_t key_nid, lnet_nid_t nid, bool mr) +static int +lnet_add_peer_ni_to_prim_lpni(lnet_nid_t key_nid, lnet_nid_t nid) { - struct lnet_peer_ni *lpni, *lpni2; - struct lnet_peer *peer; - struct lnet_peer_net *peer_net, *pn; - int cpt, cpt2, rc; - struct lnet_peer_table *ptable = NULL; - __u32 net_id = LNET_NIDNET(nid); + struct lnet_peer *peer, *primary_peer; + struct lnet_peer_ni *lpni = NULL, *klpni = NULL; - if (nid == LNET_NID_ANY) - return -EINVAL; + LASSERT(key_nid != LNET_NID_ANY && nid != LNET_NID_ANY); + + /* + * key nid must be created by this point. If not then this + * operation is not permitted + */ + klpni = lnet_find_peer_ni_locked(key_nid); + if (!klpni) + return -ENOENT; + + lnet_peer_ni_decref_locked(klpni); + + primary_peer = klpni->lpni_peer_net->lpn_peer; - /* check that nid is unique */ - cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER); - lnet_net_lock(cpt); lpni = lnet_find_peer_ni_locked(nid); - if (lpni != NULL) { + if (lpni) { lnet_peer_ni_decref_locked(lpni); - lnet_net_unlock(cpt); - return -EEXIST; - } - lnet_net_unlock(cpt); - if (key_nid != LNET_NID_ANY) { - cpt2 = lnet_nid_cpt_hash(key_nid, LNET_CPT_NUMBER); - lnet_net_lock(cpt2); - lpni = lnet_find_peer_ni_locked(key_nid); - if (lpni == NULL) { - lnet_net_unlock(cpt2); - /* key_nid refers to a non-existant peer_ni.*/ - return -EINVAL; - } peer = lpni->lpni_peer_net->lpn_peer; - peer->lp_multi_rail = mr; - lnet_peer_ni_decref_locked(lpni); - lnet_net_unlock(cpt2); - } else { - lnet_net_lock(LNET_LOCK_EX); - rc = lnet_nid2peerni_locked(&lpni, nid, LNET_LOCK_EX); - if (rc == 0) { - lpni->lpni_peer_net->lpn_peer->lp_multi_rail = mr; - lnet_peer_ni_decref_locked(lpni); + /* + * lpni already exists in the system but it belongs to + * a different peer. 
We can't re-added it + */ + if (peer->lp_primary_nid != key_nid && peer->lp_multi_rail) { + CERROR("Cannot add NID %s owned by peer %s to peer %s\n", + libcfs_nid2str(lpni->lpni_nid), + libcfs_nid2str(peer->lp_primary_nid), + libcfs_nid2str(key_nid)); + return -EEXIST; + } else if (peer->lp_primary_nid == key_nid) { + /* + * found a peer_ni that is already part of the + * peer. This is a no-op operation. + */ + return 0; } - lnet_net_unlock(LNET_LOCK_EX); - return rc; - } - - lpni = NULL; - LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni)); - if (lpni == NULL) - return -ENOMEM; - - INIT_LIST_HEAD(&lpni->lpni_txq); - INIT_LIST_HEAD(&lpni->lpni_rtrq); - INIT_LIST_HEAD(&lpni->lpni_routes); - INIT_LIST_HEAD(&lpni->lpni_hashlist); - INIT_LIST_HEAD(&lpni->lpni_on_peer_net_list); - INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list); + /* + * TODO: else if (peer->lp_primary_nid != key_nid && + * !peer->lp_multi_rail) + * peer is not an MR peer and it will be moved in the next + * step to klpni, so update its flags accordingly. + * lnet_move_peer_ni() + */ - lpni->lpni_alive = !lnet_peers_start_down(); /* 1 bit!! */ - lpni->lpni_last_alive = cfs_time_current(); /* assumes alive */ - lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL; - lpni->lpni_nid = nid; - lpni->lpni_cpt = cpt; - lnet_set_peer_ni_health_locked(lpni, true); + /* + * TODO: call lnet_update_peer() from here to update the + * flags. This is the case when the lpni you're trying to + * add is already part of the peer. This could've been + * added by the DD previously, so go ahead and do any + * updates to the state if necessary + */ - /* allocate here in case we need to add a new peer_net */ - peer_net = NULL; - LIBCFS_ALLOC(peer_net, sizeof(*peer_net)); - if (peer_net == NULL) { - rc = -ENOMEM; - if (lpni != NULL) - LIBCFS_FREE(lpni, sizeof(*lpni)); - return rc; } - lnet_net_lock(LNET_LOCK_EX); + /* + * When we get here we either have found an existing lpni, which + * we can switch to the new peer. 
Or we need to create one and + * add it to the new peer + */ + return lnet_peer_setup_hierarchy(primary_peer, lpni, nid); +} - ptable = the_lnet.ln_peer_tables[cpt]; - ptable->pt_number++; - - lpni2 = lnet_find_peer_ni_locked(nid); - if (lpni2 != NULL) { - lnet_peer_ni_decref_locked(lpni2); - /* sanity check that lpni2's peer is what we expect */ - if (lpni2->lpni_peer_net->lpn_peer != peer) - rc = -EEXIST; - else - rc = -EINVAL; - - ptable->pt_number--; - /* another thread has already added it */ - lnet_net_unlock(LNET_LOCK_EX); - LIBCFS_FREE(peer_net, sizeof(*peer_net)); - return rc; - } +/* + * lpni creation initiated due to traffic either sending or receiving. + */ +static int +lnet_peer_ni_traffic_add(lnet_nid_t nid) +{ + struct lnet_peer_ni *lpni; + int rc = 0; - lpni->lpni_net = lnet_get_net_locked(LNET_NIDNET(lpni->lpni_nid)); - if (lpni->lpni_net != NULL) { - lpni->lpni_txcredits = - lpni->lpni_mintxcredits = - lpni->lpni_net->net_tunables.lct_peer_tx_credits; - lpni->lpni_rtrcredits = - lpni->lpni_minrtrcredits = lnet_peer_buffer_credits(lpni->lpni_net); - } else { + if (nid == LNET_NID_ANY) + return -EINVAL; + + /* lnet_net_lock is not needed here because ln_api_lock is held */ + lpni = lnet_find_peer_ni_locked(nid); + if (lpni) { /* - * if you're adding a peer which is not on a local network - * then we can't assign any of the credits. It won't be - * picked for sending anyway. Eventually a network can be - * added, in this case we need to revisit this peer and - * update its credits. 
+ * TODO: lnet_update_primary_nid() but not all of it + * only indicate if we're converting this to MR capable + * Can happen due to DD */ - - /* increment refcount for remote peer list */ - atomic_inc(&lpni->lpni_refcount); - list_add_tail(&lpni->lpni_on_remote_peer_ni_list, - &the_lnet.ln_remote_peer_ni_list); + lnet_peer_ni_decref_locked(lpni); + } else { + rc = lnet_peer_setup_hierarchy(NULL, NULL, nid); } - /* increment refcount for peer on hash list */ - atomic_inc(&lpni->lpni_refcount); + return rc; - list_add_tail(&lpni->lpni_hashlist, - &ptable->pt_hash[lnet_nid2peerhash(nid)]); - ptable->pt_version++; +} - /* add the lpni to a net */ - list_for_each_entry(pn, &peer->lp_peer_nets, lpn_on_peer_list) { - if (pn->lpn_net_id == net_id) { - list_add_tail(&lpni->lpni_on_peer_net_list, - &pn->lpn_peer_nis); - lpni->lpni_peer_net = pn; - lnet_net_unlock(LNET_LOCK_EX); - LIBCFS_FREE(peer_net, sizeof(*peer_net)); - return 0; - } +static int +lnet_peer_ni_add_non_mr(lnet_nid_t nid) +{ + struct lnet_peer_ni *lpni; + + lpni = lnet_find_peer_ni_locked(nid); + if (lpni) { + CERROR("Cannot add %s as non-mr when it already exists\n", + libcfs_nid2str(nid)); + lnet_peer_ni_decref_locked(lpni); + return -EEXIST; } - INIT_LIST_HEAD(&peer_net->lpn_on_peer_list); - INIT_LIST_HEAD(&peer_net->lpn_peer_nis); + return lnet_peer_setup_hierarchy(NULL, NULL, nid); +} - /* build the hierarchy */ - peer_net->lpn_net_id = net_id; - peer_net->lpn_peer = peer; - lpni->lpni_peer_net = peer_net; - list_add_tail(&lpni->lpni_on_peer_net_list, &peer_net->lpn_peer_nis); - list_add_tail(&peer_net->lpn_on_peer_list, &peer->lp_peer_nets); +/* + * This API handles the following combinations: + * Create a primary NI if only the key_nid is provided + * Create or add an lpni to a primary NI. Primary NI must've already + * been created + * Create a non-MR peer. 
+ */ +int +lnet_add_peer_ni_to_peer(lnet_nid_t key_nid, lnet_nid_t nid, bool mr) +{ + /* + * Caller trying to setup an MR like peer hierarchy but + * specifying it to be non-MR. This is not allowed. + */ + if (key_nid != LNET_NID_ANY && + nid != LNET_NID_ANY && !mr) + return -EPERM; + + /* Add the primary NID of a peer */ + if (key_nid != LNET_NID_ANY && + nid == LNET_NID_ANY && mr) + return lnet_add_prim_lpni(key_nid); + + /* Add a NID to an existing peer */ + if (key_nid != LNET_NID_ANY && + nid != LNET_NID_ANY && mr) + return lnet_add_peer_ni_to_prim_lpni(key_nid, nid); + + /* Add a non-MR peer NI */ + if (((key_nid != LNET_NID_ANY && + nid == LNET_NID_ANY) || + (key_nid == LNET_NID_ANY && + nid != LNET_NID_ANY)) && !mr) + return lnet_peer_ni_add_non_mr(key_nid != LNET_NID_ANY ? + key_nid : nid); - lnet_net_unlock(LNET_LOCK_EX); return 0; } int lnet_del_peer_ni_from_peer(lnet_nid_t key_nid, lnet_nid_t nid) { - int cpt; lnet_nid_t local_nid; struct lnet_peer *peer; - struct lnet_peer_ni *lpni, *lpni2; - struct lnet_peer_table *ptable = NULL; + struct lnet_peer_ni *lpni; if (key_nid == LNET_NID_ANY) return -EINVAL; local_nid = (nid != LNET_NID_ANY) ? 
nid : key_nid; - cpt = lnet_nid_cpt_hash(local_nid, LNET_CPT_NUMBER); - lnet_net_lock(LNET_LOCK_EX); lpni = lnet_find_peer_ni_locked(local_nid); - if (lpni == NULL) { - lnet_net_unlock(cpt); + if (!lpni) return -EINVAL; - } lnet_peer_ni_decref_locked(lpni); peer = lpni->lpni_peer_net->lpn_peer; @@ -676,30 +901,15 @@ lnet_del_peer_ni_from_peer(lnet_nid_t key_nid, lnet_nid_t nid) * deleting the primary ni is equivalent to deleting the * entire peer */ - lpni = NULL; - lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni); - while (lpni != NULL) { - lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni); - cpt = lnet_nid_cpt_hash(lpni->lpni_nid, - LNET_CPT_NUMBER); - lnet_peer_remove_from_remote_list(lpni); - ptable = the_lnet.ln_peer_tables[cpt]; - ptable->pt_zombies++; - list_del_init(&lpni->lpni_hashlist); - lnet_peer_ni_decref_locked(lpni); - lpni = lpni2; - } + lnet_net_lock(LNET_LOCK_EX); + lnet_peer_del_locked(peer); lnet_net_unlock(LNET_LOCK_EX); return 0; } - lnet_peer_remove_from_remote_list(lpni); - cpt = lnet_nid_cpt_hash(lpni->lpni_nid, LNET_CPT_NUMBER); - ptable = the_lnet.ln_peer_tables[cpt]; - ptable->pt_zombies++; - list_del_init(&lpni->lpni_hashlist); - lnet_peer_ni_decref_locked(lpni); + lnet_net_lock(LNET_LOCK_EX); + lnet_peer_ni_del_locked(lpni); lnet_net_unlock(LNET_LOCK_EX); return 0; @@ -713,160 +923,70 @@ lnet_destroy_peer_ni_locked(struct lnet_peer_ni *lpni) LASSERT(atomic_read(&lpni->lpni_refcount) == 0); LASSERT(lpni->lpni_rtr_refcount == 0); LASSERT(list_empty(&lpni->lpni_txq)); - LASSERT(list_empty(&lpni->lpni_hashlist)); LASSERT(lpni->lpni_txqnob == 0); - LASSERT(lpni->lpni_peer_net != NULL); - LASSERT(lpni->lpni_peer_net->lpn_peer != NULL); - - ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt]; - LASSERT(ptable->pt_number > 0); - ptable->pt_number--; lpni->lpni_net = NULL; - lnet_try_destroy_peer_hierarchy_locked(lpni); + /* remove the peer ni from the zombie list */ + ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt]; + 
spin_lock(&ptable->pt_zombie_lock); + list_del_init(&lpni->lpni_hashlist); + ptable->pt_zombies--; + spin_unlock(&ptable->pt_zombie_lock); LIBCFS_FREE(lpni, sizeof(*lpni)); - - LASSERT(ptable->pt_zombies > 0); - ptable->pt_zombies--; } -int -lnet_nid2peerni_locked(struct lnet_peer_ni **lpnip, lnet_nid_t nid, int cpt) +struct lnet_peer_ni * +lnet_nid2peerni_locked(lnet_nid_t nid, int cpt) { struct lnet_peer_table *ptable; struct lnet_peer_ni *lpni = NULL; - struct lnet_peer_ni *lpni2; int cpt2; - int rc = 0; + int rc; - *lpnip = NULL; if (the_lnet.ln_shutdown) /* it's shutting down */ - return -ESHUTDOWN; + return ERR_PTR(-ESHUTDOWN); /* * calculate cpt2 with the standard hash function - * This cpt2 becomes the slot where we'll find or create the peer. + * This cpt2 is the slot where we'll find or create the peer. */ cpt2 = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER); - - /* - * Any changes to the peer tables happen under exclusive write - * lock. Any reads to the peer tables can be done via a standard - * CPT read lock. - */ - if (cpt != LNET_LOCK_EX) { - lnet_net_unlock(cpt); - lnet_net_lock(LNET_LOCK_EX); - } - ptable = the_lnet.ln_peer_tables[cpt2]; lpni = lnet_get_peer_ni_locked(ptable, nid); - if (lpni != NULL) { - *lpnip = lpni; - if (cpt != LNET_LOCK_EX) { - lnet_net_unlock(LNET_LOCK_EX); - lnet_net_lock(cpt); - } - return 0; - } + if (lpni) + return lpni; + /* Slow path: serialized using the ln_api_mutex. */ + lnet_net_unlock(cpt); + mutex_lock(&the_lnet.ln_api_mutex); /* - * take extra refcount in case another thread has shutdown LNet - * and destroyed locks and peer-table before I finish the allocation + * Shutdown is only set under the ln_api_lock, so a single + * check here is sufficent. + * + * lnet_add_nid_to_peer() also handles the case where we've + * raced and a different thread added the NID. 
*/ - ptable->pt_number++; - lnet_net_unlock(LNET_LOCK_EX); - - LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt2, sizeof(*lpni)); - - if (lpni == NULL) { - rc = -ENOMEM; - lnet_net_lock(cpt); - goto out; - } - - INIT_LIST_HEAD(&lpni->lpni_txq); - INIT_LIST_HEAD(&lpni->lpni_rtrq); - INIT_LIST_HEAD(&lpni->lpni_routes); - INIT_LIST_HEAD(&lpni->lpni_hashlist); - INIT_LIST_HEAD(&lpni->lpni_on_peer_net_list); - INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list); - - lpni->lpni_alive = !lnet_peers_start_down(); /* 1 bit!! */ - lpni->lpni_last_alive = cfs_time_current(); /* assumes alive */ - lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL; - lpni->lpni_nid = nid; - lpni->lpni_cpt = cpt2; - atomic_set(&lpni->lpni_refcount, 2); /* 1 for caller; 1 for hash */ - - rc = lnet_build_peer_hierarchy(lpni); - if (rc != 0) - goto out; - - lnet_net_lock(LNET_LOCK_EX); - if (the_lnet.ln_shutdown) { - rc = -ESHUTDOWN; - goto out; + lpni = ERR_PTR(-ESHUTDOWN); + goto out_mutex_unlock; } - lpni2 = lnet_get_peer_ni_locked(ptable, nid); - if (lpni2 != NULL) { - *lpnip = lpni2; - goto out; + rc = lnet_peer_ni_traffic_add(nid); + if (rc) { + lpni = ERR_PTR(rc); + goto out_mutex_unlock; } - lpni->lpni_net = lnet_get_net_locked(LNET_NIDNET(lpni->lpni_nid)); - if (lpni->lpni_net) { - lpni->lpni_txcredits = - lpni->lpni_mintxcredits = - lpni->lpni_net->net_tunables.lct_peer_tx_credits; - lpni->lpni_rtrcredits = - lpni->lpni_minrtrcredits = - lnet_peer_buffer_credits(lpni->lpni_net); - } else { - /* - * if you're adding a peer which is not on a local network - * then we can't assign any of the credits. It won't be - * picked for sending anyway. Eventually a network can be - * added, in this case we need to revisit this peer and - * update its credits. 
- */ - - CDEBUG(D_NET, "peer_ni %s is not directly connected\n", - libcfs_nid2str(nid)); - /* increment refcount for remote peer list */ - atomic_inc(&lpni->lpni_refcount); - list_add_tail(&lpni->lpni_on_remote_peer_ni_list, - &the_lnet.ln_remote_peer_ni_list); - } - - lnet_set_peer_ni_health_locked(lpni, true); - - list_add_tail(&lpni->lpni_hashlist, - &ptable->pt_hash[lnet_nid2peerhash(nid)]); - ptable->pt_version++; - *lpnip = lpni; + lpni = lnet_get_peer_ni_locked(ptable, nid); + LASSERT(lpni); - if (cpt != LNET_LOCK_EX) { - lnet_net_unlock(LNET_LOCK_EX); - lnet_net_lock(cpt); - } +out_mutex_unlock: + mutex_unlock(&the_lnet.ln_api_mutex); + lnet_net_lock(cpt); - return 0; -out: - if (lpni != NULL) { - lnet_try_destroy_peer_hierarchy_locked(lpni); - LIBCFS_FREE(lpni, sizeof(*lpni)); - } - ptable->pt_number--; - if (cpt != LNET_LOCK_EX) { - lnet_net_unlock(LNET_LOCK_EX); - lnet_net_lock(cpt); - } - return rc; + return lpni; } void @@ -874,14 +994,13 @@ lnet_debug_peer(lnet_nid_t nid) { char *aliveness = "NA"; struct lnet_peer_ni *lp; - int rc; int cpt; cpt = lnet_cpt_of_nid(nid, NULL); lnet_net_lock(cpt); - rc = lnet_nid2peerni_locked(&lp, nid, cpt); - if (rc != 0) { + lp = lnet_nid2peerni_locked(nid, cpt); + if (IS_ERR(lp)) { lnet_net_unlock(cpt); CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid)); return; @@ -965,7 +1084,7 @@ int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid, } int lnet_get_peer_info(__u32 idx, lnet_nid_t *primary_nid, lnet_nid_t *nid, - struct lnet_peer_ni_credit_info *peer_ni_info, + bool *mr, struct lnet_peer_ni_credit_info *peer_ni_info, struct lnet_ioctl_element_stats *peer_ni_stats) { struct lnet_peer_ni *lpni = NULL; @@ -978,6 +1097,7 @@ int lnet_get_peer_info(__u32 idx, lnet_nid_t *primary_nid, lnet_nid_t *nid, return -ENOENT; *primary_nid = lp->lp_primary_nid; + *mr = lp->lp_multi_rail; *nid = lpni->lpni_nid; snprintf(peer_ni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA"); if (lnet_isrouter(lpni) || diff --git 
a/lnet/lnet/router.c b/lnet/lnet/router.c index ad97f6c..3fdaa21 100644 --- a/lnet/lnet/router.c +++ b/lnet/lnet/router.c @@ -302,6 +302,7 @@ lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway, lnet_remotenet_t *rnet2; lnet_route_t *route; lnet_ni_t *ni; + struct lnet_peer_ni *lpni; int add_route; int rc; @@ -340,13 +341,14 @@ lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway, lnet_net_lock(LNET_LOCK_EX); - rc = lnet_nid2peerni_locked(&route->lr_gateway, gateway, LNET_LOCK_EX); - if (rc != 0) { + lpni = lnet_nid2peerni_locked(gateway, LNET_LOCK_EX); + if (IS_ERR(lpni)) { lnet_net_unlock(LNET_LOCK_EX); LIBCFS_FREE(route, sizeof(*route)); LIBCFS_FREE(rnet, sizeof(*rnet)); + rc = PTR_ERR(lpni); if (rc == -EHOSTUNREACH) /* gateway is not on a local net. */ return rc; /* ignore the route entry */ CERROR("Error %d creating route %s %d %s\n", rc, @@ -354,7 +356,7 @@ lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway, libcfs_nid2str(gateway)); return rc; } - + route->lr_gateway = lpni; LASSERT(!the_lnet.ln_shutdown); rnet2 = lnet_find_rnet_locked(net); diff --git a/lnet/utils/lnetconfig/liblnetconfig.c b/lnet/utils/lnetconfig/liblnetconfig.c index 917ca44..a0ade5f 100644 --- a/lnet/utils/lnetconfig/liblnetconfig.c +++ b/lnet/utils/lnetconfig/liblnetconfig.c @@ -169,6 +169,90 @@ void lustre_lnet_init_nw_descr(struct lnet_dlc_network_descr *nw_descr) } } +int lustre_lnet_parse_nids(char *nids, char **array, int size, + char ***out_array) +{ + int num_nids = 0; + char *comma = nids, *cur, *entry; + char **new_array; + int i, len, start = 0, finish = 0; + + if (nids == NULL || strlen(nids) == 0) + return size; + + /* count the number or new nids, by counting the number of commas */ + while (comma) { + comma = strchr(comma, ','); + if (comma) { + comma++; + num_nids++; + } else { + num_nids++; + } + } + + /* + * if the array is not NULL allocate a large enough array to house + * the old and new entries + */ + new_array = calloc(sizeof(char*), + (size > 0) ? 
size + num_nids : num_nids); + + if (!new_array) + goto failed; + + /* parse our the new nids and add them to the tail of the array */ + comma = nids; + cur = nids; + start = (size > 0) ? size: 0; + finish = (size > 0) ? size + num_nids : num_nids; + for (i = start; i < finish; i++) { + comma = strchr(comma, ','); + if (!comma) + /* + * the length of the string to be parsed out is + * from cur to end of string. So it's good enough + * to strlen(cur) + */ + len = strlen(cur) + 1; + else + /* length of the string is comma - cur */ + len = (comma - cur) + 1; + + entry = calloc(1, len); + if (!entry) { + finish = i > 0 ? i - 1: 0; + goto failed; + } + strncpy(entry, cur, len - 1); + entry[len] = '\0'; + new_array[i] = entry; + if (comma) { + comma++; + cur = comma; + } + } + + /* add the old entries in the array and delete the old array*/ + for (i = 0; i < size; i++) + new_array[i] = array[i]; + + if (array) + free(array); + + *out_array = new_array; + + return finish; + +failed: + for (i = start; i < finish; i++) + free(new_array[i]); + if (new_array) + free(new_array); + + return size; +} + /* * format expected: * [], [],.. 
@@ -279,42 +363,34 @@ int lustre_lnet_config_ni_system(bool up, bool load_ni_from_mod, return rc; } -static lnet_nid_t *allocate_create_nid_array(char **nids, char *err_str) +static lnet_nid_t *allocate_create_nid_array(char **nids, __u32 num_nids, + char *err_str) { lnet_nid_t *array = NULL; - int idx = 0; + __u32 i; if (!nids) { snprintf(err_str, LNET_MAX_STR_LEN, "no NIDs to add"); return NULL; } - /* count the size of the array */ - while (nids[idx] != NULL) - idx++; - - array = calloc(sizeof(*array) * idx + 1, 1); + array = calloc(sizeof(*array) * num_nids, 1); if (array == NULL) { snprintf(err_str, LNET_MAX_STR_LEN, "out of memory"); return NULL; } - idx = 0; - while (nids[idx] != NULL) { - array[idx] = libcfs_str2nid(nids[idx]); - if (array[idx] == LNET_NID_ANY) { + for (i = 0; i < num_nids; i++) { + array[i] = libcfs_str2nid(nids[i]); + if (array[i] == LNET_NID_ANY) { free(array); snprintf(err_str, LNET_MAX_STR_LEN, "bad NID: '%s'", - nids[idx]); + nids[i]); return NULL; } - idx++; } - /* identify last entry */ - array[idx] = LNET_NID_ANY; - return array; } @@ -339,17 +415,18 @@ static int dispatch_peer_ni_cmd(lnet_nid_t knid, lnet_nid_t nid, __u32 cmd, return rc; } -int lustre_lnet_config_peer_nid(char *knid, char **nid, bool mr, int seq_no, - struct cYAML **err_rc) +int lustre_lnet_config_peer_nid(char *knid, char **nid, int num_nids, + bool mr, int seq_no, struct cYAML **err_rc) { struct lnet_ioctl_peer_cfg data; lnet_nid_t key_nid = LNET_NID_ANY; int rc = LUSTRE_CFG_RC_NO_ERR; int idx = 0; + bool nid0_used = false; char err_str[LNET_MAX_STR_LEN] = {0}; - lnet_nid_t *nids = allocate_create_nid_array(nid, err_str); + lnet_nid_t *nids = allocate_create_nid_array(nid, num_nids, err_str); - if (knid != NULL) { + if (knid) { key_nid = libcfs_str2nid(knid); if (key_nid == LNET_NID_ANY) { snprintf(err_str, sizeof(err_str), @@ -358,7 +435,7 @@ int lustre_lnet_config_peer_nid(char *knid, char **nid, bool mr, int seq_no, rc = LUSTRE_CFG_RC_MISSING_PARAM; goto out; 
} - } else if (nids[0] == LNET_NID_ANY) { + } else if (!nids || nids[0] == LNET_NID_ANY) { snprintf(err_str, sizeof(err_str), "no NIDs provided for configuration"); rc = LUSTRE_CFG_RC_MISSING_PARAM; @@ -371,14 +448,27 @@ int lustre_lnet_config_peer_nid(char *knid, char **nid, bool mr, int seq_no, LIBCFS_IOC_INIT_V2(data, prcfg_hdr); data.prcfg_mr = mr; - if (nids[0] == LNET_NID_ANY) { - rc = dispatch_peer_ni_cmd(LNET_NID_ANY, key_nid, - IOC_LIBCFS_ADD_PEER_NI, - &data, err_str, "add"); - goto out; + + /* + * if key_nid is not specified use the first nid in the list of + * nids provided as the key_nid. NOTE: on entering 'if' we must + * have at least 1 NID + */ + if (key_nid == LNET_NID_ANY) { + nid0_used = true; + key_nid = nids[0]; } - while (nids[idx] != LNET_NID_ANY) { + /* Create the key_nid first */ + rc = dispatch_peer_ni_cmd(key_nid, LNET_NID_ANY, + IOC_LIBCFS_ADD_PEER_NI, + &data, err_str, "add"); + + if (rc != 0) + goto out; + + /* add the rest of the nids to the key nid if any are available */ + for (idx = nid0_used ? 1 : 0 ; nids && idx < num_nids; idx++) { /* * If key_nid is not provided then the first nid in the * list becomes the key_nid. 
First time round the loop use @@ -391,11 +481,6 @@ int lustre_lnet_config_peer_nid(char *knid, char **nid, bool mr, int seq_no, if (rc != 0) goto out; - - if (idx == 0 && key_nid == LNET_NID_ANY) - key_nid = nids[0]; - - idx++; } out: @@ -405,15 +490,15 @@ out: return rc; } -int lustre_lnet_del_peer_nid(char *knid, char **nid, int seq_no, - struct cYAML **err_rc) +int lustre_lnet_del_peer_nid(char *knid, char **nid, int num_nids, + int seq_no, struct cYAML **err_rc) { struct lnet_ioctl_peer_cfg data; lnet_nid_t key_nid; int rc = LUSTRE_CFG_RC_NO_ERR; int idx = 0; char err_str[LNET_MAX_STR_LEN] = {0}; - lnet_nid_t *nids = allocate_create_nid_array(nid, err_str); + lnet_nid_t *nids = allocate_create_nid_array(nid, num_nids, err_str); if (knid == NULL) { snprintf(err_str, sizeof(err_str), @@ -434,22 +519,20 @@ int lustre_lnet_del_peer_nid(char *knid, char **nid, int seq_no, snprintf(err_str, sizeof(err_str), "\"Success\""); LIBCFS_IOC_INIT_V2(data, prcfg_hdr); - if (nids[0] == LNET_NID_ANY) { + if (!nids || nids[0] == LNET_NID_ANY) { rc = dispatch_peer_ni_cmd(key_nid, LNET_NID_ANY, IOC_LIBCFS_DEL_PEER_NI, &data, err_str, "del"); goto out; } - while (nids[idx] != LNET_NID_ANY) { + for (idx = 0; nids && idx < num_nids; idx++) { rc = dispatch_peer_ni_cmd(key_nid, nids[idx], IOC_LIBCFS_DEL_PEER_NI, &data, err_str, "del"); if (rc != 0) goto out; - - idx++; } out: @@ -1906,8 +1989,8 @@ out: return rc; } -int lustre_lnet_show_peer(char *knid, int seq_no, struct cYAML **show_rc, - struct cYAML **err_rc) +int lustre_lnet_show_peer(char *knid, int detail, int seq_no, + struct cYAML **show_rc, struct cYAML **err_rc) { struct lnet_ioctl_peer_cfg *peer_info; struct lnet_peer_ni_credit_info *lpni_cri; @@ -1981,6 +2064,11 @@ int lustre_lnet_show_peer(char *knid, int seq_no, struct cYAML **show_rc, libcfs_nid2str(pnid)) == NULL) goto out; + if (cYAML_create_string(peer, "Multi-Rail", + peer_info->prcfg_mr ? 
+ "True" : "False") + == NULL) + goto out; tmp = cYAML_create_seq(peer, "peer ni"); if (tmp == NULL) goto out; @@ -2005,9 +2093,8 @@ int lustre_lnet_show_peer(char *knid, int seq_no, struct cYAML **show_rc, == NULL) goto out; - if (cYAML_create_number(peer_ni, "refcount", - lpni_cri->cr_refcount) == NULL) - goto out; + if (!detail) + continue; if (cYAML_create_number(peer_ni, "max_ni_tx_credits", lpni_cri->cr_ni_peer_tx_credits) @@ -2048,6 +2135,10 @@ int lustre_lnet_show_peer(char *knid, int seq_no, struct cYAML **show_rc, lpni_stats->drop_count) == NULL) goto out; + + if (cYAML_create_number(peer_ni, "refcount", + lpni_cri->cr_refcount) == NULL) + goto out; } if (l_errno != ENOENT) { @@ -2282,10 +2373,11 @@ static void yaml_free_string_array(char **array, int num) for (i = 0; i < num; i++) { if (*sub_array != NULL) - free(sub_array); + free(*sub_array); sub_array++; } - free(array); + if (array) + free(array); } /* @@ -2644,46 +2736,40 @@ static int handle_yaml_del_ni(struct cYAML *tree, struct cYAML **show_rc, static int yaml_copy_peer_nids(struct cYAML *tree, char ***nidsppp) { - struct cYAML *nids_entry = NULL, *child; + struct cYAML *nids_entry = NULL, *child = NULL, *entry = NULL; char **nids = NULL; int num = 0, rc = LUSTRE_CFG_RC_NO_ERR; - nids_entry = cYAML_get_object_item(tree, "nids"); - if (nids_entry != NULL) { - /* count */ - child = nids_entry->cy_child; - while (child != NULL) { + nids_entry = cYAML_get_object_item(tree, "peer ni"); + if (cYAML_is_sequence(nids_entry)) { + while (cYAML_get_next_seq_item(nids_entry, &child)) num++; - child = child->cy_next; - } + } - if (num == 0) - return LUSTRE_CFG_RC_MISSING_PARAM; - - nids = calloc(sizeof(*nids) * num, 1); - if (nids == NULL) - return LUSTRE_CFG_RC_OUT_OF_MEM; - - /* now grab all the nids */ - child = nids_entry->cy_child; - num = 0; - while (child != NULL) { - nids[num] = calloc(strlen(child->cy_valuestring) + 1, - 1); - if (nids[num] == NULL) { - rc = LUSTRE_CFG_RC_OUT_OF_MEM; - goto failed; - 
} - strncpy(nids[num], child->cy_valuestring, - strlen(child->cy_valuestring)); - child = child->cy_next; - num++; + if (num == 0) + return LUSTRE_CFG_RC_MISSING_PARAM; + + nids = calloc(sizeof(*nids) * num, 1); + if (nids == NULL) + return LUSTRE_CFG_RC_OUT_OF_MEM; + + /* now grab all the nids */ + num = 0; + child = NULL; + while (cYAML_get_next_seq_item(nids_entry, &child)) { + entry = cYAML_get_object_item(child, "nid"); + if (!entry) + continue; + nids[num] = calloc(strlen(entry->cy_valuestring) + 1, 1); + if (!nids[num]) { + rc = LUSTRE_CFG_RC_OUT_OF_MEM; + goto failed; } - rc = num; - } else { - rc = LUSTRE_CFG_RC_MISSING_PARAM; - goto failed; + strncpy(nids[num], entry->cy_valuestring, + strlen(entry->cy_valuestring)); + num++; } + rc = num; *nidsppp = nids; return rc; @@ -2707,11 +2793,11 @@ static int handle_yaml_config_peer(struct cYAML *tree, struct cYAML **show_rc, return num; seq_no = cYAML_get_object_item(tree, "seq_no"); - key_nid = cYAML_get_object_item(tree, "key_nid"); + key_nid = cYAML_get_object_item(tree, "primary nid"); non_mr = cYAML_get_object_item(tree, "non_mr"); rc = lustre_lnet_config_peer_nid((key_nid) ? key_nid->cy_valuestring : NULL, - nids, + nids, num, (non_mr) ? false : true, (seq_no) ? seq_no->cy_valueint : -1, err_rc); @@ -2732,10 +2818,10 @@ static int handle_yaml_del_peer(struct cYAML *tree, struct cYAML **show_rc, return num; seq_no = cYAML_get_object_item(tree, "seq_no"); - key_nid = cYAML_get_object_item(tree, "key_nid"); + key_nid = cYAML_get_object_item(tree, "primary nid"); rc = lustre_lnet_del_peer_nid((key_nid) ? key_nid->cy_valuestring : NULL, - nids, + nids, num, (seq_no) ? 
seq_no->cy_valueint : -1, err_rc); @@ -2870,12 +2956,14 @@ static int handle_yaml_show_routing(struct cYAML *tree, struct cYAML **show_rc, static int handle_yaml_show_credits(struct cYAML *tree, struct cYAML **show_rc, struct cYAML **err_rc) { - struct cYAML *seq_no, *key_nid; + struct cYAML *seq_no, *key_nid, *detail; seq_no = cYAML_get_object_item(tree, "seq_no"); + detail = cYAML_get_object_item(tree, "detail"); key_nid = cYAML_get_object_item(tree, "key_nid"); return lustre_lnet_show_peer((key_nid) ? key_nid->cy_valuestring : NULL, + (detail) ? detail->cy_valueint : 0, (seq_no) ? seq_no->cy_valueint : -1, show_rc, err_rc); } @@ -2891,6 +2979,41 @@ static int handle_yaml_show_stats(struct cYAML *tree, struct cYAML **show_rc, show_rc, err_rc); } +static int handle_yaml_config_numa(struct cYAML *tree, struct cYAML **show_rc, + struct cYAML **err_rc) +{ + struct cYAML *seq_no, *range; + + seq_no = cYAML_get_object_item(tree, "seq_no"); + range = cYAML_get_object_item(tree, "range"); + + return lustre_lnet_config_numa_range(range ? range->cy_valueint : -1, + seq_no ? seq_no->cy_valueint : -1, + err_rc); +} + +static int handle_yaml_del_numa(struct cYAML *tree, struct cYAML **show_rc, + struct cYAML **err_rc) +{ + struct cYAML *seq_no; + + seq_no = cYAML_get_object_item(tree, "seq_no"); + + return lustre_lnet_config_numa_range(0, seq_no ? seq_no->cy_valueint : -1, + err_rc); +} + +static int handle_yaml_show_numa(struct cYAML *tree, struct cYAML **show_rc, + struct cYAML **err_rc) +{ + struct cYAML *seq_no; + + seq_no = cYAML_get_object_item(tree, "seq_no"); + + return lustre_lnet_show_numa_range(seq_no ? 
seq_no->cy_valueint : -1, + show_rc, err_rc); +} + struct lookup_cmd_hdlr_tbl { char *name; cmd_handler_t cb; @@ -2903,6 +3026,7 @@ static struct lookup_cmd_hdlr_tbl lookup_config_tbl[] = { {"peer", handle_yaml_config_peer}, {"routing", handle_yaml_config_routing}, {"buffers", handle_yaml_config_buffers}, + {"numa", handle_yaml_config_numa}, {NULL, NULL} }; @@ -2911,6 +3035,7 @@ static struct lookup_cmd_hdlr_tbl lookup_del_tbl[] = { {"net", handle_yaml_del_ni}, {"peer", handle_yaml_del_peer}, {"routing", handle_yaml_del_routing}, + {"numa", handle_yaml_del_numa}, {NULL, NULL} }; @@ -2921,6 +3046,7 @@ static struct lookup_cmd_hdlr_tbl lookup_show_tbl[] = { {"routing", handle_yaml_show_routing}, {"credits", handle_yaml_show_credits}, {"statistics", handle_yaml_show_stats}, + {"numa", handle_yaml_show_numa}, {NULL, NULL} }; diff --git a/lnet/utils/lnetconfig/liblnetconfig.h b/lnet/utils/lnetconfig/liblnetconfig.h index 54bcc94..d69bfcd 100644 --- a/lnet/utils/lnetconfig/liblnetconfig.h +++ b/lnet/utils/lnetconfig/liblnetconfig.h @@ -271,12 +271,13 @@ int lustre_lnet_show_stats(int seq_no, struct cYAML **show_rc, * * knid - Key NID of the peer * nid - list of nids to add + * num_nids - number of nids in the nid array * mr - true if this peer is MR capable. * seq_no - sequence number of the command * err_rc - YAML strucutre of the resultant return code. */ -int lustre_lnet_config_peer_nid(char *knid, char **nid, bool mr, int seq_no, - struct cYAML **err_rc); +int lustre_lnet_config_peer_nid(char *knid, char **nid, int num_nids, + bool mr, int seq_no, struct cYAML **err_rc); /* * lustre_lnet_del_peer_nid @@ -287,11 +288,12 @@ int lustre_lnet_config_peer_nid(char *knid, char **nid, bool mr, int seq_no, * * knid - Key NID of the peer * nid - list of nids to add + * num_nids - number of nids in the nid array * seq_no - sequence number of the command * err_rc - YAML strucutre of the resultant return code. 
*/ -int lustre_lnet_del_peer_nid(char *knid, char **nid, int seq_no, - struct cYAML **err_rc); +int lustre_lnet_del_peer_nid(char *knid, char **nid, int num_nids, + int seq_no, struct cYAML **err_rc); /* * lustre_lnet_show_peer @@ -299,13 +301,14 @@ int lustre_lnet_del_peer_nid(char *knid, char **nid, int seq_no, * system are shown. * * knid - Key NID of the peer + * detail - display detailed information * seq_no - sequence number of the command * show_rc - YAML structure of the resultant show * err_rc - YAML strucutre of the resultant return code. * */ -int lustre_lnet_show_peer(char *knid, int seq_no, struct cYAML **show_rc, - struct cYAML **err_rc); +int lustre_lnet_show_peer(char *knid, int detail, int seq_no, + struct cYAML **show_rc, struct cYAML **err_rc); /* * lustre_yaml_config @@ -357,6 +360,24 @@ int lustre_lnet_parse_interfaces(char *intf_str, struct lnet_dlc_network_descr *nw_descr); /* + * lustre_lnet_parse_nids + * Parse a set of nids into a locally allocated array and return the + * pointer of the array to the caller. The caller is responsible for + * freeing the array. If an initial array is provided then copy over + * the contents of that array into the new array and append to it the + * new content. + * The nids can be of the form "nid [,nid, nid, nid]" + * nids: nids string to be parsed + * array: initial array of content + * size: num of elements in the array + * out_array: [OUT] new allocated array. + * Returns size of array + * sets the out_array to NULL on failure. + */ +int lustre_lnet_parse_nids(char *nids, char **array, int size, + char ***out_array); + +/* * lustre_lnet_send_dbg_task * send a debug task to be carried out in the kernel. This API will * not be exposed to the user through lnetctl utility. It can only be diff --git a/lnet/utils/lnetctl.c b/lnet/utils/lnetctl.c index cbbd33e..e0aea3a 100644 --- a/lnet/utils/lnetctl.c +++ b/lnet/utils/lnetctl.c @@ -138,7 +138,8 @@ command_t peer_cmds[] = { "\t--key_nid: NID to identify peer. 
If not provided then the first\n" "\t NID in the list becomes the key NID of a newly created\n" "\t peer. \n" - "\t--nid: one or more peer NIDs\n"}, + "\t--nid: one or more peer NIDs\n" + "\t--non_mr: create this peer as not Multi-Rail capable\n"}, {"del", jt_del_peer_nid, 0, "delete a peer NID\n" "\t--key_nid: NID to identify peer.\n" "\t--nid: list of NIDs to remove. If none provided,\n" @@ -1062,6 +1063,18 @@ static int jt_export(int argc, char **argv) cYAML_free_tree(err_rc); } + rc = lustre_lnet_show_peer(NULL, 1, -1, &show_rc, &err_rc); + if (rc != LUSTRE_CFG_RC_NO_ERR) { + cYAML_print_tree2file(stderr, err_rc); + cYAML_free_tree(err_rc); + } + + rc = lustre_lnet_show_numa_range(-1, &show_rc, &err_rc); + if (rc != LUSTRE_CFG_RC_NO_ERR) { + cYAML_print_tree2file(stderr, err_rc); + cYAML_free_tree(err_rc); + } + if (show_rc != NULL) { cYAML_print_tree2file(f, show_rc); cYAML_free_tree(show_rc); @@ -1076,17 +1089,17 @@ static int jt_export(int argc, char **argv) static int jt_add_peer_nid(int argc, char **argv) { char *key_nid = NULL; - char *nid[LNET_MAX_INTERFACES] = {NULL}; - int idx = 0; + char **nids = NULL, **nids2 = NULL; + int size = 0; struct cYAML *err_rc = NULL; - int rc, opt; + int rc = LUSTRE_CFG_RC_NO_ERR, opt, i; bool non_mr = false; const char *const short_options = "k:n:mh"; const struct option long_options[] = { { "key_nid", 1, NULL, 'k' }, { "nid", 1, NULL, 'n' }, - { "non_mr", 1, NULL, 'm'}, + { "non_mr", 0, NULL, 'm'}, { "help", 0, NULL, 'h' }, { NULL, 0, NULL, 0 }, }; @@ -1098,26 +1111,16 @@ static int jt_add_peer_nid(int argc, char **argv) key_nid = optarg; break; case 'n': - if (idx >= LNET_MAX_INTERFACES) { - cYAML_build_error(-1, -1, "peer_ni", "add", - "too many interfaces", - &err_rc); - rc = LUSTRE_CFG_RC_BAD_PARAM; - goto failed; - } - nid[idx] = calloc(strlen(optarg) + 1, 1); - if (nid[idx] == NULL) { - cYAML_build_error(-1, -1, "peer_ni", "add", - "out of memory", - &err_rc); - rc = LUSTRE_CFG_RC_BAD_PARAM; + size = 
lustre_lnet_parse_nids(optarg, nids, size, + &nids2); + if (nids2 == NULL) goto failed; - } - strncpy(nid[idx], optarg, strlen(optarg)); - idx++; + nids = nids2; + rc = LUSTRE_CFG_RC_OUT_OF_MEM; break; case 'm': non_mr = true; + break; case 'h': print_help(peer_cmds, "peer", "add"); return 0; @@ -1126,14 +1129,13 @@ static int jt_add_peer_nid(int argc, char **argv) } } - rc = lustre_lnet_config_peer_nid(key_nid, nid, !non_mr, -1, &err_rc); + rc = lustre_lnet_config_peer_nid(key_nid, nids, size, + !non_mr, -1, &err_rc); failed: - idx = 0; - while (nid[idx] != NULL) { - free(nid[idx]); - idx++; - } + for (i = 0; i < size; i++) + free(nids[i]); + free(nids); if (rc != LUSTRE_CFG_RC_NO_ERR) cYAML_print_tree2file(stderr, err_rc); @@ -1146,10 +1148,9 @@ failed: static int jt_del_peer_nid(int argc, char **argv) { char *key_nid = NULL; - char *nid[LNET_MAX_INTERFACES] = {NULL}; - int idx = 0; + char **nids = NULL, **nids2 = NULL; struct cYAML *err_rc = NULL; - int rc, opt; + int rc = LUSTRE_CFG_RC_NO_ERR, opt, i, size = 0; const char *const short_options = "k:n:h"; const struct option long_options[] = { @@ -1166,23 +1167,12 @@ static int jt_del_peer_nid(int argc, char **argv) key_nid = optarg; break; case 'n': - if (idx >= LNET_MAX_INTERFACES) { - cYAML_build_error(-1, -1, "peer_ni", "del", - "too many interfaces", - &err_rc); - rc = LUSTRE_CFG_RC_BAD_PARAM; - goto failed; - } - nid[idx] = calloc(strlen(optarg) + 1, 1); - if (nid[idx] == NULL) { - cYAML_build_error(-1, -1, "peer_ni", "del", - "out of memory", - &err_rc); - rc = LUSTRE_CFG_RC_BAD_PARAM; + size = lustre_lnet_parse_nids(optarg, nids, size, + &nids2); + if (nids2 == NULL) goto failed; - } - strncpy(nid[idx], optarg, strlen(optarg)); - idx++; + nids = nids2; + rc = LUSTRE_CFG_RC_OUT_OF_MEM; break; case 'h': print_help(peer_cmds, "peer", "del"); @@ -1192,9 +1182,13 @@ static int jt_del_peer_nid(int argc, char **argv) } } - rc = lustre_lnet_del_peer_nid(key_nid, nid, -1, &err_rc); + rc = 
lustre_lnet_del_peer_nid(key_nid, nids, size, -1, &err_rc); failed: + for (i = 0; i < size; i++) + free(nids[i]); + free(nids); + if (rc != LUSTRE_CFG_RC_NO_ERR) cYAML_print_tree2file(stderr, err_rc); @@ -1208,10 +1202,12 @@ static int jt_show_peer(int argc, char **argv) char *key_nid = NULL; int rc, opt; struct cYAML *err_rc = NULL, *show_rc = NULL; + int detail = 0; const char *const short_options = "k:vh"; const struct option long_options[] = { { "key_nid", 1, NULL, 'k' }, + { "verbose", 0, NULL, 'v' }, { "help", 0, NULL, 'h' }, { NULL, 0, NULL, 0 }, }; @@ -1222,6 +1218,9 @@ static int jt_show_peer(int argc, char **argv) case 'k': key_nid = optarg; break; + case 'v': + detail = 1; + break; case 'h': print_help(peer_cmds, "peer", "add"); return 0; @@ -1230,7 +1229,7 @@ static int jt_show_peer(int argc, char **argv) } } - rc = lustre_lnet_show_peer(key_nid, -1, &show_rc, &err_rc); + rc = lustre_lnet_show_peer(key_nid, detail, -1, &show_rc, &err_rc); if (rc != LUSTRE_CFG_RC_NO_ERR) cYAML_print_tree2file(stderr, err_rc); -- 1.8.3.1