4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2012, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
39 #define DEBUG_SUBSYSTEM S_LNET
41 #include <lnet/lib-lnet.h>
/*
 * Allocate and initialize the per-CPT peer hash tables
 * (the_lnet.ln_peer_tables).
 *
 * NOTE(review): this chunk appears to have lines elided (return type,
 * error-path returns, closing braces are not visible); comments below
 * describe only the visible code.
 */
44 lnet_peer_tables_create(void)
46 struct lnet_peer_table *ptable;
47 struct list_head *hash;
/* One peer table per CPT partition. */
51 the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
53 if (the_lnet.ln_peer_tables == NULL) {
54 CERROR("Failed to allocate cpu-partition peer tables\n");
/* Initialize each per-CPT table: deathrow list, then hash buckets. */
58 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
59 INIT_LIST_HEAD(&ptable->pt_deathrow);
/* Bucket array is allocated on partition i's local memory. */
61 LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
62 LNET_PEER_HASH_SIZE * sizeof(*hash));
64 CERROR("Failed to create peer hash table\n");
/* Partial-failure path: tear down whatever was already set up. */
65 lnet_peer_tables_destroy();
69 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
70 INIT_LIST_HEAD(&hash[j]);
71 ptable->pt_hash = hash; /* sign of initialization */
/*
 * Free the per-CPT peer hash tables created by lnet_peer_tables_create().
 *
 * Safe to call on a partially initialized state: a NULL pt_hash marks a
 * table whose buckets were never allocated (see "sign of initialization"
 * in the create path). All deathrow lists and buckets are asserted empty
 * before freeing.
 *
 * NOTE(review): some lines appear elided from this chunk; comments
 * describe only the visible code.
 */
78 lnet_peer_tables_destroy(void)
80 struct lnet_peer_table *ptable;
81 struct list_head *hash;
/* Nothing to do if the tables were never allocated. */
85 if (the_lnet.ln_peer_tables == NULL)
88 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
89 hash = ptable->pt_hash;
90 if (hash == NULL) /* not initialized */
93 LASSERT(list_empty(&ptable->pt_deathrow));
/* Clear the pointer before freeing so a re-destroy is benign. */
95 ptable->pt_hash = NULL;
96 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
97 LASSERT(list_empty(&hash[j]));
99 LIBCFS_FREE(hash, LNET_PEER_HASH_SIZE * sizeof(*hash));
102 cfs_percpt_free(the_lnet.ln_peer_tables);
103 the_lnet.ln_peer_tables = NULL;
/*
 * Unhash peers from one per-CPT table and drop the hash table's
 * reference on each. If @ni is non-NULL, only peers bound to that NI
 * are removed; with @ni NULL every peer in the table is removed.
 *
 * Each removed peer is counted in pt_zombies; the count is drained by
 * lnet_destroy_peer_locked() as final references are released.
 * Caller is presumed to hold the appropriate lnet_net_lock — TODO
 * confirm (the "_locked" suffix and decref call suggest it).
 */
107 lnet_peer_table_cleanup_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable)
113 for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
114 list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
/* Skip peers that belong to a different NI when filtering. */
116 if (ni != NULL && ni != lp->lp_ni)
118 list_del_init(&lp->lp_hashlist);
119 /* Lose hash table's ref */
120 ptable->pt_zombies++;
121 lnet_peer_decref_locked(lp);
/*
 * Busy-wait (with half-second naps) until every zombie peer in @ptable
 * has been destroyed, i.e. pt_zombies reaches zero. The net lock is
 * dropped around each sleep so other threads can release their peer
 * references and drain the zombie count.
 *
 * The loop counter starting at 3 presumably throttles the "Waiting for
 * ... zombies" message to every few iterations — TODO confirm (the
 * condition is elided from this chunk).
 */
127 lnet_peer_table_deathrow_wait_locked(struct lnet_peer_table *ptable,
132 for (i = 3; ptable->pt_zombies != 0; i++) {
133 lnet_net_unlock(cpt_locked);
137 "Waiting for %d zombies on peer table\n",
/* Sleep half a second between checks. */
140 cfs_pause(cfs_time_seconds(1) >> 1);
141 lnet_net_lock(cpt_locked);
/*
 * Delete the routes for which peers in @ptable act as gateways, prior
 * to deleting the peers themselves (see lnet_peer_tables_cleanup()).
 * Peers with no router references (lp_rtr_refcount == 0) are skipped.
 *
 * The net lock is dropped around lnet_del_route(), which takes its own
 * locks; LNET_NIDNET(LNET_NID_ANY) matches routes on any network for
 * the given gateway NID.
 */
146 lnet_peer_table_del_rtrs_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable,
154 for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
155 list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
160 if (lp->lp_rtr_refcount == 0)
165 lnet_net_unlock(cpt_locked);
166 lnet_del_route(LNET_NIDNET(LNET_NID_ANY), lp_nid);
167 lnet_net_lock(cpt_locked);
/*
 * Remove peers from all per-CPT peer tables and free them.
 *
 * @ni: if non-NULL, only peers on this NI are cleaned up (NI shutdown);
 *      if NULL, all peers are removed — only legal during full LNet
 *      shutdown (asserted below).
 *
 * Three phases, each sweeping every CPT table:
 *   1. delete routes whose gateways are the doomed peers;
 *   2. unhash the peers (they become "zombies");
 *   3. wait for the zombie counts to drain, then collect the freed
 *      peer structs from each table's deathrow and LIBCFS_FREE them.
 */
173 lnet_peer_tables_cleanup(lnet_ni_t *ni)
176 struct lnet_peer_table *ptable;
178 struct list_head deathrow;
180 INIT_LIST_HEAD(&deathrow);
182 LASSERT(the_lnet.ln_shutdown || ni != NULL);
183 /* If just deleting the peers for a NI, get rid of any routes these
184 * peers are gateways for. */
185 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
187 lnet_peer_table_del_rtrs_locked(ni, ptable, i);
191 /* Start the process of moving the applicable peers to
193 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
195 lnet_peer_table_cleanup_locked(ni, ptable);
199 /* Cleanup all entries on deathrow. */
200 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
202 lnet_peer_table_deathrow_wait_locked(ptable, i);
/* Gather each table's deathrow onto the local list for freeing. */
203 list_splice_init(&ptable->pt_deathrow, &deathrow);
/* Free the collected peer structs outside the per-CPT loops. */
207 while (!list_empty(&deathrow)) {
208 lp = list_entry(deathrow.next, lnet_peer_t, lp_hashlist);
209 list_del(&lp->lp_hashlist);
210 LIBCFS_FREE(lp, sizeof(*lp));
/*
 * Final teardown of a peer whose last reference was just dropped.
 * Asserts the peer is fully quiesced (no refs, no router refs, empty
 * tx queue, unhashed, zero queued bytes), releases its NI reference,
 * then parks the struct on its table's deathrow for reuse or freeing
 * and decrements the table's zombie count.
 *
 * Caller presumably holds the net lock for lp->lp_cpt — TODO confirm
 * (implied by the "_locked" suffix and the unlocked list_add).
 */
215 lnet_destroy_peer_locked(lnet_peer_t *lp)
217 struct lnet_peer_table *ptable;
219 LASSERT(lp->lp_refcount == 0);
220 LASSERT(lp->lp_rtr_refcount == 0);
221 LASSERT(list_empty(&lp->lp_txq));
222 LASSERT(list_empty(&lp->lp_hashlist));
223 LASSERT(lp->lp_txqnob == 0);
225 ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
226 LASSERT(ptable->pt_number > 0);
229 lnet_ni_decref_locked(lp->lp_ni, lp->lp_cpt);
/* Recycle the struct via deathrow; lp_hashlist doubles as the link. */
232 list_add(&lp->lp_hashlist, &ptable->pt_deathrow);
233 LASSERT(ptable->pt_zombies > 0);
234 ptable->pt_zombies--;
/*
 * Look up @nid in @ptable's hash; on a hit, take a reference on the
 * peer before returning it to the caller (who must later drop it with
 * lnet_peer_decref_locked()). Must not be called during shutdown.
 *
 * NOTE(review): the return statements are elided from this chunk;
 * presumably returns the referenced peer or NULL on a miss.
 */
238 lnet_find_peer_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
240 struct list_head *peers;
243 LASSERT(!the_lnet.ln_shutdown);
245 peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
246 list_for_each_entry(lp, peers, lp_hashlist) {
247 if (lp->lp_nid == nid) {
248 lnet_peer_addref_locked(lp);
/*
 * Find-or-create the peer for @nid, returning it via @lpp with a
 * reference held. @cpt is the lock the caller holds; it may be
 * LNET_LOCK_EX (from router code), in which case the peer's home CPT
 * is computed from the NID.
 *
 * Fast path: hash lookup. Slow path: reuse a struct from deathrow or,
 * failing that, drop the lock and allocate on the peer's home CPT;
 * after re-acquiring the lock, re-check for shutdown and for a
 * concurrently-inserted duplicate before hashing the new peer.
 *
 * NOTE(review): many lines (error paths, returns, closing braces) are
 * elided from this chunk; comments describe only the visible code.
 */
257 lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt)
259 struct lnet_peer_table *ptable;
260 lnet_peer_t *lp = NULL;
266 if (the_lnet.ln_shutdown) /* it's shutting down */
269 /* cpt can be LNET_LOCK_EX if it's called from router functions */
270 cpt2 = cpt != LNET_LOCK_EX ? cpt : lnet_cpt_of_nid_locked(nid);
272 ptable = the_lnet.ln_peer_tables[cpt2];
273 lp = lnet_find_peer_locked(ptable, nid);
/* Prefer recycling a peer struct parked on deathrow. */
279 if (!list_empty(&ptable->pt_deathrow)) {
280 lp = list_entry(ptable->pt_deathrow.next,
281 lnet_peer_t, lp_hashlist);
282 list_del(&lp->lp_hashlist);
286 * take extra refcount in case another thread has shutdown LNet
287 * and destroyed locks and peer-table before I finish the allocation
290 lnet_net_unlock(cpt);
293 memset(lp, 0, sizeof(*lp));
/* No recycled struct: allocate fresh on the peer's home CPT. */
295 LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), cpt2, sizeof(*lp));
303 INIT_LIST_HEAD(&lp->lp_txq);
304 INIT_LIST_HEAD(&lp->lp_rtrq);
305 INIT_LIST_HEAD(&lp->lp_routes);
308 lp->lp_notifylnd = 0;
309 lp->lp_notifying = 0;
310 lp->lp_alive_count = 0;
311 lp->lp_timestamp = 0;
312 lp->lp_alive = !lnet_peers_start_down(); /* 1 bit!! */
313 lp->lp_last_alive = cfs_time_current(); /* assumes alive */
314 lp->lp_last_query = 0; /* haven't asked NI yet */
315 lp->lp_ping_timestamp = 0;
316 lp->lp_ping_feats = LNET_PING_FEAT_INVAL;
319 lp->lp_refcount = 2; /* 1 for caller; 1 for hash */
320 lp->lp_rtr_refcount = 0;
/* Lock was dropped for allocation: re-check shutdown and races. */
324 if (the_lnet.ln_shutdown) {
/* Another thread may have inserted this peer while we allocated. */
329 lp2 = lnet_find_peer_locked(ptable, nid);
335 lp->lp_ni = lnet_net2ni_locked(LNET_NIDNET(nid), cpt2);
336 if (lp->lp_ni == NULL) {
/* Credits are inherited from the interface the peer sits on. */
342 lp->lp_mintxcredits = lp->lp_ni->ni_peertxcredits;
344 lp->lp_minrtrcredits = lnet_peer_buffer_credits(lp->lp_ni);
346 list_add_tail(&lp->lp_hashlist,
347 &ptable->pt_hash[lnet_nid2peerhash(nid)]);
348 ptable->pt_version++;
/* Failure path: return the unused struct to deathrow. */
354 list_add(&lp->lp_hashlist, &ptable->pt_deathrow);
/*
 * Debug helper: look up (or create) the peer for @nid and dump its
 * state — refcount, aliveness, and tx/router credit counters — via
 * CDEBUG(D_WARNING). Aliveness reads "NA" unless the peer is a router
 * or peer-aliveness queries are enabled for it.
 */
360 lnet_debug_peer(lnet_nid_t nid)
362 char *aliveness = "NA";
367 cpt = lnet_cpt_of_nid(nid);
370 rc = lnet_nid2peer_locked(&lp, nid, cpt);
/* Lookup/creation failed: report and bail. */
372 lnet_net_unlock(cpt);
373 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
377 if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
378 aliveness = lp->lp_alive ? "up" : "down";
380 CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
381 libcfs_nid2str(lp->lp_nid), lp->lp_refcount,
382 aliveness, lp->lp_ni->ni_peertxcredits,
383 lp->lp_rtrcredits, lp->lp_minrtrcredits,
384 lp->lp_txcredits, lp->lp_mintxcredits, lp->lp_txqnob);
/* Drop the reference taken by lnet_nid2peer_locked(). */
386 lnet_peer_decref_locked(lp);
388 lnet_net_unlock(cpt);