/*
 * Copyright (C) 2012 Cray, Inc.
 *
 * Author: Igor Gorodetsky <iogordet@cray.com>
 * Author: Nic Henke <nic@cray.com>
 * Author: James Shimek <jshimek@cray.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include "gnilnd.h"

/* Primary entry points from LNET.  There are no guarantees against reentrance. */
lnd_t the_kgnilnd = {
        .lnd_type       = GNILND,
        .lnd_startup    = kgnilnd_startup,
        .lnd_shutdown   = kgnilnd_shutdown,
        .lnd_ctl        = kgnilnd_ctl,
        .lnd_send       = kgnilnd_send,
        .lnd_recv       = kgnilnd_recv,
        .lnd_eager_recv = kgnilnd_eager_recv,
        .lnd_query      = kgnilnd_query,
};

kgn_data_t      kgnilnd_data;
kgn_hssops_t    kgnilnd_hssops;
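
/* A note on stamps (inferred from the checks in the two pruning helpers
 * below): gnc_peerstamp identifies an instance of the peer - it changes
 * when the peer node restarts - while gnc_my_connstamp/gnc_peer_connstamp
 * identify an individual connection attempt within that instance.  The
 * helpers compare them to decide whether an existing conn is stale or a
 * newly arrived one is a duplicate. */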
/* needs write_lock on kgn_peer_conn_lock */
int
kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
        kgn_conn_t         *conn;
        struct list_head   *ctmp, *cnxt;
        int                 loopback;
        int                 count = 0;

        loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;

        list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
                conn = list_entry(ctmp, kgn_conn_t, gnc_list);

                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                if (conn->gnc_device != newconn->gnc_device)
                        continue;

                /* This is a two connection loopback - one talking to the other */
                if (loopback &&
                    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
                    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp) {
                        CDEBUG(D_NET, "skipping prune of %p, "
                                "loopback and matching stamps"
                                " connstamp "LPU64"("LPU64")"
                                " peerstamp "LPU64"("LPU64")\n",
                                conn, newconn->gnc_my_connstamp,
                                conn->gnc_peer_connstamp,
                                newconn->gnc_peer_connstamp,
                                conn->gnc_my_connstamp);
                        continue;
                }

                if (conn->gnc_peerstamp != newconn->gnc_peerstamp) {
                        LASSERTF(conn->gnc_peerstamp < newconn->gnc_peerstamp,
                                "conn 0x%p peerstamp "LPU64" >= "
                                "newconn 0x%p peerstamp "LPU64"\n",
                                conn, conn->gnc_peerstamp,
                                newconn, newconn->gnc_peerstamp);

                        CDEBUG(D_NET, "Closing stale conn nid: %s "
                                " peerstamp:"LPX64"("LPX64")\n",
                                libcfs_nid2str(peer->gnp_nid),
                                conn->gnc_peerstamp, newconn->gnc_peerstamp);
                } else {
                        LASSERTF(conn->gnc_peer_connstamp < newconn->gnc_peer_connstamp,
                                "conn 0x%p peer_connstamp "LPU64" >= "
                                "newconn 0x%p peer_connstamp "LPU64"\n",
                                conn, conn->gnc_peer_connstamp,
                                newconn, newconn->gnc_peer_connstamp);

                        CDEBUG(D_NET, "Closing stale conn nid: %s"
                                " connstamp:"LPU64"("LPU64")\n",
                                libcfs_nid2str(peer->gnp_nid),
                                conn->gnc_peer_connstamp, newconn->gnc_peer_connstamp);
                }

                count++;
                kgnilnd_close_conn_locked(conn, -ESTALE);
        }

        if (count != 0)
                CWARN("Closed %d stale conns to %s\n", count, libcfs_nid2str(peer->gnp_nid));

        return count;
}
int
kgnilnd_conn_isdup_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
        kgn_conn_t       *conn;
        struct list_head *tmp;
        int               loopback;

        loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;

        list_for_each(tmp, &peer->gnp_conns) {
                conn = list_entry(tmp, kgn_conn_t, gnc_list);

                CDEBUG(D_NET, "checking conn 0x%p for peer %s"
                        " lo %d new "LPU64" existing "LPU64
                        " new peer "LPU64" existing peer "LPU64
                        " new dev %p existing dev %p\n",
                        conn, libcfs_nid2str(peer->gnp_nid),
                        loopback,
                        newconn->gnc_peerstamp, conn->gnc_peerstamp,
                        newconn->gnc_peer_connstamp, conn->gnc_peer_connstamp,
                        newconn->gnc_device, conn->gnc_device);

                /* conn is in the process of closing */
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                /* 'newconn' is from an earlier version of 'peer'!!! */
                if (newconn->gnc_peerstamp < conn->gnc_peerstamp)
                        return 1;

                /* 'conn' is from an earlier version of 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->gnc_peerstamp > conn->gnc_peerstamp)
                        continue;

                /* Different devices are OK */
                if (conn->gnc_device != newconn->gnc_device)
                        continue;

                /* It's me connecting to myself */
                if (loopback &&
                    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
                    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp)
                        continue;

                /* 'newconn' is an earlier connection from 'peer'!!! */
                if (newconn->gnc_peer_connstamp < conn->gnc_peer_connstamp)
                        return 2;

                /* 'conn' is an earlier connection from 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->gnc_peer_connstamp > conn->gnc_peer_connstamp)
                        continue;

                /* 'newconn' has the SAME connection stamp; 'peer' isn't
                 * playing the game... */
                return 3;
        }

        return 0;
}
int
kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
{
        kgn_conn_t      *conn;
        gni_return_t     rrc;
        int              rc = 0;

        LASSERT (!in_interrupt());
        atomic_inc(&kgnilnd_data.kgn_nconns);

        /* divide by 2 to allow for complete reset and immediate reconnect */
        if (atomic_read(&kgnilnd_data.kgn_nconns) >= GNILND_MAX_CQID/2) {
                CERROR("Too many conns are live: %d > %d\n",
                        atomic_read(&kgnilnd_data.kgn_nconns), GNILND_MAX_CQID/2);
                atomic_dec(&kgnilnd_data.kgn_nconns);
                return -E2BIG;
        }

        LIBCFS_ALLOC(conn, sizeof(*conn));
        if (conn == NULL) {
                atomic_dec(&kgnilnd_data.kgn_nconns);
                return -ENOMEM;
        }

        LIBCFS_ALLOC(conn->gnc_tx_ref_table, GNILND_MAX_MSG_ID * sizeof(void *));
        if (conn->gnc_tx_ref_table == NULL) {
                CERROR("Can't allocate conn tx_ref_table\n");
                rc = -ENOMEM;
                goto failed;
        }

        atomic_set(&conn->gnc_refcount, 1);
        atomic_set(&conn->gnc_reaper_noop, 0);
        atomic_set(&conn->gnc_sched_noop, 0);
        INIT_LIST_HEAD(&conn->gnc_list);
        INIT_LIST_HEAD(&conn->gnc_hashlist);
        INIT_LIST_HEAD(&conn->gnc_schedlist);
        INIT_LIST_HEAD(&conn->gnc_fmaq);
        INIT_LIST_HEAD(&conn->gnc_mdd_list);
        spin_lock_init(&conn->gnc_list_lock);
        spin_lock_init(&conn->gnc_tx_lock);

        /* set tx id to nearly the end to make sure we find wrapping
         * issues early */
        conn->gnc_next_tx = (int) GNILND_MAX_MSG_ID - 10;

        /* if this fails, we have conflicts and MAX_TX is too large */
        CLASSERT(GNILND_MAX_MSG_ID < GNILND_MSGID_CLOSE);

        /* get a new unique CQ id for this conn */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        conn->gnc_my_connstamp = kgnilnd_data.kgn_connstamp++;
        conn->gnc_cqid = kgnilnd_get_cqid_locked();
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        if (conn->gnc_cqid == 0) {
                CERROR("Could not allocate unique CQ ID for conn 0x%p\n", conn);
                rc = -E2BIG;
                goto failed;
        }

        CDEBUG(D_NET, "alloc cqid %u for conn 0x%p\n",
                conn->gnc_cqid, conn);

        /* need to be set before gnc_ephandle to allow kgnilnd_destroy_conn_ep to
         * use gnc_device */
        conn->gnc_device = dev;

        conn->gnc_timeout = MAX(*kgnilnd_tunables.kgn_timeout,
                                GNILND_MIN_TIMEOUT);
        kgnilnd_update_reaper_timeout(conn->gnc_timeout);

        /* this is the ep_handle for doing SMSG & BTE */
        mutex_lock(&dev->gnd_cq_mutex);
        rrc = kgnilnd_ep_create(dev->gnd_handle, dev->gnd_snd_fma_cqh,
                                &conn->gnc_ephandle);
        mutex_unlock(&dev->gnd_cq_mutex);
        if (rrc != GNI_RC_SUCCESS) {
                rc = -ENETDOWN;
                goto failed;
        }

        CDEBUG(D_NET, "created conn 0x%p ep_hndl 0x%p\n",
               conn, conn->gnc_ephandle);

        /* add ref for EP canceling */
        kgnilnd_conn_addref(conn);
        atomic_inc(&dev->gnd_neps);

        *connp = conn;
        return 0;

failed:
        atomic_dec(&kgnilnd_data.kgn_nconns);
        LIBCFS_FREE(conn->gnc_tx_ref_table, GNILND_MAX_MSG_ID * sizeof(void *));
        LIBCFS_FREE(conn, sizeof(*conn));
        return rc;
}
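
/* Typical caller pattern (a sketch only; the real call sites live in the
 * connect/datagram path, outside this excerpt):
 *
 *      kgn_conn_t *conn;
 *      int rc = kgnilnd_create_conn(&conn, dev);
 *      if (rc != 0)
 *              return rc;   - -E2BIG (too many conns), -ENOMEM, or EP failure
 *
 * On success the conn holds two refs: the caller's and the EP-cancel ref. */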
/* needs to be called with kgn_peer_conn_lock held (read or write) */
kgn_conn_t *
kgnilnd_find_conn_locked(kgn_peer_t *peer)
{
        kgn_conn_t *conn = NULL;

        /* if we are in reset, this conn is going to die soon */
        if (unlikely(kgnilnd_data.kgn_in_reset)) {
                return NULL;
        }

        /* just return the first ESTABLISHED connection */
        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                /* kgnilnd_finish_connect doesn't put connections on the
                 * peer list until they are actually established */
                LASSERTF(conn->gnc_state >= GNILND_CONN_ESTABLISHED,
                        "found conn %p state %s on peer %p (%s)\n",
                        conn, kgnilnd_conn_state2str(conn), peer,
                        libcfs_nid2str(peer->gnp_nid));
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                return conn;
        }

        return NULL;
}
/* needs write_lock on kgn_peer_conn_lock held */
void
kgnilnd_find_or_create_conn_locked(kgn_peer_t *peer)
{
        kgn_conn_t      *conn;
        kgn_device_t    *dev = peer->gnp_net->gnn_dev;

        conn = kgnilnd_find_conn_locked(peer);
        if (conn != NULL)
                return;

        /* if the peer was previously connecting, check if we should
         * trigger another connection attempt yet. */
        if (time_before(jiffies, peer->gnp_reconnect_time))
                return;

        /* This check prevents us from creating a new connection to a peer while we are
         * still in the process of closing an existing connection to the peer. */
        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_ephandle != NULL) {
                        CDEBUG(D_NET, "Not connecting - non-null ephandle found, peer 0x%p->%s\n", peer,
                               libcfs_nid2str(peer->gnp_nid));
                        return;
                }
        }

        /* only start a new connection attempt if we are IDLE */
        if (peer->gnp_connecting != GNILND_PEER_IDLE)
                return;

        CDEBUG(D_NET, "starting connect to %s\n",
                libcfs_nid2str(peer->gnp_nid));
        peer->gnp_connecting = GNILND_PEER_CONNECT;
        kgnilnd_peer_addref(peer); /* extra ref for connd */

        spin_lock(&dev->gnd_connd_lock);
        list_add_tail(&peer->gnp_connd_list, &dev->gnd_connd_peers);
        spin_unlock(&dev->gnd_connd_lock);

        kgnilnd_schedule_dgram(dev);
        CDEBUG(D_NETTRACE, "scheduling new connect\n");
}
/* Caller is responsible for deciding if/when to call this */
void
kgnilnd_destroy_conn_ep(kgn_conn_t *conn)
{
        gni_return_t     rrc;
        gni_ep_handle_t  tmp_ep;

        /* only if we actually initialized it,
         * then set NULL to tell kgnilnd_destroy_conn to leave it alone */
        tmp_ep = xchg(&conn->gnc_ephandle, NULL);
        if (tmp_ep != NULL) {
                /* we never re-use the EP, so unbind is not needed */
                mutex_lock(&conn->gnc_device->gnd_cq_mutex);
                rrc = kgnilnd_ep_destroy(tmp_ep);
                mutex_unlock(&conn->gnc_device->gnd_cq_mutex);

                /* if this fails, it could hork up kgni smsg retransmit and others
                 * since we could free the SMSG mbox memory, etc. */
                LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d conn 0x%p ep 0x%p\n",
                         rrc, conn, conn->gnc_ephandle);

                atomic_dec(&conn->gnc_device->gnd_neps);

                /* clear out count added in kgnilnd_close_conn_locked
                 * conn will have a peer once it hits finish_connect, where it
                 * is the first spot we'll mark it ESTABLISHED as well */
                if (conn->gnc_peer) {
                        kgnilnd_admin_decref(conn->gnc_peer->gnp_dirty_eps);
                }

                /* drop ref for EP */
                kgnilnd_conn_decref(conn);
        }
}
void
kgnilnd_destroy_conn(kgn_conn_t *conn)
{
        LASSERTF(!in_interrupt() &&
                 !conn->gnc_scheduled &&
                 !conn->gnc_in_purgatory &&
                 conn->gnc_ephandle == NULL &&
                 list_empty(&conn->gnc_list) &&
                 list_empty(&conn->gnc_hashlist) &&
                 list_empty(&conn->gnc_schedlist) &&
                 list_empty(&conn->gnc_mdd_list),
                 "conn 0x%p->%s IRQ %d sched %d purg %d ep 0x%p lists %d/%d/%d/%d\n",
                 conn, conn->gnc_peer ? libcfs_nid2str(conn->gnc_peer->gnp_nid)
                                      : "<?>",
                 !!in_interrupt(), conn->gnc_scheduled,
                 conn->gnc_in_purgatory,
                 conn->gnc_ephandle,
                 list_empty(&conn->gnc_list),
                 list_empty(&conn->gnc_hashlist),
                 list_empty(&conn->gnc_schedlist),
                 list_empty(&conn->gnc_mdd_list));

        /* Tripping these is especially bad, as it means we have items on the
         * lists that didn't keep their refcount on the connection - or
         * somebody evil released their own */
        LASSERTF(list_empty(&conn->gnc_fmaq) &&
                 atomic_read(&conn->gnc_nlive_fma) == 0 &&
                 atomic_read(&conn->gnc_nlive_rdma) == 0,
                 "conn 0x%p fmaq %d@0x%p nfma %d nrdma %d\n",
                 conn, kgnilnd_count_list(&conn->gnc_fmaq), &conn->gnc_fmaq,
                 atomic_read(&conn->gnc_nlive_fma), atomic_read(&conn->gnc_nlive_rdma));

        CDEBUG(D_NET, "destroying conn %p ephandle %p error %d\n",
                conn, conn->gnc_ephandle, conn->gnc_error);

        /* if there is an FMA blk left here, we'll tear it down */
        if (conn->gnc_fma_blk) {
                kgnilnd_release_mbox(conn, 0);
        }

        if (conn->gnc_peer != NULL)
                kgnilnd_peer_decref(conn->gnc_peer);

        if (conn->gnc_tx_ref_table != NULL) {
                LIBCFS_FREE(conn->gnc_tx_ref_table,
                            GNILND_MAX_MSG_ID * sizeof(void *));
        }

        LIBCFS_FREE(conn, sizeof(*conn));
        atomic_dec(&kgnilnd_data.kgn_nconns);
}
/* peer_alive and peer_notify done in the style of the o2iblnd */
void
kgnilnd_peer_alive(kgn_peer_t *peer)
{
        set_mb(peer->gnp_last_alive, jiffies);
}
void
kgnilnd_peer_notify(kgn_peer_t *peer, int error)
{
        kgn_conn_t      *conn;
        kgn_net_t      **nets;
        kgn_net_t       *net;
        lnet_nid_t       peer_nid;
        int              notify = 0;
        int              nnets = 0;
        int              rc;
        int              i, j = 0;

        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DONT_NOTIFY))
                return;

        /* Tell LNet we are giving up on this peer - but only
         * if it isn't already reconnected or trying to reconnect */
        read_lock(&kgnilnd_data.kgn_peer_conn_lock);

        /* use kgnilnd_find_conn_locked to avoid any conns in the process of being nuked
         *
         * don't tell LNet if we are in reset - we assume that everyone will be able to
         * reconnect just fine */
        conn = kgnilnd_find_conn_locked(peer);

        CDEBUG(D_NETTRACE, "peer 0x%p->%s ting %d conn 0x%p, rst %d error %d\n",
               peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting, conn,
               kgnilnd_data.kgn_in_reset, error);

        if ((peer->gnp_connecting == GNILND_PEER_IDLE) &&
            (conn == NULL) &&
            (!kgnilnd_data.kgn_in_reset) &&
            (!kgnilnd_conn_clean_errno(error))) {
                notify = 1;
        }
        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        /* short circuit if we don't need to notify LNet */
        if (!notify)
                return;

        rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
        if (!rc) {
                /* don't do this if this fails since LNET is in shutdown or something else */
                return;
        }

        for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
                        /* if gnn_shutdown set for any net shutdown is in progress just return */
                        if (net->gnn_shutdown) {
                                up_read(&kgnilnd_data.kgn_net_rw_sem);
                                return;
                        }
                        nnets++;
                }
        }

        if (nnets == 0) {
                /* shutdown in progress most likely */
                up_read(&kgnilnd_data.kgn_net_rw_sem);
                return;
        }

        LIBCFS_ALLOC(nets, nnets * sizeof(*nets));
        if (nets == NULL) {
                up_read(&kgnilnd_data.kgn_net_rw_sem);
                CERROR("Failed to allocate nets[%d]\n", nnets);
                return;
        }

        for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
                        nets[j] = net;
                        kgnilnd_net_addref(net);
                        j++;
                }
        }
        up_read(&kgnilnd_data.kgn_net_rw_sem);

        for (i = 0; i < nnets; i++) {
                net = nets[i];

                peer_nid = kgnilnd_lnd2lnetnid(net->gnn_ni->ni_nid,
                                               peer->gnp_nid);

                CDEBUG(D_NET, "peer 0x%p->%s last_alive %lu (%lus ago)\n",
                       peer, libcfs_nid2str(peer_nid), peer->gnp_last_alive,
                       cfs_duration_sec(jiffies - peer->gnp_last_alive));

                lnet_notify(net->gnn_ni, peer_nid, 0, peer->gnp_last_alive);

                kgnilnd_net_decref(net);
        }

        LIBCFS_FREE(nets, nnets * sizeof(*nets));
}
/* need write_lock on kgn_peer_conn_lock */
void
kgnilnd_close_conn_locked(kgn_conn_t *conn, int error)
{
        kgn_peer_t      *peer = conn->gnc_peer;

        LASSERT(!in_interrupt());

        /* store error for tx completion */
        conn->gnc_error = error;
        peer->gnp_last_errno = error;

        /* use real error from peer if possible */
        if (error == -ECONNRESET) {
                error = conn->gnc_peer_error;
        }

        /* if we NETERROR, make sure it is rate limited */
        if (!kgnilnd_conn_clean_errno(error)) {
                CNETERR("closing conn to %s: error %d\n",
                        libcfs_nid2str(peer->gnp_nid), error);
        } else {
                CDEBUG(D_NET, "closing conn to %s: error %d\n",
                       libcfs_nid2str(peer->gnp_nid), error);
        }

        LASSERTF(conn->gnc_state == GNILND_CONN_ESTABLISHED,
                "conn %p to %s with bogus state %s\n", conn,
                libcfs_nid2str(conn->gnc_peer->gnp_nid),
                kgnilnd_conn_state2str(conn));
        LASSERT(!list_empty(&conn->gnc_hashlist));
        LASSERT(!list_empty(&conn->gnc_list));

        /* mark peer count here so any place the EP gets destroyed will
         * open up the peer count so that a new ESTABLISHED conn is then free
         * to send new messages -- sending before the previous EPs are destroyed
         * could end up with messages on the network for the old conn _after_
         * the new conn and break the mbox safety protocol */
        kgnilnd_admin_addref(conn->gnc_peer->gnp_dirty_eps);

        /* Remove from conn hash table: no new callbacks */
        list_del_init(&conn->gnc_hashlist);
        kgnilnd_data.kgn_conn_version++;

        /* if we are in reset, go right to CLOSED as there is no scheduler
         * thread to move from CLOSING to CLOSED */
        if (unlikely(kgnilnd_data.kgn_in_reset)) {
                conn->gnc_state = GNILND_CONN_CLOSED;
        } else {
                conn->gnc_state = GNILND_CONN_CLOSING;
        }

        /* leave on peer->gnp_conns to make sure we don't let the reaper
         * or others try to unlink this peer until the conn is fully
         * processed for closing */

        if (kgnilnd_check_purgatory_conn(conn)) {
                kgnilnd_add_purgatory_locked(conn, conn->gnc_peer);
        }

        /* Reset RX timeout to ensure we wait for an incoming CLOSE
         * for the full timeout.  If we get a CLOSE we know the
         * peer has stopped all RDMA.  Otherwise if we wait for
         * the full timeout we can also be sure all RDMA has stopped. */
        conn->gnc_last_rx = conn->gnc_last_rx_cq = jiffies;

        /* schedule sending CLOSE - if we are in quiesce, this adds to
         * gnd_ready_conns and allows us to find it in quiesce processing */
        kgnilnd_schedule_conn(conn);

        /* lose peer's ref */
        kgnilnd_conn_decref(conn);
        /* -1 for conn table */
        kgnilnd_conn_decref(conn);
}
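
/* Refcount bookkeeping in the close path (as coded above): the two
 * kgnilnd_conn_decref() calls drop the peer-list ref and the conn-hash ref;
 * kgnilnd_schedule_conn() presumably holds its own ref that keeps the conn
 * alive until kgnilnd_complete_closed_conn() runs, and the EP ref taken in
 * kgnilnd_create_conn() is dropped in kgnilnd_destroy_conn_ep(). */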
void
kgnilnd_close_conn(kgn_conn_t *conn, int error)
{
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        /* need to check the state here - this call is racy and we don't
         * know the state until after the lock is grabbed */
        if (conn->gnc_state == GNILND_CONN_ESTABLISHED) {
                kgnilnd_close_conn_locked(conn, error);
        }
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
}
void
kgnilnd_complete_closed_conn(kgn_conn_t *conn)
{
        kgn_tx_t        *tx, *txn;
        int              nlive = 0;
        int              nrdma = 0;
        int              nq_rdma = 0;
        int              logmsg;
        LIST_HEAD       (sinners);

        /* Dump log on cksum error - wait until complete phase to let
         * RX of error happen */
        if (*kgnilnd_tunables.kgn_checksum_dump &&
            (conn != NULL && conn->gnc_peer_error == -ENOKEY)) {
                libcfs_debug_dumplog();
        }

        /* _CLOSED set in kgnilnd_process_fmaq once we decide to
         * send the CLOSE or not */
        LASSERTF(conn->gnc_state == GNILND_CONN_CLOSED,
                 "conn 0x%p->%s with bad state %s\n",
                 conn, conn->gnc_peer ?
                        libcfs_nid2str(conn->gnc_peer->gnp_nid) :
                        "<?>",
                 kgnilnd_conn_state2str(conn));

        LASSERT(list_empty(&conn->gnc_hashlist));

        /* we've sent the close, start nuking */

        /* we don't use lists to track things that we can get out of the
         * tx_ref_table */

        /* need to hold locks for tx_list_state, sampling it is too racy:
         * - the lock actually protects tx != NULL, but we can't take the proper
         *   lock until we check tx_list_state, which would be too late and
         *   we could have the TX change under us.
         * gnd_rdmaq_lock and gnd_lock are not used together, so taking both
         * should be fine */
        spin_lock(&conn->gnc_device->gnd_rdmaq_lock);
        spin_lock(&conn->gnc_device->gnd_lock);

        for (nrdma = 0; nrdma < GNILND_MAX_MSG_ID; nrdma++) {
                tx = conn->gnc_tx_ref_table[nrdma];
                if (tx != NULL) {
                        /* only print the first error and if not CLOSE, we often don't see
                         * CQ events for that by the time we get here... and really don't care */
                        if (nlive || tx->tx_msg.gnm_type == GNILND_MSG_CLOSE)
                                tx->tx_state |= GNILND_TX_QUIET_ERROR;
                        nlive++;
                        GNIDBG_TX(D_NET, tx, "cleaning up on close, nlive %d", nlive);

                        /* don't worry about gnc_lock here as nobody else should be
                         * touching this conn */
                        kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
                        list_add_tail(&tx->tx_list, &sinners);
                }
        }

        spin_unlock(&conn->gnc_device->gnd_lock);
        spin_unlock(&conn->gnc_device->gnd_rdmaq_lock);

        /* nobody should have marked this as needing scheduling after
         * we called close - so only ref should be us handling it */
        LASSERTF(conn->gnc_scheduled == GNILND_CONN_PROCESS,
                 "conn 0x%p scheduled %d\n", conn, conn->gnc_scheduled);

        /* now reset a few to actual counters... */
        nrdma = atomic_read(&conn->gnc_nlive_rdma);
        nq_rdma = atomic_read(&conn->gnc_nq_rdma);

        if (!list_empty(&sinners)) {
                list_for_each_entry_safe(tx, txn, &sinners, tx_list) {
                        /* clear tx_list to make tx_add_list_locked happy */
                        list_del_init(&tx->tx_list);
                        /* The error codes determine if we hold onto the MDD */
                        kgnilnd_tx_done(tx, conn->gnc_error);
                }
        }

        logmsg = (nlive + nrdma + nq_rdma);

        if (logmsg) {
                if (conn->gnc_peer_error != 0) {
                        CNETERR("Closed conn 0x%p->%s (errno %d, peer errno %d): "
                                "canceled %d TX, %d/%d RDMA\n",
                                conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                                conn->gnc_error, conn->gnc_peer_error,
                                nlive, nq_rdma, nrdma);
                } else {
                        CNETERR("Closed conn 0x%p->%s (errno %d): "
                                "canceled %d TX, %d/%d RDMA\n",
                                conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                                conn->gnc_error,
                                nlive, nq_rdma, nrdma);
                }
        }

        kgnilnd_destroy_conn_ep(conn);

        /* Bug 765042 - race this with completing a new conn to same peer - we need
         * finish_connect to detach purgatory before we can do it ourselves here */
        CFS_RACE(CFS_FAIL_GNI_FINISH_PURG);

        /* now it is safe to remove from peer list - anyone looking at
         * gnp_conns now is free to unlink if not on purgatory */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);

        conn->gnc_state = GNILND_CONN_DONE;

        /* Decrement counter if we are marked by del_conn_or_peers for closing */
        if (conn->gnc_needs_closing)
                kgnilnd_admin_decref(kgnilnd_data.kgn_npending_conns);

        /* Remove from peer's list of valid connections if it is not in purgatory */
        if (!conn->gnc_in_purgatory) {
                list_del_init(&conn->gnc_list);
        }

        /* NB - only unlinking if we set pending in del_peer_locked from admin or
         * LNet ioctl path */
        if (kgnilnd_peer_active(conn->gnc_peer) &&
            conn->gnc_peer->gnp_pending_unlink &&
            kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
                kgnilnd_unlink_peer_locked(conn->gnc_peer);
        }

        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        /* I'm telling Mommy! - use peer_error if they initiated close */
        kgnilnd_peer_notify(conn->gnc_peer,
                            conn->gnc_error == -ECONNRESET ? conn->gnc_peer_error
                                                           : conn->gnc_error);
}
int
kgnilnd_set_conn_params(kgn_dgram_t *dgram)
{
        kgn_conn_t      *conn = dgram->gndg_conn;
        kgn_connreq_t   *connreq = &dgram->gndg_conn_in;
        kgn_gniparams_t *rem_param = &connreq->gncr_gnparams;
        gni_return_t     rrc;
        int              rc = 0;

        /* set timeout vals in conn early so we can use them for the NAK */

        /* use max of the requested and our timeout, peer will do the same */
        conn->gnc_timeout = MAX(conn->gnc_timeout, connreq->gncr_timeout);
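
        /* Worked example: if our timeout is 60s and the peer requested 40s,
         * both sides compute MAX(local, requested) over the same pair of
         * values, so both converge on 60s with no further negotiation. */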
        /* only ep_bind really mucks around with the CQ */
        /* only ep_bind if we are not connecting to ourself and the dstnid is not a wildcard. this check
         * is necessary as you can only bind an ep once and we must make sure we don't bind when already bound. */
        if (connreq->gncr_dstnid != LNET_NID_ANY && dgram->gndg_conn_out.gncr_dstnid != connreq->gncr_srcnid) {
                mutex_lock(&conn->gnc_device->gnd_cq_mutex);
                rrc = kgnilnd_ep_bind(conn->gnc_ephandle,
                                      connreq->gncr_gnparams.gnpr_host_id,
                                      conn->gnc_cqid);
                mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                if (rrc != GNI_RC_SUCCESS) {
                        rc = -ECONNABORTED;
                        goto return_out;
                }
        }

        rrc = kgnilnd_ep_set_eventdata(conn->gnc_ephandle, conn->gnc_cqid,
                                       connreq->gncr_gnparams.gnpr_cqid);
        if (rrc != GNI_RC_SUCCESS) {
                rc = -ECONNABORTED;
                goto cleanup_out;
        }

        /* Initialize SMSG */
        rrc = kgnilnd_smsg_init(conn->gnc_ephandle, &conn->gnpr_smsg_attr,
                                &connreq->gncr_gnparams.gnpr_smsg_attr);
        if (unlikely(rrc == GNI_RC_INVALID_PARAM)) {
                gni_smsg_attr_t *local = &conn->gnpr_smsg_attr;
                gni_smsg_attr_t *remote = &connreq->gncr_gnparams.gnpr_smsg_attr;
                /* help folks figure out if there is a tunable off, etc. */
                LCONSOLE_ERROR("SMSG attribute mismatch. Data from local/remote:"
                               " type %d/%d msg_maxsize %u/%u"
                               " mbox_maxcredit %u/%u. Please check kgni"
                               " logs for further data\n",
                               local->msg_type, remote->msg_type,
                               local->msg_maxsize, remote->msg_maxsize,
                               local->mbox_maxcredit, remote->mbox_maxcredit);
        }
        if (rrc != GNI_RC_SUCCESS) {
                rc = -ECONNABORTED;
                goto cleanup_out;
        }

        /* log this for help in debugging SMSG buffer re-use */
        CDEBUG(D_NET, "conn %p src %s dst %s smsg %p acquired"
               " local cqid %u SMSG %p->%u hndl "LPX64"."LPX64
               " remote cqid %u SMSG %p->%u hndl "LPX64"."LPX64"\n",
               conn, libcfs_nid2str(connreq->gncr_srcnid),
               libcfs_nid2str(connreq->gncr_dstnid),
               &conn->gnpr_smsg_attr,
               conn->gnc_cqid,
               conn->gnpr_smsg_attr.msg_buffer,
               conn->gnpr_smsg_attr.mbox_offset,
               conn->gnpr_smsg_attr.mem_hndl.qword1,
               conn->gnpr_smsg_attr.mem_hndl.qword2,
               rem_param->gnpr_cqid,
               rem_param->gnpr_smsg_attr.msg_buffer,
               rem_param->gnpr_smsg_attr.mbox_offset,
               rem_param->gnpr_smsg_attr.mem_hndl.qword1,
               rem_param->gnpr_smsg_attr.mem_hndl.qword2);

        conn->gnc_peerstamp = connreq->gncr_peerstamp;
        conn->gnc_peer_connstamp = connreq->gncr_connstamp;

        /* We update the reaper timeout once we have a valid conn and timeout */
        kgnilnd_update_reaper_timeout(GNILND_TO2KA(conn->gnc_timeout));

        return 0;

cleanup_out:
        rrc = kgnilnd_ep_unbind(conn->gnc_ephandle);
        /* not sure I can just let this fly */
        LASSERTF(rrc == GNI_RC_SUCCESS,
                 "bad rc from gni_ep_unbind trying to cleanup: %d\n", rrc);

return_out:
        LASSERTF(rc != 0, "SOFTWARE BUG: rc == 0\n");
        CERROR("Error setting connection params from %s: %d\n",
               libcfs_nid2str(connreq->gncr_srcnid), rc);
        return rc;
}
/* needs down_read on kgn_net_rw_sem held from before this call until
 * after the write_lock on kgn_peer_conn_lock - this ensures we stay sane
 * with kgnilnd_shutdown - it'll get the sem and set shutdown, then get the
 * kgn_peer_conn_lock to start del_peer'ing. If we hold the sem until after
 * kgn_peer_conn_lock is held, we guarantee that nobody calls
 * kgnilnd_add_peer_locked without checking gnn_shutdown */
int
kgnilnd_create_peer_safe(kgn_peer_t **peerp, lnet_nid_t nid, kgn_net_t *net)
{
        kgn_peer_t      *peer;
        int              rc;

        LASSERT(nid != LNET_NID_ANY);

        /* We don't pass the net around in the dgram anymore, so here is where we find it.
         * This will work unless it is in shutdown or the nid has a net that is invalid.
         * Either way an error code needs to be returned in that case.
         *
         * If the net passed in is not NULL then we can use it; this alleviates looking it
         * up when the calling function has access to the data. */
        if (net == NULL) {
                rc = kgnilnd_find_net(nid, &net);
                if (rc < 0)
                        return rc;
        } else {
                /* find_net adds a reference on the net; if we are not using
                 * it we must do it manually so the net references are
                 * correct when tearing down the net */
                kgnilnd_net_addref(net);
        }

        LIBCFS_ALLOC(peer, sizeof(*peer));
        if (peer == NULL) {
                kgnilnd_net_decref(net);
                return -ENOMEM;
        }
        peer->gnp_nid = nid;

        /* translate from nid to nic addr & store */
        rc = kgnilnd_nid_to_nicaddrs(LNET_NIDADDR(nid), 1, &peer->gnp_host_id);
        if (rc <= 0) {
                kgnilnd_net_decref(net);
                LIBCFS_FREE(peer, sizeof(*peer));
                return -ESRCH;
        }
        CDEBUG(D_NET, "peer 0x%p->%s -> NIC 0x%x\n", peer,
               libcfs_nid2str(nid), peer->gnp_host_id);

        atomic_set(&peer->gnp_refcount, 1);     /* 1 ref for caller */
        atomic_set(&peer->gnp_dirty_eps, 0);

        INIT_LIST_HEAD(&peer->gnp_list);
        INIT_LIST_HEAD(&peer->gnp_connd_list);
        INIT_LIST_HEAD(&peer->gnp_conns);
        INIT_LIST_HEAD(&peer->gnp_tx_queue);

        /* the first reconnect should happen immediately, so we leave
         * gnp_reconnect_interval set to 0 */

        LASSERTF(net != NULL, "peer 0x%p->%s with NULL net\n",
                 peer, libcfs_nid2str(nid));
        peer->gnp_net = net;

        /* must have kgn_net_rw_sem held for this... */
        if (net->gnn_shutdown) {
                /* shutdown has started already */
                kgnilnd_net_decref(net);
                LIBCFS_FREE(peer, sizeof(*peer));
                return -ESHUTDOWN;
        }

        atomic_inc(&kgnilnd_data.kgn_npeers);

        *peerp = peer;
        return 0;
}
void
kgnilnd_destroy_peer(kgn_peer_t *peer)
{
        CDEBUG(D_NET, "peer %s %p deleted\n",
               libcfs_nid2str(peer->gnp_nid), peer);
        LASSERTF(atomic_read(&peer->gnp_refcount) == 0,
                 "peer 0x%p->%s refs %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid),
                 atomic_read(&peer->gnp_refcount));
        LASSERTF(atomic_read(&peer->gnp_dirty_eps) == 0,
                 "peer 0x%p->%s dirty eps %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid),
                 atomic_read(&peer->gnp_dirty_eps));
        LASSERTF(peer->gnp_net != NULL, "peer %p (%s) with NULL net\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(!kgnilnd_peer_active(peer),
                 "peer 0x%p->%s still active\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE || peer->gnp_connecting == GNILND_PEER_KILL,
                 "peer 0x%p->%s, connecting %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
        LASSERTF(list_empty(&peer->gnp_conns),
                 "peer 0x%p->%s with conns\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_tx_queue),
                 "peer 0x%p->%s with TXs queued\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_connd_list),
                 "peer 0x%p->%s on connd list\n",
                 peer, libcfs_nid2str(peer->gnp_nid));

        /* NB a peer's connections keep a reference on their peer until
         * they are destroyed, so we can be assured that _all_ state to do
         * with this peer has been cleaned up when its refcount drops to
         * zero. */
        atomic_dec(&kgnilnd_data.kgn_npeers);
        kgnilnd_net_decref(peer->gnp_net);

        LIBCFS_FREE(peer, sizeof(*peer));
}
/* the conn might not have made it all the way through to a connected
 * state - but we need to purgatory any conn that a remote peer might
 * have seen through a posted dgram as well */
void
kgnilnd_add_purgatory_locked(kgn_conn_t *conn, kgn_peer_t *peer)
{
        kgn_mbox_info_t *mbox = NULL;

        /* NB - the caller should own conn by removing him from the
         * scheduler thread when finishing the close */

        LASSERTF(peer != NULL, "conn %p with NULL peer\n", conn);

        /* If this is still true, need to add the calls to unlink back in and
         * figure out how to close the hole on loopback conns */
        LASSERTF(kgnilnd_peer_active(peer), "can't use inactive peer %s (%p)"
                 " we'll never recover the resources\n",
                 libcfs_nid2str(peer->gnp_nid), peer);

        CDEBUG(D_NET, "conn %p peer %p dev %p\n", conn, peer,
               conn->gnc_device);

        /* add ref for mbox purgatory hold */
        kgnilnd_peer_addref(peer);
        kgnilnd_conn_addref(conn);
        conn->gnc_in_purgatory = 1;

        mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
        mbox->mbx_prev_nid = peer->gnp_nid;
        mbox->mbx_add_purgatory = jiffies;
        kgnilnd_release_mbox(conn, 1);

        LASSERTF(list_empty(&conn->gnc_mdd_list),
                 "conn 0x%p->%s with active purgatory hold MDD %d\n",
                 conn, libcfs_nid2str(peer->gnp_nid),
                 kgnilnd_count_list(&conn->gnc_mdd_list));
}
/* Instead of detaching everything from purgatory here we just mark the conn as needing
 * detach; when the reaper checks the conn the next time it will detach it.
 * Calling function requires write_lock held on kgn_peer_conn_lock */
void
kgnilnd_mark_for_detach_purgatory_all_locked(kgn_peer_t *peer)
{
        kgn_conn_t      *conn;

        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_in_purgatory && !conn->gnc_needs_detach) {
                        conn->gnc_needs_detach = 1;
                        kgnilnd_admin_addref(kgnilnd_data.kgn_npending_detach);
                }
        }
}
/* Calling function needs a write_lock held on kgn_peer_conn_lock */
void
kgnilnd_detach_purgatory_locked(kgn_conn_t *conn, struct list_head *conn_list)
{
        kgn_mbox_info_t *mbox = NULL;

        /* if needed, add the conn purgatory data to the list passed in */
        if (conn->gnc_in_purgatory) {
                CDEBUG(D_NET, "peer %p->%s purg_conn %p@%s mdd_list #tx %d\n",
                       conn->gnc_peer, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                       conn, kgnilnd_conn_state2str(conn),
                       kgnilnd_count_list(&conn->gnc_mdd_list));

                mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
                mbox->mbx_detach_of_purgatory = jiffies;

                /* conn->gnc_list is the entry point on peer->gnp_conns, so detaching it
                 * here removes it from the list of 'valid' peer connections.
                 * We put the current conn onto a list of conns to call kgnilnd_release_purgatory_locked()
                 * and as such the caller of kgnilnd_detach_purgatory_locked() now owns that conn, since it is not
                 * on the peer's conn_list anymore. */

                kgnilnd_peer_decref(conn->gnc_peer);
                list_del_init(&conn->gnc_list);

                /* NB - only unlinking if we set pending in del_peer_locked from admin or
                 * LNet ioctl path */
                if (kgnilnd_peer_active(conn->gnc_peer) &&
                    conn->gnc_peer->gnp_pending_unlink &&
                    kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
                        kgnilnd_unlink_peer_locked(conn->gnc_peer);
                }

                /* The reaper will not call detach unless the conn is fully through kgnilnd_complete_closed_conn.
                 * If the conn is not in a DONE state somehow we are attempting to detach even though
                 * the conn has not been fully cleaned up. If we detach while the conn is still closing
                 * we will end up with an orphaned connection that has a valid ep_handle, that is not on a
                 * conn list any longer. */
                LASSERTF(conn->gnc_state == GNILND_CONN_DONE, "Conn in invalid state %p@%s\n",
                         conn, kgnilnd_conn_state2str(conn));

                /* move from peer to the delayed release list */
                list_add_tail(&conn->gnc_list, conn_list);
        }
}
void
kgnilnd_release_purgatory_list(struct list_head *conn_list)
{
        kgn_device_t            *dev;
        kgn_conn_t              *conn, *connN;
        kgn_mdd_purgatory_t     *gmp, *gmpN;

        list_for_each_entry_safe(conn, connN, conn_list, gnc_list) {
                dev = conn->gnc_device;

                kgnilnd_release_mbox(conn, -1);
                conn->gnc_in_purgatory = 0;

                list_del_init(&conn->gnc_list);

                /* gnc_needs_detach is set in kgnilnd_del_conn_or_peer. It is used to keep track
                 * of conns that have been marked for detach by kgnilnd_del_conn_or_peer.
                 * The function uses kgn_npending_detach to verify the conn has
                 * actually been detached. */
                if (conn->gnc_needs_detach)
                        kgnilnd_admin_decref(kgnilnd_data.kgn_npending_detach);

                /* if this guy is really dead (we are doing release from reaper),
                 * make sure we tell LNet - if this is from other context,
                 * the checks in the function will prevent an errant
                 * notification */
                kgnilnd_peer_notify(conn->gnc_peer, conn->gnc_error);

                list_for_each_entry_safe(gmp, gmpN, &conn->gnc_mdd_list,
                                         gmp_list) {
                        CDEBUG(D_NET,
                               "dev %p releasing held mdd "LPX64"."LPX64"\n",
                               conn->gnc_device, gmp->gmp_map_key.qword1,
                               gmp->gmp_map_key.qword2);

                        atomic_dec(&dev->gnd_n_mdd_held);
                        kgnilnd_mem_mdd_release(conn->gnc_device->gnd_handle,
                                                &gmp->gmp_map_key);
                        /* ignoring the return code - if kgni/ghal can't find it
                         * it must be released already */

                        list_del_init(&gmp->gmp_list);
                        LIBCFS_FREE(gmp, sizeof(*gmp));
                }
                /* lose conn ref for purgatory */
                kgnilnd_conn_decref(conn);
        }
}
/* needs write_lock on kgnilnd_data.kgn_peer_conn_lock held */
void
kgnilnd_peer_increase_reconnect_locked(kgn_peer_t *peer)
{
        long    current_to;

        current_to = peer->gnp_reconnect_interval;

        /* we'll try to reconnect fast the first time, then back-off */
        if (current_to == 0) {
                peer->gnp_reconnect_time = jiffies - 1;
                current_to = *kgnilnd_tunables.kgn_min_reconnect_interval;
        } else {
                peer->gnp_reconnect_time = jiffies + cfs_time_seconds(current_to);
                /* add 50% of min timeout & retry */
                current_to += *kgnilnd_tunables.kgn_min_reconnect_interval / 2;
        }

        current_to = MIN(current_to,
                         *kgnilnd_tunables.kgn_max_reconnect_interval);

        peer->gnp_reconnect_interval = current_to;
        CDEBUG(D_NET, "peer %s can reconnect at %lu interval %lu\n",
               libcfs_nid2str(peer->gnp_nid), peer->gnp_reconnect_time,
               peer->gnp_reconnect_interval);
}
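
/* Worked example of the backoff above: with min_reconnect_interval M and
 * max_reconnect_interval X, successive failures produce intervals of
 * M, 1.5M, 2M, 2.5M, ... capped at X.  The very first attempt is allowed
 * immediately, since gnp_reconnect_time is set to jiffies - 1. */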
/* needs kgnilnd_data.kgn_peer_conn_lock held */
kgn_peer_t *
kgnilnd_find_peer_locked(lnet_nid_t nid)
{
        struct list_head *peer_list = kgnilnd_nid2peerlist(nid);
        kgn_peer_t       *peer;

        /* Chopping nid down to only NIDADDR using LNET_NIDADDR so we only
         * have a single peer per device instead of a peer per nid/net combo. */

        list_for_each_entry(peer, peer_list, gnp_list) {
                if (LNET_NIDADDR(nid) != LNET_NIDADDR(peer->gnp_nid))
                        continue;

                CDEBUG(D_NET, "got peer [%p] -> %s c %d (%d)\n",
                       peer, libcfs_nid2str(nid),
                       peer->gnp_connecting,
                       atomic_read(&peer->gnp_refcount));
                return peer;
        }

        return NULL;
}
/* need write_lock on kgn_peer_conn_lock */
void
kgnilnd_unlink_peer_locked(kgn_peer_t *peer)
{
        LASSERTF(list_empty(&peer->gnp_conns),
                 "peer 0x%p->%s with conns\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_tx_queue),
                 "peer 0x%p->%s with TXs queued\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(kgnilnd_peer_active(peer),
                 "peer 0x%p->%s not active\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        CDEBUG(D_NET, "unlinking peer 0x%p->%s\n",
               peer, libcfs_nid2str(peer->gnp_nid));

        list_del_init(&peer->gnp_list);
        kgnilnd_data.kgn_peer_version++;
        kgnilnd_admin_decref(kgnilnd_data.kgn_npending_unlink);
        /* lose peerlist's ref */
        kgnilnd_peer_decref(peer);
}
int
kgnilnd_get_peer_info(int index,
                      kgn_peer_t **found_peer,
                      lnet_nid_t *id, __u32 *nic_addr,
                      int *refcount, int *connecting)
{
        struct list_head *ptmp;
        kgn_peer_t       *peer;
        int               i;
        int               rc = -ENOENT;

        read_lock(&kgnilnd_data.kgn_peer_conn_lock);

        for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {

                list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
                        peer = list_entry(ptmp, kgn_peer_t, gnp_list);

                        if (peer->gnp_nid != *id)
                                continue;

                        if (index-- > 0)
                                continue;

                        CDEBUG(D_NET, "found peer %p (%s) at index %d\n",
                               peer, libcfs_nid2str(peer->gnp_nid), index);

                        *found_peer  = peer;
                        *id          = peer->gnp_nid;
                        *nic_addr    = peer->gnp_host_id;
                        *refcount    = atomic_read(&peer->gnp_refcount);
                        *connecting  = peer->gnp_connecting;

                        rc = 0;
                        goto out;
                }
        }
out:
        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        if (rc != 0)
                CDEBUG(D_NET, "no gni peer at index %d\n", index);
        return rc;
}
/* requires write_lock on kgn_peer_conn_lock held */
void
kgnilnd_add_peer_locked(lnet_nid_t nid, kgn_peer_t *new_stub_peer, kgn_peer_t **peerp)
{
        kgn_peer_t      *peer, *peer2;

        LASSERTF(new_stub_peer != NULL, "bad stub peer for nid %s\n",
                 libcfs_nid2str(nid));

        peer2 = kgnilnd_find_peer_locked(nid);
        if (peer2 != NULL) {
                /* A peer was created during the lock transition, so drop
                 * the new one we created */
                kgnilnd_peer_decref(new_stub_peer);
                peer = peer2;
        } else {
                peer = new_stub_peer;
                /* peer table takes existing ref on peer */

                LASSERTF(!kgnilnd_peer_active(peer),
                         "peer 0x%p->%s already in peer table\n",
                         peer, libcfs_nid2str(peer->gnp_nid));
                list_add_tail(&peer->gnp_list,
                              kgnilnd_nid2peerlist(nid));
                kgnilnd_data.kgn_peer_version++;
        }

        LASSERTF(peer->gnp_net != NULL, "peer 0x%p->%s with NULL net\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        *peerp = peer;
}
int
kgnilnd_add_peer(kgn_net_t *net, lnet_nid_t nid, kgn_peer_t **peerp)
{
        kgn_peer_t      *peer;
        int              rc;

        if (nid == LNET_NID_ANY)
                return -EINVAL;

        /* NB - this will not block during normal operations -
         * the only writer of this is in the startup/shutdown path. */
        rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
        if (!rc) {
                return -ESHUTDOWN;
        }

        rc = kgnilnd_create_peer_safe(&peer, nid, net);
        if (rc != 0) {
                up_read(&kgnilnd_data.kgn_net_rw_sem);
                return rc;
        }

        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        up_read(&kgnilnd_data.kgn_net_rw_sem);

        kgnilnd_add_peer_locked(nid, peer, peerp);

        CDEBUG(D_NET, "peer 0x%p->%s connecting %d\n",
               *peerp, libcfs_nid2str((*peerp)->gnp_nid),
               (*peerp)->gnp_connecting);

        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
        return 0;
}
/* needs write_lock on kgn_peer_conn_lock */
void
kgnilnd_cancel_peer_connect_locked(kgn_peer_t *peer, struct list_head *zombies)
{
        kgn_tx_t        *tx, *txn;

        /* we do care about state of gnp_connecting - we could be between
         * reconnect attempts, so try to find the dgram and cancel the TX
         * anyways. If we are in the process of posting DON'T do anything;
         * once it fails or succeeds we can nuke the connect attempt.
         * We have no idea where in kgnilnd_post_dgram we are so we can't
         * attempt to cancel until the function is done. */

        /* make sure peer isn't in process of connecting or waiting for connect */
        spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
        if (!(list_empty(&peer->gnp_connd_list))) {
                list_del_init(&peer->gnp_connd_list);
                /* remove connd ref */
                kgnilnd_peer_decref(peer);
        }
        spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);

        if (peer->gnp_connecting == GNILND_PEER_POSTING || peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH) {
                peer->gnp_connecting = GNILND_PEER_NEEDS_DEATH;
                /* We are in process of posting right now; the xchg set it up for us to
                 * cancel the connect so we are finished for now */
        } else {
                /* no need for exchange - we have the peer lock and it is ready for us to nuke */
                LASSERTF(peer->gnp_connecting != GNILND_PEER_POSTING,
                         "Peer in invalid state 0x%p->%s, connecting %d\n",
                         peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
                peer->gnp_connecting = GNILND_PEER_IDLE;
                set_mb(peer->gnp_last_dgram_errno, -ETIMEDOUT);
                kgnilnd_find_and_cancel_dgram(peer->gnp_net->gnn_dev,
                                              peer->gnp_nid);
        }

        /* The least we can do is nuke the tx's no matter what.... */
        list_for_each_entry_safe(tx, txn, &peer->gnp_tx_queue, tx_list) {
                kgnilnd_tx_del_state_locked(tx, peer, NULL,
                                            GNILND_TX_ALLOCD);
                list_add_tail(&tx->tx_list, zombies);
        }
}
/* needs write_lock on kgn_peer_conn_lock */
void
kgnilnd_del_peer_locked(kgn_peer_t *peer, int error)
{
        /* this peer could be passive and only held for purgatory,
         * take a ref to ensure it doesn't disappear in this function */
        kgnilnd_peer_addref(peer);

        CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);

        /* if purgatory release cleared it out, don't try again */
        if (kgnilnd_peer_active(peer)) {
                /* always do this to allow kgnilnd_start_connect and
                 * kgnilnd_finish_connect to catch this before they
                 * wrap up their operations */
                if (kgnilnd_can_unlink_peer_locked(peer)) {
                        /* already released purgatory, so only active
                         * conns are left */
                        kgnilnd_unlink_peer_locked(peer);
                } else {
                        kgnilnd_close_peer_conns_locked(peer, error);
                        /* peer unlinks itself when last conn is closed */
                }
        }

        /* we are done, release back to the wild */
        kgnilnd_peer_decref(peer);
}
int
kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
                         int error)
{
        LIST_HEAD               (souls);
        LIST_HEAD               (zombies);
        struct list_head        *ptmp, *pnxt;
        kgn_peer_t              *peer;
        int                     lo;
        int                     hi;
        int                     i;
        int                     rc = -ENOENT;

        write_lock(&kgnilnd_data.kgn_peer_conn_lock);

        if (nid != LNET_NID_ANY)
                lo = hi = kgnilnd_nid2peerlist(nid) - kgnilnd_data.kgn_peers;
        else {
                lo = 0;
                hi = *kgnilnd_tunables.kgn_peer_hash_size - 1;
                /* wildcards always succeed */
                rc = 0;
        }

        for (i = lo; i <= hi; i++) {
                list_for_each_safe(ptmp, pnxt, &kgnilnd_data.kgn_peers[i]) {
                        peer = list_entry(ptmp, kgn_peer_t, gnp_list);

                        LASSERTF(peer->gnp_net != NULL,
                                 "peer %p (%s) with NULL net\n",
                                 peer, libcfs_nid2str(peer->gnp_nid));

                        if (net != NULL && peer->gnp_net != net)
                                continue;

                        if (!(nid == LNET_NID_ANY || LNET_NIDADDR(peer->gnp_nid) == LNET_NIDADDR(nid)))
                                continue;

                        /* In both cases, we want to stop any in-flight
                         * connect attempts */
                        kgnilnd_cancel_peer_connect_locked(peer, &zombies);

                        switch (command) {
                        case GNILND_DEL_CONN:
                                kgnilnd_close_peer_conns_locked(peer, error);
                                break;
                        case GNILND_DEL_PEER:
                                peer->gnp_pending_unlink = 1;
                                kgnilnd_admin_addref(kgnilnd_data.kgn_npending_unlink);
                                kgnilnd_mark_for_detach_purgatory_all_locked(peer);
                                kgnilnd_del_peer_locked(peer, error);
                                break;
                        case GNILND_CLEAR_PURGATORY:
                                /* Mark everything ready for detach - the reaper will clean up
                                 * once we release the kgn_peer_conn_lock */
                                kgnilnd_mark_for_detach_purgatory_all_locked(peer);
                                peer->gnp_last_errno = -EISCONN;
                                /* clear reconnect so he can reconnect soon */
                                peer->gnp_reconnect_time = 0;
                                peer->gnp_reconnect_interval = 0;
                                break;
                        default:
                                CERROR("bad command %d\n", command);
                                break;
                        }
                        /* we matched something */
                        rc = 0;
                }
        }

        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        /* release all of the souls found held in purgatory */
        kgnilnd_release_purgatory_list(&souls);

        /* nuke the canceled peer TXs */
        kgnilnd_txlist_done(&zombies, error);

        /* This function does not return until the commands it initiated have completed,
         * since they have to work their way through the other threads. In the case of shutdown
         * threads are not woken up until after this call is initiated so we cannot wait, we just
         * need to return. The same applies for stack reset - we shouldn't wait as the reset thread
         * handles the cleanup. */
        CFS_RACE(CFS_FAIL_GNI_RACE_RESET);

        if (error == -ENOTRECOVERABLE || error == -ESHUTDOWN) {
                return rc;
        }

        i = 4;
        while (atomic_read(&kgnilnd_data.kgn_npending_conns)   ||
               atomic_read(&kgnilnd_data.kgn_npending_detach)  ||
               atomic_read(&kgnilnd_data.kgn_npending_unlink)) {

                cfs_pause(cfs_time_seconds(1));
                i++;

                CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, "Waiting on %d peers %d closes %d detaches\n",
                       atomic_read(&kgnilnd_data.kgn_npending_unlink),
                       atomic_read(&kgnilnd_data.kgn_npending_conns),
                       atomic_read(&kgnilnd_data.kgn_npending_detach));
        }

        return rc;
}
kgn_conn_t *
kgnilnd_get_conn_by_idx(int index)
{
        kgn_peer_t       *peer;
        struct list_head *ptmp;
        kgn_conn_t       *conn;
        struct list_head *ctmp;
        int               i;

        for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
                read_lock(&kgnilnd_data.kgn_peer_conn_lock);
                list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {

                        peer = list_entry(ptmp, kgn_peer_t, gnp_list);

                        list_for_each(ctmp, &peer->gnp_conns) {
                                conn = list_entry(ctmp, kgn_conn_t, gnc_list);

                                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                                        continue;

                                if (index-- > 0)
                                        continue;

                                CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
                                       libcfs_nid2str(conn->gnc_peer->gnp_nid),
                                       atomic_read(&conn->gnc_refcount));
                                kgnilnd_conn_addref(conn);
                                read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
                                return conn;
                        }
                }
                read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
        }

        return NULL;
}
int
kgnilnd_get_conn_info(kgn_peer_t *peer,
                      int *device_id, __u64 *peerstamp,
                      int *tx_seq, int *rx_seq,
                      int *fmaq_len, int *nfma, int *nrdma)
{
        kgn_conn_t      *conn;
        int              rc = 0;

        read_lock(&kgnilnd_data.kgn_peer_conn_lock);

        conn = kgnilnd_find_conn_locked(peer);
        if (conn == NULL) {
                rc = -ENOENT;
                goto out;
        }

        *device_id = conn->gnc_device->gnd_host_id;
        *peerstamp = conn->gnc_peerstamp;
        *tx_seq = conn->gnc_tx_seq;
        *rx_seq = conn->gnc_rx_seq;
        *fmaq_len = kgnilnd_count_list(&conn->gnc_fmaq);
        *nfma = atomic_read(&conn->gnc_nlive_fma);
        *nrdma = atomic_read(&conn->gnc_nlive_rdma);
out:
        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
        return rc;
}
/* needs write_lock on kgn_peer_conn_lock */
int
kgnilnd_close_peer_conns_locked(kgn_peer_t *peer, int why)
{
        kgn_conn_t         *conn;
        struct list_head   *ctmp, *cnxt;
        int                 count = 0;

        list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
                conn = list_entry(ctmp, kgn_conn_t, gnc_list);

                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                count++;
                /* we mark gnc_needs_closing and increment kgn_npending_conns so that
                 * kgnilnd_del_conn_or_peer can wait on the other threads closing
                 * and cleaning up the connection. */
                if (!conn->gnc_needs_closing) {
                        conn->gnc_needs_closing = 1;
                        kgnilnd_admin_addref(kgnilnd_data.kgn_npending_conns);
                }
                kgnilnd_close_conn_locked(conn, why);
        }

        return count;
}
int
kgnilnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
{
        struct libcfs_ioctl_data *data = arg;
        kgn_net_t                *net = ni->ni_data;
        int                       rc = -EINVAL;

        LASSERT(ni == net->gnn_ni);

        switch (cmd) {
        case IOC_LIBCFS_GET_PEER: {
                lnet_nid_t      nid = 0;
                kgn_peer_t     *peer = NULL;
                __u32           nic_addr = 0;
                __u64           peerstamp = 0;
                int             peer_refcount = 0, peer_connecting = 0;
                int             device_id = 0;
                int             tx_seq = 0, rx_seq = 0;
                int             fmaq_len = 0, nfma = 0, nrdma = 0;

                rc = kgnilnd_get_peer_info(data->ioc_count, &peer,
                                           &nid, &nic_addr, &peer_refcount,
                                           &peer_connecting);
                if (rc)
                        break;

                /* LNET_MKNID is used to mask from lnet the multiplexing/demultiplexing of connections and peers.
                 * LNET assumes a conn and peer per net; the LNET_MKNID/LNET_NIDADDR allows us to let LNet see what it
                 * wants to see instead of the underlying network that is being used to send the data. */
                data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(nid));
                data->ioc_flags  = peer_connecting;
                data->ioc_count  = peer_refcount;

                rc = kgnilnd_get_conn_info(peer, &device_id, &peerstamp,
                                           &tx_seq, &rx_seq, &fmaq_len,
                                           &nfma, &nrdma);

                /* This is allowable - a persistent peer could not
                 * have a connection */
                if (rc) {
                        /* flag to indicate we are not connected -
                         * need to print as such */
                        data->ioc_flags |= (1<<16);
                        rc = 0;
                } else {
                        data->ioc_net    = device_id;
                        data->ioc_u64[0] = peerstamp;
                        data->ioc_u32[0] = fmaq_len;
                        data->ioc_u32[1] = nfma;
                        data->ioc_u32[2] = tx_seq;
                        data->ioc_u32[3] = rx_seq;
                        data->ioc_u32[4] = nrdma;
                }
                break;
        }
        case IOC_LIBCFS_ADD_PEER: {
                /* just dummy value to allow using common interface */
                kgn_peer_t      *peer;
                rc = kgnilnd_add_peer(net, data->ioc_nid, &peer);
                break;
        }
        case IOC_LIBCFS_DEL_PEER: {
                /* NULL is passed in so it affects all peers in existence without regard to network,
                 * as the peer may not exist on the network LNET believes it to be on. */
                rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
                                              GNILND_DEL_PEER, -EUCLEAN);
                break;
        }
        case IOC_LIBCFS_GET_CONN: {
                kgn_conn_t *conn = kgnilnd_get_conn_by_idx(data->ioc_count);

                if (conn == NULL) {
                        rc = -ENOENT;
                        break;
                }

                rc = 0;
                /* LNET_MKNID is used to build the correct address based on what LNET wants to see instead of
                 * the generic connection that is used to send the data. */
                data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(conn->gnc_peer->gnp_nid));
                data->ioc_u32[0] = conn->gnc_device->gnd_id;
                kgnilnd_conn_decref(conn);
                break;
        }
        case IOC_LIBCFS_CLOSE_CONNECTION: {
                /* use error = -ENETRESET to indicate it was lctl disconnect */
                /* NULL is passed in so it affects all the nets, as the connection is virtual
                 * and may not exist on the network LNET believes it to be on. */
                rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
                                              GNILND_DEL_CONN, -ENETRESET);
                break;
        }
        case IOC_LIBCFS_PUSH_CONNECTION: {
                /* we use this to flush purgatory */
                rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
                                              GNILND_CLEAR_PURGATORY, -EUCLEAN);
                break;
        }
        case IOC_LIBCFS_REGISTER_MYNID: {
                /* Ignore if this is a noop */
                if (data->ioc_nid == ni->ni_nid) {
                        rc = 0;
                } else {
                        CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
                               libcfs_nid2str(data->ioc_nid),
                               libcfs_nid2str(ni->ni_nid));
                        rc = -EINVAL;
                }
                break;
        }
        default:
                break;
        }

        return rc;
}
void
kgnilnd_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
{
        kgn_net_t               *net = ni->ni_data;
        kgn_tx_t                *tx;
        kgn_peer_t              *peer = NULL;
        kgn_conn_t              *conn = NULL;
        lnet_process_id_t        id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID};

        /* I expect to find him, so only take a read lock */
        read_lock(&kgnilnd_data.kgn_peer_conn_lock);

        peer = kgnilnd_find_peer_locked(nid);
        if (peer != NULL) {
                /* LIE if in a quiesce - we will update the timeouts after,
                 * but we don't want sends failing during it */
                if (kgnilnd_data.kgn_quiesce_trigger) {
                        *when = jiffies;
                        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
                        goto out;
                }

                /* Update to best guess, might refine on later checks */
                *when = peer->gnp_last_alive;

                /* we have a peer, how about a conn? */
                conn = kgnilnd_find_conn_locked(peer);

                if (conn == NULL) {
                        /* if there is no conn, check peer last errno to see if clean disconnect
                         * - if it was, we lie to LNet because we believe a TX would complete
                         * on reconnect */
                        if (kgnilnd_conn_clean_errno(peer->gnp_last_errno)) {
                                *when = jiffies;
                        }
                        /* we still want to fire a TX and new conn in this case */
                } else {
                        /* gnp_last_alive is valid, run for the hills */
                        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
                        goto out;
                }
        }
        /* if we get here, either we have no peer or no conn for him, so fire off
         * new TX to trigger conn setup */
        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        /* if we couldn't find him, we'll fire up a TX and get connected -
         * if we don't do this, after ni_peer_timeout, LNet will declare him dead.
         * So really we treat kgnilnd_query as a bit of a 'connect now' type
         * event because it'll only do this when it wants to send
         *
         * Use a real TX for this to get the proper gnp_tx_queue behavior, etc
         * normally we'd use kgnilnd_send_ctlmsg for this, but we don't really
         * care that this goes out quickly since we already know we need a new conn
         * formed */
        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
                return;

        tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, ni->ni_nid);
        if (tx != NULL) {
                kgnilnd_launch_tx(tx, net, &id);
        }
out:
        CDEBUG(D_NETTRACE, "peer 0x%p->%s when %lu\n", peer,
               libcfs_nid2str(nid), *when);
}
int
kgnilnd_dev_init(kgn_device_t *dev)
{
        gni_return_t    rrc;
        int             rc = 0;
        unsigned int    cq_size;

        /* size of these CQs should be able to accommodate the outgoing
         * RDMA and SMSG transactions. Since we really don't know what we
         * really need here, we'll take credits * 2 * 3 to allow a bunch.
         * We need to dig into this more with the performance work. */
        cq_size = *kgnilnd_tunables.kgn_credits * 2 * 3;
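
        /* Worked example: assuming the kgn_credits tunable is at a default
         * of 256 (an assumption - check the module parameters for the real
         * value), each send CQ is created with 256 * 2 * 3 = 1536 entries. */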
        rrc = kgnilnd_cdm_create(dev->gnd_id, *kgnilnd_tunables.kgn_ptag,
                                 GNILND_COOKIE, 0,
                                 &dev->gnd_domain);
        if (rrc != GNI_RC_SUCCESS) {
                CERROR("Can't create CDM %d (%d)\n", dev->gnd_id, rrc);
                rc = -ENODEV;
                goto failed;
        }

        rrc = kgnilnd_cdm_attach(dev->gnd_domain, dev->gnd_id,
                                 &dev->gnd_host_id, &dev->gnd_handle);
        if (rrc != GNI_RC_SUCCESS) {
                CERROR("Can't attach CDM to device %d (%d)\n",
                       dev->gnd_id, rrc);
                rc = -ENODEV;
                goto failed;
        }

        rc = kgnilnd_setup_nic_translation(dev->gnd_host_id);
        if (rc != 0) {
                rc = -ENODEV;
                goto failed;
        }

        /* only dev 0 gets the errors - no need to reset the stack twice
         * - this works because we have a single PTAG, if we had more
         * then we'd need to have multiple handlers */
        if (dev->gnd_id == 0) {
                rrc = kgnilnd_subscribe_errors(dev->gnd_handle, GNI_ERRMASK_CRITICAL,
                                               0, NULL, kgnilnd_critical_error,
                                               &dev->gnd_err_handle);
                if (rrc != GNI_RC_SUCCESS) {
                        CERROR("Can't subscribe for errors on device %d: rc %d\n",
                               dev->gnd_id, rrc);
                        rc = -ENODEV;
                        goto failed;
                }

                rc = kgnilnd_set_quiesce_callback(dev->gnd_handle,
                                                  kgnilnd_quiesce_end_callback);
                if (rc != GNI_RC_SUCCESS) {
                        CERROR("Can't subscribe for quiesce callback on device %d: rc %d\n",
                               dev->gnd_id, rc);
                        rc = -ENODEV;
                        goto failed;
                }
        }

        rc = kgnilnd_nicaddr_to_nid(dev->gnd_host_id, &dev->gnd_nid);
        if (rc < 0) {
                /* log messages during startup */
                if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
                        CERROR("couldn't translate host_id 0x%x to nid. rc %d\n",
                               dev->gnd_host_id, rc);
                }
                rc = -ESRCH;
                goto failed;
        }
        CDEBUG(D_NET, "NIC %x -> NID %d\n", dev->gnd_host_id, dev->gnd_nid);

        rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
                                0, kgnilnd_device_callback,
                                dev->gnd_id, &dev->gnd_snd_rdma_cqh);
        if (rrc != GNI_RC_SUCCESS) {
                CERROR("Can't create rdma send cq size %u for device "
                       "%d (%d)\n", cq_size, dev->gnd_id, rrc);
                rc = -EINVAL;
                goto failed;
        }

        rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
                                0, kgnilnd_device_callback, dev->gnd_id,
                                &dev->gnd_snd_fma_cqh);
        if (rrc != GNI_RC_SUCCESS) {
                CERROR("Can't create fma send cq size %u for device %d (%d)\n",
                       cq_size, dev->gnd_id, rrc);
                rc = -EINVAL;
                goto failed;
        }

        /* This one we size differently - overflows are possible and it needs to be
         * sized based on machine size */
        rrc = kgnilnd_cq_create(dev->gnd_handle,
                                *kgnilnd_tunables.kgn_fma_cq_size,
                                0, kgnilnd_device_callback, dev->gnd_id,
                                &dev->gnd_rcv_fma_cqh);
        if (rrc != GNI_RC_SUCCESS) {
                CERROR("Can't create fma cq size %d for device %d (%d)\n",
                       *kgnilnd_tunables.kgn_fma_cq_size, dev->gnd_id, rrc);
                rc = -EINVAL;
                goto failed;
        }

        return 0;

failed:
        kgnilnd_dev_fini(dev);
        return rc;
}
void
kgnilnd_dev_fini(kgn_device_t *dev)
{
        gni_return_t    rrc;

        /* At quiesce or rest time, need to loop through and clear gnd_ready_conns? */
        LASSERTF(list_empty(&dev->gnd_ready_conns) &&
                 list_empty(&dev->gnd_map_tx) &&
                 list_empty(&dev->gnd_rdmaq),
                 "dev 0x%p ready_conns %d@0x%p map_tx %d@0x%p rdmaq %d@0x%p\n",
                 dev, kgnilnd_count_list(&dev->gnd_ready_conns), &dev->gnd_ready_conns,
                 kgnilnd_count_list(&dev->gnd_map_tx), &dev->gnd_map_tx,
                 kgnilnd_count_list(&dev->gnd_rdmaq), &dev->gnd_rdmaq);

        /* These should follow from tearing down all connections */
        LASSERTF(dev->gnd_map_nphys == 0 && dev->gnd_map_physnop == 0,
                 "%d physical mappings of %d pages still mapped\n",
                 dev->gnd_map_nphys, dev->gnd_map_physnop);

        LASSERTF(dev->gnd_map_nvirt == 0 && dev->gnd_map_virtnob == 0,
                 "%d virtual mappings of "LPU64" bytes still mapped\n",
                 dev->gnd_map_nvirt, dev->gnd_map_virtnob);

        LASSERTF(atomic_read(&dev->gnd_n_mdd) == 0 &&
                 atomic_read(&dev->gnd_n_mdd_held) == 0 &&
                 atomic64_read(&dev->gnd_nbytes_map) == 0,
                 "%d SMSG mappings of %ld bytes still mapped or held %d\n",
                 atomic_read(&dev->gnd_n_mdd),
                 atomic64_read(&dev->gnd_nbytes_map), atomic_read(&dev->gnd_n_mdd_held));

        LASSERT(list_empty(&dev->gnd_map_list));

        /* What other assertions needed to ensure all connections torn down? */

        /* check all counters == 0 (EP, MDD, etc) */

        /* if we are resetting due to quiesce (stack reset), don't check
         * thread states */
        LASSERTF(kgnilnd_data.kgn_quiesce_trigger ||
                 atomic_read(&kgnilnd_data.kgn_nthreads) == 0,
                 "tried to shutdown with threads active\n");

        if (dev->gnd_rcv_fma_cqh) {
                rrc = kgnilnd_cq_destroy(dev->gnd_rcv_fma_cqh);
                LASSERTF(rrc == GNI_RC_SUCCESS,
                         "bad rc from gni_cq_destroy on rcv_fma_cqh: %d\n", rrc);
                dev->gnd_rcv_fma_cqh = NULL;
        }

        if (dev->gnd_snd_rdma_cqh) {
                rrc = kgnilnd_cq_destroy(dev->gnd_snd_rdma_cqh);
                LASSERTF(rrc == GNI_RC_SUCCESS,
                         "bad rc from gni_cq_destroy on send_rdma_cqh: %d\n", rrc);
                dev->gnd_snd_rdma_cqh = NULL;
        }

        if (dev->gnd_snd_fma_cqh) {
                rrc = kgnilnd_cq_destroy(dev->gnd_snd_fma_cqh);
                LASSERTF(rrc == GNI_RC_SUCCESS,
                         "bad rc from gni_cq_destroy on snd_fma_cqh: %d\n", rrc);
                dev->gnd_snd_fma_cqh = NULL;
        }

        if (dev->gnd_err_handle) {
                rrc = kgnilnd_release_errors(dev->gnd_err_handle);
                LASSERTF(rrc == GNI_RC_SUCCESS,
                         "bad rc from gni_release_errors: %d\n", rrc);
                dev->gnd_err_handle = NULL;
        }

        if (dev->gnd_domain) {
                rrc = kgnilnd_cdm_destroy(dev->gnd_domain);
                LASSERTF(rrc == GNI_RC_SUCCESS,
                         "bad rc from gni_cdm_destroy: %d\n", rrc);
                dev->gnd_domain = NULL;
        }
}
2014 int kgnilnd_base_startup(void)
2017 int pkmem = atomic_read(&libcfs_kmemory);
2021 struct task_struct *thrd;
	LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_NOTHING,
		 "init %d\n", kgnilnd_data.kgn_init);

	/* zero pointers, flags etc */
	memset(&kgnilnd_data, 0, sizeof(kgnilnd_data));
	memset(&kgnilnd_hssops, 0, sizeof(kgnilnd_hssops));

	/* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
	 * a unique (for all time) connstamp so we can uniquely identify
	 * the sender. The connstamp is an incrementing counter
	 * initialised with seconds + microseconds at startup time. So we
	 * rely on NOT creating connections more frequently on average than
	 * 1MHz to ensure we don't use old connstamps when we reboot. */
	do_gettimeofday(&tv);
	kgnilnd_data.kgn_connstamp =
	kgnilnd_data.kgn_peerstamp =
		(((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
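	/* Worked example (illustrative): booting at tv_sec = 1330000000,
	 * tv_usec = 500000 yields an initial stamp of 1330000000500000.  A
	 * reboot one second later restarts the counter at 1330000001500000,
	 * so old stamps can only be reused if more than one million
	 * connections per second were created before the reboot. */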
	init_rwsem(&kgnilnd_data.kgn_net_rw_sem);

	for (i = 0; i < GNILND_MAXDEVS; i++) {
		kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];

		INIT_LIST_HEAD(&dev->gnd_ready_conns);
		INIT_LIST_HEAD(&dev->gnd_map_tx);
		INIT_LIST_HEAD(&dev->gnd_fma_buffs);
		mutex_init(&dev->gnd_cq_mutex);
		sema_init(&dev->gnd_fmablk_sem, 1);
		spin_lock_init(&dev->gnd_fmablk_lock);
		init_waitqueue_head(&dev->gnd_waitq);
		init_waitqueue_head(&dev->gnd_dgram_waitq);
		init_waitqueue_head(&dev->gnd_dgping_waitq);
		spin_lock_init(&dev->gnd_lock);
		INIT_LIST_HEAD(&dev->gnd_map_list);
		spin_lock_init(&dev->gnd_map_lock);
		atomic_set(&dev->gnd_nfmablk, 0);
		atomic_set(&dev->gnd_fmablk_vers, 1);
		atomic_set(&dev->gnd_neps, 0);
		atomic_set(&dev->gnd_canceled_dgrams, 0);
		INIT_LIST_HEAD(&dev->gnd_connd_peers);
		spin_lock_init(&dev->gnd_connd_lock);
		spin_lock_init(&dev->gnd_dgram_lock);
		spin_lock_init(&dev->gnd_rdmaq_lock);
		INIT_LIST_HEAD(&dev->gnd_rdmaq);

		/* alloc & setup nid based dgram table */
		LIBCFS_ALLOC(dev->gnd_dgrams,
			     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);

		if (dev->gnd_dgrams == NULL) {
			rc = -ENOMEM;
			goto failed;
		}

		/* use j here - reusing i would clobber the outer device loop */
		for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++) {
			INIT_LIST_HEAD(&dev->gnd_dgrams[j]);
		}
		atomic_set(&dev->gnd_ndgrams, 0);
		/* setup timer for RDMAQ processing */
		setup_timer(&dev->gnd_rdmaq_timer, kgnilnd_schedule_device_timer,
			    (unsigned long)dev);
	}
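	/* A minimal sketch (assumption - the real callback lives elsewhere in
	 * this driver) of the timer wiring above: setup_timer() records the
	 * callback and its argument, so when the timer fires the handler can
	 * recover the device, e.g.:
	 *
	 *   void kgnilnd_schedule_device_timer(unsigned long arg)
	 *   {
	 *           kgnilnd_schedule_device((kgn_device_t *)arg);
	 *   }
	 *
	 * and a caller would arm it with
	 * mod_timer(&dev->gnd_rdmaq_timer, jiffies + delay). */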
	/* CQID 0 isn't allowed, set to MAX_MSG_ID - 1 to check for conflicts early */
	kgnilnd_data.kgn_next_cqid = GNILND_MAX_MSG_ID - 1;
	kgnilnd_data.kgn_new_min_timeout = *kgnilnd_tunables.kgn_timeout;
	init_waitqueue_head(&kgnilnd_data.kgn_reaper_waitq);
	init_waitqueue_head(&kgnilnd_data.kgn_ruhroh_waitq);
	spin_lock_init(&kgnilnd_data.kgn_reaper_lock);

	sema_init(&kgnilnd_data.kgn_quiesce_sem, 1);
	atomic_set(&kgnilnd_data.kgn_nquiesce, 0);
	atomic_set(&kgnilnd_data.kgn_npending_conns, 0);
	atomic_set(&kgnilnd_data.kgn_npending_unlink, 0);
	atomic_set(&kgnilnd_data.kgn_npending_detach, 0);

	/* OK to call kgnilnd_api_shutdown() to cleanup now */
	kgnilnd_data.kgn_init = GNILND_INIT_DATA;
	PORTAL_MODULE_USE;

	rwlock_init(&kgnilnd_data.kgn_peer_conn_lock);
	LIBCFS_ALLOC(kgnilnd_data.kgn_peers,
		     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);

	if (kgnilnd_data.kgn_peers == NULL) {
		rc = -ENOMEM;
		goto failed;
	}

	for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
		INIT_LIST_HEAD(&kgnilnd_data.kgn_peers[i]);
	}

	LIBCFS_ALLOC(kgnilnd_data.kgn_conns,
		     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);

	if (kgnilnd_data.kgn_conns == NULL) {
		rc = -ENOMEM;
		goto failed;
	}

	for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
		INIT_LIST_HEAD(&kgnilnd_data.kgn_conns[i]);
	}

	LIBCFS_ALLOC(kgnilnd_data.kgn_nets,
		     sizeof(struct list_head) * *kgnilnd_tunables.kgn_net_hash_size);

	if (kgnilnd_data.kgn_nets == NULL) {
		rc = -ENOMEM;
		goto failed;
	}

	for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
		INIT_LIST_HEAD(&kgnilnd_data.kgn_nets[i]);
	}
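	/* Sketch of how these buckets are typically consumed (assumption:
	 * the real lookup helpers are defined in the driver headers along
	 * these lines) - a nid hashes straight into one of the lists
	 * initialised above:
	 *
	 *   static inline struct list_head *
	 *   kgnilnd_nid2peerlist(lnet_nid_t nid)
	 *   {
	 *           unsigned int hash = ((unsigned int)nid) %
	 *                               *kgnilnd_tunables.kgn_peer_hash_size;
	 *           return &kgnilnd_data.kgn_peers[hash];
	 *   }
	 */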
	kgnilnd_data.kgn_mbox_cache =
		cfs_mem_cache_create("kgn_mbox_block",
				     SLAB_HWCACHE_ALIGN); /* flags */
	if (kgnilnd_data.kgn_mbox_cache == NULL) {
		CERROR("Can't create slab for physical mbox blocks\n");
		rc = -ENOMEM;
		goto failed;
	}

	kgnilnd_data.kgn_rx_cache =
		cfs_mem_cache_create("kgn_rx_t",
				     sizeof(kgn_rx_t),
				     0,  /* offset */
				     0); /* flags */
	if (kgnilnd_data.kgn_rx_cache == NULL) {
		CERROR("Can't create slab for kgn_rx_t descriptors\n");
		rc = -ENOMEM;
		goto failed;
	}

	kgnilnd_data.kgn_tx_cache =
		cfs_mem_cache_create("kgn_tx_t",
				     sizeof(kgn_tx_t),
				     0,  /* offset */
				     0); /* flags */
	if (kgnilnd_data.kgn_tx_cache == NULL) {
		CERROR("Can't create slab for kgn_tx_t\n");
		rc = -ENOMEM;
		goto failed;
	}

	kgnilnd_data.kgn_tx_phys_cache =
		cfs_mem_cache_create("kgn_tx_phys",
				     LNET_MAX_IOV * sizeof(gni_mem_segment_t),
				     0,  /* offset */
				     0); /* flags */
	if (kgnilnd_data.kgn_tx_phys_cache == NULL) {
		CERROR("Can't create slab for kgn_tx_phys\n");
		rc = -ENOMEM;
		goto failed;
	}

	kgnilnd_data.kgn_dgram_cache =
		cfs_mem_cache_create("kgn_dgram_t",
				     sizeof(kgn_dgram_t),
				     0,  /* offset */
				     0); /* flags */
	if (kgnilnd_data.kgn_dgram_cache == NULL) {
		CERROR("Can't create slab for outgoing datagrams\n");
		rc = -ENOMEM;
		goto failed;
	}
	/* allocate a MAX_IOV array of page pointers for each cpu */
	kgnilnd_data.kgn_cksum_map_pages = kmalloc(num_possible_cpus() * sizeof(struct page *),
						   GFP_KERNEL);
	if (kgnilnd_data.kgn_cksum_map_pages == NULL) {
		CERROR("Can't allocate vmap cksum pages\n");
		rc = -ENOMEM;
		goto failed;
	}
	kgnilnd_data.kgn_cksum_npages = num_possible_cpus();
	memset(kgnilnd_data.kgn_cksum_map_pages, 0,
	       kgnilnd_data.kgn_cksum_npages * sizeof(struct page *));

	for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
		kgnilnd_data.kgn_cksum_map_pages[i] = kmalloc(LNET_MAX_IOV * sizeof(struct page *),
							      GFP_KERNEL);
		if (kgnilnd_data.kgn_cksum_map_pages[i] == NULL) {
			CERROR("Can't allocate vmap cksum pages for cpu %d\n", i);
			rc = -ENOMEM;
			goto failed;
		}
	}
	LASSERT(kgnilnd_data.kgn_ndevs == 0);

	/* Use all available GNI devices */
	for (i = 0; i < GNILND_MAXDEVS; i++) {
		dev = &kgnilnd_data.kgn_devices[kgnilnd_data.kgn_ndevs];

		rc = kgnilnd_dev_init(dev);
		if (rc == 0) {
			/* Increment here so base_shutdown cleans it up */
			kgnilnd_data.kgn_ndevs++;

			rc = kgnilnd_allocate_phys_fmablk(dev);
			if (rc != 0)
				goto failed;
		}
	}

	if (kgnilnd_data.kgn_ndevs == 0) {
		CERROR("Can't initialise any GNI devices\n");
		rc = -ENODEV;
		goto failed;
	}

	rc = kgnilnd_thread_start(kgnilnd_reaper, NULL, "kgnilnd_rpr", 0);
	if (rc != 0) {
		CERROR("Can't spawn gnilnd reaper: %d\n", rc);
		goto failed;
	}
	/*
	 * Start ruhroh thread. We can't use kgnilnd_thread_start() because
	 * we don't want this thread included in the kgnilnd_data.kgn_nthreads
	 * count. This thread controls quiesce, so it mustn't
	 * quiesce itself.
	 */
	thrd = kthread_run(kgnilnd_ruhroh_thread, NULL, "%s_%02d", "kgnilnd_rr", 0);
	if (IS_ERR(thrd)) {
		rc = PTR_ERR(thrd);
		CERROR("Can't spawn gnilnd ruhroh thread: %d\n", rc);
		goto failed;
	}
	/* threads will load balance across devs as they are available */
	for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
		rc = kgnilnd_thread_start(kgnilnd_scheduler, (void *)((long)i),
					  "kgnilnd_sd", i);
		if (rc != 0) {
			CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
			       i, rc);
			goto failed;
		}
	}

	for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
		dev = &kgnilnd_data.kgn_devices[i];
		rc = kgnilnd_thread_start(kgnilnd_dgram_mover, dev,
					  "kgnilnd_dg", dev->gnd_id);
		if (rc != 0) {
			CERROR("Can't spawn gnilnd dgram_mover[%d]: %d\n",
			       dev->gnd_id, rc);
			goto failed;
		}

		rc = kgnilnd_thread_start(kgnilnd_dgram_waitq, dev,
					  "kgnilnd_dgn", dev->gnd_id);
		if (rc != 0) {
			CERROR("Can't spawn gnilnd dgram_waitq[%d]: %d\n",
			       dev->gnd_id, rc);
			goto failed;
		}

		rc = kgnilnd_setup_wildcard_dgram(dev);
		if (rc != 0) {
			CERROR("Can't create wildcard dgrams[%d]: %d\n",
			       dev->gnd_id, rc);
			goto failed;
		}
	}
	/* flag everything initialised */
	kgnilnd_data.kgn_init = GNILND_INIT_ALL;
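	/* kgn_init acts as a high-water mark: GNILND_INIT_NOTHING ->
	 * GNILND_INIT_DATA (base shutdown is now safe to call, per the
	 * comment above) -> GNILND_INIT_ALL.  The failed: path below relies
	 * on this so a partial startup can be unwound from any point. */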
	/*****************************************************/

	CDEBUG(D_MALLOC, "initial kmem %d\n", pkmem);
	return 0;

failed:
	kgnilnd_base_shutdown();
	kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
	return rc;
}
void
kgnilnd_base_shutdown(void)
{
	int i;

	while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_PAUSE_SHUTDOWN, 1)) {};

	kgnilnd_data.kgn_wc_kill = 1;

	for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
		kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
		kgnilnd_cancel_wc_dgrams(dev);
		kgnilnd_del_conn_or_peer(NULL, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
		kgnilnd_wait_for_canceled_dgrams(dev);
	}
	/* Peer state all cleaned up BEFORE setting shutdown, so threads don't
	 * have to worry about shutdown races. NB connections may be created
	 * while there are still active connds, but these will be temporary
	 * since peer creation always fails after the listener has started to
	 * shut down.
	 * all peers should have been cleared out on the nets */
	LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
		 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
	/* Wait for the ruhroh thread to shut down. */
	kgnilnd_data.kgn_ruhroh_shutdown = 1;
	wake_up(&kgnilnd_data.kgn_ruhroh_waitq);
	i = 2;
	while (kgnilnd_data.kgn_ruhroh_running != 0) {
		i++;
		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
		       "Waiting for ruhroh thread to terminate\n");
		cfs_pause(cfs_time_seconds(1));
	}
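	/* Note on the logging trick above: (i & -i) == i holds exactly when i
	 * is a power of two (i & -i isolates the lowest set bit), so the
	 * "waiting" message escalates to D_WARNING only on passes 4, 8, 16,
	 * ... and stays at the quieter D_NET level otherwise. */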
	/* Flag threads to terminate */
	kgnilnd_data.kgn_shutdown = 1;

	for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
		kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];

		/* should clear all the MDDs */
		kgnilnd_unmap_phys_fmablk(dev);

		kgnilnd_schedule_device(dev);
		wake_up_all(&dev->gnd_dgram_waitq);
		wake_up_all(&dev->gnd_dgping_waitq);
		LASSERT(list_empty(&dev->gnd_connd_peers));
	}

	spin_lock(&kgnilnd_data.kgn_reaper_lock);
	wake_up_all(&kgnilnd_data.kgn_reaper_waitq);
	spin_unlock(&kgnilnd_data.kgn_reaper_lock);
	/* Wait for threads to exit */
	i = 2;
	while (atomic_read(&kgnilnd_data.kgn_nthreads) != 0) {
		i++;
		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
		       "Waiting for %d threads to terminate\n",
		       atomic_read(&kgnilnd_data.kgn_nthreads));
		cfs_pause(cfs_time_seconds(1));
	}

	LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
		 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
	if (kgnilnd_data.kgn_peers != NULL) {
		for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
			LASSERT(list_empty(&kgnilnd_data.kgn_peers[i]));

		LIBCFS_FREE(kgnilnd_data.kgn_peers,
			    sizeof(struct list_head) *
			    *kgnilnd_tunables.kgn_peer_hash_size);
	}
	down_write(&kgnilnd_data.kgn_net_rw_sem);
	if (kgnilnd_data.kgn_nets != NULL) {
		for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++)
			LASSERT(list_empty(&kgnilnd_data.kgn_nets[i]));

		LIBCFS_FREE(kgnilnd_data.kgn_nets,
			    sizeof(struct list_head) *
			    *kgnilnd_tunables.kgn_net_hash_size);
	}
	up_write(&kgnilnd_data.kgn_net_rw_sem);
	LASSERTF(atomic_read(&kgnilnd_data.kgn_nconns) == 0,
		 "conns left %d\n", atomic_read(&kgnilnd_data.kgn_nconns));

	if (kgnilnd_data.kgn_conns != NULL) {
		for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
			LASSERT(list_empty(&kgnilnd_data.kgn_conns[i]));

		LIBCFS_FREE(kgnilnd_data.kgn_conns,
			    sizeof(struct list_head) *
			    *kgnilnd_tunables.kgn_peer_hash_size);
	}
	for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
		kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
		kgnilnd_dev_fini(dev);

		LASSERTF(atomic_read(&dev->gnd_ndgrams) == 0,
			 "dgrams left %d\n", atomic_read(&dev->gnd_ndgrams));

		if (dev->gnd_dgrams != NULL) {
			/* use j here - reusing i would clobber the outer device loop */
			int j;

			for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++)
				LASSERT(list_empty(&dev->gnd_dgrams[j]));

			LIBCFS_FREE(dev->gnd_dgrams,
				    sizeof(struct list_head) *
				    *kgnilnd_tunables.kgn_peer_hash_size);
		}

		kgnilnd_free_phys_fmablk(dev);
	}
	if (kgnilnd_data.kgn_mbox_cache != NULL) {
		i = cfs_mem_cache_destroy(kgnilnd_data.kgn_mbox_cache);
		LASSERTF(i == 0, "rc %d destroying kgn_mbox_cache\n", i);
	}

	if (kgnilnd_data.kgn_rx_cache != NULL) {
		i = cfs_mem_cache_destroy(kgnilnd_data.kgn_rx_cache);
		LASSERTF(i == 0, "rc %d destroying kgn_rx_cache\n", i);
	}

	if (kgnilnd_data.kgn_tx_cache != NULL) {
		i = cfs_mem_cache_destroy(kgnilnd_data.kgn_tx_cache);
		LASSERTF(i == 0, "rc %d destroying kgn_tx_cache\n", i);
	}

	if (kgnilnd_data.kgn_tx_phys_cache != NULL) {
		i = cfs_mem_cache_destroy(kgnilnd_data.kgn_tx_phys_cache);
		LASSERTF(i == 0, "rc %d destroying kgn_tx_phys_cache\n", i);
	}

	if (kgnilnd_data.kgn_dgram_cache != NULL) {
		i = cfs_mem_cache_destroy(kgnilnd_data.kgn_dgram_cache);
		LASSERTF(i == 0, "rc %d destroying kgn_dgram_cache\n", i);
	}
	if (kgnilnd_data.kgn_cksum_map_pages != NULL) {
		for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
			if (kgnilnd_data.kgn_cksum_map_pages[i] != NULL) {
				kfree(kgnilnd_data.kgn_cksum_map_pages[i]);
			}
		}
		kfree(kgnilnd_data.kgn_cksum_map_pages);
	}

	CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
	       atomic_read(&libcfs_kmemory));

	kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
	PORTAL_MODULE_UNUSE;
}
int
kgnilnd_startup(lnet_ni_t *ni)
{
	int        rc;
	int        devno;
	kgn_net_t *net;

	LASSERTF(ni->ni_lnd == &the_kgnilnd,
		 "bad LND 0x%p != the_kgnilnd @ 0x%p\n",
		 ni->ni_lnd, &the_kgnilnd);

	if (kgnilnd_data.kgn_init == GNILND_INIT_NOTHING) {
		rc = kgnilnd_base_startup();
		if (rc != 0)
			return rc;
	}
	/* Serialize with shutdown. */
	down(&kgnilnd_data.kgn_quiesce_sem);

	LIBCFS_ALLOC(net, sizeof(*net));
	if (net == NULL) {
		CERROR("could not allocate net for new interface instance\n");
		/* no need to cleanup the CDM... */
		rc = -ENOMEM;
		goto failed;
	}

	INIT_LIST_HEAD(&net->gnn_list);
	ni->ni_data = net;
	net->gnn_ni = ni;
	ni->ni_maxtxcredits = *kgnilnd_tunables.kgn_credits;
	ni->ni_peertxcredits = *kgnilnd_tunables.kgn_peer_credits;
	if (*kgnilnd_tunables.kgn_peer_health) {
		int fudge;

		/* give this a bit of leeway - we don't have a hard timeout
		 * as we only check timeouts periodically - see comment in kgnilnd_reaper */
		fudge = (GNILND_TO2KA(*kgnilnd_tunables.kgn_timeout) / GNILND_REAPER_NCHECKS);

		ni->ni_peertimeout = *kgnilnd_tunables.kgn_timeout + fudge;

		LCONSOLE_INFO("Enabling LNet peer health for gnilnd, timeout %ds\n",
			      ni->ni_peertimeout);
	}
	atomic_set(&net->gnn_refcount, 1);

	/* if we have multiple devices, spread the nets around */
	net->gnn_netnum = LNET_NETNUM(LNET_NIDNET(ni->ni_nid));
	devno = LNET_NIDNET(ni->ni_nid) % GNILND_MAXDEVS;
	net->gnn_dev = &kgnilnd_data.kgn_devices[devno];
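	/* Worked example (assuming GNILND_MAXDEVS == 2): nets gni0 and gni2
	 * hash to devno 0 while gni1 and gni3 hash to devno 1, so interfaces
	 * alternate across the available GNI devices. */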
	/* allocate a 'dummy' cdm for datagram use. We can only have a single
	 * datagram between a nid:inst_id and nid2:inst_id. The fake cdm
	 * gives us an additional inst_id to use, allowing the datagrams to flow
	 * like rivers of honey and beer */

	/* the instance id for the cdm is the NETNUM offset by MAXDEVS -
	 * ensuring we'll have a unique id */
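	/* Worked example (illustrative): with GNILND_MAXDEVS == 2, the real
	 * per-device cdm ids occupy 0 and 1, so a net with NETNUM 3 gets
	 * inst_id 3 + 2 = 5, which can never collide with a device id. */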
	ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), net->gnn_dev->gnd_nid);
	CDEBUG(D_NET, "adding net %p nid=%s on dev %d\n",
	       net, libcfs_nid2str(ni->ni_nid), net->gnn_dev->gnd_id);
	/* until the gnn_list is set, we need to clean up ourselves as
	 * kgnilnd_shutdown would just get confused */

	down_write(&kgnilnd_data.kgn_net_rw_sem);
	list_add_tail(&net->gnn_list, kgnilnd_netnum2netlist(net->gnn_netnum));
	up_write(&kgnilnd_data.kgn_net_rw_sem);

	/* we need a separate thread to call probe_wait_by_id until
	 * we get a function callback notifier from kgni */
	up(&kgnilnd_data.kgn_quiesce_sem);
	return 0;

failed:
	up(&kgnilnd_data.kgn_quiesce_sem);
	kgnilnd_shutdown(ni);
	return rc;
}
void
kgnilnd_shutdown(lnet_ni_t *ni)
{
	kgn_net_t *net = ni->ni_data;
	int        i;

	CFS_RACE(CFS_FAIL_GNI_SR_DOWN_RACE);

	LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_ALL,
		 "init %d\n", kgnilnd_data.kgn_init);
	/* Serialize with startup. */
	down(&kgnilnd_data.kgn_quiesce_sem);
	CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
	       atomic_read(&libcfs_kmemory));

	if (net == NULL) {
		CERROR("got NULL net for ni %p\n", ni);
		goto out;
	}

	LASSERTF(ni == net->gnn_ni,
		 "ni %p gnn_ni %p\n", ni, net->gnn_ni);
	LASSERT(!net->gnn_shutdown);
	LASSERTF(atomic_read(&net->gnn_refcount) != 0,
		 "net %p refcount %d\n",
		 net, atomic_read(&net->gnn_refcount));
	if (!list_empty(&net->gnn_list)) {
		/* serialize with peer creation */
		down_write(&kgnilnd_data.kgn_net_rw_sem);
		net->gnn_shutdown = 1;
		up_write(&kgnilnd_data.kgn_net_rw_sem);

		kgnilnd_cancel_net_dgrams(net);

		kgnilnd_del_conn_or_peer(net, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);

		/* if we are quiesced, need to wake up - we need those threads
		 * alive to release peers, etc */
		if (GNILND_IS_QUIESCED) {
			set_mb(kgnilnd_data.kgn_quiesce_trigger, GNILND_QUIESCE_IDLE);
			kgnilnd_quiesce_wait("shutdown");
		}

		kgnilnd_wait_for_canceled_dgrams(net->gnn_dev);
		/* We wait until the net's refcount is 1 and then release the
		 * final ref, which is ours - this makes sure everything else
		 * is done before we free the net structure */
		i = 2;
		while (atomic_read(&net->gnn_refcount) != 1) {
			i++;
			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
			       "Waiting for %d references to clear on net %d\n",
			       atomic_read(&net->gnn_refcount),
			       net->gnn_netnum);
			cfs_pause(cfs_time_seconds(1));
		}
		/* release ref from kgnilnd_startup */
		kgnilnd_net_decref(net);
		/* serialize with reaper and conn_task looping */
		down_write(&kgnilnd_data.kgn_net_rw_sem);
		list_del_init(&net->gnn_list);
		up_write(&kgnilnd_data.kgn_net_rw_sem);
	}

	/* not locking, this can't race with writers */
	LASSERTF(atomic_read(&net->gnn_refcount) == 0,
		 "net %p refcount %d\n",
		 net, atomic_read(&net->gnn_refcount));
	LIBCFS_FREE(net, sizeof(*net));
	down_read(&kgnilnd_data.kgn_net_rw_sem);
	for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
		if (!list_empty(&kgnilnd_data.kgn_nets[i])) {
			up_read(&kgnilnd_data.kgn_net_rw_sem);
			break;
		}

		if (i == *kgnilnd_tunables.kgn_net_hash_size - 1) {
			up_read(&kgnilnd_data.kgn_net_rw_sem);
			kgnilnd_base_shutdown();
		}
	}

out:
	CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
	       atomic_read(&libcfs_kmemory));

	up(&kgnilnd_data.kgn_quiesce_sem);
}
void
kgnilnd_module_fini(void)
{
	lnet_unregister_lnd(&the_kgnilnd);
	kgnilnd_proc_fini();
	kgnilnd_remove_sysctl();
	kgnilnd_tunables_fini();
}

int
kgnilnd_module_init(void)
{
	int rc;

	rc = kgnilnd_tunables_init();
	if (rc != 0)
		return rc;

	printk(KERN_INFO "Lustre: kgnilnd build version: "KGNILND_BUILD_REV"\n");

	kgnilnd_insert_sysctl();
	kgnilnd_proc_init();

	lnet_register_lnd(&the_kgnilnd);

	return 0;
}
MODULE_AUTHOR("Cray, Inc. <nic@cray.com>");
MODULE_DESCRIPTION("Kernel Gemini LND v"KGNILND_BUILD_REV);
MODULE_LICENSE("GPL");

module_init(kgnilnd_module_init);
module_exit(kgnilnd_module_fini);
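/* Usage sketch (assumption - the exact configuration syntax belongs to LNet,
 * not this file): LNet pulls this LND in when a gni network is configured,
 * e.g.
 *
 *   options lnet networks="gni0"
 *
 * after which loading the Lustre stack triggers module_init() above, which
 * registers the_kgnilnd with LNet; kgnilnd_startup() then runs once per gni
 * NI that LNet brings up. */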