/*
 * Copyright (C) 2012 Cray, Inc.
 *
 * Copyright (c) 2013, 2017, Intel Corporation.
 *
 *   Author: Nic Henke <nic@cray.com>
 *   Author: James Shimek <jshimek@cray.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "gnilnd.h"
/* Primary entry points from LNET.  There are no guarantees against reentrance. */
const struct lnet_lnd the_kgnilnd = {
	.lnd_type       = GNILND,
	.lnd_startup    = kgnilnd_startup,
	.lnd_shutdown   = kgnilnd_shutdown,
	.lnd_ctl        = kgnilnd_ctl,
	.lnd_send       = kgnilnd_send,
	.lnd_recv       = kgnilnd_recv,
	.lnd_eager_recv = kgnilnd_eager_recv,
	.lnd_query      = kgnilnd_query,
};
kgn_data_t      kgnilnd_data;
int
kgnilnd_thread_start(int (*fn)(void *arg), void *arg, char *name, int id)
{
	struct task_struct *thrd;

	thrd = kthread_run(fn, arg, "%s_%02d", name, id);
	if (IS_ERR(thrd))
		return PTR_ERR(thrd);

	atomic_inc(&kgnilnd_data.kgn_nthreads);
	return 0;
}
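/* Note on the accounting here and in kgnilnd_start_sd_threads() below:
 * kgn_nthreads is incremented for every thread successfully started, and
 * the matching decrement is expected when each thread exits - that is
 * what lets kgnilnd_dev_fini() assert the count is zero at shutdown. */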
/* bind scheduler threads to cpus */
int
kgnilnd_start_sd_threads(void)
{
	int cpu;
	int i = 0;
	struct task_struct *task;

	for_each_online_cpu(cpu) {
		/* don't bind to cpu 0 - all interrupts are processed here */
		if (cpu == 0)
			continue;

		task = kthread_create(kgnilnd_scheduler, (void *)((long)i),
				      "%s_%02d", "kgnilnd_sd", i);
		if (!IS_ERR(task)) {
			kthread_bind(task, cpu);
			wake_up_process(task);
		} else {
			CERROR("Can't spawn gnilnd scheduler[%d] %ld\n", i,
			       PTR_ERR(task));
			return PTR_ERR(task);
		}

		atomic_inc(&kgnilnd_data.kgn_nthreads);

		if (++i >= *kgnilnd_tunables.kgn_sched_threads)
			break;
	}

	return 0;
}
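/* A quick sketch of the stamp bookkeeping used in the next two functions,
 * inferred from their checks:
 *   - gnc_peerstamp identifies an incarnation of the remote peer (it
 *     changes when the node restarts), so a lower peerstamp marks a conn
 *     from an older incarnation;
 *   - gnc_my_connstamp / gnc_peer_connstamp order individual connection
 *     attempts within one incarnation.
 * Loopback conns are special-cased since both endpoints live in this
 * module and legitimately see mirrored stamps. */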
/* needs write_lock on kgn_peer_conn_lock */
int
kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
	kgn_conn_t       *conn;
	struct list_head *ctmp, *cnxt;
	int               loopback;
	int               count = 0;

	loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;

	list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
		conn = list_entry(ctmp, kgn_conn_t, gnc_list);

		if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
			continue;

		if (conn == newconn)
			continue;

		if (conn->gnc_device != newconn->gnc_device)
			continue;

		/* This is a two connection loopback - one talking to the other */
		if (loopback &&
		    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
		    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp) {
			CDEBUG(D_NET, "skipping prune of %p, "
				"loopback and matching stamps"
				" connstamp %llu(%llu)"
				" peerstamp %llu(%llu)\n",
				conn, newconn->gnc_my_connstamp,
				conn->gnc_peer_connstamp,
				newconn->gnc_peer_connstamp,
				conn->gnc_my_connstamp);
			continue;
		}

		if (conn->gnc_peerstamp != newconn->gnc_peerstamp) {
			LASSERTF(conn->gnc_peerstamp < newconn->gnc_peerstamp,
				"conn 0x%p peerstamp %llu >= "
				"newconn 0x%p peerstamp %llu\n",
				conn, conn->gnc_peerstamp,
				newconn, newconn->gnc_peerstamp);

			CDEBUG(D_NET, "Closing stale conn nid: %s"
				" peerstamp:%#llx(%#llx)\n",
				libcfs_nid2str(peer->gnp_nid),
				conn->gnc_peerstamp, newconn->gnc_peerstamp);
		} else {
			LASSERTF(conn->gnc_peer_connstamp < newconn->gnc_peer_connstamp,
				"conn 0x%p peer_connstamp %llu >= "
				"newconn 0x%p peer_connstamp %llu\n",
				conn, conn->gnc_peer_connstamp,
				newconn, newconn->gnc_peer_connstamp);

			CDEBUG(D_NET, "Closing stale conn nid: %s"
				" connstamp:%llu(%llu)\n",
				libcfs_nid2str(peer->gnp_nid),
				conn->gnc_peer_connstamp, newconn->gnc_peer_connstamp);
		}

		count++;
		kgnilnd_close_conn_locked(conn, -ESTALE);
	}

	if (count != 0)
		CWARN("Closed %d stale conns to %s\n", count,
		      libcfs_nid2str(peer->gnp_nid));

	return count;
}
int
kgnilnd_conn_isdup_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
	kgn_conn_t       *conn;
	struct list_head *tmp;
	int               loopback;

	loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;

	list_for_each(tmp, &peer->gnp_conns) {
		conn = list_entry(tmp, kgn_conn_t, gnc_list);
		CDEBUG(D_NET, "checking conn 0x%p for peer %s"
			" lo %d new %llu existing %llu"
			" new peer %llu existing peer %llu"
			" new dev %p existing dev %p\n",
			conn, libcfs_nid2str(peer->gnp_nid),
			loopback,
			newconn->gnc_peerstamp, conn->gnc_peerstamp,
			newconn->gnc_peer_connstamp, conn->gnc_peer_connstamp,
			newconn->gnc_device, conn->gnc_device);

		/* conn is in the process of closing */
		if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
			continue;

		/* 'newconn' is from an earlier version of 'peer'!!! */
		if (newconn->gnc_peerstamp < conn->gnc_peerstamp)
			return 1;

		/* 'conn' is from an earlier version of 'peer': it will be
		 * removed when we cull stale conns later on... */
		if (newconn->gnc_peerstamp > conn->gnc_peerstamp)
			continue;

		/* Different devices are OK */
		if (conn->gnc_device != newconn->gnc_device)
			continue;

		/* It's me connecting to myself */
		if (loopback &&
		    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
		    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp)
			continue;

		/* 'newconn' is an earlier connection from 'peer'!!! */
		if (newconn->gnc_peer_connstamp < conn->gnc_peer_connstamp)
			return 2;

		/* 'conn' is an earlier connection from 'peer': it will be
		 * removed when we cull stale conns later on... */
		if (newconn->gnc_peer_connstamp > conn->gnc_peer_connstamp)
			continue;

		/* 'newconn' has the SAME connection stamp; 'peer' isn't
		 * playing the game... */
		return 3;
	}

	return 0;
}
int
kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
{
	kgn_conn_t   *conn;
	gni_return_t  rrc;
	int           rc = 0;

	LASSERT(!in_interrupt());
	atomic_inc(&kgnilnd_data.kgn_nconns);

	/* divide by 2 to allow for complete reset and immediate reconnect */
	if (atomic_read(&kgnilnd_data.kgn_nconns) >= GNILND_MAX_CQID/2) {
		CERROR("Too many conns are live: %d > %d\n",
			atomic_read(&kgnilnd_data.kgn_nconns), GNILND_MAX_CQID/2);
		atomic_dec(&kgnilnd_data.kgn_nconns);
		return -E2BIG;
	}

	LIBCFS_ALLOC(conn, sizeof(*conn));
	if (conn == NULL) {
		atomic_dec(&kgnilnd_data.kgn_nconns);
		return -ENOMEM;
	}

	conn->gnc_tx_ref_table =
		kgnilnd_vzalloc(GNILND_MAX_MSG_ID * sizeof(void *));
	if (conn->gnc_tx_ref_table == NULL) {
		CERROR("Can't allocate conn tx_ref_table\n");
		GOTO(failed, rc = -ENOMEM);
	}

	mutex_init(&conn->gnc_smsg_mutex);
	mutex_init(&conn->gnc_rdma_mutex);
	atomic_set(&conn->gnc_refcount, 1);
	atomic_set(&conn->gnc_reaper_noop, 0);
	atomic_set(&conn->gnc_sched_noop, 0);
	atomic_set(&conn->gnc_tx_in_use, 0);
	INIT_LIST_HEAD(&conn->gnc_list);
	INIT_LIST_HEAD(&conn->gnc_hashlist);
	INIT_LIST_HEAD(&conn->gnc_schedlist);
	INIT_LIST_HEAD(&conn->gnc_fmaq);
	INIT_LIST_HEAD(&conn->gnc_mdd_list);
	INIT_LIST_HEAD(&conn->gnc_delaylist);
	spin_lock_init(&conn->gnc_list_lock);
	spin_lock_init(&conn->gnc_tx_lock);
	conn->gnc_magic = GNILND_CONN_MAGIC;

	/* set tx id to nearly the end to make sure we find wrapping
	 * tx id issues soon */
	conn->gnc_next_tx = (int) GNILND_MAX_MSG_ID - 10;

	/* if this fails, we have conflicts and MAX_TX is too large */
	BUILD_BUG_ON(GNILND_MAX_MSG_ID >= GNILND_MSGID_CLOSE);

	/* get a new unique CQ id for this conn */
	write_lock(&kgnilnd_data.kgn_peer_conn_lock);
	conn->gnc_my_connstamp = kgnilnd_data.kgn_connstamp++;
	conn->gnc_cqid = kgnilnd_get_cqid_locked();
	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

	if (conn->gnc_cqid == 0) {
		CERROR("Could not allocate unique CQ ID for conn 0x%p\n", conn);
		GOTO(failed, rc = -E2BIG);
	}

	CDEBUG(D_NET, "alloc cqid %u for conn 0x%p\n",
		conn->gnc_cqid, conn);

	/* need to be set before gnc_ephandle to allow kgnilnd_destroy_conn_ep to
	 * check context */
	conn->gnc_device = dev;

	conn->gnc_timeout = MAX(*kgnilnd_tunables.kgn_timeout,
				GNILND_MIN_TIMEOUT);
	kgnilnd_update_reaper_timeout(conn->gnc_timeout);

	/* this is the ep_handle for doing SMSG & BTE */
	mutex_lock(&dev->gnd_cq_mutex);
	rrc = kgnilnd_ep_create(dev->gnd_handle, dev->gnd_snd_fma_cqh,
				&conn->gnc_ephandle);
	mutex_unlock(&dev->gnd_cq_mutex);
	if (rrc != GNI_RC_SUCCESS)
		GOTO(failed, rc = -ENETDOWN);

	CDEBUG(D_NET, "created conn 0x%p ep_hndl 0x%p\n",
	       conn, conn->gnc_ephandle);

	/* add ref for EP canceling */
	kgnilnd_conn_addref(conn);
	atomic_inc(&dev->gnd_neps);

	*connp = conn;
	return 0;

failed:
	atomic_dec(&kgnilnd_data.kgn_nconns);
	kgnilnd_vfree(conn->gnc_tx_ref_table,
		      GNILND_MAX_MSG_ID * sizeof(void *));
	LIBCFS_FREE(conn, sizeof(*conn));
	return rc;
}
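/* Both the connstamp and the CQ id above are taken while holding
 * kgn_peer_conn_lock for write, so connstamps are strictly increasing
 * across all conns and a CQ id can never be handed out twice. */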
/* needs to be called with kgn_peer_conn_lock held (read or write) */
kgn_conn_t *
kgnilnd_find_conn_locked(kgn_peer_t *peer)
{
	kgn_conn_t *conn = NULL;

	/* if we are in reset, this conn is going to die soon */
	if (unlikely(kgnilnd_data.kgn_in_reset)) {
		return NULL;
	}

	/* just return the first ESTABLISHED connection */
	list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
		/* kgnilnd_finish_connect doesn't put connections on the
		 * peer list until they are actually established */
		LASSERTF(conn->gnc_state >= GNILND_CONN_ESTABLISHED,
			"found conn %p state %s on peer %p (%s)\n",
			conn, kgnilnd_conn_state2str(conn), peer,
			libcfs_nid2str(peer->gnp_nid));
		if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
			continue;

		return conn;
	}

	return NULL;
}
/* needs write_lock on kgn_peer_conn_lock held */
void
kgnilnd_find_or_create_conn_locked(kgn_peer_t *peer)
{
	kgn_device_t *dev = peer->gnp_net->gnn_dev;
	kgn_conn_t   *conn;

	conn = kgnilnd_find_conn_locked(peer);
	if (conn != NULL) {
		return;
	}

	/* if the peer was previously connecting, check if we should
	 * trigger another connection attempt yet. */
	if (time_before(jiffies, peer->gnp_reconnect_time)) {
		return;
	}

	/* This check prevents us from creating a new connection to a peer while we are
	 * still in the process of closing an existing connection to the peer.
	 */
	list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
		if (conn->gnc_ephandle != NULL) {
			CDEBUG(D_NET, "Not connecting non-null ephandle found peer 0x%p->%s\n", peer,
				libcfs_nid2str(peer->gnp_nid));
			return;
		}
	}

	if (peer->gnp_connecting != GNILND_PEER_IDLE) {
		/* only fire up a new connection from the IDLE state -
		 * any other state means a connect is already in flight */
		return;
	}

	CDEBUG(D_NET, "starting connect to %s\n",
		libcfs_nid2str(peer->gnp_nid));
	peer->gnp_connecting = GNILND_PEER_CONNECT;
	kgnilnd_peer_addref(peer); /* extra ref for connd */

	spin_lock(&dev->gnd_connd_lock);
	list_add_tail(&peer->gnp_connd_list, &dev->gnd_connd_peers);
	spin_unlock(&dev->gnd_connd_lock);

	kgnilnd_schedule_dgram(dev);
	CDEBUG(D_NETTRACE, "scheduling new connect\n");
}
/* Caller is responsible for deciding if/when to call this */
void
kgnilnd_destroy_conn_ep(kgn_conn_t *conn)
{
	gni_return_t    rrc;
	gni_ep_handle_t tmp_ep;

	/* only if we actually initialized it,
	 * then set NULL to tell kgnilnd_destroy_conn to leave it alone */
	tmp_ep = xchg(&conn->gnc_ephandle, NULL);
	if (tmp_ep != NULL) {
		/* we never re-use the EP, so unbind is not needed */
		mutex_lock(&conn->gnc_device->gnd_cq_mutex);
		rrc = kgnilnd_ep_destroy(tmp_ep);
		mutex_unlock(&conn->gnc_device->gnd_cq_mutex);

		/* if this fails, it could hork up kgni smsg retransmit and others
		 * since we could free the SMSG mbox memory, etc. */
		LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d conn 0x%p ep 0x%p\n",
			 rrc, conn, conn->gnc_ephandle);

		atomic_dec(&conn->gnc_device->gnd_neps);

		/* clear out count added in kgnilnd_close_conn_locked
		 * conn will have a peer once it hits finish_connect, where it
		 * is the first spot we'll mark it ESTABLISHED as well */
		if (conn->gnc_peer) {
			kgnilnd_admin_decref(conn->gnc_peer->gnp_dirty_eps);
		}

		/* drop ref for EP */
		kgnilnd_conn_decref(conn);
	}
}
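/* The xchg() above is what makes EP teardown safe to race: whichever
 * caller swaps the handle out to NULL wins the right to destroy it, and
 * kgnilnd_destroy_conn() below will see NULL and leave it alone. */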
void
kgnilnd_destroy_conn(kgn_conn_t *conn)
{
	LASSERTF(!in_interrupt() &&
		!conn->gnc_scheduled &&
		!conn->gnc_in_purgatory &&
		conn->gnc_ephandle == NULL &&
		list_empty(&conn->gnc_list) &&
		list_empty(&conn->gnc_hashlist) &&
		list_empty(&conn->gnc_schedlist) &&
		list_empty(&conn->gnc_mdd_list) &&
		list_empty(&conn->gnc_delaylist) &&
		conn->gnc_magic == GNILND_CONN_MAGIC,
		"conn 0x%p->%s IRQ %d sched %d purg %d ep 0x%p Mg %d lists %d/%d/%d/%d/%d\n",
		conn, conn->gnc_peer ? libcfs_nid2str(conn->gnc_peer->gnp_nid)
				     : "<?>",
		!!in_interrupt(), conn->gnc_scheduled,
		conn->gnc_in_purgatory,
		conn->gnc_ephandle,
		conn->gnc_magic,
		list_empty(&conn->gnc_list),
		list_empty(&conn->gnc_hashlist),
		list_empty(&conn->gnc_schedlist),
		list_empty(&conn->gnc_mdd_list),
		list_empty(&conn->gnc_delaylist));

	/* Tripping these is especially bad, as it means we have items on the
	 * lists that didn't keep their refcount on the connection - or
	 * somebody evil released their own */
	LASSERTF(list_empty(&conn->gnc_fmaq) &&
		 atomic_read(&conn->gnc_nlive_fma) == 0 &&
		 atomic_read(&conn->gnc_nlive_rdma) == 0,
		 "conn 0x%p fmaq %d@0x%p nfma %d nrdma %d\n",
		 conn, kgnilnd_count_list(&conn->gnc_fmaq), &conn->gnc_fmaq,
		 atomic_read(&conn->gnc_nlive_fma), atomic_read(&conn->gnc_nlive_rdma));

	CDEBUG(D_NET, "destroying conn %p ephandle %p error %d\n",
		conn, conn->gnc_ephandle, conn->gnc_error);

	/* We are freeing this memory; remove the magic value from the connection */
	conn->gnc_magic = 0;

	/* if there is an FMA blk left here, we'll tear it down */
	if (conn->gnc_fma_blk) {
		if (conn->gnc_peer) {
			kgn_mbox_info_t *mbox;
			mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
			mbox->mbx_prev_nid = conn->gnc_peer->gnp_nid;
		}
		kgnilnd_release_mbox(conn, 0);
	}

	if (conn->gnc_peer != NULL)
		kgnilnd_peer_decref(conn->gnc_peer);

	if (conn->gnc_tx_ref_table != NULL) {
		kgnilnd_vfree(conn->gnc_tx_ref_table,
			      GNILND_MAX_MSG_ID * sizeof(void *));
	}

	LIBCFS_FREE(conn, sizeof(*conn));
	atomic_dec(&kgnilnd_data.kgn_nconns);
}
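/* Zeroing gnc_magic just before the free means any late reference to
 * this conn will trip the GNILND_CONN_MAGIC assertions above rather
 * than silently operating on freed memory. */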
/* peer_alive and peer_notify done in the style of the o2iblnd */
void
kgnilnd_peer_alive(kgn_peer_t *peer)
{
	time64_t now = ktime_get_seconds();

	set_mb(peer->gnp_last_alive, now);
}
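/* set_mb() stores the value and issues a memory barrier, giving a cheap
 * lockless update so readers such as kgnilnd_query() below see a fresh
 * gnp_last_alive without taking any lock. */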
void
kgnilnd_peer_notify(kgn_peer_t *peer, int error, int alive)
{
	int          tell_lnet = 0;
	int          nnets = 0;
	int          rc;
	int          i, j;
	kgn_conn_t  *conn;
	kgn_net_t  **nets;
	kgn_net_t   *net;

	if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DONT_NOTIFY))
		return;

	/* Tell LNet we are giving up on this peer - but only
	 * if it isn't already reconnected or trying to reconnect */
	read_lock(&kgnilnd_data.kgn_peer_conn_lock);

	/* use kgnilnd_find_conn_locked to avoid any conns in the process of being nuked
	 *
	 * don't tell LNet if we are in reset - we assume that everyone will be able to
	 * reconnect just fine
	 */
	conn = kgnilnd_find_conn_locked(peer);

	CDEBUG(D_NETTRACE, "peer 0x%p->%s ting %d conn 0x%p, rst %d error %d\n",
	       peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting, conn,
	       kgnilnd_data.kgn_in_reset, error);

	if (((peer->gnp_connecting == GNILND_PEER_IDLE) &&
	    (conn == NULL) &&
	    (!kgnilnd_data.kgn_in_reset) &&
	    (!kgnilnd_conn_clean_errno(error))) || alive) {
		tell_lnet = 1;
	}

	read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

	if (!tell_lnet) {
		/* short circuit if we don't need to notify LNet */
		return;
	}

	rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
	if (rc) {
		/* don't do this if the trylock fails, since LNet is in
		 * shutdown or something else is holding the write side
		 */
		for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
			list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
				/* if gnn_shutdown set for any net shutdown is in progress just return */
				if (net->gnn_shutdown) {
					up_read(&kgnilnd_data.kgn_net_rw_sem);
					return;
				}
				nnets++;
			}
		}

		if (nnets == 0) {
			/* shutdown in progress most likely */
			up_read(&kgnilnd_data.kgn_net_rw_sem);
			return;
		}

		LIBCFS_ALLOC(nets, nnets * sizeof(*nets));
		if (nets == NULL) {
			up_read(&kgnilnd_data.kgn_net_rw_sem);
			CERROR("Failed to allocate nets[%d]\n", nnets);
			return;
		}

		j = 0;
		for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
			list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
				nets[j] = net;
				kgnilnd_net_addref(net);
				j++;
			}
		}
		up_read(&kgnilnd_data.kgn_net_rw_sem);

		for (i = 0; i < nnets; i++) {
			lnet_nid_t peer_nid;

			net = nets[i];

			peer_nid = kgnilnd_lnd2lnetnid(net->gnn_ni->ni_nid,
						       peer->gnp_nid);

			CDEBUG(D_NET, "peer 0x%p->%s last_alive %lld (%llds ago)\n",
				peer, libcfs_nid2str(peer_nid), peer->gnp_last_alive,
				ktime_get_seconds() - peer->gnp_last_alive);

			lnet_notify(net->gnn_ni, peer_nid, alive,
				    (alive) ? true : false,
				    peer->gnp_last_alive);

			kgnilnd_net_decref(net);
		}

		LIBCFS_FREE(nets, nnets * sizeof(*nets));
	}
}
/* need write_lock on kgn_peer_conn_lock */
void
kgnilnd_close_conn_locked(kgn_conn_t *conn, int error)
{
	kgn_peer_t *peer = conn->gnc_peer;

	LASSERT(!in_interrupt());

	/* store error for tx completion */
	conn->gnc_error = error;
	peer->gnp_last_errno = error;

	/* use real error from peer if possible */
	if (error == -ECONNRESET) {
		error = conn->gnc_peer_error;
	}

	/* if we NETERROR, make sure it is rate limited */
	if (!kgnilnd_conn_clean_errno(error) &&
	    peer->gnp_state != GNILND_PEER_DOWN) {
		CNETERR("closing conn to %s: error %d\n",
		       libcfs_nid2str(peer->gnp_nid), error);
	} else {
		CDEBUG(D_NET, "closing conn to %s: error %d\n",
		       libcfs_nid2str(peer->gnp_nid), error);
	}

	LASSERTF(conn->gnc_state == GNILND_CONN_ESTABLISHED,
		"conn %p to %s with bogus state %s\n", conn,
		libcfs_nid2str(conn->gnc_peer->gnp_nid),
		kgnilnd_conn_state2str(conn));
	LASSERT(!list_empty(&conn->gnc_hashlist));
	LASSERT(!list_empty(&conn->gnc_list));

	/* mark peer count here so any place the EP gets destroyed will
	 * open up the peer count so that a new ESTABLISHED conn is then free
	 * to send new messages -- sending before the previous EPs are destroyed
	 * could end up with messages on the network for the old conn _after_
	 * the new conn and break the mbox safety protocol */
	kgnilnd_admin_addref(conn->gnc_peer->gnp_dirty_eps);

	/* Remove from conn hash table: no new callbacks */
	list_del_init(&conn->gnc_hashlist);
	kgnilnd_data.kgn_conn_version++;
	kgnilnd_conn_decref(conn);

	/* if we are in reset, go right to CLOSED as there is no scheduler
	 * thread to move from CLOSING to CLOSED */
	if (unlikely(kgnilnd_data.kgn_in_reset)) {
		conn->gnc_state = GNILND_CONN_CLOSED;
	} else {
		conn->gnc_state = GNILND_CONN_CLOSING;
	}

	if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RDMA_CQ_ERROR)) {
		msleep_interruptible(MSEC_PER_SEC);
	}

	/* leave on peer->gnp_conns to make sure we don't let the reaper
	 * or others try to unlink this peer until the conn is fully
	 * processed for closing */

	if (kgnilnd_check_purgatory_conn(conn)) {
		kgnilnd_add_purgatory_locked(conn, conn->gnc_peer);
	}

	/* Reset RX timeout to ensure we wait for an incoming CLOSE
	 * for the full timeout.  If we get a CLOSE we know the
	 * peer has stopped all RDMA.  Otherwise if we wait for
	 * the full timeout we can also be sure all RDMA has stopped. */
	conn->gnc_last_rx = conn->gnc_last_rx_cq = jiffies;

	/* schedule sending CLOSE - if we are in quiesce, this adds to
	 * gnd_ready_conns and allows us to find it in quiesce processing */
	kgnilnd_schedule_conn(conn);
}
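/* Life cycle sketch, as implemented above and in
 * kgnilnd_complete_closed_conn() below:
 *   ESTABLISHED -> CLOSING -> CLOSED -> DONE
 * The CLOSING -> CLOSED transition is driven by the scheduler thread
 * (and skipped entirely during a stack reset), while DONE is only set
 * once the EP is destroyed and the conn is off the peer list. */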
void
kgnilnd_close_conn(kgn_conn_t *conn, int error)
{
	write_lock(&kgnilnd_data.kgn_peer_conn_lock);
	/* need to check the state here - this call is racy and we don't
	 * know the state until after the lock is grabbed */
	if (conn->gnc_state == GNILND_CONN_ESTABLISHED) {
		kgnilnd_close_conn_locked(conn, error);
	}
	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
}
void
kgnilnd_complete_closed_conn(kgn_conn_t *conn)
{
	kgn_tx_t *tx, *txn;
	int       nlive = 0;
	int       nrdma = 0;
	int       nq_rdma = 0;
	int       logmsg;
	LIST_HEAD(sinners);

	/* Dump log on cksum error - wait until complete phase to let
	 * RX of error happen */
	if (*kgnilnd_tunables.kgn_checksum_dump &&
	    (conn != NULL && conn->gnc_peer_error == -ENOKEY)) {
		libcfs_debug_dumplog();
	}

	/* _CLOSED set in kgnilnd_process_fmaq once we decide to
	 * send the CLOSE or not */
	LASSERTF(conn->gnc_state == GNILND_CONN_CLOSED,
		 "conn 0x%p->%s with bad state %s\n",
		 conn, conn->gnc_peer ?
			libcfs_nid2str(conn->gnc_peer->gnp_nid) :
			"<?>",
		 kgnilnd_conn_state2str(conn));

	LASSERT(list_empty(&conn->gnc_hashlist));
	/* We shouldn't be on the delay list; the conn can
	 * get added to this list during a retransmit, and retransmits
	 * only occur within scheduler threads.
	 */
	LASSERT(list_empty(&conn->gnc_delaylist));

	/* we've sent the close, start nuking */
	if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SCHEDULE_COMPLETE))
		kgnilnd_schedule_conn(conn);

	if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
		CDEBUG(D_NETERROR, "Error someone scheduled us after we were "
			"done, Attempting to recover conn 0x%p "
			"scheduled %d function: %s line: %d\n", conn,
			conn->gnc_scheduled, conn->gnc_sched_caller,
			conn->gnc_sched_line);
		return;
	}

	/* we don't use lists to track things that we can get out of the
	 * tx_ref table... */

	/* need to hold locks for tx_list_state, sampling it is too racy:
	 * - the lock actually protects tx != NULL, but we can't take the proper
	 *   lock until we check tx_list_state, which would be too late and
	 *   we could have the TX change under us.
	 * gnd_rdmaq_lock and gnd_lock are not used together, so taking both
	 * should be fine */
	spin_lock(&conn->gnc_device->gnd_rdmaq_lock);
	spin_lock(&conn->gnc_device->gnd_lock);

	for (nrdma = 0; nrdma < GNILND_MAX_MSG_ID; nrdma++) {
		tx = conn->gnc_tx_ref_table[nrdma];
		if (tx != NULL) {
			/* only print the first error and if not CLOSE, we often don't see
			 * CQ events for that by the time we get here... and really don't care */
			if (nlive || tx->tx_msg.gnm_type == GNILND_MSG_CLOSE)
				tx->tx_state |= GNILND_TX_QUIET_ERROR;
			nlive++;
			GNIDBG_TX(D_NET, tx, "cleaning up on close, nlive %d", nlive);

			/* don't worry about gnc_lock here as nobody else should be
			 * touching this conn */
			kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
			list_add_tail(&tx->tx_list, &sinners);
		}
	}
	spin_unlock(&conn->gnc_device->gnd_lock);
	spin_unlock(&conn->gnc_device->gnd_rdmaq_lock);

	/* nobody should have marked this as needing scheduling after
	 * we called close - so only ref should be us handling it */
	if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
		CDEBUG(D_NETERROR, "Error someone scheduled us after we were "
			"done, Attempting to recover conn 0x%p "
			"scheduled %d function %s line: %d\n", conn,
			conn->gnc_scheduled, conn->gnc_sched_caller,
			conn->gnc_sched_line);
	}

	/* now reset a few to actual counters... */
	nrdma = atomic_read(&conn->gnc_nlive_rdma);
	nq_rdma = atomic_read(&conn->gnc_nq_rdma);

	if (!list_empty(&sinners)) {
		list_for_each_entry_safe(tx, txn, &sinners, tx_list) {
			/* clear tx_list to make tx_add_list_locked happy */
			list_del_init(&tx->tx_list);
			/* The error codes determine if we hold onto the MDD */
			kgnilnd_tx_done(tx, conn->gnc_error);
		}
	}

	logmsg = (nlive + nrdma + nq_rdma);

	if (logmsg) {
		int level = conn->gnc_peer->gnp_state == GNILND_PEER_UP ?
				D_NETERROR : D_NET;
		CDEBUG(level, "Closed conn 0x%p->%s (errno %d,"
			" peer errno %d): canceled %d TX, %d/%d RDMA\n",
			conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
			conn->gnc_error, conn->gnc_peer_error,
			nlive, nq_rdma, nrdma);
	}

	kgnilnd_destroy_conn_ep(conn);

	/* Bug 765042 - race this with completing a new conn to same peer - we need
	 * finish_connect to detach purgatory before we can do it ourselves here */
	CFS_RACE(CFS_FAIL_GNI_FINISH_PURG);

	/* now it is safe to remove from peer list - anyone looking at
	 * gnp_conns now is free to unlink if not on purgatory */
	write_lock(&kgnilnd_data.kgn_peer_conn_lock);

	conn->gnc_state = GNILND_CONN_DONE;

	/* Decrement counter if we are marked by del_conn_or_peers for closing
	 */
	if (conn->gnc_needs_closing)
		kgnilnd_admin_decref(kgnilnd_data.kgn_npending_conns);

	/* Remove from peer's list of valid connections if it's not in purgatory */
	if (!conn->gnc_in_purgatory) {
		list_del_init(&conn->gnc_list);
		/* Lose peer's reference on the conn */
		kgnilnd_conn_decref(conn);
	}

	/* NB - only unlinking if we set pending in del_peer_locked from admin or
	 * shutdown */
	if (kgnilnd_peer_active(conn->gnc_peer) &&
	    conn->gnc_peer->gnp_pending_unlink &&
	    kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
		kgnilnd_unlink_peer_locked(conn->gnc_peer);
	}

	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

	/* I'm telling Mommy! - use peer_error if they initiated close */
	kgnilnd_peer_notify(conn->gnc_peer,
			    conn->gnc_error == -ECONNRESET ?
			    conn->gnc_peer_error : conn->gnc_error, 0);
}
int
kgnilnd_set_conn_params(kgn_dgram_t *dgram)
{
	kgn_conn_t      *conn = dgram->gndg_conn;
	kgn_connreq_t   *connreq = &dgram->gndg_conn_in;
	kgn_gniparams_t *rem_param = &connreq->gncr_gnparams;
	gni_return_t     rrc;
	int              rc = 0;
	gni_smsg_attr_t *remote = &connreq->gncr_gnparams.gnpr_smsg_attr;

	/* set timeout vals in conn early so we can use them for the NAK */

	/* use max of the requested and our timeout, peer will do the same */
	conn->gnc_timeout = MAX(conn->gnc_timeout, connreq->gncr_timeout);

	/* only ep_bind really mucks around with the CQ */
	/* only ep bind if we are not connecting to ourself and the dstnid is not a wildcard. this check
	 * is necessary as you can only bind an ep once and we must make sure we don't bind when already bound.
	 */
	if (connreq->gncr_dstnid != LNET_NID_ANY && dgram->gndg_conn_out.gncr_dstnid != connreq->gncr_srcnid) {
		mutex_lock(&conn->gnc_device->gnd_cq_mutex);
		rrc = kgnilnd_ep_bind(conn->gnc_ephandle,
				      connreq->gncr_gnparams.gnpr_host_id,
				      conn->gnc_cqid);
		mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
		if (rrc != GNI_RC_SUCCESS) {
			rc = -ECONNABORTED;
			goto return_out;
		}
	}

	rrc = kgnilnd_ep_set_eventdata(conn->gnc_ephandle, conn->gnc_cqid,
				       connreq->gncr_gnparams.gnpr_cqid);
	if (rrc != GNI_RC_SUCCESS) {
		rc = -ECONNABORTED;
		goto cleanup;
	}

	/* Initialize SMSG */
	rrc = kgnilnd_smsg_init(conn->gnc_ephandle, &conn->gnpr_smsg_attr,
				&connreq->gncr_gnparams.gnpr_smsg_attr);
	if (unlikely(rrc == GNI_RC_INVALID_PARAM)) {
		gni_smsg_attr_t *local = &conn->gnpr_smsg_attr;
		/* help folks figure out if there is a tunable off, etc. */
		LCONSOLE_ERROR("SMSG attribute mismatch. Data from local/remote:"
			       " type %d/%d msg_maxsize %u/%u"
			       " mbox_maxcredit %u/%u. Please check kgni"
			       " logs for further data\n",
			       local->msg_type, remote->msg_type,
			       local->msg_maxsize, remote->msg_maxsize,
			       local->mbox_maxcredit, remote->mbox_maxcredit);
	}
	if (rrc != GNI_RC_SUCCESS) {
		rc = -ECONNABORTED;
		goto cleanup;
	}

	/* log this for help in debugging SMSG buffer re-use */
	CDEBUG(D_NET, "conn %p src %s dst %s smsg %p acquired"
		" local cqid %u SMSG %p->%u hndl %#llx.%#llx"
		" remote cqid %u SMSG %p->%u hndl %#llx.%#llx\n",
		conn, libcfs_nid2str(connreq->gncr_srcnid),
		libcfs_nid2str(connreq->gncr_dstnid),
		&conn->gnpr_smsg_attr,
		conn->gnc_cqid,
		conn->gnpr_smsg_attr.msg_buffer,
		conn->gnpr_smsg_attr.mbox_offset,
		conn->gnpr_smsg_attr.mem_hndl.qword1,
		conn->gnpr_smsg_attr.mem_hndl.qword2,
		rem_param->gnpr_cqid,
		rem_param->gnpr_smsg_attr.msg_buffer,
		rem_param->gnpr_smsg_attr.mbox_offset,
		rem_param->gnpr_smsg_attr.mem_hndl.qword1,
		rem_param->gnpr_smsg_attr.mem_hndl.qword2);

	conn->gnc_peerstamp = connreq->gncr_peerstamp;
	conn->gnc_peer_connstamp = connreq->gncr_connstamp;
	conn->remote_mbox_addr = (void *)((char *)remote->msg_buffer + remote->mbox_offset);

	/* We update the reaper timeout once we have a valid conn and timeout */
	kgnilnd_update_reaper_timeout(GNILND_TO2KA(conn->gnc_timeout));

	return 0;

cleanup:
	rrc = kgnilnd_ep_unbind(conn->gnc_ephandle);
	/* not sure I can just let this fly */
	LASSERTF(rrc == GNI_RC_SUCCESS,
		 "bad rc from gni_ep_unbind trying to cleanup: %d\n", rrc);

return_out:
	LASSERTF(rc != 0, "SOFTWARE BUG: rc == 0\n");
	CERROR("Error setting connection params from %s: %d\n",
	       libcfs_nid2str(connreq->gncr_srcnid), rc);
	return rc;
}
/* needs down_read on kgn_net_rw_sem held from before this call until
 * after the write_lock on kgn_peer_conn_lock - this ensures we stay sane
 * with kgnilnd_shutdown - it'll get the sem and set shutdown, then get the
 * kgn_peer_conn_lock to start del_peer'ing. If we hold the sem until after
 * kgn_peer_conn_lock is held, we guarantee that nobody calls
 * kgnilnd_add_peer_locked without checking gnn_shutdown */
int
kgnilnd_create_peer_safe(kgn_peer_t **peerp,
			 lnet_nid_t nid,
			 kgn_net_t *net,
			 int node_state)
{
	kgn_peer_t *peer;
	int         rc;

	LASSERT(nid != LNET_NID_ANY);

	/* We don't pass the net around in the dgram anymore, so here is where we find it.
	 * This will work unless it's in shutdown or the nid has a net that is invalid.
	 * Either way an error code needs to be returned in that case.
	 *
	 * If the net passed in is not NULL then we can use it; this alleviates looking it
	 * up when the calling function has access to the data.
	 */
	if (net == NULL) {
		rc = kgnilnd_find_net(nid, &net);
		if (rc < 0)
			return rc;
	} else {
		/* find net adds a reference on the net; if we are not using
		 * it we must do it manually so the net references are
		 * correct when tearing down the net
		 */
		kgnilnd_net_addref(net);
	}

	LIBCFS_ALLOC(peer, sizeof(*peer));
	if (peer == NULL) {
		kgnilnd_net_decref(net);
		return -ENOMEM;
	}
	peer->gnp_nid = nid;
	peer->gnp_state = node_state;

	/* translate from nid to nic addr & store */
	rc = kgnilnd_nid_to_nicaddrs(LNET_NIDADDR(nid), 1, &peer->gnp_host_id);
	if (rc <= 0) {
		kgnilnd_net_decref(net);
		LIBCFS_FREE(peer, sizeof(*peer));
		return -ESRCH;
	}
	CDEBUG(D_NET, "peer 0x%p->%s -> NIC 0x%x\n", peer,
		libcfs_nid2str(nid), peer->gnp_host_id);

	atomic_set(&peer->gnp_refcount, 1);     /* 1 ref for caller */
	atomic_set(&peer->gnp_dirty_eps, 0);

	INIT_LIST_HEAD(&peer->gnp_list);
	INIT_LIST_HEAD(&peer->gnp_connd_list);
	INIT_LIST_HEAD(&peer->gnp_conns);
	INIT_LIST_HEAD(&peer->gnp_tx_queue);

	/* the first reconnect should happen immediately, so we leave
	 * gnp_reconnect_interval set to 0 */

	LASSERTF(net != NULL, "peer 0x%p->%s with NULL net\n",
		 peer, libcfs_nid2str(nid));

	/* must have kgn_net_rw_sem held for this... */
	if (net->gnn_shutdown) {
		/* shutdown has started already */
		kgnilnd_net_decref(net);
		LIBCFS_FREE(peer, sizeof(*peer));
		return -ESHUTDOWN;
	}

	peer->gnp_net = net;

	atomic_inc(&kgnilnd_data.kgn_npeers);

	*peerp = peer;
	return 0;
}
void
kgnilnd_destroy_peer(kgn_peer_t *peer)
{
	CDEBUG(D_NET, "peer %s %p deleted\n",
	       libcfs_nid2str(peer->gnp_nid), peer);
	LASSERTF(atomic_read(&peer->gnp_refcount) == 0,
		 "peer 0x%p->%s refs %d\n",
		 peer, libcfs_nid2str(peer->gnp_nid),
		 atomic_read(&peer->gnp_refcount));
	LASSERTF(atomic_read(&peer->gnp_dirty_eps) == 0,
		 "peer 0x%p->%s dirty eps %d\n",
		 peer, libcfs_nid2str(peer->gnp_nid),
		 atomic_read(&peer->gnp_dirty_eps));
	LASSERTF(peer->gnp_net != NULL, "peer %p (%s) with NULL net\n",
		 peer, libcfs_nid2str(peer->gnp_nid));
	LASSERTF(!kgnilnd_peer_active(peer),
		 "peer 0x%p->%s\n",
		 peer, libcfs_nid2str(peer->gnp_nid));
	LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE || peer->gnp_connecting == GNILND_PEER_KILL,
		 "peer 0x%p->%s, connecting %d\n",
		 peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
	LASSERTF(list_empty(&peer->gnp_conns),
		 "peer 0x%p->%s\n",
		 peer, libcfs_nid2str(peer->gnp_nid));
	LASSERTF(list_empty(&peer->gnp_tx_queue),
		 "peer 0x%p->%s\n",
		 peer, libcfs_nid2str(peer->gnp_nid));
	LASSERTF(list_empty(&peer->gnp_connd_list),
		 "peer 0x%p->%s\n",
		 peer, libcfs_nid2str(peer->gnp_nid));

	/* NB a peer's connections keep a reference on their peer until
	 * they are destroyed, so we can be assured that _all_ state to do
	 * with this peer has been cleaned up when its refcount drops to
	 * zero. */

	atomic_dec(&kgnilnd_data.kgn_npeers);
	kgnilnd_net_decref(peer->gnp_net);

	LIBCFS_FREE(peer, sizeof(*peer));
}
/* the conn might not have made it all the way through to a connected
 * state - but we need to purgatory any conn that a remote peer might
 * have seen through a posted dgram as well */
void
kgnilnd_add_purgatory_locked(kgn_conn_t *conn, kgn_peer_t *peer)
{
	kgn_mbox_info_t *mbox = NULL;

	/* NB - the caller should own conn by removing him from the
	 * scheduler thread when finishing the close */

	LASSERTF(peer != NULL, "conn %p with NULL peer\n", conn);

	/* If this is still true, need to add the calls to unlink back in and
	 * figure out how to close the hole on loopback conns */
	LASSERTF(kgnilnd_peer_active(peer), "can't use inactive peer %s (%p)"
		" we'll never recover the resources\n",
		libcfs_nid2str(peer->gnp_nid), peer);

	CDEBUG(D_NET, "conn %p peer %p dev %p\n", conn, peer,
		conn->gnc_device);

	LASSERTF(conn->gnc_in_purgatory == 0,
		"Conn already in purgatory\n");
	conn->gnc_in_purgatory = 1;

	mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
	mbox->mbx_prev_purg_nid = peer->gnp_nid;
	mbox->mbx_add_purgatory = jiffies;
	kgnilnd_release_mbox(conn, 1);

	LASSERTF(list_empty(&conn->gnc_mdd_list),
		"conn 0x%p->%s with active purgatory hold MDD %d\n",
		conn, libcfs_nid2str(peer->gnp_nid),
		kgnilnd_count_list(&conn->gnc_mdd_list));
}
/* Instead of detaching everything from purgatory here we just mark the conn as needing
 * detach; when the reaper checks the conn the next time it will detach it.
 * Calling function requires write_lock held on kgn_peer_conn_lock
 */
void
kgnilnd_mark_for_detach_purgatory_all_locked(kgn_peer_t *peer)
{
	kgn_conn_t *conn;

	list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
		if (conn->gnc_in_purgatory && !conn->gnc_needs_detach) {
			conn->gnc_needs_detach = 1;
			kgnilnd_admin_addref(kgnilnd_data.kgn_npending_detach);
		}
	}
}
/* Calling function needs a write_lock held on kgn_peer_conn_lock */
void
kgnilnd_detach_purgatory_locked(kgn_conn_t *conn, struct list_head *conn_list)
{
	kgn_mbox_info_t *mbox = NULL;

	/* if needed, add the conn purgatory data to the list passed in */
	if (conn->gnc_in_purgatory) {
		CDEBUG(D_NET, "peer %p->%s purg_conn %p@%s mdd_list #tx %d\n",
			conn->gnc_peer, libcfs_nid2str(conn->gnc_peer->gnp_nid),
			conn, kgnilnd_conn_state2str(conn),
			kgnilnd_count_list(&conn->gnc_mdd_list));

		mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
		mbox->mbx_detach_of_purgatory = jiffies;

		/* conn->gnc_list is the entry point on peer->gnp_conns, so detaching it
		 * here removes it from the list of 'valid' peer connections.
		 * We put the current conn onto a list of conns to call kgnilnd_release_purgatory_locked()
		 * and as such the caller of kgnilnd_detach_purgatory_locked() now owns that conn, since it's not
		 * on the peer's conn_list anymore.
		 */

		list_del_init(&conn->gnc_list);

		/* NB - only unlinking if we set pending in del_peer_locked from admin or
		 * shutdown */
		if (kgnilnd_peer_active(conn->gnc_peer) &&
		    conn->gnc_peer->gnp_pending_unlink &&
		    kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
			kgnilnd_unlink_peer_locked(conn->gnc_peer);
		}

		/* The reaper will not call detach unless the conn is fully through kgnilnd_complete_closed_conn.
		 * If the conn is not in a DONE state somehow we are attempting to detach even though
		 * the conn has not been fully cleaned up. If we detach while the conn is still closing
		 * we will end up with an orphaned connection that has valid ep_handle, that is not on a
		 * conn list, and that will never be cleaned up.
		 */
		LASSERTF(conn->gnc_state == GNILND_CONN_DONE, "Conn in invalid state %p@%s\n",
			conn, kgnilnd_conn_state2str(conn));

		/* move from peer to the delayed release list */
		list_add_tail(&conn->gnc_list, conn_list);
	}
}
void
kgnilnd_release_purgatory_list(struct list_head *conn_list)
{
	kgn_device_t        *dev;
	kgn_conn_t          *conn, *connN;
	kgn_mdd_purgatory_t *gmp, *gmpN;

	list_for_each_entry_safe(conn, connN, conn_list, gnc_list) {
		dev = conn->gnc_device;

		kgnilnd_release_mbox(conn, -1);
		conn->gnc_in_purgatory = 0;

		list_del_init(&conn->gnc_list);

		/* gnc_needs_detach is set in kgnilnd_del_conn_or_peer. It is used to keep track
		 * of conns that have been marked for detach by kgnilnd_del_conn_or_peer.
		 * The function uses kgn_npending_detach to verify the conn has
		 * actually been detached.
		 */
		if (conn->gnc_needs_detach)
			kgnilnd_admin_decref(kgnilnd_data.kgn_npending_detach);

		/* if this guy is really dead (we are doing release from reaper),
		 * make sure we tell LNet - if this is from other context,
		 * the checks in the function will prevent an errant
		 * notification */
		kgnilnd_peer_notify(conn->gnc_peer, conn->gnc_error, 0);

		list_for_each_entry_safe(gmp, gmpN, &conn->gnc_mdd_list,
					 gmp_list) {
			CDEBUG(D_NET,
			       "dev %p releasing held mdd %#llx.%#llx\n",
			       conn->gnc_device, gmp->gmp_map_key.qword1,
			       gmp->gmp_map_key.qword2);

			atomic_dec(&dev->gnd_n_mdd_held);
			kgnilnd_mem_mdd_release(conn->gnc_device->gnd_handle,
						&gmp->gmp_map_key);
			/* ignoring the return code - if kgni/ghal can't find it
			 * it must be released already */

			list_del_init(&gmp->gmp_list);
			LIBCFS_FREE(gmp, sizeof(*gmp));
		}
		/* lose conn ref for purgatory */
		kgnilnd_conn_decref(conn);
	}
}
/* needs write_lock on kgnilnd_data.kgn_peer_conn_lock held */
void
kgnilnd_peer_increase_reconnect_locked(kgn_peer_t *peer)
{
	long current_to;

	current_to = peer->gnp_reconnect_interval;

	/* we'll try to reconnect fast the first time, then back-off */
	if (current_to == 0) {
		peer->gnp_reconnect_time = jiffies - 1;
		current_to = *kgnilnd_tunables.kgn_min_reconnect_interval;
	} else {
		peer->gnp_reconnect_time = jiffies + cfs_time_seconds(current_to);
		/* add 50% of min timeout & retry */
		current_to += *kgnilnd_tunables.kgn_min_reconnect_interval / 2;
	}

	current_to = MIN(current_to,
			 *kgnilnd_tunables.kgn_max_reconnect_interval);

	peer->gnp_reconnect_interval = current_to;
	CDEBUG(D_NET, "peer %s can reconnect at %lu interval %lu\n",
	       libcfs_nid2str(peer->gnp_nid), peer->gnp_reconnect_time,
	       peer->gnp_reconnect_interval);
}
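/* Example of the backoff above with an illustrative
 * kgn_min_reconnect_interval of 60s (not necessarily the shipped
 * default): the first attempt is immediate (reconnect_time = jiffies - 1),
 * then the waits grow 60 -> 90 -> 120 -> ... adding min/2 per failure,
 * clamped at kgn_max_reconnect_interval. */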
/* needs kgnilnd_data.kgn_peer_conn_lock held */
kgn_peer_t *
kgnilnd_find_peer_locked(lnet_nid_t nid)
{
	struct list_head *peer_list = kgnilnd_nid2peerlist(nid);
	kgn_peer_t       *peer;

	/* Chopping nid down to only NIDADDR using LNET_NIDADDR so we only
	 * have a single peer per device instead of a peer per nid/net combo.
	 */
	list_for_each_entry(peer, peer_list, gnp_list) {
		if (LNET_NIDADDR(nid) != LNET_NIDADDR(peer->gnp_nid))
			continue;

		CDEBUG(D_NET, "got peer [%p] -> %s c %d (%d)\n",
		       peer, libcfs_nid2str(nid),
		       peer->gnp_connecting,
		       atomic_read(&peer->gnp_refcount));
		return peer;
	}
	return NULL;
}
/* need write_lock on kgn_peer_conn_lock */
void
kgnilnd_unlink_peer_locked(kgn_peer_t *peer)
{
	LASSERTF(list_empty(&peer->gnp_conns),
		 "peer 0x%p->%s\n",
		 peer, libcfs_nid2str(peer->gnp_nid));
	LASSERTF(list_empty(&peer->gnp_tx_queue),
		 "peer 0x%p->%s\n",
		 peer, libcfs_nid2str(peer->gnp_nid));
	LASSERTF(kgnilnd_peer_active(peer),
		 "peer 0x%p->%s\n",
		 peer, libcfs_nid2str(peer->gnp_nid));
	CDEBUG(D_NET, "unlinking peer 0x%p->%s\n",
		peer, libcfs_nid2str(peer->gnp_nid));

	list_del_init(&peer->gnp_list);
	kgnilnd_data.kgn_peer_version++;
	kgnilnd_admin_decref(kgnilnd_data.kgn_npending_unlink);
	/* lose peerlist's ref */
	kgnilnd_peer_decref(peer);
}
int
kgnilnd_get_peer_info(int index,
		      kgn_peer_t **found_peer,
		      lnet_nid_t *id, __u32 *nic_addr,
		      int *refcount, int *connecting)
{
	struct list_head *ptmp;
	kgn_peer_t       *peer;
	int               i;
	int               rc = -ENOENT;

	read_lock(&kgnilnd_data.kgn_peer_conn_lock);

	for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
		list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
			peer = list_entry(ptmp, kgn_peer_t, gnp_list);

			if (index-- > 0)
				continue;

			CDEBUG(D_NET, "found peer %p (%s) at index %d\n",
			       peer, libcfs_nid2str(peer->gnp_nid), index);

			*found_peer = peer;
			*id = peer->gnp_nid;
			*nic_addr = peer->gnp_host_id;
			*refcount = atomic_read(&peer->gnp_refcount);
			*connecting = peer->gnp_connecting;

			rc = 0;
			goto out;
		}
	}
out:
	read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
	if (rc != 0)
		CDEBUG(D_NET, "no gni peer at index %d\n", index);
	return rc;
}
/* requires write_lock on kgn_peer_conn_lock held */
void
kgnilnd_add_peer_locked(lnet_nid_t nid, kgn_peer_t *new_stub_peer, kgn_peer_t **peerp)
{
	kgn_peer_t *peer, *peer2;

	LASSERTF(new_stub_peer != NULL, "bad stub peer for nid %s\n",
		 libcfs_nid2str(nid));

	peer2 = kgnilnd_find_peer_locked(nid);
	if (peer2 != NULL) {
		/* A peer was created during the lock transition, so drop
		 * the new one we created */
		kgnilnd_peer_decref(new_stub_peer);
		peer = peer2;
	} else {
		peer = new_stub_peer;
		/* peer table takes existing ref on peer */

		LASSERTF(!kgnilnd_peer_active(peer),
			"peer 0x%p->%s already in peer table\n",
			peer, libcfs_nid2str(peer->gnp_nid));
		list_add_tail(&peer->gnp_list,
			      kgnilnd_nid2peerlist(nid));
		kgnilnd_data.kgn_peer_version++;
	}

	LASSERTF(peer->gnp_net != NULL, "peer 0x%p->%s with NULL net\n",
		 peer, libcfs_nid2str(peer->gnp_nid));
	*peerp = peer;
}
int
kgnilnd_add_peer(kgn_net_t *net, lnet_nid_t nid, kgn_peer_t **peerp)
{
	kgn_peer_t *peer;
	int         rc;
	int         node_state;

	if (nid == LNET_NID_ANY)
		return -EINVAL;

	node_state = kgnilnd_get_node_state(LNET_NIDADDR(nid));

	/* NB - this will not block during normal operations -
	 * the only writer of this is in the startup/shutdown path. */
	rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
	if (!rc) {
		return -ESHUTDOWN;
	}

	rc = kgnilnd_create_peer_safe(&peer, nid, net, node_state);
	if (rc != 0) {
		up_read(&kgnilnd_data.kgn_net_rw_sem);
		return rc;
	}

	write_lock(&kgnilnd_data.kgn_peer_conn_lock);
	up_read(&kgnilnd_data.kgn_net_rw_sem);

	kgnilnd_add_peer_locked(nid, peer, peerp);

	CDEBUG(D_NET, "peer 0x%p->%s connecting %d\n",
	       *peerp, libcfs_nid2str((*peerp)->gnp_nid),
	       (*peerp)->gnp_connecting);

	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
	return 0;
}
/* needs write_lock on kgn_peer_conn_lock */
void
kgnilnd_cancel_peer_connect_locked(kgn_peer_t *peer, struct list_head *zombies)
{
	kgn_tx_t *tx, *txn;

	/* we do care about state of gnp_connecting - we could be between
	 * reconnect attempts, so try to find the dgram and cancel the TX
	 * anyways. If we are in the process of posting DON'T do anything;
	 * once it fails or succeeds we can nuke the connect attempt.
	 * We have no idea where in kgnilnd_post_dgram we are so we can't
	 * attempt to cancel until the function is done.
	 */

	/* make sure peer isn't in process of connecting or waiting for connect */
	spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
	if (!(list_empty(&peer->gnp_connd_list))) {
		list_del_init(&peer->gnp_connd_list);
		/* remove connd ref */
		kgnilnd_peer_decref(peer);
	}
	spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);

	if (peer->gnp_connecting == GNILND_PEER_POSTING || peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH) {
		peer->gnp_connecting = GNILND_PEER_NEEDS_DEATH;
		/* We are in process of posting right now; the xchg set it up for us to
		 * cancel the connect so we are finished for now */
	} else {
		/* no need for exchange - we have the peer lock and it's ready for us to nuke */
		LASSERTF(peer->gnp_connecting != GNILND_PEER_POSTING,
			"Peer in invalid state 0x%p->%s, connecting %d\n",
			peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
		peer->gnp_connecting = GNILND_PEER_IDLE;
		set_mb(peer->gnp_last_dgram_errno, -ETIMEDOUT);
		kgnilnd_find_and_cancel_dgram(peer->gnp_net->gnn_dev,
					      peer->gnp_nid);
	}

	/* The least we can do is nuke the tx's no matter what.... */
	list_for_each_entry_safe(tx, txn, &peer->gnp_tx_queue, tx_list) {
		kgnilnd_tx_del_state_locked(tx, peer, NULL,
					    GNILND_TX_ALLOCD);
		list_add_tail(&tx->tx_list, zombies);
	}
}
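/* The zombie TXs collected here are completed by the caller via
 * kgnilnd_txlist_done() only after kgn_peer_conn_lock is dropped - see
 * kgnilnd_del_conn_or_peer() and kgnilnd_report_node_state() below. */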
/* needs write_lock on kgn_peer_conn_lock */
void
kgnilnd_del_peer_locked(kgn_peer_t *peer, int error)
{
	/* this peer could be passive and only held for purgatory,
	 * take a ref to ensure it doesn't disappear in this function */
	kgnilnd_peer_addref(peer);

	CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);

	/* if purgatory release cleared it out, don't try again */
	if (kgnilnd_peer_active(peer)) {
		/* always do this to allow kgnilnd_start_connect and
		 * kgnilnd_finish_connect to catch this before they
		 * wrap up their operations */
		if (kgnilnd_can_unlink_peer_locked(peer)) {
			/* already released purgatory, so only active
			 * conns hold it */
			kgnilnd_unlink_peer_locked(peer);
		} else {
			kgnilnd_close_peer_conns_locked(peer, error);
			/* peer unlinks itself when last conn is closed */
		}
	}

	/* we are done, release back to the wild */
	kgnilnd_peer_decref(peer);
}
int
kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
			 int error)
{
	LIST_HEAD        (zombies);
	struct list_head *ptmp, *pnxt;
	kgn_peer_t       *peer;
	int               lo;
	int               hi;
	int               i;
	int               rc = -ENOENT;

	write_lock(&kgnilnd_data.kgn_peer_conn_lock);

	if (nid != LNET_NID_ANY)
		lo = hi = kgnilnd_nid2peerlist(nid) - kgnilnd_data.kgn_peers;
	else {
		lo = 0;
		hi = *kgnilnd_tunables.kgn_peer_hash_size - 1;
		/* wildcards always succeed */
		rc = 0;
	}

	for (i = lo; i <= hi; i++) {
		list_for_each_safe(ptmp, pnxt, &kgnilnd_data.kgn_peers[i]) {
			peer = list_entry(ptmp, kgn_peer_t, gnp_list);

			LASSERTF(peer->gnp_net != NULL,
				"peer %p (%s) with NULL net\n",
				 peer, libcfs_nid2str(peer->gnp_nid));

			if (net != NULL && peer->gnp_net != net)
				continue;

			if (!(nid == LNET_NID_ANY || LNET_NIDADDR(peer->gnp_nid) == LNET_NIDADDR(nid)))
				continue;

			/* In both cases, we want to stop any in-flight
			 * connect attempts */
			kgnilnd_cancel_peer_connect_locked(peer, &zombies);

			switch (command) {
			case GNILND_DEL_CONN:
				kgnilnd_close_peer_conns_locked(peer, error);
				break;
			case GNILND_DEL_PEER:
				peer->gnp_pending_unlink = 1;
				kgnilnd_admin_addref(kgnilnd_data.kgn_npending_unlink);
				kgnilnd_mark_for_detach_purgatory_all_locked(peer);
				kgnilnd_del_peer_locked(peer, error);
				break;
			case GNILND_CLEAR_PURGATORY:
				/* Mark everything ready for detach; the reaper will cleanup
				 * once we release the kgn_peer_conn_lock
				 */
				kgnilnd_mark_for_detach_purgatory_all_locked(peer);
				peer->gnp_last_errno = -EISCONN;
				/* clear reconnect so he can reconnect soon */
				peer->gnp_reconnect_time = 0;
				peer->gnp_reconnect_interval = 0;
				break;
			default:
				CERROR("bad command %d\n", command);
				LBUG();
			}
			/* we matched something */
			rc = 0;
		}
	}

	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

	/* nuke the peer TXs we collected */
	kgnilnd_txlist_done(&zombies, error);

	/* This function does not return until the commands it initiated have completed,
	 * since they have to work their way through the other threads. In the case of shutdown
	 * threads are not woken up until after this call is initiated so we cannot wait, we just
	 * need to return. The same applies for stack reset: we shouldn't wait as the reset thread
	 * handles closing.
	 */

	CFS_RACE(CFS_FAIL_GNI_RACE_RESET);

	if (error == -ENOTRECOVERABLE || error == -ESHUTDOWN) {
		return rc;
	}

	i = 4;
	while (atomic_read(&kgnilnd_data.kgn_npending_conns)   ||
	       atomic_read(&kgnilnd_data.kgn_npending_detach)  ||
	       atomic_read(&kgnilnd_data.kgn_npending_unlink)) {

		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));
		i++;

		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, "Waiting on %d peers %d closes %d detaches\n",
			atomic_read(&kgnilnd_data.kgn_npending_unlink),
			atomic_read(&kgnilnd_data.kgn_npending_conns),
			atomic_read(&kgnilnd_data.kgn_npending_detach));
	}

	return rc;
}
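/* Note: ((i & (-i)) == i) is true only when i is a power of two, so the
 * "Waiting on ..." message above is promoted to D_WARNING on a geometric
 * schedule rather than flooding the console once per second. */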
kgn_conn_t *
kgnilnd_get_conn_by_idx(int index)
{
	kgn_peer_t       *peer;
	struct list_head *ptmp;
	kgn_conn_t       *conn;
	struct list_head *ctmp;
	int               i;

	for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
		read_lock(&kgnilnd_data.kgn_peer_conn_lock);
		list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {

			peer = list_entry(ptmp, kgn_peer_t, gnp_list);

			list_for_each(ctmp, &peer->gnp_conns) {
				conn = list_entry(ctmp, kgn_conn_t, gnc_list);

				if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
					continue;

				if (index-- > 0)
					continue;

				CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
					libcfs_nid2str(conn->gnc_peer->gnp_nid),
					atomic_read(&conn->gnc_refcount));
				kgnilnd_conn_addref(conn);
				read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
				return conn;
			}
		}
		read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
	}

	return NULL;
}
int
kgnilnd_get_conn_info(kgn_peer_t *peer,
		      int *device_id, __u64 *peerstamp,
		      int *tx_seq, int *rx_seq,
		      int *fmaq_len, int *nfma, int *nrdma)
{
	kgn_conn_t *conn;
	int         rc = 0;

	read_lock(&kgnilnd_data.kgn_peer_conn_lock);

	conn = kgnilnd_find_conn_locked(peer);
	if (conn == NULL) {
		rc = -ENOENT;
		goto out;
	}

	*device_id = conn->gnc_device->gnd_host_id;
	*peerstamp = conn->gnc_peerstamp;
	*tx_seq = atomic_read(&conn->gnc_tx_seq);
	*rx_seq = atomic_read(&conn->gnc_rx_seq);
	*fmaq_len = kgnilnd_count_list(&conn->gnc_fmaq);
	*nfma = atomic_read(&conn->gnc_nlive_fma);
	*nrdma = atomic_read(&conn->gnc_nlive_rdma);
out:
	read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
	return rc;
}
/* needs write_lock on kgn_peer_conn_lock */
int
kgnilnd_close_peer_conns_locked(kgn_peer_t *peer, int why)
{
	kgn_conn_t       *conn;
	struct list_head *ctmp, *cnxt;
	int               count = 0;

	list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
		conn = list_entry(ctmp, kgn_conn_t, gnc_list);

		if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
			continue;

		count++;
		/* we mark gnc_needs_closing and increment kgn_npending_conns so that
		 * kgnilnd_del_conn_or_peer can wait on the other threads closing
		 * and cleaning up the connection.
		 */
		if (!conn->gnc_needs_closing) {
			conn->gnc_needs_closing = 1;
			kgnilnd_admin_addref(kgnilnd_data.kgn_npending_conns);
		}
		kgnilnd_close_conn_locked(conn, why);
	}
	return count;
}
int
kgnilnd_report_node_state(lnet_nid_t nid, int down)
{
	int         rc;
	kgn_peer_t *peer, *new_peer;
	LIST_HEAD  (zombies);

	write_lock(&kgnilnd_data.kgn_peer_conn_lock);
	peer = kgnilnd_find_peer_locked(nid);

	if (peer == NULL) {
		int        i;
		int        found_net = 0;
		kgn_net_t *net;

		write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

		/* Don't add a peer for node up events */
		if (down == GNILND_PEER_UP)
			return 0;

		/* find any valid net - we don't care which one... */
		down_read(&kgnilnd_data.kgn_net_rw_sem);
		for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
			list_for_each_entry(net, &kgnilnd_data.kgn_nets[i],
					    gnn_list) {
				found_net = 1;
				break;
			}

			if (found_net)
				break;
		}
		up_read(&kgnilnd_data.kgn_net_rw_sem);

		if (!found_net) {
			CNETERR("Could not find a net for nid %lld\n", nid);
			return 1;
		}

		/* The nid passed in does not yet contain the net portion.
		 * Let's build it up now
		 */
		nid = LNET_MKNID(LNET_NIDNET(net->gnn_ni->ni_nid), nid);
		rc = kgnilnd_add_peer(net, nid, &new_peer);
		if (rc) {
			CNETERR("Could not add peer for nid %lld, rc %d\n",
				nid, rc);
			return 1;
		}

		write_lock(&kgnilnd_data.kgn_peer_conn_lock);
		peer = kgnilnd_find_peer_locked(nid);

		if (peer == NULL) {
			CNETERR("Could not find peer for nid %lld\n", nid);
			write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
			return 1;
		}
	}

	peer->gnp_state = down;

	if (down == GNILND_PEER_DOWN) {
		kgn_conn_t *conn;

		peer->gnp_down_event_time = jiffies;
		kgnilnd_cancel_peer_connect_locked(peer, &zombies);
		conn = kgnilnd_find_conn_locked(peer);

		if (conn != NULL) {
			kgnilnd_close_conn_locked(conn, -ENETRESET);
		}
	} else {
		peer->gnp_up_event_time = jiffies;
	}

	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

	if (down == GNILND_PEER_DOWN) {
		/* using ENETRESET so we don't get messages from
		 * the error paths for what is an expected event
		 */
		kgnilnd_txlist_done(&zombies, -ENETRESET);
		kgnilnd_peer_notify(peer, -ECONNRESET, 0);
		LCONSOLE_INFO("Received down event for nid %d\n",
			      LNET_NIDADDR(nid));
	}

	return 0;
}
int
kgnilnd_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
{
	struct libcfs_ioctl_data *data = arg;
	kgn_net_t                *net = ni->ni_data;
	int                       rc = -EINVAL;

	LASSERT(ni == net->gnn_ni);

	switch (cmd) {
	case IOC_LIBCFS_GET_PEER: {
		lnet_nid_t  nid = 0;
		kgn_peer_t *peer = NULL;
		__u32 nic_addr = 0;
		__u64 peerstamp = 0;
		int peer_refcount = 0, peer_connecting = 0;
		int device_id = 0;
		int tx_seq = 0, rx_seq = 0;
		int fmaq_len = 0, nfma = 0, nrdma = 0;

		rc = kgnilnd_get_peer_info(data->ioc_count, &peer,
					   &nid, &nic_addr, &peer_refcount,
					   &peer_connecting);
		if (rc)
			break;

		/* LNET_MKNID is used to mask from lnet the multiplexing/demultiplexing of connections and peers.
		 * LNET assumes a conn and peer per net; the LNET_MKNID/LNET_NIDADDR allows us to let LNet see what it
		 * wants to see instead of the underlying network that is being used to send the data
		 */
		data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(nid));
		data->ioc_flags  = peer_connecting;
		data->ioc_count  = peer_refcount;

		rc = kgnilnd_get_conn_info(peer, &device_id, &peerstamp,
					   &tx_seq, &rx_seq, &fmaq_len,
					   &nfma, &nrdma);

		/* This is allowable - a persistent peer could not
		 * have a connection */
		if (rc) {
			/* flag to indicate we are not connected -
			 * need to print as such */
			data->ioc_flags |= (1<<16);
			rc = 0;
		} else {
			data->ioc_net    = device_id;
			data->ioc_u64[0] = peerstamp;
			data->ioc_u32[0] = fmaq_len;
			data->ioc_u32[1] = nfma;
			data->ioc_u32[2] = tx_seq;
			data->ioc_u32[3] = rx_seq;
			data->ioc_u32[4] = nrdma;
		}
		break;
	}
	case IOC_LIBCFS_ADD_PEER: {
		/* just dummy value to allow using common interface */
		kgn_peer_t *peer;
		rc = kgnilnd_add_peer(net, data->ioc_nid, &peer);
		break;
	}
	case IOC_LIBCFS_DEL_PEER: {
		/* NULL is passed in so it affects all peers in existence without regard to network,
		 * as the peer may not exist on the network LNET believes it to be on.
		 */
		rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
					      GNILND_DEL_PEER, -EUCLEAN);
		break;
	}
	case IOC_LIBCFS_GET_CONN: {
		kgn_conn_t *conn = kgnilnd_get_conn_by_idx(data->ioc_count);

		if (conn == NULL) {
			rc = -ENOENT;
			break;
		}

		rc = 0;
		/* LNET_MKNID is used to build the correct address based on what LNET wants to see instead of
		 * the generic connection that is used to send the data
		 */
		data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(conn->gnc_peer->gnp_nid));
		data->ioc_u32[0] = conn->gnc_device->gnd_id;
		kgnilnd_conn_decref(conn);
		break;
	}
	case IOC_LIBCFS_CLOSE_CONNECTION: {
		/* use error = -ENETRESET to indicate it was lctl disconnect */
		/* NULL is passed in so it affects all the nets as the connection is virtual
		 * and may not exist on the network LNET believes it to be on.
		 */
		rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
					      GNILND_DEL_CONN, -ENETRESET);
		break;
	}
	case IOC_LIBCFS_PUSH_CONNECTION: {
		/* we use this to flush purgatory */
		rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
					      GNILND_CLEAR_PURGATORY, -EUCLEAN);
		break;
	}
	case IOC_LIBCFS_REGISTER_MYNID: {
		/* Ignore if this is a noop */
		if (data->ioc_nid == ni->ni_nid) {
			rc = 0;
		} else {
			CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
			       libcfs_nid2str(data->ioc_nid),
			       libcfs_nid2str(ni->ni_nid));
			rc = -EINVAL;
		}
		break;
	}
	}

	return rc;
}
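/* The IOC_LIBCFS_GET_PEER packing above is the userland ABI for lctl-style
 * tools: ioc_flags carries gnp_connecting with bit 16 set when no conn
 * exists, and ioc_u32[0..4] carry fmaq_len, nfma, tx_seq, rx_seq and
 * nrdma respectively. */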
void
kgnilnd_query(struct lnet_ni *ni, lnet_nid_t nid, time64_t *when)
{
	kgn_net_t              *net = ni->ni_data;
	kgn_tx_t               *tx;
	kgn_peer_t             *peer = NULL;
	kgn_conn_t             *conn = NULL;
	struct lnet_process_id  id = {
		.nid = nid,
		.pid = LNET_PID_LUSTRE,
	};

	/* I expect to find him, so only take a read lock */
	read_lock(&kgnilnd_data.kgn_peer_conn_lock);
	peer = kgnilnd_find_peer_locked(nid);
	if (peer != NULL) {
		/* LIE if in a quiesce - we will update the timeouts after,
		 * but we don't want sends failing during it */
		if (kgnilnd_data.kgn_quiesce_trigger) {
			*when = ktime_get_seconds();
			read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
			goto out;
		}

		/* Update to best guess, might refine on later checks */
		*when = peer->gnp_last_alive;

		/* we have a peer, how about a conn? */
		conn = kgnilnd_find_conn_locked(peer);

		if (conn == NULL) {
			/* if there is no conn, check peer last errno to see if clean disconnect
			 * - if it was, we lie to LNet because we believe a TX would complete
			 * on reconnect */
			if (kgnilnd_conn_clean_errno(peer->gnp_last_errno)) {
				*when = ktime_get_seconds();
			}
			/* we still want to fire a TX and new conn in this case */
		} else {
			/* gnp_last_alive is valid, run for the hills */
			read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
			goto out;
		}
	}

	/* if we get here, either we have no peer or no conn for him, so fire off
	 * new TX to trigger conn setup */
	read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

	/* if we couldn't find him, we'll fire up a TX and get connected -
	 * if we don't do this, after ni_peer_timeout, LNet will declare him dead.
	 * So really we treat kgnilnd_query as a bit of a 'connect now' type
	 * event because it'll only do this when it wants to send
	 *
	 * Use a real TX for this to get the proper gnp_tx_queue behavior, etc;
	 * normally we'd use kgnilnd_send_ctlmsg for this, but we don't really
	 * care that this goes out quickly since we already know we need a new conn
	 * formed */
	if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
		goto out;

	tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, ni->ni_nid);
	if (tx != NULL)
		kgnilnd_launch_tx(tx, net, &id);

out:
	CDEBUG(D_NETTRACE, "peer 0x%p->%s when %lld\n", peer,
	       libcfs_nid2str(nid), *when);
}
int
kgnilnd_dev_init(kgn_device_t *dev)
{
	gni_return_t rrc;
	int          rc = 0;
	unsigned int cq_size;

	/* size of these CQs should be able to accommodate the outgoing
	 * RDMA and SMSG transactions.  Since we really don't know what we
	 * really need here, we'll take credits * 2 * 3 to allow a bunch.
	 * We need to dig into this more with the performance work. */
	cq_size = *kgnilnd_tunables.kgn_credits * 2 * 3;

	rrc = kgnilnd_cdm_create(dev->gnd_id, *kgnilnd_tunables.kgn_ptag,
				 *kgnilnd_tunables.kgn_pkey, 0,
				 &dev->gnd_domain);
	if (rrc != GNI_RC_SUCCESS) {
		CERROR("Can't create CDM %d (%d)\n", dev->gnd_id, rrc);
		GOTO(failed, rc = -ENODEV);
	}

	rrc = kgnilnd_cdm_attach(dev->gnd_domain, dev->gnd_id,
				 &dev->gnd_host_id, &dev->gnd_handle);
	if (rrc != GNI_RC_SUCCESS) {
		CERROR("Can't attach CDM to device %d (%d)\n",
			dev->gnd_id, rrc);
		GOTO(failed, rc = -ENODEV);
	}

	/* a bit gross, but not much we can do - Aries Sim doesn't have
	 * hardcoded NIC/NID that we can use */
	rc = kgnilnd_setup_nic_translation(dev->gnd_host_id);
	if (rc != 0)
		GOTO(failed, rc = -ENODEV);

	/* only dev 0 gets the errors - no need to reset the stack twice
	 * - this works because we have a single PTAG, if we had more
	 * then we'd need to have multiple handlers */
	if (dev->gnd_id == 0) {
		rrc = kgnilnd_subscribe_errors(dev->gnd_handle,
					       GNI_ERRMASK_CRITICAL |
					       GNI_ERRMASK_UNKNOWN_TRANSACTION,
					       0, NULL, kgnilnd_critical_error,
					       &dev->gnd_err_handle);
		if (rrc != GNI_RC_SUCCESS) {
			CERROR("Can't subscribe for errors on device %d: rc %d\n",
				dev->gnd_id, rrc);
			GOTO(failed, rc = -ENODEV);
		}

		rc = kgnilnd_set_quiesce_callback(dev->gnd_handle,
						  kgnilnd_quiesce_end_callback);
		if (rc != GNI_RC_SUCCESS) {
			CERROR("Can't subscribe for quiesce callback on device %d: rc %d\n",
				dev->gnd_id, rc);
			GOTO(failed, rc = -ENODEV);
		}
	}

	rc = kgnilnd_nicaddr_to_nid(dev->gnd_host_id, &dev->gnd_nid);
	if (rc < 0) {
		/* log messages during startup */
		if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
			CERROR("couldn't translate host_id 0x%x to nid. rc %d\n",
				dev->gnd_host_id, rc);
		}
		GOTO(failed, rc = -ESRCH);
	}
	CDEBUG(D_NET, "NIC %x -> NID %d\n", dev->gnd_host_id, dev->gnd_nid);

	rrc = kgnilnd_cq_create(dev->gnd_handle, *kgnilnd_tunables.kgn_credits,
				0, kgnilnd_device_callback,
				dev->gnd_id, &dev->gnd_snd_rdma_cqh);
	if (rrc != GNI_RC_SUCCESS) {
		CERROR("Can't create rdma send cq size %u for device "
		       "%d (%d)\n", cq_size, dev->gnd_id, rrc);
		GOTO(failed, rc = -EINVAL);
	}

	rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
				0, kgnilnd_device_callback, dev->gnd_id,
				&dev->gnd_snd_fma_cqh);
	if (rrc != GNI_RC_SUCCESS) {
		CERROR("Can't create fma send cq size %u for device %d (%d)\n",
		       cq_size, dev->gnd_id, rrc);
		GOTO(failed, rc = -EINVAL);
	}

	/* This one we size differently - overflows are possible and it needs to be
	 * sized based on machine size */
	rrc = kgnilnd_cq_create(dev->gnd_handle,
				*kgnilnd_tunables.kgn_fma_cq_size,
				0, kgnilnd_device_callback, dev->gnd_id,
				&dev->gnd_rcv_fma_cqh);
	if (rrc != GNI_RC_SUCCESS) {
		CERROR("Can't create fma cq size %d for device %d (%d)\n",
		       *kgnilnd_tunables.kgn_fma_cq_size, dev->gnd_id, rrc);
		GOTO(failed, rc = -EINVAL);
	}

	rrc = kgnilnd_register_smdd_buf(dev);
	if (rrc != GNI_RC_SUCCESS) {
		GOTO(failed, rc = -EINVAL);
	}

	return 0;

failed:
	kgnilnd_dev_fini(dev);
	return rc;
}
void
kgnilnd_dev_fini(kgn_device_t *dev)
{
	gni_return_t rrc;
	ENTRY;

	/* At quiesce or rest time, need to loop through and clear gnd_ready_conns ? */
	LASSERTF(list_empty(&dev->gnd_ready_conns) &&
		 list_empty(&dev->gnd_map_tx) &&
		 list_empty(&dev->gnd_rdmaq) &&
		 list_empty(&dev->gnd_delay_conns),
		 "dev 0x%p ready_conns %d@0x%p delay_conns %d@0x%p "
		 "map_tx %d@0x%p rdmaq %d@0x%p\n",
		 dev, kgnilnd_count_list(&dev->gnd_ready_conns), &dev->gnd_ready_conns,
		 kgnilnd_count_list(&dev->gnd_delay_conns), &dev->gnd_delay_conns,
		 kgnilnd_count_list(&dev->gnd_map_tx), &dev->gnd_map_tx,
		 kgnilnd_count_list(&dev->gnd_rdmaq), &dev->gnd_rdmaq);

	/* These should follow from tearing down all connections */
	LASSERTF(dev->gnd_map_nphys == 0 && dev->gnd_map_physnop == 0,
		 "%d physical mappings of %d pages still mapped\n",
		 dev->gnd_map_nphys, dev->gnd_map_physnop);

	LASSERTF(dev->gnd_map_nvirt == 0 && dev->gnd_map_virtnob == 0,
		 "%d virtual mappings of %llu bytes still mapped\n",
		 dev->gnd_map_nvirt, dev->gnd_map_virtnob);

	LASSERTF(atomic_read(&dev->gnd_n_mdd) == 0 &&
		 atomic_read(&dev->gnd_n_mdd_held) == 0 &&
		 atomic64_read(&dev->gnd_nbytes_map) == 0,
		 "%d SMSG mappings of %ld bytes still mapped or held %d\n",
		 atomic_read(&dev->gnd_n_mdd),
		 atomic64_read(&dev->gnd_nbytes_map),
		 atomic_read(&dev->gnd_n_mdd_held));

	LASSERT(list_empty(&dev->gnd_map_list));

	/* What other assertions needed to ensure all connections torn down ? */

	/* check all counters == 0 (EP, MDD, etc) */

	/* if we are resetting due to quiesce (stack reset), don't check
	 * thread states */
	LASSERTF(kgnilnd_data.kgn_quiesce_trigger ||
		 atomic_read(&kgnilnd_data.kgn_nthreads) == 0,
		 "tried to shutdown with threads active\n");

	if (dev->gnd_smdd_hold_buf) {
		rrc = kgnilnd_deregister_smdd_buf(dev);
		LASSERTF(rrc == GNI_RC_SUCCESS,
			 "bad rc from deregistration of sMDD buffer: %d\n", rrc);
		dev->gnd_smdd_hold_buf = NULL;
	}

	if (dev->gnd_rcv_fma_cqh) {
		rrc = kgnilnd_cq_destroy(dev->gnd_rcv_fma_cqh);
		LASSERTF(rrc == GNI_RC_SUCCESS,
			 "bad rc from gni_cq_destroy on rcv_fma_cqh: %d\n", rrc);
		dev->gnd_rcv_fma_cqh = NULL;
	}

	if (dev->gnd_snd_rdma_cqh) {
		rrc = kgnilnd_cq_destroy(dev->gnd_snd_rdma_cqh);
		LASSERTF(rrc == GNI_RC_SUCCESS,
			 "bad rc from gni_cq_destroy on send_rdma_cqh: %d\n", rrc);
		dev->gnd_snd_rdma_cqh = NULL;
	}

	if (dev->gnd_snd_fma_cqh) {
		rrc = kgnilnd_cq_destroy(dev->gnd_snd_fma_cqh);
		LASSERTF(rrc == GNI_RC_SUCCESS,
			 "bad rc from gni_cq_destroy on snd_fma_cqh: %d\n", rrc);
		dev->gnd_snd_fma_cqh = NULL;
	}

	if (dev->gnd_err_handle) {
		rrc = kgnilnd_release_errors(dev->gnd_err_handle);
		LASSERTF(rrc == GNI_RC_SUCCESS,
			 "bad rc from gni_release_errors: %d\n", rrc);
		dev->gnd_err_handle = NULL;
	}

	if (dev->gnd_domain) {
		rrc = kgnilnd_cdm_destroy(dev->gnd_domain);
		LASSERTF(rrc == GNI_RC_SUCCESS,
			 "bad rc from gni_cdm_destroy: %d\n", rrc);
		dev->gnd_domain = NULL;
	}

	EXIT;
}
int kgnilnd_base_startup(void)
{
	struct timeval       tv;
	int                  pkmem = atomic_read(&libcfs_kmemory);
	int                  rc;
	int                  i;
	int                  j;
	kgn_device_t        *dev;
	struct task_struct  *thrd;

#if defined(CONFIG_CRAY_XT) && !defined(CONFIG_CRAY_COMPUTE)
	/* limit how much memory can be allocated for fma blocks in
	 * instances where many nodes need to reconnect at the same time */
	struct sysinfo si;

	si_meminfo(&si);
	kgnilnd_data.free_pages_limit = si.totalram/4;
#endif

	ENTRY;

	LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_NOTHING,
		 "init %d\n", kgnilnd_data.kgn_init);

	/* zero pointers, flags etc */
	memset(&kgnilnd_data, 0, sizeof(kgnilnd_data));
	kgnilnd_check_kgni_version();

	/* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
	 * a unique (for all time) connstamp so we can uniquely identify
	 * the sender. The connstamp is an incrementing counter
	 * initialised with seconds + microseconds at startup time. So we
	 * rely on NOT creating connections more frequently on average than
	 * 1MHz to ensure we don't use old connstamps when we reboot. */
	do_gettimeofday(&tv);
	kgnilnd_data.kgn_connstamp =
	kgnilnd_data.kgn_peerstamp =
		(((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
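	/* Illustrative arithmetic: booting at tv_sec = 1600000000 with
	 * tv_usec = 42 seeds both stamps with
	 *   1600000000ULL * 1000000 + 42 = 1600000000000042
	 * A reboot one second later starts at 1600000001000042, so stale
	 * connstamps can only collide if more than a million connections
	 * were stamped in that second of the previous boot. */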
	init_rwsem(&kgnilnd_data.kgn_net_rw_sem);

	for (i = 0; i < GNILND_MAXDEVS; i++) {
		kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];

		dev->gnd_id = i;
		INIT_LIST_HEAD(&dev->gnd_ready_conns);
		INIT_LIST_HEAD(&dev->gnd_delay_conns);
		INIT_LIST_HEAD(&dev->gnd_map_tx);
		INIT_LIST_HEAD(&dev->gnd_fma_buffs);
		mutex_init(&dev->gnd_cq_mutex);
		mutex_init(&dev->gnd_fmablk_mutex);
		spin_lock_init(&dev->gnd_fmablk_lock);
		init_waitqueue_head(&dev->gnd_waitq);
		init_waitqueue_head(&dev->gnd_dgram_waitq);
		init_waitqueue_head(&dev->gnd_dgping_waitq);
		spin_lock_init(&dev->gnd_lock);
		INIT_LIST_HEAD(&dev->gnd_map_list);
		spin_lock_init(&dev->gnd_map_lock);
		atomic_set(&dev->gnd_nfmablk, 0);
		atomic_set(&dev->gnd_fmablk_vers, 1);
		atomic_set(&dev->gnd_neps, 0);
		atomic_set(&dev->gnd_canceled_dgrams, 0);
		INIT_LIST_HEAD(&dev->gnd_connd_peers);
		spin_lock_init(&dev->gnd_connd_lock);
		spin_lock_init(&dev->gnd_dgram_lock);
		spin_lock_init(&dev->gnd_rdmaq_lock);
		INIT_LIST_HEAD(&dev->gnd_rdmaq);
		init_rwsem(&dev->gnd_conn_sem);

		/* alloc & setup nid based dgram table */
		LIBCFS_ALLOC(dev->gnd_dgrams,
			     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);

		if (dev->gnd_dgrams == NULL)
			GOTO(failed, rc = -ENOMEM);

		/* use j for the inner index - reusing i would break out of
		 * the enclosing device loop once i reached the hash size */
		for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++) {
			INIT_LIST_HEAD(&dev->gnd_dgrams[j]);
		}
		atomic_set(&dev->gnd_ndgrams, 0);
		atomic_set(&dev->gnd_nwcdgrams, 0);

		/* setup timer for RDMAQ processing */
		setup_timer(&dev->gnd_rdmaq_timer, kgnilnd_schedule_device_timer,
			    (unsigned long)dev);

		/* setup timer for mapping processing */
		setup_timer(&dev->gnd_map_timer, kgnilnd_schedule_device_timer,
			    (unsigned long)dev);
	}

	/* CQID 0 isn't allowed, set to MAX_MSG_ID - 1 to check for conflicts early */
	kgnilnd_data.kgn_next_cqid = GNILND_MAX_MSG_ID - 1;
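	/* Assumed rationale (not verified against the CQID allocator): CQIDs
	 * are handed out in [1, GNILND_MAX_MSG_ID) and wrap, so seeding the
	 * counter just below the wrap point makes the first connections
	 * exercise the wrap/conflict path immediately instead of hiding it
	 * until long uptimes. */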
	kgnilnd_data.kgn_new_min_timeout = *kgnilnd_tunables.kgn_timeout;
	init_waitqueue_head(&kgnilnd_data.kgn_reaper_waitq);
	init_waitqueue_head(&kgnilnd_data.kgn_ruhroh_waitq);
	spin_lock_init(&kgnilnd_data.kgn_reaper_lock);

	mutex_init(&kgnilnd_data.kgn_quiesce_mutex);
	atomic_set(&kgnilnd_data.kgn_nquiesce, 0);
	atomic_set(&kgnilnd_data.kgn_npending_conns, 0);
	atomic_set(&kgnilnd_data.kgn_npending_unlink, 0);
	atomic_set(&kgnilnd_data.kgn_npending_detach, 0);
	atomic_set(&kgnilnd_data.kgn_rev_offset, 0);
	atomic_set(&kgnilnd_data.kgn_rev_length, 0);
	atomic_set(&kgnilnd_data.kgn_rev_copy_buff, 0);

	/* OK to call kgnilnd_api_shutdown() to cleanup now */
	kgnilnd_data.kgn_init = GNILND_INIT_DATA;
	if (!try_module_get(THIS_MODULE))
		GOTO(failed, rc = -ENOENT);

	rwlock_init(&kgnilnd_data.kgn_peer_conn_lock);

	LIBCFS_ALLOC(kgnilnd_data.kgn_peers,
		     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);

	if (kgnilnd_data.kgn_peers == NULL)
		GOTO(failed, rc = -ENOMEM);

	for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
		INIT_LIST_HEAD(&kgnilnd_data.kgn_peers[i]);
	}

	LIBCFS_ALLOC(kgnilnd_data.kgn_conns,
		     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);

	if (kgnilnd_data.kgn_conns == NULL)
		GOTO(failed, rc = -ENOMEM);

	for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
		INIT_LIST_HEAD(&kgnilnd_data.kgn_conns[i]);
	}

	LIBCFS_ALLOC(kgnilnd_data.kgn_nets,
		     sizeof(struct list_head) * *kgnilnd_tunables.kgn_net_hash_size);

	if (kgnilnd_data.kgn_nets == NULL)
		GOTO(failed, rc = -ENOMEM);

	for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
		INIT_LIST_HEAD(&kgnilnd_data.kgn_nets[i]);
	}

	kgnilnd_data.kgn_mbox_cache =
		kmem_cache_create("kgn_mbox_block", GNILND_MBOX_SIZE, 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (kgnilnd_data.kgn_mbox_cache == NULL) {
		CERROR("Can't create slab for physical mbox blocks\n");
		GOTO(failed, rc = -ENOMEM);
	}

	kgnilnd_data.kgn_rx_cache =
		kmem_cache_create("kgn_rx_t", sizeof(kgn_rx_t), 0, 0, NULL);
	if (kgnilnd_data.kgn_rx_cache == NULL) {
		CERROR("Can't create slab for kgn_rx_t descriptors\n");
		GOTO(failed, rc = -ENOMEM);
	}

	kgnilnd_data.kgn_tx_cache =
		kmem_cache_create("kgn_tx_t", sizeof(kgn_tx_t), 0, 0, NULL);
	if (kgnilnd_data.kgn_tx_cache == NULL) {
		CERROR("Can't create slab for kgn_tx_t\n");
		GOTO(failed, rc = -ENOMEM);
	}

	kgnilnd_data.kgn_tx_phys_cache =
		kmem_cache_create("kgn_tx_phys",
				  LNET_MAX_IOV * sizeof(gni_mem_segment_t),
				  0, 0, NULL);
	if (kgnilnd_data.kgn_tx_phys_cache == NULL) {
		CERROR("Can't create slab for kgn_tx_phys\n");
		GOTO(failed, rc = -ENOMEM);
	}

	kgnilnd_data.kgn_dgram_cache =
		kmem_cache_create("kgn_dgram_t", sizeof(kgn_dgram_t), 0, 0, NULL);
	if (kgnilnd_data.kgn_dgram_cache == NULL) {
		CERROR("Can't create slab for outgoing datagrams\n");
		GOTO(failed, rc = -ENOMEM);
	}

	/* allocate a MAX_IOV array of page pointers for each cpu */
	kgnilnd_data.kgn_cksum_map_pages = kmalloc(num_possible_cpus() * sizeof(struct page *),
						   GFP_KERNEL);
	if (kgnilnd_data.kgn_cksum_map_pages == NULL) {
		CERROR("Can't allocate vmap cksum pages\n");
		GOTO(failed, rc = -ENOMEM);
	}
	kgnilnd_data.kgn_cksum_npages = num_possible_cpus();
	memset(kgnilnd_data.kgn_cksum_map_pages, 0,
	       kgnilnd_data.kgn_cksum_npages * sizeof(struct page *));

	for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
		kgnilnd_data.kgn_cksum_map_pages[i] = kmalloc(LNET_MAX_IOV * sizeof(struct page *),
							      GFP_KERNEL);
		if (kgnilnd_data.kgn_cksum_map_pages[i] == NULL) {
			CERROR("Can't allocate vmap cksum pages for cpu %d\n", i);
			GOTO(failed, rc = -ENOMEM);
		}
	}
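	/* Resulting layout (per the allocations above):
	 *   kgn_cksum_map_pages[cpu]      - one slot per possible CPU
	 *   kgn_cksum_map_pages[cpu][iov] - LNET_MAX_IOV struct page pointers
	 * i.e. each CPU gets a private scratch array for vmapping pages
	 * during checksumming, presumably to avoid cross-CPU contention on
	 * a shared array. */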
	LASSERT(kgnilnd_data.kgn_ndevs == 0);

	/* Use all available GNI devices */
	for (i = 0; i < GNILND_MAXDEVS; i++) {
		dev = &kgnilnd_data.kgn_devices[kgnilnd_data.kgn_ndevs];

		rc = kgnilnd_dev_init(dev);
		if (rc == 0) {
			/* Increment here so base_shutdown cleans it up */
			kgnilnd_data.kgn_ndevs++;

			rc = kgnilnd_allocate_phys_fmablk(dev);
			if (rc)
				GOTO(failed, rc);
		}
	}

	if (kgnilnd_data.kgn_ndevs == 0) {
		CERROR("Can't initialise any GNI devices\n");
		GOTO(failed, rc = -ENODEV);
	}

	rc = kgnilnd_thread_start(kgnilnd_reaper, NULL, "kgnilnd_rpr", 0);
	if (rc != 0) {
		CERROR("Can't spawn gnilnd reaper: %d\n", rc);
		GOTO(failed, rc);
	}

	rc = kgnilnd_start_rca_thread();
	if (rc != 0) {
		CERROR("Can't spawn gnilnd rca: %d\n", rc);
		GOTO(failed, rc);
	}

	/*
	 * Start ruhroh thread. We can't use kgnilnd_thread_start() because
	 * we don't want this thread included in kgnilnd_data.kgn_nthreads
	 * count. This thread controls quiesce, so it mustn't exit until
	 * explicitly told to do so.
	 */
	thrd = kthread_run(kgnilnd_ruhroh_thread, NULL, "%s_%02d", "kgnilnd_rr", 0);
	if (IS_ERR(thrd)) {
		rc = PTR_ERR(thrd);
		CERROR("Can't spawn gnilnd ruhroh thread: %d\n", rc);
		GOTO(failed, rc);
	}

	/* threads will load balance across devs as they are available */
	if (*kgnilnd_tunables.kgn_thread_affinity) {
		rc = kgnilnd_start_sd_threads();
		if (rc != 0)
			GOTO(failed, rc);
	} else {
		for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
			rc = kgnilnd_thread_start(kgnilnd_scheduler,
						  (void *)((long)i),
						  "kgnilnd_sd", i);
			if (rc != 0) {
				CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
				       i, rc);
				GOTO(failed, rc);
			}
		}
	}

	for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
		dev = &kgnilnd_data.kgn_devices[i];
		rc = kgnilnd_thread_start(kgnilnd_dgram_mover, dev,
					  "kgnilnd_dg", dev->gnd_id);
		if (rc != 0) {
			CERROR("Can't spawn gnilnd dgram_mover[%d]: %d\n",
			       dev->gnd_id, rc);
			GOTO(failed, rc);
		}

		rc = kgnilnd_thread_start(kgnilnd_dgram_waitq, dev,
					  "kgnilnd_dgn", dev->gnd_id);
		if (rc != 0) {
			CERROR("Can't spawn gnilnd dgram_waitq[%d]: %d\n",
			       dev->gnd_id, rc);
			GOTO(failed, rc);
		}

		rc = kgnilnd_setup_wildcard_dgram(dev);
		if (rc != 0) {
			CERROR("Can't create wildcard dgrams[%d]: %d\n",
			       dev->gnd_id, rc);
			GOTO(failed, rc);
		}
	}

	/* flag everything initialised */
	kgnilnd_data.kgn_init = GNILND_INIT_ALL;
	/*****************************************************/

	CDEBUG(D_MALLOC, "initial kmem %d\n", pkmem);
	RETURN(0);

failed:
	kgnilnd_base_shutdown();
	kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
	RETURN(rc);
}
void
kgnilnd_base_shutdown(void)
{
	int i;
	int j;
	ENTRY;

	while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_PAUSE_SHUTDOWN, 1)) {};

	kgnilnd_data.kgn_wc_kill = 1;

	for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
		kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
		kgnilnd_cancel_wc_dgrams(dev);
		kgnilnd_cancel_dgrams(dev);
		kgnilnd_del_conn_or_peer(NULL, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
		kgnilnd_wait_for_canceled_dgrams(dev);
	}

	/* We need to verify there are no conns left before we let the threads
	 * shut down otherwise we could clean up the peers but still have
	 * some outstanding conns due to orphaned datagram conns that are
	 * still being cleaned up.
	 */
	i = 2;
	while (atomic_read(&kgnilnd_data.kgn_nconns) != 0) {
		i++;

		for (j = 0; j < kgnilnd_data.kgn_ndevs; ++j) {
			kgn_device_t *dev = &kgnilnd_data.kgn_devices[j];
			kgnilnd_schedule_device(dev);
		}

		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
		       "Waiting for conns to be cleaned up %d\n",
		       atomic_read(&kgnilnd_data.kgn_nconns));
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));
	}
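	/* Note on the log-level test: (i & (-i)) == i is true only when i is
	 * a power of two, so the wait loops in this function escalate to
	 * D_WARNING only on iterations 4, 8, 16, ... and stay at quiet D_NET
	 * otherwise - cheap exponential backoff for console noise while
	 * still showing progress. */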
	/* Peer state all cleaned up BEFORE setting shutdown, so threads don't
	 * have to worry about shutdown races. NB connections may be created
	 * while there are still active connds, but these will be temporary
	 * since peer creation always fails after the listener has started to
	 * shut down.
	 * all peers should have been cleared out on the nets */
	LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
		 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));

	/* Wait for the ruhroh thread to shut down. */
	kgnilnd_data.kgn_ruhroh_shutdown = 1;
	wake_up(&kgnilnd_data.kgn_ruhroh_waitq);
	i = 2;
	while (kgnilnd_data.kgn_ruhroh_running != 0) {
		i++;
		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
		       "Waiting for ruhroh thread to terminate\n");
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));
	}

	/* Flag threads to terminate */
	kgnilnd_data.kgn_shutdown = 1;

	for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
		kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];

		/* should clear all the MDDs */
		kgnilnd_unmap_fma_blocks(dev);

		kgnilnd_schedule_device(dev);
		wake_up_all(&dev->gnd_dgram_waitq);
		wake_up_all(&dev->gnd_dgping_waitq);
		LASSERT(list_empty(&dev->gnd_connd_peers));
	}

	spin_lock(&kgnilnd_data.kgn_reaper_lock);
	wake_up_all(&kgnilnd_data.kgn_reaper_waitq);
	spin_unlock(&kgnilnd_data.kgn_reaper_lock);

	if (atomic_read(&kgnilnd_data.kgn_nthreads))
		kgnilnd_wakeup_rca_thread();

	/* Wait for threads to exit */
	i = 2;
	while (atomic_read(&kgnilnd_data.kgn_nthreads) != 0) {
		i++;
		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
		       "Waiting for %d threads to terminate\n",
		       atomic_read(&kgnilnd_data.kgn_nthreads));
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));
	}

	LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
		 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));

	if (kgnilnd_data.kgn_peers != NULL) {
		for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
			LASSERT(list_empty(&kgnilnd_data.kgn_peers[i]));

		LIBCFS_FREE(kgnilnd_data.kgn_peers,
			    sizeof(struct list_head) *
			    *kgnilnd_tunables.kgn_peer_hash_size);
	}

	down_write(&kgnilnd_data.kgn_net_rw_sem);
	if (kgnilnd_data.kgn_nets != NULL) {
		for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++)
			LASSERT(list_empty(&kgnilnd_data.kgn_nets[i]));

		LIBCFS_FREE(kgnilnd_data.kgn_nets,
			    sizeof(struct list_head) *
			    *kgnilnd_tunables.kgn_net_hash_size);
	}
	up_write(&kgnilnd_data.kgn_net_rw_sem);

	LASSERTF(atomic_read(&kgnilnd_data.kgn_nconns) == 0,
		 "conns left %d\n", atomic_read(&kgnilnd_data.kgn_nconns));

	if (kgnilnd_data.kgn_conns != NULL) {
		for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
			LASSERT(list_empty(&kgnilnd_data.kgn_conns[i]));

		LIBCFS_FREE(kgnilnd_data.kgn_conns,
			    sizeof(struct list_head) *
			    *kgnilnd_tunables.kgn_peer_hash_size);
	}

	for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
		kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
		kgnilnd_dev_fini(dev);

		LASSERTF(atomic_read(&dev->gnd_ndgrams) == 0,
			 "dgrams left %d\n", atomic_read(&dev->gnd_ndgrams));

		if (dev->gnd_dgrams != NULL) {
			/* use j here - reusing i would terminate the outer
			 * device loop early once i reached the hash size */
			for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++)
				LASSERT(list_empty(&dev->gnd_dgrams[j]));

			LIBCFS_FREE(dev->gnd_dgrams,
				    sizeof(struct list_head) *
				    *kgnilnd_tunables.kgn_peer_hash_size);
		}

		kgnilnd_free_phys_fmablk(dev);
	}

	if (kgnilnd_data.kgn_mbox_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_mbox_cache);

	if (kgnilnd_data.kgn_rx_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_rx_cache);

	if (kgnilnd_data.kgn_tx_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_tx_cache);

	if (kgnilnd_data.kgn_tx_phys_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_tx_phys_cache);

	if (kgnilnd_data.kgn_dgram_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_dgram_cache);

	if (kgnilnd_data.kgn_cksum_map_pages != NULL) {
		for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
			if (kgnilnd_data.kgn_cksum_map_pages[i] != NULL)
				kfree(kgnilnd_data.kgn_cksum_map_pages[i]);
		}
		kfree(kgnilnd_data.kgn_cksum_map_pages);
	}

	CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
	       atomic_read(&libcfs_kmemory));

	kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
	module_put(THIS_MODULE);

	EXIT;
}
int
kgnilnd_startup(struct lnet_ni *ni)
{
	int        rc, devno;
	kgn_net_t *net;
	ENTRY;

	LASSERTF(ni->ni_net->net_lnd == &the_kgnilnd,
		 "bad LND 0x%p != the_kgnilnd @ 0x%p\n",
		 ni->ni_net->net_lnd, &the_kgnilnd);

	if (kgnilnd_data.kgn_init == GNILND_INIT_NOTHING) {
		rc = kgnilnd_base_startup();
		if (rc != 0)
			RETURN(rc);
	}

	/* Serialize with shutdown. */
	mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);

	LIBCFS_ALLOC(net, sizeof(*net));
	if (net == NULL) {
		CERROR("could not allocate net for new interface instance\n");
		/* no need to cleanup the CDM... */
		GOTO(failed, rc = -ENOMEM);
	}
	INIT_LIST_HEAD(&net->gnn_list);
	ni->ni_data = net;
	net->gnn_ni = ni;

	if (!ni->ni_net->net_tunables_set) {
		ni->ni_net->net_tunables.lct_max_tx_credits =
			*kgnilnd_tunables.kgn_credits;
		ni->ni_net->net_tunables.lct_peer_tx_credits =
			*kgnilnd_tunables.kgn_peer_credits;
	}

	if (*kgnilnd_tunables.kgn_peer_health) {
		int fudge;
		int timeout;

		/* give this a bit of leeway - we don't have a hard timeout
		 * as we only check timeouts periodically - see comment in kgnilnd_reaper */
		fudge = (GNILND_TO2KA(*kgnilnd_tunables.kgn_timeout) / GNILND_REAPER_NCHECKS);
		timeout = *kgnilnd_tunables.kgn_timeout + fudge;

		if (*kgnilnd_tunables.kgn_peer_timeout >= timeout) {
			ni->ni_net->net_tunables.lct_peer_timeout =
				*kgnilnd_tunables.kgn_peer_timeout;
		} else if (*kgnilnd_tunables.kgn_peer_timeout > -1) {
			LCONSOLE_ERROR("Peer_timeout is set to %d but needs to be >= %d\n",
				       *kgnilnd_tunables.kgn_peer_timeout,
				       timeout);
			ni->ni_data = NULL;
			LIBCFS_FREE(net, sizeof(*net));
			GOTO(failed, rc = -EINVAL);
		} else {
			ni->ni_net->net_tunables.lct_peer_timeout = timeout;
		}

		LCONSOLE_INFO("Enabling LNet peer health for gnilnd, timeout %ds\n",
			      ni->ni_net->net_tunables.lct_peer_timeout);
	}

	atomic_set(&net->gnn_refcount, 1);

	/* if we have multiple devices, spread the nets around */
	net->gnn_netnum = LNET_NETNUM(LNET_NIDNET(ni->ni_nid));

	devno = LNET_NIDNET(ni->ni_nid) % GNILND_MAXDEVS;
	net->gnn_dev = &kgnilnd_data.kgn_devices[devno];
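	/* Illustrative example: if GNILND_MAXDEVS were 2, nets with even net
	 * numbers would land on device 0 and odd ones on device 1,
	 * alternating as additional gni nets are configured. */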

	/* allocate a 'dummy' cdm for datagram use. We can only have a single
	 * datagram between a nid:inst_id and nid2:inst_id. The fake cdm
	 * gives us additional inst_ids to use, allowing the datagrams to flow
	 * like rivers of honey and beer */

	/* the instance id for the cdm is the NETNUM offset by MAXDEVS -
	 * ensuring we'll have a unique id */

	ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), net->gnn_dev->gnd_nid);
	CDEBUG(D_NET, "adding net %p nid=%s on dev %d\n",
	       net, libcfs_nid2str(ni->ni_nid), net->gnn_dev->gnd_id);

	/* until the gnn_list is set, we need to cleanup ourselves as
	 * kgnilnd_shutdown is just gonna get confused */

	down_write(&kgnilnd_data.kgn_net_rw_sem);
	list_add_tail(&net->gnn_list, kgnilnd_netnum2netlist(net->gnn_netnum));
	up_write(&kgnilnd_data.kgn_net_rw_sem);

	/* we need a separate thread to call probe_wait_by_id until
	 * we get a function callback notifier from kgni */
	mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
	RETURN(0);

failed:
	mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
	kgnilnd_shutdown(ni);
	RETURN(rc);
}
void
kgnilnd_shutdown(struct lnet_ni *ni)
{
	kgn_net_t *net = ni->ni_data;
	int        i;
	int        rc = 0;
	ENTRY;

	CFS_RACE(CFS_FAIL_GNI_SR_DOWN_RACE);

	LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_ALL,
		 "init %d\n", kgnilnd_data.kgn_init);

	/* Serialize with startup. */
	mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
	CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
	       atomic_read(&libcfs_kmemory));

	if (net == NULL) {
		CERROR("got NULL net for ni %p\n", ni);
		GOTO(out, rc = -EINVAL);
	}

	LASSERTF(ni == net->gnn_ni,
		 "ni %p gnn_ni %p\n", ni, net->gnn_ni);

	LASSERT(!net->gnn_shutdown);
	LASSERTF(atomic_read(&net->gnn_refcount) != 0,
		 "net %p refcount %d\n",
		 net, atomic_read(&net->gnn_refcount));

	if (!list_empty(&net->gnn_list)) {
		/* serialize with peer creation */
		down_write(&kgnilnd_data.kgn_net_rw_sem);
		net->gnn_shutdown = 1;
		up_write(&kgnilnd_data.kgn_net_rw_sem);

		kgnilnd_cancel_net_dgrams(net);

		kgnilnd_del_conn_or_peer(net, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);

		/* if we are quiesced, need to wake up - we need those threads
		 * alive to release peers, etc */
		if (GNILND_IS_QUIESCED) {
			set_mb(kgnilnd_data.kgn_quiesce_trigger, GNILND_QUIESCE_IDLE);
			kgnilnd_quiesce_wait("shutdown");
		}

		kgnilnd_wait_for_canceled_dgrams(net->gnn_dev);

		/* We wait until the nets ref's are 1, we will release final ref which is ours
		 * this allows us to make sure everything else is done before we free the
		 * net.
		 */
		i = 4;
		while (atomic_read(&net->gnn_refcount) != 1) {
			i++;
			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
			       "Waiting for %d references to clear on net %d\n",
			       atomic_read(&net->gnn_refcount),
			       net->gnn_netnum);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(cfs_time_seconds(1));
		}

		/* release ref from kgnilnd_startup */
		kgnilnd_net_decref(net);

		/* serialize with reaper and conn_task looping */
		down_write(&kgnilnd_data.kgn_net_rw_sem);
		list_del_init(&net->gnn_list);
		up_write(&kgnilnd_data.kgn_net_rw_sem);
	}

	/* not locking, this can't race with writers */
	LASSERTF(atomic_read(&net->gnn_refcount) == 0,
		 "net %p refcount %d\n",
		 net, atomic_read(&net->gnn_refcount));
	LIBCFS_FREE(net, sizeof(*net));

out:
	down_read(&kgnilnd_data.kgn_net_rw_sem);
	for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
		if (!list_empty(&kgnilnd_data.kgn_nets[i])) {
			up_read(&kgnilnd_data.kgn_net_rw_sem);
			break;
		}

		if (i == *kgnilnd_tunables.kgn_net_hash_size - 1) {
			/* no nets left on any list - tear the base down */
			up_read(&kgnilnd_data.kgn_net_rw_sem);
			kgnilnd_base_shutdown();
		}
	}

	CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
	       atomic_read(&libcfs_kmemory));

	mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
	EXIT;
}
static void __exit kgnilnd_exit(void)
{
	lnet_unregister_lnd(&the_kgnilnd);
	kgnilnd_proc_fini();
	kgnilnd_remove_sysctl();
}

static int __init kgnilnd_init(void)
{
	int rc;

	rc = kgnilnd_tunables_init();
	if (rc != 0)
		return rc;

	LCONSOLE_INFO("Lustre: kgnilnd build version: "LUSTRE_VERSION_STRING"\n");

	kgnilnd_insert_sysctl();
	kgnilnd_proc_init();

	lnet_register_lnd(&the_kgnilnd);

	return 0;
}

MODULE_AUTHOR("Cray, Inc. <nic@cray.com>");
MODULE_DESCRIPTION("Gemini LNet Network Driver");
MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");

module_init(kgnilnd_init);
module_exit(kgnilnd_exit);