/*
 * Copyright (C) 2012 Cray, Inc.
 *
 * Copyright (c) 2013, 2014, Intel Corporation.
 *
 * Author: Nic Henke <nic@cray.com>
 * Author: James Shimek <jshimek@cray.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/* Primary entry points from LNET. There are no guarantees against reentrance. */
lnd_t the_kgnilnd = {
        .lnd_startup    = kgnilnd_startup,
        .lnd_shutdown   = kgnilnd_shutdown,
        .lnd_ctl        = kgnilnd_ctl,
        .lnd_send       = kgnilnd_send,
        .lnd_recv       = kgnilnd_recv,
        .lnd_eager_recv = kgnilnd_eager_recv,
        .lnd_query      = kgnilnd_query,
};
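/* single static instance holding all global state for this LND, shared
 * by every device and net the module drives */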
kgn_data_t kgnilnd_data;
kgnilnd_thread_start(int (*fn)(void *arg), void *arg, char *name, int id)
{
        struct task_struct *thrd;

        thrd = kthread_run(fn, arg, "%s_%02d", name, id);

        atomic_inc(&kgnilnd_data.kgn_nthreads);
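        /* kgn_nthreads is the accounting kgnilnd_dev_fini checks at shutdown
         * to ensure every thread started here has actually exited */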
/* bind scheduler threads to cpus */
kgnilnd_start_sd_threads(void)
{
        struct task_struct *task;

        for_each_online_cpu(cpu) {
                /* don't bind to cpu 0 - all interrupts are processed here */

                task = kthread_create(kgnilnd_scheduler, (void *)((long)i),
                                      "%s_%02d", "kgnilnd_sd", i);

                kthread_bind(task, cpu);
                wake_up_process(task);
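                /* NB: bind before waking - kthread_create leaves the thread
                 * stopped, so the affinity is in place before it ever runs */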
                CERROR("Can't spawn gnilnd scheduler[%d] %ld\n", i,

                atomic_inc(&kgnilnd_data.kgn_nthreads);

                if (++i >= *kgnilnd_tunables.kgn_sched_threads) {
/* needs write_lock on kgn_peer_conn_lock */
kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
        struct list_head *ctmp, *cnxt;

        loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;

        list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
                conn = list_entry(ctmp, kgn_conn_t, gnc_list);

                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)

                if (conn->gnc_device != newconn->gnc_device)

                /* This is a two connection loopback - one talking to the other */
                if (loopback &&
                    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
                    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp) {
                        CDEBUG(D_NET, "skipping prune of %p, "
                               "loopback and matching stamps"
                               " connstamp "LPU64"("LPU64")"
                               " peerstamp "LPU64"("LPU64")\n",
                               conn, newconn->gnc_my_connstamp,
                               conn->gnc_peer_connstamp,
                               newconn->gnc_peer_connstamp,
                               conn->gnc_my_connstamp);
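                        /* matching my/peer stamps mean these two conns are the
                         * two ends of the loopback pair - pruning one would
                         * tear down the conn we are installing */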
                if (conn->gnc_peerstamp != newconn->gnc_peerstamp) {
                        LASSERTF(conn->gnc_peerstamp < newconn->gnc_peerstamp,
                                 "conn 0x%p peerstamp "LPU64" >= "
                                 "newconn 0x%p peerstamp "LPU64"\n",
                                 conn, conn->gnc_peerstamp,
                                 newconn, newconn->gnc_peerstamp);

                        CDEBUG(D_NET, "Closing stale conn nid: %s"
                               " peerstamp:"LPX64"("LPX64")\n",
                               libcfs_nid2str(peer->gnp_nid),
                               conn->gnc_peerstamp, newconn->gnc_peerstamp);

                        LASSERTF(conn->gnc_peer_connstamp < newconn->gnc_peer_connstamp,
                                 "conn 0x%p peer_connstamp "LPU64" >= "
                                 "newconn 0x%p peer_connstamp "LPU64"\n",
                                 conn, conn->gnc_peer_connstamp,
                                 newconn, newconn->gnc_peer_connstamp);

                        CDEBUG(D_NET, "Closing stale conn nid: %s"
                               " connstamp:"LPU64"("LPU64")\n",
                               libcfs_nid2str(peer->gnp_nid),
                               conn->gnc_peer_connstamp, newconn->gnc_peer_connstamp);

                kgnilnd_close_conn_locked(conn, -ESTALE);

        CWARN("Closed %d stale conns to %s\n", count, libcfs_nid2str(peer->gnp_nid));
kgnilnd_conn_isdup_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
        struct list_head *tmp;

        loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;

        list_for_each(tmp, &peer->gnp_conns) {
                conn = list_entry(tmp, kgn_conn_t, gnc_list);
                CDEBUG(D_NET, "checking conn 0x%p for peer %s"
                       " lo %d new "LPU64" existing "LPU64
                       " new peer "LPU64" existing peer "LPU64
                       " new dev %p existing dev %p\n",
                       conn, libcfs_nid2str(peer->gnp_nid),
                       newconn->gnc_peerstamp, conn->gnc_peerstamp,
                       newconn->gnc_peer_connstamp, conn->gnc_peer_connstamp,
                       newconn->gnc_device, conn->gnc_device);
                /* conn is in the process of closing */
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)

                /* 'newconn' is from an earlier version of 'peer'!!! */
                if (newconn->gnc_peerstamp < conn->gnc_peerstamp)

                /* 'conn' is from an earlier version of 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->gnc_peerstamp > conn->gnc_peerstamp)

                /* Different devices are OK */
                if (conn->gnc_device != newconn->gnc_device)

                /* It's me connecting to myself */
                if (loopback &&
                    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
                    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp)

                /* 'newconn' is an earlier connection from 'peer'!!! */
                if (newconn->gnc_peer_connstamp < conn->gnc_peer_connstamp)

                /* 'conn' is an earlier connection from 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->gnc_peer_connstamp > conn->gnc_peer_connstamp)

                /* 'newconn' has the SAME connection stamp; 'peer' isn't
                 * playing the game... */
kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
{
        LASSERT(!in_interrupt());
        atomic_inc(&kgnilnd_data.kgn_nconns);

        /* divide by 2 to allow for complete reset and immediate reconnect */
        if (atomic_read(&kgnilnd_data.kgn_nconns) >= GNILND_MAX_CQID/2) {
                CERROR("Too many conns are live: %d > %d\n",
                       atomic_read(&kgnilnd_data.kgn_nconns), GNILND_MAX_CQID/2);
                atomic_dec(&kgnilnd_data.kgn_nconns);

        LIBCFS_ALLOC(conn, sizeof(*conn));
        if (conn == NULL) {
                atomic_dec(&kgnilnd_data.kgn_nconns);

        LIBCFS_ALLOC(conn->gnc_tx_ref_table, GNILND_MAX_MSG_ID * sizeof(void *));
        if (conn->gnc_tx_ref_table == NULL) {
                CERROR("Can't allocate conn tx_ref_table\n");
                GOTO(failed, rc = -ENOMEM);
        mutex_init(&conn->gnc_smsg_mutex);
        mutex_init(&conn->gnc_rdma_mutex);
        atomic_set(&conn->gnc_refcount, 1);
        atomic_set(&conn->gnc_reaper_noop, 0);
        atomic_set(&conn->gnc_sched_noop, 0);
        atomic_set(&conn->gnc_tx_in_use, 0);
        INIT_LIST_HEAD(&conn->gnc_list);
        INIT_LIST_HEAD(&conn->gnc_hashlist);
        INIT_LIST_HEAD(&conn->gnc_schedlist);
        INIT_LIST_HEAD(&conn->gnc_fmaq);
        INIT_LIST_HEAD(&conn->gnc_mdd_list);
        spin_lock_init(&conn->gnc_list_lock);
        spin_lock_init(&conn->gnc_tx_lock);
        conn->gnc_magic = GNILND_CONN_MAGIC;

        /* set tx id to nearly the end to make sure we find wrapping
         * issues soon */
        conn->gnc_next_tx = (int) GNILND_MAX_MSG_ID - 10;

        /* if this fails, we have conflicts and MAX_TX is too large */
        CLASSERT(GNILND_MAX_MSG_ID < GNILND_MSGID_CLOSE);
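        /* tx ids also index gnc_tx_ref_table, so keeping the whole id space
         * below GNILND_MSGID_CLOSE guarantees a normal tx id can never be
         * confused with the reserved CLOSE msgid */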
        /* get a new unique CQ id for this conn */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        conn->gnc_my_connstamp = kgnilnd_data.kgn_connstamp++;
        conn->gnc_cqid = kgnilnd_get_cqid_locked();
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        if (conn->gnc_cqid == 0) {
                CERROR("Could not allocate unique CQ ID for conn 0x%p\n", conn);
                GOTO(failed, rc = -E2BIG);

        CDEBUG(D_NET, "alloc cqid %u for conn 0x%p\n",
               conn->gnc_cqid, conn);

        /* gnc_device must be set before gnc_ephandle so that
         * kgnilnd_destroy_conn_ep can take the right device's CQ mutex */
        conn->gnc_device = dev;

        conn->gnc_timeout = MAX(*kgnilnd_tunables.kgn_timeout,
        kgnilnd_update_reaper_timeout(conn->gnc_timeout);
        /* this is the ep_handle for doing SMSG & BTE */
        mutex_lock(&dev->gnd_cq_mutex);
        rrc = kgnilnd_ep_create(dev->gnd_handle, dev->gnd_snd_fma_cqh,
                                &conn->gnc_ephandle);
        mutex_unlock(&dev->gnd_cq_mutex);
        if (rrc != GNI_RC_SUCCESS)
                GOTO(failed, rc = -ENETDOWN);

        CDEBUG(D_NET, "created conn 0x%p ep_hndl 0x%p\n",
               conn, conn->gnc_ephandle);

        /* add ref for EP canceling */
        kgnilnd_conn_addref(conn);
        atomic_inc(&dev->gnd_neps);
failed:
        atomic_dec(&kgnilnd_data.kgn_nconns);
        LIBCFS_FREE(conn->gnc_tx_ref_table, GNILND_MAX_MSG_ID * sizeof(void *));
        LIBCFS_FREE(conn, sizeof(*conn));
/* needs to be called with kgn_peer_conn_lock held (read or write) */
kgnilnd_find_conn_locked(kgn_peer_t *peer)
{
        kgn_conn_t *conn = NULL;

        /* if we are in reset, this conn is going to die soon */
        if (unlikely(kgnilnd_data.kgn_in_reset)) {

        /* just return the first ESTABLISHED connection */
        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                /* kgnilnd_finish_connect doesn't put connections on the
                 * peer list until they are actually established */
                LASSERTF(conn->gnc_state >= GNILND_CONN_ESTABLISHED,
                         "found conn %p state %s on peer %p (%s)\n",
                         conn, kgnilnd_conn_state2str(conn), peer,
                         libcfs_nid2str(peer->gnp_nid));
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
/* needs write_lock on kgn_peer_conn_lock held */
kgnilnd_find_or_create_conn_locked(kgn_peer_t *peer)
{
        kgn_device_t *dev = peer->gnp_net->gnn_dev;

        conn = kgnilnd_find_conn_locked(peer);

        /* if the peer was previously connecting, check if we should
         * trigger another connection attempt yet. */
        if (time_before(jiffies, peer->gnp_reconnect_time)) {

        /* This check prevents us from creating a new connection to a peer while we are
         * still in the process of closing an existing connection to the peer. */
        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_ephandle != NULL) {
                        CDEBUG(D_NET, "Not connecting - non-NULL ephandle found, peer 0x%p->%s\n",
                               peer, libcfs_nid2str(peer->gnp_nid));

        if (peer->gnp_connecting != GNILND_PEER_IDLE) {
                /* only fire up a new connection if we are IDLE - anything
                 * else means a connect attempt is already in flight */

        CDEBUG(D_NET, "starting connect to %s\n",
               libcfs_nid2str(peer->gnp_nid));
        peer->gnp_connecting = GNILND_PEER_CONNECT;
        kgnilnd_peer_addref(peer); /* extra ref for connd */

        spin_lock(&dev->gnd_connd_lock);
        list_add_tail(&peer->gnp_connd_list, &dev->gnd_connd_peers);
        spin_unlock(&dev->gnd_connd_lock);

        kgnilnd_schedule_dgram(dev);
        CDEBUG(D_NETTRACE, "scheduling new connect\n");
/* Caller is responsible for deciding if/when to call this */
kgnilnd_destroy_conn_ep(kgn_conn_t *conn)
{
        gni_ep_handle_t tmp_ep;

        /* only if we actually initialized it - set NULL to tell
         * kgnilnd_destroy_conn to leave it alone */
        tmp_ep = xchg(&conn->gnc_ephandle, NULL);
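        /* the xchg is atomic: exactly one caller can observe the non-NULL
         * handle, so racing close paths cannot both destroy the EP */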
        if (tmp_ep != NULL) {
                /* we never re-use the EP, so unbind is not needed */
                mutex_lock(&conn->gnc_device->gnd_cq_mutex);
                rrc = kgnilnd_ep_destroy(tmp_ep);

                mutex_unlock(&conn->gnc_device->gnd_cq_mutex);

                /* if this fails, it could hork up kgni smsg retransmit and others
                 * since we could free the SMSG mbox memory, etc. */
                LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d conn 0x%p ep 0x%p\n",
                         rrc, conn, conn->gnc_ephandle);

                atomic_dec(&conn->gnc_device->gnd_neps);

                /* clear out count added in kgnilnd_close_conn_locked
                 * conn will have a peer once it hits finish_connect, where it
                 * is the first spot we'll mark it ESTABLISHED as well */
                if (conn->gnc_peer) {
                        kgnilnd_admin_decref(conn->gnc_peer->gnp_dirty_eps);

        /* drop ref for EP */
        kgnilnd_conn_decref(conn);
kgnilnd_destroy_conn(kgn_conn_t *conn)
{
        LASSERTF(!in_interrupt() &&
                 !conn->gnc_scheduled &&
                 !conn->gnc_in_purgatory &&
                 conn->gnc_ephandle == NULL &&
                 list_empty(&conn->gnc_list) &&
                 list_empty(&conn->gnc_hashlist) &&
                 list_empty(&conn->gnc_schedlist) &&
                 list_empty(&conn->gnc_mdd_list) &&
                 conn->gnc_magic == GNILND_CONN_MAGIC,
                 "conn 0x%p->%s IRQ %d sched %d purg %d ep 0x%p Mg %d lists %d/%d/%d/%d\n",
                 conn, conn->gnc_peer ? libcfs_nid2str(conn->gnc_peer->gnp_nid)
                 !!in_interrupt(), conn->gnc_scheduled,
                 conn->gnc_in_purgatory,
                 list_empty(&conn->gnc_list),
                 list_empty(&conn->gnc_hashlist),
                 list_empty(&conn->gnc_schedlist),
                 list_empty(&conn->gnc_mdd_list));

        /* Tripping these is especially bad, as it means we have items on the
         * lists that didn't keep their refcount on the connection - or
         * somebody evil released their own */
        LASSERTF(list_empty(&conn->gnc_fmaq) &&
                 atomic_read(&conn->gnc_nlive_fma) == 0 &&
                 atomic_read(&conn->gnc_nlive_rdma) == 0,
                 "conn 0x%p fmaq %d@0x%p nfma %d nrdma %d\n",
                 conn, kgnilnd_count_list(&conn->gnc_fmaq), &conn->gnc_fmaq,
                 atomic_read(&conn->gnc_nlive_fma), atomic_read(&conn->gnc_nlive_rdma));

        CDEBUG(D_NET, "destroying conn %p ephandle %p error %d\n",
               conn, conn->gnc_ephandle, conn->gnc_error);
        /* we are freeing this memory; remove the magic value from the connection */

        /* if there is an FMA blk left here, we'll tear it down */
        if (conn->gnc_fma_blk) {
                if (conn->gnc_peer) {
                        kgn_mbox_info_t *mbox;

                        mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
                        mbox->mbx_prev_nid = conn->gnc_peer->gnp_nid;

                kgnilnd_release_mbox(conn, 0);

        if (conn->gnc_peer != NULL)
                kgnilnd_peer_decref(conn->gnc_peer);

        if (conn->gnc_tx_ref_table != NULL) {
                LIBCFS_FREE(conn->gnc_tx_ref_table,
                            GNILND_MAX_MSG_ID * sizeof(void *));

        LIBCFS_FREE(conn, sizeof(*conn));
        atomic_dec(&kgnilnd_data.kgn_nconns);
/* peer_alive and peer_notify done in the style of the o2iblnd */
kgnilnd_peer_alive(kgn_peer_t *peer)
{
        set_mb(peer->gnp_last_alive, jiffies);
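        /* set_mb pairs the store with a memory barrier, so readers polling
         * gnp_last_alive lock-free see a fresh timestamp */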
kgnilnd_peer_notify(kgn_peer_t *peer, int error, int alive)
{
        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DONT_NOTIFY))

        /* Tell LNet we are giving up on this peer - but only
         * if it isn't already reconnected or trying to reconnect */
        read_lock(&kgnilnd_data.kgn_peer_conn_lock);

        /* use kgnilnd_find_conn_locked to avoid any conns in the process of being nuked
         *
         * don't tell LNet if we are in reset - we assume that everyone will be able to
         * reconnect just fine */
        conn = kgnilnd_find_conn_locked(peer);

        CDEBUG(D_NETTRACE, "peer 0x%p->%s ting %d conn 0x%p, rst %d error %d\n",
               peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting, conn,
               kgnilnd_data.kgn_in_reset, error);

        if (((peer->gnp_connecting == GNILND_PEER_IDLE) &&
             (!kgnilnd_data.kgn_in_reset) &&
             (!kgnilnd_conn_clean_errno(error))) || alive) {

        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
        /* short circuit if we don't need to notify LNet */

        rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);

        /* don't do this if the trylock fails - LNet is in shutdown
         * or something else is tearing the nets down */

        for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
                        /* if gnn_shutdown is set for any net, shutdown is in progress - just return */
                        if (net->gnn_shutdown) {
                                up_read(&kgnilnd_data.kgn_net_rw_sem);

        /* shutdown in progress most likely */
        up_read(&kgnilnd_data.kgn_net_rw_sem);

        LIBCFS_ALLOC(nets, nnets * sizeof(*nets));
        if (nets == NULL) {
                up_read(&kgnilnd_data.kgn_net_rw_sem);
                CERROR("Failed to allocate nets[%d]\n", nnets);

        for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {

                        kgnilnd_net_addref(net);

        up_read(&kgnilnd_data.kgn_net_rw_sem);

        for (i = 0; i < nnets; i++) {

                peer_nid = kgnilnd_lnd2lnetnid(net->gnn_ni->ni_nid,

                CDEBUG(D_NET, "peer 0x%p->%s last_alive %lu (%lus ago)\n",
                       peer, libcfs_nid2str(peer_nid), peer->gnp_last_alive,
                       cfs_duration_sec(jiffies - peer->gnp_last_alive));

                lnet_notify(net->gnn_ni, peer_nid, alive,
                            peer->gnp_last_alive);

                kgnilnd_net_decref(net);

        LIBCFS_FREE(nets, nnets * sizeof(*nets));
/* need write_lock on kgn_peer_conn_lock */
kgnilnd_close_conn_locked(kgn_conn_t *conn, int error)
{
        kgn_peer_t *peer = conn->gnc_peer;

        LASSERT(!in_interrupt());

        /* store error for tx completion */
        conn->gnc_error = error;
        peer->gnp_last_errno = error;

        /* use real error from peer if possible */
        if (error == -ECONNRESET) {
                error = conn->gnc_peer_error;

        /* if we NETERROR, make sure it is rate limited */
        if (!kgnilnd_conn_clean_errno(error) &&
            peer->gnp_down == GNILND_RCA_NODE_UP) {
                CNETERR("closing conn to %s: error %d\n",
                        libcfs_nid2str(peer->gnp_nid), error);

                CDEBUG(D_NET, "closing conn to %s: error %d\n",
                       libcfs_nid2str(peer->gnp_nid), error);

        LASSERTF(conn->gnc_state == GNILND_CONN_ESTABLISHED,
                 "conn %p to %s with bogus state %s\n", conn,
                 libcfs_nid2str(conn->gnc_peer->gnp_nid),
                 kgnilnd_conn_state2str(conn));
        LASSERT(!list_empty(&conn->gnc_hashlist));
        LASSERT(!list_empty(&conn->gnc_list));

        /* mark peer count here so any place the EP gets destroyed will
         * open up the peer count so that a new ESTABLISHED conn is then free
         * to send new messages -- sending before the previous EPs are destroyed
         * could end up with messages on the network for the old conn _after_
         * the new conn and break the mbox safety protocol */
        kgnilnd_admin_addref(conn->gnc_peer->gnp_dirty_eps);

        /* Remove from conn hash table: no new callbacks */
        list_del_init(&conn->gnc_hashlist);
        kgnilnd_data.kgn_conn_version++;
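        /* the version bump lets anyone walking the conn hash after dropping
         * the lock notice that the table changed under them */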
        kgnilnd_conn_decref(conn);

        /* if we are in reset, go right to CLOSED as there is no scheduler
         * thread to move from CLOSING to CLOSED */
        if (unlikely(kgnilnd_data.kgn_in_reset)) {
                conn->gnc_state = GNILND_CONN_CLOSED;

                conn->gnc_state = GNILND_CONN_CLOSING;

        /* leave on peer->gnp_conns to make sure we don't let the reaper
         * or others try to unlink this peer until the conn is fully
         * processed for closing */

        if (kgnilnd_check_purgatory_conn(conn)) {
                kgnilnd_add_purgatory_locked(conn, conn->gnc_peer);

        /* Reset RX timeout to ensure we wait for an incoming CLOSE
         * for the full timeout.  If we get a CLOSE we know the
         * peer has stopped all RDMA.  Otherwise if we wait for
         * the full timeout we can also be sure all RDMA has stopped. */
        conn->gnc_last_rx = conn->gnc_last_rx_cq = jiffies;

        /* schedule sending CLOSE - if we are in quiesce, this adds to
         * gnd_ready_conns and allows us to find it in quiesce processing */
        kgnilnd_schedule_conn(conn);
kgnilnd_close_conn(kgn_conn_t *conn, int error)
{
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        /* need to check the state here - this call is racy and we don't
         * know the state until after the lock is grabbed */
        if (conn->gnc_state == GNILND_CONN_ESTABLISHED) {
                kgnilnd_close_conn_locked(conn, error);

        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
kgnilnd_complete_closed_conn(kgn_conn_t *conn)
{
        /* Dump log on cksum error - wait until complete phase to let
         * RX of error happen */
        if (*kgnilnd_tunables.kgn_checksum_dump &&
            (conn != NULL && conn->gnc_peer_error == -ENOKEY)) {
                libcfs_debug_dumplog();

        /* _CLOSED set in kgnilnd_process_fmaq once we decide to
         * send the CLOSE or not */
        LASSERTF(conn->gnc_state == GNILND_CONN_CLOSED,
                 "conn 0x%p->%s with bad state %s\n",
                 conn, conn->gnc_peer ?
                 libcfs_nid2str(conn->gnc_peer->gnp_nid) :
                 kgnilnd_conn_state2str(conn));

        LASSERT(list_empty(&conn->gnc_hashlist));

        /* we've sent the close, start nuking */
        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SCHEDULE_COMPLETE))
                kgnilnd_schedule_conn(conn);

        if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
                CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
                       "done; attempting to recover conn 0x%p "
                       "scheduled %d function: %s line: %d\n", conn,
                       conn->gnc_scheduled, conn->gnc_sched_caller,
                       conn->gnc_sched_line);
        /* we don't use lists to track things that we can get out of the
         * tx_ref table */

        /* need to hold locks for tx_list_state, sampling it is too racy:
         * - the lock actually protects tx != NULL, but we can't take the proper
         *   lock until we check tx_list_state, which would be too late and
         *   we could have the TX change under us.
         * gnd_rdmaq_lock and gnd_lock are not used together, so taking both
         * is fine */
        spin_lock(&conn->gnc_device->gnd_rdmaq_lock);
        spin_lock(&conn->gnc_device->gnd_lock);

        for (nrdma = 0; nrdma < GNILND_MAX_MSG_ID; nrdma++) {
                tx = conn->gnc_tx_ref_table[nrdma];

                /* only print the first error and if not CLOSE, we often don't see
                 * CQ events for that by the time we get here... and really don't care */
                if (nlive || tx->tx_msg.gnm_type == GNILND_MSG_CLOSE)
                        tx->tx_state |= GNILND_TX_QUIET_ERROR;

                GNIDBG_TX(D_NET, tx, "cleaning up on close, nlive %d", nlive);

                /* don't worry about gnc_lock here as nobody else should be
                 * touching this conn */
                kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
                list_add_tail(&tx->tx_list, &sinners);

        spin_unlock(&conn->gnc_device->gnd_lock);
        spin_unlock(&conn->gnc_device->gnd_rdmaq_lock);
        /* nobody should have marked this as needing scheduling after
         * we called close - so only ref should be us handling it */
        if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
                CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
                       "done; attempting to recover conn 0x%p "
                       "scheduled %d function %s line: %d\n", conn,
                       conn->gnc_scheduled, conn->gnc_sched_caller,
                       conn->gnc_sched_line);

        /* now reset a few to actual counters... */
        nrdma = atomic_read(&conn->gnc_nlive_rdma);
        nq_rdma = atomic_read(&conn->gnc_nq_rdma);

        if (!list_empty(&sinners)) {
                list_for_each_entry_safe(tx, txn, &sinners, tx_list) {
                        /* clear tx_list to make tx_add_list_locked happy */
                        list_del_init(&tx->tx_list);
                        /* The error codes determine if we hold onto the MDD */
                        kgnilnd_tx_done(tx, conn->gnc_error);
        logmsg = (nlive + nrdma + nq_rdma);

        if (conn->gnc_peer->gnp_down == GNILND_RCA_NODE_UP) {
                CNETERR("Closed conn 0x%p->%s (errno %d, peer errno %d): "
                        "canceled %d TX, %d/%d RDMA\n",
                        conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                        conn->gnc_error, conn->gnc_peer_error,
                        nlive, nq_rdma, nrdma);

                CDEBUG(D_NET, "Closed conn 0x%p->%s (errno %d,"
                       " peer errno %d): canceled %d TX, %d/%d RDMA\n",
                       conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                       conn->gnc_error, conn->gnc_peer_error,
                       nlive, nq_rdma, nrdma);
        kgnilnd_destroy_conn_ep(conn);

        /* Bug 765042 - race this with completing a new conn to same peer - we need
         * finish_connect to detach purgatory before we can do it ourselves here */
        CFS_RACE(CFS_FAIL_GNI_FINISH_PURG);

        /* now it is safe to remove from peer list - anyone looking at
         * gnp_conns now is free to unlink if not on purgatory */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);

        conn->gnc_state = GNILND_CONN_DONE;

        /* Decrement counter if we are marked by del_conn_or_peers for closing
         */
        if (conn->gnc_needs_closing)
                kgnilnd_admin_decref(kgnilnd_data.kgn_npending_conns);

        /* Remove from peer's list of valid connections if it's not in purgatory */
        if (!conn->gnc_in_purgatory) {
                list_del_init(&conn->gnc_list);
                /* Lose peer's reference on the conn */
                kgnilnd_conn_decref(conn);

        /* NB - only unlinking if we set pending in del_peer_locked from admin or
         */
        if (kgnilnd_peer_active(conn->gnc_peer) &&
            conn->gnc_peer->gnp_pending_unlink &&
            kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
                kgnilnd_unlink_peer_locked(conn->gnc_peer);

        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        /* I'm telling Mommy! - use peer_error if they initiated close */
        kgnilnd_peer_notify(conn->gnc_peer,
                            conn->gnc_error == -ECONNRESET ?
                            conn->gnc_peer_error : conn->gnc_error, 0);
kgnilnd_set_conn_params(kgn_dgram_t *dgram)
{
        kgn_conn_t      *conn = dgram->gndg_conn;
        kgn_connreq_t   *connreq = &dgram->gndg_conn_in;
        kgn_gniparams_t *rem_param = &connreq->gncr_gnparams;
        gni_smsg_attr_t *remote = &connreq->gncr_gnparams.gnpr_smsg_attr;

        /* set timeout vals in conn early so we can use them for the NAK */

        /* use max of the requested and our timeout, peer will do the same */
        conn->gnc_timeout = MAX(conn->gnc_timeout, connreq->gncr_timeout);

        /* only ep_bind really mucks around with the CQ */
        /* only ep_bind if we are not connecting to ourselves and the dstnid is not a wildcard.  This check
         * is necessary as you can only bind an EP once, and we must make sure we don't bind when already bound. */
        if (connreq->gncr_dstnid != LNET_NID_ANY && dgram->gndg_conn_out.gncr_dstnid != connreq->gncr_srcnid) {
                mutex_lock(&conn->gnc_device->gnd_cq_mutex);
                rrc = kgnilnd_ep_bind(conn->gnc_ephandle,
                                      connreq->gncr_gnparams.gnpr_host_id,

                mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                if (rrc != GNI_RC_SUCCESS) {

        rrc = kgnilnd_ep_set_eventdata(conn->gnc_ephandle, conn->gnc_cqid,
                                       connreq->gncr_gnparams.gnpr_cqid);
        if (rrc != GNI_RC_SUCCESS) {
        /* Initialize SMSG */
        rrc = kgnilnd_smsg_init(conn->gnc_ephandle, &conn->gnpr_smsg_attr,
                                &connreq->gncr_gnparams.gnpr_smsg_attr);
        if (unlikely(rrc == GNI_RC_INVALID_PARAM)) {
                gni_smsg_attr_t *local = &conn->gnpr_smsg_attr;

                /* help folks figure out if there is a tunable off, etc. */
                LCONSOLE_ERROR("SMSG attribute mismatch. Data from local/remote:"
                               " type %d/%d msg_maxsize %u/%u"
                               " mbox_maxcredit %u/%u. Please check kgni"
                               " logs for further data\n",
                               local->msg_type, remote->msg_type,
                               local->msg_maxsize, remote->msg_maxsize,
                               local->mbox_maxcredit, remote->mbox_maxcredit);

        if (rrc != GNI_RC_SUCCESS) {
        /* log this for help in debugging SMSG buffer re-use */
        CDEBUG(D_NET, "conn %p src %s dst %s smsg %p acquired"
               " local cqid %u SMSG %p->%u hndl "LPX64"."LPX64
               " remote cqid %u SMSG %p->%u hndl "LPX64"."LPX64"\n",
               conn, libcfs_nid2str(connreq->gncr_srcnid),
               libcfs_nid2str(connreq->gncr_dstnid),
               &conn->gnpr_smsg_attr,
               conn->gnpr_smsg_attr.msg_buffer,
               conn->gnpr_smsg_attr.mbox_offset,
               conn->gnpr_smsg_attr.mem_hndl.qword1,
               conn->gnpr_smsg_attr.mem_hndl.qword2,
               rem_param->gnpr_cqid,
               rem_param->gnpr_smsg_attr.msg_buffer,
               rem_param->gnpr_smsg_attr.mbox_offset,
               rem_param->gnpr_smsg_attr.mem_hndl.qword1,
               rem_param->gnpr_smsg_attr.mem_hndl.qword2);

        conn->gnc_peerstamp = connreq->gncr_peerstamp;
        conn->gnc_peer_connstamp = connreq->gncr_connstamp;
        conn->remote_mbox_addr = (void *)((char *)remote->msg_buffer + remote->mbox_offset);
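        /* the peer's mbox is carved out of its SMSG buffer at mbox_offset -
         * cache the absolute address we will target on the remote node */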
        /* We update the reaper timeout once we have a valid conn and timeout */
        kgnilnd_update_reaper_timeout(GNILND_TO2KA(conn->gnc_timeout));

        rrc = kgnilnd_ep_unbind(conn->gnc_ephandle);
        /* not sure I can just let this fly */
        LASSERTF(rrc == GNI_RC_SUCCESS,
                 "bad rc from gni_ep_unbind trying to cleanup: %d\n", rrc);

        LASSERTF(rc != 0, "SOFTWARE BUG: rc == 0\n");
        CERROR("Error setting connection params from %s: %d\n",
               libcfs_nid2str(connreq->gncr_srcnid), rc);
/* needs down_read on kgn_net_rw_sem held from before this call until
 * after the write_lock on kgn_peer_conn_lock - this ensures we stay sane
 * with kgnilnd_shutdown - it'll get the sem and set shutdown, then get the
 * kgn_peer_conn_lock to start del_peer'ing.  If we hold the sem until after
 * kgn_peer_conn_lock is held, we guarantee that nobody calls
 * kgnilnd_add_peer_locked without checking gnn_shutdown */
kgnilnd_create_peer_safe(kgn_peer_t **peerp,

        LASSERT(nid != LNET_NID_ANY);

        /* We don't pass the net around in the dgram anymore, so here is where we find it.
         * This will work unless it's in shutdown or the nid has a net that is invalid.
         * Either way an error code needs to be returned in that case.
         *
         * If the net passed in is not NULL then we can use it; this avoids looking it
         * up when the calling function already has access to the data. */
        if (net == NULL) {
                rc = kgnilnd_find_net(nid, &net);

        } else {
                /* kgnilnd_find_net adds a reference on the net; if we are not
                 * using it, we must take the reference manually so the net
                 * references are correct when tearing down the net */
                kgnilnd_net_addref(net);
        }
        LIBCFS_ALLOC(peer, sizeof(*peer));
        if (peer == NULL) {
                kgnilnd_net_decref(net);

        peer->gnp_nid = nid;
        peer->gnp_down = node_state;

        /* translate from nid to nic addr & store */
        rc = kgnilnd_nid_to_nicaddrs(LNET_NIDADDR(nid), 1, &peer->gnp_host_id);

                kgnilnd_net_decref(net);
                LIBCFS_FREE(peer, sizeof(*peer));

        CDEBUG(D_NET, "peer 0x%p->%s -> NIC 0x%x\n", peer,
               libcfs_nid2str(nid), peer->gnp_host_id);

        atomic_set(&peer->gnp_refcount, 1);     /* 1 ref for caller */
        atomic_set(&peer->gnp_dirty_eps, 0);

        INIT_LIST_HEAD(&peer->gnp_list);
        INIT_LIST_HEAD(&peer->gnp_connd_list);
        INIT_LIST_HEAD(&peer->gnp_conns);
        INIT_LIST_HEAD(&peer->gnp_tx_queue);

        /* the first reconnect should happen immediately, so we leave
         * gnp_reconnect_interval set to 0 */

        LASSERTF(net != NULL, "peer 0x%p->%s with NULL net\n",
                 peer, libcfs_nid2str(nid));

        /* must have kgn_net_rw_sem held for this... */
        if (net->gnn_shutdown) {
                /* shutdown has started already */
                kgnilnd_net_decref(net);
                LIBCFS_FREE(peer, sizeof(*peer));

        peer->gnp_net = net;

        atomic_inc(&kgnilnd_data.kgn_npeers);
kgnilnd_destroy_peer(kgn_peer_t *peer)
{
        CDEBUG(D_NET, "peer %s %p deleted\n",
               libcfs_nid2str(peer->gnp_nid), peer);
        LASSERTF(atomic_read(&peer->gnp_refcount) == 0,
                 "peer 0x%p->%s refs %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid),
                 atomic_read(&peer->gnp_refcount));
        LASSERTF(atomic_read(&peer->gnp_dirty_eps) == 0,
                 "peer 0x%p->%s dirty eps %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid),
                 atomic_read(&peer->gnp_dirty_eps));
        LASSERTF(peer->gnp_net != NULL, "peer %p (%s) with NULL net\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(!kgnilnd_peer_active(peer),
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE || peer->gnp_connecting == GNILND_PEER_KILL,
                 "peer 0x%p->%s, connecting %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
        LASSERTF(list_empty(&peer->gnp_conns),
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_tx_queue),
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_connd_list),
                 peer, libcfs_nid2str(peer->gnp_nid));

        /* NB a peer's connections keep a reference on their peer until
         * they are destroyed, so we can be assured that _all_ state to do
         * with this peer has been cleaned up when its refcount drops to
         * zero */
        atomic_dec(&kgnilnd_data.kgn_npeers);
        kgnilnd_net_decref(peer->gnp_net);

        LIBCFS_FREE(peer, sizeof(*peer));
/* the conn might not have made it all the way through to a connected
 * state - but we need to purgatory any conn that a remote peer might
 * have seen through a posted dgram as well */
kgnilnd_add_purgatory_locked(kgn_conn_t *conn, kgn_peer_t *peer)
{
        kgn_mbox_info_t *mbox = NULL;

        /* NB - the caller should own conn by removing him from the
         * scheduler thread when finishing the close */

        LASSERTF(peer != NULL, "conn %p with NULL peer\n", conn);

        /* If this is still true, need to add the calls to unlink back in and
         * figure out how to close the hole on loopback conns */
        LASSERTF(kgnilnd_peer_active(peer), "can't use inactive peer %s (%p)"
                 " we'll never recover the resources\n",
                 libcfs_nid2str(peer->gnp_nid), peer);

        CDEBUG(D_NET, "conn %p peer %p dev %p\n", conn, peer,

        LASSERTF(conn->gnc_in_purgatory == 0,
                 "Conn already in purgatory\n");
        conn->gnc_in_purgatory = 1;

        mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
        mbox->mbx_prev_purg_nid = peer->gnp_nid;
        mbox->mbx_add_purgatory = jiffies;
        kgnilnd_release_mbox(conn, 1);
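        /* purgatory keeps the mbox held (hold of 1) - presumably so kgni can
         * keep servicing traffic aimed at the old mailbox until the remote
         * peer is known to be done with it */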
        LASSERTF(list_empty(&conn->gnc_mdd_list),
                 "conn 0x%p->%s with active purgatory hold MDD %d\n",
                 conn, libcfs_nid2str(peer->gnp_nid),
                 kgnilnd_count_list(&conn->gnc_mdd_list));
/* Instead of detaching everything from purgatory here, we just mark the conn
 * as needing detach; when the reaper checks the conn the next time, it will
 * detach it.
 * Calling function requires write_lock held on kgn_peer_conn_lock */
kgnilnd_mark_for_detach_purgatory_all_locked(kgn_peer_t *peer)
{
        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_in_purgatory && !conn->gnc_needs_detach) {
                        conn->gnc_needs_detach = 1;
                        kgnilnd_admin_addref(kgnilnd_data.kgn_npending_detach);
/* Calling function needs a write_lock held on kgn_peer_conn_lock */
kgnilnd_detach_purgatory_locked(kgn_conn_t *conn, struct list_head *conn_list)
{
        kgn_mbox_info_t *mbox = NULL;

        /* if needed, add the conn purgatory data to the list passed in */
        if (conn->gnc_in_purgatory) {
                CDEBUG(D_NET, "peer %p->%s purg_conn %p@%s mdd_list #tx %d\n",
                       conn->gnc_peer, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                       conn, kgnilnd_conn_state2str(conn),
                       kgnilnd_count_list(&conn->gnc_mdd_list));

                mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
                mbox->mbx_detach_of_purgatory = jiffies;

                /* conn->gnc_list is the entry point on peer->gnp_conns, so detaching it
                 * here removes it from the list of 'valid' peer connections.
                 * We put the current conn onto a list of conns to call kgnilnd_release_purgatory_locked()
                 * on, and as such the caller of kgnilnd_detach_purgatory_locked() now owns that conn, since it's not
                 * on the peer's conn_list anymore. */
                list_del_init(&conn->gnc_list);
                /* NB - only unlinking if we set pending in del_peer_locked from admin or
                 */
                if (kgnilnd_peer_active(conn->gnc_peer) &&
                    conn->gnc_peer->gnp_pending_unlink &&
                    kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
                        kgnilnd_unlink_peer_locked(conn->gnc_peer);

                /* The reaper will not call detach unless the conn is fully through kgnilnd_complete_closed_conn.
                 * If the conn is not in a DONE state somehow we are attempting to detach even though
                 * the conn has not been fully cleaned up.  If we detach while the conn is still closing
                 * we will end up with an orphaned connection that has valid ep_handle, that is not on a
                 */
                LASSERTF(conn->gnc_state == GNILND_CONN_DONE, "Conn in invalid state %p@%s\n",
                         conn, kgnilnd_conn_state2str(conn));

                /* move from peer to the delayed release list */
                list_add_tail(&conn->gnc_list, conn_list);
kgnilnd_release_purgatory_list(struct list_head *conn_list)
{
        kgn_conn_t          *conn, *connN;
        kgn_mdd_purgatory_t *gmp, *gmpN;

        list_for_each_entry_safe(conn, connN, conn_list, gnc_list) {
                dev = conn->gnc_device;

                kgnilnd_release_mbox(conn, -1);
                conn->gnc_in_purgatory = 0;

                list_del_init(&conn->gnc_list);

                /* gnc_needs_detach is set in kgnilnd_del_conn_or_peer.  It is used to keep track
                 * of conns that have been marked for detach by kgnilnd_del_conn_or_peer.
                 * The function uses kgn_npending_detach to verify the conn has
                 * actually been detached. */
                if (conn->gnc_needs_detach)
                        kgnilnd_admin_decref(kgnilnd_data.kgn_npending_detach);

                /* if this guy is really dead (we are doing release from reaper),
                 * make sure we tell LNet - if this is from other context,
                 * the checks in the function will prevent an errant
                 */
                kgnilnd_peer_notify(conn->gnc_peer, conn->gnc_error, 0);

                list_for_each_entry_safe(gmp, gmpN, &conn->gnc_mdd_list,
                                         gmp_list) {
                        CDEBUG(D_NET,
                               "dev %p releasing held mdd "LPX64"."LPX64"\n",
                               conn->gnc_device, gmp->gmp_map_key.qword1,
                               gmp->gmp_map_key.qword2);

                        atomic_dec(&dev->gnd_n_mdd_held);
                        kgnilnd_mem_mdd_release(conn->gnc_device->gnd_handle,
                                                &gmp->gmp_map_key);
                        /* ignoring the return code - if kgni/ghal can't find it
                         * it must be released already */

                        list_del_init(&gmp->gmp_list);
                        LIBCFS_FREE(gmp, sizeof(*gmp));

                /* lose conn ref for purgatory */
                kgnilnd_conn_decref(conn);
/* needs write_lock on kgnilnd_data.kgn_peer_conn_lock held */
kgnilnd_peer_increase_reconnect_locked(kgn_peer_t *peer)
{
        current_to = peer->gnp_reconnect_interval;

        /* we'll try to reconnect fast the first time, then back-off */
        if (current_to == 0) {
                peer->gnp_reconnect_time = jiffies - 1;
                current_to = *kgnilnd_tunables.kgn_min_reconnect_interval;
        } else {
                peer->gnp_reconnect_time = jiffies + cfs_time_seconds(current_to);
                /* add 50% of min timeout & retry */
                current_to += *kgnilnd_tunables.kgn_min_reconnect_interval / 2;
        }

        current_to = MIN(current_to,
                         *kgnilnd_tunables.kgn_max_reconnect_interval);

        peer->gnp_reconnect_interval = current_to;
        CDEBUG(D_NET, "peer %s can reconnect at %lu interval %lu\n",
               libcfs_nid2str(peer->gnp_nid), peer->gnp_reconnect_time,
               peer->gnp_reconnect_interval);
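        /* e.g. with min_reconnect_interval M: the first retry is immediate,
         * then attempts space out by M, 1.5M, 2M, ... seconds, growing
         * linearly until capped at max_reconnect_interval */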
/* needs kgnilnd_data.kgn_peer_conn_lock held */
kgnilnd_find_peer_locked(lnet_nid_t nid)
{
        struct list_head *peer_list = kgnilnd_nid2peerlist(nid);

        /* Chopping nid down to only NIDADDR using LNET_NIDADDR so we only
         * have a single peer per device instead of a peer per nid/net combo. */
        list_for_each_entry(peer, peer_list, gnp_list) {
                if (LNET_NIDADDR(nid) != LNET_NIDADDR(peer->gnp_nid))

                CDEBUG(D_NET, "got peer [%p] -> %s c %d (%d)\n",
                       peer, libcfs_nid2str(nid),
                       peer->gnp_connecting,
                       atomic_read(&peer->gnp_refcount));
/* need write_lock on kgn_peer_conn_lock */
kgnilnd_unlink_peer_locked(kgn_peer_t *peer)
{
        LASSERTF(list_empty(&peer->gnp_conns),
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_tx_queue),
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(kgnilnd_peer_active(peer),
                 peer, libcfs_nid2str(peer->gnp_nid));
        CDEBUG(D_NET, "unlinking peer 0x%p->%s\n",
               peer, libcfs_nid2str(peer->gnp_nid));

        list_del_init(&peer->gnp_list);
        kgnilnd_data.kgn_peer_version++;
        kgnilnd_admin_decref(kgnilnd_data.kgn_npending_unlink);
        /* lose peerlist's ref */
        kgnilnd_peer_decref(peer);
kgnilnd_get_peer_info(int index,
                      kgn_peer_t **found_peer,
                      lnet_nid_t *id, __u32 *nic_addr,
                      int *refcount, int *connecting)
{
        struct list_head *ptmp;

        read_lock(&kgnilnd_data.kgn_peer_conn_lock);

        for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
                list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
                        peer = list_entry(ptmp, kgn_peer_t, gnp_list);

                        CDEBUG(D_NET, "found peer %p (%s) at index %d\n",
                               peer, libcfs_nid2str(peer->gnp_nid), index);

                        *found_peer = peer;
                        *id = peer->gnp_nid;
                        *nic_addr = peer->gnp_host_id;
                        *refcount = atomic_read(&peer->gnp_refcount);
                        *connecting = peer->gnp_connecting;

        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        CDEBUG(D_NET, "no gni peer at index %d\n", index);
/* requires write_lock on kgn_peer_conn_lock held */
kgnilnd_add_peer_locked(lnet_nid_t nid, kgn_peer_t *new_stub_peer, kgn_peer_t **peerp)
{
        kgn_peer_t *peer, *peer2;

        LASSERTF(new_stub_peer != NULL, "bad stub peer for nid %s\n",
                 libcfs_nid2str(nid));

        peer2 = kgnilnd_find_peer_locked(nid);
        if (peer2 != NULL) {
                /* A peer was created during the lock transition, so drop
                 * the new one we created */
                kgnilnd_peer_decref(new_stub_peer);

        peer = new_stub_peer;
        /* peer table takes existing ref on peer */

        LASSERTF(!kgnilnd_peer_active(peer),
                 "peer 0x%p->%s already in peer table\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        list_add_tail(&peer->gnp_list,
                      kgnilnd_nid2peerlist(nid));
        kgnilnd_data.kgn_peer_version++;

        LASSERTF(peer->gnp_net != NULL, "peer 0x%p->%s with NULL net\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
kgnilnd_add_peer(kgn_net_t *net, lnet_nid_t nid, kgn_peer_t **peerp)
{
        if (nid == LNET_NID_ANY)

        node_state = kgnilnd_get_node_state(LNET_NIDADDR(nid));

        /* NB - this will not block during normal operations -
         * the only writer of this is in the startup/shutdown path. */
        rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);

        rc = kgnilnd_create_peer_safe(&peer, nid, net, node_state);
        if (rc != 0) {
                up_read(&kgnilnd_data.kgn_net_rw_sem);

        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        up_read(&kgnilnd_data.kgn_net_rw_sem);

        kgnilnd_add_peer_locked(nid, peer, peerp);

        CDEBUG(D_NET, "peer 0x%p->%s connecting %d\n",
               *peerp, libcfs_nid2str((*peerp)->gnp_nid),
               (*peerp)->gnp_connecting);

        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
/* needs write_lock on kgn_peer_conn_lock */
kgnilnd_cancel_peer_connect_locked(kgn_peer_t *peer, struct list_head *zombies)
{
        /* we do care about state of gnp_connecting - we could be between
         * reconnect attempts, so try to find the dgram and cancel the TX
         * anyway.  If we are in the process of posting DON'T do anything;
         * once it fails or succeeds we can nuke the connect attempt.
         * We have no idea where in kgnilnd_post_dgram we are, so we can't
         * attempt to cancel until the function is done. */

        /* make sure peer isn't in process of connecting or waiting for connect */
        spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
        if (!(list_empty(&peer->gnp_connd_list))) {
                list_del_init(&peer->gnp_connd_list);
                /* remove connd ref */
                kgnilnd_peer_decref(peer);

        spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);

        if (peer->gnp_connecting == GNILND_PEER_POSTING || peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH) {
                peer->gnp_connecting = GNILND_PEER_NEEDS_DEATH;
                /* we are in the process of posting right now; the xchg will set
                 * it up for us to cancel the connect, so we are finished for now */
        } else {
                /* no need for exchange - we have the peer lock and it's ready for us to nuke */
                LASSERTF(peer->gnp_connecting != GNILND_PEER_POSTING,
                         "Peer in invalid state 0x%p->%s, connecting %d\n",
                         peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
                peer->gnp_connecting = GNILND_PEER_IDLE;
                set_mb(peer->gnp_last_dgram_errno, -ETIMEDOUT);
                kgnilnd_find_and_cancel_dgram(peer->gnp_net->gnn_dev,

        /* The least we can do is nuke the tx's no matter what.... */
        list_for_each_entry_safe(tx, txn, &peer->gnp_tx_queue, tx_list) {
                kgnilnd_tx_del_state_locked(tx, peer, NULL,
                list_add_tail(&tx->tx_list, zombies);
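                /* the zombies are completed by the caller via
                 * kgnilnd_txlist_done() after kgn_peer_conn_lock is dropped -
                 * TX completion can't run under that lock */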
/* needs write_lock on kgn_peer_conn_lock */
kgnilnd_del_peer_locked(kgn_peer_t *peer, int error)
{
        /* this peer could be passive and only held for purgatory,
         * take a ref to ensure it doesn't disappear in this function */
        kgnilnd_peer_addref(peer);

        CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);

        /* if purgatory release cleared it out, don't try again */
        if (kgnilnd_peer_active(peer)) {
                /* always do this to allow kgnilnd_start_connect and
                 * kgnilnd_finish_connect to catch this before they
                 * wrap up their operations */
                if (kgnilnd_can_unlink_peer_locked(peer)) {
                        /* already released purgatory, so only active
                         * conns hold him */
                        kgnilnd_unlink_peer_locked(peer);
                } else {
                        kgnilnd_close_peer_conns_locked(peer, error);
                        /* peer unlinks itself when last conn is closed */
                }
        }

        /* we are done, release back to the wild */
        kgnilnd_peer_decref(peer);
kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
                         int error)
{
        LIST_HEAD(zombies);
        struct list_head *ptmp, *pnxt;

        write_lock(&kgnilnd_data.kgn_peer_conn_lock);

        if (nid != LNET_NID_ANY)
                lo = hi = kgnilnd_nid2peerlist(nid) - kgnilnd_data.kgn_peers;
        else {
                hi = *kgnilnd_tunables.kgn_peer_hash_size - 1;
                /* wildcards always succeed */

        for (i = lo; i <= hi; i++) {
                list_for_each_safe(ptmp, pnxt, &kgnilnd_data.kgn_peers[i]) {
                        peer = list_entry(ptmp, kgn_peer_t, gnp_list);

                        LASSERTF(peer->gnp_net != NULL,
                                 "peer %p (%s) with NULL net\n",
                                 peer, libcfs_nid2str(peer->gnp_nid));

                        if (net != NULL && peer->gnp_net != net)
                                continue;

                        if (!(nid == LNET_NID_ANY || LNET_NIDADDR(peer->gnp_nid) == LNET_NIDADDR(nid)))
                                continue;

                        /* In both cases, we want to stop any in-flight
                         * connect attempts */
                        kgnilnd_cancel_peer_connect_locked(peer, &zombies);

                        switch (command) {
                        case GNILND_DEL_CONN:
                                kgnilnd_close_peer_conns_locked(peer, error);
                                break;
                        case GNILND_DEL_PEER:
                                peer->gnp_pending_unlink = 1;
                                kgnilnd_admin_addref(kgnilnd_data.kgn_npending_unlink);
                                kgnilnd_mark_for_detach_purgatory_all_locked(peer);
                                kgnilnd_del_peer_locked(peer, error);
                                break;
                        case GNILND_CLEAR_PURGATORY:
                                /* Mark everything ready for detach; reaper will cleanup
                                 * once we release the kgn_peer_conn_lock */
                                kgnilnd_mark_for_detach_purgatory_all_locked(peer);
                                peer->gnp_last_errno = -EISCONN;
                                /* clear reconnect so he can reconnect soon */
                                peer->gnp_reconnect_time = 0;
                                peer->gnp_reconnect_interval = 0;
                                break;
                        default:
                                CERROR("bad command %d\n", command);
                        }

                        /* we matched something */

        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        kgnilnd_txlist_done(&zombies, error);
        /* This function does not return until the commands it initiated have completed,
         * since they have to work their way through the other threads.  In the case of shutdown,
         * threads are not woken up until after this call is initiated, so we cannot wait; we just
         * need to return.  The same applies for stack reset: we shouldn't wait, as the reset thread
         */

        CFS_RACE(CFS_FAIL_GNI_RACE_RESET);

        if (error == -ENOTRECOVERABLE || error == -ESHUTDOWN) {

        while (atomic_read(&kgnilnd_data.kgn_npending_conns) ||
               atomic_read(&kgnilnd_data.kgn_npending_detach) ||
               atomic_read(&kgnilnd_data.kgn_npending_unlink)) {

                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(cfs_time_seconds(1));

                CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, "Waiting on %d peers %d closes %d detaches\n",
                       atomic_read(&kgnilnd_data.kgn_npending_unlink),
                       atomic_read(&kgnilnd_data.kgn_npending_conns),
                       atomic_read(&kgnilnd_data.kgn_npending_detach));
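                /* (i & -i) == i only when i is a power of two, so the
                 * D_WARNING variant fires on iterations 1, 2, 4, 8, ... -
                 * a cheap way to rate-limit the console noise */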
kgnilnd_get_conn_by_idx(int index)
{
        struct list_head *ptmp;
        struct list_head *ctmp;

        for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
                read_lock(&kgnilnd_data.kgn_peer_conn_lock);
                list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {

                        peer = list_entry(ptmp, kgn_peer_t, gnp_list);

                        list_for_each(ctmp, &peer->gnp_conns) {
                                conn = list_entry(ctmp, kgn_conn_t, gnc_list);

                                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)

                                CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
                                       libcfs_nid2str(conn->gnc_peer->gnp_nid),
                                       atomic_read(&conn->gnc_refcount));
                                kgnilnd_conn_addref(conn);
                                read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

                read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
kgnilnd_get_conn_info(kgn_peer_t *peer,
                      int *device_id, __u64 *peerstamp,
                      int *tx_seq, int *rx_seq,
                      int *fmaq_len, int *nfma, int *nrdma)
{
        read_lock(&kgnilnd_data.kgn_peer_conn_lock);

        conn = kgnilnd_find_conn_locked(peer);

        *device_id = conn->gnc_device->gnd_host_id;
        *peerstamp = conn->gnc_peerstamp;
        *tx_seq = atomic_read(&conn->gnc_tx_seq);
        *rx_seq = atomic_read(&conn->gnc_rx_seq);
        *fmaq_len = kgnilnd_count_list(&conn->gnc_fmaq);
        *nfma = atomic_read(&conn->gnc_nlive_fma);
        *nrdma = atomic_read(&conn->gnc_nlive_rdma);

        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
/* needs write_lock on kgn_peer_conn_lock */
kgnilnd_close_peer_conns_locked(kgn_peer_t *peer, int why)
{
        struct list_head *ctmp, *cnxt;

        list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
                conn = list_entry(ctmp, kgn_conn_t, gnc_list);

                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)

                /* we mark gnc_needs_closing and increment kgn_npending_conns so that
                 * kgnilnd_del_conn_or_peer can wait on the other threads closing
                 * and cleaning up the connection. */
                if (!conn->gnc_needs_closing) {
                        conn->gnc_needs_closing = 1;
                        kgnilnd_admin_addref(kgnilnd_data.kgn_npending_conns);

                kgnilnd_close_conn_locked(conn, why);
kgnilnd_report_node_state(lnet_nid_t nid, int down)
{
        kgn_peer_t *peer, *new_peer;

        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        peer = kgnilnd_find_peer_locked(nid);

                write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

                /* Don't add a peer for node up events */
                if (down == GNILND_RCA_NODE_UP) {

                /* find any valid net - we don't care which one... */
                down_read(&kgnilnd_data.kgn_net_rw_sem);
                for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                        list_for_each_entry(net, &kgnilnd_data.kgn_nets[i],
                                            gnn_list) {

                        up_read(&kgnilnd_data.kgn_net_rw_sem);

                        CNETERR("Could not find a net for nid %lld\n", nid);

                /* The nid passed in does not yet contain the net portion.
                 * Let's build it up now */
                nid = LNET_MKNID(LNET_NIDNET(net->gnn_ni->ni_nid), nid);
                rc = kgnilnd_add_peer(net, nid, &new_peer);

                        CNETERR("Could not add peer for nid %lld, rc %d\n",
                                nid, rc);

                write_lock(&kgnilnd_data.kgn_peer_conn_lock);
                peer = kgnilnd_find_peer_locked(nid);

                        CNETERR("Could not find peer for nid %lld\n", nid);
                        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        peer->gnp_down = down;

        if (down == GNILND_RCA_NODE_DOWN) {

                peer->gnp_down_event_time = jiffies;
                kgnilnd_cancel_peer_connect_locked(peer, &zombies);
                conn = kgnilnd_find_conn_locked(peer);

                        kgnilnd_close_conn_locked(conn, -ENETRESET);

                peer->gnp_up_event_time = jiffies;

        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        if (down == GNILND_RCA_NODE_DOWN) {
                /* using ENETRESET so we don't get messages from
                 */
                kgnilnd_txlist_done(&zombies, -ENETRESET);
                kgnilnd_peer_notify(peer, -ECONNRESET, 0);
                LCONSOLE_INFO("Received down event for nid %lld\n", nid);
kgnilnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
{
        struct libcfs_ioctl_data *data = arg;
        kgn_net_t                *net = ni->ni_data;

        LASSERT(ni == net->gnn_ni);

        switch (cmd) {
        case IOC_LIBCFS_GET_PEER: {
                lnet_nid_t  nid = 0;
                kgn_peer_t *peer = NULL;
                __u32       nic_addr = 0;
                __u64       peerstamp = 0;
                int         peer_refcount = 0, peer_connecting = 0;
                int         device_id = 0;
                int         tx_seq = 0, rx_seq = 0;
                int         fmaq_len = 0, nfma = 0, nrdma = 0;

                rc = kgnilnd_get_peer_info(data->ioc_count, &peer,
                                           &nid, &nic_addr, &peer_refcount,
                                           &peer_connecting);

                /* LNET_MKNID is used to mask from lnet the multiplexing/demultiplexing of connections and peers.
                 * LNET assumes a conn and peer per net; LNET_MKNID/LNET_NIDADDR allows us to let LNet see what it
                 * wants to see instead of the underlying network that is being used to send the data */
                data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(nid));
                data->ioc_flags  = peer_connecting;
                data->ioc_count  = peer_refcount;

                rc = kgnilnd_get_conn_info(peer, &device_id, &peerstamp,
                                           &tx_seq, &rx_seq, &fmaq_len,

                /* This is allowable - a persistent peer could not
                 * have a connection */

                        /* flag to indicate we are not connected -
                         * need to print as such */
                        data->ioc_flags |= (1<<16);

                        data->ioc_net    = device_id;
                        data->ioc_u64[0] = peerstamp;
                        data->ioc_u32[0] = fmaq_len;
                        data->ioc_u32[1] = nfma;
                        data->ioc_u32[2] = tx_seq;
                        data->ioc_u32[3] = rx_seq;
                        data->ioc_u32[4] = nrdma;
        case IOC_LIBCFS_ADD_PEER: {
                /* just dummy value to allow using common interface */
                kgn_peer_t *peer;

                rc = kgnilnd_add_peer(net, data->ioc_nid, &peer);
                break;
        }
        case IOC_LIBCFS_DEL_PEER: {
                /* NULL is passed in so it affects all peers in existence without regard to network,
                 * as the peer may not exist on the network LNET believes it to be on. */
                rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
                                              GNILND_DEL_PEER, -EUCLEAN);
                break;
        }
        case IOC_LIBCFS_GET_CONN: {
                kgn_conn_t *conn = kgnilnd_get_conn_by_idx(data->ioc_count);

                /* LNET_MKNID is used to build the correct address based on what LNET wants to see instead of
                 * the generic connection that is used to send the data */
                data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(conn->gnc_peer->gnp_nid));
                data->ioc_u32[0] = conn->gnc_device->gnd_id;
                kgnilnd_conn_decref(conn);
                break;
        }
        case IOC_LIBCFS_CLOSE_CONNECTION: {
                /* use error = -ENETRESET to indicate it was lctl disconnect */
                /* NULL is passed in so it affects all the nets, as the connection is virtual
                 * and may not exist on the network LNET believes it to be on. */
                rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
                                              GNILND_DEL_CONN, -ENETRESET);
                break;
        }
        case IOC_LIBCFS_PUSH_CONNECTION: {
                /* we use this to flush purgatory */
                rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
                                              GNILND_CLEAR_PURGATORY, -EUCLEAN);
                break;
        }
        case IOC_LIBCFS_REGISTER_MYNID: {
                /* Ignore if this is a noop */
                if (data->ioc_nid == ni->ni_nid) {

                CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
                       libcfs_nid2str(data->ioc_nid),
                       libcfs_nid2str(ni->ni_nid));
kgnilnd_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
{
        kgn_net_t        *net = ni->ni_data;
        kgn_peer_t       *peer = NULL;
        kgn_conn_t       *conn = NULL;
        lnet_process_id_t id = {
                .nid = nid,
                .pid = LNET_PID_LUSTRE,
        };

        /* I expect to find him, so only take a read lock */
        read_lock(&kgnilnd_data.kgn_peer_conn_lock);
        peer = kgnilnd_find_peer_locked(nid);

        /* LIE if in a quiesce - we will update the timeouts after,
         * but we don't want sends failing during it */
        if (kgnilnd_data.kgn_quiesce_trigger) {

                read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        /* Update to best guess, might refine on later checks */
        *when = peer->gnp_last_alive;

        /* we have a peer, how about a conn? */
        conn = kgnilnd_find_conn_locked(peer);

        /* if there is no conn, check peer last errno to see if clean disconnect
         * - if it was, we lie to LNet because we believe a TX would complete
         */
        if (kgnilnd_conn_clean_errno(peer->gnp_last_errno)) {

        /* we still want to fire a TX and new conn in this case */

        /* gnp_last_alive is valid, run for the hills */
        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        /* if we get here, either we have no peer or no conn for him, so fire off
         * new TX to trigger conn setup */
        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        /* if we couldn't find him, we'll fire up a TX and get connected -
         * if we don't do this, after ni_peer_timeout, LNet will declare him dead.
         * So really we treat kgnilnd_query as a bit of a 'connect now' type
         * event because it'll only do this when it wants to send.
         *
         * Use a real TX for this to get the proper gnp_tx_queue behavior, etc;
         * normally we'd use kgnilnd_send_ctlmsg for this, but we don't really
         * care that this goes out quickly since we already know we need a new conn */
        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))

        tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, ni->ni_nid);

        kgnilnd_launch_tx(tx, net, &id);

        CDEBUG(D_NETTRACE, "peer 0x%p->%s when %lu\n", peer,
               libcfs_nid2str(nid), *when);
kgnilnd_dev_init(kgn_device_t *dev)
{
        unsigned int cq_size;

        /* size of these CQs should be able to accommodate the outgoing
         * RDMA and SMSG transactions.  Since we don't yet know what we
         * really need here, we'll take credits * 2 * 3 to allow a bunch.
         * We need to dig into this more with the performance work. */
        cq_size = *kgnilnd_tunables.kgn_credits * 2 * 3;
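        /* e.g. if kgn_credits were 256, cq_size comes out at 1536 entries */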
        rrc = kgnilnd_cdm_create(dev->gnd_id, *kgnilnd_tunables.kgn_ptag,
                                 *kgnilnd_tunables.kgn_pkey, 0,
                                 &dev->gnd_domain);
        if (rrc != GNI_RC_SUCCESS) {
                CERROR("Can't create CDM %d (%d)\n", dev->gnd_id, rrc);
                GOTO(failed, rc = -ENODEV);
        }

        rrc = kgnilnd_cdm_attach(dev->gnd_domain, dev->gnd_id,
                                 &dev->gnd_host_id, &dev->gnd_handle);
        if (rrc != GNI_RC_SUCCESS) {
                CERROR("Can't attach CDM to device %d (%d)\n",
                       dev->gnd_id, rrc);
                GOTO(failed, rc = -ENODEV);
        }

        /* a bit gross, but not much we can do - Aries Sim doesn't have
         * hardcoded NIC/NID that we can use */
        rc = kgnilnd_setup_nic_translation(dev->gnd_host_id);
        if (rc != 0)
                GOTO(failed, rc = -ENODEV);
        /* only dev 0 gets the errors - no need to reset the stack twice
         * - this works because we have a single PTAG, if we had more
         * then we'd need to have multiple handlers */
        if (dev->gnd_id == 0) {
                rrc = kgnilnd_subscribe_errors(dev->gnd_handle,
                                               GNI_ERRMASK_CRITICAL |
                                               GNI_ERRMASK_UNKNOWN_TRANSACTION,
                                               0, NULL, kgnilnd_critical_error,
                                               &dev->gnd_err_handle);
                if (rrc != GNI_RC_SUCCESS) {
                        CERROR("Can't subscribe for errors on device %d: rc %d\n",
                               dev->gnd_id, rrc);
                        GOTO(failed, rc = -ENODEV);
                }

                rc = kgnilnd_set_quiesce_callback(dev->gnd_handle,
                                                  kgnilnd_quiesce_end_callback);
                if (rc != GNI_RC_SUCCESS) {
                        CERROR("Can't subscribe for quiesce callback on device %d: rc %d\n",
                               dev->gnd_id, rc);
                        GOTO(failed, rc = -ENODEV);
                }
        }
        rrc = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_IP, &kgnilnd_data.kgn_sock);
        if (rrc < 0) {
                CERROR("sock_create returned %d\n", rrc);

        rc = kgnilnd_nicaddr_to_nid(dev->gnd_host_id, &dev->gnd_nid);
        if (rc < 0) {
                /* log messages during startup */
                if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
                        CERROR("couldn't translate host_id 0x%x to nid. rc %d\n",
                               dev->gnd_host_id, rc);

                GOTO(failed, rc = -ESRCH);

        CDEBUG(D_NET, "NIC %x -> NID %d\n", dev->gnd_host_id, dev->gnd_nid);
2066 rrc = kgnilnd_cq_create(dev->gnd_handle, *kgnilnd_tunables.kgn_credits,
2067 0, kgnilnd_device_callback,
2068 dev->gnd_id, &dev->gnd_snd_rdma_cqh);
2069 if (rrc != GNI_RC_SUCCESS) {
2070 CERROR("Can't create rdma send cq size %u for device "
2071 "%d (%d)\n", cq_size, dev->gnd_id, rrc);
2072 GOTO(failed, rc = -EINVAL);
2075 rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
2076 0, kgnilnd_device_callback, dev->gnd_id,
2077 &dev->gnd_snd_fma_cqh);
2078 if (rrc != GNI_RC_SUCCESS) {
2079 CERROR("Can't create fma send cq size %u for device %d (%d)\n",
2080 cq_size, dev->gnd_id, rrc);
2081 GOTO(failed, rc = -EINVAL);
2084 /* This one we size differently - overflows are possible and it needs to be
2085 * sized based on machine size */
2086 rrc = kgnilnd_cq_create(dev->gnd_handle,
2087 *kgnilnd_tunables.kgn_fma_cq_size,
2088 0, kgnilnd_device_callback, dev->gnd_id,
2089 &dev->gnd_rcv_fma_cqh);
2090 if (rrc != GNI_RC_SUCCESS) {
2091 CERROR("Can't create fma cq size %d for device %d (%d)\n",
2092 *kgnilnd_tunables.kgn_fma_cq_size, dev->gnd_id, rrc);
2093 GOTO(failed, rc = -EINVAL);
2099 kgnilnd_dev_fini(dev);
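/* A note on the error handling above: GOTO() is the libcfs helper macro,
 * roughly (simplified sketch - see the libcfs debug headers for the real
 * definition):
 *
 *	#define GOTO(label, rc)						\
 *	do {								\
 *		long GOTO__ret = (long)(rc);				\
 *		CDEBUG(D_TRACE, "Process leaving via %s (rc=%ld)\n",	\
 *		       #label, GOTO__ret);				\
 *		goto label;						\
 *	} while (0)
 *
 * so every failure path logs the rc it carries and unwinds to the single
 * 'failed:' cleanup point, where kgnilnd_dev_fini() only tears down the
 * pieces that were actually created (each handle is NULL-checked). */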
void
kgnilnd_dev_fini(kgn_device_t *dev)
{
	gni_return_t rrc;
	ENTRY;

	/* At quiesce or rest time, need to loop through and clear gnd_ready_conns? */
	LASSERTF(list_empty(&dev->gnd_ready_conns) &&
		 list_empty(&dev->gnd_map_tx) &&
		 list_empty(&dev->gnd_rdmaq),
		 "dev 0x%p ready_conns %d@0x%p map_tx %d@0x%p rdmaq %d@0x%p\n",
		 dev, kgnilnd_count_list(&dev->gnd_ready_conns), &dev->gnd_ready_conns,
		 kgnilnd_count_list(&dev->gnd_map_tx), &dev->gnd_map_tx,
		 kgnilnd_count_list(&dev->gnd_rdmaq), &dev->gnd_rdmaq);

	/* These should follow from tearing down all connections */
	LASSERTF(dev->gnd_map_nphys == 0 && dev->gnd_map_physnop == 0,
		 "%d physical mappings of %d pages still mapped\n",
		 dev->gnd_map_nphys, dev->gnd_map_physnop);

	LASSERTF(dev->gnd_map_nvirt == 0 && dev->gnd_map_virtnob == 0,
		 "%d virtual mappings of "LPU64" bytes still mapped\n",
		 dev->gnd_map_nvirt, dev->gnd_map_virtnob);

	LASSERTF(atomic_read(&dev->gnd_n_mdd) == 0 &&
		 atomic_read(&dev->gnd_n_mdd_held) == 0 &&
		 atomic64_read(&dev->gnd_nbytes_map) == 0,
		 "%d SMSG mappings of %ld bytes still mapped or held %d\n",
		 atomic_read(&dev->gnd_n_mdd),
		 atomic64_read(&dev->gnd_nbytes_map),
		 atomic_read(&dev->gnd_n_mdd_held));

	LASSERT(list_empty(&dev->gnd_map_list));

	/* What other assertions are needed to ensure all connections are torn down? */

	/* check all counters == 0 (EP, MDD, etc) */

	/* if we are resetting due to quiesce (stack reset), don't check
	 * thread states */
	LASSERTF(kgnilnd_data.kgn_quiesce_trigger ||
		 atomic_read(&kgnilnd_data.kgn_nthreads) == 0,
		 "tried to shutdown with threads active\n");

	if (dev->gnd_rcv_fma_cqh) {
		rrc = kgnilnd_cq_destroy(dev->gnd_rcv_fma_cqh);
		LASSERTF(rrc == GNI_RC_SUCCESS,
			 "bad rc from gni_cq_destroy on rcv_fma_cqh: %d\n", rrc);
		dev->gnd_rcv_fma_cqh = NULL;
	}

	if (dev->gnd_snd_rdma_cqh) {
		rrc = kgnilnd_cq_destroy(dev->gnd_snd_rdma_cqh);
		LASSERTF(rrc == GNI_RC_SUCCESS,
			 "bad rc from gni_cq_destroy on send_rdma_cqh: %d\n", rrc);
		dev->gnd_snd_rdma_cqh = NULL;
	}

	if (dev->gnd_snd_fma_cqh) {
		rrc = kgnilnd_cq_destroy(dev->gnd_snd_fma_cqh);
		LASSERTF(rrc == GNI_RC_SUCCESS,
			 "bad rc from gni_cq_destroy on snd_fma_cqh: %d\n", rrc);
		dev->gnd_snd_fma_cqh = NULL;
	}

	if (dev->gnd_err_handle) {
		rrc = kgnilnd_release_errors(dev->gnd_err_handle);
		LASSERTF(rrc == GNI_RC_SUCCESS,
			 "bad rc from gni_release_errors: %d\n", rrc);
		dev->gnd_err_handle = NULL;
	}

	if (dev->gnd_domain) {
		rrc = kgnilnd_cdm_destroy(dev->gnd_domain);
		LASSERTF(rrc == GNI_RC_SUCCESS,
			 "bad rc from gni_cdm_destroy: %d\n", rrc);
		dev->gnd_domain = NULL;
	}

	if (kgnilnd_data.kgn_sock)
		sock_release(kgnilnd_data.kgn_sock);

	EXIT;
}
int kgnilnd_base_startup(void)
{
	struct timeval       tv;
	int                  pkmem = atomic_read(&libcfs_kmemory);
	int                  rc;
	int                  i;
	int                  j;
	kgn_device_t        *dev;
	struct task_struct  *thrd;

#if defined(CONFIG_CRAY_XT) && !defined(CONFIG_CRAY_COMPUTE)
	/* limit how much memory can be allocated for fma blocks in
	 * instances where many nodes need to reconnect at the same time */
	struct sysinfo si;

	si_meminfo(&si);
	kgnilnd_data.free_pages_limit = si.totalram / 4;
#endif

	ENTRY;

	LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_NOTHING,
		 "init %d\n", kgnilnd_data.kgn_init);

	/* zero pointers, flags etc */
	memset(&kgnilnd_data, 0, sizeof(kgnilnd_data));
	kgnilnd_check_kgni_version();

	/* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
	 * a unique (for all time) connstamp so we can uniquely identify
	 * the sender.  The connstamp is an incrementing counter
	 * initialised with seconds + microseconds at startup time.  So we
	 * rely on NOT creating connections more frequently on average than
	 * 1MHz to ensure we don't use old connstamps when we reboot. */
	do_gettimeofday(&tv);
	kgnilnd_data.kgn_connstamp =
	kgnilnd_data.kgn_peerstamp =
		(((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
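	/* e.g. a boot at tv_sec = 1500000000, tv_usec = 250000 yields
	 * connstamp 1500000000250000.  A node would have to average more
	 * than one new connection per microsecond (the 1MHz above) for a
	 * post-reboot stamp to collide with a pre-reboot one. */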
	init_rwsem(&kgnilnd_data.kgn_net_rw_sem);

	for (i = 0; i < GNILND_MAXDEVS; i++) {
		kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];

		dev->gnd_id = i;
		INIT_LIST_HEAD(&dev->gnd_ready_conns);
		INIT_LIST_HEAD(&dev->gnd_map_tx);
		INIT_LIST_HEAD(&dev->gnd_fma_buffs);
		mutex_init(&dev->gnd_cq_mutex);
		mutex_init(&dev->gnd_fmablk_mutex);
		spin_lock_init(&dev->gnd_fmablk_lock);
		init_waitqueue_head(&dev->gnd_waitq);
		init_waitqueue_head(&dev->gnd_dgram_waitq);
		init_waitqueue_head(&dev->gnd_dgping_waitq);
		spin_lock_init(&dev->gnd_lock);
		INIT_LIST_HEAD(&dev->gnd_map_list);
		spin_lock_init(&dev->gnd_map_lock);
		atomic_set(&dev->gnd_nfmablk, 0);
		atomic_set(&dev->gnd_fmablk_vers, 1);
		atomic_set(&dev->gnd_neps, 0);
		atomic_set(&dev->gnd_canceled_dgrams, 0);
		INIT_LIST_HEAD(&dev->gnd_connd_peers);
		spin_lock_init(&dev->gnd_connd_lock);
		spin_lock_init(&dev->gnd_dgram_lock);
		spin_lock_init(&dev->gnd_rdmaq_lock);
		INIT_LIST_HEAD(&dev->gnd_rdmaq);
		init_rwsem(&dev->gnd_conn_sem);

		/* alloc & setup nid based dgram table */
		LIBCFS_ALLOC(dev->gnd_dgrams,
			     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);

		if (dev->gnd_dgrams == NULL)
			GOTO(failed, rc = -ENOMEM);

		/* NB: use 'j' here - 'i' is the device loop counter */
		for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++) {
			INIT_LIST_HEAD(&dev->gnd_dgrams[j]);
		}
		atomic_set(&dev->gnd_ndgrams, 0);
		atomic_set(&dev->gnd_nwcdgrams, 0);

		/* setup timer for RDMAQ processing */
		setup_timer(&dev->gnd_rdmaq_timer, kgnilnd_schedule_device_timer,
			    (unsigned long)dev);

		/* setup timer for mapping processing */
		setup_timer(&dev->gnd_map_timer, kgnilnd_schedule_device_timer,
			    (unsigned long)dev);
	}

	/* CQID 0 isn't allowed, set to MAX_MSG_ID - 1 to check for conflicts early */
	kgnilnd_data.kgn_next_cqid = GNILND_MAX_MSG_ID - 1;
	kgnilnd_data.kgn_new_min_timeout = *kgnilnd_tunables.kgn_timeout;
	init_waitqueue_head(&kgnilnd_data.kgn_reaper_waitq);
	init_waitqueue_head(&kgnilnd_data.kgn_ruhroh_waitq);
	spin_lock_init(&kgnilnd_data.kgn_reaper_lock);

	mutex_init(&kgnilnd_data.kgn_quiesce_mutex);
	atomic_set(&kgnilnd_data.kgn_nquiesce, 0);
	atomic_set(&kgnilnd_data.kgn_npending_conns, 0);
	atomic_set(&kgnilnd_data.kgn_npending_unlink, 0);
	atomic_set(&kgnilnd_data.kgn_npending_detach, 0);
	atomic_set(&kgnilnd_data.kgn_rev_offset, 0);
	atomic_set(&kgnilnd_data.kgn_rev_length, 0);
	atomic_set(&kgnilnd_data.kgn_rev_copy_buff, 0);

	/* OK to call kgnilnd_base_shutdown() to cleanup now */
	kgnilnd_data.kgn_init = GNILND_INIT_DATA;
	try_module_get(THIS_MODULE);

	rwlock_init(&kgnilnd_data.kgn_peer_conn_lock);

	LIBCFS_ALLOC(kgnilnd_data.kgn_peers,
		     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);

	if (kgnilnd_data.kgn_peers == NULL)
		GOTO(failed, rc = -ENOMEM);

	for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
		INIT_LIST_HEAD(&kgnilnd_data.kgn_peers[i]);
	}

	LIBCFS_ALLOC(kgnilnd_data.kgn_conns,
		     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);

	if (kgnilnd_data.kgn_conns == NULL)
		GOTO(failed, rc = -ENOMEM);

	for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
		INIT_LIST_HEAD(&kgnilnd_data.kgn_conns[i]);
	}

	LIBCFS_ALLOC(kgnilnd_data.kgn_nets,
		     sizeof(struct list_head) * *kgnilnd_tunables.kgn_net_hash_size);

	if (kgnilnd_data.kgn_nets == NULL)
		GOTO(failed, rc = -ENOMEM);

	for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
		INIT_LIST_HEAD(&kgnilnd_data.kgn_nets[i]);
	}

	kgnilnd_data.kgn_mbox_cache =
		kmem_cache_create("kgn_mbox_block", KMALLOC_MAX_SIZE, 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (kgnilnd_data.kgn_mbox_cache == NULL) {
		CERROR("Can't create slab for physical mbox blocks\n");
		GOTO(failed, rc = -ENOMEM);
	}

	kgnilnd_data.kgn_rx_cache =
		kmem_cache_create("kgn_rx_t", sizeof(kgn_rx_t), 0, 0, NULL);
	if (kgnilnd_data.kgn_rx_cache == NULL) {
		CERROR("Can't create slab for kgn_rx_t descriptors\n");
		GOTO(failed, rc = -ENOMEM);
	}

	kgnilnd_data.kgn_tx_cache =
		kmem_cache_create("kgn_tx_t", sizeof(kgn_tx_t), 0, 0, NULL);
	if (kgnilnd_data.kgn_tx_cache == NULL) {
		CERROR("Can't create slab for kgn_tx_t\n");
		GOTO(failed, rc = -ENOMEM);
	}

	kgnilnd_data.kgn_tx_phys_cache =
		kmem_cache_create("kgn_tx_phys",
				  LNET_MAX_IOV * sizeof(gni_mem_segment_t),
				  0, 0, NULL);
	if (kgnilnd_data.kgn_tx_phys_cache == NULL) {
		CERROR("Can't create slab for kgn_tx_phys\n");
		GOTO(failed, rc = -ENOMEM);
	}

	kgnilnd_data.kgn_dgram_cache =
		kmem_cache_create("kgn_dgram_t", sizeof(kgn_dgram_t), 0, 0, NULL);
	if (kgnilnd_data.kgn_dgram_cache == NULL) {
		CERROR("Can't create slab for outgoing datagrams\n");
		GOTO(failed, rc = -ENOMEM);
	}

	/* allocate a MAX_IOV array of page pointers for each cpu */
	kgnilnd_data.kgn_cksum_map_pages = kmalloc(num_possible_cpus() * sizeof(struct page *),
						   GFP_KERNEL);
	if (kgnilnd_data.kgn_cksum_map_pages == NULL) {
		CERROR("Can't allocate vmap cksum pages\n");
		GOTO(failed, rc = -ENOMEM);
	}
	kgnilnd_data.kgn_cksum_npages = num_possible_cpus();
	memset(kgnilnd_data.kgn_cksum_map_pages, 0,
	       kgnilnd_data.kgn_cksum_npages * sizeof(struct page *));

	for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
		kgnilnd_data.kgn_cksum_map_pages[i] = kmalloc(LNET_MAX_IOV * sizeof(struct page *),
							      GFP_KERNEL);
		if (kgnilnd_data.kgn_cksum_map_pages[i] == NULL) {
			CERROR("Can't allocate vmap cksum pages for cpu %d\n", i);
			GOTO(failed, rc = -ENOMEM);
		}
	}

	LASSERT(kgnilnd_data.kgn_ndevs == 0);

	/* Use all available GNI devices */
	for (i = 0; i < GNILND_MAXDEVS; i++) {
		dev = &kgnilnd_data.kgn_devices[kgnilnd_data.kgn_ndevs];

		rc = kgnilnd_dev_init(dev);
		if (rc == 0) {
			/* Increment here so base_shutdown cleans it up */
			kgnilnd_data.kgn_ndevs++;

			rc = kgnilnd_allocate_phys_fmablk(dev);
			if (rc)
				GOTO(failed, rc);
		}
	}

	if (kgnilnd_data.kgn_ndevs == 0) {
		CERROR("Can't initialise any GNI devices\n");
		GOTO(failed, rc = -ENODEV);
	}

	rc = kgnilnd_thread_start(kgnilnd_reaper, NULL, "kgnilnd_rpr", 0);
	if (rc != 0) {
		CERROR("Can't spawn gnilnd reaper: %d\n", rc);
		GOTO(failed, rc);
	}

	rc = kgnilnd_start_rca_thread();
	if (rc != 0) {
		CERROR("Can't spawn gnilnd rca: %d\n", rc);
		GOTO(failed, rc);
	}

	/*
	 * Start ruhroh thread.  We can't use kgnilnd_thread_start() because
	 * we don't want this thread included in the kgnilnd_data.kgn_nthreads
	 * count.  This thread controls quiesce, so it mustn't
	 * quiesce itself.
	 */
	thrd = kthread_run(kgnilnd_ruhroh_thread, NULL, "%s_%02d", "kgnilnd_rr", 0);
	if (IS_ERR(thrd)) {
		rc = PTR_ERR(thrd);
		CERROR("Can't spawn gnilnd ruhroh thread: %d\n", rc);
		GOTO(failed, rc);
	}

	/* threads will load balance across devs as they are available */
	if (*kgnilnd_tunables.kgn_thread_affinity) {
		rc = kgnilnd_start_sd_threads();
		if (rc != 0)
			GOTO(failed, rc);
	} else {
		for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
			rc = kgnilnd_thread_start(kgnilnd_scheduler,
						  (void *)((long)i),
						  "kgnilnd_sd", i);
			if (rc != 0) {
				CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
				       i, rc);
				GOTO(failed, rc);
			}
		}
	}

	for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
		dev = &kgnilnd_data.kgn_devices[i];
		rc = kgnilnd_thread_start(kgnilnd_dgram_mover, dev,
					  "kgnilnd_dg", dev->gnd_id);
		if (rc != 0) {
			CERROR("Can't spawn gnilnd dgram_mover[%d]: %d\n",
			       dev->gnd_id, rc);
			GOTO(failed, rc);
		}

		rc = kgnilnd_thread_start(kgnilnd_dgram_waitq, dev,
					  "kgnilnd_dgn", dev->gnd_id);
		if (rc != 0) {
			CERROR("Can't spawn gnilnd dgram_waitq[%d]: %d\n",
			       dev->gnd_id, rc);
			GOTO(failed, rc);
		}

		rc = kgnilnd_setup_wildcard_dgram(dev);
		if (rc != 0) {
			CERROR("Can't create wildcard dgrams[%d]: %d\n",
			       dev->gnd_id, rc);
			GOTO(failed, rc);
		}
	}

	/* flag everything initialised */
	kgnilnd_data.kgn_init = GNILND_INIT_ALL;
	/*****************************************************/

	CDEBUG(D_MALLOC, "initial kmem %d\n", pkmem);
	RETURN(0);

failed:
	kgnilnd_base_shutdown();
	kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
	RETURN(rc);
}
void
kgnilnd_base_shutdown(void)
{
	int i, j;
	ENTRY;

	while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_PAUSE_SHUTDOWN, 1)) {};

	kgnilnd_data.kgn_wc_kill = 1;

	for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
		kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];

		kgnilnd_cancel_wc_dgrams(dev);
		kgnilnd_cancel_dgrams(dev);
		kgnilnd_del_conn_or_peer(NULL, LNET_NID_ANY, GNILND_DEL_PEER,
					 -ESHUTDOWN);
		kgnilnd_wait_for_canceled_dgrams(dev);
	}

	/* We need to verify there are no conns left before we let the threads
	 * shut down otherwise we could clean up the peers but still have
	 * some outstanding conns due to orphaned datagram conns that are
	 * connecting to us */
	i = 2;
	while (atomic_read(&kgnilnd_data.kgn_nconns) != 0) {
		i++;

		for (j = 0; j < kgnilnd_data.kgn_ndevs; ++j) {
			kgn_device_t *dev = &kgnilnd_data.kgn_devices[j];
			kgnilnd_schedule_device(dev);
		}

		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
		       "Waiting for conns to be cleaned up %d\n",
		       atomic_read(&kgnilnd_data.kgn_nconns));
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));
	}
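	/* NB: (i & (-i)) == i is true only when i is a power of two, since
	 * i & -i isolates the lowest set bit (e.g. 12 & -12 == 4, but
	 * 16 & -16 == 16).  The effect is cheap exponential backoff: the
	 * "waiting" message is promoted to D_WARNING on iterations 4, 8,
	 * 16, 32, ... and logged quietly at D_NET otherwise.  The same
	 * idiom is used for the other wait loops below. */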
	/* Peer state all cleaned up BEFORE setting shutdown, so threads don't
	 * have to worry about shutdown races.  NB connections may be created
	 * while there are still active connds, but these will be temporary
	 * since peer creation always fails after the listener has started to
	 * shut down.
	 * all peers should have been cleared out on the nets */
	LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
		 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));

	/* Wait for the ruhroh thread to shut down. */
	kgnilnd_data.kgn_ruhroh_shutdown = 1;
	wake_up(&kgnilnd_data.kgn_ruhroh_waitq);
	i = 2;
	while (kgnilnd_data.kgn_ruhroh_running != 0) {
		i++;
		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
		       "Waiting for ruhroh thread to terminate\n");
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));
	}

	/* Flag threads to terminate */
	kgnilnd_data.kgn_shutdown = 1;

	for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
		kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];

		/* should clear all the MDDs */
		kgnilnd_unmap_fma_blocks(dev);

		kgnilnd_schedule_device(dev);
		wake_up_all(&dev->gnd_dgram_waitq);
		wake_up_all(&dev->gnd_dgping_waitq);
		LASSERT(list_empty(&dev->gnd_connd_peers));
	}

	spin_lock(&kgnilnd_data.kgn_reaper_lock);
	wake_up_all(&kgnilnd_data.kgn_reaper_waitq);
	spin_unlock(&kgnilnd_data.kgn_reaper_lock);

	if (atomic_read(&kgnilnd_data.kgn_nthreads))
		kgnilnd_wakeup_rca_thread();

	/* Wait for threads to exit */
	i = 2;
	while (atomic_read(&kgnilnd_data.kgn_nthreads) != 0) {
		i++;
		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
		       "Waiting for %d threads to terminate\n",
		       atomic_read(&kgnilnd_data.kgn_nthreads));
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));
	}

	LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
		 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));

	if (kgnilnd_data.kgn_peers != NULL) {
		for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
			LASSERT(list_empty(&kgnilnd_data.kgn_peers[i]));

		LIBCFS_FREE(kgnilnd_data.kgn_peers,
			    sizeof(struct list_head) *
			    *kgnilnd_tunables.kgn_peer_hash_size);
	}

	down_write(&kgnilnd_data.kgn_net_rw_sem);
	if (kgnilnd_data.kgn_nets != NULL) {
		for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++)
			LASSERT(list_empty(&kgnilnd_data.kgn_nets[i]));

		LIBCFS_FREE(kgnilnd_data.kgn_nets,
			    sizeof(struct list_head) *
			    *kgnilnd_tunables.kgn_net_hash_size);
	}
	up_write(&kgnilnd_data.kgn_net_rw_sem);

	LASSERTF(atomic_read(&kgnilnd_data.kgn_nconns) == 0,
		 "conns left %d\n", atomic_read(&kgnilnd_data.kgn_nconns));

	if (kgnilnd_data.kgn_conns != NULL) {
		for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
			LASSERT(list_empty(&kgnilnd_data.kgn_conns[i]));

		LIBCFS_FREE(kgnilnd_data.kgn_conns,
			    sizeof(struct list_head) *
			    *kgnilnd_tunables.kgn_peer_hash_size);
	}

	for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
		kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];

		kgnilnd_dev_fini(dev);

		LASSERTF(atomic_read(&dev->gnd_ndgrams) == 0,
			 "dgrams left %d\n", atomic_read(&dev->gnd_ndgrams));

		if (dev->gnd_dgrams != NULL) {
			/* NB: use 'j' here - 'i' is the device loop counter */
			for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++)
				LASSERT(list_empty(&dev->gnd_dgrams[j]));

			LIBCFS_FREE(dev->gnd_dgrams,
				    sizeof(struct list_head) *
				    *kgnilnd_tunables.kgn_peer_hash_size);
		}

		kgnilnd_free_phys_fmablk(dev);
	}

	if (kgnilnd_data.kgn_mbox_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_mbox_cache);

	if (kgnilnd_data.kgn_rx_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_rx_cache);

	if (kgnilnd_data.kgn_tx_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_tx_cache);

	if (kgnilnd_data.kgn_tx_phys_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_tx_phys_cache);

	if (kgnilnd_data.kgn_dgram_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_dgram_cache);

	if (kgnilnd_data.kgn_cksum_map_pages != NULL) {
		for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
			if (kgnilnd_data.kgn_cksum_map_pages[i] != NULL) {
				kfree(kgnilnd_data.kgn_cksum_map_pages[i]);
			}
		}
		kfree(kgnilnd_data.kgn_cksum_map_pages);
	}

	CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
	       atomic_read(&libcfs_kmemory));

	kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
	module_put(THIS_MODULE);

	EXIT;
}
int
kgnilnd_startup(lnet_ni_t *ni)
{
	int               rc;
	int               devno;
	kgn_net_t        *net;
	ENTRY;

	LASSERTF(ni->ni_lnd == &the_kgnilnd,
		 "bad LND 0x%p != the_kgnilnd @ 0x%p\n",
		 ni->ni_lnd, &the_kgnilnd);

	if (kgnilnd_data.kgn_init == GNILND_INIT_NOTHING) {
		rc = kgnilnd_base_startup();
		if (rc != 0)
			RETURN(rc);
	}

	/* Serialize with shutdown. */
	mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);

	LIBCFS_ALLOC(net, sizeof(*net));
	if (net == NULL) {
		CERROR("could not allocate net for new interface instance\n");
		/* no need to cleanup the CDM... */
		GOTO(failed, rc = -ENOMEM);
	}
	INIT_LIST_HEAD(&net->gnn_list);
	ni->ni_data = net;
	net->gnn_ni = ni;
	ni->ni_maxtxcredits = *kgnilnd_tunables.kgn_credits;
	ni->ni_peertxcredits = *kgnilnd_tunables.kgn_peer_credits;

	if (*kgnilnd_tunables.kgn_peer_health) {
		int     fudge;
		int     timeout;

		/* give this a bit of leeway - we don't have a hard timeout
		 * as we only check timeouts periodically - see comment in
		 * kgnilnd_reaper */
		fudge = (GNILND_TO2KA(*kgnilnd_tunables.kgn_timeout) / GNILND_REAPER_NCHECKS);
		timeout = *kgnilnd_tunables.kgn_timeout + fudge;
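		/* worked example with illustrative numbers (the real values
		 * come from the tunables): if kgn_timeout is 60s, the reaper
		 * makes GNILND_REAPER_NCHECKS = 4 passes per interval, and
		 * GNILND_TO2KA(60) == 60, then fudge = 60 / 4 = 15 and the
		 * effective peer health timeout becomes 60 + 15 = 75s. */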
		if (*kgnilnd_tunables.kgn_peer_timeout >= timeout)
			ni->ni_peertimeout = *kgnilnd_tunables.kgn_peer_timeout;
		else if (*kgnilnd_tunables.kgn_peer_timeout > -1) {
			LCONSOLE_ERROR("Peer_timeout is set to %d but needs to be >= %d\n",
				       *kgnilnd_tunables.kgn_peer_timeout,
				       timeout);
			ni->ni_data = NULL;
			LIBCFS_FREE(net, sizeof(*net));
			GOTO(failed, rc = -EINVAL);
		} else
			ni->ni_peertimeout = timeout;

		LCONSOLE_INFO("Enabling LNet peer health for gnilnd, timeout %ds\n",
			      ni->ni_peertimeout);
	}

	atomic_set(&net->gnn_refcount, 1);

	/* if we have multiple devices, spread the nets around */
	net->gnn_netnum = LNET_NETNUM(LNET_NIDNET(ni->ni_nid));
	devno = LNET_NIDNET(ni->ni_nid) % GNILND_MAXDEVS;
	net->gnn_dev = &kgnilnd_data.kgn_devices[devno];
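	/* e.g. (hypothetical) with GNILND_MAXDEVS == 2, even-numbered nets
	 * (gni0, gni2, ...) would land on device 0 and odd-numbered nets on
	 * device 1; with the usual single device, every net maps to devno 0. */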
	/* allocate a 'dummy' cdm for datagram use.  We can only have a single
	 * datagram between a nid:inst_id and nid2:inst_id.  The fake cdm
	 * gives us an additional inst_id to use, allowing the datagrams to flow
	 * like rivers of honey and beer */

	/* the instance id for the cdm is the NETNUM offset by MAXDEVS -
	 * ensuring we'll have a unique id */

	ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), net->gnn_dev->gnd_nid);
	CDEBUG(D_NET, "adding net %p nid=%s on dev %d\n",
	       net, libcfs_nid2str(ni->ni_nid), net->gnn_dev->gnd_id);

	/* until the gnn_list is set, we need to cleanup ourselves as
	 * kgnilnd_shutdown is just gonna get confused */
	down_write(&kgnilnd_data.kgn_net_rw_sem);
	list_add_tail(&net->gnn_list, kgnilnd_netnum2netlist(net->gnn_netnum));
	up_write(&kgnilnd_data.kgn_net_rw_sem);

	/* we need a separate thread to call probe_wait_by_id until
	 * we get a function callback notifier from kgni */
	mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
	RETURN(0);

failed:
	mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
	kgnilnd_shutdown(ni);
	RETURN(rc);
}
void
kgnilnd_shutdown(lnet_ni_t *ni)
{
	kgn_net_t *net = ni->ni_data;
	int        i;
	int        rc;
	ENTRY;

	CFS_RACE(CFS_FAIL_GNI_SR_DOWN_RACE);

	LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_ALL,
		 "init %d\n", kgnilnd_data.kgn_init);

	/* Serialize with startup. */
	mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
	CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
	       atomic_read(&libcfs_kmemory));

	if (net == NULL) {
		CERROR("got NULL net for ni %p\n", ni);
		GOTO(out, rc = -EINVAL);
	}

	LASSERTF(ni == net->gnn_ni,
		 "ni %p gnn_ni %p\n", net, net->gnn_ni);

	ni->ni_data = NULL;

	LASSERT(!net->gnn_shutdown);
	LASSERTF(atomic_read(&net->gnn_refcount) != 0,
		 "net %p refcount %d\n",
		 net, atomic_read(&net->gnn_refcount));

	if (!list_empty(&net->gnn_list)) {
		/* serialize with peer creation */
		down_write(&kgnilnd_data.kgn_net_rw_sem);
		net->gnn_shutdown = 1;
		up_write(&kgnilnd_data.kgn_net_rw_sem);

		kgnilnd_cancel_net_dgrams(net);

		kgnilnd_del_conn_or_peer(net, LNET_NID_ANY, GNILND_DEL_PEER,
					 -ESHUTDOWN);

		/* if we are quiesced, need to wake up - we need those threads
		 * alive to release peers, etc */
		if (GNILND_IS_QUIESCED) {
			set_mb(kgnilnd_data.kgn_quiesce_trigger,
			       GNILND_QUIESCE_IDLE);
			kgnilnd_quiesce_wait("shutdown");
		}

		kgnilnd_wait_for_canceled_dgrams(net->gnn_dev);

		/* We wait until the net's refs are 1 - we will release the final
		 * ref (which is ours) ourselves; this allows us to make sure
		 * everything else is done before we free the net */
		i = 4;
		while (atomic_read(&net->gnn_refcount) != 1) {
			i++;
			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
			       "Waiting for %d references to clear on net %d\n",
			       atomic_read(&net->gnn_refcount),
			       net->gnn_netnum);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(cfs_time_seconds(1));
		}

		/* release ref from kgnilnd_startup */
		kgnilnd_net_decref(net);

		/* serialize with reaper and conn_task looping */
		down_write(&kgnilnd_data.kgn_net_rw_sem);
		list_del_init(&net->gnn_list);
		up_write(&kgnilnd_data.kgn_net_rw_sem);
	}

	/* not locking, this can't race with writers */
	LASSERTF(atomic_read(&net->gnn_refcount) == 0,
		 "net %p refcount %d\n",
		 net, atomic_read(&net->gnn_refcount));
	LIBCFS_FREE(net, sizeof(*net));

out:
	down_read(&kgnilnd_data.kgn_net_rw_sem);
	for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
		if (!list_empty(&kgnilnd_data.kgn_nets[i])) {
			up_read(&kgnilnd_data.kgn_net_rw_sem);
			break;
		}

		if (i == *kgnilnd_tunables.kgn_net_hash_size - 1) {
			up_read(&kgnilnd_data.kgn_net_rw_sem);
			kgnilnd_base_shutdown();
		}
	}
	CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
	       atomic_read(&libcfs_kmemory));

	mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
	EXIT;
}
void __exit
kgnilnd_module_fini(void)
{
	lnet_unregister_lnd(&the_kgnilnd);
	kgnilnd_proc_fini();
	kgnilnd_remove_sysctl();
	kgnilnd_tunables_fini();
}

int __init
kgnilnd_module_init(void)
{
	int    rc;

	rc = kgnilnd_tunables_init();
	if (rc != 0)
		return rc;

	printk(KERN_INFO "Lustre: kgnilnd build version: "KGNILND_BUILD_REV"\n");

	kgnilnd_insert_sysctl();
	kgnilnd_proc_init();

	lnet_register_lnd(&the_kgnilnd);

	return 0;
}

MODULE_AUTHOR("Cray, Inc. <nic@cray.com>");
MODULE_DESCRIPTION("Kernel Gemini LND v"KGNILND_BUILD_REV);
MODULE_LICENSE("GPL");

module_init(kgnilnd_module_init);
module_exit(kgnilnd_module_fini);