LU-10391 lnet: change lnet_notify() to take struct lnet_nid
lnet/klnds/gnilnd/gnilnd.c (fs/lustre-release.git)
/*
 * Copyright (C) 2012 Cray, Inc.
 *
 * Copyright (c) 2013, 2017, Intel Corporation.
 *
 *   Author: Nic Henke <nic@cray.com>
 *   Author: James Shimek <jshimek@cray.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */
#include "gnilnd.h"

/* Primary entry points from LNET.  There are no guarantees against reentrance. */
const struct lnet_lnd the_kgnilnd = {
	.lnd_type       = GNILND,
	.lnd_startup    = kgnilnd_startup,
	.lnd_shutdown   = kgnilnd_shutdown,
	.lnd_ctl        = kgnilnd_ctl,
	.lnd_send       = kgnilnd_send,
	.lnd_recv       = kgnilnd_recv,
	.lnd_eager_recv = kgnilnd_eager_recv,
};
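
/* Callback table consumed by LNet; presumably registered via
 * lnet_register_lnd(&the_kgnilnd) in the module init path (not shown
 * in this excerpt). */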

kgn_data_t      kgnilnd_data;

int
kgnilnd_thread_start(int(*fn)(void *arg), void *arg, char *name, int id)
{
	struct task_struct *thrd;

	thrd = kthread_run(fn, arg, "%s_%02d", name, id);
	if (IS_ERR(thrd))
		return PTR_ERR(thrd);

	atomic_inc(&kgnilnd_data.kgn_nthreads);
	return 0;
}
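
/* A minimal usage sketch (hypothetical caller) - spawn one reaper
 * thread and bail out on failure:
 *
 *	rc = kgnilnd_thread_start(kgnilnd_reaper, NULL, "kgnilnd_rpr", 0);
 *	if (rc != 0)
 *		CERROR("Can't spawn gnilnd reaper: %d\n", rc);
 *
 * kgn_nthreads is only incremented on success, so a failed start needs
 * no cleanup here. */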

/* bind scheduler threads to cpus */
int
kgnilnd_start_sd_threads(void)
{
	int cpu;
	int i = 0;
	struct task_struct *task;

	for_each_online_cpu(cpu) {
		/* don't bind to cpu 0 - all interrupts are processed here */
		if (cpu == 0)
			continue;

		task = kthread_create(kgnilnd_scheduler, (void *)((long)i),
				      "%s_%02d", "kgnilnd_sd", i);
		if (!IS_ERR(task)) {
			kthread_bind(task, cpu);
			wake_up_process(task);
		} else {
			CERROR("Can't spawn gnilnd scheduler[%d] %ld\n", i,
				PTR_ERR(task));
			return PTR_ERR(task);
		}
		atomic_inc(&kgnilnd_data.kgn_nthreads);

		if (++i >= *kgnilnd_tunables.kgn_sched_threads) {
			break;
		}
	}

	return 0;
}

/* needs write_lock on kgn_peer_conn_lock */
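/* Returns the number of stale conns closed to this peer. */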
int
kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
	kgn_conn_t *conn, *cnxt;
	int         loopback;
	int         count = 0;

	loopback = (peer->gnp_nid ==
		    lnet_nid_to_nid4(&peer->gnp_net->gnn_ni->ni_nid));

	list_for_each_entry_safe(conn, cnxt, &peer->gnp_conns, gnc_list) {
		if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
			continue;

		if (conn == newconn)
			continue;

		if (conn->gnc_device != newconn->gnc_device)
			continue;

		/* This is a two connection loopback - one talking to the other */
		if (loopback &&
		    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
		    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp) {
			CDEBUG(D_NET, "skipping prune of %p, "
				"loopback and matching stamps"
				" connstamp %llu(%llu)"
				" peerstamp %llu(%llu)\n",
				conn, newconn->gnc_my_connstamp,
				conn->gnc_peer_connstamp,
				newconn->gnc_peer_connstamp,
				conn->gnc_my_connstamp);
			continue;
		}

		if (conn->gnc_peerstamp != newconn->gnc_peerstamp) {
			LASSERTF(conn->gnc_peerstamp < newconn->gnc_peerstamp,
				"conn 0x%p peerstamp %llu >= "
				"newconn 0x%p peerstamp %llu\n",
				conn, conn->gnc_peerstamp,
				newconn, newconn->gnc_peerstamp);

			CDEBUG(D_NET, "Closing stale conn nid: %s"
			       " peerstamp:%#llx(%#llx)\n",
			       libcfs_nid2str(peer->gnp_nid),
			       conn->gnc_peerstamp, newconn->gnc_peerstamp);
		} else {
			LASSERTF(conn->gnc_peer_connstamp < newconn->gnc_peer_connstamp,
				"conn 0x%p peer_connstamp %llu >= "
				"newconn 0x%p peer_connstamp %llu\n",
				conn, conn->gnc_peer_connstamp,
				newconn, newconn->gnc_peer_connstamp);

			CDEBUG(D_NET, "Closing stale conn nid: %s"
			       " connstamp:%llu(%llu)\n",
			       libcfs_nid2str(peer->gnp_nid),
			       conn->gnc_peer_connstamp, newconn->gnc_peer_connstamp);
		}

		count++;
		kgnilnd_close_conn_locked(conn, -ESTALE);
	}

	if (count != 0) {
		CWARN("Closed %d stale conns to %s\n", count,
		      libcfs_nid2str(peer->gnp_nid));
	}

	RETURN(count);
}

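/* Duplicate-connection check.  Returns 0 when 'newconn' is not a dup,
 * or a positive reason code when it is: 1 - 'newconn' is from an
 * earlier incarnation of 'peer', 2 - it is an earlier connection from
 * 'peer', 3 - it carries the same connstamp as an existing conn. */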
int
kgnilnd_conn_isdup_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
	kgn_conn_t       *conn;
	int               loopback;
	ENTRY;

	loopback = (peer->gnp_nid ==
		    lnet_nid_to_nid4(&peer->gnp_net->gnn_ni->ni_nid));

	list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
		CDEBUG(D_NET, "checking conn 0x%p for peer %s"
			" lo %d new %llu existing %llu"
			" new peer %llu existing peer %llu"
			" new dev %p existing dev %p\n",
			conn, libcfs_nid2str(peer->gnp_nid),
			loopback,
			newconn->gnc_peerstamp, conn->gnc_peerstamp,
			newconn->gnc_peer_connstamp, conn->gnc_peer_connstamp,
			newconn->gnc_device, conn->gnc_device);

		/* conn is in the process of closing */
		if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
			continue;

		/* 'newconn' is from an earlier version of 'peer'!!! */
		if (newconn->gnc_peerstamp < conn->gnc_peerstamp)
			RETURN(1);

		/* 'conn' is from an earlier version of 'peer': it will be
		 * removed when we cull stale conns later on... */
		if (newconn->gnc_peerstamp > conn->gnc_peerstamp)
			continue;

		/* Different devices are OK */
		if (conn->gnc_device != newconn->gnc_device)
			continue;

		/* It's me connecting to myself */
		if (loopback &&
		    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
		    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp)
			continue;

		/* 'newconn' is an earlier connection from 'peer'!!! */
		if (newconn->gnc_peer_connstamp < conn->gnc_peer_connstamp)
			RETURN(2);

		/* 'conn' is an earlier connection from 'peer': it will be
		 * removed when we cull stale conns later on... */
		if (newconn->gnc_peer_connstamp > conn->gnc_peer_connstamp)
			continue;

		/* 'newconn' has the SAME connection stamp; 'peer' isn't
		 * playing the game... */
		RETURN(3);
	}

	RETURN(0);
}

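/* Allocate and initialize a conn on 'dev'.  On success the caller owns
 * the initial reference (plus the EP reference taken below); returns 0,
 * or a negative errno (-E2BIG, -ENOMEM, -ENETDOWN) on failure. */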
int
kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
{
	kgn_conn_t      *conn;
	gni_return_t    rrc;
	int             rc = 0;

	LASSERT(!in_interrupt());
	atomic_inc(&kgnilnd_data.kgn_nconns);

	/* divide by 2 to allow for complete reset and immediate reconnect */
	if (atomic_read(&kgnilnd_data.kgn_nconns) >= GNILND_MAX_CQID/2) {
		CERROR("Too many conns are live: %d > %d\n",
			atomic_read(&kgnilnd_data.kgn_nconns), GNILND_MAX_CQID/2);
		atomic_dec(&kgnilnd_data.kgn_nconns);
		return -E2BIG;
	}

	LIBCFS_ALLOC(conn, sizeof(*conn));
	if (conn == NULL) {
		atomic_dec(&kgnilnd_data.kgn_nconns);
		return -ENOMEM;
	}

	conn->gnc_tx_ref_table =
		kgnilnd_vzalloc(GNILND_MAX_MSG_ID * sizeof(void *));
	if (conn->gnc_tx_ref_table == NULL) {
		CERROR("Can't allocate conn tx_ref_table\n");
		GOTO(failed, rc = -ENOMEM);
	}

	mutex_init(&conn->gnc_smsg_mutex);
	mutex_init(&conn->gnc_rdma_mutex);
	atomic_set(&conn->gnc_refcount, 1);
	atomic_set(&conn->gnc_reaper_noop, 0);
	atomic_set(&conn->gnc_sched_noop, 0);
	atomic_set(&conn->gnc_tx_in_use, 0);
	INIT_LIST_HEAD(&conn->gnc_list);
	INIT_LIST_HEAD(&conn->gnc_hashlist);
	INIT_LIST_HEAD(&conn->gnc_schedlist);
	INIT_LIST_HEAD(&conn->gnc_fmaq);
	INIT_LIST_HEAD(&conn->gnc_mdd_list);
	INIT_LIST_HEAD(&conn->gnc_delaylist);
	spin_lock_init(&conn->gnc_list_lock);
	spin_lock_init(&conn->gnc_tx_lock);
	conn->gnc_magic = GNILND_CONN_MAGIC;

	/* set tx id to nearly the end to make sure we find wrapping
	 * issues soon */
	conn->gnc_next_tx = (int) GNILND_MAX_MSG_ID - 10;

	/* if this fails, we have conflicts and MAX_TX is too large */
	BUILD_BUG_ON(GNILND_MAX_MSG_ID >= GNILND_MSGID_CLOSE);

	/* get a new unique CQ id for this conn */
	write_lock(&kgnilnd_data.kgn_peer_conn_lock);
	conn->gnc_my_connstamp = kgnilnd_data.kgn_connstamp++;
	conn->gnc_cqid = kgnilnd_get_cqid_locked();
	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

	if (conn->gnc_cqid == 0) {
		CERROR("Could not allocate unique CQ ID for conn 0x%p\n", conn);
		GOTO(failed, rc = -E2BIG);
	}

	CDEBUG(D_NET, "alloc cqid %u for conn 0x%p\n",
		conn->gnc_cqid, conn);

	/* need to be set before gnc_ephandle to allow kgnilnd_destroy_conn_ep to
	 * check context */
	conn->gnc_device = dev;

	conn->gnc_timeout = max(*kgnilnd_tunables.kgn_timeout,
				 GNILND_MIN_TIMEOUT);
	kgnilnd_update_reaper_timeout(conn->gnc_timeout);

	/* this is the ep_handle for doing SMSG & BTE */
	mutex_lock(&dev->gnd_cq_mutex);
	rrc = kgnilnd_ep_create(dev->gnd_handle, dev->gnd_snd_fma_cqh,
				&conn->gnc_ephandle);
	mutex_unlock(&dev->gnd_cq_mutex);
	if (rrc != GNI_RC_SUCCESS)
		GOTO(failed, rc = -ENETDOWN);

	CDEBUG(D_NET, "created conn 0x%p ep_hndl 0x%p\n",
	       conn, conn->gnc_ephandle);

	/* add ref for EP canceling */
	kgnilnd_conn_addref(conn);
	atomic_inc(&dev->gnd_neps);

	*connp = conn;
	return 0;

failed:
	atomic_dec(&kgnilnd_data.kgn_nconns);
	kgnilnd_vfree(conn->gnc_tx_ref_table,
		      GNILND_MAX_MSG_ID * sizeof(void *));
	LIBCFS_FREE(conn, sizeof(*conn));
	return rc;
}

/* needs to be called with kgn_peer_conn_lock held (read or write) */
kgn_conn_t *
kgnilnd_find_conn_locked(kgn_peer_t *peer)
{
	kgn_conn_t      *conn = NULL;

	/* if we are in reset, this conn is going to die soon */
	if (unlikely(kgnilnd_data.kgn_in_reset)) {
		RETURN(NULL);
	}

	/* just return the first ESTABLISHED connection */
	list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
		/* kgnilnd_finish_connect doesn't put connections on the
		 * peer list until they are actually established */
		LASSERTF(conn->gnc_state >= GNILND_CONN_ESTABLISHED,
			"found conn %p state %s on peer %p (%s)\n",
			conn, kgnilnd_conn_state2str(conn), peer,
			libcfs_nid2str(peer->gnp_nid));
		if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
			continue;

		RETURN(conn);
	}
	RETURN(NULL);
}

/* needs write_lock on kgn_peer_conn_lock held */
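/* Returns an ESTABLISHED conn if one exists.  Otherwise returns NULL
 * and, when the peer is idle and past its reconnect backoff, hands the
 * peer to the connection daemon to start a new connect attempt. */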
kgn_conn_t *
kgnilnd_find_or_create_conn_locked(kgn_peer_t *peer)
{
	kgn_device_t    *dev = peer->gnp_net->gnn_dev;
	kgn_conn_t      *conn;

	conn = kgnilnd_find_conn_locked(peer);

	if (conn != NULL) {
		return conn;
	}

	/* if the peer was previously connecting, check if we should
	 * trigger another connection attempt yet. */
	if (time_before(jiffies, peer->gnp_reconnect_time)) {
		return NULL;
	}

	/* This check prevents us from creating a new connection to a peer
	 * while we are still in the process of closing an existing
	 * connection to that peer.
	 */
	list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
		if (conn->gnc_ephandle != NULL) {
			CDEBUG(D_NET, "Not connecting: non-NULL ephandle found, peer 0x%p->%s\n",
				peer, libcfs_nid2str(peer->gnp_nid));
			return NULL;
		}
	}

	if (peer->gnp_connecting != GNILND_PEER_IDLE) {
		/* only start a new connection attempt if the peer is IDLE;
		 * any other state means a connect is already in flight */
		return NULL;
	}

	CDEBUG(D_NET, "starting connect to %s\n",
		libcfs_nid2str(peer->gnp_nid));
	peer->gnp_connecting = GNILND_PEER_CONNECT;
	kgnilnd_peer_addref(peer); /* extra ref for connd */

	spin_lock(&dev->gnd_connd_lock);
	list_add_tail(&peer->gnp_connd_list, &dev->gnd_connd_peers);
	spin_unlock(&dev->gnd_connd_lock);

	kgnilnd_schedule_dgram(dev);
	CDEBUG(D_NETTRACE, "scheduling new connect\n");

	return NULL;
}

/* Caller is responsible for deciding if/when to call this */
void
kgnilnd_destroy_conn_ep(kgn_conn_t *conn)
{
	gni_return_t    rrc;
	gni_ep_handle_t tmp_ep;

	/* only tear down the EP if we actually initialized it; swapping in
	 * NULL tells kgnilnd_destroy_conn to leave it alone */
	tmp_ep = xchg(&conn->gnc_ephandle, NULL);
	if (tmp_ep != NULL) {
		/* we never re-use the EP, so unbind is not needed */
		mutex_lock(&conn->gnc_device->gnd_cq_mutex);
		rrc = kgnilnd_ep_destroy(tmp_ep);
		mutex_unlock(&conn->gnc_device->gnd_cq_mutex);

		/* if this fails, it could hork up kgni smsg retransmit and others
		 * since we could free the SMSG mbox memory, etc. */
		LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d conn 0x%p ep 0x%p\n",
			 rrc, conn, conn->gnc_ephandle);

		atomic_dec(&conn->gnc_device->gnd_neps);

		/* clear out count added in kgnilnd_close_conn_locked -
		 * conn will have a peer once it hits finish_connect, which is
		 * also the first spot we'll mark it ESTABLISHED */
		if (conn->gnc_peer) {
			kgnilnd_admin_decref(conn->gnc_peer->gnp_dirty_eps);
		}

		/* drop ref for EP */
		kgnilnd_conn_decref(conn);
	}
}
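
/* Final teardown; the LASSERTFs below document the required state.
 * (Assumption: reached via kgnilnd_conn_decref() once gnc_refcount
 * drops to zero.) */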
void
kgnilnd_destroy_conn(kgn_conn_t *conn)
{
	LASSERTF(!in_interrupt() &&
		!conn->gnc_scheduled &&
		!conn->gnc_in_purgatory &&
		conn->gnc_ephandle == NULL &&
		list_empty(&conn->gnc_list) &&
		list_empty(&conn->gnc_hashlist) &&
		list_empty(&conn->gnc_schedlist) &&
		list_empty(&conn->gnc_mdd_list) &&
		list_empty(&conn->gnc_delaylist) &&
		conn->gnc_magic == GNILND_CONN_MAGIC,
		"conn 0x%p->%s IRQ %d sched %d purg %d ep 0x%p Mg %d lists %d/%d/%d/%d/%d\n",
		conn, conn->gnc_peer ? libcfs_nid2str(conn->gnc_peer->gnp_nid)
				     : "<?>",
		!!in_interrupt(), conn->gnc_scheduled,
		conn->gnc_in_purgatory,
		conn->gnc_ephandle,
		conn->gnc_magic,
		list_empty(&conn->gnc_list),
		list_empty(&conn->gnc_hashlist),
		list_empty(&conn->gnc_schedlist),
		list_empty(&conn->gnc_mdd_list),
		list_empty(&conn->gnc_delaylist));

	/* Tripping these is especially bad, as it means we have items on the
	 * lists that didn't keep their refcount on the connection - or
	 * somebody evil released their own */
	LASSERTF(list_empty(&conn->gnc_fmaq) &&
		 atomic_read(&conn->gnc_nlive_fma) == 0 &&
		 atomic_read(&conn->gnc_nlive_rdma) == 0,
		 "conn 0x%p fmaq %d@0x%p nfma %d nrdma %d\n",
		 conn, kgnilnd_count_list(&conn->gnc_fmaq), &conn->gnc_fmaq,
		 atomic_read(&conn->gnc_nlive_fma), atomic_read(&conn->gnc_nlive_rdma));

	CDEBUG(D_NET, "destroying conn %p ephandle %p error %d\n",
		conn, conn->gnc_ephandle, conn->gnc_error);

	/* we are freeing this memory; remove the magic value from the conn */
	conn->gnc_magic = 0;

	/* if there is an FMA blk left here, we'll tear it down */
	if (conn->gnc_fma_blk) {
		if (conn->gnc_peer) {
			kgn_mbox_info_t *mbox;

			mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
			mbox->mbx_prev_nid = conn->gnc_peer->gnp_nid;
		}
		kgnilnd_release_mbox(conn, 0);
	}

	if (conn->gnc_peer != NULL)
		kgnilnd_peer_decref(conn->gnc_peer);

	if (conn->gnc_tx_ref_table != NULL) {
		kgnilnd_vfree(conn->gnc_tx_ref_table,
			      GNILND_MAX_MSG_ID * sizeof(void *));
	}

	LIBCFS_FREE(conn, sizeof(*conn));
	atomic_dec(&kgnilnd_data.kgn_nconns);
}

/* peer_alive and peer_notify done in the style of the o2iblnd */
void
kgnilnd_peer_alive(kgn_peer_t *peer)
{
	time64_t now = ktime_get_seconds();

	set_mb(peer->gnp_last_alive, now);
}

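/* Push peer aliveness/death to LNet via lnet_notify(), once per net the
 * peer is visible on.  Skipped while in reset or shutdown, and for
 * "clean" errnos, since those peers are expected to reconnect on their
 * own. */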
void
kgnilnd_peer_notify(kgn_peer_t *peer, int error, int alive)
{
	int             tell_lnet = 0;
	int             nnets = 0;
	int             rc;
	int             i, j;
	kgn_conn_t     *conn;
	kgn_net_t     **nets;
	kgn_net_t      *net;

	if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DONT_NOTIFY))
		return;

	/* Tell LNet we are giving up on this peer - but only
	 * if it isn't already reconnected or trying to reconnect */
	read_lock(&kgnilnd_data.kgn_peer_conn_lock);

	/* use kgnilnd_find_conn_locked to avoid any conns in the process of being nuked
	 *
	 * don't tell LNet if we are in reset - we assume that everyone will be able to
	 * reconnect just fine
	 */
	conn = kgnilnd_find_conn_locked(peer);

	CDEBUG(D_NETTRACE, "peer 0x%p->%s connecting %d conn 0x%p, rst %d error %d\n",
	       peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting, conn,
	       kgnilnd_data.kgn_in_reset, error);

	if (((peer->gnp_connecting == GNILND_PEER_IDLE) &&
	    (conn == NULL) &&
	    (!kgnilnd_data.kgn_in_reset) &&
	    (!kgnilnd_conn_clean_errno(error))) || alive) {
		tell_lnet = 1;
	}

	read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

	if (!tell_lnet) {
		/* short circuit if we don't need to notify LNet */
		return;
	}

	rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);

	if (rc) {
		/* only proceed while holding the sem - if the trylock fails,
		 * LNet is in shutdown (or something similar) and we must not
		 * notify */
		for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
			list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
				/* if gnn_shutdown is set for any net, shutdown
				 * is in progress - just return */
				if (net->gnn_shutdown) {
					up_read(&kgnilnd_data.kgn_net_rw_sem);
					return;
				}
				nnets++;
			}
		}

		if (nnets == 0) {
			/* shutdown in progress most likely */
			up_read(&kgnilnd_data.kgn_net_rw_sem);
			return;
		}

		CFS_ALLOC_PTR_ARRAY(nets, nnets);

		if (nets == NULL) {
			up_read(&kgnilnd_data.kgn_net_rw_sem);
			CERROR("Failed to allocate nets[%d]\n", nnets);
			return;
		}

		j = 0;
		for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
			list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
				nets[j] = net;
				kgnilnd_net_addref(net);
				j++;
			}
		}
		up_read(&kgnilnd_data.kgn_net_rw_sem);

		for (i = 0; i < nnets; i++) {
			struct lnet_nid peer_nid;

			net = nets[i];

			lnet_nid4_to_nid(kgnilnd_lnd2lnetnid(
						 lnet_nid_to_nid4(&net->gnn_ni->ni_nid),
						 peer->gnp_nid),
					 &peer_nid);

			CDEBUG(D_NET, "peer 0x%p->%s last_alive %lld (%llds ago)\n",
			       peer, libcfs_nidstr(&peer_nid), peer->gnp_last_alive,
			       ktime_get_seconds() - peer->gnp_last_alive);

			lnet_notify(net->gnn_ni, &peer_nid, alive, true,
				    peer->gnp_last_alive);

			kgnilnd_net_decref(net);
		}

		CFS_FREE_PTR_ARRAY(nets, nnets);
	}
}

/* need write_lock on kgn_peer_conn_lock */
void
kgnilnd_close_conn_locked(kgn_conn_t *conn, int error)
{
	kgn_peer_t        *peer = conn->gnc_peer;
	ENTRY;

	LASSERT(!in_interrupt());

	/* store error for tx completion */
	conn->gnc_error = error;
	peer->gnp_last_errno = error;

	/* use real error from peer if possible */
	if (error == -ECONNRESET) {
		error = conn->gnc_peer_error;
	}

	/* if we NETERROR, make sure it is rate limited */
	if (!kgnilnd_conn_clean_errno(error) &&
	    peer->gnp_state != GNILND_PEER_DOWN) {
		CNETERR("closing conn to %s: error %d\n",
		       libcfs_nid2str(peer->gnp_nid), error);
	} else {
		CDEBUG(D_NET, "closing conn to %s: error %d\n",
		       libcfs_nid2str(peer->gnp_nid), error);
	}

	LASSERTF(conn->gnc_state == GNILND_CONN_ESTABLISHED,
		"conn %p to %s with bogus state %s\n", conn,
		libcfs_nid2str(conn->gnc_peer->gnp_nid),
		kgnilnd_conn_state2str(conn));
	LASSERT(!list_empty(&conn->gnc_hashlist));
	LASSERT(!list_empty(&conn->gnc_list));

	/* mark peer count here so any place the EP gets destroyed will
	 * open up the peer count so that a new ESTABLISHED conn is then free
	 * to send new messages -- sending before the previous EPs are destroyed
	 * could end up with messages on the network for the old conn _after_
	 * the new conn and break the mbox safety protocol */
	kgnilnd_admin_addref(conn->gnc_peer->gnp_dirty_eps);

	/* Remove from conn hash table: no new callbacks */
	list_del_init(&conn->gnc_hashlist);
	kgnilnd_data.kgn_conn_version++;
	kgnilnd_conn_decref(conn);

	/* if we are in reset, go right to CLOSED as there is no scheduler
	 * thread to move from CLOSING to CLOSED */
	if (unlikely(kgnilnd_data.kgn_in_reset)) {
		conn->gnc_state = GNILND_CONN_CLOSED;
	} else {
		conn->gnc_state = GNILND_CONN_CLOSING;
	}

	if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RDMA_CQ_ERROR)) {
		msleep_interruptible(MSEC_PER_SEC);
	}

	/* leave on peer->gnp_conns to make sure we don't let the reaper
	 * or others try to unlink this peer until the conn is fully
	 * processed for closing */
	if (kgnilnd_check_purgatory_conn(conn)) {
		kgnilnd_add_purgatory_locked(conn, conn->gnc_peer);
	}

	/* Reset RX timeout to ensure we wait for an incoming CLOSE
	 * for the full timeout.  If we get a CLOSE we know the
	 * peer has stopped all RDMA.  Otherwise if we wait for
	 * the full timeout we can also be sure all RDMA has stopped. */
	conn->gnc_last_rx = conn->gnc_last_rx_cq = jiffies;
	mb();

	/* schedule sending CLOSE - if we are in quiesce, this adds to
	 * gnd_ready_conns and allows us to find it in quiesce processing */
	kgnilnd_schedule_conn(conn);

	EXIT;
}

void
kgnilnd_close_conn(kgn_conn_t *conn, int error)
{
	write_lock(&kgnilnd_data.kgn_peer_conn_lock);
	/* need to check the state here - this call is racy and we don't
	 * know the state until after the lock is grabbed */
	if (conn->gnc_state == GNILND_CONN_ESTABLISHED) {
		kgnilnd_close_conn_locked(conn, error);
	}
	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
}

void
kgnilnd_complete_closed_conn(kgn_conn_t *conn)
{
	LIST_HEAD(sinners);
	kgn_tx_t               *tx, *txn;
	int                     nlive = 0;
	int                     nrdma = 0;
	int                     nq_rdma = 0;
	int                     logmsg;
	ENTRY;

	/* Dump log on cksum error - wait until complete phase to let
	 * RX of error happen */
	if (*kgnilnd_tunables.kgn_checksum_dump &&
	    (conn != NULL && conn->gnc_peer_error == -ENOKEY)) {
		libcfs_debug_dumplog();
	}

	/* _CLOSED set in kgnilnd_process_fmaq once we decide to
	 * send the CLOSE or not */
	LASSERTF(conn->gnc_state == GNILND_CONN_CLOSED,
		 "conn 0x%p->%s with bad state %s\n",
		 conn, conn->gnc_peer ?
			libcfs_nid2str(conn->gnc_peer->gnp_nid) :
			"<?>",
		 kgnilnd_conn_state2str(conn));

	LASSERT(list_empty(&conn->gnc_hashlist));
	/* We shouldn't be on the delay list; the conn can only get added
	 * to that list during a retransmit, and retransmits occur solely
	 * within scheduler threads.
	 */
	LASSERT(list_empty(&conn->gnc_delaylist));

	/* we've sent the close, start nuking */
	if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SCHEDULE_COMPLETE))
		kgnilnd_schedule_conn(conn);

	if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
		CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
				"done; attempting to recover conn 0x%p "
				"scheduled %d function: %s line: %d\n", conn,
				conn->gnc_scheduled, conn->gnc_sched_caller,
				conn->gnc_sched_line);
		RETURN_EXIT;
	}

	/* we don't use lists to track things that we can get out of the
	 * tx_ref table... */

	/* need to hold locks for tx_list_state, sampling it is too racy:
	 * - the lock actually protects tx != NULL, but we can't take the proper
	 *   lock until we check tx_list_state, which would be too late and
	 *   we could have the TX change under us.
	 * gnd_rdmaq_lock and gnd_lock are not used together, so taking both
	 * should be fine */
	spin_lock(&conn->gnc_device->gnd_rdmaq_lock);
	spin_lock(&conn->gnc_device->gnd_lock);

	for (nrdma = 0; nrdma < GNILND_MAX_MSG_ID; nrdma++) {
		tx = conn->gnc_tx_ref_table[nrdma];

		if (tx != NULL) {
			/* only print the first error; CLOSEs stay quiet since
			 * we often don't see CQ events for them by the time we
			 * get here... and really don't care */
			if (nlive || tx->tx_msg.gnm_type == GNILND_MSG_CLOSE)
				tx->tx_state |= GNILND_TX_QUIET_ERROR;
			nlive++;
			GNIDBG_TX(D_NET, tx, "cleaning up on close, nlive %d", nlive);

			/* don't worry about gnc_lock here as nobody else should be
			 * touching this conn */
			kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
			list_add_tail(&tx->tx_list, &sinners);
		}
	}
	spin_unlock(&conn->gnc_device->gnd_lock);
	spin_unlock(&conn->gnc_device->gnd_rdmaq_lock);

	/* nobody should have marked this as needing scheduling after
	 * we called close - so only ref should be us handling it */
	if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
		CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
				"done; attempting to recover conn 0x%p "
				"scheduled %d function %s line: %d\n", conn,
				conn->gnc_scheduled, conn->gnc_sched_caller,
				conn->gnc_sched_line);
	}
	/* now reset a few to actual counters... */
	nrdma = atomic_read(&conn->gnc_nlive_rdma);
	nq_rdma = atomic_read(&conn->gnc_nq_rdma);

	if (!list_empty(&sinners)) {
		list_for_each_entry_safe(tx, txn, &sinners, tx_list) {
			/* clear tx_list to make tx_add_list_locked happy */
			list_del_init(&tx->tx_list);
			/* The error codes determine if we hold onto the MDD */
			kgnilnd_tx_done(tx, conn->gnc_error);
		}
	}

	logmsg = (nlive + nrdma + nq_rdma);

	if (logmsg) {
		int level = conn->gnc_peer->gnp_state == GNILND_PEER_UP ?
				D_NETERROR : D_NET;
		CDEBUG(level, "Closed conn 0x%p->%s (errno %d,"
			" peer errno %d): canceled %d TX, %d/%d RDMA\n",
			conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
			conn->gnc_error, conn->gnc_peer_error,
			nlive, nq_rdma, nrdma);
	}

	kgnilnd_destroy_conn_ep(conn);

	/* Bug 765042 - race this with completing a new conn to same peer - we need
	 * finish_connect to detach purgatory before we can do it ourselves here */
	CFS_RACE(CFS_FAIL_GNI_FINISH_PURG);

	/* now it is safe to remove from peer list - anyone looking at
	 * gnp_conns now is free to unlink if not on purgatory */
	write_lock(&kgnilnd_data.kgn_peer_conn_lock);

	conn->gnc_state = GNILND_CONN_DONE;

	/* Decrement counter if we are marked by del_conn_or_peers for closing
	 */
	if (conn->gnc_needs_closing)
		kgnilnd_admin_decref(kgnilnd_data.kgn_npending_conns);

	/* Remove from peer's list of valid connections if it's not in purgatory */
	if (!conn->gnc_in_purgatory) {
		list_del_init(&conn->gnc_list);
		/* Lose peer's reference on the conn */
		kgnilnd_conn_decref(conn);
	}

	/* NB - only unlinking if we set pending in del_peer_locked from admin or
	 * shutdown */
	if (kgnilnd_peer_active(conn->gnc_peer) &&
	    conn->gnc_peer->gnp_pending_unlink &&
	    kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
		kgnilnd_unlink_peer_locked(conn->gnc_peer);
	}

	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

	/* I'm telling Mommy! - use peer_error if they initiated close */
	kgnilnd_peer_notify(conn->gnc_peer,
			    conn->gnc_error == -ECONNRESET ?
			    conn->gnc_peer_error : conn->gnc_error, 0);

	EXIT;
}

int
kgnilnd_set_conn_params(kgn_dgram_t *dgram)
{
	kgn_conn_t             *conn = dgram->gndg_conn;
	kgn_connreq_t          *connreq = &dgram->gndg_conn_in;
	kgn_gniparams_t        *rem_param = &connreq->gncr_gnparams;
	gni_return_t            rrc;
	int                     rc = 0;
	gni_smsg_attr_t        *remote = &connreq->gncr_gnparams.gnpr_smsg_attr;

	/* set timeout vals in conn early so we can use them for the NAK */

	/* use max of the requested and our timeout, peer will do the same */
	conn->gnc_timeout = max(conn->gnc_timeout, connreq->gncr_timeout);

	/* only ep_bind really mucks around with the CQ */
	/* Only ep_bind if we are not connecting to ourselves and the dstnid
	 * is not a wildcard.  This check is necessary because an EP can only
	 * be bound once, so we must not bind when it is already bound.
	 */
	if (connreq->gncr_dstnid != LNET_NID_ANY &&
	    dgram->gndg_conn_out.gncr_dstnid != connreq->gncr_srcnid) {
		mutex_lock(&conn->gnc_device->gnd_cq_mutex);
		rrc = kgnilnd_ep_bind(conn->gnc_ephandle,
			connreq->gncr_gnparams.gnpr_host_id,
			conn->gnc_cqid);
		mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
		if (rrc != GNI_RC_SUCCESS) {
			rc = -ECONNABORTED;
			goto return_out;
		}
	}

	rrc = kgnilnd_ep_set_eventdata(conn->gnc_ephandle, conn->gnc_cqid,
			 connreq->gncr_gnparams.gnpr_cqid);
	if (rrc != GNI_RC_SUCCESS) {
		rc = -ECONNABORTED;
		goto cleanup_out;
	}

	/* Initialize SMSG */
	rrc = kgnilnd_smsg_init(conn->gnc_ephandle, &conn->gnpr_smsg_attr,
			&connreq->gncr_gnparams.gnpr_smsg_attr);
	if (unlikely(rrc == GNI_RC_INVALID_PARAM)) {
		gni_smsg_attr_t *local = &conn->gnpr_smsg_attr;

		/* help folks figure out if there is a tunable off, etc. */
		LCONSOLE_ERROR("SMSG attribute mismatch. Data from local/remote:"
			       " type %d/%d msg_maxsize %u/%u"
			       " mbox_maxcredit %u/%u. Please check kgni"
			       " logs for further data\n",
			       local->msg_type, remote->msg_type,
			       local->msg_maxsize, remote->msg_maxsize,
			       local->mbox_maxcredit, remote->mbox_maxcredit);
	}
	if (rrc != GNI_RC_SUCCESS) {
		rc = -ECONNABORTED;
		goto cleanup_out;
	}

	/* log this for help in debugging SMSG buffer re-use */
	CDEBUG(D_NET, "conn %p src %s dst %s smsg %p acquired"
		" local cqid %u SMSG %p->%u hndl %#llx.%#llx"
		" remote cqid %u SMSG %p->%u hndl %#llx.%#llx\n",
		conn, libcfs_nid2str(connreq->gncr_srcnid),
		libcfs_nid2str(connreq->gncr_dstnid),
		&conn->gnpr_smsg_attr,
		conn->gnc_cqid,
		conn->gnpr_smsg_attr.msg_buffer,
		conn->gnpr_smsg_attr.mbox_offset,
		conn->gnpr_smsg_attr.mem_hndl.qword1,
		conn->gnpr_smsg_attr.mem_hndl.qword2,
		rem_param->gnpr_cqid,
		rem_param->gnpr_smsg_attr.msg_buffer,
		rem_param->gnpr_smsg_attr.mbox_offset,
		rem_param->gnpr_smsg_attr.mem_hndl.qword1,
		rem_param->gnpr_smsg_attr.mem_hndl.qword2);

	conn->gnc_peerstamp = connreq->gncr_peerstamp;
	conn->gnc_peer_connstamp = connreq->gncr_connstamp;
	conn->remote_mbox_addr = (void *)((char *)remote->msg_buffer + remote->mbox_offset);

	/* We update the reaper timeout once we have a valid conn and timeout */
	kgnilnd_update_reaper_timeout(GNILND_TO2KA(conn->gnc_timeout));

	return 0;

cleanup_out:
	rrc = kgnilnd_ep_unbind(conn->gnc_ephandle);
	/* not sure I can just let this fly */
	LASSERTF(rrc == GNI_RC_SUCCESS,
		"bad rc from gni_ep_unbind trying to cleanup: %d\n", rrc);

return_out:
	LASSERTF(rc != 0, "SOFTWARE BUG: rc == 0\n");
	CERROR("Error setting connection params from %s: %d\n",
	       libcfs_nid2str(connreq->gncr_srcnid), rc);
	return rc;
}

/* needs down_read on kgn_net_rw_sem held from before this call until
 * after the write_lock on kgn_peer_conn_lock - this ensures we stay sane
 * with kgnilnd_shutdown - it'll get the sem and set shutdown, then get the
 * kgn_peer_conn_lock to start del_peer'ing. If we hold the sem until after
 * kgn_peer_conn_lock is held, we guarantee that nobody calls
 * kgnilnd_add_peer_locked without checking gnn_shutdown */
int
kgnilnd_create_peer_safe(kgn_peer_t **peerp,
			 lnet_nid_t nid,
			 kgn_net_t *net,
			 int node_state)
{
	kgn_peer_t      *peer;
	int             rc;

	LASSERT(nid != LNET_NID_ANY);

	/* We don't pass the net around in the dgram anymore, so we find it
	 * here.  This works unless we are in shutdown or the nid has an
	 * invalid net, in which case an error code is returned.
	 *
	 * If the net passed in is not NULL we can use it directly, which
	 * avoids the lookup when the calling function already has the data.
	 */
	if (net == NULL) {
		rc = kgnilnd_find_net(nid, &net);
		if (rc < 0)
			return rc;
	} else {
		/* kgnilnd_find_net takes a reference on the net; since we
		 * are not calling it here, take the reference manually so
		 * the net refcounts are correct when tearing down the net
		 */
		kgnilnd_net_addref(net);
	}

	LIBCFS_ALLOC(peer, sizeof(*peer));
	if (peer == NULL) {
		kgnilnd_net_decref(net);
		return -ENOMEM;
	}
	peer->gnp_nid = nid;
	peer->gnp_state = node_state;

	/* translate from nid to nic addr & store */
	rc = kgnilnd_nid_to_nicaddrs(LNET_NIDADDR(nid), 1, &peer->gnp_host_id);
	if (rc <= 0) {
		kgnilnd_net_decref(net);
		LIBCFS_FREE(peer, sizeof(*peer));
		return -ESRCH;
	}
	CDEBUG(D_NET, "peer 0x%p->%s -> NIC 0x%x\n", peer,
		libcfs_nid2str(nid), peer->gnp_host_id);

	atomic_set(&peer->gnp_refcount, 1);     /* 1 ref for caller */
	atomic_set(&peer->gnp_dirty_eps, 0);

	INIT_LIST_HEAD(&peer->gnp_list);
	INIT_LIST_HEAD(&peer->gnp_connd_list);
	INIT_LIST_HEAD(&peer->gnp_conns);
	INIT_LIST_HEAD(&peer->gnp_tx_queue);

	/* the first reconnect should happen immediately, so we leave
	 * gnp_reconnect_interval set to 0 */

	LASSERTF(net != NULL, "peer 0x%p->%s with NULL net\n",
		 peer, libcfs_nid2str(nid));

	/* must have kgn_net_rw_sem held for this...  */
	if (net->gnn_shutdown) {
		/* shutdown has started already */
		kgnilnd_net_decref(net);
		LIBCFS_FREE(peer, sizeof(*peer));
		return -ESHUTDOWN;
	}

	peer->gnp_net = net;

	atomic_inc(&kgnilnd_data.kgn_npeers);

	*peerp = peer;
	return 0;
}

void
kgnilnd_destroy_peer(kgn_peer_t *peer)
{
	CDEBUG(D_NET, "peer %s %p deleted\n",
	       libcfs_nid2str(peer->gnp_nid), peer);
	LASSERTF(atomic_read(&peer->gnp_refcount) == 0,
		 "peer 0x%p->%s refs %d\n",
		 peer, libcfs_nid2str(peer->gnp_nid),
		 atomic_read(&peer->gnp_refcount));
	LASSERTF(atomic_read(&peer->gnp_dirty_eps) == 0,
		 "peer 0x%p->%s dirty eps %d\n",
		 peer, libcfs_nid2str(peer->gnp_nid),
		 atomic_read(&peer->gnp_dirty_eps));
	LASSERTF(peer->gnp_net != NULL, "peer %p (%s) with NULL net\n",
		 peer, libcfs_nid2str(peer->gnp_nid));
	LASSERTF(!kgnilnd_peer_active(peer),
		 "peer 0x%p->%s\n",
		 peer, libcfs_nid2str(peer->gnp_nid));
	LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE ||
		 peer->gnp_connecting == GNILND_PEER_KILL,
		 "peer 0x%p->%s, connecting %d\n",
		 peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
	LASSERTF(list_empty(&peer->gnp_conns),
		 "peer 0x%p->%s\n",
		 peer, libcfs_nid2str(peer->gnp_nid));
	LASSERTF(list_empty(&peer->gnp_tx_queue),
		 "peer 0x%p->%s\n",
		 peer, libcfs_nid2str(peer->gnp_nid));
	LASSERTF(list_empty(&peer->gnp_connd_list),
		 "peer 0x%p->%s\n",
		 peer, libcfs_nid2str(peer->gnp_nid));

	/* NB a peer's connections keep a reference on their peer until
	 * they are destroyed, so we can be assured that _all_ state to do
	 * with this peer has been cleaned up when its refcount drops to
	 * zero. */

	atomic_dec(&kgnilnd_data.kgn_npeers);
	kgnilnd_net_decref(peer->gnp_net);

	LIBCFS_FREE(peer, sizeof(*peer));
}

/* the conn might not have made it all the way through to a connected
 * state - but we need to purgatory any conn that a remote peer might
 * have seen through a posted dgram as well */
void
kgnilnd_add_purgatory_locked(kgn_conn_t *conn, kgn_peer_t *peer)
{
	kgn_mbox_info_t *mbox = NULL;
	ENTRY;

	/* NB - the caller should own the conn, by removing it from the
	 * scheduler thread when finishing the close */

	LASSERTF(peer != NULL, "conn %p with NULL peer\n", conn);

	/* If this is still true, need to add the calls to unlink back in and
	 * figure out how to close the hole on loopback conns */
	LASSERTF(kgnilnd_peer_active(peer), "can't use inactive peer %s (%p)"
		" we'll never recover the resources\n",
		libcfs_nid2str(peer->gnp_nid), peer);

	CDEBUG(D_NET, "conn %p peer %p dev %p\n", conn, peer,
		conn->gnc_device);

	LASSERTF(conn->gnc_in_purgatory == 0,
		"Conn already in purgatory\n");
	conn->gnc_in_purgatory = 1;

	mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
	mbox->mbx_prev_purg_nid = peer->gnp_nid;
	mbox->mbx_add_purgatory = jiffies;
	kgnilnd_release_mbox(conn, 1);

	LASSERTF(list_empty(&conn->gnc_mdd_list),
		"conn 0x%p->%s with active purgatory hold MDD %d\n",
		conn, libcfs_nid2str(peer->gnp_nid),
		kgnilnd_count_list(&conn->gnc_mdd_list));

	EXIT;
}
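
/* Purgatory, in short: a closed conn (and its SMSG mbox) is parked here
 * instead of being freed, so the remote peer can't scribble on recycled
 * mailbox memory - the "mbox safety protocol" referenced in
 * kgnilnd_close_conn_locked().  Conns leave purgatory through the
 * detach/release paths below. */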

/* Instead of detaching everything from purgatory here we just mark the conn
 * as needing detach; the next time the reaper checks the conn it will
 * detach it.
 * Calling function requires write_lock held on kgn_peer_conn_lock
 */
void
kgnilnd_mark_for_detach_purgatory_all_locked(kgn_peer_t *peer)
{
	kgn_conn_t       *conn;

	list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
		if (conn->gnc_in_purgatory && !conn->gnc_needs_detach) {
			conn->gnc_needs_detach = 1;
			kgnilnd_admin_addref(kgnilnd_data.kgn_npending_detach);
		}
	}
}

/* Calling function needs a write_lock held on kgn_peer_conn_lock */
void
kgnilnd_detach_purgatory_locked(kgn_conn_t *conn, struct list_head *conn_list)
{
	kgn_mbox_info_t *mbox = NULL;

	/* if needed, add the conn purgatory data to the list passed in */
	if (conn->gnc_in_purgatory) {
		CDEBUG(D_NET, "peer %p->%s purg_conn %p@%s mdd_list #tx %d\n",
			conn->gnc_peer, libcfs_nid2str(conn->gnc_peer->gnp_nid),
			conn, kgnilnd_conn_state2str(conn),
			kgnilnd_count_list(&conn->gnc_mdd_list));

		mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
		mbox->mbx_detach_of_purgatory = jiffies;

		/* conn->gnc_list is the entry point on peer->gnp_conns, so
		 * detaching it here removes it from the list of 'valid' peer
		 * connections.  We put the conn onto a list of conns to be
		 * handed to kgnilnd_release_purgatory_locked(); the caller of
		 * kgnilnd_detach_purgatory_locked() now owns that conn, since
		 * it's no longer on the peer's conn list.
		 */

		list_del_init(&conn->gnc_list);

		/* NB - only unlinking if we set pending in del_peer_locked from admin or
		 * shutdown */
		if (kgnilnd_peer_active(conn->gnc_peer) &&
		    conn->gnc_peer->gnp_pending_unlink &&
		    kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
			kgnilnd_unlink_peer_locked(conn->gnc_peer);
		}
		/* The reaper will not call detach unless the conn is fully through
		 * kgnilnd_complete_closed_conn.  If the conn is not in a DONE state,
		 * somehow we are attempting to detach even though the conn has not
		 * been fully cleaned up.  If we detach while the conn is still
		 * closing, we will end up with an orphaned connection that has a
		 * valid ep_handle but is not attached to a peer.
		 */
		LASSERTF(conn->gnc_state == GNILND_CONN_DONE,
			 "Conn in invalid state %p@%s\n",
			 conn, kgnilnd_conn_state2str(conn));

		/* move from peer to the delayed release list */
		list_add_tail(&conn->gnc_list, conn_list);
	}
}

void
kgnilnd_release_purgatory_list(struct list_head *conn_list)
{
	kgn_device_t            *dev;
	kgn_conn_t              *conn, *connN;
	kgn_mdd_purgatory_t     *gmp, *gmpN;

	list_for_each_entry_safe(conn, connN, conn_list, gnc_list) {
		dev = conn->gnc_device;

		kgnilnd_release_mbox(conn, -1);
		conn->gnc_in_purgatory = 0;

		list_del_init(&conn->gnc_list);

		/* gnc_needs_detach is set in kgnilnd_del_conn_or_peer. It is used to keep track
		 * of conns that have been marked for detach by kgnilnd_del_conn_or_peer.
		 * The function uses kgn_npending_detach to verify the conn has
		 * actually been detached.
		 */
		if (conn->gnc_needs_detach)
			kgnilnd_admin_decref(kgnilnd_data.kgn_npending_detach);

		/* if this guy is really dead (we are doing release from reaper),
		 * make sure we tell LNet - if this is from other context,
		 * the checks in the function will prevent an errant
		 * notification */
		kgnilnd_peer_notify(conn->gnc_peer, conn->gnc_error, 0);

		list_for_each_entry_safe(gmp, gmpN, &conn->gnc_mdd_list,
					 gmp_list) {
			CDEBUG(D_NET,
			       "dev %p releasing held mdd %#llx.%#llx\n",
			       conn->gnc_device, gmp->gmp_map_key.qword1,
			       gmp->gmp_map_key.qword2);

			atomic_dec(&dev->gnd_n_mdd_held);
			kgnilnd_mem_mdd_release(conn->gnc_device->gnd_handle,
						&gmp->gmp_map_key);
			/* ignoring the return code - if kgni/ghal can't find it
			 * it must be released already */

			list_del_init(&gmp->gmp_list);
			LIBCFS_FREE(gmp, sizeof(*gmp));
		}
		/* lose conn ref for purgatory */
		kgnilnd_conn_decref(conn);
	}
}

/* needs write_lock on kgnilnd_data.kgn_peer_conn_lock held */
void
kgnilnd_peer_increase_reconnect_locked(kgn_peer_t *peer)
{
	int current_to;

	current_to = peer->gnp_reconnect_interval;

	/* we'll try to reconnect fast the first time, then back-off */
	if (current_to == 0) {
		peer->gnp_reconnect_time = jiffies - 1;
		current_to = *kgnilnd_tunables.kgn_min_reconnect_interval;
	} else {
		peer->gnp_reconnect_time = jiffies + cfs_time_seconds(current_to);
		/* add 50% of min timeout & retry */
		current_to += *kgnilnd_tunables.kgn_min_reconnect_interval / 2;
	}

	current_to = min(current_to,
			 *kgnilnd_tunables.kgn_max_reconnect_interval);

	peer->gnp_reconnect_interval = current_to;
	CDEBUG(D_NET, "peer %s can reconnect at %lu interval %lu\n",
	       libcfs_nid2str(peer->gnp_nid), peer->gnp_reconnect_time,
	       peer->gnp_reconnect_interval);
}
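
/* Worked example of the back-off above, assuming (hypothetically) that
 * kgn_min_reconnect_interval = 10 and kgn_max_reconnect_interval = 60:
 * the first failure allows an immediate retry (gnp_reconnect_time is
 * set to jiffies - 1) with interval 10; each subsequent failure
 * schedules the retry 'interval' seconds out and grows the interval by
 * min/2 = 5, giving 10, 15, 20, 25, ... capped at 60. */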

/* needs kgnilnd_data.kgn_peer_conn_lock held */
kgn_peer_t *
kgnilnd_find_peer_locked(lnet_nid_t nid)
{
	struct list_head *peer_list = kgnilnd_nid2peerlist(nid);
	kgn_peer_t       *peer;

	/* Chopping nid down to only NIDADDR using LNET_NIDADDR so we only
	 * have a single peer per device instead of a peer per nid/net combo.
	 */
	list_for_each_entry(peer, peer_list, gnp_list) {
		if (LNET_NIDADDR(nid) != LNET_NIDADDR(peer->gnp_nid))
			continue;

		CDEBUG(D_NET, "got peer [%p] -> %s c %d (%d)\n",
		       peer, libcfs_nid2str(nid),
		       peer->gnp_connecting,
		       atomic_read(&peer->gnp_refcount));
		return peer;
	}
	return NULL;
}

/* need write_lock on kgn_peer_conn_lock */
void
kgnilnd_unlink_peer_locked(kgn_peer_t *peer)
{
	LASSERTF(list_empty(&peer->gnp_conns),
		 "peer 0x%p->%s\n",
		 peer, libcfs_nid2str(peer->gnp_nid));
	LASSERTF(list_empty(&peer->gnp_tx_queue),
		 "peer 0x%p->%s\n",
		 peer, libcfs_nid2str(peer->gnp_nid));
	LASSERTF(kgnilnd_peer_active(peer),
		 "peer 0x%p->%s\n",
		 peer, libcfs_nid2str(peer->gnp_nid));
	CDEBUG(D_NET, "unlinking peer 0x%p->%s\n",
		peer, libcfs_nid2str(peer->gnp_nid));

	list_del_init(&peer->gnp_list);
	kgnilnd_data.kgn_peer_version++;
	kgnilnd_admin_decref(kgnilnd_data.kgn_npending_unlink);
	/* lose peerlist's ref */
	kgnilnd_peer_decref(peer);
}
1318 int
1319 kgnilnd_get_peer_info(int index,
1320                       kgn_peer_t **found_peer,
1321                       lnet_nid_t *id, __u32 *nic_addr,
1322                       int *refcount, int *connecting)
1323 {
1324         kgn_peer_t        *peer;
1325         int               i;
1326         int               rc = -ENOENT;
1327
1328         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1329
1330         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1331                 list_for_each_entry(peer, &kgnilnd_data.kgn_peers[i], gnp_list) {
1332                         if (index-- > 0)
1333                                 continue;
1334
1335                         CDEBUG(D_NET, "found peer %p (%s) at index %d\n",
1336                                peer, libcfs_nid2str(peer->gnp_nid), index);
1337
1338                         *found_peer  = peer;
1339                         *id          = peer->gnp_nid;
1340                         *nic_addr    = peer->gnp_host_id;
1341                         *refcount    = atomic_read(&peer->gnp_refcount);
1342                         *connecting  = peer->gnp_connecting;
1343
1344                         rc = 0;
1345                         goto out;
1346                 }
1347         }
1348 out:
1349         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1350         if (rc)
1351                 CDEBUG(D_NET, "no gni peer at index %d\n", index);
1352         return rc;
1353 }
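
/* kgnilnd_get_peer_info() is an index-based iterator: callers (such as the
 * IOC_LIBCFS_GET_PEER handler in kgnilnd_ctl() below) walk the table with
 * index 0, 1, 2, ... until -ENOENT.  A minimal sketch of that calling
 * pattern (illustrative only, not compiled): */
#if 0
static void
peer_table_walk_demo(void)
{
	kgn_peer_t *peer;
	lnet_nid_t  nid;
	__u32       nic_addr;
	int         refcount, connecting;
	int         index;

	for (index = 0; ; index++) {
		if (kgnilnd_get_peer_info(index, &peer, &nid, &nic_addr,
					  &refcount, &connecting) != 0)
			break;	/* -ENOENT: ran off the end of the table */
		/* NB: no reference is held on peer here - only the copied-out
		 * fields are safe to use once the lock has been dropped */
	}
}
#endif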
1354
1355 /* requires write_lock on kgn_peer_conn_lock held */
1356 void
1357 kgnilnd_add_peer_locked(lnet_nid_t nid, kgn_peer_t *new_stub_peer, kgn_peer_t **peerp)
1358 {
1359         kgn_peer_t        *peer, *peer2;
1360
1361         LASSERTF(new_stub_peer != NULL, "bad stub peer for nid %s\n",
1362                  libcfs_nid2str(nid));
1363
1364         peer2 = kgnilnd_find_peer_locked(nid);
1365         if (peer2 != NULL) {
1366                 /* A peer was created during the lock transition, so drop
1367                  * the new one we created */
1368                 kgnilnd_peer_decref(new_stub_peer);
1369                 peer = peer2;
1370         } else {
1371                 peer = new_stub_peer;
1372                 /* peer table takes existing ref on peer */
1373
1374                 LASSERTF(!kgnilnd_peer_active(peer),
1375                         "peer 0x%p->%s already in peer table\n",
1376                         peer, libcfs_nid2str(peer->gnp_nid));
1377                 list_add_tail(&peer->gnp_list,
1378                               kgnilnd_nid2peerlist(nid));
1379                 kgnilnd_data.kgn_peer_version++;
1380         }
1381
1382         LASSERTF(peer->gnp_net != NULL, "peer 0x%p->%s with NULL net\n",
1383                  peer, libcfs_nid2str(peer->gnp_nid));
1384         *peerp = peer;
1385 }
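
/* The above is the usual optimistic-allocation idiom: the caller builds the
 * stub peer without holding kgn_peer_conn_lock, then under the write lock we
 * either install it or, if another thread won the race during the lock
 * transition, drop our copy and adopt the winner.  kgnilnd_add_peer() below
 * shows the full create -> lock -> resolve sequence. */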
1386
1387 int
1388 kgnilnd_add_peer(kgn_net_t *net, lnet_nid_t nid, kgn_peer_t **peerp)
1389 {
1390         kgn_peer_t        *peer;
1391         int                rc;
1392         int                node_state;
1393         ENTRY;
1394
1395         if (nid == LNET_NID_ANY)
1396                 return -EINVAL;
1397
1398         node_state = kgnilnd_get_node_state(LNET_NIDADDR(nid));
1399
1400         /* NB - this will not block during normal operations -
1401          * the only writer of this is in the startup/shutdown path. */
1402         rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
1403         if (!rc) {
1404                 rc = -ESHUTDOWN;
1405                 RETURN(rc);
1406         }
1407         rc = kgnilnd_create_peer_safe(&peer, nid, net, node_state);
1408         if (rc != 0) {
1409                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1410                 RETURN(rc);
1411         }
1412
1413         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1414         up_read(&kgnilnd_data.kgn_net_rw_sem);
1415
1416         kgnilnd_add_peer_locked(nid, peer, peerp);
1417
1418         CDEBUG(D_NET, "peer 0x%p->%s connecting %d\n",
1419                *peerp, libcfs_nid2str((*peerp)->gnp_nid),
1420                (*peerp)->gnp_connecting);
1421
1422         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1423         RETURN(0);
1424 }
1425
1426 /* needs write_lock on kgn_peer_conn_lock */
1427 void
1428 kgnilnd_cancel_peer_connect_locked(kgn_peer_t *peer, struct list_head *zombies)
1429 {
1430         kgn_tx_t        *tx, *txn;
1431
1432         /* we do care about state of gnp_connecting - we could be between
1433          * reconnect attempts, so try to find the dgram and cancel the TX
1434          * anyway. If we are in the process of posting, DON'T do anything;
1435          * once it fails or succeeds we can nuke the connect attempt.
1436          * We have no idea where in kgnilnd_post_dgram we are, so we can't
1437          * attempt to cancel until the function is done.
1438          */
1439
1440         /* make sure peer isn't in the process of connecting or waiting for connect */
1441         spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1442         if (!(list_empty(&peer->gnp_connd_list))) {
1443                 list_del_init(&peer->gnp_connd_list);
1444                 /* remove connd ref */
1445                 kgnilnd_peer_decref(peer);
1446         }
1447         spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1448
1449         if (peer->gnp_connecting == GNILND_PEER_POSTING || peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH) {
1450                 peer->gnp_connecting = GNILND_PEER_NEEDS_DEATH;
1451                 /* We are in the process of posting right now; the xchg has set it
1452                  * up for us to cancel the connect, so we are finished for now */
1453         } else {
1454                 /* no need for an exchange - we have the peer lock and it's ready for us to nuke */
1455                 LASSERTF(peer->gnp_connecting != GNILND_PEER_POSTING,
1456                         "Peer in invalid state 0x%p->%s, connecting %d\n",
1457                         peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
1458                 peer->gnp_connecting = GNILND_PEER_IDLE;
1459                 set_mb(peer->gnp_last_dgram_errno, -ETIMEDOUT);
1460                 kgnilnd_find_and_cancel_dgram(peer->gnp_net->gnn_dev,
1461                                                       peer->gnp_nid);
1462         }
1463
1464         /* The least we can do is nuke the tx's no matter what.... */
1465         list_for_each_entry_safe(tx, txn, &peer->gnp_tx_queue, tx_list) {
1466                 kgnilnd_tx_del_state_locked(tx, peer, NULL,
1467                                            GNILND_TX_ALLOCD);
1468                 list_add_tail(&tx->tx_list, zombies);
1469         }
1470 }
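
/* Callers collect the cancelled TXs on a private zombie list while holding
 * the write lock, then complete them only after dropping it, so completion
 * never runs under kgn_peer_conn_lock.  A minimal sketch of the pattern
 * (illustrative only, not compiled; the error value is arbitrary): */
#if 0
static void
cancel_connect_demo(kgn_peer_t *peer)
{
	LIST_HEAD(zombies);

	write_lock(&kgnilnd_data.kgn_peer_conn_lock);
	kgnilnd_cancel_peer_connect_locked(peer, &zombies);
	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

	/* finish the TXs outside the lock; -ECOMM is just an example */
	kgnilnd_txlist_done(&zombies, -ECOMM);
}
#endif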
1471
1472 /* needs write_lock on kgn_peer_conn_lock */
1473 void
1474 kgnilnd_del_peer_locked(kgn_peer_t *peer, int error)
1475 {
1476         /* this peer could be passive and only held for purgatory,
1477          * take a ref to ensure it doesn't disappear in this function */
1478         kgnilnd_peer_addref(peer);
1479
1480         CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);
1481
1482         /* if purgatory release cleared it out, don't try again */
1483         if (kgnilnd_peer_active(peer)) {
1484                 /* always do this to allow kgnilnd_start_connect and
1485                  * kgnilnd_finish_connect to catch this before they
1486                  * wrap up their operations */
1487                 if (kgnilnd_can_unlink_peer_locked(peer)) {
1488                         /* already released purgatory, so only active
1489                          * conns hold it */
1490                         kgnilnd_unlink_peer_locked(peer);
1491                 } else {
1492                         kgnilnd_close_peer_conns_locked(peer, error);
1493                         /* peer unlinks itself when last conn is closed */
1494                 }
1495         }
1496
1497         /* we are done, release back to the wild */
1498         kgnilnd_peer_decref(peer);
1499 }
1500
1501 int
1502 kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
1503                           int error)
1504 {
1506         LIST_HEAD               (zombies);
1507         kgn_peer_t *peer, *pnxt;
1508         int                     lo;
1509         int                     hi;
1510         int                     i;
1511         int                     rc = -ENOENT;
1512
1513         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1514
1515         if (nid != LNET_NID_ANY)
1516                 lo = hi = kgnilnd_nid2peerlist(nid) - kgnilnd_data.kgn_peers;
1517         else {
1518                 lo = 0;
1519                 hi = *kgnilnd_tunables.kgn_peer_hash_size - 1;
1520                 /* wildcards always succeed */
1521                 rc = 0;
1522         }
1523
1524         for (i = lo; i <= hi; i++) {
1525                 list_for_each_entry_safe(peer, pnxt, &kgnilnd_data.kgn_peers[i],
1526                                          gnp_list) {
1527                         LASSERTF(peer->gnp_net != NULL,
1528                                 "peer %p (%s) with NULL net\n",
1529                                  peer, libcfs_nid2str(peer->gnp_nid));
1530
1531                         if (net != NULL && peer->gnp_net != net)
1532                                 continue;
1533
1534                         if (!(nid == LNET_NID_ANY || LNET_NIDADDR(peer->gnp_nid) == LNET_NIDADDR(nid)))
1535                                 continue;
1536
1537                         /* In both cases, we want to stop any in-flight
1538                          * connect attempts */
1539                         kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1540
1541                         switch (command) {
1542                         case GNILND_DEL_CONN:
1543                                 kgnilnd_close_peer_conns_locked(peer, error);
1544                                 break;
1545                         case GNILND_DEL_PEER:
1546                                 peer->gnp_pending_unlink = 1;
1547                                 kgnilnd_admin_addref(kgnilnd_data.kgn_npending_unlink);
1548                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1549                                 kgnilnd_del_peer_locked(peer, error);
1550                                 break;
1551                         case GNILND_CLEAR_PURGATORY:
1552                                 /* Mark everything ready for detach; the reaper will
1553                                  * clean up once we release the kgn_peer_conn_lock
1554                                  */
1555                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1556                                 peer->gnp_last_errno = -EISCONN;
1557                                 /* clear reconnect state so the peer can reconnect soon */
1558                                 peer->gnp_reconnect_time = 0;
1559                                 peer->gnp_reconnect_interval = 0;
1560                                 break;
1561                         default:
1562                                 CERROR("bad command %d\n", command);
1563                                 LBUG();
1564                         }
1565                         /* we matched something */
1566                         rc = 0;
1567                 }
1568         }
1569
1570         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1571
1572         /* nuke peer TX */
1573         kgnilnd_txlist_done(&zombies, error);
1574
1575         /* This function does not return until the commands it initiated have completed,
1576          * since they have to work their way through the other threads. In the case of
1577          * shutdown, threads are not woken up until after this call is initiated, so we
1578          * cannot wait; we just need to return. The same applies to stack reset: we
1579          * shouldn't wait, as the reset thread handles closing.
1580          */
1581
1582         CFS_RACE(CFS_FAIL_GNI_RACE_RESET);
1583
1584         if (error == -ENOTRECOVERABLE || error == -ESHUTDOWN) {
1585                 return rc;
1586         }
1587
1588         wait_var_event_warning(&kgnilnd_data,
1589                                !atomic_read(&kgnilnd_data.kgn_npending_conns) &&
1590                                !atomic_read(&kgnilnd_data.kgn_npending_detach) &&
1591                                !atomic_read(&kgnilnd_data.kgn_npending_unlink),
1592                                "Waiting on %d peers %d closes %d detaches\n",
1593                                 atomic_read(&kgnilnd_data.kgn_npending_unlink),
1594                                 atomic_read(&kgnilnd_data.kgn_npending_conns),
1595                                 atomic_read(&kgnilnd_data.kgn_npending_detach));
1596
1597         return rc;
1598 }
1599
1600 kgn_conn_t *
1601 kgnilnd_get_conn_by_idx(int index)
1602 {
1603         kgn_peer_t        *peer;
1604         kgn_conn_t        *conn;
1605         int                i;
1606
1607
1608         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1609                 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1610                 list_for_each_entry(peer, &kgnilnd_data.kgn_peers[i], gnp_list) {
1611                         list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
1612                                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1613                                         continue;
1614
1615                                 if (index-- > 0)
1616                                         continue;
1617
1618                                 CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
1619                                        libcfs_nid2str(conn->gnc_peer->gnp_nid),
1620                                        atomic_read(&conn->gnc_refcount));
1621                                 kgnilnd_conn_addref(conn);
1622                                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1623                                 return conn;
1624                         }
1625                 }
1626                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1627         }
1628
1629         return NULL;
1630 }
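
/* Note the reference discipline above: the conn ref is taken while the read
 * lock is still held, so the pointer remains valid after the unlock, and the
 * caller owns that ref.  Sketch of the caller side (illustrative only, not
 * compiled; mirrors the IOC_LIBCFS_GET_CONN handler below): */
#if 0
static void
conn_by_idx_demo(int idx)
{
	kgn_conn_t *conn = kgnilnd_get_conn_by_idx(idx);

	if (conn != NULL) {
		/* safe to dereference: we hold the ref taken under the lock */
		CDEBUG(D_NET, "conn %p dev %d\n",
		       conn, conn->gnc_device->gnd_id);
		kgnilnd_conn_decref(conn);	/* caller drops its ref */
	}
}
#endif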
1631
1632 int
1633 kgnilnd_get_conn_info(kgn_peer_t *peer,
1634                       int *device_id, __u64 *peerstamp,
1635                       int *tx_seq, int *rx_seq,
1636                       int *fmaq_len, int *nfma, int *nrdma)
1637 {
1638         kgn_conn_t        *conn;
1639         int               rc = 0;
1640
1641         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1642
1643         conn = kgnilnd_find_conn_locked(peer);
1644         if (conn == NULL) {
1645                 rc = -ENOENT;
1646                 goto out;
1647         }
1648
1649         *device_id = conn->gnc_device->gnd_host_id;
1650         *peerstamp = conn->gnc_peerstamp;
1651         *tx_seq = atomic_read(&conn->gnc_tx_seq);
1652         *rx_seq = atomic_read(&conn->gnc_rx_seq);
1653         *fmaq_len = kgnilnd_count_list(&conn->gnc_fmaq);
1654         *nfma = atomic_read(&conn->gnc_nlive_fma);
1655         *nrdma = atomic_read(&conn->gnc_nlive_rdma);
1656 out:
1657         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1658         return rc;
1659 }
1660
1661 /* needs write_lock on kgn_peer_conn_lock */
1662 int
1663 kgnilnd_close_peer_conns_locked(kgn_peer_t *peer, int why)
1664 {
1665         kgn_conn_t         *conn;
1666         struct list_head   *ctmp, *cnxt;
1667         int                 count = 0;
1668
1669         list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
1670                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
1671
1672                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1673                         continue;
1674
1675                 count++;
1676                 /* we mark gnc_needs closing and increment kgn_npending_conns so that
1677                  * kgnilnd_del_conn_or_peer can wait on the other threads closing
1678                  * and cleaning up the connection.
1679                  */
1680                 if (!conn->gnc_needs_closing) {
1681                         conn->gnc_needs_closing = 1;
1682                         kgnilnd_admin_addref(kgnilnd_data.kgn_npending_conns);
1683                 }
1684                 kgnilnd_close_conn_locked(conn, why);
1685         }
1686         return count;
1687 }
1688
1689 int
1690 kgnilnd_report_node_state(lnet_nid_t nid, int down)
1691 {
1692         int         rc;
1693         kgn_peer_t  *peer, *new_peer;
1694         LIST_HEAD(zombies);
1695
1696         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1697         peer = kgnilnd_find_peer_locked(nid);
1698
1699         if (peer == NULL) {
1700                 int       i;
1701                 int       found_net = 0;
1702                 kgn_net_t *net;
1703
1704                 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1705
1706                 /* Don't add a peer for node up events */
1707                 if (down == GNILND_PEER_UP)
1708                         return 0;
1709
1710                 /* find any valid net - we don't care which one... */
1711                 down_read(&kgnilnd_data.kgn_net_rw_sem);
1712                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
1713                         list_for_each_entry(net, &kgnilnd_data.kgn_nets[i],
1714                                             gnn_list) {
1715                                 found_net = 1;
1716                                 break;
1717                         }
1718
1719                         if (found_net) {
1720                                 break;
1721                         }
1722                 }
1723                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1724
1725                 if (!found_net) {
1726                         CNETERR("Could not find a net for nid %lld\n", nid);
1727                         return 1;
1728                 }
1729
1730                 /* The nid passed in does not yet contain the net portion.
1731                  * Let's build it up now
1732                  */
1733                 nid = LNET_MKNID(LNET_NID_NET(&net->gnn_ni->ni_nid), nid);
1734                 rc = kgnilnd_add_peer(net, nid, &new_peer);
1735
1736                 if (rc) {
1737                         CNETERR("Could not add peer for nid %lld, rc %d\n",
1738                                 nid, rc);
1739                         return 1;
1740                 }
1741
1742                 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1743                 peer = kgnilnd_find_peer_locked(nid);
1744
1745                 if (peer == NULL) {
1746                         CNETERR("Could not find peer for nid %lld\n", nid);
1747                         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1748                         return 1;
1749                 }
1750         }
1751
1752         peer->gnp_state = down;
1753
1754         if (down == GNILND_PEER_DOWN) {
1755                 kgn_conn_t *conn;
1756
1757                 peer->gnp_down_event_time = jiffies;
1758                 kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1759                 conn = kgnilnd_find_conn_locked(peer);
1760
1761                 if (conn != NULL) {
1762                         kgnilnd_close_conn_locked(conn, -ENETRESET);
1763                 }
1764         } else {
1765                 peer->gnp_up_event_time = jiffies;
1766         }
1767
1768         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1769
1770         if (down == GNILND_PEER_DOWN) {
1771                 /* using ENETRESET so we don't get messages from
1772                  * kgnilnd_tx_done
1773                  */
1774                 kgnilnd_txlist_done(&zombies, -ENETRESET);
1775                 kgnilnd_peer_notify(peer, -ECONNRESET, 0);
1776                 LCONSOLE_INFO("Received down event for nid %d\n",
1777                               LNET_NIDADDR(nid));
1778         }
1779
1780         return 0;
1781 }
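
/* The NID surgery above leans on three macros: LNET_NIDADDR() extracts the
 * host portion of a NID, while LNET_NID_NET()/LNET_MKNID() rebuild a full
 * NID for a given net.  A worked example (illustrative only, not compiled;
 * the numeric value is made up): */
#if 0
static void
nid_macro_demo(struct lnet_ni *ni)
{
	lnet_nid_t bare = 42;	/* host portion only, e.g. from a node event */
	lnet_nid_t full;

	/* graft this NI's network number onto the bare address */
	full = LNET_MKNID(LNET_NID_NET(&ni->ni_nid), bare);

	/* LNET_NIDADDR() recovers the host portion again */
	LASSERT(LNET_NIDADDR(full) == 42);
}
#endif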
1782
1783 int
1784 kgnilnd_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
1785 {
1786         struct libcfs_ioctl_data *data = arg;
1787         kgn_net_t                *net = ni->ni_data;
1788         int                       rc = -EINVAL;
1789
1790         LASSERT(ni == net->gnn_ni);
1791
1792         switch (cmd) {
1793         case IOC_LIBCFS_GET_PEER: {
1794                 lnet_nid_t   nid = 0;
1795                 kgn_peer_t  *peer = NULL;
1796                 __u32 nic_addr = 0;
1797                 __u64 peerstamp = 0;
1798                 int peer_refcount = 0, peer_connecting = 0;
1799                 int device_id = 0;
1800                 int tx_seq = 0, rx_seq = 0;
1801                 int fmaq_len = 0, nfma = 0, nrdma = 0;
1802
1803                 rc = kgnilnd_get_peer_info(data->ioc_count, &peer,
1804                                            &nid, &nic_addr, &peer_refcount,
1805                                            &peer_connecting);
1806                 if (rc)
1807                         break;
1808
1809                 /* Barf */
1810                 /* LNET_MKNID is used to mask from LNet the multiplexing/demultiplexing of connections and peers.
1811                  * LNet assumes a conn and peer per net; LNET_MKNID/LNET_NIDADDR let us show LNet what it
1812                  * wants to see instead of the underlying network that is actually used to send the data.
1813                  */
1814                 data->ioc_nid    = LNET_MKNID(LNET_NID_NET(&ni->ni_nid),
1815                                               LNET_NIDADDR(nid));
1816                 data->ioc_flags  = peer_connecting;
1817                 data->ioc_count  = peer_refcount;
1818
1819                 rc = kgnilnd_get_conn_info(peer, &device_id, &peerstamp,
1820                                            &tx_seq, &rx_seq, &fmaq_len,
1821                                            &nfma, &nrdma);
1822
1823                 /* This is allowable - a persistent peer could not
1824                  * have a connection */
1825                 if (rc) {
1826                         /* flag to indicate we are not connected -
1827                          * need to print as such */
1828                         data->ioc_flags |= (1<<16);
1829                         rc = 0;
1830                 } else {
1831                         /* still barf */
1832                         data->ioc_net = device_id;
1833                         data->ioc_u64[0] = peerstamp;
1834                         data->ioc_u32[0] = fmaq_len;
1835                         data->ioc_u32[1] = nfma;
1836                         data->ioc_u32[2] = tx_seq;
1837                         data->ioc_u32[3] = rx_seq;
1838                         data->ioc_u32[4] = nrdma;
1839                 }
1840                 break;
1841         }
1842         case IOC_LIBCFS_ADD_PEER: {
1843                 /* just a dummy value to allow using the common interface */
1844                 kgn_peer_t      *peer;
1845                 rc = kgnilnd_add_peer(net, data->ioc_nid, &peer);
1846                 break;
1847         }
1848         case IOC_LIBCFS_DEL_PEER: {
1849                 /* NULL is passed in so it affects all peers in existence without regard to
1850                  * network, as the peer may not exist on the network LNet believes it to be on.
1851                  */
1852                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1853                                               GNILND_DEL_PEER, -EUCLEAN);
1854                 break;
1855         }
1856         case IOC_LIBCFS_GET_CONN: {
1857                 kgn_conn_t *conn = kgnilnd_get_conn_by_idx(data->ioc_count);
1858
1859                 if (conn == NULL)
1860                         rc = -ENOENT;
1861                 else {
1862                         rc = 0;
1863                         /* LNET_MKNID is used to build the correct address based on what LNET wants to see instead of
1864                          * the generic connection that is used to send the data
1865                          */
1866                         data->ioc_nid    = LNET_MKNID(LNET_NID_NET(&ni->ni_nid),
1867                                                       LNET_NIDADDR(conn->gnc_peer->gnp_nid));
1868                         data->ioc_u32[0] = conn->gnc_device->gnd_id;
1869                         kgnilnd_conn_decref(conn);
1870                 }
1871                 break;
1872         }
1873         case IOC_LIBCFS_CLOSE_CONNECTION: {
1874                 /* use error = -ENETRESET to indicate it was an lctl disconnect */
1875                 /* NULL is passed in so it affects all the nets as the connection is virtual
1876                  * and may not exist on the network LNET believes it to be on.
1877                  */
1878                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1879                                               GNILND_DEL_CONN, -ENETRESET);
1880                 break;
1881         }
1882         case IOC_LIBCFS_PUSH_CONNECTION: {
1883                 /* we use this to flush purgatory */
1884                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1885                                               GNILND_CLEAR_PURGATORY, -EUCLEAN);
1886                 break;
1887         }
1888         case IOC_LIBCFS_REGISTER_MYNID: {
1889                 /* Ignore if this is a noop */
1890                 if (data->ioc_nid == lnet_nid_to_nid4(&ni->ni_nid)) {
1891                         rc = 0;
1892                 } else {
1893                         CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
1894                                libcfs_nid2str(data->ioc_nid),
1895                                libcfs_nidstr(&ni->ni_nid));
1896                         rc = -EINVAL;
1897                 }
1898                 break;
1899         }
1900         }
1901
1902         return rc;
1903 }
1904
1905 int
1906 kgnilnd_dev_init(kgn_device_t *dev)
1907 {
1908         gni_return_t      rrc;
1909         int               rc = 0;
1910         unsigned int      cq_size;
1911         ENTRY;
1912
1913         /* size of these CQs should be able to accommodate the outgoing
1914          * RDMA and SMSG transactions.  Since we don't really know what we
1915          * need here, we'll take credits * 2 * 3 to allow plenty of headroom.
1916          * We need to dig into this more with the performance work. */
1917         cq_size = *kgnilnd_tunables.kgn_credits * 2 * 3;
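	/* e.g. a hypothetical kgn_credits value of 256 would give
	 * 256 * 2 * 3 = 1536 CQ entries here */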
1918
1919         rrc = kgnilnd_cdm_create(dev->gnd_id, *kgnilnd_tunables.kgn_ptag,
1920                                  *kgnilnd_tunables.kgn_pkey, 0,
1921                                  &dev->gnd_domain);
1922         if (rrc != GNI_RC_SUCCESS) {
1923                 CERROR("Can't create CDM %d (%d)\n", dev->gnd_id, rrc);
1924                 GOTO(failed, rc = -ENODEV);
1925         }
1926
1927         rrc = kgnilnd_cdm_attach(dev->gnd_domain, dev->gnd_id,
1928                                  &dev->gnd_host_id, &dev->gnd_handle);
1929         if (rrc != GNI_RC_SUCCESS) {
1930                 CERROR("Can't attach CDM to device %d (%d)\n",
1931                         dev->gnd_id, rrc);
1932                 GOTO(failed, rc = -ENODEV);
1933         }
1934
1935         /* a bit gross, but not much we can do - Aries Sim doesn't have
1936          * hardcoded NIC/NID that we can use */
1937         rc = kgnilnd_setup_nic_translation(dev->gnd_host_id);
1938         if (rc != 0)
1939                 GOTO(failed, rc = -ENODEV);
1940
1941         /* only dev 0 gets the errors - no need to reset the stack twice
1942          * - this works because we have a single PTAG; if we had more,
1943          * we'd need multiple handlers */
1944         if (dev->gnd_id == 0) {
1945                 rrc = kgnilnd_subscribe_errors(dev->gnd_handle,
1946                                                 GNI_ERRMASK_CRITICAL |
1947                                                 GNI_ERRMASK_UNKNOWN_TRANSACTION,
1948                                               0, NULL, kgnilnd_critical_error,
1949                                               &dev->gnd_err_handle);
1950                 if (rrc != GNI_RC_SUCCESS) {
1951                         CERROR("Can't subscribe for errors on device %d: rc %d\n",
1952                                 dev->gnd_id, rrc);
1953                         GOTO(failed, rc = -ENODEV);
1954                 }
1955
1956                 rc = kgnilnd_set_quiesce_callback(dev->gnd_handle,
1957                                                   kgnilnd_quiesce_end_callback);
1958                 if (rc != GNI_RC_SUCCESS) {
1959                         CERROR("Can't subscribe for quiesce callback on device %d: rc %d\n",
1960                                 dev->gnd_id, rc);
1961                         GOTO(failed, rc = -ENODEV);
1962                 }
1963         }
1964
1965         rc = kgnilnd_nicaddr_to_nid(dev->gnd_host_id, &dev->gnd_nid);
1966         if (rc < 0) {
1967                 /* log messages during startup */
1968                 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
1969                         CERROR("couldn't translate host_id 0x%x to nid. rc %d\n",
1970                                 dev->gnd_host_id, rc);
1971                 }
1972                 GOTO(failed, rc = -ESRCH);
1973         }
1974         CDEBUG(D_NET, "NIC %x -> NID %d\n", dev->gnd_host_id, dev->gnd_nid);
1975
1976         rrc = kgnilnd_cq_create(dev->gnd_handle, *kgnilnd_tunables.kgn_credits,
1977                                 0, kgnilnd_device_callback,
1978                                 dev->gnd_id, &dev->gnd_snd_rdma_cqh);
1979         if (rrc != GNI_RC_SUCCESS) {
1980                 CERROR("Can't create rdma send cq size %d for device "
1981                        "%d (%d)\n", *kgnilnd_tunables.kgn_credits, dev->gnd_id, rrc);
1982                 GOTO(failed, rc = -EINVAL);
1983         }
1984
1985         rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
1986                         0, kgnilnd_device_callback, dev->gnd_id,
1987                         &dev->gnd_snd_fma_cqh);
1988         if (rrc != GNI_RC_SUCCESS) {
1989                 CERROR("Can't create fma send cq size %u for device %d (%d)\n",
1990                        cq_size, dev->gnd_id, rrc);
1991                 GOTO(failed, rc = -EINVAL);
1992         }
1993
1994         /* This one we size differently - overflows are possible and it needs to be
1995          * sized based on machine size */
1996         rrc = kgnilnd_cq_create(dev->gnd_handle,
1997                         *kgnilnd_tunables.kgn_fma_cq_size,
1998                         0, kgnilnd_device_callback, dev->gnd_id,
1999                         &dev->gnd_rcv_fma_cqh);
2000         if (rrc != GNI_RC_SUCCESS) {
2001                 CERROR("Can't create fma cq size %d for device %d (%d)\n",
2002                        *kgnilnd_tunables.kgn_fma_cq_size, dev->gnd_id, rrc);
2003                 GOTO(failed, rc = -EINVAL);
2004         }
2005
2006         rrc = kgnilnd_register_smdd_buf(dev);
2007         if (rrc != GNI_RC_SUCCESS) {
2008                 GOTO(failed, rc = -EINVAL);
2009         }
2010
2011         RETURN(0);
2012
2013 failed:
2014         kgnilnd_dev_fini(dev);
2015         RETURN(rc);
2016 }
2017
2018 void
2019 kgnilnd_dev_fini(kgn_device_t *dev)
2020 {
2021         gni_return_t rrc;
2022         ENTRY;
2023
2024         /* At quiesce or reset time, need to loop through and clear gnd_ready_conns ? */
2025         LASSERTF(list_empty(&dev->gnd_ready_conns) &&
2026                  list_empty(&dev->gnd_map_tx) &&
2027                  list_empty(&dev->gnd_rdmaq) &&
2028                  list_empty(&dev->gnd_delay_conns),
2029                  "dev 0x%p ready_conns %d@0x%p delay_conns %d@0x%p "
2030                  "map_tx %d@0x%p rdmaq %d@0x%p\n",
2031                  dev, kgnilnd_count_list(&dev->gnd_ready_conns), &dev->gnd_ready_conns,
2032                  kgnilnd_count_list(&dev->gnd_delay_conns), &dev->gnd_delay_conns,
2033                  kgnilnd_count_list(&dev->gnd_map_tx), &dev->gnd_map_tx,
2034                  kgnilnd_count_list(&dev->gnd_rdmaq), &dev->gnd_rdmaq);
2035
2036         /* These should follow from tearing down all connections */
2037         LASSERTF(dev->gnd_map_nphys == 0 && dev->gnd_map_physnop == 0,
2038                 "%d physical mappings of %d pages still mapped\n",
2039                  dev->gnd_map_nphys, dev->gnd_map_physnop);
2040
2041         LASSERTF(atomic_read(&dev->gnd_n_mdd) == 0 &&
2042                  atomic_read(&dev->gnd_n_mdd_held) == 0 &&
2043                  atomic64_read(&dev->gnd_nbytes_map) == 0,
2044                  "%d SMSG mappings of %lld bytes still mapped or held %d\n",
2045                  atomic_read(&dev->gnd_n_mdd),
2046                  (u64)atomic64_read(&dev->gnd_nbytes_map),
2047                  atomic_read(&dev->gnd_n_mdd_held));
2048
2049         LASSERT(list_empty(&dev->gnd_map_list));
2050
2051         /* What other assertions needed to ensure all connections torn down ? */
2052
2053         /* check all counters == 0 (EP, MDD, etc) */
2054
2055         /* if we are resetting due to quiesce (stack reset), don't check
2056          * thread states */
2057         LASSERTF(kgnilnd_data.kgn_quiesce_trigger ||
2058                 atomic_read(&kgnilnd_data.kgn_nthreads) == 0,
2059                 "tried to shutdown with threads active\n");
2060
2061         if (dev->gnd_smdd_hold_buf) {
2062                 rrc = kgnilnd_deregister_smdd_buf(dev);
2063                 LASSERTF(rrc == GNI_RC_SUCCESS,
2064                 "bad rc from deregistration of sMDD buffer: %d\n", rrc);
2065                 dev->gnd_smdd_hold_buf = NULL;
2066         }
2067
2068         if (dev->gnd_rcv_fma_cqh) {
2069                 rrc = kgnilnd_cq_destroy(dev->gnd_rcv_fma_cqh);
2070                 LASSERTF(rrc == GNI_RC_SUCCESS,
2071                         "bad rc from gni_cq_destroy on rcv_fma_cqh: %d\n", rrc);
2072                 dev->gnd_rcv_fma_cqh = NULL;
2073         }
2074
2075         if (dev->gnd_snd_rdma_cqh) {
2076                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_rdma_cqh);
2077                 LASSERTF(rrc == GNI_RC_SUCCESS,
2078                         "bad rc from gni_cq_destroy on send_rdma_cqh: %d\n", rrc);
2079                 dev->gnd_snd_rdma_cqh = NULL;
2080         }
2081
2082         if (dev->gnd_snd_fma_cqh) {
2083                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_fma_cqh);
2084                 LASSERTF(rrc == GNI_RC_SUCCESS,
2085                         "bad rc from gni_cq_destroy on snd_fma_cqh: %d\n", rrc);
2086                 dev->gnd_snd_fma_cqh = NULL;
2087         }
2088
2089         if (dev->gnd_err_handle) {
2090                 rrc = kgnilnd_release_errors(dev->gnd_err_handle);
2091                 LASSERTF(rrc == GNI_RC_SUCCESS,
2092                         "bad rc from gni_release_errors: %d\n", rrc);
2093                 dev->gnd_err_handle = NULL;
2094         }
2095
2096         if (dev->gnd_domain) {
2097                 rrc = kgnilnd_cdm_destroy(dev->gnd_domain);
2098                 LASSERTF(rrc == GNI_RC_SUCCESS,
2099                         "bad rc from gni_cdm_destroy: %d\n", rrc);
2100                 dev->gnd_domain = NULL;
2101         }
2102
2103         EXIT;
2104 }
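
/* NB: each teardown step above is guarded by a NULL check and clears its
 * handle afterwards, so kgnilnd_dev_fini() is safe to call from any
 * partial-init failure path in kgnilnd_dev_init() (see its failed: label). */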
2105
2106 int kgnilnd_base_startup(void)
2107 {
2108         long long            pkmem = libcfs_kmem_read();
2109         int                  rc;
2110         int                  i, j;
2111         kgn_device_t        *dev;
2112         struct task_struct  *thrd;
2113
2114 #if defined(CONFIG_CRAY_XT) && !defined(CONFIG_CRAY_COMPUTE)
2117         struct sysinfo si;
2120 #endif
2121
2122         ENTRY;
2123
2124         LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_NOTHING,
2125                 "init %d\n", kgnilnd_data.kgn_init);
2126
2127         /* zero pointers, flags etc */
2128         memset(&kgnilnd_data, 0, sizeof(kgnilnd_data));
2129         kgnilnd_check_kgni_version();

#if defined(CONFIG_CRAY_XT) && !defined(CONFIG_CRAY_COMPUTE)
        /* limit how much memory can be allocated for fma blocks in
         * instances where many nodes need to reconnect at the same time;
         * set only after the memset above so the limit is not zeroed away */
        si_meminfo(&si);
        kgnilnd_data.free_pages_limit = si.totalram/4;
#endif
2130
2131         /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
2132          * a unique (for all time) connstamp so we can uniquely identify
2133          * the sender.  The connstamp is an incrementing counter
2134          * initialised with seconds + microseconds at startup time.  So we
2135          * rely on NOT creating connections more frequently on average than
2136          * 1MHz to ensure we don't use old connstamps when we reboot. */
2137         kgnilnd_data.kgn_connstamp =
2138                  kgnilnd_data.kgn_peerstamp =
2139                         ktime_get_seconds();
2140
2141         init_rwsem(&kgnilnd_data.kgn_net_rw_sem);
2142
2143         for (i = 0; i < GNILND_MAXDEVS; i++) {
2144                 kgn_device_t  *dev = &kgnilnd_data.kgn_devices[i];
2145
2146                 dev->gnd_id = i;
2147                 INIT_LIST_HEAD(&dev->gnd_ready_conns);
2148                 INIT_LIST_HEAD(&dev->gnd_delay_conns);
2149                 INIT_LIST_HEAD(&dev->gnd_map_tx);
2150                 INIT_LIST_HEAD(&dev->gnd_fma_buffs);
2151                 mutex_init(&dev->gnd_cq_mutex);
2152                 mutex_init(&dev->gnd_fmablk_mutex);
2153                 spin_lock_init(&dev->gnd_fmablk_lock);
2154                 init_waitqueue_head(&dev->gnd_waitq);
2155                 init_waitqueue_head(&dev->gnd_dgram_waitq);
2156                 init_waitqueue_head(&dev->gnd_dgping_waitq);
2157                 spin_lock_init(&dev->gnd_lock);
2158                 INIT_LIST_HEAD(&dev->gnd_map_list);
2159                 spin_lock_init(&dev->gnd_map_lock);
2160                 atomic_set(&dev->gnd_nfmablk, 0);
2161                 atomic_set(&dev->gnd_fmablk_vers, 1);
2162                 atomic_set(&dev->gnd_neps, 0);
2163                 atomic_set(&dev->gnd_canceled_dgrams, 0);
2164                 INIT_LIST_HEAD(&dev->gnd_connd_peers);
2165                 spin_lock_init(&dev->gnd_connd_lock);
2166                 spin_lock_init(&dev->gnd_dgram_lock);
2167                 spin_lock_init(&dev->gnd_rdmaq_lock);
2168                 INIT_LIST_HEAD(&dev->gnd_rdmaq);
2169                 init_rwsem(&dev->gnd_conn_sem);
2170
2171                 /* alloc & setup nid based dgram table */
2172                 CFS_ALLOC_PTR_ARRAY(dev->gnd_dgrams,
2173                                     *kgnilnd_tunables.kgn_peer_hash_size);
2174
2175                 if (dev->gnd_dgrams == NULL)
2176                         GOTO(failed, rc = -ENOMEM);
2177
2178                 for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++) {
2179                         INIT_LIST_HEAD(&dev->gnd_dgrams[j]);
2180                 }
2181                 atomic_set(&dev->gnd_ndgrams, 0);
2182                 atomic_set(&dev->gnd_nwcdgrams, 0);
2183                 /* setup timer for RDMAQ processing */
2184                 cfs_timer_setup(&dev->gnd_rdmaq_timer,
2185                                 kgnilnd_schedule_device_timer,
2186                                 (unsigned long)dev, 0);
2187
2188                 /* setup timer for mapping processing */
2189                 cfs_timer_setup(&dev->gnd_map_timer,
2190                                 kgnilnd_schedule_device_timer,
2191                                 (unsigned long)dev, 0);
2192
2193         }
2194
2195         /* CQID 0 isn't allowed, set to MAX_MSG_ID - 1 to check for conflicts early */
2196         kgnilnd_data.kgn_next_cqid = GNILND_MAX_MSG_ID - 1;
2197         kgnilnd_data.kgn_new_min_timeout = *kgnilnd_tunables.kgn_timeout;
2198         init_waitqueue_head(&kgnilnd_data.kgn_reaper_waitq);
2199         init_waitqueue_head(&kgnilnd_data.kgn_ruhroh_waitq);
2200         spin_lock_init(&kgnilnd_data.kgn_reaper_lock);
2201
2202         mutex_init(&kgnilnd_data.kgn_quiesce_mutex);
2203         atomic_set(&kgnilnd_data.kgn_nquiesce, 0);
2204         atomic_set(&kgnilnd_data.kgn_npending_conns, 0);
2205         atomic_set(&kgnilnd_data.kgn_npending_unlink, 0);
2206         atomic_set(&kgnilnd_data.kgn_npending_detach, 0);
2207         atomic_set(&kgnilnd_data.kgn_rev_offset, 0);
2208         atomic_set(&kgnilnd_data.kgn_rev_length, 0);
2209         atomic_set(&kgnilnd_data.kgn_rev_copy_buff, 0);
2210
2211         /* OK to call kgnilnd_api_shutdown() to cleanup now */
2212         kgnilnd_data.kgn_init = GNILND_INIT_DATA;
2213         if (!try_module_get(THIS_MODULE))
2214                 GOTO(failed, rc = -ENOENT);
2215
2216         rwlock_init(&kgnilnd_data.kgn_peer_conn_lock);
2217
2218         CFS_ALLOC_PTR_ARRAY(kgnilnd_data.kgn_peers,
2219                             *kgnilnd_tunables.kgn_peer_hash_size);
2220
2221         if (kgnilnd_data.kgn_peers == NULL)
2222                 GOTO(failed, rc = -ENOMEM);
2223
2224         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2225                 INIT_LIST_HEAD(&kgnilnd_data.kgn_peers[i]);
2226         }
2227
2228         CFS_ALLOC_PTR_ARRAY(kgnilnd_data.kgn_conns,
2229                             *kgnilnd_tunables.kgn_peer_hash_size);
2230
2231         if (kgnilnd_data.kgn_conns == NULL)
2232                 GOTO(failed, rc = -ENOMEM);
2233
2234         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2235                 INIT_LIST_HEAD(&kgnilnd_data.kgn_conns[i]);
2236         }
2237
2238         CFS_ALLOC_PTR_ARRAY(kgnilnd_data.kgn_nets,
2239                             *kgnilnd_tunables.kgn_net_hash_size);
2240
2241         if (kgnilnd_data.kgn_nets == NULL)
2242                 GOTO(failed, rc = -ENOMEM);
2243
2244         for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
2245                 INIT_LIST_HEAD(&kgnilnd_data.kgn_nets[i]);
2246         }
2247
2248         kgnilnd_data.kgn_mbox_cache =
2249                 kmem_cache_create("kgn_mbox_block", GNILND_MBOX_SIZE, 0,
2250                                   SLAB_HWCACHE_ALIGN, NULL);
2251         if (kgnilnd_data.kgn_mbox_cache == NULL) {
2252                 CERROR("Can't create slab for physical mbox blocks\n");
2253                 GOTO(failed, rc = -ENOMEM);
2254         }
2255
2256         kgnilnd_data.kgn_rx_cache =
2257                 kmem_cache_create("kgn_rx_t", sizeof(kgn_rx_t), 0, 0, NULL);
2258         if (kgnilnd_data.kgn_rx_cache == NULL) {
2259                 CERROR("Can't create slab for kgn_rx_t descriptors\n");
2260                 GOTO(failed, rc = -ENOMEM);
2261         }
2262
2263         kgnilnd_data.kgn_tx_cache =
2264                 kmem_cache_create("kgn_tx_t", sizeof(kgn_tx_t), 0, 0, NULL);
2265         if (kgnilnd_data.kgn_tx_cache == NULL) {
2266                 CERROR("Can't create slab for kgn_tx_t\n");
2267                 GOTO(failed, rc = -ENOMEM);
2268         }
2269
2270         kgnilnd_data.kgn_tx_phys_cache =
2271                 kmem_cache_create("kgn_tx_phys",
2272                                    GNILND_MAX_IOV * sizeof(gni_mem_segment_t),
2273                                    0, 0, NULL);
2274         if (kgnilnd_data.kgn_tx_phys_cache == NULL) {
2275                 CERROR("Can't create slab for kgn_tx_phys\n");
2276                 GOTO(failed, rc = -ENOMEM);
2277         }
2278
2279         kgnilnd_data.kgn_dgram_cache =
2280                 kmem_cache_create("kgn_dgram_t", sizeof(kgn_dgram_t), 0, 0, NULL);
2281         if (kgnilnd_data.kgn_dgram_cache == NULL) {
2282                 CERROR("Can't create slab for outgoing datagrams\n");
2283                 GOTO(failed, rc = -ENOMEM);
2284         }
2285
2286         /* allocate an array with one page-pointer list per cpu; each list
2287          * gets GNILND_MAX_IOV entries below */
2287         kgnilnd_data.kgn_cksum_map_pages = kmalloc(num_possible_cpus() * sizeof (struct page *),
2288                                                    GFP_KERNEL);
2289         if (kgnilnd_data.kgn_cksum_map_pages == NULL) {
2290                 CERROR("Can't allocate vmap cksum pages\n");
2291                 GOTO(failed, rc = -ENOMEM);
2292         }
2293         kgnilnd_data.kgn_cksum_npages = num_possible_cpus();
2294         memset(kgnilnd_data.kgn_cksum_map_pages, 0,
2295                 kgnilnd_data.kgn_cksum_npages * sizeof (struct page *));
2296
2297         for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
2298                 kgnilnd_data.kgn_cksum_map_pages[i] = kmalloc(GNILND_MAX_IOV * sizeof (struct page *),
2299                                                               GFP_KERNEL);
2300                 if (kgnilnd_data.kgn_cksum_map_pages[i] == NULL) {
2301                         CERROR("Can't allocate vmap cksum pages for cpu %d\n", i);
2302                         GOTO(failed, rc = -ENOMEM);
2303                 }
2304         }
2305
2306         LASSERT(kgnilnd_data.kgn_ndevs == 0);
2307
2308         /* Use all available GNI devices */
2309         for (i = 0; i < GNILND_MAXDEVS; i++) {
2310                 dev = &kgnilnd_data.kgn_devices[kgnilnd_data.kgn_ndevs];
2311
2312                 rc = kgnilnd_dev_init(dev);
2313                 if (rc == 0) {
2314                         /* Increment here so base_shutdown cleans it up */
2315                         kgnilnd_data.kgn_ndevs++;
2316
2317                         rc = kgnilnd_allocate_phys_fmablk(dev);
2318                         if (rc)
2319                                 GOTO(failed, rc);
2320                 }
2321         }
2322
2323         if (kgnilnd_data.kgn_ndevs == 0) {
2324                 CERROR("Can't initialise any GNI devices\n");
2325                 GOTO(failed, rc = -ENODEV);
2326         }
2327
2328         rc = kgnilnd_thread_start(kgnilnd_reaper, NULL, "kgnilnd_rpr", 0);
2329         if (rc != 0) {
2330                 CERROR("Can't spawn gnilnd reaper: %d\n", rc);
2331                 GOTO(failed, rc);
2332         }
2333
2334         rc = kgnilnd_start_rca_thread();
2335         if (rc != 0) {
2336                 CERROR("Can't spawn gnilnd rca: %d\n", rc);
2337                 GOTO(failed, rc);
2338         }
2339
2340         /*
2341          * Start ruhroh thread.  We can't use kgnilnd_thread_start() because
2342          * we don't want this thread included in kgnilnd_data.kgn_nthreads
2343          * count.  This thread controls quiesce, so it mustn't
2344          * quiesce itself.
2345          */
2346         thrd = kthread_run(kgnilnd_ruhroh_thread, NULL, "%s_%02d", "kgnilnd_rr", 0);
2347         if (IS_ERR(thrd)) {
2348                 rc = PTR_ERR(thrd);
2349                 CERROR("Can't spawn gnilnd ruhroh thread: %d\n", rc);
2350                 GOTO(failed, rc);
2351         }
2352
2353         /* threads will load balance across devs as they are available */
2354         if (*kgnilnd_tunables.kgn_thread_affinity) {
2355                 rc = kgnilnd_start_sd_threads();
2356                 if (rc != 0)
2357                         GOTO(failed, rc);
2358         } else {
2359                 for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
2360                         rc = kgnilnd_thread_start(kgnilnd_scheduler,
2361                                                   (void *)((long)i),
2362                                                   "kgnilnd_sd", i);
2363                         if (rc != 0) {
2364                                 CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
2365                                        i, rc);
2366                                 GOTO(failed, rc);
2367                         }
2368                 }
2369         }
2370
2371         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2372                 dev = &kgnilnd_data.kgn_devices[i];
2373                 rc = kgnilnd_thread_start(kgnilnd_dgram_mover, dev,
2374                                           "kgnilnd_dg", dev->gnd_id);
2375                 if (rc != 0) {
2376                         CERROR("Can't spawn gnilnd dgram_mover[%d]: %d\n",
2377                                dev->gnd_id, rc);
2378                         GOTO(failed, rc);
2379                 }
2380
2381                 rc = kgnilnd_thread_start(kgnilnd_dgram_waitq, dev,
2382                                           "kgnilnd_dgn", dev->gnd_id);
2383                 if (rc != 0) {
2384                         CERROR("Can't spawn gnilnd dgram_waitq[%d]: %d\n",
2385                                 dev->gnd_id, rc);
2386                         GOTO(failed, rc);
2387                 }
2388
2389                 rc = kgnilnd_setup_wildcard_dgram(dev);
2390
2391                 if (rc != 0) {
2392                         CERROR("Can't create wildcard dgrams[%d]: %d\n",
2393                                 dev->gnd_id, rc);
2394                         GOTO(failed, rc);
2395                 }
2396         }
2397
2398         /* flag everything initialised */
2399         kgnilnd_data.kgn_init = GNILND_INIT_ALL;
2400         /*****************************************************/
2401
2402         CDEBUG(D_MALLOC, "initial kmem %lld\n", pkmem);
2403         RETURN(0);
2404
2405 failed:
2406         kgnilnd_base_shutdown();
2407         kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
2408         RETURN(rc);
2409 }
2410
2411 void
2412 kgnilnd_base_shutdown(void)
2413 {
2414         int                     i, j;
2415         ENTRY;
2416
2417         while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_PAUSE_SHUTDOWN, 1)) {};
2418
2419         kgnilnd_data.kgn_wc_kill = 1;
2420
2421         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2422                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2423                 kgnilnd_cancel_wc_dgrams(dev);
2424                 kgnilnd_cancel_dgrams(dev);
2425                 kgnilnd_del_conn_or_peer(NULL, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
2426                 kgnilnd_wait_for_canceled_dgrams(dev);
2427         }
2428
2429         /* We need to verify there are no conns left before we let the threads
2430          * shut down; otherwise we could clean up the peers but still have
2431          * some outstanding conns due to orphaned datagram conns that are
2432          * being cleaned up.
2433          */
2434         i = 2;
2435         while (atomic_read(&kgnilnd_data.kgn_nconns) != 0) {
2436                 i++;
2437
2438                 for(j = 0; j < kgnilnd_data.kgn_ndevs; ++j) {
2439                         kgn_device_t *dev = &kgnilnd_data.kgn_devices[j];
2440                         kgnilnd_schedule_device(dev);
2441                 }
2442
2443                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2444                         "Waiting for conns to be cleaned up %d\n", atomic_read(&kgnilnd_data.kgn_nconns));
2445                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
2446         }
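
	/* (i & -i) == i is the classic power-of-two test: i & -i isolates the
	 * lowest set bit, which equals i only when a single bit is set.  With
	 * i starting at 2 and incrementing each pass, the D_WARNING copy fires
	 * on passes where i is 4, 8, 16, ... - exponential console backoff -
	 * while the D_NET copy is emitted every second.  Illustrative only,
	 * not compiled: */
#if 0
	/* (2 & -2) == 2 and (4 & -4) == 4, but (6 & -6) == 2 != 6 */
	LASSERT(((4 & -4) == 4) && ((6 & -6) != 6));
#endif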
2447         /* Peer state all cleaned up BEFORE setting shutdown, so threads don't
2448          * have to worry about shutdown races.  NB connections may be created
2449          * while there are still active connds, but these will be temporary
2450          * since peer creation always fails after the listener has started to
2451          * shut down.
2452          * All peers should have been cleared out on the nets */
2453         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2454                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2455
2456         /* Wait for the ruhroh thread to shut down. */
2457         kgnilnd_data.kgn_ruhroh_shutdown = 1;
2458         wake_up(&kgnilnd_data.kgn_ruhroh_waitq);
2459         i = 2;
2460         while (kgnilnd_data.kgn_ruhroh_running != 0) {
2461                 i++;
2462                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2463                        "Waiting for ruhroh thread to terminate\n");
2464                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
2465         }
2466
2467         /* Flag threads to terminate */
2468         kgnilnd_data.kgn_shutdown = 1;
2469
2470         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2471                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2472
2473                 /* should clear all the MDDs */
2474                 kgnilnd_unmap_fma_blocks(dev);
2475
2476                 kgnilnd_schedule_device(dev);
2477                 wake_up(&dev->gnd_dgram_waitq);
2478                 wake_up(&dev->gnd_dgping_waitq);
2479                 LASSERT(list_empty(&dev->gnd_connd_peers));
2480         }
2481
2482         spin_lock(&kgnilnd_data.kgn_reaper_lock);
2483         wake_up(&kgnilnd_data.kgn_reaper_waitq);
2484         spin_unlock(&kgnilnd_data.kgn_reaper_lock);
2485
2486         if (atomic_read(&kgnilnd_data.kgn_nthreads))
2487                 kgnilnd_wakeup_rca_thread();
2488
2489         /* Wait for threads to exit */
2490         i = 2;
2491         while (atomic_read(&kgnilnd_data.kgn_nthreads) != 0) {
2492                 i++;
2493                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2494                        "Waiting for %d threads to terminate\n",
2495                        atomic_read(&kgnilnd_data.kgn_nthreads));
2496                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
2497         }
2498
2499         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2500                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2501
2502         if (kgnilnd_data.kgn_peers != NULL) {
2503                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
2504                         LASSERT(list_empty(&kgnilnd_data.kgn_peers[i]));
2505
2506                 CFS_FREE_PTR_ARRAY(kgnilnd_data.kgn_peers,
2507                                    *kgnilnd_tunables.kgn_peer_hash_size);
2508         }
2509
2510         down_write(&kgnilnd_data.kgn_net_rw_sem);
2511         if (kgnilnd_data.kgn_nets != NULL) {
2512                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++)
2513                         LASSERT(list_empty(&kgnilnd_data.kgn_nets[i]));
2514
2515                 CFS_FREE_PTR_ARRAY(kgnilnd_data.kgn_nets,
2516                                    *kgnilnd_tunables.kgn_net_hash_size);
2517         }
2518         up_write(&kgnilnd_data.kgn_net_rw_sem);
2519
2520         LASSERTF(atomic_read(&kgnilnd_data.kgn_nconns) == 0,
2521                 "conns left %d\n", atomic_read(&kgnilnd_data.kgn_nconns));
2522
2523         if (kgnilnd_data.kgn_conns != NULL) {
2524                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
2525                         LASSERT(list_empty(&kgnilnd_data.kgn_conns[i]));
2526
2527                 CFS_FREE_PTR_ARRAY(kgnilnd_data.kgn_conns,
2528                                    *kgnilnd_tunables.kgn_peer_hash_size);
2529         }
2530
2531         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2532                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2533                 kgnilnd_dev_fini(dev);
2534
2535                 LASSERTF(atomic_read(&dev->gnd_ndgrams) == 0,
2536                         "dgrams left %d\n", atomic_read(&dev->gnd_ndgrams));
2537
2538                 if (dev->gnd_dgrams != NULL) {
2539                         for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size;
2540                              j++)
2541                                 LASSERT(list_empty(&dev->gnd_dgrams[j]));
2542
2543                         CFS_FREE_PTR_ARRAY(dev->gnd_dgrams,
2544                                            *kgnilnd_tunables.kgn_peer_hash_size);
2545                 }
2546
2547                 kgnilnd_free_phys_fmablk(dev);
2548         }

        if (kgnilnd_data.kgn_mbox_cache != NULL)
                kmem_cache_destroy(kgnilnd_data.kgn_mbox_cache);

        if (kgnilnd_data.kgn_rx_cache != NULL)
                kmem_cache_destroy(kgnilnd_data.kgn_rx_cache);

        if (kgnilnd_data.kgn_tx_cache != NULL)
                kmem_cache_destroy(kgnilnd_data.kgn_tx_cache);

        if (kgnilnd_data.kgn_tx_phys_cache != NULL)
                kmem_cache_destroy(kgnilnd_data.kgn_tx_phys_cache);

        if (kgnilnd_data.kgn_dgram_cache != NULL)
                kmem_cache_destroy(kgnilnd_data.kgn_dgram_cache);

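        /* kfree(NULL) is a no-op, so the loop below is safe even if startup
         * failed partway through allocating the checksum mapping pages */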
        if (kgnilnd_data.kgn_cksum_map_pages != NULL) {
                for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++)
                        kfree(kgnilnd_data.kgn_cksum_map_pages[i]);
                kfree(kgnilnd_data.kgn_cksum_map_pages);
        }

        CDEBUG(D_MALLOC, "after NAL cleanup: kmem %lld\n",
               libcfs_kmem_read());

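        /* flag the NAL as fully torn down before dropping the module
         * reference; this pairs with the reference taken when the base state
         * was brought up (assumption: the matching try_module_get() lives in
         * kgnilnd_base_startup, as is the usual pattern) */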
        kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
        module_put(THIS_MODULE);

        EXIT;
}

int
kgnilnd_startup(struct lnet_ni *ni)
{
        int               rc, devno;
        kgn_net_t        *net;
        ENTRY;

        LASSERTF(ni->ni_net->net_lnd == &the_kgnilnd,
                "bad LND 0x%p != the_kgnilnd @ 0x%p\n",
                ni->ni_net->net_lnd, &the_kgnilnd);

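        /* only the first NI pays for base startup; subsequent NIs reuse the
         * shared base state brought up here */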
        if (kgnilnd_data.kgn_init == GNILND_INIT_NOTHING) {
                rc = kgnilnd_base_startup();
                if (rc != 0)
                        RETURN(rc);
        }

        /* Serialize with shutdown. */
        mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);

        LIBCFS_ALLOC(net, sizeof(*net));
        if (net == NULL) {
                CERROR("could not allocate net for new interface instance\n");
                /* no need to cleanup the CDM... */
                GOTO(failed, rc = -ENOMEM);
        }
        INIT_LIST_HEAD(&net->gnn_list);
        ni->ni_data = net;
        net->gnn_ni = ni;
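        /* honour per-net tunables from the LNet configuration when set; only
         * fall back to the module-wide credit tunables otherwise */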
        if (!ni->ni_net->net_tunables_set) {
                ni->ni_net->net_tunables.lct_max_tx_credits =
                        *kgnilnd_tunables.kgn_credits;
                ni->ni_net->net_tunables.lct_peer_tx_credits =
                        *kgnilnd_tunables.kgn_peer_credits;
        }

        if (!ni->ni_interface) {
                rc = lnet_ni_add_interface(ni, "ipogif0");
                if (rc < 0)
                        CWARN("gnilnd failed to allocate ni_interface\n");
        }

        if (*kgnilnd_tunables.kgn_peer_health) {
                int     fudge;
                int     timeout;
                /* give this a bit of leeway - we don't have a hard timeout
                 * as we only check timeouts periodically - see comment in kgnilnd_reaper */
                fudge = (GNILND_TO2KA(*kgnilnd_tunables.kgn_timeout) / GNILND_REAPER_NCHECKS);
                timeout = *kgnilnd_tunables.kgn_timeout + fudge;

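                /* three cases for the peer_timeout tunable (default -1 when
                 * unset): a value at or above the padded timeout is honoured
                 * as-is; a value that was explicitly set (> -1) but below the
                 * floor is rejected, since LNet would give up on peers before
                 * gnilnd's own timeout machinery could; otherwise fall back
                 * to the padded timeout.  For example, with kgn_timeout=60
                 * the floor is 60 plus the reaper fudge, so an explicit
                 * peer_timeout=30 would be refused (illustrative values) */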
                if (*kgnilnd_tunables.kgn_peer_timeout >= timeout) {
                        ni->ni_net->net_tunables.lct_peer_timeout =
                                 *kgnilnd_tunables.kgn_peer_timeout;
                } else if (*kgnilnd_tunables.kgn_peer_timeout > -1) {
                        LCONSOLE_ERROR("Peer_timeout is set to %d but needs to be >= %d\n",
                                        *kgnilnd_tunables.kgn_peer_timeout,
                                        timeout);
                        ni->ni_data = NULL;
                        LIBCFS_FREE(net, sizeof(*net));
                        GOTO(failed, rc = -EINVAL);
                } else {
                        ni->ni_net->net_tunables.lct_peer_timeout = timeout;
                }

                LCONSOLE_INFO("Enabling LNet peer health for gnilnd, timeout %ds\n",
                              ni->ni_net->net_tunables.lct_peer_timeout);
        }

        atomic_set(&net->gnn_refcount, 1);

        /* if we have multiple devices, spread the nets around */
        net->gnn_netnum = LNET_NETNUM(LNET_NID_NET(&ni->ni_nid));

        devno = LNET_NID_NET(&ni->ni_nid) % GNILND_MAXDEVS;
        net->gnn_dev = &kgnilnd_data.kgn_devices[devno];
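        /* e.g. with GNILND_MAXDEVS == 2, even net numbers would map to
         * device 0 and odd ones to device 1 (illustrative only - when
         * GNILND_MAXDEVS is 1 the modulo always selects device 0) */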

        /* allocate a 'dummy' cdm for datagram use. We can only have a single
         * datagram between a nid:inst_id and nid2:inst_id. The fake cdm
         * gives us an additional inst_id to use, allowing the datagrams to
         * flow like rivers of honey and beer */

        /* the instance id for the cdm is the NETNUM offset by MAXDEVS -
         * ensuring we'll have a unique id */

        ni->ni_nid.nid_addr[0] =
                cpu_to_be32(LNET_NIDADDR(net->gnn_dev->gnd_nid));
        CDEBUG(D_NET, "adding net %p nid=%s on dev %d\n",
                net, libcfs_nidstr(&ni->ni_nid), net->gnn_dev->gnd_id);
        /* until the gnn_list is set, we need to clean up ourselves, as
         * kgnilnd_shutdown would just get confused */

        down_write(&kgnilnd_data.kgn_net_rw_sem);
        list_add_tail(&net->gnn_list, kgnilnd_netnum2netlist(net->gnn_netnum));
        up_write(&kgnilnd_data.kgn_net_rw_sem);

        /* we need a separate thread to call probe_wait_by_id until
         * we get a function callback notifier from kgni */
        mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
        RETURN(0);
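        /* error path: kgnilnd_shutdown() tolerates ni->ni_data == NULL, so
         * it is safe to call even when the net was never allocated */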
 failed:
        mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
        kgnilnd_shutdown(ni);
        RETURN(rc);
}

void
kgnilnd_shutdown(struct lnet_ni *ni)
{
        kgn_net_t     *net = ni->ni_data;
        int           i;
        int           rc;
        ENTRY;

        CFS_RACE(CFS_FAIL_GNI_SR_DOWN_RACE);

        LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_ALL,
                "init %d\n", kgnilnd_data.kgn_init);

        /* Serialize with startup. */
        mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
        CDEBUG(D_MALLOC, "before NAL cleanup: kmem %lld\n",
               libcfs_kmem_read());

        if (net == NULL) {
                CERROR("got NULL net for ni %p\n", ni);
                GOTO(out, rc = -EINVAL);
        }

        LASSERTF(ni == net->gnn_ni,
                "ni %p gnn_ni %p\n", ni, net->gnn_ni);

        ni->ni_data = NULL;

        LASSERT(!net->gnn_shutdown);
        LASSERTF(atomic_read(&net->gnn_refcount) != 0,
                "net %p refcount %d\n",
                 net, atomic_read(&net->gnn_refcount));

        if (!list_empty(&net->gnn_list)) {
                /* serialize with peer creation */
                down_write(&kgnilnd_data.kgn_net_rw_sem);
                net->gnn_shutdown = 1;
                up_write(&kgnilnd_data.kgn_net_rw_sem);

                kgnilnd_cancel_net_dgrams(net);

                kgnilnd_del_conn_or_peer(net, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);

                /* if we are quiesced, need to wake up - we need those threads
                 * alive to release peers, etc */
                if (GNILND_IS_QUIESCED) {
                        set_mb(kgnilnd_data.kgn_quiesce_trigger, GNILND_QUIESCE_IDLE);
                        kgnilnd_quiesce_wait("shutdown");
                }

                kgnilnd_wait_for_canceled_dgrams(net->gnn_dev);

                /* Wait until the net's refcount drops to 1, then release the
                 * final reference, which is ours; this ensures everything
                 * else is done with the net before we free it.
                 */
                i = 4;
                while (atomic_read(&net->gnn_refcount) != 1) {
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
                                "Waiting for %d references to clear on net %d\n",
                                atomic_read(&net->gnn_refcount),
                                net->gnn_netnum);
                        schedule_timeout_uninterruptible(cfs_time_seconds(1));
                }

                /* release ref from kgnilnd_startup */
                kgnilnd_net_decref(net);
                /* serialize with reaper and conn_task looping */
                down_write(&kgnilnd_data.kgn_net_rw_sem);
                list_del_init(&net->gnn_list);
                up_write(&kgnilnd_data.kgn_net_rw_sem);

        }

        /* not locking, this can't race with writers */
        LASSERTF(atomic_read(&net->gnn_refcount) == 0,
                "net %p refcount %d\n",
                 net, atomic_read(&net->gnn_refcount));
        LIBCFS_FREE(net, sizeof(*net));

out:
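        /* last one out turns off the lights: if every bucket in the net hash
         * is now empty, this was the final net, so tear down the shared base
         * state as well */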
        down_read(&kgnilnd_data.kgn_net_rw_sem);
        for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                if (!list_empty(&kgnilnd_data.kgn_nets[i])) {
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        break;
                }

                if (i == *kgnilnd_tunables.kgn_net_hash_size - 1) {
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        kgnilnd_base_shutdown();
                }
        }
        CDEBUG(D_MALLOC, "after NAL cleanup: kmem %lld\n",
               libcfs_kmem_read());

        mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
        EXIT;
}

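/* module teardown runs in the reverse order of kgnilnd_init(): unregister
 * from LNet first so no new startup calls can arrive, then remove the proc
 * and sysctl entries */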
static void __exit kgnilnd_exit(void)
{
        lnet_unregister_lnd(&the_kgnilnd);
        kgnilnd_proc_fini();
        kgnilnd_remove_sysctl();
}

static int __init kgnilnd_init(void)
{
        int    rc;

        rc = kgnilnd_tunables_init();
        if (rc != 0)
                return rc;

        LCONSOLE_INFO("Lustre: kgnilnd build version: "LUSTRE_VERSION_STRING"\n");

        kgnilnd_insert_sysctl();
        kgnilnd_proc_init();

        lnet_register_lnd(&the_kgnilnd);

        return 0;
}

MODULE_AUTHOR("Cray, Inc. <nic@cray.com>");
MODULE_DESCRIPTION("Gemini LNet Network Driver");
MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");

module_init(kgnilnd_init);
module_exit(kgnilnd_exit);