lnet/klnds/gnilnd/gnilnd.c
/*
 * Copyright (C) 2012 Cray, Inc.
 *
 * Copyright (c) 2013, 2017, Intel Corporation.
 *
 *   Author: Nic Henke <nic@cray.com>
 *   Author: James Shimek <jshimek@cray.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */
#include "gnilnd.h"

/* Primary entry points from LNET.  There are no guarantees against reentrance. */
const struct lnet_lnd the_kgnilnd = {
        .lnd_type       = GNILND,
        .lnd_startup    = kgnilnd_startup,
        .lnd_shutdown   = kgnilnd_shutdown,
        .lnd_ctl        = kgnilnd_ctl,
        .lnd_send       = kgnilnd_send,
        .lnd_recv       = kgnilnd_recv,
        .lnd_eager_recv = kgnilnd_eager_recv,
};

kgn_data_t      kgnilnd_data;

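/* Spawn a named kernel thread and account for it in kgn_nthreads */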
int
kgnilnd_thread_start(int(*fn)(void *arg), void *arg, char *name, int id)
{
        struct task_struct *thrd;

        thrd = kthread_run(fn, arg, "%s_%02d", name, id);
        if (IS_ERR(thrd))
                return PTR_ERR(thrd);

        atomic_inc(&kgnilnd_data.kgn_nthreads);
        return 0;
}

/* bind scheduler threads to cpus */
int
kgnilnd_start_sd_threads(void)
{
        int cpu;
        int i = 0;
        struct task_struct *task;

        for_each_online_cpu(cpu) {
                /* don't bind to cpu 0 - all interrupts are processed here */
                if (cpu == 0)
                        continue;

                task = kthread_create(kgnilnd_scheduler, (void *)((long)i),
                                      "%s_%02d", "kgnilnd_sd", i);
                if (!IS_ERR(task)) {
                        kthread_bind(task, cpu);
                        wake_up_process(task);
                } else {
                        CERROR("Can't spawn gnilnd scheduler[%d] %ld\n", i,
                                PTR_ERR(task));
                        return PTR_ERR(task);
                }
                atomic_inc(&kgnilnd_data.kgn_nthreads);

                if (++i >= *kgnilnd_tunables.kgn_sched_threads)
                        break;
        }

        return 0;
}

/* needs write_lock on kgn_peer_conn_lock */
int
kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
        kgn_conn_t         *conn, *cnxt;
        int                 loopback;
        int                 count = 0;

        loopback = (peer->gnp_nid ==
                    lnet_nid_to_nid4(&peer->gnp_net->gnn_ni->ni_nid));

        list_for_each_entry_safe(conn, cnxt, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                if (conn == newconn)
                        continue;

                if (conn->gnc_device != newconn->gnc_device)
                        continue;

                /* This is a two-connection loopback - one talking to the other */
                if (loopback &&
                    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
                    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp) {
                        CDEBUG(D_NET, "skipping prune of %p, "
                                "loopback and matching stamps"
                                " connstamp %llu(%llu)"
                                " peerstamp %llu(%llu)\n",
                                conn, newconn->gnc_my_connstamp,
                                conn->gnc_peer_connstamp,
                                newconn->gnc_peer_connstamp,
                                conn->gnc_my_connstamp);
                        continue;
                }

                if (conn->gnc_peerstamp != newconn->gnc_peerstamp) {
                        LASSERTF(conn->gnc_peerstamp < newconn->gnc_peerstamp,
                                "conn 0x%p peerstamp %llu >= "
                                "newconn 0x%p peerstamp %llu\n",
                                conn, conn->gnc_peerstamp,
                                newconn, newconn->gnc_peerstamp);

                        CDEBUG(D_NET, "Closing stale conn nid: %s "
                               " peerstamp:%#llx(%#llx)\n",
                               libcfs_nid2str(peer->gnp_nid),
                               conn->gnc_peerstamp, newconn->gnc_peerstamp);
                } else {
                        LASSERTF(conn->gnc_peer_connstamp < newconn->gnc_peer_connstamp,
                                "conn 0x%p peer_connstamp %llu >= "
                                "newconn 0x%p peer_connstamp %llu\n",
                                conn, conn->gnc_peer_connstamp,
                                newconn, newconn->gnc_peer_connstamp);

                        CDEBUG(D_NET, "Closing stale conn nid: %s"
                               " connstamp:%llu(%llu)\n",
                               libcfs_nid2str(peer->gnp_nid),
                               conn->gnc_peer_connstamp, newconn->gnc_peer_connstamp);
                }

                count++;
                kgnilnd_close_conn_locked(conn, -ESTALE);
        }

        if (count != 0)
                CWARN("Closed %d stale conns to %s\n", count, libcfs_nid2str(peer->gnp_nid));

        RETURN(count);
}

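/* Check whether newconn duplicates an existing conn for this peer.
 * Returns 0 if it is not a dup, nonzero if it is: 1 - newconn carries
 * an older peerstamp, 2 - an older peer connstamp, 3 - identical
 * stamps (the peer is misbehaving).  Caller must hold
 * kgn_peer_conn_lock. */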
int
kgnilnd_conn_isdup_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
        kgn_conn_t       *conn;
        int               loopback;
        ENTRY;

        loopback = (peer->gnp_nid ==
                    lnet_nid_to_nid4(&peer->gnp_net->gnn_ni->ni_nid));

        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                CDEBUG(D_NET, "checking conn 0x%p for peer %s"
                        " lo %d new %llu existing %llu"
                        " new peer %llu existing peer %llu"
                        " new dev %p existing dev %p\n",
                        conn, libcfs_nid2str(peer->gnp_nid),
                        loopback,
                        newconn->gnc_peerstamp, conn->gnc_peerstamp,
                        newconn->gnc_peer_connstamp, conn->gnc_peer_connstamp,
                        newconn->gnc_device, conn->gnc_device);

                /* conn is in the process of closing */
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                /* 'newconn' is from an earlier version of 'peer'!!! */
                if (newconn->gnc_peerstamp < conn->gnc_peerstamp)
                        RETURN(1);

                /* 'conn' is from an earlier version of 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->gnc_peerstamp > conn->gnc_peerstamp)
                        continue;

                /* Different devices are OK */
                if (conn->gnc_device != newconn->gnc_device)
                        continue;

                /* It's me connecting to myself */
                if (loopback &&
                    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
                    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp)
                        continue;

                /* 'newconn' is an earlier connection from 'peer'!!! */
                if (newconn->gnc_peer_connstamp < conn->gnc_peer_connstamp)
                        RETURN(2);

                /* 'conn' is an earlier connection from 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->gnc_peer_connstamp > conn->gnc_peer_connstamp)
                        continue;

                /* 'newconn' has the SAME connection stamp; 'peer' isn't
                 * playing the game... */
                RETURN(3);
        }

        RETURN(0);
}

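/* Allocate and initialize a conn on the given device, assigning it a
 * unique CQ id and a GNI endpoint.  On success the caller holds the
 * initial ref plus one taken for EP cancellation. */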
int
kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
{
        kgn_conn_t      *conn;
        gni_return_t    rrc;
        int             rc = 0;

        LASSERT(!in_interrupt());
        atomic_inc(&kgnilnd_data.kgn_nconns);

        /* divide by 2 to allow for complete reset and immediate reconnect */
        if (atomic_read(&kgnilnd_data.kgn_nconns) >= GNILND_MAX_CQID/2) {
                CERROR("Too many conns are live: %d > %d\n",
                        atomic_read(&kgnilnd_data.kgn_nconns), GNILND_MAX_CQID/2);
                atomic_dec(&kgnilnd_data.kgn_nconns);
                return -E2BIG;
        }

        LIBCFS_ALLOC(conn, sizeof(*conn));
        if (conn == NULL) {
                atomic_dec(&kgnilnd_data.kgn_nconns);
                return -ENOMEM;
        }

        conn->gnc_tx_ref_table =
                kgnilnd_vzalloc(GNILND_MAX_MSG_ID * sizeof(void *));
        if (conn->gnc_tx_ref_table == NULL) {
                CERROR("Can't allocate conn tx_ref_table\n");
                GOTO(failed, rc = -ENOMEM);
        }

        mutex_init(&conn->gnc_smsg_mutex);
        mutex_init(&conn->gnc_rdma_mutex);
        atomic_set(&conn->gnc_refcount, 1);
        atomic_set(&conn->gnc_reaper_noop, 0);
        atomic_set(&conn->gnc_sched_noop, 0);
        atomic_set(&conn->gnc_tx_in_use, 0);
        INIT_LIST_HEAD(&conn->gnc_list);
        INIT_LIST_HEAD(&conn->gnc_hashlist);
        INIT_LIST_HEAD(&conn->gnc_schedlist);
        INIT_LIST_HEAD(&conn->gnc_fmaq);
        INIT_LIST_HEAD(&conn->gnc_mdd_list);
        INIT_LIST_HEAD(&conn->gnc_delaylist);
        spin_lock_init(&conn->gnc_list_lock);
        spin_lock_init(&conn->gnc_tx_lock);
        conn->gnc_magic = GNILND_CONN_MAGIC;

        /* set tx id to nearly the end to make sure we find wrapping
         * issues soon */
        conn->gnc_next_tx = (int) GNILND_MAX_MSG_ID - 10;

        /* if this fails, we have conflicts and MAX_TX is too large */
        BUILD_BUG_ON(GNILND_MAX_MSG_ID >= GNILND_MSGID_CLOSE);

        /* get a new unique CQ id for this conn */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        conn->gnc_my_connstamp = kgnilnd_data.kgn_connstamp++;
        conn->gnc_cqid = kgnilnd_get_cqid_locked();
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        if (conn->gnc_cqid == 0) {
                CERROR("Could not allocate unique CQ ID for conn 0x%p\n", conn);
                GOTO(failed, rc = -E2BIG);
        }

        CDEBUG(D_NET, "alloc cqid %u for conn 0x%p\n",
                conn->gnc_cqid, conn);

        /* need to be set before gnc_ephandle to allow kgnilnd_destroy_conn_ep to
         * check context */
        conn->gnc_device = dev;

        conn->gnc_timeout = max(*kgnilnd_tunables.kgn_timeout,
                                 GNILND_MIN_TIMEOUT);
        kgnilnd_update_reaper_timeout(conn->gnc_timeout);

        /* this is the ep_handle for doing SMSG & BTE */
        mutex_lock(&dev->gnd_cq_mutex);
        rrc = kgnilnd_ep_create(dev->gnd_handle, dev->gnd_snd_fma_cqh,
                                &conn->gnc_ephandle);
        mutex_unlock(&dev->gnd_cq_mutex);
        if (rrc != GNI_RC_SUCCESS)
                GOTO(failed, rc = -ENETDOWN);

        CDEBUG(D_NET, "created conn 0x%p ep_hndl 0x%p\n",
               conn, conn->gnc_ephandle);

        /* add ref for EP canceling */
        kgnilnd_conn_addref(conn);
        atomic_inc(&dev->gnd_neps);

        *connp = conn;
        return 0;

failed:
        atomic_dec(&kgnilnd_data.kgn_nconns);
        kgnilnd_vfree(conn->gnc_tx_ref_table,
                      GNILND_MAX_MSG_ID * sizeof(void *));
        LIBCFS_FREE(conn, sizeof(*conn));
        return rc;
}

/* needs to be called with kgn_peer_conn_lock held (read or write) */
kgn_conn_t *
kgnilnd_find_conn_locked(kgn_peer_t *peer)
{
        kgn_conn_t      *conn = NULL;

        /* if we are in reset, this conn is going to die soon */
        if (unlikely(kgnilnd_data.kgn_in_reset)) {
                RETURN(NULL);
        }

        /* just return the first ESTABLISHED connection */
        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                /* kgnilnd_finish_connect doesn't put connections on the
                 * peer list until they are actually established */
                LASSERTF(conn->gnc_state >= GNILND_CONN_ESTABLISHED,
                        "found conn %p state %s on peer %p (%s)\n",
                        conn, kgnilnd_conn_state2str(conn), peer,
                        libcfs_nid2str(peer->gnp_nid));
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                RETURN(conn);
        }
        RETURN(NULL);
}

/* needs write_lock on kgn_peer_conn_lock held */
kgn_conn_t *
kgnilnd_find_or_create_conn_locked(kgn_peer_t *peer)
{
        kgn_device_t    *dev = peer->gnp_net->gnn_dev;
        kgn_conn_t      *conn;

        conn = kgnilnd_find_conn_locked(peer);
        if (conn != NULL) {
                return conn;
        }

        /* if the peer was previously connecting, check if we should
         * trigger another connection attempt yet. */
        if (time_before(jiffies, peer->gnp_reconnect_time)) {
                return NULL;
        }

        /* This check prevents us from creating a new connection to a peer while we are
         * still in the process of closing an existing connection to the peer.
         */
        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_ephandle != NULL) {
                        CDEBUG(D_NET, "Not connecting: non-NULL ephandle found for peer 0x%p->%s\n",
                                peer, libcfs_nid2str(peer->gnp_nid));
                        return NULL;
                }
        }

        if (peer->gnp_connecting != GNILND_PEER_IDLE) {
                /* only start a new connection if the peer is IDLE;
                 * any other state means a connect is already in flight */
                return NULL;
        }

        CDEBUG(D_NET, "starting connect to %s\n",
                libcfs_nid2str(peer->gnp_nid));
        peer->gnp_connecting = GNILND_PEER_CONNECT;
        kgnilnd_peer_addref(peer); /* extra ref for connd */

        spin_lock(&dev->gnd_connd_lock);
        list_add_tail(&peer->gnp_connd_list, &dev->gnd_connd_peers);
        spin_unlock(&dev->gnd_connd_lock);

        kgnilnd_schedule_dgram(dev);
        CDEBUG(D_NETTRACE, "scheduling new connect\n");

        return NULL;
}

/* Caller is responsible for deciding if/when to call this */
void
kgnilnd_destroy_conn_ep(kgn_conn_t *conn)
{
        gni_return_t    rrc;
        gni_ep_handle_t tmp_ep;

        /* only tear the EP down if we actually initialized it; swapping
         * in NULL tells kgnilnd_destroy_conn to leave it alone */
        tmp_ep = xchg(&conn->gnc_ephandle, NULL);
        if (tmp_ep != NULL) {
                /* we never re-use the EP, so unbind is not needed */
                mutex_lock(&conn->gnc_device->gnd_cq_mutex);
                rrc = kgnilnd_ep_destroy(tmp_ep);
                mutex_unlock(&conn->gnc_device->gnd_cq_mutex);

                /* if this fails, it could hork up kgni smsg retransmit and others
                 * since we could free the SMSG mbox memory, etc. */
                LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d conn 0x%p ep 0x%p\n",
                         rrc, conn, conn->gnc_ephandle);

                atomic_dec(&conn->gnc_device->gnd_neps);

                /* clear out count added in kgnilnd_close_conn_locked
                 * conn will have a peer once it hits finish_connect, where it
                 * is the first spot we'll mark it ESTABLISHED as well */
                if (conn->gnc_peer) {
                        kgnilnd_admin_decref(conn->gnc_peer->gnp_dirty_eps);
                }

                /* drop ref for EP */
                kgnilnd_conn_decref(conn);
        }
}

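/* Final teardown of a conn: the last ref is gone, the EP is already
 * destroyed and the conn is off every list.  Release the mbox, drop
 * the peer ref and free the structure. */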
void
kgnilnd_destroy_conn(kgn_conn_t *conn)
{
        LASSERTF(!in_interrupt() &&
                !conn->gnc_scheduled &&
                !conn->gnc_in_purgatory &&
                conn->gnc_ephandle == NULL &&
                list_empty(&conn->gnc_list) &&
                list_empty(&conn->gnc_hashlist) &&
                list_empty(&conn->gnc_schedlist) &&
                list_empty(&conn->gnc_mdd_list) &&
                list_empty(&conn->gnc_delaylist) &&
                conn->gnc_magic == GNILND_CONN_MAGIC,
                "conn 0x%p->%s IRQ %d sched %d purg %d ep 0x%p Mg %d lists %d/%d/%d/%d/%d\n",
                conn, conn->gnc_peer ? libcfs_nid2str(conn->gnc_peer->gnp_nid)
                                     : "<?>",
                !!in_interrupt(), conn->gnc_scheduled,
                conn->gnc_in_purgatory,
                conn->gnc_ephandle,
                conn->gnc_magic,
                list_empty(&conn->gnc_list),
                list_empty(&conn->gnc_hashlist),
                list_empty(&conn->gnc_schedlist),
                list_empty(&conn->gnc_mdd_list),
                list_empty(&conn->gnc_delaylist));

        /* Tripping these is especially bad, as it means we have items on the
         *  lists that didn't keep their refcount on the connection - or
         *  somebody evil released their own */
        LASSERTF(list_empty(&conn->gnc_fmaq) &&
                 atomic_read(&conn->gnc_nlive_fma) == 0 &&
                 atomic_read(&conn->gnc_nlive_rdma) == 0,
                 "conn 0x%p fmaq %d@0x%p nfma %d nrdma %d\n",
                 conn, kgnilnd_count_list(&conn->gnc_fmaq), &conn->gnc_fmaq,
                 atomic_read(&conn->gnc_nlive_fma), atomic_read(&conn->gnc_nlive_rdma));

        CDEBUG(D_NET, "destroying conn %p ephandle %p error %d\n",
                conn, conn->gnc_ephandle, conn->gnc_error);

        /* We are freeing this memory; remove the magic value from the connection */
        conn->gnc_magic = 0;

        /* if there is an FMA blk left here, we'll tear it down */
        if (conn->gnc_fma_blk) {
                if (conn->gnc_peer) {
                        kgn_mbox_info_t *mbox;

                        mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
                        mbox->mbx_prev_nid = conn->gnc_peer->gnp_nid;
                }
                kgnilnd_release_mbox(conn, 0);
        }

        if (conn->gnc_peer != NULL)
                kgnilnd_peer_decref(conn->gnc_peer);

        if (conn->gnc_tx_ref_table != NULL) {
                kgnilnd_vfree(conn->gnc_tx_ref_table,
                              GNILND_MAX_MSG_ID * sizeof(void *));
        }

        LIBCFS_FREE(conn, sizeof(*conn));
        atomic_dec(&kgnilnd_data.kgn_nconns);
}

/* peer_alive and peer_notify done in the style of the o2iblnd */
void
kgnilnd_peer_alive(kgn_peer_t *peer)
{
        time64_t now = ktime_get_seconds();

        set_mb(peer->gnp_last_alive, now);
}

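/* Tell LNet about a change in a peer's health.  Down notifications
 * are suppressed while a live conn exists, a reconnect is in flight,
 * the errno is a clean one, or the stack is in reset, since the peer
 * is expected to recover on its own in those cases. */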
void
kgnilnd_peer_notify(kgn_peer_t *peer, int error, int alive)
{
        int                     tell_lnet = 0;
        int                     nnets = 0;
        int                     rc;
        int                     i, j;
        kgn_conn_t             *conn;
        kgn_net_t             **nets;
        kgn_net_t              *net;

        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DONT_NOTIFY))
                return;

        /* Tell LNet we are giving up on this peer - but only
         * if it isn't already reconnected or trying to reconnect */
        read_lock(&kgnilnd_data.kgn_peer_conn_lock);

        /* use kgnilnd_find_conn_locked to avoid any conns in the process of being nuked
         *
         * don't tell LNet if we are in reset - we assume that everyone will be able to
         * reconnect just fine
         */
        conn = kgnilnd_find_conn_locked(peer);

        CDEBUG(D_NETTRACE, "peer 0x%p->%s connecting %d conn 0x%p, rst %d error %d\n",
               peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting, conn,
               kgnilnd_data.kgn_in_reset, error);

        if (((peer->gnp_connecting == GNILND_PEER_IDLE) &&
            (conn == NULL) &&
            (!kgnilnd_data.kgn_in_reset) &&
            (!kgnilnd_conn_clean_errno(error))) || alive) {
                tell_lnet = 1;
        }

        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        if (!tell_lnet) {
                /* short circuit if we don't need to notify LNet */
                return;
        }

        rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
        if (rc) {
                /* skip the notification entirely if the trylock failed -
                 * LNet is in shutdown or something else is going on */
                for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                        list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
                                /* if gnn_shutdown is set for any net, shutdown is in progress; just return */
                                if (net->gnn_shutdown) {
                                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                                        return;
                                }
                                nnets++;
                        }
                }

                if (nnets == 0) {
                        /* shutdown in progress most likely */
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        return;
                }

                CFS_ALLOC_PTR_ARRAY(nets, nnets);
                if (nets == NULL) {
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        CERROR("Failed to allocate nets[%d]\n", nnets);
                        return;
                }

                j = 0;
                for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                        list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
                                nets[j] = net;
                                kgnilnd_net_addref(net);
                                j++;
                        }
                }
                up_read(&kgnilnd_data.kgn_net_rw_sem);

                for (i = 0; i < nnets; i++) {
                        lnet_nid_t peer_nid;

                        net = nets[i];

                        peer_nid = kgnilnd_lnd2lnetnid(
                                lnet_nid_to_nid4(&net->gnn_ni->ni_nid),
                                peer->gnp_nid);

                        CDEBUG(D_NET, "peer 0x%p->%s last_alive %lld (%llds ago)\n",
                                peer, libcfs_nid2str(peer_nid), peer->gnp_last_alive,
                                ktime_get_seconds() - peer->gnp_last_alive);

                        lnet_notify(net->gnn_ni, peer_nid, alive, true,
                                    peer->gnp_last_alive);

                        kgnilnd_net_decref(net);
                }

                CFS_FREE_PTR_ARRAY(nets, nnets);
        }
}

/* need write_lock on kgn_peer_conn_lock */
void
kgnilnd_close_conn_locked(kgn_conn_t *conn, int error)
{
        kgn_peer_t        *peer = conn->gnc_peer;
        ENTRY;

        LASSERT(!in_interrupt());

        /* store error for tx completion */
        conn->gnc_error = error;
        peer->gnp_last_errno = error;

        /* use real error from peer if possible */
        if (error == -ECONNRESET) {
                error = conn->gnc_peer_error;
        }

        /* if we NETERROR, make sure it is rate limited */
        if (!kgnilnd_conn_clean_errno(error) &&
            peer->gnp_state != GNILND_PEER_DOWN) {
                CNETERR("closing conn to %s: error %d\n",
                       libcfs_nid2str(peer->gnp_nid), error);
        } else {
                CDEBUG(D_NET, "closing conn to %s: error %d\n",
                       libcfs_nid2str(peer->gnp_nid), error);
        }

        LASSERTF(conn->gnc_state == GNILND_CONN_ESTABLISHED,
                "conn %p to %s with bogus state %s\n", conn,
                libcfs_nid2str(conn->gnc_peer->gnp_nid),
                kgnilnd_conn_state2str(conn));
        LASSERT(!list_empty(&conn->gnc_hashlist));
        LASSERT(!list_empty(&conn->gnc_list));

        /* mark peer count here so any place the EP gets destroyed will
         * open up the peer count so that a new ESTABLISHED conn is then free
         * to send new messages -- sending before the previous EPs are destroyed
         * could end up with messages on the network for the old conn _after_
         * the new conn and break the mbox safety protocol */
        kgnilnd_admin_addref(conn->gnc_peer->gnp_dirty_eps);

        /* Remove from conn hash table: no new callbacks */
        list_del_init(&conn->gnc_hashlist);
        kgnilnd_data.kgn_conn_version++;
        kgnilnd_conn_decref(conn);

        /* if we are in reset, go right to CLOSED as there is no scheduler
         * thread to move from CLOSING to CLOSED */
        if (unlikely(kgnilnd_data.kgn_in_reset)) {
                conn->gnc_state = GNILND_CONN_CLOSED;
        } else {
                conn->gnc_state = GNILND_CONN_CLOSING;
        }

        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RDMA_CQ_ERROR)) {
                msleep_interruptible(MSEC_PER_SEC);
        }

        /* leave on peer->gnp_conns to make sure we don't let the reaper
         * or others try to unlink this peer until the conn is fully
         * processed for closing */
        if (kgnilnd_check_purgatory_conn(conn)) {
                kgnilnd_add_purgatory_locked(conn, conn->gnc_peer);
        }

        /* Reset RX timeout to ensure we wait for an incoming CLOSE
         * for the full timeout.  If we get a CLOSE we know the
         * peer has stopped all RDMA.  Otherwise if we wait for
         * the full timeout we can also be sure all RDMA has stopped. */
        conn->gnc_last_rx = conn->gnc_last_rx_cq = jiffies;
        mb();

        /* schedule sending CLOSE - if we are in quiesce, this adds to
         * gnd_ready_conns and allows us to find it in quiesce processing */
        kgnilnd_schedule_conn(conn);

        EXIT;
}

void
kgnilnd_close_conn(kgn_conn_t *conn, int error)
{
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        /* need to check the state here - this call is racy and we don't
         * know the state until after the lock is grabbed */
        if (conn->gnc_state == GNILND_CONN_ESTABLISHED) {
                kgnilnd_close_conn_locked(conn, error);
        }
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
}

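/* Runs once a conn has reached CLOSED: cancel every TX still in the
 * ref table, destroy the EP, move the conn to DONE and drop it from
 * the peer's conn list unless it is being held in purgatory. */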
void
kgnilnd_complete_closed_conn(kgn_conn_t *conn)
{
        LIST_HEAD(sinners);
        kgn_tx_t               *tx, *txn;
        int                     nlive = 0;
        int                     nrdma = 0;
        int                     nq_rdma = 0;
        int                     logmsg;
        ENTRY;

        /* Dump log on cksum error - wait until complete phase to let
         * RX of error happen */
        if (*kgnilnd_tunables.kgn_checksum_dump &&
            (conn != NULL && conn->gnc_peer_error == -ENOKEY)) {
                libcfs_debug_dumplog();
        }

        /* _CLOSED set in kgnilnd_process_fmaq once we decide to
         * send the CLOSE or not */
        LASSERTF(conn->gnc_state == GNILND_CONN_CLOSED,
                 "conn 0x%p->%s with bad state %s\n",
                 conn, conn->gnc_peer ?
                        libcfs_nid2str(conn->gnc_peer->gnp_nid) :
                        "<?>",
                 kgnilnd_conn_state2str(conn));

        LASSERT(list_empty(&conn->gnc_hashlist));
        /* We shouldn't be on the delay list; a conn is only added to
         * it during a retransmit, and retransmits occur only within
         * scheduler threads.
         */
        LASSERT(list_empty(&conn->gnc_delaylist));

        /* we've sent the close, start nuking */
        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SCHEDULE_COMPLETE))
                kgnilnd_schedule_conn(conn);

        if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
                CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
                                "done; attempting to recover conn 0x%p "
                                "scheduled %d function: %s line: %d\n", conn,
                                conn->gnc_scheduled, conn->gnc_sched_caller,
                                conn->gnc_sched_line);
                RETURN_EXIT;
        }

        /* we don't use lists to track things that we can get out of the
         * tx_ref table... */

        /* need to hold locks for tx_list_state, sampling it is too racy:
         * - the lock actually protects tx != NULL, but we can't take the proper
         *   lock until we check tx_list_state, which would be too late and
         *   we could have the TX change under us.
         * gnd_rdmaq_lock and gnd_lock are not used together, so taking both
         * should be fine */
        spin_lock(&conn->gnc_device->gnd_rdmaq_lock);
        spin_lock(&conn->gnc_device->gnd_lock);

        for (nrdma = 0; nrdma < GNILND_MAX_MSG_ID; nrdma++) {
                tx = conn->gnc_tx_ref_table[nrdma];

                if (tx != NULL) {
                        /* only print the first error and if not CLOSE, we often don't see
                         * CQ events for that by the time we get here... and really don't care */
                        if (nlive || tx->tx_msg.gnm_type == GNILND_MSG_CLOSE)
                                tx->tx_state |= GNILND_TX_QUIET_ERROR;
                        nlive++;
                        GNIDBG_TX(D_NET, tx, "cleaning up on close, nlive %d", nlive);

                        /* don't worry about gnc_lock here as nobody else should be
                         * touching this conn */
                        kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
                        list_add_tail(&tx->tx_list, &sinners);
                }
        }
        spin_unlock(&conn->gnc_device->gnd_lock);
        spin_unlock(&conn->gnc_device->gnd_rdmaq_lock);

        /* nobody should have marked this as needing scheduling after
         * we called close - so only ref should be us handling it */
        if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
                CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
                                "done; attempting to recover conn 0x%p "
                                "scheduled %d function: %s line: %d\n", conn,
                                conn->gnc_scheduled, conn->gnc_sched_caller,
                                conn->gnc_sched_line);
        }

        /* now reset a few to actual counters... */
        nrdma = atomic_read(&conn->gnc_nlive_rdma);
        nq_rdma = atomic_read(&conn->gnc_nq_rdma);

        if (!list_empty(&sinners)) {
                list_for_each_entry_safe(tx, txn, &sinners, tx_list) {
                        /* clear tx_list to make tx_add_list_locked happy */
                        list_del_init(&tx->tx_list);
                        /* The error codes determine if we hold onto the MDD */
                        kgnilnd_tx_done(tx, conn->gnc_error);
                }
        }

        logmsg = (nlive + nrdma + nq_rdma);

        if (logmsg) {
                int level = conn->gnc_peer->gnp_state == GNILND_PEER_UP ?
                                D_NETERROR : D_NET;

                CDEBUG(level, "Closed conn 0x%p->%s (errno %d,"
                        " peer errno %d): canceled %d TX, %d/%d RDMA\n",
                        conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                        conn->gnc_error, conn->gnc_peer_error,
                        nlive, nq_rdma, nrdma);
        }

        kgnilnd_destroy_conn_ep(conn);

        /* Bug 765042 - race this with completing a new conn to same peer - we need
         * finish_connect to detach purgatory before we can do it ourselves here */
        CFS_RACE(CFS_FAIL_GNI_FINISH_PURG);

        /* now it is safe to remove from peer list - anyone looking at
         * gnp_conns now is free to unlink if not on purgatory */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);

        conn->gnc_state = GNILND_CONN_DONE;

        /* Decrement counter if we are marked by del_conn_or_peers for closing
         */
        if (conn->gnc_needs_closing)
                kgnilnd_admin_decref(kgnilnd_data.kgn_npending_conns);

        /* Remove from peer's list of valid connections if it's not in purgatory */
        if (!conn->gnc_in_purgatory) {
                list_del_init(&conn->gnc_list);
                /* Lose the peer's reference on the conn */
                kgnilnd_conn_decref(conn);
        }

        /* NB - only unlinking if we set pending in del_peer_locked from admin or
         * shutdown */
        if (kgnilnd_peer_active(conn->gnc_peer) &&
            conn->gnc_peer->gnp_pending_unlink &&
            kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
                kgnilnd_unlink_peer_locked(conn->gnc_peer);
        }

        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        /* I'm telling Mommy! - use peer_error if they initiated close */
        kgnilnd_peer_notify(conn->gnc_peer,
                            conn->gnc_error == -ECONNRESET ?
                            conn->gnc_peer_error : conn->gnc_error, 0);

        EXIT;
}

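/* Apply the GNI parameters carried in an incoming connection request:
 * bind the EP to the remote host, set the CQ event data and initialize
 * SMSG from the local and remote mailbox attributes. */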
int
kgnilnd_set_conn_params(kgn_dgram_t *dgram)
{
        kgn_conn_t             *conn = dgram->gndg_conn;
        kgn_connreq_t          *connreq = &dgram->gndg_conn_in;
        kgn_gniparams_t        *rem_param = &connreq->gncr_gnparams;
        gni_return_t            rrc;
        int                     rc = 0;
        gni_smsg_attr_t        *remote = &connreq->gncr_gnparams.gnpr_smsg_attr;

        /* set timeout vals in conn early so we can use them for the NAK */

        /* use max of the requested and our timeout, peer will do the same */
        conn->gnc_timeout = max(conn->gnc_timeout, connreq->gncr_timeout);

        /* only ep_bind really mucks around with the CQ */
        /* only ep_bind if we are not connecting to ourselves and the dstnid is
         * not a wildcard.  This check is necessary as an ep can only be bound
         * once, so we must make sure we don't bind when already bound.
         */
        if (connreq->gncr_dstnid != LNET_NID_ANY &&
            dgram->gndg_conn_out.gncr_dstnid != connreq->gncr_srcnid) {
                mutex_lock(&conn->gnc_device->gnd_cq_mutex);
                rrc = kgnilnd_ep_bind(conn->gnc_ephandle,
                        connreq->gncr_gnparams.gnpr_host_id,
                        conn->gnc_cqid);
                mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                if (rrc != GNI_RC_SUCCESS) {
                        rc = -ECONNABORTED;
                        goto return_out;
                }
        }

        rrc = kgnilnd_ep_set_eventdata(conn->gnc_ephandle, conn->gnc_cqid,
                         connreq->gncr_gnparams.gnpr_cqid);
        if (rrc != GNI_RC_SUCCESS) {
                rc = -ECONNABORTED;
                goto cleanup_out;
        }

        /* Initialize SMSG */
        rrc = kgnilnd_smsg_init(conn->gnc_ephandle, &conn->gnpr_smsg_attr,
                        &connreq->gncr_gnparams.gnpr_smsg_attr);
        if (unlikely(rrc == GNI_RC_INVALID_PARAM)) {
                gni_smsg_attr_t *local = &conn->gnpr_smsg_attr;

                /* help folks figure out if there is a tunable off, etc. */
                LCONSOLE_ERROR("SMSG attribute mismatch. Data from local/remote:"
                               " type %d/%d msg_maxsize %u/%u"
                               " mbox_maxcredit %u/%u. Please check kgni"
                               " logs for further data\n",
                               local->msg_type, remote->msg_type,
                               local->msg_maxsize, remote->msg_maxsize,
                               local->mbox_maxcredit, remote->mbox_maxcredit);
        }
        if (rrc != GNI_RC_SUCCESS) {
                rc = -ECONNABORTED;
                goto cleanup_out;
        }

        /* log this for help in debugging SMSG buffer re-use */
        CDEBUG(D_NET, "conn %p src %s dst %s smsg %p acquired"
                " local cqid %u SMSG %p->%u hndl %#llx.%#llx"
                " remote cqid %u SMSG %p->%u hndl %#llx.%#llx\n",
                conn, libcfs_nid2str(connreq->gncr_srcnid),
                libcfs_nid2str(connreq->gncr_dstnid),
                &conn->gnpr_smsg_attr,
                conn->gnc_cqid,
                conn->gnpr_smsg_attr.msg_buffer,
                conn->gnpr_smsg_attr.mbox_offset,
                conn->gnpr_smsg_attr.mem_hndl.qword1,
                conn->gnpr_smsg_attr.mem_hndl.qword2,
                rem_param->gnpr_cqid,
                rem_param->gnpr_smsg_attr.msg_buffer,
                rem_param->gnpr_smsg_attr.mbox_offset,
                rem_param->gnpr_smsg_attr.mem_hndl.qword1,
                rem_param->gnpr_smsg_attr.mem_hndl.qword2);

        conn->gnc_peerstamp = connreq->gncr_peerstamp;
        conn->gnc_peer_connstamp = connreq->gncr_connstamp;
        conn->remote_mbox_addr = (void *)((char *)remote->msg_buffer + remote->mbox_offset);

        /* We update the reaper timeout once we have a valid conn and timeout */
        kgnilnd_update_reaper_timeout(GNILND_TO2KA(conn->gnc_timeout));

        return 0;

cleanup_out:
        rrc = kgnilnd_ep_unbind(conn->gnc_ephandle);
        /* not sure I can just let this fly */
        LASSERTF(rrc == GNI_RC_SUCCESS,
                "bad rc from gni_ep_unbind trying to cleanup: %d\n", rrc);

return_out:
        LASSERTF(rc != 0, "SOFTWARE BUG: rc == 0\n");
        CERROR("Error setting connection params from %s: %d\n",
               libcfs_nid2str(connreq->gncr_srcnid), rc);
        return rc;
}

/* needs down_read on kgn_net_rw_sem held from before this call until
 * after the write_lock on kgn_peer_conn_lock - this ensures we stay sane
 * with kgnilnd_shutdown - it'll get the sem and set shutdown, then get the
 * kgn_peer_conn_lock to start del_peer'ing. If we hold the sem until after
 * kgn_peer_conn_lock is held, we guarantee that nobody calls
 * kgnilnd_add_peer_locked without checking gnn_shutdown */
int
kgnilnd_create_peer_safe(kgn_peer_t **peerp,
                         lnet_nid_t nid,
                         kgn_net_t *net,
                         int node_state)
{
        kgn_peer_t      *peer;
        int             rc;

        LASSERT(nid != LNET_NID_ANY);

        /* We don't pass the net around in the dgram anymore, so this is where
         * we look it up.  The lookup works unless we are in shutdown or the
         * nid has an invalid net; either way an error code is returned.
         *
         * If the net passed in is not NULL we can use it directly, which
         * avoids the lookup when the caller already has the net in hand.
         */
        if (net == NULL) {
                rc = kgnilnd_find_net(nid, &net);
                if (rc < 0)
                        return rc;
        } else {
                /* kgnilnd_find_net adds a reference on the net; since we are
                 * not calling it, take the reference manually so the net
                 * references are correct when tearing down the net
                 */
                kgnilnd_net_addref(net);
        }

        LIBCFS_ALLOC(peer, sizeof(*peer));
        if (peer == NULL) {
                kgnilnd_net_decref(net);
                return -ENOMEM;
        }
        peer->gnp_nid = nid;
        peer->gnp_state = node_state;

        /* translate from nid to nic addr & store */
        rc = kgnilnd_nid_to_nicaddrs(LNET_NIDADDR(nid), 1, &peer->gnp_host_id);
        if (rc <= 0) {
                kgnilnd_net_decref(net);
                LIBCFS_FREE(peer, sizeof(*peer));
                return -ESRCH;
        }
        CDEBUG(D_NET, "peer 0x%p->%s -> NIC 0x%x\n", peer,
                libcfs_nid2str(nid), peer->gnp_host_id);

        atomic_set(&peer->gnp_refcount, 1);     /* 1 ref for caller */
        atomic_set(&peer->gnp_dirty_eps, 0);

        INIT_LIST_HEAD(&peer->gnp_list);
        INIT_LIST_HEAD(&peer->gnp_connd_list);
        INIT_LIST_HEAD(&peer->gnp_conns);
        INIT_LIST_HEAD(&peer->gnp_tx_queue);

        /* the first reconnect should happen immediately, so we leave
         * gnp_reconnect_interval set to 0 */

        LASSERTF(net != NULL, "peer 0x%p->%s with NULL net\n",
                 peer, libcfs_nid2str(nid));

        /* must have kgn_net_rw_sem held for this...  */
        if (net->gnn_shutdown) {
                /* shutdown has started already */
                kgnilnd_net_decref(net);
                LIBCFS_FREE(peer, sizeof(*peer));
                return -ESHUTDOWN;
        }

        peer->gnp_net = net;

        atomic_inc(&kgnilnd_data.kgn_npeers);

        *peerp = peer;
        return 0;
}

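/* Free a peer once its refcount has dropped to zero; by then every
 * conn, queued TX and connd reference is gone. */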
void
kgnilnd_destroy_peer(kgn_peer_t *peer)
{
        CDEBUG(D_NET, "peer %s %p deleted\n",
               libcfs_nid2str(peer->gnp_nid), peer);
        LASSERTF(atomic_read(&peer->gnp_refcount) == 0,
                 "peer 0x%p->%s refs %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid),
                 atomic_read(&peer->gnp_refcount));
        LASSERTF(atomic_read(&peer->gnp_dirty_eps) == 0,
                 "peer 0x%p->%s dirty eps %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid),
                 atomic_read(&peer->gnp_dirty_eps));
        LASSERTF(peer->gnp_net != NULL, "peer %p (%s) with NULL net\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(!kgnilnd_peer_active(peer),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE || peer->gnp_connecting == GNILND_PEER_KILL,
                 "peer 0x%p->%s, connecting %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
        LASSERTF(list_empty(&peer->gnp_conns),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_tx_queue),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_connd_list),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));

        /* NB a peer's connections keep a reference on their peer until
         * they are destroyed, so we can be assured that _all_ state to do
         * with this peer has been cleaned up when its refcount drops to
         * zero. */

        atomic_dec(&kgnilnd_data.kgn_npeers);
        kgnilnd_net_decref(peer->gnp_net);

        LIBCFS_FREE(peer, sizeof(*peer));
}

/* the conn might not have made it all the way through to a connected
 * state - but we need to hold in purgatory any conn that a remote peer
 * might have seen through a posted dgram as well */
void
kgnilnd_add_purgatory_locked(kgn_conn_t *conn, kgn_peer_t *peer)
{
        kgn_mbox_info_t *mbox = NULL;
        ENTRY;

        /* NB - the caller should own the conn by removing it from the
         * scheduler thread when finishing the close */

        LASSERTF(peer != NULL, "conn %p with NULL peer\n", conn);

        /* If this is still true, need to add the calls to unlink back in and
         * figure out how to close the hole on loopback conns */
        LASSERTF(kgnilnd_peer_active(peer), "can't use inactive peer %s (%p)"
                " we'll never recover the resources\n",
                libcfs_nid2str(peer->gnp_nid), peer);

        CDEBUG(D_NET, "conn %p peer %p dev %p\n", conn, peer,
                conn->gnc_device);

        LASSERTF(conn->gnc_in_purgatory == 0,
                "Conn already in purgatory\n");
        conn->gnc_in_purgatory = 1;

        mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
        mbox->mbx_prev_purg_nid = peer->gnp_nid;
        mbox->mbx_add_purgatory = jiffies;
        kgnilnd_release_mbox(conn, 1);

        LASSERTF(list_empty(&conn->gnc_mdd_list),
                "conn 0x%p->%s with active purgatory hold MDD %d\n",
                conn, libcfs_nid2str(peer->gnp_nid),
                kgnilnd_count_list(&conn->gnc_mdd_list));

        EXIT;
}

/* Instead of detaching everything from purgatory here, we just mark the
 * conn as needing detach; the next time the reaper checks the conn it
 * will detach it.
 * Calling function requires write_lock held on kgn_peer_conn_lock
 */
void
kgnilnd_mark_for_detach_purgatory_all_locked(kgn_peer_t *peer)
{
        kgn_conn_t       *conn;

        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_in_purgatory && !conn->gnc_needs_detach) {
                        conn->gnc_needs_detach = 1;
                        kgnilnd_admin_addref(kgnilnd_data.kgn_npending_detach);
                }
        }
}

/* Calling function needs a write_lock held on kgn_peer_conn_lock */
void
kgnilnd_detach_purgatory_locked(kgn_conn_t *conn, struct list_head *conn_list)
{
        kgn_mbox_info_t *mbox = NULL;

        /* if needed, add the conn purgatory data to the list passed in */
        if (conn->gnc_in_purgatory) {
                CDEBUG(D_NET, "peer %p->%s purg_conn %p@%s mdd_list #tx %d\n",
                        conn->gnc_peer, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                        conn, kgnilnd_conn_state2str(conn),
                        kgnilnd_count_list(&conn->gnc_mdd_list));

                mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
                mbox->mbx_detach_of_purgatory = jiffies;

                /* conn->gnc_list is the entry point on peer->gnp_conns, so
                 * detaching it here removes it from the list of 'valid' peer
                 * connections.  We put the conn onto a list of conns to pass
                 * to kgnilnd_release_purgatory_locked(); the caller of
                 * kgnilnd_detach_purgatory_locked() now owns that conn, since
                 * it is no longer on the peer's conn list.
                 */
                list_del_init(&conn->gnc_list);

                /* NB - only unlinking if we set pending in del_peer_locked from admin or
                 * shutdown */
                if (kgnilnd_peer_active(conn->gnc_peer) &&
                    conn->gnc_peer->gnp_pending_unlink &&
                    kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
                        kgnilnd_unlink_peer_locked(conn->gnc_peer);
                }

                /* The reaper will not call detach unless the conn is fully
                 * through kgnilnd_complete_closed_conn.  If the conn is not in
                 * a DONE state we are somehow attempting to detach before the
                 * conn has been fully cleaned up.  Detaching while the conn is
                 * still closing would leave an orphaned connection with a
                 * valid ep_handle that is not on any peer.
                 */
                LASSERTF(conn->gnc_state == GNILND_CONN_DONE, "Conn in invalid state %p@%s\n",
                                conn, kgnilnd_conn_state2str(conn));

                /* move from peer to the delayed release list */
                list_add_tail(&conn->gnc_list, conn_list);
        }
}

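/* Release every conn on conn_list from purgatory: give back the mbox,
 * release any held MDDs, notify LNet and drop the purgatory ref on
 * the conn. */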
void
kgnilnd_release_purgatory_list(struct list_head *conn_list)
{
        kgn_device_t            *dev;
        kgn_conn_t              *conn, *connN;
        kgn_mdd_purgatory_t     *gmp, *gmpN;

        list_for_each_entry_safe(conn, connN, conn_list, gnc_list) {
                dev = conn->gnc_device;

                kgnilnd_release_mbox(conn, -1);
                conn->gnc_in_purgatory = 0;

                list_del_init(&conn->gnc_list);

                /* gnc_needs_detach is set in kgnilnd_del_conn_or_peer. It is used to keep track
                 * of conns that have been marked for detach by kgnilnd_del_conn_or_peer.
                 * The function uses kgn_npending_detach to verify the conn has
                 * actually been detached.
                 */
                if (conn->gnc_needs_detach)
                        kgnilnd_admin_decref(kgnilnd_data.kgn_npending_detach);

                /* if this guy is really dead (we are doing release from reaper),
                 * make sure we tell LNet - if this is from other context,
                 * the checks in the function will prevent an errant
                 * notification */
                kgnilnd_peer_notify(conn->gnc_peer, conn->gnc_error, 0);

                list_for_each_entry_safe(gmp, gmpN, &conn->gnc_mdd_list,
                                         gmp_list) {
                        CDEBUG(D_NET,
                               "dev %p releasing held mdd %#llx.%#llx\n",
                               conn->gnc_device, gmp->gmp_map_key.qword1,
                               gmp->gmp_map_key.qword2);

                        atomic_dec(&dev->gnd_n_mdd_held);
                        kgnilnd_mem_mdd_release(conn->gnc_device->gnd_handle,
                                                &gmp->gmp_map_key);
                        /* ignoring the return code - if kgni/ghal can't find it
                         * it must be released already */

                        list_del_init(&gmp->gmp_list);
                        LIBCFS_FREE(gmp, sizeof(*gmp));
                }
                /* lose conn ref for purgatory */
                kgnilnd_conn_decref(conn);
        }
}

/* needs write_lock on kgnilnd_data.kgn_peer_conn_lock held */
void
kgnilnd_peer_increase_reconnect_locked(kgn_peer_t *peer)
{
        int current_to;

        current_to = peer->gnp_reconnect_interval;

        /* we'll try to reconnect fast the first time, then back-off */
        if (current_to == 0) {
                peer->gnp_reconnect_time = jiffies - 1;
                current_to = *kgnilnd_tunables.kgn_min_reconnect_interval;
        } else {
                peer->gnp_reconnect_time = jiffies + cfs_time_seconds(current_to);
                /* add 50% of min timeout & retry */
                current_to += *kgnilnd_tunables.kgn_min_reconnect_interval / 2;
        }

        current_to = min(current_to,
                         *kgnilnd_tunables.kgn_max_reconnect_interval);

        peer->gnp_reconnect_interval = current_to;
        CDEBUG(D_NET, "peer %s can reconnect at %lu interval %lu\n",
               libcfs_nid2str(peer->gnp_nid), peer->gnp_reconnect_time,
               peer->gnp_reconnect_interval);
}

/* needs kgnilnd_data.kgn_peer_conn_lock held */
kgn_peer_t *
kgnilnd_find_peer_locked(lnet_nid_t nid)
{
        struct list_head *peer_list = kgnilnd_nid2peerlist(nid);
        kgn_peer_t       *peer;

        /* Chopping nid down to only NIDADDR using LNET_NIDADDR so we only
         * have a single peer per device instead of a peer per nid/net combo.
         */
        list_for_each_entry(peer, peer_list, gnp_list) {
                if (LNET_NIDADDR(nid) != LNET_NIDADDR(peer->gnp_nid))
                        continue;

                CDEBUG(D_NET, "got peer [%p] -> %s c %d (%d)\n",
                       peer, libcfs_nid2str(nid),
                       peer->gnp_connecting,
                       atomic_read(&peer->gnp_refcount));
                return peer;
        }
        return NULL;
}

/* need write_lock on kgn_peer_conn_lock */
void
kgnilnd_unlink_peer_locked(kgn_peer_t *peer)
{
        LASSERTF(list_empty(&peer->gnp_conns),
                "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_tx_queue),
                "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(kgnilnd_peer_active(peer),
                "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        CDEBUG(D_NET, "unlinking peer 0x%p->%s\n",
                peer, libcfs_nid2str(peer->gnp_nid));

        list_del_init(&peer->gnp_list);
        kgnilnd_data.kgn_peer_version++;
        kgnilnd_admin_decref(kgnilnd_data.kgn_npending_unlink);
        /* lose peerlist's ref */
        kgnilnd_peer_decref(peer);
}

1317 int
1318 kgnilnd_get_peer_info(int index,
1319                       kgn_peer_t **found_peer,
1320                       lnet_nid_t *id, __u32 *nic_addr,
1321                       int *refcount, int *connecting)
1322 {
1323         kgn_peer_t        *peer;
1324         int               i;
1325         int               rc = -ENOENT;
1326
1327         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1328
1329         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1330                 list_for_each_entry(peer, &kgnilnd_data.kgn_peers[i], gnp_list) {
1331                         if (index-- > 0)
1332                                 continue;
1333
1334                         CDEBUG(D_NET, "found peer %p (%s) at index %d\n",
1335                                peer, libcfs_nid2str(peer->gnp_nid), index);
1336
1337                         *found_peer  = peer;
1338                         *id          = peer->gnp_nid;
1339                         *nic_addr    = peer->gnp_host_id;
1340                         *refcount    = atomic_read(&peer->gnp_refcount);
1341                         *connecting  = peer->gnp_connecting;
1342
1343                         rc = 0;
1344                         goto out;
1345                 }
1346         }
1347 out:
1348         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1349         if (rc)
1350                 CDEBUG(D_NET, "no gni peer at index %d\n", index);
1351         return rc;
1352 }
1353
1354 /* requires write_lock on kgn_peer_conn_lock held */
1355 void
1356 kgnilnd_add_peer_locked(lnet_nid_t nid, kgn_peer_t *new_stub_peer, kgn_peer_t **peerp)
1357 {
1358         kgn_peer_t        *peer, *peer2;
1359
1360         LASSERTF(new_stub_peer != NULL, "bad stub peer for nid %s\n",
1361                  libcfs_nid2str(nid));
1362
1363         peer2 = kgnilnd_find_peer_locked(nid);
1364         if (peer2 != NULL) {
1365                 /* A peer was created during the lock transition, so drop
1366                  * the new one we created */
1367                 kgnilnd_peer_decref(new_stub_peer);
1368                 peer = peer2;
1369         } else {
1370                 peer = new_stub_peer;
1371                 /* peer table takes existing ref on peer */
1372
1373                 LASSERTF(!kgnilnd_peer_active(peer),
1374                         "peer 0x%p->%s already in peer table\n",
1375                         peer, libcfs_nid2str(peer->gnp_nid));
1376                 list_add_tail(&peer->gnp_list,
1377                               kgnilnd_nid2peerlist(nid));
1378                 kgnilnd_data.kgn_peer_version++;
1379         }
1380
1381         LASSERTF(peer->gnp_net != NULL, "peer 0x%p->%s with NULL net\n",
1382                  peer, libcfs_nid2str(peer->gnp_nid));
1383         *peerp = peer;
1384 }

int
kgnilnd_add_peer(kgn_net_t *net, lnet_nid_t nid, kgn_peer_t **peerp)
{
	kgn_peer_t        *peer;
	int                rc;
	int                node_state;
	ENTRY;

	if (nid == LNET_NID_ANY)
		return -EINVAL;

	node_state = kgnilnd_get_node_state(LNET_NIDADDR(nid));

	/* NB - this will not block during normal operations -
	 * the only writer of this is in the startup/shutdown path. */
	rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
	if (!rc) {
		rc = -ESHUTDOWN;
		RETURN(rc);
	}
	rc = kgnilnd_create_peer_safe(&peer, nid, net, node_state);
	if (rc != 0) {
		up_read(&kgnilnd_data.kgn_net_rw_sem);
		RETURN(rc);
	}

	write_lock(&kgnilnd_data.kgn_peer_conn_lock);
	up_read(&kgnilnd_data.kgn_net_rw_sem);

	kgnilnd_add_peer_locked(nid, peer, peerp);

	CDEBUG(D_NET, "peer 0x%p->%s connecting %d\n",
	       *peerp, libcfs_nid2str((*peerp)->gnp_nid),
	       (*peerp)->gnp_connecting);

	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
	RETURN(0);
}
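
/*
 * Note on the locking pattern above (a summary, not new behaviour): the
 * stub peer is allocated by kgnilnd_create_peer_safe() while holding only
 * the net read semaphore; the write lock on kgn_peer_conn_lock is taken
 * afterwards, so kgnilnd_add_peer_locked() must re-check for a peer that
 * raced in during that window and drop the redundant stub if it finds one.
 */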

/* needs write_lock on kgn_peer_conn_lock */
void
kgnilnd_cancel_peer_connect_locked(kgn_peer_t *peer, struct list_head *zombies)
{
	kgn_tx_t        *tx, *txn;

	/* we do care about the state of gnp_connecting - we could be
	 * between reconnect attempts, so try to find the dgram and cancel
	 * the TX anyway. If we are in the process of posting, DON'T do
	 * anything; once the post fails or succeeds we can nuke the
	 * connect attempt. We have no idea how far into kgnilnd_post_dgram
	 * we are, so we can't attempt to cancel until that function is
	 * done.
	 */

	/* make sure peer isn't in process of connecting or waiting for
	 * connect */
	spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
	if (!list_empty(&peer->gnp_connd_list)) {
		list_del_init(&peer->gnp_connd_list);
		/* remove connd ref */
		kgnilnd_peer_decref(peer);
	}
	spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);

	if (peer->gnp_connecting == GNILND_PEER_POSTING ||
	    peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH) {
		peer->gnp_connecting = GNILND_PEER_NEEDS_DEATH;
		/* we are in the process of posting right now - the xchg in
		 * the posting path will see NEEDS_DEATH and cancel the
		 * connect for us, so we are finished for now */
	} else {
		/* no need for an exchange - we hold the peer lock and the
		 * connect attempt is ready for us to nuke */
		LASSERTF(peer->gnp_connecting != GNILND_PEER_POSTING,
			"Peer in invalid state 0x%p->%s, connecting %d\n",
			peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
		peer->gnp_connecting = GNILND_PEER_IDLE;
		set_mb(peer->gnp_last_dgram_errno, -ETIMEDOUT);
		kgnilnd_find_and_cancel_dgram(peer->gnp_net->gnn_dev,
					      peer->gnp_nid);
	}

	/* The least we can do is nuke the tx's no matter what.... */
	list_for_each_entry_safe(tx, txn, &peer->gnp_tx_queue, tx_list) {
		kgnilnd_tx_del_state_locked(tx, peer, NULL,
					   GNILND_TX_ALLOCD);
		list_add_tail(&tx->tx_list, zombies);
	}
}
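
/*
 * Quick reference for the gnp_connecting states touched above (a summary
 * of the logic in this file, not a normative list): GNILND_PEER_IDLE means
 * no connect attempt is outstanding, GNILND_PEER_POSTING means a datagram
 * post is in flight and cannot be cancelled yet, and
 * GNILND_PEER_NEEDS_DEATH asks the posting thread to cancel the attempt
 * itself once the post completes.
 */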

/* needs write_lock on kgn_peer_conn_lock */
void
kgnilnd_del_peer_locked(kgn_peer_t *peer, int error)
{
	/* this peer could be passive and only held for purgatory,
	 * take a ref to ensure it doesn't disappear in this function */
	kgnilnd_peer_addref(peer);

	CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);

	/* if purgatory release cleared it out, don't try again */
	if (kgnilnd_peer_active(peer)) {
		/* always do this to allow kgnilnd_start_connect and
		 * kgnilnd_finish_connect to catch this before they
		 * wrap up their operations */
		if (kgnilnd_can_unlink_peer_locked(peer)) {
			/* already released purgatory, so only active
			 * conns hold it */
			kgnilnd_unlink_peer_locked(peer);
		} else {
			kgnilnd_close_peer_conns_locked(peer, error);
			/* peer unlinks itself when last conn is closed */
		}
	}

	/* we are done, release back to the wild */
	kgnilnd_peer_decref(peer);
}

int
kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
			  int error)
{
	LIST_HEAD(souls);
	LIST_HEAD(zombies);
	kgn_peer_t *peer, *pnxt;
	int                     lo;
	int                     hi;
	int                     i;
	int                     rc = -ENOENT;

	write_lock(&kgnilnd_data.kgn_peer_conn_lock);

	if (nid != LNET_NID_ANY)
		lo = hi = kgnilnd_nid2peerlist(nid) - kgnilnd_data.kgn_peers;
	else {
		lo = 0;
		hi = *kgnilnd_tunables.kgn_peer_hash_size - 1;
		/* wildcards always succeed */
		rc = 0;
	}

	for (i = lo; i <= hi; i++) {
		list_for_each_entry_safe(peer, pnxt, &kgnilnd_data.kgn_peers[i],
					 gnp_list) {
			LASSERTF(peer->gnp_net != NULL,
				"peer %p (%s) with NULL net\n",
				 peer, libcfs_nid2str(peer->gnp_nid));

			if (net != NULL && peer->gnp_net != net)
				continue;

			if (!(nid == LNET_NID_ANY ||
			      LNET_NIDADDR(peer->gnp_nid) == LNET_NIDADDR(nid)))
				continue;

			/* In both cases, we want to stop any in-flight
			 * connect attempts */
			kgnilnd_cancel_peer_connect_locked(peer, &zombies);

			switch (command) {
			case GNILND_DEL_CONN:
				kgnilnd_close_peer_conns_locked(peer, error);
				break;
			case GNILND_DEL_PEER:
				peer->gnp_pending_unlink = 1;
				kgnilnd_admin_addref(kgnilnd_data.kgn_npending_unlink);
				kgnilnd_mark_for_detach_purgatory_all_locked(peer);
				kgnilnd_del_peer_locked(peer, error);
				break;
			case GNILND_CLEAR_PURGATORY:
				/* Mark everything ready for detach; the
				 * reaper will clean up once we release the
				 * kgn_peer_conn_lock
				 */
				kgnilnd_mark_for_detach_purgatory_all_locked(peer);
				peer->gnp_last_errno = -EISCONN;
				/* clear reconnect state so the peer can
				 * reconnect soon */
				peer->gnp_reconnect_time = 0;
				peer->gnp_reconnect_interval = 0;
				break;
			default:
				CERROR("bad command %d\n", command);
				LBUG();
			}
			/* we matched something */
			rc = 0;
		}
	}

	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

	/* nuke peer TX */
	kgnilnd_txlist_done(&zombies, error);

	/* This function normally does not return until the commands it
	 * initiated have completed, since they have to work their way
	 * through the other threads. In the case of shutdown, threads are
	 * not woken up until after this call is initiated, so we cannot
	 * wait and just return. The same applies to stack reset: we
	 * shouldn't wait, as the reset thread handles the closing.
	 */

	CFS_RACE(CFS_FAIL_GNI_RACE_RESET);

	if (error == -ENOTRECOVERABLE || error == -ESHUTDOWN) {
		return rc;
	}

	wait_var_event_warning(&kgnilnd_data,
			       !atomic_read(&kgnilnd_data.kgn_npending_conns) &&
			       !atomic_read(&kgnilnd_data.kgn_npending_detach) &&
			       !atomic_read(&kgnilnd_data.kgn_npending_unlink),
			       "Waiting on %d peers %d closes %d detaches\n",
				atomic_read(&kgnilnd_data.kgn_npending_unlink),
				atomic_read(&kgnilnd_data.kgn_npending_conns),
				atomic_read(&kgnilnd_data.kgn_npending_detach));

	return rc;
}
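
/*
 * For context (drawn from kgnilnd_ctl() below): GNILND_DEL_CONN backs
 * IOC_LIBCFS_CLOSE_CONNECTION with error -ENETRESET, GNILND_DEL_PEER backs
 * IOC_LIBCFS_DEL_PEER with -EUCLEAN, and GNILND_CLEAR_PURGATORY backs
 * IOC_LIBCFS_PUSH_CONNECTION; shutdown uses GNILND_DEL_PEER with
 * -ESHUTDOWN, one of the two error codes above that skip the final wait.
 */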

kgn_conn_t *
kgnilnd_get_conn_by_idx(int index)
{
	kgn_peer_t        *peer;
	kgn_conn_t        *conn;
	int                i;

	for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
		read_lock(&kgnilnd_data.kgn_peer_conn_lock);
		list_for_each_entry(peer, &kgnilnd_data.kgn_peers[i], gnp_list) {
			list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
				if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
					continue;

				if (index-- > 0)
					continue;

				CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
				       libcfs_nid2str(conn->gnc_peer->gnp_nid),
				       atomic_read(&conn->gnc_refcount));
				kgnilnd_conn_addref(conn);
				read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
				return conn;
			}
		}
		read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
	}

	return NULL;
}

int
kgnilnd_get_conn_info(kgn_peer_t *peer,
		      int *device_id, __u64 *peerstamp,
		      int *tx_seq, int *rx_seq,
		      int *fmaq_len, int *nfma, int *nrdma)
{
	kgn_conn_t        *conn;
	int               rc = 0;

	read_lock(&kgnilnd_data.kgn_peer_conn_lock);

	conn = kgnilnd_find_conn_locked(peer);
	if (conn == NULL) {
		rc = -ENOENT;
		goto out;
	}

	*device_id = conn->gnc_device->gnd_host_id;
	*peerstamp = conn->gnc_peerstamp;
	*tx_seq = atomic_read(&conn->gnc_tx_seq);
	*rx_seq = atomic_read(&conn->gnc_rx_seq);
	*fmaq_len = kgnilnd_count_list(&conn->gnc_fmaq);
	*nfma = atomic_read(&conn->gnc_nlive_fma);
	*nrdma = atomic_read(&conn->gnc_nlive_rdma);
out:
	read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
	return rc;
}

/* needs write_lock on kgn_peer_conn_lock */
int
kgnilnd_close_peer_conns_locked(kgn_peer_t *peer, int why)
{
	kgn_conn_t         *conn;
	struct list_head   *ctmp, *cnxt;
	int                 count = 0;

	list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
		conn = list_entry(ctmp, kgn_conn_t, gnc_list);

		if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
			continue;

		count++;
		/* we mark gnc_needs_closing and increment kgn_npending_conns
		 * so that kgnilnd_del_conn_or_peer can wait on the other
		 * threads closing and cleaning up the connection.
		 */
		if (!conn->gnc_needs_closing) {
			conn->gnc_needs_closing = 1;
			kgnilnd_admin_addref(kgnilnd_data.kgn_npending_conns);
		}
		kgnilnd_close_conn_locked(conn, why);
	}
	return count;
}

int
kgnilnd_report_node_state(lnet_nid_t nid, int down)
{
	int         rc;
	kgn_peer_t  *peer, *new_peer;
	LIST_HEAD(zombies);

	write_lock(&kgnilnd_data.kgn_peer_conn_lock);
	peer = kgnilnd_find_peer_locked(nid);

	if (peer == NULL) {
		int       i;
		int       found_net = 0;
		kgn_net_t *net;

		write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

		/* Don't add a peer for node up events */
		if (down == GNILND_PEER_UP)
			return 0;

		/* find any valid net - we don't care which one... */
		down_read(&kgnilnd_data.kgn_net_rw_sem);
		for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
			list_for_each_entry(net, &kgnilnd_data.kgn_nets[i],
					    gnn_list) {
				found_net = 1;
				break;
			}

			if (found_net) {
				break;
			}
		}
		up_read(&kgnilnd_data.kgn_net_rw_sem);

		if (!found_net) {
			CNETERR("Could not find a net for nid %lld\n", nid);
			return 1;
		}

		/* The nid passed in does not yet contain the net portion.
		 * Let's build it up now
		 */
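		/* for illustration (the values are invented): if the event
		 * carries the bare address 512 and the first net found is on
		 * gni1, LNET_MKNID() below yields the full NID 512@gni1, so
		 * the kgnilnd_add_peer()/kgnilnd_find_peer_locked() calls
		 * that follow see a well-formed NID */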
		nid = LNET_MKNID(LNET_NID_NET(&net->gnn_ni->ni_nid), nid);
		rc = kgnilnd_add_peer(net, nid, &new_peer);

		if (rc) {
			CNETERR("Could not add peer for nid %lld, rc %d\n",
				nid, rc);
			return 1;
		}

		write_lock(&kgnilnd_data.kgn_peer_conn_lock);
		peer = kgnilnd_find_peer_locked(nid);

		if (peer == NULL) {
			CNETERR("Could not find peer for nid %lld\n", nid);
			write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
			return 1;
		}
	}

	peer->gnp_state = down;

	if (down == GNILND_PEER_DOWN) {
		kgn_conn_t *conn;

		peer->gnp_down_event_time = jiffies;
		kgnilnd_cancel_peer_connect_locked(peer, &zombies);
		conn = kgnilnd_find_conn_locked(peer);

		if (conn != NULL) {
			kgnilnd_close_conn_locked(conn, -ENETRESET);
		}
	} else {
		peer->gnp_up_event_time = jiffies;
	}

	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

	if (down == GNILND_PEER_DOWN) {
		/* using ENETRESET so we don't get messages from
		 * kgnilnd_tx_done
		 */
		kgnilnd_txlist_done(&zombies, -ENETRESET);
		kgnilnd_peer_notify(peer, -ECONNRESET, 0);
		LCONSOLE_INFO("Received down event for nid %d\n",
			      LNET_NIDADDR(nid));
	}

	return 0;
}

int
kgnilnd_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
{
	struct libcfs_ioctl_data *data = arg;
	kgn_net_t                *net = ni->ni_data;
	int                       rc = -EINVAL;

	LASSERT(ni == net->gnn_ni);

	switch (cmd) {
	case IOC_LIBCFS_GET_PEER: {
		lnet_nid_t   nid = 0;
		kgn_peer_t  *peer = NULL;
		__u32 nic_addr = 0;
		__u64 peerstamp = 0;
		int peer_refcount = 0, peer_connecting = 0;
		int device_id = 0;
		int tx_seq = 0, rx_seq = 0;
		int fmaq_len = 0, nfma = 0, nrdma = 0;

		rc = kgnilnd_get_peer_info(data->ioc_count, &peer,
					   &nid, &nic_addr, &peer_refcount,
					   &peer_connecting);
		if (rc)
			break;

		/* Barf */
		/* LNET_MKNID is used to hide the multiplexing/demultiplexing
		 * of connections and peers from LNet. LNet assumes a conn
		 * and peer per net; LNET_MKNID/LNET_NIDADDR let us show LNet
		 * what it wants to see instead of the underlying network
		 * that is actually carrying the data.
		 */
		data->ioc_nid    = LNET_MKNID(LNET_NID_NET(&ni->ni_nid),
					      LNET_NIDADDR(nid));
		data->ioc_flags  = peer_connecting;
		data->ioc_count  = peer_refcount;

		rc = kgnilnd_get_conn_info(peer, &device_id, &peerstamp,
					   &tx_seq, &rx_seq, &fmaq_len,
					   &nfma, &nrdma);

		/* This is allowable - a persistent peer might not
		 * have a connection */
		if (rc) {
			/* flag to indicate we are not connected -
			 * need to print as such */
			data->ioc_flags |= (1<<16);
			rc = 0;
		} else {
			/* still barf */
			data->ioc_net = device_id;
			data->ioc_u64[0] = peerstamp;
			data->ioc_u32[0] = fmaq_len;
			data->ioc_u32[1] = nfma;
			data->ioc_u32[2] = tx_seq;
			data->ioc_u32[3] = rx_seq;
			data->ioc_u32[4] = nrdma;
		}
		break;
	}
	case IOC_LIBCFS_ADD_PEER: {
		/* just dummy value to allow using common interface */
		kgn_peer_t      *peer;
		rc = kgnilnd_add_peer(net, data->ioc_nid, &peer);
		break;
	}
	case IOC_LIBCFS_DEL_PEER: {
		/* a NULL net is passed in so it affects all peers in
		 * existence without regard to network, as the peer may not
		 * exist on the network LNET believes it to be on.
		 */
		rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
					      GNILND_DEL_PEER, -EUCLEAN);
		break;
	}
	case IOC_LIBCFS_GET_CONN: {
		kgn_conn_t *conn = kgnilnd_get_conn_by_idx(data->ioc_count);

		if (conn == NULL) {
			rc = -ENOENT;
		} else {
			rc = 0;
			/* LNET_MKNID is used to build the address LNET wants
			 * to see instead of the generic connection that is
			 * actually used to send the data
			 */
			data->ioc_nid    = LNET_MKNID(LNET_NID_NET(&ni->ni_nid),
						      LNET_NIDADDR(conn->gnc_peer->gnp_nid));
			data->ioc_u32[0] = conn->gnc_device->gnd_id;
			kgnilnd_conn_decref(conn);
		}
		break;
	}
	case IOC_LIBCFS_CLOSE_CONNECTION: {
		/* use error = -ENETRESET to indicate it was lctl disconnect */
		/* a NULL net is passed in so it affects all the nets, as the
		 * connection is virtual and may not exist on the network
		 * LNET believes it to be on.
		 */
		rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
					      GNILND_DEL_CONN, -ENETRESET);
		break;
	}
	case IOC_LIBCFS_PUSH_CONNECTION: {
		/* we use this to flush purgatory */
		rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
					      GNILND_CLEAR_PURGATORY, -EUCLEAN);
		break;
	}
	case IOC_LIBCFS_REGISTER_MYNID: {
		/* Ignore if this is a noop */
		if (data->ioc_nid == lnet_nid_to_nid4(&ni->ni_nid)) {
			rc = 0;
		} else {
			CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
			       libcfs_nid2str(data->ioc_nid),
			       libcfs_nidstr(&ni->ni_nid));
			rc = -EINVAL;
		}
		break;
	}
	}

	return rc;
}
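
/*
 * Dispatch summary for kgnilnd_ctl() above (descriptive only):
 * IOC_LIBCFS_GET_PEER and IOC_LIBCFS_GET_CONN enumerate by the
 * data->ioc_count index; IOC_LIBCFS_DEL_PEER, IOC_LIBCFS_CLOSE_CONNECTION
 * and IOC_LIBCFS_PUSH_CONNECTION all funnel into kgnilnd_del_conn_or_peer()
 * with different commands and error codes - e.g. an "lctl disconnect"
 * arrives here as IOC_LIBCFS_CLOSE_CONNECTION.
 */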

int
kgnilnd_dev_init(kgn_device_t *dev)
{
	gni_return_t      rrc;
	int               rc = 0;
	unsigned int      cq_size;
	ENTRY;

	/* the size of these CQs should be able to accommodate the outgoing
	 * RDMA and SMSG transactions.  Since we don't really know what we
	 * need here, we'll take credits * 2 * 3 to allow a bunch.
	 * We need to dig into this more with the performance work. */
	cq_size = *kgnilnd_tunables.kgn_credits * 2 * 3;
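	/* worked example (the value 256 is illustrative, not necessarily
	 * the actual default): with kgn_credits = 256,
	 * cq_size = 256 * 2 * 3 = 1536 CQ entries */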

	rrc = kgnilnd_cdm_create(dev->gnd_id, *kgnilnd_tunables.kgn_ptag,
				 *kgnilnd_tunables.kgn_pkey, 0,
				 &dev->gnd_domain);
	if (rrc != GNI_RC_SUCCESS) {
		CERROR("Can't create CDM %d (%d)\n", dev->gnd_id, rrc);
		GOTO(failed, rc = -ENODEV);
	}

	rrc = kgnilnd_cdm_attach(dev->gnd_domain, dev->gnd_id,
				 &dev->gnd_host_id, &dev->gnd_handle);
	if (rrc != GNI_RC_SUCCESS) {
		CERROR("Can't attach CDM to device %d (%d)\n",
			dev->gnd_id, rrc);
		GOTO(failed, rc = -ENODEV);
	}

	/* a bit gross, but not much we can do - Aries Sim doesn't have a
	 * hardcoded NIC/NID that we can use */
	rc = kgnilnd_setup_nic_translation(dev->gnd_host_id);
	if (rc != 0)
		GOTO(failed, rc = -ENODEV);

	/* only dev 0 gets the errors - no need to reset the stack twice
	 * - this works because we have a single PTAG; if we had more,
	 * we'd need to have multiple handlers */
	if (dev->gnd_id == 0) {
		rrc = kgnilnd_subscribe_errors(dev->gnd_handle,
						GNI_ERRMASK_CRITICAL |
						GNI_ERRMASK_UNKNOWN_TRANSACTION,
					      0, NULL, kgnilnd_critical_error,
					      &dev->gnd_err_handle);
		if (rrc != GNI_RC_SUCCESS) {
			CERROR("Can't subscribe for errors on device %d: rc %d\n",
				dev->gnd_id, rrc);
			GOTO(failed, rc = -ENODEV);
		}

		rc = kgnilnd_set_quiesce_callback(dev->gnd_handle,
						  kgnilnd_quiesce_end_callback);
		if (rc != GNI_RC_SUCCESS) {
			CERROR("Can't subscribe for quiesce callback on device %d: rc %d\n",
				dev->gnd_id, rc);
			GOTO(failed, rc = -ENODEV);
		}
	}

	rc = kgnilnd_nicaddr_to_nid(dev->gnd_host_id, &dev->gnd_nid);
	if (rc < 0) {
		/* log messages during startup */
		if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
			CERROR("couldn't translate host_id 0x%x to nid. rc %d\n",
				dev->gnd_host_id, rc);
		}
		GOTO(failed, rc = -ESRCH);
	}
	CDEBUG(D_NET, "NIC %x -> NID %d\n", dev->gnd_host_id, dev->gnd_nid);

	rrc = kgnilnd_cq_create(dev->gnd_handle, *kgnilnd_tunables.kgn_credits,
				0, kgnilnd_device_callback,
				dev->gnd_id, &dev->gnd_snd_rdma_cqh);
	if (rrc != GNI_RC_SUCCESS) {
		CERROR("Can't create rdma send cq size %u for device "
		       "%d (%d)\n", *kgnilnd_tunables.kgn_credits,
		       dev->gnd_id, rrc);
		GOTO(failed, rc = -EINVAL);
	}

	rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
			0, kgnilnd_device_callback, dev->gnd_id,
			&dev->gnd_snd_fma_cqh);
	if (rrc != GNI_RC_SUCCESS) {
		CERROR("Can't create fma send cq size %u for device %d (%d)\n",
		       cq_size, dev->gnd_id, rrc);
		GOTO(failed, rc = -EINVAL);
	}

	/* This one we size differently - overflows are possible and it needs to be
	 * sized based on machine size */
	rrc = kgnilnd_cq_create(dev->gnd_handle,
			*kgnilnd_tunables.kgn_fma_cq_size,
			0, kgnilnd_device_callback, dev->gnd_id,
			&dev->gnd_rcv_fma_cqh);
	if (rrc != GNI_RC_SUCCESS) {
		CERROR("Can't create fma cq size %d for device %d (%d)\n",
		       *kgnilnd_tunables.kgn_fma_cq_size, dev->gnd_id, rrc);
		GOTO(failed, rc = -EINVAL);
	}

	rrc = kgnilnd_register_smdd_buf(dev);
	if (rrc != GNI_RC_SUCCESS) {
		GOTO(failed, rc = -EINVAL);
	}

	RETURN(0);

failed:
	kgnilnd_dev_fini(dev);
	RETURN(rc);
}

void
kgnilnd_dev_fini(kgn_device_t *dev)
{
	gni_return_t rrc;
	ENTRY;

	/* At quiesce or rest time, need to loop through and clear gnd_ready_conns? */
	LASSERTF(list_empty(&dev->gnd_ready_conns) &&
		 list_empty(&dev->gnd_map_tx) &&
		 list_empty(&dev->gnd_rdmaq) &&
		 list_empty(&dev->gnd_delay_conns),
		 "dev 0x%p ready_conns %d@0x%p delay_conns %d@0x%p "
		 "map_tx %d@0x%p rdmaq %d@0x%p\n",
		 dev, kgnilnd_count_list(&dev->gnd_ready_conns), &dev->gnd_ready_conns,
		 kgnilnd_count_list(&dev->gnd_delay_conns), &dev->gnd_delay_conns,
		 kgnilnd_count_list(&dev->gnd_map_tx), &dev->gnd_map_tx,
		 kgnilnd_count_list(&dev->gnd_rdmaq), &dev->gnd_rdmaq);

	/* These should follow from tearing down all connections */
	LASSERTF(dev->gnd_map_nphys == 0 && dev->gnd_map_physnop == 0,
		"%d physical mappings of %d pages still mapped\n",
		 dev->gnd_map_nphys, dev->gnd_map_physnop);

	LASSERTF(atomic_read(&dev->gnd_n_mdd) == 0 &&
		 atomic_read(&dev->gnd_n_mdd_held) == 0 &&
		 atomic64_read(&dev->gnd_nbytes_map) == 0,
		 "%d SMSG mappings of %lld bytes still mapped or held %d\n",
		 atomic_read(&dev->gnd_n_mdd),
		 (u64)atomic64_read(&dev->gnd_nbytes_map),
		 atomic_read(&dev->gnd_n_mdd_held));

	LASSERT(list_empty(&dev->gnd_map_list));

	/* What other assertions are needed to ensure all connections are
	 * torn down? */

	/* check all counters == 0 (EP, MDD, etc) */

	/* if we are resetting due to quiesce (stack reset), don't check
	 * thread states */
	LASSERTF(kgnilnd_data.kgn_quiesce_trigger ||
		atomic_read(&kgnilnd_data.kgn_nthreads) == 0,
		"tried to shutdown with threads active\n");

	if (dev->gnd_smdd_hold_buf) {
		rrc = kgnilnd_deregister_smdd_buf(dev);
		LASSERTF(rrc == GNI_RC_SUCCESS,
			"bad rc from deregistration of sMDD buffer: %d\n", rrc);
		dev->gnd_smdd_hold_buf = NULL;
	}

	if (dev->gnd_rcv_fma_cqh) {
		rrc = kgnilnd_cq_destroy(dev->gnd_rcv_fma_cqh);
		LASSERTF(rrc == GNI_RC_SUCCESS,
			"bad rc from gni_cq_destroy on rcv_fma_cqh: %d\n", rrc);
		dev->gnd_rcv_fma_cqh = NULL;
	}

	if (dev->gnd_snd_rdma_cqh) {
		rrc = kgnilnd_cq_destroy(dev->gnd_snd_rdma_cqh);
		LASSERTF(rrc == GNI_RC_SUCCESS,
			"bad rc from gni_cq_destroy on send_rdma_cqh: %d\n", rrc);
		dev->gnd_snd_rdma_cqh = NULL;
	}

	if (dev->gnd_snd_fma_cqh) {
		rrc = kgnilnd_cq_destroy(dev->gnd_snd_fma_cqh);
		LASSERTF(rrc == GNI_RC_SUCCESS,
			"bad rc from gni_cq_destroy on snd_fma_cqh: %d\n", rrc);
		dev->gnd_snd_fma_cqh = NULL;
	}

	if (dev->gnd_err_handle) {
		rrc = kgnilnd_release_errors(dev->gnd_err_handle);
		LASSERTF(rrc == GNI_RC_SUCCESS,
			"bad rc from gni_release_errors: %d\n", rrc);
		dev->gnd_err_handle = NULL;
	}

	if (dev->gnd_domain) {
		rrc = kgnilnd_cdm_destroy(dev->gnd_domain);
		LASSERTF(rrc == GNI_RC_SUCCESS,
			"bad rc from gni_cdm_destroy: %d\n", rrc);
		dev->gnd_domain = NULL;
	}

	EXIT;
}

int kgnilnd_base_startup(void)
{
	struct timespec64    ts;
	long long            pkmem = libcfs_kmem_read();
	int                  rc;
	int                  i;
	int                  j;
	kgn_device_t        *dev;
	struct task_struct  *thrd;

#if defined(CONFIG_CRAY_XT) && !defined(CONFIG_CRAY_COMPUTE)
	struct sysinfo si;
#endif

	ENTRY;

	LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_NOTHING,
		"init %d\n", kgnilnd_data.kgn_init);

	/* zero pointers, flags etc */
	memset(&kgnilnd_data, 0, sizeof(kgnilnd_data));
	kgnilnd_check_kgni_version();

#if defined(CONFIG_CRAY_XT) && !defined(CONFIG_CRAY_COMPUTE)
	/* limit how much memory can be allocated for fma blocks in
	 * instances where many nodes need to reconnect at the same time;
	 * do this after the memset above so the limit isn't wiped along
	 * with the rest of kgnilnd_data */
	si_meminfo(&si);
	kgnilnd_data.free_pages_limit = si.totalram/4;
#endif

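	/* si.totalram (used above under CONFIG_CRAY_XT) is in pages, so
	 * free_pages_limit caps fma block allocations at roughly a quarter
	 * of physical memory; e.g. with 16 GiB of RAM and 4 KiB pages
	 * (illustrative numbers) that is about 1M pages, i.e. ~4 GiB */
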
	/* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
	 * a unique (for all time) connstamp so we can uniquely identify
	 * the sender.  The connstamp is an incrementing counter
	 * initialised with seconds + microseconds at startup time.  So we
	 * rely on NOT creating connections more frequently on average than
	 * 1MHz to ensure we don't use old connstamps when we reboot. */
	ktime_get_ts64(&ts);
	kgnilnd_data.kgn_connstamp =
		 kgnilnd_data.kgn_peerstamp =
			(ts.tv_sec * 1000000) + (ts.tv_nsec / 1000);
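
	/* worked example for the stamp above: at ts.tv_sec = 1700000000
	 * and ts.tv_nsec = 500000 (0.5 ms), the connstamp becomes
	 * 1700000000 * 1000000 + 500 = 1700000000000500 - seconds scaled
	 * to microseconds plus the microsecond remainder */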

	init_rwsem(&kgnilnd_data.kgn_net_rw_sem);

	for (i = 0; i < GNILND_MAXDEVS; i++) {
		kgn_device_t  *dev = &kgnilnd_data.kgn_devices[i];

		dev->gnd_id = i;
		INIT_LIST_HEAD(&dev->gnd_ready_conns);
		INIT_LIST_HEAD(&dev->gnd_delay_conns);
		INIT_LIST_HEAD(&dev->gnd_map_tx);
		INIT_LIST_HEAD(&dev->gnd_fma_buffs);
		mutex_init(&dev->gnd_cq_mutex);
		mutex_init(&dev->gnd_fmablk_mutex);
		spin_lock_init(&dev->gnd_fmablk_lock);
		init_waitqueue_head(&dev->gnd_waitq);
		init_waitqueue_head(&dev->gnd_dgram_waitq);
		init_waitqueue_head(&dev->gnd_dgping_waitq);
		spin_lock_init(&dev->gnd_lock);
		INIT_LIST_HEAD(&dev->gnd_map_list);
		spin_lock_init(&dev->gnd_map_lock);
		atomic_set(&dev->gnd_nfmablk, 0);
		atomic_set(&dev->gnd_fmablk_vers, 1);
		atomic_set(&dev->gnd_neps, 0);
		atomic_set(&dev->gnd_canceled_dgrams, 0);
		INIT_LIST_HEAD(&dev->gnd_connd_peers);
		spin_lock_init(&dev->gnd_connd_lock);
		spin_lock_init(&dev->gnd_dgram_lock);
		spin_lock_init(&dev->gnd_rdmaq_lock);
		INIT_LIST_HEAD(&dev->gnd_rdmaq);
		init_rwsem(&dev->gnd_conn_sem);

		/* alloc & setup nid based dgram table */
		CFS_ALLOC_PTR_ARRAY(dev->gnd_dgrams,
				    *kgnilnd_tunables.kgn_peer_hash_size);

		if (dev->gnd_dgrams == NULL)
			GOTO(failed, rc = -ENOMEM);

		for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++) {
			INIT_LIST_HEAD(&dev->gnd_dgrams[j]);
		}
		atomic_set(&dev->gnd_ndgrams, 0);
		atomic_set(&dev->gnd_nwcdgrams, 0);
		/* setup timer for RDMAQ processing */
		cfs_timer_setup(&dev->gnd_rdmaq_timer,
				kgnilnd_schedule_device_timer,
				(unsigned long)dev, 0);

		/* setup timer for mapping processing */
		cfs_timer_setup(&dev->gnd_map_timer,
				kgnilnd_schedule_device_timer,
				(unsigned long)dev, 0);

	}

	/* CQID 0 isn't allowed, set to MAX_MSG_ID - 1 to check for conflicts early */
	kgnilnd_data.kgn_next_cqid = GNILND_MAX_MSG_ID - 1;
	kgnilnd_data.kgn_new_min_timeout = *kgnilnd_tunables.kgn_timeout;
	init_waitqueue_head(&kgnilnd_data.kgn_reaper_waitq);
	init_waitqueue_head(&kgnilnd_data.kgn_ruhroh_waitq);
	spin_lock_init(&kgnilnd_data.kgn_reaper_lock);

	mutex_init(&kgnilnd_data.kgn_quiesce_mutex);
	atomic_set(&kgnilnd_data.kgn_nquiesce, 0);
	atomic_set(&kgnilnd_data.kgn_npending_conns, 0);
	atomic_set(&kgnilnd_data.kgn_npending_unlink, 0);
	atomic_set(&kgnilnd_data.kgn_npending_detach, 0);
	atomic_set(&kgnilnd_data.kgn_rev_offset, 0);
	atomic_set(&kgnilnd_data.kgn_rev_length, 0);
	atomic_set(&kgnilnd_data.kgn_rev_copy_buff, 0);

	/* OK to call kgnilnd_api_shutdown() to cleanup now */
	kgnilnd_data.kgn_init = GNILND_INIT_DATA;
	if (!try_module_get(THIS_MODULE))
		GOTO(failed, rc = -ENOENT);

	rwlock_init(&kgnilnd_data.kgn_peer_conn_lock);

	CFS_ALLOC_PTR_ARRAY(kgnilnd_data.kgn_peers,
			    *kgnilnd_tunables.kgn_peer_hash_size);

	if (kgnilnd_data.kgn_peers == NULL)
		GOTO(failed, rc = -ENOMEM);

	for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
		INIT_LIST_HEAD(&kgnilnd_data.kgn_peers[i]);
	}

	CFS_ALLOC_PTR_ARRAY(kgnilnd_data.kgn_conns,
			    *kgnilnd_tunables.kgn_peer_hash_size);

	if (kgnilnd_data.kgn_conns == NULL)
		GOTO(failed, rc = -ENOMEM);

	for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
		INIT_LIST_HEAD(&kgnilnd_data.kgn_conns[i]);
	}

	CFS_ALLOC_PTR_ARRAY(kgnilnd_data.kgn_nets,
			    *kgnilnd_tunables.kgn_net_hash_size);

	if (kgnilnd_data.kgn_nets == NULL)
		GOTO(failed, rc = -ENOMEM);

	for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
		INIT_LIST_HEAD(&kgnilnd_data.kgn_nets[i]);
	}

	kgnilnd_data.kgn_mbox_cache =
		kmem_cache_create("kgn_mbox_block", GNILND_MBOX_SIZE, 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (kgnilnd_data.kgn_mbox_cache == NULL) {
		CERROR("Can't create slab for physical mbox blocks\n");
		GOTO(failed, rc = -ENOMEM);
	}

	kgnilnd_data.kgn_rx_cache =
		kmem_cache_create("kgn_rx_t", sizeof(kgn_rx_t), 0, 0, NULL);
	if (kgnilnd_data.kgn_rx_cache == NULL) {
		CERROR("Can't create slab for kgn_rx_t descriptors\n");
		GOTO(failed, rc = -ENOMEM);
	}

	kgnilnd_data.kgn_tx_cache =
		kmem_cache_create("kgn_tx_t", sizeof(kgn_tx_t), 0, 0, NULL);
	if (kgnilnd_data.kgn_tx_cache == NULL) {
		CERROR("Can't create slab for kgn_tx_t\n");
		GOTO(failed, rc = -ENOMEM);
	}

	kgnilnd_data.kgn_tx_phys_cache =
		kmem_cache_create("kgn_tx_phys",
				   LNET_MAX_IOV * sizeof(gni_mem_segment_t),
				   0, 0, NULL);
	if (kgnilnd_data.kgn_tx_phys_cache == NULL) {
		CERROR("Can't create slab for kgn_tx_phys\n");
		GOTO(failed, rc = -ENOMEM);
	}

	kgnilnd_data.kgn_dgram_cache =
		kmem_cache_create("kgn_dgram_t", sizeof(kgn_dgram_t), 0, 0, NULL);
	if (kgnilnd_data.kgn_dgram_cache == NULL) {
		CERROR("Can't create slab for outgoing datagrams\n");
		GOTO(failed, rc = -ENOMEM);
	}

	/* allocate a per-cpu array of pointers, each of which will point
	 * to a MAX_IOV array of page pointers */
	kgnilnd_data.kgn_cksum_map_pages = kmalloc(num_possible_cpus() * sizeof (struct page *),
						   GFP_KERNEL);
	if (kgnilnd_data.kgn_cksum_map_pages == NULL) {
		CERROR("Can't allocate vmap cksum pages\n");
		GOTO(failed, rc = -ENOMEM);
	}
	kgnilnd_data.kgn_cksum_npages = num_possible_cpus();
	memset(kgnilnd_data.kgn_cksum_map_pages, 0,
		kgnilnd_data.kgn_cksum_npages * sizeof (struct page *));

	for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
		kgnilnd_data.kgn_cksum_map_pages[i] = kmalloc(LNET_MAX_IOV * sizeof (struct page *),
							      GFP_KERNEL);
		if (kgnilnd_data.kgn_cksum_map_pages[i] == NULL) {
			CERROR("Can't allocate vmap cksum pages for cpu %d\n", i);
			GOTO(failed, rc = -ENOMEM);
		}
	}
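
	/* layout note (descriptive only): kgn_cksum_map_pages[cpu] is a
	 * scratch array of LNET_MAX_IOV struct page pointers, one array
	 * per possible CPU, used when a buffer has to be vmapped so it
	 * can be checksummed */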

	LASSERT(kgnilnd_data.kgn_ndevs == 0);

	/* Use all available GNI devices */
	for (i = 0; i < GNILND_MAXDEVS; i++) {
		dev = &kgnilnd_data.kgn_devices[kgnilnd_data.kgn_ndevs];

		rc = kgnilnd_dev_init(dev);
		if (rc == 0) {
			/* Increment here so base_shutdown cleans it up */
			kgnilnd_data.kgn_ndevs++;

			rc = kgnilnd_allocate_phys_fmablk(dev);
			if (rc)
				GOTO(failed, rc);
		}
	}

	if (kgnilnd_data.kgn_ndevs == 0) {
		CERROR("Can't initialise any GNI devices\n");
		GOTO(failed, rc = -ENODEV);
	}

	rc = kgnilnd_thread_start(kgnilnd_reaper, NULL, "kgnilnd_rpr", 0);
	if (rc != 0) {
		CERROR("Can't spawn gnilnd reaper: %d\n", rc);
		GOTO(failed, rc);
	}

	rc = kgnilnd_start_rca_thread();
	if (rc != 0) {
		CERROR("Can't spawn gnilnd rca: %d\n", rc);
		GOTO(failed, rc);
	}

	/*
	 * Start ruhroh thread.  We can't use kgnilnd_thread_start() because
	 * we don't want this thread included in kgnilnd_data.kgn_nthreads
	 * count.  This thread controls quiesce, so it mustn't
	 * quiesce itself.
	 */
	thrd = kthread_run(kgnilnd_ruhroh_thread, NULL, "%s_%02d", "kgnilnd_rr", 0);
	if (IS_ERR(thrd)) {
		rc = PTR_ERR(thrd);
		CERROR("Can't spawn gnilnd ruhroh thread: %d\n", rc);
		GOTO(failed, rc);
	}

	/* threads will load balance across devs as they are available */
	if (*kgnilnd_tunables.kgn_thread_affinity) {
		rc = kgnilnd_start_sd_threads();
		if (rc != 0)
			GOTO(failed, rc);
	} else {
		for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
			rc = kgnilnd_thread_start(kgnilnd_scheduler,
						  (void *)((long)i),
						  "kgnilnd_sd", i);
			if (rc != 0) {
				CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
				       i, rc);
				GOTO(failed, rc);
			}
		}
	}

	for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
		dev = &kgnilnd_data.kgn_devices[i];
		rc = kgnilnd_thread_start(kgnilnd_dgram_mover, dev,
					  "kgnilnd_dg", dev->gnd_id);
		if (rc != 0) {
			CERROR("Can't spawn gnilnd dgram_mover[%d]: %d\n",
			       dev->gnd_id, rc);
			GOTO(failed, rc);
		}

		rc = kgnilnd_thread_start(kgnilnd_dgram_waitq, dev,
					  "kgnilnd_dgn", dev->gnd_id);
		if (rc != 0) {
			CERROR("Can't spawn gnilnd dgram_waitq[%d]: %d\n",
				dev->gnd_id, rc);
			GOTO(failed, rc);
		}

		rc = kgnilnd_setup_wildcard_dgram(dev);

		if (rc != 0) {
			CERROR("Can't create wildcard dgrams[%d]: %d\n",
				dev->gnd_id, rc);
			GOTO(failed, rc);
		}
	}

	/* flag everything initialised */
	kgnilnd_data.kgn_init = GNILND_INIT_ALL;
	/*****************************************************/

	CDEBUG(D_MALLOC, "initial kmem %lld\n", pkmem);
	RETURN(0);

failed:
	kgnilnd_base_shutdown();
	kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
	RETURN(rc);
}

void
kgnilnd_base_shutdown(void)
{
	int                     i, j;
	ENTRY;

	while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_PAUSE_SHUTDOWN, 1)) {};

	kgnilnd_data.kgn_wc_kill = 1;

	for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
		kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
		kgnilnd_cancel_wc_dgrams(dev);
		kgnilnd_cancel_dgrams(dev);
		kgnilnd_del_conn_or_peer(NULL, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
		kgnilnd_wait_for_canceled_dgrams(dev);
	}

	/* We need to verify there are no conns left before we let the threads
	 * shut down otherwise we could clean up the peers but still have
	 * some outstanding conns due to orphaned datagram conns that are
	 * being cleaned up.
	 */
	i = 2;
	while (atomic_read(&kgnilnd_data.kgn_nconns) != 0) {
		i++;

		for (j = 0; j < kgnilnd_data.kgn_ndevs; ++j) {
			kgn_device_t *dev = &kgnilnd_data.kgn_devices[j];
			kgnilnd_schedule_device(dev);
		}

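		/* (i & -i) isolates the lowest set bit, so the test below
		 * is true only when i is a power of two - e.g. i = 4, 8,
		 * 16... - which rate-limits the console warning while still
		 * logging every pass at D_NET */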
		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
			"Waiting for conns to be cleaned up %d\n",
			atomic_read(&kgnilnd_data.kgn_nconns));
		schedule_timeout_uninterruptible(cfs_time_seconds(1));
	}
	/* Peer state all cleaned up BEFORE setting shutdown, so threads don't
	 * have to worry about shutdown races.  NB connections may be created
	 * while there are still active connds, but these will be temporary
	 * since peer creation always fails after the listener has started to
	 * shut down.
	 * all peers should have been cleared out on the nets */
	LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
		"peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));

	/* Wait for the ruhroh thread to shut down. */
	kgnilnd_data.kgn_ruhroh_shutdown = 1;
	wake_up(&kgnilnd_data.kgn_ruhroh_waitq);
	i = 2;
	while (kgnilnd_data.kgn_ruhroh_running != 0) {
		i++;
		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
		       "Waiting for ruhroh thread to terminate\n");
		schedule_timeout_uninterruptible(cfs_time_seconds(1));
	}

	/* Flag threads to terminate */
	kgnilnd_data.kgn_shutdown = 1;

	for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
		kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];

		/* should clear all the MDDs */
		kgnilnd_unmap_fma_blocks(dev);

		kgnilnd_schedule_device(dev);
		wake_up(&dev->gnd_dgram_waitq);
		wake_up(&dev->gnd_dgping_waitq);
		LASSERT(list_empty(&dev->gnd_connd_peers));
	}

	spin_lock(&kgnilnd_data.kgn_reaper_lock);
	wake_up(&kgnilnd_data.kgn_reaper_waitq);
	spin_unlock(&kgnilnd_data.kgn_reaper_lock);

	if (atomic_read(&kgnilnd_data.kgn_nthreads))
		kgnilnd_wakeup_rca_thread();

	/* Wait for threads to exit */
	i = 2;
	while (atomic_read(&kgnilnd_data.kgn_nthreads) != 0) {
		i++;
		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
		       "Waiting for %d threads to terminate\n",
		       atomic_read(&kgnilnd_data.kgn_nthreads));
		schedule_timeout_uninterruptible(cfs_time_seconds(1));
	}

	LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
		"peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));

	if (kgnilnd_data.kgn_peers != NULL) {
		for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
			LASSERT(list_empty(&kgnilnd_data.kgn_peers[i]));

		CFS_FREE_PTR_ARRAY(kgnilnd_data.kgn_peers,
				   *kgnilnd_tunables.kgn_peer_hash_size);
	}

	down_write(&kgnilnd_data.kgn_net_rw_sem);
	if (kgnilnd_data.kgn_nets != NULL) {
		for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++)
			LASSERT(list_empty(&kgnilnd_data.kgn_nets[i]));

		CFS_FREE_PTR_ARRAY(kgnilnd_data.kgn_nets,
				   *kgnilnd_tunables.kgn_net_hash_size);
	}
	up_write(&kgnilnd_data.kgn_net_rw_sem);

	LASSERTF(atomic_read(&kgnilnd_data.kgn_nconns) == 0,
		"conns left %d\n", atomic_read(&kgnilnd_data.kgn_nconns));

	if (kgnilnd_data.kgn_conns != NULL) {
		for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
			LASSERT(list_empty(&kgnilnd_data.kgn_conns[i]));

		CFS_FREE_PTR_ARRAY(kgnilnd_data.kgn_conns,
				   *kgnilnd_tunables.kgn_peer_hash_size);
	}

	for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
		kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
		kgnilnd_dev_fini(dev);

		LASSERTF(atomic_read(&dev->gnd_ndgrams) == 0,
			"dgrams left %d\n", atomic_read(&dev->gnd_ndgrams));

		if (dev->gnd_dgrams != NULL) {
			for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size;
			     j++)
				LASSERT(list_empty(&dev->gnd_dgrams[j]));

			CFS_FREE_PTR_ARRAY(dev->gnd_dgrams,
					   *kgnilnd_tunables.kgn_peer_hash_size);
		}

		kgnilnd_free_phys_fmablk(dev);
	}

	if (kgnilnd_data.kgn_mbox_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_mbox_cache);

	if (kgnilnd_data.kgn_rx_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_rx_cache);

	if (kgnilnd_data.kgn_tx_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_tx_cache);

	if (kgnilnd_data.kgn_tx_phys_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_tx_phys_cache);

	if (kgnilnd_data.kgn_dgram_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_dgram_cache);

	if (kgnilnd_data.kgn_cksum_map_pages != NULL) {
		for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
			if (kgnilnd_data.kgn_cksum_map_pages[i] != NULL) {
				kfree(kgnilnd_data.kgn_cksum_map_pages[i]);
			}
		}
		kfree(kgnilnd_data.kgn_cksum_map_pages);
	}

	CDEBUG(D_MALLOC, "after NAL cleanup: kmem %lld\n",
	       libcfs_kmem_read());

	kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
	module_put(THIS_MODULE);

	EXIT;
}

int
kgnilnd_startup(struct lnet_ni *ni)
{
        int               rc, devno;
        kgn_net_t        *net;
        ENTRY;

        LASSERTF(ni->ni_net->net_lnd == &the_kgnilnd,
                "bad LND 0x%p != the_kgnilnd @ 0x%p\n",
                ni->ni_net->net_lnd, &the_kgnilnd);

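        /* the base state (threads, caches, hash tables) is shared by every
         * interface and is brought up exactly once, by the first startup */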
        if (kgnilnd_data.kgn_init == GNILND_INIT_NOTHING) {
                rc = kgnilnd_base_startup();
                if (rc != 0)
                        RETURN(rc);
        }

        /* Serialize with shutdown. */
        mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);

        LIBCFS_ALLOC(net, sizeof(*net));
        if (net == NULL) {
                CERROR("could not allocate net for new interface instance\n");
                /* no need to clean up the CDM... */
                GOTO(failed, rc = -ENOMEM);
        }
        INIT_LIST_HEAD(&net->gnn_list);
        ni->ni_data = net;
        net->gnn_ni = ni;
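        /* if nothing configured credits for this net explicitly (e.g. via
         * LNet configuration), fall back to the module parameters */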
        if (!ni->ni_net->net_tunables_set) {
                ni->ni_net->net_tunables.lct_max_tx_credits =
                        *kgnilnd_tunables.kgn_credits;
                ni->ni_net->net_tunables.lct_peer_tx_credits =
                        *kgnilnd_tunables.kgn_peer_credits;
        }

        if (*kgnilnd_tunables.kgn_peer_health) {
                int     fudge;
                int     timeout;

                /* give this a bit of leeway - we don't have a hard timeout
                 * as we only check timeouts periodically - see comment in
                 * kgnilnd_reaper */
                fudge = (GNILND_TO2KA(*kgnilnd_tunables.kgn_timeout) / GNILND_REAPER_NCHECKS);
                timeout = *kgnilnd_tunables.kgn_timeout + fudge;
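                /* illustrative numbers only, not necessarily the defaults:
                 * with kgn_timeout == 60 and GNILND_REAPER_NCHECKS == 4, the
                 * fudge adds GNILND_TO2KA(60) / 4 seconds, so peer health
                 * won't declare a peer dead before the reaper has had a full
                 * pass to time the connection out */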

                if (*kgnilnd_tunables.kgn_peer_timeout >= timeout) {
                        ni->ni_net->net_tunables.lct_peer_timeout =
                                 *kgnilnd_tunables.kgn_peer_timeout;
                } else if (*kgnilnd_tunables.kgn_peer_timeout > -1) {
                        LCONSOLE_ERROR("Peer_timeout is set to %d but needs to be >= %d\n",
                                       *kgnilnd_tunables.kgn_peer_timeout,
                                       timeout);
                        ni->ni_data = NULL;
                        LIBCFS_FREE(net, sizeof(*net));
                        GOTO(failed, rc = -EINVAL);
                } else {
                        ni->ni_net->net_tunables.lct_peer_timeout = timeout;
                }

                LCONSOLE_INFO("Enabling LNet peer health for gnilnd, timeout %ds\n",
                              ni->ni_net->net_tunables.lct_peer_timeout);
        }

        atomic_set(&net->gnn_refcount, 1);

        /* if we have multiple devices, spread the nets around */
        net->gnn_netnum = LNET_NETNUM(LNET_NID_NET(&ni->ni_nid));

        devno = LNET_NID_NET(&ni->ni_nid) % GNILND_MAXDEVS;
        net->gnn_dev = &kgnilnd_data.kgn_devices[devno];
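        /* for example (illustrative only): if GNILND_MAXDEVS were 2, nets
         * with an even LNet net value would map to device 0 and odd ones to
         * device 1, spreading interfaces across the available devices */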

        /* allocate a 'dummy' cdm for datagram use. We can only have a single
         * datagram between a nid:inst_id and nid2:inst_id. The fake cdm
         * gives us an additional inst_id to use, allowing the datagrams to
         * flow like rivers of honey and beer */

        /* the instance id for the cdm is the NETNUM offset by MAXDEVS -
         * ensuring we'll have a unique id */

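        /* for instance (illustrative values): with gnn_netnum == 1 and
         * GNILND_MAXDEVS == 2, the cdm instance id works out to 1 + 2 == 3,
         * which cannot collide with the ids used by the real devices */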
        ni->ni_nid.nid_addr[0] =
                cpu_to_be32(LNET_NIDADDR(net->gnn_dev->gnd_nid));
        CDEBUG(D_NET, "adding net %p nid=%s on dev %d\n",
                net, libcfs_nidstr(&ni->ni_nid), net->gnn_dev->gnd_id);
        /* until the gnn_list is set, we need to clean up after ourselves, as
         * kgnilnd_shutdown would just get confused */

        down_write(&kgnilnd_data.kgn_net_rw_sem);
        list_add_tail(&net->gnn_list, kgnilnd_netnum2netlist(net->gnn_netnum));
        up_write(&kgnilnd_data.kgn_net_rw_sem);

        /* we need a separate thread to call probe_wait_by_id until
         * we get a function callback notifier from kgni */
        mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
        RETURN(0);
 failed:
        mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
        kgnilnd_shutdown(ni);
        RETURN(rc);
}

void
kgnilnd_shutdown(struct lnet_ni *ni)
{
        kgn_net_t     *net = ni->ni_data;
        int           i;
        int           rc;
        ENTRY;

        CFS_RACE(CFS_FAIL_GNI_SR_DOWN_RACE);

        LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_ALL,
                "init %d\n", kgnilnd_data.kgn_init);

        /* Serialize with startup. */
        mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
        CDEBUG(D_MALLOC, "before NAL cleanup: kmem %lld\n",
               libcfs_kmem_read());

        if (net == NULL) {
                CERROR("got NULL net for ni %p\n", ni);
                GOTO(out, rc = -EINVAL);
        }
        LASSERTF(ni == net->gnn_ni,
                "ni %p gnn_ni %p\n", ni, net->gnn_ni);

        ni->ni_data = NULL;

        LASSERT(!net->gnn_shutdown);
        LASSERTF(atomic_read(&net->gnn_refcount) != 0,
                "net %p refcount %d\n",
                net, atomic_read(&net->gnn_refcount));

        if (!list_empty(&net->gnn_list)) {
                /* serialize with peer creation */
                down_write(&kgnilnd_data.kgn_net_rw_sem);
                net->gnn_shutdown = 1;
                up_write(&kgnilnd_data.kgn_net_rw_sem);

                kgnilnd_cancel_net_dgrams(net);

                kgnilnd_del_conn_or_peer(net, LNET_NID_ANY, GNILND_DEL_PEER,
                                         -ESHUTDOWN);

                /* if we are quiesced, we need to wake up - those threads
                 * must be alive to release peers, etc */
                if (GNILND_IS_QUIESCED) {
                        set_mb(kgnilnd_data.kgn_quiesce_trigger, GNILND_QUIESCE_IDLE);
                        kgnilnd_quiesce_wait("shutdown");
                }

                kgnilnd_wait_for_canceled_dgrams(net->gnn_dev);

                /* Wait until the net's refcount drops to 1, then release the
                 * final reference, which is ours; this makes sure everything
                 * else is done before we free the net. */
                i = 4;
                while (atomic_read(&net->gnn_refcount) != 1) {
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
                                "Waiting for %d references to clear on net %d\n",
                                atomic_read(&net->gnn_refcount),
                                net->gnn_netnum);
                        schedule_timeout_uninterruptible(cfs_time_seconds(1));
                }

                /* release ref from kgnilnd_startup */
                kgnilnd_net_decref(net);
                /* serialize with reaper and conn_task looping */
                down_write(&kgnilnd_data.kgn_net_rw_sem);
                list_del_init(&net->gnn_list);
                up_write(&kgnilnd_data.kgn_net_rw_sem);
        }

        /* not locking, this can't race with writers */
        LASSERTF(atomic_read(&net->gnn_refcount) == 0,
                "net %p refcount %d\n",
                net, atomic_read(&net->gnn_refcount));
        LIBCFS_FREE(net, sizeof(*net));

out:
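        /* if every net hash bucket is now empty, this was the last net on
         * the node, so tear down the LND-wide state too.  Drop the read
         * lock before calling kgnilnd_base_shutdown(), since it takes the
         * semaphore for writing itself. */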
        down_read(&kgnilnd_data.kgn_net_rw_sem);
        for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                if (!list_empty(&kgnilnd_data.kgn_nets[i])) {
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        break;
                }

                if (i == *kgnilnd_tunables.kgn_net_hash_size - 1) {
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        kgnilnd_base_shutdown();
                }
        }
        CDEBUG(D_MALLOC, "after NAL cleanup: kmem %lld\n",
               libcfs_kmem_read());

        mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
        EXIT;
}

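/* module unload tears down in the reverse order of kgnilnd_init(): we
 * unregister from LNet first so no new startups can arrive, then remove
 * the proc and sysctl entries */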
static void __exit kgnilnd_exit(void)
{
        lnet_unregister_lnd(&the_kgnilnd);
        kgnilnd_proc_fini();
        kgnilnd_remove_sysctl();
}

static int __init kgnilnd_init(void)
{
        int    rc;

        rc = kgnilnd_tunables_init();
        if (rc != 0)
                return rc;

        LCONSOLE_INFO("Lustre: kgnilnd build version: "LUSTRE_VERSION_STRING"\n");

        kgnilnd_insert_sysctl();
        kgnilnd_proc_init();

        lnet_register_lnd(&the_kgnilnd);

        return 0;
}

MODULE_AUTHOR("Cray, Inc. <nic@cray.com>");
MODULE_DESCRIPTION("Gemini LNet Network Driver");
MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");

module_init(kgnilnd_init);
module_exit(kgnilnd_exit);