/* lnet/klnds/gnilnd/gnilnd.c */
/*
 * Copyright (C) 2012 Cray, Inc.
 *
 * Copyright (c) 2013, 2017, Intel Corporation.
 *
 *   Author: Nic Henke <nic@cray.com>
 *   Author: James Shimek <jshimek@cray.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */
#include "gnilnd.h"

/* Primary entry points from LNet.  There are no guarantees against reentrance. */
const struct lnet_lnd the_kgnilnd = {
        .lnd_type       = GNILND,
        .lnd_startup    = kgnilnd_startup,
        .lnd_shutdown   = kgnilnd_shutdown,
        .lnd_ctl        = kgnilnd_ctl,
        .lnd_send       = kgnilnd_send,
        .lnd_recv       = kgnilnd_recv,
        .lnd_eager_recv = kgnilnd_eager_recv,
};
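
/* A minimal sketch (not part of this file) of how an LND table like the one
 * above is typically hooked into LNet at module load: the driver registers
 * the_kgnilnd so LNet can route startup/shutdown/send/recv through the
 * function pointers.  The init function name here is an assumption; see the
 * actual module init code for the real sequence. */
#if 0
static int __init kgnilnd_module_init(void)
{
        /* make this LND available to LNet; registration keys off
         * .lnd_type (GNILND) */
        lnet_register_lnd(&the_kgnilnd);
        return 0;
}
#endif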

kgn_data_t      kgnilnd_data;

int
kgnilnd_thread_start(int (*fn)(void *arg), void *arg, char *name, int id)
{
        struct task_struct *thrd;

        thrd = kthread_run(fn, arg, "%s_%02d", name, id);
        if (IS_ERR(thrd))
                return PTR_ERR(thrd);

        atomic_inc(&kgnilnd_data.kgn_nthreads);
        return 0;
}
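
/* Illustrative use only (hedged sketch): spawning one of the driver's worker
 * threads through the helper above.  "kgnilnd_reaper" is used as a plausible
 * thread function; check the actual callers for the real names and ids. */
#if 0
{
        int rc;

        rc = kgnilnd_thread_start(kgnilnd_reaper, NULL, "kgnilnd_rpr", 0);
        if (rc != 0)
                CERROR("Can't spawn gnilnd reaper: %d\n", rc);
}
#endif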

/* bind scheduler threads to cpus */
int
kgnilnd_start_sd_threads(void)
{
        int cpu;
        int i = 0;
        struct task_struct *task;

        for_each_online_cpu(cpu) {
                /* don't bind to cpu 0 - all interrupts are processed here */
                if (cpu == 0)
                        continue;

                task = kthread_create(kgnilnd_scheduler, (void *)((long)i),
                                      "%s_%02d", "kgnilnd_sd", i);
                if (!IS_ERR(task)) {
                        kthread_bind(task, cpu);
                        wake_up_process(task);
                } else {
                        CERROR("Can't spawn gnilnd scheduler[%d] %ld\n", i,
                                PTR_ERR(task));
                        return PTR_ERR(task);
                }
                atomic_inc(&kgnilnd_data.kgn_nthreads);

                if (++i >= *kgnilnd_tunables.kgn_sched_threads)
                        break;
        }

        return 0;
}
85
86 /* needs write_lock on kgn_peer_conn_lock */
87 int
88 kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
89 {
90         kgn_conn_t         *conn;
91         struct list_head   *ctmp, *cnxt;
92         int                 loopback;
93         int                 count = 0;
94
95         loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;
96
97         list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
98                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
99
100                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
101                         continue;
102
103                 if (conn == newconn)
104                         continue;
105
106                 if (conn->gnc_device != newconn->gnc_device)
107                         continue;
108
109                 /* This is a two connection loopback - one talking to the other */
110                 if (loopback &&
111                     newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
112                     newconn->gnc_peer_connstamp == conn->gnc_my_connstamp) {
113                         CDEBUG(D_NET, "skipping prune of %p, "
114                                 "loopback and matching stamps"
115                                 " connstamp %llu(%llu)"
116                                 " peerstamp %llu(%llu)\n",
117                                 conn, newconn->gnc_my_connstamp,
118                                 conn->gnc_peer_connstamp,
119                                 newconn->gnc_peer_connstamp,
120                                 conn->gnc_my_connstamp);
121                         continue;
122                 }
123
124                 if (conn->gnc_peerstamp != newconn->gnc_peerstamp) {
125                         LASSERTF(conn->gnc_peerstamp < newconn->gnc_peerstamp,
126                                 "conn 0x%p peerstamp %llu >= "
127                                 "newconn 0x%p peerstamp %llu\n",
128                                 conn, conn->gnc_peerstamp,
129                                 newconn, newconn->gnc_peerstamp);
130
131                         CDEBUG(D_NET, "Closing stale conn nid: %s "
132                                " peerstamp:%#llx(%#llx)\n",
133                                libcfs_nid2str(peer->gnp_nid),
134                                conn->gnc_peerstamp, newconn->gnc_peerstamp);
135                 } else {
136
137                         LASSERTF(conn->gnc_peer_connstamp < newconn->gnc_peer_connstamp,
138                                 "conn 0x%p peer_connstamp %llu >= "
139                                 "newconn 0x%p peer_connstamp %llu\n",
140                                 conn, conn->gnc_peer_connstamp,
141                                 newconn, newconn->gnc_peer_connstamp);
142
143                         CDEBUG(D_NET, "Closing stale conn nid: %s"
144                                " connstamp:%llu(%llu)\n",
145                                libcfs_nid2str(peer->gnp_nid),
146                                conn->gnc_peer_connstamp, newconn->gnc_peer_connstamp);
147                 }
148
149                 count++;
150                 kgnilnd_close_conn_locked(conn, -ESTALE);
151         }
152
153         if (count != 0) {
154                 CWARN("Closed %d stale conns to %s\n", count, libcfs_nid2str(peer->gnp_nid));
155         }
156
157         RETURN(count);
158 }
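
/* Hedged usage sketch (assumed caller pattern, not copied from this file):
 * the "_locked" suffix means the caller must already hold kgn_peer_conn_lock
 * for write, e.g. while installing a newly established conn. */
#if 0
{
        int nstale;

        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        nstale = kgnilnd_close_stale_conns_locked(peer, newconn);
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
}
#endif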

int
kgnilnd_conn_isdup_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
        kgn_conn_t       *conn;
        struct list_head *tmp;
        int               loopback;
        ENTRY;

        loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;

        list_for_each(tmp, &peer->gnp_conns) {
                conn = list_entry(tmp, kgn_conn_t, gnc_list);
                CDEBUG(D_NET, "checking conn 0x%p for peer %s"
                        " lo %d new %llu existing %llu"
                        " new peer %llu existing peer %llu"
                        " new dev %p existing dev %p\n",
                        conn, libcfs_nid2str(peer->gnp_nid),
                        loopback,
                        newconn->gnc_peerstamp, conn->gnc_peerstamp,
                        newconn->gnc_peer_connstamp, conn->gnc_peer_connstamp,
                        newconn->gnc_device, conn->gnc_device);

                /* conn is in the process of closing */
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                /* 'newconn' is from an earlier version of 'peer'!!! */
                if (newconn->gnc_peerstamp < conn->gnc_peerstamp)
                        RETURN(1);

                /* 'conn' is from an earlier version of 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->gnc_peerstamp > conn->gnc_peerstamp)
                        continue;

                /* Different devices are OK */
                if (conn->gnc_device != newconn->gnc_device)
                        continue;

                /* It's me connecting to myself */
                if (loopback &&
                    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
                    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp)
                        continue;

                /* 'newconn' is an earlier connection from 'peer'!!! */
                if (newconn->gnc_peer_connstamp < conn->gnc_peer_connstamp)
                        RETURN(2);

                /* 'conn' is an earlier connection from 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->gnc_peer_connstamp > conn->gnc_peer_connstamp)
                        continue;

                /* 'newconn' has the SAME connection stamp; 'peer' isn't
                 * playing the game... */
                RETURN(3);
        }

        RETURN(0);
}
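
/* Summary of the duplicate-detection verdicts above:
 *   0 - no duplicate; 'newconn' is usable
 *   1 - 'newconn' carries an older peerstamp (stale instance of the peer)
 *   2 - 'newconn' carries an older peer_connstamp (stale connection attempt)
 *   3 - identical connection stamp; the peer is misbehaving
 * Nonzero means the caller should reject 'newconn'. */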

int
kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
{
        kgn_conn_t      *conn;
        gni_return_t    rrc;
        int             rc = 0;

        LASSERT(!in_interrupt());
        atomic_inc(&kgnilnd_data.kgn_nconns);

        /* divide by 2 to allow for complete reset and immediate reconnect */
        if (atomic_read(&kgnilnd_data.kgn_nconns) >= GNILND_MAX_CQID/2) {
                CERROR("Too many conns are live: %d >= %d\n",
                        atomic_read(&kgnilnd_data.kgn_nconns), GNILND_MAX_CQID/2);
                atomic_dec(&kgnilnd_data.kgn_nconns);
                return -E2BIG;
        }

        LIBCFS_ALLOC(conn, sizeof(*conn));
        if (conn == NULL) {
                atomic_dec(&kgnilnd_data.kgn_nconns);
                return -ENOMEM;
        }

        conn->gnc_tx_ref_table =
                kgnilnd_vzalloc(GNILND_MAX_MSG_ID * sizeof(void *));
        if (conn->gnc_tx_ref_table == NULL) {
                CERROR("Can't allocate conn tx_ref_table\n");
                GOTO(failed, rc = -ENOMEM);
        }

        mutex_init(&conn->gnc_smsg_mutex);
        mutex_init(&conn->gnc_rdma_mutex);
        atomic_set(&conn->gnc_refcount, 1);
        atomic_set(&conn->gnc_reaper_noop, 0);
        atomic_set(&conn->gnc_sched_noop, 0);
        atomic_set(&conn->gnc_tx_in_use, 0);
        INIT_LIST_HEAD(&conn->gnc_list);
        INIT_LIST_HEAD(&conn->gnc_hashlist);
        INIT_LIST_HEAD(&conn->gnc_schedlist);
        INIT_LIST_HEAD(&conn->gnc_fmaq);
        INIT_LIST_HEAD(&conn->gnc_mdd_list);
        INIT_LIST_HEAD(&conn->gnc_delaylist);
        spin_lock_init(&conn->gnc_list_lock);
        spin_lock_init(&conn->gnc_tx_lock);
        conn->gnc_magic = GNILND_CONN_MAGIC;

        /* set tx id to nearly the end to make sure we find wrapping
         * issues soon */
        conn->gnc_next_tx = (int) GNILND_MAX_MSG_ID - 10;

        /* if this fails, we have conflicts and MAX_TX is too large */
        BUILD_BUG_ON(GNILND_MAX_MSG_ID >= GNILND_MSGID_CLOSE);

        /* get a new unique CQ id for this conn */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        conn->gnc_my_connstamp = kgnilnd_data.kgn_connstamp++;
        conn->gnc_cqid = kgnilnd_get_cqid_locked();
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        if (conn->gnc_cqid == 0) {
                CERROR("Could not allocate unique CQ ID for conn 0x%p\n", conn);
                GOTO(failed, rc = -E2BIG);
        }

        CDEBUG(D_NET, "alloc cqid %u for conn 0x%p\n",
                conn->gnc_cqid, conn);

        /* need to be set before gnc_ephandle to allow kgnilnd_destroy_conn_ep to
         * check context */
        conn->gnc_device = dev;

        conn->gnc_timeout = max(*kgnilnd_tunables.kgn_timeout,
                                 GNILND_MIN_TIMEOUT);
        kgnilnd_update_reaper_timeout(conn->gnc_timeout);

        /* this is the ep_handle for doing SMSG & BTE */
        mutex_lock(&dev->gnd_cq_mutex);
        rrc = kgnilnd_ep_create(dev->gnd_handle, dev->gnd_snd_fma_cqh,
                                &conn->gnc_ephandle);
        mutex_unlock(&dev->gnd_cq_mutex);
        if (rrc != GNI_RC_SUCCESS)
                GOTO(failed, rc = -ENETDOWN);

        CDEBUG(D_NET, "created conn 0x%p ep_hndl 0x%p\n",
               conn, conn->gnc_ephandle);

        /* add ref for EP canceling */
        kgnilnd_conn_addref(conn);
        atomic_inc(&dev->gnd_neps);

        *connp = conn;
        return 0;

failed:
        atomic_dec(&kgnilnd_data.kgn_nconns);
        kgnilnd_vfree(conn->gnc_tx_ref_table,
                      GNILND_MAX_MSG_ID * sizeof(void *));
        LIBCFS_FREE(conn, sizeof(*conn));
        return rc;
}
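
/* Hedged usage sketch (illustrative, not from this file): a typical caller
 * allocates a conn for a device and bails on error; the variable names
 * mirror the function's parameters. */
#if 0
{
        kgn_conn_t *conn;
        int rc;

        rc = kgnilnd_create_conn(&conn, dev);
        if (rc != 0) {
                CERROR("can't create conn: %d\n", rc);
                return rc;
        }
        /* conn now holds one ref for us and one for its EP */
}
#endif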

/* needs to be called with kgn_peer_conn_lock held (read or write) */
kgn_conn_t *
kgnilnd_find_conn_locked(kgn_peer_t *peer)
{
        kgn_conn_t      *conn = NULL;

        /* if we are in reset, this conn is going to die soon */
        if (unlikely(kgnilnd_data.kgn_in_reset)) {
                RETURN(NULL);
        }

        /* just return the first ESTABLISHED connection */
        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                /* kgnilnd_finish_connect doesn't put connections on the
                 * peer list until they are actually established */
                LASSERTF(conn->gnc_state >= GNILND_CONN_ESTABLISHED,
                        "found conn %p state %s on peer %p (%s)\n",
                        conn, kgnilnd_conn_state2str(conn), peer,
                        libcfs_nid2str(peer->gnp_nid));
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                RETURN(conn);
        }
        RETURN(NULL);
}

/* needs write_lock on kgn_peer_conn_lock held */
kgn_conn_t *
kgnilnd_find_or_create_conn_locked(kgn_peer_t *peer)
{
        kgn_device_t    *dev = peer->gnp_net->gnn_dev;
        kgn_conn_t      *conn;

        conn = kgnilnd_find_conn_locked(peer);

        if (conn != NULL) {
                return conn;
        }

        /* if the peer was previously connecting, check if we should
         * trigger another connection attempt yet. */
        if (time_before(jiffies, peer->gnp_reconnect_time)) {
                return NULL;
        }

        /* This check prevents us from creating a new connection to a peer while we are
         * still in the process of closing an existing connection to the peer.
         */
        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_ephandle != NULL) {
                        CDEBUG(D_NET, "Not connecting non-null ephandle found peer 0x%p->%s\n", peer,
                                libcfs_nid2str(peer->gnp_nid));
                        return NULL;
                }
        }

        if (peer->gnp_connecting != GNILND_PEER_IDLE) {
                /* only fire up a new connection if the peer is IDLE;
                 * any other state means a connect is already in flight */
                return NULL;
        }

        CDEBUG(D_NET, "starting connect to %s\n",
                libcfs_nid2str(peer->gnp_nid));
        peer->gnp_connecting = GNILND_PEER_CONNECT;
        kgnilnd_peer_addref(peer); /* extra ref for connd */

        spin_lock(&dev->gnd_connd_lock);
        list_add_tail(&peer->gnp_connd_list, &dev->gnd_connd_peers);
        spin_unlock(&dev->gnd_connd_lock);

        kgnilnd_schedule_dgram(dev);
        CDEBUG(D_NETTRACE, "scheduling new connect\n");

        return NULL;
}

/* Caller is responsible for deciding if/when to call this */
void
kgnilnd_destroy_conn_ep(kgn_conn_t *conn)
{
        gni_return_t    rrc;
        gni_ep_handle_t tmp_ep;

        /* only if we actually initialized it,
         * then set NULL to tell kgnilnd_destroy_conn to leave it alone */

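        /* xchg() atomically claims the EP pointer, so if two paths race to
         * tear down the conn only one will see the non-NULL handle below */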
        tmp_ep = xchg(&conn->gnc_ephandle, NULL);
        if (tmp_ep != NULL) {
                /* we never re-use the EP, so unbind is not needed */
                mutex_lock(&conn->gnc_device->gnd_cq_mutex);
                rrc = kgnilnd_ep_destroy(tmp_ep);
                mutex_unlock(&conn->gnc_device->gnd_cq_mutex);

                /* if this fails, it could hork up kgni smsg retransmit and others
                 * since we could free the SMSG mbox memory, etc. */
                LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d conn 0x%p ep 0x%p\n",
                         rrc, conn, conn->gnc_ephandle);

                atomic_dec(&conn->gnc_device->gnd_neps);

                /* clear out count added in kgnilnd_close_conn_locked
                 * conn will have a peer once it hits finish_connect, where it
                 * is the first spot we'll mark it ESTABLISHED as well */
                if (conn->gnc_peer) {
                        kgnilnd_admin_decref(conn->gnc_peer->gnp_dirty_eps);
                }

                /* drop ref for EP */
                kgnilnd_conn_decref(conn);
        }
}

void
kgnilnd_destroy_conn(kgn_conn_t *conn)
{
        LASSERTF(!in_interrupt() &&
                !conn->gnc_scheduled &&
                !conn->gnc_in_purgatory &&
                conn->gnc_ephandle == NULL &&
                list_empty(&conn->gnc_list) &&
                list_empty(&conn->gnc_hashlist) &&
                list_empty(&conn->gnc_schedlist) &&
                list_empty(&conn->gnc_mdd_list) &&
                list_empty(&conn->gnc_delaylist) &&
                conn->gnc_magic == GNILND_CONN_MAGIC,
                "conn 0x%p->%s IRQ %d sched %d purg %d ep 0x%p Mg %d lists %d/%d/%d/%d/%d\n",
                conn, conn->gnc_peer ? libcfs_nid2str(conn->gnc_peer->gnp_nid)
                                     : "<?>",
                !!in_interrupt(), conn->gnc_scheduled,
                conn->gnc_in_purgatory,
                conn->gnc_ephandle,
                conn->gnc_magic,
                list_empty(&conn->gnc_list),
                list_empty(&conn->gnc_hashlist),
                list_empty(&conn->gnc_schedlist),
                list_empty(&conn->gnc_mdd_list),
                list_empty(&conn->gnc_delaylist));

        /* Tripping these is especially bad, as it means we have items on the
         *  lists that didn't keep their refcount on the connection - or
         *  somebody evil released their own */
        LASSERTF(list_empty(&conn->gnc_fmaq) &&
                 atomic_read(&conn->gnc_nlive_fma) == 0 &&
                 atomic_read(&conn->gnc_nlive_rdma) == 0,
                 "conn 0x%p fmaq %d@0x%p nfma %d nrdma %d\n",
                 conn, kgnilnd_count_list(&conn->gnc_fmaq), &conn->gnc_fmaq,
                 atomic_read(&conn->gnc_nlive_fma), atomic_read(&conn->gnc_nlive_rdma));

        CDEBUG(D_NET, "destroying conn %p ephandle %p error %d\n",
                conn, conn->gnc_ephandle, conn->gnc_error);

        /* We are freeing this memory; remove the magic value from the connection */
        conn->gnc_magic = 0;

        /* if there is an FMA blk left here, we'll tear it down */
        if (conn->gnc_fma_blk) {
                if (conn->gnc_peer) {
                        kgn_mbox_info_t *mbox;

                        mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
                        mbox->mbx_prev_nid = conn->gnc_peer->gnp_nid;
                }
                kgnilnd_release_mbox(conn, 0);
        }

        if (conn->gnc_peer != NULL)
                kgnilnd_peer_decref(conn->gnc_peer);

        if (conn->gnc_tx_ref_table != NULL) {
                kgnilnd_vfree(conn->gnc_tx_ref_table,
                              GNILND_MAX_MSG_ID * sizeof(void *));
        }

        LIBCFS_FREE(conn, sizeof(*conn));
        atomic_dec(&kgnilnd_data.kgn_nconns);
}

/* peer_alive and peer_notify done in the style of the o2iblnd */
void
kgnilnd_peer_alive(kgn_peer_t *peer)
{
        time64_t now = ktime_get_seconds();

        set_mb(peer->gnp_last_alive, now);
}

void
kgnilnd_peer_notify(kgn_peer_t *peer, int error, int alive)
{
        int                     tell_lnet = 0;
        int                     nnets = 0;
        int                     rc;
        int                     i, j;
        kgn_conn_t             *conn;
        kgn_net_t             **nets;
        kgn_net_t              *net;

        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DONT_NOTIFY))
                return;

        /* Tell LNet we are giving up on this peer - but only
         * if it isn't already reconnected or trying to reconnect */
        read_lock(&kgnilnd_data.kgn_peer_conn_lock);

        /* use kgnilnd_find_conn_locked to avoid any conns in the process of being nuked
         *
         * don't tell LNet if we are in reset - we assume that everyone will be able to
         * reconnect just fine
         */
        conn = kgnilnd_find_conn_locked(peer);

        CDEBUG(D_NETTRACE, "peer 0x%p->%s connecting %d conn 0x%p, rst %d error %d\n",
               peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting, conn,
               kgnilnd_data.kgn_in_reset, error);

        if (((peer->gnp_connecting == GNILND_PEER_IDLE) &&
            (conn == NULL) &&
            (!kgnilnd_data.kgn_in_reset) &&
            (!kgnilnd_conn_clean_errno(error))) || alive) {
                tell_lnet = 1;
        }

        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        if (!tell_lnet) {
                /* short circuit if we don't need to notify LNet */
                return;
        }

        rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);

        if (rc) {
                /* skip the notification entirely if we couldn't get the sem -
                 * LNet is in shutdown or something else is holding it
                 */
                for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                        list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
                                /* if gnn_shutdown set for any net shutdown is in progress just return */
                                if (net->gnn_shutdown) {
                                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                                        return;
                                }
                                nnets++;
                        }
                }

                if (nnets == 0) {
                        /* shutdown in progress most likely */
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        return;
                }

                LIBCFS_ALLOC(nets, nnets * sizeof(*nets));

                if (nets == NULL) {
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        CERROR("Failed to allocate nets[%d]\n", nnets);
                        return;
                }

                j = 0;
                for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                        list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
                                nets[j] = net;
                                kgnilnd_net_addref(net);
                                j++;
                        }
                }
                up_read(&kgnilnd_data.kgn_net_rw_sem);

                for (i = 0; i < nnets; i++) {
                        lnet_nid_t peer_nid;

                        net = nets[i];

                        peer_nid = kgnilnd_lnd2lnetnid(net->gnn_ni->ni_nid,
                                                       peer->gnp_nid);

                        CDEBUG(D_NET, "peer 0x%p->%s last_alive %lld (%llds ago)\n",
                                peer, libcfs_nid2str(peer_nid), peer->gnp_last_alive,
                                ktime_get_seconds() - peer->gnp_last_alive);

                        lnet_notify(net->gnn_ni, peer_nid, alive,
                                    (alive) ? true : false,
                                    peer->gnp_last_alive);

                        kgnilnd_net_decref(net);
                }

                LIBCFS_FREE(nets, nnets * sizeof(*nets));
        }
}

/* need write_lock on kgn_peer_conn_lock */
void
kgnilnd_close_conn_locked(kgn_conn_t *conn, int error)
{
        kgn_peer_t        *peer = conn->gnc_peer;
        ENTRY;

        LASSERT(!in_interrupt());

        /* store error for tx completion */
        conn->gnc_error = error;
        peer->gnp_last_errno = error;

        /* use real error from peer if possible */
        if (error == -ECONNRESET) {
                error = conn->gnc_peer_error;
        }

        /* if we NETERROR, make sure it is rate limited */
        if (!kgnilnd_conn_clean_errno(error) &&
            peer->gnp_state != GNILND_PEER_DOWN) {
                CNETERR("closing conn to %s: error %d\n",
                       libcfs_nid2str(peer->gnp_nid), error);
        } else {
                CDEBUG(D_NET, "closing conn to %s: error %d\n",
                       libcfs_nid2str(peer->gnp_nid), error);
        }

        LASSERTF(conn->gnc_state == GNILND_CONN_ESTABLISHED,
                "conn %p to %s with bogus state %s\n", conn,
                libcfs_nid2str(conn->gnc_peer->gnp_nid),
                kgnilnd_conn_state2str(conn));
        LASSERT(!list_empty(&conn->gnc_hashlist));
        LASSERT(!list_empty(&conn->gnc_list));

        /* mark peer count here so any place the EP gets destroyed will
         * open up the peer count so that a new ESTABLISHED conn is then free
         * to send new messages -- sending before the previous EPs are destroyed
         * could end up with messages on the network for the old conn _after_
         * the new conn and break the mbox safety protocol */
        kgnilnd_admin_addref(conn->gnc_peer->gnp_dirty_eps);

        /* Remove from conn hash table: no new callbacks */
        list_del_init(&conn->gnc_hashlist);
        kgnilnd_data.kgn_conn_version++;
        kgnilnd_conn_decref(conn);

        /* if we are in reset, go right to CLOSED as there is no scheduler
         * thread to move from CLOSING to CLOSED */
        if (unlikely(kgnilnd_data.kgn_in_reset)) {
                conn->gnc_state = GNILND_CONN_CLOSED;
        } else {
                conn->gnc_state = GNILND_CONN_CLOSING;
        }

        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RDMA_CQ_ERROR)) {
                msleep_interruptible(MSEC_PER_SEC);
        }

        /* leave on peer->gnp_conns to make sure we don't let the reaper
         * or others try to unlink this peer until the conn is fully
         * processed for closing */
        if (kgnilnd_check_purgatory_conn(conn)) {
                kgnilnd_add_purgatory_locked(conn, conn->gnc_peer);
        }

        /* Reset RX timeout to ensure we wait for an incoming CLOSE
         * for the full timeout.  If we get a CLOSE we know the
         * peer has stopped all RDMA.  Otherwise if we wait for
         * the full timeout we can also be sure all RDMA has stopped. */
        conn->gnc_last_rx = conn->gnc_last_rx_cq = jiffies;
        mb();

        /* schedule sending CLOSE - if we are in quiesce, this adds to
         * gnd_ready_conns and allows us to find it in quiesce processing */
        kgnilnd_schedule_conn(conn);

        EXIT;
}

void
kgnilnd_close_conn(kgn_conn_t *conn, int error)
{
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        /* need to check the state here - this call is racy and we don't
         * know the state until after the lock is grabbed */
        if (conn->gnc_state == GNILND_CONN_ESTABLISHED) {
                kgnilnd_close_conn_locked(conn, error);
        }
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
}

void
kgnilnd_complete_closed_conn(kgn_conn_t *conn)
{
        LIST_HEAD(sinners);
        kgn_tx_t               *tx, *txn;
        int                     nlive = 0;
        int                     nrdma = 0;
        int                     nq_rdma = 0;
        int                     logmsg;
        ENTRY;

        /* Dump log on cksum error - wait until complete phase to let
         * RX of error happen */
        if (*kgnilnd_tunables.kgn_checksum_dump &&
            (conn != NULL && conn->gnc_peer_error == -ENOKEY)) {
                libcfs_debug_dumplog();
        }

        /* _CLOSED set in kgnilnd_process_fmaq once we decide to
         * send the CLOSE or not */
        LASSERTF(conn->gnc_state == GNILND_CONN_CLOSED,
                 "conn 0x%p->%s with bad state %s\n",
                 conn, conn->gnc_peer ?
                        libcfs_nid2str(conn->gnc_peer->gnp_nid) :
                        "<?>",
                 kgnilnd_conn_state2str(conn));

        LASSERT(list_empty(&conn->gnc_hashlist));
        /* We shouldn't be on the delay list; the conn can only get added
         * to that list during a retransmit, and retransmits only occur
         * within scheduler threads.
         */
        LASSERT(list_empty(&conn->gnc_delaylist));

        /* we've sent the close, start nuking */
        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SCHEDULE_COMPLETE))
                kgnilnd_schedule_conn(conn);

        if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
                CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
                                "done; attempting to recover conn 0x%p "
                                "scheduled %d function: %s line: %d\n", conn,
                                conn->gnc_scheduled, conn->gnc_sched_caller,
                                conn->gnc_sched_line);
                RETURN_EXIT;
        }

        /* we don't use lists to track things that we can get out of the
         * tx_ref table... */

        /* need to hold locks for tx_list_state, sampling it is too racy:
         * - the lock actually protects tx != NULL, but we can't take the proper
         *   lock until we check tx_list_state, which would be too late and
         *   we could have the TX change under us.
         * gnd_rdmaq_lock and gnd_lock are not used together, so taking both
         * should be fine */
        spin_lock(&conn->gnc_device->gnd_rdmaq_lock);
        spin_lock(&conn->gnc_device->gnd_lock);

        for (nrdma = 0; nrdma < GNILND_MAX_MSG_ID; nrdma++) {
                tx = conn->gnc_tx_ref_table[nrdma];

                if (tx != NULL) {
                        /* only print the first error and if not CLOSE, we often don't see
                         * CQ events for that by the time we get here... and really don't care */
                        if (nlive || tx->tx_msg.gnm_type == GNILND_MSG_CLOSE)
                                tx->tx_state |= GNILND_TX_QUIET_ERROR;
                        nlive++;
                        GNIDBG_TX(D_NET, tx, "cleaning up on close, nlive %d", nlive);

                        /* don't worry about gnc_lock here as nobody else should be
                         * touching this conn */
                        kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
                        list_add_tail(&tx->tx_list, &sinners);
                }
        }
        spin_unlock(&conn->gnc_device->gnd_lock);
        spin_unlock(&conn->gnc_device->gnd_rdmaq_lock);

        /* nobody should have marked this as needing scheduling after
         * we called close - so only ref should be us handling it */
        if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
                CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
                                "done; attempting to recover conn 0x%p "
                                "scheduled %d function %s line: %d\n", conn,
                                conn->gnc_scheduled, conn->gnc_sched_caller,
                                conn->gnc_sched_line);
        }
        /* now reset a few to actual counters... */
        nrdma = atomic_read(&conn->gnc_nlive_rdma);
        nq_rdma = atomic_read(&conn->gnc_nq_rdma);

        if (!list_empty(&sinners)) {
                list_for_each_entry_safe(tx, txn, &sinners, tx_list) {
                        /* clear tx_list to make tx_add_list_locked happy */
                        list_del_init(&tx->tx_list);
                        /* The error codes determine if we hold onto the MDD */
                        kgnilnd_tx_done(tx, conn->gnc_error);
                }
        }

        logmsg = (nlive + nrdma + nq_rdma);

        if (logmsg) {
                int level = conn->gnc_peer->gnp_state == GNILND_PEER_UP ?
                                D_NETERROR : D_NET;
                CDEBUG(level, "Closed conn 0x%p->%s (errno %d,"
                        " peer errno %d): canceled %d TX, %d/%d RDMA\n",
                        conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                        conn->gnc_error, conn->gnc_peer_error,
                        nlive, nq_rdma, nrdma);
        }

        kgnilnd_destroy_conn_ep(conn);

        /* Bug 765042 - race this with completing a new conn to same peer - we need
         * finish_connect to detach purgatory before we can do it ourselves here */
        CFS_RACE(CFS_FAIL_GNI_FINISH_PURG);

        /* now it is safe to remove from peer list - anyone looking at
         * gnp_conns now is free to unlink if not on purgatory */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);

        conn->gnc_state = GNILND_CONN_DONE;

        /* Decrement counter if we are marked by del_conn_or_peers for closing
         */
        if (conn->gnc_needs_closing)
                kgnilnd_admin_decref(kgnilnd_data.kgn_npending_conns);

        /* Remove from peer's list of valid connections if it's not in purgatory */
        if (!conn->gnc_in_purgatory) {
                list_del_init(&conn->gnc_list);
                /* Lose peer's reference on the conn */
                kgnilnd_conn_decref(conn);
        }

        /* NB - only unlinking if we set pending in del_peer_locked from admin or
         * shutdown */
        if (kgnilnd_peer_active(conn->gnc_peer) &&
            conn->gnc_peer->gnp_pending_unlink &&
            kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
                kgnilnd_unlink_peer_locked(conn->gnc_peer);
        }

        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        /* I'm telling Mommy! - use peer_error if they initiated close */
        kgnilnd_peer_notify(conn->gnc_peer,
                            conn->gnc_error == -ECONNRESET ?
                            conn->gnc_peer_error : conn->gnc_error, 0);

        EXIT;
}

int
kgnilnd_set_conn_params(kgn_dgram_t *dgram)
{
        kgn_conn_t             *conn = dgram->gndg_conn;
        kgn_connreq_t          *connreq = &dgram->gndg_conn_in;
        kgn_gniparams_t        *rem_param = &connreq->gncr_gnparams;
        gni_return_t            rrc;
        int                     rc = 0;
        gni_smsg_attr_t        *remote = &connreq->gncr_gnparams.gnpr_smsg_attr;

        /* set timeout vals in conn early so we can use them for the NAK */

        /* use max of the requested and our timeout, peer will do the same */
        conn->gnc_timeout = max(conn->gnc_timeout, connreq->gncr_timeout);

        /* only ep_bind really mucks around with the CQ */
        /* only ep_bind if we are not connecting to ourselves and the dstnid is
         * not a wildcard. This check is necessary because an EP can only be
         * bound once, and we must make sure we don't bind when already bound.
         */
        if (connreq->gncr_dstnid != LNET_NID_ANY && dgram->gndg_conn_out.gncr_dstnid != connreq->gncr_srcnid) {
                mutex_lock(&conn->gnc_device->gnd_cq_mutex);
                rrc = kgnilnd_ep_bind(conn->gnc_ephandle,
                        connreq->gncr_gnparams.gnpr_host_id,
                        conn->gnc_cqid);
                mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                if (rrc != GNI_RC_SUCCESS) {
                        rc = -ECONNABORTED;
                        goto return_out;
                }
        }

        rrc = kgnilnd_ep_set_eventdata(conn->gnc_ephandle, conn->gnc_cqid,
                         connreq->gncr_gnparams.gnpr_cqid);
        if (rrc != GNI_RC_SUCCESS) {
                rc = -ECONNABORTED;
                goto cleanup_out;
        }

        /* Initialize SMSG */
        rrc = kgnilnd_smsg_init(conn->gnc_ephandle, &conn->gnpr_smsg_attr,
                        &connreq->gncr_gnparams.gnpr_smsg_attr);
        if (unlikely(rrc == GNI_RC_INVALID_PARAM)) {
                gni_smsg_attr_t *local = &conn->gnpr_smsg_attr;

                /* help folks figure out if there is a tunable off, etc. */
                LCONSOLE_ERROR("SMSG attribute mismatch. Data from local/remote:"
                               " type %d/%d msg_maxsize %u/%u"
                               " mbox_maxcredit %u/%u. Please check kgni"
                               " logs for further data\n",
                               local->msg_type, remote->msg_type,
                               local->msg_maxsize, remote->msg_maxsize,
                               local->mbox_maxcredit, remote->mbox_maxcredit);
        }
        if (rrc != GNI_RC_SUCCESS) {
                rc = -ECONNABORTED;
                goto cleanup_out;
        }

        /* log this for help in debugging SMSG buffer re-use */
        CDEBUG(D_NET, "conn %p src %s dst %s smsg %p acquired"
                " local cqid %u SMSG %p->%u hndl %#llx.%#llx"
                " remote cqid %u SMSG %p->%u hndl %#llx.%#llx\n",
                conn, libcfs_nid2str(connreq->gncr_srcnid),
                libcfs_nid2str(connreq->gncr_dstnid),
                &conn->gnpr_smsg_attr,
                conn->gnc_cqid,
                conn->gnpr_smsg_attr.msg_buffer,
                conn->gnpr_smsg_attr.mbox_offset,
                conn->gnpr_smsg_attr.mem_hndl.qword1,
                conn->gnpr_smsg_attr.mem_hndl.qword2,
                rem_param->gnpr_cqid,
                rem_param->gnpr_smsg_attr.msg_buffer,
                rem_param->gnpr_smsg_attr.mbox_offset,
                rem_param->gnpr_smsg_attr.mem_hndl.qword1,
                rem_param->gnpr_smsg_attr.mem_hndl.qword2);

        conn->gnc_peerstamp = connreq->gncr_peerstamp;
        conn->gnc_peer_connstamp = connreq->gncr_connstamp;
        conn->remote_mbox_addr = (void *)((char *)remote->msg_buffer + remote->mbox_offset);

        /* We update the reaper timeout once we have a valid conn and timeout */
        kgnilnd_update_reaper_timeout(GNILND_TO2KA(conn->gnc_timeout));

        return 0;

cleanup_out:
        rrc = kgnilnd_ep_unbind(conn->gnc_ephandle);
        /* not sure I can just let this fly */
        LASSERTF(rrc == GNI_RC_SUCCESS,
                "bad rc from gni_ep_unbind trying to cleanup: %d\n", rrc);

return_out:
        LASSERTF(rc != 0, "SOFTWARE BUG: rc == 0\n");
        CERROR("Error setting connection params from %s: %d\n",
               libcfs_nid2str(connreq->gncr_srcnid), rc);
        return rc;
}

/* needs down_read on kgn_net_rw_sem held from before this call until
 * after the write_lock on kgn_peer_conn_lock - this ensures we stay sane
 * with kgnilnd_shutdown - it'll get the sem and set shutdown, then get the
 * kgn_peer_conn_lock to start del_peer'ing. If we hold the sem until after
 * kgn_peer_conn_lock is held, we guarantee that nobody calls
 * kgnilnd_add_peer_locked without checking gnn_shutdown */
int
kgnilnd_create_peer_safe(kgn_peer_t **peerp,
                         lnet_nid_t nid,
                         kgn_net_t *net,
                         int node_state)
{
        kgn_peer_t      *peer;
        int             rc;

        LASSERT(nid != LNET_NID_ANY);

        /* We don't pass the net around in the dgram anymore, so this is where
         * we look it up. The lookup will fail if we are in shutdown or the nid
         * has an invalid net; either way an error code must be returned.
         *
         * If the net passed in is not NULL then we can use it; this saves the
         * lookup when the calling function already has access to the data.
         */
        if (net == NULL) {
                rc = kgnilnd_find_net(nid, &net);
                if (rc < 0)
                        return rc;
        } else {
                /* find_net adds a reference on the net; if we are not using
                 * it we must take that reference manually so the net refcounts
                 * are correct when tearing down the net
                 */
                kgnilnd_net_addref(net);
        }

        LIBCFS_ALLOC(peer, sizeof(*peer));
        if (peer == NULL) {
                kgnilnd_net_decref(net);
                return -ENOMEM;
        }
        peer->gnp_nid = nid;
        peer->gnp_state = node_state;

        /* translate from nid to nic addr & store */
        rc = kgnilnd_nid_to_nicaddrs(LNET_NIDADDR(nid), 1, &peer->gnp_host_id);
        if (rc <= 0) {
                kgnilnd_net_decref(net);
                LIBCFS_FREE(peer, sizeof(*peer));
                return -ESRCH;
        }
        CDEBUG(D_NET, "peer 0x%p->%s -> NIC 0x%x\n", peer,
                libcfs_nid2str(nid), peer->gnp_host_id);

        atomic_set(&peer->gnp_refcount, 1);     /* 1 ref for caller */
        atomic_set(&peer->gnp_dirty_eps, 0);

        INIT_LIST_HEAD(&peer->gnp_list);
        INIT_LIST_HEAD(&peer->gnp_connd_list);
        INIT_LIST_HEAD(&peer->gnp_conns);
        INIT_LIST_HEAD(&peer->gnp_tx_queue);

        /* the first reconnect should happen immediately, so we leave
         * gnp_reconnect_interval set to 0 */

        LASSERTF(net != NULL, "peer 0x%p->%s with NULL net\n",
                 peer, libcfs_nid2str(nid));

        /* must have kgn_net_rw_sem held for this...  */
        if (net->gnn_shutdown) {
                /* shutdown has started already */
                kgnilnd_net_decref(net);
                LIBCFS_FREE(peer, sizeof(*peer));
                return -ESHUTDOWN;
        }

        peer->gnp_net = net;

        atomic_inc(&kgnilnd_data.kgn_npeers);

        *peerp = peer;
        return 0;
}
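
/* Hedged usage sketch (assumed caller pattern): create a peer stub under
 * kgn_net_rw_sem, letting the helper look the net up from the nid.  The
 * node_state value is illustrative, and real callers keep the sem held until
 * after taking kgn_peer_conn_lock, per the comment above the function;
 * that part is abbreviated here. */
#if 0
{
        kgn_peer_t *peer = NULL;
        int rc;

        down_read(&kgnilnd_data.kgn_net_rw_sem);
        rc = kgnilnd_create_peer_safe(&peer, nid, NULL, GNILND_PEER_UP);
        up_read(&kgnilnd_data.kgn_net_rw_sem);
        if (rc != 0)
                return rc;
}
#endif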

void
kgnilnd_destroy_peer(kgn_peer_t *peer)
{
        CDEBUG(D_NET, "peer %s %p deleted\n",
               libcfs_nid2str(peer->gnp_nid), peer);
        LASSERTF(atomic_read(&peer->gnp_refcount) == 0,
                 "peer 0x%p->%s refs %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid),
                 atomic_read(&peer->gnp_refcount));
        LASSERTF(atomic_read(&peer->gnp_dirty_eps) == 0,
                 "peer 0x%p->%s dirty eps %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid),
                 atomic_read(&peer->gnp_dirty_eps));
        LASSERTF(peer->gnp_net != NULL, "peer %p (%s) with NULL net\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(!kgnilnd_peer_active(peer),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE || peer->gnp_connecting == GNILND_PEER_KILL,
                 "peer 0x%p->%s, connecting %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
        LASSERTF(list_empty(&peer->gnp_conns),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_tx_queue),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_connd_list),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));

        /* NB a peer's connections keep a reference on their peer until
         * they are destroyed, so we can be assured that _all_ state to do
         * with this peer has been cleaned up when its refcount drops to
         * zero. */

        atomic_dec(&kgnilnd_data.kgn_npeers);
        kgnilnd_net_decref(peer->gnp_net);

        LIBCFS_FREE(peer, sizeof(*peer));
}

/* the conn might not have made it all the way through to a connected
 * state - but we need to purgatory any conn that a remote peer might
 * have seen through a posted dgram as well */
void
kgnilnd_add_purgatory_locked(kgn_conn_t *conn, kgn_peer_t *peer)
{
        kgn_mbox_info_t *mbox = NULL;
        ENTRY;

        /* NB - the caller should own the conn by removing it from the
         * scheduler thread when finishing the close */

        LASSERTF(peer != NULL, "conn %p with NULL peer\n", conn);

        /* If this is still true, need to add the calls to unlink back in and
         * figure out how to close the hole on loopback conns */
        LASSERTF(kgnilnd_peer_active(peer), "can't use inactive peer %s (%p)"
                " we'll never recover the resources\n",
                libcfs_nid2str(peer->gnp_nid), peer);

        CDEBUG(D_NET, "conn %p peer %p dev %p\n", conn, peer,
                conn->gnc_device);

        LASSERTF(conn->gnc_in_purgatory == 0,
                "Conn already in purgatory\n");
        conn->gnc_in_purgatory = 1;

        mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
        mbox->mbx_prev_purg_nid = peer->gnp_nid;
        mbox->mbx_add_purgatory = jiffies;
        kgnilnd_release_mbox(conn, 1);

        LASSERTF(list_empty(&conn->gnc_mdd_list),
                "conn 0x%p->%s with active purgatory hold MDD %d\n",
                conn, libcfs_nid2str(peer->gnp_nid),
                kgnilnd_count_list(&conn->gnc_mdd_list));

        EXIT;
}

/* Instead of detaching everything from purgatory here we just mark the conn
 * as needing detach; when the reaper checks the conn the next time, it will
 * detach it.
 * Calling function requires write_lock held on kgn_peer_conn_lock
 */
void
kgnilnd_mark_for_detach_purgatory_all_locked(kgn_peer_t *peer)
{
        kgn_conn_t       *conn;

        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_in_purgatory && !conn->gnc_needs_detach) {
                        conn->gnc_needs_detach = 1;
                        kgnilnd_admin_addref(kgnilnd_data.kgn_npending_detach);
                }
        }
}

/* Calling function needs a write_lock held on kgn_peer_conn_lock */
void
kgnilnd_detach_purgatory_locked(kgn_conn_t *conn, struct list_head *conn_list)
{
        kgn_mbox_info_t *mbox = NULL;

        /* if needed, add the conn purgatory data to the list passed in */
        if (conn->gnc_in_purgatory) {
                CDEBUG(D_NET, "peer %p->%s purg_conn %p@%s mdd_list #tx %d\n",
                        conn->gnc_peer, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                        conn, kgnilnd_conn_state2str(conn),
                        kgnilnd_count_list(&conn->gnc_mdd_list));

                mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
                mbox->mbx_detach_of_purgatory = jiffies;

                /* conn->gnc_list is the entry point on peer->gnp_conns, so detaching it
                 * here removes it from the list of 'valid' peer connections.
                 * We put the current conn onto a list of conns to call kgnilnd_release_purgatory_locked()
                 * on, and as such the caller of kgnilnd_detach_purgatory_locked() now owns that conn,
                 * since it's not on the peer's conn_list anymore.
                 */

                list_del_init(&conn->gnc_list);

                /* NB - only unlinking if we set pending in del_peer_locked from admin or
                 * shutdown */
                if (kgnilnd_peer_active(conn->gnc_peer) &&
                    conn->gnc_peer->gnp_pending_unlink &&
                    kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
                        kgnilnd_unlink_peer_locked(conn->gnc_peer);
                }
                /* The reaper will not call detach unless the conn is fully through kgnilnd_complete_closed_conn.
                 * If the conn is not in a DONE state somehow we are attempting to detach even though
                 * the conn has not been fully cleaned up. If we detach while the conn is still closing
                 * we will end up with an orphaned connection that has a valid ep_handle and is not on a
                 * peer.
                 */

                LASSERTF(conn->gnc_state == GNILND_CONN_DONE, "Conn in invalid state %p@%s\n",
                                conn, kgnilnd_conn_state2str(conn));

                /* move from peer to the delayed release list */
                list_add_tail(&conn->gnc_list, conn_list);
        }
}

void
kgnilnd_release_purgatory_list(struct list_head *conn_list)
{
        kgn_device_t            *dev;
        kgn_conn_t              *conn, *connN;
        kgn_mdd_purgatory_t     *gmp, *gmpN;

        list_for_each_entry_safe(conn, connN, conn_list, gnc_list) {
                dev = conn->gnc_device;

                kgnilnd_release_mbox(conn, -1);
                conn->gnc_in_purgatory = 0;

                list_del_init(&conn->gnc_list);

                /* gnc_needs_detach is set in kgnilnd_del_conn_or_peer. It is used to keep track
                 * of conns that have been marked for detach by kgnilnd_del_conn_or_peer.
                 * The function uses kgn_npending_detach to verify the conn has
                 * actually been detached.
                 */

                if (conn->gnc_needs_detach)
                        kgnilnd_admin_decref(kgnilnd_data.kgn_npending_detach);

                /* if this guy is really dead (we are doing release from reaper),
                 * make sure we tell LNet - if this is from other context,
                 * the checks in the function will prevent an errant
                 * notification */
                kgnilnd_peer_notify(conn->gnc_peer, conn->gnc_error, 0);

                list_for_each_entry_safe(gmp, gmpN, &conn->gnc_mdd_list,
                                         gmp_list) {
                        CDEBUG(D_NET,
                               "dev %p releasing held mdd %#llx.%#llx\n",
                               conn->gnc_device, gmp->gmp_map_key.qword1,
                               gmp->gmp_map_key.qword2);

                        atomic_dec(&dev->gnd_n_mdd_held);
                        kgnilnd_mem_mdd_release(conn->gnc_device->gnd_handle,
                                                &gmp->gmp_map_key);
                        /* ignoring the return code - if kgni/ghal can't find it
                         * it must be released already */

                        list_del_init(&gmp->gmp_list);
                        LIBCFS_FREE(gmp, sizeof(*gmp));
                }
                /* lose conn ref for purgatory */
                kgnilnd_conn_decref(conn);
        }
}

/* needs write_lock on kgnilnd_data.kgn_peer_conn_lock held */
void
kgnilnd_peer_increase_reconnect_locked(kgn_peer_t *peer)
{
        int current_to;

        current_to = peer->gnp_reconnect_interval;

        /* we'll try to reconnect fast the first time, then back-off */
        if (current_to == 0) {
                peer->gnp_reconnect_time = jiffies - 1;
                current_to = *kgnilnd_tunables.kgn_min_reconnect_interval;
        } else {
                peer->gnp_reconnect_time = jiffies + cfs_time_seconds(current_to);
                /* add 50% of min timeout & retry */
                current_to += *kgnilnd_tunables.kgn_min_reconnect_interval / 2;
        }

        current_to = min(current_to,
                         *kgnilnd_tunables.kgn_max_reconnect_interval);

        peer->gnp_reconnect_interval = current_to;
        CDEBUG(D_NET, "peer %s can reconnect at %lu interval %lu\n",
               libcfs_nid2str(peer->gnp_nid), peer->gnp_reconnect_time,
               peer->gnp_reconnect_interval);
}
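
/* Worked example of the back-off above (tunable values are illustrative,
 * assuming kgn_min_reconnect_interval = 60 and kgn_max_reconnect_interval
 * = 600):
 *   1st failure: reconnect immediately, interval becomes 60s
 *   2nd failure: wait 60s,  interval becomes 90s
 *   3rd failure: wait 90s,  interval becomes 120s
 *   ... growing linearly by 30s (min/2) each time until capped at 600s. */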

/* needs kgnilnd_data.kgn_peer_conn_lock held */
kgn_peer_t *
kgnilnd_find_peer_locked(lnet_nid_t nid)
{
        struct list_head *peer_list = kgnilnd_nid2peerlist(nid);
        kgn_peer_t       *peer;

        /* Chopping nid down to only NIDADDR using LNET_NIDADDR so we only
         * have a single peer per device instead of a peer per nid/net combo.
         */

        list_for_each_entry(peer, peer_list, gnp_list) {
                if (LNET_NIDADDR(nid) != LNET_NIDADDR(peer->gnp_nid))
                        continue;

                CDEBUG(D_NET, "got peer [%p] -> %s c %d (%d)\n",
                       peer, libcfs_nid2str(nid),
                       peer->gnp_connecting,
                       atomic_read(&peer->gnp_refcount));
                return peer;
        }
        return NULL;
}
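
/* For example (illustrative nids): with LNET_NIDADDR stripping the network
 * part, 512@gni and 512@gni1 resolve to the same peer entry, giving one peer
 * per NIC address rather than one per nid/net combination. */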

/* need write_lock on kgn_peer_conn_lock */
void
kgnilnd_unlink_peer_locked(kgn_peer_t *peer)
{
        LASSERTF(list_empty(&peer->gnp_conns),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_tx_queue),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(kgnilnd_peer_active(peer),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        CDEBUG(D_NET, "unlinking peer 0x%p->%s\n",
                peer, libcfs_nid2str(peer->gnp_nid));

        list_del_init(&peer->gnp_list);
        kgnilnd_data.kgn_peer_version++;
        kgnilnd_admin_decref(kgnilnd_data.kgn_npending_unlink);
        /* lose peerlist's ref */
        kgnilnd_peer_decref(peer);
}
1319
1320 int
1321 kgnilnd_get_peer_info(int index,
1322                       kgn_peer_t **found_peer,
1323                       lnet_nid_t *id, __u32 *nic_addr,
1324                       int *refcount, int *connecting)
1325 {
1326         struct list_head  *ptmp;
1327         kgn_peer_t        *peer;
1328         int               i;
1329         int               rc = -ENOENT;
1330
1331         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1332
1333         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1334
1335                 list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
1336                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1337
1338                         if (index-- > 0)
1339                                 continue;
1340
1341                         CDEBUG(D_NET, "found peer %p (%s) at index %d\n",
1342                                peer, libcfs_nid2str(peer->gnp_nid), index);
1343
1344                         *found_peer  = peer;
1345                         *id          = peer->gnp_nid;
1346                         *nic_addr    = peer->gnp_host_id;
1347                         *refcount    = atomic_read(&peer->gnp_refcount);
1348                         *connecting  = peer->gnp_connecting;
1349
1350                         rc = 0;
1351                         goto out;
1352                 }
1353         }
1354 out:
1355         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1356         if (rc)
1357                 CDEBUG(D_NET, "no gni peer at index %d\n", index);
1358         return rc;
1359 }
1360
1361 /* requires write_lock on kgn_peer_conn_lock held */
1362 void
1363 kgnilnd_add_peer_locked(lnet_nid_t nid, kgn_peer_t *new_stub_peer, kgn_peer_t **peerp)
1364 {
1365         kgn_peer_t        *peer, *peer2;
1366
1367         LASSERTF(new_stub_peer != NULL, "bad stub peer for nid %s\n",
1368                  libcfs_nid2str(nid));
1369
1370         peer2 = kgnilnd_find_peer_locked(nid);
1371         if (peer2 != NULL) {
1372                 /* A peer was created during the lock transition, so drop
1373                  * the new one we created */
1374                 kgnilnd_peer_decref(new_stub_peer);
1375                 peer = peer2;
1376         } else {
1377                 peer = new_stub_peer;
1378                 /* peer table takes existing ref on peer */
1379
1380                 LASSERTF(!kgnilnd_peer_active(peer),
1381                         "peer 0x%p->%s already in peer table\n",
1382                         peer, libcfs_nid2str(peer->gnp_nid));
1383                 list_add_tail(&peer->gnp_list,
1384                               kgnilnd_nid2peerlist(nid));
1385                 kgnilnd_data.kgn_peer_version++;
1386         }
1387
1388         LASSERTF(peer->gnp_net != NULL, "peer 0x%p->%s with NULL net\n",
1389                  peer, libcfs_nid2str(peer->gnp_nid));
1390         *peerp = peer;
1391 }
1392
1393 int
1394 kgnilnd_add_peer(kgn_net_t *net, lnet_nid_t nid, kgn_peer_t **peerp)
1395 {
1396         kgn_peer_t        *peer;
1397         int                rc;
1398         int                node_state;
1399         ENTRY;
1400
1401         if (nid == LNET_NID_ANY)
1402                 RETURN(-EINVAL);
1403
1404         node_state = kgnilnd_get_node_state(LNET_NIDADDR(nid));
1405
1406         /* NB - this will not block during normal operations -
1407          * the only writer of this is in the startup/shutdown path. */
1408         rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
1409         if (!rc) {
1410                 rc = -ESHUTDOWN;
1411                 RETURN(rc);
1412         }
1413         rc = kgnilnd_create_peer_safe(&peer, nid, net, node_state);
1414         if (rc != 0) {
1415                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1416                 RETURN(rc);
1417         }
1418
1419         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1420         up_read(&kgnilnd_data.kgn_net_rw_sem);
1421
1422         kgnilnd_add_peer_locked(nid, peer, peerp);
1423
1424         CDEBUG(D_NET, "peer 0x%p->%s connecting %d\n",
1425                *peerp, libcfs_nid2str((*peerp)->gnp_nid),
1426                (*peerp)->gnp_connecting);
1427
1428         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1429         RETURN(0);
1430 }
1431
1432 /* needs write_lock on kgn_peer_conn_lock */
1433 void
1434 kgnilnd_cancel_peer_connect_locked(kgn_peer_t *peer, struct list_head *zombies)
1435 {
1436         kgn_tx_t        *tx, *txn;
1437
1438         /* we do care about the state of gnp_connecting - we could be
1439          * between reconnect attempts, so try to find the dgram and cancel
1440          * the TX anyway. If we are in the process of posting, DON'T do
1441          * anything; once the post fails or succeeds we can nuke the
1442          * connect attempt. We have no idea where in kgnilnd_post_dgram we
1443          * are, so we can't attempt to cancel until that function is done.
1444          */
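        /* Summary of the transitions below:
         *   GNILND_PEER_POSTING or NEEDS_DEATH -> NEEDS_DEATH
         *       (the posting thread finishes the cancel for us)
         *   any other state                    -> IDLE
         *       (we cancel the dgram ourselves, right here) */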
1445
1446         /* make sure the peer isn't in the process of connecting or waiting for a connect */
1447         spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1448         if (!(list_empty(&peer->gnp_connd_list))) {
1449                 list_del_init(&peer->gnp_connd_list);
1450                 /* remove connd ref */
1451                 kgnilnd_peer_decref(peer);
1452         }
1453         spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1454
1455         if (peer->gnp_connecting == GNILND_PEER_POSTING || peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH) {
1456                 peer->gnp_connecting = GNILND_PEER_NEEDS_DEATH;
1457                 /* We are in the process of posting right now; the xchg
1458                  * set it up for us to cancel the connect, so we are finished for now */
1459         } else {
1460                 /* no need for an exchange - we hold the peer lock and it's ready for us to nuke */
1461                 LASSERTF(peer->gnp_connecting != GNILND_PEER_POSTING,
1462                         "Peer in invalid state 0x%p->%s, connecting %d\n",
1463                         peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
1464                 peer->gnp_connecting = GNILND_PEER_IDLE;
1465                 set_mb(peer->gnp_last_dgram_errno, -ETIMEDOUT);
1466                 kgnilnd_find_and_cancel_dgram(peer->gnp_net->gnn_dev,
1467                                                       peer->gnp_nid);
1468         }
1469
1470         /* The least we can do is nuke the TXs no matter what. */
1471         list_for_each_entry_safe(tx, txn, &peer->gnp_tx_queue, tx_list) {
1472                 kgnilnd_tx_del_state_locked(tx, peer, NULL,
1473                                            GNILND_TX_ALLOCD);
1474                 list_add_tail(&tx->tx_list, zombies);
1475         }
1476 }
1477
1478 /* needs write_lock on kgn_peer_conn_lock */
1479 void
1480 kgnilnd_del_peer_locked(kgn_peer_t *peer, int error)
1481 {
1482         /* this peer could be passive and only held for purgatory,
1483          * take a ref to ensure it doesn't disappear in this function */
1484         kgnilnd_peer_addref(peer);
1485
1486         CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);
1487
1488         /* if purgatory release cleared it out, don't try again */
1489         if (kgnilnd_peer_active(peer)) {
1490                 /* always do this to allow kgnilnd_start_connect and
1491                  * kgnilnd_finish_connect to catch this before they
1492                  * wrap up their operations */
1493                 if (kgnilnd_can_unlink_peer_locked(peer)) {
1494                         /* already released purgatory, so only active
1495                          * conns hold it */
1496                         kgnilnd_unlink_peer_locked(peer);
1497                 } else {
1498                         kgnilnd_close_peer_conns_locked(peer, error);
1499                         /* peer unlinks itself when last conn is closed */
1500                 }
1501         }
1502
1503         /* we are done, release back to the wild */
1504         kgnilnd_peer_decref(peer);
1505 }
1506
1507 int
1508 kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
1509                           int error)
1510 {
1511         LIST_HEAD               (souls);
1512         LIST_HEAD               (zombies);
1513         struct list_head        *ptmp, *pnxt;
1514         kgn_peer_t              *peer;
1515         int                     lo;
1516         int                     hi;
1517         int                     i;
1518         int                     rc = -ENOENT;
1519
1520         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1521
1522         if (nid != LNET_NID_ANY)
1523                 lo = hi = kgnilnd_nid2peerlist(nid) - kgnilnd_data.kgn_peers;
1524         else {
1525                 lo = 0;
1526                 hi = *kgnilnd_tunables.kgn_peer_hash_size - 1;
1527                 /* wildcards always succeed */
1528                 rc = 0;
1529         }
1530
1531         for (i = lo; i <= hi; i++) {
1532                 list_for_each_safe(ptmp, pnxt, &kgnilnd_data.kgn_peers[i]) {
1533                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1534
1535                         LASSERTF(peer->gnp_net != NULL,
1536                                 "peer %p (%s) with NULL net\n",
1537                                  peer, libcfs_nid2str(peer->gnp_nid));
1538
1539                         if (net != NULL && peer->gnp_net != net)
1540                                 continue;
1541
1542                         if (!(nid == LNET_NID_ANY || LNET_NIDADDR(peer->gnp_nid) == LNET_NIDADDR(nid)))
1543                                 continue;
1544
1545                         /* In both cases, we want to stop any in-flight
1546                          * connect attempts */
1547                         kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1548
1549                         switch (command) {
1550                         case GNILND_DEL_CONN:
1551                                 kgnilnd_close_peer_conns_locked(peer, error);
1552                                 break;
1553                         case GNILND_DEL_PEER:
1554                                 peer->gnp_pending_unlink = 1;
1555                                 kgnilnd_admin_addref(kgnilnd_data.kgn_npending_unlink);
1556                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1557                                 kgnilnd_del_peer_locked(peer, error);
1558                                 break;
1559                         case GNILND_CLEAR_PURGATORY:
1560                                 /* Mark everything ready for detach; the reaper will clean up
1561                                  * once we release the kgn_peer_conn_lock
1562                                  */
1563                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1564                                 peer->gnp_last_errno = -EISCONN;
1565                                 /* clear reconnect state so the peer can reconnect soon */
1566                                 peer->gnp_reconnect_time = 0;
1567                                 peer->gnp_reconnect_interval = 0;
1568                                 break;
1569                         default:
1570                                 CERROR("bad command %d\n", command);
1571                                 LBUG();
1572                         }
1573                         /* we matched something */
1574                         rc = 0;
1575                 }
1576         }
1577
1578         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1579
1580         /* nuke peer TX */
1581         kgnilnd_txlist_done(&zombies, error);
1582
1583         /* This function does not return until the commands it initiated have completed,
1584          * since they have to work their way through the other threads. In the case of
1585          * shutdown, threads are not woken up until after this call is initiated, so we
1586          * cannot wait - we just need to return. The same applies to stack reset: we
1587          * shouldn't wait, as the reset thread handles the closing.
1588          */
1589
1590         CFS_RACE(CFS_FAIL_GNI_RACE_RESET);
1591
1592         if (error == -ENOTRECOVERABLE || error == -ESHUTDOWN) {
1593                 return rc;
1594         }
1595
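        /* wait for the pending unlink/close/detach counts to drain; the
         * (i & (-i)) == i test below is true only when i is a power of
         * two, so the console warning fires at a decaying rate */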
1596         i = 4;
1597         while (atomic_read(&kgnilnd_data.kgn_npending_conns)   ||
1598                atomic_read(&kgnilnd_data.kgn_npending_detach)  ||
1599                atomic_read(&kgnilnd_data.kgn_npending_unlink)) {
1600
1601                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
1602                 i++;
1603
1604                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, "Waiting on %d peers %d closes %d detaches\n",
1605                                 atomic_read(&kgnilnd_data.kgn_npending_unlink),
1606                                 atomic_read(&kgnilnd_data.kgn_npending_conns),
1607                                 atomic_read(&kgnilnd_data.kgn_npending_detach));
1608         }
1609
1610         return rc;
1611 }
1612
1613 kgn_conn_t *
1614 kgnilnd_get_conn_by_idx(int index)
1615 {
1616         kgn_peer_t        *peer;
1617         struct list_head  *ptmp;
1618         kgn_conn_t        *conn;
1619         struct list_head  *ctmp;
1620         int                i;
1621
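        /* NB: kgn_peer_conn_lock is dropped between hash buckets, so an
         * index-based walk is only a best-effort snapshot; callers such
         * as the IOC_LIBCFS_GET_CONN handler iterate by bumping the
         * index and must tolerate skew if the tables change underneath */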
1623         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1624                 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1625                 list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
1626
1627                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1628
1629                         list_for_each(ctmp, &peer->gnp_conns) {
1630                                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
1631
1632                                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1633                                         continue;
1634
1635                                 if (index-- > 0)
1636                                         continue;
1637
1638                                 CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
1639                                        libcfs_nid2str(conn->gnc_peer->gnp_nid),
1640                                        atomic_read(&conn->gnc_refcount));
1641                                 kgnilnd_conn_addref(conn);
1642                                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1643                                 return conn;
1644                         }
1645                 }
1646                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1647         }
1648
1649         return NULL;
1650 }
1651
1652 int
1653 kgnilnd_get_conn_info(kgn_peer_t *peer,
1654                       int *device_id, __u64 *peerstamp,
1655                       int *tx_seq, int *rx_seq,
1656                       int *fmaq_len, int *nfma, int *nrdma)
1657 {
1658         kgn_conn_t        *conn;
1659         int               rc = 0;
1660
1661         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1662
1663         conn = kgnilnd_find_conn_locked(peer);
1664         if (conn == NULL) {
1665                 rc = -ENOENT;
1666                 goto out;
1667         }
1668
1669         *device_id = conn->gnc_device->gnd_host_id;
1670         *peerstamp = conn->gnc_peerstamp;
1671         *tx_seq = atomic_read(&conn->gnc_tx_seq);
1672         *rx_seq = atomic_read(&conn->gnc_rx_seq);
1673         *fmaq_len = kgnilnd_count_list(&conn->gnc_fmaq);
1674         *nfma = atomic_read(&conn->gnc_nlive_fma);
1675         *nrdma = atomic_read(&conn->gnc_nlive_rdma);
1676 out:
1677         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1678         return rc;
1679 }
1680
1681 /* needs write_lock on kgn_peer_conn_lock */
1682 int
1683 kgnilnd_close_peer_conns_locked(kgn_peer_t *peer, int why)
1684 {
1685         kgn_conn_t         *conn;
1686         struct list_head   *ctmp, *cnxt;
1687         int                 count = 0;
1688
1689         list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
1690                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
1691
1692                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1693                         continue;
1694
1695                 count++;
1696                 /* we mark gnc_needs_closing and increment kgn_npending_conns so that
1697                  * kgnilnd_del_conn_or_peer can wait on the other threads closing
1698                  * and cleaning up the connection.
1699                  */
1700                 if (!conn->gnc_needs_closing) {
1701                         conn->gnc_needs_closing = 1;
1702                         kgnilnd_admin_addref(kgnilnd_data.kgn_npending_conns);
1703                 }
1704                 kgnilnd_close_conn_locked(conn, why);
1705         }
1706         return count;
1707 }
1708
1709 int
1710 kgnilnd_report_node_state(lnet_nid_t nid, int down)
1711 {
1712         int         rc;
1713         kgn_peer_t  *peer, *new_peer;
1714         LIST_HEAD(zombies);
1715
1716         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1717         peer = kgnilnd_find_peer_locked(nid);
1718
1719         if (peer == NULL) {
1720                 int       i;
1721                 int       found_net = 0;
1722                 kgn_net_t *net;
1723
1724                 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1725
1726                 /* Don't add a peer for node up events */
1727                 if (down == GNILND_PEER_UP)
1728                         return 0;
1729
1730                 /* find any valid net - we don't care which one... */
1731                 down_read(&kgnilnd_data.kgn_net_rw_sem);
1732                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
1733                         list_for_each_entry(net, &kgnilnd_data.kgn_nets[i],
1734                                             gnn_list) {
1735                                 found_net = 1;
1736                                 break;
1737                         }
1738
1739                         if (found_net) {
1740                                 break;
1741                         }
1742                 }
1743                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1744
1745                 if (!found_net) {
1746                         CNETERR("Could not find a net for nid %llu\n", nid);
1747                         return 1;
1748                 }
1749
1750                 /* The nid passed in does not yet contain the net portion.
1751                  * Let's build it up now
1752                  */
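                /* e.g. (illustrative): an address of 120 on net gni
                 * becomes the full NID 120@gni via LNET_MKNID() */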
1753                 nid = LNET_MKNID(LNET_NIDNET(net->gnn_ni->ni_nid), nid);
1754                 rc = kgnilnd_add_peer(net, nid, &new_peer);
1755
1756                 if (rc) {
1757                         CNETERR("Could not add peer for nid %llu, rc %d\n",
1758                                 nid, rc);
1759                         return 1;
1760                 }
1761
1762                 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1763                 peer = kgnilnd_find_peer_locked(nid);
1764
1765                 if (peer == NULL) {
1766                         CNETERR("Could not find peer for nid %llu\n", nid);
1767                         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1768                         return 1;
1769                 }
1770         }
1771
1772         peer->gnp_state = down;
1773
1774         if (down == GNILND_PEER_DOWN) {
1775                 kgn_conn_t *conn;
1776
1777                 peer->gnp_down_event_time = jiffies;
1778                 kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1779                 conn = kgnilnd_find_conn_locked(peer);
1780
1781                 if (conn != NULL) {
1782                         kgnilnd_close_conn_locked(conn, -ENETRESET);
1783                 }
1784         } else {
1785                 peer->gnp_up_event_time = jiffies;
1786         }
1787
1788         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1789
1790         if (down == GNILND_PEER_DOWN) {
1791                 /* using ENETRESET so we don't get messages from
1792                  * kgnilnd_tx_done
1793                  */
1794                 kgnilnd_txlist_done(&zombies, -ENETRESET);
1795                 kgnilnd_peer_notify(peer, -ECONNRESET, 0);
1796                 LCONSOLE_INFO("Received down event for nid %u\n",
1797                               LNET_NIDADDR(nid));
1798         }
1799
1800         return 0;
1801 }
1802
1803 int
1804 kgnilnd_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
1805 {
1806         struct libcfs_ioctl_data *data = arg;
1807         kgn_net_t                *net = ni->ni_data;
1808         int                       rc = -EINVAL;
1809
1810         LASSERT(ni == net->gnn_ni);
1811
1812         switch (cmd) {
1813         case IOC_LIBCFS_GET_PEER: {
1814                 lnet_nid_t   nid = 0;
1815                 kgn_peer_t  *peer = NULL;
1816                 __u32 nic_addr = 0;
1817                 __u64 peerstamp = 0;
1818                 int peer_refcount = 0, peer_connecting = 0;
1819                 int device_id = 0;
1820                 int tx_seq = 0, rx_seq = 0;
1821                 int fmaq_len = 0, nfma = 0, nrdma = 0;
1822
1823                 rc = kgnilnd_get_peer_info(data->ioc_count, &peer,
1824                                            &nid, &nic_addr, &peer_refcount,
1825                                            &peer_connecting);
1826                 if (rc)
1827                         break;
1828
1829                 /* Barf */
1830                 /* LNET_MKNID is used to mask from LNet the multiplexing/demultiplexing of connections and peers.
1831                  * LNet assumes a conn and a peer per net; LNET_MKNID/LNET_NIDADDR let us show LNet what it
1832                  * wants to see instead of the underlying network that is actually used to send the data
1833                  */
1834                 data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(nid));
1835                 data->ioc_flags  = peer_connecting;
1836                 data->ioc_count  = peer_refcount;
1837
1838                 rc = kgnilnd_get_conn_info(peer, &device_id, &peerstamp,
1839                                            &tx_seq, &rx_seq, &fmaq_len,
1840                                            &nfma, &nrdma);
1841
1842                 /* This is allowable - a persistent peer might not
1843                  * have a connection */
1844                 if (rc) {
1845                         /* flag to indicate we are not connected -
1846                          * need to print as such */
1847                         data->ioc_flags |= (1<<16);
1848                         rc = 0;
1849                 } else {
1850                         /* still barf */
1851                         data->ioc_net = device_id;
1852                         data->ioc_u64[0] = peerstamp;
1853                         data->ioc_u32[0] = fmaq_len;
1854                         data->ioc_u32[1] = nfma;
1855                         data->ioc_u32[2] = tx_seq;
1856                         data->ioc_u32[3] = rx_seq;
1857                         data->ioc_u32[4] = nrdma;
1858                 }
1859                 break;
1860         }
1861         case IOC_LIBCFS_ADD_PEER: {
1862                 /* just a dummy value to allow using the common interface */
1863                 kgn_peer_t      *peer;
1864                 rc = kgnilnd_add_peer(net, data->ioc_nid, &peer);
1865                 break;
1866         }
1867         case IOC_LIBCFS_DEL_PEER: {
1868                 /* NULL is passed in so it affects all peers in existence without regard to network,
1869                  * as the peer may not exist on the network LNET believes it to be on.
1870                  */
1871                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1872                                               GNILND_DEL_PEER, -EUCLEAN);
1873                 break;
1874         }
1875         case IOC_LIBCFS_GET_CONN: {
1876                 kgn_conn_t *conn = kgnilnd_get_conn_by_idx(data->ioc_count);
1877
1878                 if (conn == NULL)
1879                         rc = -ENOENT;
1880                 else {
1881                         rc = 0;
1882                         /* LNET_MKNID is used to build the correct address based on what LNET wants to see instead of
1883                          * the generic connection that is used to send the data
1884                          */
1885                         data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(conn->gnc_peer->gnp_nid));
1886                         data->ioc_u32[0] = conn->gnc_device->gnd_id;
1887                         kgnilnd_conn_decref(conn);
1888                 }
1889                 break;
1890         }
1891         case IOC_LIBCFS_CLOSE_CONNECTION: {
1892                 /* use error = -ENETRESET to indicate it was lctl disconnect */
1893                 /* NULL is passed in so it affects all the nets, as the connection is virtual
1894                  * and may not exist on the network LNET believes it to be on.
1895                  */
1896                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1897                                               GNILND_DEL_CONN, -ENETRESET);
1898                 break;
1899         }
1900         case IOC_LIBCFS_PUSH_CONNECTION: {
1901                 /* we use this to flush purgatory */
1902                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1903                                               GNILND_CLEAR_PURGATORY, -EUCLEAN);
1904                 break;
1905         }
1906         case IOC_LIBCFS_REGISTER_MYNID: {
1907                 /* Ignore if this is a noop */
1908                 if (data->ioc_nid == ni->ni_nid) {
1909                         rc = 0;
1910                 } else {
1911                         CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
1912                                libcfs_nid2str(data->ioc_nid),
1913                                libcfs_nid2str(ni->ni_nid));
1914                         rc = -EINVAL;
1915                 }
1916                 break;
1917         }
1918         }
1919
1920         return rc;
1921 }
1922
1923 int
1924 kgnilnd_dev_init(kgn_device_t *dev)
1925 {
1926         gni_return_t      rrc;
1927         int               rc = 0;
1928         unsigned int      cq_size;
1929         ENTRY;
1930
1931         /* size of these CQs should be able to accommodate the outgoing
1932          * RDMA and SMSG transactions.  Since we don't really know what we
1933          * need here, we'll take credits * 2 * 3 to allow a bunch.
1934          * We need to dig into this more with the performance work. */
1935         cq_size = *kgnilnd_tunables.kgn_credits * 2 * 3;
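        /* e.g. with kgn_credits = 256 (illustrative, not necessarily the
         * default) this yields cq_size = 256 * 2 * 3 = 1536 entries */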
1936
1937         rrc = kgnilnd_cdm_create(dev->gnd_id, *kgnilnd_tunables.kgn_ptag,
1938                                  *kgnilnd_tunables.kgn_pkey, 0,
1939                                  &dev->gnd_domain);
1940         if (rrc != GNI_RC_SUCCESS) {
1941                 CERROR("Can't create CDM %d (%d)\n", dev->gnd_id, rrc);
1942                 GOTO(failed, rc = -ENODEV);
1943         }
1944
1945         rrc = kgnilnd_cdm_attach(dev->gnd_domain, dev->gnd_id,
1946                                  &dev->gnd_host_id, &dev->gnd_handle);
1947         if (rrc != GNI_RC_SUCCESS) {
1948                 CERROR("Can't attach CDM to device %d (%d)\n",
1949                         dev->gnd_id, rrc);
1950                 GOTO(failed, rc = -ENODEV);
1951         }
1952
1953         /* a bit gross, but not much we can do - Aries Sim doesn't have
1954          * hardcoded NIC/NID that we can use */
1955         rc = kgnilnd_setup_nic_translation(dev->gnd_host_id);
1956         if (rc != 0)
1957                 GOTO(failed, rc = -ENODEV);
1958
1959         /* only dev 0 gets the errors - no need to reset the stack twice
1960          * - this works because we have a single PTAG; if we had more,
1961          * then we'd need multiple handlers */
1962         if (dev->gnd_id == 0) {
1963                 rrc = kgnilnd_subscribe_errors(dev->gnd_handle,
1964                                                 GNI_ERRMASK_CRITICAL |
1965                                                 GNI_ERRMASK_UNKNOWN_TRANSACTION,
1966                                               0, NULL, kgnilnd_critical_error,
1967                                               &dev->gnd_err_handle);
1968                 if (rrc != GNI_RC_SUCCESS) {
1969                         CERROR("Can't subscribe for errors on device %d: rc %d\n",
1970                                 dev->gnd_id, rrc);
1971                         GOTO(failed, rc = -ENODEV);
1972                 }
1973
1974                 rc = kgnilnd_set_quiesce_callback(dev->gnd_handle,
1975                                                   kgnilnd_quiesce_end_callback);
1976                 if (rc != GNI_RC_SUCCESS) {
1977                         CERROR("Can't subscribe for quiesce callback on device %d: rc %d\n",
1978                                 dev->gnd_id, rc);
1979                         GOTO(failed, rc = -ENODEV);
1980                 }
1981         }
1982
1983         rc = kgnilnd_nicaddr_to_nid(dev->gnd_host_id, &dev->gnd_nid);
1984         if (rc < 0) {
1985                 /* log messages during startup */
1986                 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
1987                         CERROR("couldn't translate host_id 0x%x to nid. rc %d\n",
1988                                 dev->gnd_host_id, rc);
1989                 }
1990                 GOTO(failed, rc = -ESRCH);
1991         }
1992         CDEBUG(D_NET, "NIC %x -> NID %llu\n", dev->gnd_host_id, dev->gnd_nid);
1993
1994         rrc = kgnilnd_cq_create(dev->gnd_handle, *kgnilnd_tunables.kgn_credits,
1995                                 0, kgnilnd_device_callback,
1996                                 dev->gnd_id, &dev->gnd_snd_rdma_cqh);
1997         if (rrc != GNI_RC_SUCCESS) {
1998                 CERROR("Can't create rdma send cq size %u for device "
1999                        "%d (%d)\n", *kgnilnd_tunables.kgn_credits, dev->gnd_id, rrc);
2000                 GOTO(failed, rc = -EINVAL);
2001         }
2002
2003         rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
2004                         0, kgnilnd_device_callback, dev->gnd_id,
2005                         &dev->gnd_snd_fma_cqh);
2006         if (rrc != GNI_RC_SUCCESS) {
2007                 CERROR("Can't create fma send cq size %u for device %d (%d)\n",
2008                        cq_size, dev->gnd_id, rrc);
2009                 GOTO(failed, rc = -EINVAL);
2010         }
2011
2012         /* This one we size differently - overflows are possible and it needs to be
2013          * sized based on machine size */
2014         rrc = kgnilnd_cq_create(dev->gnd_handle,
2015                         *kgnilnd_tunables.kgn_fma_cq_size,
2016                         0, kgnilnd_device_callback, dev->gnd_id,
2017                         &dev->gnd_rcv_fma_cqh);
2018         if (rrc != GNI_RC_SUCCESS) {
2019                 CERROR("Can't create fma cq size %d for device %d (%d)\n",
2020                        *kgnilnd_tunables.kgn_fma_cq_size, dev->gnd_id, rrc);
2021                 GOTO(failed, rc = -EINVAL);
2022         }
2023
2024         rrc = kgnilnd_register_smdd_buf(dev);
2025         if (rrc != GNI_RC_SUCCESS) {
2026                 GOTO(failed, rc = -EINVAL);
2027         }
2028
2029         RETURN(0);
2030
2031 failed:
2032         kgnilnd_dev_fini(dev);
2033         RETURN(rc);
2034 }
2035
2036 void
2037 kgnilnd_dev_fini(kgn_device_t *dev)
2038 {
2039         gni_return_t rrc;
2040         ENTRY;
2041
2042         /* At quiesce or reset time, do we need to loop through and clear gnd_ready_conns? */
2043         LASSERTF(list_empty(&dev->gnd_ready_conns) &&
2044                  list_empty(&dev->gnd_map_tx) &&
2045                  list_empty(&dev->gnd_rdmaq) &&
2046                  list_empty(&dev->gnd_delay_conns),
2047                  "dev 0x%p ready_conns %d@0x%p delay_conns %d@0x%p "
2048                  "map_tx %d@0x%p rdmaq %d@0x%p\n",
2049                  dev, kgnilnd_count_list(&dev->gnd_ready_conns), &dev->gnd_ready_conns,
2050                  kgnilnd_count_list(&dev->gnd_delay_conns), &dev->gnd_delay_conns,
2051                  kgnilnd_count_list(&dev->gnd_map_tx), &dev->gnd_map_tx,
2052                  kgnilnd_count_list(&dev->gnd_rdmaq), &dev->gnd_rdmaq);
2053
2054         /* These should follow from tearing down all connections */
2055         LASSERTF(dev->gnd_map_nphys == 0 && dev->gnd_map_physnop == 0,
2056                 "%d physical mappings of %d pages still mapped\n",
2057                  dev->gnd_map_nphys, dev->gnd_map_physnop);
2058
2059         LASSERTF(dev->gnd_map_nvirt == 0 && dev->gnd_map_virtnob == 0,
2060                 "%d virtual mappings of %llu bytes still mapped\n",
2061                  dev->gnd_map_nvirt, dev->gnd_map_virtnob);
2062
2063         LASSERTF(atomic_read(&dev->gnd_n_mdd) == 0 &&
2064                  atomic_read(&dev->gnd_n_mdd_held) == 0 &&
2065                  atomic64_read(&dev->gnd_nbytes_map) == 0,
2066                 "%d SMSG mappings of %ld bytes still mapped or held %d\n",
2067                  atomic_read(&dev->gnd_n_mdd),
2068                  atomic64_read(&dev->gnd_nbytes_map), atomic_read(&dev->gnd_n_mdd_held));
2069
2070         LASSERT(list_empty(&dev->gnd_map_list));
2071
2072         /* What other assertions are needed to ensure all connections are torn down? */
2073
2074         /* check all counters == 0 (EP, MDD, etc) */
2075
2076         /* if we are resetting due to quiesce (stack reset), don't check
2077          * thread states */
2078         LASSERTF(kgnilnd_data.kgn_quiesce_trigger ||
2079                 atomic_read(&kgnilnd_data.kgn_nthreads) == 0,
2080                 "tried to shutdown with threads active\n");
2081
2082         if (dev->gnd_smdd_hold_buf) {
2083                 rrc = kgnilnd_deregister_smdd_buf(dev);
2084                 LASSERTF(rrc == GNI_RC_SUCCESS,
2085                         "bad rc from deregistration of sMDD buffer: %d\n", rrc);
2086                 dev->gnd_smdd_hold_buf = NULL;
2087         }
2088
2089         if (dev->gnd_rcv_fma_cqh) {
2090                 rrc = kgnilnd_cq_destroy(dev->gnd_rcv_fma_cqh);
2091                 LASSERTF(rrc == GNI_RC_SUCCESS,
2092                         "bad rc from gni_cq_destroy on rcv_fma_cqh: %d\n", rrc);
2093                 dev->gnd_rcv_fma_cqh = NULL;
2094         }
2095
2096         if (dev->gnd_snd_rdma_cqh) {
2097                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_rdma_cqh);
2098                 LASSERTF(rrc == GNI_RC_SUCCESS,
2099                         "bad rc from gni_cq_destroy on send_rdma_cqh: %d\n", rrc);
2100                 dev->gnd_snd_rdma_cqh = NULL;
2101         }
2102
2103         if (dev->gnd_snd_fma_cqh) {
2104                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_fma_cqh);
2105                 LASSERTF(rrc == GNI_RC_SUCCESS,
2106                         "bad rc from gni_cq_destroy on snd_fma_cqh: %d\n", rrc);
2107                 dev->gnd_snd_fma_cqh = NULL;
2108         }
2109
2110         if (dev->gnd_err_handle) {
2111                 rrc = kgnilnd_release_errors(dev->gnd_err_handle);
2112                 LASSERTF(rrc == GNI_RC_SUCCESS,
2113                         "bad rc from gni_release_errors: %d\n", rrc);
2114                 dev->gnd_err_handle = NULL;
2115         }
2116
2117         if (dev->gnd_domain) {
2118                 rrc = kgnilnd_cdm_destroy(dev->gnd_domain);
2119                 LASSERTF(rrc == GNI_RC_SUCCESS,
2120                         "bad rc from gni_cdm_destroy: %d\n", rrc);
2121                 dev->gnd_domain = NULL;
2122         }
2123
2124         EXIT;
2125 }
2126
2127 int kgnilnd_base_startup(void)
2128 {
2129         struct timeval       tv;
2130         int                  pkmem = atomic_read(&libcfs_kmemory);
2131         int                  rc;
2132         int                  i, j;
2133         kgn_device_t        *dev;
2134         struct task_struct  *thrd;
2135
2136 #if defined(CONFIG_CRAY_XT) && !defined(CONFIG_CRAY_COMPUTE)
2137         /* limit how much memory can be allocated for fma blocks in
2138          * instances where many nodes need to reconnect at the same time.
         * NB: the limit itself is applied below, after the memset of
         * kgnilnd_data, so the value is not wiped out by it */
2139         struct sysinfo si;
2140         si_meminfo(&si);
2142 #endif
2143
2144         ENTRY;
2145
2146         LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_NOTHING,
2147                 "init %d\n", kgnilnd_data.kgn_init);
2148
2149         /* zero pointers, flags etc */
2150         memset(&kgnilnd_data, 0, sizeof(kgnilnd_data));
#if defined(CONFIG_CRAY_XT) && !defined(CONFIG_CRAY_COMPUTE)
        /* applied here (not before the memset) so it survives the zeroing */
        kgnilnd_data.free_pages_limit = si.totalram / 4;
#endif
2151         kgnilnd_check_kgni_version();
2152
2153         /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
2154          * a unique (for all time) connstamp so we can uniquely identify
2155          * the sender.  The connstamp is an incrementing counter
2156          * initialised with seconds + microseconds at startup time.  So we
2157          * rely on NOT creating connections more frequently on average than
2158          * 1MHz to ensure we don't use old connstamps when we reboot. */
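        /* e.g. at 2012-01-01 00:00:00.000001 UTC this works out to
         * 1325376000 * 1000000 + 1 = 1325376000000001 (illustrative) */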
2159         do_gettimeofday(&tv);
2160         kgnilnd_data.kgn_connstamp =
2161                  kgnilnd_data.kgn_peerstamp =
2162                         (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
2163
2164         init_rwsem(&kgnilnd_data.kgn_net_rw_sem);
2165
2166         for (i = 0; i < GNILND_MAXDEVS; i++) {
2167                 kgn_device_t  *dev = &kgnilnd_data.kgn_devices[i];
2168
2169                 dev->gnd_id = i;
2170                 INIT_LIST_HEAD(&dev->gnd_ready_conns);
2171                 INIT_LIST_HEAD(&dev->gnd_delay_conns);
2172                 INIT_LIST_HEAD(&dev->gnd_map_tx);
2173                 INIT_LIST_HEAD(&dev->gnd_fma_buffs);
2174                 mutex_init(&dev->gnd_cq_mutex);
2175                 mutex_init(&dev->gnd_fmablk_mutex);
2176                 spin_lock_init(&dev->gnd_fmablk_lock);
2177                 init_waitqueue_head(&dev->gnd_waitq);
2178                 init_waitqueue_head(&dev->gnd_dgram_waitq);
2179                 init_waitqueue_head(&dev->gnd_dgping_waitq);
2180                 spin_lock_init(&dev->gnd_lock);
2181                 INIT_LIST_HEAD(&dev->gnd_map_list);
2182                 spin_lock_init(&dev->gnd_map_lock);
2183                 atomic_set(&dev->gnd_nfmablk, 0);
2184                 atomic_set(&dev->gnd_fmablk_vers, 1);
2185                 atomic_set(&dev->gnd_neps, 0);
2186                 atomic_set(&dev->gnd_canceled_dgrams, 0);
2187                 INIT_LIST_HEAD(&dev->gnd_connd_peers);
2188                 spin_lock_init(&dev->gnd_connd_lock);
2189                 spin_lock_init(&dev->gnd_dgram_lock);
2190                 spin_lock_init(&dev->gnd_rdmaq_lock);
2191                 INIT_LIST_HEAD(&dev->gnd_rdmaq);
2192                 init_rwsem(&dev->gnd_conn_sem);
2193
2194                 /* alloc & setup nid based dgram table */
2195                 LIBCFS_ALLOC(dev->gnd_dgrams,
2196                             sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2197
2198                 if (dev->gnd_dgrams == NULL)
2199                         GOTO(failed, rc = -ENOMEM);
2200
                /* NB: use j - i still indexes the enclosing device loop */
2201                 for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++) {
2202                         INIT_LIST_HEAD(&dev->gnd_dgrams[j]);
2203                 }
2204                 atomic_set(&dev->gnd_ndgrams, 0);
2205                 atomic_set(&dev->gnd_nwcdgrams, 0);
2206                 /* setup timer for RDMAQ processing */
2207                 setup_timer(&dev->gnd_rdmaq_timer, kgnilnd_schedule_device_timer,
2208                             (unsigned long)dev);
2209
2210                 /* setup timer for mapping processing */
2211                 setup_timer(&dev->gnd_map_timer, kgnilnd_schedule_device_timer,
2212                             (unsigned long)dev);
2213
2214         }
2215
2216         /* CQID 0 isn't allowed, set to MAX_MSG_ID - 1 to check for conflicts early */
2217         kgnilnd_data.kgn_next_cqid = GNILND_MAX_MSG_ID - 1;
2218         kgnilnd_data.kgn_new_min_timeout = *kgnilnd_tunables.kgn_timeout;
2219         init_waitqueue_head(&kgnilnd_data.kgn_reaper_waitq);
2220         init_waitqueue_head(&kgnilnd_data.kgn_ruhroh_waitq);
2221         spin_lock_init(&kgnilnd_data.kgn_reaper_lock);
2222
2223         mutex_init(&kgnilnd_data.kgn_quiesce_mutex);
2224         atomic_set(&kgnilnd_data.kgn_nquiesce, 0);
2225         atomic_set(&kgnilnd_data.kgn_npending_conns, 0);
2226         atomic_set(&kgnilnd_data.kgn_npending_unlink, 0);
2227         atomic_set(&kgnilnd_data.kgn_npending_detach, 0);
2228         atomic_set(&kgnilnd_data.kgn_rev_offset, 0);
2229         atomic_set(&kgnilnd_data.kgn_rev_length, 0);
2230         atomic_set(&kgnilnd_data.kgn_rev_copy_buff, 0);
2231
2232         /* OK to call kgnilnd_api_shutdown() to cleanup now */
2233         kgnilnd_data.kgn_init = GNILND_INIT_DATA;
2234         if (!try_module_get(THIS_MODULE))
2235                 GOTO(failed, rc = -ENOENT);
2236
2237         rwlock_init(&kgnilnd_data.kgn_peer_conn_lock);
2238
2239         LIBCFS_ALLOC(kgnilnd_data.kgn_peers,
2240                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2241
2242         if (kgnilnd_data.kgn_peers == NULL)
2243                 GOTO(failed, rc = -ENOMEM);
2244
2245         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2246                 INIT_LIST_HEAD(&kgnilnd_data.kgn_peers[i]);
2247         }
2248
2249         LIBCFS_ALLOC(kgnilnd_data.kgn_conns,
2250                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2251
2252         if (kgnilnd_data.kgn_conns == NULL)
2253                 GOTO(failed, rc = -ENOMEM);
2254
2255         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2256                 INIT_LIST_HEAD(&kgnilnd_data.kgn_conns[i]);
2257         }
2258
2259         LIBCFS_ALLOC(kgnilnd_data.kgn_nets,
2260                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_net_hash_size);
2261
2262         if (kgnilnd_data.kgn_nets == NULL)
2263                 GOTO(failed, rc = -ENOMEM);
2264
2265         for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
2266                 INIT_LIST_HEAD(&kgnilnd_data.kgn_nets[i]);
2267         }
2268
2269         kgnilnd_data.kgn_mbox_cache =
2270                 kmem_cache_create("kgn_mbox_block", GNILND_MBOX_SIZE, 0,
2271                                   SLAB_HWCACHE_ALIGN, NULL);
2272         if (kgnilnd_data.kgn_mbox_cache == NULL) {
2273                 CERROR("Can't create slab for physical mbox blocks\n");
2274                 GOTO(failed, rc = -ENOMEM);
2275         }
2276
2277         kgnilnd_data.kgn_rx_cache =
2278                 kmem_cache_create("kgn_rx_t", sizeof(kgn_rx_t), 0, 0, NULL);
2279         if (kgnilnd_data.kgn_rx_cache == NULL) {
2280                 CERROR("Can't create slab for kgn_rx_t descriptors\n");
2281                 GOTO(failed, rc = -ENOMEM);
2282         }
2283
2284         kgnilnd_data.kgn_tx_cache =
2285                 kmem_cache_create("kgn_tx_t", sizeof(kgn_tx_t), 0, 0, NULL);
2286         if (kgnilnd_data.kgn_tx_cache == NULL) {
2287                 CERROR("Can't create slab for kgn_tx_t\n");
2288                 GOTO(failed, rc = -ENOMEM);
2289         }
2290
2291         kgnilnd_data.kgn_tx_phys_cache =
2292                 kmem_cache_create("kgn_tx_phys",
2293                                    LNET_MAX_IOV * sizeof(gni_mem_segment_t),
2294                                    0, 0, NULL);
2295         if (kgnilnd_data.kgn_tx_phys_cache == NULL) {
2296                 CERROR("Can't create slab for kgn_tx_phys\n");
2297                 GOTO(failed, rc = -ENOMEM);
2298         }
2299
2300         kgnilnd_data.kgn_dgram_cache =
2301                 kmem_cache_create("kgn_dgram_t", sizeof(kgn_dgram_t), 0, 0, NULL);
2302         if (kgnilnd_data.kgn_dgram_cache == NULL) {
2303                 CERROR("Can't create slab for outgoing datagrams\n");
2304                 GOTO(failed, rc = -ENOMEM);
2305         }
2306
2307         /* allocate a MAX_IOV array of page pointers for each cpu */
2308         kgnilnd_data.kgn_cksum_map_pages = kmalloc(num_possible_cpus() * sizeof (struct page *),
2309                                                    GFP_KERNEL);
2310         if (kgnilnd_data.kgn_cksum_map_pages == NULL) {
2311                 CERROR("Can't allocate vmap cksum pages\n");
2312                 GOTO(failed, rc = -ENOMEM);
2313         }
2314         kgnilnd_data.kgn_cksum_npages = num_possible_cpus();
2315         memset(kgnilnd_data.kgn_cksum_map_pages, 0,
2316                 kgnilnd_data.kgn_cksum_npages * sizeof (struct page *));
2317
2318         for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
2319                 kgnilnd_data.kgn_cksum_map_pages[i] = kmalloc(LNET_MAX_IOV * sizeof (struct page *),
2320                                                               GFP_KERNEL);
2321                 if (kgnilnd_data.kgn_cksum_map_pages[i] == NULL) {
2322                         CERROR("Can't allocate vmap cksum pages for cpu %d\n", i);
2323                         GOTO(failed, rc = -ENOMEM);
2324                 }
2325         }
2326
2327         LASSERT(kgnilnd_data.kgn_ndevs == 0);
2328
2329         /* Use all available GNI devices */
2330         for (i = 0; i < GNILND_MAXDEVS; i++) {
2331                 dev = &kgnilnd_data.kgn_devices[kgnilnd_data.kgn_ndevs];
2332
2333                 rc = kgnilnd_dev_init(dev);
2334                 if (rc == 0) {
2335                         /* Increment here so base_shutdown cleans it up */
2336                         kgnilnd_data.kgn_ndevs++;
2337
2338                         rc = kgnilnd_allocate_phys_fmablk(dev);
2339                         if (rc)
2340                                 GOTO(failed, rc);
2341                 }
2342         }
2343
2344         if (kgnilnd_data.kgn_ndevs == 0) {
2345                 CERROR("Can't initialise any GNI devices\n");
2346                 GOTO(failed, rc = -ENODEV);
2347         }
2348
2349         rc = kgnilnd_thread_start(kgnilnd_reaper, NULL, "kgnilnd_rpr", 0);
2350         if (rc != 0) {
2351                 CERROR("Can't spawn gnilnd reaper: %d\n", rc);
2352                 GOTO(failed, rc);
2353         }
2354
2355         rc = kgnilnd_start_rca_thread();
2356         if (rc != 0) {
2357                 CERROR("Can't spawn gnilnd rca: %d\n", rc);
2358                 GOTO(failed, rc);
2359         }
2360
2361         /*
2362          * Start ruhroh thread.  We can't use kgnilnd_thread_start() because
2363          * we don't want this thread included in kgnilnd_data.kgn_nthreads
2364          * count.  This thread controls quiesce, so it mustn't
2365          * quiesce itself.
2366          */
2367         thrd = kthread_run(kgnilnd_ruhroh_thread, NULL, "%s_%02d", "kgnilnd_rr", 0);
2368         if (IS_ERR(thrd)) {
2369                 rc = PTR_ERR(thrd);
2370                 CERROR("Can't spawn gnilnd ruhroh thread: %d\n", rc);
2371                 GOTO(failed, rc);
2372         }
2373
2374         /* threads will load balance across devs as they are available */
2375         if (*kgnilnd_tunables.kgn_thread_affinity) {
2376                 rc = kgnilnd_start_sd_threads();
2377                 if (rc != 0)
2378                         GOTO(failed, rc);
2379         } else {
2380                 for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
2381                         rc = kgnilnd_thread_start(kgnilnd_scheduler,
2382                                                   (void *)((long)i),
2383                                                   "kgnilnd_sd", i);
2384                         if (rc != 0) {
2385                                 CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
2386                                        i, rc);
2387                                 GOTO(failed, rc);
2388                         }
2389                 }
2390         }
2391
2392         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2393                 dev = &kgnilnd_data.kgn_devices[i];
2394                 rc = kgnilnd_thread_start(kgnilnd_dgram_mover, dev,
2395                                           "kgnilnd_dg", dev->gnd_id);
2396                 if (rc != 0) {
2397                         CERROR("Can't spawn gnilnd dgram_mover[%d]: %d\n",
2398                                dev->gnd_id, rc);
2399                         GOTO(failed, rc);
2400                 }
2401
2402                 rc = kgnilnd_thread_start(kgnilnd_dgram_waitq, dev,
2403                                           "kgnilnd_dgn", dev->gnd_id);
2404                 if (rc != 0) {
2405                         CERROR("Can't spawn gnilnd dgram_waitq[%d]: %d\n",
2406                                 dev->gnd_id, rc);
2407                         GOTO(failed, rc);
2408                 }
2409
2410                 rc = kgnilnd_setup_wildcard_dgram(dev);
2411
2412                 if (rc != 0) {
2413                         CERROR("Can't create wildcard dgrams[%d]: %d\n",
2414                                 dev->gnd_id, rc);
2415                         GOTO(failed, rc);
2416                 }
2417         }
2418
2419         /* flag everything initialised */
2420         kgnilnd_data.kgn_init = GNILND_INIT_ALL;
2421         /*****************************************************/
2422
2423         CDEBUG(D_MALLOC, "initial kmem %d\n", pkmem);
2424         RETURN(0);
2425
2426 failed:
2427         kgnilnd_base_shutdown();
2428         kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
2429         RETURN(rc);
2430 }
2431
2432 void
2433 kgnilnd_base_shutdown(void)
2434 {
2435         int                     i, j;
2436         ENTRY;
2437
2438         while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_PAUSE_SHUTDOWN, 1)) {};
2439
2440         kgnilnd_data.kgn_wc_kill = 1;
2441
2442         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2443                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2444                 kgnilnd_cancel_wc_dgrams(dev);
2445                 kgnilnd_cancel_dgrams(dev);
2446                 kgnilnd_del_conn_or_peer(NULL, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
2447                 kgnilnd_wait_for_canceled_dgrams(dev);
2448         }
2449
2450         /* We need to verify there are no conns left before we let the threads
2451          * shut down otherwise we could clean up the peers but still have
2452          * some outstanding conns due to orphaned datagram conns that are
2453          * being cleaned up.
2454          */
2455         i = 2;
2456         while (atomic_read(&kgnilnd_data.kgn_nconns) != 0) {
2457                 i++;
2458
2459                 for (j = 0; j < kgnilnd_data.kgn_ndevs; ++j) {
2460                         kgn_device_t *dev = &kgnilnd_data.kgn_devices[j];
2461                         kgnilnd_schedule_device(dev);
2462                 }
2463
2464                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2465                         "Waiting for conns to be cleaned up %d\n", atomic_read(&kgnilnd_data.kgn_nconns));
2466                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
2467         }
2468         /* Peer state all cleaned up BEFORE setting shutdown, so threads don't
2469          * have to worry about shutdown races.  NB connections may be created
2470          * while there are still active connds, but these will be temporary
2471          * since peer creation always fails after the listener has started to
2472          * shut down.
2473          * All peers should have been cleared out on the nets. */
2474         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2475                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2476
2477         /* Wait for the ruhroh thread to shut down. */
2478         kgnilnd_data.kgn_ruhroh_shutdown = 1;
2479         wake_up(&kgnilnd_data.kgn_ruhroh_waitq);
2480         i = 2;
2481         while (kgnilnd_data.kgn_ruhroh_running != 0) {
2482                 i++;
2483                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2484                        "Waiting for ruhroh thread to terminate\n");
2485                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
2486         }
2487
2488         /* Flag threads to terminate */
2489         kgnilnd_data.kgn_shutdown = 1;
2490
2491         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2492                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2493
2494                 /* should clear all the MDDs */
2495                 kgnilnd_unmap_fma_blocks(dev);
2496
2497                 kgnilnd_schedule_device(dev);
2498                 wake_up_all(&dev->gnd_dgram_waitq);
2499                 wake_up_all(&dev->gnd_dgping_waitq);
2500                 LASSERT(list_empty(&dev->gnd_connd_peers));
2501         }
2502
2503         spin_lock(&kgnilnd_data.kgn_reaper_lock);
2504         wake_up_all(&kgnilnd_data.kgn_reaper_waitq);
2505         spin_unlock(&kgnilnd_data.kgn_reaper_lock);
2506
2507         if (atomic_read(&kgnilnd_data.kgn_nthreads))
2508                 kgnilnd_wakeup_rca_thread();
2509
2510         /* Wait for threads to exit */
2511         i = 2;
2512         while (atomic_read(&kgnilnd_data.kgn_nthreads) != 0) {
2513                 i++;
2514                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2515                        "Waiting for %d threads to terminate\n",
2516                        atomic_read(&kgnilnd_data.kgn_nthreads));
2517                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
2518         }
2519
2520         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2521                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2522
2523         if (kgnilnd_data.kgn_peers != NULL) {
2524                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
2525                         LASSERT(list_empty(&kgnilnd_data.kgn_peers[i]));
2526
2527                 LIBCFS_FREE(kgnilnd_data.kgn_peers,
2528                             sizeof (struct list_head) *
2529                             *kgnilnd_tunables.kgn_peer_hash_size);
2530         }
2531
2532         down_write(&kgnilnd_data.kgn_net_rw_sem);
2533         if (kgnilnd_data.kgn_nets != NULL) {
2534                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++)
2535                         LASSERT(list_empty(&kgnilnd_data.kgn_nets[i]));
2536
2537                 LIBCFS_FREE(kgnilnd_data.kgn_nets,
2538                             sizeof (struct list_head) *
2539                             *kgnilnd_tunables.kgn_net_hash_size);
2540         }
2541         up_write(&kgnilnd_data.kgn_net_rw_sem);
2542
2543         LASSERTF(atomic_read(&kgnilnd_data.kgn_nconns) == 0,
2544                 "conns left %d\n", atomic_read(&kgnilnd_data.kgn_nconns));
2545
2546         if (kgnilnd_data.kgn_conns != NULL) {
2547                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
2548                         LASSERT(list_empty(&kgnilnd_data.kgn_conns[i]));
2549
2550                 LIBCFS_FREE(kgnilnd_data.kgn_conns,
2551                             sizeof (struct list_head) *
2552                             *kgnilnd_tunables.kgn_peer_hash_size);
2553         }

        for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
                kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
                int           j;

                kgnilnd_dev_fini(dev);

                LASSERTF(atomic_read(&dev->gnd_ndgrams) == 0,
                        "dgrams left %d\n", atomic_read(&dev->gnd_ndgrams));

                if (dev->gnd_dgrams != NULL) {
                        /* use a separate index - reusing 'i' here would
                         * clobber the device loop counter above */
                        for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++)
                                LASSERT(list_empty(&dev->gnd_dgrams[j]));

                        LIBCFS_FREE(dev->gnd_dgrams,
                                    sizeof (struct list_head) *
                                    *kgnilnd_tunables.kgn_peer_hash_size);
                }

                kgnilnd_free_phys_fmablk(dev);
        }

        if (kgnilnd_data.kgn_mbox_cache != NULL)
                kmem_cache_destroy(kgnilnd_data.kgn_mbox_cache);

        if (kgnilnd_data.kgn_rx_cache != NULL)
                kmem_cache_destroy(kgnilnd_data.kgn_rx_cache);

        if (kgnilnd_data.kgn_tx_cache != NULL)
                kmem_cache_destroy(kgnilnd_data.kgn_tx_cache);

        if (kgnilnd_data.kgn_tx_phys_cache != NULL)
                kmem_cache_destroy(kgnilnd_data.kgn_tx_phys_cache);

        if (kgnilnd_data.kgn_dgram_cache != NULL)
                kmem_cache_destroy(kgnilnd_data.kgn_dgram_cache);

        if (kgnilnd_data.kgn_cksum_map_pages != NULL) {
                for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
                        if (kgnilnd_data.kgn_cksum_map_pages[i] != NULL) {
                                kfree(kgnilnd_data.kgn_cksum_map_pages[i]);
                        }
                }
                kfree(kgnilnd_data.kgn_cksum_map_pages);
        }

        CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
               atomic_read(&libcfs_kmemory));

        kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
        module_put(THIS_MODULE);

        EXIT;
}

int
kgnilnd_startup(struct lnet_ni *ni)
{
        int               rc, devno;
        kgn_net_t        *net;
        ENTRY;

        LASSERTF(ni->ni_net->net_lnd == &the_kgnilnd,
                "bad LND 0x%p != the_kgnilnd @ 0x%p\n",
                ni->ni_net->net_lnd, &the_kgnilnd);

        if (kgnilnd_data.kgn_init == GNILND_INIT_NOTHING) {
                rc = kgnilnd_base_startup();
                if (rc != 0)
                        RETURN(rc);
        }

        /* Serialize with shutdown. */
        mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);

        LIBCFS_ALLOC(net, sizeof(*net));
        if (net == NULL) {
                CERROR("could not allocate net for new interface instance\n");
                /* no need to cleanup the CDM... */
                GOTO(failed, rc = -ENOMEM);
        }
        INIT_LIST_HEAD(&net->gnn_list);
        ni->ni_data = net;
        net->gnn_ni = ni;
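        /* if no per-net tunables were configured, fall back to the gnilnd
         * module defaults for the tx credit limits */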
        if (!ni->ni_net->net_tunables_set) {
                ni->ni_net->net_tunables.lct_max_tx_credits =
                        *kgnilnd_tunables.kgn_credits;
                ni->ni_net->net_tunables.lct_peer_tx_credits =
                        *kgnilnd_tunables.kgn_peer_credits;
        }

        if (*kgnilnd_tunables.kgn_peer_health) {
                int     fudge;
                int     timeout;

                /* give this a bit of leeway - we don't have a hard timeout
                 * as we only check timeouts periodically - see the comment
                 * in kgnilnd_reaper */
                fudge = (GNILND_TO2KA(*kgnilnd_tunables.kgn_timeout) / GNILND_REAPER_NCHECKS);
                timeout = *kgnilnd_tunables.kgn_timeout + fudge;
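                /* purely illustrative: if kgn_timeout were 60s and
                 * GNILND_TO2KA(60) / GNILND_REAPER_NCHECKS came to 15s of
                 * fudge, the effective floor for peer_timeout below would
                 * be 75s */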

                if (*kgnilnd_tunables.kgn_peer_timeout >= timeout) {
                        ni->ni_net->net_tunables.lct_peer_timeout =
                                *kgnilnd_tunables.kgn_peer_timeout;
                } else if (*kgnilnd_tunables.kgn_peer_timeout > -1) {
                        LCONSOLE_ERROR("peer_timeout is set to %d but needs to be >= %d\n",
                                       *kgnilnd_tunables.kgn_peer_timeout,
                                       timeout);
                        ni->ni_data = NULL;
                        LIBCFS_FREE(net, sizeof(*net));
                        GOTO(failed, rc = -EINVAL);
                } else {
                        ni->ni_net->net_tunables.lct_peer_timeout = timeout;
                }

                LCONSOLE_INFO("Enabling LNet peer health for gnilnd, timeout %ds\n",
                              ni->ni_net->net_tunables.lct_peer_timeout);
        }

        atomic_set(&net->gnn_refcount, 1);

        /* if we have multiple devices, spread the nets around */
        net->gnn_netnum = LNET_NETNUM(LNET_NIDNET(ni->ni_nid));

        devno = LNET_NIDNET(ni->ni_nid) % GNILND_MAXDEVS;
        net->gnn_dev = &kgnilnd_data.kgn_devices[devno];
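        /* illustrative: with GNILND_MAXDEVS == 2, nets whose LNET_NIDNET()
         * value is even would land on device 0 and odd ones on device 1 */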

        /* allocate a 'dummy' cdm for datagram use. We can only have a single
         * datagram between a nid:inst_id and nid2:inst_id. The fake cdm
         * gives us an additional inst_id to use, allowing the datagrams to
         * flow like rivers of honey and beer */

        /* the instance id for the cdm is the NETNUM offset by MAXDEVS -
         * ensuring we'll have a unique id */

        ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), net->gnn_dev->gnd_nid);
        CDEBUG(D_NET, "adding net %p nid=%s on dev %d\n",
                net, libcfs_nid2str(ni->ni_nid), net->gnn_dev->gnd_id);
        /* until gnn_list is set, we need to clean up after ourselves, as
         * kgnilnd_shutdown would otherwise get confused */

        down_write(&kgnilnd_data.kgn_net_rw_sem);
        list_add_tail(&net->gnn_list, kgnilnd_netnum2netlist(net->gnn_netnum));
        up_write(&kgnilnd_data.kgn_net_rw_sem);

        /* we need a separate thread to call probe_wait_by_id until
         * we get a function callback notifier from kgni */
        mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
        RETURN(0);
 failed:
        mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
        kgnilnd_shutdown(ni);
        RETURN(rc);
}

void
kgnilnd_shutdown(struct lnet_ni *ni)
{
        kgn_net_t     *net = ni->ni_data;
        int           i;
        int           rc;
        ENTRY;

        CFS_RACE(CFS_FAIL_GNI_SR_DOWN_RACE);

        LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_ALL,
                "init %d\n", kgnilnd_data.kgn_init);

        /* Serialize with startup. */
        mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
        CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
               atomic_read(&libcfs_kmemory));

        if (net == NULL) {
                CERROR("got NULL net for ni %p\n", ni);
                GOTO(out, rc = -EINVAL);
        }

        LASSERTF(ni == net->gnn_ni,
                "ni %p gnn_ni %p\n", ni, net->gnn_ni);

        ni->ni_data = NULL;

        LASSERT(!net->gnn_shutdown);
        LASSERTF(atomic_read(&net->gnn_refcount) != 0,
                "net %p refcount %d\n",
                net, atomic_read(&net->gnn_refcount));

        if (!list_empty(&net->gnn_list)) {
                /* serialize with peer creation */
                down_write(&kgnilnd_data.kgn_net_rw_sem);
                net->gnn_shutdown = 1;
                up_write(&kgnilnd_data.kgn_net_rw_sem);

                kgnilnd_cancel_net_dgrams(net);

                kgnilnd_del_conn_or_peer(net, LNET_NID_ANY, GNILND_DEL_PEER,
                                         -ESHUTDOWN);

                /* if we are quiesced, we need to wake up - we need those
                 * threads alive to release peers, etc */
                if (GNILND_IS_QUIESCED) {
                        set_mb(kgnilnd_data.kgn_quiesce_trigger,
                               GNILND_QUIESCE_IDLE);
                        kgnilnd_quiesce_wait("shutdown");
                }

                kgnilnd_wait_for_canceled_dgrams(net->gnn_dev);

                /* wait until the net's refcount drops to 1, then release the
                 * final reference, which is ours - this makes sure everything
                 * else is done before we free the net */
                i = 4;
                while (atomic_read(&net->gnn_refcount) != 1) {
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
                                "Waiting for %d references to clear on net %d\n",
                                atomic_read(&net->gnn_refcount),
                                net->gnn_netnum);
                        schedule_timeout_uninterruptible(cfs_time_seconds(1));
                }

                /* release ref from kgnilnd_startup */
                kgnilnd_net_decref(net);

                /* serialize with reaper and conn_task looping */
                down_write(&kgnilnd_data.kgn_net_rw_sem);
                list_del_init(&net->gnn_list);
                up_write(&kgnilnd_data.kgn_net_rw_sem);
        }

        /* not locking, this can't race with writers */
        LASSERTF(atomic_read(&net->gnn_refcount) == 0,
                "net %p refcount %d\n",
                net, atomic_read(&net->gnn_refcount));
        LIBCFS_FREE(net, sizeof(*net));

out:
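        /* if every chain in the net hash is now empty, this was the last
         * net, so tear down the shared base state as well */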
        down_read(&kgnilnd_data.kgn_net_rw_sem);
        for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                if (!list_empty(&kgnilnd_data.kgn_nets[i])) {
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        break;
                }

                if (i == *kgnilnd_tunables.kgn_net_hash_size - 1) {
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        kgnilnd_base_shutdown();
                }
        }
        CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
               atomic_read(&libcfs_kmemory));

        mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
        EXIT;
}

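/* module unload: unregister from LNet first so no new NIs can start up,
 * then remove the proc and sysctl entries */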
static void __exit kgnilnd_exit(void)
{
        lnet_unregister_lnd(&the_kgnilnd);
        kgnilnd_proc_fini();
        kgnilnd_remove_sysctl();
}

static int __init kgnilnd_init(void)
{
        int    rc;

        rc = kgnilnd_tunables_init();
        if (rc != 0)
                return rc;

        LCONSOLE_INFO("Lustre: kgnilnd build version: "
                      LUSTRE_VERSION_STRING "\n");

        kgnilnd_insert_sysctl();
        kgnilnd_proc_init();

        lnet_register_lnd(&the_kgnilnd);

        return 0;
}

MODULE_AUTHOR("Cray, Inc. <nic@cray.com>");
MODULE_DESCRIPTION("Gemini LNet Network Driver");
MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");

module_init(kgnilnd_init);
module_exit(kgnilnd_exit);