Whamcloud - gitweb
LU-4069 build: cleanup from GOTO(label, -ERRNO)
[fs/lustre-release.git] / lnet / klnds / gnilnd / gnilnd.c
1 /*
2  * Copyright (C) 2012 Cray, Inc.
3  *
4  *   Author: Nic Henke <nic@cray.com>
5  *   Author: James Shimek <jshimek@cray.com>
6  *
7  *   This file is part of Lustre, http://www.lustre.org.
8  *
9  *   Lustre is free software; you can redistribute it and/or
10  *   modify it under the terms of version 2 of the GNU General Public
11  *   License as published by the Free Software Foundation.
12  *
13  *   Lustre is distributed in the hope that it will be useful,
14  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *   GNU General Public License for more details.
17  *
18  *   You should have received a copy of the GNU General Public License
19  *   along with Lustre; if not, write to the Free Software
20  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  *
22  */
23 #include "gnilnd.h"
24
25 /* Primary entry points from LNET.  There are no guarantees against reentrance. */
26 lnd_t the_kgnilnd = {
27         .lnd_type       = GNILND,
28         .lnd_startup    = kgnilnd_startup,
29         .lnd_shutdown   = kgnilnd_shutdown,
30         .lnd_ctl        = kgnilnd_ctl,
31         .lnd_send       = kgnilnd_send,
32         .lnd_recv       = kgnilnd_recv,
33         .lnd_eager_recv = kgnilnd_eager_recv,
34         .lnd_query      = kgnilnd_query,
35 };
36
37 kgn_data_t      kgnilnd_data;
38
39 /* needs write_lock on kgn_peer_conn_lock */
40 int
41 kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
42 {
43         kgn_conn_t         *conn;
44         struct list_head   *ctmp, *cnxt;
45         int                 loopback;
46         int                 count = 0;
47
48         loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;
49
50         list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
51                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
52
53                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
54                         continue;
55
56                 if (conn == newconn)
57                         continue;
58
59                 if (conn->gnc_device != newconn->gnc_device)
60                         continue;
61
62                 /* This is a two connection loopback - one talking to the other */
63                 if (loopback &&
64                     newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
65                     newconn->gnc_peer_connstamp == conn->gnc_my_connstamp) {
66                         CDEBUG(D_NET, "skipping prune of %p, "
67                                 "loopback and matching stamps"
68                                 " connstamp "LPU64"("LPU64")"
69                                 " peerstamp "LPU64"("LPU64")\n",
70                                 conn, newconn->gnc_my_connstamp,
71                                 conn->gnc_peer_connstamp,
72                                 newconn->gnc_peer_connstamp,
73                                 conn->gnc_my_connstamp);
74                         continue;
75                 }
76
77                 if (conn->gnc_peerstamp != newconn->gnc_peerstamp) {
78                         LASSERTF(conn->gnc_peerstamp < newconn->gnc_peerstamp,
79                                 "conn 0x%p peerstamp "LPU64" >= "
80                                 "newconn 0x%p peerstamp "LPU64"\n",
81                                 conn, conn->gnc_peerstamp,
82                                 newconn, newconn->gnc_peerstamp);
83
84                         CDEBUG(D_NET, "Closing stale conn nid: %s "
85                                " peerstamp:"LPX64"("LPX64")\n",
86                                libcfs_nid2str(peer->gnp_nid),
87                                conn->gnc_peerstamp, newconn->gnc_peerstamp);
88                 } else {
89
90                         LASSERTF(conn->gnc_peer_connstamp < newconn->gnc_peer_connstamp,
91                                 "conn 0x%p peer_connstamp "LPU64" >= "
92                                 "newconn 0x%p peer_connstamp "LPU64"\n",
93                                 conn, conn->gnc_peer_connstamp,
94                                 newconn, newconn->gnc_peer_connstamp);
95
96                         CDEBUG(D_NET, "Closing stale conn nid: %s"
97                                " connstamp:"LPU64"("LPU64")\n",
98                                libcfs_nid2str(peer->gnp_nid),
99                                conn->gnc_peer_connstamp, newconn->gnc_peer_connstamp);
100                 }
101
102                 count++;
103                 kgnilnd_close_conn_locked(conn, -ESTALE);
104         }
105
106         if (count != 0) {
107                 CWARN("Closed %d stale conns to %s\n", count, libcfs_nid2str(peer->gnp_nid));
108         }
109
110         RETURN(count);
111 }
112
113 int
114 kgnilnd_conn_isdup_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
115 {
116         kgn_conn_t       *conn;
117         struct list_head *tmp;
118         int               loopback;
119         ENTRY;
120
121         loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;
122
123         list_for_each(tmp, &peer->gnp_conns) {
124                 conn = list_entry(tmp, kgn_conn_t, gnc_list);
125                 CDEBUG(D_NET, "checking conn 0x%p for peer %s"
126                         " lo %d new "LPU64" existing "LPU64
127                         " new peer "LPU64" existing peer "LPU64
128                         " new dev %p existing dev %p\n",
129                         conn, libcfs_nid2str(peer->gnp_nid),
130                         loopback,
131                         newconn->gnc_peerstamp, conn->gnc_peerstamp,
132                         newconn->gnc_peer_connstamp, conn->gnc_peer_connstamp,
133                         newconn->gnc_device, conn->gnc_device);
134
135                 /* conn is in the process of closing */
136                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
137                         continue;
138
139                 /* 'newconn' is from an earlier version of 'peer'!!! */
140                 if (newconn->gnc_peerstamp < conn->gnc_peerstamp)
141                         RETURN(1);
142
143                 /* 'conn' is from an earlier version of 'peer': it will be
144                  * removed when we cull stale conns later on... */
145                 if (newconn->gnc_peerstamp > conn->gnc_peerstamp)
146                         continue;
147
148                 /* Different devices are OK */
149                 if (conn->gnc_device != newconn->gnc_device)
150                         continue;
151
152                 /* It's me connecting to myself */
153                 if (loopback &&
154                     newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
155                     newconn->gnc_peer_connstamp == conn->gnc_my_connstamp)
156                         continue;
157
158                 /* 'newconn' is an earlier connection from 'peer'!!! */
159                 if (newconn->gnc_peer_connstamp < conn->gnc_peer_connstamp)
160                         RETURN(2);
161
162                 /* 'conn' is an earlier connection from 'peer': it will be
163                  * removed when we cull stale conns later on... */
164                 if (newconn->gnc_peer_connstamp > conn->gnc_peer_connstamp)
165                         continue;
166
167                 /* 'newconn' has the SAME connection stamp; 'peer' isn't
168                  * playing the game... */
169                 RETURN(3);
170         }
171
172         RETURN(0);
173 }
174
175 int
176 kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
177 {
178         kgn_conn_t      *conn;
179         gni_return_t    rrc;
180         int             rc = 0;
181
182         LASSERT (!in_interrupt());
183         atomic_inc(&kgnilnd_data.kgn_nconns);
184
185         /* divide by 2 to allow for complete reset and immediate reconnect */
186         if (atomic_read(&kgnilnd_data.kgn_nconns) >= GNILND_MAX_CQID/2) {
187                 CERROR("Too many conn are live: %d > %d\n",
188                         atomic_read(&kgnilnd_data.kgn_nconns), GNILND_MAX_CQID/2);
189                 atomic_dec(&kgnilnd_data.kgn_nconns);
190                 return -E2BIG;
191         }
192
193         LIBCFS_ALLOC(conn, sizeof(*conn));
194         if (conn == NULL) {
195                 atomic_dec(&kgnilnd_data.kgn_nconns);
196                 return -ENOMEM;
197         }
198
199         LIBCFS_ALLOC(conn->gnc_tx_ref_table, GNILND_MAX_MSG_ID * sizeof(void *));
200         if (conn->gnc_tx_ref_table == NULL) {
201                 CERROR("Can't allocate conn tx_ref_table\n");
202                 GOTO(failed, rc = -ENOMEM);
203         }
204
205         atomic_set(&conn->gnc_refcount, 1);
206         atomic_set(&conn->gnc_reaper_noop, 0);
207         atomic_set(&conn->gnc_sched_noop, 0);
208         atomic_set(&conn->gnc_tx_in_use, 0);
209         INIT_LIST_HEAD(&conn->gnc_list);
210         INIT_LIST_HEAD(&conn->gnc_hashlist);
211         INIT_LIST_HEAD(&conn->gnc_schedlist);
212         INIT_LIST_HEAD(&conn->gnc_fmaq);
213         INIT_LIST_HEAD(&conn->gnc_mdd_list);
214         spin_lock_init(&conn->gnc_list_lock);
215         spin_lock_init(&conn->gnc_tx_lock);
216         conn->gnc_magic = GNILND_CONN_MAGIC;
217
218         /* set tx id to nearly the end to make sure we find wrapping
219          * issues soon */
220         conn->gnc_next_tx = (int) GNILND_MAX_MSG_ID - 10;
221
222         /* if this fails, we have conflicts and MAX_TX is too large */
223         CLASSERT(GNILND_MAX_MSG_ID < GNILND_MSGID_CLOSE);
224
225         /* get a new unique CQ id for this conn */
226         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
227         conn->gnc_my_connstamp = kgnilnd_data.kgn_connstamp++;
228         conn->gnc_cqid = kgnilnd_get_cqid_locked();
229         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
230
231         if (conn->gnc_cqid == 0) {
232                 CERROR("Could not allocate unique CQ ID for conn 0x%p\n", conn);
233                 GOTO(failed, rc = -E2BIG);
234         }
235
236         CDEBUG(D_NET, "alloc cqid %u for conn 0x%p\n",
237                 conn->gnc_cqid, conn);
238
239         /* need to be set before gnc_ephandle to allow kgnilnd_destroy_conn_ep to
240          * check context */
241         conn->gnc_device = dev;
242
243         conn->gnc_timeout = MAX(*kgnilnd_tunables.kgn_timeout,
244                                 GNILND_MIN_TIMEOUT);
245         kgnilnd_update_reaper_timeout(conn->gnc_timeout);
246
247         /* this is the ep_handle for doing SMSG & BTE */
248         mutex_lock(&dev->gnd_cq_mutex);
249         rrc = kgnilnd_ep_create(dev->gnd_handle, dev->gnd_snd_fma_cqh,
250                                 &conn->gnc_ephandle);
251         mutex_unlock(&dev->gnd_cq_mutex);
252         if (rrc != GNI_RC_SUCCESS)
253                 GOTO(failed, rc = -ENETDOWN);
254
255         CDEBUG(D_NET, "created conn 0x%p ep_hndl 0x%p\n",
256                conn, conn->gnc_ephandle);
257
258         /* add ref for EP canceling */
259         kgnilnd_conn_addref(conn);
260         atomic_inc(&dev->gnd_neps);
261
262         *connp = conn;
263         return 0;
264
265 failed:
266         atomic_dec(&kgnilnd_data.kgn_nconns);
267         LIBCFS_FREE(conn->gnc_tx_ref_table, GNILND_MAX_MSG_ID * sizeof(void *));
268         LIBCFS_FREE(conn, sizeof(*conn));
269         return rc;
270 }
271
272 /* needs to be called with kgn_peer_conn_lock held (read or write) */
273 kgn_conn_t *
274 kgnilnd_find_conn_locked(kgn_peer_t *peer)
275 {
276         kgn_conn_t      *conn = NULL;
277
278         /* if we are in reset, this conn is going to die soon */
279         if (unlikely(kgnilnd_data.kgn_in_reset)) {
280                 RETURN(NULL);
281         }
282
283         /* just return the first ESTABLISHED connection */
284         list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
285                 /* kgnilnd_finish_connect doesn't put connections on the
286                  * peer list until they are actually established */
287                 LASSERTF(conn->gnc_state >= GNILND_CONN_ESTABLISHED,
288                         "found conn %p state %s on peer %p (%s)\n",
289                         conn, kgnilnd_conn_state2str(conn), peer,
290                         libcfs_nid2str(peer->gnp_nid));
291                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
292                         continue;
293
294                 RETURN(conn);
295         }
296         RETURN(NULL);
297 }
298
299 /* needs write_lock on kgn_peer_conn_lock held */
300 kgn_conn_t *
301 kgnilnd_find_or_create_conn_locked(kgn_peer_t *peer) {
302
303         kgn_device_t    *dev = peer->gnp_net->gnn_dev;
304         kgn_conn_t      *conn;
305
306         conn = kgnilnd_find_conn_locked(peer);
307
308         if (conn != NULL) {
309                 return conn;
310         }
311
312         /* if the peer was previously connecting, check if we should
313          * trigger another connection attempt yet. */
314         if (time_before(jiffies, peer->gnp_reconnect_time)) {
315                 return NULL;
316         }
317
318         /* This check prevents us from creating a new connection to a peer while we are
319          * still in the process of closing an existing connection to the peer.
320          */
321         list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
322                 if (conn->gnc_ephandle != NULL) {
323                         CDEBUG(D_NET, "Not connecting non-null ephandle found peer 0x%p->%s\n", peer,
324                                 libcfs_nid2str(peer->gnp_nid));
325                         return NULL;
326                 }
327         }
328
329         if (peer->gnp_connecting != GNILND_PEER_IDLE) {
330                 /* if we are not connecting, fire up a new connection */
331                 /* or if we are anything but IDLE DONT start a new connection */
332                return NULL;
333         }
334
335         CDEBUG(D_NET, "starting connect to %s\n",
336                 libcfs_nid2str(peer->gnp_nid));
337         peer->gnp_connecting = GNILND_PEER_CONNECT;
338         kgnilnd_peer_addref(peer); /* extra ref for connd */
339
340         spin_lock(&dev->gnd_connd_lock);
341         list_add_tail(&peer->gnp_connd_list, &dev->gnd_connd_peers);
342         spin_unlock(&dev->gnd_connd_lock);
343
344         kgnilnd_schedule_dgram(dev);
345         CDEBUG(D_NETTRACE, "scheduling new connect\n");
346
347         return NULL;
348 }
349
350 /* Caller is responsible for deciding if/when to call this */
351 void
352 kgnilnd_destroy_conn_ep(kgn_conn_t *conn)
353 {
354         gni_return_t    rrc;
355         gni_ep_handle_t tmp_ep;
356
357         /* only if we actually initialized it,
358          *  then set NULL to tell kgnilnd_destroy_conn to leave it alone */
359
360         tmp_ep = xchg(&conn->gnc_ephandle, NULL);
361         if (tmp_ep != NULL) {
362                 /* we never re-use the EP, so unbind is not needed */
363                 mutex_lock(&conn->gnc_device->gnd_cq_mutex);
364                 rrc = kgnilnd_ep_destroy(tmp_ep);
365
366                 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
367
368                 /* if this fails, it could hork up kgni smsg retransmit and others
369                  * since we could free the SMSG mbox memory, etc. */
370                 LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d conn 0x%p ep 0x%p\n",
371                          rrc, conn, conn->gnc_ephandle);
372
373                 atomic_dec(&conn->gnc_device->gnd_neps);
374
375                 /* clear out count added in kgnilnd_close_conn_locked
376                  * conn will have a peer once it hits finish_connect, where it
377                  * is the first spot we'll mark it ESTABLISHED as well */
378                 if (conn->gnc_peer) {
379                         kgnilnd_admin_decref(conn->gnc_peer->gnp_dirty_eps);
380                 }
381
382                 /* drop ref for EP */
383                 kgnilnd_conn_decref(conn);
384         }
385 }
386
387 void
388 kgnilnd_destroy_conn(kgn_conn_t *conn)
389 {
390         LASSERTF(!in_interrupt() &&
391                 !conn->gnc_scheduled &&
392                 !conn->gnc_in_purgatory &&
393                 conn->gnc_ephandle == NULL &&
394                 list_empty(&conn->gnc_list) &&
395                 list_empty(&conn->gnc_hashlist) &&
396                 list_empty(&conn->gnc_schedlist) &&
397                 list_empty(&conn->gnc_mdd_list) &&
398                 conn->gnc_magic == GNILND_CONN_MAGIC,
399                 "conn 0x%p->%s IRQ %d sched %d purg %d ep 0x%p Mg %d lists %d/%d/%d/%d\n",
400                 conn, conn->gnc_peer ? libcfs_nid2str(conn->gnc_peer->gnp_nid)
401                                      : "<?>",
402                 !!in_interrupt(), conn->gnc_scheduled,
403                 conn->gnc_in_purgatory,
404                 conn->gnc_ephandle,
405                 conn->gnc_magic,
406                 list_empty(&conn->gnc_list),
407                 list_empty(&conn->gnc_hashlist),
408                 list_empty(&conn->gnc_schedlist),
409                 list_empty(&conn->gnc_mdd_list));
410
411         /* Tripping these is especially bad, as it means we have items on the
412          *  lists that didn't keep their refcount on the connection - or
413          *  somebody evil released their own */
414         LASSERTF(list_empty(&conn->gnc_fmaq) &&
415                  atomic_read(&conn->gnc_nlive_fma) == 0 &&
416                  atomic_read(&conn->gnc_nlive_rdma) == 0,
417                  "conn 0x%p fmaq %d@0x%p nfma %d nrdma %d\n",
418                  conn, kgnilnd_count_list(&conn->gnc_fmaq), &conn->gnc_fmaq,
419                  atomic_read(&conn->gnc_nlive_fma), atomic_read(&conn->gnc_nlive_rdma));
420
421         CDEBUG(D_NET, "destroying conn %p ephandle %p error %d\n",
422                 conn, conn->gnc_ephandle, conn->gnc_error);
423
424         /* We are freeing this memory remove the magic value from the connection */
425         conn->gnc_magic = 0;
426
427         /* if there is an FMA blk left here, we'll tear it down */
428         if (conn->gnc_fma_blk) {
429                 if (conn->gnc_peer) {
430                         kgn_mbox_info_t *mbox;
431                         mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
432                         mbox->mbx_prev_nid = conn->gnc_peer->gnp_nid;
433                 }
434                 kgnilnd_release_mbox(conn, 0);
435         }
436
437         if (conn->gnc_peer != NULL)
438                 kgnilnd_peer_decref(conn->gnc_peer);
439
440         if (conn->gnc_tx_ref_table != NULL) {
441                 LIBCFS_FREE(conn->gnc_tx_ref_table,
442                             GNILND_MAX_MSG_ID * sizeof(void *));
443         }
444
445         LIBCFS_FREE(conn, sizeof(*conn));
446         atomic_dec(&kgnilnd_data.kgn_nconns);
447 }
448
449 /* peer_alive and peer_notify done in the style of the o2iblnd */
450 void
451 kgnilnd_peer_alive(kgn_peer_t *peer)
452 {
453         set_mb(peer->gnp_last_alive, jiffies);
454 }
455
456 void
457 kgnilnd_peer_notify(kgn_peer_t *peer, int error)
458 {
459         int                     tell_lnet = 0;
460         int                     nnets = 0;
461         int                     rc;
462         int                     i, j;
463         kgn_conn_t             *conn;
464         kgn_net_t             **nets;
465         kgn_net_t              *net;
466
467
468         if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DONT_NOTIFY))
469                 return;
470
471         /* Tell LNet we are giving ups on this peer - but only
472          * if it isn't already reconnected or trying to reconnect */
473         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
474
475         /* use kgnilnd_find_conn_locked to avoid any conns in the process of being nuked
476          *
477          * don't tell LNet if we are in reset - we assume that everyone will be able to
478          * reconnect just fine
479          */
480         conn = kgnilnd_find_conn_locked(peer);
481
482         CDEBUG(D_NETTRACE, "peer 0x%p->%s ting %d conn 0x%p, rst %d error %d\n",
483                peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting, conn,
484                kgnilnd_data.kgn_in_reset, error);
485
486         if ((peer->gnp_connecting == GNILND_PEER_IDLE) &&
487             (conn == NULL) &&
488             (!kgnilnd_data.kgn_in_reset) &&
489             (!kgnilnd_conn_clean_errno(error))) {
490                 tell_lnet = 1;
491         }
492
493         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
494
495         if (!tell_lnet) {
496                 /* short circuit if we dont need to notify Lnet */
497                 return;
498         }
499
500         rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
501
502         if (rc) {
503             /* dont do this if this fails since LNET is in shutdown or something else
504              */
505
506                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
507                         list_for_each_entry(net , &kgnilnd_data.kgn_nets[i], gnn_list) {
508                                 /* if gnn_shutdown set for any net shutdown is in progress just return */
509                                 if (net->gnn_shutdown) {
510                                         up_read(&kgnilnd_data.kgn_net_rw_sem);
511                                         return;
512                                 }
513                                 nnets++;
514                         }
515                 }
516
517                 if (nnets == 0) {
518                         /* shutdown in progress most likely */
519                         up_read(&kgnilnd_data.kgn_net_rw_sem);
520                         return;
521                 }
522
523                 LIBCFS_ALLOC(nets, nnets * sizeof(*nets));
524
525                 if (nets == NULL) {
526                         up_read(&kgnilnd_data.kgn_net_rw_sem);
527                         CERROR("Failed to allocate nets[%d]\n", nnets);
528                         return;
529                 }
530
531                 j = 0;
532                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
533                         list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
534                                 nets[j] = net;
535                                 kgnilnd_net_addref(net);
536                                 j++;
537                         }
538                 }
539                 up_read(&kgnilnd_data.kgn_net_rw_sem);
540
541                 for (i = 0; i < nnets; i++) {
542                         lnet_nid_t peer_nid;
543
544                         net = nets[i];
545
546                         peer_nid = kgnilnd_lnd2lnetnid(net->gnn_ni->ni_nid,
547                                                                  peer->gnp_nid);
548
549                         CDEBUG(D_NET, "peer 0x%p->%s last_alive %lu (%lus ago)\n",
550                                 peer, libcfs_nid2str(peer_nid), peer->gnp_last_alive,
551                                 cfs_duration_sec(jiffies - peer->gnp_last_alive));
552
553                         lnet_notify(net->gnn_ni, peer_nid, 0, peer->gnp_last_alive);
554
555
556                         kgnilnd_net_decref(net);
557                 }
558
559                 LIBCFS_FREE(nets, nnets * sizeof(*nets));
560         }
561 }
562
563 /* need write_lock on kgn_peer_conn_lock */
564 void
565 kgnilnd_close_conn_locked(kgn_conn_t *conn, int error)
566 {
567         kgn_peer_t        *peer = conn->gnc_peer;
568         ENTRY;
569
570         LASSERT(!in_interrupt());
571
572         /* store error for tx completion */
573         conn->gnc_error = error;
574         peer->gnp_last_errno = error;
575
576         /* use real error from peer if possible */
577         if (error == -ECONNRESET) {
578                 error = conn->gnc_peer_error;
579         }
580
581         /* if we NETERROR, make sure it is rate limited */
582         if (!kgnilnd_conn_clean_errno(error) &&
583             peer->gnp_down == GNILND_RCA_NODE_UP) {
584                 CNETERR("closing conn to %s: error %d\n",
585                        libcfs_nid2str(peer->gnp_nid), error);
586         } else {
587                 CDEBUG(D_NET, "closing conn to %s: error %d\n",
588                        libcfs_nid2str(peer->gnp_nid), error);
589         }
590
591         LASSERTF(conn->gnc_state == GNILND_CONN_ESTABLISHED,
592                 "conn %p to %s with bogus state %s\n", conn,
593                 libcfs_nid2str(conn->gnc_peer->gnp_nid),
594                 kgnilnd_conn_state2str(conn));
595         LASSERT(!list_empty(&conn->gnc_hashlist));
596         LASSERT(!list_empty(&conn->gnc_list));
597
598
599         /* mark peer count here so any place the EP gets destroyed will
600          * open up the peer count so that a new ESTABLISHED conn is then free
601          * to send new messages -- sending before the previous EPs are destroyed
602          * could end up with messages on the network for the old conn _after_
603          * the new conn and break the mbox safety protocol */
604         kgnilnd_admin_addref(conn->gnc_peer->gnp_dirty_eps);
605
606         /* Remove from conn hash table: no new callbacks */
607         list_del_init(&conn->gnc_hashlist);
608         kgnilnd_data.kgn_conn_version++;
609         kgnilnd_conn_decref(conn);
610
611         /* if we are in reset, go right to CLOSED as there is no scheduler
612          * thread to move from CLOSING to CLOSED */
613         if (unlikely(kgnilnd_data.kgn_in_reset)) {
614                 conn->gnc_state = GNILND_CONN_CLOSED;
615         } else {
616                 conn->gnc_state = GNILND_CONN_CLOSING;
617         }
618
619         /* leave on peer->gnp_conns to make sure we don't let the reaper
620          * or others try to unlink this peer until the conn is fully
621          * processed for closing */
622
623         if (kgnilnd_check_purgatory_conn(conn)) {
624                 kgnilnd_add_purgatory_locked(conn, conn->gnc_peer);
625         }
626
627         /* Reset RX timeout to ensure we wait for an incoming CLOSE
628          * for the full timeout.  If we get a CLOSE we know the
629          * peer has stopped all RDMA.  Otherwise if we wait for
630          * the full timeout we can also be sure all RDMA has stopped. */
631         conn->gnc_last_rx = conn->gnc_last_rx_cq = jiffies;
632         mb();
633
634         /* schedule sending CLOSE - if we are in quiesce, this adds to
635          * gnd_ready_conns and allows us to find it in quiesce processing */
636         kgnilnd_schedule_conn(conn);
637
638         EXIT;
639 }
640
641 void
642 kgnilnd_close_conn(kgn_conn_t *conn, int error)
643 {
644         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
645         /* need to check the state here - this call is racy and we don't
646          * know the state until after the lock is grabbed */
647         if (conn->gnc_state == GNILND_CONN_ESTABLISHED) {
648                 kgnilnd_close_conn_locked(conn, error);
649         }
650         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
651 }
652
653 void
654 kgnilnd_complete_closed_conn(kgn_conn_t *conn)
655 {
656         LIST_HEAD               (sinners);
657         kgn_tx_t               *tx, *txn;
658         int                     nlive = 0;
659         int                     nrdma = 0;
660         int                     nq_rdma = 0;
661         int                     logmsg;
662         ENTRY;
663
664         /* Dump log  on cksum error - wait until complete phase to let
665          * RX of error happen */
666         if (*kgnilnd_tunables.kgn_checksum_dump &&
667             (conn != NULL && conn->gnc_peer_error == -ENOKEY)) {
668                 libcfs_debug_dumplog();
669         }
670
671         /* _CLOSED set in kgnilnd_process_fmaq once we decide to
672          * send the CLOSE or not */
673         LASSERTF(conn->gnc_state == GNILND_CONN_CLOSED,
674                  "conn 0x%p->%s with bad state %s\n",
675                  conn, conn->gnc_peer ?
676                         libcfs_nid2str(conn->gnc_peer->gnp_nid) :
677                         "<?>",
678                  kgnilnd_conn_state2str(conn));
679
680         LASSERT(list_empty(&conn->gnc_hashlist));
681
682         /* we've sent the close, start nuking */
683         if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SCHEDULE_COMPLETE))
684                 kgnilnd_schedule_conn(conn);
685
686         if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
687                 CDEBUG(D_NETERROR, "Error someone scheduled us after we were "
688                                 "done, Attempting to recover conn 0x%p "
689                                 "scheduled %d function: %s line: %d\n", conn,
690                                 conn->gnc_scheduled, conn->gnc_sched_caller,
691                                 conn->gnc_sched_line);
692                 RETURN_EXIT;
693         }
694
695         /* we don't use lists to track things that we can get out of the
696          * tx_ref table... */
697
698         /* need to hold locks for tx_list_state, sampling it is too racy:
699          * - the lock actually protects tx != NULL, but we can't take the proper
700          *   lock until we check tx_list_state, which would be too late and
701          *   we could have the TX change under us.
702          * gnd_rdmaq_lock and gnd_lock and not used together, so taking both
703          * should be fine */
704         spin_lock(&conn->gnc_device->gnd_rdmaq_lock);
705         spin_lock(&conn->gnc_device->gnd_lock);
706
707         for (nrdma = 0; nrdma < GNILND_MAX_MSG_ID; nrdma++) {
708                 tx = conn->gnc_tx_ref_table[nrdma];
709
710                 if (tx != NULL) {
711                         /* only print the first error and if not CLOSE, we often don't see
712                          * CQ events for that by the time we get here... and really don't care */
713                         if (nlive || tx->tx_msg.gnm_type == GNILND_MSG_CLOSE)
714                                 tx->tx_state |= GNILND_TX_QUIET_ERROR;
715                         nlive++;
716                         GNIDBG_TX(D_NET, tx, "cleaning up on close, nlive %d", nlive);
717
718                         /* don't worry about gnc_lock here as nobody else should be
719                          * touching this conn */
720                         kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
721                         list_add_tail(&tx->tx_list, &sinners);
722                 }
723         }
724         spin_unlock(&conn->gnc_device->gnd_lock);
725         spin_unlock(&conn->gnc_device->gnd_rdmaq_lock);
726
727         /* nobody should have marked this as needing scheduling after
728          * we called close - so only ref should be us handling it */
729         if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
730                 CDEBUG(D_NETERROR, "Error someone scheduled us after we were "
731                                 "done, Attempting to recover conn 0x%p "
732                                 "scheduled %d function %s line: %d\n", conn,
733                                 conn->gnc_scheduled, conn->gnc_sched_caller,
734                                 conn->gnc_sched_line);
735         }
736         /* now reset a few to actual counters... */
737         nrdma = atomic_read(&conn->gnc_nlive_rdma);
738         nq_rdma = atomic_read(&conn->gnc_nq_rdma);
739
740         if (!list_empty(&sinners)) {
741                 list_for_each_entry_safe(tx, txn, &sinners, tx_list) {
742                         /* clear tx_list to make tx_add_list_locked happy */
743                         list_del_init(&tx->tx_list);
744                         /* The error codes determine if we hold onto the MDD */
745                         kgnilnd_tx_done(tx, conn->gnc_error);
746                 }
747         }
748
749         logmsg = (nlive + nrdma + nq_rdma);
750
751         if (logmsg) {
752                 if (conn->gnc_peer->gnp_down == GNILND_RCA_NODE_UP) {
753                         CNETERR("Closed conn 0x%p->%s (errno %d, peer errno %d): "
754                                 "canceled %d TX, %d/%d RDMA\n",
755                                 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
756                                 conn->gnc_error, conn->gnc_peer_error,
757                                 nlive, nq_rdma, nrdma);
758                 } else {
759                         CDEBUG(D_NET, "Closed conn 0x%p->%s (errno %d,"
760                                 " peer errno %d): canceled %d TX, %d/%d RDMA\n",
761                                 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
762                                 conn->gnc_error, conn->gnc_peer_error,
763                                 nlive, nq_rdma, nrdma);
764                 }
765         }
766
767         kgnilnd_destroy_conn_ep(conn);
768
769         /* Bug 765042 - race this with completing a new conn to same peer - we need
770          * finish_connect to detach purgatory before we can do it ourselves here */
771         CFS_RACE(CFS_FAIL_GNI_FINISH_PURG);
772
773         /* now it is safe to remove from peer list - anyone looking at
774          * gnp_conns now is free to unlink if not on purgatory */
775         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
776
777         conn->gnc_state = GNILND_CONN_DONE;
778
779         /* Decrement counter if we are marked by del_conn_or_peers for closing
780          */
781         if (conn->gnc_needs_closing)
782                 kgnilnd_admin_decref(kgnilnd_data.kgn_npending_conns);
783
784         /* Remove from peer's list of valid connections if its not in purgatory */
785         if (!conn->gnc_in_purgatory) {
786                 list_del_init(&conn->gnc_list);
787                 /* Lose peers reference on the conn */
788                 kgnilnd_conn_decref(conn);
789         }
790
791         /* NB - only unlinking if we set pending in del_peer_locked from admin or
792          * shutdown */
793         if (kgnilnd_peer_active(conn->gnc_peer) &&
794             conn->gnc_peer->gnp_pending_unlink &&
795             kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
796                 kgnilnd_unlink_peer_locked(conn->gnc_peer);
797         }
798
799         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
800
801         /* I'm telling Mommy! - use peer_error if they initiated close */
802         kgnilnd_peer_notify(conn->gnc_peer,
803                             conn->gnc_error == -ECONNRESET ? conn->gnc_peer_error
804                                                            : conn->gnc_error);
805
806         EXIT;
807 }
808
809 int
810 kgnilnd_set_conn_params(kgn_dgram_t *dgram)
811 {
812         kgn_conn_t             *conn = dgram->gndg_conn;
813         kgn_connreq_t          *connreq = &dgram->gndg_conn_in;
814         kgn_gniparams_t        *rem_param = &connreq->gncr_gnparams;
815         gni_return_t            rrc;
816         int                     rc = 0;
817         gni_smsg_attr_t        *remote = &connreq->gncr_gnparams.gnpr_smsg_attr;
818
819         /* set timeout vals in conn early so we can use them for the NAK */
820
821         /* use max of the requested and our timeout, peer will do the same */
822         conn->gnc_timeout = MAX(conn->gnc_timeout, connreq->gncr_timeout);
823
824         /* only ep_bind really mucks around with the CQ */
825         /* only ep bind if we are not connecting to ourself and the dstnid is not a wildcard. this check
826          * is necessary as you can only bind an ep once and we must make sure we dont bind when already bound.
827          */
828         if (connreq->gncr_dstnid != LNET_NID_ANY && dgram->gndg_conn_out.gncr_dstnid != connreq->gncr_srcnid) {
829                 mutex_lock(&conn->gnc_device->gnd_cq_mutex);
830                 rrc = kgnilnd_ep_bind(conn->gnc_ephandle,
831                         connreq->gncr_gnparams.gnpr_host_id,
832                         conn->gnc_cqid);
833                 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
834                 if (rrc != GNI_RC_SUCCESS) {
835                         rc = -ECONNABORTED;
836                         goto return_out;
837                 }
838         }
839
840         rrc = kgnilnd_ep_set_eventdata(conn->gnc_ephandle, conn->gnc_cqid,
841                          connreq->gncr_gnparams.gnpr_cqid);
842         if (rrc != GNI_RC_SUCCESS) {
843                 rc = -ECONNABORTED;
844                 goto cleanup_out;
845         }
846
847         /* Initialize SMSG */
848         rrc = kgnilnd_smsg_init(conn->gnc_ephandle, &conn->gnpr_smsg_attr,
849                         &connreq->gncr_gnparams.gnpr_smsg_attr);
850         if (unlikely(rrc == GNI_RC_INVALID_PARAM)) {
851                 gni_smsg_attr_t *local = &conn->gnpr_smsg_attr;
852                 /* help folks figure out if there is a tunable off, etc. */
853                 LCONSOLE_ERROR("SMSG attribute mismatch. Data from local/remote:"
854                                " type %d/%d msg_maxsize %u/%u"
855                                " mbox_maxcredit %u/%u. Please check kgni"
856                                " logs for further data\n",
857                                local->msg_type, remote->msg_type,
858                                local->msg_maxsize, remote->msg_maxsize,
859                                local->mbox_maxcredit, remote->mbox_maxcredit);
860         }
861         if (rrc != GNI_RC_SUCCESS) {
862                 rc = -ECONNABORTED;
863                 goto cleanup_out;
864         }
865
866         /* log this for help in debuggin SMSG buffer re-use */
867         CDEBUG(D_NET, "conn %p src %s dst %s smsg %p acquired"
868                 " local cqid %u SMSG %p->%u hndl "LPX64"."LPX64
869                 " remote cqid %u SMSG %p->%u hndl "LPX64"."LPX64"\n",
870                 conn, libcfs_nid2str(connreq->gncr_srcnid),
871                 libcfs_nid2str(connreq->gncr_dstnid),
872                 &conn->gnpr_smsg_attr,
873                 conn->gnc_cqid,
874                 conn->gnpr_smsg_attr.msg_buffer,
875                 conn->gnpr_smsg_attr.mbox_offset,
876                 conn->gnpr_smsg_attr.mem_hndl.qword1,
877                 conn->gnpr_smsg_attr.mem_hndl.qword2,
878                 rem_param->gnpr_cqid,
879                 rem_param->gnpr_smsg_attr.msg_buffer,
880                 rem_param->gnpr_smsg_attr.mbox_offset,
881                 rem_param->gnpr_smsg_attr.mem_hndl.qword1,
882                 rem_param->gnpr_smsg_attr.mem_hndl.qword2);
883
884         conn->gnc_peerstamp = connreq->gncr_peerstamp;
885         conn->gnc_peer_connstamp = connreq->gncr_connstamp;
886         conn->remote_mbox_addr = (void *)((char *)remote->msg_buffer + remote->mbox_offset);
887
888         /* We update the reaper timeout once we have a valid conn and timeout */
889         kgnilnd_update_reaper_timeout(GNILND_TO2KA(conn->gnc_timeout));
890
891         return 0;
892
893 cleanup_out:
894         rrc = kgnilnd_ep_unbind(conn->gnc_ephandle);
895         /* not sure I can just let this fly */
896         LASSERTF(rrc == GNI_RC_SUCCESS,
897                 "bad rc from gni_ep_unbind trying to cleanup: %d\n", rrc);
898
899 return_out:
900         LASSERTF(rc != 0, "SOFTWARE BUG: rc == 0\n");
901         CERROR("Error setting connection params from %s: %d\n",
902                libcfs_nid2str(connreq->gncr_srcnid), rc);
903         return rc;
904 }
905
906 /* needs down_read on kgn_net_rw_sem held from before this call until
907  * after the write_lock on kgn_peer_conn_lock - this ensures we stay sane
908  * with kgnilnd_shutdown - it'll get the sem and set shutdown, then get the
909  * kgn_peer_conn_lock to start del_peer'ing. If we hold the sem until after
910  * kgn_peer_conn_lock is held, we guarantee that nobody calls
911  * kgnilnd_add_peer_locked without checking gnn_shutdown */
912 int
913 kgnilnd_create_peer_safe(kgn_peer_t **peerp, lnet_nid_t nid, kgn_net_t *net)
914 {
915         kgn_peer_t      *peer;
916         int             rc;
917
918         LASSERT(nid != LNET_NID_ANY);
919
920         /* We dont pass the net around in the dgram anymore so here is where we find it
921          * this will work unless its in shutdown or the nid has a net that is invalid.
922          * Either way error code needs to be returned in that case.
923          *
924          * If the net passed in is not NULL then we can use it, this alleviates looking it
925          * when the calling function has access to the data.
926          */
927         if (net == NULL) {
928                 rc = kgnilnd_find_net(nid, &net);
929                 if (rc < 0)
930                         return rc;
931         } else {
932                 /* find net adds a reference on the net if we are not using
933                  * it we must do it manually so the net references are
934                  * correct when tearing down the net
935                  */
936                 kgnilnd_net_addref(net);
937         }
938
939         LIBCFS_ALLOC(peer, sizeof(*peer));
940         if (peer == NULL) {
941                 kgnilnd_net_decref(net);
942                 return -ENOMEM;
943         }
944         peer->gnp_nid = nid;
945         peer->gnp_down = GNILND_RCA_NODE_UP;
946
947         /* translate from nid to nic addr & store */
948         rc = kgnilnd_nid_to_nicaddrs(LNET_NIDADDR(nid), 1, &peer->gnp_host_id);
949         if (rc <= 0) {
950                 kgnilnd_net_decref(net);
951                 LIBCFS_FREE(peer, sizeof(*peer));
952                 return -ESRCH;
953         }
954         CDEBUG(D_NET, "peer 0x%p->%s -> NIC 0x%x\n", peer,
955                 libcfs_nid2str(nid), peer->gnp_host_id);
956
957         atomic_set(&peer->gnp_refcount, 1);     /* 1 ref for caller */
958         atomic_set(&peer->gnp_dirty_eps, 0);
959
960         INIT_LIST_HEAD(&peer->gnp_list);
961         INIT_LIST_HEAD(&peer->gnp_connd_list);
962         INIT_LIST_HEAD(&peer->gnp_conns);
963         INIT_LIST_HEAD(&peer->gnp_tx_queue);
964
965         /* the first reconnect should happen immediately, so we leave
966          * gnp_reconnect_interval set to 0 */
967
968         LASSERTF(net != NULL, "peer 0x%p->%s with NULL net\n",
969                  peer, libcfs_nid2str(nid));
970
971         /* must have kgn_net_rw_sem held for this...  */
972         if (net->gnn_shutdown) {
973                 /* shutdown has started already */
974                 kgnilnd_net_decref(net);
975                 LIBCFS_FREE(peer, sizeof(*peer));
976                 return -ESHUTDOWN;
977         }
978
979         peer->gnp_net = net;
980
981         atomic_inc(&kgnilnd_data.kgn_npeers);
982
983         *peerp = peer;
984         return 0;
985 }
986
987 void
988 kgnilnd_destroy_peer(kgn_peer_t *peer)
989 {
990         CDEBUG(D_NET, "peer %s %p deleted\n",
991                libcfs_nid2str(peer->gnp_nid), peer);
992         LASSERTF(atomic_read(&peer->gnp_refcount) == 0,
993                  "peer 0x%p->%s refs %d\n",
994                  peer, libcfs_nid2str(peer->gnp_nid),
995                  atomic_read(&peer->gnp_refcount));
996         LASSERTF(atomic_read(&peer->gnp_dirty_eps) == 0,
997                  "peer 0x%p->%s dirty eps %d\n",
998                  peer, libcfs_nid2str(peer->gnp_nid),
999                  atomic_read(&peer->gnp_dirty_eps));
1000         LASSERTF(peer->gnp_net != NULL, "peer %p (%s) with NULL net\n",
1001                  peer, libcfs_nid2str(peer->gnp_nid));
1002         LASSERTF(!kgnilnd_peer_active(peer),
1003                  "peer 0x%p->%s\n",
1004                 peer, libcfs_nid2str(peer->gnp_nid));
1005         LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE || peer->gnp_connecting == GNILND_PEER_KILL,
1006                  "peer 0x%p->%s, connecting %d\n",
1007                 peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
1008         LASSERTF(list_empty(&peer->gnp_conns),
1009                  "peer 0x%p->%s\n",
1010                 peer, libcfs_nid2str(peer->gnp_nid));
1011         LASSERTF(list_empty(&peer->gnp_tx_queue),
1012                  "peer 0x%p->%s\n",
1013                 peer, libcfs_nid2str(peer->gnp_nid));
1014         LASSERTF(list_empty(&peer->gnp_connd_list),
1015                  "peer 0x%p->%s\n",
1016                 peer, libcfs_nid2str(peer->gnp_nid));
1017
1018         /* NB a peer's connections keep a reference on their peer until
1019          * they are destroyed, so we can be assured that _all_ state to do
1020          * with this peer has been cleaned up when its refcount drops to
1021          * zero. */
1022
1023         atomic_dec(&kgnilnd_data.kgn_npeers);
1024         kgnilnd_net_decref(peer->gnp_net);
1025
1026         LIBCFS_FREE(peer, sizeof(*peer));
1027 }
1028
1029 /* the conn might not have made it all the way through to a connected
1030  * state - but we need to purgatory any conn that a remote peer might
1031  * have seen through a posted dgram as well */
1032 void
1033 kgnilnd_add_purgatory_locked(kgn_conn_t *conn, kgn_peer_t *peer)
1034 {
1035         kgn_mbox_info_t *mbox = NULL;
1036         ENTRY;
1037
1038         /* NB - the caller should own conn by removing him from the
1039          * scheduler thread when finishing the close */
1040
1041         LASSERTF(peer != NULL, "conn %p with NULL peer\n", conn);
1042
1043         /* If this is still true, need to add the calls to unlink back in and
1044          * figure out how to close the hole on loopback conns */
1045         LASSERTF(kgnilnd_peer_active(peer), "can't use inactive peer %s (%p)"
1046                 " we'll never recover the resources\n",
1047                 libcfs_nid2str(peer->gnp_nid), peer);
1048
1049         CDEBUG(D_NET, "conn %p peer %p dev %p\n", conn, peer,
1050                 conn->gnc_device);
1051
1052         conn->gnc_in_purgatory = 1;
1053
1054         mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
1055         mbox->mbx_prev_purg_nid = peer->gnp_nid;
1056         mbox->mbx_add_purgatory = jiffies;
1057         kgnilnd_release_mbox(conn, 1);
1058
1059         LASSERTF(list_empty(&conn->gnc_mdd_list),
1060                 "conn 0x%p->%s with active purgatory hold MDD %d\n",
1061                 conn, libcfs_nid2str(peer->gnp_nid),
1062                 kgnilnd_count_list(&conn->gnc_mdd_list));
1063
1064         EXIT;
1065 }
1066
1067 /* Instead of detaching everything from purgatory here we just mark the conn as needing
1068  * detach, when the reaper checks the conn the next time it will detach it.
1069  * Calling function requires write_lock held on kgn_peer_conn_lock
1070  */
1071 void
1072 kgnilnd_mark_for_detach_purgatory_all_locked(kgn_peer_t *peer) {
1073         kgn_conn_t       *conn;
1074
1075         list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
1076                 if (conn->gnc_in_purgatory && !conn->gnc_needs_detach) {
1077                         conn->gnc_needs_detach = 1;
1078                         kgnilnd_admin_addref(kgnilnd_data.kgn_npending_detach);
1079                 }
1080         }
1081 }
1082
1083 /* Calling function needs a write_lock held on kgn_peer_conn_lock */
1084 void
1085 kgnilnd_detach_purgatory_locked(kgn_conn_t *conn, struct list_head *conn_list)
1086 {
1087         kgn_mbox_info_t *mbox = NULL;
1088
1089         /* if needed, add the conn purgatory data to the list passed in */
1090         if (conn->gnc_in_purgatory) {
1091                 CDEBUG(D_NET, "peer %p->%s purg_conn %p@%s mdd_list #tx %d\n",
1092                         conn->gnc_peer, libcfs_nid2str(conn->gnc_peer->gnp_nid),
1093                         conn, kgnilnd_conn_state2str(conn),
1094                         kgnilnd_count_list(&conn->gnc_mdd_list));
1095
1096                 mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
1097                 mbox->mbx_detach_of_purgatory = jiffies;
1098
1099                 /* conn->gnc_list is the entry point on peer->gnp_conns, so detaching it
1100                  * here removes it from the list of 'valid' peer connections.
1101                  * We put the current conn onto a list of conns to call kgnilnd_release_purgatory_locked()
1102                  * and as such the caller of kgnilnd_detach_purgatory_locked() now owns that conn, since its not
1103                  * on the peer's conn_list anymore.
1104                  */
1105
1106                 list_del_init(&conn->gnc_list);
1107
1108                 /* NB - only unlinking if we set pending in del_peer_locked from admin or
1109                  * shutdown */
1110                 if (kgnilnd_peer_active(conn->gnc_peer) &&
1111                     conn->gnc_peer->gnp_pending_unlink &&
1112                     kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
1113                         kgnilnd_unlink_peer_locked(conn->gnc_peer);
1114                 }
1115                 /* The reaper will not call detach unless the conn is fully through kgnilnd_complete_closed_conn.
1116                  * If the conn is not in a DONE state somehow we are attempting to detach even though
1117                  * the conn has not been fully cleaned up. If we detach while the conn is still closing
1118                  * we will end up with an orphaned connection that has valid ep_handle, that is not on a
1119                  * peer.
1120                  */
1121
1122                 LASSERTF(conn->gnc_state == GNILND_CONN_DONE, "Conn in invalid state  %p@%s \n",
1123                                 conn, kgnilnd_conn_state2str(conn));
1124
1125                 /* move from peer to the delayed release list */
1126                 list_add_tail(&conn->gnc_list, conn_list);
1127         }
1128 }
1129
1130 void
1131 kgnilnd_release_purgatory_list(struct list_head *conn_list)
1132 {
1133         kgn_device_t            *dev;
1134         kgn_conn_t              *conn, *connN;
1135         kgn_mdd_purgatory_t     *gmp, *gmpN;
1136
1137         list_for_each_entry_safe(conn, connN, conn_list, gnc_list) {
1138                 dev = conn->gnc_device;
1139
1140                 kgnilnd_release_mbox(conn, -1);
1141                 conn->gnc_in_purgatory = 0;
1142
1143                 list_del_init(&conn->gnc_list);
1144
1145                 /* gnc_needs_detach is set in kgnilnd_del_conn_or_peer. It is used to keep track
1146                  * of conns that have been marked for detach by kgnilnd_del_conn_or_peer.
1147                  * The function uses kgn_npending_detach to verify the conn has
1148                  * actually been detached.
1149                  */
1150
1151                 if (conn->gnc_needs_detach)
1152                         kgnilnd_admin_decref(kgnilnd_data.kgn_npending_detach);
1153
1154                 /* if this guy is really dead (we are doing release from reaper),
1155                  * make sure we tell LNet - if this is from other context,
1156                  * the checks in the function will prevent an errant
1157                  * notification */
1158                 kgnilnd_peer_notify(conn->gnc_peer, conn->gnc_error);
1159
1160                 list_for_each_entry_safe(gmp, gmpN, &conn->gnc_mdd_list,
1161                                          gmp_list) {
1162                         CDEBUG(D_NET,
1163                                "dev %p releasing held mdd "LPX64"."LPX64"\n",
1164                                conn->gnc_device, gmp->gmp_map_key.qword1,
1165                                gmp->gmp_map_key.qword2);
1166
1167                         atomic_dec(&dev->gnd_n_mdd_held);
1168                         kgnilnd_mem_mdd_release(conn->gnc_device->gnd_handle,
1169                                                 &gmp->gmp_map_key);
1170                         /* ignoring the return code - if kgni/ghal can't find it
1171                          * it must be released already */
1172
1173                         list_del_init(&gmp->gmp_list);
1174                         LIBCFS_FREE(gmp, sizeof(*gmp));
1175                 }
1176                 /* lose conn ref for purgatory */
1177                 kgnilnd_conn_decref(conn);
1178         }
1179 }
1180
1181 /* needs write_lock on kgnilnd_data.kgn_peer_conn_lock held */
1182 void
1183 kgnilnd_peer_increase_reconnect_locked(kgn_peer_t *peer)
1184 {
1185         int current_to;
1186
1187         current_to = peer->gnp_reconnect_interval;
1188
1189         /* we'll try to reconnect fast the first time, then back-off */
1190         if (current_to == 0) {
1191                 peer->gnp_reconnect_time = jiffies - 1;
1192                 current_to = *kgnilnd_tunables.kgn_min_reconnect_interval;
1193         } else {
1194                 peer->gnp_reconnect_time = jiffies + cfs_time_seconds(current_to);
1195                 /* add 50% of min timeout & retry */
1196                 current_to += *kgnilnd_tunables.kgn_min_reconnect_interval / 2;
1197         }
1198
1199         current_to = MIN(current_to,
1200                                 *kgnilnd_tunables.kgn_max_reconnect_interval);
1201
1202         peer->gnp_reconnect_interval = current_to;
1203         CDEBUG(D_NET, "peer %s can reconnect at %lu interval %lu\n",
1204                libcfs_nid2str(peer->gnp_nid), peer->gnp_reconnect_time,
1205                peer->gnp_reconnect_interval);
1206 }
1207
1208 /* needs kgnilnd_data.kgn_peer_conn_lock held */
1209 kgn_peer_t *
1210 kgnilnd_find_peer_locked(lnet_nid_t nid)
1211 {
1212         struct list_head *peer_list = kgnilnd_nid2peerlist(nid);
1213         kgn_peer_t       *peer;
1214
1215         /* Chopping nid down to only NIDADDR using LNET_NIDADDR so we only
1216          * have a single peer per device instead of a peer per nid/net combo.
1217          */
1218
1219         list_for_each_entry(peer, peer_list, gnp_list) {
1220                 if (LNET_NIDADDR(nid) != LNET_NIDADDR(peer->gnp_nid))
1221                         continue;
1222
1223                 CDEBUG(D_NET, "got peer [%p] -> %s c %d (%d)\n",
1224                        peer, libcfs_nid2str(nid),
1225                        peer->gnp_connecting,
1226                        atomic_read(&peer->gnp_refcount));
1227                 return peer;
1228         }
1229         return NULL;
1230 }
1231
1232 /* need write_lock on kgn_peer_conn_lock */
1233 void
1234 kgnilnd_unlink_peer_locked(kgn_peer_t *peer)
1235 {
1236         LASSERTF(list_empty(&peer->gnp_conns),
1237                 "peer 0x%p->%s\n",
1238                  peer, libcfs_nid2str(peer->gnp_nid));
1239         LASSERTF(list_empty(&peer->gnp_tx_queue),
1240                 "peer 0x%p->%s\n",
1241                  peer, libcfs_nid2str(peer->gnp_nid));
1242         LASSERTF(kgnilnd_peer_active(peer),
1243                 "peer 0x%p->%s\n",
1244                  peer, libcfs_nid2str(peer->gnp_nid));
1245         CDEBUG(D_NET, "unlinking peer 0x%p->%s\n",
1246                 peer, libcfs_nid2str(peer->gnp_nid));
1247
1248         list_del_init(&peer->gnp_list);
1249         kgnilnd_data.kgn_peer_version++;
1250         kgnilnd_admin_decref(kgnilnd_data.kgn_npending_unlink);
1251         /* lose peerlist's ref */
1252         kgnilnd_peer_decref(peer);
1253 }
1254
1255 int
1256 kgnilnd_get_peer_info(int index,
1257                       kgn_peer_t **found_peer,
1258                       lnet_nid_t *id, __u32 *nic_addr,
1259                       int *refcount, int *connecting)
1260 {
1261         struct list_head  *ptmp;
1262         kgn_peer_t        *peer;
1263         int               i;
1264         int               rc = -ENOENT;
1265
1266         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1267
1268         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1269
1270                 list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
1271                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1272
1273                         if (index-- > 0)
1274                                 continue;
1275
1276                         CDEBUG(D_NET, "found peer %p (%s) at index %d\n",
1277                                peer, libcfs_nid2str(peer->gnp_nid), index);
1278
1279                         *found_peer  = peer;
1280                         *id          = peer->gnp_nid;
1281                         *nic_addr    = peer->gnp_host_id;
1282                         *refcount    = atomic_read(&peer->gnp_refcount);
1283                         *connecting  = peer->gnp_connecting;
1284
1285                         rc = 0;
1286                         goto out;
1287                 }
1288         }
1289 out:
1290         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1291         if (rc)
1292                 CDEBUG(D_NET, "no gni peer at index %d\n", index);
1293         return rc;
1294 }
1295
1296 /* requires write_lock on kgn_peer_conn_lock held */
1297 void
1298 kgnilnd_add_peer_locked(lnet_nid_t nid, kgn_peer_t *new_stub_peer, kgn_peer_t **peerp)
1299 {
1300         kgn_peer_t        *peer, *peer2;
1301
1302         LASSERTF(new_stub_peer != NULL, "bad stub peer for nid %s\n",
1303                  libcfs_nid2str(nid));
1304
1305         peer2 = kgnilnd_find_peer_locked(nid);
1306         if (peer2 != NULL) {
1307                 /* A peer was created during the lock transition, so drop
1308                  * the new one we created */
1309                 kgnilnd_peer_decref(new_stub_peer);
1310                 peer = peer2;
1311         } else {
1312                 peer = new_stub_peer;
1313                 /* peer table takes existing ref on peer */
1314
1315                 LASSERTF(!kgnilnd_peer_active(peer),
1316                         "peer 0x%p->%s already in peer table\n",
1317                         peer, libcfs_nid2str(peer->gnp_nid));
1318                 list_add_tail(&peer->gnp_list,
1319                               kgnilnd_nid2peerlist(nid));
1320                 kgnilnd_data.kgn_peer_version++;
1321         }
1322
1323         LASSERTF(peer->gnp_net != NULL, "peer 0x%p->%s with NULL net\n",
1324                  peer, libcfs_nid2str(peer->gnp_nid));
1325         *peerp = peer;
1326 }
1327
1328 int
1329 kgnilnd_add_peer(kgn_net_t *net, lnet_nid_t nid, kgn_peer_t **peerp)
1330 {
1331         kgn_peer_t        *peer;
1332         int                rc;
1333         ENTRY;
1334
1335         if (nid == LNET_NID_ANY)
1336                 return -EINVAL;
1337
1338         /* NB - this will not block during normal operations -
1339          * the only writer of this is in the startup/shutdown path. */
1340         rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
1341         if (!rc) {
1342                 rc = -ESHUTDOWN;
1343                 RETURN(rc);
1344         }
1345         rc = kgnilnd_create_peer_safe(&peer, nid, net);
1346         if (rc != 0) {
1347                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1348                 RETURN(rc);
1349         }
1350
1351         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1352         up_read(&kgnilnd_data.kgn_net_rw_sem);
1353
1354         kgnilnd_add_peer_locked(nid, peer, peerp);
1355
1356         CDEBUG(D_NET, "peer 0x%p->%s connecting %d\n",
1357                peerp, libcfs_nid2str((*peerp)->gnp_nid),
1358                (*peerp)->gnp_connecting);
1359
1360         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1361         RETURN(0);
1362 }
1363
1364 /* needs write_lock on kgn_peer_conn_lock */
1365 void
1366 kgnilnd_cancel_peer_connect_locked(kgn_peer_t *peer, struct list_head *zombies)
1367 {
1368         kgn_tx_t        *tx, *txn;
1369
1370         /* we do care about state of gnp_connecting - we could be between
1371          * reconnect attempts, so try to find the dgram and cancel the TX
1372          * anyways. If we are in the process of posting DONT do anything;
1373          * once it fails or succeeds we can nuke the connect attempt.
1374          * We have no idea where in kgnilnd_post_dgram we are so we cant
1375          * attempt to cancel until the function is done.
1376          */
1377
1378         /* make sure peer isn't in process of connecting or waiting for connect*/
1379         spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1380         if (!(list_empty(&peer->gnp_connd_list))) {
1381                 list_del_init(&peer->gnp_connd_list);
1382                 /* remove connd ref */
1383                 kgnilnd_peer_decref(peer);
1384         }
1385         spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1386
1387         if (peer->gnp_connecting == GNILND_PEER_POSTING || peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH) {
1388                 peer->gnp_connecting = GNILND_PEER_NEEDS_DEATH;
1389                 /* We are in process of posting right now the xchg set it up for us to
1390                  * cancel the connect so we are finished for now */
1391         } else {
1392                 /* no need for exchange we have the peer lock and its ready for us to nuke */
1393                 LASSERTF(peer->gnp_connecting != GNILND_PEER_POSTING,
1394                         "Peer in invalid state 0x%p->%s, connecting %d\n",
1395                         peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
1396                 peer->gnp_connecting = GNILND_PEER_IDLE;
1397                 set_mb(peer->gnp_last_dgram_errno, -ETIMEDOUT);
1398                 kgnilnd_find_and_cancel_dgram(peer->gnp_net->gnn_dev,
1399                                                       peer->gnp_nid);
1400         }
1401
1402         /* The least we can do is nuke the tx's no matter what.... */
1403         list_for_each_entry_safe(tx, txn, &peer->gnp_tx_queue, tx_list) {
1404                 kgnilnd_tx_del_state_locked(tx, peer, NULL,
1405                                            GNILND_TX_ALLOCD);
1406                 list_add_tail(&tx->tx_list, zombies);
1407         }
1408 }
1409
1410 /* needs write_lock on kgn_peer_conn_lock */
1411 void
1412 kgnilnd_del_peer_locked(kgn_peer_t *peer, int error)
1413 {
1414         /* this peer could be passive and only held for purgatory,
1415          * take a ref to ensure it doesn't disappear in this function */
1416         kgnilnd_peer_addref(peer);
1417
1418         CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);
1419
1420         /* if purgatory release cleared it out, don't try again */
1421         if (kgnilnd_peer_active(peer)) {
1422                 /* always do this to allow kgnilnd_start_connect and
1423                  * kgnilnd_finish_connect to catch this before they
1424                  * wrap up their operations */
1425                 if (kgnilnd_can_unlink_peer_locked(peer)) {
1426                         /* already released purgatory, so only active
1427                          * conns hold it */
1428                         kgnilnd_unlink_peer_locked(peer);
1429                 } else {
1430                         kgnilnd_close_peer_conns_locked(peer, error);
1431                         /* peer unlinks itself when last conn is closed */
1432                 }
1433         }
1434
1435         /* we are done, release back to the wild */
1436         kgnilnd_peer_decref(peer);
1437 }
1438
1439 int
1440 kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
1441                           int error)
1442 {
1443         LIST_HEAD               (souls);
1444         LIST_HEAD               (zombies);
1445         struct list_head        *ptmp, *pnxt;
1446         kgn_peer_t              *peer;
1447         int                     lo;
1448         int                     hi;
1449         int                     i;
1450         int                     rc = -ENOENT;
1451
1452         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1453
1454         if (nid != LNET_NID_ANY)
1455                 lo = hi = kgnilnd_nid2peerlist(nid) - kgnilnd_data.kgn_peers;
1456         else {
1457                 lo = 0;
1458                 hi = *kgnilnd_tunables.kgn_peer_hash_size - 1;
1459                 /* wildcards always succeed */
1460                 rc = 0;
1461         }
1462
1463         for (i = lo; i <= hi; i++) {
1464                 list_for_each_safe(ptmp, pnxt, &kgnilnd_data.kgn_peers[i]) {
1465                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1466
1467                         LASSERTF(peer->gnp_net != NULL,
1468                                 "peer %p (%s) with NULL net\n",
1469                                  peer, libcfs_nid2str(peer->gnp_nid));
1470
1471                         if (net != NULL && peer->gnp_net != net)
1472                                 continue;
1473
1474                         if (!(nid == LNET_NID_ANY || LNET_NIDADDR(peer->gnp_nid) == LNET_NIDADDR(nid)))
1475                                 continue;
1476
1477                         /* In both cases, we want to stop any in-flight
1478                          * connect attempts */
1479                         kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1480
1481                         switch (command) {
1482                         case GNILND_DEL_CONN:
1483                                 kgnilnd_close_peer_conns_locked(peer, error);
1484                                 break;
1485                         case GNILND_DEL_PEER:
1486                                 peer->gnp_pending_unlink = 1;
1487                                 kgnilnd_admin_addref(kgnilnd_data.kgn_npending_unlink);
1488                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1489                                 kgnilnd_del_peer_locked(peer, error);
1490                                 break;
1491                         case GNILND_CLEAR_PURGATORY:
1492                                 /* Mark everything ready for detach reaper will cleanup
1493                                  * once we release the kgn_peer_conn_lock
1494                                  */
1495                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1496                                 peer->gnp_last_errno = -EISCONN;
1497                                 /* clear reconnect so he can reconnect soon */
1498                                 peer->gnp_reconnect_time = 0;
1499                                 peer->gnp_reconnect_interval = 0;
1500                                 break;
1501                         default:
1502                                 CERROR("bad command %d\n", command);
1503                                 LBUG();
1504                         }
1505                         /* we matched something */
1506                         rc = 0;
1507                 }
1508         }
1509
1510         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1511
1512         /* release all of the souls found held in purgatory */
1513         kgnilnd_release_purgatory_list(&souls);
1514
1515         /* nuke peer TX */
1516         kgnilnd_txlist_done(&zombies, error);
1517
1518         /* This function does not return until the commands it initiated have completed,
1519          * since they have to work there way through the other threads. In the case of shutdown
1520          * threads are not woken up until after this call is initiated so we cannot wait, we just
1521          * need to return. The same applies for stack reset we shouldnt wait as the reset thread
1522          * handles closing.
1523          */
1524
1525         CFS_RACE(CFS_FAIL_GNI_RACE_RESET);
1526
1527         if (error == -ENOTRECOVERABLE || error == -ESHUTDOWN) {
1528                 return rc;
1529         }
1530
1531         i = 4;
1532         while (atomic_read(&kgnilnd_data.kgn_npending_conns)   ||
1533                atomic_read(&kgnilnd_data.kgn_npending_detach)  ||
1534                atomic_read(&kgnilnd_data.kgn_npending_unlink)) {
1535
1536                 cfs_pause(cfs_time_seconds(1));
1537                 i++;
1538
1539                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, "Waiting on %d peers %d closes %d detaches\n",
1540                                 atomic_read(&kgnilnd_data.kgn_npending_unlink),
1541                                 atomic_read(&kgnilnd_data.kgn_npending_conns),
1542                                 atomic_read(&kgnilnd_data.kgn_npending_detach));
1543         }
1544
1545         return rc;
1546 }
1547
1548 kgn_conn_t *
1549 kgnilnd_get_conn_by_idx(int index)
1550 {
1551         kgn_peer_t        *peer;
1552         struct list_head  *ptmp;
1553         kgn_conn_t        *conn;
1554         struct list_head  *ctmp;
1555         int                i;
1556
1557
1558         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1559                 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1560                 list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
1561
1562                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1563
1564                         list_for_each(ctmp, &peer->gnp_conns) {
1565                                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
1566
1567                                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1568                                         continue;
1569
1570                                 if (index-- > 0)
1571                                         continue;
1572
1573                                 CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
1574                                        libcfs_nid2str(conn->gnc_peer->gnp_nid),
1575                                        atomic_read(&conn->gnc_refcount));
1576                                 kgnilnd_conn_addref(conn);
1577                                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1578                                 return conn;
1579                         }
1580                 }
1581                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1582         }
1583
1584         return NULL;
1585 }
1586
1587 int
1588 kgnilnd_get_conn_info(kgn_peer_t *peer,
1589                       int *device_id, __u64 *peerstamp,
1590                       int *tx_seq, int *rx_seq,
1591                       int *fmaq_len, int *nfma, int *nrdma)
1592 {
1593         kgn_conn_t        *conn;
1594         int               rc = 0;
1595
1596         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1597
1598         conn = kgnilnd_find_conn_locked(peer);
1599         if (conn == NULL) {
1600                 rc = -ENOENT;
1601                 goto out;
1602         }
1603
1604         *device_id = conn->gnc_device->gnd_host_id;
1605         *peerstamp = conn->gnc_peerstamp;
1606         *tx_seq = conn->gnc_tx_seq;
1607         *rx_seq = conn->gnc_rx_seq;
1608         *fmaq_len = kgnilnd_count_list(&conn->gnc_fmaq);
1609         *nfma = atomic_read(&conn->gnc_nlive_fma);
1610         *nrdma = atomic_read(&conn->gnc_nlive_rdma);
1611 out:
1612         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1613         return rc;
1614 }
1615
1616 /* needs write_lock on kgn_peer_conn_lock */
1617 int
1618 kgnilnd_close_peer_conns_locked(kgn_peer_t *peer, int why)
1619 {
1620         kgn_conn_t         *conn;
1621         struct list_head   *ctmp, *cnxt;
1622         int                 count = 0;
1623
1624         list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
1625                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
1626
1627                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1628                         continue;
1629
1630                 count++;
1631                 /* we mark gnc_needs closing and increment kgn_npending_conns so that
1632                  * kgnilnd_del_conn_or_peer can wait on the other threads closing
1633                  * and cleaning up the connection.
1634                  */
1635                 if (!conn->gnc_needs_closing) {
1636                         conn->gnc_needs_closing = 1;
1637                         kgnilnd_admin_addref(kgnilnd_data.kgn_npending_conns);
1638                 }
1639                 kgnilnd_close_conn_locked(conn, why);
1640         }
1641         return count;
1642 }
1643
1644 int
1645 kgnilnd_report_node_state(lnet_nid_t nid, int down)
1646 {
1647         int         rc;
1648         kgn_peer_t  *peer, *new_peer;
1649         LIST_HEAD(zombies);
1650
1651         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1652         peer = kgnilnd_find_peer_locked(nid);
1653
1654         if (peer == NULL) {
1655                 int       i;
1656                 int       found_net = 0;
1657                 kgn_net_t *net;
1658
1659                 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1660
1661                 /* Don't add a peer for node up events */
1662                 if (down == GNILND_RCA_NODE_UP) {
1663                         return 0;
1664                 }
1665
1666                 /* find any valid net - we don't care which one... */
1667                 down_read(&kgnilnd_data.kgn_net_rw_sem);
1668                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
1669                         list_for_each_entry(net, &kgnilnd_data.kgn_nets[i],
1670                                             gnn_list) {
1671                                 found_net = 1;
1672                                 break;
1673                         }
1674
1675                         if (found_net) {
1676                                 break;
1677                         }
1678                 }
1679                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1680
1681                 if (!found_net) {
1682                         CNETERR("Could not find a net for nid %lld\n", nid);
1683                         return 1;
1684                 }
1685
1686                 /* The nid passed in does not yet contain the net portion.
1687                  * Let's build it up now
1688                  */
1689                 nid = LNET_MKNID(LNET_NIDNET(net->gnn_ni->ni_nid), nid);
1690                 rc = kgnilnd_add_peer(net, nid, &new_peer);
1691
1692                 if (rc) {
1693                         CNETERR("Could not add peer for nid %lld, rc %d\n",
1694                                 nid, rc);
1695                         return 1;
1696                 }
1697
1698                 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1699                 peer = kgnilnd_find_peer_locked(nid);
1700
1701                 if (peer == NULL) {
1702                         CNETERR("Could not find peer for nid %lld\n", nid);
1703                         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1704                         return 1;
1705                 }
1706         }
1707
1708         peer->gnp_down = down;
1709
1710         if (down == GNILND_RCA_NODE_DOWN) {
1711                 kgn_conn_t *conn;
1712
1713                 peer->gnp_down_event_time = jiffies;
1714                 kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1715                 conn = kgnilnd_find_conn_locked(peer);
1716
1717                 if (conn != NULL) {
1718                         kgnilnd_close_conn_locked(conn, -ENETRESET);
1719                 }
1720         } else {
1721                 peer->gnp_up_event_time = jiffies;
1722         }
1723
1724         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1725
1726         if (down == GNILND_RCA_NODE_DOWN) {
1727                 /* using ENETRESET so we don't get messages from
1728                  * kgnilnd_tx_done
1729                  */
1730                 kgnilnd_txlist_done(&zombies, -ENETRESET);
1731
1732                 if (*kgnilnd_tunables.kgn_peer_health) {
1733                         kgnilnd_peer_notify(peer, -ECONNRESET);
1734                 }
1735         }
1736
1737         CDEBUG(D_INFO, "marking nid %lld %s\n", nid, down ? "down" : "up");
1738         return 0;
1739 }
1740
1741 int
1742 kgnilnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
1743 {
1744         struct libcfs_ioctl_data *data = arg;
1745         kgn_net_t                *net = ni->ni_data;
1746         int                       rc = -EINVAL;
1747
1748         LASSERT(ni == net->gnn_ni);
1749
1750         switch (cmd) {
1751         case IOC_LIBCFS_GET_PEER: {
1752                 lnet_nid_t   nid = 0;
1753                 kgn_peer_t  *peer = NULL;
1754                 __u32 nic_addr = 0;
1755                 __u64 peerstamp = 0;
1756                 int peer_refcount = 0, peer_connecting = 0;
1757                 int device_id = 0;
1758                 int tx_seq = 0, rx_seq = 0;
1759                 int fmaq_len = 0, nfma = 0, nrdma = 0;
1760
1761                 rc = kgnilnd_get_peer_info(data->ioc_count, &peer,
1762                                            &nid, &nic_addr, &peer_refcount,
1763                                            &peer_connecting);
1764                 if (rc)
1765                         break;
1766
1767                 /* Barf */
1768                 /* LNET_MKNID is used to mask from lnet the multiplexing/demultiplexing of connections and peers
1769                  * LNET assumes a conn and peer per net, the LNET_MKNID/LNET_NIDADDR allows us to let Lnet see what it
1770                  * wants to see instead of the underlying network that is being used to send the data
1771                  */
1772                 data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(nid));
1773                 data->ioc_flags  = peer_connecting;
1774                 data->ioc_count  = peer_refcount;
1775
1776                 rc = kgnilnd_get_conn_info(peer, &device_id, &peerstamp,
1777                                            &tx_seq, &rx_seq, &fmaq_len,
1778                                            &nfma, &nrdma);
1779
1780                 /* This is allowable - a persistent peer could not
1781                  * have a connection */
1782                 if (rc) {
1783                         /* flag to indicate we are not connected -
1784                          * need to print as such */
1785                         data->ioc_flags |= (1<<16);
1786                         rc = 0;
1787                 } else {
1788                         /* still barf */
1789                         data->ioc_net = device_id;
1790                         data->ioc_u64[0] = peerstamp;
1791                         data->ioc_u32[0] = fmaq_len;
1792                         data->ioc_u32[1] = nfma;
1793                         data->ioc_u32[2] = tx_seq;
1794                         data->ioc_u32[3] = rx_seq;
1795                         data->ioc_u32[4] = nrdma;
1796                 }
1797                 break;
1798         }
1799         case IOC_LIBCFS_ADD_PEER: {
1800                 /* just dummy value to allow using common interface */
1801                 kgn_peer_t      *peer;
1802                 rc = kgnilnd_add_peer(net, data->ioc_nid, &peer);
1803                 break;
1804         }
1805         case IOC_LIBCFS_DEL_PEER: {
1806                 /* NULL is passed in so it affects all peers in existence without regard to network
1807                  * as the peer may not exist on the network LNET believes it to be on.
1808                  */
1809                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1810                                               GNILND_DEL_PEER, -EUCLEAN);
1811                 break;
1812         }
1813         case IOC_LIBCFS_GET_CONN: {
1814                 kgn_conn_t *conn = kgnilnd_get_conn_by_idx(data->ioc_count);
1815
1816                 if (conn == NULL)
1817                         rc = -ENOENT;
1818                 else {
1819                         rc = 0;
1820                         /* LNET_MKNID is used to build the correct address based on what LNET wants to see instead of
1821                          * the generic connection that is used to send the data
1822                          */
1823                         data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(conn->gnc_peer->gnp_nid));
1824                         data->ioc_u32[0] = conn->gnc_device->gnd_id;
1825                         kgnilnd_conn_decref(conn);
1826                 }
1827                 break;
1828         }
1829         case IOC_LIBCFS_CLOSE_CONNECTION: {
1830                 /* use error = -ENETRESET to indicate it was lctl disconnect */
1831                 /* NULL is passed in so it affects all the nets as the connection is virtual
1832                  * and may not exist on the network LNET believes it to be on.
1833                  */
1834                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1835                                               GNILND_DEL_CONN, -ENETRESET);
1836                 break;
1837         }
1838         case IOC_LIBCFS_PUSH_CONNECTION: {
1839                 /* we use this to flush purgatory */
1840                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1841                                               GNILND_CLEAR_PURGATORY, -EUCLEAN);
1842                 break;
1843         }
1844         case IOC_LIBCFS_REGISTER_MYNID: {
1845                 /* Ignore if this is a noop */
1846                 if (data->ioc_nid == ni->ni_nid) {
1847                         rc = 0;
1848                 } else {
1849                         CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
1850                                libcfs_nid2str(data->ioc_nid),
1851                                libcfs_nid2str(ni->ni_nid));
1852                         rc = -EINVAL;
1853                 }
1854                 break;
1855         }
1856         }
1857
1858         return rc;
1859 }
1860
1861 void
1862 kgnilnd_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
1863 {
1864         kgn_net_t               *net = ni->ni_data;
1865         kgn_tx_t                *tx;
1866         kgn_peer_t              *peer = NULL;
1867         kgn_conn_t              *conn = NULL;
1868         lnet_process_id_t       id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID};
1869         ENTRY;
1870
1871         /* I expect to find him, so only take a read lock */
1872         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1873         peer = kgnilnd_find_peer_locked(nid);
1874         if (peer != NULL) {
1875                 /* LIE if in a quiesce - we will update the timeouts after,
1876                  * but we don't want sends failing during it */
1877                 if (kgnilnd_data.kgn_quiesce_trigger) {
1878                         *when = jiffies;
1879                         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1880                         GOTO(out, 0);
1881                 }
1882
1883                 /* Update to best guess, might refine on later checks */
1884                 *when = peer->gnp_last_alive;
1885
1886                 /* we have a peer, how about a conn? */
1887                 conn = kgnilnd_find_conn_locked(peer);
1888
1889                 if (conn == NULL)  {
1890                         /* if there is no conn, check peer last errno to see if clean disconnect
1891                          * - if it was, we lie to LNet because we believe a TX would complete
1892                          * on reconnect */
1893                         if (kgnilnd_conn_clean_errno(peer->gnp_last_errno)) {
1894                                 *when = jiffies;
1895                         }
1896                         /* we still want to fire a TX and new conn in this case */
1897                 } else {
1898                         /* gnp_last_alive is valid, run for the hills */
1899                         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1900                         GOTO(out, 0);
1901                 }
1902         }
1903         /* if we get here, either we have no peer or no conn for him, so fire off
1904          * new TX to trigger conn setup */
1905         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1906
1907         /* if we couldn't find him, we'll fire up a TX and get connected -
1908          * if we don't do this, after ni_peer_timeout, LNet will declare him dead.
1909          * So really we treat kgnilnd_query as a bit of a 'connect now' type
1910          * event because it'll only do this when it wants to send
1911          *
1912          * Use a real TX for this to get the proper gnp_tx_queue behavior, etc
1913          * normally we'd use kgnilnd_send_ctlmsg for this, but we don't really
1914          * care that this goes out quickly since we already know we need a new conn
1915          * formed */
1916         if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
1917                 return;
1918
1919         tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, ni->ni_nid);
1920         if (tx != NULL) {
1921                 kgnilnd_launch_tx(tx, net, &id);
1922         }
1923 out:
1924         CDEBUG(D_NETTRACE, "peer 0x%p->%s when %lu\n", peer,
1925                libcfs_nid2str(nid), *when);
1926         EXIT;
1927 }
1928
1929 int
1930 kgnilnd_dev_init(kgn_device_t *dev)
1931 {
1932         gni_return_t      rrc;
1933         int               rc = 0;
1934         unsigned int      cq_size;
1935         ENTRY;
1936
1937         /* size of these CQs should be able to accommodate the outgoing
1938          * RDMA and SMSG transactions.  Since we really don't know what we
1939          * really need here, we'll take credits * 2 * 3 to allow a bunch.
1940          * We need to dig into this more with the performance work. */
1941         cq_size = *kgnilnd_tunables.kgn_credits * 2 * 3;
1942
1943         rrc = kgnilnd_cdm_create(dev->gnd_id, *kgnilnd_tunables.kgn_ptag,
1944                                  GNILND_COOKIE, 0,
1945                                  &dev->gnd_domain);
1946         if (rrc != GNI_RC_SUCCESS) {
1947                 CERROR("Can't create CDM %d (%d)\n", dev->gnd_id, rrc);
1948                 GOTO(failed, rc = -ENODEV);
1949         }
1950
1951         rrc = kgnilnd_cdm_attach(dev->gnd_domain, dev->gnd_id,
1952                                  &dev->gnd_host_id, &dev->gnd_handle);
1953         if (rrc != GNI_RC_SUCCESS) {
1954                 CERROR("Can't attach CDM to device %d (%d)\n",
1955                         dev->gnd_id, rrc);
1956                 GOTO(failed, rc = -ENODEV);
1957         }
1958
1959         /* a bit gross, but not much we can do - Aries Sim doesn't have
1960          * hardcoded NIC/NID that we can use */
1961         rc = kgnilnd_setup_nic_translation(dev->gnd_host_id);
1962         if (rc != 0)
1963                 GOTO(failed, rc = -ENODEV);
1964
1965         /* only dev 0 gets the errors - no need to reset the stack twice
1966          * - this works because we have a single PTAG, if we had more
1967          * then we'd need to have multiple handlers */
1968         if (dev->gnd_id == 0) {
1969                 rrc = kgnilnd_subscribe_errors(dev->gnd_handle,
1970                                                 GNI_ERRMASK_CRITICAL |
1971                                                 GNI_ERRMASK_UNKNOWN_TRANSACTION,
1972                                               0, NULL, kgnilnd_critical_error,
1973                                               &dev->gnd_err_handle);
1974                 if (rrc != GNI_RC_SUCCESS) {
1975                         CERROR("Can't subscribe for errors on device %d: rc %d\n",
1976                                 dev->gnd_id, rrc);
1977                         GOTO(failed, rc = -ENODEV);
1978                 }
1979
1980                 rc = kgnilnd_set_quiesce_callback(dev->gnd_handle,
1981                                                   kgnilnd_quiesce_end_callback);
1982                 if (rc != GNI_RC_SUCCESS) {
1983                         CERROR("Can't subscribe for quiesce callback on device %d: rc %d\n",
1984                                 dev->gnd_id, rrc);
1985                         GOTO(failed, rc = -ENODEV);
1986                 }
1987         }
1988
1989         rc = kgnilnd_nicaddr_to_nid(dev->gnd_host_id, &dev->gnd_nid);
1990         if (rc < 0) {
1991                 /* log messages during startup */
1992                 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
1993                         CERROR("couldn't translate host_id 0x%x to nid. rc %d\n",
1994                                 dev->gnd_host_id, rc);
1995                 }
1996                 GOTO(failed, rc = -ESRCH);
1997         }
1998         CDEBUG(D_NET, "NIC %x -> NID %d\n", dev->gnd_host_id, dev->gnd_nid);
1999
2000         rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
2001                                 0, kgnilnd_device_callback,
2002                                 dev->gnd_id, &dev->gnd_snd_rdma_cqh);
2003         if (rrc != GNI_RC_SUCCESS) {
2004                 CERROR("Can't create rdma send cq size %u for device "
2005                        "%d (%d)\n", cq_size, dev->gnd_id, rrc);
2006                 GOTO(failed, rc = -EINVAL);
2007         }
2008
2009         rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
2010                         0, kgnilnd_device_callback, dev->gnd_id,
2011                         &dev->gnd_snd_fma_cqh);
2012         if (rrc != GNI_RC_SUCCESS) {
2013                 CERROR("Can't create fma send cq size %u for device %d (%d)\n",
2014                        cq_size, dev->gnd_id, rrc);
2015                 GOTO(failed, rc = -EINVAL);
2016         }
2017
2018         /* This one we size differently - overflows are possible and it needs to be
2019          * sized based on machine size */
2020         rrc = kgnilnd_cq_create(dev->gnd_handle,
2021                         *kgnilnd_tunables.kgn_fma_cq_size,
2022                         0, kgnilnd_device_callback, dev->gnd_id,
2023                         &dev->gnd_rcv_fma_cqh);
2024         if (rrc != GNI_RC_SUCCESS) {
2025                 CERROR("Can't create fma cq size %d for device %d (%d)\n",
2026                        *kgnilnd_tunables.kgn_fma_cq_size, dev->gnd_id, rrc);
2027                 GOTO(failed, rc = -EINVAL);
2028         }
2029
2030         RETURN(0);
2031
2032 failed:
2033         kgnilnd_dev_fini(dev);
2034         RETURN(rc);
2035 }
2036
2037 void
2038 kgnilnd_dev_fini(kgn_device_t *dev)
2039 {
2040         gni_return_t rrc;
2041         ENTRY;
2042
2043         /* At quiesce or rest time, need to loop through and clear gnd_ready_conns ?*/
2044         LASSERTF(list_empty(&dev->gnd_ready_conns) &&
2045                  list_empty(&dev->gnd_map_tx) &&
2046                  list_empty(&dev->gnd_rdmaq),
2047                  "dev 0x%p ready_conns %d@0x%p map_tx %d@0x%p rdmaq %d@0x%p\n",
2048                  dev, kgnilnd_count_list(&dev->gnd_ready_conns), &dev->gnd_ready_conns,
2049                  kgnilnd_count_list(&dev->gnd_map_tx), &dev->gnd_map_tx,
2050                  kgnilnd_count_list(&dev->gnd_rdmaq), &dev->gnd_rdmaq);
2051
2052         /* These should follow from tearing down all connections */
2053         LASSERTF(dev->gnd_map_nphys == 0 && dev->gnd_map_physnop == 0,
2054                 "%d physical mappings of %d pages still mapped\n",
2055                  dev->gnd_map_nphys, dev->gnd_map_physnop);
2056
2057         LASSERTF(dev->gnd_map_nvirt == 0 && dev->gnd_map_virtnob == 0,
2058                 "%d virtual mappings of "LPU64" bytes still mapped\n",
2059                  dev->gnd_map_nvirt, dev->gnd_map_virtnob);
2060
2061         LASSERTF(atomic_read(&dev->gnd_n_mdd) == 0 &&
2062                  atomic_read(&dev->gnd_n_mdd_held) == 0 &&
2063                  atomic64_read(&dev->gnd_nbytes_map) == 0,
2064                 "%d SMSG mappings of %ld bytes still mapped or held %d\n",
2065                  atomic_read(&dev->gnd_n_mdd),
2066                  atomic64_read(&dev->gnd_nbytes_map), atomic_read(&dev->gnd_n_mdd_held));
2067
2068         LASSERT(list_empty(&dev->gnd_map_list));
2069
2070         /* What other assertions needed to ensure all connections torn down ? */
2071
2072         /* check all counters == 0 (EP, MDD, etc) */
2073
2074         /* if we are resetting due to quiese (stack reset), don't check
2075          * thread states */
2076         LASSERTF(kgnilnd_data.kgn_quiesce_trigger ||
2077                 atomic_read(&kgnilnd_data.kgn_nthreads) == 0,
2078                 "tried to shutdown with threads active\n");
2079
2080         if (dev->gnd_rcv_fma_cqh) {
2081                 rrc = kgnilnd_cq_destroy(dev->gnd_rcv_fma_cqh);
2082                 LASSERTF(rrc == GNI_RC_SUCCESS,
2083                         "bad rc from gni_cq_destroy on rcv_fma_cqh: %d\n", rrc);
2084                 dev->gnd_rcv_fma_cqh = NULL;
2085         }
2086
2087         if (dev->gnd_snd_rdma_cqh) {
2088                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_rdma_cqh);
2089                 LASSERTF(rrc == GNI_RC_SUCCESS,
2090                         "bad rc from gni_cq_destroy on send_rdma_cqh: %d\n", rrc);
2091                 dev->gnd_snd_rdma_cqh = NULL;
2092         }
2093
2094         if (dev->gnd_snd_fma_cqh) {
2095                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_fma_cqh);
2096                 LASSERTF(rrc == GNI_RC_SUCCESS,
2097                         "bad rc from gni_cq_destroy on snd_fma_cqh: %d\n", rrc);
2098                 dev->gnd_snd_fma_cqh = NULL;
2099         }
2100
2101         if (dev->gnd_err_handle) {
2102                 rrc = kgnilnd_release_errors(dev->gnd_err_handle);
2103                 LASSERTF(rrc == GNI_RC_SUCCESS,
2104                         "bad rc from gni_release_errors: %d\n", rrc);
2105                 dev->gnd_err_handle = NULL;
2106         }
2107
2108         if (dev->gnd_domain) {
2109                 rrc = kgnilnd_cdm_destroy(dev->gnd_domain);
2110                 LASSERTF(rrc == GNI_RC_SUCCESS,
2111                         "bad rc from gni_cdm_destroy: %d\n", rrc);
2112                 dev->gnd_domain = NULL;
2113         }
2114
2115         EXIT;
2116 }
2117
2118
2119 int kgnilnd_base_startup(void)
2120 {
2121         struct timeval       tv;
2122         int                  pkmem = atomic_read(&libcfs_kmemory);
2123         int                  rc;
2124         int                  i;
2125         kgn_device_t        *dev;
2126         struct task_struct  *thrd;
2127         ENTRY;
2128
2129         LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_NOTHING,
2130                 "init %d\n", kgnilnd_data.kgn_init);
2131
2132         /* zero pointers, flags etc */
2133         memset(&kgnilnd_data, 0, sizeof(kgnilnd_data));
2134
2135         /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
2136          * a unique (for all time) connstamp so we can uniquely identify
2137          * the sender.  The connstamp is an incrementing counter
2138          * initialised with seconds + microseconds at startup time.  So we
2139          * rely on NOT creating connections more frequently on average than
2140          * 1MHz to ensure we don't use old connstamps when we reboot. */
2141         do_gettimeofday(&tv);
2142         kgnilnd_data.kgn_connstamp =
2143                  kgnilnd_data.kgn_peerstamp =
2144                         (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
2145
2146         init_rwsem(&kgnilnd_data.kgn_net_rw_sem);
2147
2148         for (i = 0; i < GNILND_MAXDEVS; i++) {
2149                 kgn_device_t  *dev = &kgnilnd_data.kgn_devices[i];
2150
2151                 dev->gnd_id = i;
2152                 INIT_LIST_HEAD(&dev->gnd_ready_conns);
2153                 INIT_LIST_HEAD(&dev->gnd_map_tx);
2154                 INIT_LIST_HEAD(&dev->gnd_fma_buffs);
2155                 mutex_init(&dev->gnd_cq_mutex);
2156                 sema_init(&dev->gnd_fmablk_sem, 1);
2157                 spin_lock_init(&dev->gnd_fmablk_lock);
2158                 init_waitqueue_head(&dev->gnd_waitq);
2159                 init_waitqueue_head(&dev->gnd_dgram_waitq);
2160                 init_waitqueue_head(&dev->gnd_dgping_waitq);
2161                 spin_lock_init(&dev->gnd_lock);
2162                 INIT_LIST_HEAD(&dev->gnd_map_list);
2163                 spin_lock_init(&dev->gnd_map_lock);
2164                 atomic_set(&dev->gnd_nfmablk, 0);
2165                 atomic_set(&dev->gnd_fmablk_vers, 1);
2166                 atomic_set(&dev->gnd_neps, 0);
2167                 atomic_set(&dev->gnd_canceled_dgrams, 0);
2168                 INIT_LIST_HEAD(&dev->gnd_connd_peers);
2169                 spin_lock_init(&dev->gnd_connd_lock);
2170                 spin_lock_init(&dev->gnd_dgram_lock);
2171                 spin_lock_init(&dev->gnd_rdmaq_lock);
2172                 INIT_LIST_HEAD(&dev->gnd_rdmaq);
2173                 init_rwsem(&dev->gnd_conn_sem);
2174
2175                 /* alloc & setup nid based dgram table */
2176                 LIBCFS_ALLOC(dev->gnd_dgrams,
2177                             sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2178
2179                 if (dev->gnd_dgrams == NULL)
2180                         GOTO(failed, rc = -ENOMEM);
2181
2182                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2183                         INIT_LIST_HEAD(&dev->gnd_dgrams[i]);
2184                 }
2185                 atomic_set(&dev->gnd_ndgrams, 0);
2186                 atomic_set(&dev->gnd_nwcdgrams, 0);
2187                 /* setup timer for RDMAQ processing */
2188                 setup_timer(&dev->gnd_rdmaq_timer, kgnilnd_schedule_device_timer,
2189                             (unsigned long)dev);
2190
2191                 /* setup timer for mapping processing */
2192                 setup_timer(&dev->gnd_map_timer, kgnilnd_schedule_device_timer,
2193                             (unsigned long)dev);
2194
2195         }
2196
2197         /* CQID 0 isn't allowed, set to MAX_MSG_ID - 1 to check for conflicts early */
2198         kgnilnd_data.kgn_next_cqid = GNILND_MAX_MSG_ID - 1;
2199         kgnilnd_data.kgn_new_min_timeout = *kgnilnd_tunables.kgn_timeout;
2200         init_waitqueue_head(&kgnilnd_data.kgn_reaper_waitq);
2201         init_waitqueue_head(&kgnilnd_data.kgn_ruhroh_waitq);
2202         spin_lock_init(&kgnilnd_data.kgn_reaper_lock);
2203
2204         sema_init(&kgnilnd_data.kgn_quiesce_sem, 1);
2205         atomic_set(&kgnilnd_data.kgn_nquiesce, 0);
2206         atomic_set(&kgnilnd_data.kgn_npending_conns, 0);
2207         atomic_set(&kgnilnd_data.kgn_npending_unlink, 0);
2208         atomic_set(&kgnilnd_data.kgn_npending_detach, 0);
2209         atomic_set(&kgnilnd_data.kgn_rev_offset, 0);
2210         atomic_set(&kgnilnd_data.kgn_rev_length, 0);
2211         atomic_set(&kgnilnd_data.kgn_rev_copy_buff, 0);
2212
2213         /* OK to call kgnilnd_api_shutdown() to cleanup now */
2214         kgnilnd_data.kgn_init = GNILND_INIT_DATA;
2215         try_module_get(THIS_MODULE);
2216
2217         rwlock_init(&kgnilnd_data.kgn_peer_conn_lock);
2218
2219         LIBCFS_ALLOC(kgnilnd_data.kgn_peers,
2220                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2221
2222         if (kgnilnd_data.kgn_peers == NULL)
2223                 GOTO(failed, rc = -ENOMEM);
2224
2225         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2226                 INIT_LIST_HEAD(&kgnilnd_data.kgn_peers[i]);
2227         }
2228
2229         LIBCFS_ALLOC(kgnilnd_data.kgn_conns,
2230                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2231
2232         if (kgnilnd_data.kgn_conns == NULL)
2233                 GOTO(failed, rc = -ENOMEM);
2234
2235         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2236                 INIT_LIST_HEAD(&kgnilnd_data.kgn_conns[i]);
2237         }
2238
2239         LIBCFS_ALLOC(kgnilnd_data.kgn_nets,
2240                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_net_hash_size);
2241
2242         if (kgnilnd_data.kgn_nets == NULL)
2243                 GOTO(failed, rc = -ENOMEM);
2244
2245         for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
2246                 INIT_LIST_HEAD(&kgnilnd_data.kgn_nets[i]);
2247         }
2248
2249         kgnilnd_data.kgn_mbox_cache =
2250                 kmem_cache_create("kgn_mbox_block", KMALLOC_MAX_SIZE, 0,
2251                                   SLAB_HWCACHE_ALIGN, NULL);
2252         if (kgnilnd_data.kgn_mbox_cache == NULL) {
2253                 CERROR("Can't create slab for physical mbox blocks\n");
2254                 GOTO(failed, rc = -ENOMEM);
2255         }
2256
2257         kgnilnd_data.kgn_rx_cache =
2258                 kmem_cache_create("kgn_rx_t", sizeof(kgn_rx_t), 0, 0, NULL);
2259         if (kgnilnd_data.kgn_rx_cache == NULL) {
2260                 CERROR("Can't create slab for kgn_rx_t descriptors\n");
2261                 GOTO(failed, rc = -ENOMEM);
2262         }
2263
2264         kgnilnd_data.kgn_tx_cache =
2265                 kmem_cache_create("kgn_tx_t", sizeof(kgn_tx_t), 0, 0, NULL);
2266         if (kgnilnd_data.kgn_tx_cache == NULL) {
2267                 CERROR("Can't create slab for kgn_tx_t\n");
2268                 GOTO(failed, rc = -ENOMEM);
2269         }
2270
2271         kgnilnd_data.kgn_tx_phys_cache =
2272                 kmem_cache_create("kgn_tx_phys",
2273                                    LNET_MAX_IOV * sizeof(gni_mem_segment_t),
2274                                    0, 0, NULL);
2275         if (kgnilnd_data.kgn_tx_phys_cache == NULL) {
2276                 CERROR("Can't create slab for kgn_tx_phys\n");
2277                 GOTO(failed, rc = -ENOMEM);
2278         }
2279
2280         kgnilnd_data.kgn_dgram_cache =
2281                 kmem_cache_create("kgn_dgram_t", sizeof(kgn_dgram_t), 0, 0, NULL);
2282         if (kgnilnd_data.kgn_dgram_cache == NULL) {
2283                 CERROR("Can't create slab for outgoing datagrams\n");
2284                 GOTO(failed, rc = -ENOMEM);
2285         }
2286
2287         /* allocate a MAX_IOV array of page pointers for each cpu */
2288         kgnilnd_data.kgn_cksum_map_pages = kmalloc(num_possible_cpus() * sizeof (struct page *),
2289                                                    GFP_KERNEL);
2290         if (kgnilnd_data.kgn_cksum_map_pages == NULL) {
2291                 CERROR("Can't allocate vmap cksum pages\n");
2292                 GOTO(failed, rc = -ENOMEM);
2293         }
2294         kgnilnd_data.kgn_cksum_npages = num_possible_cpus();
2295         memset(kgnilnd_data.kgn_cksum_map_pages, 0,
2296                 kgnilnd_data.kgn_cksum_npages * sizeof (struct page *));
2297
2298         for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
2299                 kgnilnd_data.kgn_cksum_map_pages[i] = kmalloc(LNET_MAX_IOV * sizeof (struct page *),
2300                                                               GFP_KERNEL);
2301                 if (kgnilnd_data.kgn_cksum_map_pages[i] == NULL) {
2302                         CERROR("Can't allocate vmap cksum pages for cpu %d\n", i);
2303                         GOTO(failed, rc = -ENOMEM);
2304                 }
2305         }
2306
2307         LASSERT(kgnilnd_data.kgn_ndevs == 0);
2308
2309         /* Use all available GNI devices */
2310         for (i = 0; i < GNILND_MAXDEVS; i++) {
2311                 dev = &kgnilnd_data.kgn_devices[kgnilnd_data.kgn_ndevs];
2312
2313                 rc = kgnilnd_dev_init(dev);
2314                 if (rc == 0) {
2315                         /* Increment here so base_shutdown cleans it up */
2316                         kgnilnd_data.kgn_ndevs++;
2317
2318                         rc = kgnilnd_allocate_phys_fmablk(dev);
2319                         if (rc)
2320                                 GOTO(failed, rc);
2321                 }
2322         }
2323
2324         if (kgnilnd_data.kgn_ndevs == 0) {
2325                 CERROR("Can't initialise any GNI devices\n");
2326                 GOTO(failed, rc = -ENODEV);
2327         }
2328
2329         rc = kgnilnd_thread_start(kgnilnd_reaper, NULL, "kgnilnd_rpr", 0);
2330         if (rc != 0) {
2331                 CERROR("Can't spawn gnilnd reaper: %d\n", rc);
2332                 GOTO(failed, rc);
2333         }
2334
2335         rc = kgnilnd_start_rca_thread();
2336         if (rc != 0) {
2337                 CERROR("Can't spawn gnilnd rca: %d\n", rc);
2338                 GOTO(failed, rc);
2339         }
2340
2341         /*
2342          * Start ruhroh thread.  We can't use kgnilnd_thread_start() because
2343          * we don't want this thread included in kgnilnd_data.kgn_nthreads
2344          * count.  This thread controls quiesce, so it mustn't
2345          * quiesce itself.
2346          */
2347         thrd = kthread_run(kgnilnd_ruhroh_thread, NULL, "%s_%02d", "kgnilnd_rr", 0);
2348         if (IS_ERR(thrd)) {
2349                 rc = PTR_ERR(thrd);
2350                 CERROR("Can't spawn gnilnd ruhroh thread: %d\n", rc);
2351                 GOTO(failed, rc);
2352         }
2353
2354         /* threads will load balance across devs as they are available */
2355         for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
2356                 rc = kgnilnd_thread_start(kgnilnd_scheduler, (void *)((long)i),
2357                                           "kgnilnd_sd", i);
2358                 if (rc != 0) {
2359                         CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
2360                                i, rc);
2361                         GOTO(failed, rc);
2362                 }
2363         }
2364
2365         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2366                 dev = &kgnilnd_data.kgn_devices[i];
2367                 rc = kgnilnd_thread_start(kgnilnd_dgram_mover, dev,
2368                                           "kgnilnd_dg", dev->gnd_id);
2369                 if (rc != 0) {
2370                         CERROR("Can't spawn gnilnd dgram_mover[%d]: %d\n",
2371                                dev->gnd_id, rc);
2372                         GOTO(failed, rc);
2373                 }
2374
2375                 rc = kgnilnd_thread_start(kgnilnd_dgram_waitq, dev,
2376                                           "kgnilnd_dgn", dev->gnd_id);
2377                 if (rc != 0) {
2378                         CERROR("Can't spawn gnilnd dgram_waitq[%d]: %d\n",
2379                                 dev->gnd_id, rc);
2380                         GOTO(failed, rc);
2381                 }
2382
2383                 rc = kgnilnd_setup_wildcard_dgram(dev);
2384
2385                 if (rc != 0) {
2386                         CERROR("Can't create wildcard dgrams[%d]: %d\n",
2387                                 dev->gnd_id, rc);
2388                         GOTO(failed, rc);
2389                 }
2390         }
2391
2392
2393
2394         /* flag everything initialised */
2395         kgnilnd_data.kgn_init = GNILND_INIT_ALL;
2396         /*****************************************************/
2397
2398         CDEBUG(D_MALLOC, "initial kmem %d\n", pkmem);
2399         RETURN(0);
2400
2401 failed:
2402         kgnilnd_base_shutdown();
2403         kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
2404         RETURN(rc);
2405 }
2406
2407 void
2408 kgnilnd_base_shutdown(void)
2409 {
2410         int                     i;
2411         ENTRY;
2412
2413         while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_PAUSE_SHUTDOWN, 1)) {};
2414
2415         kgnilnd_data.kgn_wc_kill = 1;
2416
2417         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2418                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2419                 kgnilnd_cancel_wc_dgrams(dev);
2420                 kgnilnd_del_conn_or_peer(NULL, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
2421                 kgnilnd_wait_for_canceled_dgrams(dev);
2422         }
2423
2424         /* Peer state all cleaned up BEFORE setting shutdown, so threads don't
2425          * have to worry about shutdown races.  NB connections may be created
2426          * while there are still active connds, but these will be temporary
2427          * since peer creation always fails after the listener has started to
2428          * shut down.
2429          * all peers should have been cleared out on the nets */
2430         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2431                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2432
2433         /* Wait for the ruhroh thread to shut down. */
2434         kgnilnd_data.kgn_ruhroh_shutdown = 1;
2435         wake_up(&kgnilnd_data.kgn_ruhroh_waitq);
2436         i = 2;
2437         while (kgnilnd_data.kgn_ruhroh_running != 0) {
2438                 i++;
2439                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2440                        "Waiting for ruhroh thread to terminate\n");
2441                 cfs_pause(cfs_time_seconds(1));
2442         }
2443
2444        /* Flag threads to terminate */
2445         kgnilnd_data.kgn_shutdown = 1;
2446
2447         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2448                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2449
2450                 /* should clear all the MDDs */
2451                 kgnilnd_unmap_phys_fmablk(dev);
2452
2453                 kgnilnd_schedule_device(dev);
2454                 wake_up_all(&dev->gnd_dgram_waitq);
2455                 wake_up_all(&dev->gnd_dgping_waitq);
2456                 LASSERT(list_empty(&dev->gnd_connd_peers));
2457         }
2458
2459         spin_lock(&kgnilnd_data.kgn_reaper_lock);
2460         wake_up_all(&kgnilnd_data.kgn_reaper_waitq);
2461         spin_unlock(&kgnilnd_data.kgn_reaper_lock);
2462
2463         kgnilnd_wakeup_rca_thread();
2464
2465         /* Wait for threads to exit */
2466         i = 2;
2467         while (atomic_read(&kgnilnd_data.kgn_nthreads) != 0) {
2468                 i++;
2469                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2470                        "Waiting for %d threads to terminate\n",
2471                        atomic_read(&kgnilnd_data.kgn_nthreads));
2472                 cfs_pause(cfs_time_seconds(1));
2473         }
2474
2475         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2476                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2477
2478         if (kgnilnd_data.kgn_peers != NULL) {
2479                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
2480                         LASSERT(list_empty(&kgnilnd_data.kgn_peers[i]));
2481
2482                 LIBCFS_FREE(kgnilnd_data.kgn_peers,
2483                             sizeof (struct list_head) *
2484                             *kgnilnd_tunables.kgn_peer_hash_size);
2485         }
2486
2487         down_write(&kgnilnd_data.kgn_net_rw_sem);
2488         if (kgnilnd_data.kgn_nets != NULL) {
2489                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++)
2490                         LASSERT(list_empty(&kgnilnd_data.kgn_nets[i]));
2491
2492                 LIBCFS_FREE(kgnilnd_data.kgn_nets,
2493                             sizeof (struct list_head) *
2494                             *kgnilnd_tunables.kgn_net_hash_size);
2495         }
2496         up_write(&kgnilnd_data.kgn_net_rw_sem);
2497
2498         LASSERTF(atomic_read(&kgnilnd_data.kgn_nconns) == 0,
2499                 "conns left %d\n", atomic_read(&kgnilnd_data.kgn_nconns));
2500
2501         if (kgnilnd_data.kgn_conns != NULL) {
2502                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
2503                         LASSERT(list_empty(&kgnilnd_data.kgn_conns[i]));
2504
2505                 LIBCFS_FREE(kgnilnd_data.kgn_conns,
2506                             sizeof (struct list_head) *
2507                             *kgnilnd_tunables.kgn_peer_hash_size);
2508         }
2509
2510         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2511                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2512                 kgnilnd_dev_fini(dev);
2513
2514                 LASSERTF(atomic_read(&dev->gnd_ndgrams) == 0,
2515                         "dgrams left %d\n", atomic_read(&dev->gnd_ndgrams));
2516
2517                 if (dev->gnd_dgrams != NULL) {
2518                         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
2519                                 LASSERT(list_empty(&dev->gnd_dgrams[i]));
2520
2521                         LIBCFS_FREE(dev->gnd_dgrams,
2522                                     sizeof (struct list_head) *
2523                                     *kgnilnd_tunables.kgn_peer_hash_size);
2524                 }
2525
2526                 kgnilnd_free_phys_fmablk(dev);
2527         }
2528
2529         if (kgnilnd_data.kgn_mbox_cache != NULL)
2530                 kmem_cache_destroy(kgnilnd_data.kgn_mbox_cache);
2531
2532         if (kgnilnd_data.kgn_rx_cache != NULL)
2533                 kmem_cache_destroy(kgnilnd_data.kgn_rx_cache);
2534
2535         if (kgnilnd_data.kgn_tx_cache != NULL)
2536                 kmem_cache_destroy(kgnilnd_data.kgn_tx_cache);
2537
2538         if (kgnilnd_data.kgn_tx_phys_cache != NULL)
2539                 kmem_cache_destroy(kgnilnd_data.kgn_tx_phys_cache);
2540
2541         if (kgnilnd_data.kgn_dgram_cache != NULL)
2542                 kmem_cache_destroy(kgnilnd_data.kgn_dgram_cache);
2543
2544         if (kgnilnd_data.kgn_cksum_map_pages != NULL) {
2545                 for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
2546                         if (kgnilnd_data.kgn_cksum_map_pages[i] != NULL) {
2547                                 kfree(kgnilnd_data.kgn_cksum_map_pages[i]);
2548                         }
2549                 }
2550                 kfree(kgnilnd_data.kgn_cksum_map_pages);
2551         }
2552
2553         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2554                atomic_read(&libcfs_kmemory));
2555
2556         kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
2557         module_put(THIS_MODULE);
2558
2559         EXIT;
2560 }
2561
2562 int
2563 kgnilnd_startup(lnet_ni_t *ni)
2564 {
2565         int               rc, devno;
2566         kgn_net_t        *net;
2567         ENTRY;
2568
2569         LASSERTF(ni->ni_lnd == &the_kgnilnd,
2570                 "bad LND 0x%p != the_kgnilnd @ 0x%p\n",
2571                 ni->ni_lnd, &the_kgnilnd);
2572
2573         if (kgnilnd_data.kgn_init == GNILND_INIT_NOTHING) {
2574                 rc = kgnilnd_base_startup();
2575                 if (rc != 0)
2576                         RETURN(rc);
2577         }
2578
2579         /* Serialize with shutdown. */
2580         down(&kgnilnd_data.kgn_quiesce_sem);
2581
2582         LIBCFS_ALLOC(net, sizeof(*net));
2583         if (net == NULL) {
2584                 CERROR("could not allocate net for new interface instance\n");
2585                 /* no need to cleanup the CDM... */
2586                 GOTO(failed, rc = -ENOMEM);
2587         }
2588         INIT_LIST_HEAD(&net->gnn_list);
2589         ni->ni_data = net;
2590         net->gnn_ni = ni;
2591         ni->ni_maxtxcredits = *kgnilnd_tunables.kgn_credits;
2592         ni->ni_peertxcredits = *kgnilnd_tunables.kgn_peer_credits;
2593
2594         if (*kgnilnd_tunables.kgn_peer_health) {
2595                 int     fudge;
2596                 int     timeout;
2597                 /* give this a bit of leeway - we don't have a hard timeout
2598                  * as we only check timeouts periodically - see comment in kgnilnd_reaper */
2599                 fudge = (GNILND_TO2KA(*kgnilnd_tunables.kgn_timeout) / GNILND_REAPER_NCHECKS);
2600                 timeout = *kgnilnd_tunables.kgn_timeout + fudge;
2601
2602                 if (*kgnilnd_tunables.kgn_peer_timeout >= timeout)
2603                         ni->ni_peertimeout = *kgnilnd_tunables.kgn_peer_timeout;
2604                 else if (*kgnilnd_tunables.kgn_peer_timeout > -1) {
2605                         LCONSOLE_ERROR("Peer_timeout is set to %d but needs to be >= %d\n",
2606                                         *kgnilnd_tunables.kgn_peer_timeout,
2607                                         timeout);
2608                         ni->ni_data = NULL;
2609                         LIBCFS_FREE(net, sizeof(*net));
2610                         GOTO(failed, rc = -EINVAL);
2611                 } else
2612                         ni->ni_peertimeout = timeout;
2613
2614                 LCONSOLE_INFO("Enabling LNet peer health for gnilnd, timeout %ds\n",
2615                               ni->ni_peertimeout);
2616         }
2617
2618         atomic_set(&net->gnn_refcount, 1);
2619
2620         /* if we have multiple devices, spread the nets around */
2621         net->gnn_netnum = LNET_NETNUM(LNET_NIDNET(ni->ni_nid));
2622
2623         devno = LNET_NIDNET(ni->ni_nid) % GNILND_MAXDEVS;
2624         net->gnn_dev = &kgnilnd_data.kgn_devices[devno];
2625
2626         /* allocate a 'dummy' cdm for datagram use. We can only have a single
2627          * datagram between a nid:inst_id and nid2:inst_id. The fake cdm
2628          * give us additional inst_id to use, allowing the datagrams to flow
2629          * like rivers of honey and beer */
2630
2631         /* the instance id for the cdm is the NETNUM offset by MAXDEVS -
2632          * ensuring we'll have a unique id */
2633
2634
2635         ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), net->gnn_dev->gnd_nid);
2636         CDEBUG(D_NET, "adding net %p nid=%s on dev %d \n",
2637                 net, libcfs_nid2str(ni->ni_nid), net->gnn_dev->gnd_id);
2638         /* until the gnn_list is set, we need to cleanup ourselves as
2639          * kgnilnd_shutdown is just gonna get confused */
2640
2641         down_write(&kgnilnd_data.kgn_net_rw_sem);
2642         list_add_tail(&net->gnn_list, kgnilnd_netnum2netlist(net->gnn_netnum));
2643         up_write(&kgnilnd_data.kgn_net_rw_sem);
2644
2645         /* we need a separate thread to call probe_wait_by_id until
2646          * we get a function callback notifier from kgni */
2647         up(&kgnilnd_data.kgn_quiesce_sem);
2648         RETURN(0);
2649  failed:
2650         up(&kgnilnd_data.kgn_quiesce_sem);
2651         kgnilnd_shutdown(ni);
2652         RETURN(rc);
2653 }
2654
2655 void
2656 kgnilnd_shutdown(lnet_ni_t *ni)
2657 {
2658         kgn_net_t     *net = ni->ni_data;
2659         int           i;
2660         int           rc;
2661         ENTRY;
2662
2663         CFS_RACE(CFS_FAIL_GNI_SR_DOWN_RACE);
2664
2665         LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_ALL,
2666                 "init %d\n", kgnilnd_data.kgn_init);
2667
2668         /* Serialize with startup. */
2669         down(&kgnilnd_data.kgn_quiesce_sem);
2670         CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
2671                atomic_read(&libcfs_kmemory));
2672
2673         if (net == NULL) {
2674                 CERROR("got NULL net for ni %p\n", ni);
2675                 GOTO(out, rc = -EINVAL);
2676         }
2677
2678         LASSERTF(ni == net->gnn_ni,
2679                 "ni %p gnn_ni %p\n", net, net->gnn_ni);
2680
2681         ni->ni_data = NULL;
2682
2683         LASSERT(!net->gnn_shutdown);
2684         LASSERTF(atomic_read(&net->gnn_refcount) != 0,
2685                 "net %p refcount %d\n",
2686                  net, atomic_read(&net->gnn_refcount));
2687
2688         if (!list_empty(&net->gnn_list)) {
2689                 /* serialize with peer creation */
2690                 down_write(&kgnilnd_data.kgn_net_rw_sem);
2691                 net->gnn_shutdown = 1;
2692                 up_write(&kgnilnd_data.kgn_net_rw_sem);
2693
2694                 kgnilnd_cancel_net_dgrams(net);
2695
2696                 kgnilnd_del_conn_or_peer(net, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
2697
2698                 /* if we are quiesced, need to wake up - we need those threads
2699                  * alive to release peers, etc */
2700                 if (GNILND_IS_QUIESCED) {
2701                         set_mb(kgnilnd_data.kgn_quiesce_trigger, GNILND_QUIESCE_IDLE);
2702                         kgnilnd_quiesce_wait("shutdown");
2703                 }
2704
2705                 kgnilnd_wait_for_canceled_dgrams(net->gnn_dev);
2706
2707                 /* We wait until the nets ref's are 1, we will release final ref which is ours
2708                  * this allows us to make sure everything else is done before we free the
2709                  * net.
2710                  */
2711                 i = 4;
2712                 while (atomic_read(&net->gnn_refcount) != 1) {
2713                         i++;
2714                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2715                                 "Waiting for %d references to clear on net %d\n",
2716                                 atomic_read(&net->gnn_refcount),
2717                                 net->gnn_netnum);
2718                         cfs_pause(cfs_time_seconds(1));
2719                 }
2720
2721                 /* release ref from kgnilnd_startup */
2722                 kgnilnd_net_decref(net);
2723                 /* serialize with reaper and conn_task looping */
2724                 down_write(&kgnilnd_data.kgn_net_rw_sem);
2725                 list_del_init(&net->gnn_list);
2726                 up_write(&kgnilnd_data.kgn_net_rw_sem);
2727
2728         }
2729
2730         /* not locking, this can't race with writers */
2731         LASSERTF(atomic_read(&net->gnn_refcount) == 0,
2732                 "net %p refcount %d\n",
2733                  net, atomic_read(&net->gnn_refcount));
2734         LIBCFS_FREE(net, sizeof(*net));
2735
2736 out:
2737         down_read(&kgnilnd_data.kgn_net_rw_sem);
2738         for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
2739                 if (!list_empty(&kgnilnd_data.kgn_nets[i])) {
2740                         up_read(&kgnilnd_data.kgn_net_rw_sem);
2741                         break;
2742                 }
2743
2744                 if (i == *kgnilnd_tunables.kgn_net_hash_size - 1) {
2745                         up_read(&kgnilnd_data.kgn_net_rw_sem);
2746                         kgnilnd_base_shutdown();
2747                 }
2748         }
2749         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2750                atomic_read(&libcfs_kmemory));
2751
2752         up(&kgnilnd_data.kgn_quiesce_sem);
2753         EXIT;
2754 }
2755
2756 void __exit
2757 kgnilnd_module_fini(void)
2758 {
2759         lnet_unregister_lnd(&the_kgnilnd);
2760         kgnilnd_proc_fini();
2761         kgnilnd_remove_sysctl();
2762         kgnilnd_tunables_fini();
2763 }
2764
2765 int __init
2766 kgnilnd_module_init(void)
2767 {
2768         int    rc;
2769
2770         rc = kgnilnd_tunables_init();
2771         if (rc != 0)
2772                 return rc;
2773
2774         printk(KERN_INFO "Lustre: kgnilnd build version: "KGNILND_BUILD_REV"\n");
2775
2776         kgnilnd_insert_sysctl();
2777         kgnilnd_proc_init();
2778
2779         lnet_register_lnd(&the_kgnilnd);
2780
2781         return 0;
2782 }
2783
2784 MODULE_AUTHOR("Cray, Inc. <nic@cray.com>");
2785 MODULE_DESCRIPTION("Kernel Gemini LND v"KGNILND_BUILD_REV);
2786 MODULE_LICENSE("GPL");
2787
2788 module_init(kgnilnd_module_init);
2789 module_exit(kgnilnd_module_fini);