fs/lustre-release.git - lnet/klnds/gnilnd/gnilnd.c (commit 8abc6339f998973d4918bcd108695ea41743a14e)
/*
 * Copyright (C) 2012 Cray, Inc.
 *
 * Copyright (c) 2013, 2014, Intel Corporation.
 *
 *   Author: Nic Henke <nic@cray.com>
 *   Author: James Shimek <jshimek@cray.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */
#include "gnilnd.h"

/* Primary entry points from LNET.  There are no guarantees against reentrance. */
lnd_t the_kgnilnd = {
#ifdef CONFIG_CRAY_XT
        .lnd_type       = GNILND,
#else
        .lnd_type       = GNIIPLND,
#endif
        .lnd_startup    = kgnilnd_startup,
        .lnd_shutdown   = kgnilnd_shutdown,
        .lnd_ctl        = kgnilnd_ctl,
        .lnd_send       = kgnilnd_send,
        .lnd_recv       = kgnilnd_recv,
        .lnd_eager_recv = kgnilnd_eager_recv,
        .lnd_query      = kgnilnd_query,
};
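
/* NB: the dispatch table above is what LNet uses to drive this LND - it is
 * handed to LNet at module load (typically via lnet_register_lnd() in the
 * module init path; see the module init code). */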

kgn_data_t      kgnilnd_data;

/* needs write_lock on kgn_peer_conn_lock */
int
kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
        kgn_conn_t         *conn;
        struct list_head   *ctmp, *cnxt;
        int                 loopback;
        int                 count = 0;

        loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;

        list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
                conn = list_entry(ctmp, kgn_conn_t, gnc_list);

                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                if (conn == newconn)
                        continue;

                if (conn->gnc_device != newconn->gnc_device)
                        continue;

                /* This is a two connection loopback - one talking to the other */
                if (loopback &&
                    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
                    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp) {
                        CDEBUG(D_NET, "skipping prune of %p, "
                                "loopback and matching stamps"
                                " connstamp "LPU64"("LPU64")"
                                " peerstamp "LPU64"("LPU64")\n",
                                conn, newconn->gnc_my_connstamp,
                                conn->gnc_peer_connstamp,
                                newconn->gnc_peer_connstamp,
                                conn->gnc_my_connstamp);
                        continue;
                }

                if (conn->gnc_peerstamp != newconn->gnc_peerstamp) {
                        LASSERTF(conn->gnc_peerstamp < newconn->gnc_peerstamp,
                                "conn 0x%p peerstamp "LPU64" >= "
                                "newconn 0x%p peerstamp "LPU64"\n",
                                conn, conn->gnc_peerstamp,
                                newconn, newconn->gnc_peerstamp);

                        CDEBUG(D_NET, "Closing stale conn nid: %s "
                               " peerstamp:"LPX64"("LPX64")\n",
                               libcfs_nid2str(peer->gnp_nid),
                               conn->gnc_peerstamp, newconn->gnc_peerstamp);
                } else {
                        LASSERTF(conn->gnc_peer_connstamp < newconn->gnc_peer_connstamp,
                                "conn 0x%p peer_connstamp "LPU64" >= "
                                "newconn 0x%p peer_connstamp "LPU64"\n",
                                conn, conn->gnc_peer_connstamp,
                                newconn, newconn->gnc_peer_connstamp);

                        CDEBUG(D_NET, "Closing stale conn nid: %s"
                               " connstamp:"LPU64"("LPU64")\n",
                               libcfs_nid2str(peer->gnp_nid),
                               conn->gnc_peer_connstamp, newconn->gnc_peer_connstamp);
                }

                count++;
                kgnilnd_close_conn_locked(conn, -ESTALE);
        }

        if (count != 0) {
                CWARN("Closed %d stale conns to %s\n", count, libcfs_nid2str(peer->gnp_nid));
        }

        RETURN(count);
}

int
kgnilnd_conn_isdup_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
        kgn_conn_t       *conn;
        struct list_head *tmp;
        int               loopback;
        ENTRY;

        loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;

        list_for_each(tmp, &peer->gnp_conns) {
                conn = list_entry(tmp, kgn_conn_t, gnc_list);
                CDEBUG(D_NET, "checking conn 0x%p for peer %s"
                        " lo %d new "LPU64" existing "LPU64
                        " new peer "LPU64" existing peer "LPU64
                        " new dev %p existing dev %p\n",
                        conn, libcfs_nid2str(peer->gnp_nid),
                        loopback,
                        newconn->gnc_peerstamp, conn->gnc_peerstamp,
                        newconn->gnc_peer_connstamp, conn->gnc_peer_connstamp,
                        newconn->gnc_device, conn->gnc_device);

                /* conn is in the process of closing */
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                /* 'newconn' is from an earlier version of 'peer'!!! */
                if (newconn->gnc_peerstamp < conn->gnc_peerstamp)
                        RETURN(1);

                /* 'conn' is from an earlier version of 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->gnc_peerstamp > conn->gnc_peerstamp)
                        continue;

                /* Different devices are OK */
                if (conn->gnc_device != newconn->gnc_device)
                        continue;

                /* It's me connecting to myself */
                if (loopback &&
                    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
                    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp)
                        continue;

                /* 'newconn' is an earlier connection from 'peer'!!! */
                if (newconn->gnc_peer_connstamp < conn->gnc_peer_connstamp)
                        RETURN(2);

                /* 'conn' is an earlier connection from 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->gnc_peer_connstamp > conn->gnc_peer_connstamp)
                        continue;

                /* 'newconn' has the SAME connection stamp; 'peer' isn't
                 * playing the game... */
                RETURN(3);
        }

        RETURN(0);
}
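
/* For reference, kgnilnd_conn_isdup_locked() returns:
 *   0 - no duplicate, newconn may proceed
 *   1 - newconn's peerstamp predates an existing conn (stale peer incarnation)
 *   2 - newconn is an earlier connection attempt from this peer
 *   3 - identical connection stamps, i.e. a true duplicate */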

int
kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
{
        kgn_conn_t      *conn;
        gni_return_t    rrc;
        int             rc = 0;

        LASSERT(!in_interrupt());
        atomic_inc(&kgnilnd_data.kgn_nconns);

        /* divide by 2 to allow for complete reset and immediate reconnect */
        if (atomic_read(&kgnilnd_data.kgn_nconns) >= GNILND_MAX_CQID/2) {
                CERROR("Too many conns are live: %d > %d\n",
                        atomic_read(&kgnilnd_data.kgn_nconns), GNILND_MAX_CQID/2);
                atomic_dec(&kgnilnd_data.kgn_nconns);
                return -E2BIG;
        }

        LIBCFS_ALLOC(conn, sizeof(*conn));
        if (conn == NULL) {
                atomic_dec(&kgnilnd_data.kgn_nconns);
                return -ENOMEM;
        }

        LIBCFS_ALLOC(conn->gnc_tx_ref_table, GNILND_MAX_MSG_ID * sizeof(void *));
        if (conn->gnc_tx_ref_table == NULL) {
                CERROR("Can't allocate conn tx_ref_table\n");
                GOTO(failed, rc = -ENOMEM);
        }

        atomic_set(&conn->gnc_refcount, 1);
        atomic_set(&conn->gnc_reaper_noop, 0);
        atomic_set(&conn->gnc_sched_noop, 0);
        atomic_set(&conn->gnc_tx_in_use, 0);
        INIT_LIST_HEAD(&conn->gnc_list);
        INIT_LIST_HEAD(&conn->gnc_hashlist);
        INIT_LIST_HEAD(&conn->gnc_schedlist);
        INIT_LIST_HEAD(&conn->gnc_fmaq);
        INIT_LIST_HEAD(&conn->gnc_mdd_list);
        spin_lock_init(&conn->gnc_list_lock);
        spin_lock_init(&conn->gnc_tx_lock);
        conn->gnc_magic = GNILND_CONN_MAGIC;

        /* set tx id to nearly the end to make sure we find wrapping
         * issues soon */
        conn->gnc_next_tx = (int) GNILND_MAX_MSG_ID - 10;

        /* if this fails, we have conflicts and MAX_TX is too large */
        CLASSERT(GNILND_MAX_MSG_ID < GNILND_MSGID_CLOSE);

        /* get a new unique CQ id for this conn */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        conn->gnc_my_connstamp = kgnilnd_data.kgn_connstamp++;
        conn->gnc_cqid = kgnilnd_get_cqid_locked();
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        if (conn->gnc_cqid == 0) {
                CERROR("Could not allocate unique CQ ID for conn 0x%p\n", conn);
                GOTO(failed, rc = -E2BIG);
        }

        CDEBUG(D_NET, "alloc cqid %u for conn 0x%p\n",
                conn->gnc_cqid, conn);

        /* needs to be set before gnc_ephandle to allow kgnilnd_destroy_conn_ep to
         * check context */
        conn->gnc_device = dev;

        conn->gnc_timeout = MAX(*kgnilnd_tunables.kgn_timeout,
                                GNILND_MIN_TIMEOUT);
        kgnilnd_update_reaper_timeout(conn->gnc_timeout);

        /* this is the ep_handle for doing SMSG & BTE */
        mutex_lock(&dev->gnd_cq_mutex);
        rrc = kgnilnd_ep_create(dev->gnd_handle, dev->gnd_snd_fma_cqh,
                                &conn->gnc_ephandle);
        mutex_unlock(&dev->gnd_cq_mutex);
        if (rrc != GNI_RC_SUCCESS)
                GOTO(failed, rc = -ENETDOWN);

        CDEBUG(D_NET, "created conn 0x%p ep_hndl 0x%p\n",
               conn, conn->gnc_ephandle);

        /* add ref for EP canceling */
        kgnilnd_conn_addref(conn);
        atomic_inc(&dev->gnd_neps);

        *connp = conn;
        return 0;

failed:
        atomic_dec(&kgnilnd_data.kgn_nconns);
        LIBCFS_FREE(conn->gnc_tx_ref_table, GNILND_MAX_MSG_ID * sizeof(void *));
        LIBCFS_FREE(conn, sizeof(*conn));
        return rc;
}
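
/* NB: on success the new conn leaves kgnilnd_create_conn() holding two refs:
 * the caller's (gnc_refcount is initialized to 1) and the one added above for
 * EP canceling, which kgnilnd_destroy_conn_ep() drops when the EP dies. */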

/* needs to be called with kgn_peer_conn_lock held (read or write) */
kgn_conn_t *
kgnilnd_find_conn_locked(kgn_peer_t *peer)
{
        kgn_conn_t      *conn = NULL;

        /* if we are in reset, this conn is going to die soon */
        if (unlikely(kgnilnd_data.kgn_in_reset)) {
                RETURN(NULL);
        }

        /* just return the first ESTABLISHED connection */
        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                /* kgnilnd_finish_connect doesn't put connections on the
                 * peer list until they are actually established */
                LASSERTF(conn->gnc_state >= GNILND_CONN_ESTABLISHED,
                        "found conn %p state %s on peer %p (%s)\n",
                        conn, kgnilnd_conn_state2str(conn), peer,
                        libcfs_nid2str(peer->gnp_nid));
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                RETURN(conn);
        }
        RETURN(NULL);
}
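
/* NB: kgnilnd_find_conn_locked() takes no reference on the conn it returns;
 * the result is only stable while kgn_peer_conn_lock is held. */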

/* needs write_lock on kgn_peer_conn_lock held */
kgn_conn_t *
kgnilnd_find_or_create_conn_locked(kgn_peer_t *peer)
{
        kgn_device_t    *dev = peer->gnp_net->gnn_dev;
        kgn_conn_t      *conn;

        conn = kgnilnd_find_conn_locked(peer);

        if (conn != NULL) {
                return conn;
        }

        /* if the peer was previously connecting, check if we should
         * trigger another connection attempt yet. */
        if (time_before(jiffies, peer->gnp_reconnect_time)) {
                return NULL;
        }

        /* This check prevents us from creating a new connection to a peer while we are
         * still in the process of closing an existing connection to the peer.
         */
        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_ephandle != NULL) {
                        CDEBUG(D_NET, "Not connecting - non-NULL ephandle found for peer 0x%p->%s\n", peer,
                                libcfs_nid2str(peer->gnp_nid));
                        return NULL;
                }
        }

        if (peer->gnp_connecting != GNILND_PEER_IDLE) {
                /* only fire up a new connection if the peer is completely
                 * IDLE - in any other connecting state, don't start one */
                return NULL;
        }

        CDEBUG(D_NET, "starting connect to %s\n",
                libcfs_nid2str(peer->gnp_nid));
        peer->gnp_connecting = GNILND_PEER_CONNECT;
        kgnilnd_peer_addref(peer); /* extra ref for connd */

        spin_lock(&dev->gnd_connd_lock);
        list_add_tail(&peer->gnp_connd_list, &dev->gnd_connd_peers);
        spin_unlock(&dev->gnd_connd_lock);

        kgnilnd_schedule_dgram(dev);
        CDEBUG(D_NETTRACE, "scheduling new connect\n");

        return NULL;
}
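
/* NB: a NULL return from kgnilnd_find_or_create_conn_locked() means "no
 * usable conn right now" - we are backing off, an old EP is still being torn
 * down, a connect is already in flight, or one was just scheduled above -
 * so callers are expected to queue their work for the peer and retry later. */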

/* Caller is responsible for deciding if/when to call this */
void
kgnilnd_destroy_conn_ep(kgn_conn_t *conn)
{
        gni_return_t    rrc;
        gni_ep_handle_t tmp_ep;

        /* only destroy the EP if we actually initialized it; swap in NULL to
         * tell kgnilnd_destroy_conn to leave it alone */
        tmp_ep = xchg(&conn->gnc_ephandle, NULL);
        if (tmp_ep != NULL) {
                /* we never re-use the EP, so unbind is not needed */
                mutex_lock(&conn->gnc_device->gnd_cq_mutex);
                rrc = kgnilnd_ep_destroy(tmp_ep);
                mutex_unlock(&conn->gnc_device->gnd_cq_mutex);

                /* if this fails, it could hork up kgni smsg retransmit and others
                 * since we could free the SMSG mbox memory, etc. */
                LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d conn 0x%p ep 0x%p\n",
                         rrc, conn, conn->gnc_ephandle);

                atomic_dec(&conn->gnc_device->gnd_neps);

                /* clear out count added in kgnilnd_close_conn_locked
                 * conn will have a peer once it hits finish_connect, where it
                 * is the first spot we'll mark it ESTABLISHED as well */
                if (conn->gnc_peer) {
                        kgnilnd_admin_decref(conn->gnc_peer->gnp_dirty_eps);
                }

                /* drop ref for EP */
                kgnilnd_conn_decref(conn);
        }
}
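
/* NB: the xchg() in kgnilnd_destroy_conn_ep() makes EP teardown idempotent:
 * whichever caller swaps out the non-NULL handle performs the destroy, so a
 * race with kgnilnd_destroy_conn cannot double-free the EP. */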

void
kgnilnd_destroy_conn(kgn_conn_t *conn)
{
        LASSERTF(!in_interrupt() &&
                !conn->gnc_scheduled &&
                !conn->gnc_in_purgatory &&
                conn->gnc_ephandle == NULL &&
                list_empty(&conn->gnc_list) &&
                list_empty(&conn->gnc_hashlist) &&
                list_empty(&conn->gnc_schedlist) &&
                list_empty(&conn->gnc_mdd_list) &&
                conn->gnc_magic == GNILND_CONN_MAGIC,
                "conn 0x%p->%s IRQ %d sched %d purg %d ep 0x%p Mg %d lists %d/%d/%d/%d\n",
                conn, conn->gnc_peer ? libcfs_nid2str(conn->gnc_peer->gnp_nid)
                                     : "<?>",
                !!in_interrupt(), conn->gnc_scheduled,
                conn->gnc_in_purgatory,
                conn->gnc_ephandle,
                conn->gnc_magic,
                list_empty(&conn->gnc_list),
                list_empty(&conn->gnc_hashlist),
                list_empty(&conn->gnc_schedlist),
                list_empty(&conn->gnc_mdd_list));

        /* Tripping these is especially bad, as it means we have items on the
         *  lists that didn't keep their refcount on the connection - or
         *  somebody evil released their own */
        LASSERTF(list_empty(&conn->gnc_fmaq) &&
                 atomic_read(&conn->gnc_nlive_fma) == 0 &&
                 atomic_read(&conn->gnc_nlive_rdma) == 0,
                 "conn 0x%p fmaq %d@0x%p nfma %d nrdma %d\n",
                 conn, kgnilnd_count_list(&conn->gnc_fmaq), &conn->gnc_fmaq,
                 atomic_read(&conn->gnc_nlive_fma), atomic_read(&conn->gnc_nlive_rdma));

        CDEBUG(D_NET, "destroying conn %p ephandle %p error %d\n",
                conn, conn->gnc_ephandle, conn->gnc_error);

        /* We are freeing this memory; remove the magic value from the connection */
        conn->gnc_magic = 0;

        /* if there is an FMA blk left here, we'll tear it down */
        if (conn->gnc_fma_blk) {
                if (conn->gnc_peer) {
                        kgn_mbox_info_t *mbox;
                        mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
                        mbox->mbx_prev_nid = conn->gnc_peer->gnp_nid;
                }
                kgnilnd_release_mbox(conn, 0);
        }

        if (conn->gnc_peer != NULL)
                kgnilnd_peer_decref(conn->gnc_peer);

        if (conn->gnc_tx_ref_table != NULL) {
                LIBCFS_FREE(conn->gnc_tx_ref_table,
                            GNILND_MAX_MSG_ID * sizeof(void *));
        }

        LIBCFS_FREE(conn, sizeof(*conn));
        atomic_dec(&kgnilnd_data.kgn_nconns);
}

/* peer_alive and peer_notify done in the style of the o2iblnd */
void
kgnilnd_peer_alive(kgn_peer_t *peer)
{
        set_mb(peer->gnp_last_alive, jiffies);
}

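/* Tell LNet about a change in this peer's health.  We only pass bad news
 * along when the peer is completely idle - no ESTABLISHED conn, not
 * connecting, not in reset - and the errno is not "clean"; good news
 * (alive != 0) is always passed along. */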
void
kgnilnd_peer_notify(kgn_peer_t *peer, int error, int alive)
{
        int                     tell_lnet = 0;
        int                     nnets = 0;
        int                     rc;
        int                     i, j;
        kgn_conn_t             *conn;
        kgn_net_t             **nets;
        kgn_net_t              *net;

        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DONT_NOTIFY))
                return;

        /* Tell LNet we are giving up on this peer - but only
         * if it isn't already reconnected or trying to reconnect */
        read_lock(&kgnilnd_data.kgn_peer_conn_lock);

        /* use kgnilnd_find_conn_locked to avoid any conns in the process of being nuked
         *
         * don't tell LNet if we are in reset - we assume that everyone will be able to
         * reconnect just fine
         */
        conn = kgnilnd_find_conn_locked(peer);

        CDEBUG(D_NETTRACE, "peer 0x%p->%s connecting %d conn 0x%p, rst %d error %d\n",
               peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting, conn,
               kgnilnd_data.kgn_in_reset, error);

        if (((peer->gnp_connecting == GNILND_PEER_IDLE) &&
            (conn == NULL) &&
            (!kgnilnd_data.kgn_in_reset) &&
            (!kgnilnd_conn_clean_errno(error))) || alive) {
                tell_lnet = 1;
        }

        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        if (!tell_lnet) {
                /* short circuit if we don't need to notify LNet */
                return;
        }

        rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);

        if (rc) {
                /* only notify if we got the trylock: if it failed, LNet is in
                 * shutdown or something similar and we should not notify */

                for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                        list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
                                /* if gnn_shutdown is set for any net, shutdown is in progress; just return */
                                if (net->gnn_shutdown) {
                                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                                        return;
                                }
                                nnets++;
                        }
                }

                if (nnets == 0) {
                        /* shutdown in progress most likely */
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        return;
                }

                LIBCFS_ALLOC(nets, nnets * sizeof(*nets));

                if (nets == NULL) {
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        CERROR("Failed to allocate nets[%d]\n", nnets);
                        return;
                }

                j = 0;
                for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                        list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
                                nets[j] = net;
                                kgnilnd_net_addref(net);
                                j++;
                        }
                }
                up_read(&kgnilnd_data.kgn_net_rw_sem);

                for (i = 0; i < nnets; i++) {
                        lnet_nid_t peer_nid;

                        net = nets[i];

                        peer_nid = kgnilnd_lnd2lnetnid(net->gnn_ni->ni_nid,
                                                       peer->gnp_nid);

                        CDEBUG(D_NET, "peer 0x%p->%s last_alive %lu (%lus ago)\n",
                                peer, libcfs_nid2str(peer_nid), peer->gnp_last_alive,
                                cfs_duration_sec(jiffies - peer->gnp_last_alive));

                        lnet_notify(net->gnn_ni, peer_nid, alive,
                                    peer->gnp_last_alive);

                        kgnilnd_net_decref(net);
                }

                LIBCFS_FREE(nets, nnets * sizeof(*nets));
        }
}

/* need write_lock on kgn_peer_conn_lock */
void
kgnilnd_close_conn_locked(kgn_conn_t *conn, int error)
{
        kgn_peer_t        *peer = conn->gnc_peer;
        ENTRY;

        LASSERT(!in_interrupt());

        /* store error for tx completion */
        conn->gnc_error = error;
        peer->gnp_last_errno = error;

        /* use real error from peer if possible */
        if (error == -ECONNRESET) {
                error = conn->gnc_peer_error;
        }

        /* if we NETERROR, make sure it is rate limited */
        if (!kgnilnd_conn_clean_errno(error) &&
            peer->gnp_down == GNILND_RCA_NODE_UP) {
                CNETERR("closing conn to %s: error %d\n",
                       libcfs_nid2str(peer->gnp_nid), error);
        } else {
                CDEBUG(D_NET, "closing conn to %s: error %d\n",
                       libcfs_nid2str(peer->gnp_nid), error);
        }

        LASSERTF(conn->gnc_state == GNILND_CONN_ESTABLISHED,
                "conn %p to %s with bogus state %s\n", conn,
                libcfs_nid2str(conn->gnc_peer->gnp_nid),
                kgnilnd_conn_state2str(conn));
        LASSERT(!list_empty(&conn->gnc_hashlist));
        LASSERT(!list_empty(&conn->gnc_list));

        /* mark peer count here so any place the EP gets destroyed will
         * open up the peer count so that a new ESTABLISHED conn is then free
         * to send new messages -- sending before the previous EPs are destroyed
         * could end up with messages on the network for the old conn _after_
         * the new conn and break the mbox safety protocol */
        kgnilnd_admin_addref(conn->gnc_peer->gnp_dirty_eps);

        /* Remove from conn hash table: no new callbacks */
        list_del_init(&conn->gnc_hashlist);
        kgnilnd_data.kgn_conn_version++;
        kgnilnd_conn_decref(conn);

        /* if we are in reset, go right to CLOSED as there is no scheduler
         * thread to move from CLOSING to CLOSED */
        if (unlikely(kgnilnd_data.kgn_in_reset)) {
                conn->gnc_state = GNILND_CONN_CLOSED;
        } else {
                conn->gnc_state = GNILND_CONN_CLOSING;
        }

        /* leave on peer->gnp_conns to make sure we don't let the reaper
         * or others try to unlink this peer until the conn is fully
         * processed for closing */
        if (kgnilnd_check_purgatory_conn(conn)) {
                kgnilnd_add_purgatory_locked(conn, conn->gnc_peer);
        }

        /* Reset RX timeout to ensure we wait for an incoming CLOSE
         * for the full timeout.  If we get a CLOSE we know the
         * peer has stopped all RDMA.  Otherwise if we wait for
         * the full timeout we can also be sure all RDMA has stopped. */
        conn->gnc_last_rx = conn->gnc_last_rx_cq = jiffies;
        mb();

        /* schedule sending CLOSE - if we are in quiesce, this adds to
         * gnd_ready_conns and allows us to find it in quiesce processing */
        kgnilnd_schedule_conn(conn);

        EXIT;
}

void
kgnilnd_close_conn(kgn_conn_t *conn, int error)
{
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        /* need to check the state here - this call is racy and we don't
         * know the state until after the lock is grabbed */
        if (conn->gnc_state == GNILND_CONN_ESTABLISHED) {
                kgnilnd_close_conn_locked(conn, error);
        }
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
}

void
kgnilnd_complete_closed_conn(kgn_conn_t *conn)
{
        LIST_HEAD(sinners);
        kgn_tx_t               *tx, *txn;
        int                     nlive = 0;
        int                     nrdma = 0;
        int                     nq_rdma = 0;
        int                     logmsg;
        ENTRY;

        /* Dump log on cksum error - wait until complete phase to let
         * RX of error happen */
        if (*kgnilnd_tunables.kgn_checksum_dump &&
            (conn != NULL && conn->gnc_peer_error == -ENOKEY)) {
                libcfs_debug_dumplog();
        }

        /* _CLOSED set in kgnilnd_process_fmaq once we decide to
         * send the CLOSE or not */
        LASSERTF(conn->gnc_state == GNILND_CONN_CLOSED,
                 "conn 0x%p->%s with bad state %s\n",
                 conn, conn->gnc_peer ?
                        libcfs_nid2str(conn->gnc_peer->gnp_nid) :
                        "<?>",
                 kgnilnd_conn_state2str(conn));

        LASSERT(list_empty(&conn->gnc_hashlist));

        /* we've sent the close, start nuking */
        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SCHEDULE_COMPLETE))
                kgnilnd_schedule_conn(conn);

        if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
                CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
                                "done, attempting to recover conn 0x%p "
                                "scheduled %d function: %s line: %d\n", conn,
                                conn->gnc_scheduled, conn->gnc_sched_caller,
                                conn->gnc_sched_line);
                RETURN_EXIT;
        }

        /* we don't use lists to track things that we can get out of the
         * tx_ref table... */

        /* need to hold locks for tx_list_state, sampling it is too racy:
         * - the lock actually protects tx != NULL, but we can't take the proper
         *   lock until we check tx_list_state, which would be too late and
         *   we could have the TX change under us.
         * gnd_rdmaq_lock and gnd_lock are not used together, so taking both
         * should be fine */
        spin_lock(&conn->gnc_device->gnd_rdmaq_lock);
        spin_lock(&conn->gnc_device->gnd_lock);

        for (nrdma = 0; nrdma < GNILND_MAX_MSG_ID; nrdma++) {
                tx = conn->gnc_tx_ref_table[nrdma];

                if (tx != NULL) {
                        /* only print the first error and if not CLOSE, we often don't see
                         * CQ events for that by the time we get here... and really don't care */
                        if (nlive || tx->tx_msg.gnm_type == GNILND_MSG_CLOSE)
                                tx->tx_state |= GNILND_TX_QUIET_ERROR;
                        nlive++;
                        GNIDBG_TX(D_NET, tx, "cleaning up on close, nlive %d", nlive);

                        /* don't worry about gnc_lock here as nobody else should be
                         * touching this conn */
                        kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
                        list_add_tail(&tx->tx_list, &sinners);
                }
        }
        spin_unlock(&conn->gnc_device->gnd_lock);
        spin_unlock(&conn->gnc_device->gnd_rdmaq_lock);

        /* nobody should have marked this as needing scheduling after
         * we called close - so only ref should be us handling it */
        if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
                CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
                                "done, attempting to recover conn 0x%p "
                                "scheduled %d function %s line: %d\n", conn,
                                conn->gnc_scheduled, conn->gnc_sched_caller,
                                conn->gnc_sched_line);
        }
        /* now reset a few to actual counters... */
        nrdma = atomic_read(&conn->gnc_nlive_rdma);
        nq_rdma = atomic_read(&conn->gnc_nq_rdma);

        if (!list_empty(&sinners)) {
                list_for_each_entry_safe(tx, txn, &sinners, tx_list) {
                        /* clear tx_list to make tx_add_list_locked happy */
                        list_del_init(&tx->tx_list);
                        /* The error codes determine if we hold onto the MDD */
                        kgnilnd_tx_done(tx, conn->gnc_error);
                }
        }

        logmsg = (nlive + nrdma + nq_rdma);

        if (logmsg) {
                if (conn->gnc_peer->gnp_down == GNILND_RCA_NODE_UP) {
                        CNETERR("Closed conn 0x%p->%s (errno %d, peer errno %d): "
                                "canceled %d TX, %d/%d RDMA\n",
                                conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                                conn->gnc_error, conn->gnc_peer_error,
                                nlive, nq_rdma, nrdma);
                } else {
                        CDEBUG(D_NET, "Closed conn 0x%p->%s (errno %d,"
                                " peer errno %d): canceled %d TX, %d/%d RDMA\n",
                                conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                                conn->gnc_error, conn->gnc_peer_error,
                                nlive, nq_rdma, nrdma);
                }
        }

        kgnilnd_destroy_conn_ep(conn);

        /* Bug 765042 - race this with completing a new conn to same peer - we need
         * finish_connect to detach purgatory before we can do it ourselves here */
        CFS_RACE(CFS_FAIL_GNI_FINISH_PURG);

        /* now it is safe to remove from peer list - anyone looking at
         * gnp_conns now is free to unlink if not on purgatory */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);

        conn->gnc_state = GNILND_CONN_DONE;

        /* Decrement counter if we are marked by del_conn_or_peers for closing
         */
        if (conn->gnc_needs_closing)
                kgnilnd_admin_decref(kgnilnd_data.kgn_npending_conns);

        /* Remove from peer's list of valid connections if it's not in purgatory */
        if (!conn->gnc_in_purgatory) {
                list_del_init(&conn->gnc_list);
                /* Lose peer's reference on the conn */
                kgnilnd_conn_decref(conn);
        }

        /* NB - only unlinking if we set pending in del_peer_locked from admin or
         * shutdown */
        if (kgnilnd_peer_active(conn->gnc_peer) &&
            conn->gnc_peer->gnp_pending_unlink &&
            kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
                kgnilnd_unlink_peer_locked(conn->gnc_peer);
        }

        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        /* I'm telling Mommy! - use peer_error if they initiated close */
        kgnilnd_peer_notify(conn->gnc_peer,
                            conn->gnc_error == -ECONNRESET ?
                            conn->gnc_peer_error : conn->gnc_error, 0);

        EXIT;
}

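/* Pull the remote connection parameters out of an incoming dgram and bind
 * them into the conn.  On error, the cleanup_out label unbinds the EP bound
 * earlier in the function, while return_out is used when the bind itself
 * failed and there is nothing to unbind. */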
int
kgnilnd_set_conn_params(kgn_dgram_t *dgram)
{
        kgn_conn_t             *conn = dgram->gndg_conn;
        kgn_connreq_t          *connreq = &dgram->gndg_conn_in;
        kgn_gniparams_t        *rem_param = &connreq->gncr_gnparams;
        gni_return_t            rrc;
        int                     rc = 0;
        gni_smsg_attr_t        *remote = &connreq->gncr_gnparams.gnpr_smsg_attr;

        /* set timeout vals in conn early so we can use them for the NAK */

        /* use max of the requested and our timeout, peer will do the same */
        conn->gnc_timeout = MAX(conn->gnc_timeout, connreq->gncr_timeout);

        /* only ep_bind really mucks around with the CQ */
        /* only ep_bind if we are not connecting to ourselves and the dstnid is not a
         * wildcard. This check is necessary as you can only bind an EP once, and we
         * must make sure we don't bind when already bound.
         */
        if (connreq->gncr_dstnid != LNET_NID_ANY && dgram->gndg_conn_out.gncr_dstnid != connreq->gncr_srcnid) {
                mutex_lock(&conn->gnc_device->gnd_cq_mutex);
                rrc = kgnilnd_ep_bind(conn->gnc_ephandle,
                        connreq->gncr_gnparams.gnpr_host_id,
                        conn->gnc_cqid);
                mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                if (rrc != GNI_RC_SUCCESS) {
                        rc = -ECONNABORTED;
                        goto return_out;
                }
        }

        rrc = kgnilnd_ep_set_eventdata(conn->gnc_ephandle, conn->gnc_cqid,
                         connreq->gncr_gnparams.gnpr_cqid);
        if (rrc != GNI_RC_SUCCESS) {
                rc = -ECONNABORTED;
                goto cleanup_out;
        }

        /* Initialize SMSG */
        rrc = kgnilnd_smsg_init(conn->gnc_ephandle, &conn->gnpr_smsg_attr,
                        &connreq->gncr_gnparams.gnpr_smsg_attr);
        if (unlikely(rrc == GNI_RC_INVALID_PARAM)) {
                gni_smsg_attr_t *local = &conn->gnpr_smsg_attr;
                /* help folks figure out if there is a tunable off, etc. */
                LCONSOLE_ERROR("SMSG attribute mismatch. Data from local/remote:"
                               " type %d/%d msg_maxsize %u/%u"
                               " mbox_maxcredit %u/%u. Please check kgni"
                               " logs for further data\n",
                               local->msg_type, remote->msg_type,
                               local->msg_maxsize, remote->msg_maxsize,
                               local->mbox_maxcredit, remote->mbox_maxcredit);
        }
        if (rrc != GNI_RC_SUCCESS) {
                rc = -ECONNABORTED;
                goto cleanup_out;
        }

        /* log this for help in debugging SMSG buffer re-use */
        CDEBUG(D_NET, "conn %p src %s dst %s smsg %p acquired"
                " local cqid %u SMSG %p->%u hndl "LPX64"."LPX64
                " remote cqid %u SMSG %p->%u hndl "LPX64"."LPX64"\n",
                conn, libcfs_nid2str(connreq->gncr_srcnid),
                libcfs_nid2str(connreq->gncr_dstnid),
                &conn->gnpr_smsg_attr,
                conn->gnc_cqid,
                conn->gnpr_smsg_attr.msg_buffer,
                conn->gnpr_smsg_attr.mbox_offset,
                conn->gnpr_smsg_attr.mem_hndl.qword1,
                conn->gnpr_smsg_attr.mem_hndl.qword2,
                rem_param->gnpr_cqid,
                rem_param->gnpr_smsg_attr.msg_buffer,
                rem_param->gnpr_smsg_attr.mbox_offset,
                rem_param->gnpr_smsg_attr.mem_hndl.qword1,
                rem_param->gnpr_smsg_attr.mem_hndl.qword2);

        conn->gnc_peerstamp = connreq->gncr_peerstamp;
        conn->gnc_peer_connstamp = connreq->gncr_connstamp;
        conn->remote_mbox_addr = (void *)((char *)remote->msg_buffer + remote->mbox_offset);

        /* We update the reaper timeout once we have a valid conn and timeout */
        kgnilnd_update_reaper_timeout(GNILND_TO2KA(conn->gnc_timeout));

        return 0;

cleanup_out:
        rrc = kgnilnd_ep_unbind(conn->gnc_ephandle);
        /* not sure I can just let this fly */
        LASSERTF(rrc == GNI_RC_SUCCESS,
                "bad rc from gni_ep_unbind trying to cleanup: %d\n", rrc);

return_out:
        LASSERTF(rc != 0, "SOFTWARE BUG: rc == 0\n");
        CERROR("Error setting connection params from %s: %d\n",
               libcfs_nid2str(connreq->gncr_srcnid), rc);
        return rc;
}

/* needs down_read on kgn_net_rw_sem held from before this call until
 * after the write_lock on kgn_peer_conn_lock - this ensures we stay sane
 * with kgnilnd_shutdown - it'll get the sem and set shutdown, then get the
 * kgn_peer_conn_lock to start del_peer'ing. If we hold the sem until after
 * kgn_peer_conn_lock is held, we guarantee that nobody calls
 * kgnilnd_add_peer_locked without checking gnn_shutdown */
int
kgnilnd_create_peer_safe(kgn_peer_t **peerp,
                         lnet_nid_t nid,
                         kgn_net_t *net,
                         int node_state)
{
        kgn_peer_t      *peer;
        int             rc;

        LASSERT(nid != LNET_NID_ANY);

        /* We don't pass the net around in the dgram anymore, so this is where
         * we find it.  This will work unless we are in shutdown or the nid has
         * a net that is invalid; either way an error code must be returned.
         *
         * If the net passed in is not NULL then we can use it; this saves
         * looking it up when the calling function has access to the data.
         */
        if (net == NULL) {
                rc = kgnilnd_find_net(nid, &net);
                if (rc < 0)
                        return rc;
        } else {
                /* kgnilnd_find_net adds a reference on the net; if we are not
                 * using it, we must take the reference manually so the net
                 * refcounts are correct when tearing down the net
                 */
                kgnilnd_net_addref(net);
        }

        LIBCFS_ALLOC(peer, sizeof(*peer));
        if (peer == NULL) {
                kgnilnd_net_decref(net);
                return -ENOMEM;
        }
        peer->gnp_nid = nid;
        peer->gnp_down = node_state;

        /* translate from nid to nic addr & store */
        rc = kgnilnd_nid_to_nicaddrs(LNET_NIDADDR(nid), 1, &peer->gnp_host_id);
        if (rc <= 0) {
                kgnilnd_net_decref(net);
                LIBCFS_FREE(peer, sizeof(*peer));
                return -ESRCH;
        }
        CDEBUG(D_NET, "peer 0x%p->%s -> NIC 0x%x\n", peer,
                libcfs_nid2str(nid), peer->gnp_host_id);

        atomic_set(&peer->gnp_refcount, 1);     /* 1 ref for caller */
        atomic_set(&peer->gnp_dirty_eps, 0);

        INIT_LIST_HEAD(&peer->gnp_list);
        INIT_LIST_HEAD(&peer->gnp_connd_list);
        INIT_LIST_HEAD(&peer->gnp_conns);
        INIT_LIST_HEAD(&peer->gnp_tx_queue);

        /* the first reconnect should happen immediately, so we leave
         * gnp_reconnect_interval set to 0 */

        LASSERTF(net != NULL, "peer 0x%p->%s with NULL net\n",
                 peer, libcfs_nid2str(nid));

        /* must have kgn_net_rw_sem held for this...  */
        if (net->gnn_shutdown) {
                /* shutdown has started already */
                kgnilnd_net_decref(net);
                LIBCFS_FREE(peer, sizeof(*peer));
                return -ESHUTDOWN;
        }

        peer->gnp_net = net;

        atomic_inc(&kgnilnd_data.kgn_npeers);

        *peerp = peer;
        return 0;
}

void
kgnilnd_destroy_peer(kgn_peer_t *peer)
{
        CDEBUG(D_NET, "peer %s %p deleted\n",
               libcfs_nid2str(peer->gnp_nid), peer);
        LASSERTF(atomic_read(&peer->gnp_refcount) == 0,
                 "peer 0x%p->%s refs %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid),
                 atomic_read(&peer->gnp_refcount));
        LASSERTF(atomic_read(&peer->gnp_dirty_eps) == 0,
                 "peer 0x%p->%s dirty eps %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid),
                 atomic_read(&peer->gnp_dirty_eps));
        LASSERTF(peer->gnp_net != NULL, "peer %p (%s) with NULL net\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(!kgnilnd_peer_active(peer),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE || peer->gnp_connecting == GNILND_PEER_KILL,
                 "peer 0x%p->%s, connecting %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
        LASSERTF(list_empty(&peer->gnp_conns),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_tx_queue),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_connd_list),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));

        /* NB a peer's connections keep a reference on their peer until
         * they are destroyed, so we can be assured that _all_ state to do
         * with this peer has been cleaned up when its refcount drops to
         * zero. */

        atomic_dec(&kgnilnd_data.kgn_npeers);
        kgnilnd_net_decref(peer->gnp_net);

        LIBCFS_FREE(peer, sizeof(*peer));
}

/* the conn might not have made it all the way through to a connected
 * state - but we need to purgatory any conn that a remote peer might
 * have seen through a posted dgram as well */
void
kgnilnd_add_purgatory_locked(kgn_conn_t *conn, kgn_peer_t *peer)
{
        kgn_mbox_info_t *mbox = NULL;
        ENTRY;

        /* NB - the caller should own the conn by removing it from the
         * scheduler thread when finishing the close */

        LASSERTF(peer != NULL, "conn %p with NULL peer\n", conn);

        /* If this is still true, need to add the calls to unlink back in and
         * figure out how to close the hole on loopback conns */
        LASSERTF(kgnilnd_peer_active(peer), "can't use inactive peer %s (%p)"
                " we'll never recover the resources\n",
                libcfs_nid2str(peer->gnp_nid), peer);

        CDEBUG(D_NET, "conn %p peer %p dev %p\n", conn, peer,
                conn->gnc_device);

        LASSERTF(conn->gnc_in_purgatory == 0,
                "Conn already in purgatory\n");
        conn->gnc_in_purgatory = 1;

        mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
        mbox->mbx_prev_purg_nid = peer->gnp_nid;
        mbox->mbx_add_purgatory = jiffies;
        kgnilnd_release_mbox(conn, 1);

        LASSERTF(list_empty(&conn->gnc_mdd_list),
                "conn 0x%p->%s with active purgatory hold MDD %d\n",
                conn, libcfs_nid2str(peer->gnp_nid),
                kgnilnd_count_list(&conn->gnc_mdd_list));

        EXIT;
}

/* Instead of detaching everything from purgatory here we just mark the conn
 * as needing detach; when the reaper checks the conn the next time it will
 * detach it. Calling function requires write_lock held on kgn_peer_conn_lock
 */
void
kgnilnd_mark_for_detach_purgatory_all_locked(kgn_peer_t *peer)
{
        kgn_conn_t       *conn;

        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_in_purgatory && !conn->gnc_needs_detach) {
                        conn->gnc_needs_detach = 1;
                        kgnilnd_admin_addref(kgnilnd_data.kgn_npending_detach);
                }
        }
}

/* Calling function needs a write_lock held on kgn_peer_conn_lock */
void
kgnilnd_detach_purgatory_locked(kgn_conn_t *conn, struct list_head *conn_list)
{
        kgn_mbox_info_t *mbox = NULL;

        /* if needed, add the conn purgatory data to the list passed in */
        if (conn->gnc_in_purgatory) {
                CDEBUG(D_NET, "peer %p->%s purg_conn %p@%s mdd_list #tx %d\n",
                        conn->gnc_peer, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                        conn, kgnilnd_conn_state2str(conn),
                        kgnilnd_count_list(&conn->gnc_mdd_list));

                mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
                mbox->mbx_detach_of_purgatory = jiffies;

                /* conn->gnc_list is the entry point on peer->gnp_conns, so detaching it
                 * here removes it from the list of 'valid' peer connections.
                 * We put the current conn onto a list of conns to pass to
                 * kgnilnd_release_purgatory_locked(); as such the caller of
                 * kgnilnd_detach_purgatory_locked() now owns that conn, since it's not
                 * on the peer's conn_list anymore.
                 */
                list_del_init(&conn->gnc_list);

                /* NB - only unlinking if we set pending in del_peer_locked from admin or
                 * shutdown */
                if (kgnilnd_peer_active(conn->gnc_peer) &&
                    conn->gnc_peer->gnp_pending_unlink &&
                    kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
                        kgnilnd_unlink_peer_locked(conn->gnc_peer);
                }
                /* The reaper will not call detach unless the conn is fully through
                 * kgnilnd_complete_closed_conn. If the conn is not in a DONE state
                 * somehow, we are attempting to detach even though the conn has not
                 * been fully cleaned up. If we detach while the conn is still closing,
                 * we will end up with an orphaned connection that has a valid ep_handle
                 * but is not on a peer.
                 */
                LASSERTF(conn->gnc_state == GNILND_CONN_DONE, "Conn in invalid state %p@%s\n",
                                conn, kgnilnd_conn_state2str(conn));

                /* move from peer to the delayed release list */
                list_add_tail(&conn->gnc_list, conn_list);
        }
}

void
kgnilnd_release_purgatory_list(struct list_head *conn_list)
{
        kgn_device_t            *dev;
        kgn_conn_t              *conn, *connN;
        kgn_mdd_purgatory_t     *gmp, *gmpN;

        list_for_each_entry_safe(conn, connN, conn_list, gnc_list) {
                dev = conn->gnc_device;

                kgnilnd_release_mbox(conn, -1);
                conn->gnc_in_purgatory = 0;

                list_del_init(&conn->gnc_list);

                /* gnc_needs_detach is set in kgnilnd_del_conn_or_peer. It is used to keep track
                 * of conns that have been marked for detach by kgnilnd_del_conn_or_peer.
                 * The function uses kgn_npending_detach to verify the conn has
                 * actually been detached.
                 */
                if (conn->gnc_needs_detach)
                        kgnilnd_admin_decref(kgnilnd_data.kgn_npending_detach);

                /* if this guy is really dead (we are doing release from reaper),
                 * make sure we tell LNet - if this is from other context,
                 * the checks in the function will prevent an errant
                 * notification */
                kgnilnd_peer_notify(conn->gnc_peer, conn->gnc_error, 0);

                list_for_each_entry_safe(gmp, gmpN, &conn->gnc_mdd_list,
                                         gmp_list) {
                        CDEBUG(D_NET,
                               "dev %p releasing held mdd "LPX64"."LPX64"\n",
                               conn->gnc_device, gmp->gmp_map_key.qword1,
                               gmp->gmp_map_key.qword2);

                        atomic_dec(&dev->gnd_n_mdd_held);
                        kgnilnd_mem_mdd_release(conn->gnc_device->gnd_handle,
                                                &gmp->gmp_map_key);
                        /* ignoring the return code - if kgni/ghal can't find it
                         * it must be released already */

                        list_del_init(&gmp->gmp_list);
                        LIBCFS_FREE(gmp, sizeof(*gmp));
                }
                /* lose conn ref for purgatory */
                kgnilnd_conn_decref(conn);
        }
}

/* needs write_lock on kgnilnd_data.kgn_peer_conn_lock held */
void
kgnilnd_peer_increase_reconnect_locked(kgn_peer_t *peer)
{
        int current_to;

        current_to = peer->gnp_reconnect_interval;

        /* we'll try to reconnect fast the first time, then back-off */
        if (current_to == 0) {
                peer->gnp_reconnect_time = jiffies - 1;
                current_to = *kgnilnd_tunables.kgn_min_reconnect_interval;
        } else {
                peer->gnp_reconnect_time = jiffies + cfs_time_seconds(current_to);
                /* add 50% of min timeout & retry */
                current_to += *kgnilnd_tunables.kgn_min_reconnect_interval / 2;
        }

        current_to = MIN(current_to,
                         *kgnilnd_tunables.kgn_max_reconnect_interval);

        peer->gnp_reconnect_interval = current_to;
        CDEBUG(D_NET, "peer %s can reconnect at %lu interval %lu\n",
               libcfs_nid2str(peer->gnp_nid), peer->gnp_reconnect_time,
               peer->gnp_reconnect_interval);
}
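
/* NB: with kgn_min_reconnect_interval M and kgn_max_reconnect_interval X,
 * the progression above is an immediate first attempt, then waits of M,
 * 1.5M, 2M, 2.5M, ... seconds, growing by M/2 per attempt and capped at X. */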

/* needs kgnilnd_data.kgn_peer_conn_lock held */
kgn_peer_t *
kgnilnd_find_peer_locked(lnet_nid_t nid)
{
        struct list_head *peer_list = kgnilnd_nid2peerlist(nid);
        kgn_peer_t       *peer;

        /* Chopping nid down to only NIDADDR using LNET_NIDADDR so we only
         * have a single peer per device instead of a peer per nid/net combo.
         */

        list_for_each_entry(peer, peer_list, gnp_list) {
                if (LNET_NIDADDR(nid) != LNET_NIDADDR(peer->gnp_nid))
                        continue;

                CDEBUG(D_NET, "got peer [%p] -> %s c %d (%d)\n",
                       peer, libcfs_nid2str(nid),
                       peer->gnp_connecting,
                       atomic_read(&peer->gnp_refcount));
                return peer;
        }
        return NULL;
}

/* need write_lock on kgn_peer_conn_lock */
void
kgnilnd_unlink_peer_locked(kgn_peer_t *peer)
{
        LASSERTF(list_empty(&peer->gnp_conns),
                "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_tx_queue),
                "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(kgnilnd_peer_active(peer),
                "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        CDEBUG(D_NET, "unlinking peer 0x%p->%s\n",
                peer, libcfs_nid2str(peer->gnp_nid));

        list_del_init(&peer->gnp_list);
        kgnilnd_data.kgn_peer_version++;
        kgnilnd_admin_decref(kgnilnd_data.kgn_npending_unlink);
        /* lose peerlist's ref */
        kgnilnd_peer_decref(peer);
}

int
kgnilnd_get_peer_info(int index,
                      kgn_peer_t **found_peer,
                      lnet_nid_t *id, __u32 *nic_addr,
                      int *refcount, int *connecting)
{
        struct list_head  *ptmp;
        kgn_peer_t        *peer;
        int               i;
        int               rc = -ENOENT;

        read_lock(&kgnilnd_data.kgn_peer_conn_lock);

        for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
                list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
                        peer = list_entry(ptmp, kgn_peer_t, gnp_list);

                        if (index-- > 0)
                                continue;

                        CDEBUG(D_NET, "found peer %p (%s) at index %d\n",
                               peer, libcfs_nid2str(peer->gnp_nid), index);

                        *found_peer  = peer;
                        *id          = peer->gnp_nid;
                        *nic_addr    = peer->gnp_host_id;
                        *refcount    = atomic_read(&peer->gnp_refcount);
                        *connecting  = peer->gnp_connecting;

                        rc = 0;
                        goto out;
                }
        }
out:
        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
        if (rc)
                CDEBUG(D_NET, "no gni peer at index %d\n", index);
        return rc;
}
1306
1307 /* requires write_lock on kgn_peer_conn_lock held */
1308 void
1309 kgnilnd_add_peer_locked(lnet_nid_t nid, kgn_peer_t *new_stub_peer, kgn_peer_t **peerp)
1310 {
1311         kgn_peer_t        *peer, *peer2;
1312
1313         LASSERTF(new_stub_peer != NULL, "bad stub peer for nid %s\n",
1314                  libcfs_nid2str(nid));
1315
1316         peer2 = kgnilnd_find_peer_locked(nid);
1317         if (peer2 != NULL) {
1318                 /* A peer was created during the lock transition, so drop
1319                  * the new one we created */
1320                 kgnilnd_peer_decref(new_stub_peer);
1321                 peer = peer2;
1322         } else {
1323                 peer = new_stub_peer;
1324                 /* peer table takes existing ref on peer */
1325
1326                 LASSERTF(!kgnilnd_peer_active(peer),
1327                         "peer 0x%p->%s already in peer table\n",
1328                         peer, libcfs_nid2str(peer->gnp_nid));
1329                 list_add_tail(&peer->gnp_list,
1330                               kgnilnd_nid2peerlist(nid));
1331                 kgnilnd_data.kgn_peer_version++;
1332         }
1333
1334         LASSERTF(peer->gnp_net != NULL, "peer 0x%p->%s with NULL net\n",
1335                  peer, libcfs_nid2str(peer->gnp_nid));
1336         *peerp = peer;
1337 }
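/* Note on the pattern above: the stub peer is allocated before the write lock
 * is taken (see kgnilnd_add_peer() below), so another thread may have inserted
 * the same NID in the meantime; re-checking under the lock and dropping the
 * duplicate is what keeps the table free of double entries. */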
1338
1339 int
1340 kgnilnd_add_peer(kgn_net_t *net, lnet_nid_t nid, kgn_peer_t **peerp)
1341 {
1342         kgn_peer_t        *peer;
1343         int                rc;
1344         int                node_state;
1345         ENTRY;
1346
1347         if (nid == LNET_NID_ANY)
1348                 return -EINVAL;
1349
1350         node_state = kgnilnd_get_node_state(LNET_NIDADDR(nid));
1351
1352         /* NB - this will not block during normal operations -
1353          * the only writer of this is in the startup/shutdown path. */
1354         rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
1355         if (!rc) {
1356                 rc = -ESHUTDOWN;
1357                 RETURN(rc);
1358         }
1359         rc = kgnilnd_create_peer_safe(&peer, nid, net, node_state);
1360         if (rc != 0) {
1361                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1362                 RETURN(rc);
1363         }
1364
1365         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1366         up_read(&kgnilnd_data.kgn_net_rw_sem);
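        /* NB: the peer table write lock is taken before the net semaphore is
         * dropped, presumably so the net referenced by the new peer can't be
         * torn down between its creation and its insertion into the table */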
1367
1368         kgnilnd_add_peer_locked(nid, peer, peerp);
1369
1370         CDEBUG(D_NET, "peer 0x%p->%s connecting %d\n",
1371                *peerp, libcfs_nid2str((*peerp)->gnp_nid),
1372                (*peerp)->gnp_connecting);
1373
1374         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1375         RETURN(0);
1376 }
1377
1378 /* needs write_lock on kgn_peer_conn_lock */
1379 void
1380 kgnilnd_cancel_peer_connect_locked(kgn_peer_t *peer, struct list_head *zombies)
1381 {
1382         kgn_tx_t        *tx, *txn;
1383
1384         /* we do care about state of gnp_connecting - we could be between
1385          * reconnect attempts, so try to find the dgram and cancel the TX
1386          * anyway. If we are in the process of posting, DON'T do anything;
1387          * once it fails or succeeds we can nuke the connect attempt.
1388          * We have no idea where in kgnilnd_post_dgram we are, so we can't
1389          * attempt to cancel until the function is done.
1390          */
1391
1392         /* make sure the peer isn't in the process of connecting or waiting for a connect */
1393         spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1394         if (!(list_empty(&peer->gnp_connd_list))) {
1395                 list_del_init(&peer->gnp_connd_list);
1396                 /* remove connd ref */
1397                 kgnilnd_peer_decref(peer);
1398         }
1399         spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1400
1401         if (peer->gnp_connecting == GNILND_PEER_POSTING || peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH) {
1402                 peer->gnp_connecting = GNILND_PEER_NEEDS_DEATH;
1403                 /* We are in the process of posting right now; the xchg set it
1404                  * up for us to cancel the connect, so we are finished for now */
1405         } else {
1406                 /* no need for an exchange - we hold the peer lock and it's ready for us to nuke */
1407                 LASSERTF(peer->gnp_connecting != GNILND_PEER_POSTING,
1408                         "Peer in invalid state 0x%p->%s, connecting %d\n",
1409                         peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
1410                 peer->gnp_connecting = GNILND_PEER_IDLE;
1411                 set_mb(peer->gnp_last_dgram_errno, -ETIMEDOUT);
1412                 kgnilnd_find_and_cancel_dgram(peer->gnp_net->gnn_dev,
1413                                                       peer->gnp_nid);
1414         }
1415
1416         /* The least we can do is nuke the TXs, no matter what... */
1417         list_for_each_entry_safe(tx, txn, &peer->gnp_tx_queue, tx_list) {
1418                 kgnilnd_tx_del_state_locked(tx, peer, NULL,
1419                                            GNILND_TX_ALLOCD);
1420                 list_add_tail(&tx->tx_list, zombies);
1421         }
1422 }
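/* Caller's side of the contract above - the TXs moved onto @zombies are not
 * yet completed and must be finished outside kgn_peer_conn_lock, e.g.:
 *
 *	write_lock(&kgnilnd_data.kgn_peer_conn_lock);
 *	kgnilnd_cancel_peer_connect_locked(peer, &zombies);
 *	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
 *	kgnilnd_txlist_done(&zombies, error);
 *
 * as kgnilnd_del_conn_or_peer() and kgnilnd_report_node_state() do below. */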
1423
1424 /* needs write_lock on kgn_peer_conn_lock */
1425 void
1426 kgnilnd_del_peer_locked(kgn_peer_t *peer, int error)
1427 {
1428         /* this peer could be passive and only held for purgatory,
1429          * take a ref to ensure it doesn't disappear in this function */
1430         kgnilnd_peer_addref(peer);
1431
1432         CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);
1433
1434         /* if purgatory release cleared it out, don't try again */
1435         if (kgnilnd_peer_active(peer)) {
1436                 /* always do this to allow kgnilnd_start_connect and
1437                  * kgnilnd_finish_connect to catch this before they
1438                  * wrap up their operations */
1439                 if (kgnilnd_can_unlink_peer_locked(peer)) {
1440                         /* already released purgatory, so only active
1441                          * conns hold it */
1442                         kgnilnd_unlink_peer_locked(peer);
1443                 } else {
1444                         kgnilnd_close_peer_conns_locked(peer, error);
1445                         /* peer unlinks itself when last conn is closed */
1446                 }
1447         }
1448
1449         /* we are done, release back to the wild */
1450         kgnilnd_peer_decref(peer);
1451 }
1452
1453 int
1454 kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
1455                           int error)
1456 {
1457         LIST_HEAD               (zombies);
1459         struct list_head        *ptmp, *pnxt;
1460         kgn_peer_t              *peer;
1461         int                     lo;
1462         int                     hi;
1463         int                     i;
1464         int                     rc = -ENOENT;
1465
1466         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1467
1468         if (nid != LNET_NID_ANY)
1469                 lo = hi = kgnilnd_nid2peerlist(nid) - kgnilnd_data.kgn_peers;
1470         else {
1471                 lo = 0;
1472                 hi = *kgnilnd_tunables.kgn_peer_hash_size - 1;
1473                 /* wildcards always succeed */
1474                 rc = 0;
1475         }
1476
1477         for (i = lo; i <= hi; i++) {
1478                 list_for_each_safe(ptmp, pnxt, &kgnilnd_data.kgn_peers[i]) {
1479                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1480
1481                         LASSERTF(peer->gnp_net != NULL,
1482                                 "peer %p (%s) with NULL net\n",
1483                                  peer, libcfs_nid2str(peer->gnp_nid));
1484
1485                         if (net != NULL && peer->gnp_net != net)
1486                                 continue;
1487
1488                         if (!(nid == LNET_NID_ANY || LNET_NIDADDR(peer->gnp_nid) == LNET_NIDADDR(nid)))
1489                                 continue;
1490
1491                         /* In both cases, we want to stop any in-flight
1492                          * connect attempts */
1493                         kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1494
1495                         switch (command) {
1496                         case GNILND_DEL_CONN:
1497                                 kgnilnd_close_peer_conns_locked(peer, error);
1498                                 break;
1499                         case GNILND_DEL_PEER:
1500                                 peer->gnp_pending_unlink = 1;
1501                                 kgnilnd_admin_addref(kgnilnd_data.kgn_npending_unlink);
1502                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1503                                 kgnilnd_del_peer_locked(peer, error);
1504                                 break;
1505                         case GNILND_CLEAR_PURGATORY:
1506                                 /* Mark everything ready for detach reaper will cleanup
1507                                  * once we release the kgn_peer_conn_lock
1508                                  */
1509                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1510                                 peer->gnp_last_errno = -EISCONN;
1511                                 /* clear reconnect so he can reconnect soon */
1512                                 peer->gnp_reconnect_time = 0;
1513                                 peer->gnp_reconnect_interval = 0;
1514                                 break;
1515                         default:
1516                                 CERROR("bad command %d\n", command);
1517                                 LBUG();
1518                         }
1519                         /* we matched something */
1520                         rc = 0;
1521                 }
1522         }
1523
1524         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1525
1526         /* nuke peer TX */
1527         kgnilnd_txlist_done(&zombies, error);
1528
1529         /* This function normally does not return until the commands it initiated
1530          * have completed, since they have to work their way through the other
1531          * threads. In the case of shutdown, threads are not woken up until after
1532          * this call is initiated, so we cannot wait - we just need to return. The
1533          * same applies to stack reset: we shouldn't wait, as the reset thread
1534          * handles the closing.
1535          */
1535
1536         CFS_RACE(CFS_FAIL_GNI_RACE_RESET);
1537
1538         if (error == -ENOTRECOVERABLE || error == -ESHUTDOWN) {
1539                 return rc;
1540         }
1541
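        /* poll until the other threads drain the pending close/detach/unlink
         * work flagged above; (i & (-i)) == i is true only when i is a power
         * of two, so the message is promoted to D_WARNING at exponentially
         * sparse intervals instead of once per second */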
1542         i = 4;
1543         while (atomic_read(&kgnilnd_data.kgn_npending_conns)   ||
1544                atomic_read(&kgnilnd_data.kgn_npending_detach)  ||
1545                atomic_read(&kgnilnd_data.kgn_npending_unlink)) {
1546
1547                 cfs_pause(cfs_time_seconds(1));
1548                 i++;
1549
1550                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, "Waiting on %d peers %d closes %d detaches\n",
1551                                 atomic_read(&kgnilnd_data.kgn_npending_unlink),
1552                                 atomic_read(&kgnilnd_data.kgn_npending_conns),
1553                                 atomic_read(&kgnilnd_data.kgn_npending_detach));
1554         }
1555
1556         return rc;
1557 }
1558
1559 kgn_conn_t *
1560 kgnilnd_get_conn_by_idx(int index)
1561 {
1562         kgn_peer_t        *peer;
1563         struct list_head  *ptmp;
1564         kgn_conn_t        *conn;
1565         struct list_head  *ctmp;
1566         int                i;
1567
1569         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1570                 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1571                 list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
1572
1573                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1574
1575                         list_for_each(ctmp, &peer->gnp_conns) {
1576                                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
1577
1578                                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1579                                         continue;
1580
1581                                 if (index-- > 0)
1582                                         continue;
1583
1584                                 CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
1585                                        libcfs_nid2str(conn->gnc_peer->gnp_nid),
1586                                        atomic_read(&conn->gnc_refcount));
1587                                 kgnilnd_conn_addref(conn);
1588                                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1589                                 return conn;
1590                         }
1591                 }
1592                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1593         }
1594
1595         return NULL;
1596 }
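/* NB: kgn_peer_conn_lock is dropped and re-taken between hash buckets, so an
 * index-based walk like this is only a best-effort snapshot - entries may be
 * missed or seen twice if the tables change between ioctl calls.  The caller
 * owns the conn ref taken above and must drop it with kgnilnd_conn_decref()
 * (as kgnilnd_ctl() does below). */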
1597
1598 int
1599 kgnilnd_get_conn_info(kgn_peer_t *peer,
1600                       int *device_id, __u64 *peerstamp,
1601                       int *tx_seq, int *rx_seq,
1602                       int *fmaq_len, int *nfma, int *nrdma)
1603 {
1604         kgn_conn_t        *conn;
1605         int               rc = 0;
1606
1607         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1608
1609         conn = kgnilnd_find_conn_locked(peer);
1610         if (conn == NULL) {
1611                 rc = -ENOENT;
1612                 goto out;
1613         }
1614
1615         *device_id = conn->gnc_device->gnd_host_id;
1616         *peerstamp = conn->gnc_peerstamp;
1617         *tx_seq = conn->gnc_tx_seq;
1618         *rx_seq = conn->gnc_rx_seq;
1619         *fmaq_len = kgnilnd_count_list(&conn->gnc_fmaq);
1620         *nfma = atomic_read(&conn->gnc_nlive_fma);
1621         *nrdma = atomic_read(&conn->gnc_nlive_rdma);
1622 out:
1623         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1624         return rc;
1625 }
1626
1627 /* needs write_lock on kgn_peer_conn_lock */
1628 int
1629 kgnilnd_close_peer_conns_locked(kgn_peer_t *peer, int why)
1630 {
1631         kgn_conn_t         *conn;
1632         struct list_head   *ctmp, *cnxt;
1633         int                 count = 0;
1634
1635         list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
1636                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
1637
1638                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1639                         continue;
1640
1641                 count++;
1642                 /* we mark gnc_needs_closing and increment kgn_npending_conns so that
1643                  * kgnilnd_del_conn_or_peer can wait on the other threads closing
1644                  * and cleaning up the connection.
1645                  */
1646                 if (!conn->gnc_needs_closing) {
1647                         conn->gnc_needs_closing = 1;
1648                         kgnilnd_admin_addref(kgnilnd_data.kgn_npending_conns);
1649                 }
1650                 kgnilnd_close_conn_locked(conn, why);
1651         }
1652         return count;
1653 }
1654
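/* presumably driven by the RCA event thread started in kgnilnd_base_startup():
 * a node-down event cancels any connect in flight, fails queued TXs with
 * -ENETRESET and closes the live conn, while a node-up event just timestamps
 * the peer (no peer is ever created for up events) */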
1655 int
1656 kgnilnd_report_node_state(lnet_nid_t nid, int down)
1657 {
1658         int         rc;
1659         kgn_peer_t  *peer, *new_peer;
1660         LIST_HEAD(zombies);
1661
1662         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1663         peer = kgnilnd_find_peer_locked(nid);
1664
1665         if (peer == NULL) {
1666                 int       i;
1667                 int       found_net = 0;
1668                 kgn_net_t *net;
1669
1670                 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1671
1672                 /* Don't add a peer for node up events */
1673                 if (down == GNILND_RCA_NODE_UP) {
1674                         return 0;
1675                 }
1676
1677                 /* find any valid net - we don't care which one... */
1678                 down_read(&kgnilnd_data.kgn_net_rw_sem);
1679                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
1680                         list_for_each_entry(net, &kgnilnd_data.kgn_nets[i],
1681                                             gnn_list) {
1682                                 found_net = 1;
1683                                 break;
1684                         }
1685
1686                         if (found_net) {
1687                                 break;
1688                         }
1689                 }
1690                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1691
1692                 if (!found_net) {
1693                         CNETERR("Could not find a net for nid %lld\n", nid);
1694                         return 1;
1695                 }
1696
1697                 /* The nid passed in does not yet contain the net portion.
1698                  * Let's build it up now
1699                  */
1700                 nid = LNET_MKNID(LNET_NIDNET(net->gnn_ni->ni_nid), nid);
1701                 rc = kgnilnd_add_peer(net, nid, &new_peer);
1702
1703                 if (rc) {
1704                         CNETERR("Could not add peer for nid %lld, rc %d\n",
1705                                 nid, rc);
1706                         return 1;
1707                 }
1708
1709                 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1710                 peer = kgnilnd_find_peer_locked(nid);
1711
1712                 if (peer == NULL) {
1713                         CNETERR("Could not find peer for nid %lld\n", nid);
1714                         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1715                         return 1;
1716                 }
1717         }
1718
1719         peer->gnp_down = down;
1720
1721         if (down == GNILND_RCA_NODE_DOWN) {
1722                 kgn_conn_t *conn;
1723
1724                 peer->gnp_down_event_time = jiffies;
1725                 kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1726                 conn = kgnilnd_find_conn_locked(peer);
1727
1728                 if (conn != NULL) {
1729                         kgnilnd_close_conn_locked(conn, -ENETRESET);
1730                 }
1731         } else {
1732                 peer->gnp_up_event_time = jiffies;
1733         }
1734
1735         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1736
1737         if (down == GNILND_RCA_NODE_DOWN) {
1738                 /* using ENETRESET so we don't get messages from
1739                  * kgnilnd_tx_done
1740                  */
1741                 kgnilnd_txlist_done(&zombies, -ENETRESET);
1742                 kgnilnd_peer_notify(peer, -ECONNRESET, 0);
1743                 LCONSOLE_INFO("Received down event for nid %lld\n", nid);
1744         }
1745
1746         return 0;
1747 }
1748
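/* ioctl entry point for userspace tools: e.g. an lctl disconnect arrives as
 * IOC_LIBCFS_CLOSE_CONNECTION (hence the -ENETRESET noted below), while
 * index-based peer/conn listings iterate IOC_LIBCFS_GET_PEER and
 * IOC_LIBCFS_GET_CONN with increasing ioc_count until they see -ENOENT */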
1749 int
1750 kgnilnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
1751 {
1752         struct libcfs_ioctl_data *data = arg;
1753         kgn_net_t                *net = ni->ni_data;
1754         int                       rc = -EINVAL;
1755
1756         LASSERT(ni == net->gnn_ni);
1757
1758         switch (cmd) {
1759         case IOC_LIBCFS_GET_PEER: {
1760                 lnet_nid_t   nid = 0;
1761                 kgn_peer_t  *peer = NULL;
1762                 __u32 nic_addr = 0;
1763                 __u64 peerstamp = 0;
1764                 int peer_refcount = 0, peer_connecting = 0;
1765                 int device_id = 0;
1766                 int tx_seq = 0, rx_seq = 0;
1767                 int fmaq_len = 0, nfma = 0, nrdma = 0;
1768
1769                 rc = kgnilnd_get_peer_info(data->ioc_count, &peer,
1770                                            &nid, &nic_addr, &peer_refcount,
1771                                            &peer_connecting);
1772                 if (rc)
1773                         break;
1774
1775                 /* Barf */
1776                 /* LNET_MKNID is used to mask from LNet the multiplexing/demultiplexing of connections and peers.
1777                  * LNet assumes a conn and peer per net; LNET_MKNID/LNET_NIDADDR allow us to let LNet see what it
1778                  * wants to see instead of the underlying network that is being used to send the data.
1779                  */
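                /* for reference (standard LNet NID layout): LNET_NIDNET() is
                 * the high 32 bits (the network) and LNET_NIDADDR() the low
                 * 32 bits (the address), so the line below grafts the peer's
                 * address onto the network this NI is known by */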
1780                 data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(nid));
1781                 data->ioc_flags  = peer_connecting;
1782                 data->ioc_count  = peer_refcount;
1783
1784                 rc = kgnilnd_get_conn_info(peer, &device_id, &peerstamp,
1785                                            &tx_seq, &rx_seq, &fmaq_len,
1786                                            &nfma, &nrdma);
1787
1788                 /* This is allowable - a persistent peer could not
1789                  * have a connection */
1790                 if (rc) {
1791                         /* flag to indicate we are not connected -
1792                          * need to print as such */
1793                         data->ioc_flags |= (1<<16);
1794                         rc = 0;
1795                 } else {
1796                         /* still barf */
1797                         data->ioc_net = device_id;
1798                         data->ioc_u64[0] = peerstamp;
1799                         data->ioc_u32[0] = fmaq_len;
1800                         data->ioc_u32[1] = nfma;
1801                         data->ioc_u32[2] = tx_seq;
1802                         data->ioc_u32[3] = rx_seq;
1803                         data->ioc_u32[4] = nrdma;
1804                 }
1805                 break;
1806         }
1807         case IOC_LIBCFS_ADD_PEER: {
1808                 /* just dummy value to allow using common interface */
1809                 kgn_peer_t      *peer;
1810                 rc = kgnilnd_add_peer(net, data->ioc_nid, &peer);
1811                 break;
1812         }
1813         case IOC_LIBCFS_DEL_PEER: {
1814                 /* NULL is passed in so it affects all peers in existence without regard to network,
1815                  * as the peer may not exist on the network LNET believes it to be on.
1816                  */
1817                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1818                                               GNILND_DEL_PEER, -EUCLEAN);
1819                 break;
1820         }
1821         case IOC_LIBCFS_GET_CONN: {
1822                 kgn_conn_t *conn = kgnilnd_get_conn_by_idx(data->ioc_count);
1823
1824                 if (conn == NULL)
1825                         rc = -ENOENT;
1826                 else {
1827                         rc = 0;
1828                         /* LNET_MKNID is used to build the correct address based on what LNET wants to see instead of
1829                          * the generic connection that is used to send the data
1830                          */
1831                         data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(conn->gnc_peer->gnp_nid));
1832                         data->ioc_u32[0] = conn->gnc_device->gnd_id;
1833                         kgnilnd_conn_decref(conn);
1834                 }
1835                 break;
1836         }
1837         case IOC_LIBCFS_CLOSE_CONNECTION: {
1838                 /* use error = -ENETRESET to indicate it was lctl disconnect */
1839                 /* NULL is passed in so it affects all the nets, as the connection is virtual
1840                  * and may not exist on the network LNET believes it to be on.
1841                  */
1842                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1843                                               GNILND_DEL_CONN, -ENETRESET);
1844                 break;
1845         }
1846         case IOC_LIBCFS_PUSH_CONNECTION: {
1847                 /* we use this to flush purgatory */
1848                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1849                                               GNILND_CLEAR_PURGATORY, -EUCLEAN);
1850                 break;
1851         }
1852         case IOC_LIBCFS_REGISTER_MYNID: {
1853                 /* Ignore if this is a noop */
1854                 if (data->ioc_nid == ni->ni_nid) {
1855                         rc = 0;
1856                 } else {
1857                         CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
1858                                libcfs_nid2str(data->ioc_nid),
1859                                libcfs_nid2str(ni->ni_nid));
1860                         rc = -EINVAL;
1861                 }
1862                 break;
1863         }
1864         }
1865
1866         return rc;
1867 }
1868
1869 void
1870 kgnilnd_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
1871 {
1872         kgn_net_t               *net = ni->ni_data;
1873         kgn_tx_t                *tx;
1874         kgn_peer_t              *peer = NULL;
1875         kgn_conn_t              *conn = NULL;
1876         lnet_process_id_t       id = {
1877                 .nid = nid,
1878                 .pid = LNET_PID_LUSTRE,
1879         };
1880         ENTRY;
1881
1882         /* I expect to find him, so only take a read lock */
1883         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1884         peer = kgnilnd_find_peer_locked(nid);
1885         if (peer != NULL) {
1886                 /* LIE if in a quiesce - we will update the timeouts after,
1887                  * but we don't want sends failing during it */
1888                 if (kgnilnd_data.kgn_quiesce_trigger) {
1889                         *when = jiffies;
1890                         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1891                         GOTO(out, 0);
1892                 }
1893
1894                 /* Update to best guess, might refine on later checks */
1895                 *when = peer->gnp_last_alive;
1896
1897                 /* we have a peer, how about a conn? */
1898                 conn = kgnilnd_find_conn_locked(peer);
1899
1900                 if (conn == NULL) {
1901                         /* if there is no conn, check peer last errno to see if clean disconnect
1902                          * - if it was, we lie to LNet because we believe a TX would complete
1903                          * on reconnect */
1904                         if (kgnilnd_conn_clean_errno(peer->gnp_last_errno)) {
1905                                 *when = jiffies;
1906                         }
1907                         /* we still want to fire a TX and new conn in this case */
1908                 } else {
1909                         /* gnp_last_alive is valid, run for the hills */
1910                         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1911                         GOTO(out, 0);
1912                 }
1913         }
1914         /* if we get here, either we have no peer or no conn for him, so fire off
1915          * new TX to trigger conn setup */
1916         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1917
1918         /* if we couldn't find him, we'll fire up a TX and get connected -
1919          * if we don't do this, after ni_peer_timeout, LNet will declare him dead.
1920          * So really we treat kgnilnd_query as a bit of a 'connect now' type
1921          * event, because it'll only do this when it wants to send.
1922          *
1923          * Use a real TX for this to get the proper gnp_tx_queue behavior, etc.;
1924          * normally we'd use kgnilnd_send_ctlmsg for this, but we don't really
1925          * care that this goes out quickly since we already know we need a new conn
1926          * formed */
1927         if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
1928                 return;
1929
1930         tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, ni->ni_nid);
1931         if (tx != NULL) {
1932                 kgnilnd_launch_tx(tx, net, &id);
1933         }
1934 out:
1935         CDEBUG(D_NETTRACE, "peer 0x%p->%s when %lu\n", peer,
1936                libcfs_nid2str(nid), *when);
1937         EXIT;
1938 }
1939
1940 int
1941 kgnilnd_dev_init(kgn_device_t *dev)
1942 {
1943         gni_return_t      rrc;
1944         int               rc = 0;
1945         unsigned int      cq_size;
1946         ENTRY;
1947
1948         /* size of these CQs should be able to accommodate the outgoing
1949          * RDMA and SMSG transactions.  Since we don't really know what we
1950          * need here, we'll take credits * 2 * 3 to allow a bunch.
1951          * We need to dig into this more with the performance work. */
1952         cq_size = *kgnilnd_tunables.kgn_credits * 2 * 3;
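        /* e.g. if kgn_credits were 256, cq_size = 256 * 2 * 3 = 1536 entries */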
1953
1954         rrc = kgnilnd_cdm_create(dev->gnd_id, *kgnilnd_tunables.kgn_ptag,
1955                                  GNILND_COOKIE, 0,
1956                                  &dev->gnd_domain);
1957         if (rrc != GNI_RC_SUCCESS) {
1958                 CERROR("Can't create CDM %d (%d)\n", dev->gnd_id, rrc);
1959                 GOTO(failed, rc = -ENODEV);
1960         }
1961
1962         rrc = kgnilnd_cdm_attach(dev->gnd_domain, dev->gnd_id,
1963                                  &dev->gnd_host_id, &dev->gnd_handle);
1964         if (rrc != GNI_RC_SUCCESS) {
1965                 CERROR("Can't attach CDM to device %d (%d)\n",
1966                         dev->gnd_id, rrc);
1967                 GOTO(failed, rc = -ENODEV);
1968         }
1969
1970         /* a bit gross, but not much we can do - Aries Sim doesn't have
1971          * hardcoded NIC/NID that we can use */
1972         rc = kgnilnd_setup_nic_translation(dev->gnd_host_id);
1973         if (rc != 0)
1974                 GOTO(failed, rc = -ENODEV);
1975
1976         /* only dev 0 gets the errors - no need to reset the stack twice
1977          * - this works because we have a single PTAG, if we had more
1978          * then we'd need to have multiple handlers */
1979         if (dev->gnd_id == 0) {
1980                 rrc = kgnilnd_subscribe_errors(dev->gnd_handle,
1981                                                 GNI_ERRMASK_CRITICAL |
1982                                                 GNI_ERRMASK_UNKNOWN_TRANSACTION,
1983                                               0, NULL, kgnilnd_critical_error,
1984                                               &dev->gnd_err_handle);
1985                 if (rrc != GNI_RC_SUCCESS) {
1986                         CERROR("Can't subscribe for errors on device %d: rc %d\n",
1987                                 dev->gnd_id, rrc);
1988                         GOTO(failed, rc = -ENODEV);
1989                 }
1990
1991                 rc = kgnilnd_set_quiesce_callback(dev->gnd_handle,
1992                                                   kgnilnd_quiesce_end_callback);
1993                 if (rc != GNI_RC_SUCCESS) {
1994                         CERROR("Can't subscribe for quiesce callback on device %d: rc %d\n",
1995                                 dev->gnd_id, rc);
1996                         GOTO(failed, rc = -ENODEV);
1997                 }
1998         }
1999
2000         rc = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_IP, &kgnilnd_data.kgn_sock);
2001         if (rc < 0) {
2002                 CERROR("sock_create returned %d\n", rc);
2003                 GOTO(failed, rc);
2004         }
2005
2006         rc = kgnilnd_nicaddr_to_nid(dev->gnd_host_id, &dev->gnd_nid);
2007         if (rc < 0) {
2008                 /* log messages during startup */
2009                 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
2010                         CERROR("couldn't translate host_id 0x%x to nid. rc %d\n",
2011                                 dev->gnd_host_id, rc);
2012                 }
2013                 GOTO(failed, rc = -ESRCH);
2014         }
2015         CDEBUG(D_NET, "NIC %x -> NID %d\n", dev->gnd_host_id, dev->gnd_nid);
2016
2017         rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
2018                                 0, kgnilnd_device_callback,
2019                                 dev->gnd_id, &dev->gnd_snd_rdma_cqh);
2020         if (rrc != GNI_RC_SUCCESS) {
2021                 CERROR("Can't create rdma send cq size %u for device "
2022                        "%d (%d)\n", cq_size, dev->gnd_id, rrc);
2023                 GOTO(failed, rc = -EINVAL);
2024         }
2025
2026         rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
2027                         0, kgnilnd_device_callback, dev->gnd_id,
2028                         &dev->gnd_snd_fma_cqh);
2029         if (rrc != GNI_RC_SUCCESS) {
2030                 CERROR("Can't create fma send cq size %u for device %d (%d)\n",
2031                        cq_size, dev->gnd_id, rrc);
2032                 GOTO(failed, rc = -EINVAL);
2033         }
2034
2035         /* This one we size differently - overflows are possible and it needs to be
2036          * sized based on machine size */
2037         rrc = kgnilnd_cq_create(dev->gnd_handle,
2038                         *kgnilnd_tunables.kgn_fma_cq_size,
2039                         0, kgnilnd_device_callback, dev->gnd_id,
2040                         &dev->gnd_rcv_fma_cqh);
2041         if (rrc != GNI_RC_SUCCESS) {
2042                 CERROR("Can't create fma cq size %d for device %d (%d)\n",
2043                        *kgnilnd_tunables.kgn_fma_cq_size, dev->gnd_id, rrc);
2044                 GOTO(failed, rc = -EINVAL);
2045         }
2046
2047         RETURN(0);
2048
2049 failed:
2050         kgnilnd_dev_fini(dev);
2051         RETURN(rc);
2052 }
2053
2054 void
2055 kgnilnd_dev_fini(kgn_device_t *dev)
2056 {
2057         gni_return_t rrc;
2058         ENTRY;
2059
2060         /* At quiesce or reset time, need to loop through and clear gnd_ready_conns? */
2061         LASSERTF(list_empty(&dev->gnd_ready_conns) &&
2062                  list_empty(&dev->gnd_map_tx) &&
2063                  list_empty(&dev->gnd_rdmaq),
2064                  "dev 0x%p ready_conns %d@0x%p map_tx %d@0x%p rdmaq %d@0x%p\n",
2065                  dev, kgnilnd_count_list(&dev->gnd_ready_conns), &dev->gnd_ready_conns,
2066                  kgnilnd_count_list(&dev->gnd_map_tx), &dev->gnd_map_tx,
2067                  kgnilnd_count_list(&dev->gnd_rdmaq), &dev->gnd_rdmaq);
2068
2069         /* These should follow from tearing down all connections */
2070         LASSERTF(dev->gnd_map_nphys == 0 && dev->gnd_map_physnop == 0,
2071                 "%d physical mappings of %d pages still mapped\n",
2072                  dev->gnd_map_nphys, dev->gnd_map_physnop);
2073
2074         LASSERTF(dev->gnd_map_nvirt == 0 && dev->gnd_map_virtnob == 0,
2075                 "%d virtual mappings of "LPU64" bytes still mapped\n",
2076                  dev->gnd_map_nvirt, dev->gnd_map_virtnob);
2077
2078         LASSERTF(atomic_read(&dev->gnd_n_mdd) == 0 &&
2079                  atomic_read(&dev->gnd_n_mdd_held) == 0 &&
2080                  atomic64_read(&dev->gnd_nbytes_map) == 0,
2081                 "%d SMSG mappings of %ld bytes still mapped or held %d\n",
2082                  atomic_read(&dev->gnd_n_mdd),
2083                  atomic64_read(&dev->gnd_nbytes_map), atomic_read(&dev->gnd_n_mdd_held));
2084
2085         LASSERT(list_empty(&dev->gnd_map_list));
2086
2087         /* What other assertions needed to ensure all connections torn down ? */
2088
2089         /* check all counters == 0 (EP, MDD, etc) */
2090
2091         /* if we are resetting due to quiesce (stack reset), don't check
2092          * thread states */
2093         LASSERTF(kgnilnd_data.kgn_quiesce_trigger ||
2094                 atomic_read(&kgnilnd_data.kgn_nthreads) == 0,
2095                 "tried to shutdown with threads active\n");
2096
2097         if (dev->gnd_rcv_fma_cqh) {
2098                 rrc = kgnilnd_cq_destroy(dev->gnd_rcv_fma_cqh);
2099                 LASSERTF(rrc == GNI_RC_SUCCESS,
2100                         "bad rc from gni_cq_destroy on rcv_fma_cqh: %d\n", rrc);
2101                 dev->gnd_rcv_fma_cqh = NULL;
2102         }
2103
2104         if (dev->gnd_snd_rdma_cqh) {
2105                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_rdma_cqh);
2106                 LASSERTF(rrc == GNI_RC_SUCCESS,
2107                         "bad rc from gni_cq_destroy on send_rdma_cqh: %d\n", rrc);
2108                 dev->gnd_snd_rdma_cqh = NULL;
2109         }
2110
2111         if (dev->gnd_snd_fma_cqh) {
2112                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_fma_cqh);
2113                 LASSERTF(rrc == GNI_RC_SUCCESS,
2114                         "bad rc from gni_cq_destroy on snd_fma_cqh: %d\n", rrc);
2115                 dev->gnd_snd_fma_cqh = NULL;
2116         }
2117
2118         if (dev->gnd_err_handle) {
2119                 rrc = kgnilnd_release_errors(dev->gnd_err_handle);
2120                 LASSERTF(rrc == GNI_RC_SUCCESS,
2121                         "bad rc from gni_release_errors: %d\n", rrc);
2122                 dev->gnd_err_handle = NULL;
2123         }
2124
2125         if (dev->gnd_domain) {
2126                 rrc = kgnilnd_cdm_destroy(dev->gnd_domain);
2127                 LASSERTF(rrc == GNI_RC_SUCCESS,
2128                         "bad rc from gni_cdm_destroy: %d\n", rrc);
2129                 dev->gnd_domain = NULL;
2130         }
2131
2132         if (kgnilnd_data.kgn_sock)
2133                 sock_release(kgnilnd_data.kgn_sock);
2134
2135         EXIT;
2136 }
2137
2138
2139 int kgnilnd_base_startup(void)
2140 {
2141         struct timeval       tv;
2142         int                  pkmem = atomic_read(&libcfs_kmemory);
2143         int                  rc;
2144         int                  i;
2145         kgn_device_t        *dev;
2146         struct task_struct  *thrd;
2147
2148 #if defined(CONFIG_CRAY_XT) && !defined(CONFIG_CRAY_COMPUTE)
2149         /* limit how much memory can be allocated for fma blocks in
2150          * instances where many nodes need to reconnect at the same time */
2151         struct sysinfo si;
2152         si_meminfo(&si);
2153 #endif
2155
2156         ENTRY;
2157
2158         LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_NOTHING,
2159                 "init %d\n", kgnilnd_data.kgn_init);
2160
2161         /* zero pointers, flags etc */
2162         memset(&kgnilnd_data, 0, sizeof(kgnilnd_data));

#if defined(CONFIG_CRAY_XT) && !defined(CONFIG_CRAY_COMPUTE)
        /* set the fma block limit only after the memset above, which would
         * otherwise wipe it back to zero */
        kgnilnd_data.free_pages_limit = si.totalram/4;
#endif
2163
2164         /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
2165          * a unique (for all time) connstamp so we can uniquely identify
2166          * the sender.  The connstamp is an incrementing counter
2167          * initialised with seconds + microseconds at startup time.  So we
2168          * rely on NOT creating connections more frequently on average than
2169          * 1MHz to ensure we don't use old connstamps when we reboot. */
2170         do_gettimeofday(&tv);
2171         kgnilnd_data.kgn_connstamp =
2172                  kgnilnd_data.kgn_peerstamp =
2173                         (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
2174
2175         init_rwsem(&kgnilnd_data.kgn_net_rw_sem);
2176
2177         for (i = 0; i < GNILND_MAXDEVS; i++) {
2178                 kgn_device_t  *dev = &kgnilnd_data.kgn_devices[i];
                int            j;      /* don't clobber the device counter i below */
2179
2180                 dev->gnd_id = i;
2181                 INIT_LIST_HEAD(&dev->gnd_ready_conns);
2182                 INIT_LIST_HEAD(&dev->gnd_map_tx);
2183                 INIT_LIST_HEAD(&dev->gnd_fma_buffs);
2184                 mutex_init(&dev->gnd_cq_mutex);
2185                 mutex_init(&dev->gnd_fmablk_mutex);
2186                 spin_lock_init(&dev->gnd_fmablk_lock);
2187                 init_waitqueue_head(&dev->gnd_waitq);
2188                 init_waitqueue_head(&dev->gnd_dgram_waitq);
2189                 init_waitqueue_head(&dev->gnd_dgping_waitq);
2190                 spin_lock_init(&dev->gnd_lock);
2191                 INIT_LIST_HEAD(&dev->gnd_map_list);
2192                 spin_lock_init(&dev->gnd_map_lock);
2193                 atomic_set(&dev->gnd_nfmablk, 0);
2194                 atomic_set(&dev->gnd_fmablk_vers, 1);
2195                 atomic_set(&dev->gnd_neps, 0);
2196                 atomic_set(&dev->gnd_canceled_dgrams, 0);
2197                 INIT_LIST_HEAD(&dev->gnd_connd_peers);
2198                 spin_lock_init(&dev->gnd_connd_lock);
2199                 spin_lock_init(&dev->gnd_dgram_lock);
2200                 spin_lock_init(&dev->gnd_rdmaq_lock);
2201                 INIT_LIST_HEAD(&dev->gnd_rdmaq);
2202                 init_rwsem(&dev->gnd_conn_sem);
2203
2204                 /* alloc & setup nid based dgram table */
2205                 LIBCFS_ALLOC(dev->gnd_dgrams,
2206                             sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2207
2208                 if (dev->gnd_dgrams == NULL)
2209                         GOTO(failed, rc = -ENOMEM);
2210
2211                 for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++) {
2212                         INIT_LIST_HEAD(&dev->gnd_dgrams[j]);
2213                 }
2214                 atomic_set(&dev->gnd_ndgrams, 0);
2215                 atomic_set(&dev->gnd_nwcdgrams, 0);
2216                 /* setup timer for RDMAQ processing */
2217                 setup_timer(&dev->gnd_rdmaq_timer, kgnilnd_schedule_device_timer,
2218                             (unsigned long)dev);
2219
2220                 /* setup timer for mapping processing */
2221                 setup_timer(&dev->gnd_map_timer, kgnilnd_schedule_device_timer,
2222                             (unsigned long)dev);
2223
2224         }
2225
2226         /* CQID 0 isn't allowed, set to MAX_MSG_ID - 1 to check for conflicts early */
2227         kgnilnd_data.kgn_next_cqid = GNILND_MAX_MSG_ID - 1;
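        /* presumably starting just below the wrap point means the CQID
         * allocator wraps (and skips the disallowed CQID 0) almost
         * immediately, so any reuse conflicts surface early rather than only
         * after GNILND_MAX_MSG_ID connections */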
2228         kgnilnd_data.kgn_new_min_timeout = *kgnilnd_tunables.kgn_timeout;
2229         init_waitqueue_head(&kgnilnd_data.kgn_reaper_waitq);
2230         init_waitqueue_head(&kgnilnd_data.kgn_ruhroh_waitq);
2231         spin_lock_init(&kgnilnd_data.kgn_reaper_lock);
2232
2233         mutex_init(&kgnilnd_data.kgn_quiesce_mutex);
2234         atomic_set(&kgnilnd_data.kgn_nquiesce, 0);
2235         atomic_set(&kgnilnd_data.kgn_npending_conns, 0);
2236         atomic_set(&kgnilnd_data.kgn_npending_unlink, 0);
2237         atomic_set(&kgnilnd_data.kgn_npending_detach, 0);
2238         atomic_set(&kgnilnd_data.kgn_rev_offset, 0);
2239         atomic_set(&kgnilnd_data.kgn_rev_length, 0);
2240         atomic_set(&kgnilnd_data.kgn_rev_copy_buff, 0);
2241
2242         /* OK to call kgnilnd_api_shutdown() to cleanup now */
2243         kgnilnd_data.kgn_init = GNILND_INIT_DATA;
2244         try_module_get(THIS_MODULE);
2245
2246         rwlock_init(&kgnilnd_data.kgn_peer_conn_lock);
2247
2248         LIBCFS_ALLOC(kgnilnd_data.kgn_peers,
2249                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2250
2251         if (kgnilnd_data.kgn_peers == NULL)
2252                 GOTO(failed, rc = -ENOMEM);
2253
2254         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2255                 INIT_LIST_HEAD(&kgnilnd_data.kgn_peers[i]);
2256         }
2257
2258         LIBCFS_ALLOC(kgnilnd_data.kgn_conns,
2259                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2260
2261         if (kgnilnd_data.kgn_conns == NULL)
2262                 GOTO(failed, rc = -ENOMEM);
2263
2264         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2265                 INIT_LIST_HEAD(&kgnilnd_data.kgn_conns[i]);
2266         }
2267
2268         LIBCFS_ALLOC(kgnilnd_data.kgn_nets,
2269                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_net_hash_size);
2270
2271         if (kgnilnd_data.kgn_nets == NULL)
2272                 GOTO(failed, rc = -ENOMEM);
2273
2274         for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
2275                 INIT_LIST_HEAD(&kgnilnd_data.kgn_nets[i]);
2276         }
2277
2278         kgnilnd_data.kgn_mbox_cache =
2279                 kmem_cache_create("kgn_mbox_block", KMALLOC_MAX_SIZE, 0,
2280                                   SLAB_HWCACHE_ALIGN, NULL);
2281         if (kgnilnd_data.kgn_mbox_cache == NULL) {
2282                 CERROR("Can't create slab for physical mbox blocks\n");
2283                 GOTO(failed, rc = -ENOMEM);
2284         }
2285
2286         kgnilnd_data.kgn_rx_cache =
2287                 kmem_cache_create("kgn_rx_t", sizeof(kgn_rx_t), 0, 0, NULL);
2288         if (kgnilnd_data.kgn_rx_cache == NULL) {
2289                 CERROR("Can't create slab for kgn_rx_t descriptors\n");
2290                 GOTO(failed, rc = -ENOMEM);
2291         }
2292
2293         kgnilnd_data.kgn_tx_cache =
2294                 kmem_cache_create("kgn_tx_t", sizeof(kgn_tx_t), 0, 0, NULL);
2295         if (kgnilnd_data.kgn_tx_cache == NULL) {
2296                 CERROR("Can't create slab for kgn_tx_t\n");
2297                 GOTO(failed, rc = -ENOMEM);
2298         }
2299
2300         kgnilnd_data.kgn_tx_phys_cache =
2301                 kmem_cache_create("kgn_tx_phys",
2302                                    LNET_MAX_IOV * sizeof(gni_mem_segment_t),
2303                                    0, 0, NULL);
2304         if (kgnilnd_data.kgn_tx_phys_cache == NULL) {
2305                 CERROR("Can't create slab for kgn_tx_phys\n");
2306                 GOTO(failed, rc = -ENOMEM);
2307         }
2308
2309         kgnilnd_data.kgn_dgram_cache =
2310                 kmem_cache_create("kgn_dgram_t", sizeof(kgn_dgram_t), 0, 0, NULL);
2311         if (kgnilnd_data.kgn_dgram_cache == NULL) {
2312                 CERROR("Can't create slab for outgoing datagrams\n");
2313                 GOTO(failed, rc = -ENOMEM);
2314         }
2315
2316         /* allocate a MAX_IOV array of page pointers for each cpu */
2317         kgnilnd_data.kgn_cksum_map_pages = kmalloc(num_possible_cpus() * sizeof (struct page *),
2318                                                    GFP_KERNEL);
2319         if (kgnilnd_data.kgn_cksum_map_pages == NULL) {
2320                 CERROR("Can't allocate vmap cksum pages\n");
2321                 GOTO(failed, rc = -ENOMEM);
2322         }
2323         kgnilnd_data.kgn_cksum_npages = num_possible_cpus();
2324         memset(kgnilnd_data.kgn_cksum_map_pages, 0,
2325                 kgnilnd_data.kgn_cksum_npages * sizeof (struct page *));
2326
2327         for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
2328                 kgnilnd_data.kgn_cksum_map_pages[i] = kmalloc(LNET_MAX_IOV * sizeof (struct page *),
2329                                                               GFP_KERNEL);
2330                 if (kgnilnd_data.kgn_cksum_map_pages[i] == NULL) {
2331                         CERROR("Can't allocate vmap cksum pages for cpu %d\n", i);
2332                         GOTO(failed, rc = -ENOMEM);
2333                 }
2334         }
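        /* these are presumably per-CPU scratch arrays for vmap'ing kiov pages
         * during checksumming: one array of LNET_MAX_IOV page pointers per
         * possible CPU, so a thread can map a whole payload without locking */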
2335
2336         LASSERT(kgnilnd_data.kgn_ndevs == 0);
2337
2338         /* Use all available GNI devices */
2339         for (i = 0; i < GNILND_MAXDEVS; i++) {
2340                 dev = &kgnilnd_data.kgn_devices[kgnilnd_data.kgn_ndevs];
2341
2342                 rc = kgnilnd_dev_init(dev);
2343                 if (rc == 0) {
2344                         /* Increment here so base_shutdown cleans it up */
2345                         kgnilnd_data.kgn_ndevs++;
2346
2347                         rc = kgnilnd_allocate_phys_fmablk(dev);
2348                         if (rc)
2349                                 GOTO(failed, rc);
2350                 }
2351         }
2352
2353         if (kgnilnd_data.kgn_ndevs == 0) {
2354                 CERROR("Can't initialise any GNI devices\n");
2355                 GOTO(failed, rc = -ENODEV);
2356         }
2357
2358         rc = kgnilnd_thread_start(kgnilnd_reaper, NULL, "kgnilnd_rpr", 0);
2359         if (rc != 0) {
2360                 CERROR("Can't spawn gnilnd reaper: %d\n", rc);
2361                 GOTO(failed, rc);
2362         }
2363
2364         rc = kgnilnd_start_rca_thread();
2365         if (rc != 0) {
2366                 CERROR("Can't spawn gnilnd rca: %d\n", rc);
2367                 GOTO(failed, rc);
2368         }
2369
2370         /*
2371          * Start ruhroh thread.  We can't use kgnilnd_thread_start() because
2372          * we don't want this thread included in kgnilnd_data.kgn_nthreads
2373          * count.  This thread controls quiesce, so it mustn't
2374          * quiesce itself.
2375          */
2376         thrd = kthread_run(kgnilnd_ruhroh_thread, NULL, "%s_%02d", "kgnilnd_rr", 0);
2377         if (IS_ERR(thrd)) {
2378                 rc = PTR_ERR(thrd);
2379                 CERROR("Can't spawn gnilnd ruhroh thread: %d\n", rc);
2380                 GOTO(failed, rc);
2381         }
2382
2383         /* threads will load balance across devs as they are available */
2384         for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
2385                 rc = kgnilnd_thread_start(kgnilnd_scheduler, (void *)((long)i),
2386                                           "kgnilnd_sd", i);
2387                 if (rc != 0) {
2388                         CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
2389                                i, rc);
2390                         GOTO(failed, rc);
2391                 }
2392         }
2393
2394         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2395                 dev = &kgnilnd_data.kgn_devices[i];
2396                 rc = kgnilnd_thread_start(kgnilnd_dgram_mover, dev,
2397                                           "kgnilnd_dg", dev->gnd_id);
2398                 if (rc != 0) {
2399                         CERROR("Can't spawn gnilnd dgram_mover[%d]: %d\n",
2400                                dev->gnd_id, rc);
2401                         GOTO(failed, rc);
2402                 }
2403
2404                 rc = kgnilnd_thread_start(kgnilnd_dgram_waitq, dev,
2405                                           "kgnilnd_dgn", dev->gnd_id);
2406                 if (rc != 0) {
2407                         CERROR("Can't spawn gnilnd dgram_waitq[%d]: %d\n",
2408                                 dev->gnd_id, rc);
2409                         GOTO(failed, rc);
2410                 }
2411
2412                 rc = kgnilnd_setup_wildcard_dgram(dev);
2413
2414                 if (rc != 0) {
2415                         CERROR("Can't create wildcard dgrams[%d]: %d\n",
2416                                 dev->gnd_id, rc);
2417                         GOTO(failed, rc);
2418                 }
2419         }
2420
2423         /* flag everything initialised */
2424         kgnilnd_data.kgn_init = GNILND_INIT_ALL;
2425         /*****************************************************/
2426
2427         CDEBUG(D_MALLOC, "initial kmem %d\n", pkmem);
2428         RETURN(0);
2429
2430 failed:
2431         kgnilnd_base_shutdown();
2432         kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
2433         RETURN(rc);
2434 }
2435
2436 void
2437 kgnilnd_base_shutdown(void)
2438 {
2439         int                     i, j;
2440         ENTRY;
2441
2442         while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_PAUSE_SHUTDOWN, 1)) {};
2443
2444         kgnilnd_data.kgn_wc_kill = 1;
2445
2446         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2447                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2448                 kgnilnd_cancel_wc_dgrams(dev);
2449                 kgnilnd_cancel_dgrams(dev);
2450                 kgnilnd_del_conn_or_peer(NULL, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
2451                 kgnilnd_wait_for_canceled_dgrams(dev);
2452         }
2453
2454         /* We need to verify there are no conns left before we let the threads
2455          * shut down; otherwise we could clean up the peers but still have
2456          * some outstanding conns due to orphaned datagram conns that are
2457          * being cleaned up.
2458          */
2459         i = 2;
2460         while (atomic_read(&kgnilnd_data.kgn_nconns) != 0) {
2461                 i++;
2462
2463                 for (j = 0; j < kgnilnd_data.kgn_ndevs; ++j) {
2464                         kgn_device_t *dev = &kgnilnd_data.kgn_devices[j];
2465                         kgnilnd_schedule_device(dev);
2466                 }
2467
2468                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2469                         "Waiting for conns to be cleaned up %d\n",atomic_read(&kgnilnd_data.kgn_nconns));
2470                 cfs_pause(cfs_time_seconds(1));
2471         }
2472         /* Peer state all cleaned up BEFORE setting shutdown, so threads don't
2473          * have to worry about shutdown races.  NB connections may be created
2474          * while there are still active connds, but these will be temporary
2475          * since peer creation always fails after the listener has started to
2476          * shut down.
2477          * all peers should have been cleared out on the nets */
2478         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2479                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2480
2481         /* Wait for the ruhroh thread to shut down. */
2482         kgnilnd_data.kgn_ruhroh_shutdown = 1;
2483         wake_up(&kgnilnd_data.kgn_ruhroh_waitq);
2484         i = 2;
2485         while (kgnilnd_data.kgn_ruhroh_running != 0) {
2486                 i++;
2487                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2488                        "Waiting for ruhroh thread to terminate\n");
2489                 cfs_pause(cfs_time_seconds(1));
2490         }
2491
2492         /* Flag threads to terminate */
2493         kgnilnd_data.kgn_shutdown = 1;
2494
2495         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2496                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2497
2498                 /* should clear all the MDDs */
2499                 kgnilnd_unmap_fma_blocks(dev);
2500
2501                 kgnilnd_schedule_device(dev);
2502                 wake_up_all(&dev->gnd_dgram_waitq);
2503                 wake_up_all(&dev->gnd_dgping_waitq);
2504                 LASSERT(list_empty(&dev->gnd_connd_peers));
2505         }
2506
2507         spin_lock(&kgnilnd_data.kgn_reaper_lock);
2508         wake_up_all(&kgnilnd_data.kgn_reaper_waitq);
2509         spin_unlock(&kgnilnd_data.kgn_reaper_lock);
2510
2511         if (atomic_read(&kgnilnd_data.kgn_nthreads))
2512                 kgnilnd_wakeup_rca_thread();
2513
2514         /* Wait for threads to exit */
2515         i = 2;
2516         while (atomic_read(&kgnilnd_data.kgn_nthreads) != 0) {
2517                 i++;
2518                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2519                        "Waiting for %d threads to terminate\n",
2520                        atomic_read(&kgnilnd_data.kgn_nthreads));
2521                 cfs_pause(cfs_time_seconds(1));
2522         }
2523
2524         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2525                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2526
2527         if (kgnilnd_data.kgn_peers != NULL) {
2528                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
2529                         LASSERT(list_empty(&kgnilnd_data.kgn_peers[i]));
2530
2531                 LIBCFS_FREE(kgnilnd_data.kgn_peers,
2532                             sizeof (struct list_head) *
2533                             *kgnilnd_tunables.kgn_peer_hash_size);
2534         }
2535
2536         down_write(&kgnilnd_data.kgn_net_rw_sem);
2537         if (kgnilnd_data.kgn_nets != NULL) {
2538                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++)
2539                         LASSERT(list_empty(&kgnilnd_data.kgn_nets[i]));
2540
2541                 LIBCFS_FREE(kgnilnd_data.kgn_nets,
2542                             sizeof (struct list_head) *
2543                             *kgnilnd_tunables.kgn_net_hash_size);
2544         }
2545         up_write(&kgnilnd_data.kgn_net_rw_sem);
2546
2547         LASSERTF(atomic_read(&kgnilnd_data.kgn_nconns) == 0,
2548                 "conns left %d\n", atomic_read(&kgnilnd_data.kgn_nconns));
2549
	if (kgnilnd_data.kgn_conns != NULL) {
		for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
			LASSERT(list_empty(&kgnilnd_data.kgn_conns[i]));

		LIBCFS_FREE(kgnilnd_data.kgn_conns,
			    sizeof (struct list_head) *
			    *kgnilnd_tunables.kgn_peer_hash_size);
	}

	for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
		kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
		int           j;

		kgnilnd_dev_fini(dev);

		LASSERTF(atomic_read(&dev->gnd_ndgrams) == 0,
			"dgrams left %d\n", atomic_read(&dev->gnd_ndgrams));

		if (dev->gnd_dgrams != NULL) {
			/* use a separate index here - 'i' still tracks the
			 * device loop and must not be clobbered */
			for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++)
				LASSERT(list_empty(&dev->gnd_dgrams[j]));

			LIBCFS_FREE(dev->gnd_dgrams,
				    sizeof (struct list_head) *
				    *kgnilnd_tunables.kgn_peer_hash_size);
		}

		kgnilnd_free_phys_fmablk(dev);
	}

	if (kgnilnd_data.kgn_mbox_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_mbox_cache);

	if (kgnilnd_data.kgn_rx_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_rx_cache);

	if (kgnilnd_data.kgn_tx_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_tx_cache);

	if (kgnilnd_data.kgn_tx_phys_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_tx_phys_cache);

	if (kgnilnd_data.kgn_dgram_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_dgram_cache);

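	/* free each checksum mapping page array before the top-level array */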
	if (kgnilnd_data.kgn_cksum_map_pages != NULL) {
		for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
			if (kgnilnd_data.kgn_cksum_map_pages[i] != NULL) {
				kfree(kgnilnd_data.kgn_cksum_map_pages[i]);
			}
		}
		kfree(kgnilnd_data.kgn_cksum_map_pages);
	}

	CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
	       atomic_read(&libcfs_kmemory));

	kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
	module_put(THIS_MODULE);

	EXIT;
}

int
kgnilnd_startup(lnet_ni_t *ni)
{
	int               rc, devno;
	kgn_net_t        *net;
	ENTRY;

	LASSERTF(ni->ni_lnd == &the_kgnilnd,
		"bad LND 0x%p != the_kgnilnd @ 0x%p\n",
		ni->ni_lnd, &the_kgnilnd);

	if (kgnilnd_data.kgn_init == GNILND_INIT_NOTHING) {
		rc = kgnilnd_base_startup();
		if (rc != 0)
			RETURN(rc);
	}

	/* Serialize with shutdown. */
	mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);

	LIBCFS_ALLOC(net, sizeof(*net));
	if (net == NULL) {
		CERROR("could not allocate net for new interface instance\n");
		/* no need to clean up the CDM... */
		GOTO(failed, rc = -ENOMEM);
	}
	INIT_LIST_HEAD(&net->gnn_list);
	ni->ni_data = net;
	net->gnn_ni = ni;
	ni->ni_maxtxcredits = *kgnilnd_tunables.kgn_credits;
	ni->ni_peertxcredits = *kgnilnd_tunables.kgn_peer_credits;

	if (*kgnilnd_tunables.kgn_peer_health) {
		int     fudge;
		int     timeout;
		/* give this a bit of leeway - we don't have a hard timeout
		 * as we only check timeouts periodically - see comment in kgnilnd_reaper */
		fudge = (GNILND_TO2KA(*kgnilnd_tunables.kgn_timeout) / GNILND_REAPER_NCHECKS);
		timeout = *kgnilnd_tunables.kgn_timeout + fudge;

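		/* a peer_timeout of -1 (or below) means "unset", so fall back
		 * to the computed timeout; 0..timeout-1 is too short and is
		 * rejected outright */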
		if (*kgnilnd_tunables.kgn_peer_timeout >= timeout) {
			ni->ni_peertimeout = *kgnilnd_tunables.kgn_peer_timeout;
		} else if (*kgnilnd_tunables.kgn_peer_timeout > -1) {
			LCONSOLE_ERROR("Peer_timeout is set to %d but needs to be >= %d\n",
					*kgnilnd_tunables.kgn_peer_timeout,
					timeout);
			ni->ni_data = NULL;
			LIBCFS_FREE(net, sizeof(*net));
			GOTO(failed, rc = -EINVAL);
		} else {
			ni->ni_peertimeout = timeout;
		}

		LCONSOLE_INFO("Enabling LNet peer health for gnilnd, timeout %ds\n",
			      ni->ni_peertimeout);
	}

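	/* startup owns this initial ref; kgnilnd_shutdown releases it once
	 * the net has quiesced and been unlinked */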
	atomic_set(&net->gnn_refcount, 1);

	/* if we have multiple devices, spread the nets around */
	net->gnn_netnum = LNET_NETNUM(LNET_NIDNET(ni->ni_nid));

	devno = LNET_NIDNET(ni->ni_nid) % GNILND_MAXDEVS;
	net->gnn_dev = &kgnilnd_data.kgn_devices[devno];

	/* allocate a 'dummy' cdm for datagram use. We can only have a single
	 * datagram between a nid:inst_id and nid2:inst_id. The fake cdm
	 * gives us additional inst_ids to use, allowing the datagrams to flow
	 * like rivers of honey and beer */

	/* the instance id for the cdm is the NETNUM offset by MAXDEVS -
	 * ensuring we'll have a unique id */

	ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), net->gnn_dev->gnd_nid);
	CDEBUG(D_NET, "adding net %p nid=%s on dev %d\n",
		net, libcfs_nid2str(ni->ni_nid), net->gnn_dev->gnd_id);
	/* until the gnn_list is set, we need to clean up after ourselves, as
	 * kgnilnd_shutdown will just get confused */

	down_write(&kgnilnd_data.kgn_net_rw_sem);
	list_add_tail(&net->gnn_list, kgnilnd_netnum2netlist(net->gnn_netnum));
	up_write(&kgnilnd_data.kgn_net_rw_sem);

	/* we need a separate thread to call probe_wait_by_id until
	 * we get a function callback notifier from kgni */
	mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
	RETURN(0);
 failed:
	mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
	kgnilnd_shutdown(ni);
	RETURN(rc);
}

void
kgnilnd_shutdown(lnet_ni_t *ni)
{
	kgn_net_t     *net = ni->ni_data;
	int           i;
	int           rc;
	ENTRY;

	CFS_RACE(CFS_FAIL_GNI_SR_DOWN_RACE);

	LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_ALL,
		"init %d\n", kgnilnd_data.kgn_init);

	/* Serialize with startup. */
	mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
	CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
	       atomic_read(&libcfs_kmemory));

	if (net == NULL) {
		CERROR("got NULL net for ni %p\n", ni);
		GOTO(out, rc = -EINVAL);
	}

	LASSERTF(ni == net->gnn_ni,
		"ni %p gnn_ni %p\n", ni, net->gnn_ni);

	ni->ni_data = NULL;

	LASSERT(!net->gnn_shutdown);
	LASSERTF(atomic_read(&net->gnn_refcount) != 0,
		"net %p refcount %d\n",
		 net, atomic_read(&net->gnn_refcount));

	if (!list_empty(&net->gnn_list)) {
		/* serialize with peer creation */
		down_write(&kgnilnd_data.kgn_net_rw_sem);
		net->gnn_shutdown = 1;
		up_write(&kgnilnd_data.kgn_net_rw_sem);

		kgnilnd_cancel_net_dgrams(net);

		kgnilnd_del_conn_or_peer(net, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);

		/* if we are quiesced, need to wake up - we need those threads
		 * alive to release peers, etc */
		if (GNILND_IS_QUIESCED) {
			set_mb(kgnilnd_data.kgn_quiesce_trigger, GNILND_QUIESCE_IDLE);
			kgnilnd_quiesce_wait("shutdown");
		}

		kgnilnd_wait_for_canceled_dgrams(net->gnn_dev);

		/* Wait until the net's refcount drops to 1 - the final ref is
		 * ours, so holding it until everything else is done ensures
		 * nothing still references the net when we free it. */
		i = 4;
		while (atomic_read(&net->gnn_refcount) != 1) {
			i++;
			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
				"Waiting for %d references to clear on net %d\n",
				atomic_read(&net->gnn_refcount),
				net->gnn_netnum);
			cfs_pause(cfs_time_seconds(1));
		}

		/* release ref from kgnilnd_startup */
		kgnilnd_net_decref(net);
		/* serialize with reaper and conn_task looping */
		down_write(&kgnilnd_data.kgn_net_rw_sem);
		list_del_init(&net->gnn_list);
		up_write(&kgnilnd_data.kgn_net_rw_sem);
	}

	/* not locking, this can't race with writers */
	LASSERTF(atomic_read(&net->gnn_refcount) == 0,
		"net %p refcount %d\n",
		 net, atomic_read(&net->gnn_refcount));
	LIBCFS_FREE(net, sizeof(*net));

out:
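	/* if every net hash chain is empty, this was the last net - tear
	 * down the base state as well */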
	down_read(&kgnilnd_data.kgn_net_rw_sem);
	for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
		if (!list_empty(&kgnilnd_data.kgn_nets[i])) {
			up_read(&kgnilnd_data.kgn_net_rw_sem);
			break;
		}

		if (i == *kgnilnd_tunables.kgn_net_hash_size - 1) {
			up_read(&kgnilnd_data.kgn_net_rw_sem);
			kgnilnd_base_shutdown();
		}
	}
	CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
	       atomic_read(&libcfs_kmemory));

	mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
	EXIT;
}

void __exit
kgnilnd_module_fini(void)
{
	lnet_unregister_lnd(&the_kgnilnd);
	kgnilnd_proc_fini();
	kgnilnd_remove_sysctl();
	kgnilnd_tunables_fini();
}

int __init
kgnilnd_module_init(void)
{
	int    rc;

	rc = kgnilnd_tunables_init();
	if (rc != 0)
		return rc;

	printk(KERN_INFO "Lustre: kgnilnd build version: "KGNILND_BUILD_REV"\n");

	kgnilnd_insert_sysctl();
	kgnilnd_proc_init();

	lnet_register_lnd(&the_kgnilnd);

	return 0;
}

MODULE_AUTHOR("Cray, Inc. <nic@cray.com>");
MODULE_DESCRIPTION("Kernel Gemini LND v"KGNILND_BUILD_REV);
MODULE_LICENSE("GPL");

module_init(kgnilnd_module_init);
module_exit(kgnilnd_module_fini);