[fs/lustre-release.git] / lnet / klnds / gnilnd / gnilnd.c
/*
 * Copyright (C) 2012 Cray, Inc.
 *
 *   Author: Nic Henke <nic@cray.com>
 *   Author: James Shimek <jshimek@cray.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */
#include "gnilnd.h"

/* Primary entry points from LNET.  There are no guarantees against reentrance. */
lnd_t the_kgnilnd = {
#ifdef CONFIG_CRAY_XT
        .lnd_type       = GNILND,
#else
        .lnd_type       = GNIIPLND,
#endif
        .lnd_startup    = kgnilnd_startup,
        .lnd_shutdown   = kgnilnd_shutdown,
        .lnd_ctl        = kgnilnd_ctl,
        .lnd_send       = kgnilnd_send,
        .lnd_recv       = kgnilnd_recv,
        .lnd_eager_recv = kgnilnd_eager_recv,
        .lnd_query      = kgnilnd_query,
};

kgn_data_t      kgnilnd_data;

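/* Close all ESTABLISHED conns to 'peer' that are strictly older than
 * 'newconn' (ordered by peerstamp, then peer_connstamp) and return the
 * number closed.  A matched-stamp loopback pair is deliberately skipped. */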
/* needs write_lock on kgn_peer_conn_lock */
int
kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
        kgn_conn_t         *conn;
        struct list_head   *ctmp, *cnxt;
        int                 loopback;
        int                 count = 0;

        loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;

        list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
                conn = list_entry(ctmp, kgn_conn_t, gnc_list);

                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                if (conn == newconn)
                        continue;

                if (conn->gnc_device != newconn->gnc_device)
                        continue;

                /* This is a two connection loopback - one talking to the other */
                if (loopback &&
                    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
                    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp) {
                        CDEBUG(D_NET, "skipping prune of %p, "
                                "loopback and matching stamps"
                                " connstamp "LPU64"("LPU64")"
                                " peerstamp "LPU64"("LPU64")\n",
                                conn, newconn->gnc_my_connstamp,
                                conn->gnc_peer_connstamp,
                                newconn->gnc_peer_connstamp,
                                conn->gnc_my_connstamp);
                        continue;
                }

                if (conn->gnc_peerstamp != newconn->gnc_peerstamp) {
                        LASSERTF(conn->gnc_peerstamp < newconn->gnc_peerstamp,
                                "conn 0x%p peerstamp "LPU64" >= "
                                "newconn 0x%p peerstamp "LPU64"\n",
                                conn, conn->gnc_peerstamp,
                                newconn, newconn->gnc_peerstamp);

                        CDEBUG(D_NET, "Closing stale conn nid: %s "
                               " peerstamp:"LPX64"("LPX64")\n",
                               libcfs_nid2str(peer->gnp_nid),
                               conn->gnc_peerstamp, newconn->gnc_peerstamp);
                } else {
                        LASSERTF(conn->gnc_peer_connstamp < newconn->gnc_peer_connstamp,
                                "conn 0x%p peer_connstamp "LPU64" >= "
                                "newconn 0x%p peer_connstamp "LPU64"\n",
                                conn, conn->gnc_peer_connstamp,
                                newconn, newconn->gnc_peer_connstamp);

                        CDEBUG(D_NET, "Closing stale conn nid: %s"
                               " connstamp:"LPU64"("LPU64")\n",
                               libcfs_nid2str(peer->gnp_nid),
                               conn->gnc_peer_connstamp, newconn->gnc_peer_connstamp);
                }

                count++;
                kgnilnd_close_conn_locked(conn, -ESTALE);
        }

        if (count != 0) {
                CWARN("Closed %d stale conns to %s\n", count, libcfs_nid2str(peer->gnp_nid));
        }

        RETURN(count);
}

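/* Decide whether 'newconn' duplicates a live conn.  Returns 0 when 'newconn'
 * is genuinely new; 1 or 2 when it is from an earlier incarnation of 'peer'
 * (by peerstamp or peer_connstamp); 3 when the stamps match exactly. */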
int
kgnilnd_conn_isdup_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
        kgn_conn_t       *conn;
        struct list_head *tmp;
        int               loopback;
        ENTRY;

        loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;

        list_for_each(tmp, &peer->gnp_conns) {
                conn = list_entry(tmp, kgn_conn_t, gnc_list);
                CDEBUG(D_NET, "checking conn 0x%p for peer %s"
                        " lo %d new "LPU64" existing "LPU64
                        " new peer "LPU64" existing peer "LPU64
                        " new dev %p existing dev %p\n",
                        conn, libcfs_nid2str(peer->gnp_nid),
                        loopback,
                        newconn->gnc_peerstamp, conn->gnc_peerstamp,
                        newconn->gnc_peer_connstamp, conn->gnc_peer_connstamp,
                        newconn->gnc_device, conn->gnc_device);

                /* conn is in the process of closing */
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                /* 'newconn' is from an earlier version of 'peer'!!! */
                if (newconn->gnc_peerstamp < conn->gnc_peerstamp)
                        RETURN(1);

                /* 'conn' is from an earlier version of 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->gnc_peerstamp > conn->gnc_peerstamp)
                        continue;

                /* Different devices are OK */
                if (conn->gnc_device != newconn->gnc_device)
                        continue;

                /* It's me connecting to myself */
                if (loopback &&
                    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
                    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp)
                        continue;

                /* 'newconn' is an earlier connection from 'peer'!!! */
                if (newconn->gnc_peer_connstamp < conn->gnc_peer_connstamp)
                        RETURN(2);

                /* 'conn' is an earlier connection from 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->gnc_peer_connstamp > conn->gnc_peer_connstamp)
                        continue;

                /* 'newconn' has the SAME connection stamp; 'peer' isn't
                 * playing the game... */
                RETURN(3);
        }

        RETURN(0);
}

int
kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
{
        kgn_conn_t      *conn;
        gni_return_t    rrc;
        int             rc = 0;

        LASSERT (!in_interrupt());
        atomic_inc(&kgnilnd_data.kgn_nconns);

        /* divide by 2 to allow for complete reset and immediate reconnect */
        if (atomic_read(&kgnilnd_data.kgn_nconns) >= GNILND_MAX_CQID/2) {
                CERROR("Too many conns are live: %d > %d\n",
                        atomic_read(&kgnilnd_data.kgn_nconns), GNILND_MAX_CQID/2);
                atomic_dec(&kgnilnd_data.kgn_nconns);
                return -E2BIG;
        }

        LIBCFS_ALLOC(conn, sizeof(*conn));
        if (conn == NULL) {
                atomic_dec(&kgnilnd_data.kgn_nconns);
                return -ENOMEM;
        }

        LIBCFS_ALLOC(conn->gnc_tx_ref_table, GNILND_MAX_MSG_ID * sizeof(void *));
        if (conn->gnc_tx_ref_table == NULL) {
                CERROR("Can't allocate conn tx_ref_table\n");
                GOTO(failed, rc = -ENOMEM);
        }

        atomic_set(&conn->gnc_refcount, 1);
        atomic_set(&conn->gnc_reaper_noop, 0);
        atomic_set(&conn->gnc_sched_noop, 0);
        atomic_set(&conn->gnc_tx_in_use, 0);
        INIT_LIST_HEAD(&conn->gnc_list);
        INIT_LIST_HEAD(&conn->gnc_hashlist);
        INIT_LIST_HEAD(&conn->gnc_schedlist);
        INIT_LIST_HEAD(&conn->gnc_fmaq);
        INIT_LIST_HEAD(&conn->gnc_mdd_list);
        spin_lock_init(&conn->gnc_list_lock);
        spin_lock_init(&conn->gnc_tx_lock);
        conn->gnc_magic = GNILND_CONN_MAGIC;

        /* set tx id to nearly the end to make sure we find wrapping
         * issues soon */
        conn->gnc_next_tx = (int) GNILND_MAX_MSG_ID - 10;

        /* if this fails, we have conflicts and MAX_TX is too large */
        CLASSERT(GNILND_MAX_MSG_ID < GNILND_MSGID_CLOSE);

        /* get a new unique CQ id for this conn */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        conn->gnc_my_connstamp = kgnilnd_data.kgn_connstamp++;
        conn->gnc_cqid = kgnilnd_get_cqid_locked();
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        if (conn->gnc_cqid == 0) {
                CERROR("Could not allocate unique CQ ID for conn 0x%p\n", conn);
                GOTO(failed, rc = -E2BIG);
        }

        CDEBUG(D_NET, "alloc cqid %u for conn 0x%p\n",
                conn->gnc_cqid, conn);

        /* need to be set before gnc_ephandle to allow kgnilnd_destroy_conn_ep to
         * check context */
        conn->gnc_device = dev;

        conn->gnc_timeout = MAX(*kgnilnd_tunables.kgn_timeout,
                                GNILND_MIN_TIMEOUT);
        kgnilnd_update_reaper_timeout(conn->gnc_timeout);

        /* this is the ep_handle for doing SMSG & BTE */
        mutex_lock(&dev->gnd_cq_mutex);
        rrc = kgnilnd_ep_create(dev->gnd_handle, dev->gnd_snd_fma_cqh,
                                &conn->gnc_ephandle);
        mutex_unlock(&dev->gnd_cq_mutex);
        if (rrc != GNI_RC_SUCCESS)
                GOTO(failed, rc = -ENETDOWN);

        CDEBUG(D_NET, "created conn 0x%p ep_hndl 0x%p\n",
               conn, conn->gnc_ephandle);

        /* add ref for EP canceling */
        kgnilnd_conn_addref(conn);
        atomic_inc(&dev->gnd_neps);

        *connp = conn;
        return 0;

failed:
        atomic_dec(&kgnilnd_data.kgn_nconns);
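        /* NB: gnc_tx_ref_table may still be NULL on this path; LIBCFS_FREE is
         * assumed to tolerate a NULL pointer and simply complain */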
        LIBCFS_FREE(conn->gnc_tx_ref_table, GNILND_MAX_MSG_ID * sizeof(void *));
        LIBCFS_FREE(conn, sizeof(*conn));
        return rc;
}

/* needs to be called with kgn_peer_conn_lock held (read or write) */
kgn_conn_t *
kgnilnd_find_conn_locked(kgn_peer_t *peer)
{
        kgn_conn_t      *conn = NULL;

        /* if we are in reset, this conn is going to die soon */
        if (unlikely(kgnilnd_data.kgn_in_reset)) {
                RETURN(NULL);
        }

        /* just return the first ESTABLISHED connection */
        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                /* kgnilnd_finish_connect doesn't put connections on the
                 * peer list until they are actually established */
                LASSERTF(conn->gnc_state >= GNILND_CONN_ESTABLISHED,
                        "found conn %p state %s on peer %p (%s)\n",
                        conn, kgnilnd_conn_state2str(conn), peer,
                        libcfs_nid2str(peer->gnp_nid));
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                RETURN(conn);
        }
        RETURN(NULL);
}

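/* Return an ESTABLISHED conn to 'peer' if one exists; otherwise, when
 * allowed (past the reconnect backoff, no lingering EPs, peer IDLE), queue
 * the peer for a new connection attempt and return NULL. */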
/* needs write_lock on kgn_peer_conn_lock held */
kgn_conn_t *
kgnilnd_find_or_create_conn_locked(kgn_peer_t *peer)
{
        kgn_device_t    *dev = peer->gnp_net->gnn_dev;
        kgn_conn_t      *conn;

        conn = kgnilnd_find_conn_locked(peer);

        if (conn != NULL) {
                return conn;
        }

        /* if the peer was previously connecting, check if we should
         * trigger another connection attempt yet. */
        if (time_before(jiffies, peer->gnp_reconnect_time)) {
                return NULL;
        }

        /* This check prevents us from creating a new connection to a peer while we are
         * still in the process of closing an existing connection to the peer.
         */
        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_ephandle != NULL) {
                        CDEBUG(D_NET, "Not connecting - non-NULL ephandle found for peer 0x%p->%s\n", peer,
                                libcfs_nid2str(peer->gnp_nid));
                        return NULL;
                }
        }

        if (peer->gnp_connecting != GNILND_PEER_IDLE) {
                /* don't start a new connection unless the peer is IDLE -
                 * anything else means a connect is already in flight */
                return NULL;
        }

        CDEBUG(D_NET, "starting connect to %s\n",
                libcfs_nid2str(peer->gnp_nid));
        peer->gnp_connecting = GNILND_PEER_CONNECT;
        kgnilnd_peer_addref(peer); /* extra ref for connd */

        spin_lock(&dev->gnd_connd_lock);
        list_add_tail(&peer->gnp_connd_list, &dev->gnd_connd_peers);
        spin_unlock(&dev->gnd_connd_lock);

        kgnilnd_schedule_dgram(dev);
        CDEBUG(D_NETTRACE, "scheduling new connect\n");

        return NULL;
}

/* Caller is responsible for deciding if/when to call this */
void
kgnilnd_destroy_conn_ep(kgn_conn_t *conn)
{
        gni_return_t    rrc;
        gni_ep_handle_t tmp_ep;

        /* only destroy the EP if we actually initialized it; swapping in NULL
         * tells kgnilnd_destroy_conn to leave it alone */
        tmp_ep = xchg(&conn->gnc_ephandle, NULL);
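        /* NB: xchg is atomic, so even with racing callers only one of them
         * observes the non-NULL handle and performs the destroy */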
        if (tmp_ep != NULL) {
                /* we never re-use the EP, so unbind is not needed */
                mutex_lock(&conn->gnc_device->gnd_cq_mutex);
                rrc = kgnilnd_ep_destroy(tmp_ep);

                mutex_unlock(&conn->gnc_device->gnd_cq_mutex);

                /* if this fails, it could hork up kgni smsg retransmit and others
                 * since we could free the SMSG mbox memory, etc. */
                LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d conn 0x%p ep 0x%p\n",
                         rrc, conn, conn->gnc_ephandle);

                atomic_dec(&conn->gnc_device->gnd_neps);

                /* clear out count added in kgnilnd_close_conn_locked
                 * conn will have a peer once it hits finish_connect, where it
                 * is the first spot we'll mark it ESTABLISHED as well */
                if (conn->gnc_peer) {
                        kgnilnd_admin_decref(conn->gnc_peer->gnp_dirty_eps);
                }

                /* drop ref for EP */
                kgnilnd_conn_decref(conn);
        }
}

void
kgnilnd_destroy_conn(kgn_conn_t *conn)
{
        LASSERTF(!in_interrupt() &&
                !conn->gnc_scheduled &&
                !conn->gnc_in_purgatory &&
                conn->gnc_ephandle == NULL &&
                list_empty(&conn->gnc_list) &&
                list_empty(&conn->gnc_hashlist) &&
                list_empty(&conn->gnc_schedlist) &&
                list_empty(&conn->gnc_mdd_list) &&
                conn->gnc_magic == GNILND_CONN_MAGIC,
                "conn 0x%p->%s IRQ %d sched %d purg %d ep 0x%p Mg %d lists %d/%d/%d/%d\n",
                conn, conn->gnc_peer ? libcfs_nid2str(conn->gnc_peer->gnp_nid)
                                     : "<?>",
                !!in_interrupt(), conn->gnc_scheduled,
                conn->gnc_in_purgatory,
                conn->gnc_ephandle,
                conn->gnc_magic,
                list_empty(&conn->gnc_list),
                list_empty(&conn->gnc_hashlist),
                list_empty(&conn->gnc_schedlist),
                list_empty(&conn->gnc_mdd_list));

        /* Tripping these is especially bad, as it means we have items on the
         *  lists that didn't keep their refcount on the connection - or
         *  somebody evil released their own */
        LASSERTF(list_empty(&conn->gnc_fmaq) &&
                 atomic_read(&conn->gnc_nlive_fma) == 0 &&
                 atomic_read(&conn->gnc_nlive_rdma) == 0,
                 "conn 0x%p fmaq %d@0x%p nfma %d nrdma %d\n",
                 conn, kgnilnd_count_list(&conn->gnc_fmaq), &conn->gnc_fmaq,
                 atomic_read(&conn->gnc_nlive_fma), atomic_read(&conn->gnc_nlive_rdma));

        CDEBUG(D_NET, "destroying conn %p ephandle %p error %d\n",
                conn, conn->gnc_ephandle, conn->gnc_error);

        /* We are freeing this memory; remove the magic value from the connection */
        conn->gnc_magic = 0;

        /* if there is an FMA blk left here, we'll tear it down */
        if (conn->gnc_fma_blk) {
                if (conn->gnc_peer) {
                        kgn_mbox_info_t *mbox;
                        mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
                        mbox->mbx_prev_nid = conn->gnc_peer->gnp_nid;
                }
                kgnilnd_release_mbox(conn, 0);
        }

        if (conn->gnc_peer != NULL)
                kgnilnd_peer_decref(conn->gnc_peer);

        if (conn->gnc_tx_ref_table != NULL) {
                LIBCFS_FREE(conn->gnc_tx_ref_table,
                            GNILND_MAX_MSG_ID * sizeof(void *));
        }

        LIBCFS_FREE(conn, sizeof(*conn));
        atomic_dec(&kgnilnd_data.kgn_nconns);
}

/* peer_alive and peer_notify done in the style of the o2iblnd */
void
kgnilnd_peer_alive(kgn_peer_t *peer)
{
        set_mb(peer->gnp_last_alive, jiffies);
}

void
kgnilnd_peer_notify(kgn_peer_t *peer, int error)
{
        int                     tell_lnet = 0;
        int                     nnets = 0;
        int                     rc;
        int                     i, j;
        kgn_conn_t             *conn;
        kgn_net_t             **nets;
        kgn_net_t              *net;

        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DONT_NOTIFY))
                return;

        /* Tell LNet we are giving up on this peer - but only
         * if it isn't already reconnected or trying to reconnect */
        read_lock(&kgnilnd_data.kgn_peer_conn_lock);

        /* use kgnilnd_find_conn_locked to avoid any conns in the process of being nuked
         *
         * don't tell LNet if we are in reset - we assume that everyone will be able to
         * reconnect just fine
         */
        conn = kgnilnd_find_conn_locked(peer);

        CDEBUG(D_NETTRACE, "peer 0x%p->%s connecting %d conn 0x%p, rst %d error %d\n",
               peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting, conn,
               kgnilnd_data.kgn_in_reset, error);

        if ((peer->gnp_connecting == GNILND_PEER_IDLE) &&
            (conn == NULL) &&
            (!kgnilnd_data.kgn_in_reset) &&
            (!kgnilnd_conn_clean_errno(error))) {
                tell_lnet = 1;
        }

        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        if (!tell_lnet) {
                /* short circuit if we don't need to notify LNet */
                return;
        }

        rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);

        if (rc) {
                /* only proceed when we got the sem - a trylock failure means
                 * LNet is in shutdown or something similar */

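                /* Snapshot the nets while holding the sem: count them, then
                 * take a ref on each into a local array so lnet_notify can be
                 * called safely after the sem is dropped. */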
                for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                        list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
                                /* if gnn_shutdown is set for any net, shutdown is in progress; just return */
                                if (net->gnn_shutdown) {
                                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                                        return;
                                }
                                nnets++;
                        }
                }

                if (nnets == 0) {
                        /* shutdown in progress most likely */
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        return;
                }

                LIBCFS_ALLOC(nets, nnets * sizeof(*nets));

                if (nets == NULL) {
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        CERROR("Failed to allocate nets[%d]\n", nnets);
                        return;
                }

                j = 0;
                for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                        list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
                                nets[j] = net;
                                kgnilnd_net_addref(net);
                                j++;
                        }
                }
                up_read(&kgnilnd_data.kgn_net_rw_sem);

                for (i = 0; i < nnets; i++) {
                        lnet_nid_t peer_nid;

                        net = nets[i];

                        peer_nid = kgnilnd_lnd2lnetnid(net->gnn_ni->ni_nid,
                                                       peer->gnp_nid);

                        CDEBUG(D_NET, "peer 0x%p->%s last_alive %lu (%lus ago)\n",
                                peer, libcfs_nid2str(peer_nid), peer->gnp_last_alive,
                                cfs_duration_sec(jiffies - peer->gnp_last_alive));

                        lnet_notify(net->gnn_ni, peer_nid, 0, peer->gnp_last_alive);

                        kgnilnd_net_decref(net);
                }

                LIBCFS_FREE(nets, nnets * sizeof(*nets));
        }
}

/* need write_lock on kgn_peer_conn_lock */
void
kgnilnd_close_conn_locked(kgn_conn_t *conn, int error)
{
        kgn_peer_t        *peer = conn->gnc_peer;
        ENTRY;

        LASSERT(!in_interrupt());

        /* store error for tx completion */
        conn->gnc_error = error;
        peer->gnp_last_errno = error;

        /* use real error from peer if possible */
        if (error == -ECONNRESET) {
                error = conn->gnc_peer_error;
        }

        /* if we NETERROR, make sure it is rate limited */
        if (!kgnilnd_conn_clean_errno(error) &&
            peer->gnp_down == GNILND_RCA_NODE_UP) {
                CNETERR("closing conn to %s: error %d\n",
                       libcfs_nid2str(peer->gnp_nid), error);
        } else {
                CDEBUG(D_NET, "closing conn to %s: error %d\n",
                       libcfs_nid2str(peer->gnp_nid), error);
        }

        LASSERTF(conn->gnc_state == GNILND_CONN_ESTABLISHED,
                "conn %p to %s with bogus state %s\n", conn,
                libcfs_nid2str(conn->gnc_peer->gnp_nid),
                kgnilnd_conn_state2str(conn));
        LASSERT(!list_empty(&conn->gnc_hashlist));
        LASSERT(!list_empty(&conn->gnc_list));

        /* mark peer count here so any place the EP gets destroyed will
         * open up the peer count so that a new ESTABLISHED conn is then free
         * to send new messages -- sending before the previous EPs are destroyed
         * could end up with messages on the network for the old conn _after_
         * the new conn and break the mbox safety protocol */
        kgnilnd_admin_addref(conn->gnc_peer->gnp_dirty_eps);

        /* Remove from conn hash table: no new callbacks */
        list_del_init(&conn->gnc_hashlist);
        kgnilnd_data.kgn_conn_version++;
        kgnilnd_conn_decref(conn);

        /* if we are in reset, go right to CLOSED as there is no scheduler
         * thread to move from CLOSING to CLOSED */
        if (unlikely(kgnilnd_data.kgn_in_reset)) {
                conn->gnc_state = GNILND_CONN_CLOSED;
        } else {
                conn->gnc_state = GNILND_CONN_CLOSING;
        }

        /* leave on peer->gnp_conns to make sure we don't let the reaper
         * or others try to unlink this peer until the conn is fully
         * processed for closing */

        if (kgnilnd_check_purgatory_conn(conn)) {
                kgnilnd_add_purgatory_locked(conn, conn->gnc_peer);
        }

        /* Reset RX timeout to ensure we wait for an incoming CLOSE
         * for the full timeout.  If we get a CLOSE we know the
         * peer has stopped all RDMA.  Otherwise if we wait for
         * the full timeout we can also be sure all RDMA has stopped. */
        conn->gnc_last_rx = conn->gnc_last_rx_cq = jiffies;
        mb();

        /* schedule sending CLOSE - if we are in quiesce, this adds to
         * gnd_ready_conns and allows us to find it in quiesce processing */
        kgnilnd_schedule_conn(conn);

        EXIT;
}

void
kgnilnd_close_conn(kgn_conn_t *conn, int error)
{
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        /* need to check the state here - this call is racy and we don't
         * know the state until after the lock is grabbed */
        if (conn->gnc_state == GNILND_CONN_ESTABLISHED) {
                kgnilnd_close_conn_locked(conn, error);
        }
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
}

void
kgnilnd_complete_closed_conn(kgn_conn_t *conn)
{
        LIST_HEAD(sinners);
        kgn_tx_t               *tx, *txn;
        int                     nlive = 0;
        int                     nrdma = 0;
        int                     nq_rdma = 0;
        int                     logmsg;
        ENTRY;

        /* Dump log on cksum error - wait until complete phase to let
         * RX of error happen */
        if (*kgnilnd_tunables.kgn_checksum_dump &&
            (conn != NULL && conn->gnc_peer_error == -ENOKEY)) {
                libcfs_debug_dumplog();
        }

        /* _CLOSED set in kgnilnd_process_fmaq once we decide to
         * send the CLOSE or not */
        LASSERTF(conn->gnc_state == GNILND_CONN_CLOSED,
                 "conn 0x%p->%s with bad state %s\n",
                 conn, conn->gnc_peer ?
                        libcfs_nid2str(conn->gnc_peer->gnp_nid) :
                        "<?>",
                 kgnilnd_conn_state2str(conn));

        LASSERT(list_empty(&conn->gnc_hashlist));

        /* we've sent the close, start nuking */
        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SCHEDULE_COMPLETE))
                kgnilnd_schedule_conn(conn);

        if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
                CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
                                "done; attempting to recover conn 0x%p "
                                "scheduled %d function: %s line: %d\n", conn,
                                conn->gnc_scheduled, conn->gnc_sched_caller,
                                conn->gnc_sched_line);
                RETURN_EXIT;
        }

        /* we don't use lists to track things that we can get out of the
         * tx_ref table... */

        /* need to hold locks for tx_list_state, sampling it is too racy:
         * - the lock actually protects tx != NULL, but we can't take the proper
         *   lock until we check tx_list_state, which would be too late and
         *   we could have the TX change under us.
         * gnd_rdmaq_lock and gnd_lock are not used together, so taking both
         * should be fine */
        spin_lock(&conn->gnc_device->gnd_rdmaq_lock);
        spin_lock(&conn->gnc_device->gnd_lock);

        for (nrdma = 0; nrdma < GNILND_MAX_MSG_ID; nrdma++) {
                tx = conn->gnc_tx_ref_table[nrdma];

                if (tx != NULL) {
                        /* only print the first error and if not CLOSE, we often don't see
                         * CQ events for that by the time we get here... and really don't care */
                        if (nlive || tx->tx_msg.gnm_type == GNILND_MSG_CLOSE)
                                tx->tx_state |= GNILND_TX_QUIET_ERROR;
                        nlive++;
                        GNIDBG_TX(D_NET, tx, "cleaning up on close, nlive %d", nlive);

                        /* don't worry about gnc_lock here as nobody else should be
                         * touching this conn */
                        kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
                        list_add_tail(&tx->tx_list, &sinners);
                }
        }
        spin_unlock(&conn->gnc_device->gnd_lock);
        spin_unlock(&conn->gnc_device->gnd_rdmaq_lock);

        /* nobody should have marked this as needing scheduling after
         * we called close - so only ref should be us handling it */
        if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
                CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
                                "done; attempting to recover conn 0x%p "
                                "scheduled %d function %s line: %d\n", conn,
                                conn->gnc_scheduled, conn->gnc_sched_caller,
                                conn->gnc_sched_line);
        }
        /* now reset a few to actual counters... */
        nrdma = atomic_read(&conn->gnc_nlive_rdma);
        nq_rdma = atomic_read(&conn->gnc_nq_rdma);

        if (!list_empty(&sinners)) {
                list_for_each_entry_safe(tx, txn, &sinners, tx_list) {
                        /* clear tx_list to make tx_add_list_locked happy */
                        list_del_init(&tx->tx_list);
                        /* The error codes determine if we hold onto the MDD */
                        kgnilnd_tx_done(tx, conn->gnc_error);
                }
        }

        logmsg = (nlive + nrdma + nq_rdma);

        if (logmsg) {
                if (conn->gnc_peer->gnp_down == GNILND_RCA_NODE_UP) {
                        CNETERR("Closed conn 0x%p->%s (errno %d, peer errno %d): "
                                "canceled %d TX, %d/%d RDMA\n",
                                conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                                conn->gnc_error, conn->gnc_peer_error,
                                nlive, nq_rdma, nrdma);
                } else {
                        CDEBUG(D_NET, "Closed conn 0x%p->%s (errno %d,"
                                " peer errno %d): canceled %d TX, %d/%d RDMA\n",
                                conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                                conn->gnc_error, conn->gnc_peer_error,
                                nlive, nq_rdma, nrdma);
                }
        }

        kgnilnd_destroy_conn_ep(conn);

        /* Bug 765042 - race this with completing a new conn to same peer - we need
         * finish_connect to detach purgatory before we can do it ourselves here */
        CFS_RACE(CFS_FAIL_GNI_FINISH_PURG);

        /* now it is safe to remove from peer list - anyone looking at
         * gnp_conns now is free to unlink if not on purgatory */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);

        conn->gnc_state = GNILND_CONN_DONE;

        /* Decrement counter if we are marked by del_conn_or_peers for closing
         */
        if (conn->gnc_needs_closing)
                kgnilnd_admin_decref(kgnilnd_data.kgn_npending_conns);

        /* Remove from peer's list of valid connections if it's not in purgatory */
        if (!conn->gnc_in_purgatory) {
                list_del_init(&conn->gnc_list);
                /* Lose peer's reference on the conn */
                kgnilnd_conn_decref(conn);
        }

        /* NB - only unlinking if we set pending in del_peer_locked from admin or
         * shutdown */
        if (kgnilnd_peer_active(conn->gnc_peer) &&
            conn->gnc_peer->gnp_pending_unlink &&
            kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
                kgnilnd_unlink_peer_locked(conn->gnc_peer);
        }

        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        /* I'm telling Mommy! - use peer_error if they initiated close */
        kgnilnd_peer_notify(conn->gnc_peer,
                            conn->gnc_error == -ECONNRESET ? conn->gnc_peer_error
                                                           : conn->gnc_error);

        EXIT;
}

int
kgnilnd_set_conn_params(kgn_dgram_t *dgram)
{
        kgn_conn_t             *conn = dgram->gndg_conn;
        kgn_connreq_t          *connreq = &dgram->gndg_conn_in;
        kgn_gniparams_t        *rem_param = &connreq->gncr_gnparams;
        gni_return_t            rrc;
        int                     rc = 0;
        gni_smsg_attr_t        *remote = &connreq->gncr_gnparams.gnpr_smsg_attr;

        /* set timeout vals in conn early so we can use them for the NAK */

        /* use max of the requested and our timeout, peer will do the same */
        conn->gnc_timeout = MAX(conn->gnc_timeout, connreq->gncr_timeout);

        /* only ep_bind really mucks around with the CQ */
        /* Only ep_bind if we are not connecting to ourselves and the dstnid is not a wildcard. This check
         * is necessary as you can only bind an EP once and we must make sure we don't bind when already bound.
         */
        if (connreq->gncr_dstnid != LNET_NID_ANY && dgram->gndg_conn_out.gncr_dstnid != connreq->gncr_srcnid) {
                mutex_lock(&conn->gnc_device->gnd_cq_mutex);
                rrc = kgnilnd_ep_bind(conn->gnc_ephandle,
                        connreq->gncr_gnparams.gnpr_host_id,
                        conn->gnc_cqid);
                mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                if (rrc != GNI_RC_SUCCESS) {
                        rc = -ECONNABORTED;
                        goto return_out;
                }
        }

        rrc = kgnilnd_ep_set_eventdata(conn->gnc_ephandle, conn->gnc_cqid,
                         connreq->gncr_gnparams.gnpr_cqid);
        if (rrc != GNI_RC_SUCCESS) {
                rc = -ECONNABORTED;
                goto cleanup_out;
        }

        /* Initialize SMSG */
        rrc = kgnilnd_smsg_init(conn->gnc_ephandle, &conn->gnpr_smsg_attr,
                        &connreq->gncr_gnparams.gnpr_smsg_attr);
        if (unlikely(rrc == GNI_RC_INVALID_PARAM)) {
                gni_smsg_attr_t *local = &conn->gnpr_smsg_attr;
                /* help folks figure out if there is a tunable off, etc. */
                LCONSOLE_ERROR("SMSG attribute mismatch. Data from local/remote:"
                               " type %d/%d msg_maxsize %u/%u"
                               " mbox_maxcredit %u/%u. Please check kgni"
                               " logs for further data\n",
                               local->msg_type, remote->msg_type,
                               local->msg_maxsize, remote->msg_maxsize,
                               local->mbox_maxcredit, remote->mbox_maxcredit);
        }
        if (rrc != GNI_RC_SUCCESS) {
                rc = -ECONNABORTED;
                goto cleanup_out;
        }

        /* log this for help in debugging SMSG buffer re-use */
        CDEBUG(D_NET, "conn %p src %s dst %s smsg %p acquired"
                " local cqid %u SMSG %p->%u hndl "LPX64"."LPX64
                " remote cqid %u SMSG %p->%u hndl "LPX64"."LPX64"\n",
                conn, libcfs_nid2str(connreq->gncr_srcnid),
                libcfs_nid2str(connreq->gncr_dstnid),
                &conn->gnpr_smsg_attr,
                conn->gnc_cqid,
                conn->gnpr_smsg_attr.msg_buffer,
                conn->gnpr_smsg_attr.mbox_offset,
                conn->gnpr_smsg_attr.mem_hndl.qword1,
                conn->gnpr_smsg_attr.mem_hndl.qword2,
                rem_param->gnpr_cqid,
                rem_param->gnpr_smsg_attr.msg_buffer,
                rem_param->gnpr_smsg_attr.mbox_offset,
                rem_param->gnpr_smsg_attr.mem_hndl.qword1,
                rem_param->gnpr_smsg_attr.mem_hndl.qword2);

        conn->gnc_peerstamp = connreq->gncr_peerstamp;
        conn->gnc_peer_connstamp = connreq->gncr_connstamp;
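        /* NB: the remote mbox address is the base of the peer's advertised
         * SMSG buffer plus its mbox offset */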
        conn->remote_mbox_addr = (void *)((char *)remote->msg_buffer + remote->mbox_offset);

        /* We update the reaper timeout once we have a valid conn and timeout */
        kgnilnd_update_reaper_timeout(GNILND_TO2KA(conn->gnc_timeout));

        return 0;

cleanup_out:
        rrc = kgnilnd_ep_unbind(conn->gnc_ephandle);
        /* not sure I can just let this fly */
        LASSERTF(rrc == GNI_RC_SUCCESS,
                "bad rc from gni_ep_unbind trying to cleanup: %d\n", rrc);

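/* cleanup_out falls through to return_out to log and return the error */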
return_out:
        LASSERTF(rc != 0, "SOFTWARE BUG: rc == 0\n");
        CERROR("Error setting connection params from %s: %d\n",
               libcfs_nid2str(connreq->gncr_srcnid), rc);
        return rc;
}

/* needs down_read on kgn_net_rw_sem held from before this call until
 * after the write_lock on kgn_peer_conn_lock - this ensures we stay sane
 * with kgnilnd_shutdown - it'll get the sem and set shutdown, then get the
 * kgn_peer_conn_lock to start del_peer'ing. If we hold the sem until after
 * kgn_peer_conn_lock is held, we guarantee that nobody calls
 * kgnilnd_add_peer_locked without checking gnn_shutdown */
int
kgnilnd_create_peer_safe(kgn_peer_t **peerp,
                         lnet_nid_t nid,
                         kgn_net_t *net,
                         int node_state)
{
        kgn_peer_t      *peer;
        int             rc;

        LASSERT(nid != LNET_NID_ANY);

        /* We don't pass the net around in the dgram anymore, so this is where
         * we look it up. That works unless we are in shutdown or the nid has
         * an invalid net; either way an error code is returned in that case.
         *
         * If the net passed in is not NULL then we can use it directly; this
         * saves the lookup when the calling function already has the data.
         */
        if (net == NULL) {
                rc = kgnilnd_find_net(nid, &net);
                if (rc < 0)
                        return rc;
        } else {
                /* kgnilnd_find_net adds a reference on the net; since we are
                 * not calling it here, take the reference manually so the net
                 * refcounts are correct when tearing down the net
                 */
                kgnilnd_net_addref(net);
        }

        LIBCFS_ALLOC(peer, sizeof(*peer));
        if (peer == NULL) {
                kgnilnd_net_decref(net);
                return -ENOMEM;
        }
        peer->gnp_nid = nid;
        peer->gnp_down = node_state;

        /* translate from nid to nic addr & store */
        rc = kgnilnd_nid_to_nicaddrs(LNET_NIDADDR(nid), 1, &peer->gnp_host_id);
        if (rc <= 0) {
                kgnilnd_net_decref(net);
                LIBCFS_FREE(peer, sizeof(*peer));
                return -ESRCH;
        }
        CDEBUG(D_NET, "peer 0x%p->%s -> NIC 0x%x\n", peer,
                libcfs_nid2str(nid), peer->gnp_host_id);

        atomic_set(&peer->gnp_refcount, 1);     /* 1 ref for caller */
        atomic_set(&peer->gnp_dirty_eps, 0);

        INIT_LIST_HEAD(&peer->gnp_list);
        INIT_LIST_HEAD(&peer->gnp_connd_list);
        INIT_LIST_HEAD(&peer->gnp_conns);
        INIT_LIST_HEAD(&peer->gnp_tx_queue);

        /* the first reconnect should happen immediately, so we leave
         * gnp_reconnect_interval set to 0 */

        LASSERTF(net != NULL, "peer 0x%p->%s with NULL net\n",
                 peer, libcfs_nid2str(nid));

        /* must have kgn_net_rw_sem held for this...  */
        if (net->gnn_shutdown) {
                /* shutdown has started already */
                kgnilnd_net_decref(net);
                LIBCFS_FREE(peer, sizeof(*peer));
                return -ESHUTDOWN;
        }

        peer->gnp_net = net;

        atomic_inc(&kgnilnd_data.kgn_npeers);

        *peerp = peer;
        return 0;
}

void
kgnilnd_destroy_peer(kgn_peer_t *peer)
{
        CDEBUG(D_NET, "peer %s %p deleted\n",
               libcfs_nid2str(peer->gnp_nid), peer);
        LASSERTF(atomic_read(&peer->gnp_refcount) == 0,
                 "peer 0x%p->%s refs %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid),
                 atomic_read(&peer->gnp_refcount));
        LASSERTF(atomic_read(&peer->gnp_dirty_eps) == 0,
                 "peer 0x%p->%s dirty eps %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid),
                 atomic_read(&peer->gnp_dirty_eps));
        LASSERTF(peer->gnp_net != NULL, "peer %p (%s) with NULL net\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(!kgnilnd_peer_active(peer),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE || peer->gnp_connecting == GNILND_PEER_KILL,
                 "peer 0x%p->%s, connecting %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
        LASSERTF(list_empty(&peer->gnp_conns),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_tx_queue),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_connd_list),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));

        /* NB a peer's connections keep a reference on their peer until
         * they are destroyed, so we can be assured that _all_ state to do
         * with this peer has been cleaned up when its refcount drops to
         * zero. */

        atomic_dec(&kgnilnd_data.kgn_npeers);
        kgnilnd_net_decref(peer->gnp_net);

        LIBCFS_FREE(peer, sizeof(*peer));
}

/* the conn might not have made it all the way through to a connected
 * state - but we need to purgatory any conn that a remote peer might
 * have seen through a posted dgram as well */
void
kgnilnd_add_purgatory_locked(kgn_conn_t *conn, kgn_peer_t *peer)
{
        kgn_mbox_info_t *mbox = NULL;
        ENTRY;

        /* NB - the caller should own conn by removing it from the
         * scheduler thread when finishing the close */

        LASSERTF(peer != NULL, "conn %p with NULL peer\n", conn);

        /* If this is still true, need to add the calls to unlink back in and
         * figure out how to close the hole on loopback conns */
        LASSERTF(kgnilnd_peer_active(peer), "can't use inactive peer %s (%p)"
                " we'll never recover the resources\n",
                libcfs_nid2str(peer->gnp_nid), peer);

        CDEBUG(D_NET, "conn %p peer %p dev %p\n", conn, peer,
                conn->gnc_device);

        LASSERTF(conn->gnc_in_purgatory == 0,
                "Conn already in purgatory\n");
        conn->gnc_in_purgatory = 1;

        mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
        mbox->mbx_prev_purg_nid = peer->gnp_nid;
        mbox->mbx_add_purgatory = jiffies;
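        /* release the mbox with a purgatory hold (+1); the matching -1 is in
         * kgnilnd_release_purgatory_list() once the conn leaves purgatory */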
        kgnilnd_release_mbox(conn, 1);

        LASSERTF(list_empty(&conn->gnc_mdd_list),
                "conn 0x%p->%s with active purgatory hold MDD %d\n",
                conn, libcfs_nid2str(peer->gnp_nid),
                kgnilnd_count_list(&conn->gnc_mdd_list));

        EXIT;
}

/* Instead of detaching everything from purgatory here we just mark the conn as
 * needing detach; when the reaper checks the conn the next time it will detach it.
 * Calling function requires write_lock held on kgn_peer_conn_lock
 */
void
kgnilnd_mark_for_detach_purgatory_all_locked(kgn_peer_t *peer)
{
        kgn_conn_t       *conn;

        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_in_purgatory && !conn->gnc_needs_detach) {
                        conn->gnc_needs_detach = 1;
                        kgnilnd_admin_addref(kgnilnd_data.kgn_npending_detach);
                }
        }
}

/* Calling function needs a write_lock held on kgn_peer_conn_lock */
void
kgnilnd_detach_purgatory_locked(kgn_conn_t *conn, struct list_head *conn_list)
{
        kgn_mbox_info_t *mbox = NULL;

        /* if needed, add the conn purgatory data to the list passed in */
        if (conn->gnc_in_purgatory) {
                CDEBUG(D_NET, "peer %p->%s purg_conn %p@%s mdd_list #tx %d\n",
                        conn->gnc_peer, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                        conn, kgnilnd_conn_state2str(conn),
                        kgnilnd_count_list(&conn->gnc_mdd_list));

                mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
                mbox->mbx_detach_of_purgatory = jiffies;

                /* conn->gnc_list is the entry point on peer->gnp_conns, so detaching it
                 * here removes it from the list of 'valid' peer connections.
                 * We put the current conn onto a list of conns to call kgnilnd_release_purgatory_locked()
                 * on, and as such the caller of kgnilnd_detach_purgatory_locked() now owns that conn, since it's not
                 * on the peer's conn_list anymore.
                 */

                list_del_init(&conn->gnc_list);

                /* NB - only unlinking if we set pending in del_peer_locked from admin or
                 * shutdown */
                if (kgnilnd_peer_active(conn->gnc_peer) &&
                    conn->gnc_peer->gnp_pending_unlink &&
                    kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
                        kgnilnd_unlink_peer_locked(conn->gnc_peer);
                }
                /* The reaper will not call detach unless the conn is fully through kgnilnd_complete_closed_conn.
                 * If the conn is not in a DONE state somehow we are attempting to detach even though
                 * the conn has not been fully cleaned up. If we detach while the conn is still closing
                 * we will end up with an orphaned connection that has a valid ep_handle but is not on a
                 * peer.
                 */

                LASSERTF(conn->gnc_state == GNILND_CONN_DONE, "Conn in invalid state %p@%s\n",
                                conn, kgnilnd_conn_state2str(conn));

                /* move from peer to the delayed release list */
                list_add_tail(&conn->gnc_list, conn_list);
        }
}

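/* Release every conn detached onto 'conn_list' above: give the mbox back,
 * notify LNet if the peer is truly dead, free any held MDDs, then drop the
 * purgatory ref on the conn. */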
void
kgnilnd_release_purgatory_list(struct list_head *conn_list)
{
        kgn_device_t            *dev;
        kgn_conn_t              *conn, *connN;
        kgn_mdd_purgatory_t     *gmp, *gmpN;

        list_for_each_entry_safe(conn, connN, conn_list, gnc_list) {
                dev = conn->gnc_device;

                kgnilnd_release_mbox(conn, -1);
                conn->gnc_in_purgatory = 0;

                list_del_init(&conn->gnc_list);

                /* gnc_needs_detach is set in kgnilnd_del_conn_or_peer. It is used to keep track
                 * of conns that have been marked for detach by kgnilnd_del_conn_or_peer.
                 * The function uses kgn_npending_detach to verify the conn has
                 * actually been detached.
                 */

                if (conn->gnc_needs_detach)
                        kgnilnd_admin_decref(kgnilnd_data.kgn_npending_detach);

                /* if this guy is really dead (we are doing release from reaper),
                 * make sure we tell LNet - if this is from other context,
                 * the checks in the function will prevent an errant
                 * notification */
                kgnilnd_peer_notify(conn->gnc_peer, conn->gnc_error);

                list_for_each_entry_safe(gmp, gmpN, &conn->gnc_mdd_list,
                                         gmp_list) {
                        CDEBUG(D_NET,
                               "dev %p releasing held mdd "LPX64"."LPX64"\n",
                               conn->gnc_device, gmp->gmp_map_key.qword1,
                               gmp->gmp_map_key.qword2);

                        atomic_dec(&dev->gnd_n_mdd_held);
                        kgnilnd_mem_mdd_release(conn->gnc_device->gnd_handle,
                                                &gmp->gmp_map_key);
                        /* ignoring the return code - if kgni/ghal can't find it
                         * it must be released already */

                        list_del_init(&gmp->gmp_list);
                        LIBCFS_FREE(gmp, sizeof(*gmp));
                }
                /* lose conn ref for purgatory */
                kgnilnd_conn_decref(conn);
        }
}

/* needs write_lock on kgnilnd_data.kgn_peer_conn_lock held */
void
kgnilnd_peer_increase_reconnect_locked(kgn_peer_t *peer)
{
        int current_to;

        current_to = peer->gnp_reconnect_interval;

        /* we'll try to reconnect fast the first time, then back-off */
        if (current_to == 0) {
                peer->gnp_reconnect_time = jiffies - 1;
                current_to = *kgnilnd_tunables.kgn_min_reconnect_interval;
        } else {
                peer->gnp_reconnect_time = jiffies + cfs_time_seconds(current_to);
                /* add 50% of min timeout & retry */
                current_to += *kgnilnd_tunables.kgn_min_reconnect_interval / 2;
        }

        current_to = MIN(current_to,
                                *kgnilnd_tunables.kgn_max_reconnect_interval);

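        /* e.g. assuming kgn_min_reconnect_interval is 60: the first retry
         * fires immediately, then successive retries wait 60s, 90s, 120s, ...
         * until clamped at kgn_max_reconnect_interval */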
        peer->gnp_reconnect_interval = current_to;
        CDEBUG(D_NET, "peer %s can reconnect at %lu interval %lu\n",
               libcfs_nid2str(peer->gnp_nid), peer->gnp_reconnect_time,
               peer->gnp_reconnect_interval);
}

/* needs kgnilnd_data.kgn_peer_conn_lock held */
kgn_peer_t *
kgnilnd_find_peer_locked(lnet_nid_t nid)
{
        struct list_head *peer_list = kgnilnd_nid2peerlist(nid);
        kgn_peer_t       *peer;

        /* Chopping nid down to only NIDADDR using LNET_NIDADDR so we only
         * have a single peer per device instead of a peer per nid/net combo.
         */

        list_for_each_entry(peer, peer_list, gnp_list) {
                if (LNET_NIDADDR(nid) != LNET_NIDADDR(peer->gnp_nid))
                        continue;

                CDEBUG(D_NET, "got peer [%p] -> %s c %d (%d)\n",
                       peer, libcfs_nid2str(nid),
                       peer->gnp_connecting,
                       atomic_read(&peer->gnp_refcount));
                return peer;
        }
        return NULL;
}

1241 /* need write_lock on kgn_peer_conn_lock */
1242 void
1243 kgnilnd_unlink_peer_locked(kgn_peer_t *peer)
1244 {
1245         LASSERTF(list_empty(&peer->gnp_conns),
1246                 "peer 0x%p->%s\n",
1247                  peer, libcfs_nid2str(peer->gnp_nid));
1248         LASSERTF(list_empty(&peer->gnp_tx_queue),
1249                 "peer 0x%p->%s\n",
1250                  peer, libcfs_nid2str(peer->gnp_nid));
1251         LASSERTF(kgnilnd_peer_active(peer),
1252                 "peer 0x%p->%s\n",
1253                  peer, libcfs_nid2str(peer->gnp_nid));
1254         CDEBUG(D_NET, "unlinking peer 0x%p->%s\n",
1255                 peer, libcfs_nid2str(peer->gnp_nid));
1256
1257         list_del_init(&peer->gnp_list);
1258         kgnilnd_data.kgn_peer_version++;
1259         kgnilnd_admin_decref(kgnilnd_data.kgn_npending_unlink);
1260         /* lose peerlist's ref */
1261         kgnilnd_peer_decref(peer);
1262 }
1263
1264 int
1265 kgnilnd_get_peer_info(int index,
1266                       kgn_peer_t **found_peer,
1267                       lnet_nid_t *id, __u32 *nic_addr,
1268                       int *refcount, int *connecting)
1269 {
1270         struct list_head  *ptmp;
1271         kgn_peer_t        *peer;
1272         int               i;
1273         int               rc = -ENOENT;
1274
1275         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1276
1277         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1278
1279                 list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
1280                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1281
1282                         if (index-- > 0)
1283                                 continue;
1284
1285                         CDEBUG(D_NET, "found peer %p (%s) at index %d\n",
1286                                peer, libcfs_nid2str(peer->gnp_nid), index);
1287
1288                         *found_peer  = peer;
1289                         *id          = peer->gnp_nid;
1290                         *nic_addr    = peer->gnp_host_id;
1291                         *refcount    = atomic_read(&peer->gnp_refcount);
1292                         *connecting  = peer->gnp_connecting;
1293
1294                         rc = 0;
1295                         goto out;
1296                 }
1297         }
1298 out:
1299         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1300         if (rc)
1301                 CDEBUG(D_NET, "no gni peer at index %d\n", index);
1302         return rc;
1303 }
1304
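/*
 * Illustrative sketch, not compiled: kgnilnd_get_peer_info() is an
 * index-based iterator.  A caller (the IOC_LIBCFS_GET_PEER handler below is
 * the real one) walks the table with index 0, 1, 2, ... until -ENOENT.
 */
#if 0
static void example_walk_peer_table(void)
{
	kgn_peer_t *peer;
	lnet_nid_t  nid;
	__u32       nic_addr;
	int         refcount, connecting, idx;

	for (idx = 0; ; idx++) {
		if (kgnilnd_get_peer_info(idx, &peer, &nid, &nic_addr,
					  &refcount, &connecting) != 0)
			break;  /* -ENOENT: walked off the end */
		/* ... report this peer ... */
	}
}
#endif
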
1305 /* requires write_lock on kgn_peer_conn_lock held */
1306 void
1307 kgnilnd_add_peer_locked(lnet_nid_t nid, kgn_peer_t *new_stub_peer, kgn_peer_t **peerp)
1308 {
1309         kgn_peer_t        *peer, *peer2;
1310
1311         LASSERTF(new_stub_peer != NULL, "bad stub peer for nid %s\n",
1312                  libcfs_nid2str(nid));
1313
1314         peer2 = kgnilnd_find_peer_locked(nid);
1315         if (peer2 != NULL) {
1316                 /* A peer was created during the lock transition, so drop
1317                  * the new one we created */
1318                 kgnilnd_peer_decref(new_stub_peer);
1319                 peer = peer2;
1320         } else {
1321                 peer = new_stub_peer;
1322                 /* peer table takes existing ref on peer */
1323
1324                 LASSERTF(!kgnilnd_peer_active(peer),
1325                         "peer 0x%p->%s already in peer table\n",
1326                         peer, libcfs_nid2str(peer->gnp_nid));
1327                 list_add_tail(&peer->gnp_list,
1328                               kgnilnd_nid2peerlist(nid));
1329                 kgnilnd_data.kgn_peer_version++;
1330         }
1331
1332         LASSERTF(peer->gnp_net != NULL, "peer 0x%p->%s with NULL net\n",
1333                  peer, libcfs_nid2str(peer->gnp_nid));
1334         *peerp = peer;
1335 }
1336
1337 int
1338 kgnilnd_add_peer(kgn_net_t *net, lnet_nid_t nid, kgn_peer_t **peerp)
1339 {
1340         kgn_peer_t        *peer;
1341         int                rc;
1342         int                node_state;
1343         ENTRY;
1344
1345         if (nid == LNET_NID_ANY)
1346                 RETURN(-EINVAL);
1347
1348         node_state = kgnilnd_get_node_state(LNET_NIDADDR(nid));
1349
1350         /* NB - this will not block during normal operations -
1351          * the only writer of this is in the startup/shutdown path. */
1352         rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
1353         if (!rc) {
1354                 rc = -ESHUTDOWN;
1355                 RETURN(rc);
1356         }
1357         rc = kgnilnd_create_peer_safe(&peer, nid, net, node_state);
1358         if (rc != 0) {
1359                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1360                 RETURN(rc);
1361         }
1362
1363         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1364         up_read(&kgnilnd_data.kgn_net_rw_sem);
1365
1366         kgnilnd_add_peer_locked(nid, peer, peerp);
1367
1368         CDEBUG(D_NET, "peer 0x%p->%s connecting %d\n",
1369                *peerp, libcfs_nid2str((*peerp)->gnp_nid),
1370                (*peerp)->gnp_connecting);
1371
1372         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1373         RETURN(0);
1374 }
1375
1376 /* needs write_lock on kgn_peer_conn_lock */
1377 void
1378 kgnilnd_cancel_peer_connect_locked(kgn_peer_t *peer, struct list_head *zombies)
1379 {
1380         kgn_tx_t        *tx, *txn;
1381
1382         /* we do care about the state of gnp_connecting - we could be between
1383          * reconnect attempts, so try to find the dgram and cancel the TX
1384          * anyway. If we are in the process of posting, DON'T do anything;
1385          * once it fails or succeeds we can nuke the connect attempt.
1386          * We have no idea where in kgnilnd_post_dgram we are, so we can't
1387          * attempt to cancel until the function is done.
1388          */
1389
1390         /* make sure peer isn't in the process of connecting or waiting for connect */
1391         spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1392         if (!(list_empty(&peer->gnp_connd_list))) {
1393                 list_del_init(&peer->gnp_connd_list);
1394                 /* remove connd ref */
1395                 kgnilnd_peer_decref(peer);
1396         }
1397         spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1398
1399         if (peer->gnp_connecting == GNILND_PEER_POSTING ||
1400             peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH) {
1400                 peer->gnp_connecting = GNILND_PEER_NEEDS_DEATH;
1401                 /* We are in the process of posting right now; the exchange set it
1402                  * up for us to cancel the connect, so we are finished for now */
1403         } else {
1404                 /* no need for an exchange; we have the peer lock and it's ready for us to nuke */
1405                 LASSERTF(peer->gnp_connecting != GNILND_PEER_POSTING,
1406                         "Peer in invalid state 0x%p->%s, connecting %d\n",
1407                         peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
1408                 peer->gnp_connecting = GNILND_PEER_IDLE;
1409                 set_mb(peer->gnp_last_dgram_errno, -ETIMEDOUT);
1410                 kgnilnd_find_and_cancel_dgram(peer->gnp_net->gnn_dev,
1411                                               peer->gnp_nid);
1412         }
1413
1414         /* The least we can do is nuke the TXs no matter what... */
1415         list_for_each_entry_safe(tx, txn, &peer->gnp_tx_queue, tx_list) {
1416                 kgnilnd_tx_del_state_locked(tx, peer, NULL,
1417                                            GNILND_TX_ALLOCD);
1418                 list_add_tail(&tx->tx_list, zombies);
1419         }
1420 }
1421
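/*
 * Illustrative sketch, not compiled: the branches above amount to a small
 * transition on gnp_connecting - if a datagram post is (or may be) in flight
 * we hand the cancel to the poster via NEEDS_DEATH, otherwise we own it and
 * go straight to IDLE.
 */
#if 0
static int example_cancel_transition(int connecting)
{
	if (connecting == GNILND_PEER_POSTING ||
	    connecting == GNILND_PEER_NEEDS_DEATH)
		return GNILND_PEER_NEEDS_DEATH;	/* poster finishes the cancel */
	return GNILND_PEER_IDLE;		/* we cancel the dgram ourselves */
}
#endif
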
1422 /* needs write_lock on kgn_peer_conn_lock */
1423 void
1424 kgnilnd_del_peer_locked(kgn_peer_t *peer, int error)
1425 {
1426         /* this peer could be passive and only held for purgatory,
1427          * take a ref to ensure it doesn't disappear in this function */
1428         kgnilnd_peer_addref(peer);
1429
1430         CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);
1431
1432         /* if purgatory release cleared it out, don't try again */
1433         if (kgnilnd_peer_active(peer)) {
1434                 /* always do this to allow kgnilnd_start_connect and
1435                  * kgnilnd_finish_connect to catch this before they
1436                  * wrap up their operations */
1437                 if (kgnilnd_can_unlink_peer_locked(peer)) {
1438                         /* already released purgatory, so only active
1439                          * conns hold it */
1440                         kgnilnd_unlink_peer_locked(peer);
1441                 } else {
1442                         kgnilnd_close_peer_conns_locked(peer, error);
1443                         /* peer unlinks itself when last conn is closed */
1444                 }
1445         }
1446
1447         /* we are done, release back to the wild */
1448         kgnilnd_peer_decref(peer);
1449 }
1450
1451 int
1452 kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
1453                           int error)
1454 {
1455         LIST_HEAD               (zombies);
1457         struct list_head        *ptmp, *pnxt;
1458         kgn_peer_t              *peer;
1459         int                     lo;
1460         int                     hi;
1461         int                     i;
1462         int                     rc = -ENOENT;
1463
1464         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1465
1466         if (nid != LNET_NID_ANY)
1467                 lo = hi = kgnilnd_nid2peerlist(nid) - kgnilnd_data.kgn_peers;
1468         else {
1469                 lo = 0;
1470                 hi = *kgnilnd_tunables.kgn_peer_hash_size - 1;
1471                 /* wildcards always succeed */
1472                 rc = 0;
1473         }
1474
1475         for (i = lo; i <= hi; i++) {
1476                 list_for_each_safe(ptmp, pnxt, &kgnilnd_data.kgn_peers[i]) {
1477                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1478
1479                         LASSERTF(peer->gnp_net != NULL,
1480                                 "peer %p (%s) with NULL net\n",
1481                                  peer, libcfs_nid2str(peer->gnp_nid));
1482
1483                         if (net != NULL && peer->gnp_net != net)
1484                                 continue;
1485
1486                         if (!(nid == LNET_NID_ANY || LNET_NIDADDR(peer->gnp_nid) == LNET_NIDADDR(nid)))
1487                                 continue;
1488
1489                         /* In both cases, we want to stop any in-flight
1490                          * connect attempts */
1491                         kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1492
1493                         switch (command) {
1494                         case GNILND_DEL_CONN:
1495                                 kgnilnd_close_peer_conns_locked(peer, error);
1496                                 break;
1497                         case GNILND_DEL_PEER:
1498                                 peer->gnp_pending_unlink = 1;
1499                                 kgnilnd_admin_addref(kgnilnd_data.kgn_npending_unlink);
1500                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1501                                 kgnilnd_del_peer_locked(peer, error);
1502                                 break;
1503                         case GNILND_CLEAR_PURGATORY:
1504                                 /* Mark everything ready for detach; the reaper will
1505                                  * clean up once we release the kgn_peer_conn_lock
1506                                  */
1507                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1508                                 peer->gnp_last_errno = -EISCONN;
1509                                 /* clear reconnect so he can reconnect soon */
1510                                 peer->gnp_reconnect_time = 0;
1511                                 peer->gnp_reconnect_interval = 0;
1512                                 break;
1513                         default:
1514                                 CERROR("bad command %d\n", command);
1515                                 LBUG();
1516                         }
1517                         /* we matched something */
1518                         rc = 0;
1519                 }
1520         }
1521
1522         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1523
1524         /* nuke peer TX */
1525         kgnilnd_txlist_done(&zombies, error);
1526
1527         /* This function does not normally return until the commands it initiated
1528          * have completed, since they have to work their way through the other
1529          * threads. In the case of shutdown, threads are not woken up until after
1530          * this call is initiated, so we cannot wait; we just return. The same
1531          * applies to a stack reset: the reset thread handles the closing.
1532          */
1533
1534         CFS_RACE(CFS_FAIL_GNI_RACE_RESET);
1535
1536         if (error == -ENOTRECOVERABLE || error == -ESHUTDOWN) {
1537                 return rc;
1538         }
1539
1540         i = 4;
1541         while (atomic_read(&kgnilnd_data.kgn_npending_conns)   ||
1542                atomic_read(&kgnilnd_data.kgn_npending_detach)  ||
1543                atomic_read(&kgnilnd_data.kgn_npending_unlink)) {
1544
1545                 cfs_pause(cfs_time_seconds(1));
1546                 i++;
1547
1548                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, "Waiting on %d peers %d closes %d detaches\n",
1549                                 atomic_read(&kgnilnd_data.kgn_npending_unlink),
1550                                 atomic_read(&kgnilnd_data.kgn_npending_conns),
1551                                 atomic_read(&kgnilnd_data.kgn_npending_detach));
1552         }
1553
1554         return rc;
1555 }
1556
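/*
 * Illustrative sketch, not compiled: the ((i & (-i)) == i) test used in the
 * wait loop above (and in the shutdown loops below) is true exactly when i
 * is a power of two, so the D_WARNING message fires only on power-of-two
 * passes instead of every second.
 */
#if 0
static int example_is_power_of_two(int i)
{
	/* i & -i isolates the lowest set bit; it equals i only for 2^n */
	return (i & (-i)) == i;
}
#endif
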
1557 kgn_conn_t *
1558 kgnilnd_get_conn_by_idx(int index)
1559 {
1560         kgn_peer_t        *peer;
1561         struct list_head  *ptmp;
1562         kgn_conn_t        *conn;
1563         struct list_head  *ctmp;
1564         int                i;
1565
1567         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1568                 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1569                 list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
1570
1571                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1572
1573                         list_for_each(ctmp, &peer->gnp_conns) {
1574                                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
1575
1576                                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1577                                         continue;
1578
1579                                 if (index-- > 0)
1580                                         continue;
1581
1582                                 CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
1583                                        libcfs_nid2str(conn->gnc_peer->gnp_nid),
1584                                        atomic_read(&conn->gnc_refcount));
1585                                 kgnilnd_conn_addref(conn);
1586                                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1587                                 return conn;
1588                         }
1589                 }
1590                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1591         }
1592
1593         return NULL;
1594 }
1595
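/*
 * Illustrative sketch, not compiled: kgnilnd_get_conn_by_idx() takes a
 * reference before dropping kgn_peer_conn_lock, so the conn cannot be freed
 * under the caller - who must then drop the ref, as the IOC_LIBCFS_GET_CONN
 * handler below does.
 */
#if 0
static void example_inspect_conn(int idx)
{
	kgn_conn_t *conn = kgnilnd_get_conn_by_idx(idx);

	if (conn != NULL) {
		/* ... safe to inspect conn here ... */
		kgnilnd_conn_decref(conn);	/* drop the iterator's ref */
	}
}
#endif
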
1596 int
1597 kgnilnd_get_conn_info(kgn_peer_t *peer,
1598                       int *device_id, __u64 *peerstamp,
1599                       int *tx_seq, int *rx_seq,
1600                       int *fmaq_len, int *nfma, int *nrdma)
1601 {
1602         kgn_conn_t        *conn;
1603         int               rc = 0;
1604
1605         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1606
1607         conn = kgnilnd_find_conn_locked(peer);
1608         if (conn == NULL) {
1609                 rc = -ENOENT;
1610                 goto out;
1611         }
1612
1613         *device_id = conn->gnc_device->gnd_host_id;
1614         *peerstamp = conn->gnc_peerstamp;
1615         *tx_seq = conn->gnc_tx_seq;
1616         *rx_seq = conn->gnc_rx_seq;
1617         *fmaq_len = kgnilnd_count_list(&conn->gnc_fmaq);
1618         *nfma = atomic_read(&conn->gnc_nlive_fma);
1619         *nrdma = atomic_read(&conn->gnc_nlive_rdma);
1620 out:
1621         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1622         return rc;
1623 }
1624
1625 /* needs write_lock on kgn_peer_conn_lock */
1626 int
1627 kgnilnd_close_peer_conns_locked(kgn_peer_t *peer, int why)
1628 {
1629         kgn_conn_t         *conn;
1630         struct list_head   *ctmp, *cnxt;
1631         int                 count = 0;
1632
1633         list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
1634                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
1635
1636                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1637                         continue;
1638
1639                 count++;
1640                 /* we mark gnc_needs_closing and increment kgn_npending_conns so that
1641                  * kgnilnd_del_conn_or_peer can wait on the other threads closing
1642                  * and cleaning up the connection.
1643                  */
1644                 if (!conn->gnc_needs_closing) {
1645                         conn->gnc_needs_closing = 1;
1646                         kgnilnd_admin_addref(kgnilnd_data.kgn_npending_conns);
1647                 }
1648                 kgnilnd_close_conn_locked(conn, why);
1649         }
1650         return count;
1651 }
1652
1653 int
1654 kgnilnd_report_node_state(lnet_nid_t nid, int down)
1655 {
1656         int         rc;
1657         kgn_peer_t  *peer, *new_peer;
1658         LIST_HEAD(zombies);
1659
1660         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1661         peer = kgnilnd_find_peer_locked(nid);
1662
1663         if (peer == NULL) {
1664                 int       i;
1665                 int       found_net = 0;
1666                 kgn_net_t *net;
1667
1668                 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1669
1670                 /* Don't add a peer for node up events */
1671                 if (down == GNILND_RCA_NODE_UP) {
1672                         return 0;
1673                 }
1674
1675                 /* find any valid net - we don't care which one... */
1676                 down_read(&kgnilnd_data.kgn_net_rw_sem);
1677                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
1678                         list_for_each_entry(net, &kgnilnd_data.kgn_nets[i],
1679                                             gnn_list) {
1680                                 found_net = 1;
1681                                 break;
1682                         }
1683
1684                         if (found_net) {
1685                                 break;
1686                         }
1687                 }
1688                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1689
1690                 if (!found_net) {
1691                         CNETERR("Could not find a net for nid %lld\n", nid);
1692                         return 1;
1693                 }
1694
1695                 /* The nid passed in does not yet contain the net portion.
1696                  * Let's build it up now
1697                  */
1698                 nid = LNET_MKNID(LNET_NIDNET(net->gnn_ni->ni_nid), nid);
1699                 rc = kgnilnd_add_peer(net, nid, &new_peer);
1700
1701                 if (rc) {
1702                         CNETERR("Could not add peer for nid %lld, rc %d\n",
1703                                 nid, rc);
1704                         return 1;
1705                 }
1706
1707                 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1708                 peer = kgnilnd_find_peer_locked(nid);
1709
1710                 if (peer == NULL) {
1711                         CNETERR("Could not find peer for nid %lld\n", nid);
1712                         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1713                         return 1;
1714                 }
1715         }
1716
1717         peer->gnp_down = down;
1718
1719         if (down == GNILND_RCA_NODE_DOWN) {
1720                 kgn_conn_t *conn;
1721
1722                 peer->gnp_down_event_time = jiffies;
1723                 kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1724                 conn = kgnilnd_find_conn_locked(peer);
1725
1726                 if (conn != NULL) {
1727                         kgnilnd_close_conn_locked(conn, -ENETRESET);
1728                 }
1729         } else {
1730                 peer->gnp_up_event_time = jiffies;
1731         }
1732
1733         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1734
1735         if (down == GNILND_RCA_NODE_DOWN) {
1736                 /* using ENETRESET so we don't get messages from
1737                  * kgnilnd_tx_done
1738                  */
1739                 kgnilnd_txlist_done(&zombies, -ENETRESET);
1740
1741                 if (*kgnilnd_tunables.kgn_peer_health) {
1742                         kgnilnd_peer_notify(peer, -ECONNRESET);
1743                 }
1744         }
1745
1746         CDEBUG(D_INFO, "marking nid %lld %s\n", nid, down ? "down" : "up");
1747         return 0;
1748 }
1749
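/*
 * Illustrative sketch, not compiled: RCA events carry only the address
 * portion of a NID, so kgnilnd_report_node_state() above grafts on the
 * network portion of any local NI before doing peer lookups.
 */
#if 0
static lnet_nid_t example_complete_nid(lnet_ni_t *ni, lnet_nid_t bare_nid)
{
	/* combine our NI's net with the event's bare address bits */
	return LNET_MKNID(LNET_NIDNET(ni->ni_nid), bare_nid);
}
#endif
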
1750 int
1751 kgnilnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
1752 {
1753         struct libcfs_ioctl_data *data = arg;
1754         kgn_net_t                *net = ni->ni_data;
1755         int                       rc = -EINVAL;
1756
1757         LASSERT(ni == net->gnn_ni);
1758
1759         switch (cmd) {
1760         case IOC_LIBCFS_GET_PEER: {
1761                 lnet_nid_t   nid = 0;
1762                 kgn_peer_t  *peer = NULL;
1763                 __u32 nic_addr = 0;
1764                 __u64 peerstamp = 0;
1765                 int peer_refcount = 0, peer_connecting = 0;
1766                 int device_id = 0;
1767                 int tx_seq = 0, rx_seq = 0;
1768                 int fmaq_len = 0, nfma = 0, nrdma = 0;
1769
1770                 rc = kgnilnd_get_peer_info(data->ioc_count, &peer,
1771                                            &nid, &nic_addr, &peer_refcount,
1772                                            &peer_connecting);
1773                 if (rc)
1774                         break;
1775
1776                 /* Barf */
1777                 /* LNET_MKNID is used to mask from LNET the multiplexing/demultiplexing of connections and peers.
1778                  * LNET assumes a conn and peer per net; LNET_MKNID/LNET_NIDADDR lets us show LNET what it
1779                  * wants to see instead of the underlying network actually used to send the data.
1780                  */
1781                 data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(nid));
1782                 data->ioc_flags  = peer_connecting;
1783                 data->ioc_count  = peer_refcount;
1784
1785                 rc = kgnilnd_get_conn_info(peer, &device_id, &peerstamp,
1786                                            &tx_seq, &rx_seq, &fmaq_len,
1787                                            &nfma, &nrdma);
1788
1789                 /* This is allowable - a persistent peer could not
1790                  * have a connection */
1791                 if (rc) {
1792                         /* flag to indicate we are not connected -
1793                          * need to print as such */
1794                         data->ioc_flags |= (1<<16);
1795                         rc = 0;
1796                 } else {
1797                         /* still barf */
1798                         data->ioc_net = device_id;
1799                         data->ioc_u64[0] = peerstamp;
1800                         data->ioc_u32[0] = fmaq_len;
1801                         data->ioc_u32[1] = nfma;
1802                         data->ioc_u32[2] = tx_seq;
1803                         data->ioc_u32[3] = rx_seq;
1804                         data->ioc_u32[4] = nrdma;
1805                 }
1806                 break;
1807         }
1808         case IOC_LIBCFS_ADD_PEER: {
1809                 /* just dummy value to allow using common interface */
1810                 kgn_peer_t      *peer;
1811                 rc = kgnilnd_add_peer(net, data->ioc_nid, &peer);
1812                 break;
1813         }
1814         case IOC_LIBCFS_DEL_PEER: {
1815                 /* NULL is passed in so it affects all peers in existence, without regard
1816                  * to network, as the peer may not exist on the network LNET believes it to be on.
1817                  */
1818                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1819                                               GNILND_DEL_PEER, -EUCLEAN);
1820                 break;
1821         }
1822         case IOC_LIBCFS_GET_CONN: {
1823                 kgn_conn_t *conn = kgnilnd_get_conn_by_idx(data->ioc_count);
1824
1825                 if (conn == NULL)
1826                         rc = -ENOENT;
1827                 else {
1828                         rc = 0;
1829                         /* LNET_MKNID is used to build the address LNET wants to see, instead of
1830                          * the generic connection that is actually used to send the data
1831                          */
1832                         data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(conn->gnc_peer->gnp_nid));
1833                         data->ioc_u32[0] = conn->gnc_device->gnd_id;
1834                         kgnilnd_conn_decref(conn);
1835                 }
1836                 break;
1837         }
1838         case IOC_LIBCFS_CLOSE_CONNECTION: {
1839                 /* use error = -ENETRESET to indicate it was lctl disconnect */
1840                 /* NULL is passed in so it affects all the nets as the connection is virtual
1841                  * and may not exist on the network LNET believes it to be on.
1842                  */
1843                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1844                                               GNILND_DEL_CONN, -ENETRESET);
1845                 break;
1846         }
1847         case IOC_LIBCFS_PUSH_CONNECTION: {
1848                 /* we use this to flush purgatory */
1849                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1850                                               GNILND_CLEAR_PURGATORY, -EUCLEAN);
1851                 break;
1852         }
1853         case IOC_LIBCFS_REGISTER_MYNID: {
1854                 /* Ignore if this is a noop */
1855                 if (data->ioc_nid == ni->ni_nid) {
1856                         rc = 0;
1857                 } else {
1858                         CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
1859                                libcfs_nid2str(data->ioc_nid),
1860                                libcfs_nid2str(ni->ni_nid));
1861                         rc = -EINVAL;
1862                 }
1863                 break;
1864         }
1865         }
1866
1867         return rc;
1868 }
1869
1870 void
1871 kgnilnd_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
1872 {
1873         kgn_net_t               *net = ni->ni_data;
1874         kgn_tx_t                *tx;
1875         kgn_peer_t              *peer = NULL;
1876         kgn_conn_t              *conn = NULL;
1877         lnet_process_id_t       id = {
1878                 .nid = nid,
1879                 .pid = LNET_PID_LUSTRE,
1880         };
1881         ENTRY;
1882
1883         /* I expect to find him, so only take a read lock */
1884         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1885         peer = kgnilnd_find_peer_locked(nid);
1886         if (peer != NULL) {
1887                 /* LIE if in a quiesce - we will update the timeouts after,
1888                  * but we don't want sends failing during it */
1889                 if (kgnilnd_data.kgn_quiesce_trigger) {
1890                         *when = jiffies;
1891                         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1892                         GOTO(out, 0);
1893                 }
1894
1895                 /* Update to best guess, might refine on later checks */
1896                 *when = peer->gnp_last_alive;
1897
1898                 /* we have a peer, how about a conn? */
1899                 conn = kgnilnd_find_conn_locked(peer);
1900
1901                 if (conn == NULL) {
1902                         /* if there is no conn, check peer last errno to see if clean disconnect
1903                          * - if it was, we lie to LNet because we believe a TX would complete
1904                          * on reconnect */
1905                         if (kgnilnd_conn_clean_errno(peer->gnp_last_errno)) {
1906                                 *when = jiffies;
1907                         }
1908                         /* we still want to fire a TX and new conn in this case */
1909                 } else {
1910                         /* gnp_last_alive is valid, run for the hills */
1911                         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1912                         GOTO(out, 0);
1913                 }
1914         }
1915         /* if we get here, either we have no peer or no conn for him, so fire off
1916          * new TX to trigger conn setup */
1917         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1918
1919         /* if we couldn't find him, we'll fire up a TX and get connected -
1920          * if we don't do this, after ni_peer_timeout, LNet will declare him dead.
1921          * So really we treat kgnilnd_query as a bit of a 'connect now' type
1922          * event because it'll only do this when it wants to send
1923          *
1924          * Use a real TX for this to get the proper gnp_tx_queue behavior, etc
1925          * normally we'd use kgnilnd_send_ctlmsg for this, but we don't really
1926          * care that this goes out quickly since we already know we need a new conn
1927          * formed */
1928         if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
1929                 RETURN_EXIT;    /* keep the ENTRY/EXIT tracing paired */
1930
1931         tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, ni->ni_nid);
1932         if (tx != NULL) {
1933                 kgnilnd_launch_tx(tx, net, &id);
1934         }
1935 out:
1936         CDEBUG(D_NETTRACE, "peer 0x%p->%s when %lu\n", peer,
1937                libcfs_nid2str(nid), *when);
1938         EXIT;
1939 }
1940
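/*
 * Summary sketch (commentary only): kgnilnd_query() resolves *when three
 * ways - a quiesce in progress lies with the current jiffies, a live conn
 * reports gnp_last_alive, and a cleanly-disconnected peer also lies with
 * jiffies; in both the no-conn and no-peer cases a NOOP TX is fired to
 * trigger a new conn.
 */
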
1941 int
1942 kgnilnd_dev_init(kgn_device_t *dev)
1943 {
1944         gni_return_t      rrc;
1945         int               rc = 0;
1946         unsigned int      cq_size;
1947         ENTRY;
1948
1949         /* size of these CQs should be able to accommodate the outgoing
1950          * RDMA and SMSG transactions.  Since we don't really know what we
1951          * need here, we'll take credits * 2 * 3 to allow a bunch.
1952          * We need to dig into this more with the performance work. */
1953         cq_size = *kgnilnd_tunables.kgn_credits * 2 * 3;
1954
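        /*
         * Worked example (commentary only): with a hypothetical credits
         * tunable of 256, cq_size works out to 256 * 2 * 3 = 1536 CQ
         * entries for each of the send CQs created below.
         */
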
1955         rrc = kgnilnd_cdm_create(dev->gnd_id, *kgnilnd_tunables.kgn_ptag,
1956                                  GNILND_COOKIE, 0,
1957                                  &dev->gnd_domain);
1958         if (rrc != GNI_RC_SUCCESS) {
1959                 CERROR("Can't create CDM %d (%d)\n", dev->gnd_id, rrc);
1960                 GOTO(failed, rc = -ENODEV);
1961         }
1962
1963         rrc = kgnilnd_cdm_attach(dev->gnd_domain, dev->gnd_id,
1964                                  &dev->gnd_host_id, &dev->gnd_handle);
1965         if (rrc != GNI_RC_SUCCESS) {
1966                 CERROR("Can't attach CDM to device %d (%d)\n",
1967                         dev->gnd_id, rrc);
1968                 GOTO(failed, rc = -ENODEV);
1969         }
1970
1971         /* a bit gross, but not much we can do - Aries Sim doesn't have
1972          * hardcoded NIC/NID that we can use */
1973         rc = kgnilnd_setup_nic_translation(dev->gnd_host_id);
1974         if (rc != 0)
1975                 GOTO(failed, rc = -ENODEV);
1976
1977         /* only dev 0 gets the errors - no need to reset the stack twice
1978          * - this works because we have a single PTAG, if we had more
1979          * then we'd need to have multiple handlers */
1980         if (dev->gnd_id == 0) {
1981                 rrc = kgnilnd_subscribe_errors(dev->gnd_handle,
1982                                                 GNI_ERRMASK_CRITICAL |
1983                                                 GNI_ERRMASK_UNKNOWN_TRANSACTION,
1984                                               0, NULL, kgnilnd_critical_error,
1985                                               &dev->gnd_err_handle);
1986                 if (rrc != GNI_RC_SUCCESS) {
1987                         CERROR("Can't subscribe for errors on device %d: rc %d\n",
1988                                 dev->gnd_id, rrc);
1989                         GOTO(failed, rc = -ENODEV);
1990                 }
1991
1992                 rc = kgnilnd_set_quiesce_callback(dev->gnd_handle,
1993                                                   kgnilnd_quiesce_end_callback);
1994                 if (rc != GNI_RC_SUCCESS) {
1995                         CERROR("Can't subscribe for quiesce callback on device %d: rc %d\n",
1996                                 dev->gnd_id, rc);
1997                         GOTO(failed, rc = -ENODEV);
1998                 }
1999         }
2000
2001         rc = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_IP, &kgnilnd_data.kgn_sock);
2002         if (rc < 0) {
2003                 CERROR("sock_create returned %d\n", rc);
2004                 GOTO(failed, rc);
2005         }
2006
2007         rc = kgnilnd_nicaddr_to_nid(dev->gnd_host_id, &dev->gnd_nid);
2008         if (rc < 0) {
2009                 /* log messages during startup */
2010                 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
2011                         CERROR("couldn't translate host_id 0x%x to nid. rc %d\n",
2012                                 dev->gnd_host_id, rc);
2013                 }
2014                 GOTO(failed, rc = -ESRCH);
2015         }
2016         CDEBUG(D_NET, "NIC %x -> NID "LPU64"\n", dev->gnd_host_id, dev->gnd_nid);
2017
2018         rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
2019                                 0, kgnilnd_device_callback,
2020                                 dev->gnd_id, &dev->gnd_snd_rdma_cqh);
2021         if (rrc != GNI_RC_SUCCESS) {
2022                 CERROR("Can't create rdma send cq size %u for device "
2023                        "%d (%d)\n", cq_size, dev->gnd_id, rrc);
2024                 GOTO(failed, rc = -EINVAL);
2025         }
2026
2027         rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
2028                         0, kgnilnd_device_callback, dev->gnd_id,
2029                         &dev->gnd_snd_fma_cqh);
2030         if (rrc != GNI_RC_SUCCESS) {
2031                 CERROR("Can't create fma send cq size %u for device %d (%d)\n",
2032                        cq_size, dev->gnd_id, rrc);
2033                 GOTO(failed, rc = -EINVAL);
2034         }
2035
2036         /* This one we size differently - overflows are possible and it needs to be
2037          * sized based on machine size */
2038         rrc = kgnilnd_cq_create(dev->gnd_handle,
2039                         *kgnilnd_tunables.kgn_fma_cq_size,
2040                         0, kgnilnd_device_callback, dev->gnd_id,
2041                         &dev->gnd_rcv_fma_cqh);
2042         if (rrc != GNI_RC_SUCCESS) {
2043                 CERROR("Can't create fma recv cq size %d for device %d (%d)\n",
2044                        *kgnilnd_tunables.kgn_fma_cq_size, dev->gnd_id, rrc);
2045                 GOTO(failed, rc = -EINVAL);
2046         }
2047
2048         RETURN(0);
2049
2050 failed:
2051         kgnilnd_dev_fini(dev);
2052         RETURN(rc);
2053 }
2054
2055 void
2056 kgnilnd_dev_fini(kgn_device_t *dev)
2057 {
2058         gni_return_t rrc;
2059         ENTRY;
2060
2061         /* At quiesce or reset time, need to loop through and clear gnd_ready_conns? */
2062         LASSERTF(list_empty(&dev->gnd_ready_conns) &&
2063                  list_empty(&dev->gnd_map_tx) &&
2064                  list_empty(&dev->gnd_rdmaq),
2065                  "dev 0x%p ready_conns %d@0x%p map_tx %d@0x%p rdmaq %d@0x%p\n",
2066                  dev, kgnilnd_count_list(&dev->gnd_ready_conns), &dev->gnd_ready_conns,
2067                  kgnilnd_count_list(&dev->gnd_map_tx), &dev->gnd_map_tx,
2068                  kgnilnd_count_list(&dev->gnd_rdmaq), &dev->gnd_rdmaq);
2069
2070         /* These should follow from tearing down all connections */
2071         LASSERTF(dev->gnd_map_nphys == 0 && dev->gnd_map_physnop == 0,
2072                 "%d physical mappings of %d pages still mapped\n",
2073                  dev->gnd_map_nphys, dev->gnd_map_physnop);
2074
2075         LASSERTF(dev->gnd_map_nvirt == 0 && dev->gnd_map_virtnob == 0,
2076                 "%d virtual mappings of "LPU64" bytes still mapped\n",
2077                  dev->gnd_map_nvirt, dev->gnd_map_virtnob);
2078
2079         LASSERTF(atomic_read(&dev->gnd_n_mdd) == 0 &&
2080                  atomic_read(&dev->gnd_n_mdd_held) == 0 &&
2081                  atomic64_read(&dev->gnd_nbytes_map) == 0,
2082                 "%d SMSG mappings of %ld bytes still mapped or held %d\n",
2083                  atomic_read(&dev->gnd_n_mdd),
2084                  atomic64_read(&dev->gnd_nbytes_map), atomic_read(&dev->gnd_n_mdd_held));
2085
2086         LASSERT(list_empty(&dev->gnd_map_list));
2087
2088         /* What other assertions needed to ensure all connections torn down ? */
2089
2090         /* check all counters == 0 (EP, MDD, etc) */
2091
2092         /* if we are resetting due to quiesce (stack reset), don't check
2093          * thread states */
2094         LASSERTF(kgnilnd_data.kgn_quiesce_trigger ||
2095                 atomic_read(&kgnilnd_data.kgn_nthreads) == 0,
2096                 "tried to shut down with threads active\n");
2097
2098         if (dev->gnd_rcv_fma_cqh) {
2099                 rrc = kgnilnd_cq_destroy(dev->gnd_rcv_fma_cqh);
2100                 LASSERTF(rrc == GNI_RC_SUCCESS,
2101                         "bad rc from gni_cq_destroy on rcv_fma_cqh: %d\n", rrc);
2102                 dev->gnd_rcv_fma_cqh = NULL;
2103         }
2104
2105         if (dev->gnd_snd_rdma_cqh) {
2106                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_rdma_cqh);
2107                 LASSERTF(rrc == GNI_RC_SUCCESS,
2108                         "bad rc from gni_cq_destroy on send_rdma_cqh: %d\n", rrc);
2109                 dev->gnd_snd_rdma_cqh = NULL;
2110         }
2111
2112         if (dev->gnd_snd_fma_cqh) {
2113                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_fma_cqh);
2114                 LASSERTF(rrc == GNI_RC_SUCCESS,
2115                         "bad rc from gni_cq_destroy on snd_fma_cqh: %d\n", rrc);
2116                 dev->gnd_snd_fma_cqh = NULL;
2117         }
2118
2119         if (dev->gnd_err_handle) {
2120                 rrc = kgnilnd_release_errors(dev->gnd_err_handle);
2121                 LASSERTF(rrc == GNI_RC_SUCCESS,
2122                         "bad rc from gni_release_errors: %d\n", rrc);
2123                 dev->gnd_err_handle = NULL;
2124         }
2125
2126         if (dev->gnd_domain) {
2127                 rrc = kgnilnd_cdm_destroy(dev->gnd_domain);
2128                 LASSERTF(rrc == GNI_RC_SUCCESS,
2129                         "bad rc from gni_cdm_destroy: %d\n", rrc);
2130                 dev->gnd_domain = NULL;
2131         }
2132
2133         /* the socket is created late in kgnilnd_dev_init, so it may not
2134          * exist if we are cleaning up a failed init */
2135         if (kgnilnd_data.kgn_sock)
2136                 sock_release(kgnilnd_data.kgn_sock);
2134
2135         EXIT;
2136 }
2137
2138
2139 int kgnilnd_base_startup(void)
2140 {
2141         struct timeval       tv;
2142         int                  pkmem = atomic_read(&libcfs_kmemory);
2143         int                  rc;
2144         int                  i, j;
2145         kgn_device_t        *dev;
2146         struct task_struct  *thrd;
2147         ENTRY;
2148
2149         LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_NOTHING,
2150                 "init %d\n", kgnilnd_data.kgn_init);
2151
2152         /* zero pointers, flags etc */
2153         memset(&kgnilnd_data, 0, sizeof(kgnilnd_data));
2154
2155         /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
2156          * a unique (for all time) connstamp so we can uniquely identify
2157          * the sender.  The connstamp is an incrementing counter
2158          * initialised with seconds + microseconds at startup time.  So we
2159          * rely on NOT creating connections more frequently on average than
2160          * 1MHz to ensure we don't use old connstamps when we reboot. */
2161         do_gettimeofday(&tv);
2162         kgnilnd_data.kgn_connstamp =
2163                  kgnilnd_data.kgn_peerstamp =
2164                         (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
2165
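        /*
         * Worked example (commentary only): tv_sec = 1350000000 and
         * tv_usec = 123 give a stamp of 1350000000000123; per the CAVEAT
         * EMPTOR above, stamps only repeat if connections are created
         * faster than one per microsecond on average across a reboot.
         */
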
2166         init_rwsem(&kgnilnd_data.kgn_net_rw_sem);
2167
2168         for (i = 0; i < GNILND_MAXDEVS; i++) {
2169                 kgn_device_t  *dev = &kgnilnd_data.kgn_devices[i];
2170
2171                 dev->gnd_id = i;
2172                 INIT_LIST_HEAD(&dev->gnd_ready_conns);
2173                 INIT_LIST_HEAD(&dev->gnd_map_tx);
2174                 INIT_LIST_HEAD(&dev->gnd_fma_buffs);
2175                 mutex_init(&dev->gnd_cq_mutex);
2176                 mutex_init(&dev->gnd_fmablk_mutex);
2177                 spin_lock_init(&dev->gnd_fmablk_lock);
2178                 init_waitqueue_head(&dev->gnd_waitq);
2179                 init_waitqueue_head(&dev->gnd_dgram_waitq);
2180                 init_waitqueue_head(&dev->gnd_dgping_waitq);
2181                 spin_lock_init(&dev->gnd_lock);
2182                 INIT_LIST_HEAD(&dev->gnd_map_list);
2183                 spin_lock_init(&dev->gnd_map_lock);
2184                 atomic_set(&dev->gnd_nfmablk, 0);
2185                 atomic_set(&dev->gnd_fmablk_vers, 1);
2186                 atomic_set(&dev->gnd_neps, 0);
2187                 atomic_set(&dev->gnd_canceled_dgrams, 0);
2188                 INIT_LIST_HEAD(&dev->gnd_connd_peers);
2189                 spin_lock_init(&dev->gnd_connd_lock);
2190                 spin_lock_init(&dev->gnd_dgram_lock);
2191                 spin_lock_init(&dev->gnd_rdmaq_lock);
2192                 INIT_LIST_HEAD(&dev->gnd_rdmaq);
2193                 init_rwsem(&dev->gnd_conn_sem);
2194
2195                 /* alloc & setup nid based dgram table */
2196                 LIBCFS_ALLOC(dev->gnd_dgrams,
2197                             sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2198
2199                 if (dev->gnd_dgrams == NULL)
2200                         GOTO(failed, rc = -ENOMEM);
2201
2202                 /* NB: use a separate index - don't clobber the device loop counter */
2203                 for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++) {
2204                         INIT_LIST_HEAD(&dev->gnd_dgrams[j]);
2205                 }
2205                 atomic_set(&dev->gnd_ndgrams, 0);
2206                 atomic_set(&dev->gnd_nwcdgrams, 0);
2207                 /* setup timer for RDMAQ processing */
2208                 setup_timer(&dev->gnd_rdmaq_timer, kgnilnd_schedule_device_timer,
2209                             (unsigned long)dev);
2210
2211                 /* setup timer for mapping processing */
2212                 setup_timer(&dev->gnd_map_timer, kgnilnd_schedule_device_timer,
2213                             (unsigned long)dev);
2214
2215         }
2216
2217         /* CQID 0 isn't allowed, set to MAX_MSG_ID - 1 to check for conflicts early */
2218         kgnilnd_data.kgn_next_cqid = GNILND_MAX_MSG_ID - 1;
2219         kgnilnd_data.kgn_new_min_timeout = *kgnilnd_tunables.kgn_timeout;
2220         init_waitqueue_head(&kgnilnd_data.kgn_reaper_waitq);
2221         init_waitqueue_head(&kgnilnd_data.kgn_ruhroh_waitq);
2222         spin_lock_init(&kgnilnd_data.kgn_reaper_lock);
2223
2224         mutex_init(&kgnilnd_data.kgn_quiesce_mutex);
2225         atomic_set(&kgnilnd_data.kgn_nquiesce, 0);
2226         atomic_set(&kgnilnd_data.kgn_npending_conns, 0);
2227         atomic_set(&kgnilnd_data.kgn_npending_unlink, 0);
2228         atomic_set(&kgnilnd_data.kgn_npending_detach, 0);
2229         atomic_set(&kgnilnd_data.kgn_rev_offset, 0);
2230         atomic_set(&kgnilnd_data.kgn_rev_length, 0);
2231         atomic_set(&kgnilnd_data.kgn_rev_copy_buff, 0);
2232
2233         /* OK to call kgnilnd_api_shutdown() to cleanup now */
2234         kgnilnd_data.kgn_init = GNILND_INIT_DATA;
2235         try_module_get(THIS_MODULE);
2236
2237         rwlock_init(&kgnilnd_data.kgn_peer_conn_lock);
2238
2239         LIBCFS_ALLOC(kgnilnd_data.kgn_peers,
2240                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2241
2242         if (kgnilnd_data.kgn_peers == NULL)
2243                 GOTO(failed, rc = -ENOMEM);
2244
2245         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2246                 INIT_LIST_HEAD(&kgnilnd_data.kgn_peers[i]);
2247         }
2248
2249         LIBCFS_ALLOC(kgnilnd_data.kgn_conns,
2250                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2251
2252         if (kgnilnd_data.kgn_conns == NULL)
2253                 GOTO(failed, rc = -ENOMEM);
2254
2255         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2256                 INIT_LIST_HEAD(&kgnilnd_data.kgn_conns[i]);
2257         }
2258
2259         LIBCFS_ALLOC(kgnilnd_data.kgn_nets,
2260                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_net_hash_size);
2261
2262         if (kgnilnd_data.kgn_nets == NULL)
2263                 GOTO(failed, rc = -ENOMEM);
2264
2265         for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
2266                 INIT_LIST_HEAD(&kgnilnd_data.kgn_nets[i]);
2267         }
2268
2269         kgnilnd_data.kgn_mbox_cache =
2270                 kmem_cache_create("kgn_mbox_block", KMALLOC_MAX_SIZE, 0,
2271                                   SLAB_HWCACHE_ALIGN, NULL);
2272         if (kgnilnd_data.kgn_mbox_cache == NULL) {
2273                 CERROR("Can't create slab for physical mbox blocks\n");
2274                 GOTO(failed, rc = -ENOMEM);
2275         }
2276
2277         kgnilnd_data.kgn_rx_cache =
2278                 kmem_cache_create("kgn_rx_t", sizeof(kgn_rx_t), 0, 0, NULL);
2279         if (kgnilnd_data.kgn_rx_cache == NULL) {
2280                 CERROR("Can't create slab for kgn_rx_t descriptors\n");
2281                 GOTO(failed, rc = -ENOMEM);
2282         }
2283
2284         kgnilnd_data.kgn_tx_cache =
2285                 kmem_cache_create("kgn_tx_t", sizeof(kgn_tx_t), 0, 0, NULL);
2286         if (kgnilnd_data.kgn_tx_cache == NULL) {
2287                 CERROR("Can't create slab for kgn_tx_t\n");
2288                 GOTO(failed, rc = -ENOMEM);
2289         }
2290
2291         kgnilnd_data.kgn_tx_phys_cache =
2292                 kmem_cache_create("kgn_tx_phys",
2293                                    LNET_MAX_IOV * sizeof(gni_mem_segment_t),
2294                                    0, 0, NULL);
2295         if (kgnilnd_data.kgn_tx_phys_cache == NULL) {
2296                 CERROR("Can't create slab for kgn_tx_phys\n");
2297                 GOTO(failed, rc = -ENOMEM);
2298         }
2299
2300         kgnilnd_data.kgn_dgram_cache =
2301                 kmem_cache_create("kgn_dgram_t", sizeof(kgn_dgram_t), 0, 0, NULL);
2302         if (kgnilnd_data.kgn_dgram_cache == NULL) {
2303                 CERROR("Can't create slab for outgoing datagrams\n");
2304                 GOTO(failed, rc = -ENOMEM);
2305         }
2306
2307         /* allocate an array of per-CPU pointers; each CPU then gets its own
2308          * LNET_MAX_IOV array of page pointers below */
2309         kgnilnd_data.kgn_cksum_map_pages = kzalloc(num_possible_cpus() * sizeof (struct page *),
2310                                                    GFP_KERNEL);
2311         if (kgnilnd_data.kgn_cksum_map_pages == NULL) {
2312                 CERROR("Can't allocate vmap cksum pages\n");
2313                 GOTO(failed, rc = -ENOMEM);
2314         }
2315         kgnilnd_data.kgn_cksum_npages = num_possible_cpus();
2317
2318         for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
2319                 kgnilnd_data.kgn_cksum_map_pages[i] = kmalloc(LNET_MAX_IOV * sizeof (struct page *),
2320                                                               GFP_KERNEL);
2321                 if (kgnilnd_data.kgn_cksum_map_pages[i] == NULL) {
2322                         CERROR("Can't allocate vmap cksum pages for cpu %d\n", i);
2323                         GOTO(failed, rc = -ENOMEM);
2324                 }
2325         }
2326
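        /*
         * Layout sketch (commentary only): kgn_cksum_map_pages is an array
         * of per-CPU arrays - checksumming on CPU n vmaps up to
         * LNET_MAX_IOV pages through kgn_cksum_map_pages[n].
         */
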
2327         LASSERT(kgnilnd_data.kgn_ndevs == 0);
2328
2329         /* Use all available GNI devices */
2330         for (i = 0; i < GNILND_MAXDEVS; i++) {
2331                 dev = &kgnilnd_data.kgn_devices[kgnilnd_data.kgn_ndevs];
2332
2333                 rc = kgnilnd_dev_init(dev);
2334                 if (rc == 0) {
2335                         /* Increment here so base_shutdown cleans it up */
2336                         kgnilnd_data.kgn_ndevs++;
2337
2338                         rc = kgnilnd_allocate_phys_fmablk(dev);
2339                         if (rc)
2340                                 GOTO(failed, rc);
2341                 }
2342         }
2343
2344         if (kgnilnd_data.kgn_ndevs == 0) {
2345                 CERROR("Can't initialise any GNI devices\n");
2346                 GOTO(failed, rc = -ENODEV);
2347         }
2348
2349         rc = kgnilnd_thread_start(kgnilnd_reaper, NULL, "kgnilnd_rpr", 0);
2350         if (rc != 0) {
2351                 CERROR("Can't spawn gnilnd reaper: %d\n", rc);
2352                 GOTO(failed, rc);
2353         }
2354
2355         rc = kgnilnd_start_rca_thread();
2356         if (rc != 0) {
2357                 CERROR("Can't spawn gnilnd rca: %d\n", rc);
2358                 GOTO(failed, rc);
2359         }
2360
2361         /*
2362          * Start ruhroh thread.  We can't use kgnilnd_thread_start() because
2363          * we don't want this thread included in kgnilnd_data.kgn_nthreads
2364          * count.  This thread controls quiesce, so it mustn't
2365          * quiesce itself.
2366          */
2367         thrd = kthread_run(kgnilnd_ruhroh_thread, NULL, "%s_%02d", "kgnilnd_rr", 0);
2368         if (IS_ERR(thrd)) {
2369                 rc = PTR_ERR(thrd);
2370                 CERROR("Can't spawn gnilnd ruhroh thread: %d\n", rc);
2371                 GOTO(failed, rc);
2372         }
2373
2374         /* threads will load balance across devs as they are available */
2375         for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
2376                 rc = kgnilnd_thread_start(kgnilnd_scheduler, (void *)((long)i),
2377                                           "kgnilnd_sd", i);
2378                 if (rc != 0) {
2379                         CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
2380                                i, rc);
2381                         GOTO(failed, rc);
2382                 }
2383         }
2384
2385         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2386                 dev = &kgnilnd_data.kgn_devices[i];
2387                 rc = kgnilnd_thread_start(kgnilnd_dgram_mover, dev,
2388                                           "kgnilnd_dg", dev->gnd_id);
2389                 if (rc != 0) {
2390                         CERROR("Can't spawn gnilnd dgram_mover[%d]: %d\n",
2391                                dev->gnd_id, rc);
2392                         GOTO(failed, rc);
2393                 }
2394
2395                 rc = kgnilnd_thread_start(kgnilnd_dgram_waitq, dev,
2396                                           "kgnilnd_dgn", dev->gnd_id);
2397                 if (rc != 0) {
2398                         CERROR("Can't spawn gnilnd dgram_waitq[%d]: %d\n",
2399                                 dev->gnd_id, rc);
2400                         GOTO(failed, rc);
2401                 }
2402
2403                 rc = kgnilnd_setup_wildcard_dgram(dev);
2404
2405                 if (rc != 0) {
2406                         CERROR("Can't create wildcard dgrams[%d]: %d\n",
2407                                 dev->gnd_id, rc);
2408                         GOTO(failed, rc);
2409                 }
2410         }
2411
2414         /* flag everything initialised */
2415         kgnilnd_data.kgn_init = GNILND_INIT_ALL;
2416         /*****************************************************/
2417
2418         CDEBUG(D_MALLOC, "initial kmem %d\n", pkmem);
2419         RETURN(0);
2420
2421 failed:
2422         kgnilnd_base_shutdown();
2423         kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
2424         RETURN(rc);
2425 }
2426
2427 void
2428 kgnilnd_base_shutdown(void)
2429 {
2430         int                     i, j;
2431         ENTRY;
2432
2433         while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_PAUSE_SHUTDOWN, 1)) {};
2434
2435         kgnilnd_data.kgn_wc_kill = 1;
2436
2437         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2438                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2439                 kgnilnd_cancel_wc_dgrams(dev);
2440                 kgnilnd_cancel_dgrams(dev);
2441                 kgnilnd_del_conn_or_peer(NULL, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
2442                 kgnilnd_wait_for_canceled_dgrams(dev);
2443         }
2444
2445         /* We need to verify there are no conns left before we let the threads
2446          * shut down otherwise we could clean up the peers but still have
2447          * some outstanding conns due to orphaned datagram conns that are
2448          * being cleaned up.
2449          */
2450         i = 2;
2451         while (atomic_read(&kgnilnd_data.kgn_nconns) != 0) {
2452                 i++;
2453
2454                 for (j = 0; j < kgnilnd_data.kgn_ndevs; ++j) {
2455                         kgn_device_t *dev = &kgnilnd_data.kgn_devices[j];
2456                         kgnilnd_schedule_device(dev);
2457                 }
2458
2459                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2460                         "Waiting for conns to be cleaned up %d\n", atomic_read(&kgnilnd_data.kgn_nconns));
2461                 cfs_pause(cfs_time_seconds(1));
2462         }
2463         /* Peer state all cleaned up BEFORE setting shutdown, so threads don't
2464          * have to worry about shutdown races.  NB connections may be created
2465          * while there are still active connds, but these will be temporary
2466          * since peer creation always fails after the listener has started to
2467          * shut down.
2468          * All peers should have been cleared out on the nets */
2469         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2470                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2471
2472         /* Wait for the ruhroh thread to shut down. */
2473         kgnilnd_data.kgn_ruhroh_shutdown = 1;
2474         wake_up(&kgnilnd_data.kgn_ruhroh_waitq);
2475         i = 2;
2476         while (kgnilnd_data.kgn_ruhroh_running != 0) {
2477                 i++;
2478                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2479                        "Waiting for ruhroh thread to terminate\n");
2480                 cfs_pause(cfs_time_seconds(1));
2481         }
2482
2483         /* Flag threads to terminate */
2484         kgnilnd_data.kgn_shutdown = 1;
2485
2486         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2487                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2488
2489                 /* should clear all the MDDs */
2490                 kgnilnd_unmap_fma_blocks(dev);
2491
2492                 kgnilnd_schedule_device(dev);
2493                 wake_up_all(&dev->gnd_dgram_waitq);
2494                 wake_up_all(&dev->gnd_dgping_waitq);
2495                 LASSERT(list_empty(&dev->gnd_connd_peers));
2496         }
2497
2498         spin_lock(&kgnilnd_data.kgn_reaper_lock);
2499         wake_up_all(&kgnilnd_data.kgn_reaper_waitq);
2500         spin_unlock(&kgnilnd_data.kgn_reaper_lock);
2501
2502         kgnilnd_wakeup_rca_thread();
2503
2504         /* Wait for threads to exit */
2505         i = 2;
2506         while (atomic_read(&kgnilnd_data.kgn_nthreads) != 0) {
2507                 i++;
2508                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2509                        "Waiting for %d threads to terminate\n",
2510                        atomic_read(&kgnilnd_data.kgn_nthreads));
2511                 cfs_pause(cfs_time_seconds(1));
2512         }
2513
2514         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2515                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2516
2517         if (kgnilnd_data.kgn_peers != NULL) {
2518                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
2519                         LASSERT(list_empty(&kgnilnd_data.kgn_peers[i]));
2520
2521                 LIBCFS_FREE(kgnilnd_data.kgn_peers,
2522                             sizeof (struct list_head) *
2523                             *kgnilnd_tunables.kgn_peer_hash_size);
2524         }
2525
2526         down_write(&kgnilnd_data.kgn_net_rw_sem);
2527         if (kgnilnd_data.kgn_nets != NULL) {
2528                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++)
2529                         LASSERT(list_empty(&kgnilnd_data.kgn_nets[i]));
2530
2531                 LIBCFS_FREE(kgnilnd_data.kgn_nets,
2532                             sizeof (struct list_head) *
2533                             *kgnilnd_tunables.kgn_net_hash_size);
2534         }
2535         up_write(&kgnilnd_data.kgn_net_rw_sem);
2536
2537         LASSERTF(atomic_read(&kgnilnd_data.kgn_nconns) == 0,
2538                 "conns left %d\n", atomic_read(&kgnilnd_data.kgn_nconns));
2539
2540         if (kgnilnd_data.kgn_conns != NULL) {
2541                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
2542                         LASSERT(list_empty(&kgnilnd_data.kgn_conns[i]));
2543
2544                 LIBCFS_FREE(kgnilnd_data.kgn_conns,
2545                             sizeof (struct list_head) *
2546                             *kgnilnd_tunables.kgn_peer_hash_size);
2547         }
2548
        for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
                kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
                kgnilnd_dev_fini(dev);

                LASSERTF(atomic_read(&dev->gnd_ndgrams) == 0,
                        "dgrams left %d\n", atomic_read(&dev->gnd_ndgrams));

                if (dev->gnd_dgrams != NULL) {
                        int j;

                        /* use a separate index here - reusing 'i' would
                         * clobber the device loop counter above */
                        for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++)
                                LASSERT(list_empty(&dev->gnd_dgrams[j]));

                        LIBCFS_FREE(dev->gnd_dgrams,
                                    sizeof (struct list_head) *
                                    *kgnilnd_tunables.kgn_peer_hash_size);
                }

                kgnilnd_free_phys_fmablk(dev);
        }

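        /* All objects must have been freed above, or kmem_cache_destroy()
         * will complain about busy slabs. */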
        if (kgnilnd_data.kgn_mbox_cache != NULL)
                kmem_cache_destroy(kgnilnd_data.kgn_mbox_cache);

        if (kgnilnd_data.kgn_rx_cache != NULL)
                kmem_cache_destroy(kgnilnd_data.kgn_rx_cache);

        if (kgnilnd_data.kgn_tx_cache != NULL)
                kmem_cache_destroy(kgnilnd_data.kgn_tx_cache);

        if (kgnilnd_data.kgn_tx_phys_cache != NULL)
                kmem_cache_destroy(kgnilnd_data.kgn_tx_phys_cache);

        if (kgnilnd_data.kgn_dgram_cache != NULL)
                kmem_cache_destroy(kgnilnd_data.kgn_dgram_cache);

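        /* free the scratch pages used when mapping payloads for
         * checksumming */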
        if (kgnilnd_data.kgn_cksum_map_pages != NULL) {
                for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
                        if (kgnilnd_data.kgn_cksum_map_pages[i] != NULL) {
                                kfree(kgnilnd_data.kgn_cksum_map_pages[i]);
                        }
                }
                kfree(kgnilnd_data.kgn_cksum_map_pages);
        }

        CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
               atomic_read(&libcfs_kmemory));

        kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
        module_put(THIS_MODULE);

        EXIT;
}

int
kgnilnd_startup(lnet_ni_t *ni)
{
        int               rc, devno;
        kgn_net_t        *net;
        ENTRY;

        LASSERTF(ni->ni_lnd == &the_kgnilnd,
                "bad LND 0x%p != the_kgnilnd @ 0x%p\n",
                ni->ni_lnd, &the_kgnilnd);

        if (kgnilnd_data.kgn_init == GNILND_INIT_NOTHING) {
                rc = kgnilnd_base_startup();
                if (rc != 0)
                        RETURN(rc);
        }

        /* Serialize with shutdown. */
        mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);

        LIBCFS_ALLOC(net, sizeof(*net));
        if (net == NULL) {
                CERROR("could not allocate net for new interface instance\n");
                /* no need to clean up the CDM... */
                GOTO(failed, rc = -ENOMEM);
        }
        INIT_LIST_HEAD(&net->gnn_list);
        ni->ni_data = net;
        net->gnn_ni = ni;
        ni->ni_maxtxcredits = *kgnilnd_tunables.kgn_credits;
        ni->ni_peertxcredits = *kgnilnd_tunables.kgn_peer_credits;

        if (*kgnilnd_tunables.kgn_peer_health) {
                int     fudge;
                int     timeout;
                /* give this a bit of leeway - we don't have a hard timeout
                 * as we only check timeouts periodically - see comment in kgnilnd_reaper */
                fudge = (GNILND_TO2KA(*kgnilnd_tunables.kgn_timeout) / GNILND_REAPER_NCHECKS);
                timeout = *kgnilnd_tunables.kgn_timeout + fudge;
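                /* e.g. (hypothetical values) if kgn_timeout were 60s and the
                 * keepalive/NCHECKS fudge came to 15s, any peer_timeout below
                 * the 75s floor would be rejected below */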

                if (*kgnilnd_tunables.kgn_peer_timeout >= timeout) {
                        ni->ni_peertimeout = *kgnilnd_tunables.kgn_peer_timeout;
                } else if (*kgnilnd_tunables.kgn_peer_timeout > -1) {
                        LCONSOLE_ERROR("Peer_timeout is set to %d but needs to be >= %d\n",
                                        *kgnilnd_tunables.kgn_peer_timeout,
                                        timeout);
                        ni->ni_data = NULL;
                        LIBCFS_FREE(net, sizeof(*net));
                        GOTO(failed, rc = -EINVAL);
                } else {
                        ni->ni_peertimeout = timeout;
                }

                LCONSOLE_INFO("Enabling LNet peer health for gnilnd, timeout %ds\n",
                              ni->ni_peertimeout);
        }


        atomic_set(&net->gnn_refcount, 1);

        /* if we have multiple devices, spread the nets around */
        net->gnn_netnum = LNET_NETNUM(LNET_NIDNET(ni->ni_nid));

        devno = LNET_NIDNET(ni->ni_nid) % GNILND_MAXDEVS;
        net->gnn_dev = &kgnilnd_data.kgn_devices[devno];

        /* allocate a 'dummy' cdm for datagram use. We can only have a single
         * datagram between a nid:inst_id and nid2:inst_id. The fake cdm
         * gives us an additional inst_id to use, allowing the datagrams to flow
         * like rivers of honey and beer */

        /* the instance id for the cdm is the NETNUM offset by MAXDEVS -
         * ensuring we'll have a unique id */

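        /* rebuild the NID so its address portion is this device's physical
         * NID while the LNet network part stays intact */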
        ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), net->gnn_dev->gnd_nid);
        CDEBUG(D_NET, "adding net %p nid=%s on dev %d\n",
                net, libcfs_nid2str(ni->ni_nid), net->gnn_dev->gnd_id);
        /* until gnn_list is set, we need to clean up after ourselves, as
         * kgnilnd_shutdown would just get confused */

        down_write(&kgnilnd_data.kgn_net_rw_sem);
        list_add_tail(&net->gnn_list, kgnilnd_netnum2netlist(net->gnn_netnum));
        up_write(&kgnilnd_data.kgn_net_rw_sem);

        /* we need a separate thread to call probe_wait_by_id until
         * we get a function callback notifier from kgni */
        mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
        RETURN(0);
 failed:
        mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
        kgnilnd_shutdown(ni);
        RETURN(rc);
}

void
kgnilnd_shutdown(lnet_ni_t *ni)
{
        kgn_net_t     *net = ni->ni_data;
        int           i;
        int           rc;
        ENTRY;

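        /* fault-injection hook: lets tests force two threads through this
         * shutdown path at once (see CFS_FAIL_GNI_SR_DOWN_RACE) */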
        CFS_RACE(CFS_FAIL_GNI_SR_DOWN_RACE);

        LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_ALL,
                "init %d\n", kgnilnd_data.kgn_init);

        /* Serialize with startup. */
        mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
        CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
               atomic_read(&libcfs_kmemory));

        if (net == NULL) {
                CERROR("got NULL net for ni %p\n", ni);
                GOTO(out, rc = -EINVAL);
        }

        LASSERTF(ni == net->gnn_ni,
                "ni %p gnn_ni %p\n", ni, net->gnn_ni);

        ni->ni_data = NULL;

        LASSERT(!net->gnn_shutdown);
        LASSERTF(atomic_read(&net->gnn_refcount) != 0,
                "net %p refcount %d\n",
                 net, atomic_read(&net->gnn_refcount));

        if (!list_empty(&net->gnn_list)) {
                /* serialize with peer creation */
                down_write(&kgnilnd_data.kgn_net_rw_sem);
                net->gnn_shutdown = 1;
                up_write(&kgnilnd_data.kgn_net_rw_sem);

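                /* tear down this net's traffic: cancel its datagrams, then
                 * drop all of its peers (and their conns) */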
                kgnilnd_cancel_net_dgrams(net);

                kgnilnd_del_conn_or_peer(net, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);

                /* if we are quiesced, we need to wake up - those threads
                 * must be alive to release peers, etc. */
                if (GNILND_IS_QUIESCED) {
                        set_mb(kgnilnd_data.kgn_quiesce_trigger, GNILND_QUIESCE_IDLE);
                        kgnilnd_quiesce_wait("shutdown");
                }

                kgnilnd_wait_for_canceled_dgrams(net->gnn_dev);

                /* Wait until the net's reference count drops to 1, then
                 * release the final reference (ours); this makes sure
                 * everything else is done before we free the net.
                 */
                i = 4;
                while (atomic_read(&net->gnn_refcount) != 1) {
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
                                "Waiting for %d references to clear on net %d\n",
                                atomic_read(&net->gnn_refcount),
                                net->gnn_netnum);
                        cfs_pause(cfs_time_seconds(1));
                }

                /* release ref from kgnilnd_startup */
                kgnilnd_net_decref(net);
                /* serialize with reaper and conn_task looping */
                down_write(&kgnilnd_data.kgn_net_rw_sem);
                list_del_init(&net->gnn_list);
                up_write(&kgnilnd_data.kgn_net_rw_sem);
        }


        /* not locking, this can't race with writers */
        LASSERTF(atomic_read(&net->gnn_refcount) == 0,
                "net %p refcount %d\n",
                 net, atomic_read(&net->gnn_refcount));
        LIBCFS_FREE(net, sizeof(*net));

out:
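        /* If every net hash chain is now empty, this was the last net, so
         * tear down the rest of the LND. */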
        down_read(&kgnilnd_data.kgn_net_rw_sem);
        for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                if (!list_empty(&kgnilnd_data.kgn_nets[i])) {
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        break;
                }

                if (i == *kgnilnd_tunables.kgn_net_hash_size - 1) {
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        kgnilnd_base_shutdown();
                }
        }
        CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
               atomic_read(&libcfs_kmemory));

        mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
        EXIT;
}

void __exit
kgnilnd_module_fini(void)
{
        lnet_unregister_lnd(&the_kgnilnd);
        kgnilnd_proc_fini();
        kgnilnd_remove_sysctl();
        kgnilnd_tunables_fini();
}

int __init
kgnilnd_module_init(void)
{
        int    rc;

        rc = kgnilnd_tunables_init();
        if (rc != 0)
                return rc;

        printk(KERN_INFO "Lustre: kgnilnd build version: "KGNILND_BUILD_REV"\n");

        kgnilnd_insert_sysctl();
        kgnilnd_proc_init();

        lnet_register_lnd(&the_kgnilnd);

        return 0;
}

MODULE_AUTHOR("Cray, Inc. <nic@cray.com>");
MODULE_DESCRIPTION("Kernel Gemini LND v"KGNILND_BUILD_REV);
MODULE_LICENSE("GPL");

module_init(kgnilnd_module_init);
module_exit(kgnilnd_module_fini);