/*
 * Copyright (C) 2012 Cray, Inc.
 *
 * Copyright (c) 2013, 2014, Intel Corporation.
 *
 *   Author: Nic Henke <nic@cray.com>
 *   Author: James Shimek <jshimek@cray.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */
#include "gnilnd.h"

/* Primary entry points from LNET.  There are no guarantees against reentrance. */
lnd_t the_kgnilnd = {
#ifdef CONFIG_CRAY_XT
        .lnd_type       = GNILND,
#else
        .lnd_type       = GNIIPLND,
#endif
        .lnd_startup    = kgnilnd_startup,
        .lnd_shutdown   = kgnilnd_shutdown,
        .lnd_ctl        = kgnilnd_ctl,
        .lnd_send       = kgnilnd_send,
        .lnd_recv       = kgnilnd_recv,
        .lnd_eager_recv = kgnilnd_eager_recv,
        .lnd_query      = kgnilnd_query,
};
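
/*
 * A minimal registration sketch, assuming the LNet API of this era: the
 * table above is handed to LNet via lnet_register_lnd(&the_kgnilnd) at
 * module init and removed with lnet_unregister_lnd() on module exit; see
 * the module init/fini code later in this file for the actual wiring.
 */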

kgn_data_t      kgnilnd_data;

/* needs write_lock on kgn_peer_conn_lock */
int
kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
        kgn_conn_t         *conn;
        struct list_head   *ctmp, *cnxt;
        int                 loopback;
        int                 count = 0;

        loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;

        list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
                conn = list_entry(ctmp, kgn_conn_t, gnc_list);

                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                if (conn == newconn)
                        continue;

                if (conn->gnc_device != newconn->gnc_device)
                        continue;

                /* This is a two connection loopback - one talking to the other */
                if (loopback &&
                    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
                    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp) {
                        CDEBUG(D_NET, "skipping prune of %p, "
                                "loopback and matching stamps"
                                " connstamp "LPU64"("LPU64")"
                                " peerstamp "LPU64"("LPU64")\n",
                                conn, newconn->gnc_my_connstamp,
                                conn->gnc_peer_connstamp,
                                newconn->gnc_peer_connstamp,
                                conn->gnc_my_connstamp);
                        continue;
                }

                if (conn->gnc_peerstamp != newconn->gnc_peerstamp) {
                        LASSERTF(conn->gnc_peerstamp < newconn->gnc_peerstamp,
                                "conn 0x%p peerstamp "LPU64" >= "
                                "newconn 0x%p peerstamp "LPU64"\n",
                                conn, conn->gnc_peerstamp,
                                newconn, newconn->gnc_peerstamp);

                        CDEBUG(D_NET, "Closing stale conn nid: %s "
                               " peerstamp:"LPX64"("LPX64")\n",
                               libcfs_nid2str(peer->gnp_nid),
                               conn->gnc_peerstamp, newconn->gnc_peerstamp);
                } else {
                        LASSERTF(conn->gnc_peer_connstamp < newconn->gnc_peer_connstamp,
                                "conn 0x%p peer_connstamp "LPU64" >= "
                                "newconn 0x%p peer_connstamp "LPU64"\n",
                                conn, conn->gnc_peer_connstamp,
                                newconn, newconn->gnc_peer_connstamp);

                        CDEBUG(D_NET, "Closing stale conn nid: %s"
                               " connstamp:"LPU64"("LPU64")\n",
                               libcfs_nid2str(peer->gnp_nid),
                               conn->gnc_peer_connstamp, newconn->gnc_peer_connstamp);
                }

                count++;
                kgnilnd_close_conn_locked(conn, -ESTALE);
        }

        if (count != 0) {
                CWARN("Closed %d stale conns to %s\n", count, libcfs_nid2str(peer->gnp_nid));
        }

        RETURN(count);
}
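
/*
 * A minimal usage sketch for the cull above, assuming the locking contract
 * in its header comment:
 *
 *      write_lock(&kgnilnd_data.kgn_peer_conn_lock);
 *      count = kgnilnd_close_stale_conns_locked(peer, newconn);
 *      write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
 */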
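/* Return codes below: 0 - 'newconn' is not a duplicate; 1 - 'newconn'
 * carries an older peerstamp; 2 - an older peer connstamp; 3 - a connstamp
 * identical to an existing conn (the peer is misbehaving). */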
int
kgnilnd_conn_isdup_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
        kgn_conn_t       *conn;
        struct list_head *tmp;
        int               loopback;
        ENTRY;

        loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;

        list_for_each(tmp, &peer->gnp_conns) {
                conn = list_entry(tmp, kgn_conn_t, gnc_list);
                CDEBUG(D_NET, "checking conn 0x%p for peer %s"
                        " lo %d new "LPU64" existing "LPU64
                        " new peer "LPU64" existing peer "LPU64
                        " new dev %p existing dev %p\n",
                        conn, libcfs_nid2str(peer->gnp_nid),
                        loopback,
                        newconn->gnc_peerstamp, conn->gnc_peerstamp,
                        newconn->gnc_peer_connstamp, conn->gnc_peer_connstamp,
                        newconn->gnc_device, conn->gnc_device);

                /* conn is in the process of closing */
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                /* 'newconn' is from an earlier version of 'peer'!!! */
                if (newconn->gnc_peerstamp < conn->gnc_peerstamp)
                        RETURN(1);

                /* 'conn' is from an earlier version of 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->gnc_peerstamp > conn->gnc_peerstamp)
                        continue;

                /* Different devices are OK */
                if (conn->gnc_device != newconn->gnc_device)
                        continue;

                /* It's me connecting to myself */
                if (loopback &&
                    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
                    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp)
                        continue;

                /* 'newconn' is an earlier connection from 'peer'!!! */
                if (newconn->gnc_peer_connstamp < conn->gnc_peer_connstamp)
                        RETURN(2);

                /* 'conn' is an earlier connection from 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->gnc_peer_connstamp > conn->gnc_peer_connstamp)
                        continue;

                /* 'newconn' has the SAME connection stamp; 'peer' isn't
                 * playing the game... */
                RETURN(3);
        }

        RETURN(0);
}

int
kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
{
        kgn_conn_t      *conn;
        gni_return_t    rrc;
        int             rc = 0;

        LASSERT(!in_interrupt());
        atomic_inc(&kgnilnd_data.kgn_nconns);

        /* divide by 2 to allow for complete reset and immediate reconnect */
        if (atomic_read(&kgnilnd_data.kgn_nconns) >= GNILND_MAX_CQID/2) {
                CERROR("Too many conns are live: %d > %d\n",
                        atomic_read(&kgnilnd_data.kgn_nconns), GNILND_MAX_CQID/2);
                atomic_dec(&kgnilnd_data.kgn_nconns);
                return -E2BIG;
        }

        LIBCFS_ALLOC(conn, sizeof(*conn));
        if (conn == NULL) {
                atomic_dec(&kgnilnd_data.kgn_nconns);
                return -ENOMEM;
        }

        LIBCFS_ALLOC(conn->gnc_tx_ref_table, GNILND_MAX_MSG_ID * sizeof(void *));
        if (conn->gnc_tx_ref_table == NULL) {
                CERROR("Can't allocate conn tx_ref_table\n");
                GOTO(failed, rc = -ENOMEM);
        }

        mutex_init(&conn->gnc_smsg_mutex);
        mutex_init(&conn->gnc_rdma_mutex);
        atomic_set(&conn->gnc_refcount, 1);
        atomic_set(&conn->gnc_reaper_noop, 0);
        atomic_set(&conn->gnc_sched_noop, 0);
        atomic_set(&conn->gnc_tx_in_use, 0);
        INIT_LIST_HEAD(&conn->gnc_list);
        INIT_LIST_HEAD(&conn->gnc_hashlist);
        INIT_LIST_HEAD(&conn->gnc_schedlist);
        INIT_LIST_HEAD(&conn->gnc_fmaq);
        INIT_LIST_HEAD(&conn->gnc_mdd_list);
        spin_lock_init(&conn->gnc_list_lock);
        spin_lock_init(&conn->gnc_tx_lock);
        conn->gnc_magic = GNILND_CONN_MAGIC;

        /* set tx id to nearly the end to make sure we find wrapping
         * issues soon */
        conn->gnc_next_tx = (int) GNILND_MAX_MSG_ID - 10;

        /* if this fails, we have conflicts and MAX_TX is too large */
        CLASSERT(GNILND_MAX_MSG_ID < GNILND_MSGID_CLOSE);

        /* get a new unique CQ id for this conn */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        conn->gnc_my_connstamp = kgnilnd_data.kgn_connstamp++;
        conn->gnc_cqid = kgnilnd_get_cqid_locked();
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        if (conn->gnc_cqid == 0) {
                CERROR("Could not allocate unique CQ ID for conn 0x%p\n", conn);
                GOTO(failed, rc = -E2BIG);
        }

        CDEBUG(D_NET, "alloc cqid %u for conn 0x%p\n",
                conn->gnc_cqid, conn);

        /* need to be set before gnc_ephandle to allow kgnilnd_destroy_conn_ep to
         * check context */
        conn->gnc_device = dev;

        conn->gnc_timeout = MAX(*kgnilnd_tunables.kgn_timeout,
                                GNILND_MIN_TIMEOUT);
        kgnilnd_update_reaper_timeout(conn->gnc_timeout);

        /* this is the ep_handle for doing SMSG & BTE */
        mutex_lock(&dev->gnd_cq_mutex);
        rrc = kgnilnd_ep_create(dev->gnd_handle, dev->gnd_snd_fma_cqh,
                                &conn->gnc_ephandle);
        mutex_unlock(&dev->gnd_cq_mutex);
        if (rrc != GNI_RC_SUCCESS)
                GOTO(failed, rc = -ENETDOWN);

        CDEBUG(D_NET, "created conn 0x%p ep_hndl 0x%p\n",
               conn, conn->gnc_ephandle);

        /* add ref for EP canceling */
        kgnilnd_conn_addref(conn);
        atomic_inc(&dev->gnd_neps);

        *connp = conn;
        return 0;

failed:
        atomic_dec(&kgnilnd_data.kgn_nconns);
        LIBCFS_FREE(conn->gnc_tx_ref_table, GNILND_MAX_MSG_ID * sizeof(void *));
        LIBCFS_FREE(conn, sizeof(*conn));
        return rc;
}
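
/*
 * Note on refcounts: a conn created above leaves with two references -
 * the initial one from atomic_set(&conn->gnc_refcount, 1) for the caller,
 * and the extra one taken for EP canceling, which is dropped in
 * kgnilnd_destroy_conn_ep().
 */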

/* needs to be called with kgn_peer_conn_lock held (read or write) */
kgn_conn_t *
kgnilnd_find_conn_locked(kgn_peer_t *peer)
{
        kgn_conn_t      *conn = NULL;

        /* if we are in reset, this conn is going to die soon */
        if (unlikely(kgnilnd_data.kgn_in_reset)) {
                RETURN(NULL);
        }

        /* just return the first ESTABLISHED connection */
        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                /* kgnilnd_finish_connect doesn't put connections on the
                 * peer list until they are actually established */
                LASSERTF(conn->gnc_state >= GNILND_CONN_ESTABLISHED,
                        "found conn %p state %s on peer %p (%s)\n",
                        conn, kgnilnd_conn_state2str(conn), peer,
                        libcfs_nid2str(peer->gnp_nid));
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                RETURN(conn);
        }
        RETURN(NULL);
}
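
/*
 * A minimal usage sketch, per the header comment above (read lock is
 * sufficient); take a conn ref before dropping the lock if the conn must
 * outlive it:
 *
 *      read_lock(&kgnilnd_data.kgn_peer_conn_lock);
 *      conn = kgnilnd_find_conn_locked(peer);
 *      read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
 */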

/* needs write_lock on kgn_peer_conn_lock held */
kgn_conn_t *
kgnilnd_find_or_create_conn_locked(kgn_peer_t *peer)
{
        kgn_device_t    *dev = peer->gnp_net->gnn_dev;
        kgn_conn_t      *conn;

        conn = kgnilnd_find_conn_locked(peer);

        if (conn != NULL) {
                return conn;
        }

        /* if the peer was previously connecting, check if we should
         * trigger another connection attempt yet. */
        if (time_before(jiffies, peer->gnp_reconnect_time)) {
                return NULL;
        }

        /* This check prevents us from creating a new connection to a peer while we are
         * still in the process of closing an existing connection to the peer.
         */
        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_ephandle != NULL) {
                        CDEBUG(D_NET, "Not connecting - non-NULL ephandle found, peer 0x%p->%s\n",
                                peer, libcfs_nid2str(peer->gnp_nid));
                        return NULL;
                }
        }

        if (peer->gnp_connecting != GNILND_PEER_IDLE) {
                /* only start a new connection attempt when the peer is IDLE -
                 * in any other state a connect is already in flight (or the
                 * peer is being torn down), so don't start another */
                return NULL;
        }

        CDEBUG(D_NET, "starting connect to %s\n",
                libcfs_nid2str(peer->gnp_nid));
        peer->gnp_connecting = GNILND_PEER_CONNECT;
        kgnilnd_peer_addref(peer); /* extra ref for connd */

        spin_lock(&dev->gnd_connd_lock);
        list_add_tail(&peer->gnp_connd_list, &dev->gnd_connd_peers);
        spin_unlock(&dev->gnd_connd_lock);

        kgnilnd_schedule_dgram(dev);
        CDEBUG(D_NETTRACE, "scheduling new connect\n");

        return NULL;
}

/* Caller is responsible for deciding if/when to call this */
void
kgnilnd_destroy_conn_ep(kgn_conn_t *conn)
{
        gni_return_t    rrc;
        gni_ep_handle_t tmp_ep;

        /* only if we actually initialized it,
         * then set NULL to tell kgnilnd_destroy_conn to leave it alone */
        tmp_ep = xchg(&conn->gnc_ephandle, NULL);
        if (tmp_ep != NULL) {
                /* we never re-use the EP, so unbind is not needed */
                mutex_lock(&conn->gnc_device->gnd_cq_mutex);
                rrc = kgnilnd_ep_destroy(tmp_ep);

                mutex_unlock(&conn->gnc_device->gnd_cq_mutex);

                /* if this fails, it could hork up kgni smsg retransmit and others
                 * since we could free the SMSG mbox memory, etc. */
                LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d conn 0x%p ep 0x%p\n",
                         rrc, conn, conn->gnc_ephandle);

                atomic_dec(&conn->gnc_device->gnd_neps);

                /* clear out count added in kgnilnd_close_conn_locked
                 * conn will have a peer once it hits finish_connect, where it
                 * is the first spot we'll mark it ESTABLISHED as well */
                if (conn->gnc_peer) {
                        kgnilnd_admin_decref(conn->gnc_peer->gnp_dirty_eps);
                }

                /* drop ref for EP */
                kgnilnd_conn_decref(conn);
        }
}
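
/*
 * Note: the xchg() above makes EP teardown single-shot - whichever caller
 * swaps gnc_ephandle to NULL owns the destroy, and kgnilnd_destroy_conn()
 * below then sees NULL and leaves the EP alone.
 */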

void
kgnilnd_destroy_conn(kgn_conn_t *conn)
{
        LASSERTF(!in_interrupt() &&
                !conn->gnc_scheduled &&
                !conn->gnc_in_purgatory &&
                conn->gnc_ephandle == NULL &&
                list_empty(&conn->gnc_list) &&
                list_empty(&conn->gnc_hashlist) &&
                list_empty(&conn->gnc_schedlist) &&
                list_empty(&conn->gnc_mdd_list) &&
                conn->gnc_magic == GNILND_CONN_MAGIC,
                "conn 0x%p->%s IRQ %d sched %d purg %d ep 0x%p Mg %d lists %d/%d/%d/%d\n",
                conn, conn->gnc_peer ? libcfs_nid2str(conn->gnc_peer->gnp_nid)
                                     : "<?>",
                !!in_interrupt(), conn->gnc_scheduled,
                conn->gnc_in_purgatory,
                conn->gnc_ephandle,
                conn->gnc_magic,
                list_empty(&conn->gnc_list),
                list_empty(&conn->gnc_hashlist),
                list_empty(&conn->gnc_schedlist),
                list_empty(&conn->gnc_mdd_list));

        /* Tripping these is especially bad, as it means we have items on the
         * lists that didn't keep their refcount on the connection - or
         * somebody evil released their own */
        LASSERTF(list_empty(&conn->gnc_fmaq) &&
                 atomic_read(&conn->gnc_nlive_fma) == 0 &&
                 atomic_read(&conn->gnc_nlive_rdma) == 0,
                 "conn 0x%p fmaq %d@0x%p nfma %d nrdma %d\n",
                 conn, kgnilnd_count_list(&conn->gnc_fmaq), &conn->gnc_fmaq,
                 atomic_read(&conn->gnc_nlive_fma), atomic_read(&conn->gnc_nlive_rdma));

        CDEBUG(D_NET, "destroying conn %p ephandle %p error %d\n",
                conn, conn->gnc_ephandle, conn->gnc_error);

        /* We are freeing this memory; remove the magic value from the connection */
        conn->gnc_magic = 0;

        /* if there is an FMA blk left here, we'll tear it down */
        if (conn->gnc_fma_blk) {
                if (conn->gnc_peer) {
                        kgn_mbox_info_t *mbox;
                        mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
                        mbox->mbx_prev_nid = conn->gnc_peer->gnp_nid;
                }
                kgnilnd_release_mbox(conn, 0);
        }

        if (conn->gnc_peer != NULL)
                kgnilnd_peer_decref(conn->gnc_peer);

        if (conn->gnc_tx_ref_table != NULL) {
                LIBCFS_FREE(conn->gnc_tx_ref_table,
                            GNILND_MAX_MSG_ID * sizeof(void *));
        }

        LIBCFS_FREE(conn, sizeof(*conn));
        atomic_dec(&kgnilnd_data.kgn_nconns);
}

/* peer_alive and peer_notify done in the style of the o2iblnd */
void
kgnilnd_peer_alive(kgn_peer_t *peer)
{
        set_mb(peer->gnp_last_alive, jiffies);
}

void
kgnilnd_peer_notify(kgn_peer_t *peer, int error, int alive)
{
        int                     tell_lnet = 0;
        int                     nnets = 0;
        int                     rc;
        int                     i, j;
        kgn_conn_t             *conn;
        kgn_net_t             **nets;
        kgn_net_t              *net;

        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DONT_NOTIFY))
                return;

        /* Tell LNet we are giving up on this peer - but only
         * if it isn't already reconnected or trying to reconnect */
        read_lock(&kgnilnd_data.kgn_peer_conn_lock);

        /* use kgnilnd_find_conn_locked to avoid any conns in the process of being nuked
         *
         * don't tell LNet if we are in reset - we assume that everyone will be able to
         * reconnect just fine
         */
        conn = kgnilnd_find_conn_locked(peer);

        CDEBUG(D_NETTRACE, "peer 0x%p->%s connecting %d conn 0x%p, rst %d error %d\n",
               peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting, conn,
               kgnilnd_data.kgn_in_reset, error);

        if (((peer->gnp_connecting == GNILND_PEER_IDLE) &&
            (conn == NULL) &&
            (!kgnilnd_data.kgn_in_reset) &&
            (!kgnilnd_conn_clean_errno(error))) || alive) {
                tell_lnet = 1;
        }

        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        if (!tell_lnet) {
                /* short circuit if we don't need to notify LNet */
                return;
        }

        rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);

        if (rc) {
                /* only notify if we took the semaphore - failing to get it
                 * means LNet is in shutdown (or something similar) and we
                 * should leave the nets alone */
                for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                        list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
                                /* if gnn_shutdown is set for any net, a shutdown is in progress; just return */
                                if (net->gnn_shutdown) {
                                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                                        return;
                                }
                                nnets++;
                        }
                }

                if (nnets == 0) {
                        /* shutdown in progress most likely */
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        return;
                }

                LIBCFS_ALLOC(nets, nnets * sizeof(*nets));

                if (nets == NULL) {
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        CERROR("Failed to allocate nets[%d]\n", nnets);
                        return;
                }

                j = 0;
                for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                        list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
                                nets[j] = net;
                                kgnilnd_net_addref(net);
                                j++;
                        }
                }
                up_read(&kgnilnd_data.kgn_net_rw_sem);

                for (i = 0; i < nnets; i++) {
                        lnet_nid_t peer_nid;

                        net = nets[i];

                        peer_nid = kgnilnd_lnd2lnetnid(net->gnn_ni->ni_nid,
                                                       peer->gnp_nid);

                        CDEBUG(D_NET, "peer 0x%p->%s last_alive %lu (%lus ago)\n",
                                peer, libcfs_nid2str(peer_nid), peer->gnp_last_alive,
                                cfs_duration_sec(jiffies - peer->gnp_last_alive));

                        lnet_notify(net->gnn_ni, peer_nid, alive,
                                    peer->gnp_last_alive);

                        kgnilnd_net_decref(net);
                }

                LIBCFS_FREE(nets, nnets * sizeof(*nets));
        }
}
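
/*
 * Note: the nets are snapshotted (with references) while holding
 * kgn_net_rw_sem, and lnet_notify() is only called after the semaphore is
 * dropped, so we never call up into LNet while holding it.
 */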

/* need write_lock on kgn_peer_conn_lock */
void
kgnilnd_close_conn_locked(kgn_conn_t *conn, int error)
{
        kgn_peer_t        *peer = conn->gnc_peer;
        ENTRY;

        LASSERT(!in_interrupt());

        /* store error for tx completion */
        conn->gnc_error = error;
        peer->gnp_last_errno = error;

        /* use real error from peer if possible */
        if (error == -ECONNRESET) {
                error = conn->gnc_peer_error;
        }

        /* if we NETERROR, make sure it is rate limited */
        if (!kgnilnd_conn_clean_errno(error) &&
            peer->gnp_down == GNILND_RCA_NODE_UP) {
                CNETERR("closing conn to %s: error %d\n",
                       libcfs_nid2str(peer->gnp_nid), error);
        } else {
                CDEBUG(D_NET, "closing conn to %s: error %d\n",
                       libcfs_nid2str(peer->gnp_nid), error);
        }

        LASSERTF(conn->gnc_state == GNILND_CONN_ESTABLISHED,
                "conn %p to %s with bogus state %s\n", conn,
                libcfs_nid2str(conn->gnc_peer->gnp_nid),
                kgnilnd_conn_state2str(conn));
        LASSERT(!list_empty(&conn->gnc_hashlist));
        LASSERT(!list_empty(&conn->gnc_list));

        /* mark peer count here so any place the EP gets destroyed will
         * open up the peer count so that a new ESTABLISHED conn is then free
         * to send new messages -- sending before the previous EPs are destroyed
         * could end up with messages on the network for the old conn _after_
         * the new conn and break the mbox safety protocol */
        kgnilnd_admin_addref(conn->gnc_peer->gnp_dirty_eps);

        /* Remove from conn hash table: no new callbacks */
        list_del_init(&conn->gnc_hashlist);
        kgnilnd_data.kgn_conn_version++;
        kgnilnd_conn_decref(conn);

        /* if we are in reset, go right to CLOSED as there is no scheduler
         * thread to move from CLOSING to CLOSED */
        if (unlikely(kgnilnd_data.kgn_in_reset)) {
                conn->gnc_state = GNILND_CONN_CLOSED;
        } else {
                conn->gnc_state = GNILND_CONN_CLOSING;
        }

        /* leave on peer->gnp_conns to make sure we don't let the reaper
         * or others try to unlink this peer until the conn is fully
         * processed for closing */

        if (kgnilnd_check_purgatory_conn(conn)) {
                kgnilnd_add_purgatory_locked(conn, conn->gnc_peer);
        }

        /* Reset RX timeout to ensure we wait for an incoming CLOSE
         * for the full timeout.  If we get a CLOSE we know the
         * peer has stopped all RDMA.  Otherwise if we wait for
         * the full timeout we can also be sure all RDMA has stopped. */
        conn->gnc_last_rx = conn->gnc_last_rx_cq = jiffies;
        mb();

        /* schedule sending CLOSE - if we are in quiesce, this adds to
         * gnd_ready_conns and allows us to find it in quiesce processing */
        kgnilnd_schedule_conn(conn);

        EXIT;
}

void
kgnilnd_close_conn(kgn_conn_t *conn, int error)
{
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        /* need to check the state here - this call is racy and we don't
         * know the state until after the lock is grabbed */
        if (conn->gnc_state == GNILND_CONN_ESTABLISHED) {
                kgnilnd_close_conn_locked(conn, error);
        }
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
}
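
/*
 * Usage note: kgnilnd_close_conn() is the unlocked entry point for callers
 * that do not already hold kgn_peer_conn_lock; paths that do hold it call
 * kgnilnd_close_conn_locked() directly.
 */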

void
kgnilnd_complete_closed_conn(kgn_conn_t *conn)
{
        LIST_HEAD(sinners);
        kgn_tx_t               *tx, *txn;
        int                     nlive = 0;
        int                     nrdma = 0;
        int                     nq_rdma = 0;
        int                     logmsg;
        ENTRY;

        /* Dump log on cksum error - wait until complete phase to let
         * RX of error happen */
        if (*kgnilnd_tunables.kgn_checksum_dump &&
            (conn != NULL && conn->gnc_peer_error == -ENOKEY)) {
                libcfs_debug_dumplog();
        }

        /* _CLOSED set in kgnilnd_process_fmaq once we decide to
         * send the CLOSE or not */
        LASSERTF(conn->gnc_state == GNILND_CONN_CLOSED,
                 "conn 0x%p->%s with bad state %s\n",
                 conn, conn->gnc_peer ?
                        libcfs_nid2str(conn->gnc_peer->gnp_nid) :
                        "<?>",
                 kgnilnd_conn_state2str(conn));

        LASSERT(list_empty(&conn->gnc_hashlist));

        /* we've sent the close, start nuking */
        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SCHEDULE_COMPLETE))
                kgnilnd_schedule_conn(conn);

        if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
                CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
                                "done, attempting to recover conn 0x%p "
                                "scheduled %d function: %s line: %d\n", conn,
                                conn->gnc_scheduled, conn->gnc_sched_caller,
                                conn->gnc_sched_line);
                RETURN_EXIT;
        }

        /* we don't use lists to track things that we can get out of the
         * tx_ref table... */

        /* need to hold locks for tx_list_state, sampling it is too racy:
         * - the lock actually protects tx != NULL, but we can't take the proper
         *   lock until we check tx_list_state, which would be too late and
         *   we could have the TX change under us.
         * gnd_rdmaq_lock and gnd_lock are not used together, so taking both
         * should be fine */
        spin_lock(&conn->gnc_device->gnd_rdmaq_lock);
        spin_lock(&conn->gnc_device->gnd_lock);

        for (nrdma = 0; nrdma < GNILND_MAX_MSG_ID; nrdma++) {
                tx = conn->gnc_tx_ref_table[nrdma];

                if (tx != NULL) {
                        /* only print the first error and if not CLOSE, we often don't see
                         * CQ events for that by the time we get here... and really don't care */
                        if (nlive || tx->tx_msg.gnm_type == GNILND_MSG_CLOSE)
                                tx->tx_state |= GNILND_TX_QUIET_ERROR;
                        nlive++;
                        GNIDBG_TX(D_NET, tx, "cleaning up on close, nlive %d", nlive);

                        /* don't worry about gnc_lock here as nobody else should be
                         * touching this conn */
                        kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
                        list_add_tail(&tx->tx_list, &sinners);
                }
        }
        spin_unlock(&conn->gnc_device->gnd_lock);
        spin_unlock(&conn->gnc_device->gnd_rdmaq_lock);

        /* nobody should have marked this as needing scheduling after
         * we called close - so only ref should be us handling it */
        if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
                CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
                                "done, attempting to recover conn 0x%p "
                                "scheduled %d function %s line: %d\n", conn,
                                conn->gnc_scheduled, conn->gnc_sched_caller,
                                conn->gnc_sched_line);
        }
        /* now reset a few to actual counters... */
        nrdma = atomic_read(&conn->gnc_nlive_rdma);
        nq_rdma = atomic_read(&conn->gnc_nq_rdma);

        if (!list_empty(&sinners)) {
                list_for_each_entry_safe(tx, txn, &sinners, tx_list) {
                        /* clear tx_list to make tx_add_list_locked happy */
                        list_del_init(&tx->tx_list);
                        /* The error codes determine if we hold onto the MDD */
                        kgnilnd_tx_done(tx, conn->gnc_error);
                }
        }

        logmsg = (nlive + nrdma + nq_rdma);

        if (logmsg) {
                if (conn->gnc_peer->gnp_down == GNILND_RCA_NODE_UP) {
                        CNETERR("Closed conn 0x%p->%s (errno %d, peer errno %d): "
                                "canceled %d TX, %d/%d RDMA\n",
                                conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                                conn->gnc_error, conn->gnc_peer_error,
                                nlive, nq_rdma, nrdma);
                } else {
                        CDEBUG(D_NET, "Closed conn 0x%p->%s (errno %d,"
                                " peer errno %d): canceled %d TX, %d/%d RDMA\n",
                                conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                                conn->gnc_error, conn->gnc_peer_error,
                                nlive, nq_rdma, nrdma);
                }
        }

        kgnilnd_destroy_conn_ep(conn);

        /* Bug 765042 - race this with completing a new conn to same peer - we need
         * finish_connect to detach purgatory before we can do it ourselves here */
        CFS_RACE(CFS_FAIL_GNI_FINISH_PURG);

        /* now it is safe to remove from peer list - anyone looking at
         * gnp_conns now is free to unlink if not on purgatory */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);

        conn->gnc_state = GNILND_CONN_DONE;

        /* Decrement counter if we are marked by del_conn_or_peers for closing
         */
        if (conn->gnc_needs_closing)
                kgnilnd_admin_decref(kgnilnd_data.kgn_npending_conns);

        /* Remove from the peer's list of valid connections if it's not in purgatory */
        if (!conn->gnc_in_purgatory) {
                list_del_init(&conn->gnc_list);
                /* Lose the peer's reference on the conn */
                kgnilnd_conn_decref(conn);
        }

        /* NB - only unlinking if we set pending in del_peer_locked from admin or
         * shutdown */
        if (kgnilnd_peer_active(conn->gnc_peer) &&
            conn->gnc_peer->gnp_pending_unlink &&
            kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
                kgnilnd_unlink_peer_locked(conn->gnc_peer);
        }

        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        /* I'm telling Mommy! - use peer_error if they initiated close */
        kgnilnd_peer_notify(conn->gnc_peer,
                            conn->gnc_error == -ECONNRESET ?
                            conn->gnc_peer_error : conn->gnc_error, 0);

        EXIT;
}
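
/*
 * Note on ordering in the function above: cancel TXs out of the ref table,
 * tear down the EP, flip the conn to DONE under the write lock, and only
 * then notify LNet about the peer - the notification must happen without
 * kgn_peer_conn_lock held, since kgnilnd_peer_notify takes it read-side.
 */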

int
kgnilnd_set_conn_params(kgn_dgram_t *dgram)
{
        kgn_conn_t             *conn = dgram->gndg_conn;
        kgn_connreq_t          *connreq = &dgram->gndg_conn_in;
        kgn_gniparams_t        *rem_param = &connreq->gncr_gnparams;
        gni_return_t            rrc;
        int                     rc = 0;
        gni_smsg_attr_t        *remote = &connreq->gncr_gnparams.gnpr_smsg_attr;

        /* set timeout vals in conn early so we can use them for the NAK */

        /* use max of the requested and our timeout, peer will do the same */
        conn->gnc_timeout = MAX(conn->gnc_timeout, connreq->gncr_timeout);

        /* only ep_bind really mucks around with the CQ */
        /* only ep_bind if we are not connecting to ourselves and the dstnid is
         * not a wildcard. this check is necessary because an EP can only be
         * bound once, and we must make sure we don't bind when already bound.
         */
        if (connreq->gncr_dstnid != LNET_NID_ANY && dgram->gndg_conn_out.gncr_dstnid != connreq->gncr_srcnid) {
                mutex_lock(&conn->gnc_device->gnd_cq_mutex);
                rrc = kgnilnd_ep_bind(conn->gnc_ephandle,
                        connreq->gncr_gnparams.gnpr_host_id,
                        conn->gnc_cqid);
                mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                if (rrc != GNI_RC_SUCCESS) {
                        rc = -ECONNABORTED;
                        goto return_out;
                }
        }

        rrc = kgnilnd_ep_set_eventdata(conn->gnc_ephandle, conn->gnc_cqid,
                         connreq->gncr_gnparams.gnpr_cqid);
        if (rrc != GNI_RC_SUCCESS) {
                rc = -ECONNABORTED;
                goto cleanup_out;
        }

        /* Initialize SMSG */
        rrc = kgnilnd_smsg_init(conn->gnc_ephandle, &conn->gnpr_smsg_attr,
                        &connreq->gncr_gnparams.gnpr_smsg_attr);
        if (unlikely(rrc == GNI_RC_INVALID_PARAM)) {
                gni_smsg_attr_t *local = &conn->gnpr_smsg_attr;
                /* help folks figure out if there is a tunable off, etc. */
                LCONSOLE_ERROR("SMSG attribute mismatch. Data from local/remote:"
                               " type %d/%d msg_maxsize %u/%u"
                               " mbox_maxcredit %u/%u. Please check kgni"
                               " logs for further data\n",
                               local->msg_type, remote->msg_type,
                               local->msg_maxsize, remote->msg_maxsize,
                               local->mbox_maxcredit, remote->mbox_maxcredit);
        }
        if (rrc != GNI_RC_SUCCESS) {
                rc = -ECONNABORTED;
                goto cleanup_out;
        }

        /* log this to help in debugging SMSG buffer re-use */
        CDEBUG(D_NET, "conn %p src %s dst %s smsg %p acquired"
                " local cqid %u SMSG %p->%u hndl "LPX64"."LPX64
                " remote cqid %u SMSG %p->%u hndl "LPX64"."LPX64"\n",
                conn, libcfs_nid2str(connreq->gncr_srcnid),
                libcfs_nid2str(connreq->gncr_dstnid),
                &conn->gnpr_smsg_attr,
                conn->gnc_cqid,
                conn->gnpr_smsg_attr.msg_buffer,
                conn->gnpr_smsg_attr.mbox_offset,
                conn->gnpr_smsg_attr.mem_hndl.qword1,
                conn->gnpr_smsg_attr.mem_hndl.qword2,
                rem_param->gnpr_cqid,
                rem_param->gnpr_smsg_attr.msg_buffer,
                rem_param->gnpr_smsg_attr.mbox_offset,
                rem_param->gnpr_smsg_attr.mem_hndl.qword1,
                rem_param->gnpr_smsg_attr.mem_hndl.qword2);

        conn->gnc_peerstamp = connreq->gncr_peerstamp;
        conn->gnc_peer_connstamp = connreq->gncr_connstamp;
        conn->remote_mbox_addr = (void *)((char *)remote->msg_buffer + remote->mbox_offset);

        /* We update the reaper timeout once we have a valid conn and timeout */
        kgnilnd_update_reaper_timeout(GNILND_TO2KA(conn->gnc_timeout));

        return 0;

cleanup_out:
        rrc = kgnilnd_ep_unbind(conn->gnc_ephandle);
        /* not sure I can just let this fly */
        LASSERTF(rrc == GNI_RC_SUCCESS,
                "bad rc from gni_ep_unbind trying to cleanup: %d\n", rrc);

return_out:
        LASSERTF(rc != 0, "SOFTWARE BUG: rc == 0\n");
        CERROR("Error setting connection params from %s: %d\n",
               libcfs_nid2str(connreq->gncr_srcnid), rc);
        return rc;
}

/* needs down_read on kgn_net_rw_sem held from before this call until
 * after the write_lock on kgn_peer_conn_lock - this ensures we stay sane
 * with kgnilnd_shutdown - it'll get the sem and set shutdown, then get the
 * kgn_peer_conn_lock to start del_peer'ing. If we hold the sem until after
 * kgn_peer_conn_lock is held, we guarantee that nobody calls
 * kgnilnd_add_peer_locked without checking gnn_shutdown */
int
kgnilnd_create_peer_safe(kgn_peer_t **peerp,
                         lnet_nid_t nid,
                         kgn_net_t *net,
                         int node_state)
{
        kgn_peer_t      *peer;
        int             rc;

        LASSERT(nid != LNET_NID_ANY);

        /* We don't pass the net around in the dgram anymore, so this is where
         * we find it. This will work unless we are in shutdown or the nid has
         * a net that is invalid; either way an error code is returned in that
         * case.
         *
         * If the net passed in is not NULL then we can use it; this saves
         * looking it up when the calling function already has access to it.
         */
        if (net == NULL) {
                rc = kgnilnd_find_net(nid, &net);
                if (rc < 0)
                        return rc;
        } else {
                /* kgnilnd_find_net adds a reference on the net; if we are not
                 * using it we must take that reference manually so the net
                 * refcounts are correct when tearing down the net
                 */
                kgnilnd_net_addref(net);
        }

        LIBCFS_ALLOC(peer, sizeof(*peer));
        if (peer == NULL) {
                kgnilnd_net_decref(net);
                return -ENOMEM;
        }
        peer->gnp_nid = nid;
        peer->gnp_down = node_state;

        /* translate from nid to nic addr & store */
        rc = kgnilnd_nid_to_nicaddrs(LNET_NIDADDR(nid), 1, &peer->gnp_host_id);
        if (rc <= 0) {
                kgnilnd_net_decref(net);
                LIBCFS_FREE(peer, sizeof(*peer));
                return -ESRCH;
        }
        CDEBUG(D_NET, "peer 0x%p->%s -> NIC 0x%x\n", peer,
                libcfs_nid2str(nid), peer->gnp_host_id);

        atomic_set(&peer->gnp_refcount, 1);     /* 1 ref for caller */
        atomic_set(&peer->gnp_dirty_eps, 0);

        INIT_LIST_HEAD(&peer->gnp_list);
        INIT_LIST_HEAD(&peer->gnp_connd_list);
        INIT_LIST_HEAD(&peer->gnp_conns);
        INIT_LIST_HEAD(&peer->gnp_tx_queue);

        /* the first reconnect should happen immediately, so we leave
         * gnp_reconnect_interval set to 0 */

        LASSERTF(net != NULL, "peer 0x%p->%s with NULL net\n",
                 peer, libcfs_nid2str(nid));

        /* must have kgn_net_rw_sem held for this...  */
        if (net->gnn_shutdown) {
                /* shutdown has started already */
                kgnilnd_net_decref(net);
                LIBCFS_FREE(peer, sizeof(*peer));
                return -ESHUTDOWN;
        }

        peer->gnp_net = net;

        atomic_inc(&kgnilnd_data.kgn_npeers);

        *peerp = peer;
        return 0;
}
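
/*
 * See kgnilnd_add_peer() below for the canonical calling sequence: take
 * kgn_net_rw_sem read-side, create the peer, then trade the semaphore for
 * the kgn_peer_conn_lock write lock before inserting into the table.
 */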

void
kgnilnd_destroy_peer(kgn_peer_t *peer)
{
        CDEBUG(D_NET, "peer %s %p deleted\n",
               libcfs_nid2str(peer->gnp_nid), peer);
        LASSERTF(atomic_read(&peer->gnp_refcount) == 0,
                 "peer 0x%p->%s refs %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid),
                 atomic_read(&peer->gnp_refcount));
        LASSERTF(atomic_read(&peer->gnp_dirty_eps) == 0,
                 "peer 0x%p->%s dirty eps %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid),
                 atomic_read(&peer->gnp_dirty_eps));
        LASSERTF(peer->gnp_net != NULL, "peer %p (%s) with NULL net\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(!kgnilnd_peer_active(peer),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE || peer->gnp_connecting == GNILND_PEER_KILL,
                 "peer 0x%p->%s, connecting %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
        LASSERTF(list_empty(&peer->gnp_conns),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_tx_queue),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_connd_list),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));

        /* NB a peer's connections keep a reference on their peer until
         * they are destroyed, so we can be assured that _all_ state to do
         * with this peer has been cleaned up when its refcount drops to
         * zero. */

        atomic_dec(&kgnilnd_data.kgn_npeers);
        kgnilnd_net_decref(peer->gnp_net);

        LIBCFS_FREE(peer, sizeof(*peer));
}

/* the conn might not have made it all the way through to a connected
 * state - but we need to put any conn that a remote peer might have seen
 * through a posted dgram into purgatory as well */
void
kgnilnd_add_purgatory_locked(kgn_conn_t *conn, kgn_peer_t *peer)
{
        kgn_mbox_info_t *mbox = NULL;
        ENTRY;

        /* NB - the caller should own conn by removing him from the
         * scheduler thread when finishing the close */

        LASSERTF(peer != NULL, "conn %p with NULL peer\n", conn);

        /* If this is still true, need to add the calls to unlink back in and
         * figure out how to close the hole on loopback conns */
        LASSERTF(kgnilnd_peer_active(peer), "can't use inactive peer %s (%p)"
                " we'll never recover the resources\n",
                libcfs_nid2str(peer->gnp_nid), peer);

        CDEBUG(D_NET, "conn %p peer %p dev %p\n", conn, peer,
                conn->gnc_device);

        LASSERTF(conn->gnc_in_purgatory == 0,
                "Conn already in purgatory\n");
        conn->gnc_in_purgatory = 1;

        mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
        mbox->mbx_prev_purg_nid = peer->gnp_nid;
        mbox->mbx_add_purgatory = jiffies;
        kgnilnd_release_mbox(conn, 1);

        LASSERTF(list_empty(&conn->gnc_mdd_list),
                "conn 0x%p->%s with active purgatory hold MDD %d\n",
                conn, libcfs_nid2str(peer->gnp_nid),
                kgnilnd_count_list(&conn->gnc_mdd_list));

        EXIT;
}

/* Instead of detaching everything from purgatory here, we just mark the conn
 * as needing detach; when the reaper checks the conn the next time, it will
 * detach it.
 * Calling function requires write_lock held on kgn_peer_conn_lock
 */
void
kgnilnd_mark_for_detach_purgatory_all_locked(kgn_peer_t *peer)
{
        kgn_conn_t       *conn;

        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_in_purgatory && !conn->gnc_needs_detach) {
                        conn->gnc_needs_detach = 1;
                        kgnilnd_admin_addref(kgnilnd_data.kgn_npending_detach);
                }
        }
}

/* Calling function needs a write_lock held on kgn_peer_conn_lock */
void
kgnilnd_detach_purgatory_locked(kgn_conn_t *conn, struct list_head *conn_list)
{
        kgn_mbox_info_t *mbox = NULL;

        /* if needed, add the conn purgatory data to the list passed in */
        if (conn->gnc_in_purgatory) {
                CDEBUG(D_NET, "peer %p->%s purg_conn %p@%s mdd_list #tx %d\n",
                        conn->gnc_peer, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                        conn, kgnilnd_conn_state2str(conn),
                        kgnilnd_count_list(&conn->gnc_mdd_list));

                mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
                mbox->mbx_detach_of_purgatory = jiffies;

                /* conn->gnc_list is the entry point on peer->gnp_conns, so detaching it
                 * here removes it from the list of 'valid' peer connections.
                 * We put the current conn onto a list of conns to be passed to
                 * kgnilnd_release_purgatory_list(); the caller of
                 * kgnilnd_detach_purgatory_locked() now owns that conn, since it's
                 * no longer on the peer's conn_list.
                 */

                list_del_init(&conn->gnc_list);

                /* NB - only unlinking if we set pending in del_peer_locked from admin or
                 * shutdown */
                if (kgnilnd_peer_active(conn->gnc_peer) &&
                    conn->gnc_peer->gnp_pending_unlink &&
                    kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
                        kgnilnd_unlink_peer_locked(conn->gnc_peer);
                }
                /* The reaper will not call detach unless the conn is fully through
                 * kgnilnd_complete_closed_conn. If the conn is somehow not in a DONE
                 * state, we are attempting to detach even though the conn has not
                 * been fully cleaned up. If we detach while the conn is still closing,
                 * we will end up with an orphaned connection that has a valid
                 * ep_handle but is not on any peer.
                 */
                LASSERTF(conn->gnc_state == GNILND_CONN_DONE, "Conn in invalid state %p@%s\n",
                                conn, kgnilnd_conn_state2str(conn));

                /* move from peer to the delayed release list */
                list_add_tail(&conn->gnc_list, conn_list);
        }
}
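
/*
 * A minimal sketch of the detach/release pairing (the list name is
 * illustrative only):
 *
 *      LIST_HEAD(souls);
 *
 *      write_lock(&kgnilnd_data.kgn_peer_conn_lock);
 *      kgnilnd_detach_purgatory_locked(conn, &souls);
 *      write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
 *      kgnilnd_release_purgatory_list(&souls);
 *
 * i.e. detach under the write lock, then release the collected conns
 * (mbox, MDDs, conn ref) outside it.
 */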

void
kgnilnd_release_purgatory_list(struct list_head *conn_list)
{
        kgn_device_t            *dev;
        kgn_conn_t              *conn, *connN;
        kgn_mdd_purgatory_t     *gmp, *gmpN;

        list_for_each_entry_safe(conn, connN, conn_list, gnc_list) {
                dev = conn->gnc_device;

                kgnilnd_release_mbox(conn, -1);
                conn->gnc_in_purgatory = 0;

                list_del_init(&conn->gnc_list);

                /* gnc_needs_detach is set in kgnilnd_del_conn_or_peer. It is used to keep track
                 * of conns that have been marked for detach by kgnilnd_del_conn_or_peer.
                 * The function uses kgn_npending_detach to verify the conn has
                 * actually been detached.
                 */
                if (conn->gnc_needs_detach)
                        kgnilnd_admin_decref(kgnilnd_data.kgn_npending_detach);

                /* if this guy is really dead (we are doing release from reaper),
                 * make sure we tell LNet - if this is from other context,
                 * the checks in the function will prevent an errant
                 * notification */
                kgnilnd_peer_notify(conn->gnc_peer, conn->gnc_error, 0);

                list_for_each_entry_safe(gmp, gmpN, &conn->gnc_mdd_list,
                                         gmp_list) {
                        CDEBUG(D_NET,
                               "dev %p releasing held mdd "LPX64"."LPX64"\n",
                               conn->gnc_device, gmp->gmp_map_key.qword1,
                               gmp->gmp_map_key.qword2);

                        atomic_dec(&dev->gnd_n_mdd_held);
                        kgnilnd_mem_mdd_release(conn->gnc_device->gnd_handle,
                                                &gmp->gmp_map_key);
                        /* ignoring the return code - if kgni/ghal can't find it
                         * it must be released already */

                        list_del_init(&gmp->gmp_list);
                        LIBCFS_FREE(gmp, sizeof(*gmp));
                }
                /* lose conn ref for purgatory */
                kgnilnd_conn_decref(conn);
        }
}

/* needs write_lock on kgnilnd_data.kgn_peer_conn_lock held */
void
kgnilnd_peer_increase_reconnect_locked(kgn_peer_t *peer)
{
        int current_to;

        current_to = peer->gnp_reconnect_interval;

        /* we'll try to reconnect fast the first time, then back-off */
        if (current_to == 0) {
                peer->gnp_reconnect_time = jiffies - 1;
                current_to = *kgnilnd_tunables.kgn_min_reconnect_interval;
        } else {
                peer->gnp_reconnect_time = jiffies + cfs_time_seconds(current_to);
                /* add 50% of min timeout & retry */
                current_to += *kgnilnd_tunables.kgn_min_reconnect_interval / 2;
        }

        current_to = MIN(current_to,
                         *kgnilnd_tunables.kgn_max_reconnect_interval);

        peer->gnp_reconnect_interval = current_to;
        CDEBUG(D_NET, "peer %s can reconnect at %lu interval %lu\n",
               libcfs_nid2str(peer->gnp_nid), peer->gnp_reconnect_time,
               peer->gnp_reconnect_interval);
}
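
/*
 * A worked example, assuming min/max reconnect tunables of 1s/60s: the
 * first close arms an immediate reconnect and a 1s interval; each later
 * close waits the current interval and then grows it by 0.5s (half the
 * minimum), so the delays run 0, 1, 1.5, 2, ... seconds, capped at 60s.
 */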

/* needs kgnilnd_data.kgn_peer_conn_lock held */
kgn_peer_t *
kgnilnd_find_peer_locked(lnet_nid_t nid)
{
        struct list_head *peer_list = kgnilnd_nid2peerlist(nid);
        kgn_peer_t       *peer;

        /* Chopping nid down to only NIDADDR using LNET_NIDADDR so we only
         * have a single peer per device instead of a peer per nid/net combo.
         */
        list_for_each_entry(peer, peer_list, gnp_list) {
                if (LNET_NIDADDR(nid) != LNET_NIDADDR(peer->gnp_nid))
                        continue;

                CDEBUG(D_NET, "got peer [%p] -> %s c %d (%d)\n",
                       peer, libcfs_nid2str(nid),
                       peer->gnp_connecting,
                       atomic_read(&peer->gnp_refcount));
                return peer;
        }
        return NULL;
}
1244
1245 /* need write_lock on kgn_peer_conn_lock */
1246 void
1247 kgnilnd_unlink_peer_locked(kgn_peer_t *peer)
1248 {
1249         LASSERTF(list_empty(&peer->gnp_conns),
1250                 "peer 0x%p->%s\n",
1251                  peer, libcfs_nid2str(peer->gnp_nid));
1252         LASSERTF(list_empty(&peer->gnp_tx_queue),
1253                 "peer 0x%p->%s\n",
1254                  peer, libcfs_nid2str(peer->gnp_nid));
1255         LASSERTF(kgnilnd_peer_active(peer),
1256                 "peer 0x%p->%s\n",
1257                  peer, libcfs_nid2str(peer->gnp_nid));
1258         CDEBUG(D_NET, "unlinking peer 0x%p->%s\n",
1259                 peer, libcfs_nid2str(peer->gnp_nid));
1260
1261         list_del_init(&peer->gnp_list);
1262         kgnilnd_data.kgn_peer_version++;
1263         kgnilnd_admin_decref(kgnilnd_data.kgn_npending_unlink);
1264         /* lose peerlist's ref */
1265         kgnilnd_peer_decref(peer);
1266 }
1267
1268 int
1269 kgnilnd_get_peer_info(int index,
1270                       kgn_peer_t **found_peer,
1271                       lnet_nid_t *id, __u32 *nic_addr,
1272                       int *refcount, int *connecting)
1273 {
1274         struct list_head  *ptmp;
1275         kgn_peer_t        *peer;
1276         int               i;
1277         int               rc = -ENOENT;
1278
1279         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1280
1281         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1282
1283                 list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
1284                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1285
1286                         if (index-- > 0)
1287                                 continue;
1288
1289                         CDEBUG(D_NET, "found peer %p (%s) at index %d\n",
1290                                peer, libcfs_nid2str(peer->gnp_nid), index);
1291
1292                         *found_peer  = peer;
1293                         *id          = peer->gnp_nid;
1294                         *nic_addr    = peer->gnp_host_id;
1295                         *refcount    = atomic_read(&peer->gnp_refcount);
1296                         *connecting  = peer->gnp_connecting;
1297
1298                         rc = 0;
1299                         goto out;
1300                 }
1301         }
1302 out:
1303         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1304         if (rc)
1305                 CDEBUG(D_NET, "no gni peer at index %d\n", index);
1306         return rc;
1307 }
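/*
 * Usage sketch (illustrative): callers walk the peer table by passing
 * increasing indices until -ENOENT, which is exactly how the
 * IOC_LIBCFS_GET_PEER ioctl below iterates:
 *
 *	kgn_peer_t *p;
 *	lnet_nid_t  nid;
 *	__u32       addr;
 *	int         ref, conn, idx;
 *
 *	for (idx = 0; ; idx++) {
 *		if (kgnilnd_get_peer_info(idx, &p, &nid, &addr,
 *					  &ref, &conn) != 0)
 *			break;	// -ENOENT: table exhausted
 *		// one peer snapshot consumed per call
 *	}
 */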
1308
1309 /* requires write_lock on kgn_peer_conn_lock held */
1310 void
1311 kgnilnd_add_peer_locked(lnet_nid_t nid, kgn_peer_t *new_stub_peer, kgn_peer_t **peerp)
1312 {
1313         kgn_peer_t        *peer, *peer2;
1314
1315         LASSERTF(new_stub_peer != NULL, "bad stub peer for nid %s\n",
1316                  libcfs_nid2str(nid));
1317
1318         peer2 = kgnilnd_find_peer_locked(nid);
1319         if (peer2 != NULL) {
1320                 /* A peer was created during the lock transition, so drop
1321                  * the new one we created */
1322                 kgnilnd_peer_decref(new_stub_peer);
1323                 peer = peer2;
1324         } else {
1325                 peer = new_stub_peer;
1326                 /* peer table takes existing ref on peer */
1327
1328                 LASSERTF(!kgnilnd_peer_active(peer),
1329                         "peer 0x%p->%s already in peer table\n",
1330                         peer, libcfs_nid2str(peer->gnp_nid));
1331                 list_add_tail(&peer->gnp_list,
1332                               kgnilnd_nid2peerlist(nid));
1333                 kgnilnd_data.kgn_peer_version++;
1334         }
1335
1336         LASSERTF(peer->gnp_net != NULL, "peer 0x%p->%s with NULL net\n",
1337                  peer, libcfs_nid2str(peer->gnp_nid));
1338         *peerp = peer;
1339 }
1340
1341 int
1342 kgnilnd_add_peer(kgn_net_t *net, lnet_nid_t nid, kgn_peer_t **peerp)
1343 {
1344         kgn_peer_t        *peer;
1345         int                rc;
1346         int                node_state;
1347         ENTRY;
1348
1349         if (nid == LNET_NID_ANY)
1350                 RETURN(-EINVAL);
1351
1352         node_state = kgnilnd_get_node_state(LNET_NIDADDR(nid));
1353
1354         /* NB - this will not block during normal operations -
1355          * the only writer of this is in the startup/shutdown path. */
1356         rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
1357         if (!rc) {
1358                 rc = -ESHUTDOWN;
1359                 RETURN(rc);
1360         }
1361         rc = kgnilnd_create_peer_safe(&peer, nid, net, node_state);
1362         if (rc != 0) {
1363                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1364                 RETURN(rc);
1365         }
1366
1367         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1368         up_read(&kgnilnd_data.kgn_net_rw_sem);
1369
1370         kgnilnd_add_peer_locked(nid, peer, peerp);
1371
1372         CDEBUG(D_NET, "peer 0x%p->%s connecting %d\n",
1373                *peerp, libcfs_nid2str((*peerp)->gnp_nid),
1374                (*peerp)->gnp_connecting);
1375
1376         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1377         RETURN(0);
1378 }
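/*
 * Note on the locking above (illustrative summary): kgn_net_rw_sem fences
 * peer creation against net teardown, and kgn_peer_conn_lock is taken
 * *before* the semaphore is dropped, so the new peer cannot be unlinked
 * between the two critical sections:
 *
 *	down_read_trylock(&kgn_net_rw_sem)	// failure => -ESHUTDOWN
 *	  kgnilnd_create_peer_safe()		// may sleep, sem only
 *	  write_lock(&kgn_peer_conn_lock)	// overlap the two locks
 *	up_read(&kgn_net_rw_sem)
 *	  kgnilnd_add_peer_locked()		// insert or resolve the race
 *	write_unlock(&kgn_peer_conn_lock)
 */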
1379
1380 /* needs write_lock on kgn_peer_conn_lock */
1381 void
1382 kgnilnd_cancel_peer_connect_locked(kgn_peer_t *peer, struct list_head *zombies)
1383 {
1384         kgn_tx_t        *tx, *txn;
1385
1386         /* we do care about the state of gnp_connecting - we could be
1387          * between reconnect attempts, so try to find the dgram and cancel
1388          * the TX anyway. If we are in the process of posting, DON'T do
1389          * anything; once it fails or succeeds we can nuke the connect
1390          * attempt. We have no idea where in kgnilnd_post_dgram we are, so
1391          * we can't attempt to cancel until the function is done.
1392          */
1393
1394         /* make sure peer isn't in process of connecting or waiting for connect */
1395         spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1396         if (!(list_empty(&peer->gnp_connd_list))) {
1397                 list_del_init(&peer->gnp_connd_list);
1398                 /* remove connd ref */
1399                 kgnilnd_peer_decref(peer);
1400         }
1401         spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1402
1403         if (peer->gnp_connecting == GNILND_PEER_POSTING || peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH) {
1404                 peer->gnp_connecting = GNILND_PEER_NEEDS_DEATH;
1405                 /* We are in the process of posting right now; the xchg set it
1406                  * up for us to cancel the connect, so we are finished for now */
1407         } else {
1408                 /* no need for an exchange - we hold the peer lock and it's ready for us to nuke */
1409                 LASSERTF(peer->gnp_connecting != GNILND_PEER_POSTING,
1410                         "Peer in invalid state 0x%p->%s, connecting %d\n",
1411                         peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
1412                 peer->gnp_connecting = GNILND_PEER_IDLE;
1413                 set_mb(peer->gnp_last_dgram_errno, -ETIMEDOUT);
1414                 kgnilnd_find_and_cancel_dgram(peer->gnp_net->gnn_dev,
1415                                                       peer->gnp_nid);
1416         }
1417
1418         /* The least we can do is nuke the TXs no matter what... */
1419         list_for_each_entry_safe(tx, txn, &peer->gnp_tx_queue, tx_list) {
1420                 kgnilnd_tx_del_state_locked(tx, peer, NULL,
1421                                            GNILND_TX_ALLOCD);
1422                 list_add_tail(&tx->tx_list, zombies);
1423         }
1424 }
1425
1426 /* needs write_lock on kgn_peer_conn_lock */
1427 void
1428 kgnilnd_del_peer_locked(kgn_peer_t *peer, int error)
1429 {
1430         /* this peer could be passive and only held for purgatory,
1431          * take a ref to ensure it doesn't disappear in this function */
1432         kgnilnd_peer_addref(peer);
1433
1434         CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);
1435
1436         /* if purgatory release cleared it out, don't try again */
1437         if (kgnilnd_peer_active(peer)) {
1438                 /* always do this to allow kgnilnd_start_connect and
1439                  * kgnilnd_finish_connect to catch this before they
1440                  * wrap up their operations */
1441                 if (kgnilnd_can_unlink_peer_locked(peer)) {
1442                         /* already released purgatory, so only active
1443                          * conns hold it */
1444                         kgnilnd_unlink_peer_locked(peer);
1445                 } else {
1446                         kgnilnd_close_peer_conns_locked(peer, error);
1447                         /* peer unlinks itself when last conn is closed */
1448                 }
1449         }
1450
1451         /* we are done, release back to the wild */
1452         kgnilnd_peer_decref(peer);
1453 }
1454
1455 int
1456 kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
1457                           int error)
1458 {
1459         LIST_HEAD               (zombies);
1461         struct list_head        *ptmp, *pnxt;
1462         kgn_peer_t              *peer;
1463         int                     lo;
1464         int                     hi;
1465         int                     i;
1466         int                     rc = -ENOENT;
1467
1468         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1469
1470         if (nid != LNET_NID_ANY)
1471                 lo = hi = kgnilnd_nid2peerlist(nid) - kgnilnd_data.kgn_peers;
1472         else {
1473                 lo = 0;
1474                 hi = *kgnilnd_tunables.kgn_peer_hash_size - 1;
1475                 /* wildcards always succeed */
1476                 rc = 0;
1477         }
1478
1479         for (i = lo; i <= hi; i++) {
1480                 list_for_each_safe(ptmp, pnxt, &kgnilnd_data.kgn_peers[i]) {
1481                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1482
1483                         LASSERTF(peer->gnp_net != NULL,
1484                                 "peer %p (%s) with NULL net\n",
1485                                  peer, libcfs_nid2str(peer->gnp_nid));
1486
1487                         if (net != NULL && peer->gnp_net != net)
1488                                 continue;
1489
1490                         if (!(nid == LNET_NID_ANY || LNET_NIDADDR(peer->gnp_nid) == LNET_NIDADDR(nid)))
1491                                 continue;
1492
1493                         /* In both cases, we want to stop any in-flight
1494                          * connect attempts */
1495                         kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1496
1497                         switch (command) {
1498                         case GNILND_DEL_CONN:
1499                                 kgnilnd_close_peer_conns_locked(peer, error);
1500                                 break;
1501                         case GNILND_DEL_PEER:
1502                                 peer->gnp_pending_unlink = 1;
1503                                 kgnilnd_admin_addref(kgnilnd_data.kgn_npending_unlink);
1504                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1505                                 kgnilnd_del_peer_locked(peer, error);
1506                                 break;
1507                         case GNILND_CLEAR_PURGATORY:
1508                                 /* Mark everything ready for detach; the reaper will
1509                                  * clean up once we release the kgn_peer_conn_lock
1510                                  */
1511                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1512                                 peer->gnp_last_errno = -EISCONN;
1513                                 /* clear reconnect state so the peer can reconnect soon */
1514                                 peer->gnp_reconnect_time = 0;
1515                                 peer->gnp_reconnect_interval = 0;
1516                                 break;
1517                         default:
1518                                 CERROR("bad command %d\n", command);
1519                                 LBUG();
1520                         }
1521                         /* we matched something */
1522                         rc = 0;
1523                 }
1524         }
1525
1526         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1527
1528         /* nuke peer TX */
1529         kgnilnd_txlist_done(&zombies, error);
1530
1531         /* This function does not normally return until the commands it initiated
1532          * have completed, since they must work their way through the other threads.
1533          * In the case of shutdown, threads are not woken up until after this call
1534          * is initiated, so we cannot wait and must just return. The same applies to
1535          * a stack reset: we shouldn't wait, as the reset thread handles the closing.
1536          */
1537
1538         CFS_RACE(CFS_FAIL_GNI_RACE_RESET);
1539
1540         if (error == -ENOTRECOVERABLE || error == -ESHUTDOWN) {
1541                 return rc;
1542         }
1543
1544         i = 4;
1545         while (atomic_read(&kgnilnd_data.kgn_npending_conns)   ||
1546                atomic_read(&kgnilnd_data.kgn_npending_detach)  ||
1547                atomic_read(&kgnilnd_data.kgn_npending_unlink)) {
1548
1549                 set_current_state(TASK_UNINTERRUPTIBLE);
1550                 schedule_timeout(cfs_time_seconds(1));
1551                 i++;
1552
1553                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, "Waiting on %d peers %d closes %d detaches\n",
1554                                 atomic_read(&kgnilnd_data.kgn_npending_unlink),
1555                                 atomic_read(&kgnilnd_data.kgn_npending_conns),
1556                                 atomic_read(&kgnilnd_data.kgn_npending_detach));
1557         }
1558
1559         return rc;
1560 }
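/*
 * Note on the wait loop above (illustrative): ((i & (-i)) == i) isolates the
 * lowest set bit of i and is therefore true only when i is a power of two,
 * so the "Waiting on ..." message escalates to D_WARNING with exponentially
 * decreasing frequency:
 *
 *	i = 8, 16, 32, 64, ...	-> D_WARNING
 *	all other i		-> D_NET
 */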
1561
1562 kgn_conn_t *
1563 kgnilnd_get_conn_by_idx(int index)
1564 {
1565         kgn_peer_t        *peer;
1566         struct list_head  *ptmp;
1567         kgn_conn_t        *conn;
1568         struct list_head  *ctmp;
1569         int                i;
1570
1572         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1573                 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1574                 list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
1575
1576                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1577
1578                         list_for_each(ctmp, &peer->gnp_conns) {
1579                                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
1580
1581                                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1582                                         continue;
1583
1584                                 if (index-- > 0)
1585                                         continue;
1586
1587                                 CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
1588                                        libcfs_nid2str(conn->gnc_peer->gnp_nid),
1589                                        atomic_read(&conn->gnc_refcount));
1590                                 kgnilnd_conn_addref(conn);
1591                                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1592                                 return conn;
1593                         }
1594                 }
1595                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1596         }
1597
1598         return NULL;
1599 }
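/*
 * Usage sketch (illustrative): the conn is returned with a reference held,
 * so every successful lookup must be paired with kgnilnd_conn_decref(), as
 * the IOC_LIBCFS_GET_CONN handler below does:
 *
 *	kgn_conn_t *conn = kgnilnd_get_conn_by_idx(0);
 *
 *	if (conn != NULL) {
 *		// ... inspect conn fields ...
 *		kgnilnd_conn_decref(conn);	// drop the lookup ref
 *	}
 */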
1600
1601 int
1602 kgnilnd_get_conn_info(kgn_peer_t *peer,
1603                       int *device_id, __u64 *peerstamp,
1604                       int *tx_seq, int *rx_seq,
1605                       int *fmaq_len, int *nfma, int *nrdma)
1606 {
1607         kgn_conn_t        *conn;
1608         int               rc = 0;
1609
1610         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1611
1612         conn = kgnilnd_find_conn_locked(peer);
1613         if (conn == NULL) {
1614                 rc = -ENOENT;
1615                 goto out;
1616         }
1617
1618         *device_id = conn->gnc_device->gnd_host_id;
1619         *peerstamp = conn->gnc_peerstamp;
1620         *tx_seq = atomic_read(&conn->gnc_tx_seq);
1621         *rx_seq = atomic_read(&conn->gnc_rx_seq);
1622         *fmaq_len = kgnilnd_count_list(&conn->gnc_fmaq);
1623         *nfma = atomic_read(&conn->gnc_nlive_fma);
1624         *nrdma = atomic_read(&conn->gnc_nlive_rdma);
1625 out:
1626         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1627         return rc;
1628 }
1629
1630 /* needs write_lock on kgn_peer_conn_lock */
1631 int
1632 kgnilnd_close_peer_conns_locked(kgn_peer_t *peer, int why)
1633 {
1634         kgn_conn_t         *conn;
1635         struct list_head   *ctmp, *cnxt;
1636         int                 count = 0;
1637
1638         list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
1639                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
1640
1641                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1642                         continue;
1643
1644                 count++;
1645                 /* we mark gnc_needs_closing and increment kgn_npending_conns so that
1646                  * kgnilnd_del_conn_or_peer can wait on the other threads closing
1647                  * and cleaning up the connection.
1648                  */
1649                 if (!conn->gnc_needs_closing) {
1650                         conn->gnc_needs_closing = 1;
1651                         kgnilnd_admin_addref(kgnilnd_data.kgn_npending_conns);
1652                 }
1653                 kgnilnd_close_conn_locked(conn, why);
1654         }
1655         return count;
1656 }
1657
1658 int
1659 kgnilnd_report_node_state(lnet_nid_t nid, int down)
1660 {
1661         int         rc;
1662         kgn_peer_t  *peer, *new_peer;
1663         LIST_HEAD(zombies);
1664
1665         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1666         peer = kgnilnd_find_peer_locked(nid);
1667
1668         if (peer == NULL) {
1669                 int       i;
1670                 int       found_net = 0;
1671                 kgn_net_t *net;
1672
1673                 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1674
1675                 /* Don't add a peer for node up events */
1676                 if (down == GNILND_RCA_NODE_UP) {
1677                         return 0;
1678                 }
1679
1680                 /* find any valid net - we don't care which one... */
1681                 down_read(&kgnilnd_data.kgn_net_rw_sem);
1682                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
1683                         list_for_each_entry(net, &kgnilnd_data.kgn_nets[i],
1684                                             gnn_list) {
1685                                 found_net = 1;
1686                                 break;
1687                         }
1688
1689                         if (found_net) {
1690                                 break;
1691                         }
1692                 }
1693                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1694
1695                 if (!found_net) {
1696                         CNETERR("Could not find a net for nid %lld\n", nid);
1697                         return 1;
1698                 }
1699
1700                 /* The nid passed in does not yet contain the net portion.
1701                  * Let's build it up now
1702                  */
1703                 nid = LNET_MKNID(LNET_NIDNET(net->gnn_ni->ni_nid), nid);
1704                 rc = kgnilnd_add_peer(net, nid, &new_peer);
1705
1706                 if (rc) {
1707                         CNETERR("Could not add peer for nid %lld, rc %d\n",
1708                                 nid, rc);
1709                         return 1;
1710                 }
1711
1712                 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1713                 peer = kgnilnd_find_peer_locked(nid);
1714
1715                 if (peer == NULL) {
1716                         CNETERR("Could not find peer for nid %lld\n", nid);
1717                         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1718                         return 1;
1719                 }
1720         }
1721
1722         peer->gnp_down = down;
1723
1724         if (down == GNILND_RCA_NODE_DOWN) {
1725                 kgn_conn_t *conn;
1726
1727                 peer->gnp_down_event_time = jiffies;
1728                 kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1729                 conn = kgnilnd_find_conn_locked(peer);
1730
1731                 if (conn != NULL) {
1732                         kgnilnd_close_conn_locked(conn, -ENETRESET);
1733                 }
1734         } else {
1735                 peer->gnp_up_event_time = jiffies;
1736         }
1737
1738         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1739
1740         if (down == GNILND_RCA_NODE_DOWN) {
1741                 /* using ENETRESET so we don't get messages from
1742                  * kgnilnd_tx_done
1743                  */
1744                 kgnilnd_txlist_done(&zombies, -ENETRESET);
1745                 kgnilnd_peer_notify(peer, -ECONNRESET, 0);
1746                 LCONSOLE_INFO("Received down event for nid %lld\n", nid);
1747         }
1748
1749         return 0;
1750 }
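/*
 * Illustrative example of the NID fixup above: RCA events carry only the
 * address portion of the NID, so the net is borrowed from any local net.
 * With a hypothetical raw address of 42:
 *
 *	nid = LNET_MKNID(LNET_NIDNET(net->gnn_ni->ni_nid), 42);
 *	// now a full NID, suitable for kgnilnd_add_peer() and
 *	// kgnilnd_find_peer_locked()
 */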
1751
1752 int
1753 kgnilnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
1754 {
1755         struct libcfs_ioctl_data *data = arg;
1756         kgn_net_t                *net = ni->ni_data;
1757         int                       rc = -EINVAL;
1758
1759         LASSERT(ni == net->gnn_ni);
1760
1761         switch (cmd) {
1762         case IOC_LIBCFS_GET_PEER: {
1763                 lnet_nid_t   nid = 0;
1764                 kgn_peer_t  *peer = NULL;
1765                 __u32 nic_addr = 0;
1766                 __u64 peerstamp = 0;
1767                 int peer_refcount = 0, peer_connecting = 0;
1768                 int device_id = 0;
1769                 int tx_seq = 0, rx_seq = 0;
1770                 int fmaq_len = 0, nfma = 0, nrdma = 0;
1771
1772                 rc = kgnilnd_get_peer_info(data->ioc_count, &peer,
1773                                            &nid, &nic_addr, &peer_refcount,
1774                                            &peer_connecting);
1775                 if (rc)
1776                         break;
1777
1778                 /* Barf */
1779                 /* LNET_MKNID hides from LNet the multiplexing/demultiplexing of
1780                  * connections and peers. LNet assumes a conn and peer per net;
1781                  * LNET_MKNID/LNET_NIDADDR let LNet see what it expects instead of
1782                  * the underlying network actually used to send the data. */
1783                 data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(nid));
1784                 data->ioc_flags  = peer_connecting;
1785                 data->ioc_count  = peer_refcount;
1786
1787                 rc = kgnilnd_get_conn_info(peer, &device_id, &peerstamp,
1788                                            &tx_seq, &rx_seq, &fmaq_len,
1789                                            &nfma, &nrdma);
1790
1791                 /* This is allowable - a persistent peer might not
1792                  * have a connection yet */
1793                 if (rc) {
1794                         /* flag to indicate we are not connected -
1795                          * need to print as such */
1796                         data->ioc_flags |= (1<<16);
1797                         rc = 0;
1798                 } else {
1799                         /* still barf */
1800                         data->ioc_net = device_id;
1801                         data->ioc_u64[0] = peerstamp;
1802                         data->ioc_u32[0] = fmaq_len;
1803                         data->ioc_u32[1] = nfma;
1804                         data->ioc_u32[2] = tx_seq;
1805                         data->ioc_u32[3] = rx_seq;
1806                         data->ioc_u32[4] = nrdma;
1807                 }
1808                 break;
1809         }
1810         case IOC_LIBCFS_ADD_PEER: {
1811                 /* just dummy value to allow using common interface */
1812                 kgn_peer_t      *peer;
1813                 rc = kgnilnd_add_peer(net, data->ioc_nid, &peer);
1814                 break;
1815         }
1816         case IOC_LIBCFS_DEL_PEER: {
1817                 /* NULL is passed in so it affects all peers in existence, regardless
1818                  * of network, as the peer may not exist on the network LNET believes
1819                  * it to be on. */
1820                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1821                                               GNILND_DEL_PEER, -EUCLEAN);
1822                 break;
1823         }
1824         case IOC_LIBCFS_GET_CONN: {
1825                 kgn_conn_t *conn = kgnilnd_get_conn_by_idx(data->ioc_count);
1826
1827                 if (conn == NULL)
1828                         rc = -ENOENT;
1829                 else {
1830                         rc = 0;
1831                         /* LNET_MKNID is used to build the correct address based on what LNET wants to see instead of
1832                          * the generic connection that is used to send the data
1833                          */
1834                         data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(conn->gnc_peer->gnp_nid));
1835                         data->ioc_u32[0] = conn->gnc_device->gnd_id;
1836                         kgnilnd_conn_decref(conn);
1837                 }
1838                 break;
1839         }
1840         case IOC_LIBCFS_CLOSE_CONNECTION: {
1841                 /* use error = -ENETRESET to indicate it was an lctl disconnect */
1842                 /* NULL is passed in so it affects all the nets, as the connection is
1843                  * virtual and may not exist on the network LNET believes it to be on.
1844                  */
1845                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1846                                               GNILND_DEL_CONN, -ENETRESET);
1847                 break;
1848         }
1849         case IOC_LIBCFS_PUSH_CONNECTION: {
1850                 /* we use this to flush purgatory */
1851                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1852                                               GNILND_CLEAR_PURGATORY, -EUCLEAN);
1853                 break;
1854         }
1855         case IOC_LIBCFS_REGISTER_MYNID: {
1856                 /* Ignore if this is a noop */
1857                 if (data->ioc_nid == ni->ni_nid) {
1858                         rc = 0;
1859                 } else {
1860                         CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
1861                                libcfs_nid2str(data->ioc_nid),
1862                                libcfs_nid2str(ni->ni_nid));
1863                         rc = -EINVAL;
1864                 }
1865                 break;
1866         }
1867         }
1868
1869         return rc;
1870 }
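/*
 * Decoding note for the GET_PEER reply above (illustrative): bit 16 of
 * ioc_flags is overloaded as a "not connected" marker on top of the
 * gnp_connecting state carried in the low bits:
 *
 *	int connecting   = data->ioc_flags & 0xffff;
 *	int disconnected = (data->ioc_flags >> 16) & 1;
 */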
1871
1872 void
1873 kgnilnd_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
1874 {
1875         kgn_net_t               *net = ni->ni_data;
1876         kgn_tx_t                *tx;
1877         kgn_peer_t              *peer = NULL;
1878         kgn_conn_t              *conn = NULL;
1879         lnet_process_id_t       id = {
1880                 .nid = nid,
1881                 .pid = LNET_PID_LUSTRE,
1882         };
1883         ENTRY;
1884
1885         /* I expect to find him, so only take a read lock */
1886         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1887         peer = kgnilnd_find_peer_locked(nid);
1888         if (peer != NULL) {
1889                 /* LIE if in a quiesce - we will update the timeouts after,
1890                  * but we don't want sends failing during it */
1891                 if (kgnilnd_data.kgn_quiesce_trigger) {
1892                         *when = jiffies;
1893                         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1894                         GOTO(out, 0);
1895                 }
1896
1897                 /* Update to best guess, might refine on later checks */
1898                 *when = peer->gnp_last_alive;
1899
1900                 /* we have a peer, how about a conn? */
1901                 conn = kgnilnd_find_conn_locked(peer);
1902
1903                 if (conn == NULL)  {
1904                         /* if there is no conn, check peer last errno to see if clean disconnect
1905                          * - if it was, we lie to LNet because we believe a TX would complete
1906                          * on reconnect */
1907                         if (kgnilnd_conn_clean_errno(peer->gnp_last_errno)) {
1908                                 *when = jiffies;
1909                         }
1910                         /* we still want to fire a TX and new conn in this case */
1911                 } else {
1912                         /* gnp_last_alive is valid, run for the hills */
1913                         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1914                         GOTO(out, 0);
1915                 }
1916         }
1917         /* if we get here, either we have no peer or no conn for him, so fire off
1918          * new TX to trigger conn setup */
1919         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1920
1921         /* if we couldn't find him, we'll fire up a TX and get connected -
1922          * if we don't do this, after ni_peer_timeout LNet will declare him dead.
1923          * So really we treat kgnilnd_query as a bit of a 'connect now' type
1924          * event, because LNet only calls this when it wants to send.
1925          *
1926          * Use a real TX for this to get the proper gnp_tx_queue behavior, etc.
1927          * Normally we'd use kgnilnd_send_ctlmsg for this, but we don't really
1928          * care that this goes out quickly since we already know we need a new
1929          * conn formed */
1930         if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
1931                 return;
1932
1933         tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, ni->ni_nid);
1934         if (tx != NULL) {
1935                 kgnilnd_launch_tx(tx, net, &id);
1936         }
1937 out:
1938         CDEBUG(D_NETTRACE, "peer 0x%p->%s when %lu\n", peer,
1939                libcfs_nid2str(nid), *when);
1940         EXIT;
1941 }
1942
1943 int
1944 kgnilnd_dev_init(kgn_device_t *dev)
1945 {
1946         gni_return_t      rrc;
1947         int               rc = 0;
1948         unsigned int      cq_size;
1949         ENTRY;
1950
1951         /* size of these CQs should be able to accommodate the outgoing
1952          * RDMA and SMSG transactions.  Since we don't really know what we
1953          * need here, we'll take credits * 2 * 3 to allow a bunch.
1954          * We need to dig into this more with the performance work. */
1955         cq_size = *kgnilnd_tunables.kgn_credits * 2 * 3;
1956
1957         rrc = kgnilnd_cdm_create(dev->gnd_id, *kgnilnd_tunables.kgn_ptag,
1958                                  GNILND_COOKIE, 0,
1959                                  &dev->gnd_domain);
1960         if (rrc != GNI_RC_SUCCESS) {
1961                 CERROR("Can't create CDM %d (%d)\n", dev->gnd_id, rrc);
1962                 GOTO(failed, rc = -ENODEV);
1963         }
1964
1965         rrc = kgnilnd_cdm_attach(dev->gnd_domain, dev->gnd_id,
1966                                  &dev->gnd_host_id, &dev->gnd_handle);
1967         if (rrc != GNI_RC_SUCCESS) {
1968                 CERROR("Can't attach CDM to device %d (%d)\n",
1969                         dev->gnd_id, rrc);
1970                 GOTO(failed, rc = -ENODEV);
1971         }
1972
1973         /* a bit gross, but not much we can do - Aries Sim doesn't have
1974          * hardcoded NIC/NID that we can use */
1975         rc = kgnilnd_setup_nic_translation(dev->gnd_host_id);
1976         if (rc != 0)
1977                 GOTO(failed, rc = -ENODEV);
1978
1979         /* only dev 0 gets the errors - no need to reset the stack twice
1980          * - this works because we have a single PTAG, if we had more
1981          * then we'd need to have multiple handlers */
1982         if (dev->gnd_id == 0) {
1983                 rrc = kgnilnd_subscribe_errors(dev->gnd_handle,
1984                                                 GNI_ERRMASK_CRITICAL |
1985                                                 GNI_ERRMASK_UNKNOWN_TRANSACTION,
1986                                               0, NULL, kgnilnd_critical_error,
1987                                               &dev->gnd_err_handle);
1988                 if (rrc != GNI_RC_SUCCESS) {
1989                         CERROR("Can't subscribe for errors on device %d: rc %d\n",
1990                                 dev->gnd_id, rrc);
1991                         GOTO(failed, rc = -ENODEV);
1992                 }
1993
1994                 rc = kgnilnd_set_quiesce_callback(dev->gnd_handle,
1995                                                   kgnilnd_quiesce_end_callback);
1996                 if (rc != GNI_RC_SUCCESS) {
1997                         CERROR("Can't subscribe for quiesce callback on device %d: rc %d\n",
1998                                 dev->gnd_id, rc);
1999                         GOTO(failed, rc = -ENODEV);
2000                 }
2001         }
2002
2003         rc = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_IP, &kgnilnd_data.kgn_sock);
2004         if (rc < 0) {
2005                 CERROR("sock_create returned %d\n", rc);
2006                 GOTO(failed, rc);
2007         }
2008
2009         rc = kgnilnd_nicaddr_to_nid(dev->gnd_host_id, &dev->gnd_nid);
2010         if (rc < 0) {
2011                 /* log messages during startup */
2012                 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
2013                         CERROR("couldn't translate host_id 0x%x to nid. rc %d\n",
2014                                 dev->gnd_host_id, rc);
2015                 }
2016                 GOTO(failed, rc = -ESRCH);
2017         }
2018         CDEBUG(D_NET, "NIC %x -> NID %d\n", dev->gnd_host_id, dev->gnd_nid);
2019
2020         rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
2021                                 0, kgnilnd_device_callback,
2022                                 dev->gnd_id, &dev->gnd_snd_rdma_cqh);
2023         if (rrc != GNI_RC_SUCCESS) {
2024                 CERROR("Can't create rdma send cq size %u for device "
2025                        "%d (%d)\n", cq_size, dev->gnd_id, rrc);
2026                 GOTO(failed, rc = -EINVAL);
2027         }
2028
2029         rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
2030                         0, kgnilnd_device_callback, dev->gnd_id,
2031                         &dev->gnd_snd_fma_cqh);
2032         if (rrc != GNI_RC_SUCCESS) {
2033                 CERROR("Can't create fma send cq size %u for device %d (%d)\n",
2034                        cq_size, dev->gnd_id, rrc);
2035                 GOTO(failed, rc = -EINVAL);
2036         }
2037
2038         /* This one we size differently - overflows are possible and it needs to be
2039          * sized based on machine size */
2040         rrc = kgnilnd_cq_create(dev->gnd_handle,
2041                         *kgnilnd_tunables.kgn_fma_cq_size,
2042                         0, kgnilnd_device_callback, dev->gnd_id,
2043                         &dev->gnd_rcv_fma_cqh);
2044         if (rrc != GNI_RC_SUCCESS) {
2045                 CERROR("Can't create fma cq size %d for device %d (%d)\n",
2046                        *kgnilnd_tunables.kgn_fma_cq_size, dev->gnd_id, rrc);
2047                 GOTO(failed, rc = -EINVAL);
2048         }
2049
2050         RETURN(0);
2051
2052 failed:
2053         kgnilnd_dev_fini(dev);
2054         RETURN(rc);
2055 }
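/*
 * Worked example of the CQ sizing above (illustrative, assuming a
 * hypothetical credits tunable of 256): cq_size = 256 * 2 * 3 = 1536
 * entries for the send CQs, while the receive FMA CQ is sized separately
 * via kgn_fma_cq_size because it must scale with the machine size.
 */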
2056
2057 void
2058 kgnilnd_dev_fini(kgn_device_t *dev)
2059 {
2060         gni_return_t rrc;
2061         ENTRY;
2062
2063         /* At quiesce or reset time, need to loop through and clear gnd_ready_conns? */
2064         LASSERTF(list_empty(&dev->gnd_ready_conns) &&
2065                  list_empty(&dev->gnd_map_tx) &&
2066                  list_empty(&dev->gnd_rdmaq),
2067                  "dev 0x%p ready_conns %d@0x%p map_tx %d@0x%p rdmaq %d@0x%p\n",
2068                  dev, kgnilnd_count_list(&dev->gnd_ready_conns), &dev->gnd_ready_conns,
2069                  kgnilnd_count_list(&dev->gnd_map_tx), &dev->gnd_map_tx,
2070                  kgnilnd_count_list(&dev->gnd_rdmaq), &dev->gnd_rdmaq);
2071
2072         /* These should follow from tearing down all connections */
2073         LASSERTF(dev->gnd_map_nphys == 0 && dev->gnd_map_physnop == 0,
2074                 "%d physical mappings of %d pages still mapped\n",
2075                  dev->gnd_map_nphys, dev->gnd_map_physnop);
2076
2077         LASSERTF(dev->gnd_map_nvirt == 0 && dev->gnd_map_virtnob == 0,
2078                 "%d virtual mappings of "LPU64" bytes still mapped\n",
2079                  dev->gnd_map_nvirt, dev->gnd_map_virtnob);
2080
2081         LASSERTF(atomic_read(&dev->gnd_n_mdd) == 0 &&
2082                  atomic_read(&dev->gnd_n_mdd_held) == 0 &&
2083                  atomic64_read(&dev->gnd_nbytes_map) == 0,
2084                 "%d SMSG mappings of %ld bytes still mapped or held %d\n",
2085                  atomic_read(&dev->gnd_n_mdd),
2086                  atomic64_read(&dev->gnd_nbytes_map), atomic_read(&dev->gnd_n_mdd_held));
2087
2088         LASSERT(list_empty(&dev->gnd_map_list));
2089
2090         /* What other assertions needed to ensure all connections torn down ? */
2091
2092         /* check all counters == 0 (EP, MDD, etc) */
2093
2094         /* if we are resetting due to quiesce (stack reset), don't check
2095          * thread states */
2096         LASSERTF(kgnilnd_data.kgn_quiesce_trigger ||
2097                 atomic_read(&kgnilnd_data.kgn_nthreads) == 0,
2098                 "tried to shutdown with threads active\n");
2099
2100         if (dev->gnd_rcv_fma_cqh) {
2101                 rrc = kgnilnd_cq_destroy(dev->gnd_rcv_fma_cqh);
2102                 LASSERTF(rrc == GNI_RC_SUCCESS,
2103                         "bad rc from gni_cq_destroy on rcv_fma_cqh: %d\n", rrc);
2104                 dev->gnd_rcv_fma_cqh = NULL;
2105         }
2106
2107         if (dev->gnd_snd_rdma_cqh) {
2108                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_rdma_cqh);
2109                 LASSERTF(rrc == GNI_RC_SUCCESS,
2110                         "bad rc from gni_cq_destroy on send_rdma_cqh: %d\n", rrc);
2111                 dev->gnd_snd_rdma_cqh = NULL;
2112         }
2113
2114         if (dev->gnd_snd_fma_cqh) {
2115                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_fma_cqh);
2116                 LASSERTF(rrc == GNI_RC_SUCCESS,
2117                         "bad rc from gni_cq_destroy on snd_fma_cqh: %d\n", rrc);
2118                 dev->gnd_snd_fma_cqh = NULL;
2119         }
2120
2121         if (dev->gnd_err_handle) {
2122                 rrc = kgnilnd_release_errors(dev->gnd_err_handle);
2123                 LASSERTF(rrc == GNI_RC_SUCCESS,
2124                         "bad rc from gni_release_errors: %d\n", rrc);
2125                 dev->gnd_err_handle = NULL;
2126         }
2127
2128         if (dev->gnd_domain) {
2129                 rrc = kgnilnd_cdm_destroy(dev->gnd_domain);
2130                 LASSERTF(rrc == GNI_RC_SUCCESS,
2131                         "bad rc from gni_cdm_destroy: %d\n", rrc);
2132                 dev->gnd_domain = NULL;
2133         }
2134
2135         /* NULL after release so a later dev_fini can't release it twice */
2136         if (kgnilnd_data.kgn_sock) {
2137                 sock_release(kgnilnd_data.kgn_sock);
2138                 kgnilnd_data.kgn_sock = NULL;
2139         }
2137
2138         EXIT;
2139 }
2140
2141 int kgnilnd_base_startup(void)
2142 {
2143         struct timeval       tv;
2144         int                  pkmem = atomic_read(&libcfs_kmemory);
2145         int                  rc;
2146         int                  i, j;
2147         kgn_device_t        *dev;
2148         struct task_struct  *thrd;
2149
2150 #if defined(CONFIG_CRAY_XT) && !defined(CONFIG_CRAY_COMPUTE)
2151         /* limit how much memory can be allocated for fma blocks in
2152          * instances where many nodes need to reconnect at the same time */
2153         struct sysinfo si;
2154         si_meminfo(&si);
2155         kgnilnd_data.free_pages_limit = si.totalram/4;
2156 #endif
2157
2158         ENTRY;
2159
2160         LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_NOTHING,
2161                 "init %d\n", kgnilnd_data.kgn_init);
2162
2163         /* zero pointers, flags etc */
2164         memset(&kgnilnd_data, 0, sizeof(kgnilnd_data));
2165         kgnilnd_check_kgni_version();
2166
2167         /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
2168          * a unique (for all time) connstamp so we can uniquely identify
2169          * the sender.  The connstamp is an incrementing counter
2170          * initialised with seconds + microseconds at startup time.  So we
2171          * rely on NOT creating connections more frequently on average than
2172          * 1MHz to ensure we don't use old connstamps when we reboot. */
2173         do_gettimeofday(&tv);
2174         kgnilnd_data.kgn_connstamp =
2175                  kgnilnd_data.kgn_peerstamp =
2176                         (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
2177
2178         init_rwsem(&kgnilnd_data.kgn_net_rw_sem);
2179
2180         for (i = 0; i < GNILND_MAXDEVS; i++) {
2181                 kgn_device_t  *dev = &kgnilnd_data.kgn_devices[i];
2182
2183                 dev->gnd_id = i;
2184                 INIT_LIST_HEAD(&dev->gnd_ready_conns);
2185                 INIT_LIST_HEAD(&dev->gnd_map_tx);
2186                 INIT_LIST_HEAD(&dev->gnd_fma_buffs);
2187                 mutex_init(&dev->gnd_cq_mutex);
2188                 mutex_init(&dev->gnd_fmablk_mutex);
2189                 spin_lock_init(&dev->gnd_fmablk_lock);
2190                 init_waitqueue_head(&dev->gnd_waitq);
2191                 init_waitqueue_head(&dev->gnd_dgram_waitq);
2192                 init_waitqueue_head(&dev->gnd_dgping_waitq);
2193                 spin_lock_init(&dev->gnd_lock);
2194                 INIT_LIST_HEAD(&dev->gnd_map_list);
2195                 spin_lock_init(&dev->gnd_map_lock);
2196                 atomic_set(&dev->gnd_nfmablk, 0);
2197                 atomic_set(&dev->gnd_fmablk_vers, 1);
2198                 atomic_set(&dev->gnd_neps, 0);
2199                 atomic_set(&dev->gnd_canceled_dgrams, 0);
2200                 INIT_LIST_HEAD(&dev->gnd_connd_peers);
2201                 spin_lock_init(&dev->gnd_connd_lock);
2202                 spin_lock_init(&dev->gnd_dgram_lock);
2203                 spin_lock_init(&dev->gnd_rdmaq_lock);
2204                 INIT_LIST_HEAD(&dev->gnd_rdmaq);
2205                 init_rwsem(&dev->gnd_conn_sem);
2206
2207                 /* alloc & setup nid based dgram table */
2208                 LIBCFS_ALLOC(dev->gnd_dgrams,
2209                             sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2210
2211                 if (dev->gnd_dgrams == NULL)
2212                         GOTO(failed, rc = -ENOMEM);
2213
2214                 /* use j here - this runs inside the GNILND_MAXDEVS loop
2215                  * and must not clobber the outer counter i */
2216                 for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++) {
2217                         INIT_LIST_HEAD(&dev->gnd_dgrams[j]);
2218                 }
2217                 atomic_set(&dev->gnd_ndgrams, 0);
2218                 atomic_set(&dev->gnd_nwcdgrams, 0);
2219                 /* setup timer for RDMAQ processing */
2220                 setup_timer(&dev->gnd_rdmaq_timer, kgnilnd_schedule_device_timer,
2221                             (unsigned long)dev);
2222
2223                 /* setup timer for mapping processing */
2224                 setup_timer(&dev->gnd_map_timer, kgnilnd_schedule_device_timer,
2225                             (unsigned long)dev);
2226
2227         }
2228
2229         /* CQID 0 isn't allowed, set to MAX_MSG_ID - 1 to check for conflicts early */
2230         kgnilnd_data.kgn_next_cqid = GNILND_MAX_MSG_ID - 1;
2231         kgnilnd_data.kgn_new_min_timeout = *kgnilnd_tunables.kgn_timeout;
2232         init_waitqueue_head(&kgnilnd_data.kgn_reaper_waitq);
2233         init_waitqueue_head(&kgnilnd_data.kgn_ruhroh_waitq);
2234         spin_lock_init(&kgnilnd_data.kgn_reaper_lock);
2235
2236         mutex_init(&kgnilnd_data.kgn_quiesce_mutex);
2237         atomic_set(&kgnilnd_data.kgn_nquiesce, 0);
2238         atomic_set(&kgnilnd_data.kgn_npending_conns, 0);
2239         atomic_set(&kgnilnd_data.kgn_npending_unlink, 0);
2240         atomic_set(&kgnilnd_data.kgn_npending_detach, 0);
2241         atomic_set(&kgnilnd_data.kgn_rev_offset, 0);
2242         atomic_set(&kgnilnd_data.kgn_rev_length, 0);
2243         atomic_set(&kgnilnd_data.kgn_rev_copy_buff, 0);
2244
2245         /* OK to call kgnilnd_api_shutdown() to cleanup now */
2246         kgnilnd_data.kgn_init = GNILND_INIT_DATA;
2247         try_module_get(THIS_MODULE);
2248
2249         rwlock_init(&kgnilnd_data.kgn_peer_conn_lock);
2250
2251         LIBCFS_ALLOC(kgnilnd_data.kgn_peers,
2252                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2253
2254         if (kgnilnd_data.kgn_peers == NULL)
2255                 GOTO(failed, rc = -ENOMEM);
2256
2257         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2258                 INIT_LIST_HEAD(&kgnilnd_data.kgn_peers[i]);
2259         }
2260
2261         LIBCFS_ALLOC(kgnilnd_data.kgn_conns,
2262                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2263
2264         if (kgnilnd_data.kgn_conns == NULL)
2265                 GOTO(failed, rc = -ENOMEM);
2266
2267         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2268                 INIT_LIST_HEAD(&kgnilnd_data.kgn_conns[i]);
2269         }
2270
2271         LIBCFS_ALLOC(kgnilnd_data.kgn_nets,
2272                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_net_hash_size);
2273
2274         if (kgnilnd_data.kgn_nets == NULL)
2275                 GOTO(failed, rc = -ENOMEM);
2276
2277         for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
2278                 INIT_LIST_HEAD(&kgnilnd_data.kgn_nets[i]);
2279         }
2280
2281         kgnilnd_data.kgn_mbox_cache =
2282                 kmem_cache_create("kgn_mbox_block", KMALLOC_MAX_SIZE, 0,
2283                                   SLAB_HWCACHE_ALIGN, NULL);
2284         if (kgnilnd_data.kgn_mbox_cache == NULL) {
2285                 CERROR("Can't create slab for physical mbox blocks\n");
2286                 GOTO(failed, rc = -ENOMEM);
2287         }
2288
2289         kgnilnd_data.kgn_rx_cache =
2290                 kmem_cache_create("kgn_rx_t", sizeof(kgn_rx_t), 0, 0, NULL);
2291         if (kgnilnd_data.kgn_rx_cache == NULL) {
2292                 CERROR("Can't create slab for kgn_rx_t descriptors\n");
2293                 GOTO(failed, rc = -ENOMEM);
2294         }
2295
2296         kgnilnd_data.kgn_tx_cache =
2297                 kmem_cache_create("kgn_tx_t", sizeof(kgn_tx_t), 0, 0, NULL);
2298         if (kgnilnd_data.kgn_tx_cache == NULL) {
2299                 CERROR("Can't create slab for kgn_tx_t\n");
2300                 GOTO(failed, rc = -ENOMEM);
2301         }
2302
2303         kgnilnd_data.kgn_tx_phys_cache =
2304                 kmem_cache_create("kgn_tx_phys",
2305                                    LNET_MAX_IOV * sizeof(gni_mem_segment_t),
2306                                    0, 0, NULL);
2307         if (kgnilnd_data.kgn_tx_phys_cache == NULL) {
2308                 CERROR("Can't create slab for kgn_tx_phys\n");
2309                 GOTO(failed, rc = -ENOMEM);
2310         }
2311
2312         kgnilnd_data.kgn_dgram_cache =
2313                 kmem_cache_create("kgn_dgram_t", sizeof(kgn_dgram_t), 0, 0, NULL);
2314         if (kgnilnd_data.kgn_dgram_cache == NULL) {
2315                 CERROR("Can't create slab for outgoing datagrams\n");
2316                 GOTO(failed, rc = -ENOMEM);
2317         }
2318
2319         /* allocate a per-cpu array of pointers; each cpu gets its own
2320          * LNET_MAX_IOV array of page pointers below */
2320         kgnilnd_data.kgn_cksum_map_pages = kmalloc(num_possible_cpus() * sizeof (struct page *),
2321                                                    GFP_KERNEL);
2322         if (kgnilnd_data.kgn_cksum_map_pages == NULL) {
2323                 CERROR("Can't allocate vmap cksum pages\n");
2324                 GOTO(failed, rc = -ENOMEM);
2325         }
2326         kgnilnd_data.kgn_cksum_npages = num_possible_cpus();
2327         memset(kgnilnd_data.kgn_cksum_map_pages, 0,
2328                 kgnilnd_data.kgn_cksum_npages * sizeof (struct page *));
2329
2330         for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
2331                 kgnilnd_data.kgn_cksum_map_pages[i] = kmalloc(LNET_MAX_IOV * sizeof (struct page *),
2332                                                               GFP_KERNEL);
2333                 if (kgnilnd_data.kgn_cksum_map_pages[i] == NULL) {
2334                         CERROR("Can't allocate vmap cksum pages for cpu %d\n", i);
2335                         GOTO(failed, rc = -ENOMEM);
2336                 }
2337         }
2338
2339         LASSERT(kgnilnd_data.kgn_ndevs == 0);
2340
2341         /* Use all available GNI devices */
2342         for (i = 0; i < GNILND_MAXDEVS; i++) {
2343                 dev = &kgnilnd_data.kgn_devices[kgnilnd_data.kgn_ndevs];
2344
2345                 rc = kgnilnd_dev_init(dev);
2346                 if (rc == 0) {
2347                         /* Increment here so base_shutdown cleans it up */
2348                         kgnilnd_data.kgn_ndevs++;
2349
2350                         rc = kgnilnd_allocate_phys_fmablk(dev);
2351                         if (rc)
2352                                 GOTO(failed, rc);
2353                 }
2354         }
2355
2356         if (kgnilnd_data.kgn_ndevs == 0) {
2357                 CERROR("Can't initialise any GNI devices\n");
2358                 GOTO(failed, rc = -ENODEV);
2359         }
2360
2361         rc = kgnilnd_thread_start(kgnilnd_reaper, NULL, "kgnilnd_rpr", 0);
2362         if (rc != 0) {
2363                 CERROR("Can't spawn gnilnd reaper: %d\n", rc);
2364                 GOTO(failed, rc);
2365         }
2366
2367         rc = kgnilnd_start_rca_thread();
2368         if (rc != 0) {
2369                 CERROR("Can't spawn gnilnd rca: %d\n", rc);
2370                 GOTO(failed, rc);
2371         }
2372
2373         /*
2374          * Start ruhroh thread.  We can't use kgnilnd_thread_start() because
2375          * we don't want this thread included in kgnilnd_data.kgn_nthreads
2376          * count.  This thread controls quiesce, so it mustn't
2377          * quiesce itself.
2378          */
2379         thrd = kthread_run(kgnilnd_ruhroh_thread, NULL, "%s_%02d", "kgnilnd_rr", 0);
2380         if (IS_ERR(thrd)) {
2381                 rc = PTR_ERR(thrd);
2382                 CERROR("Can't spawn gnilnd ruhroh thread: %d\n", rc);
2383                 GOTO(failed, rc);
2384         }
2385
2386         /* threads will load balance across devs as they are available */
2387         for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
2388                 rc = kgnilnd_thread_start(kgnilnd_scheduler, (void *)((long)i),
2389                                           "kgnilnd_sd", i);
2390                 if (rc != 0) {
2391                         CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
2392                                i, rc);
2393                         GOTO(failed, rc);
2394                 }
2395         }
2396
2397         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2398                 dev = &kgnilnd_data.kgn_devices[i];
2399                 rc = kgnilnd_thread_start(kgnilnd_dgram_mover, dev,
2400                                           "kgnilnd_dg", dev->gnd_id);
2401                 if (rc != 0) {
2402                         CERROR("Can't spawn gnilnd dgram_mover[%d]: %d\n",
2403                                dev->gnd_id, rc);
2404                         GOTO(failed, rc);
2405                 }
2406
2407                 rc = kgnilnd_thread_start(kgnilnd_dgram_waitq, dev,
2408                                           "kgnilnd_dgn", dev->gnd_id);
2409                 if (rc != 0) {
2410                         CERROR("Can't spawn gnilnd dgram_waitq[%d]: %d\n",
2411                                 dev->gnd_id, rc);
2412                         GOTO(failed, rc);
2413                 }
2414
2415                 rc = kgnilnd_setup_wildcard_dgram(dev);
2416
2417                 if (rc != 0) {
2418                         CERROR("Can't create wildcard dgrams[%d]: %d\n",
2419                                 dev->gnd_id, rc);
2420                         GOTO(failed, rc);
2421                 }
2422         }
2423
2426         /* flag everything initialised */
2427         kgnilnd_data.kgn_init = GNILND_INIT_ALL;
2428         /*****************************************************/
2429
2430         CDEBUG(D_MALLOC, "initial kmem %d\n", pkmem);
2431         RETURN(0);
2432
2433 failed:
2434         kgnilnd_base_shutdown();
2435         kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
2436         RETURN(rc);
2437 }
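/*
 * Worked example of the connstamp seeding in kgnilnd_base_startup()
 * (illustrative): at tv_sec = 1400000000, tv_usec = 250000 the initial
 * stamp is 1400000000 * 1000000 + 250000 = 1400000000250000. Stamps then
 * increment per connection, so they stay unique across reboots as long as
 * conns are created at well under 1MHz on average.
 */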
2438
2439 void
2440 kgnilnd_base_shutdown(void)
2441 {
2442         int                     i, j;
2443         ENTRY;
2444
2445         while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_PAUSE_SHUTDOWN, 1)) {};
2446
2447         kgnilnd_data.kgn_wc_kill = 1;
2448
2449         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2450                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2451                 kgnilnd_cancel_wc_dgrams(dev);
2452                 kgnilnd_cancel_dgrams(dev);
2453                 kgnilnd_del_conn_or_peer(NULL, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
2454                 kgnilnd_wait_for_canceled_dgrams(dev);
2455         }
2456
2457         /* We need to verify there are no conns left before we let the threads
2458          * shut down otherwise we could clean up the peers but still have
2459          * some outstanding conns due to orphaned datagram conns that are
2460          * being cleaned up.
2461          */
2462         i = 2;
2463         while (atomic_read(&kgnilnd_data.kgn_nconns) != 0) {
2464                 i++;
2465
2466                 for (j = 0; j < kgnilnd_data.kgn_ndevs; ++j) {
2467                         kgn_device_t *dev = &kgnilnd_data.kgn_devices[j];
2468                         kgnilnd_schedule_device(dev);
2469                 }
2470
2471                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2472                         "Waiting for conns to be cleaned up %d\n",atomic_read(&kgnilnd_data.kgn_nconns));
2473                 set_current_state(TASK_UNINTERRUPTIBLE);
2474                 schedule_timeout(cfs_time_seconds(1));
2475         }
2476         /* Peer state is all cleaned up BEFORE setting shutdown, so threads don't
2477          * have to worry about shutdown races.  NB connections may be created
2478          * while there are still active connds, but these will be temporary
2479          * since peer creation always fails after the listener has started to
2480          * shut down.
2481          * All peers should have been cleared out on the nets by now. */
2482         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2483                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2484
2485         /* Wait for the ruhroh thread to shut down. */
2486         kgnilnd_data.kgn_ruhroh_shutdown = 1;
2487         wake_up(&kgnilnd_data.kgn_ruhroh_waitq);
2488         i = 2;
2489         while (kgnilnd_data.kgn_ruhroh_running != 0) {
2490                 i++;
2491                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2492                        "Waiting for ruhroh thread to terminate\n");
2493                 set_current_state(TASK_UNINTERRUPTIBLE);
2494                 schedule_timeout(cfs_time_seconds(1));
2495         }
2496
2497         /* Flag threads to terminate */
2498         kgnilnd_data.kgn_shutdown = 1;
2499
2500         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2501                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2502
2503                 /* should clear all the MDDs */
2504                 kgnilnd_unmap_fma_blocks(dev);
2505
2506                 kgnilnd_schedule_device(dev);
2507                 wake_up_all(&dev->gnd_dgram_waitq);
2508                 wake_up_all(&dev->gnd_dgping_waitq);
2509                 LASSERT(list_empty(&dev->gnd_connd_peers));
2510         }
2511
2512         spin_lock(&kgnilnd_data.kgn_reaper_lock);
2513         wake_up_all(&kgnilnd_data.kgn_reaper_waitq);
2514         spin_unlock(&kgnilnd_data.kgn_reaper_lock);
2515
2516         if (atomic_read(&kgnilnd_data.kgn_nthreads))
2517                 kgnilnd_wakeup_rca_thread();
2518
2519         /* Wait for threads to exit */
2520         i = 2;
2521         while (atomic_read(&kgnilnd_data.kgn_nthreads) != 0) {
2522                 i++;
2523                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2524                        "Waiting for %d threads to terminate\n",
2525                        atomic_read(&kgnilnd_data.kgn_nthreads));
2526                 set_current_state(TASK_UNINTERRUPTIBLE);
2527                 schedule_timeout(cfs_time_seconds(1));
2528         }
2529
2530         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2531                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2532
2533         if (kgnilnd_data.kgn_peers != NULL) {
2534                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
2535                         LASSERT(list_empty(&kgnilnd_data.kgn_peers[i]));
2536
2537                 LIBCFS_FREE(kgnilnd_data.kgn_peers,
2538                             sizeof (struct list_head) *
2539                             *kgnilnd_tunables.kgn_peer_hash_size);
2540         }

        down_write(&kgnilnd_data.kgn_net_rw_sem);
        if (kgnilnd_data.kgn_nets != NULL) {
                for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++)
                        LASSERT(list_empty(&kgnilnd_data.kgn_nets[i]));

                LIBCFS_FREE(kgnilnd_data.kgn_nets,
                            sizeof (struct list_head) *
                            *kgnilnd_tunables.kgn_net_hash_size);
        }
        up_write(&kgnilnd_data.kgn_net_rw_sem);

        LASSERTF(atomic_read(&kgnilnd_data.kgn_nconns) == 0,
                "conns left %d\n", atomic_read(&kgnilnd_data.kgn_nconns));

        if (kgnilnd_data.kgn_conns != NULL) {
                for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
                        LASSERT(list_empty(&kgnilnd_data.kgn_conns[i]));

                LIBCFS_FREE(kgnilnd_data.kgn_conns,
                            sizeof (struct list_head) *
                            *kgnilnd_tunables.kgn_peer_hash_size);
        }
        for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
                kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
                int           j;

                kgnilnd_dev_fini(dev);

                LASSERTF(atomic_read(&dev->gnd_ndgrams) == 0,
                        "dgrams left %d\n", atomic_read(&dev->gnd_ndgrams));

                if (dev->gnd_dgrams != NULL) {
                        /* use a separate index - reusing 'i' here would
                         * clobber the device loop counter above */
                        for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++)
                                LASSERT(list_empty(&dev->gnd_dgrams[j]));

                        LIBCFS_FREE(dev->gnd_dgrams,
                                    sizeof (struct list_head) *
                                    *kgnilnd_tunables.kgn_peer_hash_size);
                }

                kgnilnd_free_phys_fmablk(dev);
        }

        if (kgnilnd_data.kgn_mbox_cache != NULL)
                kmem_cache_destroy(kgnilnd_data.kgn_mbox_cache);

        if (kgnilnd_data.kgn_rx_cache != NULL)
                kmem_cache_destroy(kgnilnd_data.kgn_rx_cache);

        if (kgnilnd_data.kgn_tx_cache != NULL)
                kmem_cache_destroy(kgnilnd_data.kgn_tx_cache);

        if (kgnilnd_data.kgn_tx_phys_cache != NULL)
                kmem_cache_destroy(kgnilnd_data.kgn_tx_phys_cache);

        if (kgnilnd_data.kgn_dgram_cache != NULL)
                kmem_cache_destroy(kgnilnd_data.kgn_dgram_cache);

        if (kgnilnd_data.kgn_cksum_map_pages != NULL) {
                for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
                        if (kgnilnd_data.kgn_cksum_map_pages[i] != NULL) {
                                kfree(kgnilnd_data.kgn_cksum_map_pages[i]);
                        }
                }
                kfree(kgnilnd_data.kgn_cksum_map_pages);
        }
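
        /* The checksum-map teardown above is the usual two-level free: each
         * per-page buffer is released before the pointer array itself, and
         * every free is guarded against a partially completed startup.  A
         * condensed sketch of the matching allocation shape (illustrative
         * names and sizes only, not the driver's actual startup code):
         *
         *      pages = kmalloc(npages * sizeof(*pages), GFP_KERNEL);
         *      for (i = 0; i < npages; i++)
         *              pages[i] = kmalloc(PAGE_SIZE, GFP_KERNEL);
         */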

        CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
               atomic_read(&libcfs_kmemory));

        kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
        module_put(THIS_MODULE);

        EXIT;
}

int
kgnilnd_startup(lnet_ni_t *ni)
{
        int               rc, devno;
        kgn_net_t        *net;
        ENTRY;

        LASSERTF(ni->ni_lnd == &the_kgnilnd,
                "bad LND 0x%p != the_kgnilnd @ 0x%p\n",
                ni->ni_lnd, &the_kgnilnd);

        if (kgnilnd_data.kgn_init == GNILND_INIT_NOTHING) {
                rc = kgnilnd_base_startup();
                if (rc != 0)
                        RETURN(rc);
        }

        /* Serialize with shutdown. */
        mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);

        LIBCFS_ALLOC(net, sizeof(*net));
        if (net == NULL) {
                CERROR("could not allocate net for new interface instance\n");
                /* no need to clean up the CDM... */
                GOTO(failed, rc = -ENOMEM);
        }
        INIT_LIST_HEAD(&net->gnn_list);
        ni->ni_data = net;
        net->gnn_ni = ni;
        ni->ni_maxtxcredits = *kgnilnd_tunables.kgn_credits;
        ni->ni_peertxcredits = *kgnilnd_tunables.kgn_peer_credits;

        if (*kgnilnd_tunables.kgn_peer_health) {
                int     fudge;
                int     timeout;
                /* give this a bit of leeway - we don't have a hard timeout
                 * as we only check timeouts periodically - see comment in
                 * kgnilnd_reaper */
                fudge = (GNILND_TO2KA(*kgnilnd_tunables.kgn_timeout) / GNILND_REAPER_NCHECKS);
                timeout = *kgnilnd_tunables.kgn_timeout + fudge;

                if (*kgnilnd_tunables.kgn_peer_timeout >= timeout)
                        ni->ni_peertimeout = *kgnilnd_tunables.kgn_peer_timeout;
                else if (*kgnilnd_tunables.kgn_peer_timeout > -1) {
                        LCONSOLE_ERROR("peer_timeout is set to %d but needs to be >= %d\n",
                                        *kgnilnd_tunables.kgn_peer_timeout,
                                        timeout);
                        ni->ni_data = NULL;
                        LIBCFS_FREE(net, sizeof(*net));
                        GOTO(failed, rc = -EINVAL);
                } else
                        ni->ni_peertimeout = timeout;

                LCONSOLE_INFO("Enabling LNet peer health for gnilnd, timeout %ds\n",
                              ni->ni_peertimeout);
        }
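
        /* For a concrete feel (numbers purely illustrative): if kgn_timeout
         * were 60s and GNILND_TO2KA(60)/GNILND_REAPER_NCHECKS worked out to
         * 10s of fudge, peer_timeout would have to be >= 70, or be left at
         * -1 to accept the computed 70s value.  The real constants live in
         * gnilnd.h and the module tunables. */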

        atomic_set(&net->gnn_refcount, 1);

        /* if we have multiple devices, spread the nets around */
        net->gnn_netnum = LNET_NETNUM(LNET_NIDNET(ni->ni_nid));

        devno = LNET_NIDNET(ni->ni_nid) % GNILND_MAXDEVS;
        net->gnn_dev = &kgnilnd_data.kgn_devices[devno];
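
        /* i.e. nets are assigned round-robin by net number modulo
         * GNILND_MAXDEVS, so consecutive LNet net numbers land on different
         * devices when more than one is present. */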

        /* allocate a 'dummy' cdm for datagram use. We can only have a single
         * datagram between a nid:inst_id and nid2:inst_id. The fake cdm
         * gives us an additional inst_id to use, allowing the datagrams to
         * flow like rivers of honey and beer */

        /* the instance id for the cdm is the NETNUM offset by MAXDEVS -
         * ensuring we'll have a unique id */

        ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), net->gnn_dev->gnd_nid);
        CDEBUG(D_NET, "adding net %p nid=%s on dev %d\n",
                net, libcfs_nid2str(ni->ni_nid), net->gnn_dev->gnd_id);
        /* until the gnn_list is set, we need to clean up ourselves as
         * kgnilnd_shutdown would just get confused */

        down_write(&kgnilnd_data.kgn_net_rw_sem);
        list_add_tail(&net->gnn_list, kgnilnd_netnum2netlist(net->gnn_netnum));
        up_write(&kgnilnd_data.kgn_net_rw_sem);

        /* we need a separate thread to call probe_wait_by_id until
         * we get a function callback notifier from kgni */
        mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
        RETURN(0);
 failed:
        mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
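        /* note: ni->ni_data may still be NULL here (allocation failure);
         * kgnilnd_shutdown() handles that case explicitly */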
        kgnilnd_shutdown(ni);
        RETURN(rc);
}

void
kgnilnd_shutdown(lnet_ni_t *ni)
{
        kgn_net_t     *net = ni->ni_data;
        int           i;
        int           rc;
        ENTRY;

        CFS_RACE(CFS_FAIL_GNI_SR_DOWN_RACE);

        LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_ALL,
                "init %d\n", kgnilnd_data.kgn_init);

        /* Serialize with startup. */
        mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
        CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
               atomic_read(&libcfs_kmemory));

        if (net == NULL) {
                CERROR("got NULL net for ni %p\n", ni);
                GOTO(out, rc = -EINVAL);
        }

        LASSERTF(ni == net->gnn_ni,
                "ni %p gnn_ni %p\n", ni, net->gnn_ni);

        ni->ni_data = NULL;

        LASSERT(!net->gnn_shutdown);
        LASSERTF(atomic_read(&net->gnn_refcount) != 0,
                "net %p refcount %d\n",
                 net, atomic_read(&net->gnn_refcount));

        if (!list_empty(&net->gnn_list)) {
                /* serialize with peer creation */
                down_write(&kgnilnd_data.kgn_net_rw_sem);
                net->gnn_shutdown = 1;
                up_write(&kgnilnd_data.kgn_net_rw_sem);

                kgnilnd_cancel_net_dgrams(net);

                kgnilnd_del_conn_or_peer(net, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);

                /* if we are quiesced, need to wake up - we need those threads
                 * alive to release peers, etc */
                if (GNILND_IS_QUIESCED) {
                        set_mb(kgnilnd_data.kgn_quiesce_trigger, GNILND_QUIESCE_IDLE);
                        kgnilnd_quiesce_wait("shutdown");
                }

                kgnilnd_wait_for_canceled_dgrams(net->gnn_dev);

                /* Wait until the net's refcount drops to 1, then release the
                 * final reference (ours); this makes sure everything else is
                 * done before we free the net.
                 */
                i = 4;
                while (atomic_read(&net->gnn_refcount) != 1) {
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
                                "Waiting for %d references to clear on net %d\n",
                                atomic_read(&net->gnn_refcount),
                                net->gnn_netnum);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_timeout(cfs_time_seconds(1));
                }
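
                /* The drain-then-free shape used here, boiled down to a
                 * sketch (hypothetical 'obj', not driver code):
                 *
                 *      while (atomic_read(&obj->refcount) != 1)
                 *              schedule_timeout_uninterruptible(HZ);
                 *      obj_decref(obj);   - drop our own, final reference
                 *      free(obj);         - now nothing can still be using it
                 */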

                /* release ref from kgnilnd_startup */
                kgnilnd_net_decref(net);
                /* serialize with reaper and conn_task looping */
                down_write(&kgnilnd_data.kgn_net_rw_sem);
                list_del_init(&net->gnn_list);
                up_write(&kgnilnd_data.kgn_net_rw_sem);
        }

        /* not locking, this can't race with writers */
        LASSERTF(atomic_read(&net->gnn_refcount) == 0,
                "net %p refcount %d\n",
                 net, atomic_read(&net->gnn_refcount));
        LIBCFS_FREE(net, sizeof(*net));

out:
        down_read(&kgnilnd_data.kgn_net_rw_sem);
        for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                if (!list_empty(&kgnilnd_data.kgn_nets[i])) {
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        break;
                }

                if (i == *kgnilnd_tunables.kgn_net_hash_size - 1) {
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        kgnilnd_base_shutdown();
                }
        }
        CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
               atomic_read(&libcfs_kmemory));

        mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
        EXIT;
}

void __exit
kgnilnd_module_fini(void)
{
        lnet_unregister_lnd(&the_kgnilnd);
        kgnilnd_proc_fini();
        kgnilnd_remove_sysctl();
        kgnilnd_tunables_fini();
}

int __init
kgnilnd_module_init(void)
{
        int    rc;

        rc = kgnilnd_tunables_init();
        if (rc != 0)
                return rc;

        printk(KERN_INFO "Lustre: kgnilnd build version: "KGNILND_BUILD_REV"\n");

        kgnilnd_insert_sysctl();
        kgnilnd_proc_init();

        lnet_register_lnd(&the_kgnilnd);

        return 0;
}
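
/* Note that kgnilnd_module_fini() above undoes these steps in exactly the
 * reverse order: unregister the LND first, then tear down /proc, sysctl and
 * tunables - the usual mirror-image teardown for kernel modules. */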

MODULE_AUTHOR("Cray, Inc. <nic@cray.com>");
MODULE_DESCRIPTION("Kernel Gemini LND v"KGNILND_BUILD_REV);
MODULE_LICENSE("GPL");

module_init(kgnilnd_module_init);
module_exit(kgnilnd_module_fini);