lnet/klnds/gnilnd/gnilnd.c
/*
 * Copyright (C) 2012 Cray, Inc.
 *
 * Copyright (c) 2013, 2015, Intel Corporation.
 *
 *   Author: Nic Henke <nic@cray.com>
 *   Author: James Shimek <jshimek@cray.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */
#include "gnilnd.h"

/* Primary entry points from LNET.  There are no guarantees against reentrance. */
lnd_t the_kgnilnd = {
#ifdef CONFIG_CRAY_XT
        .lnd_type       = GNILND,
#else
        .lnd_type       = GNIIPLND,
#endif
        .lnd_startup    = kgnilnd_startup,
        .lnd_shutdown   = kgnilnd_shutdown,
        .lnd_ctl        = kgnilnd_ctl,
        .lnd_send       = kgnilnd_send,
        .lnd_recv       = kgnilnd_recv,
        .lnd_eager_recv = kgnilnd_eager_recv,
        .lnd_query      = kgnilnd_query,
};

kgn_data_t      kgnilnd_data;

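/* Spawn a kernel thread running 'fn' and account for it in kgn_nthreads;
 * returns 0 on success or the PTR_ERR from kthread_run on failure. */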
int
kgnilnd_thread_start(int(*fn)(void *arg), void *arg, char *name, int id)
{
        struct task_struct *thrd;

        thrd = kthread_run(fn, arg, "%s_%02d", name, id);
        if (IS_ERR(thrd))
                return PTR_ERR(thrd);

        atomic_inc(&kgnilnd_data.kgn_nthreads);
        return 0;
}

/* bind scheduler threads to cpus */
int
kgnilnd_start_sd_threads(void)
{
        int cpu;
        int i = 0;
        struct task_struct *task;

        for_each_online_cpu(cpu) {
                /* don't bind to cpu 0 - all interrupts are processed here */
                if (cpu == 0)
                        continue;

                task = kthread_create(kgnilnd_scheduler, (void *)((long)i),
                                      "%s_%02d", "kgnilnd_sd", i);
                if (!IS_ERR(task)) {
                        kthread_bind(task, cpu);
                        wake_up_process(task);
                } else {
                        CERROR("Can't spawn gnilnd scheduler[%d] %ld\n", i,
                                PTR_ERR(task));
                        return PTR_ERR(task);
                }
                atomic_inc(&kgnilnd_data.kgn_nthreads);

                if (++i >= *kgnilnd_tunables.kgn_sched_threads) {
                        break;
                }
        }

        return 0;
}

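/* Close every ESTABLISHED conn to 'peer' that is older than 'newconn'
 * (by peerstamp or peer_connstamp) and return how many were closed. */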
/* needs write_lock on kgn_peer_conn_lock */
int
kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
        kgn_conn_t         *conn;
        struct list_head   *ctmp, *cnxt;
        int                 loopback;
        int                 count = 0;

        loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;

        list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
                conn = list_entry(ctmp, kgn_conn_t, gnc_list);

                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                if (conn == newconn)
                        continue;

                if (conn->gnc_device != newconn->gnc_device)
                        continue;

                /* This is a two-connection loopback - one talking to the other */
                if (loopback &&
                    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
                    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp) {
                        CDEBUG(D_NET, "skipping prune of %p, "
                                "loopback and matching stamps"
                                " connstamp %llu(%llu)"
                                " peerstamp %llu(%llu)\n",
                                conn, newconn->gnc_my_connstamp,
                                conn->gnc_peer_connstamp,
                                newconn->gnc_peer_connstamp,
                                conn->gnc_my_connstamp);
                        continue;
                }

                if (conn->gnc_peerstamp != newconn->gnc_peerstamp) {
                        LASSERTF(conn->gnc_peerstamp < newconn->gnc_peerstamp,
                                "conn 0x%p peerstamp %llu >= "
                                "newconn 0x%p peerstamp %llu\n",
                                conn, conn->gnc_peerstamp,
                                newconn, newconn->gnc_peerstamp);

                        CDEBUG(D_NET, "Closing stale conn nid: %s"
                               " peerstamp:%#llx(%#llx)\n",
                               libcfs_nid2str(peer->gnp_nid),
                               conn->gnc_peerstamp, newconn->gnc_peerstamp);
                } else {

                        LASSERTF(conn->gnc_peer_connstamp < newconn->gnc_peer_connstamp,
                                "conn 0x%p peer_connstamp %llu >= "
                                "newconn 0x%p peer_connstamp %llu\n",
                                conn, conn->gnc_peer_connstamp,
                                newconn, newconn->gnc_peer_connstamp);

                        CDEBUG(D_NET, "Closing stale conn nid: %s"
                               " connstamp:%llu(%llu)\n",
                               libcfs_nid2str(peer->gnp_nid),
                               conn->gnc_peer_connstamp, newconn->gnc_peer_connstamp);
                }

                count++;
                kgnilnd_close_conn_locked(conn, -ESTALE);
        }

        if (count != 0) {
                CWARN("Closed %d stale conns to %s\n", count, libcfs_nid2str(peer->gnp_nid));
        }

        RETURN(count);
}

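/* Check whether 'newconn' duplicates an existing conn to 'peer'.
 * Returns 0 when it is not a dup, or a nonzero reason code (1: stale
 * peerstamp, 2: stale peer connstamp, 3: identical connstamp). */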
int
kgnilnd_conn_isdup_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
        kgn_conn_t       *conn;
        struct list_head *tmp;
        int               loopback;
        ENTRY;

        loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;

        list_for_each(tmp, &peer->gnp_conns) {
                conn = list_entry(tmp, kgn_conn_t, gnc_list);
                CDEBUG(D_NET, "checking conn 0x%p for peer %s"
                        " lo %d new %llu existing %llu"
                        " new peer %llu existing peer %llu"
                        " new dev %p existing dev %p\n",
                        conn, libcfs_nid2str(peer->gnp_nid),
                        loopback,
                        newconn->gnc_peerstamp, conn->gnc_peerstamp,
                        newconn->gnc_peer_connstamp, conn->gnc_peer_connstamp,
                        newconn->gnc_device, conn->gnc_device);

                /* conn is in the process of closing */
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                /* 'newconn' is from an earlier version of 'peer'!!! */
                if (newconn->gnc_peerstamp < conn->gnc_peerstamp)
                        RETURN(1);

                /* 'conn' is from an earlier version of 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->gnc_peerstamp > conn->gnc_peerstamp)
                        continue;

                /* Different devices are OK */
                if (conn->gnc_device != newconn->gnc_device)
                        continue;

                /* It's me connecting to myself */
                if (loopback &&
                    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
                    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp)
                        continue;

                /* 'newconn' is an earlier connection from 'peer'!!! */
                if (newconn->gnc_peer_connstamp < conn->gnc_peer_connstamp)
                        RETURN(2);

                /* 'conn' is an earlier connection from 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->gnc_peer_connstamp > conn->gnc_peer_connstamp)
                        continue;

                /* 'newconn' has the SAME connection stamp; 'peer' isn't
                 * playing the game... */
                RETURN(3);
        }

        RETURN(0);
}

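/* Allocate and initialize a new conn on 'dev': set up the TX ref table,
 * assign a unique CQ id, and create the EP used for SMSG and BTE. */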
int
kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
{
        kgn_conn_t      *conn;
        gni_return_t    rrc;
        int             rc = 0;

        LASSERT(!in_interrupt());
        atomic_inc(&kgnilnd_data.kgn_nconns);

        /* divide by 2 to allow for complete reset and immediate reconnect */
        if (atomic_read(&kgnilnd_data.kgn_nconns) >= GNILND_MAX_CQID/2) {
                CERROR("Too many conns are live: %d > %d\n",
                        atomic_read(&kgnilnd_data.kgn_nconns), GNILND_MAX_CQID/2);
                atomic_dec(&kgnilnd_data.kgn_nconns);
                return -E2BIG;
        }

        LIBCFS_ALLOC(conn, sizeof(*conn));
        if (conn == NULL) {
                atomic_dec(&kgnilnd_data.kgn_nconns);
                return -ENOMEM;
        }

        conn->gnc_tx_ref_table =
                kgnilnd_vzalloc(GNILND_MAX_MSG_ID * sizeof(void *));
        if (conn->gnc_tx_ref_table == NULL) {
                CERROR("Can't allocate conn tx_ref_table\n");
                GOTO(failed, rc = -ENOMEM);
        }

        mutex_init(&conn->gnc_smsg_mutex);
        mutex_init(&conn->gnc_rdma_mutex);
        atomic_set(&conn->gnc_refcount, 1);
        atomic_set(&conn->gnc_reaper_noop, 0);
        atomic_set(&conn->gnc_sched_noop, 0);
        atomic_set(&conn->gnc_tx_in_use, 0);
        INIT_LIST_HEAD(&conn->gnc_list);
        INIT_LIST_HEAD(&conn->gnc_hashlist);
        INIT_LIST_HEAD(&conn->gnc_schedlist);
        INIT_LIST_HEAD(&conn->gnc_fmaq);
        INIT_LIST_HEAD(&conn->gnc_mdd_list);
        INIT_LIST_HEAD(&conn->gnc_delaylist);
        spin_lock_init(&conn->gnc_list_lock);
        spin_lock_init(&conn->gnc_tx_lock);
        conn->gnc_magic = GNILND_CONN_MAGIC;

        /* set tx id to nearly the end to make sure we find wrapping
         * issues soon */
        conn->gnc_next_tx = (int) GNILND_MAX_MSG_ID - 10;

        /* if this fails, we have conflicts and MAX_TX is too large */
        CLASSERT(GNILND_MAX_MSG_ID < GNILND_MSGID_CLOSE);

        /* get a new unique CQ id for this conn */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        conn->gnc_my_connstamp = kgnilnd_data.kgn_connstamp++;
        conn->gnc_cqid = kgnilnd_get_cqid_locked();
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        if (conn->gnc_cqid == 0) {
                CERROR("Could not allocate unique CQ ID for conn 0x%p\n", conn);
                GOTO(failed, rc = -E2BIG);
        }

        CDEBUG(D_NET, "alloc cqid %u for conn 0x%p\n",
                conn->gnc_cqid, conn);

        /* needs to be set before gnc_ephandle to allow kgnilnd_destroy_conn_ep to
         * check context */
        conn->gnc_device = dev;

        conn->gnc_timeout = MAX(*kgnilnd_tunables.kgn_timeout,
                                GNILND_MIN_TIMEOUT);
        kgnilnd_update_reaper_timeout(conn->gnc_timeout);

        /* this is the ep_handle for doing SMSG & BTE */
        mutex_lock(&dev->gnd_cq_mutex);
        rrc = kgnilnd_ep_create(dev->gnd_handle, dev->gnd_snd_fma_cqh,
                                &conn->gnc_ephandle);
        mutex_unlock(&dev->gnd_cq_mutex);
        if (rrc != GNI_RC_SUCCESS)
                GOTO(failed, rc = -ENETDOWN);

        CDEBUG(D_NET, "created conn 0x%p ep_hndl 0x%p\n",
               conn, conn->gnc_ephandle);

        /* add ref for EP canceling */
        kgnilnd_conn_addref(conn);
        atomic_inc(&dev->gnd_neps);

        *connp = conn;
        return 0;

failed:
        atomic_dec(&kgnilnd_data.kgn_nconns);
        kgnilnd_vfree(conn->gnc_tx_ref_table,
                      GNILND_MAX_MSG_ID * sizeof(void *));
        LIBCFS_FREE(conn, sizeof(*conn));
        return rc;
}

/* needs to be called with kgn_peer_conn_lock held (read or write) */
kgn_conn_t *
kgnilnd_find_conn_locked(kgn_peer_t *peer)
{
        kgn_conn_t      *conn = NULL;

        /* if we are in reset, this conn is going to die soon */
        if (unlikely(kgnilnd_data.kgn_in_reset)) {
                RETURN(NULL);
        }

        /* just return the first ESTABLISHED connection */
        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                /* kgnilnd_finish_connect doesn't put connections on the
                 * peer list until they are actually established */
                LASSERTF(conn->gnc_state >= GNILND_CONN_ESTABLISHED,
                        "found conn %p state %s on peer %p (%s)\n",
                        conn, kgnilnd_conn_state2str(conn), peer,
                        libcfs_nid2str(peer->gnp_nid));
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                RETURN(conn);
        }
        RETURN(NULL);
}

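/* Return an ESTABLISHED conn to 'peer' if one exists; otherwise, when the
 * peer is IDLE and past its reconnect backoff, queue it for the connection
 * daemon and return NULL. */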
/* needs write_lock on kgn_peer_conn_lock held */
kgn_conn_t *
kgnilnd_find_or_create_conn_locked(kgn_peer_t *peer)
{
        kgn_device_t    *dev = peer->gnp_net->gnn_dev;
        kgn_conn_t      *conn;

        conn = kgnilnd_find_conn_locked(peer);

        if (conn != NULL) {
                return conn;
        }

        /* if the peer was previously connecting, check whether we should
         * trigger another connection attempt yet */
        if (time_before(jiffies, peer->gnp_reconnect_time)) {
                return NULL;
        }

        /* This check prevents us from creating a new connection to a peer while we are
         * still in the process of closing an existing connection to the peer.
         */
        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_ephandle != NULL) {
                        CDEBUG(D_NET, "Not connecting - non-NULL ephandle found for peer 0x%p->%s\n", peer,
                                libcfs_nid2str(peer->gnp_nid));
                        return NULL;
                }
        }

        if (peer->gnp_connecting != GNILND_PEER_IDLE) {
                /* only fire up a new connection when the peer is completely
                 * IDLE - in any other state, don't start one */
                return NULL;
        }

        CDEBUG(D_NET, "starting connect to %s\n",
                libcfs_nid2str(peer->gnp_nid));
        peer->gnp_connecting = GNILND_PEER_CONNECT;
        kgnilnd_peer_addref(peer); /* extra ref for connd */

        spin_lock(&dev->gnd_connd_lock);
        list_add_tail(&peer->gnp_connd_list, &dev->gnd_connd_peers);
        spin_unlock(&dev->gnd_connd_lock);

        kgnilnd_schedule_dgram(dev);
        CDEBUG(D_NETTRACE, "scheduling new connect\n");

        return NULL;
}

/* Caller is responsible for deciding if/when to call this */
void
kgnilnd_destroy_conn_ep(kgn_conn_t *conn)
{
        gni_return_t    rrc;
        gni_ep_handle_t tmp_ep;

        /* only tear down the EP if we actually initialized it; swapping in
         * NULL tells kgnilnd_destroy_conn to leave it alone */
        tmp_ep = xchg(&conn->gnc_ephandle, NULL);
        if (tmp_ep != NULL) {
                /* we never re-use the EP, so unbind is not needed */
                mutex_lock(&conn->gnc_device->gnd_cq_mutex);
                rrc = kgnilnd_ep_destroy(tmp_ep);

                mutex_unlock(&conn->gnc_device->gnd_cq_mutex);

                /* if this fails, it could hork up kgni smsg retransmit and others
                 * since we could free the SMSG mbox memory, etc. */
                LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d conn 0x%p ep 0x%p\n",
                         rrc, conn, conn->gnc_ephandle);

                atomic_dec(&conn->gnc_device->gnd_neps);

                /* clear out count added in kgnilnd_close_conn_locked
                 * conn will have a peer once it hits finish_connect, where it
                 * is the first spot we'll mark it ESTABLISHED as well */
                if (conn->gnc_peer) {
                        kgnilnd_admin_decref(conn->gnc_peer->gnp_dirty_eps);
                }

                /* drop ref for EP */
                kgnilnd_conn_decref(conn);
        }
}

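/* Final teardown of a conn: by this point it must be off every list, out
 * of purgatory, and have had its EP destroyed - all asserted below. */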
void
kgnilnd_destroy_conn(kgn_conn_t *conn)
{
        LASSERTF(!in_interrupt() &&
                !conn->gnc_scheduled &&
                !conn->gnc_in_purgatory &&
                conn->gnc_ephandle == NULL &&
                list_empty(&conn->gnc_list) &&
                list_empty(&conn->gnc_hashlist) &&
                list_empty(&conn->gnc_schedlist) &&
                list_empty(&conn->gnc_mdd_list) &&
                list_empty(&conn->gnc_delaylist) &&
                conn->gnc_magic == GNILND_CONN_MAGIC,
                "conn 0x%p->%s IRQ %d sched %d purg %d ep 0x%p Mg %d lists %d/%d/%d/%d/%d\n",
                conn, conn->gnc_peer ? libcfs_nid2str(conn->gnc_peer->gnp_nid)
                                     : "<?>",
                !!in_interrupt(), conn->gnc_scheduled,
                conn->gnc_in_purgatory,
                conn->gnc_ephandle,
                conn->gnc_magic,
                list_empty(&conn->gnc_list),
                list_empty(&conn->gnc_hashlist),
                list_empty(&conn->gnc_schedlist),
                list_empty(&conn->gnc_mdd_list),
                list_empty(&conn->gnc_delaylist));

        /* Tripping these is especially bad, as it means we have items on the
         *  lists that didn't keep their refcount on the connection - or
         *  somebody evil released their own */
        LASSERTF(list_empty(&conn->gnc_fmaq) &&
                 atomic_read(&conn->gnc_nlive_fma) == 0 &&
                 atomic_read(&conn->gnc_nlive_rdma) == 0,
                 "conn 0x%p fmaq %d@0x%p nfma %d nrdma %d\n",
                 conn, kgnilnd_count_list(&conn->gnc_fmaq), &conn->gnc_fmaq,
                 atomic_read(&conn->gnc_nlive_fma), atomic_read(&conn->gnc_nlive_rdma));

        CDEBUG(D_NET, "destroying conn %p ephandle %p error %d\n",
                conn, conn->gnc_ephandle, conn->gnc_error);

        /* We are freeing this memory; remove the magic value from the connection */
        conn->gnc_magic = 0;

        /* if there is an FMA blk left here, we'll tear it down */
        if (conn->gnc_fma_blk) {
                if (conn->gnc_peer) {
                        kgn_mbox_info_t *mbox;
                        mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
                        mbox->mbx_prev_nid = conn->gnc_peer->gnp_nid;
                }
                kgnilnd_release_mbox(conn, 0);
        }

        if (conn->gnc_peer != NULL)
                kgnilnd_peer_decref(conn->gnc_peer);

        if (conn->gnc_tx_ref_table != NULL) {
                kgnilnd_vfree(conn->gnc_tx_ref_table,
                              GNILND_MAX_MSG_ID * sizeof(void *));
        }

        LIBCFS_FREE(conn, sizeof(*conn));
        atomic_dec(&kgnilnd_data.kgn_nconns);
}

/* peer_alive and peer_notify done in the style of the o2iblnd */
void
kgnilnd_peer_alive(kgn_peer_t *peer)
{
        set_mb(peer->gnp_last_alive, jiffies);
}

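/* Notify LNet of this peer's state: report a death only if the peer is
 * idle with no live conn and we are not in reset; 'alive' notifications
 * always pass through. */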
void
kgnilnd_peer_notify(kgn_peer_t *peer, int error, int alive)
{
        int                     tell_lnet = 0;
        int                     nnets = 0;
        int                     rc;
        int                     i, j;
        kgn_conn_t             *conn;
        kgn_net_t             **nets;
        kgn_net_t              *net;

        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DONT_NOTIFY))
                return;

        /* Tell LNet we are giving up on this peer - but only
         * if it isn't already reconnected or trying to reconnect */
        read_lock(&kgnilnd_data.kgn_peer_conn_lock);

        /* use kgnilnd_find_conn_locked to avoid any conns in the process of being nuked
         *
         * don't tell LNet if we are in reset - we assume that everyone will be able to
         * reconnect just fine
         */
        conn = kgnilnd_find_conn_locked(peer);

        CDEBUG(D_NETTRACE, "peer 0x%p->%s connecting %d conn 0x%p, rst %d error %d\n",
               peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting, conn,
               kgnilnd_data.kgn_in_reset, error);

        if (((peer->gnp_connecting == GNILND_PEER_IDLE) &&
            (conn == NULL) &&
            (!kgnilnd_data.kgn_in_reset) &&
            (!kgnilnd_conn_clean_errno(error))) || alive) {
                tell_lnet = 1;
        }

        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        if (!tell_lnet) {
                /* short circuit if we don't need to notify LNet */
                return;
        }

        rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);

        if (rc) {
                /* skip the notification entirely if the trylock fails -
                 * LNet is in shutdown or something else is going on */
                for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                        list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
                                /* if gnn_shutdown is set for any net, a shutdown
                                 * is in progress; just return */
                                if (net->gnn_shutdown) {
                                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                                        return;
                                }
                                nnets++;
                        }
                }

                if (nnets == 0) {
                        /* shutdown in progress most likely */
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        return;
                }

                LIBCFS_ALLOC(nets, nnets * sizeof(*nets));

                if (nets == NULL) {
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        CERROR("Failed to allocate nets[%d]\n", nnets);
                        return;
                }

                j = 0;
                for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                        list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
                                nets[j] = net;
                                kgnilnd_net_addref(net);
                                j++;
                        }
                }
                up_read(&kgnilnd_data.kgn_net_rw_sem);

                for (i = 0; i < nnets; i++) {
                        lnet_nid_t peer_nid;

                        net = nets[i];

                        peer_nid = kgnilnd_lnd2lnetnid(net->gnn_ni->ni_nid,
                                                                 peer->gnp_nid);

                        CDEBUG(D_NET, "peer 0x%p->%s last_alive %lu (%lus ago)\n",
                                peer, libcfs_nid2str(peer_nid), peer->gnp_last_alive,
                                cfs_duration_sec(jiffies - peer->gnp_last_alive));

                        lnet_notify(net->gnn_ni, peer_nid, alive,
                                    peer->gnp_last_alive);

                        kgnilnd_net_decref(net);
                }

                LIBCFS_FREE(nets, nnets * sizeof(*nets));
        }
}

/* need write_lock on kgn_peer_conn_lock */
void
kgnilnd_close_conn_locked(kgn_conn_t *conn, int error)
{
        kgn_peer_t        *peer = conn->gnc_peer;
        ENTRY;

        LASSERT(!in_interrupt());

        /* store error for tx completion */
        conn->gnc_error = error;
        peer->gnp_last_errno = error;

        /* use real error from peer if possible */
        if (error == -ECONNRESET) {
                error = conn->gnc_peer_error;
        }

        /* if we NETERROR, make sure it is rate limited */
        if (!kgnilnd_conn_clean_errno(error) &&
            peer->gnp_state != GNILND_PEER_DOWN) {
                CNETERR("closing conn to %s: error %d\n",
                       libcfs_nid2str(peer->gnp_nid), error);
        } else {
                CDEBUG(D_NET, "closing conn to %s: error %d\n",
                       libcfs_nid2str(peer->gnp_nid), error);
        }

        LASSERTF(conn->gnc_state == GNILND_CONN_ESTABLISHED,
                "conn %p to %s with bogus state %s\n", conn,
                libcfs_nid2str(conn->gnc_peer->gnp_nid),
                kgnilnd_conn_state2str(conn));
        LASSERT(!list_empty(&conn->gnc_hashlist));
        LASSERT(!list_empty(&conn->gnc_list));

        /* mark peer count here so any place the EP gets destroyed will
         * open up the peer count so that a new ESTABLISHED conn is then free
         * to send new messages -- sending before the previous EPs are destroyed
         * could end up with messages on the network for the old conn _after_
         * the new conn and break the mbox safety protocol */
        kgnilnd_admin_addref(conn->gnc_peer->gnp_dirty_eps);

        /* Remove from conn hash table: no new callbacks */
        list_del_init(&conn->gnc_hashlist);
        kgnilnd_data.kgn_conn_version++;
        kgnilnd_conn_decref(conn);

        /* if we are in reset, go right to CLOSED as there is no scheduler
         * thread to move from CLOSING to CLOSED */
        if (unlikely(kgnilnd_data.kgn_in_reset)) {
                conn->gnc_state = GNILND_CONN_CLOSED;
        } else {
                conn->gnc_state = GNILND_CONN_CLOSING;
        }

        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RDMA_CQ_ERROR)) {
                msleep_interruptible(MSEC_PER_SEC);
        }

        /* leave on peer->gnp_conns to make sure we don't let the reaper
         * or others try to unlink this peer until the conn is fully
         * processed for closing */

        if (kgnilnd_check_purgatory_conn(conn)) {
                kgnilnd_add_purgatory_locked(conn, conn->gnc_peer);
        }

        /* Reset RX timeout to ensure we wait for an incoming CLOSE
         * for the full timeout.  If we get a CLOSE we know the
         * peer has stopped all RDMA.  Otherwise if we wait for
         * the full timeout we can also be sure all RDMA has stopped. */
        conn->gnc_last_rx = conn->gnc_last_rx_cq = jiffies;
        mb();

        /* schedule sending CLOSE - if we are in quiesce, this adds to
         * gnd_ready_conns and allows us to find it in quiesce processing */
        kgnilnd_schedule_conn(conn);

        EXIT;
}

void
kgnilnd_close_conn(kgn_conn_t *conn, int error)
{
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        /* need to check the state here - this call is racy and we don't
         * know the state until after the lock is grabbed */
        if (conn->gnc_state == GNILND_CONN_ESTABLISHED) {
                kgnilnd_close_conn_locked(conn, error);
        }
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
}

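/* Final processing of a conn that has reached CLOSED: cancel every
 * outstanding TX, destroy the EP, move the conn to DONE, and notify
 * LNet of the peer's fate. */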
void
kgnilnd_complete_closed_conn(kgn_conn_t *conn)
{
        LIST_HEAD(sinners);
        kgn_tx_t               *tx, *txn;
        int                     nlive = 0;
        int                     nrdma = 0;
        int                     nq_rdma = 0;
        int                     logmsg;
        ENTRY;

        /* Dump log on cksum error - wait until complete phase to let
         * RX of error happen */
        if (*kgnilnd_tunables.kgn_checksum_dump &&
            (conn != NULL && conn->gnc_peer_error == -ENOKEY)) {
                libcfs_debug_dumplog();
        }

        /* _CLOSED set in kgnilnd_process_fmaq once we decide to
         * send the CLOSE or not */
        LASSERTF(conn->gnc_state == GNILND_CONN_CLOSED,
                 "conn 0x%p->%s with bad state %s\n",
                 conn, conn->gnc_peer ?
                        libcfs_nid2str(conn->gnc_peer->gnp_nid) :
                        "<?>",
                 kgnilnd_conn_state2str(conn));

        LASSERT(list_empty(&conn->gnc_hashlist));
        /* We shouldn't be on the delay list; the conn can only get added
         * to it during a retransmit, and retransmits only occur within
         * scheduler threads.
         */
        LASSERT(list_empty(&conn->gnc_delaylist));

        /* we've sent the close, start nuking */
        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SCHEDULE_COMPLETE))
                kgnilnd_schedule_conn(conn);

        if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
                CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
                                "done; attempting to recover conn 0x%p "
                                "scheduled %d function: %s line: %d\n", conn,
                                conn->gnc_scheduled, conn->gnc_sched_caller,
                                conn->gnc_sched_line);
                RETURN_EXIT;
        }

        /* we don't use lists to track things that we can get out of the
         * tx_ref table... */

        /* need to hold locks for tx_list_state, sampling it is too racy:
         * - the lock actually protects tx != NULL, but we can't take the proper
         *   lock until we check tx_list_state, which would be too late and
         *   we could have the TX change under us.
         * gnd_rdmaq_lock and gnd_lock are not used together, so taking both
         * should be fine */
        spin_lock(&conn->gnc_device->gnd_rdmaq_lock);
        spin_lock(&conn->gnc_device->gnd_lock);

        for (nrdma = 0; nrdma < GNILND_MAX_MSG_ID; nrdma++) {
                tx = conn->gnc_tx_ref_table[nrdma];

                if (tx != NULL) {
                        /* only print the first error; for CLOSE we often don't see
                         * CQ events by the time we get here... and really don't care */
                        if (nlive || tx->tx_msg.gnm_type == GNILND_MSG_CLOSE)
                                tx->tx_state |= GNILND_TX_QUIET_ERROR;
                        nlive++;
                        GNIDBG_TX(D_NET, tx, "cleaning up on close, nlive %d", nlive);

                        /* don't worry about gnc_lock here as nobody else should be
                         * touching this conn */
                        kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
                        list_add_tail(&tx->tx_list, &sinners);
                }
        }
        spin_unlock(&conn->gnc_device->gnd_lock);
        spin_unlock(&conn->gnc_device->gnd_rdmaq_lock);

        /* nobody should have marked this as needing scheduling after
         * we called close - so only ref should be us handling it */
        if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
                CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
                                "done; attempting to recover conn 0x%p "
                                "scheduled %d function %s line: %d\n", conn,
                                conn->gnc_scheduled, conn->gnc_sched_caller,
                                conn->gnc_sched_line);
        }
        /* now reset a few to actual counters... */
        nrdma = atomic_read(&conn->gnc_nlive_rdma);
        nq_rdma = atomic_read(&conn->gnc_nq_rdma);

        if (!list_empty(&sinners)) {
                list_for_each_entry_safe(tx, txn, &sinners, tx_list) {
                        /* clear tx_list to make tx_add_list_locked happy */
                        list_del_init(&tx->tx_list);
                        /* The error codes determine if we hold onto the MDD */
                        kgnilnd_tx_done(tx, conn->gnc_error);
                }
        }

        logmsg = (nlive + nrdma + nq_rdma);

        if (logmsg) {
                int level = conn->gnc_peer->gnp_state == GNILND_PEER_UP ?
                                D_NETERROR : D_NET;
                CDEBUG(level, "Closed conn 0x%p->%s (errno %d,"
                        " peer errno %d): canceled %d TX, %d/%d RDMA\n",
                        conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                        conn->gnc_error, conn->gnc_peer_error,
                        nlive, nq_rdma, nrdma);
        }

        kgnilnd_destroy_conn_ep(conn);

        /* Bug 765042 - race this with completing a new conn to same peer - we need
         * finish_connect to detach purgatory before we can do it ourselves here */
        CFS_RACE(CFS_FAIL_GNI_FINISH_PURG);

        /* now it is safe to remove from peer list - anyone looking at
         * gnp_conns now is free to unlink if not on purgatory */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);

        conn->gnc_state = GNILND_CONN_DONE;

        /* Decrement counter if we are marked by del_conn_or_peers for closing
         */
        if (conn->gnc_needs_closing)
                kgnilnd_admin_decref(kgnilnd_data.kgn_npending_conns);

        /* Remove from peer's list of valid connections if it's not in purgatory */
        if (!conn->gnc_in_purgatory) {
                list_del_init(&conn->gnc_list);
                /* Lose peer's reference on the conn */
                kgnilnd_conn_decref(conn);
        }

        /* NB - only unlinking if we set pending in del_peer_locked from admin or
         * shutdown */
        if (kgnilnd_peer_active(conn->gnc_peer) &&
            conn->gnc_peer->gnp_pending_unlink &&
            kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
                kgnilnd_unlink_peer_locked(conn->gnc_peer);
        }

        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        /* I'm telling Mommy! - use peer_error if they initiated close */
        kgnilnd_peer_notify(conn->gnc_peer,
                            conn->gnc_error == -ECONNRESET ?
                            conn->gnc_peer_error : conn->gnc_error, 0);

        EXIT;
}

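/* Pull the remote peer's connection parameters out of the incoming
 * connreq: bind the EP, set its event data, and initialize SMSG. */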
int
kgnilnd_set_conn_params(kgn_dgram_t *dgram)
{
        kgn_conn_t             *conn = dgram->gndg_conn;
        kgn_connreq_t          *connreq = &dgram->gndg_conn_in;
        kgn_gniparams_t        *rem_param = &connreq->gncr_gnparams;
        gni_return_t            rrc;
        int                     rc = 0;
        gni_smsg_attr_t        *remote = &connreq->gncr_gnparams.gnpr_smsg_attr;

        /* set timeout vals in conn early so we can use them for the NAK */

        /* use max of the requested and our timeout, peer will do the same */
        conn->gnc_timeout = MAX(conn->gnc_timeout, connreq->gncr_timeout);

        /* only ep_bind really mucks around with the CQ */
        /* only ep_bind if we are not connecting to ourselves and the dstnid is
         * not a wildcard. This check is necessary as you can only bind an EP
         * once, and we must make sure we don't bind when already bound.
         */
        if (connreq->gncr_dstnid != LNET_NID_ANY && dgram->gndg_conn_out.gncr_dstnid != connreq->gncr_srcnid) {
                mutex_lock(&conn->gnc_device->gnd_cq_mutex);
                rrc = kgnilnd_ep_bind(conn->gnc_ephandle,
                        connreq->gncr_gnparams.gnpr_host_id,
                        conn->gnc_cqid);
                mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                if (rrc != GNI_RC_SUCCESS) {
                        rc = -ECONNABORTED;
                        goto return_out;
                }
        }

        rrc = kgnilnd_ep_set_eventdata(conn->gnc_ephandle, conn->gnc_cqid,
                         connreq->gncr_gnparams.gnpr_cqid);
        if (rrc != GNI_RC_SUCCESS) {
                rc = -ECONNABORTED;
                goto cleanup_out;
        }

        /* Initialize SMSG */
        rrc = kgnilnd_smsg_init(conn->gnc_ephandle, &conn->gnpr_smsg_attr,
                        &connreq->gncr_gnparams.gnpr_smsg_attr);
        if (unlikely(rrc == GNI_RC_INVALID_PARAM)) {
                gni_smsg_attr_t *local = &conn->gnpr_smsg_attr;
                /* help folks figure out if there is a tunable off, etc. */
                LCONSOLE_ERROR("SMSG attribute mismatch. Data from local/remote:"
                               " type %d/%d msg_maxsize %u/%u"
                               " mbox_maxcredit %u/%u. Please check kgni"
                               " logs for further data\n",
                               local->msg_type, remote->msg_type,
                               local->msg_maxsize, remote->msg_maxsize,
                               local->mbox_maxcredit, remote->mbox_maxcredit);
        }
        if (rrc != GNI_RC_SUCCESS) {
                rc = -ECONNABORTED;
                goto cleanup_out;
        }

        /* log this for help in debugging SMSG buffer re-use */
        CDEBUG(D_NET, "conn %p src %s dst %s smsg %p acquired"
                " local cqid %u SMSG %p->%u hndl %#llx.%#llx"
                " remote cqid %u SMSG %p->%u hndl %#llx.%#llx\n",
                conn, libcfs_nid2str(connreq->gncr_srcnid),
                libcfs_nid2str(connreq->gncr_dstnid),
                &conn->gnpr_smsg_attr,
                conn->gnc_cqid,
                conn->gnpr_smsg_attr.msg_buffer,
                conn->gnpr_smsg_attr.mbox_offset,
                conn->gnpr_smsg_attr.mem_hndl.qword1,
                conn->gnpr_smsg_attr.mem_hndl.qword2,
                rem_param->gnpr_cqid,
                rem_param->gnpr_smsg_attr.msg_buffer,
                rem_param->gnpr_smsg_attr.mbox_offset,
                rem_param->gnpr_smsg_attr.mem_hndl.qword1,
                rem_param->gnpr_smsg_attr.mem_hndl.qword2);

        conn->gnc_peerstamp = connreq->gncr_peerstamp;
        conn->gnc_peer_connstamp = connreq->gncr_connstamp;
        conn->remote_mbox_addr = (void *)((char *)remote->msg_buffer + remote->mbox_offset);

        /* We update the reaper timeout once we have a valid conn and timeout */
        kgnilnd_update_reaper_timeout(GNILND_TO2KA(conn->gnc_timeout));

        return 0;

cleanup_out:
        rrc = kgnilnd_ep_unbind(conn->gnc_ephandle);
        /* not sure I can just let this fly */
        LASSERTF(rrc == GNI_RC_SUCCESS,
                "bad rc from gni_ep_unbind trying to cleanup: %d\n", rrc);

return_out:
        LASSERTF(rc != 0, "SOFTWARE BUG: rc == 0\n");
        CERROR("Error setting connection params from %s: %d\n",
               libcfs_nid2str(connreq->gncr_srcnid), rc);
        return rc;
}

/* needs down_read on kgn_net_rw_sem held from before this call until
 * after the write_lock on kgn_peer_conn_lock - this ensures we stay sane
 * with kgnilnd_shutdown - it'll get the sem and set shutdown, then get the
 * kgn_peer_conn_lock to start del_peer'ing. If we hold the sem until after
 * kgn_peer_conn_lock is held, we guarantee that nobody calls
 * kgnilnd_add_peer_locked without checking gnn_shutdown */
int
kgnilnd_create_peer_safe(kgn_peer_t **peerp,
                         lnet_nid_t nid,
                         kgn_net_t *net,
                         int node_state)
{
        kgn_peer_t      *peer;
        int             rc;

        LASSERT(nid != LNET_NID_ANY);

        /* We don't pass the net around in the dgram anymore, so this is where
         * we look it up. That works unless we are in shutdown or the nid has
         * a net that is invalid; either way an error code is returned.
         *
         * If the net passed in is not NULL then we can use it directly; this
         * avoids a lookup when the calling function already has the data.
         */
        if (net == NULL) {
                rc = kgnilnd_find_net(nid, &net);
                if (rc < 0)
                        return rc;
        } else {
                /* kgnilnd_find_net adds a reference on the net; since we are
                 * not calling it, we must take the reference manually so the
                 * net's refcounts are correct when tearing it down
                 */
                kgnilnd_net_addref(net);
        }

        LIBCFS_ALLOC(peer, sizeof(*peer));
        if (peer == NULL) {
                kgnilnd_net_decref(net);
                return -ENOMEM;
        }
        peer->gnp_nid = nid;
        peer->gnp_state = node_state;

        /* translate from nid to nic addr & store */
        rc = kgnilnd_nid_to_nicaddrs(LNET_NIDADDR(nid), 1, &peer->gnp_host_id);
        if (rc <= 0) {
                kgnilnd_net_decref(net);
                LIBCFS_FREE(peer, sizeof(*peer));
                return -ESRCH;
        }
        CDEBUG(D_NET, "peer 0x%p->%s -> NIC 0x%x\n", peer,
                libcfs_nid2str(nid), peer->gnp_host_id);

        atomic_set(&peer->gnp_refcount, 1);     /* 1 ref for caller */
        atomic_set(&peer->gnp_dirty_eps, 0);

        INIT_LIST_HEAD(&peer->gnp_list);
        INIT_LIST_HEAD(&peer->gnp_connd_list);
        INIT_LIST_HEAD(&peer->gnp_conns);
        INIT_LIST_HEAD(&peer->gnp_tx_queue);

        /* the first reconnect should happen immediately, so we leave
         * gnp_reconnect_interval set to 0 */

        LASSERTF(net != NULL, "peer 0x%p->%s with NULL net\n",
                 peer, libcfs_nid2str(nid));

        /* must have kgn_net_rw_sem held for this... */
        if (net->gnn_shutdown) {
                /* shutdown has started already */
                kgnilnd_net_decref(net);
                LIBCFS_FREE(peer, sizeof(*peer));
                return -ESHUTDOWN;
        }

        peer->gnp_net = net;

        atomic_inc(&kgnilnd_data.kgn_npeers);

        *peerp = peer;
        return 0;
}

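/* Final free of a peer; the asserts below verify that every reference
 * and queued item has already been cleaned up. */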
void
kgnilnd_destroy_peer(kgn_peer_t *peer)
{
        CDEBUG(D_NET, "peer %s %p deleted\n",
               libcfs_nid2str(peer->gnp_nid), peer);
        LASSERTF(atomic_read(&peer->gnp_refcount) == 0,
                 "peer 0x%p->%s refs %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid),
                 atomic_read(&peer->gnp_refcount));
        LASSERTF(atomic_read(&peer->gnp_dirty_eps) == 0,
                 "peer 0x%p->%s dirty eps %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid),
                 atomic_read(&peer->gnp_dirty_eps));
        LASSERTF(peer->gnp_net != NULL, "peer %p (%s) with NULL net\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(!kgnilnd_peer_active(peer),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE || peer->gnp_connecting == GNILND_PEER_KILL,
                 "peer 0x%p->%s, connecting %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
        LASSERTF(list_empty(&peer->gnp_conns),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_tx_queue),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_connd_list),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));

        /* NB a peer's connections keep a reference on their peer until
         * they are destroyed, so we can be assured that _all_ state to do
         * with this peer has been cleaned up when its refcount drops to
         * zero. */

        atomic_dec(&kgnilnd_data.kgn_npeers);
        kgnilnd_net_decref(peer->gnp_net);

        LIBCFS_FREE(peer, sizeof(*peer));
}

/* the conn might not have made it all the way through to a connected
 * state - but we need to purgatory any conn that a remote peer might
 * have seen through a posted dgram as well */
void
kgnilnd_add_purgatory_locked(kgn_conn_t *conn, kgn_peer_t *peer)
{
        kgn_mbox_info_t *mbox = NULL;
        ENTRY;

        /* NB - the caller should own the conn by having removed it from the
         * scheduler thread when finishing the close */

        LASSERTF(peer != NULL, "conn %p with NULL peer\n", conn);

        /* If this is still true, need to add the calls to unlink back in and
         * figure out how to close the hole on loopback conns */
        LASSERTF(kgnilnd_peer_active(peer), "can't use inactive peer %s (%p)"
                " we'll never recover the resources\n",
                libcfs_nid2str(peer->gnp_nid), peer);

        CDEBUG(D_NET, "conn %p peer %p dev %p\n", conn, peer,
                conn->gnc_device);

        LASSERTF(conn->gnc_in_purgatory == 0,
                "Conn already in purgatory\n");
        conn->gnc_in_purgatory = 1;

        mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
        mbox->mbx_prev_purg_nid = peer->gnp_nid;
        mbox->mbx_add_purgatory = jiffies;
        kgnilnd_release_mbox(conn, 1);

        LASSERTF(list_empty(&conn->gnc_mdd_list),
                "conn 0x%p->%s with active purgatory hold MDD %d\n",
                conn, libcfs_nid2str(peer->gnp_nid),
                kgnilnd_count_list(&conn->gnc_mdd_list));

        EXIT;
}

/* Instead of detaching everything from purgatory here we just mark the conn as needing
 * detach; when the reaper checks the conn the next time it will detach it.
 * Calling function requires write_lock held on kgn_peer_conn_lock
 */
void
kgnilnd_mark_for_detach_purgatory_all_locked(kgn_peer_t *peer)
{
        kgn_conn_t       *conn;

        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_in_purgatory && !conn->gnc_needs_detach) {
                        conn->gnc_needs_detach = 1;
                        kgnilnd_admin_addref(kgnilnd_data.kgn_npending_detach);
                }
        }
}

/* Calling function needs a write_lock held on kgn_peer_conn_lock */
void
kgnilnd_detach_purgatory_locked(kgn_conn_t *conn, struct list_head *conn_list)
{
        kgn_mbox_info_t *mbox = NULL;

        /* if needed, add the conn purgatory data to the list passed in */
        if (conn->gnc_in_purgatory) {
                CDEBUG(D_NET, "peer %p->%s purg_conn %p@%s mdd_list #tx %d\n",
                        conn->gnc_peer, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                        conn, kgnilnd_conn_state2str(conn),
                        kgnilnd_count_list(&conn->gnc_mdd_list));

                mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
                mbox->mbx_detach_of_purgatory = jiffies;

                /* conn->gnc_list is the entry point on peer->gnp_conns, so detaching it
                 * here removes it from the list of 'valid' peer connections.
                 * We put the current conn onto a list of conns to call kgnilnd_release_purgatory_locked()
                 * on, and as such the caller of kgnilnd_detach_purgatory_locked() now owns that conn,
                 * since it's not on the peer's conn_list anymore.
                 */
                list_del_init(&conn->gnc_list);

                /* NB - only unlinking if we set pending in del_peer_locked from admin or
                 * shutdown */
                if (kgnilnd_peer_active(conn->gnc_peer) &&
                    conn->gnc_peer->gnp_pending_unlink &&
                    kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
                        kgnilnd_unlink_peer_locked(conn->gnc_peer);
                }
                /* The reaper will not call detach unless the conn is fully through kgnilnd_complete_closed_conn.
                 * If the conn is not in a DONE state somehow we are attempting to detach even though
                 * the conn has not been fully cleaned up. If we detach while the conn is still closing
                 * we will end up with an orphaned connection that has a valid ep_handle and is not on a
                 * peer.
                 */
                LASSERTF(conn->gnc_state == GNILND_CONN_DONE, "Conn in invalid state %p@%s\n",
                                conn, kgnilnd_conn_state2str(conn));

                /* move from peer to the delayed release list */
                list_add_tail(&conn->gnc_list, conn_list);
        }
}

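/* Release every conn on 'conn_list': give the mbox back, release any
 * held MDDs, notify LNet as needed, and drop the purgatory ref. */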
void
kgnilnd_release_purgatory_list(struct list_head *conn_list)
{
        kgn_device_t            *dev;
        kgn_conn_t              *conn, *connN;
        kgn_mdd_purgatory_t     *gmp, *gmpN;

        list_for_each_entry_safe(conn, connN, conn_list, gnc_list) {
                dev = conn->gnc_device;

                kgnilnd_release_mbox(conn, -1);
                conn->gnc_in_purgatory = 0;

                list_del_init(&conn->gnc_list);

                /* gnc_needs_detach is set in kgnilnd_del_conn_or_peer. It is used to keep track
                 * of conns that have been marked for detach by kgnilnd_del_conn_or_peer.
                 * The function uses kgn_npending_detach to verify the conn has
                 * actually been detached.
                 */

                if (conn->gnc_needs_detach)
                        kgnilnd_admin_decref(kgnilnd_data.kgn_npending_detach);

                /* if this guy is really dead (we are doing release from reaper),
                 * make sure we tell LNet - if this is from other context,
                 * the checks in the function will prevent an errant
                 * notification */
                kgnilnd_peer_notify(conn->gnc_peer, conn->gnc_error, 0);

                list_for_each_entry_safe(gmp, gmpN, &conn->gnc_mdd_list,
                                         gmp_list) {
                        CDEBUG(D_NET,
                               "dev %p releasing held mdd %#llx.%#llx\n",
                               conn->gnc_device, gmp->gmp_map_key.qword1,
                               gmp->gmp_map_key.qword2);

                        atomic_dec(&dev->gnd_n_mdd_held);
                        kgnilnd_mem_mdd_release(conn->gnc_device->gnd_handle,
                                                &gmp->gmp_map_key);
                        /* ignoring the return code - if kgni/ghal can't find it
                         * it must be released already */

                        list_del_init(&gmp->gmp_list);
                        LIBCFS_FREE(gmp, sizeof(*gmp));
                }
                /* lose conn ref for purgatory */
                kgnilnd_conn_decref(conn);
        }
}

/* needs write_lock on kgnilnd_data.kgn_peer_conn_lock held */
void
kgnilnd_peer_increase_reconnect_locked(kgn_peer_t *peer)
{
        int current_to;

        current_to = peer->gnp_reconnect_interval;

        /* we'll try to reconnect fast the first time, then back off */
        if (current_to == 0) {
                peer->gnp_reconnect_time = jiffies - 1;
                current_to = *kgnilnd_tunables.kgn_min_reconnect_interval;
        } else {
                peer->gnp_reconnect_time = jiffies + cfs_time_seconds(current_to);
                /* add 50% of min timeout & retry */
                current_to += *kgnilnd_tunables.kgn_min_reconnect_interval / 2;
        }

        current_to = MIN(current_to,
                                *kgnilnd_tunables.kgn_max_reconnect_interval);

        peer->gnp_reconnect_interval = current_to;
        CDEBUG(D_NET, "peer %s can reconnect at %lu interval %lu\n",
               libcfs_nid2str(peer->gnp_nid), peer->gnp_reconnect_time,
               peer->gnp_reconnect_interval);
}

/* needs kgnilnd_data.kgn_peer_conn_lock held */
kgn_peer_t *
kgnilnd_find_peer_locked(lnet_nid_t nid)
{
        struct list_head *peer_list = kgnilnd_nid2peerlist(nid);
        kgn_peer_t       *peer;

        /* Chopping nid down to only NIDADDR using LNET_NIDADDR so we only
         * have a single peer per device instead of a peer per nid/net combo.
         */

        list_for_each_entry(peer, peer_list, gnp_list) {
                if (LNET_NIDADDR(nid) != LNET_NIDADDR(peer->gnp_nid))
                        continue;

                CDEBUG(D_NET, "got peer [%p] -> %s c %d (%d)\n",
                       peer, libcfs_nid2str(nid),
                       peer->gnp_connecting,
                       atomic_read(&peer->gnp_refcount));
                return peer;
        }
        return NULL;
}
1298
1299 /* need write_lock on kgn_peer_conn_lock */
1300 void
1301 kgnilnd_unlink_peer_locked(kgn_peer_t *peer)
1302 {
1303         LASSERTF(list_empty(&peer->gnp_conns),
1304                 "peer 0x%p->%s\n",
1305                  peer, libcfs_nid2str(peer->gnp_nid));
1306         LASSERTF(list_empty(&peer->gnp_tx_queue),
1307                 "peer 0x%p->%s\n",
1308                  peer, libcfs_nid2str(peer->gnp_nid));
1309         LASSERTF(kgnilnd_peer_active(peer),
1310                 "peer 0x%p->%s\n",
1311                  peer, libcfs_nid2str(peer->gnp_nid));
1312         CDEBUG(D_NET, "unlinking peer 0x%p->%s\n",
1313                 peer, libcfs_nid2str(peer->gnp_nid));
1314
1315         list_del_init(&peer->gnp_list);
1316         kgnilnd_data.kgn_peer_version++;
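             /* drops the pending-unlink count; see kgnilnd_del_conn_or_peer(),
              * which takes the matching kgnilnd_admin_addref() when a
              * deliberate unlink is requested */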
1317         kgnilnd_admin_decref(kgnilnd_data.kgn_npending_unlink);
1318         /* lose peerlist's ref */
1319         kgnilnd_peer_decref(peer);
1320 }
1321
1322 int
1323 kgnilnd_get_peer_info(int index,
1324                       kgn_peer_t **found_peer,
1325                       lnet_nid_t *id, __u32 *nic_addr,
1326                       int *refcount, int *connecting)
1327 {
1328         struct list_head  *ptmp;
1329         kgn_peer_t        *peer;
1330         int               i;
1331         int               rc = -ENOENT;
1332
1333         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1334
1335         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1336
1337                 list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
1338                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1339
1340                         if (index-- > 0)
1341                                 continue;
1342
1343                         CDEBUG(D_NET, "found peer %p (%s) at index %d\n",
1344                                peer, libcfs_nid2str(peer->gnp_nid), index);
1345
1346                         *found_peer  = peer;
1347                         *id          = peer->gnp_nid;
1348                         *nic_addr    = peer->gnp_host_id;
1349                         *refcount    = atomic_read(&peer->gnp_refcount);
1350                         *connecting  = peer->gnp_connecting;
1351
1352                         rc = 0;
1353                         goto out;
1354                 }
1355         }
1356 out:
1357         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1358         if (rc)
1359                 CDEBUG(D_NET, "no gni peer at index %d\n", index);
1360         return rc;
1361 }
1362
1363 /* requires write_lock on kgn_peer_conn_lock held */
1364 void
1365 kgnilnd_add_peer_locked(lnet_nid_t nid, kgn_peer_t *new_stub_peer, kgn_peer_t **peerp)
1366 {
1367         kgn_peer_t        *peer, *peer2;
1368
1369         LASSERTF(new_stub_peer != NULL, "bad stub peer for nid %s\n",
1370                  libcfs_nid2str(nid));
1371
1372         peer2 = kgnilnd_find_peer_locked(nid);
1373         if (peer2 != NULL) {
1374                 /* A peer was created during the lock transition, so drop
1375                  * the new one we created */
1376                 kgnilnd_peer_decref(new_stub_peer);
1377                 peer = peer2;
1378         } else {
1379                 peer = new_stub_peer;
1380                 /* peer table takes existing ref on peer */
1381
1382                 LASSERTF(!kgnilnd_peer_active(peer),
1383                         "peer 0x%p->%s already in peer table\n",
1384                         peer, libcfs_nid2str(peer->gnp_nid));
1385                 list_add_tail(&peer->gnp_list,
1386                               kgnilnd_nid2peerlist(nid));
1387                 kgnilnd_data.kgn_peer_version++;
1388         }
1389
1390         LASSERTF(peer->gnp_net != NULL, "peer 0x%p->%s with NULL net\n",
1391                  peer, libcfs_nid2str(peer->gnp_nid));
1392         *peerp = peer;
1393 }
1394
1395 int
1396 kgnilnd_add_peer(kgn_net_t *net, lnet_nid_t nid, kgn_peer_t **peerp)
1397 {
1398         kgn_peer_t        *peer;
1399         int                rc;
1400         int                node_state;
1401         ENTRY;
1402
1403         if (nid == LNET_NID_ANY)
1404                 return -EINVAL;
1405
1406         node_state = kgnilnd_get_node_state(LNET_NIDADDR(nid));
1407
1408         /* NB - this will not block during normal operations -
1409          * the only writer of this is in the startup/shutdown path. */
1410         rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
1411         if (!rc) {
1412                 rc = -ESHUTDOWN;
1413                 RETURN(rc);
1414         }
1415         rc = kgnilnd_create_peer_safe(&peer, nid, net, node_state);
1416         if (rc != 0) {
1417                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1418                 RETURN(rc);
1419         }
1420
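             /* NB: grab the peer table lock before dropping the net semaphore,
              * so the net cannot start tearing down between the two steps */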
1421         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1422         up_read(&kgnilnd_data.kgn_net_rw_sem);
1423
1424         kgnilnd_add_peer_locked(nid, peer, peerp);
1425
1426         CDEBUG(D_NET, "peer 0x%p->%s connecting %d\n",
1427                *peerp, libcfs_nid2str((*peerp)->gnp_nid),
1428                (*peerp)->gnp_connecting);
1429
1430         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1431         RETURN(0);
1432 }
1433
1434 /* needs write_lock on kgn_peer_conn_lock */
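     /* canceled TXs are collected on @zombies; the caller completes them with
      * kgnilnd_txlist_done() after dropping the lock */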
1435 void
1436 kgnilnd_cancel_peer_connect_locked(kgn_peer_t *peer, struct list_head *zombies)
1437 {
1438         kgn_tx_t        *tx, *txn;
1439
1440         /* we do care about the state of gnp_connecting - we could be between
1441          * reconnect attempts, so try to find the dgram and cancel the TX
1442          * anyway. If we are in the process of posting, DON'T do anything;
1443          * once the post fails or succeeds we can nuke the connect attempt.
1444          * We have no idea how far into kgnilnd_post_dgram we are, so we
1445          * can't attempt to cancel until that function is done.
1446          */
1447
1448         /* make sure peer isn't in process of connecting or waiting for connect*/
1449         spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1450         if (!(list_empty(&peer->gnp_connd_list))) {
1451                 list_del_init(&peer->gnp_connd_list);
1452                 /* remove connd ref */
1453                 kgnilnd_peer_decref(peer);
1454         }
1455         spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1456
1457         if (peer->gnp_connecting == GNILND_PEER_POSTING || peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH) {
1458                 peer->gnp_connecting = GNILND_PEER_NEEDS_DEATH;
1459                 /* We are in the process of posting right now; the xchg has set it up
1460                  * for us to cancel the connect, so we are finished for now */
1461         } else {
1462                 /* no need for an exchange - we hold the peer lock and it's ready for us to nuke */
1463                 LASSERTF(peer->gnp_connecting != GNILND_PEER_POSTING,
1464                         "Peer in invalid state 0x%p->%s, connecting %d\n",
1465                         peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
1466                 peer->gnp_connecting = GNILND_PEER_IDLE;
1467                 set_mb(peer->gnp_last_dgram_errno, -ETIMEDOUT);
1468                 kgnilnd_find_and_cancel_dgram(peer->gnp_net->gnn_dev,
1469                                                       peer->gnp_nid);
1470         }
1471
1472         /* The least we can do is nuke the TXs no matter what... */
1473         list_for_each_entry_safe(tx, txn, &peer->gnp_tx_queue, tx_list) {
1474                 kgnilnd_tx_del_state_locked(tx, peer, NULL,
1475                                            GNILND_TX_ALLOCD);
1476                 list_add_tail(&tx->tx_list, zombies);
1477         }
1478 }
1479
1480 /* needs write_lock on kgn_peer_conn_lock */
1481 void
1482 kgnilnd_del_peer_locked(kgn_peer_t *peer, int error)
1483 {
1484         /* this peer could be passive and only held for purgatory,
1485          * take a ref to ensure it doesn't disappear in this function */
1486         kgnilnd_peer_addref(peer);
1487
1488         CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);
1489
1490         /* if purgatory release cleared it out, don't try again */
1491         if (kgnilnd_peer_active(peer)) {
1492                 /* always do this to allow kgnilnd_start_connect and
1493                  * kgnilnd_finish_connect to catch this before they
1494                  * wrap up their operations */
1495                 if (kgnilnd_can_unlink_peer_locked(peer)) {
1496                         /* already released purgatory, so only active
1497                          * conns hold it */
1498                         kgnilnd_unlink_peer_locked(peer);
1499                 } else {
1500                         kgnilnd_close_peer_conns_locked(peer, error);
1501                         /* peer unlinks itself when last conn is closed */
1502                 }
1503         }
1504
1505         /* we are done, release back to the wild */
1506         kgnilnd_peer_decref(peer);
1507 }
1508
1509 int
1510 kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
1511                           int error)
1512 {
1514         LIST_HEAD(zombies);
1515         struct list_head        *ptmp, *pnxt;
1516         kgn_peer_t              *peer;
1517         int                     lo;
1518         int                     hi;
1519         int                     i;
1520         int                     rc = -ENOENT;
1521
1522         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1523
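             /* kgnilnd_nid2peerlist() returns &kgn_peers[hash(nid)], so
              * subtracting the table base yields the one bucket to scan */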
1524         if (nid != LNET_NID_ANY)
1525                 lo = hi = kgnilnd_nid2peerlist(nid) - kgnilnd_data.kgn_peers;
1526         else {
1527                 lo = 0;
1528                 hi = *kgnilnd_tunables.kgn_peer_hash_size - 1;
1529                 /* wildcards always succeed */
1530                 rc = 0;
1531         }
1532
1533         for (i = lo; i <= hi; i++) {
1534                 list_for_each_safe(ptmp, pnxt, &kgnilnd_data.kgn_peers[i]) {
1535                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1536
1537                         LASSERTF(peer->gnp_net != NULL,
1538                                 "peer %p (%s) with NULL net\n",
1539                                  peer, libcfs_nid2str(peer->gnp_nid));
1540
1541                         if (net != NULL && peer->gnp_net != net)
1542                                 continue;
1543
1544                         if (!(nid == LNET_NID_ANY || LNET_NIDADDR(peer->gnp_nid) == LNET_NIDADDR(nid)))
1545                                 continue;
1546
1547                         /* In all cases, we want to stop any in-flight
1548                          * connect attempts */
1549                         kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1550
1551                         switch (command) {
1552                         case GNILND_DEL_CONN:
1553                                 kgnilnd_close_peer_conns_locked(peer, error);
1554                                 break;
1555                         case GNILND_DEL_PEER:
1556                                 peer->gnp_pending_unlink = 1;
1557                                 kgnilnd_admin_addref(kgnilnd_data.kgn_npending_unlink);
1558                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1559                                 kgnilnd_del_peer_locked(peer, error);
1560                                 break;
1561                         case GNILND_CLEAR_PURGATORY:
1562                                 /* Mark everything ready for detach; the reaper will
1563                                  * clean up once we release the kgn_peer_conn_lock
1564                                  */
1565                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1566                                 peer->gnp_last_errno = -EISCONN;
1567                                 /* clear the back-off so the peer can reconnect soon */
1568                                 peer->gnp_reconnect_time = 0;
1569                                 peer->gnp_reconnect_interval = 0;
1570                                 break;
1571                         default:
1572                                 CERROR("bad command %d\n", command);
1573                                 LBUG();
1574                         }
1575                         /* we matched something */
1576                         rc = 0;
1577                 }
1578         }
1579
1580         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1581
1582         /* nuke peer TX */
1583         kgnilnd_txlist_done(&zombies, error);
1584
1585         /* This function does not return until the commands it initiated have completed,
1586          * since they have to work their way through the other threads. In the case of
1587          * shutdown, threads are not woken up until after this call is initiated, so we
1588          * cannot wait - we just need to return. The same applies to stack reset: we
1589          * shouldn't wait, as the reset thread handles the closing.
1590          */
1591
1592         CFS_RACE(CFS_FAIL_GNI_RACE_RESET);
1593
1594         if (error == -ENOTRECOVERABLE || error == -ESHUTDOWN) {
1595                 return rc;
1596         }
1597
1598         i = 4;
1599         while (atomic_read(&kgnilnd_data.kgn_npending_conns)   ||
1600                atomic_read(&kgnilnd_data.kgn_npending_detach)  ||
1601                atomic_read(&kgnilnd_data.kgn_npending_unlink)) {
1602
1603                 set_current_state(TASK_UNINTERRUPTIBLE);
1604                 schedule_timeout(cfs_time_seconds(1));
1605                 i++;
1606
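                     /* (i & -i) == i only for powers of two, so the louder
                      * D_WARNING copy of this message decays exponentially */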
1607                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, "Waiting on %d peers %d closes %d detaches\n",
1608                                 atomic_read(&kgnilnd_data.kgn_npending_unlink),
1609                                 atomic_read(&kgnilnd_data.kgn_npending_conns),
1610                                 atomic_read(&kgnilnd_data.kgn_npending_detach));
1611         }
1612
1613         return rc;
1614 }
1615
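     /* walk the peer hash and return the index'th ESTABLISHED conn with a
      * reference held; the caller must kgnilnd_conn_decref() when done */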
1616 kgn_conn_t *
1617 kgnilnd_get_conn_by_idx(int index)
1618 {
1619         kgn_peer_t        *peer;
1620         struct list_head  *ptmp;
1621         kgn_conn_t        *conn;
1622         struct list_head  *ctmp;
1623         int                i;
1624
1625
1626         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1627                 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1628                 list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
1629
1630                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1631
1632                         list_for_each(ctmp, &peer->gnp_conns) {
1633                                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
1634
1635                                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1636                                         continue;
1637
1638                                 if (index-- > 0)
1639                                         continue;
1640
1641                                 CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
1642                                        libcfs_nid2str(conn->gnc_peer->gnp_nid),
1643                                        atomic_read(&conn->gnc_refcount));
1644                                 kgnilnd_conn_addref(conn);
1645                                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1646                                 return conn;
1647                         }
1648                 }
1649                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1650         }
1651
1652         return NULL;
1653 }
1654
1655 int
1656 kgnilnd_get_conn_info(kgn_peer_t *peer,
1657                       int *device_id, __u64 *peerstamp,
1658                       int *tx_seq, int *rx_seq,
1659                       int *fmaq_len, int *nfma, int *nrdma)
1660 {
1661         kgn_conn_t        *conn;
1662         int               rc = 0;
1663
1664         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1665
1666         conn = kgnilnd_find_conn_locked(peer);
1667         if (conn == NULL) {
1668                 rc = -ENOENT;
1669                 goto out;
1670         }
1671
1672         *device_id = conn->gnc_device->gnd_host_id;
1673         *peerstamp = conn->gnc_peerstamp;
1674         *tx_seq = atomic_read(&conn->gnc_tx_seq);
1675         *rx_seq = atomic_read(&conn->gnc_rx_seq);
1676         *fmaq_len = kgnilnd_count_list(&conn->gnc_fmaq);
1677         *nfma = atomic_read(&conn->gnc_nlive_fma);
1678         *nrdma = atomic_read(&conn->gnc_nlive_rdma);
1679 out:
1680         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1681         return rc;
1682 }
1683
1684 /* needs write_lock on kgn_peer_conn_lock */
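     /* returns the number of ESTABLISHED conns found and marked for close */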
1685 int
1686 kgnilnd_close_peer_conns_locked(kgn_peer_t *peer, int why)
1687 {
1688         kgn_conn_t         *conn;
1689         struct list_head   *ctmp, *cnxt;
1690         int                 count = 0;
1691
1692         list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
1693                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
1694
1695                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1696                         continue;
1697
1698                 count++;
1699                 /* we mark gnc_needs_closing and increment kgn_npending_conns so that
1700                  * kgnilnd_del_conn_or_peer can wait on the other threads closing
1701                  * and cleaning up the connection.
1702                  */
1703                 if (!conn->gnc_needs_closing) {
1704                         conn->gnc_needs_closing = 1;
1705                         kgnilnd_admin_addref(kgnilnd_data.kgn_npending_conns);
1706                 }
1707                 kgnilnd_close_conn_locked(conn, why);
1708         }
1709         return count;
1710 }
1711
1712 int
1713 kgnilnd_report_node_state(lnet_nid_t nid, int down)
1714 {
1715         int         rc;
1716         kgn_peer_t  *peer, *new_peer;
1717         LIST_HEAD(zombies);
1718
1719         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1720         peer = kgnilnd_find_peer_locked(nid);
1721
1722         if (peer == NULL) {
1723                 int       i;
1724                 int       found_net = 0;
1725                 kgn_net_t *net;
1726
1727                 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1728
1729                 /* Don't add a peer for node up events */
1730                 if (down == GNILND_PEER_UP)
1731                         return 0;
1732
1733                 /* find any valid net - we don't care which one... */
1734                 down_read(&kgnilnd_data.kgn_net_rw_sem);
1735                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
1736                         list_for_each_entry(net, &kgnilnd_data.kgn_nets[i],
1737                                             gnn_list) {
1738                                 found_net = 1;
1739                                 break;
1740                         }
1741
1742                         if (found_net) {
1743                                 break;
1744                         }
1745                 }
1746                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1747
1748                 if (!found_net) {
1749                         CNETERR("Could not find a net for nid %lld\n", nid);
1750                         return 1;
1751                 }
1752
1753                 /* The nid passed in does not yet contain the net portion.
1754                  * Let's build it up now
1755                  */
1756                 nid = LNET_MKNID(LNET_NIDNET(net->gnn_ni->ni_nid), nid);
1757                 rc = kgnilnd_add_peer(net, nid, &new_peer);
1758
1759                 if (rc) {
1760                         CNETERR("Could not add peer for nid %lld, rc %d\n",
1761                                 nid, rc);
1762                         return 1;
1763                 }
1764
1765                 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1766                 peer = kgnilnd_find_peer_locked(nid);
1767
1768                 if (peer == NULL) {
1769                         CNETERR("Could not find peer for nid %lld\n", nid);
1770                         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1771                         return 1;
1772                 }
1773         }
1774
1775         peer->gnp_state = down;
1776
1777         if (down == GNILND_PEER_DOWN) {
1778                 kgn_conn_t *conn;
1779
1780                 peer->gnp_down_event_time = jiffies;
1781                 kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1782                 conn = kgnilnd_find_conn_locked(peer);
1783
1784                 if (conn != NULL) {
1785                         kgnilnd_close_conn_locked(conn, -ENETRESET);
1786                 }
1787         } else {
1788                 peer->gnp_up_event_time = jiffies;
1789         }
1790
1791         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1792
1793         if (down == GNILND_PEER_DOWN) {
1794                 /* using ENETRESET so we don't get messages from
1795                  * kgnilnd_tx_done
1796                  */
1797                 kgnilnd_txlist_done(&zombies, -ENETRESET);
1798                 kgnilnd_peer_notify(peer, -ECONNRESET, 0);
1799                 LCONSOLE_INFO("Received down event for nid %d\n",
1800                               LNET_NIDADDR(nid));
1801         }
1802
1803         return 0;
1804 }
1805
1806 int
1807 kgnilnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
1808 {
1809         struct libcfs_ioctl_data *data = arg;
1810         kgn_net_t                *net = ni->ni_data;
1811         int                       rc = -EINVAL;
1812
1813         LASSERT(ni == net->gnn_ni);
1814
1815         switch (cmd) {
1816         case IOC_LIBCFS_GET_PEER: {
1817                 lnet_nid_t   nid = 0;
1818                 kgn_peer_t  *peer = NULL;
1819                 __u32 nic_addr = 0;
1820                 __u64 peerstamp = 0;
1821                 int peer_refcount = 0, peer_connecting = 0;
1822                 int device_id = 0;
1823                 int tx_seq = 0, rx_seq = 0;
1824                 int fmaq_len = 0, nfma = 0, nrdma = 0;
1825
1826                 rc = kgnilnd_get_peer_info(data->ioc_count, &peer,
1827                                            &nid, &nic_addr, &peer_refcount,
1828                                            &peer_connecting);
1829                 if (rc)
1830                         break;
1831
1832                 /* Barf */
1833                 /* LNET_MKNID is used to mask from LNet the multiplexing/demultiplexing of connections and peers.
1834                  * LNet assumes a conn and peer per net; LNET_MKNID/LNET_NIDADDR lets us show LNet what it
1835                  * wants to see instead of the underlying network that is actually used to send the data
1836                  */
1837                 data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(nid));
1838                 data->ioc_flags  = peer_connecting;
1839                 data->ioc_count  = peer_refcount;
1840
1841                 rc = kgnilnd_get_conn_info(peer, &device_id, &peerstamp,
1842                                            &tx_seq, &rx_seq, &fmaq_len,
1843                                            &nfma, &nrdma);
1844
1845                 /* This is allowable - a persistent peer could not
1846                  * have a connection */
1847                 if (rc) {
1848                         /* flag to indicate we are not connected -
1849                          * need to print as such */
1850                         data->ioc_flags |= (1<<16);
1851                         rc = 0;
1852                 } else {
1853                         /* still barf */
1854                         data->ioc_net = device_id;
1855                         data->ioc_u64[0] = peerstamp;
1856                         data->ioc_u32[0] = fmaq_len;
1857                         data->ioc_u32[1] = nfma;
1858                         data->ioc_u32[2] = tx_seq;
1859                         data->ioc_u32[3] = rx_seq;
1860                         data->ioc_u32[4] = nrdma;
1861                 }
1862                 break;
1863         }
1864         case IOC_LIBCFS_ADD_PEER: {
1865                 /* just dummy value to allow using common interface */
1866                 kgn_peer_t      *peer;
1867                 rc = kgnilnd_add_peer(net, data->ioc_nid, &peer);
1868                 break;
1869         }
1870         case IOC_LIBCFS_DEL_PEER: {
1871                 /* NULL is passed in so it affects all peers in existence without regard to
1872                  * network, as the peer may not exist on the network LNET believes it to be on.
1873                  */
1874                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1875                                               GNILND_DEL_PEER, -EUCLEAN);
1876                 break;
1877         }
1878         case IOC_LIBCFS_GET_CONN: {
1879                 kgn_conn_t *conn = kgnilnd_get_conn_by_idx(data->ioc_count);
1880
1881                 if (conn == NULL)
1882                         rc = -ENOENT;
1883                 else {
1884                         rc = 0;
1885                         /* LNET_MKNID is used to build the correct address based on what LNET wants to see instead of
1886                          * the generic connection that is used to send the data
1887                          */
1888                         data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(conn->gnc_peer->gnp_nid));
1889                         data->ioc_u32[0] = conn->gnc_device->gnd_id;
1890                         kgnilnd_conn_decref(conn);
1891                 }
1892                 break;
1893         }
1894         case IOC_LIBCFS_CLOSE_CONNECTION: {
1895                 /* use error = -ENETRESET to indicate it was lctl disconnect */
1896                 /* NULL is passed in so it affects all the nets, as the connection is virtual
1897                  * and may not exist on the network LNET believes it to be on.
1898                  */
1899                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1900                                               GNILND_DEL_CONN, -ENETRESET);
1901                 break;
1902         }
1903         case IOC_LIBCFS_PUSH_CONNECTION: {
1904                 /* we use this to flush purgatory */
1905                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1906                                               GNILND_CLEAR_PURGATORY, -EUCLEAN);
1907                 break;
1908         }
1909         case IOC_LIBCFS_REGISTER_MYNID: {
1910                 /* Ignore if this is a noop */
1911                 if (data->ioc_nid == ni->ni_nid) {
1912                         rc = 0;
1913                 } else {
1914                         CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
1915                                libcfs_nid2str(data->ioc_nid),
1916                                libcfs_nid2str(ni->ni_nid));
1917                         rc = -EINVAL;
1918                 }
1919                 break;
1920         }
1921         }
1922
1923         return rc;
1924 }
1925
1926 void
1927 kgnilnd_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
1928 {
1929         kgn_net_t               *net = ni->ni_data;
1930         kgn_tx_t                *tx;
1931         kgn_peer_t              *peer = NULL;
1932         kgn_conn_t              *conn = NULL;
1933         lnet_process_id_t       id = {
1934                 .nid = nid,
1935                 .pid = LNET_PID_LUSTRE,
1936         };
1937         ENTRY;
1938
1939         /* I expect to find him, so only take a read lock */
1940         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1941         peer = kgnilnd_find_peer_locked(nid);
1942         if (peer != NULL) {
1943                 /* LIE if in a quiesce - we will update the timeouts after,
1944                  * but we don't want sends failing during it */
1945                 if (kgnilnd_data.kgn_quiesce_trigger) {
1946                         *when = jiffies;
1947                         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1948                         GOTO(out, 0);
1949                 }
1950
1951                 /* Update to best guess, might refine on later checks */
1952                 *when = peer->gnp_last_alive;
1953
1954                 /* we have a peer, how about a conn? */
1955                 conn = kgnilnd_find_conn_locked(peer);
1956
1957                 if (conn == NULL) {
1958                         /* if there is no conn, check peer last errno to see if clean disconnect
1959                          * - if it was, we lie to LNet because we believe a TX would complete
1960                          * on reconnect */
1961                         if (kgnilnd_conn_clean_errno(peer->gnp_last_errno)) {
1962                                 *when = jiffies;
1963                         }
1964                         /* we still want to fire a TX and new conn in this case */
1965                 } else {
1966                         /* gnp_last_alive is valid, run for the hills */
1967                         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1968                         GOTO(out, 0);
1969                 }
1970         }
1971         /* if we get here, either we have no peer or no conn for him, so fire off
1972          * new TX to trigger conn setup */
1973         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1974
1975         /* if we couldn't find him, we'll fire up a TX and get connected -
1976          * if we don't do this, after ni_peer_timeout, LNet will declare him dead.
1977          * So really we treat kgnilnd_query as a bit of a 'connect now' type
1978          * event because it'll only do this when it wants to send
1979          *
1980          * Use a real TX for this to get the proper gnp_tx_queue behavior, etc
1981          * normally we'd use kgnilnd_send_ctlmsg for this, but we don't really
1982          * care that this goes out quickly since we already know we need a new conn
1983          * formed */
1984         if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
1985                 return;
1986
1987         tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, ni->ni_nid);
1988         if (tx != NULL) {
1989                 kgnilnd_launch_tx(tx, net, &id);
1990         }
1991 out:
1992         CDEBUG(D_NETTRACE, "peer 0x%p->%s when %lu\n", peer,
1993                libcfs_nid2str(nid), *when);
1994         EXIT;
1995 }
1996
1997 int
1998 kgnilnd_dev_init(kgn_device_t *dev)
1999 {
2000         gni_return_t      rrc;
2001         int               rc = 0;
2002         unsigned int      cq_size;
2003         ENTRY;
2004
2005         /* size of these CQs should be able to accommodate the outgoing
2006          * RDMA and SMSG transactions.  Since we don't really know what we
2007          * need here, we'll take credits * 2 * 3 to allow a bunch.
2008          * We need to dig into this more with the performance work. */
2009         cq_size = *kgnilnd_tunables.kgn_credits * 2 * 3;
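             /* e.g. with a hypothetical kgn_credits of 256, cq_size = 1536 */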
2010
2011         rrc = kgnilnd_cdm_create(dev->gnd_id, *kgnilnd_tunables.kgn_ptag,
2012                                  *kgnilnd_tunables.kgn_pkey, 0,
2013                                  &dev->gnd_domain);
2014         if (rrc != GNI_RC_SUCCESS) {
2015                 CERROR("Can't create CDM %d (%d)\n", dev->gnd_id, rrc);
2016                 GOTO(failed, rc = -ENODEV);
2017         }
2018
2019         rrc = kgnilnd_cdm_attach(dev->gnd_domain, dev->gnd_id,
2020                                  &dev->gnd_host_id, &dev->gnd_handle);
2021         if (rrc != GNI_RC_SUCCESS) {
2022                 CERROR("Can't attach CDM to device %d (%d)\n",
2023                         dev->gnd_id, rrc);
2024                 GOTO(failed, rc = -ENODEV);
2025         }
2026
2027         /* a bit gross, but not much we can do - Aries Sim doesn't have
2028          * hardcoded NIC/NID that we can use */
2029         rc = kgnilnd_setup_nic_translation(dev->gnd_host_id);
2030         if (rc != 0)
2031                 GOTO(failed, rc = -ENODEV);
2032
2033         /* only dev 0 gets the errors - no need to reset the stack twice
2034          * - this works because we have a single PTAG; if we had more,
2035          * we'd need to have multiple handlers */
2036         if (dev->gnd_id == 0) {
2037                 rrc = kgnilnd_subscribe_errors(dev->gnd_handle,
2038                                                 GNI_ERRMASK_CRITICAL |
2039                                                 GNI_ERRMASK_UNKNOWN_TRANSACTION,
2040                                               0, NULL, kgnilnd_critical_error,
2041                                               &dev->gnd_err_handle);
2042                 if (rrc != GNI_RC_SUCCESS) {
2043                         CERROR("Can't subscribe for errors on device %d: rc %d\n",
2044                                 dev->gnd_id, rrc);
2045                         GOTO(failed, rc = -ENODEV);
2046                 }
2047
2048                 rc = kgnilnd_set_quiesce_callback(dev->gnd_handle,
2049                                                   kgnilnd_quiesce_end_callback);
2050                 if (rc != GNI_RC_SUCCESS) {
2051                         CERROR("Can't subscribe for quiesce callback on device %d: rc %d\n",
2052                                 dev->gnd_id, rc);
2053                         GOTO(failed, rc = -ENODEV);
2054                 }
2055         }
2056
2057         rc = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_IP, &kgnilnd_data.kgn_sock);
2058         if (rc < 0) {
2059                 CERROR("sock_create returned %d\n", rc);
2060                 GOTO(failed, rc);
2061         }
2062
2063         rc = kgnilnd_nicaddr_to_nid(dev->gnd_host_id, &dev->gnd_nid);
2064         if (rc < 0) {
2065                 /* log messages during startup */
2066                 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
2067                         CERROR("couldn't translate host_id 0x%x to nid. rc %d\n",
2068                                 dev->gnd_host_id, rc);
2069                 }
2070                 GOTO(failed, rc = -ESRCH);
2071         }
2072         CDEBUG(D_NET, "NIC %x -> NID %d\n", dev->gnd_host_id, dev->gnd_nid);
2073
2074         rrc = kgnilnd_cq_create(dev->gnd_handle, *kgnilnd_tunables.kgn_credits,
2075                                 0, kgnilnd_device_callback,
2076                                 dev->gnd_id, &dev->gnd_snd_rdma_cqh);
2077         if (rrc != GNI_RC_SUCCESS) {
2078                 CERROR("Can't create rdma send cq size %d for device "
2079                        "%d (%d)\n", *kgnilnd_tunables.kgn_credits, dev->gnd_id, rrc);
2080                 GOTO(failed, rc = -EINVAL);
2081         }
2082
2083         rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
2084                         0, kgnilnd_device_callback, dev->gnd_id,
2085                         &dev->gnd_snd_fma_cqh);
2086         if (rrc != GNI_RC_SUCCESS) {
2087                 CERROR("Can't create fma send cq size %u for device %d (%d)\n",
2088                        cq_size, dev->gnd_id, rrc);
2089                 GOTO(failed, rc = -EINVAL);
2090         }
2091
2092         /* This one we size differently - overflows are possible and it needs to be
2093          * sized based on machine size */
2094         rrc = kgnilnd_cq_create(dev->gnd_handle,
2095                         *kgnilnd_tunables.kgn_fma_cq_size,
2096                         0, kgnilnd_device_callback, dev->gnd_id,
2097                         &dev->gnd_rcv_fma_cqh);
2098         if (rrc != GNI_RC_SUCCESS) {
2099                 CERROR("Can't create fma cq size %d for device %d (%d)\n",
2100                        *kgnilnd_tunables.kgn_fma_cq_size, dev->gnd_id, rrc);
2101                 GOTO(failed, rc = -EINVAL);
2102         }
2103
2104         rrc = kgnilnd_register_smdd_buf(dev);
2105         if (rrc != GNI_RC_SUCCESS) {
2106                 GOTO(failed, rc = -EINVAL);
2107         }
2108
2109         RETURN(0);
2110
2111 failed:
2112         kgnilnd_dev_fini(dev);
2113         RETURN(rc);
2114 }
2115
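     /* also called from the failed: path in kgnilnd_dev_init(), so each
      * teardown step below checks whether its handle was ever created */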
2116 void
2117 kgnilnd_dev_fini(kgn_device_t *dev)
2118 {
2119         gni_return_t rrc;
2120         ENTRY;
2121
2122         /* At quiesce or reset time, do we need to loop through and clear gnd_ready_conns? */
2123         LASSERTF(list_empty(&dev->gnd_ready_conns) &&
2124                  list_empty(&dev->gnd_map_tx) &&
2125                  list_empty(&dev->gnd_rdmaq) &&
2126                  list_empty(&dev->gnd_delay_conns),
2127                  "dev 0x%p ready_conns %d@0x%p delay_conns %d@0x%p "
2128                  "map_tx %d@0x%p rdmaq %d@0x%p\n",
2129                  dev, kgnilnd_count_list(&dev->gnd_ready_conns), &dev->gnd_ready_conns,
2130                  kgnilnd_count_list(&dev->gnd_delay_conns), &dev->gnd_delay_conns,
2131                  kgnilnd_count_list(&dev->gnd_map_tx), &dev->gnd_map_tx,
2132                  kgnilnd_count_list(&dev->gnd_rdmaq), &dev->gnd_rdmaq);
2133
2134         /* These should follow from tearing down all connections */
2135         LASSERTF(dev->gnd_map_nphys == 0 && dev->gnd_map_physnop == 0,
2136                 "%d physical mappings of %d pages still mapped\n",
2137                  dev->gnd_map_nphys, dev->gnd_map_physnop);
2138
2139         LASSERTF(dev->gnd_map_nvirt == 0 && dev->gnd_map_virtnob == 0,
2140                 "%d virtual mappings of %llu bytes still mapped\n",
2141                  dev->gnd_map_nvirt, dev->gnd_map_virtnob);
2142
2143         LASSERTF(atomic_read(&dev->gnd_n_mdd) == 0 &&
2144                  atomic_read(&dev->gnd_n_mdd_held) == 0 &&
2145                  atomic64_read(&dev->gnd_nbytes_map) == 0,
2146                 "%d MDD mappings of %ld bytes still mapped, %d still held\n",
2147                  atomic_read(&dev->gnd_n_mdd),
2148                  atomic64_read(&dev->gnd_nbytes_map), atomic_read(&dev->gnd_n_mdd_held));
2149
2150         LASSERT(list_empty(&dev->gnd_map_list));
2151
2152         /* What other assertions needed to ensure all connections torn down ? */
2153
2154         /* check all counters == 0 (EP, MDD, etc) */
2155
2156         /* if we are resetting due to quiesce (stack reset), don't check
2157          * thread states */
2158         LASSERTF(kgnilnd_data.kgn_quiesce_trigger ||
2159                 atomic_read(&kgnilnd_data.kgn_nthreads) == 0,
2160                 "tried to shutdown with threads active\n");
2161
2162         if (dev->gnd_smdd_hold_buf) {
2163                 rrc = kgnilnd_deregister_smdd_buf(dev);
2164                 LASSERTF(rrc == GNI_RC_SUCCESS,
2165                 "bad rc from deregistration of sMDD buffer: %d\n", rrc);
2166                 dev->gnd_smdd_hold_buf = NULL;
2167         }
2168
2169         if (dev->gnd_rcv_fma_cqh) {
2170                 rrc = kgnilnd_cq_destroy(dev->gnd_rcv_fma_cqh);
2171                 LASSERTF(rrc == GNI_RC_SUCCESS,
2172                         "bad rc from gni_cq_destroy on rcv_fma_cqh: %d\n", rrc);
2173                 dev->gnd_rcv_fma_cqh = NULL;
2174         }
2175
2176         if (dev->gnd_snd_rdma_cqh) {
2177                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_rdma_cqh);
2178                 LASSERTF(rrc == GNI_RC_SUCCESS,
2179                         "bad rc from gni_cq_destroy on send_rdma_cqh: %d\n", rrc);
2180                 dev->gnd_snd_rdma_cqh = NULL;
2181         }
2182
2183         if (dev->gnd_snd_fma_cqh) {
2184                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_fma_cqh);
2185                 LASSERTF(rrc == GNI_RC_SUCCESS,
2186                         "bad rc from gni_cq_destroy on snd_fma_cqh: %d\n", rrc);
2187                 dev->gnd_snd_fma_cqh = NULL;
2188         }
2189
2190         if (dev->gnd_err_handle) {
2191                 rrc = kgnilnd_release_errors(dev->gnd_err_handle);
2192                 LASSERTF(rrc == GNI_RC_SUCCESS,
2193                         "bad rc from gni_release_errors: %d\n", rrc);
2194                 dev->gnd_err_handle = NULL;
2195         }
2196
2197         if (dev->gnd_domain) {
2198                 rrc = kgnilnd_cdm_destroy(dev->gnd_domain);
2199                 LASSERTF(rrc == GNI_RC_SUCCESS,
2200                         "bad rc from gni_cdm_destroy: %d\n", rrc);
2201                 dev->gnd_domain = NULL;
2202         }
2203
2204         if (kgnilnd_data.kgn_sock)
2205                 sock_release(kgnilnd_data.kgn_sock);
2206
2207         EXIT;
2208 }
2209
2210 int kgnilnd_base_startup(void)
2211 {
2212         struct timeval       tv;
2213         int                  pkmem = atomic_read(&libcfs_kmemory);
2214         int                  rc;
2215         int                  i, j;
2216         kgn_device_t        *dev;
2217         struct task_struct  *thrd;
2218
2219 #if defined(CONFIG_CRAY_XT) && !defined(CONFIG_CRAY_COMPUTE)
2220         /* limit how much memory can be allocated for fma blocks in
2221          * instances where many nodes need to reconnect at the same time */
2222         struct sysinfo si;
2223         si_meminfo(&si);
2224         kgnilnd_data.free_pages_limit = si.totalram/4;
2225 #endif
2226
2227         ENTRY;
2228
2229         LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_NOTHING,
2230                 "init %d\n", kgnilnd_data.kgn_init);
2231
2232         /* zero pointers, flags etc */
2233         memset(&kgnilnd_data, 0, sizeof(kgnilnd_data));
2234         kgnilnd_check_kgni_version();
2235
2236         /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
2237          * a unique (for all time) connstamp so we can uniquely identify
2238          * the sender.  The connstamp is an incrementing counter
2239          * initialised with seconds + microseconds at startup time.  So we
2240          * rely on NOT creating connections more frequently on average than
2241          * 1MHz to ensure we don't use old connstamps when we reboot. */
2242         do_gettimeofday(&tv);
2243         kgnilnd_data.kgn_connstamp =
2244                  kgnilnd_data.kgn_peerstamp =
2245                         (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
2246
2247         init_rwsem(&kgnilnd_data.kgn_net_rw_sem);
2248
2249         for (i = 0; i < GNILND_MAXDEVS; i++) {
2250                 kgn_device_t  *dev = &kgnilnd_data.kgn_devices[i];
2251
2252                 dev->gnd_id = i;
2253                 INIT_LIST_HEAD(&dev->gnd_ready_conns);
2254                 INIT_LIST_HEAD(&dev->gnd_delay_conns);
2255                 INIT_LIST_HEAD(&dev->gnd_map_tx);
2256                 INIT_LIST_HEAD(&dev->gnd_fma_buffs);
2257                 mutex_init(&dev->gnd_cq_mutex);
2258                 mutex_init(&dev->gnd_fmablk_mutex);
2259                 spin_lock_init(&dev->gnd_fmablk_lock);
2260                 init_waitqueue_head(&dev->gnd_waitq);
2261                 init_waitqueue_head(&dev->gnd_dgram_waitq);
2262                 init_waitqueue_head(&dev->gnd_dgping_waitq);
2263                 spin_lock_init(&dev->gnd_lock);
2264                 INIT_LIST_HEAD(&dev->gnd_map_list);
2265                 spin_lock_init(&dev->gnd_map_lock);
2266                 atomic_set(&dev->gnd_nfmablk, 0);
2267                 atomic_set(&dev->gnd_fmablk_vers, 1);
2268                 atomic_set(&dev->gnd_neps, 0);
2269                 atomic_set(&dev->gnd_canceled_dgrams, 0);
2270                 INIT_LIST_HEAD(&dev->gnd_connd_peers);
2271                 spin_lock_init(&dev->gnd_connd_lock);
2272                 spin_lock_init(&dev->gnd_dgram_lock);
2273                 spin_lock_init(&dev->gnd_rdmaq_lock);
2274                 INIT_LIST_HEAD(&dev->gnd_rdmaq);
2275                 init_rwsem(&dev->gnd_conn_sem);
2276
2277                 /* alloc & setup nid based dgram table */
2278                 LIBCFS_ALLOC(dev->gnd_dgrams,
2279                             sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2280
2281                 if (dev->gnd_dgrams == NULL)
2282                         GOTO(failed, rc = -ENOMEM);
2283
2284                 /* NB: use j here - i still indexes the outer device loop */
2285                 for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++) {
2286                         INIT_LIST_HEAD(&dev->gnd_dgrams[j]);
2287                 }
2287                 atomic_set(&dev->gnd_ndgrams, 0);
2288                 atomic_set(&dev->gnd_nwcdgrams, 0);
2289                 /* setup timer for RDMAQ processing */
2290                 setup_timer(&dev->gnd_rdmaq_timer, kgnilnd_schedule_device_timer,
2291                             (unsigned long)dev);
2292
2293                 /* setup timer for mapping processing */
2294                 setup_timer(&dev->gnd_map_timer, kgnilnd_schedule_device_timer,
2295                             (unsigned long)dev);
2296
2297         }
2298
2299         /* CQID 0 isn't allowed, set to MAX_MSG_ID - 1 to check for conflicts early */
2300         kgnilnd_data.kgn_next_cqid = GNILND_MAX_MSG_ID - 1;
2301         kgnilnd_data.kgn_new_min_timeout = *kgnilnd_tunables.kgn_timeout;
2302         init_waitqueue_head(&kgnilnd_data.kgn_reaper_waitq);
2303         init_waitqueue_head(&kgnilnd_data.kgn_ruhroh_waitq);
2304         spin_lock_init(&kgnilnd_data.kgn_reaper_lock);
2305
2306         mutex_init(&kgnilnd_data.kgn_quiesce_mutex);
2307         atomic_set(&kgnilnd_data.kgn_nquiesce, 0);
2308         atomic_set(&kgnilnd_data.kgn_npending_conns, 0);
2309         atomic_set(&kgnilnd_data.kgn_npending_unlink, 0);
2310         atomic_set(&kgnilnd_data.kgn_npending_detach, 0);
2311         atomic_set(&kgnilnd_data.kgn_rev_offset, 0);
2312         atomic_set(&kgnilnd_data.kgn_rev_length, 0);
2313         atomic_set(&kgnilnd_data.kgn_rev_copy_buff, 0);
2314
2315         /* OK to call kgnilnd_api_shutdown() to cleanup now */
2316         kgnilnd_data.kgn_init = GNILND_INIT_DATA;
2317         try_module_get(THIS_MODULE);
2318
2319         rwlock_init(&kgnilnd_data.kgn_peer_conn_lock);
2320
2321         LIBCFS_ALLOC(kgnilnd_data.kgn_peers,
2322                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2323
2324         if (kgnilnd_data.kgn_peers == NULL)
2325                 GOTO(failed, rc = -ENOMEM);
2326
2327         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2328                 INIT_LIST_HEAD(&kgnilnd_data.kgn_peers[i]);
2329         }
2330
2331         LIBCFS_ALLOC(kgnilnd_data.kgn_conns,
2332                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2333
2334         if (kgnilnd_data.kgn_conns == NULL)
2335                 GOTO(failed, rc = -ENOMEM);
2336
2337         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2338                 INIT_LIST_HEAD(&kgnilnd_data.kgn_conns[i]);
2339         }
2340
2341         LIBCFS_ALLOC(kgnilnd_data.kgn_nets,
2342                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_net_hash_size);
2343
2344         if (kgnilnd_data.kgn_nets == NULL)
2345                 GOTO(failed, rc = -ENOMEM);
2346
2347         for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
2348                 INIT_LIST_HEAD(&kgnilnd_data.kgn_nets[i]);
2349         }
2350
2351         kgnilnd_data.kgn_mbox_cache =
2352                 kmem_cache_create("kgn_mbox_block", KMALLOC_MAX_SIZE, 0,
2353                                   SLAB_HWCACHE_ALIGN, NULL);
2354         if (kgnilnd_data.kgn_mbox_cache == NULL) {
2355                 CERROR("Can't create slab for physical mbox blocks\n");
2356                 GOTO(failed, rc = -ENOMEM);
2357         }
2358
2359         kgnilnd_data.kgn_rx_cache =
2360                 kmem_cache_create("kgn_rx_t", sizeof(kgn_rx_t), 0, 0, NULL);
2361         if (kgnilnd_data.kgn_rx_cache == NULL) {
2362                 CERROR("Can't create slab for kgn_rx_t descriptors\n");
2363                 GOTO(failed, rc = -ENOMEM);
2364         }
2365
2366         kgnilnd_data.kgn_tx_cache =
2367                 kmem_cache_create("kgn_tx_t", sizeof(kgn_tx_t), 0, 0, NULL);
2368         if (kgnilnd_data.kgn_tx_cache == NULL) {
2369                 CERROR("Can't create slab for kgn_tx_t\n");
2370                 GOTO(failed, rc = -ENOMEM);
2371         }
2372
2373         kgnilnd_data.kgn_tx_phys_cache =
2374                 kmem_cache_create("kgn_tx_phys",
2375                                    LNET_MAX_IOV * sizeof(gni_mem_segment_t),
2376                                    0, 0, NULL);
2377         if (kgnilnd_data.kgn_tx_phys_cache == NULL) {
2378                 CERROR("Can't create slab for kgn_tx_phys\n");
2379                 GOTO(failed, rc = -ENOMEM);
2380         }
2381
2382         kgnilnd_data.kgn_dgram_cache =
2383                 kmem_cache_create("kgn_dgram_t", sizeof(kgn_dgram_t), 0, 0, NULL);
2384         if (kgnilnd_data.kgn_dgram_cache == NULL) {
2385                 CERROR("Can't create slab for outgoing datagrams\n");
2386                 GOTO(failed, rc = -ENOMEM);
2387         }
2388
2389         /* allocate a MAX_IOV array of page pointers for each cpu */
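             /* (one array per possible CPU - these back the vmap-based
              * checksumming path named in the "vmap cksum" errors below) */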
2390         kgnilnd_data.kgn_cksum_map_pages = kmalloc(num_possible_cpus() * sizeof (struct page *),
2391                                                    GFP_KERNEL);
2392         if (kgnilnd_data.kgn_cksum_map_pages == NULL) {
2393                 CERROR("Can't allocate vmap cksum pages\n");
2394                 GOTO(failed, rc = -ENOMEM);
2395         }
2396         kgnilnd_data.kgn_cksum_npages = num_possible_cpus();
2397         memset(kgnilnd_data.kgn_cksum_map_pages, 0,
2398                 kgnilnd_data.kgn_cksum_npages * sizeof (struct page *));
2399
2400         for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
2401                 kgnilnd_data.kgn_cksum_map_pages[i] = kmalloc(LNET_MAX_IOV * sizeof (struct page *),
2402                                                               GFP_KERNEL);
2403                 if (kgnilnd_data.kgn_cksum_map_pages[i] == NULL) {
2404                         CERROR("Can't allocate vmap cksum pages for cpu %d\n", i);
2405                         GOTO(failed, rc = -ENOMEM);
2406                 }
2407         }
2408
2409         LASSERT(kgnilnd_data.kgn_ndevs == 0);
2410
2411         /* Use all available GNI devices */
2412         for (i = 0; i < GNILND_MAXDEVS; i++) {
2413                 dev = &kgnilnd_data.kgn_devices[kgnilnd_data.kgn_ndevs];
2414
2415                 rc = kgnilnd_dev_init(dev);
2416                 if (rc == 0) {
2417                         /* Increment here so base_shutdown cleans it up */
2418                         kgnilnd_data.kgn_ndevs++;
2419
2420                         rc = kgnilnd_allocate_phys_fmablk(dev);
2421                         if (rc)
2422                                 GOTO(failed, rc);
2423                 }
2424         }
2425
2426         if (kgnilnd_data.kgn_ndevs == 0) {
2427                 CERROR("Can't initialise any GNI devices\n");
2428                 GOTO(failed, rc = -ENODEV);
2429         }
2430
2431         rc = kgnilnd_thread_start(kgnilnd_reaper, NULL, "kgnilnd_rpr", 0);
2432         if (rc != 0) {
2433                 CERROR("Can't spawn gnilnd reaper: %d\n", rc);
2434                 GOTO(failed, rc);
2435         }
2436
2437         rc = kgnilnd_start_rca_thread();
2438         if (rc != 0) {
2439                 CERROR("Can't spawn gnilnd rca: %d\n", rc);
2440                 GOTO(failed, rc);
2441         }
2442
2443         /*
2444          * Start ruhroh thread.  We can't use kgnilnd_thread_start() because
2445          * we don't want this thread included in kgnilnd_data.kgn_nthreads
2446          * count.  This thread controls quiesce, so it mustn't
2447          * quiesce itself.
2448          */
2449         thrd = kthread_run(kgnilnd_ruhroh_thread, NULL, "%s_%02d", "kgnilnd_rr", 0);
2450         if (IS_ERR(thrd)) {
2451                 rc = PTR_ERR(thrd);
2452                 CERROR("Can't spawn gnilnd ruhroh thread: %d\n", rc);
2453                 GOTO(failed, rc);
2454         }
2455
2456         /* threads will load balance across devs as they are available */
2457         if (*kgnilnd_tunables.kgn_thread_affinity) {
2458                 rc = kgnilnd_start_sd_threads();
2459                 if (rc != 0)
2460                         GOTO(failed, rc);
2461         } else {
2462                 for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
2463                         rc = kgnilnd_thread_start(kgnilnd_scheduler,
2464                                                   (void *)((long)i),
2465                                                   "kgnilnd_sd", i);
2466                         if (rc != 0) {
2467                                 CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
2468                                        i, rc);
2469                                 GOTO(failed, rc);
2470                         }
2471                 }
2472         }
2473
2474         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2475                 dev = &kgnilnd_data.kgn_devices[i];
2476                 rc = kgnilnd_thread_start(kgnilnd_dgram_mover, dev,
2477                                           "kgnilnd_dg", dev->gnd_id);
2478                 if (rc != 0) {
2479                         CERROR("Can't spawn gnilnd dgram_mover[%d]: %d\n",
2480                                dev->gnd_id, rc);
2481                         GOTO(failed, rc);
2482                 }
2483
2484                 rc = kgnilnd_thread_start(kgnilnd_dgram_waitq, dev,
2485                                           "kgnilnd_dgn", dev->gnd_id);
2486                 if (rc != 0) {
2487                         CERROR("Can't spawn gnilnd dgram_waitq[%d]: %d\n",
2488                                 dev->gnd_id, rc);
2489                         GOTO(failed, rc);
2490                 }
2491
2492                 rc = kgnilnd_setup_wildcard_dgram(dev);
2493
2494                 if (rc != 0) {
2495                         CERROR("Can't create wildcard dgrams[%d]: %d\n",
2496                                 dev->gnd_id, rc);
2497                         GOTO(failed, rc);
2498                 }
2499         }
2500
2501         /* flag everything initialised */
2502         kgnilnd_data.kgn_init = GNILND_INIT_ALL;
2503         /*****************************************************/
2504
2505         CDEBUG(D_MALLOC, "initial kmem %d\n", pkmem);
2506         RETURN(0);
2507
2508 failed:
2509         kgnilnd_base_shutdown();
2510         kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
2511         RETURN(rc);
2512 }
2513
2514 void
2515 kgnilnd_base_shutdown(void)
2516 {
2517         int                     i, j;
2518         ENTRY;
2519
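         /* fault-injection hook: park here while the PAUSE_SHUTDOWN fail
          * point is armed; a no-op in normal operation */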
2520         while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_PAUSE_SHUTDOWN, 1)) {};
2521
2522         kgnilnd_data.kgn_wc_kill = 1;
2523
2524         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2525                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2526                 kgnilnd_cancel_wc_dgrams(dev);
2527                 kgnilnd_cancel_dgrams(dev);
2528                 kgnilnd_del_conn_or_peer(NULL, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
2529                 kgnilnd_wait_for_canceled_dgrams(dev);
2530         }
2531
2532         /* We need to verify there are no conns left before we let the threads
2533          * shut down; otherwise we could clean up the peers but still have
2534          * some outstanding conns due to orphaned datagram conns that are
2535          * being cleaned up.
2536          */
2537         i = 2;
2538         while (atomic_read(&kgnilnd_data.kgn_nconns) != 0) {
2539                 i++;
2540
2541                 for (j = 0; j < kgnilnd_data.kgn_ndevs; ++j) {
2542                         kgn_device_t *dev = &kgnilnd_data.kgn_devices[j];
2543                         kgnilnd_schedule_device(dev);
2544                 }
2545
2546                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2547                         "Waiting for conns to be cleaned up %d\n", atomic_read(&kgnilnd_data.kgn_nconns));
2548                 set_current_state(TASK_UNINTERRUPTIBLE);
2549                 schedule_timeout(cfs_time_seconds(1));
2550         }
2551         /* Peer state all cleaned up BEFORE setting shutdown, so threads don't
2552          * have to worry about shutdown races.  NB connections may be created
2553          * while there are still active connds, but these will be temporary
2554          * since peer creation always fails after the listener has started to
2555          * shut down.
2556          * All peers should have been cleared out on the nets */
2557         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2558                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2559
2560         /* Wait for the ruhroh thread to shut down. */
2561         kgnilnd_data.kgn_ruhroh_shutdown = 1;
2562         wake_up(&kgnilnd_data.kgn_ruhroh_waitq);
2563         i = 2;
2564         while (kgnilnd_data.kgn_ruhroh_running != 0) {
2565                 i++;
2566                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2567                        "Waiting for ruhroh thread to terminate\n");
2568                 set_current_state(TASK_UNINTERRUPTIBLE);
2569                 schedule_timeout(cfs_time_seconds(1));
2570         }
2571
2572         /* Flag threads to terminate */
2573         kgnilnd_data.kgn_shutdown = 1;
2574
2575         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2576                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2577
2578                 /* should clear all the MDDs */
2579                 kgnilnd_unmap_fma_blocks(dev);
2580
2581                 kgnilnd_schedule_device(dev);
2582                 wake_up_all(&dev->gnd_dgram_waitq);
2583                 wake_up_all(&dev->gnd_dgping_waitq);
2584                 LASSERT(list_empty(&dev->gnd_connd_peers));
2585         }
2586
2587         spin_lock(&kgnilnd_data.kgn_reaper_lock);
2588         wake_up_all(&kgnilnd_data.kgn_reaper_waitq);
2589         spin_unlock(&kgnilnd_data.kgn_reaper_lock);
2590
2591         if (atomic_read(&kgnilnd_data.kgn_nthreads) != 0)
2592                 kgnilnd_wakeup_rca_thread();
2593
2594         /* Wait for threads to exit */
2595         i = 2;
2596         while (atomic_read(&kgnilnd_data.kgn_nthreads) != 0) {
2597                 i++;
2598                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* warn when i is a power of 2 */
2599                        "Waiting for %d threads to terminate\n",
2600                        atomic_read(&kgnilnd_data.kgn_nthreads));
2601                 set_current_state(TASK_UNINTERRUPTIBLE);
2602                 schedule_timeout(cfs_time_seconds(1));
2603         }
2604
2605         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2606                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2607
2608         if (kgnilnd_data.kgn_peers != NULL) {
2609                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
2610                         LASSERT(list_empty(&kgnilnd_data.kgn_peers[i]));
2611
2612                 LIBCFS_FREE(kgnilnd_data.kgn_peers,
2613                             sizeof (struct list_head) *
2614                             *kgnilnd_tunables.kgn_peer_hash_size);
2615         }
2616
2617         down_write(&kgnilnd_data.kgn_net_rw_sem);
2618         if (kgnilnd_data.kgn_nets != NULL) {
2619                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++)
2620                         LASSERT(list_empty(&kgnilnd_data.kgn_nets[i]));
2621
2622                 LIBCFS_FREE(kgnilnd_data.kgn_nets,
2623                             sizeof (struct list_head) *
2624                             *kgnilnd_tunables.kgn_net_hash_size);
2625         }
2626         up_write(&kgnilnd_data.kgn_net_rw_sem);
2627
2628         LASSERTF(atomic_read(&kgnilnd_data.kgn_nconns) == 0,
2629                 "conns left %d\n", atomic_read(&kgnilnd_data.kgn_nconns));
2630
2631         if (kgnilnd_data.kgn_conns != NULL) {
2632                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
2633                         LASSERT(list_empty(&kgnilnd_data.kgn_conns[i]));
2634
2635                 LIBCFS_FREE(kgnilnd_data.kgn_conns,
2636                             sizeof (struct list_head) *
2637                             *kgnilnd_tunables.kgn_peer_hash_size);
2638         }
2639
2640         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2641                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2642                 kgnilnd_dev_fini(dev);
2643
2644                 LASSERTF(atomic_read(&dev->gnd_ndgrams) == 0,
2645                         "dgrams left %d\n", atomic_read(&dev->gnd_ndgrams));
2646
2647                 if (dev->gnd_dgrams != NULL) {
2648                         for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++) /* j, not i - i is the outer device index */
2649                                 LASSERT(list_empty(&dev->gnd_dgrams[j]));
2650
2651                         LIBCFS_FREE(dev->gnd_dgrams,
2652                                     sizeof (struct list_head) *
2653                                     *kgnilnd_tunables.kgn_peer_hash_size);
2654                 }
2655
2656                 kgnilnd_free_phys_fmablk(dev);
2657         }
2658
2659         if (kgnilnd_data.kgn_mbox_cache != NULL)
2660                 kmem_cache_destroy(kgnilnd_data.kgn_mbox_cache);
2661
2662         if (kgnilnd_data.kgn_rx_cache != NULL)
2663                 kmem_cache_destroy(kgnilnd_data.kgn_rx_cache);
2664
2665         if (kgnilnd_data.kgn_tx_cache != NULL)
2666                 kmem_cache_destroy(kgnilnd_data.kgn_tx_cache);
2667
2668         if (kgnilnd_data.kgn_tx_phys_cache != NULL)
2669                 kmem_cache_destroy(kgnilnd_data.kgn_tx_phys_cache);
2670
2671         if (kgnilnd_data.kgn_dgram_cache != NULL)
2672                 kmem_cache_destroy(kgnilnd_data.kgn_dgram_cache);
2673
2674         if (kgnilnd_data.kgn_cksum_map_pages != NULL) {
2675                 for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
2676                         if (kgnilnd_data.kgn_cksum_map_pages[i] != NULL) {
2677                                 kfree(kgnilnd_data.kgn_cksum_map_pages[i]);
2678                         }
2679                 }
2680                 kfree(kgnilnd_data.kgn_cksum_map_pages);
2681         }
2682
2683         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2684                atomic_read(&libcfs_kmemory));
2685
2686         kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
2687         module_put(THIS_MODULE);
2688
2689         EXIT;
2690 }
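/*
 * The wait loops in kgnilnd_base_shutdown() escalate from D_NET to
 * D_WARNING only when the iteration counter is a power of two, which
 * backs the warning rate off exponentially.  (i & -i) isolates the
 * lowest set bit of i, so for the positive counters used here it equals
 * i exactly when i is a power of two.  The pattern in isolation:
 */
#if 0   /* illustrative sketch only */
static void
example_wait_for_zero(atomic_t *counter)
{
        int i = 2;

        while (atomic_read(counter) != 0) {
                i++;
                /* i = 4, 8, 16, ... hit D_WARNING; the rest stay D_NET */
                CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
                       "Waiting for %d to reach zero\n",
                       atomic_read(counter));
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(cfs_time_seconds(1));
        }
}
#endif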
2691
2692 int
2693 kgnilnd_startup(lnet_ni_t *ni)
2694 {
2695         int               rc, devno;
2696         kgn_net_t        *net;
2697         ENTRY;
2698
2699         LASSERTF(ni->ni_net->net_lnd == &the_kgnilnd,
2700                 "bad LND 0x%p != the_kgnilnd @ 0x%p\n",
2701                 ni->ni_net->net_lnd, &the_kgnilnd);
2702
2703         if (kgnilnd_data.kgn_init == GNILND_INIT_NOTHING) {
2704                 rc = kgnilnd_base_startup();
2705                 if (rc != 0)
2706                         RETURN(rc);
2707         }
2708
2709         /* Serialize with shutdown. */
2710         mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
2711
2712         LIBCFS_ALLOC(net, sizeof(*net));
2713         if (net == NULL) {
2714                 CERROR("could not allocate net for new interface instance\n");
2715                 /* no need to clean up the CDM... */
2716                 GOTO(failed, rc = -ENOMEM);
2717         }
2718         INIT_LIST_HEAD(&net->gnn_list);
2719         ni->ni_data = net;
2720         net->gnn_ni = ni;
2721         if (!ni->ni_net->net_tunables_set) {
2722                 ni->ni_net->net_tunables.lct_max_tx_credits =
2723                         *kgnilnd_tunables.kgn_credits;
2724                 ni->ni_net->net_tunables.lct_peer_tx_credits =
2725                         *kgnilnd_tunables.kgn_peer_credits;
2726         }
2727
2728         if (*kgnilnd_tunables.kgn_peer_health) {
2729                 int     fudge;
2730                 int     timeout;
2731                 /* give this a bit of leeway - we don't have a hard timeout
2732                  * as we only check timeouts periodically - see comment in kgnilnd_reaper */
2733                 fudge = (GNILND_TO2KA(*kgnilnd_tunables.kgn_timeout) / GNILND_REAPER_NCHECKS);
2734                 timeout = *kgnilnd_tunables.kgn_timeout + fudge;
2735
2736                 if (*kgnilnd_tunables.kgn_peer_timeout >= timeout) {
2737                         ni->ni_net->net_tunables.lct_peer_timeout =
2738                                  *kgnilnd_tunables.kgn_peer_timeout;
2739                 } else if (*kgnilnd_tunables.kgn_peer_timeout > -1) {
2740                         LCONSOLE_ERROR("Peer_timeout is set to %d but needs to be >= %d\n",
2741                                         *kgnilnd_tunables.kgn_peer_timeout,
2742                                         timeout);
2743                         ni->ni_data = NULL;
2744                         LIBCFS_FREE(net, sizeof(*net));
2745                         GOTO(failed, rc = -EINVAL);
2746                 } else {
2747                         ni->ni_net->net_tunables.lct_peer_timeout = timeout;
2748                 }
2749                 LCONSOLE_INFO("Enabling LNet peer health for gnilnd, timeout %ds\n",
2750                               ni->ni_net->net_tunables.lct_peer_timeout);
2751         }
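        /*
         * Worked example for the fudge above (values are hypothetical, for
         * illustration only): if *kgn_timeout is 60s and the constants give
         * GNILND_TO2KA(60) == 60 and GNILND_REAPER_NCHECKS == 4, then
         * fudge = 60 / 4 = 15 and the effective floor is 75s.  A configured
         * kgn_peer_timeout must then be >= 75 (values in 0..74 fail startup
         * with -EINVAL), while any negative value silently selects the
         * computed 75s.
         */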
2752
2753         atomic_set(&net->gnn_refcount, 1);
2754
2755         /* if we have multiple devices, spread the nets around */
2756         net->gnn_netnum = LNET_NETNUM(LNET_NIDNET(ni->ni_nid));
2757
2758         devno = LNET_NIDNET(ni->ni_nid) % GNILND_MAXDEVS;
2759         net->gnn_dev = &kgnilnd_data.kgn_devices[devno];
2760
2761         /* allocate a 'dummy' cdm for datagram use. We can only have a single
2762          * datagram between a nid:inst_id and nid2:inst_id. The fake cdm
2763          * gives us additional inst_ids to use, allowing the datagrams to flow
2764          * like rivers of honey and beer */
2765
2766         /* the instance id for the cdm is the NETNUM offset by MAXDEVS -
2767          * ensuring we'll have a unique id */
2768
2770         ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), net->gnn_dev->gnd_nid);
2771         CDEBUG(D_NET, "adding net %p nid=%s on dev %d\n",
2772                 net, libcfs_nid2str(ni->ni_nid), net->gnn_dev->gnd_id);
2773         /* until the gnn_list is set, we need to clean up ourselves as
2774          * kgnilnd_shutdown is just going to get confused */
2775
2776         down_write(&kgnilnd_data.kgn_net_rw_sem);
2777         list_add_tail(&net->gnn_list, kgnilnd_netnum2netlist(net->gnn_netnum));
2778         up_write(&kgnilnd_data.kgn_net_rw_sem);
2779
2780         /* we need a separate thread to call probe_wait_by_id until
2781          * we get a function callback notifier from kgni */
2782         mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
2783         RETURN(0);
2784  failed:
2785         mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
2786         kgnilnd_shutdown(ni);
2787         RETURN(rc);
2788 }
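/*
 * How a net lands on a device: the LNet network number is taken modulo
 * GNILND_MAXDEVS, and the NID's address part is then rewritten to the
 * device's own gnd_nid (see kgnilnd_startup() above).  A sketch of the
 * mapping; the example values in the comment assume, hypothetically,
 * GNILND_MAXDEVS == 2 with both devices present:
 */
#if 0   /* illustrative sketch only */
static kgn_device_t *
example_nid2dev(lnet_nid_t nid)
{
        int devno = LNET_NIDNET(nid) % GNILND_MAXDEVS;

        /* e.g. nets gni0 and gni2 would map to device 0, while gni1 and
         * gni3 would map to device 1 */
        return &kgnilnd_data.kgn_devices[devno];
}
#endif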
2789
2790 void
2791 kgnilnd_shutdown(lnet_ni_t *ni)
2792 {
2793         kgn_net_t     *net = ni->ni_data;
2794         int           i;
2795         int           rc;
2796         ENTRY;
2797
2798         CFS_RACE(CFS_FAIL_GNI_SR_DOWN_RACE);
2799
2800         LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_ALL,
2801                 "init %d\n", kgnilnd_data.kgn_init);
2802
2803         /* Serialize with startup. */
2804         mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
2805         CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
2806                atomic_read(&libcfs_kmemory));
2807
2808         if (net == NULL) {
2809                 CERROR("got NULL net for ni %p\n", ni);
2810                 GOTO(out, rc = -EINVAL);
2811         }
2812
2813         LASSERTF(ni == net->gnn_ni,
2814                 "ni %p gnn_ni %p\n", ni, net->gnn_ni);
2815
2816         ni->ni_data = NULL;
2817
2818         LASSERT(!net->gnn_shutdown);
2819         LASSERTF(atomic_read(&net->gnn_refcount) != 0,
2820                 "net %p refcount %d\n",
2821                  net, atomic_read(&net->gnn_refcount));
2822
2823         if (!list_empty(&net->gnn_list)) {
2824                 /* serialize with peer creation */
2825                 down_write(&kgnilnd_data.kgn_net_rw_sem);
2826                 net->gnn_shutdown = 1;
2827                 up_write(&kgnilnd_data.kgn_net_rw_sem);
2828
2829                 kgnilnd_cancel_net_dgrams(net);
2830
2831                 kgnilnd_del_conn_or_peer(net, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
2832
2833                 /* if we are quiesced, we need to wake up - we need those
2834                  * threads alive to release peers, etc. */
2835                 if (GNILND_IS_QUIESCED) {
2836                         set_mb(kgnilnd_data.kgn_quiesce_trigger, GNILND_QUIESCE_IDLE);
2837                         kgnilnd_quiesce_wait("shutdown");
2838                 }
2839
2840                 kgnilnd_wait_for_canceled_dgrams(net->gnn_dev);
2841
2842                 /* We wait until the net's refcount drops to 1, then
2843                  * release the final ref, which is ours.  This makes sure
2844                  * everything else is done before we free the net.
2845                  */
2846                 i = 4;
2847                 while (atomic_read(&net->gnn_refcount) != 1) {
2848                         i++;
2849                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2850                                 "Waiting for %d references to clear on net %d\n",
2851                                 atomic_read(&net->gnn_refcount),
2852                                 net->gnn_netnum);
2853                         set_current_state(TASK_UNINTERRUPTIBLE);
2854                         schedule_timeout(cfs_time_seconds(1));
2855                 }
2856
2857                 /* release ref from kgnilnd_startup */
2858                 kgnilnd_net_decref(net);
2859                 /* serialize with reaper and conn_task looping */
2860                 down_write(&kgnilnd_data.kgn_net_rw_sem);
2861                 list_del_init(&net->gnn_list);
2862                 up_write(&kgnilnd_data.kgn_net_rw_sem);
2863
2864         }
2865
2866         /* not locking, this can't race with writers */
2867         LASSERTF(atomic_read(&net->gnn_refcount) == 0,
2868                 "net %p refcount %d\n",
2869                  net, atomic_read(&net->gnn_refcount));
2870         LIBCFS_FREE(net, sizeof(*net));
2871
2872 out:
2873         down_read(&kgnilnd_data.kgn_net_rw_sem);
2874         for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
2875                 if (!list_empty(&kgnilnd_data.kgn_nets[i])) {
2876                         up_read(&kgnilnd_data.kgn_net_rw_sem);
2877                         break;
2878                 }
2879                 /* this bucket was empty too; if it was the last one, no nets remain */
2880                 if (i == *kgnilnd_tunables.kgn_net_hash_size - 1) {
2881                         up_read(&kgnilnd_data.kgn_net_rw_sem);
2882                         kgnilnd_base_shutdown();
2883                 }
2884         }
2885         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2886                atomic_read(&libcfs_kmemory));
2887
2888         mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
2889         EXIT;
2890 }
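/*
 * The shutdown path above uses a drain-to-one pattern: wait until the
 * startup reference is the only one left, drop it ourselves, and only
 * then free the net.  Sketched in isolation:
 */
#if 0   /* illustrative sketch only */
static void
example_drain_and_free(kgn_net_t *net)
{
        /* wait for everyone else to drop their references */
        while (atomic_read(&net->gnn_refcount) != 1) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(cfs_time_seconds(1));
        }

        kgnilnd_net_decref(net);        /* drop the startup reference */

        /* no other holders remain, so this cannot race with writers */
        LASSERT(atomic_read(&net->gnn_refcount) == 0);
        LIBCFS_FREE(net, sizeof(*net));
}
#endif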
2891
2892 static void __exit kgnilnd_exit(void)
2893 {
2894         lnet_unregister_lnd(&the_kgnilnd);
2895         kgnilnd_proc_fini();
2896         kgnilnd_remove_sysctl();
2897 }
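/* Note that teardown mirrors kgnilnd_init() below in reverse: unregister
 * the LND from LNet first so no new instances can start, then remove the
 * /proc entries, then the sysctl entries. */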
2898
2899 static int __init kgnilnd_init(void)
2900 {
2901         int    rc;
2902
2903         rc = kgnilnd_tunables_init();
2904         if (rc != 0)
2905                 return rc;
2906
2907         printk(KERN_INFO "Lustre: kgnilnd build version: "KGNILND_BUILD_REV"\n");
2908
2909         kgnilnd_insert_sysctl();
2910         kgnilnd_proc_init();
2911
2912         lnet_register_lnd(&the_kgnilnd);
2913
2914         return 0;
2915 }
2916
2917 MODULE_AUTHOR("Cray, Inc. <nic@cray.com>");
2918 MODULE_DESCRIPTION("Gemini LNet Network Driver");
2919 MODULE_VERSION(KGNILND_BUILD_REV);
2920 MODULE_LICENSE("GPL");
2921
2922 module_init(kgnilnd_init);
2923 module_exit(kgnilnd_exit);