lnet/klnds/gnilnd/gnilnd.c (fs/lustre-release.git, commit 482b70e707246cc14db9ab1b565d6f5b04494739)
1 /*
2  * Copyright (C) 2012 Cray, Inc.
3  *
4  * Copyright (c) 2013, 2014, Intel Corporation.
5  *
6  *   Author: Nic Henke <nic@cray.com>
7  *   Author: James Shimek <jshimek@cray.com>
8  *
9  *   This file is part of Lustre, http://www.lustre.org.
10  *
11  *   Lustre is free software; you can redistribute it and/or
12  *   modify it under the terms of version 2 of the GNU General Public
13  *   License as published by the Free Software Foundation.
14  *
15  *   Lustre is distributed in the hope that it will be useful,
16  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
17  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  *   GNU General Public License for more details.
19  *
20  *   You should have received a copy of the GNU General Public License
21  *   along with Lustre; if not, write to the Free Software
22  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23  *
24  */
25 #include "gnilnd.h"
26
27 /* Primary entry points from LNET.  There are no guarantees against reentrance. */
28 lnd_t the_kgnilnd = {
29 #ifdef CONFIG_CRAY_XT
30         .lnd_type       = GNILND,
31 #else
32         .lnd_type       = GNIIPLND,
33 #endif
34         .lnd_startup    = kgnilnd_startup,
35         .lnd_shutdown   = kgnilnd_shutdown,
36         .lnd_ctl        = kgnilnd_ctl,
37         .lnd_send       = kgnilnd_send,
38         .lnd_recv       = kgnilnd_recv,
39         .lnd_eager_recv = kgnilnd_eager_recv,
40         .lnd_query      = kgnilnd_query,
41 };
42
43 kgn_data_t      kgnilnd_data;
44
45 int
46 kgnilnd_thread_start(int(*fn)(void *arg), void *arg, char *name, int id)
47 {
48         struct task_struct *thrd;
49
50         thrd = kthread_run(fn, arg, "%s_%02d", name, id);
51         if (IS_ERR(thrd))
52                 return PTR_ERR(thrd);
53
54         atomic_inc(&kgnilnd_data.kgn_nthreads);
55         return 0;
56 }
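A minimal usage sketch (the wrapper function name is hypothetical; kgnilnd_scheduler is the entry point used by kgnilnd_start_sd_threads below). kgnilnd_thread_start wraps kthread_run and only bumps kgn_nthreads on success, so a caller simply propagates the return code:

/* hedged example - not part of the original source */
static int kgnilnd_start_example_threads(int nthreads)
{
        int i, rc;

        for (i = 0; i < nthreads; i++) {
                rc = kgnilnd_thread_start(kgnilnd_scheduler,
                                          (void *)((long)i),
                                          "kgnilnd_sd", i);
                if (rc != 0) {
                        CERROR("Can't start scheduler thread %d: %d\n", i, rc);
                        return rc;
                }
        }
        return 0;
}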
57
58 /* bind scheduler threads to cpus */
59 int
60 kgnilnd_start_sd_threads(void)
61 {
62         int cpu;
63         int i = 0;
64         struct task_struct *task;
65
66         for_each_online_cpu(cpu) {
67                 /* don't bind to cpu 0 - all interrupts are processed here */
68                 if (cpu == 0)
69                         continue;
70
71                 task = kthread_create(kgnilnd_scheduler, (void *)((long)i),
72                                       "%s_%02d", "kgnilnd_sd", i);
73                 if (!IS_ERR(task)) {
74                         kthread_bind(task, cpu);
75                         wake_up_process(task);
76                 } else {
77                         CERROR("Can't spawn gnilnd scheduler[%d] %ld\n", i,
78                                 PTR_ERR(task));
79                         return PTR_ERR(task);
80                 }
81                 atomic_inc(&kgnilnd_data.kgn_nthreads);
82
83                 if (++i >= *kgnilnd_tunables.kgn_sched_threads) {
84                         break;
85                 }
86         }
87
88         return 0;
89 }
90
91 /* needs write_lock on kgn_peer_conn_lock */
92 int
93 kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
94 {
95         kgn_conn_t         *conn;
96         struct list_head   *ctmp, *cnxt;
97         int                 loopback;
98         int                 count = 0;
99
100         loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;
101
102         list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
103                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
104
105                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
106                         continue;
107
108                 if (conn == newconn)
109                         continue;
110
111                 if (conn->gnc_device != newconn->gnc_device)
112                         continue;
113
114                 /* This is a two connection loopback - one talking to the other */
115                 if (loopback &&
116                     newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
117                     newconn->gnc_peer_connstamp == conn->gnc_my_connstamp) {
118                         CDEBUG(D_NET, "skipping prune of %p, "
119                                 "loopback and matching stamps"
120                                 " connstamp "LPU64"("LPU64")"
121                                 " peerstamp "LPU64"("LPU64")\n",
122                                 conn, newconn->gnc_my_connstamp,
123                                 conn->gnc_peer_connstamp,
124                                 newconn->gnc_peer_connstamp,
125                                 conn->gnc_my_connstamp);
126                         continue;
127                 }
128
129                 if (conn->gnc_peerstamp != newconn->gnc_peerstamp) {
130                         LASSERTF(conn->gnc_peerstamp < newconn->gnc_peerstamp,
131                                 "conn 0x%p peerstamp "LPU64" >= "
132                                 "newconn 0x%p peerstamp "LPU64"\n",
133                                 conn, conn->gnc_peerstamp,
134                                 newconn, newconn->gnc_peerstamp);
135
136                         CDEBUG(D_NET, "Closing stale conn nid: %s "
137                                " peerstamp:"LPX64"("LPX64")\n",
138                                libcfs_nid2str(peer->gnp_nid),
139                                conn->gnc_peerstamp, newconn->gnc_peerstamp);
140                 } else {
141
142                         LASSERTF(conn->gnc_peer_connstamp < newconn->gnc_peer_connstamp,
143                                 "conn 0x%p peer_connstamp "LPU64" >= "
144                                 "newconn 0x%p peer_connstamp "LPU64"\n",
145                                 conn, conn->gnc_peer_connstamp,
146                                 newconn, newconn->gnc_peer_connstamp);
147
148                         CDEBUG(D_NET, "Closing stale conn nid: %s"
149                                " connstamp:"LPU64"("LPU64")\n",
150                                libcfs_nid2str(peer->gnp_nid),
151                                conn->gnc_peer_connstamp, newconn->gnc_peer_connstamp);
152                 }
153
154                 count++;
155                 kgnilnd_close_conn_locked(conn, -ESTALE);
156         }
157
158         if (count != 0) {
159                 CWARN("Closed %d stale conns to %s\n", count, libcfs_nid2str(peer->gnp_nid));
160         }
161
162         RETURN(count);
163 }
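A hedged usage sketch (the caller name is hypothetical): the _locked suffix means the write lock on kgn_peer_conn_lock must already be held, so a caller brackets the call itself:

/* hedged example - not part of the original source */
static void kgnilnd_prune_stale_example(kgn_peer_t *peer, kgn_conn_t *newconn)
{
        int closed;

        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        closed = kgnilnd_close_stale_conns_locked(peer, newconn);
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        CDEBUG(D_NET, "closed %d stale conns to %s\n",
               closed, libcfs_nid2str(peer->gnp_nid));
}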
164
165 int
166 kgnilnd_conn_isdup_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
167 {
168         kgn_conn_t       *conn;
169         struct list_head *tmp;
170         int               loopback;
171         ENTRY;
172
173         loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;
174
175         list_for_each(tmp, &peer->gnp_conns) {
176                 conn = list_entry(tmp, kgn_conn_t, gnc_list);
177                 CDEBUG(D_NET, "checking conn 0x%p for peer %s"
178                         " lo %d new "LPU64" existing "LPU64
179                         " new peer "LPU64" existing peer "LPU64
180                         " new dev %p existing dev %p\n",
181                         conn, libcfs_nid2str(peer->gnp_nid),
182                         loopback,
183                         newconn->gnc_peerstamp, conn->gnc_peerstamp,
184                         newconn->gnc_peer_connstamp, conn->gnc_peer_connstamp,
185                         newconn->gnc_device, conn->gnc_device);
186
187                 /* conn is in the process of closing */
188                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
189                         continue;
190
191                 /* 'newconn' is from an earlier version of 'peer'!!! */
192                 if (newconn->gnc_peerstamp < conn->gnc_peerstamp)
193                         RETURN(1);
194
195                 /* 'conn' is from an earlier version of 'peer': it will be
196                  * removed when we cull stale conns later on... */
197                 if (newconn->gnc_peerstamp > conn->gnc_peerstamp)
198                         continue;
199
200                 /* Different devices are OK */
201                 if (conn->gnc_device != newconn->gnc_device)
202                         continue;
203
204                 /* It's me connecting to myself */
205                 if (loopback &&
206                     newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
207                     newconn->gnc_peer_connstamp == conn->gnc_my_connstamp)
208                         continue;
209
210                 /* 'newconn' is an earlier connection from 'peer'!!! */
211                 if (newconn->gnc_peer_connstamp < conn->gnc_peer_connstamp)
212                         RETURN(2);
213
214                 /* 'conn' is an earlier connection from 'peer': it will be
215                  * removed when we cull stale conns later on... */
216                 if (newconn->gnc_peer_connstamp > conn->gnc_peer_connstamp)
217                         continue;
218
219                 /* 'newconn' has the SAME connection stamp; 'peer' isn't
220                  * playing the game... */
221                 RETURN(3);
222         }
223
224         RETURN(0);
225 }
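A hedged sketch of how a caller might interpret the return codes (the caller function is hypothetical): any nonzero value means 'newconn' is a duplicate and should not be installed; 1 means its peerstamp is older, 2 means its peer connstamp is older, and 3 means the connstamp matches an existing conn exactly:

/* hedged example - not part of the original source */
static int kgnilnd_check_dup_example(kgn_peer_t *peer, kgn_conn_t *newconn)
{
        int rc;

        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        rc = kgnilnd_conn_isdup_locked(peer, newconn);
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        if (rc != 0) {
                /* 1: older peerstamp, 2: older peer connstamp,
                 * 3: identical connstamp from a misbehaving peer */
                CDEBUG(D_NET, "dropping duplicate conn (code %d)\n", rc);
                return -EEXIST;
        }
        return 0;
}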
226
227 int
228 kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
229 {
230         kgn_conn_t      *conn;
231         gni_return_t    rrc;
232         int             rc = 0;
233
234         LASSERT (!in_interrupt());
235         atomic_inc(&kgnilnd_data.kgn_nconns);
236
237         /* divide by 2 to allow for complete reset and immediate reconnect */
238         if (atomic_read(&kgnilnd_data.kgn_nconns) >= GNILND_MAX_CQID/2) {
239                 CERROR("Too many conns are live: %d > %d\n",
240                         atomic_read(&kgnilnd_data.kgn_nconns), GNILND_MAX_CQID/2);
241                 atomic_dec(&kgnilnd_data.kgn_nconns);
242                 return -E2BIG;
243         }
244
245         LIBCFS_ALLOC(conn, sizeof(*conn));
246         if (conn == NULL) {
247                 atomic_dec(&kgnilnd_data.kgn_nconns);
248                 return -ENOMEM;
249         }
250
251         LIBCFS_ALLOC(conn->gnc_tx_ref_table, GNILND_MAX_MSG_ID * sizeof(void *));
252         if (conn->gnc_tx_ref_table == NULL) {
253                 CERROR("Can't allocate conn tx_ref_table\n");
254                 GOTO(failed, rc = -ENOMEM);
255         }
256
257         mutex_init(&conn->gnc_smsg_mutex);
258         mutex_init(&conn->gnc_rdma_mutex);
259         atomic_set(&conn->gnc_refcount, 1);
260         atomic_set(&conn->gnc_reaper_noop, 0);
261         atomic_set(&conn->gnc_sched_noop, 0);
262         atomic_set(&conn->gnc_tx_in_use, 0);
263         INIT_LIST_HEAD(&conn->gnc_list);
264         INIT_LIST_HEAD(&conn->gnc_hashlist);
265         INIT_LIST_HEAD(&conn->gnc_schedlist);
266         INIT_LIST_HEAD(&conn->gnc_fmaq);
267         INIT_LIST_HEAD(&conn->gnc_mdd_list);
268         spin_lock_init(&conn->gnc_list_lock);
269         spin_lock_init(&conn->gnc_tx_lock);
270         conn->gnc_magic = GNILND_CONN_MAGIC;
271
272         /* set tx id to nearly the end to make sure we find wrapping
273          * issues soon */
274         conn->gnc_next_tx = (int) GNILND_MAX_MSG_ID - 10;
275
276         /* if this fails, we have conflicts and MAX_TX is too large */
277         CLASSERT(GNILND_MAX_MSG_ID < GNILND_MSGID_CLOSE);
278
279         /* get a new unique CQ id for this conn */
280         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
281         conn->gnc_my_connstamp = kgnilnd_data.kgn_connstamp++;
282         conn->gnc_cqid = kgnilnd_get_cqid_locked();
283         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
284
285         if (conn->gnc_cqid == 0) {
286                 CERROR("Could not allocate unique CQ ID for conn 0x%p\n", conn);
287                 GOTO(failed, rc = -E2BIG);
288         }
289
290         CDEBUG(D_NET, "alloc cqid %u for conn 0x%p\n",
291                 conn->gnc_cqid, conn);
292
293         /* needs to be set before gnc_ephandle to allow kgnilnd_destroy_conn_ep to
294          * check context */
295         conn->gnc_device = dev;
296
297         conn->gnc_timeout = MAX(*kgnilnd_tunables.kgn_timeout,
298                                 GNILND_MIN_TIMEOUT);
299         kgnilnd_update_reaper_timeout(conn->gnc_timeout);
300
301         /* this is the ep_handle for doing SMSG & BTE */
302         mutex_lock(&dev->gnd_cq_mutex);
303         rrc = kgnilnd_ep_create(dev->gnd_handle, dev->gnd_snd_fma_cqh,
304                                 &conn->gnc_ephandle);
305         mutex_unlock(&dev->gnd_cq_mutex);
306         if (rrc != GNI_RC_SUCCESS)
307                 GOTO(failed, rc = -ENETDOWN);
308
309         CDEBUG(D_NET, "created conn 0x%p ep_hndl 0x%p\n",
310                conn, conn->gnc_ephandle);
311
312         /* add ref for EP canceling */
313         kgnilnd_conn_addref(conn);
314         atomic_inc(&dev->gnd_neps);
315
316         *connp = conn;
317         return 0;
318
319 failed:
320         atomic_dec(&kgnilnd_data.kgn_nconns);
321         LIBCFS_FREE(conn->gnc_tx_ref_table, GNILND_MAX_MSG_ID * sizeof(void *));
322         LIBCFS_FREE(conn, sizeof(*conn));
323         return rc;
324 }
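A hedged allocation sketch (the caller is hypothetical): on success the returned conn already carries two references, one for the caller plus the EP-cancel reference taken above:

/* hedged example - not part of the original source */
static kgn_conn_t *kgnilnd_alloc_conn_example(kgn_device_t *dev)
{
        kgn_conn_t *conn = NULL;
        int         rc;

        rc = kgnilnd_create_conn(&conn, dev);
        if (rc != 0) {
                CERROR("Can't create conn: %d\n", rc);
                return NULL;
        }

        /* conn now holds the caller's ref plus the EP-cancel ref */
        CDEBUG(D_NET, "new conn %p cqid %u\n", conn, conn->gnc_cqid);
        return conn;
}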
325
326 /* needs to be called with kgn_peer_conn_lock held (read or write) */
327 kgn_conn_t *
328 kgnilnd_find_conn_locked(kgn_peer_t *peer)
329 {
330         kgn_conn_t      *conn = NULL;
331
332         /* if we are in reset, this conn is going to die soon */
333         if (unlikely(kgnilnd_data.kgn_in_reset)) {
334                 RETURN(NULL);
335         }
336
337         /* just return the first ESTABLISHED connection */
338         list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
339                 /* kgnilnd_finish_connect doesn't put connections on the
340                  * peer list until they are actually established */
341                 LASSERTF(conn->gnc_state >= GNILND_CONN_ESTABLISHED,
342                         "found conn %p state %s on peer %p (%s)\n",
343                         conn, kgnilnd_conn_state2str(conn), peer,
344                         libcfs_nid2str(peer->gnp_nid));
345                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
346                         continue;
347
348                 RETURN(conn);
349         }
350         RETURN(NULL);
351 }
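A hedged lookup sketch (the caller is hypothetical): a read lock is sufficient for the lookup, but the conn must gain a reference before the lock is dropped if it will be used afterwards:

/* hedged example - not part of the original source */
static kgn_conn_t *kgnilnd_sample_conn_example(kgn_peer_t *peer)
{
        kgn_conn_t *conn;

        read_lock(&kgnilnd_data.kgn_peer_conn_lock);
        conn = kgnilnd_find_conn_locked(peer);
        if (conn != NULL)
                kgnilnd_conn_addref(conn);      /* hold it past the unlock */
        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        return conn;
}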
352
353 /* needs write_lock on kgn_peer_conn_lock held */
354 kgn_conn_t *
355 kgnilnd_find_or_create_conn_locked(kgn_peer_t *peer) {
356
357         kgn_device_t    *dev = peer->gnp_net->gnn_dev;
358         kgn_conn_t      *conn;
359
360         conn = kgnilnd_find_conn_locked(peer);
361
362         if (conn != NULL) {
363                 return conn;
364         }
365
366         /* if the peer was previously connecting, check if we should
367          * trigger another connection attempt yet. */
368         if (time_before(jiffies, peer->gnp_reconnect_time)) {
369                 return NULL;
370         }
371
372         /* This check prevents us from creating a new connection to a peer while we are
373          * still in the process of closing an existing connection to the peer.
374          */
375         list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
376                 if (conn->gnc_ephandle != NULL) {
377                         CDEBUG(D_NET, "Not connecting: non-null ephandle found for peer 0x%p->%s\n", peer,
378                                 libcfs_nid2str(peer->gnp_nid));
379                         return NULL;
380                 }
381         }
382
383         if (peer->gnp_connecting != GNILND_PEER_IDLE) {
384                 /* if the peer is anything but IDLE, don't start
385                  * a new connection attempt */
386                 return NULL;
387         }
388
389         CDEBUG(D_NET, "starting connect to %s\n",
390                 libcfs_nid2str(peer->gnp_nid));
391         peer->gnp_connecting = GNILND_PEER_CONNECT;
392         kgnilnd_peer_addref(peer); /* extra ref for connd */
393
394         spin_lock(&dev->gnd_connd_lock);
395         list_add_tail(&peer->gnp_connd_list, &dev->gnd_connd_peers);
396         spin_unlock(&dev->gnd_connd_lock);
397
398         kgnilnd_schedule_dgram(dev);
399         CDEBUG(D_NETTRACE, "scheduling new connect\n");
400
401         return NULL;
402 }
403
404 /* Caller is responsible for deciding if/when to call this */
405 void
406 kgnilnd_destroy_conn_ep(kgn_conn_t *conn)
407 {
408         gni_return_t    rrc;
409         gni_ep_handle_t tmp_ep;
410
411         /* only tear down the EP if we actually initialized it;
412          * setting it to NULL tells kgnilnd_destroy_conn to leave it alone */
413
414         tmp_ep = xchg(&conn->gnc_ephandle, NULL);
415         if (tmp_ep != NULL) {
416                 /* we never re-use the EP, so unbind is not needed */
417                 mutex_lock(&conn->gnc_device->gnd_cq_mutex);
418                 rrc = kgnilnd_ep_destroy(tmp_ep);
419
420                 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
421
422                 /* if this fails, it could hork up kgni smsg retransmit and others
423                  * since we could free the SMSG mbox memory, etc. */
424                 LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d conn 0x%p ep 0x%p\n",
425                          rrc, conn, conn->gnc_ephandle);
426
427                 atomic_dec(&conn->gnc_device->gnd_neps);
428
429                 /* clear out count added in kgnilnd_close_conn_locked
430                  * conn will have a peer once it hits finish_connect, where it
431                  * is the first spot we'll mark it ESTABLISHED as well */
432                 if (conn->gnc_peer) {
433                         kgnilnd_admin_decref(conn->gnc_peer->gnp_dirty_eps);
434                 }
435
436                 /* drop ref for EP */
437                 kgnilnd_conn_decref(conn);
438         }
439 }
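The xchg() above is a claim-then-free idiom: whichever caller swaps the non-NULL ephandle out owns the teardown, so the EP is destroyed exactly once even if destroy paths race. A generic sketch of the same idiom, with a hypothetical resource field and free routine:

/* hedged sketch of the claim-then-free idiom - obj->res_ptr and
 * free_resource() are hypothetical, not part of gnilnd */
void *res = xchg(&obj->res_ptr, NULL);
if (res != NULL) {
        /* only the caller that saw the non-NULL value gets here,
         * so the resource is freed exactly once */
        free_resource(res);
}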
440
441 void
442 kgnilnd_destroy_conn(kgn_conn_t *conn)
443 {
444         LASSERTF(!in_interrupt() &&
445                 !conn->gnc_scheduled &&
446                 !conn->gnc_in_purgatory &&
447                 conn->gnc_ephandle == NULL &&
448                 list_empty(&conn->gnc_list) &&
449                 list_empty(&conn->gnc_hashlist) &&
450                 list_empty(&conn->gnc_schedlist) &&
451                 list_empty(&conn->gnc_mdd_list) &&
452                 conn->gnc_magic == GNILND_CONN_MAGIC,
453                 "conn 0x%p->%s IRQ %d sched %d purg %d ep 0x%p Mg %d lists %d/%d/%d/%d\n",
454                 conn, conn->gnc_peer ? libcfs_nid2str(conn->gnc_peer->gnp_nid)
455                                      : "<?>",
456                 !!in_interrupt(), conn->gnc_scheduled,
457                 conn->gnc_in_purgatory,
458                 conn->gnc_ephandle,
459                 conn->gnc_magic,
460                 list_empty(&conn->gnc_list),
461                 list_empty(&conn->gnc_hashlist),
462                 list_empty(&conn->gnc_schedlist),
463                 list_empty(&conn->gnc_mdd_list));
464
465         /* Tripping these is especially bad, as it means we have items on the
466          *  lists that didn't keep their refcount on the connection - or
467          *  somebody evil released their own */
468         LASSERTF(list_empty(&conn->gnc_fmaq) &&
469                  atomic_read(&conn->gnc_nlive_fma) == 0 &&
470                  atomic_read(&conn->gnc_nlive_rdma) == 0,
471                  "conn 0x%p fmaq %d@0x%p nfma %d nrdma %d\n",
472                  conn, kgnilnd_count_list(&conn->gnc_fmaq), &conn->gnc_fmaq,
473                  atomic_read(&conn->gnc_nlive_fma), atomic_read(&conn->gnc_nlive_rdma));
474
475         CDEBUG(D_NET, "destroying conn %p ephandle %p error %d\n",
476                 conn, conn->gnc_ephandle, conn->gnc_error);
477
478         /* We are freeing this memory; remove the magic value from the connection */
479         conn->gnc_magic = 0;
480
481         /* if there is an FMA blk left here, we'll tear it down */
482         if (conn->gnc_fma_blk) {
483                 if (conn->gnc_peer) {
484                         kgn_mbox_info_t *mbox;
485                         mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
486                         mbox->mbx_prev_nid = conn->gnc_peer->gnp_nid;
487                 }
488                 kgnilnd_release_mbox(conn, 0);
489         }
490
491         if (conn->gnc_peer != NULL)
492                 kgnilnd_peer_decref(conn->gnc_peer);
493
494         if (conn->gnc_tx_ref_table != NULL) {
495                 LIBCFS_FREE(conn->gnc_tx_ref_table,
496                             GNILND_MAX_MSG_ID * sizeof(void *));
497         }
498
499         LIBCFS_FREE(conn, sizeof(*conn));
500         atomic_dec(&kgnilnd_data.kgn_nconns);
501 }
502
503 /* peer_alive and peer_notify done in the style of the o2iblnd */
504 void
505 kgnilnd_peer_alive(kgn_peer_t *peer)
506 {
507         set_mb(peer->gnp_last_alive, jiffies);
508 }
509
510 void
511 kgnilnd_peer_notify(kgn_peer_t *peer, int error, int alive)
512 {
513         int                     tell_lnet = 0;
514         int                     nnets = 0;
515         int                     rc;
516         int                     i, j;
517         kgn_conn_t             *conn;
518         kgn_net_t             **nets;
519         kgn_net_t              *net;
520
521
522         if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DONT_NOTIFY))
523                 return;
524
525         /* Tell LNet we are giving up on this peer - but only
526          * if it isn't already reconnected or trying to reconnect */
527         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
528
529         /* use kgnilnd_find_conn_locked to avoid any conns in the process of being nuked
530          *
531          * don't tell LNet if we are in reset - we assume that everyone will be able to
532          * reconnect just fine
533          */
534         conn = kgnilnd_find_conn_locked(peer);
535
536         CDEBUG(D_NETTRACE, "peer 0x%p->%s ting %d conn 0x%p, rst %d error %d\n",
537                peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting, conn,
538                kgnilnd_data.kgn_in_reset, error);
539
540         if (((peer->gnp_connecting == GNILND_PEER_IDLE) &&
541             (conn == NULL) &&
542             (!kgnilnd_data.kgn_in_reset) &&
543             (!kgnilnd_conn_clean_errno(error))) || alive) {
544                 tell_lnet = 1;
545         }
546
547         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
548
549         if (!tell_lnet) {
550                 /* short circuit if we don't need to notify LNet */
551                 return;
552         }
553
554         rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
555
556         if (rc) {
557                 /* only proceed if we got the sem; if the trylock failed,
558                  * LNet is in shutdown or something similar */
559
560                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
561                         list_for_each_entry(net , &kgnilnd_data.kgn_nets[i], gnn_list) {
562                                 /* if gnn_shutdown is set for any net, shutdown is in progress; just return */
563                                 if (net->gnn_shutdown) {
564                                         up_read(&kgnilnd_data.kgn_net_rw_sem);
565                                         return;
566                                 }
567                                 nnets++;
568                         }
569                 }
570
571                 if (nnets == 0) {
572                         /* shutdown in progress most likely */
573                         up_read(&kgnilnd_data.kgn_net_rw_sem);
574                         return;
575                 }
576
577                 LIBCFS_ALLOC(nets, nnets * sizeof(*nets));
578
579                 if (nets == NULL) {
580                         up_read(&kgnilnd_data.kgn_net_rw_sem);
581                         CERROR("Failed to allocate nets[%d]\n", nnets);
582                         return;
583                 }
584
585                 j = 0;
586                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
587                         list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
588                                 nets[j] = net;
589                                 kgnilnd_net_addref(net);
590                                 j++;
591                         }
592                 }
593                 up_read(&kgnilnd_data.kgn_net_rw_sem);
594
595                 for (i = 0; i < nnets; i++) {
596                         lnet_nid_t peer_nid;
597
598                         net = nets[i];
599
600                         peer_nid = kgnilnd_lnd2lnetnid(net->gnn_ni->ni_nid,
601                                                                  peer->gnp_nid);
602
603                         CDEBUG(D_NET, "peer 0x%p->%s last_alive %lu (%lus ago)\n",
604                                 peer, libcfs_nid2str(peer_nid), peer->gnp_last_alive,
605                                 cfs_duration_sec(jiffies - peer->gnp_last_alive));
606
607                         lnet_notify(net->gnn_ni, peer_nid, alive,
608                                     peer->gnp_last_alive);
609
610                         kgnilnd_net_decref(net);
611                 }
612
613                 LIBCFS_FREE(nets, nnets * sizeof(*nets));
614         }
615 }
616
617 /* need write_lock on kgn_peer_conn_lock */
618 void
619 kgnilnd_close_conn_locked(kgn_conn_t *conn, int error)
620 {
621         kgn_peer_t        *peer = conn->gnc_peer;
622         ENTRY;
623
624         LASSERT(!in_interrupt());
625
626         /* store error for tx completion */
627         conn->gnc_error = error;
628         peer->gnp_last_errno = error;
629
630         /* use real error from peer if possible */
631         if (error == -ECONNRESET) {
632                 error = conn->gnc_peer_error;
633         }
634
635         /* if we NETERROR, make sure it is rate limited */
636         if (!kgnilnd_conn_clean_errno(error) &&
637             peer->gnp_down == GNILND_RCA_NODE_UP) {
638                 CNETERR("closing conn to %s: error %d\n",
639                        libcfs_nid2str(peer->gnp_nid), error);
640         } else {
641                 CDEBUG(D_NET, "closing conn to %s: error %d\n",
642                        libcfs_nid2str(peer->gnp_nid), error);
643         }
644
645         LASSERTF(conn->gnc_state == GNILND_CONN_ESTABLISHED,
646                 "conn %p to %s with bogus state %s\n", conn,
647                 libcfs_nid2str(conn->gnc_peer->gnp_nid),
648                 kgnilnd_conn_state2str(conn));
649         LASSERT(!list_empty(&conn->gnc_hashlist));
650         LASSERT(!list_empty(&conn->gnc_list));
651
652
653         /* mark peer count here so any place the EP gets destroyed will
654          * open up the peer count so that a new ESTABLISHED conn is then free
655          * to send new messages -- sending before the previous EPs are destroyed
656          * could end up with messages on the network for the old conn _after_
657          * the new conn and break the mbox safety protocol */
658         kgnilnd_admin_addref(conn->gnc_peer->gnp_dirty_eps);
659
660         /* Remove from conn hash table: no new callbacks */
661         list_del_init(&conn->gnc_hashlist);
662         kgnilnd_data.kgn_conn_version++;
663         kgnilnd_conn_decref(conn);
664
665         /* if we are in reset, go right to CLOSED as there is no scheduler
666          * thread to move from CLOSING to CLOSED */
667         if (unlikely(kgnilnd_data.kgn_in_reset)) {
668                 conn->gnc_state = GNILND_CONN_CLOSED;
669         } else {
670                 conn->gnc_state = GNILND_CONN_CLOSING;
671         }
672
673         /* leave on peer->gnp_conns to make sure we don't let the reaper
674          * or others try to unlink this peer until the conn is fully
675          * processed for closing */
676
677         if (kgnilnd_check_purgatory_conn(conn)) {
678                 kgnilnd_add_purgatory_locked(conn, conn->gnc_peer);
679         }
680
681         /* Reset RX timeout to ensure we wait for an incoming CLOSE
682          * for the full timeout.  If we get a CLOSE we know the
683          * peer has stopped all RDMA.  Otherwise if we wait for
684          * the full timeout we can also be sure all RDMA has stopped. */
685         conn->gnc_last_rx = conn->gnc_last_rx_cq = jiffies;
686         mb();
687
688         /* schedule sending CLOSE - if we are in quiesce, this adds to
689          * gnd_ready_conns and allows us to find it in quiesce processing */
690         kgnilnd_schedule_conn(conn);
691
692         EXIT;
693 }
694
695 void
696 kgnilnd_close_conn(kgn_conn_t *conn, int error)
697 {
698         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
699         /* need to check the state here - this call is racy and we don't
700          * know the state until after the lock is grabbed */
701         if (conn->gnc_state == GNILND_CONN_ESTABLISHED) {
702                 kgnilnd_close_conn_locked(conn, error);
703         }
704         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
705 }
706
707 void
708 kgnilnd_complete_closed_conn(kgn_conn_t *conn)
709 {
710         LIST_HEAD               (sinners);
711         kgn_tx_t               *tx, *txn;
712         int                     nlive = 0;
713         int                     nrdma = 0;
714         int                     nq_rdma = 0;
715         int                     logmsg;
716         ENTRY;
717
718         /* Dump log on cksum error - wait until complete phase to let
719          * RX of error happen */
720         if (*kgnilnd_tunables.kgn_checksum_dump &&
721             (conn != NULL && conn->gnc_peer_error == -ENOKEY)) {
722                 libcfs_debug_dumplog();
723         }
724
725         /* _CLOSED set in kgnilnd_process_fmaq once we decide to
726          * send the CLOSE or not */
727         LASSERTF(conn->gnc_state == GNILND_CONN_CLOSED,
728                  "conn 0x%p->%s with bad state %s\n",
729                  conn, conn->gnc_peer ?
730                         libcfs_nid2str(conn->gnc_peer->gnp_nid) :
731                         "<?>",
732                  kgnilnd_conn_state2str(conn));
733
734         LASSERT(list_empty(&conn->gnc_hashlist));
735
736         /* we've sent the close, start nuking */
737         if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SCHEDULE_COMPLETE))
738                 kgnilnd_schedule_conn(conn);
739
740         if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
741                 CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
742                                 "done; attempting to recover conn 0x%p "
743                                 "scheduled %d function: %s line: %d\n", conn,
744                                 conn->gnc_scheduled, conn->gnc_sched_caller,
745                                 conn->gnc_sched_line);
746                 RETURN_EXIT;
747         }
748
749         /* we don't use lists to track things that we can get out of the
750          * tx_ref table... */
751
752         /* need to hold locks for tx_list_state, sampling it is too racy:
753          * - the lock actually protects tx != NULL, but we can't take the proper
754          *   lock until we check tx_list_state, which would be too late and
755          *   we could have the TX change under us.
756          * gnd_rdmaq_lock and gnd_lock are not used together, so taking both
757          * should be fine */
758         spin_lock(&conn->gnc_device->gnd_rdmaq_lock);
759         spin_lock(&conn->gnc_device->gnd_lock);
760
761         for (nrdma = 0; nrdma < GNILND_MAX_MSG_ID; nrdma++) {
762                 tx = conn->gnc_tx_ref_table[nrdma];
763
764                 if (tx != NULL) {
765                         /* only print the first error and if not CLOSE, we often don't see
766                          * CQ events for that by the time we get here... and really don't care */
767                         if (nlive || tx->tx_msg.gnm_type == GNILND_MSG_CLOSE)
768                                 tx->tx_state |= GNILND_TX_QUIET_ERROR;
769                         nlive++;
770                         GNIDBG_TX(D_NET, tx, "cleaning up on close, nlive %d", nlive);
771
772                         /* don't worry about gnc_lock here as nobody else should be
773                          * touching this conn */
774                         kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
775                         list_add_tail(&tx->tx_list, &sinners);
776                 }
777         }
778         spin_unlock(&conn->gnc_device->gnd_lock);
779         spin_unlock(&conn->gnc_device->gnd_rdmaq_lock);
780
781         /* nobody should have marked this as needing scheduling after
782          * we called close - so only ref should be us handling it */
783         if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
784                 CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
785                                 "done; attempting to recover conn 0x%p "
786                                 "scheduled %d function %s line: %d\n", conn,
787                                 conn->gnc_scheduled, conn->gnc_sched_caller,
788                                 conn->gnc_sched_line);
789         }
790         /* now reset a few to actual counters... */
791         nrdma = atomic_read(&conn->gnc_nlive_rdma);
792         nq_rdma = atomic_read(&conn->gnc_nq_rdma);
793
794         if (!list_empty(&sinners)) {
795                 list_for_each_entry_safe(tx, txn, &sinners, tx_list) {
796                         /* clear tx_list to make tx_add_list_locked happy */
797                         list_del_init(&tx->tx_list);
798                         /* The error codes determine if we hold onto the MDD */
799                         kgnilnd_tx_done(tx, conn->gnc_error);
800                 }
801         }
802
803         logmsg = (nlive + nrdma + nq_rdma);
804
805         if (logmsg) {
806                 if (conn->gnc_peer->gnp_down == GNILND_RCA_NODE_UP) {
807                         CNETERR("Closed conn 0x%p->%s (errno %d, peer errno %d): "
808                                 "canceled %d TX, %d/%d RDMA\n",
809                                 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
810                                 conn->gnc_error, conn->gnc_peer_error,
811                                 nlive, nq_rdma, nrdma);
812                 } else {
813                         CDEBUG(D_NET, "Closed conn 0x%p->%s (errno %d,"
814                                 " peer errno %d): canceled %d TX, %d/%d RDMA\n",
815                                 conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
816                                 conn->gnc_error, conn->gnc_peer_error,
817                                 nlive, nq_rdma, nrdma);
818                 }
819         }
820
821         kgnilnd_destroy_conn_ep(conn);
822
823         /* Bug 765042 - race this with completing a new conn to same peer - we need
824          * finish_connect to detach purgatory before we can do it ourselves here */
825         CFS_RACE(CFS_FAIL_GNI_FINISH_PURG);
826
827         /* now it is safe to remove from peer list - anyone looking at
828          * gnp_conns now is free to unlink if not on purgatory */
829         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
830
831         conn->gnc_state = GNILND_CONN_DONE;
832
833         /* Decrement counter if we are marked by del_conn_or_peers for closing
834          */
835         if (conn->gnc_needs_closing)
836                 kgnilnd_admin_decref(kgnilnd_data.kgn_npending_conns);
837
838         /* Remove from peer's list of valid connections if it's not in purgatory */
839         if (!conn->gnc_in_purgatory) {
840                 list_del_init(&conn->gnc_list);
841                 /* Lose the peer's reference on the conn */
842                 kgnilnd_conn_decref(conn);
843         }
844
845         /* NB - only unlinking if we set pending in del_peer_locked from admin or
846          * shutdown */
847         if (kgnilnd_peer_active(conn->gnc_peer) &&
848             conn->gnc_peer->gnp_pending_unlink &&
849             kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
850                 kgnilnd_unlink_peer_locked(conn->gnc_peer);
851         }
852
853         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
854
855         /* I'm telling Mommy! - use peer_error if they initiated close */
856         kgnilnd_peer_notify(conn->gnc_peer,
857                             conn->gnc_error == -ECONNRESET ?
858                             conn->gnc_peer_error : conn->gnc_error, 0);
859
860         EXIT;
861 }
862
863 int
864 kgnilnd_set_conn_params(kgn_dgram_t *dgram)
865 {
866         kgn_conn_t             *conn = dgram->gndg_conn;
867         kgn_connreq_t          *connreq = &dgram->gndg_conn_in;
868         kgn_gniparams_t        *rem_param = &connreq->gncr_gnparams;
869         gni_return_t            rrc;
870         int                     rc = 0;
871         gni_smsg_attr_t        *remote = &connreq->gncr_gnparams.gnpr_smsg_attr;
872
873         /* set timeout vals in conn early so we can use them for the NAK */
874
875         /* use max of the requested and our timeout, peer will do the same */
876         conn->gnc_timeout = MAX(conn->gnc_timeout, connreq->gncr_timeout);
877
878         /* only ep_bind really mucks around with the CQ */
879         /* only ep_bind if we are not connecting to ourselves and the dstnid is not a wildcard. This check
880          * is necessary as you can only bind an ep once and we must make sure we don't bind when already bound.
881          */
882         if (connreq->gncr_dstnid != LNET_NID_ANY && dgram->gndg_conn_out.gncr_dstnid != connreq->gncr_srcnid) {
883                 mutex_lock(&conn->gnc_device->gnd_cq_mutex);
884                 rrc = kgnilnd_ep_bind(conn->gnc_ephandle,
885                         connreq->gncr_gnparams.gnpr_host_id,
886                         conn->gnc_cqid);
887                 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
888                 if (rrc != GNI_RC_SUCCESS) {
889                         rc = -ECONNABORTED;
890                         goto return_out;
891                 }
892         }
893
894         rrc = kgnilnd_ep_set_eventdata(conn->gnc_ephandle, conn->gnc_cqid,
895                          connreq->gncr_gnparams.gnpr_cqid);
896         if (rrc != GNI_RC_SUCCESS) {
897                 rc = -ECONNABORTED;
898                 goto cleanup_out;
899         }
900
901         /* Initialize SMSG */
902         rrc = kgnilnd_smsg_init(conn->gnc_ephandle, &conn->gnpr_smsg_attr,
903                         &connreq->gncr_gnparams.gnpr_smsg_attr);
904         if (unlikely(rrc == GNI_RC_INVALID_PARAM)) {
905                 gni_smsg_attr_t *local = &conn->gnpr_smsg_attr;
906                 /* help folks figure out if there is a tunable off, etc. */
907                 LCONSOLE_ERROR("SMSG attribute mismatch. Data from local/remote:"
908                                " type %d/%d msg_maxsize %u/%u"
909                                " mbox_maxcredit %u/%u. Please check kgni"
910                                " logs for further data\n",
911                                local->msg_type, remote->msg_type,
912                                local->msg_maxsize, remote->msg_maxsize,
913                                local->mbox_maxcredit, remote->mbox_maxcredit);
914         }
915         if (rrc != GNI_RC_SUCCESS) {
916                 rc = -ECONNABORTED;
917                 goto cleanup_out;
918         }
919
920         /* log this for help in debugging SMSG buffer re-use */
921         CDEBUG(D_NET, "conn %p src %s dst %s smsg %p acquired"
922                 " local cqid %u SMSG %p->%u hndl "LPX64"."LPX64
923                 " remote cqid %u SMSG %p->%u hndl "LPX64"."LPX64"\n",
924                 conn, libcfs_nid2str(connreq->gncr_srcnid),
925                 libcfs_nid2str(connreq->gncr_dstnid),
926                 &conn->gnpr_smsg_attr,
927                 conn->gnc_cqid,
928                 conn->gnpr_smsg_attr.msg_buffer,
929                 conn->gnpr_smsg_attr.mbox_offset,
930                 conn->gnpr_smsg_attr.mem_hndl.qword1,
931                 conn->gnpr_smsg_attr.mem_hndl.qword2,
932                 rem_param->gnpr_cqid,
933                 rem_param->gnpr_smsg_attr.msg_buffer,
934                 rem_param->gnpr_smsg_attr.mbox_offset,
935                 rem_param->gnpr_smsg_attr.mem_hndl.qword1,
936                 rem_param->gnpr_smsg_attr.mem_hndl.qword2);
937
938         conn->gnc_peerstamp = connreq->gncr_peerstamp;
939         conn->gnc_peer_connstamp = connreq->gncr_connstamp;
940         conn->remote_mbox_addr = (void *)((char *)remote->msg_buffer + remote->mbox_offset);
941
942         /* We update the reaper timeout once we have a valid conn and timeout */
943         kgnilnd_update_reaper_timeout(GNILND_TO2KA(conn->gnc_timeout));
944
945         return 0;
946
947 cleanup_out:
948         rrc = kgnilnd_ep_unbind(conn->gnc_ephandle);
949         /* not sure I can just let this fly */
950         LASSERTF(rrc == GNI_RC_SUCCESS,
951                 "bad rc from gni_ep_unbind trying to cleanup: %d\n", rrc);
952
953 return_out:
954         LASSERTF(rc != 0, "SOFTWARE BUG: rc == 0\n");
955         CERROR("Error setting connection params from %s: %d\n",
956                libcfs_nid2str(connreq->gncr_srcnid), rc);
957         return rc;
958 }
959
960 /* needs down_read on kgn_net_rw_sem held from before this call until
961  * after the write_lock on kgn_peer_conn_lock - this ensures we stay sane
962  * with kgnilnd_shutdown - it'll get the sem and set shutdown, then get the
963  * kgn_peer_conn_lock to start del_peer'ing. If we hold the sem until after
964  * kgn_peer_conn_lock is held, we guarantee that nobody calls
965  * kgnilnd_add_peer_locked without checking gnn_shutdown */
966 int
967 kgnilnd_create_peer_safe(kgn_peer_t **peerp,
968                          lnet_nid_t nid,
969                          kgn_net_t *net,
970                          int node_state)
971 {
972         kgn_peer_t      *peer;
973         int             rc;
974
975         LASSERT(nid != LNET_NID_ANY);
976
977         /* We don't pass the net around in the dgram anymore, so here is where we find it.
978          * This will work unless it's in shutdown or the nid has a net that is invalid;
979          * either way an error code needs to be returned in that case.
980          *
981          * If the net passed in is not NULL then we can use it; this avoids looking it up
982          * when the calling function already has access to the data.
983          */
984         if (net == NULL) {
985                 rc = kgnilnd_find_net(nid, &net);
986                 if (rc < 0)
987                         return rc;
988         } else {
989                 /* kgnilnd_find_net adds a reference on the net; since we are
990                  * not calling it here, we must take the reference manually so
991                  * the net references are correct when tearing down the net
992                  */
993                 kgnilnd_net_addref(net);
994         }
995
996         LIBCFS_ALLOC(peer, sizeof(*peer));
997         if (peer == NULL) {
998                 kgnilnd_net_decref(net);
999                 return -ENOMEM;
1000         }
1001         peer->gnp_nid = nid;
1002         peer->gnp_down = node_state;
1003
1004         /* translate from nid to nic addr & store */
1005         rc = kgnilnd_nid_to_nicaddrs(LNET_NIDADDR(nid), 1, &peer->gnp_host_id);
1006         if (rc <= 0) {
1007                 kgnilnd_net_decref(net);
1008                 LIBCFS_FREE(peer, sizeof(*peer));
1009                 return -ESRCH;
1010         }
1011         CDEBUG(D_NET, "peer 0x%p->%s -> NIC 0x%x\n", peer,
1012                 libcfs_nid2str(nid), peer->gnp_host_id);
1013
1014         atomic_set(&peer->gnp_refcount, 1);     /* 1 ref for caller */
1015         atomic_set(&peer->gnp_dirty_eps, 0);
1016
1017         INIT_LIST_HEAD(&peer->gnp_list);
1018         INIT_LIST_HEAD(&peer->gnp_connd_list);
1019         INIT_LIST_HEAD(&peer->gnp_conns);
1020         INIT_LIST_HEAD(&peer->gnp_tx_queue);
1021
1022         /* the first reconnect should happen immediately, so we leave
1023          * gnp_reconnect_interval set to 0 */
1024
1025         LASSERTF(net != NULL, "peer 0x%p->%s with NULL net\n",
1026                  peer, libcfs_nid2str(nid));
1027
1028         /* must have kgn_net_rw_sem held for this...  */
1029         if (net->gnn_shutdown) {
1030                 /* shutdown has started already */
1031                 kgnilnd_net_decref(net);
1032                 LIBCFS_FREE(peer, sizeof(*peer));
1033                 return -ESHUTDOWN;
1034         }
1035
1036         peer->gnp_net = net;
1037
1038         atomic_inc(&kgnilnd_data.kgn_npeers);
1039
1040         *peerp = peer;
1041         return 0;
1042 }
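A hedged sketch of the locking order described in the comment above kgnilnd_create_peer_safe (the caller function is hypothetical): kgn_net_rw_sem is held from before the call until after kgn_peer_conn_lock is taken, so shutdown cannot slip in between peer creation and insertion:

/* hedged example - not part of the original source */
static int kgnilnd_add_peer_example(lnet_nid_t nid, kgn_net_t *net, int node_state)
{
        kgn_peer_t *peer = NULL;
        kgn_peer_t *found = NULL;
        int         rc;

        down_read(&kgnilnd_data.kgn_net_rw_sem);

        rc = kgnilnd_create_peer_safe(&peer, nid, net, node_state);
        if (rc != 0) {
                up_read(&kgnilnd_data.kgn_net_rw_sem);
                return rc;
        }

        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        /* only drop the sem once the peer table lock is held */
        up_read(&kgnilnd_data.kgn_net_rw_sem);

        kgnilnd_add_peer_locked(nid, peer, &found);

        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
        return 0;
}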
1043
1044 void
1045 kgnilnd_destroy_peer(kgn_peer_t *peer)
1046 {
1047         CDEBUG(D_NET, "peer %s %p deleted\n",
1048                libcfs_nid2str(peer->gnp_nid), peer);
1049         LASSERTF(atomic_read(&peer->gnp_refcount) == 0,
1050                  "peer 0x%p->%s refs %d\n",
1051                  peer, libcfs_nid2str(peer->gnp_nid),
1052                  atomic_read(&peer->gnp_refcount));
1053         LASSERTF(atomic_read(&peer->gnp_dirty_eps) == 0,
1054                  "peer 0x%p->%s dirty eps %d\n",
1055                  peer, libcfs_nid2str(peer->gnp_nid),
1056                  atomic_read(&peer->gnp_dirty_eps));
1057         LASSERTF(peer->gnp_net != NULL, "peer %p (%s) with NULL net\n",
1058                  peer, libcfs_nid2str(peer->gnp_nid));
1059         LASSERTF(!kgnilnd_peer_active(peer),
1060                  "peer 0x%p->%s\n",
1061                 peer, libcfs_nid2str(peer->gnp_nid));
1062         LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE || peer->gnp_connecting == GNILND_PEER_KILL,
1063                  "peer 0x%p->%s, connecting %d\n",
1064                 peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
1065         LASSERTF(list_empty(&peer->gnp_conns),
1066                  "peer 0x%p->%s\n",
1067                 peer, libcfs_nid2str(peer->gnp_nid));
1068         LASSERTF(list_empty(&peer->gnp_tx_queue),
1069                  "peer 0x%p->%s\n",
1070                 peer, libcfs_nid2str(peer->gnp_nid));
1071         LASSERTF(list_empty(&peer->gnp_connd_list),
1072                  "peer 0x%p->%s\n",
1073                 peer, libcfs_nid2str(peer->gnp_nid));
1074
1075         /* NB a peer's connections keep a reference on their peer until
1076          * they are destroyed, so we can be assured that _all_ state to do
1077          * with this peer has been cleaned up when its refcount drops to
1078          * zero. */
1079
1080         atomic_dec(&kgnilnd_data.kgn_npeers);
1081         kgnilnd_net_decref(peer->gnp_net);
1082
1083         LIBCFS_FREE(peer, sizeof(*peer));
1084 }
1085
1086 /* the conn might not have made it all the way through to a connected
1087  * state - but we need to purgatory any conn that a remote peer might
1088  * have seen through a posted dgram as well */
1089 void
1090 kgnilnd_add_purgatory_locked(kgn_conn_t *conn, kgn_peer_t *peer)
1091 {
1092         kgn_mbox_info_t *mbox = NULL;
1093         ENTRY;
1094
1095         /* NB - the caller should own the conn by removing it from the
1096          * scheduler thread when finishing the close */
1097
1098         LASSERTF(peer != NULL, "conn %p with NULL peer\n", conn);
1099
1100         /* If this is still true, need to add the calls to unlink back in and
1101          * figure out how to close the hole on loopback conns */
1102         LASSERTF(kgnilnd_peer_active(peer), "can't use inactive peer %s (%p)"
1103                 " we'll never recover the resources\n",
1104                 libcfs_nid2str(peer->gnp_nid), peer);
1105
1106         CDEBUG(D_NET, "conn %p peer %p dev %p\n", conn, peer,
1107                 conn->gnc_device);
1108
1109         LASSERTF(conn->gnc_in_purgatory == 0,
1110                 "Conn already in purgatory\n");
1111         conn->gnc_in_purgatory = 1;
1112
1113         mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
1114         mbox->mbx_prev_purg_nid = peer->gnp_nid;
1115         mbox->mbx_add_purgatory = jiffies;
1116         kgnilnd_release_mbox(conn, 1);
1117
1118         LASSERTF(list_empty(&conn->gnc_mdd_list),
1119                 "conn 0x%p->%s with active purgatory hold MDD %d\n",
1120                 conn, libcfs_nid2str(peer->gnp_nid),
1121                 kgnilnd_count_list(&conn->gnc_mdd_list));
1122
1123         EXIT;
1124 }
1125
1126 /* Instead of detaching everything from purgatory here we just mark the conn as needing
1127  * detach; when the reaper checks the conn the next time, it will detach it.
1128  * Calling function requires write_lock held on kgn_peer_conn_lock
1129  */
1130 void
1131 kgnilnd_mark_for_detach_purgatory_all_locked(kgn_peer_t *peer) {
1132         kgn_conn_t       *conn;
1133
1134         list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
1135                 if (conn->gnc_in_purgatory && !conn->gnc_needs_detach) {
1136                         conn->gnc_needs_detach = 1;
1137                         kgnilnd_admin_addref(kgnilnd_data.kgn_npending_detach);
1138                 }
1139         }
1140 }
1141
1142 /* Calling function needs a write_lock held on kgn_peer_conn_lock */
1143 void
1144 kgnilnd_detach_purgatory_locked(kgn_conn_t *conn, struct list_head *conn_list)
1145 {
1146         kgn_mbox_info_t *mbox = NULL;
1147
1148         /* if needed, add the conn purgatory data to the list passed in */
1149         if (conn->gnc_in_purgatory) {
1150                 CDEBUG(D_NET, "peer %p->%s purg_conn %p@%s mdd_list #tx %d\n",
1151                         conn->gnc_peer, libcfs_nid2str(conn->gnc_peer->gnp_nid),
1152                         conn, kgnilnd_conn_state2str(conn),
1153                         kgnilnd_count_list(&conn->gnc_mdd_list));
1154
1155                 mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
1156                 mbox->mbx_detach_of_purgatory = jiffies;
1157
1158                 /* conn->gnc_list is the entry point on peer->gnp_conns, so detaching it
1159                  * here removes it from the list of 'valid' peer connections.
1160                  * We put the current conn onto a list of conns to call kgnilnd_release_purgatory_locked()
1161                  * and as such the caller of kgnilnd_detach_purgatory_locked() now owns that conn, since it's not
1162                  * on the peer's conn_list anymore.
1163                  */
1164
1165                 list_del_init(&conn->gnc_list);
1166
1167                 /* NB - only unlinking if we set pending in del_peer_locked from admin or
1168                  * shutdown */
1169                 if (kgnilnd_peer_active(conn->gnc_peer) &&
1170                     conn->gnc_peer->gnp_pending_unlink &&
1171                     kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
1172                         kgnilnd_unlink_peer_locked(conn->gnc_peer);
1173                 }
1174                 /* The reaper will not call detach unless the conn is fully through kgnilnd_complete_closed_conn.
1175                  * If the conn is somehow not in a DONE state, we are attempting to detach even though
1176                  * the conn has not been fully cleaned up. If we detach while the conn is still closing,
1177                  * we will end up with an orphaned connection that has a valid ep_handle but is not on a
1178                  * peer.
1179                  */
1180
1181                 LASSERTF(conn->gnc_state == GNILND_CONN_DONE, "Conn in invalid state %p@%s\n",
1182                                 conn, kgnilnd_conn_state2str(conn));
1183
1184                 /* move from peer to the delayed release list */
1185                 list_add_tail(&conn->gnc_list, conn_list);
1186         }
1187 }
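A hedged sketch of the two-phase purgatory flow (the caller is hypothetical): detach conns onto a private list under the write lock, then release them outside the lock with kgnilnd_release_purgatory_list below:

/* hedged example - not part of the original source */
static void kgnilnd_flush_purgatory_example(kgn_peer_t *peer)
{
        kgn_conn_t *conn, *connN;
        LIST_HEAD  (souls);

        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        list_for_each_entry_safe(conn, connN, &peer->gnp_conns, gnc_list)
                kgnilnd_detach_purgatory_locked(conn, &souls);
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        /* the heavier release work happens outside the lock */
        kgnilnd_release_purgatory_list(&souls);
}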
1188
1189 void
1190 kgnilnd_release_purgatory_list(struct list_head *conn_list)
1191 {
1192         kgn_device_t            *dev;
1193         kgn_conn_t              *conn, *connN;
1194         kgn_mdd_purgatory_t     *gmp, *gmpN;
1195
1196         list_for_each_entry_safe(conn, connN, conn_list, gnc_list) {
1197                 dev = conn->gnc_device;
1198
1199                 kgnilnd_release_mbox(conn, -1);
1200                 conn->gnc_in_purgatory = 0;
1201
1202                 list_del_init(&conn->gnc_list);
1203
1204                 /* gnc_needs_detach is set in kgnilnd_del_conn_or_peer. It is used to keep track
1205                  * of conns that have been marked for detach by kgnilnd_del_conn_or_peer.
1206                  * The function uses kgn_npending_detach to verify the conn has
1207                  * actually been detached.
1208                  */
1209
1210                 if (conn->gnc_needs_detach)
1211                         kgnilnd_admin_decref(kgnilnd_data.kgn_npending_detach);
1212
1213                 /* if this guy is really dead (we are doing release from reaper),
1214                  * make sure we tell LNet - if this is from other context,
1215                  * the checks in the function will prevent an errant
1216                  * notification */
1217                 kgnilnd_peer_notify(conn->gnc_peer, conn->gnc_error, 0);
1218
1219                 list_for_each_entry_safe(gmp, gmpN, &conn->gnc_mdd_list,
1220                                          gmp_list) {
1221                         CDEBUG(D_NET,
1222                                "dev %p releasing held mdd "LPX64"."LPX64"\n",
1223                                conn->gnc_device, gmp->gmp_map_key.qword1,
1224                                gmp->gmp_map_key.qword2);
1225
1226                         atomic_dec(&dev->gnd_n_mdd_held);
1227                         kgnilnd_mem_mdd_release(conn->gnc_device->gnd_handle,
1228                                                 &gmp->gmp_map_key);
1229                         /* ignoring the return code - if kgni/ghal can't find it
1230                          * it must be released already */
1231
1232                         list_del_init(&gmp->gmp_list);
1233                         LIBCFS_FREE(gmp, sizeof(*gmp));
1234                 }
1235                 /* lose conn ref for purgatory */
1236                 kgnilnd_conn_decref(conn);
1237         }
1238 }
1239
1240 /* needs write_lock on kgnilnd_data.kgn_peer_conn_lock held */
1241 void
1242 kgnilnd_peer_increase_reconnect_locked(kgn_peer_t *peer)
1243 {
1244         int current_to;
1245
1246         current_to = peer->gnp_reconnect_interval;
1247
1248         /* we'll try to reconnect fast the first time, then back off */
1249         if (current_to == 0) {
1250                 peer->gnp_reconnect_time = jiffies - 1;
1251                 current_to = *kgnilnd_tunables.kgn_min_reconnect_interval;
1252         } else {
1253                 peer->gnp_reconnect_time = jiffies + cfs_time_seconds(current_to);
1254                 /* add 50% of min timeout & retry */
1255                 current_to += *kgnilnd_tunables.kgn_min_reconnect_interval / 2;
1256         }
1257
1258         current_to = MIN(current_to,
1259                                 *kgnilnd_tunables.kgn_max_reconnect_interval);
1260
1261         peer->gnp_reconnect_interval = current_to;
1262         CDEBUG(D_NET, "peer %s can reconnect at %lu interval %lu\n",
1263                libcfs_nid2str(peer->gnp_nid), peer->gnp_reconnect_time,
1264                peer->gnp_reconnect_interval);
1265 }
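A hedged worked example of the backoff arithmetic, assuming kgn_min_reconnect_interval = 60 and kgn_max_reconnect_interval = 600 (illustrative values only, not the actual tunable defaults):

/* hedged worked example with assumed tunable values:
 *   1st failure: interval was 0 -> retry immediately, interval becomes 60
 *   2nd failure: wait 60s, interval becomes 60 + 30 = 90
 *   3rd failure: wait 90s, interval becomes 120
 *   ... growing by 30s (half the minimum) per failure, capped at 600s */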
1266
1267 /* needs kgnilnd_data.kgn_peer_conn_lock held */
1268 kgn_peer_t *
1269 kgnilnd_find_peer_locked(lnet_nid_t nid)
1270 {
1271         struct list_head *peer_list = kgnilnd_nid2peerlist(nid);
1272         kgn_peer_t       *peer;
1273
1274         /* Chopping nid down to only NIDADDR using LNET_NIDADDR so we only
1275          * have a single peer per device instead of a peer per nid/net combo.
1276          */
1277
1278         list_for_each_entry(peer, peer_list, gnp_list) {
1279                 if (LNET_NIDADDR(nid) != LNET_NIDADDR(peer->gnp_nid))
1280                         continue;
1281
1282                 CDEBUG(D_NET, "got peer [%p] -> %s c %d (%d)\n",
1283                        peer, libcfs_nid2str(nid),
1284                        peer->gnp_connecting,
1285                        atomic_read(&peer->gnp_refcount));
1286                 return peer;
1287         }
1288         return NULL;
1289 }
1290
1291 /* need write_lock on kgn_peer_conn_lock */
1292 void
1293 kgnilnd_unlink_peer_locked(kgn_peer_t *peer)
1294 {
1295         LASSERTF(list_empty(&peer->gnp_conns),
1296                 "peer 0x%p->%s\n",
1297                  peer, libcfs_nid2str(peer->gnp_nid));
1298         LASSERTF(list_empty(&peer->gnp_tx_queue),
1299                 "peer 0x%p->%s\n",
1300                  peer, libcfs_nid2str(peer->gnp_nid));
1301         LASSERTF(kgnilnd_peer_active(peer),
1302                 "peer 0x%p->%s\n",
1303                  peer, libcfs_nid2str(peer->gnp_nid));
1304         CDEBUG(D_NET, "unlinking peer 0x%p->%s\n",
1305                 peer, libcfs_nid2str(peer->gnp_nid));
1306
1307         list_del_init(&peer->gnp_list);
1308         kgnilnd_data.kgn_peer_version++;
1309         kgnilnd_admin_decref(kgnilnd_data.kgn_npending_unlink);
1310         /* lose peerlist's ref */
1311         kgnilnd_peer_decref(peer);
1312 }
1313
1314 int
1315 kgnilnd_get_peer_info(int index,
1316                       kgn_peer_t **found_peer,
1317                       lnet_nid_t *id, __u32 *nic_addr,
1318                       int *refcount, int *connecting)
1319 {
1320         struct list_head  *ptmp;
1321         kgn_peer_t        *peer;
1322         int               i;
1323         int               rc = -ENOENT;
1324
1325         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1326
1327         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1328
1329                 list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
1330                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1331
1332                         if (index-- > 0)
1333                                 continue;
1334
1335                         CDEBUG(D_NET, "found peer %p (%s) at index %d\n",
1336                                peer, libcfs_nid2str(peer->gnp_nid), index);
1337
1338                         *found_peer  = peer;
1339                         *id          = peer->gnp_nid;
1340                         *nic_addr    = peer->gnp_host_id;
1341                         *refcount    = atomic_read(&peer->gnp_refcount);
1342                         *connecting  = peer->gnp_connecting;
1343
1344                         rc = 0;
1345                         goto out;
1346                 }
1347         }
1348 out:
1349         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1350         if (rc)
1351                 CDEBUG(D_NET, "no gni peer at index %d\n", index);
1352         return rc;
1353 }
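
     /* The usual consumer is the IOC_LIBCFS_GET_PEER ioctl below: userspace
      * (e.g. lctl) is expected to call with index 0, 1, 2, ... until this
      * returns -ENOENT, which walks every peer hash chain in order. */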
1354
1355 /* requires write_lock on kgn_peer_conn_lock held */
1356 void
1357 kgnilnd_add_peer_locked(lnet_nid_t nid, kgn_peer_t *new_stub_peer, kgn_peer_t **peerp)
1358 {
1359         kgn_peer_t        *peer, *peer2;
1360
1361         LASSERTF(new_stub_peer != NULL, "bad stub peer for nid %s\n",
1362                  libcfs_nid2str(nid));
1363
1364         peer2 = kgnilnd_find_peer_locked(nid);
1365         if (peer2 != NULL) {
1366                 /* A peer was created during the lock transition, so drop
1367                  * the new one we created */
1368                 kgnilnd_peer_decref(new_stub_peer);
1369                 peer = peer2;
1370         } else {
1371                 peer = new_stub_peer;
1372                 /* peer table takes existing ref on peer */
1373
1374                 LASSERTF(!kgnilnd_peer_active(peer),
1375                         "peer 0x%p->%s already in peer table\n",
1376                         peer, libcfs_nid2str(peer->gnp_nid));
1377                 list_add_tail(&peer->gnp_list,
1378                               kgnilnd_nid2peerlist(nid));
1379                 kgnilnd_data.kgn_peer_version++;
1380         }
1381
1382         LASSERTF(peer->gnp_net != NULL, "peer 0x%p->%s with NULL net\n",
1383                  peer, libcfs_nid2str(peer->gnp_nid));
1384         *peerp = peer;
1385 }
1386
1387 int
1388 kgnilnd_add_peer(kgn_net_t *net, lnet_nid_t nid, kgn_peer_t **peerp)
1389 {
1390         kgn_peer_t        *peer;
1391         int                rc;
1392         int                node_state;
1393         ENTRY;
1394
1395         if (nid == LNET_NID_ANY)
1396                 return -EINVAL;
1397
1398         node_state = kgnilnd_get_node_state(LNET_NIDADDR(nid));
1399
1400         /* NB - this will not block during normal operations -
1401          * the only writer of this is in the startup/shutdown path. */
1402         rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
1403         if (!rc) {
1404                 rc = -ESHUTDOWN;
1405                 RETURN(rc);
1406         }
1407         rc = kgnilnd_create_peer_safe(&peer, nid, net, node_state);
1408         if (rc != 0) {
1409                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1410                 RETURN(rc);
1411         }
1412
1413         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1414         up_read(&kgnilnd_data.kgn_net_rw_sem);
1415
1416         kgnilnd_add_peer_locked(nid, peer, peerp);
1417
1418         CDEBUG(D_NET, "peer 0x%p->%s connecting %d\n",
1419                *peerp, libcfs_nid2str((*peerp)->gnp_nid),
1420                (*peerp)->gnp_connecting);
1421
1422         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1423         RETURN(0);
1424 }
1425
1426 /* needs write_lock on kgn_peer_conn_lock */
1427 void
1428 kgnilnd_cancel_peer_connect_locked(kgn_peer_t *peer, struct list_head *zombies)
1429 {
1430         kgn_tx_t        *tx, *txn;
1431
1432         /* we do care about the state of gnp_connecting - we could be between
1433          * reconnect attempts, so try to find the dgram and cancel the TX
1434          * anyway. If we are in the process of posting, DON'T do anything;
1435          * once it fails or succeeds we can nuke the connect attempt.
1436          * We have no idea where in kgnilnd_post_dgram we are, so we can't
1437          * attempt to cancel until that function is done.
1438          */
1439
1440         /* make sure the peer isn't in the process of connecting or waiting for a connect */
1441         spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1442         if (!(list_empty(&peer->gnp_connd_list))) {
1443                 list_del_init(&peer->gnp_connd_list);
1444                 /* remove connd ref */
1445                 kgnilnd_peer_decref(peer);
1446         }
1447         spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1448
1449         if (peer->gnp_connecting == GNILND_PEER_POSTING || peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH) {
1450                 peer->gnp_connecting = GNILND_PEER_NEEDS_DEATH;
1451                 /* We are in the process of posting right now; the xchg set it up
1452                  * for us to cancel the connect, so we are finished for now */
1453         } else {
1454                 /* no need for an exchange: we hold the peer lock and it's ready for us to nuke */
1455                 LASSERTF(peer->gnp_connecting != GNILND_PEER_POSTING,
1456                         "Peer in invalid state 0x%p->%s, connecting %d\n",
1457                         peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
1458                 peer->gnp_connecting = GNILND_PEER_IDLE;
1459                 set_mb(peer->gnp_last_dgram_errno, -ETIMEDOUT);
1460                 kgnilnd_find_and_cancel_dgram(peer->gnp_net->gnn_dev,
1461                                                       peer->gnp_nid);
1462         }
1463
1464         /* The least we can do is nuke the tx's no matter what.... */
1465         list_for_each_entry_safe(tx, txn, &peer->gnp_tx_queue, tx_list) {
1466                 kgnilnd_tx_del_state_locked(tx, peer, NULL,
1467                                            GNILND_TX_ALLOCD);
1468                 list_add_tail(&tx->tx_list, zombies);
1469         }
1470 }
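
     /* NB: the TXs moved onto 'zombies' here are only unlinked from the peer;
      * the caller must complete them with kgnilnd_txlist_done() after dropping
      * kgn_peer_conn_lock (see kgnilnd_del_conn_or_peer and
      * kgnilnd_report_node_state below). */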
1471
1472 /* needs write_lock on kgn_peer_conn_lock */
1473 void
1474 kgnilnd_del_peer_locked(kgn_peer_t *peer, int error)
1475 {
1476         /* this peer could be passive and only held for purgatory,
1477          * take a ref to ensure it doesn't disappear in this function */
1478         kgnilnd_peer_addref(peer);
1479
1480         CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);
1481
1482         /* if purgatory release cleared it out, don't try again */
1483         if (kgnilnd_peer_active(peer)) {
1484                 /* always do this to allow kgnilnd_start_connect and
1485                  * kgnilnd_finish_connect to catch this before they
1486                  * wrap up their operations */
1487                 if (kgnilnd_can_unlink_peer_locked(peer)) {
1488                         /* already released purgatory, so only active
1489                          * conns hold it */
1490                         kgnilnd_unlink_peer_locked(peer);
1491                 } else {
1492                         kgnilnd_close_peer_conns_locked(peer, error);
1493                         /* peer unlinks itself when last conn is closed */
1494                 }
1495         }
1496
1497         /* we are done, release back to the wild */
1498         kgnilnd_peer_decref(peer);
1499 }
1500
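     /* Dispatch helper for the ioctl paths below: GNILND_DEL_CONN closes a
      * peer's established conns (IOC_LIBCFS_CLOSE_CONNECTION), GNILND_DEL_PEER
      * also unlinks the peer itself (IOC_LIBCFS_DEL_PEER), and
      * GNILND_CLEAR_PURGATORY just flushes purgatory and clears the reconnect
      * backoff (IOC_LIBCFS_PUSH_CONNECTION).  nid == LNET_NID_ANY acts as a
      * wildcard over the whole peer table. */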
1501 int
1502 kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
1503                           int error)
1504 {
1505         LIST_HEAD               (souls);
1506         LIST_HEAD               (zombies);
1507         struct list_head        *ptmp, *pnxt;
1508         kgn_peer_t              *peer;
1509         int                     lo;
1510         int                     hi;
1511         int                     i;
1512         int                     rc = -ENOENT;
1513
1514         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1515
1516         if (nid != LNET_NID_ANY)
1517                 lo = hi = kgnilnd_nid2peerlist(nid) - kgnilnd_data.kgn_peers;
1518         else {
1519                 lo = 0;
1520                 hi = *kgnilnd_tunables.kgn_peer_hash_size - 1;
1521                 /* wildcards always succeed */
1522                 rc = 0;
1523         }
1524
1525         for (i = lo; i <= hi; i++) {
1526                 list_for_each_safe(ptmp, pnxt, &kgnilnd_data.kgn_peers[i]) {
1527                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1528
1529                         LASSERTF(peer->gnp_net != NULL,
1530                                 "peer %p (%s) with NULL net\n",
1531                                  peer, libcfs_nid2str(peer->gnp_nid));
1532
1533                         if (net != NULL && peer->gnp_net != net)
1534                                 continue;
1535
1536                         if (!(nid == LNET_NID_ANY || LNET_NIDADDR(peer->gnp_nid) == LNET_NIDADDR(nid)))
1537                                 continue;
1538
1539                         /* In all cases, we want to stop any in-flight
1540                          * connect attempts */
1541                         kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1542
1543                         switch (command) {
1544                         case GNILND_DEL_CONN:
1545                                 kgnilnd_close_peer_conns_locked(peer, error);
1546                                 break;
1547                         case GNILND_DEL_PEER:
1548                                 peer->gnp_pending_unlink = 1;
1549                                 kgnilnd_admin_addref(kgnilnd_data.kgn_npending_unlink);
1550                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1551                                 kgnilnd_del_peer_locked(peer, error);
1552                                 break;
1553                         case GNILND_CLEAR_PURGATORY:
1554                                 /* Mark everything ready for detach; the reaper will clean up
1555                                  * once we release the kgn_peer_conn_lock
1556                                  */
1557                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1558                                 peer->gnp_last_errno = -EISCONN;
1559                                 /* clear the reconnect backoff so the peer can reconnect soon */
1560                                 peer->gnp_reconnect_time = 0;
1561                                 peer->gnp_reconnect_interval = 0;
1562                                 break;
1563                         default:
1564                                 CERROR("bad command %d\n", command);
1565                                 LBUG();
1566                         }
1567                         /* we matched something */
1568                         rc = 0;
1569                 }
1570         }
1571
1572         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1573
1574         /* nuke peer TX */
1575         kgnilnd_txlist_done(&zombies, error);
1576
1577         /* This function normally waits until the commands it initiated have completed,
1578          * since they have to work their way through the other threads. In the case of shutdown,
1579          * threads are not woken up until after this call is initiated, so we cannot wait; we just
1580          * need to return. The same applies to stack reset: we shouldn't wait, as the reset thread
1581          * handles the closing.
1582          */
1583
1584         CFS_RACE(CFS_FAIL_GNI_RACE_RESET);
1585
1586         if (error == -ENOTRECOVERABLE || error == -ESHUTDOWN) {
1587                 return rc;
1588         }
1589
1590         i = 4;
1591         while (atomic_read(&kgnilnd_data.kgn_npending_conns)   ||
1592                atomic_read(&kgnilnd_data.kgn_npending_detach)  ||
1593                atomic_read(&kgnilnd_data.kgn_npending_unlink)) {
1594
1595                 set_current_state(TASK_UNINTERRUPTIBLE);
1596                 schedule_timeout(cfs_time_seconds(1));
1597                 i++;
1598
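                     /* (i & -i) == i only when i is a power of two, so the
                      * warning is emitted with exponential backoff while we
                      * poll for the pending counts to drain */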
1599                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, "Waiting on %d peers %d closes %d detaches\n",
1600                                 atomic_read(&kgnilnd_data.kgn_npending_unlink),
1601                                 atomic_read(&kgnilnd_data.kgn_npending_conns),
1602                                 atomic_read(&kgnilnd_data.kgn_npending_detach));
1603         }
1604
1605         return rc;
1606 }
1607
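     /* Return the index'th ESTABLISHED conn in the peer table with a ref
      * held; the caller must drop it with kgnilnd_conn_decref() when done
      * (see IOC_LIBCFS_GET_CONN below).  Returns NULL once index runs past
      * the last established conn. */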
1608 kgn_conn_t *
1609 kgnilnd_get_conn_by_idx(int index)
1610 {
1611         kgn_peer_t        *peer;
1612         struct list_head  *ptmp;
1613         kgn_conn_t        *conn;
1614         struct list_head  *ctmp;
1615         int                i;
1616
1617
1618         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1619                 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1620                 list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
1621
1622                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1623
1624                         list_for_each(ctmp, &peer->gnp_conns) {
1625                                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
1626
1627                                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1628                                         continue;
1629
1630                                 if (index-- > 0)
1631                                         continue;
1632
1633                                 CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
1634                                        libcfs_nid2str(conn->gnc_peer->gnp_nid),
1635                                        atomic_read(&conn->gnc_refcount));
1636                                 kgnilnd_conn_addref(conn);
1637                                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1638                                 return conn;
1639                         }
1640                 }
1641                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1642         }
1643
1644         return NULL;
1645 }
1646
1647 int
1648 kgnilnd_get_conn_info(kgn_peer_t *peer,
1649                       int *device_id, __u64 *peerstamp,
1650                       int *tx_seq, int *rx_seq,
1651                       int *fmaq_len, int *nfma, int *nrdma)
1652 {
1653         kgn_conn_t        *conn;
1654         int               rc = 0;
1655
1656         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1657
1658         conn = kgnilnd_find_conn_locked(peer);
1659         if (conn == NULL) {
1660                 rc = -ENOENT;
1661                 goto out;
1662         }
1663
1664         *device_id = conn->gnc_device->gnd_host_id;
1665         *peerstamp = conn->gnc_peerstamp;
1666         *tx_seq = atomic_read(&conn->gnc_tx_seq);
1667         *rx_seq = atomic_read(&conn->gnc_rx_seq);
1668         *fmaq_len = kgnilnd_count_list(&conn->gnc_fmaq);
1669         *nfma = atomic_read(&conn->gnc_nlive_fma);
1670         *nrdma = atomic_read(&conn->gnc_nlive_rdma);
1671 out:
1672         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1673         return rc;
1674 }
1675
1676 /* needs write_lock on kgn_peer_conn_lock */
1677 int
1678 kgnilnd_close_peer_conns_locked(kgn_peer_t *peer, int why)
1679 {
1680         kgn_conn_t         *conn;
1681         struct list_head   *ctmp, *cnxt;
1682         int                 count = 0;
1683
1684         list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
1685                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
1686
1687                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1688                         continue;
1689
1690                 count++;
1691                 /* we mark gnc_needs_closing and increment kgn_npending_conns so that
1692                  * kgnilnd_del_conn_or_peer can wait on the other threads closing
1693                  * and cleaning up the connection.
1694                  */
1695                 if (!conn->gnc_needs_closing) {
1696                         conn->gnc_needs_closing = 1;
1697                         kgnilnd_admin_addref(kgnilnd_data.kgn_npending_conns);
1698                 }
1699                 kgnilnd_close_conn_locked(conn, why);
1700         }
1701         return count;
1702 }
1703
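     /* Handle an RCA node up/down event for 'nid'.  On a down event we cancel
      * any connect attempt, close the live conn with -ENETRESET and notify
      * LNet of the failure, creating a stub peer first if we have never
      * spoken to that nid.  Up events only record the time; no peer is
      * created for them. */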
1704 int
1705 kgnilnd_report_node_state(lnet_nid_t nid, int down)
1706 {
1707         int         rc;
1708         kgn_peer_t  *peer, *new_peer;
1709         LIST_HEAD(zombies);
1710
1711         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1712         peer = kgnilnd_find_peer_locked(nid);
1713
1714         if (peer == NULL) {
1715                 int       i;
1716                 int       found_net = 0;
1717                 kgn_net_t *net;
1718
1719                 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1720
1721                 /* Don't add a peer for node up events */
1722                 if (down == GNILND_RCA_NODE_UP) {
1723                         return 0;
1724                 }
1725
1726                 /* find any valid net - we don't care which one... */
1727                 down_read(&kgnilnd_data.kgn_net_rw_sem);
1728                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
1729                         list_for_each_entry(net, &kgnilnd_data.kgn_nets[i],
1730                                             gnn_list) {
1731                                 found_net = 1;
1732                                 break;
1733                         }
1734
1735                         if (found_net) {
1736                                 break;
1737                         }
1738                 }
1739                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1740
1741                 if (!found_net) {
1742                         CNETERR("Could not find a net for nid %lld\n", nid);
1743                         return 1;
1744                 }
1745
1746                 /* The nid passed in does not yet contain the net portion.
1747                  * Let's build it up now
1748                  */
1749                 nid = LNET_MKNID(LNET_NIDNET(net->gnn_ni->ni_nid), nid);
1750                 rc = kgnilnd_add_peer(net, nid, &new_peer);
1751
1752                 if (rc) {
1753                         CNETERR("Could not add peer for nid %lld, rc %d\n",
1754                                 nid, rc);
1755                         return 1;
1756                 }
1757
1758                 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1759                 peer = kgnilnd_find_peer_locked(nid);
1760
1761                 if (peer == NULL) {
1762                         CNETERR("Could not find peer for nid %lld\n", nid);
1763                         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1764                         return 1;
1765                 }
1766         }
1767
1768         peer->gnp_down = down;
1769
1770         if (down == GNILND_RCA_NODE_DOWN) {
1771                 kgn_conn_t *conn;
1772
1773                 peer->gnp_down_event_time = jiffies;
1774                 kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1775                 conn = kgnilnd_find_conn_locked(peer);
1776
1777                 if (conn != NULL) {
1778                         kgnilnd_close_conn_locked(conn, -ENETRESET);
1779                 }
1780         } else {
1781                 peer->gnp_up_event_time = jiffies;
1782         }
1783
1784         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1785
1786         if (down == GNILND_RCA_NODE_DOWN) {
1787                 /* using ENETRESET so we don't get messages from
1788                  * kgnilnd_tx_done
1789                  */
1790                 kgnilnd_txlist_done(&zombies, -ENETRESET);
1791                 kgnilnd_peer_notify(peer, -ECONNRESET, 0);
1792                 LCONSOLE_INFO("Received down event for nid %lld\n", nid);
1793         }
1794
1795         return 0;
1796 }
1797
1798 int
1799 kgnilnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
1800 {
1801         struct libcfs_ioctl_data *data = arg;
1802         kgn_net_t                *net = ni->ni_data;
1803         int                       rc = -EINVAL;
1804
1805         LASSERT(ni == net->gnn_ni);
1806
1807         switch (cmd) {
1808         case IOC_LIBCFS_GET_PEER: {
1809                 lnet_nid_t   nid = 0;
1810                 kgn_peer_t  *peer = NULL;
1811                 __u32 nic_addr = 0;
1812                 __u64 peerstamp = 0;
1813                 int peer_refcount = 0, peer_connecting = 0;
1814                 int device_id = 0;
1815                 int tx_seq = 0, rx_seq = 0;
1816                 int fmaq_len = 0, nfma = 0, nrdma = 0;
1817
1818                 rc = kgnilnd_get_peer_info(data->ioc_count, &peer,
1819                                            &nid, &nic_addr, &peer_refcount,
1820                                            &peer_connecting);
1821                 if (rc)
1822                         break;
1823
1824                 /* Barf */
1825                 /* LNET_MKNID is used to mask from LNet the multiplexing/demultiplexing of connections and peers.
1826                  * LNet assumes a conn and peer per net; LNET_MKNID/LNET_NIDADDR lets us show LNet what it
1827                  * wants to see instead of the underlying network that is being used to send the data
1828                  */
1829                 data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(nid));
1830                 data->ioc_flags  = peer_connecting;
1831                 data->ioc_count  = peer_refcount;
1832
1833                 rc = kgnilnd_get_conn_info(peer, &device_id, &peerstamp,
1834                                            &tx_seq, &rx_seq, &fmaq_len,
1835                                            &nfma, &nrdma);
1836
1837                 /* This is allowable - a persistent peer might not
1838                  * have a connection */
1839                 if (rc) {
1840                         /* flag to indicate we are not connected -
1841                          * need to print as such */
1842                         data->ioc_flags |= (1<<16);
1843                         rc = 0;
1844                 } else {
1845                         /* still barf */
1846                         data->ioc_net = device_id;
1847                         data->ioc_u64[0] = peerstamp;
1848                         data->ioc_u32[0] = fmaq_len;
1849                         data->ioc_u32[1] = nfma;
1850                         data->ioc_u32[2] = tx_seq;
1851                         data->ioc_u32[3] = rx_seq;
1852                         data->ioc_u32[4] = nrdma;
1853                 }
1854                 break;
1855         }
1856         case IOC_LIBCFS_ADD_PEER: {
1857                 /* just a dummy value to allow using the common interface */
1858                 kgn_peer_t      *peer;
1859                 rc = kgnilnd_add_peer(net, data->ioc_nid, &peer);
1860                 break;
1861         }
1862         case IOC_LIBCFS_DEL_PEER: {
1863                 /* NULL is passed in so it affects all peers in existence without regard to network
1864                  * as the peer may not exist on the network LNET believes it to be on.
1865                  */
1866                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1867                                               GNILND_DEL_PEER, -EUCLEAN);
1868                 break;
1869         }
1870         case IOC_LIBCFS_GET_CONN: {
1871                 kgn_conn_t *conn = kgnilnd_get_conn_by_idx(data->ioc_count);
1872
1873                 if (conn == NULL)
1874                         rc = -ENOENT;
1875                 else {
1876                         rc = 0;
1877                         /* LNET_MKNID is used to build the correct address based on what LNET wants to see instead of
1878                          * the generic connection that is used to send the data
1879                          */
1880                         data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(conn->gnc_peer->gnp_nid));
1881                         data->ioc_u32[0] = conn->gnc_device->gnd_id;
1882                         kgnilnd_conn_decref(conn);
1883                 }
1884                 break;
1885         }
1886         case IOC_LIBCFS_CLOSE_CONNECTION: {
1887                 /* use error = -ENETRESET to indicate it was lctl disconnect */
1888                 /* NULL is passed in so it affects all the nets as the connection is virtual
1889                  * and may not exist on the network LNET believes it to be on.
1890                  */
1891                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1892                                               GNILND_DEL_CONN, -ENETRESET);
1893                 break;
1894         }
1895         case IOC_LIBCFS_PUSH_CONNECTION: {
1896                 /* we use this to flush purgatory */
1897                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1898                                               GNILND_CLEAR_PURGATORY, -EUCLEAN);
1899                 break;
1900         }
1901         case IOC_LIBCFS_REGISTER_MYNID: {
1902                 /* Ignore if this is a noop */
1903                 if (data->ioc_nid == ni->ni_nid) {
1904                         rc = 0;
1905                 } else {
1906                         CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
1907                                libcfs_nid2str(data->ioc_nid),
1908                                libcfs_nid2str(ni->ni_nid));
1909                         rc = -EINVAL;
1910                 }
1911                 break;
1912         }
1913         }
1914
1915         return rc;
1916 }
1917
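     /* lnd_query handler: report our best guess of when 'nid' was last alive
      * in *when.  If we have no usable conn we also fire a NOOP TX so a new
      * connection gets set up before LNet's ni_peer_timeout declares the peer
      * dead. */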
1918 void
1919 kgnilnd_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
1920 {
1921         kgn_net_t               *net = ni->ni_data;
1922         kgn_tx_t                *tx;
1923         kgn_peer_t              *peer = NULL;
1924         kgn_conn_t              *conn = NULL;
1925         lnet_process_id_t       id = {
1926                 .nid = nid,
1927                 .pid = LNET_PID_LUSTRE,
1928         };
1929         ENTRY;
1930
1931         /* I expect to find him, so only take a read lock */
1932         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1933         peer = kgnilnd_find_peer_locked(nid);
1934         if (peer != NULL) {
1935                 /* LIE if in a quiesce - we will update the timeouts after,
1936                  * but we don't want sends failing during it */
1937                 if (kgnilnd_data.kgn_quiesce_trigger) {
1938                         *when = jiffies;
1939                         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1940                         GOTO(out, 0);
1941                 }
1942
1943                 /* Update to best guess, might refine on later checks */
1944                 *when = peer->gnp_last_alive;
1945
1946                 /* we have a peer, how about a conn? */
1947                 conn = kgnilnd_find_conn_locked(peer);
1948
1949                 if (conn == NULL)  {
1950                         /* if there is no conn, check peer last errno to see if clean disconnect
1951                          * - if it was, we lie to LNet because we believe a TX would complete
1952                          * on reconnect */
1953                         if (kgnilnd_conn_clean_errno(peer->gnp_last_errno)) {
1954                                 *when = jiffies;
1955                         }
1956                         /* we still want to fire a TX and new conn in this case */
1957                 } else {
1958                         /* gnp_last_alive is valid, run for the hills */
1959                         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1960                         GOTO(out, 0);
1961                 }
1962         }
1963         /* if we get here, either we have no peer or no conn for him, so fire off
1964          * new TX to trigger conn setup */
1965         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1966
1967         /* if we couldn't find him, we'll fire up a TX and get connected -
1968          * if we don't do this, after ni_peer_timeout, LNet will declare him dead.
1969          * So really we treat kgnilnd_query as a bit of a 'connect now' type
1970          * event because LNet will only query when it wants to send.
1971          *
1972          * Use a real TX for this to get the proper gnp_tx_queue behavior, etc.
1973          * Normally we'd use kgnilnd_send_ctlmsg for this, but we don't really
1974          * care that this goes out quickly since we already know we need a new conn
1975          * formed */
1976         if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
1977                 return;
1978
1979         tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, ni->ni_nid);
1980         if (tx != NULL) {
1981                 kgnilnd_launch_tx(tx, net, &id);
1982         }
1983 out:
1984         CDEBUG(D_NETTRACE, "peer 0x%p->%s when %lu\n", peer,
1985                libcfs_nid2str(nid), *when);
1986         EXIT;
1987 }
1988
1989 int
1990 kgnilnd_dev_init(kgn_device_t *dev)
1991 {
1992         gni_return_t      rrc;
1993         int               rc = 0;
1994         unsigned int      cq_size;
1995         ENTRY;
1996
1997         /* size of these CQs should be able to accommodate the outgoing
1998          * RDMA and SMSG transactions.  Since we don't really know what we
1999          * need here, we'll take credits * 2 * 3 to allow a bunch.
2000          * We need to dig into this more with the performance work. */
2001         cq_size = *kgnilnd_tunables.kgn_credits * 2 * 3;
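             /* e.g. with a (hypothetical) setting of 256 credits this requests
              * 256 * 2 * 3 = 1536 CQ entries */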
2002
2003         rrc = kgnilnd_cdm_create(dev->gnd_id, *kgnilnd_tunables.kgn_ptag,
2004                                  GNI_JOB_CREATE_COOKIE(GNI_PKEY_LND, 0), 0,
2005                                  &dev->gnd_domain);
2006         if (rrc != GNI_RC_SUCCESS) {
2007                 CERROR("Can't create CDM %d (%d)\n", dev->gnd_id, rrc);
2008                 GOTO(failed, rc = -ENODEV);
2009         }
2010
2011         rrc = kgnilnd_cdm_attach(dev->gnd_domain, dev->gnd_id,
2012                                  &dev->gnd_host_id, &dev->gnd_handle);
2013         if (rrc != GNI_RC_SUCCESS) {
2014                 CERROR("Can't attach CDM to device %d (%d)\n",
2015                         dev->gnd_id, rrc);
2016                 GOTO(failed, rc = -ENODEV);
2017         }
2018
2019         /* a bit gross, but not much we can do - Aries Sim doesn't have
2020          * hardcoded NIC/NID that we can use */
2021         rc = kgnilnd_setup_nic_translation(dev->gnd_host_id);
2022         if (rc != 0)
2023                 GOTO(failed, rc = -ENODEV);
2024
2025         /* only dev 0 gets the errors - no need to reset the stack twice
2026          * - this works because we have a single PTAG; if we had more,
2027          * then we'd need to have multiple handlers */
2028         if (dev->gnd_id == 0) {
2029                 rrc = kgnilnd_subscribe_errors(dev->gnd_handle,
2030                                                 GNI_ERRMASK_CRITICAL |
2031                                                 GNI_ERRMASK_UNKNOWN_TRANSACTION,
2032                                               0, NULL, kgnilnd_critical_error,
2033                                               &dev->gnd_err_handle);
2034                 if (rrc != GNI_RC_SUCCESS) {
2035                         CERROR("Can't subscribe for errors on device %d: rc %d\n",
2036                                 dev->gnd_id, rrc);
2037                         GOTO(failed, rc = -ENODEV);
2038                 }
2039
2040                 rc = kgnilnd_set_quiesce_callback(dev->gnd_handle,
2041                                                   kgnilnd_quiesce_end_callback);
2042                 if (rc != GNI_RC_SUCCESS) {
2043                         CERROR("Can't subscribe for quiesce callback on device %d: rc %d\n",
2044                                 dev->gnd_id, rrc);
2045                                 dev->gnd_id, rc);
2046                 }
2047         }
2048
2049         rc = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_IP, &kgnilnd_data.kgn_sock);
2050         if (rc < 0) {
2051                 CERROR("sock_create returned %d\n", rc);
2052                 GOTO(failed, rc);
2053         }
2054
2055         rc = kgnilnd_nicaddr_to_nid(dev->gnd_host_id, &dev->gnd_nid);
2056         if (rc < 0) {
2057                 /* log messages during startup */
2058                 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
2059                         CERROR("couldn't translate host_id 0x%x to nid. rc %d\n",
2060                                 dev->gnd_host_id, rc);
2061                 }
2062                 GOTO(failed, rc = -ESRCH);
2063         }
2064         CDEBUG(D_NET, "NIC %x -> NID %d\n", dev->gnd_host_id, dev->gnd_nid);
2065
2066         rrc = kgnilnd_cq_create(dev->gnd_handle, *kgnilnd_tunables.kgn_credits,
2067                                 0, kgnilnd_device_callback,
2068                                 dev->gnd_id, &dev->gnd_snd_rdma_cqh);
2069         if (rrc != GNI_RC_SUCCESS) {
2070                 CERROR("Can't create rdma send cq size %u for device "
2071                        "%d (%d)\n", cq_size, dev->gnd_id, rrc);
2072                 GOTO(failed, rc = -EINVAL);
2073         }
2074
2075         rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
2076                         0, kgnilnd_device_callback, dev->gnd_id,
2077                         &dev->gnd_snd_fma_cqh);
2078         if (rrc != GNI_RC_SUCCESS) {
2079                 CERROR("Can't create fma send cq size %u for device %d (%d)\n",
2080                        cq_size, dev->gnd_id, rrc);
2081                 GOTO(failed, rc = -EINVAL);
2082         }
2083
2084         /* This one we size differently - overflows are possible and it needs to be
2085          * sized based on machine size */
2086         rrc = kgnilnd_cq_create(dev->gnd_handle,
2087                         *kgnilnd_tunables.kgn_fma_cq_size,
2088                         0, kgnilnd_device_callback, dev->gnd_id,
2089                         &dev->gnd_rcv_fma_cqh);
2090         if (rrc != GNI_RC_SUCCESS) {
2091                 CERROR("Can't create fma cq size %d for device %d (%d)\n",
2092                        *kgnilnd_tunables.kgn_fma_cq_size, dev->gnd_id, rrc);
2093                 GOTO(failed, rc = -EINVAL);
2094         }
2095
2096         RETURN(0);
2097
2098 failed:
2099         kgnilnd_dev_fini(dev);
2100         RETURN(rc);
2101 }
2102
2103 void
2104 kgnilnd_dev_fini(kgn_device_t *dev)
2105 {
2106         gni_return_t rrc;
2107         ENTRY;
2108
2109         /* At quiesce or rest time, need to loop through and clear gnd_ready_conns ?*/
2110         LASSERTF(list_empty(&dev->gnd_ready_conns) &&
2111                  list_empty(&dev->gnd_map_tx) &&
2112                  list_empty(&dev->gnd_rdmaq),
2113                  "dev 0x%p ready_conns %d@0x%p map_tx %d@0x%p rdmaq %d@0x%p\n",
2114                  dev, kgnilnd_count_list(&dev->gnd_ready_conns), &dev->gnd_ready_conns,
2115                  kgnilnd_count_list(&dev->gnd_map_tx), &dev->gnd_map_tx,
2116                  kgnilnd_count_list(&dev->gnd_rdmaq), &dev->gnd_rdmaq);
2117
2118         /* These should follow from tearing down all connections */
2119         LASSERTF(dev->gnd_map_nphys == 0 && dev->gnd_map_physnop == 0,
2120                 "%d physical mappings of %d pages still mapped\n",
2121                  dev->gnd_map_nphys, dev->gnd_map_physnop);
2122
2123         LASSERTF(dev->gnd_map_nvirt == 0 && dev->gnd_map_virtnob == 0,
2124                 "%d virtual mappings of "LPU64" bytes still mapped\n",
2125                  dev->gnd_map_nvirt, dev->gnd_map_virtnob);
2126
2127         LASSERTF(atomic_read(&dev->gnd_n_mdd) == 0 &&
2128                  atomic_read(&dev->gnd_n_mdd_held) == 0 &&
2129                  atomic64_read(&dev->gnd_nbytes_map) == 0,
2130                 "%d SMSG mappings of %ld bytes still mapped or held %d\n",
2131                  atomic_read(&dev->gnd_n_mdd),
2132                  atomic64_read(&dev->gnd_nbytes_map), atomic_read(&dev->gnd_n_mdd_held));
2133
2134         LASSERT(list_empty(&dev->gnd_map_list));
2135
2136         /* What other assertions needed to ensure all connections torn down ? */
2137
2138         /* check all counters == 0 (EP, MDD, etc) */
2139
2140         /* if we are resetting due to quiesce (stack reset), don't check
2141          * thread states */
2142         LASSERTF(kgnilnd_data.kgn_quiesce_trigger ||
2143                 atomic_read(&kgnilnd_data.kgn_nthreads) == 0,
2144                 "tried to shutdown with threads active\n");
2145
2146         if (dev->gnd_rcv_fma_cqh) {
2147                 rrc = kgnilnd_cq_destroy(dev->gnd_rcv_fma_cqh);
2148                 LASSERTF(rrc == GNI_RC_SUCCESS,
2149                         "bad rc from gni_cq_destroy on rcv_fma_cqh: %d\n", rrc);
2150                 dev->gnd_rcv_fma_cqh = NULL;
2151         }
2152
2153         if (dev->gnd_snd_rdma_cqh) {
2154                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_rdma_cqh);
2155                 LASSERTF(rrc == GNI_RC_SUCCESS,
2156                         "bad rc from gni_cq_destroy on send_rdma_cqh: %d\n", rrc);
2157                 dev->gnd_snd_rdma_cqh = NULL;
2158         }
2159
2160         if (dev->gnd_snd_fma_cqh) {
2161                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_fma_cqh);
2162                 LASSERTF(rrc == GNI_RC_SUCCESS,
2163                         "bad rc from gni_cq_destroy on snd_fma_cqh: %d\n", rrc);
2164                 dev->gnd_snd_fma_cqh = NULL;
2165         }
2166
2167         if (dev->gnd_err_handle) {
2168                 rrc = kgnilnd_release_errors(dev->gnd_err_handle);
2169                 LASSERTF(rrc == GNI_RC_SUCCESS,
2170                         "bad rc from gni_release_errors: %d\n", rrc);
2171                 dev->gnd_err_handle = NULL;
2172         }
2173
2174         if (dev->gnd_domain) {
2175                 rrc = kgnilnd_cdm_destroy(dev->gnd_domain);
2176                 LASSERTF(rrc == GNI_RC_SUCCESS,
2177                         "bad rc from gni_cdm_destroy: %d\n", rrc);
2178                 dev->gnd_domain = NULL;
2179         }
2180
2181         if (kgnilnd_data.kgn_sock)
2182                 sock_release(kgnilnd_data.kgn_sock);
2183
2184         EXIT;
2185 }
2186
2187 int kgnilnd_base_startup(void)
2188 {
2189         struct timeval       tv;
2190         int                  pkmem = atomic_read(&libcfs_kmemory);
2191         int                  rc;
2192         int                  i;
2193         kgn_device_t        *dev;
2194         struct task_struct  *thrd;
2195
2196 #if defined(CONFIG_CRAY_XT) && !defined(CONFIG_CRAY_COMPUTE)
2197         /* limit how much memory can be allocated for fma blocks in
2198          * instances where many nodes need to reconnect at the same time */
2199         struct sysinfo si;
2200         si_meminfo(&si);
2201         kgnilnd_data.free_pages_limit = si.totalram/4;
2202 #endif
2203
2204         ENTRY;
2205
2206         LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_NOTHING,
2207                 "init %d\n", kgnilnd_data.kgn_init);
2208
2209         /* zero pointers, flags etc */
2210         memset(&kgnilnd_data, 0, sizeof(kgnilnd_data));
2211         kgnilnd_check_kgni_version();
2212
2213         /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
2214          * a unique (for all time) connstamp so we can uniquely identify
2215          * the sender.  The connstamp is an incrementing counter
2216          * initialised with seconds + microseconds at startup time.  So we
2217          * rely on NOT creating connections more frequently on average than
2218          * 1MHz to ensure we don't use old connstamps when we reboot. */
2219         do_gettimeofday(&tv);
2220         kgnilnd_data.kgn_connstamp =
2221                  kgnilnd_data.kgn_peerstamp =
2222                         (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
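             /* e.g. tv_sec = 1500000000, tv_usec = 123456 yields a connstamp of
              * 1500000000123456 - effectively microseconds since the epoch */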
2223
2224         init_rwsem(&kgnilnd_data.kgn_net_rw_sem);
2225
2226         for (i = 0; i < GNILND_MAXDEVS; i++) {
2227                 kgn_device_t  *dev = &kgnilnd_data.kgn_devices[i];
2228
2229                 dev->gnd_id = i;
2230                 INIT_LIST_HEAD(&dev->gnd_ready_conns);
2231                 INIT_LIST_HEAD(&dev->gnd_map_tx);
2232                 INIT_LIST_HEAD(&dev->gnd_fma_buffs);
2233                 mutex_init(&dev->gnd_cq_mutex);
2234                 mutex_init(&dev->gnd_fmablk_mutex);
2235                 spin_lock_init(&dev->gnd_fmablk_lock);
2236                 init_waitqueue_head(&dev->gnd_waitq);
2237                 init_waitqueue_head(&dev->gnd_dgram_waitq);
2238                 init_waitqueue_head(&dev->gnd_dgping_waitq);
2239                 spin_lock_init(&dev->gnd_lock);
2240                 INIT_LIST_HEAD(&dev->gnd_map_list);
2241                 spin_lock_init(&dev->gnd_map_lock);
2242                 atomic_set(&dev->gnd_nfmablk, 0);
2243                 atomic_set(&dev->gnd_fmablk_vers, 1);
2244                 atomic_set(&dev->gnd_neps, 0);
2245                 atomic_set(&dev->gnd_canceled_dgrams, 0);
2246                 INIT_LIST_HEAD(&dev->gnd_connd_peers);
2247                 spin_lock_init(&dev->gnd_connd_lock);
2248                 spin_lock_init(&dev->gnd_dgram_lock);
2249                 spin_lock_init(&dev->gnd_rdmaq_lock);
2250                 INIT_LIST_HEAD(&dev->gnd_rdmaq);
2251                 init_rwsem(&dev->gnd_conn_sem);
2252
2253                 /* alloc & setup nid based dgram table */
2254                 LIBCFS_ALLOC(dev->gnd_dgrams,
2255                             sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2256
2257                 if (dev->gnd_dgrams == NULL)
2258                         GOTO(failed, rc = -ENOMEM);
2259
2260                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2261                         INIT_LIST_HEAD(&dev->gnd_dgrams[i]);
2262                 }
2263                 atomic_set(&dev->gnd_ndgrams, 0);
2264                 atomic_set(&dev->gnd_nwcdgrams, 0);
2265                 /* setup timer for RDMAQ processing */
2266                 setup_timer(&dev->gnd_rdmaq_timer, kgnilnd_schedule_device_timer,
2267                             (unsigned long)dev);
2268
2269                 /* setup timer for mapping processing */
2270                 setup_timer(&dev->gnd_map_timer, kgnilnd_schedule_device_timer,
2271                             (unsigned long)dev);
2272
2273         }
2274
2275         /* CQID 0 isn't allowed, set to MAX_MSG_ID - 1 to check for conflicts early */
2276         kgnilnd_data.kgn_next_cqid = GNILND_MAX_MSG_ID - 1;
2277         kgnilnd_data.kgn_new_min_timeout = *kgnilnd_tunables.kgn_timeout;
2278         init_waitqueue_head(&kgnilnd_data.kgn_reaper_waitq);
2279         init_waitqueue_head(&kgnilnd_data.kgn_ruhroh_waitq);
2280         spin_lock_init(&kgnilnd_data.kgn_reaper_lock);
2281
2282         mutex_init(&kgnilnd_data.kgn_quiesce_mutex);
2283         atomic_set(&kgnilnd_data.kgn_nquiesce, 0);
2284         atomic_set(&kgnilnd_data.kgn_npending_conns, 0);
2285         atomic_set(&kgnilnd_data.kgn_npending_unlink, 0);
2286         atomic_set(&kgnilnd_data.kgn_npending_detach, 0);
2287         atomic_set(&kgnilnd_data.kgn_rev_offset, 0);
2288         atomic_set(&kgnilnd_data.kgn_rev_length, 0);
2289         atomic_set(&kgnilnd_data.kgn_rev_copy_buff, 0);
2290
2291         /* OK to call kgnilnd_api_shutdown() to cleanup now */
2292         kgnilnd_data.kgn_init = GNILND_INIT_DATA;
2293         try_module_get(THIS_MODULE);
2294
2295         rwlock_init(&kgnilnd_data.kgn_peer_conn_lock);
2296
2297         LIBCFS_ALLOC(kgnilnd_data.kgn_peers,
2298                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2299
2300         if (kgnilnd_data.kgn_peers == NULL)
2301                 GOTO(failed, rc = -ENOMEM);
2302
2303         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2304                 INIT_LIST_HEAD(&kgnilnd_data.kgn_peers[i]);
2305         }
2306
2307         LIBCFS_ALLOC(kgnilnd_data.kgn_conns,
2308                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2309
2310         if (kgnilnd_data.kgn_conns == NULL)
2311                 GOTO(failed, rc = -ENOMEM);
2312
2313         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2314                 INIT_LIST_HEAD(&kgnilnd_data.kgn_conns[i]);
2315         }
2316
2317         LIBCFS_ALLOC(kgnilnd_data.kgn_nets,
2318                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_net_hash_size);
2319
2320         if (kgnilnd_data.kgn_nets == NULL)
2321                 GOTO(failed, rc = -ENOMEM);
2322
2323         for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
2324                 INIT_LIST_HEAD(&kgnilnd_data.kgn_nets[i]);
2325         }
2326
2327         kgnilnd_data.kgn_mbox_cache =
2328                 kmem_cache_create("kgn_mbox_block", KMALLOC_MAX_SIZE, 0,
2329                                   SLAB_HWCACHE_ALIGN, NULL);
2330         if (kgnilnd_data.kgn_mbox_cache == NULL) {
2331                 CERROR("Can't create slab for physical mbox blocks\n");
2332                 GOTO(failed, rc = -ENOMEM);
2333         }
2334
2335         kgnilnd_data.kgn_rx_cache =
2336                 kmem_cache_create("kgn_rx_t", sizeof(kgn_rx_t), 0, 0, NULL);
2337         if (kgnilnd_data.kgn_rx_cache == NULL) {
2338                 CERROR("Can't create slab for kgn_rx_t descriptors\n");
2339                 GOTO(failed, rc = -ENOMEM);
2340         }
2341
2342         kgnilnd_data.kgn_tx_cache =
2343                 kmem_cache_create("kgn_tx_t", sizeof(kgn_tx_t), 0, 0, NULL);
2344         if (kgnilnd_data.kgn_tx_cache == NULL) {
2345                 CERROR("Can't create slab for kgn_tx_t\n");
2346                 GOTO(failed, rc = -ENOMEM);
2347         }
2348
2349         kgnilnd_data.kgn_tx_phys_cache =
2350                 kmem_cache_create("kgn_tx_phys",
2351                                    LNET_MAX_IOV * sizeof(gni_mem_segment_t),
2352                                    0, 0, NULL);
2353         if (kgnilnd_data.kgn_tx_phys_cache == NULL) {
2354                 CERROR("Can't create slab for kgn_tx_phys\n");
2355                 GOTO(failed, rc = -ENOMEM);
2356         }
2357
2358         kgnilnd_data.kgn_dgram_cache =
2359                 kmem_cache_create("kgn_dgram_t", sizeof(kgn_dgram_t), 0, 0, NULL);
2360         if (kgnilnd_data.kgn_dgram_cache == NULL) {
2361                 CERROR("Can't create slab for outgoing datagrams\n");
2362                 GOTO(failed, rc = -ENOMEM);
2363         }
2364
2365         /* allocate a MAX_IOV array of page pointers for each cpu */
2366         kgnilnd_data.kgn_cksum_map_pages = kmalloc(num_possible_cpus() * sizeof (struct page *),
2367                                                    GFP_KERNEL);
2368         if (kgnilnd_data.kgn_cksum_map_pages == NULL) {
2369                 CERROR("Can't allocate vmap cksum pages\n");
2370                 GOTO(failed, rc = -ENOMEM);
2371         }
2372         kgnilnd_data.kgn_cksum_npages = num_possible_cpus();
2373         memset(kgnilnd_data.kgn_cksum_map_pages, 0,
2374                 kgnilnd_data.kgn_cksum_npages * sizeof (struct page *));
2375
2376         for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
2377                 kgnilnd_data.kgn_cksum_map_pages[i] = kmalloc(LNET_MAX_IOV * sizeof (struct page *),
2378                                                               GFP_KERNEL);
2379                 if (kgnilnd_data.kgn_cksum_map_pages[i] == NULL) {
2380                         CERROR("Can't allocate vmap cksum pages for cpu %d\n", i);
2381                         GOTO(failed, rc = -ENOMEM);
2382                 }
2383         }
2384
2385         LASSERT(kgnilnd_data.kgn_ndevs == 0);
2386
2387         /* Use all available GNI devices */
2388         for (i = 0; i < GNILND_MAXDEVS; i++) {
2389                 dev = &kgnilnd_data.kgn_devices[kgnilnd_data.kgn_ndevs];
2390
2391                 rc = kgnilnd_dev_init(dev);
2392                 if (rc == 0) {
2393                         /* Increment here so base_shutdown cleans it up */
2394                         kgnilnd_data.kgn_ndevs++;
2395
2396                         rc = kgnilnd_allocate_phys_fmablk(dev);
2397                         if (rc)
2398                                 GOTO(failed, rc);
2399                 }
2400         }
2401
2402         if (kgnilnd_data.kgn_ndevs == 0) {
2403                 CERROR("Can't initialise any GNI devices\n");
2404                 GOTO(failed, rc = -ENODEV);
2405         }
2406
2407         rc = kgnilnd_thread_start(kgnilnd_reaper, NULL, "kgnilnd_rpr", 0);
2408         if (rc != 0) {
2409                 CERROR("Can't spawn gnilnd reaper: %d\n", rc);
2410                 GOTO(failed, rc);
2411         }
2412
2413         rc = kgnilnd_start_rca_thread();
2414         if (rc != 0) {
2415                 CERROR("Can't spawn gnilnd rca: %d\n", rc);
2416                 GOTO(failed, rc);
2417         }
2418
2419         /*
2420          * Start ruhroh thread.  We can't use kgnilnd_thread_start() because
2421          * we don't want this thread included in kgnilnd_data.kgn_nthreads
2422          * count.  This thread controls quiesce, so it mustn't
2423          * quiesce itself.
2424          */
2425         thrd = kthread_run(kgnilnd_ruhroh_thread, NULL, "%s_%02d", "kgnilnd_rr", 0);
2426         if (IS_ERR(thrd)) {
2427                 rc = PTR_ERR(thrd);
2428                 CERROR("Can't spawn gnilnd ruhroh thread: %d\n", rc);
2429                 GOTO(failed, rc);
2430         }
2431
2432         /* threads will load balance across devs as they are available */
2433         if (*kgnilnd_tunables.kgn_thread_affinity) {
2434                 rc = kgnilnd_start_sd_threads();
2435                 if (rc != 0)
2436                         GOTO(failed, rc);
2437         } else {
2438                 for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
2439                         rc = kgnilnd_thread_start(kgnilnd_scheduler,
2440                                                   (void *)((long)i),
2441                                                   "kgnilnd_sd", i);
2442                         if (rc != 0) {
2443                                 CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
2444                                        i, rc);
2445                                 GOTO(failed, rc);
2446                         }
2447                 }
2448         }
2449
2450         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2451                 dev = &kgnilnd_data.kgn_devices[i];
2452                 rc = kgnilnd_thread_start(kgnilnd_dgram_mover, dev,
2453                                           "kgnilnd_dg", dev->gnd_id);
2454                 if (rc != 0) {
2455                         CERROR("Can't spawn gnilnd dgram_mover[%d]: %d\n",
2456                                dev->gnd_id, rc);
2457                         GOTO(failed, rc);
2458                 }
2459
2460                 rc = kgnilnd_thread_start(kgnilnd_dgram_waitq, dev,
2461                                           "kgnilnd_dgn", dev->gnd_id);
2462                 if (rc != 0) {
2463                         CERROR("Can't spawn gnilnd dgram_waitq[%d]: %d\n",
2464                                 dev->gnd_id, rc);
2465                         GOTO(failed, rc);
2466                 }
2467
2468                 rc = kgnilnd_setup_wildcard_dgram(dev);
2469
2470                 if (rc != 0) {
2471                         CERROR("Can't create wildcard dgrams[%d]: %d\n",
2472                                 dev->gnd_id, rc);
2473                         GOTO(failed, rc);
2474                 }
2475         }
2476
2477         /* flag everything initialised */
2478         kgnilnd_data.kgn_init = GNILND_INIT_ALL;
2479         /*****************************************************/
2480
2481         CDEBUG(D_MALLOC, "initial kmem %d\n", pkmem);
2482         RETURN(0);
2483
2484 failed:
2485         kgnilnd_base_shutdown();
2486         kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
2487         RETURN(rc);
2488 }
2489
2490 void
2491 kgnilnd_base_shutdown(void)
2492 {
2493         int                     i, j;
2494         ENTRY;
2495
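             /* Rough ordering below: cancel all datagrams and delete every
              * peer, wait for kgn_nconns to drop to zero, stop the ruhroh
              * thread, then set kgn_shutdown and wake the schedulers, reaper
              * and RCA thread so kgn_nthreads can drain, and finally free the
              * hash tables and tear down each device via kgnilnd_dev_fini(). */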
2496         while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_PAUSE_SHUTDOWN, 1)) {};
2497
2498         kgnilnd_data.kgn_wc_kill = 1;
2499
2500         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2501                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2502                 kgnilnd_cancel_wc_dgrams(dev);
2503                 kgnilnd_cancel_dgrams(dev);
2504                 kgnilnd_del_conn_or_peer(NULL, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
2505                 kgnilnd_wait_for_canceled_dgrams(dev);
2506         }
2507
2508         /* We need to verify there are no conns left before we let the threads
2509          * shut down otherwise we could clean up the peers but still have
2510          * some outstanding conns due to orphaned datagram conns that are
2511          * being cleaned up.
2512          */
2513         i = 2;
2514         while (atomic_read(&kgnilnd_data.kgn_nconns) != 0) {
2515                 i++;
2516
2517                 for(j = 0; j < kgnilnd_data.kgn_ndevs; ++j) {
2518                         kgn_device_t *dev = &kgnilnd_data.kgn_devices[j];
2519                         kgnilnd_schedule_device(dev);
2520                 }
2521
2522                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2523                         "Waiting for conns to be cleaned up %d\n", atomic_read(&kgnilnd_data.kgn_nconns));
2524                 set_current_state(TASK_UNINTERRUPTIBLE);
2525                 schedule_timeout(cfs_time_seconds(1));
2526         }
2527         /* Peer state all cleaned up BEFORE setting shutdown, so threads don't
2528          * have to worry about shutdown races.  NB connections may be created
2529          * while there are still active connds, but these will be temporary
2530          * since peer creation always fails after the listener has started to
2531          * shut down.
2532          * All peers should have been cleared out on the nets */
2533         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2534                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2535
2536         /* Wait for the ruhroh thread to shut down. */
2537         kgnilnd_data.kgn_ruhroh_shutdown = 1;
2538         wake_up(&kgnilnd_data.kgn_ruhroh_waitq);
2539         i = 2;
2540         while (kgnilnd_data.kgn_ruhroh_running != 0) {
2541                 i++;
2542                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2543                        "Waiting for ruhroh thread to terminate\n");
2544                 set_current_state(TASK_UNINTERRUPTIBLE);
2545                 schedule_timeout(cfs_time_seconds(1));
2546         }
2547
2548         /* Flag threads to terminate */
2549         kgnilnd_data.kgn_shutdown = 1;
2550
2551         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2552                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2553
2554                 /* should clear all the MDDs */
2555                 kgnilnd_unmap_fma_blocks(dev);
2556
2557                 kgnilnd_schedule_device(dev);
2558                 wake_up_all(&dev->gnd_dgram_waitq);
2559                 wake_up_all(&dev->gnd_dgping_waitq);
2560                 LASSERT(list_empty(&dev->gnd_connd_peers));
2561         }
2562
2563         spin_lock(&kgnilnd_data.kgn_reaper_lock);
2564         wake_up_all(&kgnilnd_data.kgn_reaper_waitq);
2565         spin_unlock(&kgnilnd_data.kgn_reaper_lock);
2566
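             /* if any threads are still up, the RCA thread may be among them - wake
              * it so it can exit too */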
2567         if (atomic_read(&kgnilnd_data.kgn_nthreads))
2568                 kgnilnd_wakeup_rca_thread();
2569
2570         /* Wait for threads to exit */
2571         i = 2;
2572         while (atomic_read(&kgnilnd_data.kgn_nthreads) != 0) {
2573                 i++;
2574                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* warn only on powers of 2 */
2575                        "Waiting for %d threads to terminate\n",
2576                        atomic_read(&kgnilnd_data.kgn_nthreads));
2577                 set_current_state(TASK_UNINTERRUPTIBLE);
2578                 schedule_timeout(cfs_time_seconds(1));
2579         }
2580
2581         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2582                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2583
2584         if (kgnilnd_data.kgn_peers != NULL) {
2585                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
2586                         LASSERT(list_empty(&kgnilnd_data.kgn_peers[i]));
2587
2588                 LIBCFS_FREE(kgnilnd_data.kgn_peers,
2589                             sizeof (struct list_head) *
2590                             *kgnilnd_tunables.kgn_peer_hash_size);
2591         }
2592
2593         down_write(&kgnilnd_data.kgn_net_rw_sem);
2594         if (kgnilnd_data.kgn_nets != NULL) {
2595                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++)
2596                         LASSERT(list_empty(&kgnilnd_data.kgn_nets[i]));
2597
2598                 LIBCFS_FREE(kgnilnd_data.kgn_nets,
2599                             sizeof (struct list_head) *
2600                             *kgnilnd_tunables.kgn_net_hash_size);
2601         }
2602         up_write(&kgnilnd_data.kgn_net_rw_sem);
2603
2604         LASSERTF(atomic_read(&kgnilnd_data.kgn_nconns) == 0,
2605                 "conns left %d\n", atomic_read(&kgnilnd_data.kgn_nconns));
2606
2607         if (kgnilnd_data.kgn_conns != NULL) {
2608                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
2609                         LASSERT(list_empty(&kgnilnd_data.kgn_conns[i]));
2610
2611                 LIBCFS_FREE(kgnilnd_data.kgn_conns,
2612                             sizeof (struct list_head) *
2613                             *kgnilnd_tunables.kgn_peer_hash_size);
2614         }
2615
2616         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2617                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2618                 kgnilnd_dev_fini(dev);
2619
2620                 LASSERTF(atomic_read(&dev->gnd_ndgrams) == 0,
2621                         "dgrams left %d\n", atomic_read(&dev->gnd_ndgrams));
2622
2623                 if (dev->gnd_dgrams != NULL) {
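                             /* walk the buckets with j so the outer device loop
                              * counter i is not clobbered */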
2624                         for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++)
2625                                 LASSERT(list_empty(&dev->gnd_dgrams[j]));
2626
2627                         LIBCFS_FREE(dev->gnd_dgrams,
2628                                     sizeof (struct list_head) *
2629                                     *kgnilnd_tunables.kgn_peer_hash_size);
2630                 }
2631
2632                 kgnilnd_free_phys_fmablk(dev);
2633         }
2634
2635         if (kgnilnd_data.kgn_mbox_cache != NULL)
2636                 kmem_cache_destroy(kgnilnd_data.kgn_mbox_cache);
2637
2638         if (kgnilnd_data.kgn_rx_cache != NULL)
2639                 kmem_cache_destroy(kgnilnd_data.kgn_rx_cache);
2640
2641         if (kgnilnd_data.kgn_tx_cache != NULL)
2642                 kmem_cache_destroy(kgnilnd_data.kgn_tx_cache);
2643
2644         if (kgnilnd_data.kgn_tx_phys_cache != NULL)
2645                 kmem_cache_destroy(kgnilnd_data.kgn_tx_phys_cache);
2646
2647         if (kgnilnd_data.kgn_dgram_cache != NULL)
2648                 kmem_cache_destroy(kgnilnd_data.kgn_dgram_cache);
2649
2650         if (kgnilnd_data.kgn_cksum_map_pages != NULL) {
2651                 for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
2652                         if (kgnilnd_data.kgn_cksum_map_pages[i] != NULL) {
2653                                 kfree(kgnilnd_data.kgn_cksum_map_pages[i]);
2654                         }
2655                 }
2656                 kfree(kgnilnd_data.kgn_cksum_map_pages);
2657         }
2658
2659         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2660                atomic_read(&libcfs_kmemory));
2661
2662         kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
2663         module_put(THIS_MODULE);
2664
2665         EXIT;
2666 }
2667
2668 int
2669 kgnilnd_startup(lnet_ni_t *ni)
2670 {
2671         int               rc, devno;
2672         kgn_net_t        *net;
2673         ENTRY;
2674
2675         LASSERTF(ni->ni_lnd == &the_kgnilnd,
2676                 "bad LND 0x%p != the_kgnilnd @ 0x%p\n",
2677                 ni->ni_lnd, &the_kgnilnd);
2678
2679         if (kgnilnd_data.kgn_init == GNILND_INIT_NOTHING) {
2680                 rc = kgnilnd_base_startup();
2681                 if (rc != 0)
2682                         RETURN(rc);
2683         }
2684
2685         /* Serialize with shutdown. */
2686         mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
2687
2688         LIBCFS_ALLOC(net, sizeof(*net));
2689         if (net == NULL) {
2690                 CERROR("could not allocate net for new interface instance\n");
2691                 /* no need to cleanup the CDM... */
2692                 GOTO(failed, rc = -ENOMEM);
2693         }
2694         INIT_LIST_HEAD(&net->gnn_list);
2695         ni->ni_data = net;
2696         net->gnn_ni = ni;
2697         ni->ni_maxtxcredits = *kgnilnd_tunables.kgn_credits;
2698         ni->ni_peertxcredits = *kgnilnd_tunables.kgn_peer_credits;
2699
2700         if (*kgnilnd_tunables.kgn_peer_health) {
2701                 int     fudge;
2702                 int     timeout;
2703                 /* give this a bit of leeway - we don't have a hard timeout
2704                  * as we only check timeouts periodically - see comment in kgnilnd_reaper */
2705                 fudge = (GNILND_TO2KA(*kgnilnd_tunables.kgn_timeout) / GNILND_REAPER_NCHECKS);
2706                 timeout = *kgnilnd_tunables.kgn_timeout + fudge;
2707
2708                 if (*kgnilnd_tunables.kgn_peer_timeout >= timeout)
2709                         ni->ni_peertimeout = *kgnilnd_tunables.kgn_peer_timeout;
2710                 else if (*kgnilnd_tunables.kgn_peer_timeout > -1) {
2711                         LCONSOLE_ERROR("Peer_timeout is set to %d but needs to be >= %d\n",
2712                                         *kgnilnd_tunables.kgn_peer_timeout,
2713                                         timeout);
2714                         ni->ni_data = NULL;
2715                         LIBCFS_FREE(net, sizeof(*net));
2716                         GOTO(failed, rc = -EINVAL);
2717                 } else
2718                         ni->ni_peertimeout = timeout;
2719
2720                 LCONSOLE_INFO("Enabling LNet peer health for gnilnd, timeout %ds\n",
2721                               ni->ni_peertimeout);
2722         }
2723
2724         atomic_set(&net->gnn_refcount, 1);
2725
2726         /* if we have multiple devices, spread the nets around */
2727         net->gnn_netnum = LNET_NETNUM(LNET_NIDNET(ni->ni_nid));
2728
2729         devno = LNET_NIDNET(ni->ni_nid) % GNILND_MAXDEVS;
2730         net->gnn_dev = &kgnilnd_data.kgn_devices[devno];
2731
2732         /* allocate a 'dummy' cdm for datagram use. We can only have a single
2733          * datagram between a nid:inst_id and nid2:inst_id. The fake cdm
2734          * gives us an additional inst_id to use, allowing the datagrams to flow
2735          * like rivers of honey and beer */
2736
2737         /* the instance id for the cdm is the NETNUM offset by MAXDEVS -
2738          * ensuring we'll have a unique id */
2739
2740
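             /* the NI keeps the network it was configured with but takes its address
              * from the device; e.g. a hypothetical 101@gni1 on a device with
              * gnd_nid 202 ends up as 202@gni1 */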
2741         ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), net->gnn_dev->gnd_nid);
2742         CDEBUG(D_NET, "adding net %p nid=%s on dev %d\n",
2743                 net, libcfs_nid2str(ni->ni_nid), net->gnn_dev->gnd_id);
2744         /* until the gnn_list is set, we need to clean up ourselves, as
2745          * kgnilnd_shutdown would just get confused */
2746
2747         down_write(&kgnilnd_data.kgn_net_rw_sem);
2748         list_add_tail(&net->gnn_list, kgnilnd_netnum2netlist(net->gnn_netnum));
2749         up_write(&kgnilnd_data.kgn_net_rw_sem);
2750
2751         /* we need a separate thread to call probe_wait_by_id until
2752          * we get a function callback notifier from kgni */
2753         mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
2754         RETURN(0);
2755 failed:
2756         mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
2757         kgnilnd_shutdown(ni);
2758         RETURN(rc);
2759 }
2760
2761 void
2762 kgnilnd_shutdown(lnet_ni_t *ni)
2763 {
2764         kgn_net_t     *net = ni->ni_data;
2765         int           i;
2766         int           rc;
2767         ENTRY;
2768
2769         CFS_RACE(CFS_FAIL_GNI_SR_DOWN_RACE);
2770
2771         LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_ALL,
2772                 "init %d\n", kgnilnd_data.kgn_init);
2773
2774         /* Serialize with startup. */
2775         mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
2776         CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
2777                atomic_read(&libcfs_kmemory));
2778
2779         if (net == NULL) {
2780                 CERROR("got NULL net for ni %p\n", ni);
2781                 GOTO(out, rc = -EINVAL);
2782         }
2783
2784         LASSERTF(ni == net->gnn_ni,
2785                 "ni %p gnn_ni %p\n", ni, net->gnn_ni);
2786
2787         ni->ni_data = NULL;
2788
2789         LASSERT(!net->gnn_shutdown);
2790         LASSERTF(atomic_read(&net->gnn_refcount) != 0,
2791                 "net %p refcount %d\n",
2792                  net, atomic_read(&net->gnn_refcount));
2793
2794         if (!list_empty(&net->gnn_list)) {
2795                 /* serialize with peer creation */
2796                 down_write(&kgnilnd_data.kgn_net_rw_sem);
2797                 net->gnn_shutdown = 1;
2798                 up_write(&kgnilnd_data.kgn_net_rw_sem);
2799
2800                 kgnilnd_cancel_net_dgrams(net);
2801
2802                 kgnilnd_del_conn_or_peer(net, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
2803
2804                 /* if we are quiesced, we need to wake up - those threads have
2805                  * to be alive to release peers, etc */
2806                 if (GNILND_IS_QUIESCED) {
2807                         set_mb(kgnilnd_data.kgn_quiesce_trigger, GNILND_QUIESCE_IDLE);
2808                         kgnilnd_quiesce_wait("shutdown");
2809                 }
2810
2811                 kgnilnd_wait_for_canceled_dgrams(net->gnn_dev);
2812
2813                 /* We wait until the net's refcount drops to 1 and then release the
2814                  * final ref, which is ours; this makes sure everything else is done
2815                  * before we free the net.
2816                  */
2817                 i = 4;
2818                 while (atomic_read(&net->gnn_refcount) != 1) {
2819                         i++;
2820                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2821                                 "Waiting for %d references to clear on net %d\n",
2822                                 atomic_read(&net->gnn_refcount),
2823                                 net->gnn_netnum);
2824                         set_current_state(TASK_UNINTERRUPTIBLE);
2825                         schedule_timeout(cfs_time_seconds(1));
2826                 }
2827
2828                 /* release ref from kgnilnd_startup */
2829                 kgnilnd_net_decref(net);
2830                 /* serialize with reaper and conn_task looping */
2831                 down_write(&kgnilnd_data.kgn_net_rw_sem);
2832                 list_del_init(&net->gnn_list);
2833                 up_write(&kgnilnd_data.kgn_net_rw_sem);
2834
2835         }
2836
2837         /* not locking, this can't race with writers */
2838         LASSERTF(atomic_read(&net->gnn_refcount) == 0,
2839                 "net %p refcount %d\n",
2840                  net, atomic_read(&net->gnn_refcount));
2841         LIBCFS_FREE(net, sizeof(*net));
2842
2843 out:
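             /* if every net hash bucket is now empty, this was the last interface,
              * so tear down the base LND state as well */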
2844         down_read(&kgnilnd_data.kgn_net_rw_sem);
2845         for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
2846                 if (!list_empty(&kgnilnd_data.kgn_nets[i])) {
2847                         up_read(&kgnilnd_data.kgn_net_rw_sem);
2848                         break;
2849                 }
2850
2851                 if (i == *kgnilnd_tunables.kgn_net_hash_size - 1) {
2852                         up_read(&kgnilnd_data.kgn_net_rw_sem);
2853                         kgnilnd_base_shutdown();
2854                 }
2855         }
2856         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2857                atomic_read(&libcfs_kmemory));
2858
2859         mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
2860         EXIT;
2861 }
2862
2863 void __exit
2864 kgnilnd_module_fini(void)
2865 {
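             /* tear down in the reverse order of kgnilnd_module_init() */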
2866         lnet_unregister_lnd(&the_kgnilnd);
2867         kgnilnd_proc_fini();
2868         kgnilnd_remove_sysctl();
2869         kgnilnd_tunables_fini();
2870 }
2871
2872 int __init
2873 kgnilnd_module_init(void)
2874 {
2875         int    rc;
2876
2877         rc = kgnilnd_tunables_init();
2878         if (rc != 0)
2879                 return rc;
2880
2881         printk(KERN_INFO "Lustre: kgnilnd build version: "KGNILND_BUILD_REV"\n");
2882
2883         kgnilnd_insert_sysctl();
2884         kgnilnd_proc_init();
2885
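             /* register with LNet last, once tunables, sysctl and proc hooks are in
              * place */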
2886         lnet_register_lnd(&the_kgnilnd);
2887
2888         return 0;
2889 }
2890
2891 MODULE_AUTHOR("Cray, Inc. <nic@cray.com>");
2892 MODULE_DESCRIPTION("Kernel Gemini LND v"KGNILND_BUILD_REV);
2893 MODULE_LICENSE("GPL");
2894
2895 module_init(kgnilnd_module_init);
2896 module_exit(kgnilnd_module_fini);