LU-17914 lnet: Fix erroneous net set error
[fs/lustre-release.git] / lnet / klnds / gnilnd / gnilnd.c
/*
 * Copyright (C) 2012 Cray, Inc.
 *
 * Copyright (c) 2013, 2017, Intel Corporation.
 *
 *   Author: Nic Henke <nic@cray.com>
 *   Author: James Shimek <jshimek@cray.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */
#include "gnilnd.h"

static int
kgnilnd_nl_get(int cmd, struct sk_buff *msg, int type, void *data)
{
        struct lnet_ni *ni = data;

        if (!ni || !msg)
                return -EINVAL;

        if (cmd != LNET_CMD_NETS || type != LNET_NET_LOCAL_NI_ATTR_LND_TUNABLES)
                return -EOPNOTSUPP;

        nla_put_u32(msg, LNET_NET_GNILND_TUNABLES_ATTR_LND_TIMEOUT,
                    kgnilnd_timeout());
        return 0;
}

static int
kgnilnd_nl_set(int cmd, struct nlattr *attr, int type, void *data)
{
        struct lnet_ni *ni = data;

        if (cmd != LNET_CMD_NETS)
                return -EOPNOTSUPP;

        if (!attr)
                return 0;

        if (nla_type(attr) != LN_SCALAR_ATTR_INT_VALUE)
                return -EINVAL;

        if (type == LNET_NET_GNILND_TUNABLES_ATTR_LND_TIMEOUT) {
                s64 timeout = nla_get_s64(attr);

                ni->ni_lnd_tunables.lnd_tun_u.lnd_gni.lnd_timeout = timeout;
        }

        return 0;
}

/* Primary entry points from LNET.  There are no guarantees against reentrance. */
const struct lnet_lnd the_kgnilnd = {
        .lnd_type       = GNILND,
        .lnd_startup    = kgnilnd_startup,
        .lnd_shutdown   = kgnilnd_shutdown,
        .lnd_ctl        = kgnilnd_ctl,
        .lnd_send       = kgnilnd_send,
        .lnd_recv       = kgnilnd_recv,
        .lnd_eager_recv = kgnilnd_eager_recv,
        .lnd_nl_get     = kgnilnd_nl_get,
        .lnd_nl_set     = kgnilnd_nl_set,
};

kgn_data_t      kgnilnd_data;

int
kgnilnd_thread_start(int (*fn)(void *arg), void *arg, char *name, int id)
{
        struct task_struct *thrd;

        thrd = kthread_run(fn, arg, "%s_%02d", name, id);
        if (IS_ERR(thrd))
                return PTR_ERR(thrd);

        atomic_inc(&kgnilnd_data.kgn_nthreads);
        return 0;
}
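
/*
 * Usage sketch (illustrative only, not part of the driver): spawn a named
 * thread via the helper above; the id becomes the "_%02d" suffix of the
 * thread name.
 *
 *	rc = kgnilnd_thread_start(kgnilnd_scheduler, (void *)1L,
 *				  "kgnilnd_sd", 1);
 *	if (rc != 0)
 *		CERROR("Can't spawn scheduler thread: %d\n", rc);
 */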

/* bind scheduler threads to cpus */
int
kgnilnd_start_sd_threads(void)
{
        int cpu;
        int i = 0;
        struct task_struct *task;

        for_each_online_cpu(cpu) {
                /* don't bind to cpu 0 - all interrupts are processed here */
                if (cpu == 0)
                        continue;

                task = kthread_create(kgnilnd_scheduler, (void *)((long)i),
                                      "%s_%02d", "kgnilnd_sd", i);
                if (!IS_ERR(task)) {
                        kthread_bind(task, cpu);
                        wake_up_process(task);
                } else {
                        CERROR("Can't spawn gnilnd scheduler[%d] %ld\n", i,
                                PTR_ERR(task));
                        return PTR_ERR(task);
                }
                atomic_inc(&kgnilnd_data.kgn_nthreads);

                if (++i >= *kgnilnd_tunables.kgn_sched_threads) {
                        break;
                }
        }

        return 0;
}

/* needs write_lock on kgn_peer_conn_lock */
int
kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
        kgn_conn_t *conn, *cnxt;
        int         loopback;
        int         count = 0;

        loopback = (peer->gnp_nid ==
                    lnet_nid_to_nid4(&peer->gnp_net->gnn_ni->ni_nid));

        list_for_each_entry_safe(conn, cnxt, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                if (conn == newconn)
                        continue;

                if (conn->gnc_device != newconn->gnc_device)
                        continue;

                /* This is a two connection loopback - one talking to the other */
                if (loopback &&
                    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
                    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp) {
                        CDEBUG(D_NET, "skipping prune of %p, "
                                "loopback and matching stamps"
                                " connstamp %llu(%llu)"
                                " peerstamp %llu(%llu)\n",
                                conn, newconn->gnc_my_connstamp,
                                conn->gnc_peer_connstamp,
                                newconn->gnc_peer_connstamp,
                                conn->gnc_my_connstamp);
                        continue;
                }

                if (conn->gnc_peerstamp != newconn->gnc_peerstamp) {
                        LASSERTF(conn->gnc_peerstamp < newconn->gnc_peerstamp,
                                "conn 0x%p peerstamp %llu >= "
                                "newconn 0x%p peerstamp %llu\n",
                                conn, conn->gnc_peerstamp,
                                newconn, newconn->gnc_peerstamp);

                        CDEBUG(D_NET, "Closing stale conn nid: %s"
                               " peerstamp:%#llx(%#llx)\n",
                               libcfs_nid2str(peer->gnp_nid),
                               conn->gnc_peerstamp, newconn->gnc_peerstamp);
                } else {
                        LASSERTF(conn->gnc_peer_connstamp < newconn->gnc_peer_connstamp,
                                "conn 0x%p peer_connstamp %llu >= "
                                "newconn 0x%p peer_connstamp %llu\n",
                                conn, conn->gnc_peer_connstamp,
                                newconn, newconn->gnc_peer_connstamp);

                        CDEBUG(D_NET, "Closing stale conn nid: %s"
                               " connstamp:%llu(%llu)\n",
                               libcfs_nid2str(peer->gnp_nid),
                               conn->gnc_peer_connstamp, newconn->gnc_peer_connstamp);
                }

                count++;
                kgnilnd_close_conn_locked(conn, -ESTALE);
        }

        if (count != 0) {
                CWARN("Closed %d stale conns to %s\n", count, libcfs_nid2str(peer->gnp_nid));
        }

        RETURN(count);
}
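
/*
 * Locking sketch (illustrative only): per the comment above, callers must
 * hold the write lock around the _locked call, e.g.:
 *
 *	write_lock(&kgnilnd_data.kgn_peer_conn_lock);
 *	nstale = kgnilnd_close_stale_conns_locked(peer, newconn);
 *	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
 */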

int
kgnilnd_conn_isdup_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
        kgn_conn_t *conn;
        int         loopback;
        ENTRY;

        loopback = (peer->gnp_nid ==
                    lnet_nid_to_nid4(&peer->gnp_net->gnn_ni->ni_nid));

        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                CDEBUG(D_NET, "checking conn 0x%p for peer %s"
                        " lo %d new %llu existing %llu"
                        " new peer %llu existing peer %llu"
                        " new dev %p existing dev %p\n",
                        conn, libcfs_nid2str(peer->gnp_nid),
                        loopback,
                        newconn->gnc_peerstamp, conn->gnc_peerstamp,
                        newconn->gnc_peer_connstamp, conn->gnc_peer_connstamp,
                        newconn->gnc_device, conn->gnc_device);

                /* conn is in the process of closing */
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                /* 'newconn' is from an earlier version of 'peer'!!! */
                if (newconn->gnc_peerstamp < conn->gnc_peerstamp)
                        RETURN(1);

                /* 'conn' is from an earlier version of 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->gnc_peerstamp > conn->gnc_peerstamp)
                        continue;

                /* Different devices are OK */
                if (conn->gnc_device != newconn->gnc_device)
                        continue;

                /* It's me connecting to myself */
                if (loopback &&
                    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
                    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp)
                        continue;

                /* 'newconn' is an earlier connection from 'peer'!!! */
                if (newconn->gnc_peer_connstamp < conn->gnc_peer_connstamp)
                        RETURN(2);

                /* 'conn' is an earlier connection from 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->gnc_peer_connstamp > conn->gnc_peer_connstamp)
                        continue;

                /* 'newconn' has the SAME connection stamp; 'peer' isn't
                 * playing the game... */
                RETURN(3);
        }

        RETURN(0);
}
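
/*
 * Return values of kgnilnd_conn_isdup_locked(), as read from the checks
 * above (summary added for reference): 0 - not a duplicate; 1 - newconn
 * is from an earlier incarnation of the peer; 2 - newconn is an earlier
 * connection attempt from the same incarnation; 3 - identical connection
 * stamps, i.e. the peer is misbehaving.
 */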

int
kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
{
        kgn_conn_t   *conn;
        gni_return_t  rrc;
        int           rc = 0;

        LASSERT(!in_interrupt());
        atomic_inc(&kgnilnd_data.kgn_nconns);

        /* divide by 2 to allow for complete reset and immediate reconnect */
        if (atomic_read(&kgnilnd_data.kgn_nconns) >= GNILND_MAX_CQID/2) {
                CERROR("Too many conns are live: %d > %d\n",
                        atomic_read(&kgnilnd_data.kgn_nconns), GNILND_MAX_CQID/2);
                atomic_dec(&kgnilnd_data.kgn_nconns);
                return -E2BIG;
        }

        LIBCFS_ALLOC(conn, sizeof(*conn));
        if (conn == NULL) {
                atomic_dec(&kgnilnd_data.kgn_nconns);
                return -ENOMEM;
        }

        conn->gnc_tx_ref_table =
                kgnilnd_vzalloc(GNILND_MAX_MSG_ID * sizeof(void *));
        if (conn->gnc_tx_ref_table == NULL) {
                CERROR("Can't allocate conn tx_ref_table\n");
                GOTO(failed, rc = -ENOMEM);
        }

        mutex_init(&conn->gnc_smsg_mutex);
        mutex_init(&conn->gnc_rdma_mutex);
        atomic_set(&conn->gnc_refcount, 1);
        atomic_set(&conn->gnc_reaper_noop, 0);
        atomic_set(&conn->gnc_sched_noop, 0);
        atomic_set(&conn->gnc_tx_in_use, 0);
        INIT_LIST_HEAD(&conn->gnc_list);
        INIT_LIST_HEAD(&conn->gnc_hashlist);
        INIT_LIST_HEAD(&conn->gnc_schedlist);
        INIT_LIST_HEAD(&conn->gnc_fmaq);
        INIT_LIST_HEAD(&conn->gnc_mdd_list);
        INIT_LIST_HEAD(&conn->gnc_delaylist);
        spin_lock_init(&conn->gnc_list_lock);
        spin_lock_init(&conn->gnc_tx_lock);
        conn->gnc_magic = GNILND_CONN_MAGIC;

        /* set tx id to nearly the end to make sure we find wrapping
         * issues soon */
        conn->gnc_next_tx = (int) GNILND_MAX_MSG_ID - 10;

        /* if this fails, we have conflicts and MAX_TX is too large */
        BUILD_BUG_ON(GNILND_MAX_MSG_ID >= GNILND_MSGID_CLOSE);

        /* get a new unique CQ id for this conn */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        conn->gnc_my_connstamp = kgnilnd_data.kgn_connstamp++;
        conn->gnc_cqid = kgnilnd_get_cqid_locked();
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        if (conn->gnc_cqid == 0) {
                CERROR("Could not allocate unique CQ ID for conn 0x%p\n", conn);
                GOTO(failed, rc = -E2BIG);
        }

        CDEBUG(D_NET, "alloc cqid %u for conn 0x%p\n",
                conn->gnc_cqid, conn);

        /* need to be set before gnc_ephandle to allow kgnilnd_destroy_conn_ep to
         * check context */
        conn->gnc_device = dev;

        conn->gnc_timeout = max(*kgnilnd_tunables.kgn_timeout,
                                 GNILND_MIN_TIMEOUT);
        kgnilnd_update_reaper_timeout(conn->gnc_timeout);

        /* this is the ep_handle for doing SMSG & BTE */
        mutex_lock(&dev->gnd_cq_mutex);
        rrc = kgnilnd_ep_create(dev->gnd_handle, dev->gnd_snd_fma_cqh,
                                &conn->gnc_ephandle);
        mutex_unlock(&dev->gnd_cq_mutex);
        if (rrc != GNI_RC_SUCCESS)
                GOTO(failed, rc = -ENETDOWN);

        CDEBUG(D_NET, "created conn 0x%p ep_hndl 0x%p\n",
               conn, conn->gnc_ephandle);

        /* add ref for EP canceling */
        kgnilnd_conn_addref(conn);
        atomic_inc(&dev->gnd_neps);

        *connp = conn;
        return 0;

failed:
        atomic_dec(&kgnilnd_data.kgn_nconns);
        kgnilnd_vfree(conn->gnc_tx_ref_table,
                      GNILND_MAX_MSG_ID * sizeof(void *));
        LIBCFS_FREE(conn, sizeof(*conn));
        return rc;
}
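
/*
 * Usage sketch (illustrative only): on failure no conn is returned and the
 * partial allocations have already been torn down, so callers can simply
 * bail:
 *
 *	kgn_conn_t *conn;
 *	int rc = kgnilnd_create_conn(&conn, dev);
 *
 *	if (rc != 0)
 *		return rc;
 */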

/* needs to be called with kgn_peer_conn_lock held (read or write) */
kgn_conn_t *
kgnilnd_find_conn_locked(kgn_peer_t *peer)
{
        kgn_conn_t *conn = NULL;

        /* if we are in reset, this conn is going to die soon */
        if (unlikely(kgnilnd_data.kgn_in_reset)) {
                RETURN(NULL);
        }

        /* just return the first ESTABLISHED connection */
        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                /* kgnilnd_finish_connect doesn't put connections on the
                 * peer list until they are actually established */
                LASSERTF(conn->gnc_state >= GNILND_CONN_ESTABLISHED,
                        "found conn %px state %s on peer %px (%s)\n",
                        conn, kgnilnd_conn_state2str(conn), peer,
                        libcfs_nid2str(peer->gnp_nid));
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                RETURN(conn);
        }
        RETURN(NULL);
}

/* needs write_lock on kgn_peer_conn_lock held */
kgn_conn_t *
kgnilnd_find_or_create_conn_locked(kgn_peer_t *peer)
{
        kgn_device_t *dev = peer->gnp_net->gnn_dev;
        kgn_conn_t   *conn;

        conn = kgnilnd_find_conn_locked(peer);
        if (conn != NULL) {
                return conn;
        }

        /* if the peer was previously connecting, check if we should
         * trigger another connection attempt yet */
        if (time_before(jiffies, peer->gnp_reconnect_time)) {
                return NULL;
        }

        /* This check prevents us from creating a new connection to a peer while we are
         * still in the process of closing an existing connection to the peer.
         */
        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_ephandle != NULL) {
                        CDEBUG(D_NET, "Not connecting - non-NULL ephandle found for peer 0x%p->%s\n",
                                peer, libcfs_nid2str(peer->gnp_nid));
                        return NULL;
                }
        }

        if (peer->gnp_connecting != GNILND_PEER_IDLE) {
                /* only start a new connection if the peer is IDLE - any
                 * other state means a connect is already in flight */
                return NULL;
        }

        CDEBUG(D_NET, "starting connect to %s\n",
                libcfs_nid2str(peer->gnp_nid));
        peer->gnp_connecting = GNILND_PEER_CONNECT;
        kgnilnd_peer_addref(peer); /* extra ref for connd */

        spin_lock(&dev->gnd_connd_lock);
        list_add_tail(&peer->gnp_connd_list, &dev->gnd_connd_peers);
        spin_unlock(&dev->gnd_connd_lock);

        kgnilnd_schedule_dgram(dev);
        CDEBUG(D_NETTRACE, "scheduling new connect\n");

        return NULL;
}
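
/*
 * Caller sketch (illustrative only): with kgn_peer_conn_lock write-held,
 * a NULL return means no usable conn exists yet - either a connect was
 * just scheduled, one is already in flight, or it is too early to retry.
 * Callers typically queue their TX on the peer and let the dgram
 * handshake complete in the background.
 */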

/* Caller is responsible for deciding if/when to call this */
void
kgnilnd_destroy_conn_ep(kgn_conn_t *conn)
{
        gni_return_t    rrc;
        gni_ep_handle_t tmp_ep;

        /* only if we actually initialized it: swap in NULL to tell
         * kgnilnd_destroy_conn to leave it alone */
        tmp_ep = xchg(&conn->gnc_ephandle, NULL);
        if (tmp_ep != NULL) {
                /* we never re-use the EP, so unbind is not needed */
                mutex_lock(&conn->gnc_device->gnd_cq_mutex);
                rrc = kgnilnd_ep_destroy(tmp_ep);
                mutex_unlock(&conn->gnc_device->gnd_cq_mutex);

                /* if this fails, it could hork up kgni smsg retransmit and others
                 * since we could free the SMSG mbox memory, etc. */
                LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d conn 0x%p ep 0x%p\n",
                         rrc, conn, conn->gnc_ephandle);

                atomic_dec(&conn->gnc_device->gnd_neps);

                /* clear out count added in kgnilnd_close_conn_locked
                 * conn will have a peer once it hits finish_connect, where it
                 * is the first spot we'll mark it ESTABLISHED as well */
                if (conn->gnc_peer) {
                        kgnilnd_admin_decref(conn->gnc_peer->gnp_dirty_eps);
                }

                /* drop ref for EP */
                kgnilnd_conn_decref(conn);
        }
}
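
/*
 * Note (summary added for reference): the xchg() above atomically claims
 * gnc_ephandle, so if two contexts race to tear down the EP only one sees
 * the non-NULL handle and destroys it; the other sees NULL and returns
 * without touching the conn.
 */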

void
kgnilnd_destroy_conn(kgn_conn_t *conn)
{
        LASSERTF(!in_interrupt() &&
                !conn->gnc_scheduled &&
                !conn->gnc_in_purgatory &&
                conn->gnc_ephandle == NULL &&
                list_empty(&conn->gnc_list) &&
                list_empty(&conn->gnc_hashlist) &&
                list_empty(&conn->gnc_schedlist) &&
                list_empty(&conn->gnc_mdd_list) &&
                list_empty(&conn->gnc_delaylist) &&
                conn->gnc_magic == GNILND_CONN_MAGIC,
                "conn 0x%p->%s IRQ %d sched %d purg %d ep 0x%p Mg %d lists %d/%d/%d/%d/%d\n",
                conn, conn->gnc_peer ? libcfs_nid2str(conn->gnc_peer->gnp_nid)
                                     : "<?>",
                !!in_interrupt(), conn->gnc_scheduled,
                conn->gnc_in_purgatory,
                conn->gnc_ephandle,
                conn->gnc_magic,
                list_empty(&conn->gnc_list),
                list_empty(&conn->gnc_hashlist),
                list_empty(&conn->gnc_schedlist),
                list_empty(&conn->gnc_mdd_list),
                list_empty(&conn->gnc_delaylist));

        /* Tripping these is especially bad, as it means we have items on the
         *  lists that didn't keep their refcount on the connection - or
         *  somebody evil released their own */
        LASSERTF(list_empty(&conn->gnc_fmaq) &&
                 atomic_read(&conn->gnc_nlive_fma) == 0 &&
                 atomic_read(&conn->gnc_nlive_rdma) == 0,
                 "conn 0x%p fmaq %d@0x%p nfma %d nrdma %d\n",
                 conn, kgnilnd_count_list(&conn->gnc_fmaq), &conn->gnc_fmaq,
                 atomic_read(&conn->gnc_nlive_fma), atomic_read(&conn->gnc_nlive_rdma));

        CDEBUG(D_NET, "destroying conn %p ephandle %p error %d\n",
                conn, conn->gnc_ephandle, conn->gnc_error);

        /* We are freeing this memory; remove the magic value from the connection */
        conn->gnc_magic = 0;

        /* if there is an FMA blk left here, we'll tear it down */
        if (conn->gnc_fma_blk) {
                if (conn->gnc_peer) {
                        kgn_mbox_info_t *mbox;

                        mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
                        mbox->mbx_prev_nid = conn->gnc_peer->gnp_nid;
                }
                kgnilnd_release_mbox(conn, 0);
        }

        if (conn->gnc_peer != NULL)
                kgnilnd_peer_decref(conn->gnc_peer);

        if (conn->gnc_tx_ref_table != NULL) {
                kgnilnd_vfree(conn->gnc_tx_ref_table,
                              GNILND_MAX_MSG_ID * sizeof(void *));
        }

        LIBCFS_FREE(conn, sizeof(*conn));
        atomic_dec(&kgnilnd_data.kgn_nconns);
}

/* peer_alive and peer_notify done in the style of the o2iblnd */
void
kgnilnd_peer_alive(kgn_peer_t *peer)
{
        time64_t now = ktime_get_seconds();

        set_mb(peer->gnp_last_alive, now);
}

void
kgnilnd_peer_notify(kgn_peer_t *peer, int error, int alive)
{
        int          tell_lnet = 0;
        int          nnets = 0;
        int          rc;
        int          i, j;
        kgn_conn_t  *conn;
        kgn_net_t  **nets;
        kgn_net_t   *net;

        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DONT_NOTIFY))
                return;

        /* Tell LNet we are giving up on this peer - but only
         * if it isn't already reconnected or trying to reconnect */
        read_lock(&kgnilnd_data.kgn_peer_conn_lock);

        /* use kgnilnd_find_conn_locked to avoid any conns in the process of being nuked
         *
         * don't tell LNet if we are in reset - we assume that everyone will be able to
         * reconnect just fine
         */
        conn = kgnilnd_find_conn_locked(peer);

        CDEBUG(D_NETTRACE, "peer 0x%p->%s connecting %d conn 0x%p, rst %d error %d\n",
               peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting, conn,
               kgnilnd_data.kgn_in_reset, error);

        if (((peer->gnp_connecting == GNILND_PEER_IDLE) &&
            (conn == NULL) &&
            (!kgnilnd_data.kgn_in_reset) &&
            (!kgnilnd_conn_clean_errno(error))) || alive) {
                tell_lnet = 1;
        }

        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        if (!tell_lnet) {
                /* short circuit if we don't need to notify LNet */
                return;
        }

        rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
        if (rc) {
                /* skip notification entirely if the trylock fails - LNet is
                 * in shutdown or something else is holding the sem */
                for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                        list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
                                /* if gnn_shutdown is set for any net, shutdown is in progress - just return */
                                if (net->gnn_shutdown) {
                                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                                        return;
                                }
                                nnets++;
                        }
                }

                if (nnets == 0) {
                        /* shutdown in progress most likely */
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        return;
                }

                CFS_ALLOC_PTR_ARRAY(nets, nnets);
                if (nets == NULL) {
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        CERROR("Failed to allocate nets[%d]\n", nnets);
                        return;
                }

                j = 0;
                for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                        list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
                                nets[j] = net;
                                kgnilnd_net_addref(net);
                                j++;
                        }
                }
                up_read(&kgnilnd_data.kgn_net_rw_sem);

                for (i = 0; i < nnets; i++) {
                        struct lnet_nid peer_nid;

                        net = nets[i];

                        lnet_nid4_to_nid(kgnilnd_lnd2lnetnid(
                                                 lnet_nid_to_nid4(&net->gnn_ni->ni_nid),
                                                 peer->gnp_nid),
                                         &peer_nid);

                        CDEBUG(D_NET, "peer 0x%p->%s last_alive %lld (%llds ago)\n",
                                peer, libcfs_nidstr(&peer_nid),
                                peer->gnp_last_alive,
                                ktime_get_seconds() - peer->gnp_last_alive);

                        lnet_notify(net->gnn_ni, &peer_nid, alive, true,
                                    peer->gnp_last_alive);

                        kgnilnd_net_decref(net);
                }

                CFS_FREE_PTR_ARRAY(nets, nnets);
        }
}

/* need write_lock on kgn_peer_conn_lock */
void
kgnilnd_close_conn_locked(kgn_conn_t *conn, int error)
{
        kgn_peer_t *peer = conn->gnc_peer;
        ENTRY;

        LASSERT(!in_interrupt());

        /* store error for tx completion */
        conn->gnc_error = error;
        peer->gnp_last_errno = error;

        /* use real error from peer if possible */
        if (error == -ECONNRESET) {
                error = conn->gnc_peer_error;
        }

        /* if we NETERROR, make sure it is rate limited */
        if (!kgnilnd_conn_clean_errno(error) &&
            peer->gnp_state != GNILND_PEER_DOWN) {
                CNETERR("closing conn to %s: error %d\n",
                       libcfs_nid2str(peer->gnp_nid), error);
        } else {
                CDEBUG(D_NET, "closing conn to %s: error %d\n",
                       libcfs_nid2str(peer->gnp_nid), error);
        }

        LASSERTF(conn->gnc_state == GNILND_CONN_ESTABLISHED,
                "conn %px to %s with bogus state %s\n", conn,
                libcfs_nid2str(conn->gnc_peer->gnp_nid),
                kgnilnd_conn_state2str(conn));
        LASSERT(!list_empty(&conn->gnc_hashlist));
        LASSERT(!list_empty(&conn->gnc_list));

        /* mark peer count here so any place the EP gets destroyed will
         * open up the peer count so that a new ESTABLISHED conn is then free
         * to send new messages -- sending before the previous EPs are destroyed
         * could end up with messages on the network for the old conn _after_
         * the new conn and break the mbox safety protocol */
        kgnilnd_admin_addref(conn->gnc_peer->gnp_dirty_eps);

        /* Remove from conn hash table: no new callbacks */
        list_del_init(&conn->gnc_hashlist);
        kgnilnd_data.kgn_conn_version++;
        kgnilnd_conn_decref(conn);

        /* if we are in reset, go right to CLOSED as there is no scheduler
         * thread to move from CLOSING to CLOSED */
        if (unlikely(kgnilnd_data.kgn_in_reset)) {
                conn->gnc_state = GNILND_CONN_CLOSED;
        } else {
                conn->gnc_state = GNILND_CONN_CLOSING;
        }

        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RDMA_CQ_ERROR)) {
                msleep_interruptible(MSEC_PER_SEC);
        }

        /* leave on peer->gnp_conns to make sure we don't let the reaper
         * or others try to unlink this peer until the conn is fully
         * processed for closing */
        if (kgnilnd_check_purgatory_conn(conn)) {
                kgnilnd_add_purgatory_locked(conn, conn->gnc_peer);
        }

        /* Reset RX timeout to ensure we wait for an incoming CLOSE
         * for the full timeout.  If we get a CLOSE we know the
         * peer has stopped all RDMA.  Otherwise if we wait for
         * the full timeout we can also be sure all RDMA has stopped. */
        conn->gnc_last_rx = conn->gnc_last_rx_cq = jiffies;
        mb();

        /* schedule sending CLOSE - if we are in quiesce, this adds to
         * gnd_ready_conns and allows us to find it in quiesce processing */
        kgnilnd_schedule_conn(conn);

        EXIT;
}

void
kgnilnd_close_conn(kgn_conn_t *conn, int error)
{
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        /* need to check the state here - this call is racy and we don't
         * know the state until after the lock is grabbed */
        if (conn->gnc_state == GNILND_CONN_ESTABLISHED) {
                kgnilnd_close_conn_locked(conn, error);
        }
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
}

void
kgnilnd_complete_closed_conn(kgn_conn_t *conn)
{
        LIST_HEAD(sinners);
        kgn_tx_t *tx, *txn;
        int       nlive = 0;
        int       nrdma = 0;
        int       nq_rdma = 0;
        int       logmsg;
        ENTRY;

        /* Dump log on cksum error - wait until complete phase to let
         * RX of error happen */
        if (*kgnilnd_tunables.kgn_checksum_dump &&
            (conn != NULL && conn->gnc_peer_error == -ENOKEY)) {
                libcfs_debug_dumplog();
        }

        /* _CLOSED set in kgnilnd_process_fmaq once we decide to
         * send the CLOSE or not */
        LASSERTF(conn->gnc_state == GNILND_CONN_CLOSED,
                 "conn 0x%p->%s with bad state %s\n",
                 conn, conn->gnc_peer ?
                        libcfs_nid2str(conn->gnc_peer->gnp_nid) :
                        "<?>",
                 kgnilnd_conn_state2str(conn));

        LASSERT(list_empty(&conn->gnc_hashlist));
        /* We shouldn't be on the delay list; the conn can only get added
         * to that list during a retransmit, and retransmits occur only
         * within scheduler threads.
         */
        LASSERT(list_empty(&conn->gnc_delaylist));

        /* we've sent the close, start nuking */
        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SCHEDULE_COMPLETE))
                kgnilnd_schedule_conn(conn);

        if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
                CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
                                "done, attempting to recover conn 0x%p "
                                "scheduled %d function: %s line: %d\n", conn,
                                conn->gnc_scheduled, conn->gnc_sched_caller,
                                conn->gnc_sched_line);
                RETURN_EXIT;
        }

        /* we don't use lists to track things that we can get out of the
         * tx_ref table... */

        /* need to hold locks for tx_list_state, sampling it is too racy:
         * - the lock actually protects tx != NULL, but we can't take the proper
         *   lock until we check tx_list_state, which would be too late and
         *   we could have the TX change under us.
         * gnd_rdmaq_lock and gnd_lock are not used together, so taking both
         * should be fine */
        spin_lock(&conn->gnc_device->gnd_rdmaq_lock);
        spin_lock(&conn->gnc_device->gnd_lock);

        for (nrdma = 0; nrdma < GNILND_MAX_MSG_ID; nrdma++) {
                tx = conn->gnc_tx_ref_table[nrdma];

                if (tx != NULL) {
                        /* only print the first error and if not CLOSE, we often don't see
                         * CQ events for that by the time we get here... and really don't care */
                        if (nlive || tx->tx_msg.gnm_type == GNILND_MSG_CLOSE)
                                tx->tx_state |= GNILND_TX_QUIET_ERROR;
                        nlive++;
                        GNIDBG_TX(D_NET, tx, "cleaning up on close, nlive %d", nlive);

                        /* don't worry about gnc_lock here as nobody else should be
                         * touching this conn */
                        kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
                        list_add_tail(&tx->tx_list, &sinners);
                }
        }
        spin_unlock(&conn->gnc_device->gnd_lock);
        spin_unlock(&conn->gnc_device->gnd_rdmaq_lock);

        /* nobody should have marked this as needing scheduling after
         * we called close - so only ref should be us handling it */
        if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
                CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
                                "done, attempting to recover conn 0x%p "
                                "scheduled %d function %s line: %d\n", conn,
                                conn->gnc_scheduled, conn->gnc_sched_caller,
                                conn->gnc_sched_line);
        }
        /* now reset a few to actual counters... */
        nrdma = atomic_read(&conn->gnc_nlive_rdma);
        nq_rdma = atomic_read(&conn->gnc_nq_rdma);

        if (!list_empty(&sinners)) {
                list_for_each_entry_safe(tx, txn, &sinners, tx_list) {
                        /* clear tx_list to make tx_add_list_locked happy */
                        list_del_init(&tx->tx_list);
                        /* The error codes determine if we hold onto the MDD */
                        kgnilnd_tx_done(tx, conn->gnc_error);
                }
        }

        logmsg = (nlive + nrdma + nq_rdma);

        if (logmsg) {
                int level = conn->gnc_peer->gnp_state == GNILND_PEER_UP ?
                                D_NETERROR : D_NET;
                CDEBUG(level, "Closed conn 0x%p->%s (errno %d,"
                        " peer errno %d): canceled %d TX, %d/%d RDMA\n",
                        conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                        conn->gnc_error, conn->gnc_peer_error,
                        nlive, nq_rdma, nrdma);
        }

        kgnilnd_destroy_conn_ep(conn);

        /* Bug 765042 - race this with completing a new conn to same peer - we need
         * finish_connect to detach purgatory before we can do it ourselves here */
        CFS_RACE(CFS_FAIL_GNI_FINISH_PURG);

        /* now it is safe to remove from peer list - anyone looking at
         * gnp_conns now is free to unlink if not on purgatory */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);

        conn->gnc_state = GNILND_CONN_DONE;

        /* Decrement counter if we are marked by del_conn_or_peers for closing
         */
        if (conn->gnc_needs_closing)
                kgnilnd_admin_decref(kgnilnd_data.kgn_npending_conns);

        /* Remove from peer's list of valid connections if it's not in purgatory */
        if (!conn->gnc_in_purgatory) {
                list_del_init(&conn->gnc_list);
                /* Lose peer's reference on the conn */
                kgnilnd_conn_decref(conn);
        }

        /* NB - only unlinking if we set pending in del_peer_locked from admin or
         * shutdown */
        if (kgnilnd_peer_active(conn->gnc_peer) &&
            conn->gnc_peer->gnp_pending_unlink &&
            kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
                kgnilnd_unlink_peer_locked(conn->gnc_peer);
        }

        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        /* I'm telling Mommy! - use peer_error if they initiated close */
        kgnilnd_peer_notify(conn->gnc_peer,
                            conn->gnc_error == -ECONNRESET ?
                            conn->gnc_peer_error : conn->gnc_error, 0);

        EXIT;
}

int
kgnilnd_set_conn_params(kgn_dgram_t *dgram)
{
        kgn_conn_t      *conn = dgram->gndg_conn;
        kgn_connreq_t   *connreq = &dgram->gndg_conn_in;
        kgn_gniparams_t *rem_param = &connreq->gncr_gnparams;
        gni_return_t     rrc;
        int              rc = 0;
        gni_smsg_attr_t *remote = &connreq->gncr_gnparams.gnpr_smsg_attr;

        /* set timeout vals in conn early so we can use them for the NAK */

        /* use max of the requested and our timeout, peer will do the same */
        conn->gnc_timeout = max(conn->gnc_timeout, connreq->gncr_timeout);

        /* only ep_bind really mucks around with the CQ */
        /* only ep_bind if we are not connecting to ourselves and the dstnid is
         * not a wildcard - an EP can only be bound once, so we must make sure
         * we don't bind one that is already bound.
         */
        if (connreq->gncr_dstnid != LNET_NID_ANY &&
            dgram->gndg_conn_out.gncr_dstnid != connreq->gncr_srcnid) {
                mutex_lock(&conn->gnc_device->gnd_cq_mutex);
                rrc = kgnilnd_ep_bind(conn->gnc_ephandle,
                        connreq->gncr_gnparams.gnpr_host_id,
                        conn->gnc_cqid);
                mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                if (rrc != GNI_RC_SUCCESS) {
                        rc = -ECONNABORTED;
                        goto return_out;
                }
        }

        rrc = kgnilnd_ep_set_eventdata(conn->gnc_ephandle, conn->gnc_cqid,
                         connreq->gncr_gnparams.gnpr_cqid);
        if (rrc != GNI_RC_SUCCESS) {
                rc = -ECONNABORTED;
                goto cleanup_out;
        }

        /* Initialize SMSG */
        rrc = kgnilnd_smsg_init(conn->gnc_ephandle, &conn->gnpr_smsg_attr,
                        &connreq->gncr_gnparams.gnpr_smsg_attr);
        if (unlikely(rrc == GNI_RC_INVALID_PARAM)) {
                gni_smsg_attr_t *local = &conn->gnpr_smsg_attr;

                /* help folks figure out if there is a tunable off, etc. */
                LCONSOLE_ERROR("SMSG attribute mismatch. Data from local/remote:"
                               " type %d/%d msg_maxsize %u/%u"
                               " mbox_maxcredit %u/%u. Please check kgni"
                               " logs for further data\n",
                               local->msg_type, remote->msg_type,
                               local->msg_maxsize, remote->msg_maxsize,
                               local->mbox_maxcredit, remote->mbox_maxcredit);
        }
        if (rrc != GNI_RC_SUCCESS) {
                rc = -ECONNABORTED;
                goto cleanup_out;
        }

        /* log this for help in debugging SMSG buffer re-use */
        CDEBUG(D_NET, "conn %p src %s dst %s smsg %p acquired"
                " local cqid %u SMSG %p->%u hndl %#llx.%#llx"
                " remote cqid %u SMSG %p->%u hndl %#llx.%#llx\n",
                conn, libcfs_nid2str(connreq->gncr_srcnid),
                libcfs_nid2str(connreq->gncr_dstnid),
                &conn->gnpr_smsg_attr,
                conn->gnc_cqid,
                conn->gnpr_smsg_attr.msg_buffer,
                conn->gnpr_smsg_attr.mbox_offset,
                conn->gnpr_smsg_attr.mem_hndl.qword1,
                conn->gnpr_smsg_attr.mem_hndl.qword2,
                rem_param->gnpr_cqid,
                rem_param->gnpr_smsg_attr.msg_buffer,
                rem_param->gnpr_smsg_attr.mbox_offset,
                rem_param->gnpr_smsg_attr.mem_hndl.qword1,
                rem_param->gnpr_smsg_attr.mem_hndl.qword2);

        conn->gnc_peerstamp = connreq->gncr_peerstamp;
        conn->gnc_peer_connstamp = connreq->gncr_connstamp;
        conn->remote_mbox_addr = (void *)((char *)remote->msg_buffer + remote->mbox_offset);

        /* We update the reaper timeout once we have a valid conn and timeout */
        kgnilnd_update_reaper_timeout(GNILND_TO2KA(conn->gnc_timeout));

        return 0;

cleanup_out:
        rrc = kgnilnd_ep_unbind(conn->gnc_ephandle);
        /* not sure I can just let this fly */
        LASSERTF(rrc == GNI_RC_SUCCESS,
                "bad rc from gni_ep_unbind trying to cleanup: %d\n", rrc);

return_out:
        LASSERTF(rc != 0, "SOFTWARE BUG: rc == 0\n");
        CERROR("Error setting connection params from %s: %d\n",
               libcfs_nid2str(connreq->gncr_srcnid), rc);
        return rc;
}
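
/*
 * Handshake sketch (summary of kgnilnd_set_conn_params() above, added for
 * reference): each side's connreq carries its CQ id, host id and SMSG
 * mailbox attributes, which are wired up in three GNI steps -
 *
 *	kgnilnd_ep_bind(ep, remote host_id, local cqid);
 *	kgnilnd_ep_set_eventdata(ep, local cqid, remote cqid);
 *	kgnilnd_smsg_init(ep, &local_smsg_attr, &remote_smsg_attr);
 *
 * Any GNI failure maps to -ECONNABORTED so the caller can NAK the dgram.
 */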

/* needs down_read on kgn_net_rw_sem held from before this call until
 * after the write_lock on kgn_peer_conn_lock - this ensures we stay sane
 * with kgnilnd_shutdown - it'll get the sem and set shutdown, then get the
 * kgn_peer_conn_lock to start del_peer'ing. If we hold the sem until after
 * kgn_peer_conn_lock is held, we guarantee that nobody calls
 * kgnilnd_add_peer_locked without checking gnn_shutdown */
int
kgnilnd_create_peer_safe(kgn_peer_t **peerp,
                         lnet_nid_t nid,
                         kgn_net_t *net,
                         int node_state)
{
        kgn_peer_t *peer;
        int         rc;

        LASSERT(nid != LNET_NID_ANY);

        /* We don't pass the net around in the dgram anymore, so this is where
         * we look it up. That works unless we are in shutdown or the nid's net
         * is invalid; either way an error code is returned in those cases.
         *
         * If the net passed in is non-NULL we can use it directly, which saves
         * a lookup when the calling function already has the data.
         */
        if (net == NULL) {
                rc = kgnilnd_find_net(nid, &net);
                if (rc < 0)
                        return rc;
        } else {
                /* kgnilnd_find_net adds a reference on the net; since we are
                 * not calling it, we must take the reference manually so the
                 * net refcounts are correct when tearing down the net
                 */
                kgnilnd_net_addref(net);
        }

        LIBCFS_ALLOC(peer, sizeof(*peer));
        if (peer == NULL) {
                kgnilnd_net_decref(net);
                return -ENOMEM;
        }
        peer->gnp_nid = nid;
        peer->gnp_state = node_state;

        /* translate from nid to nic addr & store */
        rc = kgnilnd_nid_to_nicaddrs(LNET_NIDADDR(nid), 1, &peer->gnp_host_id);
        if (rc <= 0) {
                kgnilnd_net_decref(net);
                LIBCFS_FREE(peer, sizeof(*peer));
                return -ESRCH;
        }
        CDEBUG(D_NET, "peer 0x%p->%s -> NIC 0x%x\n", peer,
                libcfs_nid2str(nid), peer->gnp_host_id);

        atomic_set(&peer->gnp_refcount, 1);     /* 1 ref for caller */
        atomic_set(&peer->gnp_dirty_eps, 0);

        INIT_LIST_HEAD(&peer->gnp_list);
        INIT_LIST_HEAD(&peer->gnp_connd_list);
        INIT_LIST_HEAD(&peer->gnp_conns);
        INIT_LIST_HEAD(&peer->gnp_tx_queue);

        /* the first reconnect should happen immediately, so we leave
         * gnp_reconnect_interval set to 0 */

        LASSERTF(net != NULL, "peer 0x%p->%s with NULL net\n",
                 peer, libcfs_nid2str(nid));

        /* must have kgn_net_rw_sem held for this...  */
        if (net->gnn_shutdown) {
                /* shutdown has started already */
                kgnilnd_net_decref(net);
                LIBCFS_FREE(peer, sizeof(*peer));
                return -ESHUTDOWN;
        }

        peer->gnp_net = net;

        atomic_inc(&kgnilnd_data.kgn_npeers);

        *peerp = peer;
        return 0;
}
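
/*
 * Usage sketch (illustrative only) of the locking protocol described above
 * kgnilnd_create_peer_safe():
 *
 *	down_read(&kgnilnd_data.kgn_net_rw_sem);
 *	rc = kgnilnd_create_peer_safe(&peer, nid, net, node_state);
 *	if (rc == 0) {
 *		write_lock(&kgnilnd_data.kgn_peer_conn_lock);
 *		// e.g. kgnilnd_add_peer_locked(), per the comment above
 *		write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
 *	}
 *	up_read(&kgnilnd_data.kgn_net_rw_sem);
 */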

void
kgnilnd_destroy_peer(kgn_peer_t *peer)
{
        CDEBUG(D_NET, "peer %s %p deleted\n",
               libcfs_nid2str(peer->gnp_nid), peer);
        LASSERTF(atomic_read(&peer->gnp_refcount) == 0,
                 "peer 0x%p->%s refs %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid),
                 atomic_read(&peer->gnp_refcount));
        LASSERTF(atomic_read(&peer->gnp_dirty_eps) == 0,
                 "peer 0x%p->%s dirty eps %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid),
                 atomic_read(&peer->gnp_dirty_eps));
        LASSERTF(peer->gnp_net != NULL, "peer %px (%s) with NULL net\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(!kgnilnd_peer_active(peer),
                 "peer 0x%p->%s\n",
                peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE || peer->gnp_connecting == GNILND_PEER_KILL,
                 "peer 0x%p->%s, connecting %d\n",
                peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
        LASSERTF(list_empty(&peer->gnp_conns),
                 "peer 0x%p->%s\n",
                peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_tx_queue),
                 "peer 0x%p->%s\n",
                peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_connd_list),
                 "peer 0x%p->%s\n",
                peer, libcfs_nid2str(peer->gnp_nid));

        /* NB a peer's connections keep a reference on their peer until
         * they are destroyed, so we can be assured that _all_ state to do
         * with this peer has been cleaned up when its refcount drops to
         * zero. */

        atomic_dec(&kgnilnd_data.kgn_npeers);
        kgnilnd_net_decref(peer->gnp_net);

        LIBCFS_FREE(peer, sizeof(*peer));
}

/* the conn might not have made it all the way through to a connected
 * state - but we need to purgatory any conn that a remote peer might
 * have seen through a posted dgram as well */
void
kgnilnd_add_purgatory_locked(kgn_conn_t *conn, kgn_peer_t *peer)
{
        kgn_mbox_info_t *mbox = NULL;
        ENTRY;

        /* NB - the caller should own the conn by removing it from the
         * scheduler thread when finishing the close */

        LASSERTF(peer != NULL, "conn %px with NULL peer\n", conn);

        /* If this is still true, need to add the calls to unlink back in and
         * figure out how to close the hole on loopback conns */
        LASSERTF(kgnilnd_peer_active(peer),
                "can't use inactive peer %s (%px) we'll never recover the resources\n",
                 libcfs_nid2str(peer->gnp_nid), peer);

        CDEBUG(D_NET, "conn %p peer %p dev %p\n", conn, peer,
                conn->gnc_device);

        LASSERTF(conn->gnc_in_purgatory == 0,
                "Conn already in purgatory\n");
        conn->gnc_in_purgatory = 1;

        mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
        mbox->mbx_prev_purg_nid = peer->gnp_nid;
        mbox->mbx_add_purgatory = jiffies;
        kgnilnd_release_mbox(conn, 1);

        LASSERTF(list_empty(&conn->gnc_mdd_list),
                "conn 0x%p->%s with active purgatory hold MDD %d\n",
                conn, libcfs_nid2str(peer->gnp_nid),
                kgnilnd_count_list(&conn->gnc_mdd_list));

        EXIT;
}

/* Instead of detaching everything from purgatory here we just mark the conn as needing
 * detach; when the reaper checks the conn the next time it will detach it.
 * Calling function requires write_lock held on kgn_peer_conn_lock
 */
void
kgnilnd_mark_for_detach_purgatory_all_locked(kgn_peer_t *peer)
{
        kgn_conn_t *conn;

        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_in_purgatory && !conn->gnc_needs_detach) {
                        conn->gnc_needs_detach = 1;
                        kgnilnd_admin_addref(kgnilnd_data.kgn_npending_detach);
                }
        }
}

/* Calling function needs a write_lock held on kgn_peer_conn_lock */
void
kgnilnd_detach_purgatory_locked(kgn_conn_t *conn, struct list_head *conn_list)
{
        kgn_mbox_info_t *mbox = NULL;

        /* if needed, add the conn purgatory data to the list passed in */
        if (conn->gnc_in_purgatory) {
                CDEBUG(D_NET, "peer %p->%s purg_conn %p@%s mdd_list #tx %d\n",
                        conn->gnc_peer, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                        conn, kgnilnd_conn_state2str(conn),
                        kgnilnd_count_list(&conn->gnc_mdd_list));

                mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
                mbox->mbx_detach_of_purgatory = jiffies;

                /* conn->gnc_list is the entry point on peer->gnp_conns, so detaching it
                 * here removes it from the list of 'valid' peer connections.
                 * We put the current conn onto a list of conns to pass to
                 * kgnilnd_release_purgatory_locked(), and as such the caller of
                 * kgnilnd_detach_purgatory_locked() now owns that conn, since it's not
                 * on the peer's conn_list anymore.
                 */
                list_del_init(&conn->gnc_list);

                /* NB - only unlinking if we set pending in del_peer_locked from admin or
                 * shutdown */
                if (kgnilnd_peer_active(conn->gnc_peer) &&
                    conn->gnc_peer->gnp_pending_unlink &&
                    kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
                        kgnilnd_unlink_peer_locked(conn->gnc_peer);
                }
                /* The reaper will not call detach unless the conn is fully through
                 * kgnilnd_complete_closed_conn. If the conn is not in a DONE state
                 * somehow, we are attempting to detach even though the conn has not
                 * been fully cleaned up. If we detach while the conn is still closing
                 * we will end up with an orphaned connection that has a valid
                 * ep_handle but is not on a peer.
                 */
                LASSERTF(conn->gnc_state == GNILND_CONN_DONE,
                        "Conn in invalid state %px@%s\n",
                         conn, kgnilnd_conn_state2str(conn));

                /* move from peer to the delayed release list */
                list_add_tail(&conn->gnc_list, conn_list);
        }
}

void
kgnilnd_release_purgatory_list(struct list_head *conn_list)
{
        kgn_device_t        *dev;
        kgn_conn_t          *conn, *connN;
        kgn_mdd_purgatory_t *gmp, *gmpN;

        list_for_each_entry_safe(conn, connN, conn_list, gnc_list) {
                dev = conn->gnc_device;

                kgnilnd_release_mbox(conn, -1);
                conn->gnc_in_purgatory = 0;

                list_del_init(&conn->gnc_list);

                /* gnc_needs_detach is set in kgnilnd_del_conn_or_peer. It is used to keep track
                 * of conns that have been marked for detach by kgnilnd_del_conn_or_peer.
                 * The function uses kgn_npending_detach to verify the conn has
                 * actually been detached.
                 */
                if (conn->gnc_needs_detach)
                        kgnilnd_admin_decref(kgnilnd_data.kgn_npending_detach);

                /* if this guy is really dead (we are doing release from reaper),
                 * make sure we tell LNet - if this is from other context,
                 * the checks in the function will prevent an errant
                 * notification */
                kgnilnd_peer_notify(conn->gnc_peer, conn->gnc_error, 0);

                list_for_each_entry_safe(gmp, gmpN, &conn->gnc_mdd_list,
                                         gmp_list) {
                        CDEBUG(D_NET,
                               "dev %p releasing held mdd %#llx.%#llx\n",
                               conn->gnc_device, gmp->gmp_map_key.qword1,
                               gmp->gmp_map_key.qword2);

                        atomic_dec(&dev->gnd_n_mdd_held);
                        kgnilnd_mem_mdd_release(conn->gnc_device->gnd_handle,
                                                &gmp->gmp_map_key);
                        /* ignoring the return code - if kgni/ghal can't find it
                         * it must be released already */

                        list_del_init(&gmp->gmp_list);
                        LIBCFS_FREE(gmp, sizeof(*gmp));
                }
                /* lose conn ref for purgatory */
                kgnilnd_conn_decref(conn);
        }
}
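
/*
 * Purgatory release sketch (illustrative only): detach under the write
 * lock, then release the collected conns outside it:
 *
 *	LIST_HEAD(souls);
 *
 *	write_lock(&kgnilnd_data.kgn_peer_conn_lock);
 *	kgnilnd_detach_purgatory_locked(conn, &souls);
 *	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
 *
 *	kgnilnd_release_purgatory_list(&souls);
 */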
1286
1287 /* needs write_lock on kgnilnd_data.kgn_peer_conn_lock held */
1288 void
1289 kgnilnd_peer_increase_reconnect_locked(kgn_peer_t *peer)
1290 {
1291         int current_to;
1292
1293         current_to = peer->gnp_reconnect_interval;
1294
1295         /* we'll try to reconnect fast the first time, then back-off */
1296         if (current_to == 0) {
1297                 peer->gnp_reconnect_time = jiffies - 1;
1298                 current_to = *kgnilnd_tunables.kgn_min_reconnect_interval;
1299         } else {
1300                 peer->gnp_reconnect_time = jiffies + cfs_time_seconds(current_to);
1301                 /* add 50% of min timeout & retry */
1302                 current_to += *kgnilnd_tunables.kgn_min_reconnect_interval / 2;
1303         }
1304
1305         current_to = min(current_to,
1306                          *kgnilnd_tunables.kgn_max_reconnect_interval);
1307
1308         peer->gnp_reconnect_interval = current_to;
1309         CDEBUG(D_NET, "peer %s can reconnect at %lu interval %lu\n",
1310                libcfs_nid2str(peer->gnp_nid), peer->gnp_reconnect_time,
1311                peer->gnp_reconnect_interval);
1312 }
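/* Worked example of the backoff above (hypothetical tunables: min
 * reconnect interval 10s, max 60s): successive calls yield intervals
 * 10, 15, 20, 25, ... with the value clamped at 60 once it passes
 * kgn_max_reconnect_interval. */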
1313
1314 /* needs kgnilnd_data.kgn_peer_conn_lock held */
1315 kgn_peer_t *
1316 kgnilnd_find_peer_locked(lnet_nid_t nid)
1317 {
1318         struct list_head *peer_list = kgnilnd_nid2peerlist(nid);
1319         kgn_peer_t       *peer;
1320
1321         /* Chop the nid down to just the NIDADDR via LNET_NIDADDR so we
1322          * keep a single peer per device instead of one per nid/net combo.
1323          */
1324
1325         list_for_each_entry(peer, peer_list, gnp_list) {
1326                 if (LNET_NIDADDR(nid) != LNET_NIDADDR(peer->gnp_nid))
1327                         continue;
1328
1329                 CDEBUG(D_NET, "got peer [%p] -> %s c %d (%d)\n",
1330                        peer, libcfs_nid2str(nid),
1331                        peer->gnp_connecting,
1332                        atomic_read(&peer->gnp_refcount));
1333                 return peer;
1334         }
1335         return NULL;
1336 }
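/* Example (hypothetical NIDs): with the LNET_NIDADDR masking above,
 * 192.168.0.1@gni and 192.168.0.1@gni1 match the same kgn_peer_t,
 * since only the address bits are compared and the net is ignored. */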
1337
1338 /* need write_lock on kgn_peer_conn_lock */
1339 void
1340 kgnilnd_unlink_peer_locked(kgn_peer_t *peer)
1341 {
1342         LASSERTF(list_empty(&peer->gnp_conns),
1343                 "peer 0x%p->%s\n",
1344                  peer, libcfs_nid2str(peer->gnp_nid));
1345         LASSERTF(list_empty(&peer->gnp_tx_queue),
1346                 "peer 0x%p->%s\n",
1347                  peer, libcfs_nid2str(peer->gnp_nid));
1348         LASSERTF(kgnilnd_peer_active(peer),
1349                 "peer 0x%p->%s\n",
1350                  peer, libcfs_nid2str(peer->gnp_nid));
1351         CDEBUG(D_NET, "unlinking peer 0x%p->%s\n",
1352                 peer, libcfs_nid2str(peer->gnp_nid));
1353
1354         list_del_init(&peer->gnp_list);
1355         kgnilnd_data.kgn_peer_version++;
1356         kgnilnd_admin_decref(kgnilnd_data.kgn_npending_unlink);
1357         /* lose peerlist's ref */
1358         kgnilnd_peer_decref(peer);
1359 }
1360
1361 int
1362 kgnilnd_get_peer_info(int index,
1363                       kgn_peer_t **found_peer,
1364                       lnet_nid_t *id, __u32 *nic_addr,
1365                       int *refcount, int *connecting)
1366 {
1367         kgn_peer_t        *peer;
1368         int               i;
1369         int               rc = -ENOENT;
1370
1371         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1372
1373         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1374                 list_for_each_entry(peer, &kgnilnd_data.kgn_peers[i], gnp_list) {
1375                         if (index-- > 0)
1376                                 continue;
1377
1378                         CDEBUG(D_NET, "found peer %p (%s) at index %d\n",
1379                                peer, libcfs_nid2str(peer->gnp_nid), index);
1380
1381                         *found_peer  = peer;
1382                         *id          = peer->gnp_nid;
1383                         *nic_addr    = peer->gnp_host_id;
1384                         *refcount    = atomic_read(&peer->gnp_refcount);
1385                         *connecting  = peer->gnp_connecting;
1386
1387                         rc = 0;
1388                         goto out;
1389                 }
1390         }
1391 out:
1392         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1393         if (rc)
1394                 CDEBUG(D_NET, "no gni peer at index %d\n", index);
1395         return rc;
1396 }
1397
1398 /* requires write_lock on kgn_peer_conn_lock held */
1399 void
1400 kgnilnd_add_peer_locked(lnet_nid_t nid, kgn_peer_t *new_stub_peer, kgn_peer_t **peerp)
1401 {
1402         kgn_peer_t        *peer, *peer2;
1403
1404         LASSERTF(new_stub_peer != NULL, "bad stub peer for nid %s\n",
1405                  libcfs_nid2str(nid));
1406
1407         peer2 = kgnilnd_find_peer_locked(nid);
1408         if (peer2 != NULL) {
1409                 /* A peer was created during the lock transition, so drop
1410                  * the new one we created */
1411                 kgnilnd_peer_decref(new_stub_peer);
1412                 peer = peer2;
1413         } else {
1414                 peer = new_stub_peer;
1415                 /* peer table takes existing ref on peer */
1416
1417                 LASSERTF(!kgnilnd_peer_active(peer),
1418                         "peer 0x%p->%s already in peer table\n",
1419                         peer, libcfs_nid2str(peer->gnp_nid));
1420                 list_add_tail(&peer->gnp_list,
1421                               kgnilnd_nid2peerlist(nid));
1422                 kgnilnd_data.kgn_peer_version++;
1423         }
1424
1425         LASSERTF(peer->gnp_net != NULL, "peer 0x%p->%s with NULL net\n",
1426                  peer, libcfs_nid2str(peer->gnp_nid));
1427         *peerp = peer;
1428 }
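/* Usage sketch (illustrative, mirroring kgnilnd_add_peer() below): the
 * stub peer is allocated while the write lock is not held, then
 * re-checked under the lock:
 *
 *	rc = kgnilnd_create_peer_safe(&stub, nid, net, node_state);
 *	write_lock(&kgnilnd_data.kgn_peer_conn_lock);
 *	kgnilnd_add_peer_locked(nid, stub, &peer);
 *	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
 *
 * If another thread won the race, the stub is dropped and the existing
 * peer is returned in *peerp. */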
1429
1430 int
1431 kgnilnd_add_peer(kgn_net_t *net, lnet_nid_t nid, kgn_peer_t **peerp)
1432 {
1433         kgn_peer_t        *peer;
1434         int                rc;
1435         int                node_state;
1436         ENTRY;
1437
1438         if (nid == LNET_NID_ANY)
1439                 return -EINVAL;
1440
1441         node_state = kgnilnd_get_node_state(LNET_NIDADDR(nid));
1442
1443         /* NB - this will not block during normal operations -
1444          * the only writer of this is in the startup/shutdown path. */
1445         rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
1446         if (!rc) {
1447                 rc = -ESHUTDOWN;
1448                 RETURN(rc);
1449         }
1450         rc = kgnilnd_create_peer_safe(&peer, nid, net, node_state);
1451         if (rc != 0) {
1452                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1453                 RETURN(rc);
1454         }
1455
1456         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1457         up_read(&kgnilnd_data.kgn_net_rw_sem);
1458
1459         kgnilnd_add_peer_locked(nid, peer, peerp);
1460
1461         CDEBUG(D_NET, "peer 0x%p->%s connecting %d\n",
1462                *peerp, libcfs_nid2str((*peerp)->gnp_nid),
1463                (*peerp)->gnp_connecting);
1464
1465         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1466         RETURN(0);
1467 }
1468
1469 /* needs write_lock on kgn_peer_conn_lock */
1470 void
1471 kgnilnd_cancel_peer_connect_locked(kgn_peer_t *peer, struct list_head *zombies)
1472 {
1473         kgn_tx_t        *tx, *txn;
1474
1475         /* we do care about the state of gnp_connecting - we could be
1476          * between reconnect attempts, so try to find the dgram and cancel
1477          * the TX anyway. If we are in the process of posting, DON'T do
1478          * anything; once it fails or succeeds we can nuke the connect
1479          * attempt. We have no idea where in kgnilnd_post_dgram we are, so
1480          * we can't attempt to cancel until the function is done.
1481          */
1482
1483         /* make sure the peer isn't connecting or waiting for a connect */
1484         spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1485         if (!(list_empty(&peer->gnp_connd_list))) {
1486                 list_del_init(&peer->gnp_connd_list);
1487                 /* remove connd ref */
1488                 kgnilnd_peer_decref(peer);
1489         }
1490         spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1491
1492         if (peer->gnp_connecting == GNILND_PEER_POSTING || peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH) {
1493                 peer->gnp_connecting = GNILND_PEER_NEEDS_DEATH;
1494                 /* a post is in flight; setting NEEDS_DEATH tells the poster
1495                  * to cancel the connect when it finishes - done for now */
1496         } else {
1497                 /* no need for an exchange - we hold the peer lock and it's ready to nuke */
1498                 LASSERTF(peer->gnp_connecting != GNILND_PEER_POSTING,
1499                         "Peer in invalid state 0x%p->%s, connecting %d\n",
1500                         peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
1501                 peer->gnp_connecting = GNILND_PEER_IDLE;
1502                 set_mb(peer->gnp_last_dgram_errno, -ETIMEDOUT);
1503                 kgnilnd_find_and_cancel_dgram(peer->gnp_net->gnn_dev,
1504                                               peer->gnp_nid);
1505         }
1506
1507         /* The least we can do is nuke the TXs no matter what... */
1508         list_for_each_entry_safe(tx, txn, &peer->gnp_tx_queue, tx_list) {
1509                 kgnilnd_tx_del_state_locked(tx, peer, NULL,
1510                                            GNILND_TX_ALLOCD);
1511                 list_add_tail(&tx->tx_list, zombies);
1512         }
1513 }
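/* NB: the TXs moved to 'zombies' are not completed here; the caller
 * finishes them outside the lock via kgnilnd_txlist_done(), as
 * kgnilnd_del_conn_or_peer() does below. */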
1514
1515 /* needs write_lock on kgn_peer_conn_lock */
1516 void
1517 kgnilnd_del_peer_locked(kgn_peer_t *peer, int error)
1518 {
1519         /* this peer could be passive and only held for purgatory,
1520          * take a ref to ensure it doesn't disappear in this function */
1521         kgnilnd_peer_addref(peer);
1522
1523         CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);
1524
1525         /* if purgatory release cleared it out, don't try again */
1526         if (kgnilnd_peer_active(peer)) {
1527                 /* always do this to allow kgnilnd_start_connect and
1528                  * kgnilnd_finish_connect to catch this before they
1529                  * wrap up their operations */
1530                 if (kgnilnd_can_unlink_peer_locked(peer)) {
1531                         /* already released purgatory, so only active
1532                          * conns hold it */
1533                         kgnilnd_unlink_peer_locked(peer);
1534                 } else {
1535                         kgnilnd_close_peer_conns_locked(peer, error);
1536                         /* peer unlinks itself when last conn is closed */
1537                 }
1538         }
1539
1540         /* we are done, release back to the wild */
1541         kgnilnd_peer_decref(peer);
1542 }
1543
1544 int
1545 kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
1546                           int error)
1547 {
1548         LIST_HEAD(zombies);
1550         kgn_peer_t *peer, *pnxt;
1551         int                     lo;
1552         int                     hi;
1553         int                     i;
1554         int                     rc = -ENOENT;
1555
1556         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1557
1558         if (nid != LNET_NID_ANY)
1559                 lo = hi = kgnilnd_nid2peerlist(nid) - kgnilnd_data.kgn_peers;
1560         else {
1561                 lo = 0;
1562                 hi = *kgnilnd_tunables.kgn_peer_hash_size - 1;
1563                 /* wildcards always succeed */
1564                 rc = 0;
1565         }
1566
1567         for (i = lo; i <= hi; i++) {
1568                 list_for_each_entry_safe(peer, pnxt, &kgnilnd_data.kgn_peers[i],
1569                                          gnp_list) {
1570                         LASSERTF(peer->gnp_net != NULL,
1571                                 "peer %px (%s) with NULL net\n",
1572                                  peer, libcfs_nid2str(peer->gnp_nid));
1573
1574                         if (net != NULL && peer->gnp_net != net)
1575                                 continue;
1576
1577                         if (!(nid == LNET_NID_ANY || LNET_NIDADDR(peer->gnp_nid) == LNET_NIDADDR(nid)))
1578                                 continue;
1579
1580                         /* In both cases, we want to stop any in-flight
1581                          * connect attempts */
1582                         kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1583
1584                         switch (command) {
1585                         case GNILND_DEL_CONN:
1586                                 kgnilnd_close_peer_conns_locked(peer, error);
1587                                 break;
1588                         case GNILND_DEL_PEER:
1589                                 peer->gnp_pending_unlink = 1;
1590                                 kgnilnd_admin_addref(kgnilnd_data.kgn_npending_unlink);
1591                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1592                                 kgnilnd_del_peer_locked(peer, error);
1593                                 break;
1594                         case GNILND_CLEAR_PURGATORY:
1595                                 /* mark everything ready for detach; the reaper
1596                                  * cleans up once we release kgn_peer_conn_lock
1597                                  */
1598                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1599                                 peer->gnp_last_errno = -EISCONN;
1600                                 /* clear the backoff so the peer can reconnect soon */
1601                                 peer->gnp_reconnect_time = 0;
1602                                 peer->gnp_reconnect_interval = 0;
1603                                 break;
1604                         default:
1605                                 CERROR("bad command %d\n", command);
1606                                 LBUG();
1607                         }
1608                         /* we matched something */
1609                         rc = 0;
1610                 }
1611         }
1612
1613         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1614
1615         /* nuke peer TX */
1616         kgnilnd_txlist_done(&zombies, error);
1617
1618         /* This function normally does not return until the commands it
1619          * initiated have completed, since they must work their way through
1620          * other threads. During shutdown, threads are not woken until after
1621          * this call starts, so we cannot wait and must just return; the
1622          * same applies to stack reset, where the reset thread handles closing.
1623          */
1624
1625         CFS_RACE(CFS_FAIL_GNI_RACE_RESET);
1626
1627         if (error == -ENOTRECOVERABLE || error == -ESHUTDOWN) {
1628                 return rc;
1629         }
1630
1631         wait_var_event_warning(&kgnilnd_data,
1632                                !atomic_read(&kgnilnd_data.kgn_npending_conns) &&
1633                                !atomic_read(&kgnilnd_data.kgn_npending_detach) &&
1634                                !atomic_read(&kgnilnd_data.kgn_npending_unlink),
1635                                "Waiting on %d peers %d closes %d detaches\n",
1636                                 atomic_read(&kgnilnd_data.kgn_npending_unlink),
1637                                 atomic_read(&kgnilnd_data.kgn_npending_conns),
1638                                 atomic_read(&kgnilnd_data.kgn_npending_detach));
1639
1640         return rc;
1641 }
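/* Counter handshake (summary): kgnilnd_close_peer_conns_locked() bumps
 * kgn_npending_conns per conn it closes, GNILND_DEL_PEER bumps
 * kgn_npending_unlink, and purgatory detach is accounted in
 * kgn_npending_detach; other threads decrement the counters as the
 * work completes, and wait_var_event_warning() above blocks until all
 * three drain back to zero. */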
1642
1643 kgn_conn_t *
1644 kgnilnd_get_conn_by_idx(int index)
1645 {
1646         kgn_peer_t        *peer;
1647         kgn_conn_t        *conn;
1648         int                i;
1649
1651         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1652                 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1653                 list_for_each_entry(peer, &kgnilnd_data.kgn_peers[i], gnp_list) {
1654                         list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
1655                                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1656                                         continue;
1657
1658                                 if (index-- > 0)
1659                                         continue;
1660
1661                                 CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
1662                                        libcfs_nid2str(conn->gnc_peer->gnp_nid),
1663                                        atomic_read(&conn->gnc_refcount));
1664                                 kgnilnd_conn_addref(conn);
1665                                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1666                                 return conn;
1667                         }
1668                 }
1669                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1670         }
1671
1672         return NULL;
1673 }
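/* Usage sketch (illustrative): each conn returned carries a reference
 * the caller must drop:
 *
 *	int idx;
 *	kgn_conn_t *conn;
 *
 *	for (idx = 0; (conn = kgnilnd_get_conn_by_idx(idx)); idx++) {
 *		... inspect conn ...
 *		kgnilnd_conn_decref(conn);
 *	}
 */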
1674
1675 int
1676 kgnilnd_get_conn_info(kgn_peer_t *peer,
1677                       int *device_id, __u64 *peerstamp,
1678                       int *tx_seq, int *rx_seq,
1679                       int *fmaq_len, int *nfma, int *nrdma)
1680 {
1681         kgn_conn_t        *conn;
1682         int               rc = 0;
1683
1684         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1685
1686         conn = kgnilnd_find_conn_locked(peer);
1687         if (conn == NULL) {
1688                 rc = -ENOENT;
1689                 goto out;
1690         }
1691
1692         *device_id = conn->gnc_device->gnd_host_id;
1693         *peerstamp = conn->gnc_peerstamp;
1694         *tx_seq = atomic_read(&conn->gnc_tx_seq);
1695         *rx_seq = atomic_read(&conn->gnc_rx_seq);
1696         *fmaq_len = kgnilnd_count_list(&conn->gnc_fmaq);
1697         *nfma = atomic_read(&conn->gnc_nlive_fma);
1698         *nrdma = atomic_read(&conn->gnc_nlive_rdma);
1699 out:
1700         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1701         return rc;
1702 }
1703
1704 /* needs write_lock on kgn_peer_conn_lock */
1705 int
1706 kgnilnd_close_peer_conns_locked(kgn_peer_t *peer, int why)
1707 {
1708         kgn_conn_t         *conn;
1709         struct list_head   *ctmp, *cnxt;
1710         int                 count = 0;
1711
1712         list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
1713                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
1714
1715                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1716                         continue;
1717
1718                 count++;
1719                 /* we mark gnc_needs_closing and increment kgn_npending_conns so
1720                  * that kgnilnd_del_conn_or_peer can wait on the other threads
1721                  * closing and cleaning up the connection.
1722                  */
1723                 if (!conn->gnc_needs_closing) {
1724                         conn->gnc_needs_closing = 1;
1725                         kgnilnd_admin_addref(kgnilnd_data.kgn_npending_conns);
1726                 }
1727                 kgnilnd_close_conn_locked(conn, why);
1728         }
1729         return count;
1730 }
1731
1732 int
1733 kgnilnd_report_node_state(lnet_nid_t nid, int down)
1734 {
1735         int         rc;
1736         kgn_peer_t  *peer, *new_peer;
1737         LIST_HEAD(zombies);
1738
1739         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1740         peer = kgnilnd_find_peer_locked(nid);
1741
1742         if (peer == NULL) {
1743                 int       i;
1744                 int       found_net = 0;
1745                 kgn_net_t *net;
1746
1747                 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1748
1749                 /* Don't add a peer for node up events */
1750                 if (down == GNILND_PEER_UP)
1751                         return 0;
1752
1753                 /* find any valid net - we don't care which one... */
1754                 down_read(&kgnilnd_data.kgn_net_rw_sem);
1755                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
1756                         list_for_each_entry(net, &kgnilnd_data.kgn_nets[i],
1757                                             gnn_list) {
1758                                 found_net = 1;
1759                                 break;
1760                         }
1761
1762                         if (found_net) {
1763                                 break;
1764                         }
1765                 }
1766                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1767
1768                 if (!found_net) {
1769                         CNETERR("Could not find a net for nid %lld\n", nid);
1770                         return 1;
1771                 }
1772
1773                 /* The nid passed in does not yet contain the net portion.
1774                  * Let's build it up now
1775                  */
1776                 nid = LNET_MKNID(LNET_NID_NET(&net->gnn_ni->ni_nid), nid);
1777                 rc = kgnilnd_add_peer(net, nid, &new_peer);
1778
1779                 if (rc) {
1780                         CNETERR("Could not add peer for nid %lld, rc %d\n",
1781                                 nid, rc);
1782                         return 1;
1783                 }
1784
1785                 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1786                 peer = kgnilnd_find_peer_locked(nid);
1787
1788                 if (peer == NULL) {
1789                         CNETERR("Could not find peer for nid %lld\n", nid);
1790                         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1791                         return 1;
1792                 }
1793         }
1794
1795         peer->gnp_state = down;
1796
1797         if (down == GNILND_PEER_DOWN) {
1798                 kgn_conn_t *conn;
1799
1800                 peer->gnp_down_event_time = jiffies;
1801                 kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1802                 conn = kgnilnd_find_conn_locked(peer);
1803
1804                 if (conn != NULL) {
1805                         kgnilnd_close_conn_locked(conn, -ENETRESET);
1806                 }
1807         } else {
1808                 peer->gnp_up_event_time = jiffies;
1809         }
1810
1811         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1812
1813         if (down == GNILND_PEER_DOWN) {
1814                 /* using ENETRESET so we don't get messages from
1815                  * kgnilnd_tx_done
1816                  */
1817                 kgnilnd_txlist_done(&zombies, -ENETRESET);
1818                 kgnilnd_peer_notify(peer, -ECONNRESET, 0);
1819                 LCONSOLE_INFO("Received down event for nid %d\n",
1820                               LNET_NIDADDR(nid));
1821         }
1822
1823         return 0;
1824 }
1825
1826 int
1827 kgnilnd_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
1828 {
1829         struct libcfs_ioctl_data *data = arg;
1830         kgn_net_t                *net = ni->ni_data;
1831         int                       rc = -EINVAL;
1832
1833         LASSERT(ni == net->gnn_ni);
1834
1835         switch (cmd) {
1836         case IOC_LIBCFS_GET_PEER: {
1837                 lnet_nid_t   nid = 0;
1838                 kgn_peer_t  *peer = NULL;
1839                 __u32 nic_addr = 0;
1840                 __u64 peerstamp = 0;
1841                 int peer_refcount = 0, peer_connecting = 0;
1842                 int device_id = 0;
1843                 int tx_seq = 0, rx_seq = 0;
1844                 int fmaq_len = 0, nfma = 0, nrdma = 0;
1845
1846                 rc = kgnilnd_get_peer_info(data->ioc_count, &peer,
1847                                            &nid, &nic_addr, &peer_refcount,
1848                                            &peer_connecting);
1849                 if (rc)
1850                         break;
1851
1852                 /* Barf */
1853                 /* LNET_MKNID masks the multiplexing/demultiplexing of conns and peers
1854                  * from LNet, which assumes one conn and peer per net; LNET_MKNID and
1855                  * LNET_NIDADDR let LNet see what it expects instead of the underlying
1856                  * network actually used to send the data */
1857                 data->ioc_nid    = LNET_MKNID(LNET_NID_NET(&ni->ni_nid),
1858                                               LNET_NIDADDR(nid));
1859                 data->ioc_flags  = peer_connecting;
1860                 data->ioc_count  = peer_refcount;
1861
1862                 rc = kgnilnd_get_conn_info(peer, &device_id, &peerstamp,
1863                                            &tx_seq, &rx_seq, &fmaq_len,
1864                                            &nfma, &nrdma);
1865
1866                 /* This is allowable - a persistent peer may simply not
1867                  * have a connection */
1868                 if (rc) {
1869                         /* flag to indicate we are not connected -
1870                          * need to print as such */
1871                         data->ioc_flags |= (1<<16);
1872                         rc = 0;
1873                 } else {
1874                         /* still barf */
1875                         data->ioc_net = device_id;
1876                         data->ioc_u64[0] = peerstamp;
1877                         data->ioc_u32[0] = fmaq_len;
1878                         data->ioc_u32[1] = nfma;
1879                         data->ioc_u32[2] = tx_seq;
1880                         data->ioc_u32[3] = rx_seq;
1881                         data->ioc_u32[4] = nrdma;
1882                 }
1883                 break;
1884         }
1885         case IOC_LIBCFS_ADD_PEER: {
1886                 /* just a dummy value to allow use of the common interface */
1887                 kgn_peer_t      *peer;
1888                 rc = kgnilnd_add_peer(net, data->ioc_nid, &peer);
1889                 break;
1890         }
1891         case IOC_LIBCFS_DEL_PEER: {
1892                 /* a NULL net is passed in so this affects all peers regardless of
1893                  * network, as the peer may not exist on the network LNet believes
1894                  * it to be on. */
1895                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1896                                               GNILND_DEL_PEER, -EUCLEAN);
1897                 break;
1898         }
1899         case IOC_LIBCFS_GET_CONN: {
1900                 kgn_conn_t *conn = kgnilnd_get_conn_by_idx(data->ioc_count);
1901
1902                 if (conn == NULL)
1903                         rc = -ENOENT;
1904                 else {
1905                         rc = 0;
1906                         /* LNET_MKNID is used to build the correct address based on what LNET wants to see instead of
1907                          * the generic connection that is used to send the data
1908                          */
1909                         data->ioc_nid    = LNET_MKNID(LNET_NID_NET(&ni->ni_nid),
1910                                                       LNET_NIDADDR(conn->gnc_peer->gnp_nid));
1911                         data->ioc_u32[0] = conn->gnc_device->gnd_id;
1912                         kgnilnd_conn_decref(conn);
1913                 }
1914                 break;
1915         }
1916         case IOC_LIBCFS_CLOSE_CONNECTION: {
1917                 /* use error = -ENETRESET to indicate it was lctl disconnect */
1918                 /* a NULL net is passed in so this affects all nets, as the conn is
1919                  * virtual and may not exist on the network LNet believes it to be on.
1920                  */
1921                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1922                                               GNILND_DEL_CONN, -ENETRESET);
1923                 break;
1924         }
1925         case IOC_LIBCFS_PUSH_CONNECTION: {
1926                 /* we use this to flush purgatory */
1927                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1928                                               GNILND_CLEAR_PURGATORY, -EUCLEAN);
1929                 break;
1930         }
1931         case IOC_LIBCFS_REGISTER_MYNID: {
1932                 /* Ignore if this is a noop */
1933                 if (data->ioc_nid == lnet_nid_to_nid4(&ni->ni_nid)) {
1934                         rc = 0;
1935                 } else {
1936                         CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
1937                                libcfs_nid2str(data->ioc_nid),
1938                                libcfs_nidstr(&ni->ni_nid));
1939                         rc = -EINVAL;
1940                 }
1941                 break;
1942         }
1943         }
1944
1945         return rc;
1946 }
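/* Quick reference for the ioctl cases above: GET_PEER and GET_CONN
 * iterate by ioc_count index, ADD_PEER creates a stub peer, DEL_PEER
 * tears peers down (-EUCLEAN), CLOSE_CONNECTION closes conns
 * (-ENETRESET) and PUSH_CONNECTION flushes purgatory (-EUCLEAN). */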
1947
1948 int
1949 kgnilnd_dev_init(kgn_device_t *dev)
1950 {
1951         gni_return_t      rrc;
1952         int               rc = 0;
1953         unsigned int      cq_size;
1954         ENTRY;
1955
1956         /* the size of these CQs should be able to accommodate the outgoing
1957          * RDMA and SMSG transactions.  Since we don't really know what we
1958          * need here, we'll take credits * 2 * 3 to allow plenty of headroom.
1959          * We need to dig into this more with the performance work. */
1960         cq_size = *kgnilnd_tunables.kgn_credits * 2 * 3;
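        /* e.g. with a hypothetical kgn_credits of 256, cq_size works
         * out to 1536 entries */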
1961
1962         rrc = kgnilnd_cdm_create(dev->gnd_id, *kgnilnd_tunables.kgn_ptag,
1963                                  *kgnilnd_tunables.kgn_pkey, 0,
1964                                  &dev->gnd_domain);
1965         if (rrc != GNI_RC_SUCCESS) {
1966                 CERROR("Can't create CDM %d (%d)\n", dev->gnd_id, rrc);
1967                 GOTO(failed, rc = -ENODEV);
1968         }
1969
1970         rrc = kgnilnd_cdm_attach(dev->gnd_domain, dev->gnd_id,
1971                                  &dev->gnd_host_id, &dev->gnd_handle);
1972         if (rrc != GNI_RC_SUCCESS) {
1973                 CERROR("Can't attach CDM to device %d (%d)\n",
1974                         dev->gnd_id, rrc);
1975                 GOTO(failed, rc = -ENODEV);
1976         }
1977
1978         /* a bit gross, but not much we can do - Aries Sim doesn't have
1979          * hardcoded NIC/NID that we can use */
1980         rc = kgnilnd_setup_nic_translation(dev->gnd_host_id);
1981         if (rc != 0)
1982                 GOTO(failed, rc = -ENODEV);
1983
1984         /* only dev 0 gets the errors - no need to reset the stack twice
1985          * - this works because we have a single PTAG, if we had more
1986          * then we'd need to have multiple handlers */
1987         if (dev->gnd_id == 0) {
1988                 rrc = kgnilnd_subscribe_errors(dev->gnd_handle,
1989                                                 GNI_ERRMASK_CRITICAL |
1990                                                 GNI_ERRMASK_UNKNOWN_TRANSACTION,
1991                                               0, NULL, kgnilnd_critical_error,
1992                                               &dev->gnd_err_handle);
1993                 if (rrc != GNI_RC_SUCCESS) {
1994                         CERROR("Can't subscribe for errors on device %d: rc %d\n",
1995                                 dev->gnd_id, rrc);
1996                         GOTO(failed, rc = -ENODEV);
1997                 }
1998
1999                 rc = kgnilnd_set_quiesce_callback(dev->gnd_handle,
2000                                                   kgnilnd_quiesce_end_callback);
2001                 if (rc != GNI_RC_SUCCESS) {
2002                         CERROR("Can't subscribe for quiesce callback on device %d: rc %d\n",
2003                                 dev->gnd_id, rc);
2004                         GOTO(failed, rc = -ENODEV);
2005                 }
2006         }
2007
2008         rc = kgnilnd_nicaddr_to_nid(dev->gnd_host_id, &dev->gnd_nid);
2009         if (rc < 0) {
2010                 /* log messages during startup */
2011                 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
2012                         CERROR("couldn't translate host_id 0x%x to nid. rc %d\n",
2013                                 dev->gnd_host_id, rc);
2014                 }
2015                 GOTO(failed, rc = -ESRCH);
2016         }
2017         CDEBUG(D_NET, "NIC %x -> NID %d\n", dev->gnd_host_id, dev->gnd_nid);
2018
2019         rrc = kgnilnd_cq_create(dev->gnd_handle, *kgnilnd_tunables.kgn_credits,
2020                                 0, kgnilnd_device_callback,
2021                                 dev->gnd_id, &dev->gnd_snd_rdma_cqh);
2022         if (rrc != GNI_RC_SUCCESS) {
2023                 CERROR("Can't create rdma send cq size %d for device %d (%d)\n",
2024                        *kgnilnd_tunables.kgn_credits, dev->gnd_id, rrc);
2025                 GOTO(failed, rc = -EINVAL);
2026         }
2027
2028         rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
2029                         0, kgnilnd_device_callback, dev->gnd_id,
2030                         &dev->gnd_snd_fma_cqh);
2031         if (rrc != GNI_RC_SUCCESS) {
2032                 CERROR("Can't create fma send cq size %u for device %d (%d)\n",
2033                        cq_size, dev->gnd_id, rrc);
2034                 GOTO(failed, rc = -EINVAL);
2035         }
2036
2037         /* This one we size differently - overflows are possible and it needs to be
2038          * sized based on machine size */
2039         rrc = kgnilnd_cq_create(dev->gnd_handle,
2040                         *kgnilnd_tunables.kgn_fma_cq_size,
2041                         0, kgnilnd_device_callback, dev->gnd_id,
2042                         &dev->gnd_rcv_fma_cqh);
2043         if (rrc != GNI_RC_SUCCESS) {
2044                 CERROR("Can't create fma cq size %d for device %d (%d)\n",
2045                        *kgnilnd_tunables.kgn_fma_cq_size, dev->gnd_id, rrc);
2046                 GOTO(failed, rc = -EINVAL);
2047         }
2048
2049         rrc = kgnilnd_register_smdd_buf(dev);
2050         if (rrc != GNI_RC_SUCCESS) {
2051                 GOTO(failed, rc = -EINVAL);
2052         }
2053
2054         RETURN(0);
2055
2056 failed:
2057         kgnilnd_dev_fini(dev);
2058         RETURN(rc);
2059 }
2060
2061 void
2062 kgnilnd_dev_fini(kgn_device_t *dev)
2063 {
2064         gni_return_t rrc;
2065         ENTRY;
2066
2067         /* At quiesce or reset time, need to loop through and clear gnd_ready_conns? */
2068         LASSERTF(list_empty(&dev->gnd_ready_conns) &&
2069                  list_empty(&dev->gnd_map_tx) &&
2070                  list_empty(&dev->gnd_rdmaq) &&
2071                  list_empty(&dev->gnd_delay_conns),
2072                  "dev 0x%p ready_conns %d@0x%p delay_conns %d@0x%p "
2073                  "map_tx %d@0x%p rdmaq %d@0x%p\n",
2074                  dev, kgnilnd_count_list(&dev->gnd_ready_conns), &dev->gnd_ready_conns,
2075                  kgnilnd_count_list(&dev->gnd_delay_conns), &dev->gnd_delay_conns,
2076                  kgnilnd_count_list(&dev->gnd_map_tx), &dev->gnd_map_tx,
2077                  kgnilnd_count_list(&dev->gnd_rdmaq), &dev->gnd_rdmaq);
2078
2079         /* These should follow from tearing down all connections */
2080         LASSERTF(dev->gnd_map_nphys == 0 && dev->gnd_map_physnop == 0,
2081                 "%d physical mappings of %d pages still mapped\n",
2082                  dev->gnd_map_nphys, dev->gnd_map_physnop);
2083
2084         LASSERTF(atomic_read(&dev->gnd_n_mdd) == 0 &&
2085                  atomic_read(&dev->gnd_n_mdd_held) == 0 &&
2086                  atomic64_read(&dev->gnd_nbytes_map) == 0,
2087                  "%d SMSG mappings of %lld bytes still mapped or held %d\n",
2088                  atomic_read(&dev->gnd_n_mdd),
2089                  (u64)atomic64_read(&dev->gnd_nbytes_map),
2090                  atomic_read(&dev->gnd_n_mdd_held));
2091
2092         LASSERT(list_empty(&dev->gnd_map_list));
2093
2094         /* What other assertions are needed to ensure all connections are torn down? */
2095
2096         /* check all counters == 0 (EP, MDD, etc) */
2097
2098         /* if we are resetting due to quiesce (stack reset), don't check
2099          * thread states */
2100         LASSERTF(kgnilnd_data.kgn_quiesce_trigger ||
2101                 atomic_read(&kgnilnd_data.kgn_nthreads) == 0,
2102                 "tried to shutdown with threads active\n");
2103
2104         if (dev->gnd_smdd_hold_buf) {
2105                 rrc = kgnilnd_deregister_smdd_buf(dev);
2106                 LASSERTF(rrc == GNI_RC_SUCCESS,
2107                         "bad rc from deregistration of sMDD buffer: %d\n", rrc);
2108                 dev->gnd_smdd_hold_buf = NULL;
2109         }
2110
2111         if (dev->gnd_rcv_fma_cqh) {
2112                 rrc = kgnilnd_cq_destroy(dev->gnd_rcv_fma_cqh);
2113                 LASSERTF(rrc == GNI_RC_SUCCESS,
2114                         "bad rc from gni_cq_destroy on rcv_fma_cqh: %d\n", rrc);
2115                 dev->gnd_rcv_fma_cqh = NULL;
2116         }
2117
2118         if (dev->gnd_snd_rdma_cqh) {
2119                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_rdma_cqh);
2120                 LASSERTF(rrc == GNI_RC_SUCCESS,
2121                         "bad rc from gni_cq_destroy on send_rdma_cqh: %d\n", rrc);
2122                 dev->gnd_snd_rdma_cqh = NULL;
2123         }
2124
2125         if (dev->gnd_snd_fma_cqh) {
2126                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_fma_cqh);
2127                 LASSERTF(rrc == GNI_RC_SUCCESS,
2128                         "bad rc from gni_cq_destroy on snd_fma_cqh: %d\n", rrc);
2129                 dev->gnd_snd_fma_cqh = NULL;
2130         }
2131
2132         if (dev->gnd_err_handle) {
2133                 rrc = kgnilnd_release_errors(dev->gnd_err_handle);
2134                 LASSERTF(rrc == GNI_RC_SUCCESS,
2135                         "bad rc from gni_release_errors: %d\n", rrc);
2136                 dev->gnd_err_handle = NULL;
2137         }
2138
2139         if (dev->gnd_domain) {
2140                 rrc = kgnilnd_cdm_destroy(dev->gnd_domain);
2141                 LASSERTF(rrc == GNI_RC_SUCCESS,
2142                         "bad rc from gni_cdm_destroy: %d\n", rrc);
2143                 dev->gnd_domain = NULL;
2144         }
2145
2146         EXIT;
2147 }
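/* Note: teardown above releases resources in roughly the reverse order
 * of kgnilnd_dev_init() - sMDD buffer, CQs, error subscription, then
 * the communication domain. */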
2148
2149 int kgnilnd_base_startup(void)
2150 {
2151         long long            pkmem = libcfs_kmem_read();
2152         int                  rc;
2153         int                  i, j;
2154         kgn_device_t        *dev;
2155         struct task_struct  *thrd;
2156
2157 #if defined(CONFIG_CRAY_XT) && !defined(CONFIG_CRAY_COMPUTE)
2158         /* limit how much memory can be allocated for fma blocks in
2159          * instances where many nodes need to reconnect at the same time */
2160         struct sysinfo si;
2161         si_meminfo(&si);
2162         kgnilnd_data.free_pages_limit = si.totalram/4;
2163 #endif
2164
2165         ENTRY;
2166
2167         LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_NOTHING,
2168                 "init %d\n", kgnilnd_data.kgn_init);
2169
2170         /* zero pointers, flags etc */
2171         memset(&kgnilnd_data, 0, sizeof(kgnilnd_data));
2172         kgnilnd_check_kgni_version();
2173
2174         /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
2175          * a unique (for all time) connstamp so we can uniquely identify
2176          * the sender.  The connstamp is an incrementing counter
2177          * initialised with seconds + microseconds at startup time.  So we
2178          * rely on NOT creating connections more frequently on average than
2179          * 1MHz to ensure we don't use old connstamps when we reboot. */
2180         kgnilnd_data.kgn_connstamp =
2181                  kgnilnd_data.kgn_peerstamp =
2182                         ktime_get_seconds();
2183
2184         init_rwsem(&kgnilnd_data.kgn_net_rw_sem);
2185
2186         for (i = 0; i < GNILND_MAXDEVS; i++) {
2187                 kgn_device_t  *dev = &kgnilnd_data.kgn_devices[i];
2188
2189                 dev->gnd_id = i;
2190                 INIT_LIST_HEAD(&dev->gnd_ready_conns);
2191                 INIT_LIST_HEAD(&dev->gnd_delay_conns);
2192                 INIT_LIST_HEAD(&dev->gnd_map_tx);
2193                 INIT_LIST_HEAD(&dev->gnd_fma_buffs);
2194                 mutex_init(&dev->gnd_cq_mutex);
2195                 mutex_init(&dev->gnd_fmablk_mutex);
2196                 spin_lock_init(&dev->gnd_fmablk_lock);
2197                 init_waitqueue_head(&dev->gnd_waitq);
2198                 init_waitqueue_head(&dev->gnd_dgram_waitq);
2199                 init_waitqueue_head(&dev->gnd_dgping_waitq);
2200                 spin_lock_init(&dev->gnd_lock);
2201                 INIT_LIST_HEAD(&dev->gnd_map_list);
2202                 spin_lock_init(&dev->gnd_map_lock);
2203                 atomic_set(&dev->gnd_nfmablk, 0);
2204                 atomic_set(&dev->gnd_fmablk_vers, 1);
2205                 atomic_set(&dev->gnd_neps, 0);
2206                 atomic_set(&dev->gnd_canceled_dgrams, 0);
2207                 INIT_LIST_HEAD(&dev->gnd_connd_peers);
2208                 spin_lock_init(&dev->gnd_connd_lock);
2209                 spin_lock_init(&dev->gnd_dgram_lock);
2210                 spin_lock_init(&dev->gnd_rdmaq_lock);
2211                 INIT_LIST_HEAD(&dev->gnd_rdmaq);
2212                 init_rwsem(&dev->gnd_conn_sem);
2213
2214                 /* alloc & setup nid based dgram table */
2215                 CFS_ALLOC_PTR_ARRAY(dev->gnd_dgrams,
2216                                     *kgnilnd_tunables.kgn_peer_hash_size);
2217
2218                 if (dev->gnd_dgrams == NULL)
2219                         GOTO(failed, rc = -ENOMEM);
2220
2221                 for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++) {
2222                         INIT_LIST_HEAD(&dev->gnd_dgrams[j]);
2223                 }
2224                 atomic_set(&dev->gnd_ndgrams, 0);
2225                 atomic_set(&dev->gnd_nwcdgrams, 0);
2226                 /* setup timer for RDMAQ processing */
2227                 cfs_timer_setup(&dev->gnd_rdmaq_timer,
2228                                 kgnilnd_schedule_device_timer,
2229                                 (unsigned long)dev, 0);
2230
2231                 /* setup timer for mapping processing */
2232                 cfs_timer_setup(&dev->gnd_map_timer,
2233                                 kgnilnd_schedule_device_timer,
2234                                 (unsigned long)dev, 0);
2235
2236         }
2237
2238         /* CQID 0 isn't allowed, set to MAX_MSG_ID - 1 to check for conflicts early */
2239         kgnilnd_data.kgn_next_cqid = GNILND_MAX_MSG_ID - 1;
2240         kgnilnd_data.kgn_new_min_timeout = *kgnilnd_tunables.kgn_timeout;
2241         init_waitqueue_head(&kgnilnd_data.kgn_reaper_waitq);
2242         init_waitqueue_head(&kgnilnd_data.kgn_ruhroh_waitq);
2243         spin_lock_init(&kgnilnd_data.kgn_reaper_lock);
2244
2245         mutex_init(&kgnilnd_data.kgn_quiesce_mutex);
2246         atomic_set(&kgnilnd_data.kgn_nquiesce, 0);
2247         atomic_set(&kgnilnd_data.kgn_npending_conns, 0);
2248         atomic_set(&kgnilnd_data.kgn_npending_unlink, 0);
2249         atomic_set(&kgnilnd_data.kgn_npending_detach, 0);
2250         atomic_set(&kgnilnd_data.kgn_rev_offset, 0);
2251         atomic_set(&kgnilnd_data.kgn_rev_length, 0);
2252         atomic_set(&kgnilnd_data.kgn_rev_copy_buff, 0);
2253
2254         /* OK to call kgnilnd_api_shutdown() to cleanup now */
2255         kgnilnd_data.kgn_init = GNILND_INIT_DATA;
2256         if (!try_module_get(THIS_MODULE))
2257                 GOTO(failed, rc = -ENOENT);
2258
2259         rwlock_init(&kgnilnd_data.kgn_peer_conn_lock);
2260
2261         CFS_ALLOC_PTR_ARRAY(kgnilnd_data.kgn_peers,
2262                             *kgnilnd_tunables.kgn_peer_hash_size);
2263
2264         if (kgnilnd_data.kgn_peers == NULL)
2265                 GOTO(failed, rc = -ENOMEM);
2266
2267         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2268                 INIT_LIST_HEAD(&kgnilnd_data.kgn_peers[i]);
2269         }
2270
2271         CFS_ALLOC_PTR_ARRAY(kgnilnd_data.kgn_conns,
2272                             *kgnilnd_tunables.kgn_peer_hash_size);
2273
2274         if (kgnilnd_data.kgn_conns == NULL)
2275                 GOTO(failed, rc = -ENOMEM);
2276
2277         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2278                 INIT_LIST_HEAD(&kgnilnd_data.kgn_conns[i]);
2279         }
2280
2281         CFS_ALLOC_PTR_ARRAY(kgnilnd_data.kgn_nets,
2282                             *kgnilnd_tunables.kgn_net_hash_size);
2283
2284         if (kgnilnd_data.kgn_nets == NULL)
2285                 GOTO(failed, rc = -ENOMEM);
2286
2287         for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
2288                 INIT_LIST_HEAD(&kgnilnd_data.kgn_nets[i]);
2289         }
2290
2291         kgnilnd_data.kgn_mbox_cache =
2292                 kmem_cache_create("kgn_mbox_block", GNILND_MBOX_SIZE, 0,
2293                                   SLAB_HWCACHE_ALIGN, NULL);
2294         if (kgnilnd_data.kgn_mbox_cache == NULL) {
2295                 CERROR("Can't create slab for physical mbox blocks\n");
2296                 GOTO(failed, rc = -ENOMEM);
2297         }
2298
2299         kgnilnd_data.kgn_rx_cache =
2300                 kmem_cache_create("kgn_rx_t", sizeof(kgn_rx_t), 0, 0, NULL);
2301         if (kgnilnd_data.kgn_rx_cache == NULL) {
2302                 CERROR("Can't create slab for kgn_rx_t descriptors\n");
2303                 GOTO(failed, rc = -ENOMEM);
2304         }
2305
2306         kgnilnd_data.kgn_tx_cache =
2307                 kmem_cache_create("kgn_tx_t", sizeof(kgn_tx_t), 0, 0, NULL);
2308         if (kgnilnd_data.kgn_tx_cache == NULL) {
2309                 CERROR("Can't create slab for kgn_tx_t\n");
2310                 GOTO(failed, rc = -ENOMEM);
2311         }
2312
2313         kgnilnd_data.kgn_tx_phys_cache =
2314                 kmem_cache_create("kgn_tx_phys",
2315                                    GNILND_MAX_IOV * sizeof(gni_mem_segment_t),
2316                                    0, 0, NULL);
2317         if (kgnilnd_data.kgn_tx_phys_cache == NULL) {
2318                 CERROR("Can't create slab for kgn_tx_phys\n");
2319                 GOTO(failed, rc = -ENOMEM);
2320         }
2321
2322         kgnilnd_data.kgn_dgram_cache =
2323                 kmem_cache_create("kgn_dgram_t", sizeof(kgn_dgram_t), 0, 0, NULL);
2324         if (kgnilnd_data.kgn_dgram_cache == NULL) {
2325                 CERROR("Can't create slab for outgoing datagrams\n");
2326                 GOTO(failed, rc = -ENOMEM);
2327         }
2328
2329         /* allocate a zeroed per-cpu array of pointers; each slot is filled
2330          * with a GNILND_MAX_IOV page array below */
2331         kgnilnd_data.kgn_cksum_map_pages =
2332                 kcalloc(num_possible_cpus(), sizeof(struct page *),
2333                         GFP_KERNEL);
2334         if (kgnilnd_data.kgn_cksum_map_pages == NULL) {
2335                 CERROR("Can't allocate vmap cksum pages\n");
2336                 GOTO(failed, rc = -ENOMEM);
2337         }
2338         kgnilnd_data.kgn_cksum_npages = num_possible_cpus();
2339
2340         for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
2341                 kgnilnd_data.kgn_cksum_map_pages[i] = kmalloc(GNILND_MAX_IOV * sizeof (struct page *),
2342                                                               GFP_KERNEL);
2343                 if (kgnilnd_data.kgn_cksum_map_pages[i] == NULL) {
2344                         CERROR("Can't allocate vmap cksum pages for cpu %d\n", i);
2345                         GOTO(failed, rc = -ENOMEM);
2346                 }
2347         }
2348
2349         LASSERT(kgnilnd_data.kgn_ndevs == 0);
2350
2351         /* Use all available GNI devices */
2352         for (i = 0; i < GNILND_MAXDEVS; i++) {
2353                 dev = &kgnilnd_data.kgn_devices[kgnilnd_data.kgn_ndevs];
2354
2355                 rc = kgnilnd_dev_init(dev);
2356                 if (rc == 0) {
2357                         /* Increment here so base_shutdown cleans it up */
2358                         kgnilnd_data.kgn_ndevs++;
2359
2360                         rc = kgnilnd_allocate_phys_fmablk(dev);
2361                         if (rc)
2362                                 GOTO(failed, rc);
2363                 }
2364         }
2365
2366         if (kgnilnd_data.kgn_ndevs == 0) {
2367                 CERROR("Can't initialise any GNI devices\n");
2368                 GOTO(failed, rc = -ENODEV);
2369         }
2370
2371         rc = kgnilnd_thread_start(kgnilnd_reaper, NULL, "kgnilnd_rpr", 0);
2372         if (rc != 0) {
2373                 CERROR("Can't spawn gnilnd reaper: %d\n", rc);
2374                 GOTO(failed, rc);
2375         }
2376
2377         rc = kgnilnd_start_rca_thread();
2378         if (rc != 0) {
2379                 CERROR("Can't spawn gnilnd rca: %d\n", rc);
2380                 GOTO(failed, rc);
2381         }
2382
2383         /*
2384          * Start ruhroh thread.  We can't use kgnilnd_thread_start() because
2385          * we don't want this thread included in kgnilnd_data.kgn_nthreads
2386          * count.  This thread controls quiesce, so it mustn't
2387          * quiesce itself.
2388          */
2389         thrd = kthread_run(kgnilnd_ruhroh_thread, NULL, "%s_%02d", "kgnilnd_rr", 0);
2390         if (IS_ERR(thrd)) {
2391                 rc = PTR_ERR(thrd);
2392                 CERROR("Can't spawn gnilnd ruhroh thread: %d\n", rc);
2393                 GOTO(failed, rc);
2394         }
2395
2396         /* threads will load balance across devs as they are available */
2397         if (*kgnilnd_tunables.kgn_thread_affinity) {
2398                 rc = kgnilnd_start_sd_threads();
2399                 if (rc != 0)
2400                         GOTO(failed, rc);
2401         } else {
2402                 for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
2403                         rc = kgnilnd_thread_start(kgnilnd_scheduler,
2404                                                   (void *)((long)i),
2405                                                   "kgnilnd_sd", i);
2406                         if (rc != 0) {
2407                                 CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
2408                                        i, rc);
2409                                 GOTO(failed, rc);
2410                         }
2411                 }
2412         }
2413
2414         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2415                 dev = &kgnilnd_data.kgn_devices[i];
2416                 rc = kgnilnd_thread_start(kgnilnd_dgram_mover, dev,
2417                                           "kgnilnd_dg", dev->gnd_id);
2418                 if (rc != 0) {
2419                         CERROR("Can't spawn gnilnd dgram_mover[%d]: %d\n",
2420                                dev->gnd_id, rc);
2421                         GOTO(failed, rc);
2422                 }
2423
2424                 rc = kgnilnd_thread_start(kgnilnd_dgram_waitq, dev,
2425                                           "kgnilnd_dgn", dev->gnd_id);
2426                 if (rc != 0) {
2427                         CERROR("Can't spawn gnilnd dgram_waitq[%d]: %d\n",
2428                                 dev->gnd_id, rc);
2429                         GOTO(failed, rc);
2430                 }
2431
2432                 rc = kgnilnd_setup_wildcard_dgram(dev);
2433
2434                 if (rc != 0) {
2435                         CERROR("Can't create wildcard dgrams[%d]: %d\n",
2436                                 dev->gnd_id, rc);
2437                         GOTO(failed, rc);
2438                 }
2439         }
2440
2441         /* flag everything initialised */
2442         kgnilnd_data.kgn_init = GNILND_INIT_ALL;
2443         /*****************************************************/
2444
2445         CDEBUG(D_MALLOC, "initial kmem %lld\n", pkmem);
2446         RETURN(0);
2447
2448 failed:
2449         kgnilnd_base_shutdown();
2450         kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
2451         RETURN(rc);
2452 }
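/* Init state machine (summary): kgn_init goes GNILND_INIT_NOTHING ->
 * GNILND_INIT_DATA once the base structures exist (shutdown is safe
 * from that point) and -> GNILND_INIT_ALL once devices and threads are
 * up; kgnilnd_base_shutdown() returns it to GNILND_INIT_NOTHING. */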
2453
2454 void
2455 kgnilnd_base_shutdown(void)
2456 {
2457         int                     i, j;
2458         ENTRY;
2459
2460         while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_PAUSE_SHUTDOWN, 1)) {};
2461
2462         kgnilnd_data.kgn_wc_kill = 1;
2463
2464         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2465                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2466                 kgnilnd_cancel_wc_dgrams(dev);
2467                 kgnilnd_cancel_dgrams(dev);
2468                 kgnilnd_del_conn_or_peer(NULL, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
2469                 kgnilnd_wait_for_canceled_dgrams(dev);
2470         }
2471
2472         /* We need to verify there are no conns left before we let the threads
2473          * shut down otherwise we could clean up the peers but still have
2474          * some outstanding conns due to orphaned datagram conns that are
2475          * being cleaned up.
2476          */
2477         i = 2;
2478         while (atomic_read(&kgnilnd_data.kgn_nconns) != 0) {
2479                 i++;
2480
2481                 for (j = 0; j < kgnilnd_data.kgn_ndevs; ++j) {
2482                         kgn_device_t *dev = &kgnilnd_data.kgn_devices[j];
2483                         kgnilnd_schedule_device(dev);
2484                 }
2485
2486                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2487                        "Waiting for conns to be cleaned up %d\n", atomic_read(&kgnilnd_data.kgn_nconns));
2488                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
2489         }
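        /* (i & (-i)) == i holds only when i is a power of two, so the
         * "Waiting" message above escalates to D_WARNING at iterations
         * 4, 8, 16, ... and stays at D_NET otherwise */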
2490         /* Peer state all cleaned up BEFORE setting shutdown, so threads don't
2491          * have to worry about shutdown races.  NB connections may be created
2492          * while there are still active connds, but these will be temporary
2493          * since peer creation always fails after the listener has started to
2494          * shut down.
2495          * all peers should have been cleared out on the nets */
2496         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2497                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2498
2499         /* Wait for the ruhroh thread to shut down. */
2500         kgnilnd_data.kgn_ruhroh_shutdown = 1;
2501         wake_up(&kgnilnd_data.kgn_ruhroh_waitq);
2502         i = 2;
2503         while (kgnilnd_data.kgn_ruhroh_running != 0) {
2504                 i++;
2505                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2506                        "Waiting for ruhroh thread to terminate\n");
2507                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
2508         }
2509
2510         /* Flag threads to terminate */
2511         kgnilnd_data.kgn_shutdown = 1;
2512
2513         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2514                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2515
2516                 /* should clear all the MDDs */
2517                 kgnilnd_unmap_fma_blocks(dev);
2518
2519                 kgnilnd_schedule_device(dev);
2520                 wake_up(&dev->gnd_dgram_waitq);
2521                 wake_up(&dev->gnd_dgping_waitq);
2522                 LASSERT(list_empty(&dev->gnd_connd_peers));
2523         }
2524
2525         spin_lock(&kgnilnd_data.kgn_reaper_lock);
2526         wake_up(&kgnilnd_data.kgn_reaper_waitq);
2527         spin_unlock(&kgnilnd_data.kgn_reaper_lock);
2528
2529         if (atomic_read(&kgnilnd_data.kgn_nthreads))
2530                 kgnilnd_wakeup_rca_thread();
2531
2532         /* Wait for threads to exit */
2533         i = 2;
2534         while (atomic_read(&kgnilnd_data.kgn_nthreads) != 0) {
2535                 i++;
2536                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2537                        "Waiting for %d threads to terminate\n",
2538                        atomic_read(&kgnilnd_data.kgn_nthreads));
2539                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
2540         }
2541
2542         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2543                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2544
	if (kgnilnd_data.kgn_peers != NULL) {
		for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
			LASSERT(list_empty(&kgnilnd_data.kgn_peers[i]));

		CFS_FREE_PTR_ARRAY(kgnilnd_data.kgn_peers,
				   *kgnilnd_tunables.kgn_peer_hash_size);
	}

	down_write(&kgnilnd_data.kgn_net_rw_sem);
	if (kgnilnd_data.kgn_nets != NULL) {
		for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++)
			LASSERT(list_empty(&kgnilnd_data.kgn_nets[i]));

		CFS_FREE_PTR_ARRAY(kgnilnd_data.kgn_nets,
				   *kgnilnd_tunables.kgn_net_hash_size);
	}
	up_write(&kgnilnd_data.kgn_net_rw_sem);

	LASSERTF(atomic_read(&kgnilnd_data.kgn_nconns) == 0,
		"conns left %d\n", atomic_read(&kgnilnd_data.kgn_nconns));

	if (kgnilnd_data.kgn_conns != NULL) {
		for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
			LASSERT(list_empty(&kgnilnd_data.kgn_conns[i]));

		CFS_FREE_PTR_ARRAY(kgnilnd_data.kgn_conns,
				   *kgnilnd_tunables.kgn_peer_hash_size);
	}

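	/* Per-device teardown: kgnilnd_dev_fini() quiesces the device first;
	 * only once no datagrams remain is it safe to free the dgram hash
	 * and the physical FMA mailbox blocks. */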
	for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
		kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
		kgnilnd_dev_fini(dev);

		LASSERTF(atomic_read(&dev->gnd_ndgrams) == 0,
			"dgrams left %d\n", atomic_read(&dev->gnd_ndgrams));

		if (dev->gnd_dgrams != NULL) {
			int j;

			/* don't reuse 'i' here - it still indexes the
			 * outer device loop */
			for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size;
			     j++)
				LASSERT(list_empty(&dev->gnd_dgrams[j]));

			CFS_FREE_PTR_ARRAY(dev->gnd_dgrams,
					   *kgnilnd_tunables.kgn_peer_hash_size);
		}

		kgnilnd_free_phys_fmablk(dev);
	}

	if (kgnilnd_data.kgn_mbox_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_mbox_cache);

	if (kgnilnd_data.kgn_rx_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_rx_cache);

	if (kgnilnd_data.kgn_tx_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_tx_cache);

	if (kgnilnd_data.kgn_tx_phys_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_tx_phys_cache);

	if (kgnilnd_data.kgn_dgram_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_dgram_cache);

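	/* Free each checksum mapping page and then the pointer array
	 * itself; kfree(NULL) is a no-op, so no per-entry NULL check is
	 * needed. */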
	if (kgnilnd_data.kgn_cksum_map_pages != NULL) {
		for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++)
			kfree(kgnilnd_data.kgn_cksum_map_pages[i]);
		kfree(kgnilnd_data.kgn_cksum_map_pages);
	}

	CDEBUG(D_MALLOC, "after NAL cleanup: kmem %lld\n",
	       libcfs_kmem_read());

	kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
	module_put(THIS_MODULE);

	EXIT;
}

int
kgnilnd_startup(struct lnet_ni *ni)
{
	int               rc, devno;
	kgn_net_t        *net;
	ENTRY;

	LASSERTF(ni->ni_net->net_lnd == &the_kgnilnd,
		"bad LND 0x%p != the_kgnilnd @ 0x%p\n",
		ni->ni_net->net_lnd, &the_kgnilnd);

	if (kgnilnd_data.kgn_init == GNILND_INIT_NOTHING) {
		rc = kgnilnd_base_startup();
		if (rc != 0)
			RETURN(rc);
	}

	/* Serialize with shutdown. */
	mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);

	LIBCFS_ALLOC(net, sizeof(*net));
	if (net == NULL) {
		CERROR("could not allocate net for new interface instance\n");
		/* no need to clean up the CDM... */
		GOTO(failed, rc = -ENOMEM);
	}
	INIT_LIST_HEAD(&net->gnn_list);
	ni->ni_data = net;
	net->gnn_ni = ni;

	kgnilnd_tunables_setup(ni);

	if (!ni->ni_interface) {
		rc = lnet_ni_add_interface(ni, "ipogif0");
		if (rc < 0)
			CWARN("gnilnd failed to allocate ni_interface\n");
	}

	if (*kgnilnd_tunables.kgn_peer_health) {
		int     fudge;
		int     timeout;

		/* give this a bit of leeway - we don't have a hard timeout
		 * as we only check timeouts periodically - see comment in
		 * kgnilnd_reaper */
		fudge = (GNILND_TO2KA(*kgnilnd_tunables.kgn_timeout) / GNILND_REAPER_NCHECKS);
		timeout = *kgnilnd_tunables.kgn_timeout + fudge;
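		/* The fudge adds one reaper check interval of keepalive
		 * slack on top of the gnilnd timeout.  An explicitly
		 * configured peer_timeout must be at least that sum; a
		 * negative value means "unset", in which case the computed
		 * value is used instead. */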
		if (*kgnilnd_tunables.kgn_peer_timeout >= timeout) {
			ni->ni_net->net_tunables.lct_peer_timeout =
				*kgnilnd_tunables.kgn_peer_timeout;
		} else if (*kgnilnd_tunables.kgn_peer_timeout > -1) {
			LCONSOLE_ERROR("Peer_timeout is set to %d but needs to be >= %d\n",
				       *kgnilnd_tunables.kgn_peer_timeout,
				       timeout);
			ni->ni_data = NULL;
			LIBCFS_FREE(net, sizeof(*net));
			GOTO(failed, rc = -EINVAL);
		} else {
			ni->ni_net->net_tunables.lct_peer_timeout = timeout;
		}

		LCONSOLE_INFO("Enabling LNet peer health for gnilnd, timeout %ds\n",
			      ni->ni_net->net_tunables.lct_peer_timeout);
	}

	atomic_set(&net->gnn_refcount, 1);

	/* if we have multiple devices, spread the nets around */
	net->gnn_netnum = LNET_NETNUM(LNET_NID_NET(&ni->ni_nid));

	devno = LNET_NID_NET(&ni->ni_nid) % GNILND_MAXDEVS;
	net->gnn_dev = &kgnilnd_data.kgn_devices[devno];
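	/* Illustration (assuming GNILND_MAXDEVS were 2): even-numbered gni
	 * nets would land on device 0 and odd-numbered nets on device 1,
	 * since LNET_NID_NET() keeps the net number in its low bits. */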

	/* allocate a 'dummy' cdm for datagram use. We can only have a single
	 * datagram between a nid:inst_id and nid2:inst_id. The fake cdm
	 * gives us an additional inst_id to use, allowing the datagrams to
	 * flow like rivers of honey and beer */

	/* the instance id for the cdm is the NETNUM offset by MAXDEVS -
	 * ensuring we'll have a unique id */

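	/* Publish the device's NID address as this NI's address so peers
	 * reach the right Gemini device. */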
	ni->ni_nid.nid_addr[0] =
		cpu_to_be32(LNET_NIDADDR(net->gnn_dev->gnd_nid));
	CDEBUG(D_NET, "adding net %p nid=%s on dev %d\n",
		net, libcfs_nidstr(&ni->ni_nid), net->gnn_dev->gnd_id);

	/* until the net is on gnn_list, we need to clean up after ourselves,
	 * as kgnilnd_shutdown won't find it there */

	down_write(&kgnilnd_data.kgn_net_rw_sem);
	list_add_tail(&net->gnn_list, kgnilnd_netnum2netlist(net->gnn_netnum));
	up_write(&kgnilnd_data.kgn_net_rw_sem);

	/* we need a separate thread to call probe_wait_by_id until
	 * we get a function callback notifier from kgni */
	mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
	RETURN(0);
 failed:
	mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
	kgnilnd_shutdown(ni);
	RETURN(rc);
}

void
kgnilnd_shutdown(struct lnet_ni *ni)
{
	kgn_net_t     *net = ni->ni_data;
	int           i;
	int           rc;
	ENTRY;

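	/* Fault-injection point: with the matching fail_loc armed,
	 * CFS_RACE() stalls one thread here so tests can provoke
	 * startup/shutdown races. */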
	CFS_RACE(CFS_FAIL_GNI_SR_DOWN_RACE);

	LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_ALL,
		"init %d\n", kgnilnd_data.kgn_init);

	/* Serialize with startup. */
	mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
	CDEBUG(D_MALLOC, "before NAL cleanup: kmem %lld\n",
	       libcfs_kmem_read());

	if (net == NULL) {
		CERROR("got NULL net for ni %p\n", ni);
		GOTO(out, rc = -EINVAL);
	}

	LASSERTF(ni == net->gnn_ni,
		"ni %px gnn_ni %px\n", ni, net->gnn_ni);

	ni->ni_data = NULL;

	LASSERT(!net->gnn_shutdown);
	LASSERTF(atomic_read(&net->gnn_refcount) != 0,
		"net %px refcount %d\n",
		net, atomic_read(&net->gnn_refcount));

	if (!list_empty(&net->gnn_list)) {
		/* serialize with peer creation */
		down_write(&kgnilnd_data.kgn_net_rw_sem);
		net->gnn_shutdown = 1;
		up_write(&kgnilnd_data.kgn_net_rw_sem);

		kgnilnd_cancel_net_dgrams(net);

		kgnilnd_del_conn_or_peer(net, LNET_NID_ANY, GNILND_DEL_PEER,
					 -ESHUTDOWN);

		/* if we are quiesced, need to wake up - we need those threads
		 * alive to release peers, etc */
		if (GNILND_IS_QUIESCED) {
			set_mb(kgnilnd_data.kgn_quiesce_trigger,
			       GNILND_QUIESCE_IDLE);
			kgnilnd_quiesce_wait("shutdown");
		}

		kgnilnd_wait_for_canceled_dgrams(net->gnn_dev);

		/* Wait until the net's refcount drops to 1, then release the
		 * final reference (ours); this makes sure everything else is
		 * done before we free the net. */
		i = 4;
		while (atomic_read(&net->gnn_refcount) != 1) {
			i++;
			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
				"Waiting for %d references to clear on net %d\n",
				atomic_read(&net->gnn_refcount),
				net->gnn_netnum);
			schedule_timeout_uninterruptible(cfs_time_seconds(1));
		}

		/* release ref from kgnilnd_startup */
		kgnilnd_net_decref(net);

		/* serialize with reaper and conn_task looping */
		down_write(&kgnilnd_data.kgn_net_rw_sem);
		list_del_init(&net->gnn_list);
		up_write(&kgnilnd_data.kgn_net_rw_sem);
	}

	/* not locking, this can't race with writers */
	LASSERTF(atomic_read(&net->gnn_refcount) == 0,
		"net %px refcount %d\n",
		net, atomic_read(&net->gnn_refcount));
	LIBCFS_FREE(net, sizeof(*net));

out:
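	/* If any bucket in the net hash is still populated another net is
	 * live and we are done; reaching the last bucket with all of them
	 * empty means this was the final net, so tear down the whole LND. */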
	down_read(&kgnilnd_data.kgn_net_rw_sem);
	for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
		if (!list_empty(&kgnilnd_data.kgn_nets[i])) {
			up_read(&kgnilnd_data.kgn_net_rw_sem);
			break;
		}

		if (i == *kgnilnd_tunables.kgn_net_hash_size - 1) {
			up_read(&kgnilnd_data.kgn_net_rw_sem);
			kgnilnd_base_shutdown();
		}
	}
	CDEBUG(D_MALLOC, "after NAL cleanup: kmem %lld\n",
	       libcfs_kmem_read());

	mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
	EXIT;
}

static void __exit kgnilnd_exit(void)
{
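	/* Mirror kgnilnd_init() in reverse: unregister from LNet first so
	 * no new startup can race with the proc/sysctl removal below. */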
	lnet_unregister_lnd(&the_kgnilnd);
	kgnilnd_proc_fini();
	kgnilnd_remove_sysctl();
}

static int __init kgnilnd_init(void)
{
	int    rc;

	rc = kgnilnd_tunables_init();
	if (rc != 0)
		return rc;

	LCONSOLE_INFO("Lustre: kgnilnd build version: "LUSTRE_VERSION_STRING"\n");

	kgnilnd_insert_sysctl();
	kgnilnd_proc_init();

	rc = libcfs_setup();
	if (rc) {
		/* don't leak the proc and sysctl entries created above */
		kgnilnd_proc_fini();
		kgnilnd_remove_sysctl();
		return rc;
	}

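	/* Register with LNet last, once tunables, proc entries and libcfs
	 * are all ready for a startup call. */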
	lnet_register_lnd(&the_kgnilnd);

	return 0;
}

MODULE_AUTHOR("Cray, Inc. <nic@cray.com>");
MODULE_DESCRIPTION("Gemini LNet Network Driver");
MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");

module_init(kgnilnd_init);
module_exit(kgnilnd_exit);