/*
 * Copyright (C) 2012 Cray, Inc.
 *
 * Copyright (c) 2013, 2017, Intel Corporation.
 *
 *   Author: Nic Henke <nic@cray.com>
 *   Author: James Shimek <jshimek@cray.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */
#include "gnilnd.h"

static int
kgnilnd_nl_get(int cmd, struct sk_buff *msg, int type, void *data)
{
        struct lnet_ni *ni = data;

        if (!ni || !msg)
                return -EINVAL;

        if (cmd != LNET_CMD_NETS || type != LNET_NET_LOCAL_NI_ATTR_LND_TUNABLES)
                return -EOPNOTSUPP;

        nla_put_u32(msg, LNET_NET_GNILND_TUNABLES_ATTR_LND_TIMEOUT,
                    kgnilnd_timeout());
        return 0;
}

static int
kgnilnd_nl_set(int cmd, struct nlattr *attr, int type, void *data)
{
        struct lnet_ni *ni = data;

        if (cmd != LNET_CMD_NETS)
                return -EOPNOTSUPP;

        if (nla_type(attr) != LN_SCALAR_ATTR_INT_VALUE)
                return -EINVAL;

        if (type == LNET_NET_GNILND_TUNABLES_ATTR_LND_TIMEOUT) {
                s64 timeout = nla_get_s64(attr);

                ni->ni_lnd_tunables.lnd_tun_u.lnd_gni.lnd_timeout = timeout;
        }

        return 0;
}

/* Primary entry points from LNET.  There are no guarantees against reentrance. */
const struct lnet_lnd the_kgnilnd = {
        .lnd_type       = GNILND,
        .lnd_startup    = kgnilnd_startup,
        .lnd_shutdown   = kgnilnd_shutdown,
        .lnd_ctl        = kgnilnd_ctl,
        .lnd_send       = kgnilnd_send,
        .lnd_recv       = kgnilnd_recv,
        .lnd_eager_recv = kgnilnd_eager_recv,
        .lnd_nl_get     = kgnilnd_nl_get,
        .lnd_nl_set     = kgnilnd_nl_set,
};

kgn_data_t      kgnilnd_data;

int
kgnilnd_thread_start(int(*fn)(void *arg), void *arg, char *name, int id)
{
        struct task_struct *thrd;

        thrd = kthread_run(fn, arg, "%s_%02d", name, id);
        if (IS_ERR(thrd))
                return PTR_ERR(thrd);

        atomic_inc(&kgnilnd_data.kgn_nthreads);
        return 0;
}
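
/*
 * Usage sketch (hypothetical caller; the real call sites live elsewhere
 * in this driver): the wrapper hides kthread_run() and the global thread
 * count, so a caller only needs to check the return code:
 *
 *      rc = kgnilnd_thread_start(kgnilnd_scheduler, (void *)((long)0),
 *                                "kgnilnd_sd", 0);
 *      if (rc != 0)
 *              CERROR("Can't spawn scheduler thread: %d\n", rc);
 */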

/* bind scheduler threads to cpus */
int
kgnilnd_start_sd_threads(void)
{
        int cpu;
        int i = 0;
        struct task_struct *task;

        for_each_online_cpu(cpu) {
                /* don't bind to cpu 0 - all interrupts are processed here */
                if (cpu == 0)
                        continue;

                task = kthread_create(kgnilnd_scheduler, (void *)((long)i),
                                      "%s_%02d", "kgnilnd_sd", i);
                if (!IS_ERR(task)) {
                        kthread_bind(task, cpu);
                        wake_up_process(task);
                } else {
                        CERROR("Can't spawn gnilnd scheduler[%d] %ld\n", i,
                                PTR_ERR(task));
                        return PTR_ERR(task);
                }
                atomic_inc(&kgnilnd_data.kgn_nthreads);

                if (++i >= *kgnilnd_tunables.kgn_sched_threads) {
                        break;
                }
        }

        return 0;
}

/* needs write_lock on kgn_peer_conn_lock */
int
kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
        kgn_conn_t *conn, *cnxt;
        int                 loopback;
        int                 count = 0;

        loopback = (peer->gnp_nid ==
                    lnet_nid_to_nid4(&peer->gnp_net->gnn_ni->ni_nid));

        list_for_each_entry_safe(conn, cnxt, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                if (conn == newconn)
                        continue;

                if (conn->gnc_device != newconn->gnc_device)
                        continue;

                /* This is a two connection loopback - one talking to the other */
                if (loopback &&
                    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
                    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp) {
                        CDEBUG(D_NET, "skipping prune of %p, "
                                "loopback and matching stamps"
                                " connstamp %llu(%llu)"
                                " peerstamp %llu(%llu)\n",
                                conn, newconn->gnc_my_connstamp,
                                conn->gnc_peer_connstamp,
                                newconn->gnc_peer_connstamp,
                                conn->gnc_my_connstamp);
                        continue;
                }

                if (conn->gnc_peerstamp != newconn->gnc_peerstamp) {
                        LASSERTF(conn->gnc_peerstamp < newconn->gnc_peerstamp,
                                "conn 0x%p peerstamp %llu >= "
                                "newconn 0x%p peerstamp %llu\n",
                                conn, conn->gnc_peerstamp,
                                newconn, newconn->gnc_peerstamp);

                        CDEBUG(D_NET, "Closing stale conn nid: %s "
                               " peerstamp:%#llx(%#llx)\n",
                               libcfs_nid2str(peer->gnp_nid),
                               conn->gnc_peerstamp, newconn->gnc_peerstamp);
                } else {
                        LASSERTF(conn->gnc_peer_connstamp < newconn->gnc_peer_connstamp,
                                "conn 0x%p peer_connstamp %llu >= "
                                "newconn 0x%p peer_connstamp %llu\n",
                                conn, conn->gnc_peer_connstamp,
                                newconn, newconn->gnc_peer_connstamp);

                        CDEBUG(D_NET, "Closing stale conn nid: %s"
                               " connstamp:%llu(%llu)\n",
                               libcfs_nid2str(peer->gnp_nid),
                               conn->gnc_peer_connstamp, newconn->gnc_peer_connstamp);
                }

                count++;
                kgnilnd_close_conn_locked(conn, -ESTALE);
        }

        if (count != 0) {
                CWARN("Closed %d stale conns to %s\n", count, libcfs_nid2str(peer->gnp_nid));
        }

        RETURN(count);
}

int
kgnilnd_conn_isdup_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
        kgn_conn_t       *conn;
        int               loopback;
        ENTRY;

        loopback = (peer->gnp_nid ==
                    lnet_nid_to_nid4(&peer->gnp_net->gnn_ni->ni_nid));

        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                CDEBUG(D_NET, "checking conn 0x%p for peer %s"
                        " lo %d new %llu existing %llu"
                        " new peer %llu existing peer %llu"
                        " new dev %p existing dev %p\n",
                        conn, libcfs_nid2str(peer->gnp_nid),
                        loopback,
                        newconn->gnc_peerstamp, conn->gnc_peerstamp,
                        newconn->gnc_peer_connstamp, conn->gnc_peer_connstamp,
                        newconn->gnc_device, conn->gnc_device);

                /* conn is in the process of closing */
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                /* 'newconn' is from an earlier version of 'peer'!!! */
                if (newconn->gnc_peerstamp < conn->gnc_peerstamp)
                        RETURN(1);

                /* 'conn' is from an earlier version of 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->gnc_peerstamp > conn->gnc_peerstamp)
                        continue;

                /* Different devices are OK */
                if (conn->gnc_device != newconn->gnc_device)
                        continue;

                /* It's me connecting to myself */
                if (loopback &&
                    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
                    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp)
                        continue;

                /* 'newconn' is an earlier connection from 'peer'!!! */
                if (newconn->gnc_peer_connstamp < conn->gnc_peer_connstamp)
                        RETURN(2);

                /* 'conn' is an earlier connection from 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->gnc_peer_connstamp > conn->gnc_peer_connstamp)
                        continue;

                /* 'newconn' has the SAME connection stamp; 'peer' isn't
                 * playing the game... */
                RETURN(3);
        }

        RETURN(0);
}
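
/*
 * Return-value summary for kgnilnd_conn_isdup_locked() (derived from the
 * checks above):
 *      0 - 'newconn' is not a duplicate
 *      1 - 'newconn' carries an older peerstamp than an existing conn
 *      2 - 'newconn' carries an older peer connstamp than an existing conn
 *      3 - 'newconn' repeats an existing connstamp - a protocol violation
 */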

int
kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
{
        kgn_conn_t      *conn;
        gni_return_t    rrc;
        int             rc = 0;

        LASSERT(!in_interrupt());
        atomic_inc(&kgnilnd_data.kgn_nconns);

        /* divide by 2 to allow for complete reset and immediate reconnect */
        if (atomic_read(&kgnilnd_data.kgn_nconns) >= GNILND_MAX_CQID/2) {
                CERROR("Too many conns are live: %d > %d\n",
                        atomic_read(&kgnilnd_data.kgn_nconns), GNILND_MAX_CQID/2);
                atomic_dec(&kgnilnd_data.kgn_nconns);
                return -E2BIG;
        }

        LIBCFS_ALLOC(conn, sizeof(*conn));
        if (conn == NULL) {
                atomic_dec(&kgnilnd_data.kgn_nconns);
                return -ENOMEM;
        }

        conn->gnc_tx_ref_table =
                kgnilnd_vzalloc(GNILND_MAX_MSG_ID * sizeof(void *));
        if (conn->gnc_tx_ref_table == NULL) {
                CERROR("Can't allocate conn tx_ref_table\n");
                GOTO(failed, rc = -ENOMEM);
        }

        mutex_init(&conn->gnc_smsg_mutex);
        mutex_init(&conn->gnc_rdma_mutex);
        atomic_set(&conn->gnc_refcount, 1);
        atomic_set(&conn->gnc_reaper_noop, 0);
        atomic_set(&conn->gnc_sched_noop, 0);
        atomic_set(&conn->gnc_tx_in_use, 0);
        INIT_LIST_HEAD(&conn->gnc_list);
        INIT_LIST_HEAD(&conn->gnc_hashlist);
        INIT_LIST_HEAD(&conn->gnc_schedlist);
        INIT_LIST_HEAD(&conn->gnc_fmaq);
        INIT_LIST_HEAD(&conn->gnc_mdd_list);
        INIT_LIST_HEAD(&conn->gnc_delaylist);
        spin_lock_init(&conn->gnc_list_lock);
        spin_lock_init(&conn->gnc_tx_lock);
        conn->gnc_magic = GNILND_CONN_MAGIC;

        /* set tx id to nearly the end to make sure we find wrapping
         * issues soon */
        conn->gnc_next_tx = (int) GNILND_MAX_MSG_ID - 10;

        /* if this fails, we have conflicts and MAX_TX is too large */
        BUILD_BUG_ON(GNILND_MAX_MSG_ID >= GNILND_MSGID_CLOSE);

        /* get a new unique CQ id for this conn */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        conn->gnc_my_connstamp = kgnilnd_data.kgn_connstamp++;
        conn->gnc_cqid = kgnilnd_get_cqid_locked();
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        if (conn->gnc_cqid == 0) {
                CERROR("Could not allocate unique CQ ID for conn 0x%p\n", conn);
                GOTO(failed, rc = -E2BIG);
        }

        CDEBUG(D_NET, "alloc cqid %u for conn 0x%p\n",
                conn->gnc_cqid, conn);

        /* need to be set before gnc_ephandle to allow kgnilnd_destroy_conn_ep to
         * check context */
        conn->gnc_device = dev;

        conn->gnc_timeout = max(*kgnilnd_tunables.kgn_timeout,
                                 GNILND_MIN_TIMEOUT);
        kgnilnd_update_reaper_timeout(conn->gnc_timeout);

        /* this is the ep_handle for doing SMSG & BTE */
        mutex_lock(&dev->gnd_cq_mutex);
        rrc = kgnilnd_ep_create(dev->gnd_handle, dev->gnd_snd_fma_cqh,
                                &conn->gnc_ephandle);
        mutex_unlock(&dev->gnd_cq_mutex);
        if (rrc != GNI_RC_SUCCESS)
                GOTO(failed, rc = -ENETDOWN);

        CDEBUG(D_NET, "created conn 0x%p ep_hndl 0x%p\n",
               conn, conn->gnc_ephandle);

        /* add ref for EP canceling */
        kgnilnd_conn_addref(conn);
        atomic_inc(&dev->gnd_neps);

        *connp = conn;
        return 0;

failed:
        atomic_dec(&kgnilnd_data.kgn_nconns);
        kgnilnd_vfree(conn->gnc_tx_ref_table,
                      GNILND_MAX_MSG_ID * sizeof(void *));
        LIBCFS_FREE(conn, sizeof(*conn));
        return rc;
}
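
/*
 * Illustrative caller sketch (hypothetical; real callers live in the
 * connection setup path): on success the caller owns one reference and a
 * second reference is held for EP cancellation, while on failure the
 * conn has already been freed:
 *
 *      kgn_conn_t *conn;
 *      int rc = kgnilnd_create_conn(&conn, dev);
 *
 *      if (rc != 0)
 *              return rc;      (nothing to clean up - conn was freed)
 */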

/* needs to be called with kgn_peer_conn_lock held (read or write) */
kgn_conn_t *
kgnilnd_find_conn_locked(kgn_peer_t *peer)
{
        kgn_conn_t      *conn = NULL;

        /* if we are in reset, this conn is going to die soon */
        if (unlikely(kgnilnd_data.kgn_in_reset)) {
                RETURN(NULL);
        }

        /* just return the first ESTABLISHED connection */
        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                /* kgnilnd_finish_connect doesn't put connections on the
                 * peer list until they are actually established */
                LASSERTF(conn->gnc_state >= GNILND_CONN_ESTABLISHED,
                        "found conn %p state %s on peer %p (%s)\n",
                        conn, kgnilnd_conn_state2str(conn), peer,
                        libcfs_nid2str(peer->gnp_nid));
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                RETURN(conn);
        }
        RETURN(NULL);
}

/* needs write_lock on kgn_peer_conn_lock held */
kgn_conn_t *
kgnilnd_find_or_create_conn_locked(kgn_peer_t *peer)
{
        kgn_device_t    *dev = peer->gnp_net->gnn_dev;
        kgn_conn_t      *conn;

        conn = kgnilnd_find_conn_locked(peer);

        if (conn != NULL) {
                return conn;
        }

        /* if the peer was previously connecting, check if we should
         * trigger another connection attempt yet. */
        if (time_before(jiffies, peer->gnp_reconnect_time)) {
                return NULL;
        }

        /* This check prevents us from creating a new connection to a peer while we are
         * still in the process of closing an existing connection to the peer.
         */
        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_ephandle != NULL) {
                        CDEBUG(D_NET, "Not connecting: non-NULL ephandle found for peer 0x%p->%s\n", peer,
                                libcfs_nid2str(peer->gnp_nid));
                        return NULL;
                }
        }

        if (peer->gnp_connecting != GNILND_PEER_IDLE) {
                /* only start a new connection if the peer is IDLE - any
                 * other state means a connect is already in flight */
                return NULL;
        }

        CDEBUG(D_NET, "starting connect to %s\n",
                libcfs_nid2str(peer->gnp_nid));
        peer->gnp_connecting = GNILND_PEER_CONNECT;
        kgnilnd_peer_addref(peer); /* extra ref for connd */

        spin_lock(&dev->gnd_connd_lock);
        list_add_tail(&peer->gnp_connd_list, &dev->gnd_connd_peers);
        spin_unlock(&dev->gnd_connd_lock);

        kgnilnd_schedule_dgram(dev);
        CDEBUG(D_NETTRACE, "scheduling new connect\n");

        return NULL;
}

/* Caller is responsible for deciding if/when to call this */
void
kgnilnd_destroy_conn_ep(kgn_conn_t *conn)
{
        gni_return_t    rrc;
        gni_ep_handle_t tmp_ep;

        /* only if we actually initialized it; setting NULL tells
         * kgnilnd_destroy_conn to leave it alone */
        tmp_ep = xchg(&conn->gnc_ephandle, NULL);
        if (tmp_ep != NULL) {
                /* we never re-use the EP, so unbind is not needed */
                mutex_lock(&conn->gnc_device->gnd_cq_mutex);
                rrc = kgnilnd_ep_destroy(tmp_ep);

                mutex_unlock(&conn->gnc_device->gnd_cq_mutex);

                /* if this fails, it could hork up kgni smsg retransmit and others
                 * since we could free the SMSG mbox memory, etc. */
                LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d conn 0x%p ep 0x%p\n",
                         rrc, conn, conn->gnc_ephandle);

                atomic_dec(&conn->gnc_device->gnd_neps);

                /* clear out count added in kgnilnd_close_conn_locked
                 * conn will have a peer once it hits finish_connect, where it
                 * is the first spot we'll mark it ESTABLISHED as well */
                if (conn->gnc_peer) {
                        kgnilnd_admin_decref(conn->gnc_peer->gnp_dirty_eps);
                }

                /* drop ref for EP */
                kgnilnd_conn_decref(conn);
        }
}
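
/*
 * Design note: the xchg() above atomically claims gnc_ephandle, so two
 * racing callers cannot both destroy the EP - only the one that observed
 * a non-NULL handle proceeds. A minimal sketch of the same idiom:
 *
 *      void *h = xchg(&obj->handle, NULL);
 *
 *      if (h != NULL)
 *              release(h);     (runs at most once per handle)
 */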

void
kgnilnd_destroy_conn(kgn_conn_t *conn)
{
        LASSERTF(!in_interrupt() &&
                !conn->gnc_scheduled &&
                !conn->gnc_in_purgatory &&
                conn->gnc_ephandle == NULL &&
                list_empty(&conn->gnc_list) &&
                list_empty(&conn->gnc_hashlist) &&
                list_empty(&conn->gnc_schedlist) &&
                list_empty(&conn->gnc_mdd_list) &&
                list_empty(&conn->gnc_delaylist) &&
                conn->gnc_magic == GNILND_CONN_MAGIC,
                "conn 0x%p->%s IRQ %d sched %d purg %d ep 0x%p Mg %d lists %d/%d/%d/%d/%d\n",
                conn, conn->gnc_peer ? libcfs_nid2str(conn->gnc_peer->gnp_nid)
                                     : "<?>",
                !!in_interrupt(), conn->gnc_scheduled,
                conn->gnc_in_purgatory,
                conn->gnc_ephandle,
                conn->gnc_magic,
                list_empty(&conn->gnc_list),
                list_empty(&conn->gnc_hashlist),
                list_empty(&conn->gnc_schedlist),
                list_empty(&conn->gnc_mdd_list),
                list_empty(&conn->gnc_delaylist));

        /* Tripping these is especially bad, as it means we have items on the
         *  lists that didn't keep their refcount on the connection - or
         *  somebody evil released their own */
        LASSERTF(list_empty(&conn->gnc_fmaq) &&
                 atomic_read(&conn->gnc_nlive_fma) == 0 &&
                 atomic_read(&conn->gnc_nlive_rdma) == 0,
                 "conn 0x%p fmaq %d@0x%p nfma %d nrdma %d\n",
                 conn, kgnilnd_count_list(&conn->gnc_fmaq), &conn->gnc_fmaq,
                 atomic_read(&conn->gnc_nlive_fma), atomic_read(&conn->gnc_nlive_rdma));

        CDEBUG(D_NET, "destroying conn %p ephandle %p error %d\n",
                conn, conn->gnc_ephandle, conn->gnc_error);

        /* We are freeing this memory; remove the magic value from the connection */
        conn->gnc_magic = 0;

        /* if there is an FMA blk left here, we'll tear it down */
        if (conn->gnc_fma_blk) {
                if (conn->gnc_peer) {
                        kgn_mbox_info_t *mbox;
                        mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
                        mbox->mbx_prev_nid = conn->gnc_peer->gnp_nid;
                }
                kgnilnd_release_mbox(conn, 0);
        }

        if (conn->gnc_peer != NULL)
                kgnilnd_peer_decref(conn->gnc_peer);

        if (conn->gnc_tx_ref_table != NULL) {
                kgnilnd_vfree(conn->gnc_tx_ref_table,
                              GNILND_MAX_MSG_ID * sizeof(void *));
        }

        LIBCFS_FREE(conn, sizeof(*conn));
        atomic_dec(&kgnilnd_data.kgn_nconns);
}

/* peer_alive and peer_notify done in the style of the o2iblnd */
void
kgnilnd_peer_alive(kgn_peer_t *peer)
{
        time64_t now = ktime_get_seconds();

        set_mb(peer->gnp_last_alive, now);
}

void
kgnilnd_peer_notify(kgn_peer_t *peer, int error, int alive)
{
        int                     tell_lnet = 0;
        int                     nnets = 0;
        int                     rc;
        int                     i, j;
        kgn_conn_t             *conn;
        kgn_net_t             **nets;
        kgn_net_t              *net;

        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DONT_NOTIFY))
                return;

        /* Tell LNet we are giving up on this peer - but only
         * if it isn't already reconnected or trying to reconnect */
        read_lock(&kgnilnd_data.kgn_peer_conn_lock);

        /* use kgnilnd_find_conn_locked to avoid any conns in the process of being nuked
         *
         * don't tell LNet if we are in reset - we assume that everyone will be able to
         * reconnect just fine
         */
        conn = kgnilnd_find_conn_locked(peer);

        CDEBUG(D_NETTRACE, "peer 0x%p->%s connecting %d conn 0x%p, rst %d error %d\n",
               peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting, conn,
               kgnilnd_data.kgn_in_reset, error);

        if (((peer->gnp_connecting == GNILND_PEER_IDLE) &&
            (conn == NULL) &&
            (!kgnilnd_data.kgn_in_reset) &&
            (!kgnilnd_conn_clean_errno(error))) || alive) {
                tell_lnet = 1;
        }

        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        if (!tell_lnet) {
                /* short circuit if we don't need to notify LNet */
                return;
        }

        rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);

        if (rc) {
                /* don't do this if the trylock fails - LNet is in shutdown
                 * or something else is going on */

                for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                        list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
                                /* if gnn_shutdown is set for any net, shutdown is in progress; just return */
                                if (net->gnn_shutdown) {
                                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                                        return;
                                }
                                nnets++;
                        }
                }

                if (nnets == 0) {
                        /* shutdown in progress most likely */
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        return;
                }

                CFS_ALLOC_PTR_ARRAY(nets, nnets);

                if (nets == NULL) {
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        CERROR("Failed to allocate nets[%d]\n", nnets);
                        return;
                }

                j = 0;
                for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                        list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
                                nets[j] = net;
                                kgnilnd_net_addref(net);
                                j++;
                        }
                }
                up_read(&kgnilnd_data.kgn_net_rw_sem);

                for (i = 0; i < nnets; i++) {
                        struct lnet_nid peer_nid;

                        net = nets[i];

                        lnet_nid4_to_nid(kgnilnd_lnd2lnetnid(
                                                 lnet_nid_to_nid4(&net->gnn_ni->ni_nid),
                                                 peer->gnp_nid),
                                         &peer_nid);

                        CDEBUG(D_NET, "peer 0x%p->%s last_alive %lld (%llds ago)\n",
                                peer, libcfs_nidstr(&peer_nid),
                                peer->gnp_last_alive,
                                ktime_get_seconds() - peer->gnp_last_alive);

                        lnet_notify(net->gnn_ni, &peer_nid, alive, true,
                                    peer->gnp_last_alive);

                        kgnilnd_net_decref(net);
                }

                CFS_FREE_PTR_ARRAY(nets, nnets);
        }
}

/* need write_lock on kgn_peer_conn_lock */
void
kgnilnd_close_conn_locked(kgn_conn_t *conn, int error)
{
        kgn_peer_t        *peer = conn->gnc_peer;
        ENTRY;

        LASSERT(!in_interrupt());

        /* store error for tx completion */
        conn->gnc_error = error;
        peer->gnp_last_errno = error;

        /* use real error from peer if possible */
        if (error == -ECONNRESET) {
                error = conn->gnc_peer_error;
        }

        /* if we NETERROR, make sure it is rate limited */
        if (!kgnilnd_conn_clean_errno(error) &&
            peer->gnp_state != GNILND_PEER_DOWN) {
                CNETERR("closing conn to %s: error %d\n",
                       libcfs_nid2str(peer->gnp_nid), error);
        } else {
                CDEBUG(D_NET, "closing conn to %s: error %d\n",
                       libcfs_nid2str(peer->gnp_nid), error);
        }

        LASSERTF(conn->gnc_state == GNILND_CONN_ESTABLISHED,
                "conn %p to %s with bogus state %s\n", conn,
                libcfs_nid2str(conn->gnc_peer->gnp_nid),
                kgnilnd_conn_state2str(conn));
        LASSERT(!list_empty(&conn->gnc_hashlist));
        LASSERT(!list_empty(&conn->gnc_list));

        /* mark peer count here so any place the EP gets destroyed will
         * open up the peer count so that a new ESTABLISHED conn is then free
         * to send new messages -- sending before the previous EPs are destroyed
         * could end up with messages on the network for the old conn _after_
         * the new conn and break the mbox safety protocol */
        kgnilnd_admin_addref(conn->gnc_peer->gnp_dirty_eps);

        /* Remove from conn hash table: no new callbacks */
        list_del_init(&conn->gnc_hashlist);
        kgnilnd_data.kgn_conn_version++;
        kgnilnd_conn_decref(conn);

        /* if we are in reset, go right to CLOSED as there is no scheduler
         * thread to move from CLOSING to CLOSED */
        if (unlikely(kgnilnd_data.kgn_in_reset)) {
                conn->gnc_state = GNILND_CONN_CLOSED;
        } else {
                conn->gnc_state = GNILND_CONN_CLOSING;
        }

        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RDMA_CQ_ERROR)) {
                msleep_interruptible(MSEC_PER_SEC);
        }

        /* leave on peer->gnp_conns to make sure we don't let the reaper
         * or others try to unlink this peer until the conn is fully
         * processed for closing */

        if (kgnilnd_check_purgatory_conn(conn)) {
                kgnilnd_add_purgatory_locked(conn, conn->gnc_peer);
        }

        /* Reset RX timeout to ensure we wait for an incoming CLOSE
         * for the full timeout.  If we get a CLOSE we know the
         * peer has stopped all RDMA.  Otherwise if we wait for
         * the full timeout we can also be sure all RDMA has stopped. */
        conn->gnc_last_rx = conn->gnc_last_rx_cq = jiffies;
        mb();

        /* schedule sending CLOSE - if we are in quiesce, this adds to
         * gnd_ready_conns and allows us to find it in quiesce processing */
        kgnilnd_schedule_conn(conn);

        EXIT;
}
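
/*
 * Refcount pairing note (derived from this file): the gnp_dirty_eps admin
 * ref taken in kgnilnd_close_conn_locked() above is dropped in
 * kgnilnd_destroy_conn_ep() once the EP is actually destroyed, which is
 * what keeps a new ESTABLISHED conn from sending while stale EPs remain.
 */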

void
kgnilnd_close_conn(kgn_conn_t *conn, int error)
{
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        /* need to check the state here - this call is racy and we don't
         * know the state until after the lock is grabbed */
        if (conn->gnc_state == GNILND_CONN_ESTABLISHED) {
                kgnilnd_close_conn_locked(conn, error);
        }
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
}

void
kgnilnd_complete_closed_conn(kgn_conn_t *conn)
{
        LIST_HEAD(sinners);
        kgn_tx_t               *tx, *txn;
        int                     nlive = 0;
        int                     nrdma = 0;
        int                     nq_rdma = 0;
        int                     logmsg;
        ENTRY;

        /* Dump log on cksum error - wait until complete phase to let
         * RX of error happen */
        if (*kgnilnd_tunables.kgn_checksum_dump &&
            (conn != NULL && conn->gnc_peer_error == -ENOKEY)) {
                libcfs_debug_dumplog();
        }

        /* _CLOSED set in kgnilnd_process_fmaq once we decide to
         * send the CLOSE or not */
        LASSERTF(conn->gnc_state == GNILND_CONN_CLOSED,
                 "conn 0x%p->%s with bad state %s\n",
                 conn, conn->gnc_peer ?
                        libcfs_nid2str(conn->gnc_peer->gnp_nid) :
                        "<?>",
                 kgnilnd_conn_state2str(conn));

        LASSERT(list_empty(&conn->gnc_hashlist));
        /* We shouldn't be on the delay list: the conn can only get added
         * to it during a retransmit, and retransmits only occur within
         * scheduler threads.
         */
        LASSERT(list_empty(&conn->gnc_delaylist));

        /* we've sent the close, start nuking */
        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SCHEDULE_COMPLETE))
                kgnilnd_schedule_conn(conn);

        if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
                CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
                                "done; attempting to recover conn 0x%p "
                                "scheduled %d function: %s line: %d\n", conn,
                                conn->gnc_scheduled, conn->gnc_sched_caller,
                                conn->gnc_sched_line);
                RETURN_EXIT;
        }

        /* we don't use lists to track things that we can get out of the
         * tx_ref table... */

        /* need to hold locks for tx_list_state, sampling it is too racy:
         * - the lock actually protects tx != NULL, but we can't take the proper
         *   lock until we check tx_list_state, which would be too late and
         *   we could have the TX change under us.
         * gnd_rdmaq_lock and gnd_lock are not used together, so taking both
         * should be fine */
        spin_lock(&conn->gnc_device->gnd_rdmaq_lock);
        spin_lock(&conn->gnc_device->gnd_lock);

        for (nrdma = 0; nrdma < GNILND_MAX_MSG_ID; nrdma++) {
                tx = conn->gnc_tx_ref_table[nrdma];

                if (tx != NULL) {
                        /* only print the first error; quiet the rest, and
                         * always quiet CLOSE - we often don't see CQ events
                         * for it by the time we get here and really don't care */
                        if (nlive || tx->tx_msg.gnm_type == GNILND_MSG_CLOSE)
                                tx->tx_state |= GNILND_TX_QUIET_ERROR;
                        nlive++;
                        GNIDBG_TX(D_NET, tx, "cleaning up on close, nlive %d", nlive);

                        /* don't worry about gnc_lock here as nobody else should be
                         * touching this conn */
                        kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
                        list_add_tail(&tx->tx_list, &sinners);
                }
        }
        spin_unlock(&conn->gnc_device->gnd_lock);
        spin_unlock(&conn->gnc_device->gnd_rdmaq_lock);

        /* nobody should have marked this as needing scheduling after
         * we called close - so only ref should be us handling it */
        if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
                CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
                                "done; attempting to recover conn 0x%p "
                                "scheduled %d function %s line: %d\n", conn,
                                conn->gnc_scheduled, conn->gnc_sched_caller,
                                conn->gnc_sched_line);
        }
        /* now reset a few to actual counters... */
        nrdma = atomic_read(&conn->gnc_nlive_rdma);
        nq_rdma = atomic_read(&conn->gnc_nq_rdma);

        if (!list_empty(&sinners)) {
                list_for_each_entry_safe(tx, txn, &sinners, tx_list) {
                        /* clear tx_list to make tx_add_list_locked happy */
                        list_del_init(&tx->tx_list);
                        /* The error codes determine if we hold onto the MDD */
                        kgnilnd_tx_done(tx, conn->gnc_error);
                }
        }

        logmsg = (nlive + nrdma + nq_rdma);

        if (logmsg) {
                int level = conn->gnc_peer->gnp_state == GNILND_PEER_UP ?
                                D_NETERROR : D_NET;
                CDEBUG(level, "Closed conn 0x%p->%s (errno %d,"
                        " peer errno %d): canceled %d TX, %d/%d RDMA\n",
                        conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                        conn->gnc_error, conn->gnc_peer_error,
                        nlive, nq_rdma, nrdma);
        }

        kgnilnd_destroy_conn_ep(conn);

        /* Bug 765042 - race this with completing a new conn to same peer - we need
         * finish_connect to detach purgatory before we can do it ourselves here */
        CFS_RACE(CFS_FAIL_GNI_FINISH_PURG);

        /* now it is safe to remove from peer list - anyone looking at
         * gnp_conns now is free to unlink if not on purgatory */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);

        conn->gnc_state = GNILND_CONN_DONE;

        /* Decrement counter if we are marked by del_conn_or_peers for closing
         */
        if (conn->gnc_needs_closing)
                kgnilnd_admin_decref(kgnilnd_data.kgn_npending_conns);

        /* Remove from peer's list of valid connections if it's not in purgatory */
        if (!conn->gnc_in_purgatory) {
                list_del_init(&conn->gnc_list);
                /* Lose the peer's reference on the conn */
                kgnilnd_conn_decref(conn);
        }

        /* NB - only unlinking if we set pending in del_peer_locked from admin or
         * shutdown */
        if (kgnilnd_peer_active(conn->gnc_peer) &&
            conn->gnc_peer->gnp_pending_unlink &&
            kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
                kgnilnd_unlink_peer_locked(conn->gnc_peer);
        }

        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        /* I'm telling Mommy! - use peer_error if they initiated close */
        kgnilnd_peer_notify(conn->gnc_peer,
                            conn->gnc_error == -ECONNRESET ?
                            conn->gnc_peer_error : conn->gnc_error, 0);

        EXIT;
}

int
kgnilnd_set_conn_params(kgn_dgram_t *dgram)
{
        kgn_conn_t             *conn = dgram->gndg_conn;
        kgn_connreq_t          *connreq = &dgram->gndg_conn_in;
        kgn_gniparams_t        *rem_param = &connreq->gncr_gnparams;
        gni_return_t            rrc;
        int                     rc = 0;
        gni_smsg_attr_t        *remote = &connreq->gncr_gnparams.gnpr_smsg_attr;

        /* set timeout vals in conn early so we can use them for the NAK */

        /* use max of the requested and our timeout, peer will do the same */
        conn->gnc_timeout = max(conn->gnc_timeout, connreq->gncr_timeout);

        /* only ep_bind really mucks around with the CQ */
        /* only ep_bind if we are not connecting to ourselves and the dstnid is not a wildcard. This check
         * is necessary since an EP can only be bound once and we must make sure we don't bind when already bound.
         */
        if (connreq->gncr_dstnid != LNET_NID_ANY && dgram->gndg_conn_out.gncr_dstnid != connreq->gncr_srcnid) {
                mutex_lock(&conn->gnc_device->gnd_cq_mutex);
                rrc = kgnilnd_ep_bind(conn->gnc_ephandle,
                        connreq->gncr_gnparams.gnpr_host_id,
                        conn->gnc_cqid);
                mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                if (rrc != GNI_RC_SUCCESS) {
                        rc = -ECONNABORTED;
                        goto return_out;
                }
        }

        rrc = kgnilnd_ep_set_eventdata(conn->gnc_ephandle, conn->gnc_cqid,
                         connreq->gncr_gnparams.gnpr_cqid);
        if (rrc != GNI_RC_SUCCESS) {
                rc = -ECONNABORTED;
                goto cleanup_out;
        }

        /* Initialize SMSG */
        rrc = kgnilnd_smsg_init(conn->gnc_ephandle, &conn->gnpr_smsg_attr,
                        &connreq->gncr_gnparams.gnpr_smsg_attr);
        if (unlikely(rrc == GNI_RC_INVALID_PARAM)) {
                gni_smsg_attr_t *local = &conn->gnpr_smsg_attr;
                /* help folks figure out if there is a tunable off, etc. */
                LCONSOLE_ERROR("SMSG attribute mismatch. Data from local/remote:"
                               " type %d/%d msg_maxsize %u/%u"
                               " mbox_maxcredit %u/%u. Please check kgni"
                               " logs for further data\n",
                               local->msg_type, remote->msg_type,
                               local->msg_maxsize, remote->msg_maxsize,
                               local->mbox_maxcredit, remote->mbox_maxcredit);
        }
        if (rrc != GNI_RC_SUCCESS) {
                rc = -ECONNABORTED;
                goto cleanup_out;
        }

        /* log this for help in debugging SMSG buffer re-use */
        CDEBUG(D_NET, "conn %p src %s dst %s smsg %p acquired"
                " local cqid %u SMSG %p->%u hndl %#llx.%#llx"
                " remote cqid %u SMSG %p->%u hndl %#llx.%#llx\n",
                conn, libcfs_nid2str(connreq->gncr_srcnid),
                libcfs_nid2str(connreq->gncr_dstnid),
                &conn->gnpr_smsg_attr,
                conn->gnc_cqid,
                conn->gnpr_smsg_attr.msg_buffer,
                conn->gnpr_smsg_attr.mbox_offset,
                conn->gnpr_smsg_attr.mem_hndl.qword1,
                conn->gnpr_smsg_attr.mem_hndl.qword2,
                rem_param->gnpr_cqid,
                rem_param->gnpr_smsg_attr.msg_buffer,
                rem_param->gnpr_smsg_attr.mbox_offset,
                rem_param->gnpr_smsg_attr.mem_hndl.qword1,
                rem_param->gnpr_smsg_attr.mem_hndl.qword2);

        conn->gnc_peerstamp = connreq->gncr_peerstamp;
        conn->gnc_peer_connstamp = connreq->gncr_connstamp;
        conn->remote_mbox_addr = (void *)((char *)remote->msg_buffer + remote->mbox_offset);

        /* We update the reaper timeout once we have a valid conn and timeout */
        kgnilnd_update_reaper_timeout(GNILND_TO2KA(conn->gnc_timeout));

        return 0;

cleanup_out:
        rrc = kgnilnd_ep_unbind(conn->gnc_ephandle);
        /* not sure I can just let this fly */
        LASSERTF(rrc == GNI_RC_SUCCESS,
                "bad rc from gni_ep_unbind trying to cleanup: %d\n", rrc);

return_out:
        LASSERTF(rc != 0, "SOFTWARE BUG: rc == 0\n");
        CERROR("Error setting connection params from %s: %d\n",
               libcfs_nid2str(connreq->gncr_srcnid), rc);
        return rc;
}

/* needs down_read on kgn_net_rw_sem held from before this call until
 * after the write_lock on kgn_peer_conn_lock - this ensures we stay sane
 * with kgnilnd_shutdown - it'll get the sem and set shutdown, then get the
 * kgn_peer_conn_lock to start del_peer'ing. If we hold the sem until after
 * kgn_peer_conn_lock is held, we guarantee that nobody calls
 * kgnilnd_add_peer_locked without checking gnn_shutdown */
int
kgnilnd_create_peer_safe(kgn_peer_t **peerp,
                         lnet_nid_t nid,
                         kgn_net_t *net,
                         int node_state)
{
        kgn_peer_t      *peer;
        int             rc;

        LASSERT(nid != LNET_NID_ANY);

        /* We don't pass the net around in the dgram anymore, so here is where
         * we find it. This will work unless we are in shutdown or the nid has
         * a net that is invalid; either way an error code is returned in that
         * case.
         *
         * If the net passed in is not NULL then we can use it, which avoids
         * looking it up when the calling function already has access to the
         * data.
         */
        if (net == NULL) {
                rc = kgnilnd_find_net(nid, &net);
                if (rc < 0)
                        return rc;
        } else {
                /* find_net adds a reference on the net; if we are not using
                 * it we must take the reference manually so the net refcounts
                 * are correct when tearing down the net
                 */
                kgnilnd_net_addref(net);
        }

        LIBCFS_ALLOC(peer, sizeof(*peer));
        if (peer == NULL) {
                kgnilnd_net_decref(net);
                return -ENOMEM;
        }
        peer->gnp_nid = nid;
        peer->gnp_state = node_state;

        /* translate from nid to nic addr & store */
        rc = kgnilnd_nid_to_nicaddrs(LNET_NIDADDR(nid), 1, &peer->gnp_host_id);
        if (rc <= 0) {
                kgnilnd_net_decref(net);
                LIBCFS_FREE(peer, sizeof(*peer));
                return -ESRCH;
        }
        CDEBUG(D_NET, "peer 0x%p->%s -> NIC 0x%x\n", peer,
                libcfs_nid2str(nid), peer->gnp_host_id);

        atomic_set(&peer->gnp_refcount, 1);     /* 1 ref for caller */
        atomic_set(&peer->gnp_dirty_eps, 0);

        INIT_LIST_HEAD(&peer->gnp_list);
        INIT_LIST_HEAD(&peer->gnp_connd_list);
        INIT_LIST_HEAD(&peer->gnp_conns);
        INIT_LIST_HEAD(&peer->gnp_tx_queue);

        /* the first reconnect should happen immediately, so we leave
         * gnp_reconnect_interval set to 0 */

        LASSERTF(net != NULL, "peer 0x%p->%s with NULL net\n",
                 peer, libcfs_nid2str(nid));

        /* must have kgn_net_rw_sem held for this...  */
        if (net->gnn_shutdown) {
                /* shutdown has started already */
                kgnilnd_net_decref(net);
                LIBCFS_FREE(peer, sizeof(*peer));
                return -ESHUTDOWN;
        }

        peer->gnp_net = net;

        atomic_inc(&kgnilnd_data.kgn_npeers);

        *peerp = peer;
        return 0;
}
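
/*
 * Locking sketch (hypothetical caller; see the comment block above the
 * function): kgn_net_rw_sem must stay read-held from before the call
 * until after kgn_peer_conn_lock is taken to insert the peer:
 *
 *      down_read(&kgnilnd_data.kgn_net_rw_sem);
 *      rc = kgnilnd_create_peer_safe(&peer, nid, net, node_state);
 *      if (rc == 0) {
 *              write_lock(&kgnilnd_data.kgn_peer_conn_lock);
 *              (insert peer into the kgn_peers hash here)
 *              write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
 *      }
 *      up_read(&kgnilnd_data.kgn_net_rw_sem);
 */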
1086
1087 void
1088 kgnilnd_destroy_peer(kgn_peer_t *peer)
1089 {
1090         CDEBUG(D_NET, "peer %s %p deleted\n",
1091                libcfs_nid2str(peer->gnp_nid), peer);
1092         LASSERTF(atomic_read(&peer->gnp_refcount) == 0,
1093                  "peer 0x%p->%s refs %d\n",
1094                  peer, libcfs_nid2str(peer->gnp_nid),
1095                  atomic_read(&peer->gnp_refcount));
1096         LASSERTF(atomic_read(&peer->gnp_dirty_eps) == 0,
1097                  "peer 0x%p->%s dirty eps %d\n",
1098                  peer, libcfs_nid2str(peer->gnp_nid),
1099                  atomic_read(&peer->gnp_dirty_eps));
1100         LASSERTF(peer->gnp_net != NULL, "peer %p (%s) with NULL net\n",
1101                  peer, libcfs_nid2str(peer->gnp_nid));
1102         LASSERTF(!kgnilnd_peer_active(peer),
1103                  "peer 0x%p->%s\n",
1104                 peer, libcfs_nid2str(peer->gnp_nid));
1105         LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE || peer->gnp_connecting == GNILND_PEER_KILL,
1106                  "peer 0x%p->%s, connecting %d\n",
1107                 peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
1108         LASSERTF(list_empty(&peer->gnp_conns),
1109                  "peer 0x%p->%s\n",
1110                 peer, libcfs_nid2str(peer->gnp_nid));
1111         LASSERTF(list_empty(&peer->gnp_tx_queue),
1112                  "peer 0x%p->%s\n",
1113                 peer, libcfs_nid2str(peer->gnp_nid));
1114         LASSERTF(list_empty(&peer->gnp_connd_list),
1115                  "peer 0x%p->%s\n",
1116                 peer, libcfs_nid2str(peer->gnp_nid));
1117
1118         /* NB a peer's connections keep a reference on their peer until
1119          * they are destroyed, so we can be assured that _all_ state to do
1120          * with this peer has been cleaned up when its refcount drops to
1121          * zero. */
1122
1123         atomic_dec(&kgnilnd_data.kgn_npeers);
1124         kgnilnd_net_decref(peer->gnp_net);
1125
1126         LIBCFS_FREE(peer, sizeof(*peer));
1127 }
1128
1129 /* the conn might not have made it all the way through to a connected
1130  * state - but we need to purgatory any conn that a remote peer might
1131  * have seen through a posted dgram as well */
1132 void
1133 kgnilnd_add_purgatory_locked(kgn_conn_t *conn, kgn_peer_t *peer)
1134 {
1135         kgn_mbox_info_t *mbox = NULL;
1136         ENTRY;
1137
1138         /* NB - the caller should own conn by removing him from the
1139          * scheduler thread when finishing the close */
1140
1141         LASSERTF(peer != NULL, "conn %p with NULL peer\n", conn);
1142
1143         /* If this is still true, need to add the calls to unlink back in and
1144          * figure out how to close the hole on loopback conns */
1145         LASSERTF(kgnilnd_peer_active(peer), "can't use inactive peer %s (%p)"
1146                 " we'll never recover the resources\n",
1147                 libcfs_nid2str(peer->gnp_nid), peer);
1148
1149         CDEBUG(D_NET, "conn %p peer %p dev %p\n", conn, peer,
1150                 conn->gnc_device);
1151
1152         LASSERTF(conn->gnc_in_purgatory == 0,
1153                 "Conn already in purgatory\n");
1154         conn->gnc_in_purgatory = 1;
1155
1156         mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
1157         mbox->mbx_prev_purg_nid = peer->gnp_nid;
1158         mbox->mbx_add_purgatory = jiffies;
1159         kgnilnd_release_mbox(conn, 1);
1160
1161         LASSERTF(list_empty(&conn->gnc_mdd_list),
1162                 "conn 0x%p->%s with active purgatory hold MDD %d\n",
1163                 conn, libcfs_nid2str(peer->gnp_nid),
1164                 kgnilnd_count_list(&conn->gnc_mdd_list));
1165
1166         EXIT;
1167 }
1168
1169 /* Instead of detaching everything from purgatory here we just mark the conn as needing
1170  * detach, when the reaper checks the conn the next time it will detach it.
1171  * Calling function requires write_lock held on kgn_peer_conn_lock
1172  */
1173 void
1174 kgnilnd_mark_for_detach_purgatory_all_locked(kgn_peer_t *peer) {
1175         kgn_conn_t       *conn;
1176
1177         list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
1178                 if (conn->gnc_in_purgatory && !conn->gnc_needs_detach) {
1179                         conn->gnc_needs_detach = 1;
1180                         kgnilnd_admin_addref(kgnilnd_data.kgn_npending_detach);
1181                 }
1182         }
1183 }
1184
1185 /* Calling function needs a write_lock held on kgn_peer_conn_lock */
1186 void
1187 kgnilnd_detach_purgatory_locked(kgn_conn_t *conn, struct list_head *conn_list)
1188 {
1189         kgn_mbox_info_t *mbox = NULL;
1190
1191         /* if needed, add the conn purgatory data to the list passed in */
1192         if (conn->gnc_in_purgatory) {
1193                 CDEBUG(D_NET, "peer %p->%s purg_conn %p@%s mdd_list #tx %d\n",
1194                         conn->gnc_peer, libcfs_nid2str(conn->gnc_peer->gnp_nid),
1195                         conn, kgnilnd_conn_state2str(conn),
1196                         kgnilnd_count_list(&conn->gnc_mdd_list));
1197
1198                 mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
1199                 mbox->mbx_detach_of_purgatory = jiffies;
1200
1201                 /* conn->gnc_list is the entry point on peer->gnp_conns, so detaching it
1202                  * here removes it from the list of 'valid' peer connections.
1203                  * We put the current conn onto a list of conns to call kgnilnd_release_purgatory_locked()
1204                  * and as such the caller of kgnilnd_detach_purgatory_locked() now owns that conn, since its not
1205                  * on the peer's conn_list anymore.
1206                  */
1207
1208                 list_del_init(&conn->gnc_list);
1209
1210                 /* NB - only unlinking if we set pending in del_peer_locked from admin or
1211                  * shutdown */
1212                 if (kgnilnd_peer_active(conn->gnc_peer) &&
1213                     conn->gnc_peer->gnp_pending_unlink &&
1214                     kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
1215                         kgnilnd_unlink_peer_locked(conn->gnc_peer);
1216                 }
1217                 /* The reaper will not call detach unless the conn is fully through kgnilnd_complete_closed_conn.
1218                  * If the conn is not in a DONE state somehow we are attempting to detach even though
1219                  * the conn has not been fully cleaned up. If we detach while the conn is still closing
1220                  * we will end up with an orphaned connection that has valid ep_handle, that is not on a
1221                  * peer.
1222                  */
1223
1224                 LASSERTF(conn->gnc_state == GNILND_CONN_DONE, "Conn in invalid state  %p@%s \n",
1225                                 conn, kgnilnd_conn_state2str(conn));
1226
1227                 /* move from peer to the delayed release list */
1228                 list_add_tail(&conn->gnc_list, conn_list);
1229         }
1230 }
1231
1232 void
1233 kgnilnd_release_purgatory_list(struct list_head *conn_list)
1234 {
1235         kgn_device_t            *dev;
1236         kgn_conn_t              *conn, *connN;
1237         kgn_mdd_purgatory_t     *gmp, *gmpN;
1238
1239         list_for_each_entry_safe(conn, connN, conn_list, gnc_list) {
1240                 dev = conn->gnc_device;
1241
1242                 kgnilnd_release_mbox(conn, -1);
1243                 conn->gnc_in_purgatory = 0;
1244
1245                 list_del_init(&conn->gnc_list);
1246
1247                 /* gnc_needs_detach is set in kgnilnd_del_conn_or_peer. It is used to keep track
1248                  * of conns that have been marked for detach by kgnilnd_del_conn_or_peer.
1249                  * The function uses kgn_npending_detach to verify the conn has
1250                  * actually been detached.
1251                  */
1252
1253                 if (conn->gnc_needs_detach)
1254                         kgnilnd_admin_decref(kgnilnd_data.kgn_npending_detach);
1255
1256                 /* if this guy is really dead (we are doing release from reaper),
1257                  * make sure we tell LNet - if this is from other context,
1258                  * the checks in the function will prevent an errant
1259                  * notification */
1260                 kgnilnd_peer_notify(conn->gnc_peer, conn->gnc_error, 0);
1261
1262                 list_for_each_entry_safe(gmp, gmpN, &conn->gnc_mdd_list,
1263                                          gmp_list) {
1264                         CDEBUG(D_NET,
1265                                "dev %p releasing held mdd %#llx.%#llx\n",
1266                                conn->gnc_device, gmp->gmp_map_key.qword1,
1267                                gmp->gmp_map_key.qword2);
1268
1269                         atomic_dec(&dev->gnd_n_mdd_held);
1270                         kgnilnd_mem_mdd_release(conn->gnc_device->gnd_handle,
1271                                                 &gmp->gmp_map_key);
1272                         /* ignoring the return code - if kgni/ghal can't find it
1273                          * it must be released already */
1274
1275                         list_del_init(&gmp->gmp_list);
1276                         LIBCFS_FREE(gmp, sizeof(*gmp));
1277                 }
1278                 /* lose conn ref for purgatory */
1279                 kgnilnd_conn_decref(conn);
1280         }
1281 }
1282
1283 /* needs write_lock on kgnilnd_data.kgn_peer_conn_lock held */
1284 void
1285 kgnilnd_peer_increase_reconnect_locked(kgn_peer_t *peer)
1286 {
1287         int current_to;
1288
1289         current_to = peer->gnp_reconnect_interval;
1290
1291         /* we'll try to reconnect fast the first time, then back-off */
1292         if (current_to == 0) {
1293                 peer->gnp_reconnect_time = jiffies - 1;
1294                 current_to = *kgnilnd_tunables.kgn_min_reconnect_interval;
1295         } else {
1296                 peer->gnp_reconnect_time = jiffies + cfs_time_seconds(current_to);
1297                 /* add 50% of min timeout & retry */
1298                 current_to += *kgnilnd_tunables.kgn_min_reconnect_interval / 2;
1299         }
1300
1301         current_to = min(current_to,
1302                          *kgnilnd_tunables.kgn_max_reconnect_interval);
1303
1304         peer->gnp_reconnect_interval = current_to;
1305         CDEBUG(D_NET, "peer %s can reconnect at %lu interval %lu\n",
1306                libcfs_nid2str(peer->gnp_nid), peer->gnp_reconnect_time,
1307                peer->gnp_reconnect_interval);
1308 }
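
/*
 * Worked example of the backoff above (illustrative only - the real
 * bounds come from the kgn_min/max_reconnect_interval tunables; the
 * values 60 and 600 below are assumptions, not necessarily defaults):
 *
 *   interval 0   -> reconnect allowed immediately, next interval 60
 *   interval 60  -> wait 60s, next interval 60 + 30 = 90
 *   interval 90  -> wait 90s, next interval 90 + 30 = 120
 *   ...                       (grows by min/2 = 30 each attempt)
 *   interval 600 -> clamped at the 600s maximum from then on
 */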
1309
1310 /* needs kgnilnd_data.kgn_peer_conn_lock held */
1311 kgn_peer_t *
1312 kgnilnd_find_peer_locked(lnet_nid_t nid)
1313 {
1314         struct list_head *peer_list = kgnilnd_nid2peerlist(nid);
1315         kgn_peer_t       *peer;
1316
1317         /* Chopping nid down to only NIDADDR using LNET_NIDADDR so we only
1318          * have a single peer per device instead of a peer per nid/net combo.
1319          */
1320
1321         list_for_each_entry(peer, peer_list, gnp_list) {
1322                 if (LNET_NIDADDR(nid) != LNET_NIDADDR(peer->gnp_nid))
1323                         continue;
1324
1325                 CDEBUG(D_NET, "got peer [%p] -> %s c %d (%d)\n",
1326                        peer, libcfs_nid2str(nid),
1327                        peer->gnp_connecting,
1328                        atomic_read(&peer->gnp_refcount));
1329                 return peer;
1330         }
1331         return NULL;
1332 }
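
/*
 * Minimal sketch of the bucketing this lookup relies on (an assumption
 * about kgnilnd_nid2peerlist, which is defined elsewhere in the LND -
 * shown only to illustrate why comparing LNET_NIDADDR suffices):
 *
 *	static inline struct list_head *
 *	kgnilnd_nid2peerlist(lnet_nid_t nid)
 *	{
 *		unsigned int hash = LNET_NIDADDR(nid) %
 *				    *kgnilnd_tunables.kgn_peer_hash_size;
 *
 *		return &kgnilnd_data.kgn_peers[hash];
 *	}
 *
 * Because the hash ignores the net portion, nids differing only in net
 * land on the same chain and match a single peer entry.
 */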
1333
1334 /* need write_lock on kgn_peer_conn_lock */
1335 void
1336 kgnilnd_unlink_peer_locked(kgn_peer_t *peer)
1337 {
1338         LASSERTF(list_empty(&peer->gnp_conns),
1339                 "peer 0x%p->%s\n",
1340                  peer, libcfs_nid2str(peer->gnp_nid));
1341         LASSERTF(list_empty(&peer->gnp_tx_queue),
1342                 "peer 0x%p->%s\n",
1343                  peer, libcfs_nid2str(peer->gnp_nid));
1344         LASSERTF(kgnilnd_peer_active(peer),
1345                 "peer 0x%p->%s\n",
1346                  peer, libcfs_nid2str(peer->gnp_nid));
1347         CDEBUG(D_NET, "unlinking peer 0x%p->%s\n",
1348                 peer, libcfs_nid2str(peer->gnp_nid));
1349
1350         list_del_init(&peer->gnp_list);
1351         kgnilnd_data.kgn_peer_version++;
1352         kgnilnd_admin_decref(kgnilnd_data.kgn_npending_unlink);
1353         /* lose peerlist's ref */
1354         kgnilnd_peer_decref(peer);
1355 }
1356
1357 int
1358 kgnilnd_get_peer_info(int index,
1359                       kgn_peer_t **found_peer,
1360                       lnet_nid_t *id, __u32 *nic_addr,
1361                       int *refcount, int *connecting)
1362 {
1363         kgn_peer_t        *peer;
1364         int               i;
1365         int               rc = -ENOENT;
1366
1367         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1368
1369         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1370                 list_for_each_entry(peer, &kgnilnd_data.kgn_peers[i], gnp_list) {
1371                         if (index-- > 0)
1372                                 continue;
1373
1374                         CDEBUG(D_NET, "found peer %p (%s) at index %d\n",
1375                                peer, libcfs_nid2str(peer->gnp_nid), index);
1376
1377                         *found_peer  = peer;
1378                         *id          = peer->gnp_nid;
1379                         *nic_addr    = peer->gnp_host_id;
1380                         *refcount    = atomic_read(&peer->gnp_refcount);
1381                         *connecting  = peer->gnp_connecting;
1382
1383                         rc = 0;
1384                         goto out;
1385                 }
1386         }
1387 out:
1388         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1389         if (rc)
1390                 CDEBUG(D_NET, "no gni peer at index %d\n", index);
1391         return rc;
1392 }
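
/*
 * Illustrative caller pattern for the index-based walk above (this is
 * how the IOC_LIBCFS_GET_PEER ioctl below consumes it; the loop here is
 * a hypothetical sketch, not code from this file):
 *
 *	for (index = 0; ; index++) {
 *		rc = kgnilnd_get_peer_info(index, &peer, &nid, &nic_addr,
 *					   &refcount, &connecting);
 *		if (rc == -ENOENT)
 *			break;		// walked past the last peer
 *		// ... report this peer ...
 *	}
 *
 * Each call takes and drops the read lock, so the walk is only
 * approximate if peers are added or removed between calls.
 */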
1393
1394 /* requires write_lock on kgn_peer_conn_lock held */
1395 void
1396 kgnilnd_add_peer_locked(lnet_nid_t nid, kgn_peer_t *new_stub_peer, kgn_peer_t **peerp)
1397 {
1398         kgn_peer_t        *peer, *peer2;
1399
1400         LASSERTF(new_stub_peer != NULL, "bad stub peer for nid %s\n",
1401                  libcfs_nid2str(nid));
1402
1403         peer2 = kgnilnd_find_peer_locked(nid);
1404         if (peer2 != NULL) {
1405                 /* A peer was created during the lock transition, so drop
1406                  * the new one we created */
1407                 kgnilnd_peer_decref(new_stub_peer);
1408                 peer = peer2;
1409         } else {
1410                 peer = new_stub_peer;
1411                 /* peer table takes existing ref on peer */
1412
1413                 LASSERTF(!kgnilnd_peer_active(peer),
1414                         "peer 0x%p->%s already in peer table\n",
1415                         peer, libcfs_nid2str(peer->gnp_nid));
1416                 list_add_tail(&peer->gnp_list,
1417                               kgnilnd_nid2peerlist(nid));
1418                 kgnilnd_data.kgn_peer_version++;
1419         }
1420
1421         LASSERTF(peer->gnp_net != NULL, "peer 0x%p->%s with NULL net\n",
1422                  peer, libcfs_nid2str(peer->gnp_nid));
1423         *peerp = peer;
1424 }
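
/*
 * The allocate-then-recheck dance above is the usual pattern for adding
 * to a lock-protected table when allocation can't happen under the lock
 * (sketch using the names from this file):
 *
 *	kgnilnd_create_peer_safe(&stub, nid, net, state);  // no lock held
 *	write_lock(&kgnilnd_data.kgn_peer_conn_lock);
 *	kgnilnd_add_peer_locked(nid, stub, &peer);  // re-check; insert the
 *						    // stub, or drop it if
 *						    // we lost the race
 *	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
 */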
1425
1426 int
1427 kgnilnd_add_peer(kgn_net_t *net, lnet_nid_t nid, kgn_peer_t **peerp)
1428 {
1429         kgn_peer_t        *peer;
1430         int                rc;
1431         int                node_state;
1432         ENTRY;
1433
1434         if (nid == LNET_NID_ANY)
1435                 return -EINVAL;
1436
1437         node_state = kgnilnd_get_node_state(LNET_NIDADDR(nid));
1438
1439         /* NB - this will not block during normal operations -
1440          * the only writer of this is in the startup/shutdown path. */
1441         rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
1442         if (!rc) {
1443                 rc = -ESHUTDOWN;
1444                 RETURN(rc);
1445         }
1446         rc = kgnilnd_create_peer_safe(&peer, nid, net, node_state);
1447         if (rc != 0) {
1448                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1449                 RETURN(rc);
1450         }
1451
1452         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1453         up_read(&kgnilnd_data.kgn_net_rw_sem);
1454
1455         kgnilnd_add_peer_locked(nid, peer, peerp);
1456
1457         CDEBUG(D_NET, "peer 0x%p->%s connecting %d\n",
1458                *peerp, libcfs_nid2str((*peerp)->gnp_nid),
1459                (*peerp)->gnp_connecting);
1460
1461         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1462         RETURN(0);
1463 }
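
/*
 * Note the lock handoff in kgnilnd_add_peer() above: the kgn_net_rw_sem
 * read lock is held across acquiring the kgn_peer_conn_lock write lock
 * and only dropped afterwards, so the net cannot be torn down in the
 * window between peer creation and its insertion into the peer table.
 */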
1464
1465 /* needs write_lock on kgn_peer_conn_lock */
1466 void
1467 kgnilnd_cancel_peer_connect_locked(kgn_peer_t *peer, struct list_head *zombies)
1468 {
1469         kgn_tx_t        *tx, *txn;
1470
1471         /* we do care about the state of gnp_connecting - we could be
1472          * between reconnect attempts, so try to find the dgram and cancel
1473          * the TX anyway. If we are in the process of posting, DON'T do
1474          * anything; once it fails or succeeds we can nuke the connect
1475          * attempt. We have no idea where in kgnilnd_post_dgram we are, so
1476          * we can't attempt to cancel until the function is done.
1477          */
1478
1479         /* make sure the peer isn't in the process of connecting or waiting to connect */
1480         spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1481         if (!(list_empty(&peer->gnp_connd_list))) {
1482                 list_del_init(&peer->gnp_connd_list);
1483                 /* remove connd ref */
1484                 kgnilnd_peer_decref(peer);
1485         }
1486         spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1487
1488         if (peer->gnp_connecting == GNILND_PEER_POSTING || peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH) {
1489                 peer->gnp_connecting = GNILND_PEER_NEEDS_DEATH;
1490                 /* We are in the process of posting right now; setting NEEDS_DEATH
1491                  * tells the poster to cancel the connect, so we are finished for now */
1492         } else {
1493                 /* no need for an exchange - we hold the peer lock and it's ready for us to nuke */
1494                 LASSERTF(peer->gnp_connecting != GNILND_PEER_POSTING,
1495                         "Peer in invalid state 0x%p->%s, connecting %d\n",
1496                         peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
1497                 peer->gnp_connecting = GNILND_PEER_IDLE;
1498                 set_mb(peer->gnp_last_dgram_errno, -ETIMEDOUT);
1499                 kgnilnd_find_and_cancel_dgram(peer->gnp_net->gnn_dev,
1500                                                       peer->gnp_nid);
1501         }
1502
1503         /* The least we can do is nuke the TXs no matter what... */
1504         list_for_each_entry_safe(tx, txn, &peer->gnp_tx_queue, tx_list) {
1505                 kgnilnd_tx_del_state_locked(tx, peer, NULL,
1506                                            GNILND_TX_ALLOCD);
1507                 list_add_tail(&tx->tx_list, zombies);
1508         }
1509 }
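
/*
 * Summary of the gnp_connecting transitions handled above (informal):
 *
 *   GNILND_PEER_POSTING     -> GNILND_PEER_NEEDS_DEATH  (the poster will
 *                                                        cancel for us)
 *   GNILND_PEER_NEEDS_DEATH -> GNILND_PEER_NEEDS_DEATH  (already marked)
 *   any other state         -> GNILND_PEER_IDLE         (we cancel the
 *                                                        dgram ourselves)
 *
 * In every case the peer's queued TXs are moved to the caller's zombie
 * list for kgnilnd_txlist_done().
 */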
1510
1511 /* needs write_lock on kgn_peer_conn_lock */
1512 void
1513 kgnilnd_del_peer_locked(kgn_peer_t *peer, int error)
1514 {
1515         /* this peer could be passive and only held for purgatory,
1516          * take a ref to ensure it doesn't disappear in this function */
1517         kgnilnd_peer_addref(peer);
1518
1519         CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);
1520
1521         /* if purgatory release cleared it out, don't try again */
1522         if (kgnilnd_peer_active(peer)) {
1523                 /* always do this to allow kgnilnd_start_connect and
1524                  * kgnilnd_finish_connect to catch this before they
1525                  * wrap up their operations */
1526                 if (kgnilnd_can_unlink_peer_locked(peer)) {
1527                         /* already released purgatory, so only active
1528                          * conns hold it */
1529                         kgnilnd_unlink_peer_locked(peer);
1530                 } else {
1531                         kgnilnd_close_peer_conns_locked(peer, error);
1532                         /* peer unlinks itself when last conn is closed */
1533                 }
1534         }
1535
1536         /* we are done, release back to the wild */
1537         kgnilnd_peer_decref(peer);
1538 }
1539
1540 int
1541 kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
1542                           int error)
1543 {
1544         LIST_HEAD(souls);
1545         LIST_HEAD(zombies);
1546         kgn_peer_t *peer, *pnxt;
1547         int                     lo;
1548         int                     hi;
1549         int                     i;
1550         int                     rc = -ENOENT;
1551
1552         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1553
1554         if (nid != LNET_NID_ANY) {
1555                 lo = hi = kgnilnd_nid2peerlist(nid) - kgnilnd_data.kgn_peers;
1556         } else {
1557                 lo = 0;
1558                 hi = *kgnilnd_tunables.kgn_peer_hash_size - 1;
1559                 /* wildcards always succeed */
1560                 rc = 0;
1561         }
1562
1563         for (i = lo; i <= hi; i++) {
1564                 list_for_each_entry_safe(peer, pnxt, &kgnilnd_data.kgn_peers[i],
1565                                          gnp_list) {
1566                         LASSERTF(peer->gnp_net != NULL,
1567                                 "peer %p (%s) with NULL net\n",
1568                                  peer, libcfs_nid2str(peer->gnp_nid));
1569
1570                         if (net != NULL && peer->gnp_net != net)
1571                                 continue;
1572
1573                         if (!(nid == LNET_NID_ANY || LNET_NIDADDR(peer->gnp_nid) == LNET_NIDADDR(nid)))
1574                                 continue;
1575
1576                         /* In both cases, we want to stop any in-flight
1577                          * connect attempts */
1578                         kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1579
1580                         switch (command) {
1581                         case GNILND_DEL_CONN:
1582                                 kgnilnd_close_peer_conns_locked(peer, error);
1583                                 break;
1584                         case GNILND_DEL_PEER:
1585                                 peer->gnp_pending_unlink = 1;
1586                                 kgnilnd_admin_addref(kgnilnd_data.kgn_npending_unlink);
1587                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1588                                 kgnilnd_del_peer_locked(peer, error);
1589                                 break;
1590                         case GNILND_CLEAR_PURGATORY:
1591                                 /* Mark everything ready for detach; the reaper will
1592                                  * clean up once we release the kgn_peer_conn_lock
1593                                  */
1594                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1595                                 peer->gnp_last_errno = -EISCONN;
1596                                 /* clear reconnect state so the peer can reconnect soon */
1597                                 peer->gnp_reconnect_time = 0;
1598                                 peer->gnp_reconnect_interval = 0;
1599                                 break;
1600                         default:
1601                                 CERROR("bad command %d\n", command);
1602                                 LBUG();
1603                         }
1604                         /* we matched something */
1605                         rc = 0;
1606                 }
1607         }
1608
1609         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1610
1611         /* nuke peer TX */
1612         kgnilnd_txlist_done(&zombies, error);
1613
1614         /* This function does not return until the commands it initiated have completed,
1615          * since they have to work their way through the other threads. In the case of
1616          * shutdown, threads are not woken up until after this call is initiated, so we
1617          * cannot wait - we just need to return. The same applies to stack reset: we
1618          * shouldn't wait, as the reset thread handles the closing.
1619          */
1620
1621         CFS_RACE(CFS_FAIL_GNI_RACE_RESET);
1622
1623         if (error == -ENOTRECOVERABLE || error == -ESHUTDOWN) {
1624                 return rc;
1625         }
1626
1627         wait_var_event_warning(&kgnilnd_data,
1628                                !atomic_read(&kgnilnd_data.kgn_npending_conns) &&
1629                                !atomic_read(&kgnilnd_data.kgn_npending_detach) &&
1630                                !atomic_read(&kgnilnd_data.kgn_npending_unlink),
1631                                "Waiting on %d peers %d closes %d detaches\n",
1632                                 atomic_read(&kgnilnd_data.kgn_npending_unlink),
1633                                 atomic_read(&kgnilnd_data.kgn_npending_conns),
1634                                 atomic_read(&kgnilnd_data.kgn_npending_detach));
1635
1636         return rc;
1637 }
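
/*
 * Sketch of the pending-work accounting the wait above relies on
 * (assumption: kgnilnd_admin_addref/decref are an atomic inc/dec paired
 * with a wakeup on &kgnilnd_data - see their definitions in gnilnd.h):
 *
 *   this thread                          worker threads
 *   -----------                          --------------
 *   kgnilnd_admin_addref(counter);       ... close / detach / unlink ...
 *   wait_var_event_warning(              kgnilnd_admin_decref(counter);
 *           &kgnilnd_data, counters==0);     (the decref wakes the waiter)
 */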
1638
1639 kgn_conn_t *
1640 kgnilnd_get_conn_by_idx(int index)
1641 {
1642         kgn_peer_t        *peer;
1643         kgn_conn_t        *conn;
1644         int                i;
1645
1646
1647         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1648                 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1649                 list_for_each_entry(peer, &kgnilnd_data.kgn_peers[i], gnp_list) {
1650                         list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
1651                                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1652                                         continue;
1653
1654                                 if (index-- > 0)
1655                                         continue;
1656
1657                                 CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
1658                                        libcfs_nid2str(conn->gnc_peer->gnp_nid),
1659                                        atomic_read(&conn->gnc_refcount));
1660                                 kgnilnd_conn_addref(conn);
1661                                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1662                                 return conn;
1663                         }
1664                 }
1665                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1666         }
1667
1668         return NULL;
1669 }
1670
1671 int
1672 kgnilnd_get_conn_info(kgn_peer_t *peer,
1673                       int *device_id, __u64 *peerstamp,
1674                       int *tx_seq, int *rx_seq,
1675                       int *fmaq_len, int *nfma, int *nrdma)
1676 {
1677         kgn_conn_t        *conn;
1678         int               rc = 0;
1679
1680         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1681
1682         conn = kgnilnd_find_conn_locked(peer);
1683         if (conn == NULL) {
1684                 rc = -ENOENT;
1685                 goto out;
1686         }
1687
1688         *device_id = conn->gnc_device->gnd_host_id;
1689         *peerstamp = conn->gnc_peerstamp;
1690         *tx_seq = atomic_read(&conn->gnc_tx_seq);
1691         *rx_seq = atomic_read(&conn->gnc_rx_seq);
1692         *fmaq_len = kgnilnd_count_list(&conn->gnc_fmaq);
1693         *nfma = atomic_read(&conn->gnc_nlive_fma);
1694         *nrdma = atomic_read(&conn->gnc_nlive_rdma);
1695 out:
1696         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1697         return rc;
1698 }
1699
1700 /* needs write_lock on kgn_peer_conn_lock */
1701 int
1702 kgnilnd_close_peer_conns_locked(kgn_peer_t *peer, int why)
1703 {
1704         kgn_conn_t         *conn;
1705         struct list_head   *ctmp, *cnxt;
1706         int                 count = 0;
1707
1708         list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
1709                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
1710
1711                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1712                         continue;
1713
1714                 count++;
1715                 /* we mark gnc_needs closing and increment kgn_npending_conns so that
1716                  * kgnilnd_del_conn_or_peer can wait on the other threads closing
1717                  * and cleaning up the connection.
1718                  */
1719                 if (!conn->gnc_needs_closing) {
1720                         conn->gnc_needs_closing = 1;
1721                         kgnilnd_admin_addref(kgnilnd_data.kgn_npending_conns);
1722                 }
1723                 kgnilnd_close_conn_locked(conn, why);
1724         }
1725         return count;
1726 }
1727
1728 int
1729 kgnilnd_report_node_state(lnet_nid_t nid, int down)
1730 {
1731         int         rc;
1732         kgn_peer_t  *peer, *new_peer;
1733         LIST_HEAD(zombies);
1734
1735         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1736         peer = kgnilnd_find_peer_locked(nid);
1737
1738         if (peer == NULL) {
1739                 int       i;
1740                 int       found_net = 0;
1741                 kgn_net_t *net;
1742
1743                 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1744
1745                 /* Don't add a peer for node up events */
1746                 if (down == GNILND_PEER_UP)
1747                         return 0;
1748
1749                 /* find any valid net - we don't care which one... */
1750                 down_read(&kgnilnd_data.kgn_net_rw_sem);
1751                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
1752                         list_for_each_entry(net, &kgnilnd_data.kgn_nets[i],
1753                                             gnn_list) {
1754                                 found_net = 1;
1755                                 break;
1756                         }
1757
1758                         if (found_net) {
1759                                 break;
1760                         }
1761                 }
1762                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1763
1764                 if (!found_net) {
1765                         CNETERR("Could not find a net for nid %lld\n", nid);
1766                         return 1;
1767                 }
1768
1769                 /* The nid passed in does not yet contain the net portion.
1770                  * Let's build it up now
1771                  */
1772                 nid = LNET_MKNID(LNET_NID_NET(&net->gnn_ni->ni_nid), nid);
1773                 rc = kgnilnd_add_peer(net, nid, &new_peer);
1774
1775                 if (rc) {
1776                         CNETERR("Could not add peer for nid %lld, rc %d\n",
1777                                 nid, rc);
1778                         return 1;
1779                 }
1780
1781                 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1782                 peer = kgnilnd_find_peer_locked(nid);
1783
1784                 if (peer == NULL) {
1785                         CNETERR("Could not find peer for nid %lld\n", nid);
1786                         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1787                         return 1;
1788                 }
1789         }
1790
1791         peer->gnp_state = down;
1792
1793         if (down == GNILND_PEER_DOWN) {
1794                 kgn_conn_t *conn;
1795
1796                 peer->gnp_down_event_time = jiffies;
1797                 kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1798                 conn = kgnilnd_find_conn_locked(peer);
1799
1800                 if (conn != NULL) {
1801                         kgnilnd_close_conn_locked(conn, -ENETRESET);
1802                 }
1803         } else {
1804                 peer->gnp_up_event_time = jiffies;
1805         }
1806
1807         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1808
1809         if (down == GNILND_PEER_DOWN) {
1810                 /* using ENETRESET so we don't get messages from
1811                  * kgnilnd_tx_done
1812                  */
1813                 kgnilnd_txlist_done(&zombies, -ENETRESET);
1814                 kgnilnd_peer_notify(peer, -ECONNRESET, 0);
1815                 LCONSOLE_INFO("Received down event for nid %u\n",
1816                               LNET_NIDADDR(nid));
1817         }
1818
1819         return 0;
1820 }
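
/*
 * Worked example of the NID reconstruction above (values illustrative):
 * a node-state event delivers only the address portion, e.g. 5; if the
 * first net found is gni1, LNET_MKNID(LNET_NID_NET(&ni->ni_nid), 5)
 * yields the nid printed as "5@gni1", which is the form that
 * kgnilnd_find_peer_locked() and kgnilnd_add_peer() expect.
 */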
1821
1822 int
1823 kgnilnd_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
1824 {
1825         struct libcfs_ioctl_data *data = arg;
1826         kgn_net_t                *net = ni->ni_data;
1827         int                       rc = -EINVAL;
1828
1829         LASSERT(ni == net->gnn_ni);
1830
1831         switch (cmd) {
1832         case IOC_LIBCFS_GET_PEER: {
1833                 lnet_nid_t   nid = 0;
1834                 kgn_peer_t  *peer = NULL;
1835                 __u32 nic_addr = 0;
1836                 __u64 peerstamp = 0;
1837                 int peer_refcount = 0, peer_connecting = 0;
1838                 int device_id = 0;
1839                 int tx_seq = 0, rx_seq = 0;
1840                 int fmaq_len = 0, nfma = 0, nrdma = 0;
1841
1842                 rc = kgnilnd_get_peer_info(data->ioc_count, &peer,
1843                                            &nid, &nic_addr, &peer_refcount,
1844                                            &peer_connecting);
1845                 if (rc)
1846                         break;
1847
1848                 /* Barf */
1849                 /* LNET_MKNID is used to mask from LNet the multiplexing/demultiplexing of connections and peers.
1850                  * LNet assumes a conn and peer per net; LNET_MKNID/LNET_NIDADDR lets us show LNet what it
1851                  * wants to see instead of the underlying network that is actually being used to send the data
1852                  */
1853                 data->ioc_nid    = LNET_MKNID(LNET_NID_NET(&ni->ni_nid),
1854                                               LNET_NIDADDR(nid));
1855                 data->ioc_flags  = peer_connecting;
1856                 data->ioc_count  = peer_refcount;
1857
1858                 rc = kgnilnd_get_conn_info(peer, &device_id, &peerstamp,
1859                                            &tx_seq, &rx_seq, &fmaq_len,
1860                                            &nfma, &nrdma);
1861
1862                 /* This is allowable - a persistent peer may not
1863                  * have a connection */
1864                 if (rc) {
1865                         /* flag to indicate we are not connected -
1866                          * need to print as such */
1867                         data->ioc_flags |= (1<<16);
1868                         rc = 0;
1869                 } else {
1870                         /* still barf */
1871                         data->ioc_net = device_id;
1872                         data->ioc_u64[0] = peerstamp;
1873                         data->ioc_u32[0] = fmaq_len;
1874                         data->ioc_u32[1] = nfma;
1875                         data->ioc_u32[2] = tx_seq;
1876                         data->ioc_u32[3] = rx_seq;
1877                         data->ioc_u32[4] = nrdma;
1878                 }
1879                 break;
1880         }
1881         case IOC_LIBCFS_ADD_PEER: {
1882                 /* just a dummy value to allow using the common interface */
1883                 kgn_peer_t      *peer;
1884                 rc = kgnilnd_add_peer(net, data->ioc_nid, &peer);
1885                 break;
1886         }
1887         case IOC_LIBCFS_DEL_PEER: {
1888                 /* NULL is passed in so it affects all peers in existence without regard to network
1889                  * as the peer may not exist on the network LNET believes it to be on.
1890                  */
1891                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1892                                               GNILND_DEL_PEER, -EUCLEAN);
1893                 break;
1894         }
1895         case IOC_LIBCFS_GET_CONN: {
1896                 kgn_conn_t *conn = kgnilnd_get_conn_by_idx(data->ioc_count);
1897
1898                 if (conn == NULL)
1899                         rc = -ENOENT;
1900                 else {
1901                         rc = 0;
1902                         /* LNET_MKNID is used to build the correct address based on what LNET wants to see instead of
1903                          * the generic connection that is used to send the data
1904                          */
1905                         data->ioc_nid    = LNET_MKNID(LNET_NID_NET(&ni->ni_nid),
1906                                                       LNET_NIDADDR(conn->gnc_peer->gnp_nid));
1907                         data->ioc_u32[0] = conn->gnc_device->gnd_id;
1908                         kgnilnd_conn_decref(conn);
1909                 }
1910                 break;
1911         }
1912         case IOC_LIBCFS_CLOSE_CONNECTION: {
1913                 /* use error = -ENETRESET to indicate it was lctl disconnect */
1914                 /* NULL is passed in so it affects all the nets as the connection is virtual
1915                  * and may not exist on the network LNET believes it to be on.
1916                  */
1917                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1918                                               GNILND_DEL_CONN, -ENETRESET);
1919                 break;
1920         }
1921         case IOC_LIBCFS_PUSH_CONNECTION: {
1922                 /* we use this to flush purgatory */
1923                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1924                                               GNILND_CLEAR_PURGATORY, -EUCLEAN);
1925                 break;
1926         }
1927         case IOC_LIBCFS_REGISTER_MYNID: {
1928                 /* Ignore if this is a noop */
1929                 if (data->ioc_nid == lnet_nid_to_nid4(&ni->ni_nid)) {
1930                         rc = 0;
1931                 } else {
1932                         CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
1933                                libcfs_nid2str(data->ioc_nid),
1934                                libcfs_nidstr(&ni->ni_nid));
1935                         rc = -EINVAL;
1936                 }
1937                 break;
1938         }
1939         }
1940
1941         return rc;
1942 }
1943
1944 int
1945 kgnilnd_dev_init(kgn_device_t *dev)
1946 {
1947         gni_return_t      rrc;
1948         int               rc = 0;
1949         unsigned int      cq_size;
1950         ENTRY;
1951
1952         /* the size of these CQs should be able to accommodate the outgoing
1953          * RDMA and SMSG transactions.  Since we don't really know what we
1954          * need here, we'll take credits * 2 * 3 to allow plenty of headroom.
1955          * We need to dig into this more with the performance work. */
1956         cq_size = *kgnilnd_tunables.kgn_credits * 2 * 3;
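        /* e.g. if the credits tunable were 256 (an assumption - the real
         * value is a module parameter), this would allow 256 * 2 * 3 =
         * 1536 outstanding CQ entries */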
1957
1958         rrc = kgnilnd_cdm_create(dev->gnd_id, *kgnilnd_tunables.kgn_ptag,
1959                                  *kgnilnd_tunables.kgn_pkey, 0,
1960                                  &dev->gnd_domain);
1961         if (rrc != GNI_RC_SUCCESS) {
1962                 CERROR("Can't create CDM %d (%d)\n", dev->gnd_id, rrc);
1963                 GOTO(failed, rc = -ENODEV);
1964         }
1965
1966         rrc = kgnilnd_cdm_attach(dev->gnd_domain, dev->gnd_id,
1967                                  &dev->gnd_host_id, &dev->gnd_handle);
1968         if (rrc != GNI_RC_SUCCESS) {
1969                 CERROR("Can't attach CDM to device %d (%d)\n",
1970                         dev->gnd_id, rrc);
1971                 GOTO(failed, rc = -ENODEV);
1972         }
1973
1974         /* a bit gross, but not much we can do - Aries Sim doesn't have
1975          * hardcoded NIC/NID that we can use */
1976         rc = kgnilnd_setup_nic_translation(dev->gnd_host_id);
1977         if (rc != 0)
1978                 GOTO(failed, rc = -ENODEV);
1979
1980         /* only dev 0 gets the errors - no need to reset the stack twice
1981          * - this works because we have a single PTAG, if we had more
1982          * then we'd need to have multiple handlers */
1983         if (dev->gnd_id == 0) {
1984                 rrc = kgnilnd_subscribe_errors(dev->gnd_handle,
1985                                                 GNI_ERRMASK_CRITICAL |
1986                                                 GNI_ERRMASK_UNKNOWN_TRANSACTION,
1987                                               0, NULL, kgnilnd_critical_error,
1988                                               &dev->gnd_err_handle);
1989                 if (rrc != GNI_RC_SUCCESS) {
1990                         CERROR("Can't subscribe for errors on device %d: rc %d\n",
1991                                 dev->gnd_id, rrc);
1992                         GOTO(failed, rc = -ENODEV);
1993                 }
1994
1995                 rc = kgnilnd_set_quiesce_callback(dev->gnd_handle,
1996                                                   kgnilnd_quiesce_end_callback);
1997                 if (rc != GNI_RC_SUCCESS) {
1998                         CERROR("Can't subscribe for quiesce callback on device %d: rc %d\n",
1999                                 dev->gnd_id, rc);
2000                         GOTO(failed, rc = -ENODEV);
2001                 }
2002         }
2003
2004         rc = kgnilnd_nicaddr_to_nid(dev->gnd_host_id, &dev->gnd_nid);
2005         if (rc < 0) {
2006                 /* log messages during startup */
2007                 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
2008                         CERROR("couldn't translate host_id 0x%x to nid. rc %d\n",
2009                                 dev->gnd_host_id, rc);
2010                 }
2011                 GOTO(failed, rc = -ESRCH);
2012         }
2013         CDEBUG(D_NET, "NIC %x -> NID %d\n", dev->gnd_host_id, dev->gnd_nid);
2014
2015         rrc = kgnilnd_cq_create(dev->gnd_handle, *kgnilnd_tunables.kgn_credits,
2016                                 0, kgnilnd_device_callback,
2017                                 dev->gnd_id, &dev->gnd_snd_rdma_cqh);
2018         if (rrc != GNI_RC_SUCCESS) {
2019                 CERROR("Can't create rdma send cq size %d for device %d (%d)\n",
2020                        *kgnilnd_tunables.kgn_credits, dev->gnd_id, rrc);
2021                 GOTO(failed, rc = -EINVAL);
2022         }
2023
2024         rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
2025                         0, kgnilnd_device_callback, dev->gnd_id,
2026                         &dev->gnd_snd_fma_cqh);
2027         if (rrc != GNI_RC_SUCCESS) {
2028                 CERROR("Can't create fma send cq size %u for device %d (%d)\n",
2029                        cq_size, dev->gnd_id, rrc);
2030                 GOTO(failed, rc = -EINVAL);
2031         }
2032
2033         /* This one we size differently - overflows are possible and it needs to be
2034          * sized based on machine size */
2035         rrc = kgnilnd_cq_create(dev->gnd_handle,
2036                         *kgnilnd_tunables.kgn_fma_cq_size,
2037                         0, kgnilnd_device_callback, dev->gnd_id,
2038                         &dev->gnd_rcv_fma_cqh);
2039         if (rrc != GNI_RC_SUCCESS) {
2040                 CERROR("Can't create fma cq size %d for device %d (%d)\n",
2041                        *kgnilnd_tunables.kgn_fma_cq_size, dev->gnd_id, rrc);
2042                 GOTO(failed, rc = -EINVAL);
2043         }
2044
2045         rrc = kgnilnd_register_smdd_buf(dev);
2046         if (rrc != GNI_RC_SUCCESS) {
2047                 GOTO(failed, rc = -EINVAL);
2048         }
2049
2050         RETURN(0);
2051
2052 failed:
2053         kgnilnd_dev_fini(dev);
2054         RETURN(rc);
2055 }
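
/*
 * Recap of the three CQs created in kgnilnd_dev_init (summarizing the
 * code above):
 *   gnd_snd_rdma_cqh - RDMA sends,   sized by the credits tunable
 *   gnd_snd_fma_cqh  - FMA sends,    sized cq_size (credits * 2 * 3)
 *   gnd_rcv_fma_cqh  - FMA receives, sized by kgn_fma_cq_size
 */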
2056
2057 void
2058 kgnilnd_dev_fini(kgn_device_t *dev)
2059 {
2060         gni_return_t rrc;
2061         ENTRY;
2062
2063         /* At quiesce or reset time, do we need to loop through and clear gnd_ready_conns? */
2064         LASSERTF(list_empty(&dev->gnd_ready_conns) &&
2065                  list_empty(&dev->gnd_map_tx) &&
2066                  list_empty(&dev->gnd_rdmaq) &&
2067                  list_empty(&dev->gnd_delay_conns),
2068                  "dev 0x%p ready_conns %d@0x%p delay_conns %d@0x%p "
2069                  "map_tx %d@0x%p rdmaq %d@0x%p\n",
2070                  dev, kgnilnd_count_list(&dev->gnd_ready_conns), &dev->gnd_ready_conns,
2071                  kgnilnd_count_list(&dev->gnd_delay_conns), &dev->gnd_delay_conns,
2072                  kgnilnd_count_list(&dev->gnd_map_tx), &dev->gnd_map_tx,
2073                  kgnilnd_count_list(&dev->gnd_rdmaq), &dev->gnd_rdmaq);
2074
2075         /* These should follow from tearing down all connections */
2076         LASSERTF(dev->gnd_map_nphys == 0 && dev->gnd_map_physnop == 0,
2077                 "%d physical mappings of %d pages still mapped\n",
2078                  dev->gnd_map_nphys, dev->gnd_map_physnop);
2079
2080         LASSERTF(atomic_read(&dev->gnd_n_mdd) == 0 &&
2081                  atomic_read(&dev->gnd_n_mdd_held) == 0 &&
2082                  atomic64_read(&dev->gnd_nbytes_map) == 0,
2083                  "%d SMSG mappings of %lld bytes still mapped or held %d\n",
2084                  atomic_read(&dev->gnd_n_mdd),
2085                  (u64)atomic64_read(&dev->gnd_nbytes_map),
2086                  atomic_read(&dev->gnd_n_mdd_held));
2087
2088         LASSERT(list_empty(&dev->gnd_map_list));
2089
2090         /* What other assertions are needed to ensure all connections are torn down? */
2091
2092         /* check all counters == 0 (EP, MDD, etc) */
2093
2094         /* if we are resetting due to quiesce (stack reset), don't check
2095          * thread states */
2096         LASSERTF(kgnilnd_data.kgn_quiesce_trigger ||
2097                 atomic_read(&kgnilnd_data.kgn_nthreads) == 0,
2098                 "tried to shutdown with threads active\n");
2099
2100         if (dev->gnd_smdd_hold_buf) {
2101                 rrc = kgnilnd_deregister_smdd_buf(dev);
2102                 LASSERTF(rrc == GNI_RC_SUCCESS,
2103                 "bad rc from deregistration of sMDD buffer: %d\n", rrc);
2104                 dev->gnd_smdd_hold_buf = NULL;
2105         }
2106
2107         if (dev->gnd_rcv_fma_cqh) {
2108                 rrc = kgnilnd_cq_destroy(dev->gnd_rcv_fma_cqh);
2109                 LASSERTF(rrc == GNI_RC_SUCCESS,
2110                         "bad rc from gni_cq_destroy on rcv_fma_cqh: %d\n", rrc);
2111                 dev->gnd_rcv_fma_cqh = NULL;
2112         }
2113
2114         if (dev->gnd_snd_rdma_cqh) {
2115                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_rdma_cqh);
2116                 LASSERTF(rrc == GNI_RC_SUCCESS,
2117                         "bad rc from gni_cq_destroy on send_rdma_cqh: %d\n", rrc);
2118                 dev->gnd_snd_rdma_cqh = NULL;
2119         }
2120
2121         if (dev->gnd_snd_fma_cqh) {
2122                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_fma_cqh);
2123                 LASSERTF(rrc == GNI_RC_SUCCESS,
2124                         "bad rc from gni_cq_destroy on snd_fma_cqh: %d\n", rrc);
2125                 dev->gnd_snd_fma_cqh = NULL;
2126         }
2127
2128         if (dev->gnd_err_handle) {
2129                 rrc = kgnilnd_release_errors(dev->gnd_err_handle);
2130                 LASSERTF(rrc == GNI_RC_SUCCESS,
2131                         "bad rc from gni_release_errors: %d\n", rrc);
2132                 dev->gnd_err_handle = NULL;
2133         }
2134
2135         if (dev->gnd_domain) {
2136                 rrc = kgnilnd_cdm_destroy(dev->gnd_domain);
2137                 LASSERTF(rrc == GNI_RC_SUCCESS,
2138                         "bad rc from gni_cdm_destroy: %d\n", rrc);
2139                 dev->gnd_domain = NULL;
2140         }
2141
2142         EXIT;
2143 }
2144
2145 int kgnilnd_base_startup(void)
2146 {
2147         long long            pkmem = libcfs_kmem_read();
2148         int                  rc;
2149         int                  i, j;
2150         kgn_device_t        *dev;
2151         struct task_struct  *thrd;
2152
2153 #if defined(CONFIG_CRAY_XT) && !defined(CONFIG_CRAY_COMPUTE)
2154         /* limit how much memory can be allocated for fma blocks in
2155          * instances where many nodes need to reconnect at the same time;
2156          * the limit itself is set below, after kgnilnd_data is zeroed */
2157         struct sysinfo si;
2158         si_meminfo(&si);
2159 #endif
2160
2161         ENTRY;
2162
2163         LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_NOTHING,
2164                 "init %d\n", kgnilnd_data.kgn_init);
2165
2166         /* zero pointers, flags etc */
2167         memset(&kgnilnd_data, 0, sizeof(kgnilnd_data));
2168         kgnilnd_check_kgni_version();

#if defined(CONFIG_CRAY_XT) && !defined(CONFIG_CRAY_COMPUTE)
        /* the memset above would have wiped free_pages_limit had it been
         * assigned earlier, so set it only now */
        kgnilnd_data.free_pages_limit = si.totalram/4;
#endif
2169
2170         /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
2171          * a unique (for all time) connstamp so we can uniquely identify
2172          * the sender.  The connstamp is an incrementing counter
2173          * initialised with seconds + microseconds at startup time.  So we
2174          * rely on NOT creating connections more frequently on average than
2175          * 1MHz to ensure we don't use old connstamps when we reboot. */
2176         kgnilnd_data.kgn_connstamp =
2177                  kgnilnd_data.kgn_peerstamp =
2178                         ktime_get_seconds();
2179
2180         init_rwsem(&kgnilnd_data.kgn_net_rw_sem);
2181
2182         for (i = 0; i < GNILND_MAXDEVS; i++) {
2183                 kgn_device_t  *dev = &kgnilnd_data.kgn_devices[i];
2184
2185                 dev->gnd_id = i;
2186                 INIT_LIST_HEAD(&dev->gnd_ready_conns);
2187                 INIT_LIST_HEAD(&dev->gnd_delay_conns);
2188                 INIT_LIST_HEAD(&dev->gnd_map_tx);
2189                 INIT_LIST_HEAD(&dev->gnd_fma_buffs);
2190                 mutex_init(&dev->gnd_cq_mutex);
2191                 mutex_init(&dev->gnd_fmablk_mutex);
2192                 spin_lock_init(&dev->gnd_fmablk_lock);
2193                 init_waitqueue_head(&dev->gnd_waitq);
2194                 init_waitqueue_head(&dev->gnd_dgram_waitq);
2195                 init_waitqueue_head(&dev->gnd_dgping_waitq);
2196                 spin_lock_init(&dev->gnd_lock);
2197                 INIT_LIST_HEAD(&dev->gnd_map_list);
2198                 spin_lock_init(&dev->gnd_map_lock);
2199                 atomic_set(&dev->gnd_nfmablk, 0);
2200                 atomic_set(&dev->gnd_fmablk_vers, 1);
2201                 atomic_set(&dev->gnd_neps, 0);
2202                 atomic_set(&dev->gnd_canceled_dgrams, 0);
2203                 INIT_LIST_HEAD(&dev->gnd_connd_peers);
2204                 spin_lock_init(&dev->gnd_connd_lock);
2205                 spin_lock_init(&dev->gnd_dgram_lock);
2206                 spin_lock_init(&dev->gnd_rdmaq_lock);
2207                 INIT_LIST_HEAD(&dev->gnd_rdmaq);
2208                 init_rwsem(&dev->gnd_conn_sem);
2209
2210                 /* alloc & setup nid based dgram table */
2211                 CFS_ALLOC_PTR_ARRAY(dev->gnd_dgrams,
2212                                     *kgnilnd_tunables.kgn_peer_hash_size);
2213
2214                 if (dev->gnd_dgrams == NULL)
2215                         GOTO(failed, rc = -ENOMEM);
2216
2217                 /* j, not i: i still indexes the enclosing device loop */
2218                 for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++)
2219                         INIT_LIST_HEAD(&dev->gnd_dgrams[j]);
2220                 atomic_set(&dev->gnd_ndgrams, 0);
2221                 atomic_set(&dev->gnd_nwcdgrams, 0);
2222                 /* setup timer for RDMAQ processing */
2223                 cfs_timer_setup(&dev->gnd_rdmaq_timer,
2224                                 kgnilnd_schedule_device_timer,
2225                                 (unsigned long)dev, 0);
2226
2227                 /* setup timer for mapping processing */
2228                 cfs_timer_setup(&dev->gnd_map_timer,
2229                                 kgnilnd_schedule_device_timer,
2230                                 (unsigned long)dev, 0);
2231
2232         }
2233
2234         /* CQID 0 isn't allowed, set to MAX_MSG_ID - 1 to check for conflicts early */
2235         kgnilnd_data.kgn_next_cqid = GNILND_MAX_MSG_ID - 1;
2236         kgnilnd_data.kgn_new_min_timeout = *kgnilnd_tunables.kgn_timeout;
2237         init_waitqueue_head(&kgnilnd_data.kgn_reaper_waitq);
2238         init_waitqueue_head(&kgnilnd_data.kgn_ruhroh_waitq);
2239         spin_lock_init(&kgnilnd_data.kgn_reaper_lock);
2240
2241         mutex_init(&kgnilnd_data.kgn_quiesce_mutex);
2242         atomic_set(&kgnilnd_data.kgn_nquiesce, 0);
2243         atomic_set(&kgnilnd_data.kgn_npending_conns, 0);
2244         atomic_set(&kgnilnd_data.kgn_npending_unlink, 0);
2245         atomic_set(&kgnilnd_data.kgn_npending_detach, 0);
2246         atomic_set(&kgnilnd_data.kgn_rev_offset, 0);
2247         atomic_set(&kgnilnd_data.kgn_rev_length, 0);
2248         atomic_set(&kgnilnd_data.kgn_rev_copy_buff, 0);
2249
2250         /* OK to call kgnilnd_base_shutdown() to clean up now */
2251         kgnilnd_data.kgn_init = GNILND_INIT_DATA;
2252         if (!try_module_get(THIS_MODULE))
2253                 GOTO(failed, rc = -ENOENT);
2254
2255         rwlock_init(&kgnilnd_data.kgn_peer_conn_lock);
2256
2257         CFS_ALLOC_PTR_ARRAY(kgnilnd_data.kgn_peers,
2258                             *kgnilnd_tunables.kgn_peer_hash_size);
2259
2260         if (kgnilnd_data.kgn_peers == NULL)
2261                 GOTO(failed, rc = -ENOMEM);
2262
2263         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2264                 INIT_LIST_HEAD(&kgnilnd_data.kgn_peers[i]);
2265         }
2266
2267         CFS_ALLOC_PTR_ARRAY(kgnilnd_data.kgn_conns,
2268                             *kgnilnd_tunables.kgn_peer_hash_size);
2269
2270         if (kgnilnd_data.kgn_conns == NULL)
2271                 GOTO(failed, rc = -ENOMEM);
2272
2273         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2274                 INIT_LIST_HEAD(&kgnilnd_data.kgn_conns[i]);
2275         }
2276
2277         CFS_ALLOC_PTR_ARRAY(kgnilnd_data.kgn_nets,
2278                             *kgnilnd_tunables.kgn_net_hash_size);
2279
2280         if (kgnilnd_data.kgn_nets == NULL)
2281                 GOTO(failed, rc = -ENOMEM);
2282
2283         for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
2284                 INIT_LIST_HEAD(&kgnilnd_data.kgn_nets[i]);
2285         }
2286
2287         kgnilnd_data.kgn_mbox_cache =
2288                 kmem_cache_create("kgn_mbox_block", GNILND_MBOX_SIZE, 0,
2289                                   SLAB_HWCACHE_ALIGN, NULL);
2290         if (kgnilnd_data.kgn_mbox_cache == NULL) {
2291                 CERROR("Can't create slab for physical mbox blocks\n");
2292                 GOTO(failed, rc = -ENOMEM);
2293         }
2294
2295         kgnilnd_data.kgn_rx_cache =
2296                 kmem_cache_create("kgn_rx_t", sizeof(kgn_rx_t), 0, 0, NULL);
2297         if (kgnilnd_data.kgn_rx_cache == NULL) {
2298                 CERROR("Can't create slab for kgn_rx_t descriptors\n");
2299                 GOTO(failed, rc = -ENOMEM);
2300         }
2301
2302         kgnilnd_data.kgn_tx_cache =
2303                 kmem_cache_create("kgn_tx_t", sizeof(kgn_tx_t), 0, 0, NULL);
2304         if (kgnilnd_data.kgn_tx_cache == NULL) {
2305                 CERROR("Can't create slab for kgn_tx_t\n");
2306                 GOTO(failed, rc = -ENOMEM);
2307         }
2308
2309         kgnilnd_data.kgn_tx_phys_cache =
2310                 kmem_cache_create("kgn_tx_phys",
2311                                    GNILND_MAX_IOV * sizeof(gni_mem_segment_t),
2312                                    0, 0, NULL);
2313         if (kgnilnd_data.kgn_tx_phys_cache == NULL) {
2314                 CERROR("Can't create slab for kgn_tx_phys\n");
2315                 GOTO(failed, rc = -ENOMEM);
2316         }
2317
2318         kgnilnd_data.kgn_dgram_cache =
2319                 kmem_cache_create("kgn_dgram_t", sizeof(kgn_dgram_t), 0, 0, NULL);
2320         if (kgnilnd_data.kgn_dgram_cache == NULL) {
2321                 CERROR("Can't create slab for outgoing datagrams\n");
2322                 GOTO(failed, rc = -ENOMEM);
2323         }
2324
2325         /* allocate an array of GNILND_MAX_IOV page pointers for each cpu */
2326         kgnilnd_data.kgn_cksum_map_pages = kmalloc(num_possible_cpus() * sizeof (struct page *),
2327                                                    GFP_KERNEL);
2328         if (kgnilnd_data.kgn_cksum_map_pages == NULL) {
2329                 CERROR("Can't allocate vmap cksum pages\n");
2330                 GOTO(failed, rc = -ENOMEM);
2331         }
2332         kgnilnd_data.kgn_cksum_npages = num_possible_cpus();
2333         memset(kgnilnd_data.kgn_cksum_map_pages, 0,
2334                 kgnilnd_data.kgn_cksum_npages * sizeof (struct page *));
2335
2336         for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
2337                 kgnilnd_data.kgn_cksum_map_pages[i] = kmalloc(GNILND_MAX_IOV * sizeof (struct page *),
2338                                                               GFP_KERNEL);
2339                 if (kgnilnd_data.kgn_cksum_map_pages[i] == NULL) {
2340                         CERROR("Can't allocate vmap cksum pages for cpu %d\n", i);
2341                         GOTO(failed, rc = -ENOMEM);
2342                 }
2343         }
2344
2345         LASSERT(kgnilnd_data.kgn_ndevs == 0);
2346
2347         /* Use all available GNI devices */
2348         for (i = 0; i < GNILND_MAXDEVS; i++) {
2349                 dev = &kgnilnd_data.kgn_devices[kgnilnd_data.kgn_ndevs];
2350
2351                 rc = kgnilnd_dev_init(dev);
2352                 if (rc == 0) {
2353                         /* Increment here so base_shutdown cleans it up */
2354                         kgnilnd_data.kgn_ndevs++;
2355
2356                         rc = kgnilnd_allocate_phys_fmablk(dev);
2357                         if (rc)
2358                                 GOTO(failed, rc);
2359                 }
2360         }
2361
2362         if (kgnilnd_data.kgn_ndevs == 0) {
2363                 CERROR("Can't initialise any GNI devices\n");
2364                 GOTO(failed, rc = -ENODEV);
2365         }
2366
2367         rc = kgnilnd_thread_start(kgnilnd_reaper, NULL, "kgnilnd_rpr", 0);
2368         if (rc != 0) {
2369                 CERROR("Can't spawn gnilnd reaper: %d\n", rc);
2370                 GOTO(failed, rc);
2371         }
2372
2373         rc = kgnilnd_start_rca_thread();
2374         if (rc != 0) {
2375                 CERROR("Can't spawn gnilnd rca: %d\n", rc);
2376                 GOTO(failed, rc);
2377         }
2378
2379         /*
2380          * Start ruhroh thread.  We can't use kgnilnd_thread_start() because
2381          * we don't want this thread included in kgnilnd_data.kgn_nthreads
2382          * count.  This thread controls quiesce, so it mustn't
2383          * quiesce itself.
2384          */
2385         thrd = kthread_run(kgnilnd_ruhroh_thread, NULL, "%s_%02d", "kgnilnd_rr", 0);
2386         if (IS_ERR(thrd)) {
2387                 rc = PTR_ERR(thrd);
2388                 CERROR("Can't spawn gnilnd ruhroh thread: %d\n", rc);
2389                 GOTO(failed, rc);
2390         }
2391
2392         /* threads will load balance across devs as they are available */
2393         if (*kgnilnd_tunables.kgn_thread_affinity) {
2394                 rc = kgnilnd_start_sd_threads();
2395                 if (rc != 0)
2396                         GOTO(failed, rc);
2397         } else {
2398                 for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
2399                         rc = kgnilnd_thread_start(kgnilnd_scheduler,
2400                                                   (void *)((long)i),
2401                                                   "kgnilnd_sd", i);
2402                         if (rc != 0) {
2403                                 CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
2404                                        i, rc);
2405                                 GOTO(failed, rc);
2406                         }
2407                 }
2408         }
2409
2410         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2411                 dev = &kgnilnd_data.kgn_devices[i];
2412                 rc = kgnilnd_thread_start(kgnilnd_dgram_mover, dev,
2413                                           "kgnilnd_dg", dev->gnd_id);
2414                 if (rc != 0) {
2415                         CERROR("Can't spawn gnilnd dgram_mover[%d]: %d\n",
2416                                dev->gnd_id, rc);
2417                         GOTO(failed, rc);
2418                 }
2419
2420                 rc = kgnilnd_thread_start(kgnilnd_dgram_waitq, dev,
2421                                           "kgnilnd_dgn", dev->gnd_id);
2422                 if (rc != 0) {
2423                         CERROR("Can't spawn gnilnd dgram_waitq[%d]: %d\n",
2424                                 dev->gnd_id, rc);
2425                         GOTO(failed, rc);
2426                 }
2427
2428                 rc = kgnilnd_setup_wildcard_dgram(dev);
2429
2430                 if (rc != 0) {
2431                         CERROR("Can't create wildcard dgrams[%d]: %d\n",
2432                                 dev->gnd_id, rc);
2433                         GOTO(failed, rc);
2434                 }
2435         }
2436
2437         /* flag everything initialised */
2438         kgnilnd_data.kgn_init = GNILND_INIT_ALL;
2439         /*****************************************************/
2440
2441         CDEBUG(D_MALLOC, "initial kmem %lld\n", pkmem);
2442         RETURN(0);
2443
2444 failed:
2445         kgnilnd_base_shutdown();
2446         kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
2447         RETURN(rc);
2448 }
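
/*
 * Startup order recap (as implemented above): core data structures and
 * slab caches first, then the GNI devices (kgnilnd_dev_init plus the
 * physical fmablk pool), then the reaper, RCA, ruhroh, scheduler and
 * dgram threads, and finally the wildcard dgrams that make this node
 * reachable; any failure unwinds through kgnilnd_base_shutdown().
 */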
2449
2450 void
2451 kgnilnd_base_shutdown(void)
2452 {
2453         int                     i, j;
2454         ENTRY;
2455
2456         while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_PAUSE_SHUTDOWN, 1)) {};
2457
2458         kgnilnd_data.kgn_wc_kill = 1;
2459
2460         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2461                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2462                 kgnilnd_cancel_wc_dgrams(dev);
2463                 kgnilnd_cancel_dgrams(dev);
2464                 kgnilnd_del_conn_or_peer(NULL, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
2465                 kgnilnd_wait_for_canceled_dgrams(dev);
2466         }
2467
2468         /* We need to verify there are no conns left before we let the threads
2469          * shut down otherwise we could clean up the peers but still have
2470          * some outstanding conns due to orphaned datagram conns that are
2471          * being cleaned up.
2472          */
2473         i = 2;
2474         while (atomic_read(&kgnilnd_data.kgn_nconns) != 0) {
2475                 i++;
2476
2477                 for(j = 0; j < kgnilnd_data.kgn_ndevs; ++j) {
2478                         kgn_device_t *dev = &kgnilnd_data.kgn_devices[j];
2479                         kgnilnd_schedule_device(dev);
2480                 }
2481
2482                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2483                        "Waiting for conns to be cleaned up %d\n", atomic_read(&kgnilnd_data.kgn_nconns));
2484                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
2485         }
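        /* ((i & (-i)) == i) above is the usual two's-complement test for a
         * power of two (it isolates the lowest set bit), so the louder
         * D_WARNING message fires on passes 4, 8, 16, ... while the quieter
         * D_NET message is emitted on every other pass */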
2486         /* Peer state is all cleaned up BEFORE setting shutdown, so threads don't
2487          * have to worry about shutdown races.  NB connections may be created
2488          * while there are still active connds, but these will be temporary
2489          * since peer creation always fails after the listener has started to
2490          * shut down.
2491          * All peers should have been cleared out on the nets by now. */
2492         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2493                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2494
2495         /* Wait for the ruhroh thread to shut down. */
2496         kgnilnd_data.kgn_ruhroh_shutdown = 1;
2497         wake_up(&kgnilnd_data.kgn_ruhroh_waitq);
2498         i = 2;
2499         while (kgnilnd_data.kgn_ruhroh_running != 0) {
2500                 i++;
2501                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2502                        "Waiting for ruhroh thread to terminate\n");
2503                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
2504         }
2505
2506         /* Flag threads to terminate */
2507         kgnilnd_data.kgn_shutdown = 1;
2508
2509         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2510                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2511
2512                 /* should clear all the MDDs */
2513                 kgnilnd_unmap_fma_blocks(dev);
2514
2515                 kgnilnd_schedule_device(dev);
2516                 wake_up(&dev->gnd_dgram_waitq);
2517                 wake_up(&dev->gnd_dgping_waitq);
2518                 LASSERT(list_empty(&dev->gnd_connd_peers));
2519         }
2520
2521         spin_lock(&kgnilnd_data.kgn_reaper_lock);
2522         wake_up(&kgnilnd_data.kgn_reaper_waitq);
2523         spin_unlock(&kgnilnd_data.kgn_reaper_lock);
2524
2525         if (atomic_read(&kgnilnd_data.kgn_nthreads))
2526                 kgnilnd_wakeup_rca_thread();
2527
2528         /* Wait for threads to exit */
2529         i = 2;
2530         while (atomic_read(&kgnilnd_data.kgn_nthreads) != 0) {
2531                 i++;
2532                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2533                        "Waiting for %d threads to terminate\n",
2534                        atomic_read(&kgnilnd_data.kgn_nthreads));
2535                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
2536         }
2537
2538         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2539                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2540
2541         if (kgnilnd_data.kgn_peers != NULL) {
2542                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
2543                         LASSERT(list_empty(&kgnilnd_data.kgn_peers[i]));
2544
2545                 CFS_FREE_PTR_ARRAY(kgnilnd_data.kgn_peers,
2546                                    *kgnilnd_tunables.kgn_peer_hash_size);
2547         }
2548
2549         down_write(&kgnilnd_data.kgn_net_rw_sem);
2550         if (kgnilnd_data.kgn_nets != NULL) {
2551                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++)
2552                         LASSERT(list_empty(&kgnilnd_data.kgn_nets[i]));
2553
2554                 CFS_FREE_PTR_ARRAY(kgnilnd_data.kgn_nets,
2555                                    *kgnilnd_tunables.kgn_net_hash_size);
2556         }
2557         up_write(&kgnilnd_data.kgn_net_rw_sem);
2558
2559         LASSERTF(atomic_read(&kgnilnd_data.kgn_nconns) == 0,
2560                 "conns left %d\n", atomic_read(&kgnilnd_data.kgn_nconns));
2561
	if (kgnilnd_data.kgn_conns != NULL) {
		for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
			LASSERT(list_empty(&kgnilnd_data.kgn_conns[i]));

		CFS_FREE_PTR_ARRAY(kgnilnd_data.kgn_conns,
				   *kgnilnd_tunables.kgn_peer_hash_size);
	}

	for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
		kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
		int j;

		kgnilnd_dev_fini(dev);

		LASSERTF(atomic_read(&dev->gnd_ndgrams) == 0,
			"dgrams left %d\n", atomic_read(&dev->gnd_ndgrams));

		if (dev->gnd_dgrams != NULL) {
			/* use a separate index here: reusing i would clobber
			 * the outer device index and skip later devices */
			for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size;
			     j++)
				LASSERT(list_empty(&dev->gnd_dgrams[j]));

			CFS_FREE_PTR_ARRAY(dev->gnd_dgrams,
					   *kgnilnd_tunables.kgn_peer_hash_size);
		}

		kgnilnd_free_phys_fmablk(dev);
	}

	if (kgnilnd_data.kgn_mbox_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_mbox_cache);

	if (kgnilnd_data.kgn_rx_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_rx_cache);

	if (kgnilnd_data.kgn_tx_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_tx_cache);

	if (kgnilnd_data.kgn_tx_phys_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_tx_phys_cache);

	if (kgnilnd_data.kgn_dgram_cache != NULL)
		kmem_cache_destroy(kgnilnd_data.kgn_dgram_cache);

	if (kgnilnd_data.kgn_cksum_map_pages != NULL) {
		/* kfree(NULL) is a no-op, so no per-page NULL check needed */
		for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++)
			kfree(kgnilnd_data.kgn_cksum_map_pages[i]);
		kfree(kgnilnd_data.kgn_cksum_map_pages);
	}

	CDEBUG(D_MALLOC, "after NAL cleanup: kmem %lld\n",
	       libcfs_kmem_read());

	kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
	module_put(THIS_MODULE);

	EXIT;
}

int
kgnilnd_startup(struct lnet_ni *ni)
{
	int               rc, devno;
	kgn_net_t        *net;
	ENTRY;

	LASSERTF(ni->ni_net->net_lnd == &the_kgnilnd,
		"bad LND 0x%p != the_kgnilnd @ 0x%p\n",
		ni->ni_net->net_lnd, &the_kgnilnd);

	if (kgnilnd_data.kgn_init == GNILND_INIT_NOTHING) {
		rc = kgnilnd_base_startup();
		if (rc != 0)
			RETURN(rc);
	}

	/* Serialize with shutdown. */
	mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);

	LIBCFS_ALLOC(net, sizeof(*net));
	if (net == NULL) {
		CERROR("could not allocate net for new interface instance\n");
		/* no need to cleanup the CDM... */
		GOTO(failed, rc = -ENOMEM);
	}
	INIT_LIST_HEAD(&net->gnn_list);
	ni->ni_data = net;
	net->gnn_ni = ni;

	kgnilnd_tunables_setup(ni);

	if (!ni->ni_interface) {
		rc = lnet_ni_add_interface(ni, "ipogif0");
		if (rc < 0)
			CWARN("gnilnd failed to allocate ni_interface\n");
	}

	if (*kgnilnd_tunables.kgn_peer_health) {
		int     fudge;
		int     timeout;
		/* give this a bit of leeway - we don't have a hard timeout
		 * since we only check timeouts periodically - see the comment
		 * in kgnilnd_reaper */
		fudge = (GNILND_TO2KA(*kgnilnd_tunables.kgn_timeout) / GNILND_REAPER_NCHECKS);
		timeout = *kgnilnd_tunables.kgn_timeout + fudge;

		if (*kgnilnd_tunables.kgn_peer_timeout >= timeout) {
			ni->ni_net->net_tunables.lct_peer_timeout =
				 *kgnilnd_tunables.kgn_peer_timeout;
		} else if (*kgnilnd_tunables.kgn_peer_timeout > -1) {
			LCONSOLE_ERROR("peer_timeout is set to %d but needs to be >= %d\n",
					*kgnilnd_tunables.kgn_peer_timeout,
					timeout);
			ni->ni_data = NULL;
			LIBCFS_FREE(net, sizeof(*net));
			GOTO(failed, rc = -EINVAL);
		} else {
			ni->ni_net->net_tunables.lct_peer_timeout = timeout;
		}

		LCONSOLE_INFO("Enabling LNet peer health for gnilnd, timeout %ds\n",
			      ni->ni_net->net_tunables.lct_peer_timeout);
	}
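	/* Illustration (hypothetical numbers only): if kgn_timeout were 60s
	 * and the reaper fudge worked out to 15s, an explicit peer_timeout
	 * of 0..74 would fail startup with -EINVAL, while leaving it at -1
	 * silently adopts the computed 75s value. */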

	atomic_set(&net->gnn_refcount, 1);

	/* if we have multiple devices, spread the nets around */
	net->gnn_netnum = LNET_NETNUM(LNET_NID_NET(&ni->ni_nid));

	devno = LNET_NID_NET(&ni->ni_nid) % GNILND_MAXDEVS;
	net->gnn_dev = &kgnilnd_data.kgn_devices[devno];
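	/* the modulo means consecutive net numbers land on different
	 * devices when more than one is present */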

	/* allocate a 'dummy' cdm for datagram use. We can only have a single
	 * datagram between a nid:inst_id and nid2:inst_id. The fake cdm
	 * gives us an additional inst_id to use, allowing the datagrams to
	 * flow like rivers of honey and beer */

	/* the instance id for the cdm is the NETNUM offset by MAXDEVS -
	 * ensuring we'll have a unique id */

	ni->ni_nid.nid_addr[0] =
		cpu_to_be32(LNET_NIDADDR(net->gnn_dev->gnd_nid));
	CDEBUG(D_NET, "adding net %p nid=%s on dev %d\n",
		net, libcfs_nidstr(&ni->ni_nid), net->gnn_dev->gnd_id);
	/* until the gnn_list is set, we need to clean up ourselves as
	 * kgnilnd_shutdown would just get confused */

	down_write(&kgnilnd_data.kgn_net_rw_sem);
	list_add_tail(&net->gnn_list, kgnilnd_netnum2netlist(net->gnn_netnum));
	up_write(&kgnilnd_data.kgn_net_rw_sem);

	/* we need a separate thread to call probe_wait_by_id until
	 * we get a function callback notifier from kgni */
	mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
	RETURN(0);
 failed:
	mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
	kgnilnd_shutdown(ni);
	RETURN(rc);
}

void
kgnilnd_shutdown(struct lnet_ni *ni)
{
	kgn_net_t     *net = ni->ni_data;
	int           i;
	int           rc;
	ENTRY;

	CFS_RACE(CFS_FAIL_GNI_SR_DOWN_RACE);

	LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_ALL,
		"init %d\n", kgnilnd_data.kgn_init);

	/* Serialize with startup. */
	mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
	CDEBUG(D_MALLOC, "before NAL cleanup: kmem %lld\n",
	       libcfs_kmem_read());

	if (net == NULL) {
		CERROR("got NULL net for ni %p\n", ni);
		GOTO(out, rc = -EINVAL);
	}
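	/* a NULL net means startup failed before (or cleared) ni_data; the
	 * out: path below still checks whether the base state should be
	 * torn down */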

	LASSERTF(ni == net->gnn_ni,
		"ni %p gnn_ni %p\n", ni, net->gnn_ni);

	ni->ni_data = NULL;

	LASSERT(!net->gnn_shutdown);
	LASSERTF(atomic_read(&net->gnn_refcount) != 0,
		"net %p refcount %d\n",
		 net, atomic_read(&net->gnn_refcount));

	if (!list_empty(&net->gnn_list)) {
		/* serialize with peer creation */
		down_write(&kgnilnd_data.kgn_net_rw_sem);
		net->gnn_shutdown = 1;
		up_write(&kgnilnd_data.kgn_net_rw_sem);

		kgnilnd_cancel_net_dgrams(net);

		kgnilnd_del_conn_or_peer(net, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);

		/* if we are quiesced, need to wake up - we need those threads
		 * alive to release peers, etc */
		if (GNILND_IS_QUIESCED) {
			set_mb(kgnilnd_data.kgn_quiesce_trigger, GNILND_QUIESCE_IDLE);
			kgnilnd_quiesce_wait("shutdown");
		}

		kgnilnd_wait_for_canceled_dgrams(net->gnn_dev);

		/* Wait until the net's refcount drops to 1, then release the
		 * final reference (ours); this ensures everything else is
		 * finished with the net before we free it.
		 */
		i = 4;
		while (atomic_read(&net->gnn_refcount) != 1) {
			i++;
			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
				"Waiting for %d references to clear on net %d\n",
				atomic_read(&net->gnn_refcount),
				net->gnn_netnum);
			schedule_timeout_uninterruptible(cfs_time_seconds(1));
		}

		/* release ref from kgnilnd_startup */
		kgnilnd_net_decref(net);

		/* serialize with reaper and conn_task looping */
		down_write(&kgnilnd_data.kgn_net_rw_sem);
		list_del_init(&net->gnn_list);
		up_write(&kgnilnd_data.kgn_net_rw_sem);
	}

	/* not locking, this can't race with writers */
	LASSERTF(atomic_read(&net->gnn_refcount) == 0,
		"net %p refcount %d\n",
		 net, atomic_read(&net->gnn_refcount));
	LIBCFS_FREE(net, sizeof(*net));

out:
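	/* if that was the last net in every hash bucket, tear the base
	 * driver state down as well */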
	down_read(&kgnilnd_data.kgn_net_rw_sem);
	for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
		if (!list_empty(&kgnilnd_data.kgn_nets[i])) {
			up_read(&kgnilnd_data.kgn_net_rw_sem);
			break;
		}

		if (i == *kgnilnd_tunables.kgn_net_hash_size - 1) {
			up_read(&kgnilnd_data.kgn_net_rw_sem);
			kgnilnd_base_shutdown();
		}
	}
	CDEBUG(D_MALLOC, "after NAL cleanup: kmem %lld\n",
	       libcfs_kmem_read());

	mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
	EXIT;
}

static void __exit kgnilnd_exit(void)
{
	lnet_unregister_lnd(&the_kgnilnd);
	kgnilnd_proc_fini();
	kgnilnd_remove_sysctl();
}

static int __init kgnilnd_init(void)
{
	int    rc;

	rc = kgnilnd_tunables_init();
	if (rc != 0)
		return rc;

	LCONSOLE_INFO("Lustre: kgnilnd build version: " LUSTRE_VERSION_STRING "\n");

	kgnilnd_insert_sysctl();
	kgnilnd_proc_init();

	lnet_register_lnd(&the_kgnilnd);

	return 0;
}

MODULE_AUTHOR("Cray, Inc. <nic@cray.com>");
MODULE_DESCRIPTION("Gemini LNet Network Driver");
MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");

module_init(kgnilnd_init);
module_exit(kgnilnd_exit);