/*
 * Copyright (C) 2012 Cray, Inc.
 *
 * Copyright (c) 2013, 2014, Intel Corporation.
 *
 *   Author: Nic Henke <nic@cray.com>
 *   Author: James Shimek <jshimek@cray.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */
#include "gnilnd.h"

/* Primary entry points from LNET.  There are no guarantees against reentrance. */
lnd_t the_kgnilnd = {
#ifdef CONFIG_CRAY_XT
	.lnd_type       = GNILND,
#else
	.lnd_type       = GNIIPLND,
#endif
	.lnd_startup    = kgnilnd_startup,
	.lnd_shutdown   = kgnilnd_shutdown,
	.lnd_ctl        = kgnilnd_ctl,
	.lnd_send       = kgnilnd_send,
	.lnd_recv       = kgnilnd_recv,
	.lnd_eager_recv = kgnilnd_eager_recv,
	.lnd_query      = kgnilnd_query,
};
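/* A minimal sketch of how this descriptor is typically hooked into LNet;
 * the actual registration lives in this module's init path and the init
 * function name below is illustrative:
 *
 *	static int __init kgnilnd_module_init(void)
 *	{
 *		...
 *		lnet_register_lnd(&the_kgnilnd);
 *		return 0;
 *	}
 */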

kgn_data_t      kgnilnd_data;

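/* Spawn a named kernel thread running fn(arg) and bump the global thread
 * count; returns 0 on success or the PTR_ERR from kthread_run.  The name is
 * suffixed with the numeric id, e.g. "kgnilnd_sd_00". */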
int
kgnilnd_thread_start(int(*fn)(void *arg), void *arg, char *name, int id)
{
	struct task_struct *thrd;

	thrd = kthread_run(fn, arg, "%s_%02d", name, id);
	if (IS_ERR(thrd))
		return PTR_ERR(thrd);

	atomic_inc(&kgnilnd_data.kgn_nthreads);
	return 0;
}

/* bind scheduler threads to cpus */
int
kgnilnd_start_sd_threads(void)
{
	int cpu;
	int i = 0;
	struct task_struct *task;

	for_each_online_cpu(cpu) {
		/* don't bind to cpu 0 - all interrupts are processed here */
		if (cpu == 0)
			continue;

		task = kthread_create(kgnilnd_scheduler, (void *)((long)i),
				      "%s_%02d", "kgnilnd_sd", i);
		if (!IS_ERR(task)) {
			kthread_bind(task, cpu);
			wake_up_process(task);
		} else {
			CERROR("Can't spawn gnilnd scheduler[%d] %ld\n", i,
				PTR_ERR(task));
			return PTR_ERR(task);
		}
		atomic_inc(&kgnilnd_data.kgn_nthreads);

		if (++i >= *kgnilnd_tunables.kgn_sched_threads) {
			break;
		}
	}

	return 0;
}

/* needs write_lock on kgn_peer_conn_lock */
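/* A typical call pattern (sketch; caller context and error handling elided):
 *
 *	write_lock(&kgnilnd_data.kgn_peer_conn_lock);
 *	count = kgnilnd_close_stale_conns_locked(peer, newconn);
 *	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
 */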
int
kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
	kgn_conn_t         *conn;
	struct list_head   *ctmp, *cnxt;
	int                 loopback;
	int                 count = 0;

	loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;

	list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
		conn = list_entry(ctmp, kgn_conn_t, gnc_list);

		if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
			continue;

		if (conn == newconn)
			continue;

		if (conn->gnc_device != newconn->gnc_device)
			continue;

		/* This is a two connection loopback - one talking to the other */
		if (loopback &&
		    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
		    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp) {
			CDEBUG(D_NET, "skipping prune of %p, "
				"loopback and matching stamps"
				" connstamp %llu(%llu)"
				" peerstamp %llu(%llu)\n",
				conn, newconn->gnc_my_connstamp,
				conn->gnc_peer_connstamp,
				newconn->gnc_peer_connstamp,
				conn->gnc_my_connstamp);
			continue;
		}

		if (conn->gnc_peerstamp != newconn->gnc_peerstamp) {
			LASSERTF(conn->gnc_peerstamp < newconn->gnc_peerstamp,
				"conn 0x%p peerstamp %llu >= "
				"newconn 0x%p peerstamp %llu\n",
				conn, conn->gnc_peerstamp,
				newconn, newconn->gnc_peerstamp);

			CDEBUG(D_NET, "Closing stale conn nid: %s "
			       " peerstamp:%#llx(%#llx)\n",
			       libcfs_nid2str(peer->gnp_nid),
			       conn->gnc_peerstamp, newconn->gnc_peerstamp);
		} else {
			LASSERTF(conn->gnc_peer_connstamp < newconn->gnc_peer_connstamp,
				"conn 0x%p peer_connstamp %llu >= "
				"newconn 0x%p peer_connstamp %llu\n",
				conn, conn->gnc_peer_connstamp,
				newconn, newconn->gnc_peer_connstamp);

			CDEBUG(D_NET, "Closing stale conn nid: %s"
			       " connstamp:%llu(%llu)\n",
			       libcfs_nid2str(peer->gnp_nid),
			       conn->gnc_peer_connstamp, newconn->gnc_peer_connstamp);
		}

		count++;
		kgnilnd_close_conn_locked(conn, -ESTALE);
	}

	if (count != 0) {
		CWARN("Closed %d stale conns to %s\n", count, libcfs_nid2str(peer->gnp_nid));
	}

	RETURN(count);
}

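/* Check whether 'newconn' duplicates or predates an existing connection to
 * 'peer'.  Returns 0 if 'newconn' is usable, 1 if its peerstamp is stale,
 * 2 if its connstamp is stale, and 3 if the connstamp is an exact duplicate
 * (the peer isn't playing the game).  Needs kgn_peer_conn_lock held. */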
int
kgnilnd_conn_isdup_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
	kgn_conn_t       *conn;
	struct list_head *tmp;
	int               loopback;
	ENTRY;

	loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;

	list_for_each(tmp, &peer->gnp_conns) {
		conn = list_entry(tmp, kgn_conn_t, gnc_list);
		CDEBUG(D_NET, "checking conn 0x%p for peer %s"
			" lo %d new %llu existing %llu"
			" new peer %llu existing peer %llu"
			" new dev %p existing dev %p\n",
			conn, libcfs_nid2str(peer->gnp_nid),
			loopback,
			newconn->gnc_peerstamp, conn->gnc_peerstamp,
			newconn->gnc_peer_connstamp, conn->gnc_peer_connstamp,
			newconn->gnc_device, conn->gnc_device);

		/* conn is in the process of closing */
		if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
			continue;

		/* 'newconn' is from an earlier version of 'peer'!!! */
		if (newconn->gnc_peerstamp < conn->gnc_peerstamp)
			RETURN(1);

		/* 'conn' is from an earlier version of 'peer': it will be
		 * removed when we cull stale conns later on... */
		if (newconn->gnc_peerstamp > conn->gnc_peerstamp)
			continue;

		/* Different devices are OK */
		if (conn->gnc_device != newconn->gnc_device)
			continue;

		/* It's me connecting to myself */
		if (loopback &&
		    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
		    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp)
			continue;

		/* 'newconn' is an earlier connection from 'peer'!!! */
		if (newconn->gnc_peer_connstamp < conn->gnc_peer_connstamp)
			RETURN(2);

		/* 'conn' is an earlier connection from 'peer': it will be
		 * removed when we cull stale conns later on... */
		if (newconn->gnc_peer_connstamp > conn->gnc_peer_connstamp)
			continue;

		/* 'newconn' has the SAME connection stamp; 'peer' isn't
		 * playing the game... */
		RETURN(3);
	}

	RETURN(0);
}

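/* Allocate and minimally initialize a new conn on 'dev': set up the TX id
 * ref table, take a unique CQ id and connstamp, and create the GNI EP.  On
 * success *connp holds a conn carrying one ref for the caller and one for
 * EP cancellation. */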
int
kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
{
	kgn_conn_t      *conn;
	gni_return_t    rrc;
	int             rc = 0;

	LASSERT(!in_interrupt());
	atomic_inc(&kgnilnd_data.kgn_nconns);

	/* divide by 2 to allow for complete reset and immediate reconnect */
	if (atomic_read(&kgnilnd_data.kgn_nconns) >= GNILND_MAX_CQID/2) {
		CERROR("Too many conns are live: %d > %d\n",
			atomic_read(&kgnilnd_data.kgn_nconns), GNILND_MAX_CQID/2);
		atomic_dec(&kgnilnd_data.kgn_nconns);
		return -E2BIG;
	}

	LIBCFS_ALLOC(conn, sizeof(*conn));
	if (conn == NULL) {
		atomic_dec(&kgnilnd_data.kgn_nconns);
		return -ENOMEM;
	}

	conn->gnc_tx_ref_table =
		kgnilnd_vzalloc(GNILND_MAX_MSG_ID * sizeof(void *));
	if (conn->gnc_tx_ref_table == NULL) {
		CERROR("Can't allocate conn tx_ref_table\n");
		GOTO(failed, rc = -ENOMEM);
	}

	mutex_init(&conn->gnc_smsg_mutex);
	mutex_init(&conn->gnc_rdma_mutex);
	atomic_set(&conn->gnc_refcount, 1);
	atomic_set(&conn->gnc_reaper_noop, 0);
	atomic_set(&conn->gnc_sched_noop, 0);
	atomic_set(&conn->gnc_tx_in_use, 0);
	INIT_LIST_HEAD(&conn->gnc_list);
	INIT_LIST_HEAD(&conn->gnc_hashlist);
	INIT_LIST_HEAD(&conn->gnc_schedlist);
	INIT_LIST_HEAD(&conn->gnc_fmaq);
	INIT_LIST_HEAD(&conn->gnc_mdd_list);
	spin_lock_init(&conn->gnc_list_lock);
	spin_lock_init(&conn->gnc_tx_lock);
	conn->gnc_magic = GNILND_CONN_MAGIC;

	/* set tx id to nearly the end to make sure we find wrapping
	 * issues soon */
	conn->gnc_next_tx = (int) GNILND_MAX_MSG_ID - 10;
	/* if this fails, we have id conflicts and GNILND_MAX_MSG_ID is too large */
	CLASSERT(GNILND_MAX_MSG_ID < GNILND_MSGID_CLOSE);

	/* get a new unique CQ id for this conn */
	write_lock(&kgnilnd_data.kgn_peer_conn_lock);
	conn->gnc_my_connstamp = kgnilnd_data.kgn_connstamp++;
	conn->gnc_cqid = kgnilnd_get_cqid_locked();
	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

	if (conn->gnc_cqid == 0) {
		CERROR("Could not allocate unique CQ ID for conn 0x%p\n", conn);
		GOTO(failed, rc = -E2BIG);
	}

	CDEBUG(D_NET, "alloc cqid %u for conn 0x%p\n",
		conn->gnc_cqid, conn);

	/* needs to be set before gnc_ephandle so kgnilnd_destroy_conn_ep can
	 * check context */
	conn->gnc_device = dev;

	conn->gnc_timeout = MAX(*kgnilnd_tunables.kgn_timeout,
				GNILND_MIN_TIMEOUT);
	kgnilnd_update_reaper_timeout(conn->gnc_timeout);

	/* this is the ep_handle for doing SMSG & BTE */
	mutex_lock(&dev->gnd_cq_mutex);
	rrc = kgnilnd_ep_create(dev->gnd_handle, dev->gnd_snd_fma_cqh,
				&conn->gnc_ephandle);
	mutex_unlock(&dev->gnd_cq_mutex);
	if (rrc != GNI_RC_SUCCESS)
		GOTO(failed, rc = -ENETDOWN);

	CDEBUG(D_NET, "created conn 0x%p ep_hndl 0x%p\n",
	       conn, conn->gnc_ephandle);

	/* add ref for EP canceling */
	kgnilnd_conn_addref(conn);
	atomic_inc(&dev->gnd_neps);

	*connp = conn;
	return 0;

failed:
	atomic_dec(&kgnilnd_data.kgn_nconns);
	/* the table may not have been allocated if we failed early */
	if (conn->gnc_tx_ref_table != NULL)
		LIBCFS_FREE(conn->gnc_tx_ref_table,
			    GNILND_MAX_MSG_ID * sizeof(void *));
	LIBCFS_FREE(conn, sizeof(*conn));
	return rc;
}

/* needs to be called with kgn_peer_conn_lock held (read or write) */
kgn_conn_t *
kgnilnd_find_conn_locked(kgn_peer_t *peer)
{
	kgn_conn_t      *conn = NULL;

	/* if we are in reset, any conn we would find is about to die soon */
	if (unlikely(kgnilnd_data.kgn_in_reset)) {
		RETURN(NULL);
	}

	/* just return the first ESTABLISHED connection */
	list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
		/* kgnilnd_finish_connect doesn't put connections on the
		 * peer list until they are actually established */
		LASSERTF(conn->gnc_state >= GNILND_CONN_ESTABLISHED,
			"found conn %p state %s on peer %p (%s)\n",
			conn, kgnilnd_conn_state2str(conn), peer,
			libcfs_nid2str(peer->gnp_nid));
		if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
			continue;

		RETURN(conn);
	}
	RETURN(NULL);
}

/* needs write_lock on kgn_peer_conn_lock held */
kgn_conn_t *
kgnilnd_find_or_create_conn_locked(kgn_peer_t *peer)
{
	kgn_device_t    *dev = peer->gnp_net->gnn_dev;
	kgn_conn_t      *conn;

	conn = kgnilnd_find_conn_locked(peer);

	if (conn != NULL) {
		return conn;
	}

	/* if the peer was previously connecting, check if we should
	 * trigger another connection attempt yet. */
	if (time_before(jiffies, peer->gnp_reconnect_time)) {
		return NULL;
	}

	/* This check prevents us from creating a new connection to a peer
	 * while we are still in the process of closing an existing connection
	 * to that peer.
	 */
	list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
		if (conn->gnc_ephandle != NULL) {
			CDEBUG(D_NET, "Not connecting - non-NULL ephandle found, peer 0x%p->%s\n",
				peer, libcfs_nid2str(peer->gnp_nid));
			return NULL;
		}
	}

	if (peer->gnp_connecting != GNILND_PEER_IDLE) {
		/* only start a new connect attempt from IDLE - any other
		 * state means one is already in flight or being torn down */
		return NULL;
	}

	CDEBUG(D_NET, "starting connect to %s\n",
		libcfs_nid2str(peer->gnp_nid));
	peer->gnp_connecting = GNILND_PEER_CONNECT;
	kgnilnd_peer_addref(peer); /* extra ref for connd */

	spin_lock(&dev->gnd_connd_lock);
	list_add_tail(&peer->gnp_connd_list, &dev->gnd_connd_peers);
	spin_unlock(&dev->gnd_connd_lock);

	kgnilnd_schedule_dgram(dev);
	CDEBUG(D_NETTRACE, "scheduling new connect\n");

	return NULL;
}

/* Caller is responsible for deciding if/when to call this */
void
kgnilnd_destroy_conn_ep(kgn_conn_t *conn)
{
	gni_return_t    rrc;
	gni_ep_handle_t tmp_ep;

	/* only tear down the EP if we actually created it; swapping in NULL
	 * tells kgnilnd_destroy_conn to leave it alone */

	tmp_ep = xchg(&conn->gnc_ephandle, NULL);
	if (tmp_ep != NULL) {
		/* we never re-use the EP, so unbind is not needed */
		mutex_lock(&conn->gnc_device->gnd_cq_mutex);
		rrc = kgnilnd_ep_destroy(tmp_ep);

		mutex_unlock(&conn->gnc_device->gnd_cq_mutex);

		/* if this fails, it could hork up kgni smsg retransmit and others
		 * since we could free the SMSG mbox memory, etc. */
		LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d conn 0x%p ep 0x%p\n",
			 rrc, conn, conn->gnc_ephandle);

		atomic_dec(&conn->gnc_device->gnd_neps);

		/* clear out count added in kgnilnd_close_conn_locked
		 * conn will have a peer once it hits finish_connect, where it
		 * is the first spot we'll mark it ESTABLISHED as well */
		if (conn->gnc_peer) {
			kgnilnd_admin_decref(conn->gnc_peer->gnp_dirty_eps);
		}

		/* drop ref for EP */
		kgnilnd_conn_decref(conn);
	}
}

void
kgnilnd_destroy_conn(kgn_conn_t *conn)
{
	LASSERTF(!in_interrupt() &&
		!conn->gnc_scheduled &&
		!conn->gnc_in_purgatory &&
		conn->gnc_ephandle == NULL &&
		list_empty(&conn->gnc_list) &&
		list_empty(&conn->gnc_hashlist) &&
		list_empty(&conn->gnc_schedlist) &&
		list_empty(&conn->gnc_mdd_list) &&
		conn->gnc_magic == GNILND_CONN_MAGIC,
		"conn 0x%p->%s IRQ %d sched %d purg %d ep 0x%p Mg %d lists %d/%d/%d/%d\n",
		conn, conn->gnc_peer ? libcfs_nid2str(conn->gnc_peer->gnp_nid)
				     : "<?>",
		!!in_interrupt(), conn->gnc_scheduled,
		conn->gnc_in_purgatory,
		conn->gnc_ephandle,
		conn->gnc_magic,
		list_empty(&conn->gnc_list),
		list_empty(&conn->gnc_hashlist),
		list_empty(&conn->gnc_schedlist),
		list_empty(&conn->gnc_mdd_list));

	/* Tripping these is especially bad, as it means we have items on the
	 *  lists that didn't keep their refcount on the connection - or
	 *  somebody evil released their own */
	LASSERTF(list_empty(&conn->gnc_fmaq) &&
		 atomic_read(&conn->gnc_nlive_fma) == 0 &&
		 atomic_read(&conn->gnc_nlive_rdma) == 0,
		 "conn 0x%p fmaq %d@0x%p nfma %d nrdma %d\n",
		 conn, kgnilnd_count_list(&conn->gnc_fmaq), &conn->gnc_fmaq,
		 atomic_read(&conn->gnc_nlive_fma), atomic_read(&conn->gnc_nlive_rdma));

	CDEBUG(D_NET, "destroying conn %p ephandle %p error %d\n",
		conn, conn->gnc_ephandle, conn->gnc_error);

	/* we are freeing this memory, so remove the magic value from the conn */
	conn->gnc_magic = 0;

	/* if there is an FMA blk left here, we'll tear it down */
	if (conn->gnc_fma_blk) {
		if (conn->gnc_peer) {
			kgn_mbox_info_t *mbox;
			mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
			mbox->mbx_prev_nid = conn->gnc_peer->gnp_nid;
		}
		kgnilnd_release_mbox(conn, 0);
	}

	if (conn->gnc_peer != NULL)
		kgnilnd_peer_decref(conn->gnc_peer);

	if (conn->gnc_tx_ref_table != NULL) {
		LIBCFS_FREE(conn->gnc_tx_ref_table,
			    GNILND_MAX_MSG_ID * sizeof(void *));
	}

	LIBCFS_FREE(conn, sizeof(*conn));
	atomic_dec(&kgnilnd_data.kgn_nconns);
}

/* peer_alive and peer_notify done in the style of the o2iblnd */
void
kgnilnd_peer_alive(kgn_peer_t *peer)
{
	set_mb(peer->gnp_last_alive, jiffies);
}

void
kgnilnd_peer_notify(kgn_peer_t *peer, int error, int alive)
{
	int                     tell_lnet = 0;
	int                     nnets = 0;
	int                     rc;
	int                     i, j;
	kgn_conn_t             *conn;
	kgn_net_t             **nets;
	kgn_net_t              *net;

	if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DONT_NOTIFY))
		return;

	/* Tell LNet we are giving up on this peer - but only
	 * if it isn't already reconnected or trying to reconnect */
	read_lock(&kgnilnd_data.kgn_peer_conn_lock);

	/* use kgnilnd_find_conn_locked to avoid any conns in the process of being nuked
	 *
	 * don't tell LNet if we are in reset - we assume that everyone will be able to
	 * reconnect just fine
	 */
	conn = kgnilnd_find_conn_locked(peer);

	CDEBUG(D_NETTRACE, "peer 0x%p->%s connecting %d conn 0x%p, rst %d error %d\n",
	       peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting, conn,
	       kgnilnd_data.kgn_in_reset, error);

	if (((peer->gnp_connecting == GNILND_PEER_IDLE) &&
	    (conn == NULL) &&
	    (!kgnilnd_data.kgn_in_reset) &&
	    (!kgnilnd_conn_clean_errno(error))) || alive) {
		tell_lnet = 1;
	}

	read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

	if (!tell_lnet) {
		/* short circuit if we don't need to notify LNet */
		return;
	}

	/* if the trylock fails, skip the notification - LNet is in shutdown
	 * or otherwise tearing down the nets */
	rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);

	if (rc) {

		for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
			list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
				/* if gnn_shutdown is set for any net, a
				 * shutdown is in progress - just return */
				if (net->gnn_shutdown) {
					up_read(&kgnilnd_data.kgn_net_rw_sem);
					return;
				}
				nnets++;
			}
		}

		if (nnets == 0) {
			/* shutdown in progress most likely */
			up_read(&kgnilnd_data.kgn_net_rw_sem);
			return;
		}

		LIBCFS_ALLOC(nets, nnets * sizeof(*nets));

		if (nets == NULL) {
			up_read(&kgnilnd_data.kgn_net_rw_sem);
			CERROR("Failed to allocate nets[%d]\n", nnets);
			return;
		}

		j = 0;
		for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
			list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
				nets[j] = net;
				kgnilnd_net_addref(net);
				j++;
			}
		}
		up_read(&kgnilnd_data.kgn_net_rw_sem);

		for (i = 0; i < nnets; i++) {
			lnet_nid_t peer_nid;

			net = nets[i];

			peer_nid = kgnilnd_lnd2lnetnid(net->gnn_ni->ni_nid,
						       peer->gnp_nid);

			CDEBUG(D_NET, "peer 0x%p->%s last_alive %lu (%lus ago)\n",
				peer, libcfs_nid2str(peer_nid), peer->gnp_last_alive,
				cfs_duration_sec(jiffies - peer->gnp_last_alive));

			lnet_notify(net->gnn_ni, peer_nid, alive,
				    peer->gnp_last_alive);

			kgnilnd_net_decref(net);
		}

		LIBCFS_FREE(nets, nnets * sizeof(*nets));
	}
}

/* need write_lock on kgn_peer_conn_lock */
void
kgnilnd_close_conn_locked(kgn_conn_t *conn, int error)
{
	kgn_peer_t        *peer = conn->gnc_peer;
	ENTRY;

	LASSERT(!in_interrupt());

	/* store error for tx completion */
	conn->gnc_error = error;
	peer->gnp_last_errno = error;

	/* use real error from peer if possible */
	if (error == -ECONNRESET) {
		error = conn->gnc_peer_error;
	}

	/* if we NETERROR, make sure it is rate limited */
	if (!kgnilnd_conn_clean_errno(error) &&
	    peer->gnp_down == GNILND_RCA_NODE_UP) {
		CNETERR("closing conn to %s: error %d\n",
		       libcfs_nid2str(peer->gnp_nid), error);
	} else {
		CDEBUG(D_NET, "closing conn to %s: error %d\n",
		       libcfs_nid2str(peer->gnp_nid), error);
	}

	LASSERTF(conn->gnc_state == GNILND_CONN_ESTABLISHED,
		"conn %p to %s with bogus state %s\n", conn,
		libcfs_nid2str(conn->gnc_peer->gnp_nid),
		kgnilnd_conn_state2str(conn));
	LASSERT(!list_empty(&conn->gnc_hashlist));
	LASSERT(!list_empty(&conn->gnc_list));

	/* mark peer count here so any place the EP gets destroyed will
	 * open up the peer count so that a new ESTABLISHED conn is then free
	 * to send new messages -- sending before the previous EPs are destroyed
	 * could end up with messages on the network for the old conn _after_
	 * the new conn and break the mbox safety protocol */
	kgnilnd_admin_addref(conn->gnc_peer->gnp_dirty_eps);

	/* Remove from conn hash table: no new callbacks */
	list_del_init(&conn->gnc_hashlist);
	kgnilnd_data.kgn_conn_version++;
	kgnilnd_conn_decref(conn);

	/* if we are in reset, go right to CLOSED as there is no scheduler
	 * thread to move from CLOSING to CLOSED */
	if (unlikely(kgnilnd_data.kgn_in_reset)) {
		conn->gnc_state = GNILND_CONN_CLOSED;
	} else {
		conn->gnc_state = GNILND_CONN_CLOSING;
	}

	if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RDMA_CQ_ERROR)) {
		msleep_interruptible(MSEC_PER_SEC);
	}

	/* leave on peer->gnp_conns to make sure we don't let the reaper
	 * or others try to unlink this peer until the conn is fully
	 * processed for closing */

	if (kgnilnd_check_purgatory_conn(conn)) {
		kgnilnd_add_purgatory_locked(conn, conn->gnc_peer);
	}

	/* Reset RX timeout to ensure we wait for an incoming CLOSE
	 * for the full timeout.  If we get a CLOSE we know the
	 * peer has stopped all RDMA.  Otherwise if we wait for
	 * the full timeout we can also be sure all RDMA has stopped. */
	conn->gnc_last_rx = conn->gnc_last_rx_cq = jiffies;
	mb();

	/* schedule sending CLOSE - if we are in quiesce, this adds to
	 * gnd_ready_conns and allows us to find it in quiesce processing */
	kgnilnd_schedule_conn(conn);

	EXIT;
}

void
kgnilnd_close_conn(kgn_conn_t *conn, int error)
{
	write_lock(&kgnilnd_data.kgn_peer_conn_lock);
	/* need to check the state here - this call is racy and we don't
	 * know the state until after the lock is grabbed */
	if (conn->gnc_state == GNILND_CONN_ESTABLISHED) {
		kgnilnd_close_conn_locked(conn, error);
	}
	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
}

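/* Final teardown for a conn that has reached CLOSED: cancel any TXs still in
 * the ref table, tear down the EP, drop the conn from the peer's list (unless
 * it is held in purgatory) and notify LNet of the failure. */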
void
kgnilnd_complete_closed_conn(kgn_conn_t *conn)
{
	LIST_HEAD(sinners);
	kgn_tx_t               *tx, *txn;
	int                     nlive = 0;
	int                     nrdma = 0;
	int                     nq_rdma = 0;
	int                     logmsg;
	ENTRY;

	/* Dump log on cksum error - wait until complete phase to let
	 * RX of error happen */
	if (*kgnilnd_tunables.kgn_checksum_dump &&
	    (conn != NULL && conn->gnc_peer_error == -ENOKEY)) {
		libcfs_debug_dumplog();
	}

	/* _CLOSED set in kgnilnd_process_fmaq once we decide to
	 * send the CLOSE or not */
	LASSERTF(conn->gnc_state == GNILND_CONN_CLOSED,
		 "conn 0x%p->%s with bad state %s\n",
		 conn, conn->gnc_peer ?
			libcfs_nid2str(conn->gnc_peer->gnp_nid) :
			"<?>",
		 kgnilnd_conn_state2str(conn));

	LASSERT(list_empty(&conn->gnc_hashlist));

	/* we've sent the close, start nuking */
	if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SCHEDULE_COMPLETE))
		kgnilnd_schedule_conn(conn);

	if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
		CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
				"done; attempting to recover conn 0x%p "
				"scheduled %d function: %s line: %d\n", conn,
				conn->gnc_scheduled, conn->gnc_sched_caller,
				conn->gnc_sched_line);
		RETURN_EXIT;
	}

	/* we don't use lists to track things that we can get out of the
	 * tx_ref table... */

	/* need to hold locks for tx_list_state, sampling it is too racy:
	 * - the lock actually protects tx != NULL, but we can't take the proper
	 *   lock until we check tx_list_state, which would be too late and
	 *   we could have the TX change under us.
	 * gnd_rdmaq_lock and gnd_lock are not used together, so taking both
	 * should be fine */
	spin_lock(&conn->gnc_device->gnd_rdmaq_lock);
	spin_lock(&conn->gnc_device->gnd_lock);

	for (nrdma = 0; nrdma < GNILND_MAX_MSG_ID; nrdma++) {
		tx = conn->gnc_tx_ref_table[nrdma];

		if (tx != NULL) {
			/* only print the first error and if not CLOSE, we often don't see
			 * CQ events for that by the time we get here... and really don't care */
			if (nlive || tx->tx_msg.gnm_type == GNILND_MSG_CLOSE)
				tx->tx_state |= GNILND_TX_QUIET_ERROR;
			nlive++;
			GNIDBG_TX(D_NET, tx, "cleaning up on close, nlive %d", nlive);

			/* don't worry about gnc_lock here as nobody else should be
			 * touching this conn */
			kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
			list_add_tail(&tx->tx_list, &sinners);
		}
	}
	spin_unlock(&conn->gnc_device->gnd_lock);
	spin_unlock(&conn->gnc_device->gnd_rdmaq_lock);

	/* nobody should have marked this as needing scheduling after
	 * we called close - so only ref should be us handling it */
	if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
		CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
				"done; attempting to recover conn 0x%p "
				"scheduled %d function %s line: %d\n", conn,
				conn->gnc_scheduled, conn->gnc_sched_caller,
				conn->gnc_sched_line);
	}
	/* now reset a few to actual counters... */
	nrdma = atomic_read(&conn->gnc_nlive_rdma);
	nq_rdma = atomic_read(&conn->gnc_nq_rdma);

	if (!list_empty(&sinners)) {
		list_for_each_entry_safe(tx, txn, &sinners, tx_list) {
			/* clear tx_list to make tx_add_list_locked happy */
			list_del_init(&tx->tx_list);
			/* The error codes determine if we hold onto the MDD */
			kgnilnd_tx_done(tx, conn->gnc_error);
		}
	}

	logmsg = (nlive + nrdma + nq_rdma);

	if (logmsg) {
		if (conn->gnc_peer->gnp_down == GNILND_RCA_NODE_UP) {
			CNETERR("Closed conn 0x%p->%s (errno %d, peer errno %d): "
				"canceled %d TX, %d/%d RDMA\n",
				conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
				conn->gnc_error, conn->gnc_peer_error,
				nlive, nq_rdma, nrdma);
		} else {
			CDEBUG(D_NET, "Closed conn 0x%p->%s (errno %d,"
				" peer errno %d): canceled %d TX, %d/%d RDMA\n",
				conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
				conn->gnc_error, conn->gnc_peer_error,
				nlive, nq_rdma, nrdma);
		}
	}

	kgnilnd_destroy_conn_ep(conn);

	/* Bug 765042 - race this with completing a new conn to same peer - we need
	 * finish_connect to detach purgatory before we can do it ourselves here */
	CFS_RACE(CFS_FAIL_GNI_FINISH_PURG);

	/* now it is safe to remove from peer list - anyone looking at
	 * gnp_conns now is free to unlink if not on purgatory */
	write_lock(&kgnilnd_data.kgn_peer_conn_lock);

	conn->gnc_state = GNILND_CONN_DONE;

	/* Decrement counter if we are marked by del_conn_or_peers for closing
	 */
	if (conn->gnc_needs_closing)
		kgnilnd_admin_decref(kgnilnd_data.kgn_npending_conns);

	/* Remove from peer's list of valid connections if it's not in purgatory */
	if (!conn->gnc_in_purgatory) {
		list_del_init(&conn->gnc_list);
		/* Lose peer's reference on the conn */
		kgnilnd_conn_decref(conn);
	}

	/* NB - only unlinking if we set pending in del_peer_locked from admin or
	 * shutdown */
	if (kgnilnd_peer_active(conn->gnc_peer) &&
	    conn->gnc_peer->gnp_pending_unlink &&
	    kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
		kgnilnd_unlink_peer_locked(conn->gnc_peer);
	}

	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

	/* I'm telling Mommy! - use peer_error if they initiated close */
	kgnilnd_peer_notify(conn->gnc_peer,
			    conn->gnc_error == -ECONNRESET ?
			    conn->gnc_peer_error : conn->gnc_error, 0);

	EXIT;
}

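/* Apply parameters from an incoming connreq to 'conn': negotiate the timeout,
 * bind the EP to the remote host id / CQ, and initialize SMSG with the
 * exchanged attributes.  Returns 0 on success or a negative errno after
 * unwinding any partial EP binding. */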
int
kgnilnd_set_conn_params(kgn_dgram_t *dgram)
{
	kgn_conn_t             *conn = dgram->gndg_conn;
	kgn_connreq_t          *connreq = &dgram->gndg_conn_in;
	kgn_gniparams_t        *rem_param = &connreq->gncr_gnparams;
	gni_return_t            rrc;
	int                     rc = 0;
	gni_smsg_attr_t        *remote = &connreq->gncr_gnparams.gnpr_smsg_attr;

	/* set timeout vals in conn early so we can use them for the NAK */

	/* use max of the requested and our timeout, peer will do the same */
	conn->gnc_timeout = MAX(conn->gnc_timeout, connreq->gncr_timeout);

	/* only ep_bind really mucks around with the CQ */
	/* only bind the ep if we are not connecting to ourselves and the
	 * dstnid is not a wildcard - an ep can only be bound once, so we
	 * must be sure not to bind one that is already bound */
	if (connreq->gncr_dstnid != LNET_NID_ANY &&
	    dgram->gndg_conn_out.gncr_dstnid != connreq->gncr_srcnid) {
		mutex_lock(&conn->gnc_device->gnd_cq_mutex);
		rrc = kgnilnd_ep_bind(conn->gnc_ephandle,
			connreq->gncr_gnparams.gnpr_host_id,
			conn->gnc_cqid);
		mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
		if (rrc != GNI_RC_SUCCESS) {
			rc = -ECONNABORTED;
			goto return_out;
		}
	}

	rrc = kgnilnd_ep_set_eventdata(conn->gnc_ephandle, conn->gnc_cqid,
			 connreq->gncr_gnparams.gnpr_cqid);
	if (rrc != GNI_RC_SUCCESS) {
		rc = -ECONNABORTED;
		goto cleanup_out;
	}

	/* Initialize SMSG */
	rrc = kgnilnd_smsg_init(conn->gnc_ephandle, &conn->gnpr_smsg_attr,
			&connreq->gncr_gnparams.gnpr_smsg_attr);
	if (unlikely(rrc == GNI_RC_INVALID_PARAM)) {
		gni_smsg_attr_t *local = &conn->gnpr_smsg_attr;
		/* help folks figure out if there is a tunable off, etc. */
		LCONSOLE_ERROR("SMSG attribute mismatch. Data from local/remote:"
			       " type %d/%d msg_maxsize %u/%u"
			       " mbox_maxcredit %u/%u. Please check kgni"
			       " logs for further data\n",
			       local->msg_type, remote->msg_type,
			       local->msg_maxsize, remote->msg_maxsize,
			       local->mbox_maxcredit, remote->mbox_maxcredit);
	}
	if (rrc != GNI_RC_SUCCESS) {
		rc = -ECONNABORTED;
		goto cleanup_out;
	}

	/* log this to help in debugging SMSG buffer re-use */
	CDEBUG(D_NET, "conn %p src %s dst %s smsg %p acquired"
		" local cqid %u SMSG %p->%u hndl %#llx.%#llx"
		" remote cqid %u SMSG %p->%u hndl %#llx.%#llx\n",
		conn, libcfs_nid2str(connreq->gncr_srcnid),
		libcfs_nid2str(connreq->gncr_dstnid),
		&conn->gnpr_smsg_attr,
		conn->gnc_cqid,
		conn->gnpr_smsg_attr.msg_buffer,
		conn->gnpr_smsg_attr.mbox_offset,
		conn->gnpr_smsg_attr.mem_hndl.qword1,
		conn->gnpr_smsg_attr.mem_hndl.qword2,
		rem_param->gnpr_cqid,
		rem_param->gnpr_smsg_attr.msg_buffer,
		rem_param->gnpr_smsg_attr.mbox_offset,
		rem_param->gnpr_smsg_attr.mem_hndl.qword1,
		rem_param->gnpr_smsg_attr.mem_hndl.qword2);

	conn->gnc_peerstamp = connreq->gncr_peerstamp;
	conn->gnc_peer_connstamp = connreq->gncr_connstamp;
	conn->remote_mbox_addr = (void *)((char *)remote->msg_buffer + remote->mbox_offset);

	/* We update the reaper timeout once we have a valid conn and timeout */
	kgnilnd_update_reaper_timeout(GNILND_TO2KA(conn->gnc_timeout));

	return 0;

cleanup_out:
	rrc = kgnilnd_ep_unbind(conn->gnc_ephandle);
	/* not sure I can just let this fly */
	LASSERTF(rrc == GNI_RC_SUCCESS,
		"bad rc from gni_ep_unbind trying to cleanup: %d\n", rrc);

return_out:
	LASSERTF(rc != 0, "SOFTWARE BUG: rc == 0\n");
	CERROR("Error setting connection params from %s: %d\n",
	       libcfs_nid2str(connreq->gncr_srcnid), rc);
	return rc;
}

/* needs down_read on kgn_net_rw_sem held from before this call until
 * after the write_lock on kgn_peer_conn_lock - this ensures we stay sane
 * with kgnilnd_shutdown - it'll get the sem and set shutdown, then get the
 * kgn_peer_conn_lock to start del_peer'ing. If we hold the sem until after
 * kgn_peer_conn_lock is held, we guarantee that nobody calls
 * kgnilnd_add_peer_locked without checking gnn_shutdown */
int
kgnilnd_create_peer_safe(kgn_peer_t **peerp,
			 lnet_nid_t nid,
			 kgn_net_t *net,
			 int node_state)
{
	kgn_peer_t      *peer;
	int             rc;

	LASSERT(nid != LNET_NID_ANY);

	/* We don't pass the net around in the dgram anymore, so this is where
	 * we find it.  That works unless we are in shutdown or the nid has a
	 * net that is invalid; either way an error code must be returned in
	 * that case.
	 *
	 * If the net passed in is not NULL then we can use it; this saves
	 * looking it up when the calling function has access to the data.
	 */
	if (net == NULL) {
		rc = kgnilnd_find_net(nid, &net);
		if (rc < 0)
			return rc;
	} else {
		/* find_net adds a reference on the net; since we are not
		 * using it here, take the reference manually so the net
		 * refcounts stay correct when tearing down the net */
		kgnilnd_net_addref(net);
	}

	LIBCFS_ALLOC(peer, sizeof(*peer));
	if (peer == NULL) {
		kgnilnd_net_decref(net);
		return -ENOMEM;
	}
	peer->gnp_nid = nid;
	peer->gnp_down = node_state;

	/* translate from nid to nic addr & store */
	rc = kgnilnd_nid_to_nicaddrs(LNET_NIDADDR(nid), 1, &peer->gnp_host_id);
	if (rc <= 0) {
		kgnilnd_net_decref(net);
		LIBCFS_FREE(peer, sizeof(*peer));
		return -ESRCH;
	}
	CDEBUG(D_NET, "peer 0x%p->%s -> NIC 0x%x\n", peer,
		libcfs_nid2str(nid), peer->gnp_host_id);

	atomic_set(&peer->gnp_refcount, 1);     /* 1 ref for caller */
	atomic_set(&peer->gnp_dirty_eps, 0);

	INIT_LIST_HEAD(&peer->gnp_list);
	INIT_LIST_HEAD(&peer->gnp_connd_list);
	INIT_LIST_HEAD(&peer->gnp_conns);
	INIT_LIST_HEAD(&peer->gnp_tx_queue);

	/* the first reconnect should happen immediately, so we leave
	 * gnp_reconnect_interval set to 0 */

	LASSERTF(net != NULL, "peer 0x%p->%s with NULL net\n",
		 peer, libcfs_nid2str(nid));

	/* must have kgn_net_rw_sem held for this...  */
	if (net->gnn_shutdown) {
		/* shutdown has started already */
		kgnilnd_net_decref(net);
		LIBCFS_FREE(peer, sizeof(*peer));
		return -ESHUTDOWN;
	}

	peer->gnp_net = net;

	atomic_inc(&kgnilnd_data.kgn_npeers);

	*peerp = peer;
	return 0;
}

void
kgnilnd_destroy_peer(kgn_peer_t *peer)
{
	CDEBUG(D_NET, "peer %s %p deleted\n",
	       libcfs_nid2str(peer->gnp_nid), peer);
	LASSERTF(atomic_read(&peer->gnp_refcount) == 0,
		 "peer 0x%p->%s refs %d\n",
		 peer, libcfs_nid2str(peer->gnp_nid),
		 atomic_read(&peer->gnp_refcount));
	LASSERTF(atomic_read(&peer->gnp_dirty_eps) == 0,
		 "peer 0x%p->%s dirty eps %d\n",
		 peer, libcfs_nid2str(peer->gnp_nid),
		 atomic_read(&peer->gnp_dirty_eps));
	LASSERTF(peer->gnp_net != NULL, "peer %p (%s) with NULL net\n",
		 peer, libcfs_nid2str(peer->gnp_nid));
	LASSERTF(!kgnilnd_peer_active(peer),
		 "peer 0x%p->%s\n",
		 peer, libcfs_nid2str(peer->gnp_nid));
	LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE || peer->gnp_connecting == GNILND_PEER_KILL,
		 "peer 0x%p->%s, connecting %d\n",
		 peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
	LASSERTF(list_empty(&peer->gnp_conns),
		 "peer 0x%p->%s\n",
		 peer, libcfs_nid2str(peer->gnp_nid));
	LASSERTF(list_empty(&peer->gnp_tx_queue),
		 "peer 0x%p->%s\n",
		 peer, libcfs_nid2str(peer->gnp_nid));
	LASSERTF(list_empty(&peer->gnp_connd_list),
		 "peer 0x%p->%s\n",
		 peer, libcfs_nid2str(peer->gnp_nid));

	/* NB a peer's connections keep a reference on their peer until
	 * they are destroyed, so we can be assured that _all_ state to do
	 * with this peer has been cleaned up when its refcount drops to
	 * zero. */

	atomic_dec(&kgnilnd_data.kgn_npeers);
	kgnilnd_net_decref(peer->gnp_net);

	LIBCFS_FREE(peer, sizeof(*peer));
}

/* the conn might not have made it all the way through to a connected
 * state - but we need to purgatory any conn that a remote peer might
 * have seen through a posted dgram as well */
void
kgnilnd_add_purgatory_locked(kgn_conn_t *conn, kgn_peer_t *peer)
{
	kgn_mbox_info_t *mbox = NULL;
	ENTRY;

	/* NB - the caller should own the conn by removing it from the
	 * scheduler thread when finishing the close */

	LASSERTF(peer != NULL, "conn %p with NULL peer\n", conn);

	/* If this is still true, need to add the calls to unlink back in and
	 * figure out how to close the hole on loopback conns */
	LASSERTF(kgnilnd_peer_active(peer), "can't use inactive peer %s (%p)"
		" we'll never recover the resources\n",
		libcfs_nid2str(peer->gnp_nid), peer);

	CDEBUG(D_NET, "conn %p peer %p dev %p\n", conn, peer,
		conn->gnc_device);

	LASSERTF(conn->gnc_in_purgatory == 0,
		"Conn already in purgatory\n");
	conn->gnc_in_purgatory = 1;

	mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
	mbox->mbx_prev_purg_nid = peer->gnp_nid;
	mbox->mbx_add_purgatory = jiffies;
	kgnilnd_release_mbox(conn, 1);

	LASSERTF(list_empty(&conn->gnc_mdd_list),
		"conn 0x%p->%s with active purgatory hold MDD %d\n",
		conn, libcfs_nid2str(peer->gnp_nid),
		kgnilnd_count_list(&conn->gnc_mdd_list));

	EXIT;
}

/* Instead of detaching everything from purgatory here, we just mark the conn
 * as needing detach; when the reaper next checks the conn it will detach it.
 * Calling function requires write_lock held on kgn_peer_conn_lock
 */
void
kgnilnd_mark_for_detach_purgatory_all_locked(kgn_peer_t *peer)
{
	kgn_conn_t       *conn;

	list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
		if (conn->gnc_in_purgatory && !conn->gnc_needs_detach) {
			conn->gnc_needs_detach = 1;
			kgnilnd_admin_addref(kgnilnd_data.kgn_npending_detach);
		}
	}
}

/* Calling function needs a write_lock held on kgn_peer_conn_lock */
void
kgnilnd_detach_purgatory_locked(kgn_conn_t *conn, struct list_head *conn_list)
{
	kgn_mbox_info_t *mbox = NULL;

	/* if needed, add the conn purgatory data to the list passed in */
	if (conn->gnc_in_purgatory) {
		CDEBUG(D_NET, "peer %p->%s purg_conn %p@%s mdd_list #tx %d\n",
			conn->gnc_peer, libcfs_nid2str(conn->gnc_peer->gnp_nid),
			conn, kgnilnd_conn_state2str(conn),
			kgnilnd_count_list(&conn->gnc_mdd_list));

		mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
		mbox->mbx_detach_of_purgatory = jiffies;

		/* conn->gnc_list is the entry point on peer->gnp_conns, so detaching it
		 * here removes it from the list of 'valid' peer connections.
		 * We put the current conn onto a list of conns to call kgnilnd_release_purgatory_locked()
		 * and as such the caller of kgnilnd_detach_purgatory_locked() now owns that conn, since it's not
		 * on the peer's conn_list anymore.
		 */

		list_del_init(&conn->gnc_list);

		/* NB - only unlinking if we set pending in del_peer_locked from admin or
		 * shutdown */
		if (kgnilnd_peer_active(conn->gnc_peer) &&
		    conn->gnc_peer->gnp_pending_unlink &&
		    kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
			kgnilnd_unlink_peer_locked(conn->gnc_peer);
		}
		/* The reaper will not call detach unless the conn is fully through kgnilnd_complete_closed_conn.
		 * If the conn is not in a DONE state somehow we are attempting to detach even though
		 * the conn has not been fully cleaned up. If we detach while the conn is still closing
		 * we will end up with an orphaned connection that has a valid ep_handle but is not on a
		 * peer.
		 */

		LASSERTF(conn->gnc_state == GNILND_CONN_DONE, "Conn in invalid state %p@%s\n",
				conn, kgnilnd_conn_state2str(conn));

		/* move from peer to the delayed release list */
		list_add_tail(&conn->gnc_list, conn_list);
	}
}

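/* The expected drain pattern (a sketch; caller context and error handling
 * elided): collect conns on a private list under the write lock, then
 * release them outside it:
 *
 *	LIST_HEAD(detach_list);
 *
 *	write_lock(&kgnilnd_data.kgn_peer_conn_lock);
 *	kgnilnd_detach_purgatory_locked(conn, &detach_list);
 *	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
 *	kgnilnd_release_purgatory_list(&detach_list);
 */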
void
kgnilnd_release_purgatory_list(struct list_head *conn_list)
{
	kgn_device_t            *dev;
	kgn_conn_t              *conn, *connN;
	kgn_mdd_purgatory_t     *gmp, *gmpN;

	list_for_each_entry_safe(conn, connN, conn_list, gnc_list) {
		dev = conn->gnc_device;

		kgnilnd_release_mbox(conn, -1);
		conn->gnc_in_purgatory = 0;

		list_del_init(&conn->gnc_list);

		/* gnc_needs_detach is set in kgnilnd_del_conn_or_peer. It is used to keep track
		 * of conns that have been marked for detach by kgnilnd_del_conn_or_peer.
		 * The function uses kgn_npending_detach to verify the conn has
		 * actually been detached.
		 */

		if (conn->gnc_needs_detach)
			kgnilnd_admin_decref(kgnilnd_data.kgn_npending_detach);

		/* if this guy is really dead (we are doing release from reaper),
		 * make sure we tell LNet - if this is from other context,
		 * the checks in the function will prevent an errant
		 * notification */
		kgnilnd_peer_notify(conn->gnc_peer, conn->gnc_error, 0);

		list_for_each_entry_safe(gmp, gmpN, &conn->gnc_mdd_list,
					 gmp_list) {
			CDEBUG(D_NET,
			       "dev %p releasing held mdd %#llx.%#llx\n",
			       conn->gnc_device, gmp->gmp_map_key.qword1,
			       gmp->gmp_map_key.qword2);

			atomic_dec(&dev->gnd_n_mdd_held);
			kgnilnd_mem_mdd_release(conn->gnc_device->gnd_handle,
						&gmp->gmp_map_key);
			/* ignoring the return code - if kgni/ghal can't find it
			 * it must be released already */

			list_del_init(&gmp->gmp_list);
			LIBCFS_FREE(gmp, sizeof(*gmp));
		}
		/* lose conn ref for purgatory */
		kgnilnd_conn_decref(conn);
	}
}

/* needs write_lock on kgnilnd_data.kgn_peer_conn_lock held */
void
kgnilnd_peer_increase_reconnect_locked(kgn_peer_t *peer)
{
	int current_to;

	current_to = peer->gnp_reconnect_interval;

	/* we'll try to reconnect fast the first time, then back-off */
	if (current_to == 0) {
		peer->gnp_reconnect_time = jiffies - 1;
		current_to = *kgnilnd_tunables.kgn_min_reconnect_interval;
	} else {
		peer->gnp_reconnect_time = jiffies + cfs_time_seconds(current_to);
		/* add 50% of min timeout & retry */
		current_to += *kgnilnd_tunables.kgn_min_reconnect_interval / 2;
	}

	current_to = MIN(current_to,
			 *kgnilnd_tunables.kgn_max_reconnect_interval);

	peer->gnp_reconnect_interval = current_to;
	CDEBUG(D_NET, "peer %s can reconnect at %lu interval %lu\n",
	       libcfs_nid2str(peer->gnp_nid), peer->gnp_reconnect_time,
	       peer->gnp_reconnect_interval);
}
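/* For example, with kgn_min_reconnect_interval = 60: the first attempt is
 * immediate, then successive waits run 60, 90, 120, ... seconds (each retry
 * adds half the minimum), capped at kgn_max_reconnect_interval. */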

/* needs kgnilnd_data.kgn_peer_conn_lock held */
kgn_peer_t *
kgnilnd_find_peer_locked(lnet_nid_t nid)
{
	struct list_head *peer_list = kgnilnd_nid2peerlist(nid);
	kgn_peer_t       *peer;

	/* Chopping nid down to only NIDADDR using LNET_NIDADDR so we only
	 * have a single peer per device instead of a peer per nid/net combo.
	 */

	list_for_each_entry(peer, peer_list, gnp_list) {
		if (LNET_NIDADDR(nid) != LNET_NIDADDR(peer->gnp_nid))
			continue;

		CDEBUG(D_NET, "got peer [%p] -> %s c %d (%d)\n",
		       peer, libcfs_nid2str(nid),
		       peer->gnp_connecting,
		       atomic_read(&peer->gnp_refcount));
		return peer;
	}
	return NULL;
}

/* need write_lock on kgn_peer_conn_lock */
void
kgnilnd_unlink_peer_locked(kgn_peer_t *peer)
{
	LASSERTF(list_empty(&peer->gnp_conns),
		"peer 0x%p->%s\n",
		 peer, libcfs_nid2str(peer->gnp_nid));
	LASSERTF(list_empty(&peer->gnp_tx_queue),
		"peer 0x%p->%s\n",
		 peer, libcfs_nid2str(peer->gnp_nid));
	LASSERTF(kgnilnd_peer_active(peer),
		"peer 0x%p->%s\n",
		 peer, libcfs_nid2str(peer->gnp_nid));
	CDEBUG(D_NET, "unlinking peer 0x%p->%s\n",
		peer, libcfs_nid2str(peer->gnp_nid));

	list_del_init(&peer->gnp_list);
	kgnilnd_data.kgn_peer_version++;
	kgnilnd_admin_decref(kgnilnd_data.kgn_npending_unlink);
	/* lose peerlist's ref */
	kgnilnd_peer_decref(peer);
}
1319 int
1320 kgnilnd_get_peer_info(int index,
1321                       kgn_peer_t **found_peer,
1322                       lnet_nid_t *id, __u32 *nic_addr,
1323                       int *refcount, int *connecting)
1324 {
1325         struct list_head  *ptmp;
1326         kgn_peer_t        *peer;
1327         int               i;
1328         int               rc = -ENOENT;
1329
1330         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1331
1332         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1333
1334                 list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
1335                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1336
1337                         if (index-- > 0)
1338                                 continue;
1339
1340                         CDEBUG(D_NET, "found peer %p (%s) at index %d\n",
1341                                peer, libcfs_nid2str(peer->gnp_nid), index);
1342
1343                         *found_peer  = peer;
1344                         *id          = peer->gnp_nid;
1345                         *nic_addr    = peer->gnp_host_id;
1346                         *refcount    = atomic_read(&peer->gnp_refcount);
1347                         *connecting  = peer->gnp_connecting;
1348
1349                         rc = 0;
1350                         goto out;
1351                 }
1352         }
1353 out:
1354         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1355         if (rc)
1356                 CDEBUG(D_NET, "no gni peer at index %d\n", index);
1357         return rc;
1358 }
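
/*
 * Sketch of how an ioctl caller can enumerate the peer table with the
 * index-based accessor above (this is the pattern behind IOC_LIBCFS_GET_PEER
 * below; variable names are illustrative):
 *
 *      kgn_peer_t *peer;
 *      lnet_nid_t  nid;
 *      __u32       addr;
 *      int         refs, connecting, idx;
 *
 *      for (idx = 0; kgnilnd_get_peer_info(idx, &peer, &nid, &addr,
 *                                          &refs, &connecting) == 0; idx++)
 *              CDEBUG(D_NET, "peer %s refs %d\n",
 *                     libcfs_nid2str(nid), refs);
 */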
1359
1360 /* requires write_lock on kgn_peer_conn_lock held */
1361 void
1362 kgnilnd_add_peer_locked(lnet_nid_t nid, kgn_peer_t *new_stub_peer, kgn_peer_t **peerp)
1363 {
1364         kgn_peer_t        *peer, *peer2;
1365
1366         LASSERTF(new_stub_peer != NULL, "bad stub peer for nid %s\n",
1367                  libcfs_nid2str(nid));
1368
1369         peer2 = kgnilnd_find_peer_locked(nid);
1370         if (peer2 != NULL) {
1371                 /* A peer was created during the lock transition, so drop
1372                  * the new one we created */
1373                 kgnilnd_peer_decref(new_stub_peer);
1374                 peer = peer2;
1375         } else {
1376                 peer = new_stub_peer;
1377                 /* peer table takes existing ref on peer */
1378
1379                 LASSERTF(!kgnilnd_peer_active(peer),
1380                         "peer 0x%p->%s already in peer table\n",
1381                         peer, libcfs_nid2str(peer->gnp_nid));
1382                 list_add_tail(&peer->gnp_list,
1383                               kgnilnd_nid2peerlist(nid));
1384                 kgnilnd_data.kgn_peer_version++;
1385         }
1386
1387         LASSERTF(peer->gnp_net != NULL, "peer 0x%p->%s with NULL net\n",
1388                  peer, libcfs_nid2str(peer->gnp_nid));
1389         *peerp = peer;
1390 }
1391
1392 int
1393 kgnilnd_add_peer(kgn_net_t *net, lnet_nid_t nid, kgn_peer_t **peerp)
1394 {
1395         kgn_peer_t        *peer;
1396         int                rc;
1397         int                node_state;
1398         ENTRY;
1399
1400         if (nid == LNET_NID_ANY)
1401                 return -EINVAL;
1402
1403         node_state = kgnilnd_get_node_state(LNET_NIDADDR(nid));
1404
1405         /* NB - this will not block during normal operations -
1406          * the only writer of this is in the startup/shutdown path. */
1407         rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
1408         if (!rc) {
1409                 rc = -ESHUTDOWN;
1410                 RETURN(rc);
1411         }
1412         rc = kgnilnd_create_peer_safe(&peer, nid, net, node_state);
1413         if (rc != 0) {
1414                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1415                 RETURN(rc);
1416         }
1417
1418         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1419         up_read(&kgnilnd_data.kgn_net_rw_sem);
1420
1421         kgnilnd_add_peer_locked(nid, peer, peerp);
1422
1423         CDEBUG(D_NET, "peer 0x%p->%s connecting %d\n",
               *peerp, libcfs_nid2str((*peerp)->gnp_nid),
1425                (*peerp)->gnp_connecting);
1426
1427         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1428         RETURN(0);
1429 }
1430
1431 /* needs write_lock on kgn_peer_conn_lock */
1432 void
1433 kgnilnd_cancel_peer_connect_locked(kgn_peer_t *peer, struct list_head *zombies)
1434 {
1435         kgn_tx_t        *tx, *txn;
1436
        /* we do care about the state of gnp_connecting - we could be between
         * reconnect attempts, so try to find the dgram and cancel the TX
         * anyway. If we are in the process of posting, DON'T do anything;
         * once it fails or succeeds we can nuke the connect attempt.
         * We have no idea where in kgnilnd_post_dgram we are, so we can't
         * attempt to cancel until the function is done.
         */
1444
        /* make sure peer isn't in the process of connecting or waiting for
         * connect */
1446         spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1447         if (!(list_empty(&peer->gnp_connd_list))) {
1448                 list_del_init(&peer->gnp_connd_list);
1449                 /* remove connd ref */
1450                 kgnilnd_peer_decref(peer);
1451         }
1452         spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1453
        if (peer->gnp_connecting == GNILND_PEER_POSTING ||
            peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH) {
                peer->gnp_connecting = GNILND_PEER_NEEDS_DEATH;
                /* We are in the middle of posting right now; flagging
                 * GNILND_PEER_NEEDS_DEATH tells the posting path to cancel
                 * the connect once the post fails or succeeds, so we are
                 * finished for now */
        } else {
                /* no need for an exchange: we hold the peer lock and it is
                 * ready for us to nuke */
1460                 LASSERTF(peer->gnp_connecting != GNILND_PEER_POSTING,
1461                         "Peer in invalid state 0x%p->%s, connecting %d\n",
1462                         peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
1463                 peer->gnp_connecting = GNILND_PEER_IDLE;
1464                 set_mb(peer->gnp_last_dgram_errno, -ETIMEDOUT);
1465                 kgnilnd_find_and_cancel_dgram(peer->gnp_net->gnn_dev,
1466                                                       peer->gnp_nid);
1467         }
1468
        /* The least we can do is nuke the TXs no matter what... */
1470         list_for_each_entry_safe(tx, txn, &peer->gnp_tx_queue, tx_list) {
1471                 kgnilnd_tx_del_state_locked(tx, peer, NULL,
1472                                            GNILND_TX_ALLOCD);
1473                 list_add_tail(&tx->tx_list, zombies);
1474         }
1475 }
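
/*
 * Callers complete the cancelled TXs outside the lock; a minimal sketch of
 * the pattern (used by kgnilnd_del_conn_or_peer and kgnilnd_report_node_state
 * below):
 *
 *      LIST_HEAD(zombies);
 *
 *      write_lock(&kgnilnd_data.kgn_peer_conn_lock);
 *      kgnilnd_cancel_peer_connect_locked(peer, &zombies);
 *      write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
 *      kgnilnd_txlist_done(&zombies, error);
 */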
1476
1477 /* needs write_lock on kgn_peer_conn_lock */
1478 void
1479 kgnilnd_del_peer_locked(kgn_peer_t *peer, int error)
1480 {
1481         /* this peer could be passive and only held for purgatory,
1482          * take a ref to ensure it doesn't disappear in this function */
1483         kgnilnd_peer_addref(peer);
1484
1485         CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);
1486
1487         /* if purgatory release cleared it out, don't try again */
1488         if (kgnilnd_peer_active(peer)) {
1489                 /* always do this to allow kgnilnd_start_connect and
1490                  * kgnilnd_finish_connect to catch this before they
1491                  * wrap up their operations */
1492                 if (kgnilnd_can_unlink_peer_locked(peer)) {
1493                         /* already released purgatory, so only active
1494                          * conns hold it */
1495                         kgnilnd_unlink_peer_locked(peer);
1496                 } else {
1497                         kgnilnd_close_peer_conns_locked(peer, error);
1498                         /* peer unlinks itself when last conn is closed */
1499                 }
1500         }
1501
1502         /* we are done, release back to the wild */
1503         kgnilnd_peer_decref(peer);
1504 }
1505
1506 int
1507 kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
1508                           int error)
1509 {
        LIST_HEAD               (zombies);
1512         struct list_head        *ptmp, *pnxt;
1513         kgn_peer_t              *peer;
1514         int                     lo;
1515         int                     hi;
1516         int                     i;
1517         int                     rc = -ENOENT;
1518
1519         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1520
1521         if (nid != LNET_NID_ANY)
1522                 lo = hi = kgnilnd_nid2peerlist(nid) - kgnilnd_data.kgn_peers;
1523         else {
1524                 lo = 0;
1525                 hi = *kgnilnd_tunables.kgn_peer_hash_size - 1;
1526                 /* wildcards always succeed */
1527                 rc = 0;
1528         }
1529
1530         for (i = lo; i <= hi; i++) {
1531                 list_for_each_safe(ptmp, pnxt, &kgnilnd_data.kgn_peers[i]) {
1532                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1533
1534                         LASSERTF(peer->gnp_net != NULL,
1535                                 "peer %p (%s) with NULL net\n",
1536                                  peer, libcfs_nid2str(peer->gnp_nid));
1537
1538                         if (net != NULL && peer->gnp_net != net)
1539                                 continue;
1540
                        if (!(nid == LNET_NID_ANY ||
                              LNET_NIDADDR(peer->gnp_nid) == LNET_NIDADDR(nid)))
1542                                 continue;
1543
1544                         /* In both cases, we want to stop any in-flight
1545                          * connect attempts */
1546                         kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1547
1548                         switch (command) {
1549                         case GNILND_DEL_CONN:
1550                                 kgnilnd_close_peer_conns_locked(peer, error);
1551                                 break;
1552                         case GNILND_DEL_PEER:
1553                                 peer->gnp_pending_unlink = 1;
1554                                 kgnilnd_admin_addref(kgnilnd_data.kgn_npending_unlink);
1555                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1556                                 kgnilnd_del_peer_locked(peer, error);
1557                                 break;
1558                         case GNILND_CLEAR_PURGATORY:
                                /* Mark everything ready for detach; the
                                 * reaper will clean up once we release the
                                 * kgn_peer_conn_lock
                                 */
1562                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1563                                 peer->gnp_last_errno = -EISCONN;
1564                                 /* clear reconnect so he can reconnect soon */
1565                                 peer->gnp_reconnect_time = 0;
1566                                 peer->gnp_reconnect_interval = 0;
1567                                 break;
1568                         default:
1569                                 CERROR("bad command %d\n", command);
1570                                 LBUG();
1571                         }
1572                         /* we matched something */
1573                         rc = 0;
1574                 }
1575         }
1576
1577         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1578
1579         /* nuke peer TX */
1580         kgnilnd_txlist_done(&zombies, error);
1581
        /* This function does not return until the commands it initiated have
         * completed, since they have to work their way through the other
         * threads. In the case of shutdown, threads are not woken up until
         * after this call is initiated, so we cannot wait - we just need to
         * return. The same applies to stack reset: we shouldn't wait, as the
         * reset thread handles the closing.
         */
1588
1589         CFS_RACE(CFS_FAIL_GNI_RACE_RESET);
1590
1591         if (error == -ENOTRECOVERABLE || error == -ESHUTDOWN) {
1592                 return rc;
1593         }
1594
1595         i = 4;
1596         while (atomic_read(&kgnilnd_data.kgn_npending_conns)   ||
1597                atomic_read(&kgnilnd_data.kgn_npending_detach)  ||
1598                atomic_read(&kgnilnd_data.kgn_npending_unlink)) {
1599
1600                 set_current_state(TASK_UNINTERRUPTIBLE);
1601                 schedule_timeout(cfs_time_seconds(1));
1602                 i++;
1603
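                /* (i & -i) isolates the lowest set bit, so the test below is
                 * true only when i is a power of two; the warning is thus
                 * emitted with exponentially decreasing frequency */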
                CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
                       "Waiting on %d peers %d closes %d detaches\n",
                       atomic_read(&kgnilnd_data.kgn_npending_unlink),
                       atomic_read(&kgnilnd_data.kgn_npending_conns),
                       atomic_read(&kgnilnd_data.kgn_npending_detach));
1608         }
1609
1610         return rc;
1611 }
1612
1613 kgn_conn_t *
1614 kgnilnd_get_conn_by_idx(int index)
1615 {
1616         kgn_peer_t        *peer;
1617         struct list_head  *ptmp;
1618         kgn_conn_t        *conn;
1619         struct list_head  *ctmp;
1620         int                i;
1621
1623         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1624                 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1625                 list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
1626
1627                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1628
1629                         list_for_each(ctmp, &peer->gnp_conns) {
1630                                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
1631
1632                                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1633                                         continue;
1634
1635                                 if (index-- > 0)
1636                                         continue;
1637
1638                                 CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
1639                                        libcfs_nid2str(conn->gnc_peer->gnp_nid),
1640                                        atomic_read(&conn->gnc_refcount));
1641                                 kgnilnd_conn_addref(conn);
1642                                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1643                                 return conn;
1644                         }
1645                 }
1646                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1647         }
1648
1649         return NULL;
1650 }
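
/*
 * The conn returned above carries a reference the caller must drop; a minimal
 * sketch (IOC_LIBCFS_GET_CONN below follows this pattern):
 *
 *      kgn_conn_t *conn = kgnilnd_get_conn_by_idx(0);
 *
 *      if (conn != NULL) {
 *              ... inspect conn ...
 *              kgnilnd_conn_decref(conn);
 *      }
 */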
1651
1652 int
1653 kgnilnd_get_conn_info(kgn_peer_t *peer,
1654                       int *device_id, __u64 *peerstamp,
1655                       int *tx_seq, int *rx_seq,
1656                       int *fmaq_len, int *nfma, int *nrdma)
1657 {
1658         kgn_conn_t        *conn;
1659         int               rc = 0;
1660
1661         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1662
1663         conn = kgnilnd_find_conn_locked(peer);
1664         if (conn == NULL) {
1665                 rc = -ENOENT;
1666                 goto out;
1667         }
1668
1669         *device_id = conn->gnc_device->gnd_host_id;
1670         *peerstamp = conn->gnc_peerstamp;
1671         *tx_seq = atomic_read(&conn->gnc_tx_seq);
1672         *rx_seq = atomic_read(&conn->gnc_rx_seq);
1673         *fmaq_len = kgnilnd_count_list(&conn->gnc_fmaq);
1674         *nfma = atomic_read(&conn->gnc_nlive_fma);
1675         *nrdma = atomic_read(&conn->gnc_nlive_rdma);
1676 out:
1677         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1678         return rc;
1679 }
1680
1681 /* needs write_lock on kgn_peer_conn_lock */
1682 int
1683 kgnilnd_close_peer_conns_locked(kgn_peer_t *peer, int why)
1684 {
1685         kgn_conn_t         *conn;
1686         struct list_head   *ctmp, *cnxt;
1687         int                 count = 0;
1688
1689         list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
1690                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
1691
1692                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1693                         continue;
1694
1695                 count++;
                /* we mark gnc_needs_closing and increment kgn_npending_conns
                 * so that kgnilnd_del_conn_or_peer can wait on the other
                 * threads closing and cleaning up the connection.
                 */
1700                 if (!conn->gnc_needs_closing) {
1701                         conn->gnc_needs_closing = 1;
1702                         kgnilnd_admin_addref(kgnilnd_data.kgn_npending_conns);
1703                 }
1704                 kgnilnd_close_conn_locked(conn, why);
1705         }
1706         return count;
1707 }
1708
1709 int
1710 kgnilnd_report_node_state(lnet_nid_t nid, int down)
1711 {
1712         int         rc;
1713         kgn_peer_t  *peer, *new_peer;
1714         LIST_HEAD(zombies);
1715
1716         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1717         peer = kgnilnd_find_peer_locked(nid);
1718
1719         if (peer == NULL) {
1720                 int       i;
1721                 int       found_net = 0;
1722                 kgn_net_t *net;
1723
1724                 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1725
1726                 /* Don't add a peer for node up events */
1727                 if (down == GNILND_RCA_NODE_UP) {
1728                         return 0;
1729                 }
1730
1731                 /* find any valid net - we don't care which one... */
1732                 down_read(&kgnilnd_data.kgn_net_rw_sem);
1733                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
1734                         list_for_each_entry(net, &kgnilnd_data.kgn_nets[i],
1735                                             gnn_list) {
1736                                 found_net = 1;
1737                                 break;
1738                         }
1739
1740                         if (found_net) {
1741                                 break;
1742                         }
1743                 }
1744                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1745
1746                 if (!found_net) {
1747                         CNETERR("Could not find a net for nid %lld\n", nid);
1748                         return 1;
1749                 }
1750
1751                 /* The nid passed in does not yet contain the net portion.
1752                  * Let's build it up now
1753                  */
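                /* e.g. an event carrying bare host address 512 on a "gni"
                 * net becomes the full NID 512@gni (illustrative values) */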
1754                 nid = LNET_MKNID(LNET_NIDNET(net->gnn_ni->ni_nid), nid);
1755                 rc = kgnilnd_add_peer(net, nid, &new_peer);
1756
1757                 if (rc) {
1758                         CNETERR("Could not add peer for nid %lld, rc %d\n",
1759                                 nid, rc);
1760                         return 1;
1761                 }
1762
1763                 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1764                 peer = kgnilnd_find_peer_locked(nid);
1765
1766                 if (peer == NULL) {
1767                         CNETERR("Could not find peer for nid %lld\n", nid);
1768                         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1769                         return 1;
1770                 }
1771         }
1772
1773         peer->gnp_down = down;
1774
1775         if (down == GNILND_RCA_NODE_DOWN) {
1776                 kgn_conn_t *conn;
1777
1778                 peer->gnp_down_event_time = jiffies;
1779                 kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1780                 conn = kgnilnd_find_conn_locked(peer);
1781
1782                 if (conn != NULL) {
1783                         kgnilnd_close_conn_locked(conn, -ENETRESET);
1784                 }
1785         } else {
1786                 peer->gnp_up_event_time = jiffies;
1787         }
1788
1789         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1790
1791         if (down == GNILND_RCA_NODE_DOWN) {
1792                 /* using ENETRESET so we don't get messages from
1793                  * kgnilnd_tx_done
1794                  */
1795                 kgnilnd_txlist_done(&zombies, -ENETRESET);
1796                 kgnilnd_peer_notify(peer, -ECONNRESET, 0);
1797                 LCONSOLE_INFO("Received down event for nid %d\n",
1798                               LNET_NIDADDR(nid));
1799         }
1800
1801         return 0;
1802 }
1803
1804 int
1805 kgnilnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
1806 {
1807         struct libcfs_ioctl_data *data = arg;
1808         kgn_net_t                *net = ni->ni_data;
1809         int                       rc = -EINVAL;
1810
1811         LASSERT(ni == net->gnn_ni);
1812
1813         switch (cmd) {
1814         case IOC_LIBCFS_GET_PEER: {
1815                 lnet_nid_t   nid = 0;
1816                 kgn_peer_t  *peer = NULL;
1817                 __u32 nic_addr = 0;
1818                 __u64 peerstamp = 0;
1819                 int peer_refcount = 0, peer_connecting = 0;
1820                 int device_id = 0;
1821                 int tx_seq = 0, rx_seq = 0;
1822                 int fmaq_len = 0, nfma = 0, nrdma = 0;
1823
1824                 rc = kgnilnd_get_peer_info(data->ioc_count, &peer,
1825                                            &nid, &nic_addr, &peer_refcount,
1826                                            &peer_connecting);
1827                 if (rc)
1828                         break;
1829
                /* Barf */
                /* LNET_MKNID is used to mask from LNet the multiplexing and
                 * demultiplexing of connections and peers.  LNet assumes one
                 * conn and one peer per net; LNET_MKNID/LNET_NIDADDR let us
                 * show LNet what it wants to see instead of the underlying
                 * network actually used to send the data.
                 */
1835                 data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(nid));
1836                 data->ioc_flags  = peer_connecting;
1837                 data->ioc_count  = peer_refcount;
1838
1839                 rc = kgnilnd_get_conn_info(peer, &device_id, &peerstamp,
1840                                            &tx_seq, &rx_seq, &fmaq_len,
1841                                            &nfma, &nrdma);
1842
                /* This is allowable - a persistent peer might not
                 * have a connection */
1845                 if (rc) {
1846                         /* flag to indicate we are not connected -
1847                          * need to print as such */
1848                         data->ioc_flags |= (1<<16);
1849                         rc = 0;
1850                 } else {
1851                         /* still barf */
1852                         data->ioc_net = device_id;
1853                         data->ioc_u64[0] = peerstamp;
1854                         data->ioc_u32[0] = fmaq_len;
1855                         data->ioc_u32[1] = nfma;
1856                         data->ioc_u32[2] = tx_seq;
1857                         data->ioc_u32[3] = rx_seq;
1858                         data->ioc_u32[4] = nrdma;
1859                 }
1860                 break;
1861         }
1862         case IOC_LIBCFS_ADD_PEER: {
                /* just a dummy value to allow using the common interface */
1864                 kgn_peer_t      *peer;
1865                 rc = kgnilnd_add_peer(net, data->ioc_nid, &peer);
1866                 break;
1867         }
1868         case IOC_LIBCFS_DEL_PEER: {
                /* NULL is passed in so it affects all peers in existence,
                 * without regard to network, as the peer may not exist on
                 * the network LNET believes it to be on.
                 */
1872                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1873                                               GNILND_DEL_PEER, -EUCLEAN);
1874                 break;
1875         }
1876         case IOC_LIBCFS_GET_CONN: {
1877                 kgn_conn_t *conn = kgnilnd_get_conn_by_idx(data->ioc_count);
1878
1879                 if (conn == NULL)
1880                         rc = -ENOENT;
1881                 else {
1882                         rc = 0;
                        /* LNET_MKNID is used to build the correct address
                         * based on what LNET wants to see, instead of the
                         * generic connection that is used to send the data
                         */
1886                         data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(conn->gnc_peer->gnp_nid));
1887                         data->ioc_u32[0] = conn->gnc_device->gnd_id;
1888                         kgnilnd_conn_decref(conn);
1889                 }
1890                 break;
1891         }
1892         case IOC_LIBCFS_CLOSE_CONNECTION: {
1893                 /* use error = -ENETRESET to indicate it was lctl disconnect */
                /* NULL is passed in so it affects all the nets, as the
                 * connection is virtual and may not exist on the network
                 * LNET believes it to be on.
                 */
1897                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1898                                               GNILND_DEL_CONN, -ENETRESET);
1899                 break;
1900         }
1901         case IOC_LIBCFS_PUSH_CONNECTION: {
1902                 /* we use this to flush purgatory */
1903                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1904                                               GNILND_CLEAR_PURGATORY, -EUCLEAN);
1905                 break;
1906         }
1907         case IOC_LIBCFS_REGISTER_MYNID: {
1908                 /* Ignore if this is a noop */
1909                 if (data->ioc_nid == ni->ni_nid) {
1910                         rc = 0;
1911                 } else {
1912                         CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
1913                                libcfs_nid2str(data->ioc_nid),
1914                                libcfs_nid2str(ni->ni_nid));
1915                         rc = -EINVAL;
1916                 }
1917                 break;
1918         }
1919         }
1920
1921         return rc;
1922 }
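
/*
 * Sketch of how a userspace consumer of IOC_LIBCFS_GET_PEER above could
 * decode the overloaded ioc_flags field (names are illustrative; bit 16 is
 * the "not connected" flag set in the handler):
 *
 *      int not_connected = !!(data.ioc_flags & (1 << 16));
 *      int connecting    = data.ioc_flags & 0xffff;
 */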
1923
1924 void
1925 kgnilnd_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
1926 {
1927         kgn_net_t               *net = ni->ni_data;
1928         kgn_tx_t                *tx;
1929         kgn_peer_t              *peer = NULL;
1930         kgn_conn_t              *conn = NULL;
1931         lnet_process_id_t       id = {
1932                 .nid = nid,
1933                 .pid = LNET_PID_LUSTRE,
1934         };
1935         ENTRY;
1936
1937         /* I expect to find him, so only take a read lock */
1938         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1939         peer = kgnilnd_find_peer_locked(nid);
1940         if (peer != NULL) {
1941                 /* LIE if in a quiesce - we will update the timeouts after,
1942                  * but we don't want sends failing during it */
1943                 if (kgnilnd_data.kgn_quiesce_trigger) {
1944                         *when = jiffies;
1945                         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1946                         GOTO(out, 0);
1947                 }
1948
1949                 /* Update to best guess, might refine on later checks */
1950                 *when = peer->gnp_last_alive;
1951
1952                 /* we have a peer, how about a conn? */
1953                 conn = kgnilnd_find_conn_locked(peer);
1954
1955                 if (conn == NULL)  {
1956                         /* if there is no conn, check peer last errno to see if clean disconnect
1957                          * - if it was, we lie to LNet because we believe a TX would complete
1958                          * on reconnect */
1959                         if (kgnilnd_conn_clean_errno(peer->gnp_last_errno)) {
1960                                 *when = jiffies;
1961                         }
1962                         /* we still want to fire a TX and new conn in this case */
1963                 } else {
1964                         /* gnp_last_alive is valid, run for the hills */
1965                         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1966                         GOTO(out, 0);
1967                 }
1968         }
1969         /* if we get here, either we have no peer or no conn for him, so fire off
1970          * new TX to trigger conn setup */
1971         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1972
        /* if we couldn't find him, we'll fire up a TX and get connected -
         * if we don't do this, then after ni_peer_timeout LNet will declare
         * him dead. So really we treat kgnilnd_query as a bit of a
         * 'connect now' type event, because LNet only calls it when it wants
         * to send.
         *
         * Use a real TX for this to get the proper gnp_tx_queue behavior, etc.
         * Normally we'd use kgnilnd_send_ctlmsg for this, but we don't really
         * care that this goes out quickly since we already know we need a new
         * conn formed */
1982         if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
1983                 return;
1984
1985         tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, ni->ni_nid);
1986         if (tx != NULL) {
1987                 kgnilnd_launch_tx(tx, net, &id);
1988         }
1989 out:
1990         CDEBUG(D_NETTRACE, "peer 0x%p->%s when %lu\n", peer,
1991                libcfs_nid2str(nid), *when);
1992         EXIT;
1993 }
1994
1995 int
1996 kgnilnd_dev_init(kgn_device_t *dev)
1997 {
1998         gni_return_t      rrc;
1999         int               rc = 0;
2000         unsigned int      cq_size;
2001         ENTRY;
2002
        /* The size of these CQs should be able to accommodate the outgoing
         * RDMA and SMSG transactions.  Since we don't really know what we
         * need here, we'll take credits * 2 * 3 to allow a bunch.
         * We need to dig into this more with the performance work. */
2007         cq_size = *kgnilnd_tunables.kgn_credits * 2 * 3;
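        /* e.g. with the credits tunable at 256 (an illustrative value),
         * cq_size = 256 * 2 * 3 = 1536 entries */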
2008
2009         rrc = kgnilnd_cdm_create(dev->gnd_id, *kgnilnd_tunables.kgn_ptag,
2010                                  *kgnilnd_tunables.kgn_pkey, 0,
2011                                  &dev->gnd_domain);
2012         if (rrc != GNI_RC_SUCCESS) {
2013                 CERROR("Can't create CDM %d (%d)\n", dev->gnd_id, rrc);
2014                 GOTO(failed, rc = -ENODEV);
2015         }
2016
2017         rrc = kgnilnd_cdm_attach(dev->gnd_domain, dev->gnd_id,
2018                                  &dev->gnd_host_id, &dev->gnd_handle);
2019         if (rrc != GNI_RC_SUCCESS) {
2020                 CERROR("Can't attach CDM to device %d (%d)\n",
2021                         dev->gnd_id, rrc);
2022                 GOTO(failed, rc = -ENODEV);
2023         }
2024
2025         /* a bit gross, but not much we can do - Aries Sim doesn't have
2026          * hardcoded NIC/NID that we can use */
2027         rc = kgnilnd_setup_nic_translation(dev->gnd_host_id);
2028         if (rc != 0)
2029                 GOTO(failed, rc = -ENODEV);
2030
2031         /* only dev 0 gets the errors - no need to reset the stack twice
2032          * - this works because we have a single PTAG, if we had more
2033          * then we'd need to have multiple handlers */
2034         if (dev->gnd_id == 0) {
2035                 rrc = kgnilnd_subscribe_errors(dev->gnd_handle,
2036                                                 GNI_ERRMASK_CRITICAL |
2037                                                 GNI_ERRMASK_UNKNOWN_TRANSACTION,
2038                                               0, NULL, kgnilnd_critical_error,
2039                                               &dev->gnd_err_handle);
2040                 if (rrc != GNI_RC_SUCCESS) {
2041                         CERROR("Can't subscribe for errors on device %d: rc %d\n",
2042                                 dev->gnd_id, rrc);
2043                         GOTO(failed, rc = -ENODEV);
2044                 }
2045
2046                 rc = kgnilnd_set_quiesce_callback(dev->gnd_handle,
2047                                                   kgnilnd_quiesce_end_callback);
2048                 if (rc != GNI_RC_SUCCESS) {
2049                         CERROR("Can't subscribe for quiesce callback on device %d: rc %d\n",
                                dev->gnd_id, rc);
2051                         GOTO(failed, rc = -ENODEV);
2052                 }
2053         }
2054
        rc = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_IP,
                              &kgnilnd_data.kgn_sock);
        if (rc < 0) {
                CERROR("sock_create returned %d\n", rc);
                GOTO(failed, rc);
        }
2060
2061         rc = kgnilnd_nicaddr_to_nid(dev->gnd_host_id, &dev->gnd_nid);
2062         if (rc < 0) {
2063                 /* log messages during startup */
2064                 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
2065                         CERROR("couldn't translate host_id 0x%x to nid. rc %d\n",
2066                                 dev->gnd_host_id, rc);
2067                 }
2068                 GOTO(failed, rc = -ESRCH);
2069         }
2070         CDEBUG(D_NET, "NIC %x -> NID %d\n", dev->gnd_host_id, dev->gnd_nid);
2071
2072         rrc = kgnilnd_cq_create(dev->gnd_handle, *kgnilnd_tunables.kgn_credits,
2073                                 0, kgnilnd_device_callback,
2074                                 dev->gnd_id, &dev->gnd_snd_rdma_cqh);
2075         if (rrc != GNI_RC_SUCCESS) {
2076                 CERROR("Can't create rdma send cq size %u for device "
2077                        "%d (%d)\n", cq_size, dev->gnd_id, rrc);
2078                 GOTO(failed, rc = -EINVAL);
2079         }
2080
2081         rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
2082                         0, kgnilnd_device_callback, dev->gnd_id,
2083                         &dev->gnd_snd_fma_cqh);
2084         if (rrc != GNI_RC_SUCCESS) {
2085                 CERROR("Can't create fma send cq size %u for device %d (%d)\n",
2086                        cq_size, dev->gnd_id, rrc);
2087                 GOTO(failed, rc = -EINVAL);
2088         }
2089
2090         /* This one we size differently - overflows are possible and it needs to be
2091          * sized based on machine size */
2092         rrc = kgnilnd_cq_create(dev->gnd_handle,
2093                         *kgnilnd_tunables.kgn_fma_cq_size,
2094                         0, kgnilnd_device_callback, dev->gnd_id,
2095                         &dev->gnd_rcv_fma_cqh);
2096         if (rrc != GNI_RC_SUCCESS) {
2097                 CERROR("Can't create fma cq size %d for device %d (%d)\n",
2098                        *kgnilnd_tunables.kgn_fma_cq_size, dev->gnd_id, rrc);
2099                 GOTO(failed, rc = -EINVAL);
2100         }
2101
2102         rrc = kgnilnd_register_smdd_buf(dev);
2103         if (rrc != GNI_RC_SUCCESS) {
2104                 GOTO(failed, rc = -EINVAL);
2105         }
2106
2107         RETURN(0);
2108
2109 failed:
2110         kgnilnd_dev_fini(dev);
2111         RETURN(rc);
2112 }
2113
2114 void
2115 kgnilnd_dev_fini(kgn_device_t *dev)
2116 {
2117         gni_return_t rrc;
2118         ENTRY;
2119
2120         /* At quiesce or rest time, need to loop through and clear gnd_ready_conns ?*/
2121         LASSERTF(list_empty(&dev->gnd_ready_conns) &&
2122                  list_empty(&dev->gnd_map_tx) &&
2123                  list_empty(&dev->gnd_rdmaq),
2124                  "dev 0x%p ready_conns %d@0x%p map_tx %d@0x%p rdmaq %d@0x%p\n",
2125                  dev, kgnilnd_count_list(&dev->gnd_ready_conns), &dev->gnd_ready_conns,
2126                  kgnilnd_count_list(&dev->gnd_map_tx), &dev->gnd_map_tx,
2127                  kgnilnd_count_list(&dev->gnd_rdmaq), &dev->gnd_rdmaq);
2128
2129         /* These should follow from tearing down all connections */
2130         LASSERTF(dev->gnd_map_nphys == 0 && dev->gnd_map_physnop == 0,
2131                 "%d physical mappings of %d pages still mapped\n",
2132                  dev->gnd_map_nphys, dev->gnd_map_physnop);
2133
2134         LASSERTF(dev->gnd_map_nvirt == 0 && dev->gnd_map_virtnob == 0,
2135                 "%d virtual mappings of %llu bytes still mapped\n",
2136                  dev->gnd_map_nvirt, dev->gnd_map_virtnob);
2137
2138         LASSERTF(atomic_read(&dev->gnd_n_mdd) == 0 &&
2139                  atomic_read(&dev->gnd_n_mdd_held) == 0 &&
2140                  atomic64_read(&dev->gnd_nbytes_map) == 0,
2141                 "%d SMSG mappings of %ld bytes still mapped or held %d\n",
2142                  atomic_read(&dev->gnd_n_mdd),
2143                  atomic64_read(&dev->gnd_nbytes_map), atomic_read(&dev->gnd_n_mdd_held));
2144
2145         LASSERT(list_empty(&dev->gnd_map_list));
2146
2147         /* What other assertions needed to ensure all connections torn down ? */
2148
2149         /* check all counters == 0 (EP, MDD, etc) */
2150
        /* if we are resetting due to quiesce (stack reset), don't check
         * thread states */
2153         LASSERTF(kgnilnd_data.kgn_quiesce_trigger ||
2154                 atomic_read(&kgnilnd_data.kgn_nthreads) == 0,
2155                 "tried to shutdown with threads active\n");
2156
2157         if (dev->gnd_smdd_hold_buf) {
2158                 rrc = kgnilnd_deregister_smdd_buf(dev);
2159                 LASSERTF(rrc == GNI_RC_SUCCESS,
                        "bad rc from deregistration of sMDD buffer: %d\n", rrc);
2161                 dev->gnd_smdd_hold_buf = NULL;
2162         }
2163
2164         if (dev->gnd_rcv_fma_cqh) {
2165                 rrc = kgnilnd_cq_destroy(dev->gnd_rcv_fma_cqh);
2166                 LASSERTF(rrc == GNI_RC_SUCCESS,
2167                         "bad rc from gni_cq_destroy on rcv_fma_cqh: %d\n", rrc);
2168                 dev->gnd_rcv_fma_cqh = NULL;
2169         }
2170
2171         if (dev->gnd_snd_rdma_cqh) {
2172                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_rdma_cqh);
2173                 LASSERTF(rrc == GNI_RC_SUCCESS,
2174                         "bad rc from gni_cq_destroy on send_rdma_cqh: %d\n", rrc);
2175                 dev->gnd_snd_rdma_cqh = NULL;
2176         }
2177
2178         if (dev->gnd_snd_fma_cqh) {
2179                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_fma_cqh);
2180                 LASSERTF(rrc == GNI_RC_SUCCESS,
2181                         "bad rc from gni_cq_destroy on snd_fma_cqh: %d\n", rrc);
2182                 dev->gnd_snd_fma_cqh = NULL;
2183         }
2184
2185         if (dev->gnd_err_handle) {
2186                 rrc = kgnilnd_release_errors(dev->gnd_err_handle);
2187                 LASSERTF(rrc == GNI_RC_SUCCESS,
2188                         "bad rc from gni_release_errors: %d\n", rrc);
2189                 dev->gnd_err_handle = NULL;
2190         }
2191
2192         if (dev->gnd_domain) {
2193                 rrc = kgnilnd_cdm_destroy(dev->gnd_domain);
2194                 LASSERTF(rrc == GNI_RC_SUCCESS,
2195                         "bad rc from gni_cdm_destroy: %d\n", rrc);
2196                 dev->gnd_domain = NULL;
2197         }
2198
2199         if (kgnilnd_data.kgn_sock)
2200                 sock_release(kgnilnd_data.kgn_sock);
2201
2202         EXIT;
2203 }
2204
2205 int kgnilnd_base_startup(void)
2206 {
2207         struct timeval       tv;
2208         int                  pkmem = atomic_read(&libcfs_kmemory);
2209         int                  rc;
        int                  i;
        int                  j;
2211         kgn_device_t        *dev;
2212         struct task_struct  *thrd;
2213
#if defined(CONFIG_CRAY_XT) && !defined(CONFIG_CRAY_COMPUTE)
        struct sysinfo si;
#endif
2221
2222         ENTRY;
2223
2224         LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_NOTHING,
2225                 "init %d\n", kgnilnd_data.kgn_init);
2226
        /* zero pointers, flags etc */
        memset(&kgnilnd_data, 0, sizeof(kgnilnd_data));

#if defined(CONFIG_CRAY_XT) && !defined(CONFIG_CRAY_COMPUTE)
        /* limit how much memory can be allocated for fma blocks in
         * instances where many nodes need to reconnect at the same time;
         * this must be set after the memset above or it would be zeroed */
        si_meminfo(&si);
        kgnilnd_data.free_pages_limit = si.totalram / 4;
#endif

        kgnilnd_check_kgni_version();
2230
2231         /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
2232          * a unique (for all time) connstamp so we can uniquely identify
2233          * the sender.  The connstamp is an incrementing counter
2234          * initialised with seconds + microseconds at startup time.  So we
2235          * rely on NOT creating connections more frequently on average than
2236          * 1MHz to ensure we don't use old connstamps when we reboot. */
2237         do_gettimeofday(&tv);
2238         kgnilnd_data.kgn_connstamp =
2239                  kgnilnd_data.kgn_peerstamp =
2240                         (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
2241
2242         init_rwsem(&kgnilnd_data.kgn_net_rw_sem);
2243
2244         for (i = 0; i < GNILND_MAXDEVS; i++) {
2245                 kgn_device_t  *dev = &kgnilnd_data.kgn_devices[i];
2246
2247                 dev->gnd_id = i;
2248                 INIT_LIST_HEAD(&dev->gnd_ready_conns);
2249                 INIT_LIST_HEAD(&dev->gnd_map_tx);
2250                 INIT_LIST_HEAD(&dev->gnd_fma_buffs);
2251                 mutex_init(&dev->gnd_cq_mutex);
2252                 mutex_init(&dev->gnd_fmablk_mutex);
2253                 spin_lock_init(&dev->gnd_fmablk_lock);
2254                 init_waitqueue_head(&dev->gnd_waitq);
2255                 init_waitqueue_head(&dev->gnd_dgram_waitq);
2256                 init_waitqueue_head(&dev->gnd_dgping_waitq);
2257                 spin_lock_init(&dev->gnd_lock);
2258                 INIT_LIST_HEAD(&dev->gnd_map_list);
2259                 spin_lock_init(&dev->gnd_map_lock);
2260                 atomic_set(&dev->gnd_nfmablk, 0);
2261                 atomic_set(&dev->gnd_fmablk_vers, 1);
2262                 atomic_set(&dev->gnd_neps, 0);
2263                 atomic_set(&dev->gnd_canceled_dgrams, 0);
2264                 INIT_LIST_HEAD(&dev->gnd_connd_peers);
2265                 spin_lock_init(&dev->gnd_connd_lock);
2266                 spin_lock_init(&dev->gnd_dgram_lock);
2267                 spin_lock_init(&dev->gnd_rdmaq_lock);
2268                 INIT_LIST_HEAD(&dev->gnd_rdmaq);
2269                 init_rwsem(&dev->gnd_conn_sem);
2270
2271                 /* alloc & setup nid based dgram table */
2272                 LIBCFS_ALLOC(dev->gnd_dgrams,
2273                             sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2274
2275                 if (dev->gnd_dgrams == NULL)
2276                         GOTO(failed, rc = -ENOMEM);
2277
                /* use a separate index - reusing i would clobber the
                 * device loop counter above */
                for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++) {
                        INIT_LIST_HEAD(&dev->gnd_dgrams[j]);
                }
2281                 atomic_set(&dev->gnd_ndgrams, 0);
2282                 atomic_set(&dev->gnd_nwcdgrams, 0);
2283                 /* setup timer for RDMAQ processing */
2284                 setup_timer(&dev->gnd_rdmaq_timer, kgnilnd_schedule_device_timer,
2285                             (unsigned long)dev);
2286
2287                 /* setup timer for mapping processing */
2288                 setup_timer(&dev->gnd_map_timer, kgnilnd_schedule_device_timer,
2289                             (unsigned long)dev);
2290
2291         }
2292
2293         /* CQID 0 isn't allowed, set to MAX_MSG_ID - 1 to check for conflicts early */
2294         kgnilnd_data.kgn_next_cqid = GNILND_MAX_MSG_ID - 1;
2295         kgnilnd_data.kgn_new_min_timeout = *kgnilnd_tunables.kgn_timeout;
2296         init_waitqueue_head(&kgnilnd_data.kgn_reaper_waitq);
2297         init_waitqueue_head(&kgnilnd_data.kgn_ruhroh_waitq);
2298         spin_lock_init(&kgnilnd_data.kgn_reaper_lock);
2299
2300         mutex_init(&kgnilnd_data.kgn_quiesce_mutex);
2301         atomic_set(&kgnilnd_data.kgn_nquiesce, 0);
2302         atomic_set(&kgnilnd_data.kgn_npending_conns, 0);
2303         atomic_set(&kgnilnd_data.kgn_npending_unlink, 0);
2304         atomic_set(&kgnilnd_data.kgn_npending_detach, 0);
2305         atomic_set(&kgnilnd_data.kgn_rev_offset, 0);
2306         atomic_set(&kgnilnd_data.kgn_rev_length, 0);
2307         atomic_set(&kgnilnd_data.kgn_rev_copy_buff, 0);
2308
2309         /* OK to call kgnilnd_api_shutdown() to cleanup now */
2310         kgnilnd_data.kgn_init = GNILND_INIT_DATA;
2311         try_module_get(THIS_MODULE);
2312
2313         rwlock_init(&kgnilnd_data.kgn_peer_conn_lock);
2314
2315         LIBCFS_ALLOC(kgnilnd_data.kgn_peers,
2316                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2317
2318         if (kgnilnd_data.kgn_peers == NULL)
2319                 GOTO(failed, rc = -ENOMEM);
2320
2321         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2322                 INIT_LIST_HEAD(&kgnilnd_data.kgn_peers[i]);
2323         }
2324
2325         LIBCFS_ALLOC(kgnilnd_data.kgn_conns,
2326                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2327
2328         if (kgnilnd_data.kgn_conns == NULL)
2329                 GOTO(failed, rc = -ENOMEM);
2330
2331         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2332                 INIT_LIST_HEAD(&kgnilnd_data.kgn_conns[i]);
2333         }
2334
2335         LIBCFS_ALLOC(kgnilnd_data.kgn_nets,
2336                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_net_hash_size);
2337
2338         if (kgnilnd_data.kgn_nets == NULL)
2339                 GOTO(failed, rc = -ENOMEM);
2340
2341         for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
2342                 INIT_LIST_HEAD(&kgnilnd_data.kgn_nets[i]);
2343         }
2344
2345         kgnilnd_data.kgn_mbox_cache =
2346                 kmem_cache_create("kgn_mbox_block", KMALLOC_MAX_SIZE, 0,
2347                                   SLAB_HWCACHE_ALIGN, NULL);
2348         if (kgnilnd_data.kgn_mbox_cache == NULL) {
2349                 CERROR("Can't create slab for physical mbox blocks\n");
2350                 GOTO(failed, rc = -ENOMEM);
2351         }
2352
2353         kgnilnd_data.kgn_rx_cache =
2354                 kmem_cache_create("kgn_rx_t", sizeof(kgn_rx_t), 0, 0, NULL);
2355         if (kgnilnd_data.kgn_rx_cache == NULL) {
2356                 CERROR("Can't create slab for kgn_rx_t descriptors\n");
2357                 GOTO(failed, rc = -ENOMEM);
2358         }
2359
2360         kgnilnd_data.kgn_tx_cache =
2361                 kmem_cache_create("kgn_tx_t", sizeof(kgn_tx_t), 0, 0, NULL);
2362         if (kgnilnd_data.kgn_tx_cache == NULL) {
2363                 CERROR("Can't create slab for kgn_tx_t\n");
2364                 GOTO(failed, rc = -ENOMEM);
2365         }
2366
2367         kgnilnd_data.kgn_tx_phys_cache =
2368                 kmem_cache_create("kgn_tx_phys",
2369                                    LNET_MAX_IOV * sizeof(gni_mem_segment_t),
2370                                    0, 0, NULL);
2371         if (kgnilnd_data.kgn_tx_phys_cache == NULL) {
2372                 CERROR("Can't create slab for kgn_tx_phys\n");
2373                 GOTO(failed, rc = -ENOMEM);
2374         }
2375
2376         kgnilnd_data.kgn_dgram_cache =
2377                 kmem_cache_create("kgn_dgram_t", sizeof(kgn_dgram_t), 0, 0, NULL);
2378         if (kgnilnd_data.kgn_dgram_cache == NULL) {
2379                 CERROR("Can't create slab for outgoing datagrams\n");
2380                 GOTO(failed, rc = -ENOMEM);
2381         }
2382
2383         /* allocate a MAX_IOV array of page pointers for each cpu */
        kgnilnd_data.kgn_cksum_map_pages =
                kcalloc(num_possible_cpus(), sizeof(struct page *),
                        GFP_KERNEL);
        if (kgnilnd_data.kgn_cksum_map_pages == NULL) {
                CERROR("Can't allocate vmap cksum pages\n");
                GOTO(failed, rc = -ENOMEM);
        }
        kgnilnd_data.kgn_cksum_npages = num_possible_cpus();
2393
2394         for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
2395                 kgnilnd_data.kgn_cksum_map_pages[i] = kmalloc(LNET_MAX_IOV * sizeof (struct page *),
2396                                                               GFP_KERNEL);
2397                 if (kgnilnd_data.kgn_cksum_map_pages[i] == NULL) {
2398                         CERROR("Can't allocate vmap cksum pages for cpu %d\n", i);
2399                         GOTO(failed, rc = -ENOMEM);
2400                 }
2401         }
2402
2403         LASSERT(kgnilnd_data.kgn_ndevs == 0);
2404
2405         /* Use all available GNI devices */
2406         for (i = 0; i < GNILND_MAXDEVS; i++) {
2407                 dev = &kgnilnd_data.kgn_devices[kgnilnd_data.kgn_ndevs];
2408
2409                 rc = kgnilnd_dev_init(dev);
2410                 if (rc == 0) {
2411                         /* Increment here so base_shutdown cleans it up */
2412                         kgnilnd_data.kgn_ndevs++;
2413
2414                         rc = kgnilnd_allocate_phys_fmablk(dev);
2415                         if (rc)
2416                                 GOTO(failed, rc);
2417                 }
2418         }
2419
2420         if (kgnilnd_data.kgn_ndevs == 0) {
2421                 CERROR("Can't initialise any GNI devices\n");
2422                 GOTO(failed, rc = -ENODEV);
2423         }
2424
2425         rc = kgnilnd_thread_start(kgnilnd_reaper, NULL, "kgnilnd_rpr", 0);
2426         if (rc != 0) {
2427                 CERROR("Can't spawn gnilnd reaper: %d\n", rc);
2428                 GOTO(failed, rc);
2429         }
2430
2431         rc = kgnilnd_start_rca_thread();
2432         if (rc != 0) {
2433                 CERROR("Can't spawn gnilnd rca: %d\n", rc);
2434                 GOTO(failed, rc);
2435         }
2436
2437         /*
2438          * Start ruhroh thread.  We can't use kgnilnd_thread_start() because
2439          * we don't want this thread included in kgnilnd_data.kgn_nthreads
2440          * count.  This thread controls quiesce, so it mustn't
2441          * quiesce itself.
2442          */
2443         thrd = kthread_run(kgnilnd_ruhroh_thread, NULL, "%s_%02d", "kgnilnd_rr", 0);
2444         if (IS_ERR(thrd)) {
2445                 rc = PTR_ERR(thrd);
2446                 CERROR("Can't spawn gnilnd ruhroh thread: %d\n", rc);
2447                 GOTO(failed, rc);
2448         }
2449
2450         /* threads will load balance across devs as they are available */
2451         if (*kgnilnd_tunables.kgn_thread_affinity) {
2452                 rc = kgnilnd_start_sd_threads();
2453                 if (rc != 0)
2454                         GOTO(failed, rc);
2455         } else {
2456                 for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
2457                         rc = kgnilnd_thread_start(kgnilnd_scheduler,
2458                                                   (void *)((long)i),
2459                                                   "kgnilnd_sd", i);
2460                         if (rc != 0) {
2461                                 CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
2462                                        i, rc);
2463                                 GOTO(failed, rc);
2464                         }
2465                 }
2466         }
2467
2468         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2469                 dev = &kgnilnd_data.kgn_devices[i];
2470                 rc = kgnilnd_thread_start(kgnilnd_dgram_mover, dev,
2471                                           "kgnilnd_dg", dev->gnd_id);
2472                 if (rc != 0) {
2473                         CERROR("Can't spawn gnilnd dgram_mover[%d]: %d\n",
2474                                dev->gnd_id, rc);
2475                         GOTO(failed, rc);
2476                 }
2477
2478                 rc = kgnilnd_thread_start(kgnilnd_dgram_waitq, dev,
2479                                           "kgnilnd_dgn", dev->gnd_id);
2480                 if (rc != 0) {
2481                         CERROR("Can't spawn gnilnd dgram_waitq[%d]: %d\n",
2482                                 dev->gnd_id, rc);
2483                         GOTO(failed, rc);
2484                 }
2485
2486                 rc = kgnilnd_setup_wildcard_dgram(dev);
2487
2488                 if (rc != 0) {
2489                         CERROR("Can't create wildcard dgrams[%d]: %d\n",
2490                                 dev->gnd_id, rc);
2491                         GOTO(failed, rc);
2492                 }
2493         }
2494
2495         /* flag everything initialised */
2496         kgnilnd_data.kgn_init = GNILND_INIT_ALL;
2497         /*****************************************************/
2498
2499         CDEBUG(D_MALLOC, "initial kmem %d\n", pkmem);
2500         RETURN(0);
2501
2502 failed:
2503         kgnilnd_base_shutdown();
2504         kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
2505         RETURN(rc);
2506 }
2507
2508 void
2509 kgnilnd_base_shutdown(void)
2510 {
2511         int                     i, j;
2512         ENTRY;
2513
2514         while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_PAUSE_SHUTDOWN, 1)) {};
2515
2516         kgnilnd_data.kgn_wc_kill = 1;
2517
2518         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2519                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2520                 kgnilnd_cancel_wc_dgrams(dev);
2521                 kgnilnd_cancel_dgrams(dev);
2522                 kgnilnd_del_conn_or_peer(NULL, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
2523                 kgnilnd_wait_for_canceled_dgrams(dev);
2524         }
2525
2526         /* We need to verify there are no conns left before we let the threads
2527          * shut down otherwise we could clean up the peers but still have
2528          * some outstanding conns due to orphaned datagram conns that are
2529          * being cleaned up.
2530          */
2531         i = 2;
2532         while (atomic_read(&kgnilnd_data.kgn_nconns) != 0) {
2533                 i++;
2534
2535                 for(j = 0; j < kgnilnd_data.kgn_ndevs; ++j) {
2536                         kgn_device_t *dev = &kgnilnd_data.kgn_devices[j];
2537                         kgnilnd_schedule_device(dev);
2538                 }
2539
                CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
                       "Waiting for conns to be cleaned up %d\n",
                       atomic_read(&kgnilnd_data.kgn_nconns));
2542                 set_current_state(TASK_UNINTERRUPTIBLE);
2543                 schedule_timeout(cfs_time_seconds(1));
2544         }
        /* Peer state all cleaned up BEFORE setting shutdown, so threads don't
         * have to worry about shutdown races.  NB connections may be created
         * while there are still active connds, but these will be temporary
         * since peer creation always fails after the listener has started to
         * shut down.
         * All peers should have been cleared out on the nets */
2551         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2552                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2553
2554         /* Wait for the ruhroh thread to shut down. */
2555         kgnilnd_data.kgn_ruhroh_shutdown = 1;
2556         wake_up(&kgnilnd_data.kgn_ruhroh_waitq);
2557         i = 2;
2558         while (kgnilnd_data.kgn_ruhroh_running != 0) {
2559                 i++;
2560                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2561                        "Waiting for ruhroh thread to terminate\n");
2562                 set_current_state(TASK_UNINTERRUPTIBLE);
2563                 schedule_timeout(cfs_time_seconds(1));
2564         }
2565
2566         /* Flag threads to terminate */
2567         kgnilnd_data.kgn_shutdown = 1;
2568
2569         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2570                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2571
2572                 /* should clear all the MDDs */
2573                 kgnilnd_unmap_fma_blocks(dev);
2574
2575                 kgnilnd_schedule_device(dev);
2576                 wake_up_all(&dev->gnd_dgram_waitq);
2577                 wake_up_all(&dev->gnd_dgping_waitq);
2578                 LASSERT(list_empty(&dev->gnd_connd_peers));
2579         }
2580
2581         spin_lock(&kgnilnd_data.kgn_reaper_lock);
2582         wake_up_all(&kgnilnd_data.kgn_reaper_waitq);
2583         spin_unlock(&kgnilnd_data.kgn_reaper_lock);
2584
2585         if (atomic_read(&kgnilnd_data.kgn_nthreads))
2586                 kgnilnd_wakeup_rca_thread();
2587
2588         /* Wait for threads to exit */
2589         i = 2;
2590         while (atomic_read(&kgnilnd_data.kgn_nthreads) != 0) {
2591                 i++;
2592                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* warn only on power-of-2 passes */
2593                        "Waiting for %d threads to terminate\n",
2594                        atomic_read(&kgnilnd_data.kgn_nthreads));
2595                 set_current_state(TASK_UNINTERRUPTIBLE);
2596                 schedule_timeout(cfs_time_seconds(1));
2597         }
2598
2599         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2600                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2601
2602         if (kgnilnd_data.kgn_peers != NULL) {
2603                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
2604                         LASSERT(list_empty(&kgnilnd_data.kgn_peers[i]));
2605
2606                 LIBCFS_FREE(kgnilnd_data.kgn_peers,
2607                             sizeof (struct list_head) *
2608                             *kgnilnd_tunables.kgn_peer_hash_size);
2609         }
2610
2611         down_write(&kgnilnd_data.kgn_net_rw_sem);
2612         if (kgnilnd_data.kgn_nets != NULL) {
2613                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++)
2614                         LASSERT(list_empty(&kgnilnd_data.kgn_nets[i]));
2615
2616                 LIBCFS_FREE(kgnilnd_data.kgn_nets,
2617                             sizeof (struct list_head) *
2618                             *kgnilnd_tunables.kgn_net_hash_size);
2619         }
2620         up_write(&kgnilnd_data.kgn_net_rw_sem);
2621
2622         LASSERTF(atomic_read(&kgnilnd_data.kgn_nconns) == 0,
2623                 "conns left %d\n", atomic_read(&kgnilnd_data.kgn_nconns));
2624
2625         if (kgnilnd_data.kgn_conns != NULL) {
2626                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
2627                         LASSERT(list_empty(&kgnilnd_data.kgn_conns[i]));
2628
2629                 LIBCFS_FREE(kgnilnd_data.kgn_conns,
2630                             sizeof (struct list_head) *
2631                             *kgnilnd_tunables.kgn_peer_hash_size);
2632         }
2633
2634         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2635                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2636                 kgnilnd_dev_fini(dev);
2637
2638                 LASSERTF(atomic_read(&dev->gnd_ndgrams) == 0,
2639                         "dgrams left %d\n", atomic_read(&dev->gnd_ndgrams));
2640
2641                 if (dev->gnd_dgrams != NULL) {
2642                         for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++)
2643                                 LASSERT(list_empty(&dev->gnd_dgrams[j]));
2644
2645                         LIBCFS_FREE(dev->gnd_dgrams,
2646                                     sizeof (struct list_head) *
2647                                     *kgnilnd_tunables.kgn_peer_hash_size);
2648                 }
2649
2650                 kgnilnd_free_phys_fmablk(dev);
2651         }
2652
2653         if (kgnilnd_data.kgn_mbox_cache != NULL)
2654                 kmem_cache_destroy(kgnilnd_data.kgn_mbox_cache);
2655
2656         if (kgnilnd_data.kgn_rx_cache != NULL)
2657                 kmem_cache_destroy(kgnilnd_data.kgn_rx_cache);
2658
2659         if (kgnilnd_data.kgn_tx_cache != NULL)
2660                 kmem_cache_destroy(kgnilnd_data.kgn_tx_cache);
2661
2662         if (kgnilnd_data.kgn_tx_phys_cache != NULL)
2663                 kmem_cache_destroy(kgnilnd_data.kgn_tx_phys_cache);
2664
2665         if (kgnilnd_data.kgn_dgram_cache != NULL)
2666                 kmem_cache_destroy(kgnilnd_data.kgn_dgram_cache);
2667
2668         if (kgnilnd_data.kgn_cksum_map_pages != NULL) {
2669                 for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
2670                         if (kgnilnd_data.kgn_cksum_map_pages[i] != NULL) {
2671                                 kfree(kgnilnd_data.kgn_cksum_map_pages[i]);
2672                         }
2673                 }
2674                 kfree(kgnilnd_data.kgn_cksum_map_pages);
2675         }
2676
2677         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2678                atomic_read(&libcfs_kmemory));
2679
2680         kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
2681         module_put(THIS_MODULE);
2682
2683         EXIT;
2684 }
2685
2686 int
2687 kgnilnd_startup(lnet_ni_t *ni)
2688 {
2689         int               rc, devno;
2690         kgn_net_t        *net;
2691         ENTRY;
2692
2693         LASSERTF(ni->ni_lnd == &the_kgnilnd,
2694                 "bad LND 0x%p != the_kgnilnd @ 0x%p\n",
2695                 ni->ni_lnd, &the_kgnilnd);
2696
2697         if (kgnilnd_data.kgn_init == GNILND_INIT_NOTHING) {
2698                 rc = kgnilnd_base_startup();
2699                 if (rc != 0)
2700                         RETURN(rc);
2701         }
2702
2703         /* Serialize with shutdown. */
2704         mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
2705
2706         LIBCFS_ALLOC(net, sizeof(*net));
2707         if (net == NULL) {
2708                 CERROR("could not allocate net for new interface instance\n");
2709                 /* no need to clean up the CDM... */
2710                 GOTO(failed, rc = -ENOMEM);
2711         }
2712         INIT_LIST_HEAD(&net->gnn_list);
2713         ni->ni_data = net;
2714         net->gnn_ni = ni;
2715         ni->ni_maxtxcredits = *kgnilnd_tunables.kgn_credits;
2716         ni->ni_peertxcredits = *kgnilnd_tunables.kgn_peer_credits;
2717
2718         if (*kgnilnd_tunables.kgn_peer_health) {
2719                 int     fudge;
2720                 int     timeout;
2721                 /* give this a bit of leeway - we don't have a hard timeout
2722                  * as we only check timeouts periodically - see comment in kgnilnd_reaper */
2723                 fudge = (GNILND_TO2KA(*kgnilnd_tunables.kgn_timeout) / GNILND_REAPER_NCHECKS);
2724                 timeout = *kgnilnd_tunables.kgn_timeout + fudge;
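                /* worked example (illustrative values only): if kgn_timeout
                 * were 60s and GNILND_TO2KA(60) / GNILND_REAPER_NCHECKS came
                 * to 30 / 4, fudge would be 7 and the floor for a user-set
                 * peer_timeout would be 67s */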
2725
2726                 if (*kgnilnd_tunables.kgn_peer_timeout >= timeout)
2727                         ni->ni_peertimeout = *kgnilnd_tunables.kgn_peer_timeout;
2728                 else if (*kgnilnd_tunables.kgn_peer_timeout > -1) {
2729                         LCONSOLE_ERROR("Peer_timeout is set to %d but needs to be >= %d\n",
2730                                         *kgnilnd_tunables.kgn_peer_timeout,
2731                                         timeout);
2732                         ni->ni_data = NULL;
2733                         LIBCFS_FREE(net, sizeof(*net));
2734                         GOTO(failed, rc = -EINVAL);
2735                 } else
2736                         ni->ni_peertimeout = timeout;
2737
2738                 LCONSOLE_INFO("Enabling LNet peer health for gnilnd, timeout %ds\n",
2739                               ni->ni_peertimeout);
2740         }
2741
2742         atomic_set(&net->gnn_refcount, 1);
2743
2744         /* if we have multiple devices, spread the nets around */
2745         net->gnn_netnum = LNET_NETNUM(LNET_NIDNET(ni->ni_nid));
2746
2747         devno = LNET_NIDNET(ni->ni_nid) % GNILND_MAXDEVS;
2748         net->gnn_dev = &kgnilnd_data.kgn_devices[devno];
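        /* e.g. (hypothetical): with GNILND_MAXDEVS == 2, nets whose
         * LNET_NIDNET() value is even would map to device 0 and odd
         * values to device 1 */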
2749
2750         /* allocate a 'dummy' cdm for datagram use. We can only have a single
2751          * datagram between a nid:inst_id and nid2:inst_id. The fake cdm
2752          * gives us an additional inst_id to use, allowing the datagrams to flow
2753          * like rivers of honey and beer */
2754
2755         /* the instance id for the cdm is the NETNUM offset by MAXDEVS -
2756          * ensuring we'll have a unique id */
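        /* e.g. (hypothetical values): netnum 3 offset by GNILND_MAXDEVS == 2
         * would yield inst_id 5, keeping it clear of the device instance
         * ids 0 .. GNILND_MAXDEVS-1 */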
2757
2758
2759         ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), net->gnn_dev->gnd_nid);
2760         CDEBUG(D_NET, "adding net %p nid=%s on dev %d\n",
2761                 net, libcfs_nid2str(ni->ni_nid), net->gnn_dev->gnd_id);
2762         /* until the gnn_list is set, we need to clean up ourselves as
2763          * kgnilnd_shutdown will just get confused */
2764
2765         down_write(&kgnilnd_data.kgn_net_rw_sem);
2766         list_add_tail(&net->gnn_list, kgnilnd_netnum2netlist(net->gnn_netnum));
2767         up_write(&kgnilnd_data.kgn_net_rw_sem);
2768
2769         /* we need a separate thread to call probe_wait_by_id until
2770          * we get a function callback notifier from kgni */
2771         mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
2772         RETURN(0);
2773 failed:
2774         mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
2775         kgnilnd_shutdown(ni);
2776         RETURN(rc);
2777 }
2778
2779 void
2780 kgnilnd_shutdown(lnet_ni_t *ni)
2781 {
2782         kgn_net_t     *net = ni->ni_data;
2783         int           i;
2784         int           rc;
2785         ENTRY;
2786
2787         CFS_RACE(CFS_FAIL_GNI_SR_DOWN_RACE);
2788
2789         LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_ALL,
2790                 "init %d\n", kgnilnd_data.kgn_init);
2791
2792         /* Serialize with startup. */
2793         mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
2794         CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
2795                atomic_read(&libcfs_kmemory));
2796
2797         if (net == NULL) {
2798                 CERROR("got NULL net for ni %p\n", ni);
2799                 GOTO(out, rc = -EINVAL);
2800         }
2801
2802         LASSERTF(ni == net->gnn_ni,
2803                 "ni %p gnn_ni %p\n", ni, net->gnn_ni);
2804
2805         ni->ni_data = NULL;
2806
2807         LASSERT(!net->gnn_shutdown);
2808         LASSERTF(atomic_read(&net->gnn_refcount) != 0,
2809                 "net %p refcount %d\n",
2810                  net, atomic_read(&net->gnn_refcount));
2811
2812         if (!list_empty(&net->gnn_list)) {
2813                 /* serialize with peer creation */
2814                 down_write(&kgnilnd_data.kgn_net_rw_sem);
2815                 net->gnn_shutdown = 1;
2816                 up_write(&kgnilnd_data.kgn_net_rw_sem);
2817
2818                 kgnilnd_cancel_net_dgrams(net);
2819
2820                 kgnilnd_del_conn_or_peer(net, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
2821
2822                 /* if we are quiesced, we need to wake up - those threads must be
2823                  * alive to release peers, etc */
2824                 if (GNILND_IS_QUIESCED) {
2825                         set_mb(kgnilnd_data.kgn_quiesce_trigger, GNILND_QUIESCE_IDLE);
2826                         kgnilnd_quiesce_wait("shutdown");
2827                 }
2828
2829                 kgnilnd_wait_for_canceled_dgrams(net->gnn_dev);
2830
2831                 /* We wait until the net's refcount drops to 1, then release the
2832                  * final ref, which is ours; this makes sure everything else is
2833                  * done before we free the net.
2834                  */
2835                 i = 4;
2836                 while (atomic_read(&net->gnn_refcount) != 1) {
2837                         i++;
2838                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2839                                 "Waiting for %d references to clear on net %d\n",
2840                                 atomic_read(&net->gnn_refcount),
2841                                 net->gnn_netnum);
2842                         set_current_state(TASK_UNINTERRUPTIBLE);
2843                         schedule_timeout(cfs_time_seconds(1));
2844                 }
2845
2846                 /* release ref from kgnilnd_startup */
2847                 kgnilnd_net_decref(net);
2848                 /* serialize with reaper and conn_task looping */
2849                 down_write(&kgnilnd_data.kgn_net_rw_sem);
2850                 list_del_init(&net->gnn_list);
2851                 up_write(&kgnilnd_data.kgn_net_rw_sem);
2852
2853         }
2854
2855         /* not locking, this can't race with writers */
2856         LASSERTF(atomic_read(&net->gnn_refcount) == 0,
2857                 "net %p refcount %d\n",
2858                  net, atomic_read(&net->gnn_refcount));
2859         LIBCFS_FREE(net, sizeof(*net));
2860
2861 out:
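        /* if every hash bucket is empty, this was the last net, so tear
         * down the base state as well; any non-empty bucket means another
         * net is still live and the base state must stay up */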
2862         down_read(&kgnilnd_data.kgn_net_rw_sem);
2863         for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
2864                 if (!list_empty(&kgnilnd_data.kgn_nets[i])) {
2865                         up_read(&kgnilnd_data.kgn_net_rw_sem);
2866                         break;
2867                 }
2868
2869                 if (i == *kgnilnd_tunables.kgn_net_hash_size - 1) {
2870                         up_read(&kgnilnd_data.kgn_net_rw_sem);
2871                         kgnilnd_base_shutdown();
2872                 }
2873         }
2874         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2875                atomic_read(&libcfs_kmemory));
2876
2877         mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
2878         EXIT;
2879 }
2880
2881 static void __exit kgnilnd_exit(void)
2882 {
2883         lnet_unregister_lnd(&the_kgnilnd);
2884         kgnilnd_proc_fini();
2885         kgnilnd_remove_sysctl();
2886 }
2887
2888 static int __init kgnilnd_init(void)
2889 {
2890         int    rc;
2891
2892         rc = kgnilnd_tunables_init();
2893         if (rc != 0)
2894                 return rc;
2895
2896         printk(KERN_INFO "Lustre: kgnilnd build version: "KGNILND_BUILD_REV"\n");
2897
2898         kgnilnd_insert_sysctl();
2899         kgnilnd_proc_init();
2900
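        /* register with LNet last: once registered, LNet may invoke
         * kgnilnd_startup() at any time, so tunables, sysctl and proc
         * entries must already be in place */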
2901         lnet_register_lnd(&the_kgnilnd);
2902
2903         return 0;
2904 }
2905
2906 MODULE_AUTHOR("Cray, Inc. <nic@cray.com>");
2907 MODULE_DESCRIPTION("Gemini LNet Network Driver");
2908 MODULE_VERSION(KGNILND_BUILD_REV);
2909 MODULE_LICENSE("GPL");
2910
2911 module_init(kgnilnd_init);
2912 module_exit(kgnilnd_exit);