d1a73a7fc62f1c8e3cbb3703f98d8e94cbe272f6
[fs/lustre-release.git] lnet/klnds/gnilnd/gnilnd.c
1 /*
2  * Copyright (C) 2012 Cray, Inc.
3  *
4  * Copyright (c) 2013, 2017, Intel Corporation.
5  *
6  *   Author: Nic Henke <nic@cray.com>
7  *   Author: James Shimek <jshimek@cray.com>
8  *
9  *   This file is part of Lustre, http://www.lustre.org.
10  *
11  *   Lustre is free software; you can redistribute it and/or
12  *   modify it under the terms of version 2 of the GNU General Public
13  *   License as published by the Free Software Foundation.
14  *
15  *   Lustre is distributed in the hope that it will be useful,
16  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
17  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  *   GNU General Public License for more details.
19  *
20  *   You should have received a copy of the GNU General Public License
21  *   along with Lustre; if not, write to the Free Software
22  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23  *
24  */
25 #include "gnilnd.h"
26
27 /* Primary entry points from LNET.  There are no guarantees against reentrance. */
28 struct lnet_lnd the_kgnilnd = {
29         .lnd_type       = GNILND,
30         .lnd_startup    = kgnilnd_startup,
31         .lnd_shutdown   = kgnilnd_shutdown,
32         .lnd_ctl        = kgnilnd_ctl,
33         .lnd_send       = kgnilnd_send,
34         .lnd_recv       = kgnilnd_recv,
35         .lnd_eager_recv = kgnilnd_eager_recv,
36         .lnd_query      = kgnilnd_query,
37 };
38
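/* Module-wide state shared by all gnilnd devices, peers, conns and threads */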
39 kgn_data_t      kgnilnd_data;
40
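/* Wrapper around kthread_run() that also bumps the module thread count.
 * Illustrative usage (a sketch only - the arguments shown are hypothetical):
 *
 *     rc = kgnilnd_thread_start(kgnilnd_scheduler, (void *)0L, "kgnilnd_sd", 0);
 *     if (rc != 0)
 *             CERROR("Can't spawn scheduler thread: %d\n", rc);
 *
 * kgn_nthreads is only incremented when the thread was actually created. */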
41 int
42 kgnilnd_thread_start(int(*fn)(void *arg), void *arg, char *name, int id)
43 {
44         struct task_struct *thrd;
45
46         thrd = kthread_run(fn, arg, "%s_%02d", name, id);
47         if (IS_ERR(thrd))
48                 return PTR_ERR(thrd);
49
50         atomic_inc(&kgnilnd_data.kgn_nthreads);
51         return 0;
52 }
53
54 /* bind scheduler threads to cpus */
55 int
56 kgnilnd_start_sd_threads(void)
57 {
58         int cpu;
59         int i = 0;
60         struct task_struct *task;
61
62         for_each_online_cpu(cpu) {
63                 /* don't bind to cpu 0 - all interrupts are processed here */
64                 if (cpu == 0)
65                         continue;
66
67                 task = kthread_create(kgnilnd_scheduler, (void *)((long)i),
68                                       "%s_%02d", "kgnilnd_sd", i);
69                 if (!IS_ERR(task)) {
70                         kthread_bind(task, cpu);
71                         wake_up_process(task);
72                 } else {
73                         CERROR("Can't spawn gnilnd scheduler[%d] %ld\n", i,
74                                 PTR_ERR(task));
75                         return PTR_ERR(task);
76                 }
77                 atomic_inc(&kgnilnd_data.kgn_nthreads);
78
79                 if (++i >= *kgnilnd_tunables.kgn_sched_threads) {
80                         break;
81                 }
82         }
83
84         return 0;
85 }
86
87 /* needs write_lock on kgn_peer_conn_lock */
88 int
89 kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
90 {
91         kgn_conn_t         *conn;
92         struct list_head   *ctmp, *cnxt;
93         int                 loopback;
94         int                 count = 0;
95
96         loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;
97
98         list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
99                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
100
101                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
102                         continue;
103
104                 if (conn == newconn)
105                         continue;
106
107                 if (conn->gnc_device != newconn->gnc_device)
108                         continue;
109
110                 /* This is a two connection loopback - one talking to the other */
111                 if (loopback &&
112                     newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
113                     newconn->gnc_peer_connstamp == conn->gnc_my_connstamp) {
114                         CDEBUG(D_NET, "skipping prune of %p, "
115                                 "loopback and matching stamps"
116                                 " connstamp %llu(%llu)"
117                                 " peerstamp %llu(%llu)\n",
118                                 conn, newconn->gnc_my_connstamp,
119                                 conn->gnc_peer_connstamp,
120                                 newconn->gnc_peer_connstamp,
121                                 conn->gnc_my_connstamp);
122                         continue;
123                 }
124
125                 if (conn->gnc_peerstamp != newconn->gnc_peerstamp) {
126                         LASSERTF(conn->gnc_peerstamp < newconn->gnc_peerstamp,
127                                 "conn 0x%p peerstamp %llu >= "
128                                 "newconn 0x%p peerstamp %llu\n",
129                                 conn, conn->gnc_peerstamp,
130                                 newconn, newconn->gnc_peerstamp);
131
132                         CDEBUG(D_NET, "Closing stale conn nid: %s "
133                                " peerstamp:%#llx(%#llx)\n",
134                                libcfs_nid2str(peer->gnp_nid),
135                                conn->gnc_peerstamp, newconn->gnc_peerstamp);
136                 } else {
137
138                         LASSERTF(conn->gnc_peer_connstamp < newconn->gnc_peer_connstamp,
139                                 "conn 0x%p peer_connstamp %llu >= "
140                                 "newconn 0x%p peer_connstamp %llu\n",
141                                 conn, conn->gnc_peer_connstamp,
142                                 newconn, newconn->gnc_peer_connstamp);
143
144                         CDEBUG(D_NET, "Closing stale conn nid: %s"
145                                " connstamp:%llu(%llu)\n",
146                                libcfs_nid2str(peer->gnp_nid),
147                                conn->gnc_peer_connstamp, newconn->gnc_peer_connstamp);
148                 }
149
150                 count++;
151                 kgnilnd_close_conn_locked(conn, -ESTALE);
152         }
153
154         if (count != 0) {
155                 CWARN("Closed %d stale conns to %s\n", count, libcfs_nid2str(peer->gnp_nid));
156         }
157
158         RETURN(count);
159 }
160
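/* Decide whether 'newconn' duplicates an existing conn on this peer: returns
 * 0 if it is not a dup, 1 if it carries an older peerstamp, 2 if it carries an
 * older peer connstamp, and 3 if the connstamps are identical (the peer is not
 * playing the game).  Called with kgn_peer_conn_lock held. */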
161 int
162 kgnilnd_conn_isdup_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
163 {
164         kgn_conn_t       *conn;
165         struct list_head *tmp;
166         int               loopback;
167         ENTRY;
168
169         loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;
170
171         list_for_each(tmp, &peer->gnp_conns) {
172                 conn = list_entry(tmp, kgn_conn_t, gnc_list);
173                 CDEBUG(D_NET, "checking conn 0x%p for peer %s"
174                         " lo %d new %llu existing %llu"
175                         " new peer %llu existing peer %llu"
176                         " new dev %p existing dev %p\n",
177                         conn, libcfs_nid2str(peer->gnp_nid),
178                         loopback,
179                         newconn->gnc_peerstamp, conn->gnc_peerstamp,
180                         newconn->gnc_peer_connstamp, conn->gnc_peer_connstamp,
181                         newconn->gnc_device, conn->gnc_device);
182
183                 /* conn is in the process of closing */
184                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
185                         continue;
186
187                 /* 'newconn' is from an earlier version of 'peer'!!! */
188                 if (newconn->gnc_peerstamp < conn->gnc_peerstamp)
189                         RETURN(1);
190
191                 /* 'conn' is from an earlier version of 'peer': it will be
192                  * removed when we cull stale conns later on... */
193                 if (newconn->gnc_peerstamp > conn->gnc_peerstamp)
194                         continue;
195
196                 /* Different devices are OK */
197                 if (conn->gnc_device != newconn->gnc_device)
198                         continue;
199
200                 /* It's me connecting to myself */
201                 if (loopback &&
202                     newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
203                     newconn->gnc_peer_connstamp == conn->gnc_my_connstamp)
204                         continue;
205
206                 /* 'newconn' is an earlier connection from 'peer'!!! */
207                 if (newconn->gnc_peer_connstamp < conn->gnc_peer_connstamp)
208                         RETURN(2);
209
210                 /* 'conn' is an earlier connection from 'peer': it will be
211                  * removed when we cull stale conns later on... */
212                 if (newconn->gnc_peer_connstamp > conn->gnc_peer_connstamp)
213                         continue;
214
215                 /* 'newconn' has the SAME connection stamp; 'peer' isn't
216                  * playing the game... */
217                 RETURN(3);
218         }
219
220         RETURN(0);
221 }
222
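/* Allocate and initialize a new kgn_conn_t on device 'dev': sets up the TX
 * reference table, list heads and locks, assigns a unique CQ id and connstamp,
 * and creates the GNI endpoint.  Returns 0 and sets *connp on success, or a
 * negative errno (-E2BIG, -ENOMEM, -ENETDOWN) on failure. */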
223 int
224 kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
225 {
226         kgn_conn_t      *conn;
227         gni_return_t    rrc;
228         int             rc = 0;
229
230         LASSERT (!in_interrupt());
231         atomic_inc(&kgnilnd_data.kgn_nconns);
232
233         /* divide by 2 to allow for complete reset and immediate reconnect */
234         if (atomic_read(&kgnilnd_data.kgn_nconns) >= GNILND_MAX_CQID/2) {
235                 CERROR("Too many conns are live: %d > %d\n",
236                         atomic_read(&kgnilnd_data.kgn_nconns), GNILND_MAX_CQID/2);
237                 atomic_dec(&kgnilnd_data.kgn_nconns);
238                 return -E2BIG;
239         }
240
241         LIBCFS_ALLOC(conn, sizeof(*conn));
242         if (conn == NULL) {
243                 atomic_dec(&kgnilnd_data.kgn_nconns);
244                 return -ENOMEM;
245         }
246
247         conn->gnc_tx_ref_table =
248                 kgnilnd_vzalloc(GNILND_MAX_MSG_ID * sizeof(void *));
249         if (conn->gnc_tx_ref_table == NULL) {
250                 CERROR("Can't allocate conn tx_ref_table\n");
251                 GOTO(failed, rc = -ENOMEM);
252         }
253
254         mutex_init(&conn->gnc_smsg_mutex);
255         mutex_init(&conn->gnc_rdma_mutex);
256         atomic_set(&conn->gnc_refcount, 1);
257         atomic_set(&conn->gnc_reaper_noop, 0);
258         atomic_set(&conn->gnc_sched_noop, 0);
259         atomic_set(&conn->gnc_tx_in_use, 0);
260         INIT_LIST_HEAD(&conn->gnc_list);
261         INIT_LIST_HEAD(&conn->gnc_hashlist);
262         INIT_LIST_HEAD(&conn->gnc_schedlist);
263         INIT_LIST_HEAD(&conn->gnc_fmaq);
264         INIT_LIST_HEAD(&conn->gnc_mdd_list);
265         INIT_LIST_HEAD(&conn->gnc_delaylist);
266         spin_lock_init(&conn->gnc_list_lock);
267         spin_lock_init(&conn->gnc_tx_lock);
268         conn->gnc_magic = GNILND_CONN_MAGIC;
269
270         /* set tx id to nearly the end to make sure we find wrapping
271          * issues soon */
272         conn->gnc_next_tx = (int) GNILND_MAX_MSG_ID - 10;
273
274         /* if this fails, we have conflicts and MAX_TX is too large */
275         BUILD_BUG_ON(GNILND_MAX_MSG_ID >= GNILND_MSGID_CLOSE);
276
277         /* get a new unique CQ id for this conn */
278         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
279         conn->gnc_my_connstamp = kgnilnd_data.kgn_connstamp++;
280         conn->gnc_cqid = kgnilnd_get_cqid_locked();
281         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
282
283         if (conn->gnc_cqid == 0) {
284                 CERROR("Could not allocate unique CQ ID for conn 0x%p\n", conn);
285                 GOTO(failed, rc = -E2BIG);
286         }
287
288         CDEBUG(D_NET, "alloc cqid %u for conn 0x%p\n",
289                 conn->gnc_cqid, conn);
290
291         /* needs to be set before gnc_ephandle to allow kgnilnd_destroy_conn_ep to
292          * check context */
293         conn->gnc_device = dev;
294
295         conn->gnc_timeout = MAX(*kgnilnd_tunables.kgn_timeout,
296                                 GNILND_MIN_TIMEOUT);
297         kgnilnd_update_reaper_timeout(conn->gnc_timeout);
298
299         /* this is the ep_handle for doing SMSG & BTE */
300         mutex_lock(&dev->gnd_cq_mutex);
301         rrc = kgnilnd_ep_create(dev->gnd_handle, dev->gnd_snd_fma_cqh,
302                                 &conn->gnc_ephandle);
303         mutex_unlock(&dev->gnd_cq_mutex);
304         if (rrc != GNI_RC_SUCCESS)
305                 GOTO(failed, rc = -ENETDOWN);
306
307         CDEBUG(D_NET, "created conn 0x%p ep_hndl 0x%p\n",
308                conn, conn->gnc_ephandle);
309
310         /* add ref for EP canceling */
311         kgnilnd_conn_addref(conn);
312         atomic_inc(&dev->gnd_neps);
313
314         *connp = conn;
315         return 0;
316
317 failed:
318         atomic_dec(&kgnilnd_data.kgn_nconns);
319         kgnilnd_vfree(conn->gnc_tx_ref_table,
320                       GNILND_MAX_MSG_ID * sizeof(void *));
321         LIBCFS_FREE(conn, sizeof(*conn));
322         return rc;
323 }
324
325 /* needs to be called with kgn_peer_conn_lock held (read or write) */
326 kgn_conn_t *
327 kgnilnd_find_conn_locked(kgn_peer_t *peer)
328 {
329         kgn_conn_t      *conn = NULL;
330
331         /* if we are in reset, this conn is going to die soon */
332         if (unlikely(kgnilnd_data.kgn_in_reset)) {
333                 RETURN(NULL);
334         }
335
336         /* just return the first ESTABLISHED connection */
337         list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
338                 /* kgnilnd_finish_connect doesn't put connections on the
339                  * peer list until they are actually established */
340                 LASSERTF(conn->gnc_state >= GNILND_CONN_ESTABLISHED,
341                         "found conn %p state %s on peer %p (%s)\n",
342                         conn, kgnilnd_conn_state2str(conn), peer,
343                         libcfs_nid2str(peer->gnp_nid));
344                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
345                         continue;
346
347                 RETURN(conn);
348         }
349         RETURN(NULL);
350 }
351
352 /* needs write_lock on kgn_peer_conn_lock held */
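/* Returns an existing ESTABLISHED conn if there is one; otherwise (unless a
 * reconnect is not yet due or a close is still in flight) queues the peer on
 * the device's connd list, kicks the dgram thread, and returns NULL. */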
353 kgn_conn_t *
354 kgnilnd_find_or_create_conn_locked(kgn_peer_t *peer)
355 {
356         kgn_device_t    *dev = peer->gnp_net->gnn_dev;
357         kgn_conn_t      *conn;
358
359         conn = kgnilnd_find_conn_locked(peer);
360
361         if (conn != NULL) {
362                 return conn;
363         }
364
365         /* if the peer was previously connecting, check if we should
366          * trigger another connection attempt yet. */
367         if (time_before(jiffies, peer->gnp_reconnect_time)) {
368                 return NULL;
369         }
370
371         /* This check prevents us from creating a new connection to a peer while we are
372          * still in the process of closing an existing connection to the peer.
373          */
374         list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
375                 if (conn->gnc_ephandle != NULL) {
376                         CDEBUG(D_NET, "Not connecting: non-NULL ephandle found for peer 0x%p->%s\n", peer,
377                                 libcfs_nid2str(peer->gnp_nid));
378                         return NULL;
379                 }
380         }
381
382         if (peer->gnp_connecting != GNILND_PEER_IDLE) {
383                 /* the peer is already in a connecting state - anything but
384                  * IDLE means we must not start a new connection attempt */
385                 return NULL;
386         }
387
388         CDEBUG(D_NET, "starting connect to %s\n",
389                 libcfs_nid2str(peer->gnp_nid));
390         peer->gnp_connecting = GNILND_PEER_CONNECT;
391         kgnilnd_peer_addref(peer); /* extra ref for connd */
392
393         spin_lock(&dev->gnd_connd_lock);
394         list_add_tail(&peer->gnp_connd_list, &dev->gnd_connd_peers);
395         spin_unlock(&dev->gnd_connd_lock);
396
397         kgnilnd_schedule_dgram(dev);
398         CDEBUG(D_NETTRACE, "scheduling new connect\n");
399
400         return NULL;
401 }
402
403 /* Caller is responsible for deciding if/when to call this */
404 void
405 kgnilnd_destroy_conn_ep(kgn_conn_t *conn)
406 {
407         gni_return_t    rrc;
408         gni_ep_handle_t tmp_ep;
409
410         /* only tear down the EP if we actually initialized it; swapping in
411          * NULL tells kgnilnd_destroy_conn to leave it alone */
412
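        /* xchg() atomically swaps gnc_ephandle with NULL, so only one caller
         * can win the EP and be responsible for destroying it */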
413         tmp_ep = xchg(&conn->gnc_ephandle, NULL);
414         if (tmp_ep != NULL) {
415                 /* we never re-use the EP, so unbind is not needed */
416                 mutex_lock(&conn->gnc_device->gnd_cq_mutex);
417                 rrc = kgnilnd_ep_destroy(tmp_ep);
418
419                 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
420
421                 /* if this fails, it could hork up kgni smsg retransmit and others
422                  * since we could free the SMSG mbox memory, etc. */
423                 LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d conn 0x%p ep 0x%p\n",
424                          rrc, conn, conn->gnc_ephandle);
425
426                 atomic_dec(&conn->gnc_device->gnd_neps);
427
428                 /* clear out the count added in kgnilnd_close_conn_locked;
429                  * the conn will have a peer once it hits finish_connect, which
430                  * is also the first spot where it is marked ESTABLISHED */
431                 if (conn->gnc_peer) {
432                         kgnilnd_admin_decref(conn->gnc_peer->gnp_dirty_eps);
433                 }
434
435                 /* drop ref for EP */
436                 kgnilnd_conn_decref(conn);
437         }
438 }
439
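/* Final teardown of a conn: by now the EP must already be destroyed (via
 * kgnilnd_destroy_conn_ep) and every list linkage must be empty.  Releases
 * the FMA mailbox if still held, drops the peer reference, frees the TX ref
 * table, and frees the conn itself. */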
440 void
441 kgnilnd_destroy_conn(kgn_conn_t *conn)
442 {
443         LASSERTF(!in_interrupt() &&
444                 !conn->gnc_scheduled &&
445                 !conn->gnc_in_purgatory &&
446                 conn->gnc_ephandle == NULL &&
447                 list_empty(&conn->gnc_list) &&
448                 list_empty(&conn->gnc_hashlist) &&
449                 list_empty(&conn->gnc_schedlist) &&
450                 list_empty(&conn->gnc_mdd_list) &&
451                 list_empty(&conn->gnc_delaylist) &&
452                 conn->gnc_magic == GNILND_CONN_MAGIC,
453                 "conn 0x%p->%s IRQ %d sched %d purg %d ep 0x%p Mg %d lists %d/%d/%d/%d/%d\n",
454                 conn, conn->gnc_peer ? libcfs_nid2str(conn->gnc_peer->gnp_nid)
455                                      : "<?>",
456                 !!in_interrupt(), conn->gnc_scheduled,
457                 conn->gnc_in_purgatory,
458                 conn->gnc_ephandle,
459                 conn->gnc_magic,
460                 list_empty(&conn->gnc_list),
461                 list_empty(&conn->gnc_hashlist),
462                 list_empty(&conn->gnc_schedlist),
463                 list_empty(&conn->gnc_mdd_list),
464                 list_empty(&conn->gnc_delaylist));
465
466         /* Tripping these is especially bad, as it means we have items on the
467          *  lists that didn't keep their refcount on the connection - or
468          *  somebody evil released their own */
469         LASSERTF(list_empty(&conn->gnc_fmaq) &&
470                  atomic_read(&conn->gnc_nlive_fma) == 0 &&
471                  atomic_read(&conn->gnc_nlive_rdma) == 0,
472                  "conn 0x%p fmaq %d@0x%p nfma %d nrdma %d\n",
473                  conn, kgnilnd_count_list(&conn->gnc_fmaq), &conn->gnc_fmaq,
474                  atomic_read(&conn->gnc_nlive_fma), atomic_read(&conn->gnc_nlive_rdma));
475
476         CDEBUG(D_NET, "destroying conn %p ephandle %p error %d\n",
477                 conn, conn->gnc_ephandle, conn->gnc_error);
478
479         /* We are about to free this memory; remove the magic value from the connection */
480         conn->gnc_magic = 0;
481
482         /* if there is an FMA blk left here, we'll tear it down */
483         if (conn->gnc_fma_blk) {
484                 if (conn->gnc_peer) {
485                         kgn_mbox_info_t *mbox;
486                         mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
487                         mbox->mbx_prev_nid = conn->gnc_peer->gnp_nid;
488                 }
489                 kgnilnd_release_mbox(conn, 0);
490         }
491
492         if (conn->gnc_peer != NULL)
493                 kgnilnd_peer_decref(conn->gnc_peer);
494
495         if (conn->gnc_tx_ref_table != NULL) {
496                 kgnilnd_vfree(conn->gnc_tx_ref_table,
497                               GNILND_MAX_MSG_ID * sizeof(void *));
498         }
499
500         LIBCFS_FREE(conn, sizeof(*conn));
501         atomic_dec(&kgnilnd_data.kgn_nconns);
502 }
503
504 /* peer_alive and peer_notify done in the style of the o2iblnd */
505 void
506 kgnilnd_peer_alive(kgn_peer_t *peer)
507 {
508         time64_t now = ktime_get_seconds();
509
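        /* set_mb() stores the timestamp and issues a full memory barrier so
         * other CPUs reading gnp_last_alive see the update promptly */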
510         set_mb(peer->gnp_last_alive, now);
511 }
512
513 void
514 kgnilnd_peer_notify(kgn_peer_t *peer, int error, int alive)
515 {
516         int                     tell_lnet = 0;
517         int                     nnets = 0;
518         int                     rc;
519         int                     i, j;
520         kgn_conn_t             *conn;
521         kgn_net_t             **nets;
522         kgn_net_t              *net;
523
524
525         if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DONT_NOTIFY))
526                 return;
527
528         /* Tell LNet we are giving up on this peer - but only
529          * if it isn't already reconnected or trying to reconnect */
530         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
531
532         /* use kgnilnd_find_conn_locked to avoid any conns in the process of being nuked
533          *
534          * don't tell LNet if we are in reset - we assume that everyone will be able to
535          * reconnect just fine
536          */
537         conn = kgnilnd_find_conn_locked(peer);
538
539         CDEBUG(D_NETTRACE, "peer 0x%p->%s ting %d conn 0x%p, rst %d error %d\n",
540                peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting, conn,
541                kgnilnd_data.kgn_in_reset, error);
542
543         if (((peer->gnp_connecting == GNILND_PEER_IDLE) &&
544             (conn == NULL) &&
545             (!kgnilnd_data.kgn_in_reset) &&
546             (!kgnilnd_conn_clean_errno(error))) || alive) {
547                 tell_lnet = 1;
548         }
549
550         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
551
552         if (!tell_lnet) {
553                 /* short circuit if we don't need to notify LNet */
554                 return;
555         }
556
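        /* grab the net semaphore, count and take a reference on every net, then
         * drop the semaphore before calling lnet_notify() for each of them */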
557         rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
558
559         if (rc) {
560                 /* only proceed if we got the semaphore; if the trylock fails,
561                  * LNet is in shutdown (or something else) and we skip notifying */
562
563                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
564                         list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
565                                 /* if gnn_shutdown set for any net shutdown is in progress just return */
566                                 if (net->gnn_shutdown) {
567                                         up_read(&kgnilnd_data.kgn_net_rw_sem);
568                                         return;
569                                 }
570                                 nnets++;
571                         }
572                 }
573
574                 if (nnets == 0) {
575                         /* shutdown in progress most likely */
576                         up_read(&kgnilnd_data.kgn_net_rw_sem);
577                         return;
578                 }
579
580                 LIBCFS_ALLOC(nets, nnets * sizeof(*nets));
581
582                 if (nets == NULL) {
583                         up_read(&kgnilnd_data.kgn_net_rw_sem);
584                         CERROR("Failed to allocate nets[%d]\n", nnets);
585                         return;
586                 }
587
588                 j = 0;
589                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
590                         list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
591                                 nets[j] = net;
592                                 kgnilnd_net_addref(net);
593                                 j++;
594                         }
595                 }
596                 up_read(&kgnilnd_data.kgn_net_rw_sem);
597
598                 for (i = 0; i < nnets; i++) {
599                         lnet_nid_t peer_nid;
600
601                         net = nets[i];
602
603                         peer_nid = kgnilnd_lnd2lnetnid(net->gnn_ni->ni_nid,
604                                                                  peer->gnp_nid);
605
606                         CDEBUG(D_NET, "peer 0x%p->%s last_alive %lld (%llds ago)\n",
607                                 peer, libcfs_nid2str(peer_nid), peer->gnp_last_alive,
608                                 ktime_get_seconds() - peer->gnp_last_alive);
609
610                         lnet_notify(net->gnn_ni, peer_nid, alive,
611                                     (alive) ? true : false,
612                                     peer->gnp_last_alive);
613
614                         kgnilnd_net_decref(net);
615                 }
616
617                 LIBCFS_FREE(nets, nnets * sizeof(*nets));
618         }
619 }
620
621 /* need write_lock on kgn_peer_conn_lock */
622 void
623 kgnilnd_close_conn_locked(kgn_conn_t *conn, int error)
624 {
625         kgn_peer_t        *peer = conn->gnc_peer;
626         ENTRY;
627
628         LASSERT(!in_interrupt());
629
630         /* store error for tx completion */
631         conn->gnc_error = error;
632         peer->gnp_last_errno = error;
633
634         /* use real error from peer if possible */
635         if (error == -ECONNRESET) {
636                 error = conn->gnc_peer_error;
637         }
638
639         /* if we NETERROR, make sure it is rate limited */
640         if (!kgnilnd_conn_clean_errno(error) &&
641             peer->gnp_state != GNILND_PEER_DOWN) {
642                 CNETERR("closing conn to %s: error %d\n",
643                        libcfs_nid2str(peer->gnp_nid), error);
644         } else {
645                 CDEBUG(D_NET, "closing conn to %s: error %d\n",
646                        libcfs_nid2str(peer->gnp_nid), error);
647         }
648
649         LASSERTF(conn->gnc_state == GNILND_CONN_ESTABLISHED,
650                 "conn %p to %s with bogus state %s\n", conn,
651                 libcfs_nid2str(conn->gnc_peer->gnp_nid),
652                 kgnilnd_conn_state2str(conn));
653         LASSERT(!list_empty(&conn->gnc_hashlist));
654         LASSERT(!list_empty(&conn->gnc_list));
655
656
657         /* mark peer count here so any place the EP gets destroyed will
658          * open up the peer count so that a new ESTABLISHED conn is then free
659          * to send new messages -- sending before the previous EPs are destroyed
660          * could end up with messages on the network for the old conn _after_
661          * the new conn and break the mbox safety protocol */
662         kgnilnd_admin_addref(conn->gnc_peer->gnp_dirty_eps);
663
664         /* Remove from conn hash table: no new callbacks */
665         list_del_init(&conn->gnc_hashlist);
666         kgnilnd_data.kgn_conn_version++;
667         kgnilnd_conn_decref(conn);
668
669         /* if we are in reset, go right to CLOSED as there is no scheduler
670          * thread to move from CLOSING to CLOSED */
671         if (unlikely(kgnilnd_data.kgn_in_reset)) {
672                 conn->gnc_state = GNILND_CONN_CLOSED;
673         } else {
674                 conn->gnc_state = GNILND_CONN_CLOSING;
675         }
676
677         if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RDMA_CQ_ERROR)) {
678                 msleep_interruptible(MSEC_PER_SEC);
679         }
680
681         /* leave on peer->gnp_conns to make sure we don't let the reaper
682          * or others try to unlink this peer until the conn is fully
683          * processed for closing */
684
685         if (kgnilnd_check_purgatory_conn(conn)) {
686                 kgnilnd_add_purgatory_locked(conn, conn->gnc_peer);
687         }
688
689         /* Reset RX timeout to ensure we wait for an incoming CLOSE
690          * for the full timeout.  If we get a CLOSE we know the
691          * peer has stopped all RDMA.  Otherwise if we wait for
692          * the full timeout we can also be sure all RDMA has stopped. */
693         conn->gnc_last_rx = conn->gnc_last_rx_cq = jiffies;
694         mb();
695
696         /* schedule sending CLOSE - if we are in quiesce, this adds to
697          * gnd_ready_conns and allows us to find it in quiesce processing */
698         kgnilnd_schedule_conn(conn);
699
700         EXIT;
701 }
702
703 void
704 kgnilnd_close_conn(kgn_conn_t *conn, int error)
705 {
706         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
707         /* need to check the state here - this call is racy and we don't
708          * know the state until after the lock is grabbed */
709         if (conn->gnc_state == GNILND_CONN_ESTABLISHED) {
710                 kgnilnd_close_conn_locked(conn, error);
711         }
712         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
713 }
714
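/* Runs once a conn has reached CLOSED: cancels every TX still parked in the
 * conn's tx_ref_table (completing them with gnc_error), destroys the EP,
 * moves the conn to DONE, drops it from the peer's conn list (unless it is
 * held in purgatory), and finally notifies LNet about the peer. */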
715 void
716 kgnilnd_complete_closed_conn(kgn_conn_t *conn)
717 {
718         LIST_HEAD               (sinners);
719         kgn_tx_t               *tx, *txn;
720         int                     nlive = 0;
721         int                     nrdma = 0;
722         int                     nq_rdma = 0;
723         int                     logmsg;
724         ENTRY;
725
726         /* Dump log on cksum error - wait until complete phase to let
727          * RX of error happen */
728         if (*kgnilnd_tunables.kgn_checksum_dump &&
729             (conn != NULL && conn->gnc_peer_error == -ENOKEY)) {
730                 libcfs_debug_dumplog();
731         }
732
733         /* _CLOSED set in kgnilnd_process_fmaq once we decide to
734          * send the CLOSE or not */
735         LASSERTF(conn->gnc_state == GNILND_CONN_CLOSED,
736                  "conn 0x%p->%s with bad state %s\n",
737                  conn, conn->gnc_peer ?
738                         libcfs_nid2str(conn->gnc_peer->gnp_nid) :
739                         "<?>",
740                  kgnilnd_conn_state2str(conn));
741
742         LASSERT(list_empty(&conn->gnc_hashlist));
743         /* We shouldn't be on the delay list: the conn can get added to
744          * this list during a retransmit, and retransmits only occur
745          * within scheduler threads.
746          */
747         LASSERT(list_empty(&conn->gnc_delaylist));
748
749         /* we've sent the close, start nuking */
750         if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SCHEDULE_COMPLETE))
751                 kgnilnd_schedule_conn(conn);
752
753         if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
754                 CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
755                                 "done. Attempting to recover conn 0x%p "
756                                 "scheduled %d function: %s line: %d\n", conn,
757                                 conn->gnc_scheduled, conn->gnc_sched_caller,
758                                 conn->gnc_sched_line);
759                 RETURN_EXIT;
760         }
761
762         /* we don't use lists to track things that we can get out of the
763          * tx_ref table... */
764
765         /* need to hold locks for tx_list_state, sampling it is too racy:
766          * - the lock actually protects tx != NULL, but we can't take the proper
767          *   lock until we check tx_list_state, which would be too late and
768          *   we could have the TX change under us.
769          * gnd_rdmaq_lock and gnd_lock are not used together, so taking both
770          * should be fine */
771         spin_lock(&conn->gnc_device->gnd_rdmaq_lock);
772         spin_lock(&conn->gnc_device->gnd_lock);
773
774         for (nrdma = 0; nrdma < GNILND_MAX_MSG_ID; nrdma++) {
775                 tx = conn->gnc_tx_ref_table[nrdma];
776
777                 if (tx != NULL) {
778                         /* only let the first TX print an error; CLOSE is quieted too, as we
779                          * often don't see CQ events for it by the time we get here... and really don't care */
780                         if (nlive || tx->tx_msg.gnm_type == GNILND_MSG_CLOSE)
781                                 tx->tx_state |= GNILND_TX_QUIET_ERROR;
782                         nlive++;
783                         GNIDBG_TX(D_NET, tx, "cleaning up on close, nlive %d", nlive);
784
785                         /* don't worry about gnc_lock here as nobody else should be
786                          * touching this conn */
787                         kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
788                         list_add_tail(&tx->tx_list, &sinners);
789                 }
790         }
791         spin_unlock(&conn->gnc_device->gnd_lock);
792         spin_unlock(&conn->gnc_device->gnd_rdmaq_lock);
793
794         /* nobody should have marked this as needing scheduling after
795          * we called close - so only ref should be us handling it */
796         if (conn->gnc_scheduled != GNILND_CONN_PROCESS) {
797                 CDEBUG(D_NETERROR, "Error: someone scheduled us after we were "
798                                 "done. Attempting to recover conn 0x%p "
799                                 "scheduled %d function %s line: %d\n", conn,
800                                 conn->gnc_scheduled, conn->gnc_sched_caller,
801                                 conn->gnc_sched_line);
802         }
803         /* now reset a few to actual counters... */
804         nrdma = atomic_read(&conn->gnc_nlive_rdma);
805         nq_rdma = atomic_read(&conn->gnc_nq_rdma);
806
807         if (!list_empty(&sinners)) {
808                 list_for_each_entry_safe(tx, txn, &sinners, tx_list) {
809                         /* clear tx_list to make tx_add_list_locked happy */
810                         list_del_init(&tx->tx_list);
811                         /* The error codes determine if we hold onto the MDD */
812                         kgnilnd_tx_done(tx, conn->gnc_error);
813                 }
814         }
815
816         logmsg = (nlive + nrdma + nq_rdma);
817
818         if (logmsg) {
819                 int level = conn->gnc_peer->gnp_state == GNILND_PEER_UP ?
820                                 D_NETERROR : D_NET;
821                 CDEBUG(level, "Closed conn 0x%p->%s (errno %d,"
822                         " peer errno %d): canceled %d TX, %d/%d RDMA\n",
823                         conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
824                         conn->gnc_error, conn->gnc_peer_error,
825                         nlive, nq_rdma, nrdma);
826         }
827
828         kgnilnd_destroy_conn_ep(conn);
829
830         /* Bug 765042 - race this with completing a new conn to same peer - we need
831          * finish_connect to detach purgatory before we can do it ourselves here */
832         CFS_RACE(CFS_FAIL_GNI_FINISH_PURG);
833
834         /* now it is safe to remove from peer list - anyone looking at
835          * gnp_conns now is free to unlink if not on purgatory */
836         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
837
838         conn->gnc_state = GNILND_CONN_DONE;
839
840         /* Decrement counter if we are marked by del_conn_or_peers for closing
841          */
842         if (conn->gnc_needs_closing)
843                 kgnilnd_admin_decref(kgnilnd_data.kgn_npending_conns);
844
845         /* Remove from peer's list of valid connections if it's not in purgatory */
846         if (!conn->gnc_in_purgatory) {
847                 list_del_init(&conn->gnc_list);
848                 /* Lose the peer's reference on the conn */
849                 kgnilnd_conn_decref(conn);
850         }
851
852         /* NB - only unlinking if we set pending in del_peer_locked from admin or
853          * shutdown */
854         if (kgnilnd_peer_active(conn->gnc_peer) &&
855             conn->gnc_peer->gnp_pending_unlink &&
856             kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
857                 kgnilnd_unlink_peer_locked(conn->gnc_peer);
858         }
859
860         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
861
862         /* I'm telling Mommy! - use peer_error if they initiated close */
863         kgnilnd_peer_notify(conn->gnc_peer,
864                             conn->gnc_error == -ECONNRESET ?
865                             conn->gnc_peer_error : conn->gnc_error, 0);
866
867         EXIT;
868 }
869
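/* Apply the parameters received in a connection request datagram to 'conn':
 * picks the larger of the two timeouts, binds the EP to the remote host id and
 * CQ id (unless the dstnid is a wildcard or we are connecting to ourselves),
 * sets the EP event data, and initializes SMSG with the exchanged attributes.
 * Returns 0 on success or -ECONNABORTED after unwinding the EP bind on failure. */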
870 int
871 kgnilnd_set_conn_params(kgn_dgram_t *dgram)
872 {
873         kgn_conn_t             *conn = dgram->gndg_conn;
874         kgn_connreq_t          *connreq = &dgram->gndg_conn_in;
875         kgn_gniparams_t        *rem_param = &connreq->gncr_gnparams;
876         gni_return_t            rrc;
877         int                     rc = 0;
878         gni_smsg_attr_t        *remote = &connreq->gncr_gnparams.gnpr_smsg_attr;
879
880         /* set timeout vals in conn early so we can use them for the NAK */
881
882         /* use max of the requested and our timeout, peer will do the same */
883         conn->gnc_timeout = MAX(conn->gnc_timeout, connreq->gncr_timeout);
884
885         /* only ep_bind really mucks around with the CQ */
886         /* only ep_bind if we are not connecting to ourselves and the dstnid is not a wildcard. This check
887          * is necessary as you can only bind an ep once, and we must make sure we don't bind when already bound.
888          */
889         if (connreq->gncr_dstnid != LNET_NID_ANY && dgram->gndg_conn_out.gncr_dstnid != connreq->gncr_srcnid) {
890                 mutex_lock(&conn->gnc_device->gnd_cq_mutex);
891                 rrc = kgnilnd_ep_bind(conn->gnc_ephandle,
892                         connreq->gncr_gnparams.gnpr_host_id,
893                         conn->gnc_cqid);
894                 mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
895                 if (rrc != GNI_RC_SUCCESS) {
896                         rc = -ECONNABORTED;
897                         goto return_out;
898                 }
899         }
900
901         rrc = kgnilnd_ep_set_eventdata(conn->gnc_ephandle, conn->gnc_cqid,
902                          connreq->gncr_gnparams.gnpr_cqid);
903         if (rrc != GNI_RC_SUCCESS) {
904                 rc = -ECONNABORTED;
905                 goto cleanup_out;
906         }
907
908         /* Initialize SMSG */
909         rrc = kgnilnd_smsg_init(conn->gnc_ephandle, &conn->gnpr_smsg_attr,
910                         &connreq->gncr_gnparams.gnpr_smsg_attr);
911         if (unlikely(rrc == GNI_RC_INVALID_PARAM)) {
912                 gni_smsg_attr_t *local = &conn->gnpr_smsg_attr;
913                 /* help folks figure out if there is a tunable off, etc. */
914                 LCONSOLE_ERROR("SMSG attribute mismatch. Data from local/remote:"
915                                " type %d/%d msg_maxsize %u/%u"
916                                " mbox_maxcredit %u/%u. Please check kgni"
917                                " logs for further data\n",
918                                local->msg_type, remote->msg_type,
919                                local->msg_maxsize, remote->msg_maxsize,
920                                local->mbox_maxcredit, remote->mbox_maxcredit);
921         }
922         if (rrc != GNI_RC_SUCCESS) {
923                 rc = -ECONNABORTED;
924                 goto cleanup_out;
925         }
926
927         /* log this for help in debugging SMSG buffer re-use */
928         CDEBUG(D_NET, "conn %p src %s dst %s smsg %p acquired"
929                 " local cqid %u SMSG %p->%u hndl %#llx.%#llx"
930                 " remote cqid %u SMSG %p->%u hndl %#llx.%#llx\n",
931                 conn, libcfs_nid2str(connreq->gncr_srcnid),
932                 libcfs_nid2str(connreq->gncr_dstnid),
933                 &conn->gnpr_smsg_attr,
934                 conn->gnc_cqid,
935                 conn->gnpr_smsg_attr.msg_buffer,
936                 conn->gnpr_smsg_attr.mbox_offset,
937                 conn->gnpr_smsg_attr.mem_hndl.qword1,
938                 conn->gnpr_smsg_attr.mem_hndl.qword2,
939                 rem_param->gnpr_cqid,
940                 rem_param->gnpr_smsg_attr.msg_buffer,
941                 rem_param->gnpr_smsg_attr.mbox_offset,
942                 rem_param->gnpr_smsg_attr.mem_hndl.qword1,
943                 rem_param->gnpr_smsg_attr.mem_hndl.qword2);
944
945         conn->gnc_peerstamp = connreq->gncr_peerstamp;
946         conn->gnc_peer_connstamp = connreq->gncr_connstamp;
947         conn->remote_mbox_addr = (void *)((char *)remote->msg_buffer + remote->mbox_offset);
948
949         /* We update the reaper timeout once we have a valid conn and timeout */
950         kgnilnd_update_reaper_timeout(GNILND_TO2KA(conn->gnc_timeout));
951
952         return 0;
953
954 cleanup_out:
955         rrc = kgnilnd_ep_unbind(conn->gnc_ephandle);
956         /* not sure I can just let this fly */
957         LASSERTF(rrc == GNI_RC_SUCCESS,
958                 "bad rc from gni_ep_unbind trying to cleanup: %d\n", rrc);
959
960 return_out:
961         LASSERTF(rc != 0, "SOFTWARE BUG: rc == 0\n");
962         CERROR("Error setting connection params from %s: %d\n",
963                libcfs_nid2str(connreq->gncr_srcnid), rc);
964         return rc;
965 }
966
967 /* needs down_read on kgn_net_rw_sem held from before this call until
968  * after the write_lock on kgn_peer_conn_lock - this ensures we stay sane
969  * with kgnilnd_shutdown - it'll get the sem and set shutdown, then get the
970  * kgn_peer_conn_lock to start del_peer'ing. If we hold the sem until after
971  * kgn_peer_conn_lock is held, we guarantee that nobody calls
972  * kgnilnd_add_peer_locked without checking gnn_shutdown */
973 int
974 kgnilnd_create_peer_safe(kgn_peer_t **peerp,
975                          lnet_nid_t nid,
976                          kgn_net_t *net,
977                          int node_state)
978 {
979         kgn_peer_t      *peer;
980         int             rc;
981
982         LASSERT(nid != LNET_NID_ANY);
983
984         /* We don't pass the net around in the dgram anymore, so here is where we
985          * find it. This will work unless shutdown is in progress or the nid has a
986          * net that is invalid; either way an error code is returned in that case.
987          *
988          * If the net passed in is not NULL then we can use it; this saves looking it
989          * up when the calling function already has access to the data.
990          */
991         if (net == NULL) {
992                 rc = kgnilnd_find_net(nid, &net);
993                 if (rc < 0)
994                         return rc;
995         } else {
996                 /* kgnilnd_find_net() adds a reference on the net; since we are
997                  * not calling it here, we must take the reference manually so
998                  * the net refcounts are correct when tearing down the net
999                  */
1000                 kgnilnd_net_addref(net);
1001         }
1002
1003         LIBCFS_ALLOC(peer, sizeof(*peer));
1004         if (peer == NULL) {
1005                 kgnilnd_net_decref(net);
1006                 return -ENOMEM;
1007         }
1008         peer->gnp_nid = nid;
1009         peer->gnp_state = node_state;
1010
1011         /* translate from nid to nic addr & store */
1012         rc = kgnilnd_nid_to_nicaddrs(LNET_NIDADDR(nid), 1, &peer->gnp_host_id);
1013         if (rc <= 0) {
1014                 kgnilnd_net_decref(net);
1015                 LIBCFS_FREE(peer, sizeof(*peer));
1016                 return -ESRCH;
1017         }
1018         CDEBUG(D_NET, "peer 0x%p->%s -> NIC 0x%x\n", peer,
1019                 libcfs_nid2str(nid), peer->gnp_host_id);
1020
1021         atomic_set(&peer->gnp_refcount, 1);     /* 1 ref for caller */
1022         atomic_set(&peer->gnp_dirty_eps, 0);
1023
1024         INIT_LIST_HEAD(&peer->gnp_list);
1025         INIT_LIST_HEAD(&peer->gnp_connd_list);
1026         INIT_LIST_HEAD(&peer->gnp_conns);
1027         INIT_LIST_HEAD(&peer->gnp_tx_queue);
1028
1029         /* the first reconnect should happen immediately, so we leave
1030          * gnp_reconnect_interval set to 0 */
1031
1032         LASSERTF(net != NULL, "peer 0x%p->%s with NULL net\n",
1033                  peer, libcfs_nid2str(nid));
1034
1035         /* must have kgn_net_rw_sem held for this...  */
1036         if (net->gnn_shutdown) {
1037                 /* shutdown has started already */
1038                 kgnilnd_net_decref(net);
1039                 LIBCFS_FREE(peer, sizeof(*peer));
1040                 return -ESHUTDOWN;
1041         }
1042
1043         peer->gnp_net = net;
1044
1045         atomic_inc(&kgnilnd_data.kgn_npeers);
1046
1047         *peerp = peer;
1048         return 0;
1049 }
1050
1051 void
1052 kgnilnd_destroy_peer(kgn_peer_t *peer)
1053 {
1054         CDEBUG(D_NET, "peer %s %p deleted\n",
1055                libcfs_nid2str(peer->gnp_nid), peer);
1056         LASSERTF(atomic_read(&peer->gnp_refcount) == 0,
1057                  "peer 0x%p->%s refs %d\n",
1058                  peer, libcfs_nid2str(peer->gnp_nid),
1059                  atomic_read(&peer->gnp_refcount));
1060         LASSERTF(atomic_read(&peer->gnp_dirty_eps) == 0,
1061                  "peer 0x%p->%s dirty eps %d\n",
1062                  peer, libcfs_nid2str(peer->gnp_nid),
1063                  atomic_read(&peer->gnp_dirty_eps));
1064         LASSERTF(peer->gnp_net != NULL, "peer %p (%s) with NULL net\n",
1065                  peer, libcfs_nid2str(peer->gnp_nid));
1066         LASSERTF(!kgnilnd_peer_active(peer),
1067                  "peer 0x%p->%s\n",
1068                 peer, libcfs_nid2str(peer->gnp_nid));
1069         LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE || peer->gnp_connecting == GNILND_PEER_KILL,
1070                  "peer 0x%p->%s, connecting %d\n",
1071                 peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
1072         LASSERTF(list_empty(&peer->gnp_conns),
1073                  "peer 0x%p->%s\n",
1074                 peer, libcfs_nid2str(peer->gnp_nid));
1075         LASSERTF(list_empty(&peer->gnp_tx_queue),
1076                  "peer 0x%p->%s\n",
1077                 peer, libcfs_nid2str(peer->gnp_nid));
1078         LASSERTF(list_empty(&peer->gnp_connd_list),
1079                  "peer 0x%p->%s\n",
1080                 peer, libcfs_nid2str(peer->gnp_nid));
1081
1082         /* NB a peer's connections keep a reference on their peer until
1083          * they are destroyed, so we can be assured that _all_ state to do
1084          * with this peer has been cleaned up when its refcount drops to
1085          * zero. */
1086
1087         atomic_dec(&kgnilnd_data.kgn_npeers);
1088         kgnilnd_net_decref(peer->gnp_net);
1089
1090         LIBCFS_FREE(peer, sizeof(*peer));
1091 }
1092
1093 /* the conn might not have made it all the way through to a connected
1094  * state - but we need to purgatory any conn that a remote peer might
1095  * have seen through a posted dgram as well */
1096 void
1097 kgnilnd_add_purgatory_locked(kgn_conn_t *conn, kgn_peer_t *peer)
1098 {
1099         kgn_mbox_info_t *mbox = NULL;
1100         ENTRY;
1101
1102         /* NB - the caller should own conn by removing it from the
1103          * scheduler thread when finishing the close */
1104
1105         LASSERTF(peer != NULL, "conn %p with NULL peer\n", conn);
1106
1107         /* If this is still true, need to add the calls to unlink back in and
1108          * figure out how to close the hole on loopback conns */
1109         LASSERTF(kgnilnd_peer_active(peer), "can't use inactive peer %s (%p)"
1110                 " we'll never recover the resources\n",
1111                 libcfs_nid2str(peer->gnp_nid), peer);
1112
1113         CDEBUG(D_NET, "conn %p peer %p dev %p\n", conn, peer,
1114                 conn->gnc_device);
1115
1116         LASSERTF(conn->gnc_in_purgatory == 0,
1117                 "Conn already in purgatory\n");
1118         conn->gnc_in_purgatory = 1;
1119
1120         mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
1121         mbox->mbx_prev_purg_nid = peer->gnp_nid;
1122         mbox->mbx_add_purgatory = jiffies;
1123         kgnilnd_release_mbox(conn, 1);
1124
1125         LASSERTF(list_empty(&conn->gnc_mdd_list),
1126                 "conn 0x%p->%s with active purgatory hold MDD %d\n",
1127                 conn, libcfs_nid2str(peer->gnp_nid),
1128                 kgnilnd_count_list(&conn->gnc_mdd_list));
1129
1130         EXIT;
1131 }
1132
1133 /* Instead of detaching everything from purgatory here, we just mark the conn as needing
1134  * detach; the next time the reaper checks the conn, it will detach it.
1135  * Calling function requires write_lock held on kgn_peer_conn_lock
1136  */
1137 void
1138 kgnilnd_mark_for_detach_purgatory_all_locked(kgn_peer_t *peer) {
1139         kgn_conn_t       *conn;
1140
1141         list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
1142                 if (conn->gnc_in_purgatory && !conn->gnc_needs_detach) {
1143                         conn->gnc_needs_detach = 1;
1144                         kgnilnd_admin_addref(kgnilnd_data.kgn_npending_detach);
1145                 }
1146         }
1147 }
1148
1149 /* Calling function needs a write_lock held on kgn_peer_conn_lock */
1150 void
1151 kgnilnd_detach_purgatory_locked(kgn_conn_t *conn, struct list_head *conn_list)
1152 {
1153         kgn_mbox_info_t *mbox = NULL;
1154
1155         /* if needed, add the conn purgatory data to the list passed in */
1156         if (conn->gnc_in_purgatory) {
1157                 CDEBUG(D_NET, "peer %p->%s purg_conn %p@%s mdd_list #tx %d\n",
1158                         conn->gnc_peer, libcfs_nid2str(conn->gnc_peer->gnp_nid),
1159                         conn, kgnilnd_conn_state2str(conn),
1160                         kgnilnd_count_list(&conn->gnc_mdd_list));
1161
1162                 mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
1163                 mbox->mbx_detach_of_purgatory = jiffies;
1164
1165                 /* conn->gnc_list is the entry point on peer->gnp_conns, so detaching it
1166                  * here removes it from the list of 'valid' peer connections.
1167                  * We put the current conn onto a list of conns to call kgnilnd_release_purgatory_locked()
1168                  * and as such the caller of kgnilnd_detach_purgatory_locked() now owns that conn, since it's not
1169                  * on the peer's conn_list anymore.
1170                  */
1171
1172                 list_del_init(&conn->gnc_list);
1173
1174                 /* NB - only unlinking if we set pending in del_peer_locked from admin or
1175                  * shutdown */
1176                 if (kgnilnd_peer_active(conn->gnc_peer) &&
1177                     conn->gnc_peer->gnp_pending_unlink &&
1178                     kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
1179                         kgnilnd_unlink_peer_locked(conn->gnc_peer);
1180                 }
1181                 /* The reaper will not call detach unless the conn is fully through kgnilnd_complete_closed_conn.
1182                  * If the conn is somehow not in a DONE state, we are attempting to detach even though
1183                  * the conn has not been fully cleaned up. If we detach while the conn is still closing
1184                  * we will end up with an orphaned connection that has valid ep_handle, that is not on a
1185                  * peer.
1186                  */
1187
1188                 LASSERTF(conn->gnc_state == GNILND_CONN_DONE, "Conn in invalid state %p@%s\n",
1189                                 conn, kgnilnd_conn_state2str(conn));
1190
1191                 /* move from peer to the delayed release list */
1192                 list_add_tail(&conn->gnc_list, conn_list);
1193         }
1194 }
1195
1196 void
1197 kgnilnd_release_purgatory_list(struct list_head *conn_list)
1198 {
1199         kgn_device_t            *dev;
1200         kgn_conn_t              *conn, *connN;
1201         kgn_mdd_purgatory_t     *gmp, *gmpN;
1202
1203         list_for_each_entry_safe(conn, connN, conn_list, gnc_list) {
1204                 dev = conn->gnc_device;
1205
1206                 kgnilnd_release_mbox(conn, -1);
1207                 conn->gnc_in_purgatory = 0;
1208
1209                 list_del_init(&conn->gnc_list);
1210
1211                 /* gnc_needs_detach is set in kgnilnd_del_conn_or_peer. It is used to keep track
1212                  * of conns that have been marked for detach by kgnilnd_del_conn_or_peer.
1213                  * The function uses kgn_npending_detach to verify the conn has
1214                  * actually been detached.
1215                  */
1216
1217                 if (conn->gnc_needs_detach)
1218                         kgnilnd_admin_decref(kgnilnd_data.kgn_npending_detach);
1219
1220                 /* if this guy is really dead (we are doing release from reaper),
1221                  * make sure we tell LNet - if this is from other context,
1222                  * the checks in the function will prevent an errant
1223                  * notification */
1224                 kgnilnd_peer_notify(conn->gnc_peer, conn->gnc_error, 0);
1225
1226                 list_for_each_entry_safe(gmp, gmpN, &conn->gnc_mdd_list,
1227                                          gmp_list) {
1228                         CDEBUG(D_NET,
1229                                "dev %p releasing held mdd %#llx.%#llx\n",
1230                                conn->gnc_device, gmp->gmp_map_key.qword1,
1231                                gmp->gmp_map_key.qword2);
1232
1233                         atomic_dec(&dev->gnd_n_mdd_held);
1234                         kgnilnd_mem_mdd_release(conn->gnc_device->gnd_handle,
1235                                                 &gmp->gmp_map_key);
1236                         /* ignoring the return code - if kgni/ghal can't find it
1237                          * it must be released already */
1238
1239                         list_del_init(&gmp->gmp_list);
1240                         LIBCFS_FREE(gmp, sizeof(*gmp));
1241                 }
1242                 /* lose conn ref for purgatory */
1243                 kgnilnd_conn_decref(conn);
1244         }
1245 }
1246
1247 /* needs write_lock on kgnilnd_data.kgn_peer_conn_lock held */
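/* Back-off sketch with illustrative tunable values (kgn_min_reconnect_interval
 * = 10s, kgn_max_reconnect_interval = 60s): the first retry fires immediately,
 * then successive failures wait 10, 15, 20, ... seconds, capped at 60. */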
1248 void
1249 kgnilnd_peer_increase_reconnect_locked(kgn_peer_t *peer)
1250 {
1251         int current_to;
1252
1253         current_to = peer->gnp_reconnect_interval;
1254
1255         /* we'll try to reconnect fast the first time, then back-off */
1256         if (current_to == 0) {
1257                 peer->gnp_reconnect_time = jiffies - 1;
1258                 current_to = *kgnilnd_tunables.kgn_min_reconnect_interval;
1259         } else {
1260                 peer->gnp_reconnect_time = jiffies + cfs_time_seconds(current_to);
1261                 /* add 50% of min timeout & retry */
1262                 current_to += *kgnilnd_tunables.kgn_min_reconnect_interval / 2;
1263         }
1264
1265         current_to = MIN(current_to,
1266                                 *kgnilnd_tunables.kgn_max_reconnect_interval);
1267
1268         peer->gnp_reconnect_interval = current_to;
1269         CDEBUG(D_NET, "peer %s can reconnect at %lu interval %lu\n",
1270                libcfs_nid2str(peer->gnp_nid), peer->gnp_reconnect_time,
1271                peer->gnp_reconnect_interval);
1272 }
1273
1274 /* needs kgnilnd_data.kgn_peer_conn_lock held */
1275 kgn_peer_t *
1276 kgnilnd_find_peer_locked(lnet_nid_t nid)
1277 {
1278         struct list_head *peer_list = kgnilnd_nid2peerlist(nid);
1279         kgn_peer_t       *peer;
1280
1281         /* Chopping nid down to only NIDADDR using LNET_NIDADDR so we only
1282          * have a single peer per device instead of a peer per nid/net combo.
1283          */
1284
1285         list_for_each_entry(peer, peer_list, gnp_list) {
1286                 if (LNET_NIDADDR(nid) != LNET_NIDADDR(peer->gnp_nid))
1287                         continue;
1288
1289                 CDEBUG(D_NET, "got peer [%p] -> %s c %d (%d)\n",
1290                        peer, libcfs_nid2str(nid),
1291                        peer->gnp_connecting,
1292                        atomic_read(&peer->gnp_refcount));
1293                 return peer;
1294         }
1295         return NULL;
1296 }
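/* Illustration of the NIDADDR coalescing above (example NIDs only): with a
 * node reachable as both 512@gni and 512@gni1, LNET_NIDADDR() strips the net
 * portion so both lookups resolve to the same kgn_peer_t, e.g.
 *
 *      LNET_NIDADDR(LNET_MKNID(LNET_MKNET(GNILND, 0), 512)) ==
 *      LNET_NIDADDR(LNET_MKNID(LNET_MKNET(GNILND, 1), 512))
 *
 * which is why the hash walk compares addresses rather than full NIDs. */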
1297
1298 /* need write_lock on kgn_peer_conn_lock */
1299 void
1300 kgnilnd_unlink_peer_locked(kgn_peer_t *peer)
1301 {
1302         LASSERTF(list_empty(&peer->gnp_conns),
1303                 "peer 0x%p->%s\n",
1304                  peer, libcfs_nid2str(peer->gnp_nid));
1305         LASSERTF(list_empty(&peer->gnp_tx_queue),
1306                 "peer 0x%p->%s\n",
1307                  peer, libcfs_nid2str(peer->gnp_nid));
1308         LASSERTF(kgnilnd_peer_active(peer),
1309                 "peer 0x%p->%s\n",
1310                  peer, libcfs_nid2str(peer->gnp_nid));
1311         CDEBUG(D_NET, "unlinking peer 0x%p->%s\n",
1312                 peer, libcfs_nid2str(peer->gnp_nid));
1313
1314         list_del_init(&peer->gnp_list);
1315         kgnilnd_data.kgn_peer_version++;
1316         kgnilnd_admin_decref(kgnilnd_data.kgn_npending_unlink);
1317         /* lose peerlist's ref */
1318         kgnilnd_peer_decref(peer);
1319 }
1320
1321 int
1322 kgnilnd_get_peer_info(int index,
1323                       kgn_peer_t **found_peer,
1324                       lnet_nid_t *id, __u32 *nic_addr,
1325                       int *refcount, int *connecting)
1326 {
1327         struct list_head  *ptmp;
1328         kgn_peer_t        *peer;
1329         int               i;
1330         int               rc = -ENOENT;
1331
1332         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1333
1334         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1335
1336                 list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
1337                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1338
1339                         if (index-- > 0)
1340                                 continue;
1341
1342                         CDEBUG(D_NET, "found peer %p (%s) at index %d\n",
1343                                peer, libcfs_nid2str(peer->gnp_nid), index);
1344
1345                         *found_peer  = peer;
1346                         *id          = peer->gnp_nid;
1347                         *nic_addr    = peer->gnp_host_id;
1348                         *refcount    = atomic_read(&peer->gnp_refcount);
1349                         *connecting  = peer->gnp_connecting;
1350
1351                         rc = 0;
1352                         goto out;
1353                 }
1354         }
1355 out:
1356         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1357         if (rc)
1358                 CDEBUG(D_NET, "no gni peer at index %d\n", index);
1359         return rc;
1360 }
1361
1362 /* requires write_lock on kgn_peer_conn_lock held */
1363 void
1364 kgnilnd_add_peer_locked(lnet_nid_t nid, kgn_peer_t *new_stub_peer, kgn_peer_t **peerp)
1365 {
1366         kgn_peer_t        *peer, *peer2;
1367
1368         LASSERTF(new_stub_peer != NULL, "bad stub peer for nid %s\n",
1369                  libcfs_nid2str(nid));
1370
1371         peer2 = kgnilnd_find_peer_locked(nid);
1372         if (peer2 != NULL) {
1373                 /* A peer was created during the lock transition, so drop
1374                  * the new one we created */
1375                 kgnilnd_peer_decref(new_stub_peer);
1376                 peer = peer2;
1377         } else {
1378                 peer = new_stub_peer;
1379                 /* peer table takes existing ref on peer */
1380
1381                 LASSERTF(!kgnilnd_peer_active(peer),
1382                         "peer 0x%p->%s already in peer table\n",
1383                         peer, libcfs_nid2str(peer->gnp_nid));
1384                 list_add_tail(&peer->gnp_list,
1385                               kgnilnd_nid2peerlist(nid));
1386                 kgnilnd_data.kgn_peer_version++;
1387         }
1388
1389         LASSERTF(peer->gnp_net != NULL, "peer 0x%p->%s with NULL net\n",
1390                  peer, libcfs_nid2str(peer->gnp_nid));
1391         *peerp = peer;
1392 }
1393
1394 int
1395 kgnilnd_add_peer(kgn_net_t *net, lnet_nid_t nid, kgn_peer_t **peerp)
1396 {
1397         kgn_peer_t        *peer;
1398         int                rc;
1399         int                node_state;
1400         ENTRY;
1401
1402         if (nid == LNET_NID_ANY)
1403                 return -EINVAL;
1404
1405         node_state = kgnilnd_get_node_state(LNET_NIDADDR(nid));
1406
1407         /* NB - this will not block during normal operations -
1408          * the only writer of this is in the startup/shutdown path. */
1409         rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
1410         if (!rc) {
1411                 rc = -ESHUTDOWN;
1412                 RETURN(rc);
1413         }
1414         rc = kgnilnd_create_peer_safe(&peer, nid, net, node_state);
1415         if (rc != 0) {
1416                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1417                 RETURN(rc);
1418         }
1419
1420         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1421         up_read(&kgnilnd_data.kgn_net_rw_sem);
1422
1423         kgnilnd_add_peer_locked(nid, peer, peerp);
1424
1425         CDEBUG(D_NET, "peer 0x%p->%s connecting %d\n",
1426                *peerp, libcfs_nid2str((*peerp)->gnp_nid),
1427                (*peerp)->gnp_connecting);
1428
1429         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1430         RETURN(0);
1431 }
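/* Note on the locking pattern above: kgn_net_rw_sem is only ever taken for
 * write in the startup/shutdown path, so a failed down_read_trylock() can be
 * treated as -ESHUTDOWN rather than as transient contention.  The
 * kgn_peer_conn_lock write lock is taken before the net semaphore is released,
 * presumably so the net backing the new peer cannot start tearing down in the
 * window between dropping one lock and holding the other. */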
1432
1433 /* needs write_lock on kgn_peer_conn_lock */
1434 void
1435 kgnilnd_cancel_peer_connect_locked(kgn_peer_t *peer, struct list_head *zombies)
1436 {
1437         kgn_tx_t        *tx, *txn;
1438
1439         /* we do care about the state of gnp_connecting - we could be between
1440          * reconnect attempts, so try to find the dgram and cancel the TX
1441          * anyway. If we are in the process of posting, DON'T do anything;
1442          * once it fails or succeeds we can nuke the connect attempt.
1443          * We have no idea where in kgnilnd_post_dgram we are, so we can't
1444          * attempt to cancel until the function is done.
1445          */
1446
1447         /* make sure the peer isn't in the process of connecting or waiting for connect */
1448         spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1449         if (!(list_empty(&peer->gnp_connd_list))) {
1450                 list_del_init(&peer->gnp_connd_list);
1451                 /* remove connd ref */
1452                 kgnilnd_peer_decref(peer);
1453         }
1454         spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1455
1456         if (peer->gnp_connecting == GNILND_PEER_POSTING || peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH) {
1457                 peer->gnp_connecting = GNILND_PEER_NEEDS_DEATH;
1458                 /* We are in the process of posting right now; the xchg set it up for us to
1459                  * cancel the connect, so we are finished for now */
1460         } else {
1461                 /* no need for an exchange - we have the peer lock and it's ready for us to nuke */
1462                 LASSERTF(peer->gnp_connecting != GNILND_PEER_POSTING,
1463                         "Peer in invalid state 0x%p->%s, connecting %d\n",
1464                         peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
1465                 peer->gnp_connecting = GNILND_PEER_IDLE;
1466                 set_mb(peer->gnp_last_dgram_errno, -ETIMEDOUT);
1467                 kgnilnd_find_and_cancel_dgram(peer->gnp_net->gnn_dev,
1468                                                       peer->gnp_nid);
1469         }
1470
1471         /* The least we can do is nuke the TXs no matter what... */
1472         list_for_each_entry_safe(tx, txn, &peer->gnp_tx_queue, tx_list) {
1473                 kgnilnd_tx_del_state_locked(tx, peer, NULL,
1474                                            GNILND_TX_ALLOCD);
1475                 list_add_tail(&tx->tx_list, zombies);
1476         }
1477 }
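/* Usage sketch for the zombie-list contract above (this mirrors what
 * kgnilnd_del_conn_or_peer() and kgnilnd_report_node_state() below actually
 * do): the caller owns a local list, cancels under the write lock, and only
 * completes the TXs once the lock has been dropped:
 *
 *      LIST_HEAD(zombies);
 *
 *      write_lock(&kgnilnd_data.kgn_peer_conn_lock);
 *      kgnilnd_cancel_peer_connect_locked(peer, &zombies);
 *      write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
 *
 *      kgnilnd_txlist_done(&zombies, error);
 */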
1478
1479 /* needs write_lock on kgn_peer_conn_lock */
1480 void
1481 kgnilnd_del_peer_locked(kgn_peer_t *peer, int error)
1482 {
1483         /* this peer could be passive and only held for purgatory,
1484          * take a ref to ensure it doesn't disappear in this function */
1485         kgnilnd_peer_addref(peer);
1486
1487         CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);
1488
1489         /* if purgatory release cleared it out, don't try again */
1490         if (kgnilnd_peer_active(peer)) {
1491                 /* always do this to allow kgnilnd_start_connect and
1492                  * kgnilnd_finish_connect to catch this before they
1493                  * wrap up their operations */
1494                 if (kgnilnd_can_unlink_peer_locked(peer)) {
1495                         /* already released purgatory, so only active
1496                          * conns hold it */
1497                         kgnilnd_unlink_peer_locked(peer);
1498                 } else {
1499                         kgnilnd_close_peer_conns_locked(peer, error);
1500                         /* peer unlinks itself when last conn is closed */
1501                 }
1502         }
1503
1504         /* we are done, release back to the wild */
1505         kgnilnd_peer_decref(peer);
1506 }
1507
1508 int
1509 kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
1510                           int error)
1511 {
1512         LIST_HEAD               (souls);
1513         LIST_HEAD               (zombies);
1514         struct list_head        *ptmp, *pnxt;
1515         kgn_peer_t              *peer;
1516         int                     lo;
1517         int                     hi;
1518         int                     i;
1519         int                     rc = -ENOENT;
1520
1521         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1522
1523         if (nid != LNET_NID_ANY)
1524                 lo = hi = kgnilnd_nid2peerlist(nid) - kgnilnd_data.kgn_peers;
1525         else {
1526                 lo = 0;
1527                 hi = *kgnilnd_tunables.kgn_peer_hash_size - 1;
1528                 /* wildcards always succeed */
1529                 rc = 0;
1530         }
1531
1532         for (i = lo; i <= hi; i++) {
1533                 list_for_each_safe(ptmp, pnxt, &kgnilnd_data.kgn_peers[i]) {
1534                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1535
1536                         LASSERTF(peer->gnp_net != NULL,
1537                                 "peer %p (%s) with NULL net\n",
1538                                  peer, libcfs_nid2str(peer->gnp_nid));
1539
1540                         if (net != NULL && peer->gnp_net != net)
1541                                 continue;
1542
1543                         if (!(nid == LNET_NID_ANY || LNET_NIDADDR(peer->gnp_nid) == LNET_NIDADDR(nid)))
1544                                 continue;
1545
1546                         /* In all cases, we want to stop any in-flight
1547                          * connect attempts */
1548                         kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1549
1550                         switch (command) {
1551                         case GNILND_DEL_CONN:
1552                                 kgnilnd_close_peer_conns_locked(peer, error);
1553                                 break;
1554                         case GNILND_DEL_PEER:
1555                                 peer->gnp_pending_unlink = 1;
1556                                 kgnilnd_admin_addref(kgnilnd_data.kgn_npending_unlink);
1557                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1558                                 kgnilnd_del_peer_locked(peer, error);
1559                                 break;
1560                         case GNILND_CLEAR_PURGATORY:
1561                                 /* Mark everything ready for detach; the reaper will clean up
1562                                  * once we release the kgn_peer_conn_lock
1563                                  */
1564                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1565                                 peer->gnp_last_errno = -EISCONN;
1566                                 /* clear reconnect state so the peer can reconnect soon */
1567                                 peer->gnp_reconnect_time = 0;
1568                                 peer->gnp_reconnect_interval = 0;
1569                                 break;
1570                         default:
1571                                 CERROR("bad command %d\n", command);
1572                                 LBUG();
1573                         }
1574                         /* we matched something */
1575                         rc = 0;
1576                 }
1577         }
1578
1579         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1580
1581         /* nuke peer TX */
1582         kgnilnd_txlist_done(&zombies, error);
1583
1584         /* This function does not return until the commands it initiated have completed,
1585          * since they have to work their way through the other threads. In the case of shutdown,
1586          * threads are not woken up until after this call is initiated, so we cannot wait; we just
1587          * need to return. The same applies for stack reset: we shouldn't wait, as the reset thread
1588          * handles closing.
1589          */
1590
1591         CFS_RACE(CFS_FAIL_GNI_RACE_RESET);
1592
1593         if (error == -ENOTRECOVERABLE || error == -ESHUTDOWN) {
1594                 return rc;
1595         }
1596
1597         i = 4;
1598         while (atomic_read(&kgnilnd_data.kgn_npending_conns)   ||
1599                atomic_read(&kgnilnd_data.kgn_npending_detach)  ||
1600                atomic_read(&kgnilnd_data.kgn_npending_unlink)) {
1601
1602                 set_current_state(TASK_UNINTERRUPTIBLE);
1603                 schedule_timeout(cfs_time_seconds(1));
1604                 i++;
1605
1606                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, "Waiting on %d peers %d closes %d detaches\n",
1607                                 atomic_read(&kgnilnd_data.kgn_npending_unlink),
1608                                 atomic_read(&kgnilnd_data.kgn_npending_conns),
1609                                 atomic_read(&kgnilnd_data.kgn_npending_detach));
1610         }
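        /* The '(i & (-i)) == i' test above is a log-throttling idiom: it is
         * true only when i is a power of two, so with i starting at 4 the
         * message is promoted to D_WARNING on the passes where i reaches
         * 8, 16, 32, ... and stays at D_NET on all the others. */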
1611
1612         return rc;
1613 }
1614
1615 kgn_conn_t *
1616 kgnilnd_get_conn_by_idx(int index)
1617 {
1618         kgn_peer_t        *peer;
1619         struct list_head  *ptmp;
1620         kgn_conn_t        *conn;
1621         struct list_head  *ctmp;
1622         int                i;
1623
1624
1625         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1626                 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1627                 list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
1628
1629                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1630
1631                         list_for_each(ctmp, &peer->gnp_conns) {
1632                                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
1633
1634                                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1635                                         continue;
1636
1637                                 if (index-- > 0)
1638                                         continue;
1639
1640                                 CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
1641                                        libcfs_nid2str(conn->gnc_peer->gnp_nid),
1642                                        atomic_read(&conn->gnc_refcount));
1643                                 kgnilnd_conn_addref(conn);
1644                                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1645                                 return conn;
1646                         }
1647                 }
1648                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1649         }
1650
1651         return NULL;
1652 }
1653
1654 int
1655 kgnilnd_get_conn_info(kgn_peer_t *peer,
1656                       int *device_id, __u64 *peerstamp,
1657                       int *tx_seq, int *rx_seq,
1658                       int *fmaq_len, int *nfma, int *nrdma)
1659 {
1660         kgn_conn_t        *conn;
1661         int               rc = 0;
1662
1663         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1664
1665         conn = kgnilnd_find_conn_locked(peer);
1666         if (conn == NULL) {
1667                 rc = -ENOENT;
1668                 goto out;
1669         }
1670
1671         *device_id = conn->gnc_device->gnd_host_id;
1672         *peerstamp = conn->gnc_peerstamp;
1673         *tx_seq = atomic_read(&conn->gnc_tx_seq);
1674         *rx_seq = atomic_read(&conn->gnc_rx_seq);
1675         *fmaq_len = kgnilnd_count_list(&conn->gnc_fmaq);
1676         *nfma = atomic_read(&conn->gnc_nlive_fma);
1677         *nrdma = atomic_read(&conn->gnc_nlive_rdma);
1678 out:
1679         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1680         return rc;
1681 }
1682
1683 /* needs write_lock on kgn_peer_conn_lock */
1684 int
1685 kgnilnd_close_peer_conns_locked(kgn_peer_t *peer, int why)
1686 {
1687         kgn_conn_t         *conn;
1688         struct list_head   *ctmp, *cnxt;
1689         int                 count = 0;
1690
1691         list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
1692                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
1693
1694                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1695                         continue;
1696
1697                 count++;
1698                 /* we mark gnc_needs_closing and increment kgn_npending_conns so that
1699                  * kgnilnd_del_conn_or_peer can wait on the other threads closing
1700                  * and cleaning up the connection.
1701                  */
1702                 if (!conn->gnc_needs_closing) {
1703                         conn->gnc_needs_closing = 1;
1704                         kgnilnd_admin_addref(kgnilnd_data.kgn_npending_conns);
1705                 }
1706                 kgnilnd_close_conn_locked(conn, why);
1707         }
1708         return count;
1709 }
1710
1711 int
1712 kgnilnd_report_node_state(lnet_nid_t nid, int down)
1713 {
1714         int         rc;
1715         kgn_peer_t  *peer, *new_peer;
1716         LIST_HEAD(zombies);
1717
1718         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1719         peer = kgnilnd_find_peer_locked(nid);
1720
1721         if (peer == NULL) {
1722                 int       i;
1723                 int       found_net = 0;
1724                 kgn_net_t *net;
1725
1726                 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1727
1728                 /* Don't add a peer for node up events */
1729                 if (down == GNILND_PEER_UP)
1730                         return 0;
1731
1732                 /* find any valid net - we don't care which one... */
1733                 down_read(&kgnilnd_data.kgn_net_rw_sem);
1734                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
1735                         list_for_each_entry(net, &kgnilnd_data.kgn_nets[i],
1736                                             gnn_list) {
1737                                 found_net = 1;
1738                                 break;
1739                         }
1740
1741                         if (found_net) {
1742                                 break;
1743                         }
1744                 }
1745                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1746
1747                 if (!found_net) {
1748                         CNETERR("Could not find a net for nid %lld\n", nid);
1749                         return 1;
1750                 }
1751
1752                 /* The nid passed in does not yet contain the net portion.
1753                  * Let's build it up now
1754                  */
1755                 nid = LNET_MKNID(LNET_NIDNET(net->gnn_ni->ni_nid), nid);
1756                 rc = kgnilnd_add_peer(net, nid, &new_peer);
1757
1758                 if (rc) {
1759                         CNETERR("Could not add peer for nid %lld, rc %d\n",
1760                                 nid, rc);
1761                         return 1;
1762                 }
1763
1764                 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1765                 peer = kgnilnd_find_peer_locked(nid);
1766
1767                 if (peer == NULL) {
1768                         CNETERR("Could not find peer for nid %lld\n", nid);
1769                         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1770                         return 1;
1771                 }
1772         }
1773
1774         peer->gnp_state = down;
1775
1776         if (down == GNILND_PEER_DOWN) {
1777                 kgn_conn_t *conn;
1778
1779                 peer->gnp_down_event_time = jiffies;
1780                 kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1781                 conn = kgnilnd_find_conn_locked(peer);
1782
1783                 if (conn != NULL) {
1784                         kgnilnd_close_conn_locked(conn, -ENETRESET);
1785                 }
1786         } else {
1787                 peer->gnp_up_event_time = jiffies;
1788         }
1789
1790         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1791
1792         if (down == GNILND_PEER_DOWN) {
1793                 /* using ENETRESET so we don't get messages from
1794                  * kgnilnd_tx_done
1795                  */
1796                 kgnilnd_txlist_done(&zombies, -ENETRESET);
1797                 kgnilnd_peer_notify(peer, -ECONNRESET, 0);
1798                 LCONSOLE_INFO("Received down event for nid %d\n",
1799                               LNET_NIDADDR(nid));
1800         }
1801
1802         return 0;
1803 }
1804
1805 int
1806 kgnilnd_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
1807 {
1808         struct libcfs_ioctl_data *data = arg;
1809         kgn_net_t                *net = ni->ni_data;
1810         int                       rc = -EINVAL;
1811
1812         LASSERT(ni == net->gnn_ni);
1813
1814         switch (cmd) {
1815         case IOC_LIBCFS_GET_PEER: {
1816                 lnet_nid_t   nid = 0;
1817                 kgn_peer_t  *peer = NULL;
1818                 __u32 nic_addr = 0;
1819                 __u64 peerstamp = 0;
1820                 int peer_refcount = 0, peer_connecting = 0;
1821                 int device_id = 0;
1822                 int tx_seq = 0, rx_seq = 0;
1823                 int fmaq_len = 0, nfma = 0, nrdma = 0;
1824
1825                 rc = kgnilnd_get_peer_info(data->ioc_count, &peer,
1826                                            &nid, &nic_addr, &peer_refcount,
1827                                            &peer_connecting);
1828                 if (rc)
1829                         break;
1830
1831                 /* Barf */
1832                 /* LNET_MKNID is used to mask from LNet the multiplexing/demultiplexing of connections and peers.
1833                  * LNet assumes a conn and peer per net; LNET_MKNID/LNET_NIDADDR allow us to let LNet see what it
1834                  * wants to see instead of the underlying network that is being used to send the data.
1835                  */
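                /* For illustration (example values only): if ni->ni_nid is
                 * 512@gni1 but the peer is tracked internally as 512@gni, the
                 * line below reports LNET_MKNID(LNET_NIDNET(512@gni1), 512),
                 * i.e. 512@gni1, so userspace sees the NID it asked about. */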
1836                 data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(nid));
1837                 data->ioc_flags  = peer_connecting;
1838                 data->ioc_count  = peer_refcount;
1839
1840                 rc = kgnilnd_get_conn_info(peer, &device_id, &peerstamp,
1841                                            &tx_seq, &rx_seq, &fmaq_len,
1842                                            &nfma, &nrdma);
1843
1844                 /* This is allowable - a persistent peer may not
1845                  * have a connection */
1846                 if (rc) {
1847                         /* flag to indicate we are not connected -
1848                          * need to print as such */
1849                         data->ioc_flags |= (1<<16);
1850                         rc = 0;
1851                 } else {
1852                         /* still barf */
1853                         data->ioc_net = device_id;
1854                         data->ioc_u64[0] = peerstamp;
1855                         data->ioc_u32[0] = fmaq_len;
1856                         data->ioc_u32[1] = nfma;
1857                         data->ioc_u32[2] = tx_seq;
1858                         data->ioc_u32[3] = rx_seq;
1859                         data->ioc_u32[4] = nrdma;
1860                 }
1861                 break;
1862         }
1863         case IOC_LIBCFS_ADD_PEER: {
1864                 /* just a dummy value to allow using the common interface */
1865                 kgn_peer_t      *peer;
1866                 rc = kgnilnd_add_peer(net, data->ioc_nid, &peer);
1867                 break;
1868         }
1869         case IOC_LIBCFS_DEL_PEER: {
1870                 /* NULL is passed in so it affects all peers in existence without regard to network,
1871                  * as the peer may not exist on the network LNET believes it to be on.
1872                  */
1873                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1874                                               GNILND_DEL_PEER, -EUCLEAN);
1875                 break;
1876         }
1877         case IOC_LIBCFS_GET_CONN: {
1878                 kgn_conn_t *conn = kgnilnd_get_conn_by_idx(data->ioc_count);
1879
1880                 if (conn == NULL)
1881                         rc = -ENOENT;
1882                 else {
1883                         rc = 0;
1884                         /* LNET_MKNID is used to build the correct address based on what LNET wants to see instead of
1885                          * the generic connection that is used to send the data
1886                          */
1887                         data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(conn->gnc_peer->gnp_nid));
1888                         data->ioc_u32[0] = conn->gnc_device->gnd_id;
1889                         kgnilnd_conn_decref(conn);
1890                 }
1891                 break;
1892         }
1893         case IOC_LIBCFS_CLOSE_CONNECTION: {
1894                 /* use error = -ENETRESET to indicate it was lctl disconnect */
1895                 /* NULL is passed in so it affects all the nets, as the connection is virtual
1896                  * and may not exist on the network LNET believes it to be on.
1897                  */
1898                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1899                                               GNILND_DEL_CONN, -ENETRESET);
1900                 break;
1901         }
1902         case IOC_LIBCFS_PUSH_CONNECTION: {
1903                 /* we use this to flush purgatory */
1904                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1905                                               GNILND_CLEAR_PURGATORY, -EUCLEAN);
1906                 break;
1907         }
1908         case IOC_LIBCFS_REGISTER_MYNID: {
1909                 /* Ignore if this is a noop */
1910                 if (data->ioc_nid == ni->ni_nid) {
1911                         rc = 0;
1912                 } else {
1913                         CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
1914                                libcfs_nid2str(data->ioc_nid),
1915                                libcfs_nid2str(ni->ni_nid));
1916                         rc = -EINVAL;
1917                 }
1918                 break;
1919         }
1920         }
1921
1922         return rc;
1923 }
1924
1925 void
1926 kgnilnd_query(struct lnet_ni *ni, lnet_nid_t nid, time64_t *when)
1927 {
1928         kgn_net_t               *net = ni->ni_data;
1929         kgn_tx_t                *tx;
1930         kgn_peer_t              *peer = NULL;
1931         kgn_conn_t              *conn = NULL;
1932         struct lnet_process_id       id = {
1933                 .nid = nid,
1934                 .pid = LNET_PID_LUSTRE,
1935         };
1936         ENTRY;
1937
1938         /* I expect to find him, so only take a read lock */
1939         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1940         peer = kgnilnd_find_peer_locked(nid);
1941         if (peer != NULL) {
1942                 /* LIE if in a quiesce - we will update the timeouts after,
1943                  * but we don't want sends failing during it */
1944                 if (kgnilnd_data.kgn_quiesce_trigger) {
1945                         *when = ktime_get_seconds();
1946                         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1947                         GOTO(out, 0);
1948                 }
1949
1950                 /* Update to best guess, might refine on later checks */
1951                 *when = peer->gnp_last_alive;
1952
1953                 /* we have a peer, how about a conn? */
1954                 conn = kgnilnd_find_conn_locked(peer);
1955
1956                 if (conn == NULL)  {
1957                         /* if there is no conn, check peer last errno to see if clean disconnect
1958                          * - if it was, we lie to LNet because we believe a TX would complete
1959                          * on reconnect */
1960                         if (kgnilnd_conn_clean_errno(peer->gnp_last_errno)) {
1961                                 *when = ktime_get_seconds();
1962                         }
1963                         /* we still want to fire a TX and new conn in this case */
1964                 } else {
1965                         /* gnp_last_alive is valid, run for the hills */
1966                         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1967                         GOTO(out, 0);
1968                 }
1969         }
1970         /* if we get here, either we have no peer or no conn for him, so fire off
1971          * new TX to trigger conn setup */
1972         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1973
1974         /* if we couldn't find him, we'll fire up a TX and get connected -
1975          * if we don't do this, after ni_peer_timeout, LNet will declare him dead.
1976          * So really we treat kgnilnd_query as a bit of a 'connect now' type
1977          * event because it'll only do this when it wants to send.
1978          *
1979          * Use a real TX for this to get the proper gnp_tx_queue behavior, etc.;
1980          * normally we'd use kgnilnd_send_ctlmsg for this, but we don't really
1981          * care whether this goes out quickly since we already know we need a new conn
1982          * formed */
1983         if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
1984                 return;
1985
1986         tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, ni->ni_nid);
1987         if (tx != NULL) {
1988                 kgnilnd_launch_tx(tx, net, &id);
1989         }
1990 out:
1991         CDEBUG(D_NETTRACE, "peer 0x%p->%s when %lld\n", peer,
1992                libcfs_nid2str(nid), *when);
1993         EXIT;
1994 }
1995
1996 int
1997 kgnilnd_dev_init(kgn_device_t *dev)
1998 {
1999         gni_return_t      rrc;
2000         int               rc = 0;
2001         unsigned int      cq_size;
2002         ENTRY;
2003
2004         /* size of these CQs should be able to accommodate the outgoing
2005          * RDMA and SMSG transactions.  Since we don't really know what we
2006          * need here, we'll take credits * 2 * 3 to allow a bunch.
2007          * We need to dig into this more with the performance work. */
2008         cq_size = *kgnilnd_tunables.kgn_credits * 2 * 3;
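        /* e.g. with kgn_credits = 256 (an illustrative value, not necessarily
         * the module default) this yields cq_size = 256 * 2 * 3 = 1536 CQ
         * entries for the FMA send CQ created below. */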
2009
2010         rrc = kgnilnd_cdm_create(dev->gnd_id, *kgnilnd_tunables.kgn_ptag,
2011                                  *kgnilnd_tunables.kgn_pkey, 0,
2012                                  &dev->gnd_domain);
2013         if (rrc != GNI_RC_SUCCESS) {
2014                 CERROR("Can't create CDM %d (%d)\n", dev->gnd_id, rrc);
2015                 GOTO(failed, rc = -ENODEV);
2016         }
2017
2018         rrc = kgnilnd_cdm_attach(dev->gnd_domain, dev->gnd_id,
2019                                  &dev->gnd_host_id, &dev->gnd_handle);
2020         if (rrc != GNI_RC_SUCCESS) {
2021                 CERROR("Can't attach CDM to device %d (%d)\n",
2022                         dev->gnd_id, rrc);
2023                 GOTO(failed, rc = -ENODEV);
2024         }
2025
2026         /* a bit gross, but not much we can do - Aries Sim doesn't have
2027          * hardcoded NIC/NID that we can use */
2028         rc = kgnilnd_setup_nic_translation(dev->gnd_host_id);
2029         if (rc != 0)
2030                 GOTO(failed, rc = -ENODEV);
2031
2032         /* only dev 0 gets the errors - no need to reset the stack twice
2033          * - this works because we have a single PTAG; if we had more,
2034          * then we'd need to have multiple handlers */
2035         if (dev->gnd_id == 0) {
2036                 rrc = kgnilnd_subscribe_errors(dev->gnd_handle,
2037                                                 GNI_ERRMASK_CRITICAL |
2038                                                 GNI_ERRMASK_UNKNOWN_TRANSACTION,
2039                                               0, NULL, kgnilnd_critical_error,
2040                                               &dev->gnd_err_handle);
2041                 if (rrc != GNI_RC_SUCCESS) {
2042                         CERROR("Can't subscribe for errors on device %d: rc %d\n",
2043                                 dev->gnd_id, rrc);
2044                         GOTO(failed, rc = -ENODEV);
2045                 }
2046
2047                 rc = kgnilnd_set_quiesce_callback(dev->gnd_handle,
2048                                                   kgnilnd_quiesce_end_callback);
2049                 if (rc != GNI_RC_SUCCESS) {
2050                         CERROR("Can't subscribe for quiesce callback on device %d: rc %d\n",
2051                                 dev->gnd_id, rc);
2052                         GOTO(failed, rc = -ENODEV);
2053                 }
2054         }
2055
2056         rc = kgnilnd_nicaddr_to_nid(dev->gnd_host_id, &dev->gnd_nid);
2057         if (rc < 0) {
2058                 /* log messages during startup */
2059                 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
2060                         CERROR("couldn't translate host_id 0x%x to nid. rc %d\n",
2061                                 dev->gnd_host_id, rc);
2062                 }
2063                 GOTO(failed, rc = -ESRCH);
2064         }
2065         CDEBUG(D_NET, "NIC %x -> NID %d\n", dev->gnd_host_id, dev->gnd_nid);
2066
2067         rrc = kgnilnd_cq_create(dev->gnd_handle, *kgnilnd_tunables.kgn_credits,
2068                                 0, kgnilnd_device_callback,
2069                                 dev->gnd_id, &dev->gnd_snd_rdma_cqh);
2070         if (rrc != GNI_RC_SUCCESS) {
2071                 CERROR("Can't create rdma send cq size %u for device "
2072                        "%d (%d)\n", cq_size, dev->gnd_id, rrc);
2073                 GOTO(failed, rc = -EINVAL);
2074         }
2075
2076         rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
2077                         0, kgnilnd_device_callback, dev->gnd_id,
2078                         &dev->gnd_snd_fma_cqh);
2079         if (rrc != GNI_RC_SUCCESS) {
2080                 CERROR("Can't create fma send cq size %u for device %d (%d)\n",
2081                        cq_size, dev->gnd_id, rrc);
2082                 GOTO(failed, rc = -EINVAL);
2083         }
2084
2085         /* This one we size differently - overflows are possible and it needs to be
2086          * sized based on machine size */
2087         rrc = kgnilnd_cq_create(dev->gnd_handle,
2088                         *kgnilnd_tunables.kgn_fma_cq_size,
2089                         0, kgnilnd_device_callback, dev->gnd_id,
2090                         &dev->gnd_rcv_fma_cqh);
2091         if (rrc != GNI_RC_SUCCESS) {
2092                 CERROR("Can't create fma cq size %d for device %d (%d)\n",
2093                        *kgnilnd_tunables.kgn_fma_cq_size, dev->gnd_id, rrc);
2094                 GOTO(failed, rc = -EINVAL);
2095         }
2096
2097         rrc = kgnilnd_register_smdd_buf(dev);
2098         if (rrc != GNI_RC_SUCCESS) {
2099                 GOTO(failed, rc = -EINVAL);
2100         }
2101
2102         RETURN(0);
2103
2104 failed:
2105         kgnilnd_dev_fini(dev);
2106         RETURN(rc);
2107 }
2108
2109 void
2110 kgnilnd_dev_fini(kgn_device_t *dev)
2111 {
2112         gni_return_t rrc;
2113         ENTRY;
2114
2115         /* At quiesce or rest time, need to loop through and clear gnd_ready_conns ?*/
2116         LASSERTF(list_empty(&dev->gnd_ready_conns) &&
2117                  list_empty(&dev->gnd_map_tx) &&
2118                  list_empty(&dev->gnd_rdmaq) &&
2119                  list_empty(&dev->gnd_delay_conns),
2120                  "dev 0x%p ready_conns %d@0x%p delay_conns %d@0x%p "
2121                  "map_tx %d@0x%p rdmaq %d@0x%p\n",
2122                  dev, kgnilnd_count_list(&dev->gnd_ready_conns), &dev->gnd_ready_conns,
2123                  kgnilnd_count_list(&dev->gnd_delay_conns), &dev->gnd_delay_conns,
2124                  kgnilnd_count_list(&dev->gnd_map_tx), &dev->gnd_map_tx,
2125                  kgnilnd_count_list(&dev->gnd_rdmaq), &dev->gnd_rdmaq);
2126
2127         /* These should follow from tearing down all connections */
2128         LASSERTF(dev->gnd_map_nphys == 0 && dev->gnd_map_physnop == 0,
2129                 "%d physical mappings of %d pages still mapped\n",
2130                  dev->gnd_map_nphys, dev->gnd_map_physnop);
2131
2132         LASSERTF(dev->gnd_map_nvirt == 0 && dev->gnd_map_virtnob == 0,
2133                 "%d virtual mappings of %llu bytes still mapped\n",
2134                  dev->gnd_map_nvirt, dev->gnd_map_virtnob);
2135
2136         LASSERTF(atomic_read(&dev->gnd_n_mdd) == 0 &&
2137                  atomic_read(&dev->gnd_n_mdd_held) == 0 &&
2138                  atomic64_read(&dev->gnd_nbytes_map) == 0,
2139                 "%d SMSG mappings of %ld bytes still mapped or held %d\n",
2140                  atomic_read(&dev->gnd_n_mdd),
2141                  atomic64_read(&dev->gnd_nbytes_map), atomic_read(&dev->gnd_n_mdd_held));
2142
2143         LASSERT(list_empty(&dev->gnd_map_list));
2144
2145         /* What other assertions needed to ensure all connections torn down ? */
2146
2147         /* check all counters == 0 (EP, MDD, etc) */
2148
2149         /* if we are resetting due to quiesce (stack reset), don't check
2150          * thread states */
2151         LASSERTF(kgnilnd_data.kgn_quiesce_trigger ||
2152                 atomic_read(&kgnilnd_data.kgn_nthreads) == 0,
2153                 "tried to shutdown with threads active\n");
2154
2155         if (dev->gnd_smdd_hold_buf) {
2156                 rrc = kgnilnd_deregister_smdd_buf(dev);
2157                 LASSERTF(rrc == GNI_RC_SUCCESS,
2158                         "bad rc from deregistration of sMDD buffer: %d\n", rrc);
2159                 dev->gnd_smdd_hold_buf = NULL;
2160         }
2161
2162         if (dev->gnd_rcv_fma_cqh) {
2163                 rrc = kgnilnd_cq_destroy(dev->gnd_rcv_fma_cqh);
2164                 LASSERTF(rrc == GNI_RC_SUCCESS,
2165                         "bad rc from gni_cq_destroy on rcv_fma_cqh: %d\n", rrc);
2166                 dev->gnd_rcv_fma_cqh = NULL;
2167         }
2168
2169         if (dev->gnd_snd_rdma_cqh) {
2170                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_rdma_cqh);
2171                 LASSERTF(rrc == GNI_RC_SUCCESS,
2172                         "bad rc from gni_cq_destroy on send_rdma_cqh: %d\n", rrc);
2173                 dev->gnd_snd_rdma_cqh = NULL;
2174         }
2175
2176         if (dev->gnd_snd_fma_cqh) {
2177                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_fma_cqh);
2178                 LASSERTF(rrc == GNI_RC_SUCCESS,
2179                         "bad rc from gni_cq_destroy on snd_fma_cqh: %d\n", rrc);
2180                 dev->gnd_snd_fma_cqh = NULL;
2181         }
2182
2183         if (dev->gnd_err_handle) {
2184                 rrc = kgnilnd_release_errors(dev->gnd_err_handle);
2185                 LASSERTF(rrc == GNI_RC_SUCCESS,
2186                         "bad rc from gni_release_errors: %d\n", rrc);
2187                 dev->gnd_err_handle = NULL;
2188         }
2189
2190         if (dev->gnd_domain) {
2191                 rrc = kgnilnd_cdm_destroy(dev->gnd_domain);
2192                 LASSERTF(rrc == GNI_RC_SUCCESS,
2193                         "bad rc from gni_cdm_destroy: %d\n", rrc);
2194                 dev->gnd_domain = NULL;
2195         }
2196
2197         EXIT;
2198 }
2199
2200 int kgnilnd_base_startup(void)
2201 {
2202         struct timeval       tv;
2203         int                  pkmem = atomic_read(&libcfs_kmemory);
2204         int                  rc;
2205         int                  i;
2206         kgn_device_t        *dev;
2207         struct task_struct  *thrd;
2208
2209 #if defined(CONFIG_CRAY_XT) && !defined(CONFIG_CRAY_COMPUTE)
2210         /* limit how much memory can be allocated for fma blocks in
2211          * instances where many nodes need to reconnect at the same time */
2212         struct sysinfo si;
2213         si_meminfo(&si);
2214         kgnilnd_data.free_pages_limit = si.totalram/4;
2215 #endif
2216
2217         ENTRY;
2218
2219         LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_NOTHING,
2220                 "init %d\n", kgnilnd_data.kgn_init);
2221
2222         /* zero pointers, flags etc */
2223         memset(&kgnilnd_data, 0, sizeof(kgnilnd_data));
2224         kgnilnd_check_kgni_version();
2225
2226         /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
2227          * a unique (for all time) connstamp so we can uniquely identify
2228          * the sender.  The connstamp is an incrementing counter
2229          * initialised with seconds + microseconds at startup time.  So we
2230          * rely on NOT creating connections more frequently on average than
2231          * 1MHz to ensure we don't use old connstamps when we reboot. */
2232         do_gettimeofday(&tv);
2233         kgnilnd_data.kgn_connstamp =
2234                  kgnilnd_data.kgn_peerstamp =
2235                         (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
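        /* e.g. (illustrative numbers) tv_sec = 1500000000, tv_usec = 654321
         * gives an initial stamp of 1500000000654321; as long as we average
         * fewer than one new connection per microsecond, stamps issued after a
         * reboot will always exceed any stamp issued before it. */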
2236
2237         init_rwsem(&kgnilnd_data.kgn_net_rw_sem);
2238
2239         for (i = 0; i < GNILND_MAXDEVS; i++) {
2240                 kgn_device_t  *dev = &kgnilnd_data.kgn_devices[i];
2241
2242                 dev->gnd_id = i;
2243                 INIT_LIST_HEAD(&dev->gnd_ready_conns);
2244                 INIT_LIST_HEAD(&dev->gnd_delay_conns);
2245                 INIT_LIST_HEAD(&dev->gnd_map_tx);
2246                 INIT_LIST_HEAD(&dev->gnd_fma_buffs);
2247                 mutex_init(&dev->gnd_cq_mutex);
2248                 mutex_init(&dev->gnd_fmablk_mutex);
2249                 spin_lock_init(&dev->gnd_fmablk_lock);
2250                 init_waitqueue_head(&dev->gnd_waitq);
2251                 init_waitqueue_head(&dev->gnd_dgram_waitq);
2252                 init_waitqueue_head(&dev->gnd_dgping_waitq);
2253                 spin_lock_init(&dev->gnd_lock);
2254                 INIT_LIST_HEAD(&dev->gnd_map_list);
2255                 spin_lock_init(&dev->gnd_map_lock);
2256                 atomic_set(&dev->gnd_nfmablk, 0);
2257                 atomic_set(&dev->gnd_fmablk_vers, 1);
2258                 atomic_set(&dev->gnd_neps, 0);
2259                 atomic_set(&dev->gnd_canceled_dgrams, 0);
2260                 INIT_LIST_HEAD(&dev->gnd_connd_peers);
2261                 spin_lock_init(&dev->gnd_connd_lock);
2262                 spin_lock_init(&dev->gnd_dgram_lock);
2263                 spin_lock_init(&dev->gnd_rdmaq_lock);
2264                 INIT_LIST_HEAD(&dev->gnd_rdmaq);
2265                 init_rwsem(&dev->gnd_conn_sem);
2266
2267                 /* alloc & setup nid based dgram table */
2268                 LIBCFS_ALLOC(dev->gnd_dgrams,
2269                             sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2270
2271                 if (dev->gnd_dgrams == NULL)
2272                         GOTO(failed, rc = -ENOMEM);
2273
2274                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2275                         INIT_LIST_HEAD(&dev->gnd_dgrams[i]);
2276                 }
2277                 atomic_set(&dev->gnd_ndgrams, 0);
2278                 atomic_set(&dev->gnd_nwcdgrams, 0);
2279                 /* setup timer for RDMAQ processing */
2280                 setup_timer(&dev->gnd_rdmaq_timer, kgnilnd_schedule_device_timer,
2281                             (unsigned long)dev);
2282
2283                 /* setup timer for mapping processing */
2284                 setup_timer(&dev->gnd_map_timer, kgnilnd_schedule_device_timer,
2285                             (unsigned long)dev);
2286
2287         }
2288
2289         /* CQID 0 isn't allowed, set to MAX_MSG_ID - 1 to check for conflicts early */
2290         kgnilnd_data.kgn_next_cqid = GNILND_MAX_MSG_ID - 1;
2291         kgnilnd_data.kgn_new_min_timeout = *kgnilnd_tunables.kgn_timeout;
2292         init_waitqueue_head(&kgnilnd_data.kgn_reaper_waitq);
2293         init_waitqueue_head(&kgnilnd_data.kgn_ruhroh_waitq);
2294         spin_lock_init(&kgnilnd_data.kgn_reaper_lock);
2295
2296         mutex_init(&kgnilnd_data.kgn_quiesce_mutex);
2297         atomic_set(&kgnilnd_data.kgn_nquiesce, 0);
2298         atomic_set(&kgnilnd_data.kgn_npending_conns, 0);
2299         atomic_set(&kgnilnd_data.kgn_npending_unlink, 0);
2300         atomic_set(&kgnilnd_data.kgn_npending_detach, 0);
2301         atomic_set(&kgnilnd_data.kgn_rev_offset, 0);
2302         atomic_set(&kgnilnd_data.kgn_rev_length, 0);
2303         atomic_set(&kgnilnd_data.kgn_rev_copy_buff, 0);
2304
2305         /* OK to call kgnilnd_api_shutdown() to cleanup now */
2306         kgnilnd_data.kgn_init = GNILND_INIT_DATA;
2307         try_module_get(THIS_MODULE);
2308
2309         rwlock_init(&kgnilnd_data.kgn_peer_conn_lock);
2310
2311         LIBCFS_ALLOC(kgnilnd_data.kgn_peers,
2312                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2313
2314         if (kgnilnd_data.kgn_peers == NULL)
2315                 GOTO(failed, rc = -ENOMEM);
2316
2317         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2318                 INIT_LIST_HEAD(&kgnilnd_data.kgn_peers[i]);
2319         }
2320
2321         LIBCFS_ALLOC(kgnilnd_data.kgn_conns,
2322                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2323
2324         if (kgnilnd_data.kgn_conns == NULL)
2325                 GOTO(failed, rc = -ENOMEM);
2326
2327         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2328                 INIT_LIST_HEAD(&kgnilnd_data.kgn_conns[i]);
2329         }
2330
2331         LIBCFS_ALLOC(kgnilnd_data.kgn_nets,
2332                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_net_hash_size);
2333
2334         if (kgnilnd_data.kgn_nets == NULL)
2335                 GOTO(failed, rc = -ENOMEM);
2336
2337         for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
2338                 INIT_LIST_HEAD(&kgnilnd_data.kgn_nets[i]);
2339         }
2340
2341         kgnilnd_data.kgn_mbox_cache =
2342                 kmem_cache_create("kgn_mbox_block", GNILND_MBOX_SIZE, 0,
2343                                   SLAB_HWCACHE_ALIGN, NULL);
2344         if (kgnilnd_data.kgn_mbox_cache == NULL) {
2345                 CERROR("Can't create slab for physical mbox blocks\n");
2346                 GOTO(failed, rc = -ENOMEM);
2347         }
2348
2349         kgnilnd_data.kgn_rx_cache =
2350                 kmem_cache_create("kgn_rx_t", sizeof(kgn_rx_t), 0, 0, NULL);
2351         if (kgnilnd_data.kgn_rx_cache == NULL) {
2352                 CERROR("Can't create slab for kgn_rx_t descriptors\n");
2353                 GOTO(failed, rc = -ENOMEM);
2354         }
2355
2356         kgnilnd_data.kgn_tx_cache =
2357                 kmem_cache_create("kgn_tx_t", sizeof(kgn_tx_t), 0, 0, NULL);
2358         if (kgnilnd_data.kgn_tx_cache == NULL) {
2359                 CERROR("Can't create slab for kgn_tx_t\n");
2360                 GOTO(failed, rc = -ENOMEM);
2361         }
2362
2363         kgnilnd_data.kgn_tx_phys_cache =
2364                 kmem_cache_create("kgn_tx_phys",
2365                                    LNET_MAX_IOV * sizeof(gni_mem_segment_t),
2366                                    0, 0, NULL);
2367         if (kgnilnd_data.kgn_tx_phys_cache == NULL) {
2368                 CERROR("Can't create slab for kgn_tx_phys\n");
2369                 GOTO(failed, rc = -ENOMEM);
2370         }
2371
2372         kgnilnd_data.kgn_dgram_cache =
2373                 kmem_cache_create("kgn_dgram_t", sizeof(kgn_dgram_t), 0, 0, NULL);
2374         if (kgnilnd_data.kgn_dgram_cache == NULL) {
2375                 CERROR("Can't create slab for outgoing datagrams\n");
2376                 GOTO(failed, rc = -ENOMEM);
2377         }
2378
2379         /* allocate a MAX_IOV array of page pointers for each cpu */
2380         kgnilnd_data.kgn_cksum_map_pages = kmalloc(num_possible_cpus() * sizeof (struct page *),
2381                                                    GFP_KERNEL);
2382         if (kgnilnd_data.kgn_cksum_map_pages == NULL) {
2383                 CERROR("Can't allocate vmap cksum pages\n");
2384                 GOTO(failed, rc = -ENOMEM);
2385         }
2386         kgnilnd_data.kgn_cksum_npages = num_possible_cpus();
2387         memset(kgnilnd_data.kgn_cksum_map_pages, 0,
2388                 kgnilnd_data.kgn_cksum_npages * sizeof (struct page *));
2389
2390         for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
2391                 kgnilnd_data.kgn_cksum_map_pages[i] = kmalloc(LNET_MAX_IOV * sizeof (struct page *),
2392                                                               GFP_KERNEL);
2393                 if (kgnilnd_data.kgn_cksum_map_pages[i] == NULL) {
2394                         CERROR("Can't allocate vmap cksum pages for cpu %d\n", i);
2395                         GOTO(failed, rc = -ENOMEM);
2396                 }
2397         }
2398
2399         LASSERT(kgnilnd_data.kgn_ndevs == 0);
2400
2401         /* Use all available GNI devices */
2402         for (i = 0; i < GNILND_MAXDEVS; i++) {
2403                 dev = &kgnilnd_data.kgn_devices[kgnilnd_data.kgn_ndevs];
2404
2405                 rc = kgnilnd_dev_init(dev);
2406                 if (rc == 0) {
2407                         /* Increment here so base_shutdown cleans it up */
2408                         kgnilnd_data.kgn_ndevs++;
2409
2410                         rc = kgnilnd_allocate_phys_fmablk(dev);
2411                         if (rc)
2412                                 GOTO(failed, rc);
2413                 }
2414         }
2415
2416         if (kgnilnd_data.kgn_ndevs == 0) {
2417                 CERROR("Can't initialise any GNI devices\n");
2418                 GOTO(failed, rc = -ENODEV);
2419         }
2420
2421         rc = kgnilnd_thread_start(kgnilnd_reaper, NULL, "kgnilnd_rpr", 0);
2422         if (rc != 0) {
2423                 CERROR("Can't spawn gnilnd reaper: %d\n", rc);
2424                 GOTO(failed, rc);
2425         }
2426
2427         rc = kgnilnd_start_rca_thread();
2428         if (rc != 0) {
2429                 CERROR("Can't spawn gnilnd rca: %d\n", rc);
2430                 GOTO(failed, rc);
2431         }
2432
2433         /*
2434          * Start ruhroh thread.  We can't use kgnilnd_thread_start() because
2435          * we don't want this thread included in kgnilnd_data.kgn_nthreads
2436          * count.  This thread controls quiesce, so it mustn't
2437          * quiesce itself.
2438          */
2439         thrd = kthread_run(kgnilnd_ruhroh_thread, NULL, "%s_%02d", "kgnilnd_rr", 0);
2440         if (IS_ERR(thrd)) {
2441                 rc = PTR_ERR(thrd);
2442                 CERROR("Can't spawn gnilnd ruhroh thread: %d\n", rc);
2443                 GOTO(failed, rc);
2444         }
2445
2446         /* threads will load balance across devs as they are available */
2447         if (*kgnilnd_tunables.kgn_thread_affinity) {
2448                 rc = kgnilnd_start_sd_threads();
2449                 if (rc != 0)
2450                         GOTO(failed, rc);
2451         } else {
2452                 for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
2453                         rc = kgnilnd_thread_start(kgnilnd_scheduler,
2454                                                   (void *)((long)i),
2455                                                   "kgnilnd_sd", i);
2456                         if (rc != 0) {
2457                                 CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
2458                                        i, rc);
2459                                 GOTO(failed, rc);
2460                         }
2461                 }
2462         }
2463
2464         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2465                 dev = &kgnilnd_data.kgn_devices[i];
2466                 rc = kgnilnd_thread_start(kgnilnd_dgram_mover, dev,
2467                                           "kgnilnd_dg", dev->gnd_id);
2468                 if (rc != 0) {
2469                         CERROR("Can't spawn gnilnd dgram_mover[%d]: %d\n",
2470                                dev->gnd_id, rc);
2471                         GOTO(failed, rc);
2472                 }
2473
2474                 rc = kgnilnd_thread_start(kgnilnd_dgram_waitq, dev,
2475                                           "kgnilnd_dgn", dev->gnd_id);
2476                 if (rc != 0) {
2477                         CERROR("Can't spawn gnilnd dgram_waitq[%d]: %d\n",
2478                                 dev->gnd_id, rc);
2479                         GOTO(failed, rc);
2480                 }
2481
2482                 rc = kgnilnd_setup_wildcard_dgram(dev);
2483
2484                 if (rc != 0) {
2485                         CERROR("Can't create wildcard dgrams[%d]: %d\n",
2486                                 dev->gnd_id, rc);
2487                         GOTO(failed, rc);
2488                 }
2489         }
2490
2491         /* flag everything initialised */
2492         kgnilnd_data.kgn_init = GNILND_INIT_ALL;
2493         /*****************************************************/
2494
2495         CDEBUG(D_MALLOC, "initial kmem %d\n", pkmem);
2496         RETURN(0);
2497
2498 failed:
2499         kgnilnd_base_shutdown();
2500         kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
2501         RETURN(rc);
2502 }
2503
2504 void
2505 kgnilnd_base_shutdown(void)
2506 {
2507         int                     i, j;
2508         ENTRY;
2509
2510         while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_PAUSE_SHUTDOWN, 1)) {};
2511
2512         kgnilnd_data.kgn_wc_kill = 1;
2513
2514         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2515                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2516                 kgnilnd_cancel_wc_dgrams(dev);
2517                 kgnilnd_cancel_dgrams(dev);
2518                 kgnilnd_del_conn_or_peer(NULL, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
2519                 kgnilnd_wait_for_canceled_dgrams(dev);
2520         }
2521
2522         /* We need to verify there are no conns left before we let the threads
2523          * shut down, otherwise we could clean up the peers but still have
2524          * some outstanding conns due to orphaned datagram conns that are
2525          * being cleaned up.
2526          */
2527         i = 2;
2528         while (atomic_read(&kgnilnd_data.kgn_nconns) != 0) {
2529                 i++;
2530
2531                 for(j = 0; j < kgnilnd_data.kgn_ndevs; ++j) {
2532                         kgn_device_t *dev = &kgnilnd_data.kgn_devices[j];
2533                         kgnilnd_schedule_device(dev);
2534                 }
2535
2536                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2537                         "Waiting for conns to be cleaned up %d\n", atomic_read(&kgnilnd_data.kgn_nconns));
2538                 set_current_state(TASK_UNINTERRUPTIBLE);
2539                 schedule_timeout(cfs_time_seconds(1));
2540         }
2541         /* Peer state all cleaned up BEFORE setting shutdown, so threads don't
2542          * have to worry about shutdown races.  NB connections may be created
2543          * while there are still active connds, but these will be temporary
2544          * since peer creation always fails after the listener has started to
2545          * shut down.
2546          * All peers should have been cleared out on the nets. */
2547         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2548                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2549
2550         /* Wait for the ruhroh thread to shut down. */
2551         kgnilnd_data.kgn_ruhroh_shutdown = 1;
2552         wake_up(&kgnilnd_data.kgn_ruhroh_waitq);
2553         i = 2;
2554         while (kgnilnd_data.kgn_ruhroh_running != 0) {
2555                 i++;
2556                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2557                        "Waiting for ruhroh thread to terminate\n");
2558                 set_current_state(TASK_UNINTERRUPTIBLE);
2559                 schedule_timeout(cfs_time_seconds(1));
2560         }
2561
2562         /* Flag threads to terminate */
2563         kgnilnd_data.kgn_shutdown = 1;
2564
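        /* with kgn_shutdown set, unmap the FMA blocks and poke each device so
         * its scheduler, dgram and dgping threads notice the flag and exit */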
2565         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2566                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2567
2568                 /* should clear all the MDDs */
2569                 kgnilnd_unmap_fma_blocks(dev);
2570
2571                 kgnilnd_schedule_device(dev);
2572                 wake_up_all(&dev->gnd_dgram_waitq);
2573                 wake_up_all(&dev->gnd_dgping_waitq);
2574                 LASSERT(list_empty(&dev->gnd_connd_peers));
2575         }
2576
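        /* wake the reaper (and the RCA thread if any threads remain) so they
         * can also see kgn_shutdown and exit */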
2577         spin_lock(&kgnilnd_data.kgn_reaper_lock);
2578         wake_up_all(&kgnilnd_data.kgn_reaper_waitq);
2579         spin_unlock(&kgnilnd_data.kgn_reaper_lock);
2580
2581         if (atomic_read(&kgnilnd_data.kgn_nthreads))
2582                 kgnilnd_wakeup_rca_thread();
2583
2584         /* Wait for threads to exit */
2585         i = 2;
2586         while (atomic_read(&kgnilnd_data.kgn_nthreads) != 0) {
2587                 i++;
2588                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2589                        "Waiting for %d threads to terminate\n",
2590                        atomic_read(&kgnilnd_data.kgn_nthreads));
2591                 set_current_state(TASK_UNINTERRUPTIBLE);
2592                 schedule_timeout(cfs_time_seconds(1));
2593         }
2594
2595         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2596                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2597
2598         if (kgnilnd_data.kgn_peers != NULL) {
2599                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
2600                         LASSERT(list_empty(&kgnilnd_data.kgn_peers[i]));
2601
2602                 LIBCFS_FREE(kgnilnd_data.kgn_peers,
2603                             sizeof (struct list_head) *
2604                             *kgnilnd_tunables.kgn_peer_hash_size);
2605         }
2606
2607         down_write(&kgnilnd_data.kgn_net_rw_sem);
2608         if (kgnilnd_data.kgn_nets != NULL) {
2609                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++)
2610                         LASSERT(list_empty(&kgnilnd_data.kgn_nets[i]));
2611
2612                 LIBCFS_FREE(kgnilnd_data.kgn_nets,
2613                             sizeof (struct list_head) *
2614                             *kgnilnd_tunables.kgn_net_hash_size);
2615         }
2616         up_write(&kgnilnd_data.kgn_net_rw_sem);
2617
2618         LASSERTF(atomic_read(&kgnilnd_data.kgn_nconns) == 0,
2619                 "conns left %d\n", atomic_read(&kgnilnd_data.kgn_nconns));
2620
2621         if (kgnilnd_data.kgn_conns != NULL) {
2622                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
2623                         LASSERT(list_empty(&kgnilnd_data.kgn_conns[i]));
2624
2625                 LIBCFS_FREE(kgnilnd_data.kgn_conns,
2626                             sizeof (struct list_head) *
2627                             *kgnilnd_tunables.kgn_peer_hash_size);
2628         }
2629
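        /* per-device teardown: finish the device, verify no datagrams remain,
         * free the datagram hash and the physical FMA blocks */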
2630         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2631                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2632                 kgnilnd_dev_fini(dev);
2633
2634                 LASSERTF(atomic_read(&dev->gnd_ndgrams) == 0,
2635                         "dgrams left %d\n", atomic_read(&dev->gnd_ndgrams));
2636
2637                 if (dev->gnd_dgrams != NULL) {
2638                         for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++)
2639                                 LASSERT(list_empty(&dev->gnd_dgrams[j]));
2640
2641                         LIBCFS_FREE(dev->gnd_dgrams,
2642                                     sizeof (struct list_head) *
2643                                     *kgnilnd_tunables.kgn_peer_hash_size);
2644                 }
2645
2646                 kgnilnd_free_phys_fmablk(dev);
2647         }
2648
2649         if (kgnilnd_data.kgn_mbox_cache != NULL)
2650                 kmem_cache_destroy(kgnilnd_data.kgn_mbox_cache);
2651
2652         if (kgnilnd_data.kgn_rx_cache != NULL)
2653                 kmem_cache_destroy(kgnilnd_data.kgn_rx_cache);
2654
2655         if (kgnilnd_data.kgn_tx_cache != NULL)
2656                 kmem_cache_destroy(kgnilnd_data.kgn_tx_cache);
2657
2658         if (kgnilnd_data.kgn_tx_phys_cache != NULL)
2659                 kmem_cache_destroy(kgnilnd_data.kgn_tx_phys_cache);
2660
2661         if (kgnilnd_data.kgn_dgram_cache != NULL)
2662                 kmem_cache_destroy(kgnilnd_data.kgn_dgram_cache);
2663
2664         if (kgnilnd_data.kgn_cksum_map_pages != NULL) {
2665                 for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
2666                         if (kgnilnd_data.kgn_cksum_map_pages[i] != NULL) {
2667                                 kfree(kgnilnd_data.kgn_cksum_map_pages[i]);
2668                         }
2669                 }
2670                 kfree(kgnilnd_data.kgn_cksum_map_pages);
2671         }
2672
2673         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2674                atomic_read(&libcfs_kmemory));
2675
2676         kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
2677         module_put(THIS_MODULE);
2678
2679         EXIT;
2680 }
2681
2682 int
2683 kgnilnd_startup(struct lnet_ni *ni)
2684 {
2685         int               rc, devno;
2686         kgn_net_t        *net;
2687         ENTRY;
2688
2689         LASSERTF(ni->ni_net->net_lnd == &the_kgnilnd,
2690                 "bad LND 0x%p != the_kgnilnd @ 0x%p\n",
2691                 ni->ni_net->net_lnd, &the_kgnilnd);
2692
2693         if (kgnilnd_data.kgn_init == GNILND_INIT_NOTHING) {
2694                 rc = kgnilnd_base_startup();
2695                 if (rc != 0)
2696                         RETURN(rc);
2697         }
2698
2699         /* Serialize with shutdown. */
2700         mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
2701
2702         LIBCFS_ALLOC(net, sizeof(*net));
2703         if (net == NULL) {
2704                 CERROR("could not allocate net for new interface instance\n");
2705                 /* no need to clean up the CDM... */
2706                 GOTO(failed, rc = -ENOMEM);
2707         }
2708         INIT_LIST_HEAD(&net->gnn_list);
2709         ni->ni_data = net;
2710         net->gnn_ni = ni;
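        /* if LNet did not already configure per-net tunables, fall back to the
         * gnilnd module parameters for the credit counts */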
2711         if (!ni->ni_net->net_tunables_set) {
2712                 ni->ni_net->net_tunables.lct_max_tx_credits =
2713                         *kgnilnd_tunables.kgn_credits;
2714                 ni->ni_net->net_tunables.lct_peer_tx_credits =
2715                         *kgnilnd_tunables.kgn_peer_credits;
2716         }
2717
2718         if (*kgnilnd_tunables.kgn_peer_health) {
2719                 int     fudge;
2720                 int     timeout;
2721                 /* give this a bit of leeway - we don't have a hard timeout
2722                  * as we only check timeouts periodically - see comment in kgnilnd_reaper */
2723                 fudge = (GNILND_TO2KA(*kgnilnd_tunables.kgn_timeout) / GNILND_REAPER_NCHECKS);
2724                 timeout = *kgnilnd_tunables.kgn_timeout + fudge;
2725
2726                 if (*kgnilnd_tunables.kgn_peer_timeout >= timeout) {
2727                         ni->ni_net->net_tunables.lct_peer_timeout =
2728                                  *kgnilnd_tunables.kgn_peer_timeout;
2729                 } else if (*kgnilnd_tunables.kgn_peer_timeout > -1) {
2730                         LCONSOLE_ERROR("Peer_timeout is set to %d but needs to be >= %d\n",
2731                                         *kgnilnd_tunables.kgn_peer_timeout,
2732                                         timeout);
2733                         ni->ni_data = NULL;
2734                         LIBCFS_FREE(net, sizeof(*net));
2735                         GOTO(failed, rc = -EINVAL);
2736                 } else
2737                         ni->ni_net->net_tunables.lct_peer_timeout = timeout;
2738
2739                 LCONSOLE_INFO("Enabling LNet peer health for gnilnd, timeout %ds\n",
2740                               ni->ni_net->net_tunables.lct_peer_timeout);
2741         }
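        /* illustrative numbers only: with kgn_timeout = 60s and a fudge of a
         * few seconds, a configured peer_timeout of 70 is accepted as-is, 50
         * is rejected with -EINVAL, and an unset (-1) value falls back to
         * timeout + fudge; the real fudge comes from GNILND_TO2KA and
         * GNILND_REAPER_NCHECKS above */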
2742
2743         atomic_set(&net->gnn_refcount, 1);
2744
2745         /* if we have multiple devices, spread the nets around */
2746         net->gnn_netnum = LNET_NETNUM(LNET_NIDNET(ni->ni_nid));
2747
2748         devno = LNET_NIDNET(ni->ni_nid) % GNILND_MAXDEVS;
2749         net->gnn_dev = &kgnilnd_data.kgn_devices[devno];
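        /* illustrative only: if GNILND_MAXDEVS were 2, NIs on nets gni0 and
         * gni1 would land on devno 0 and 1 respectively, since their
         * LNET_NIDNET() values differ by one */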
2750
2751         /* allocate a 'dummy' cdm for datagram use. We can only have a single
2752          * datagram between a nid:inst_id and nid2:inst_id. The fake cdm
2753          * gives us an additional inst_id to use, allowing the datagrams to flow
2754          * like rivers of honey and beer */
2755
2756         /* the instance id for the cdm is the NETNUM offset by MAXDEVS -
2757          * ensuring we'll have a unique id */
2758
2759
2760         ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), net->gnn_dev->gnd_nid);
2761         CDEBUG(D_NET, "adding net %p nid=%s on dev %d\n",
2762                 net, libcfs_nid2str(ni->ni_nid), net->gnn_dev->gnd_id);
2763         /* until the gnn_list is set, we need to clean up after ourselves as
2764          * kgnilnd_shutdown would just get confused */
2765
2766         down_write(&kgnilnd_data.kgn_net_rw_sem);
2767         list_add_tail(&net->gnn_list, kgnilnd_netnum2netlist(net->gnn_netnum));
2768         up_write(&kgnilnd_data.kgn_net_rw_sem);
2769
2770         /* we need a separate thread to call probe_wait_by_id until
2771          * we get a function callback notifier from kgni */
2772         mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
2773         RETURN(0);
2774  failed:
2775         mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
2776         kgnilnd_shutdown(ni);
2777         RETURN(rc);
2778 }
2779
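/* Tear down a single LNet NI: cancel the net's datagrams, drop its peers,
 * wait for the refcount to fall to our final reference, unhook the net from
 * the hash and free it.  If this was the last net, the base state is torn
 * down as well. */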
2780 void
2781 kgnilnd_shutdown(struct lnet_ni *ni)
2782 {
2783         kgn_net_t     *net = ni->ni_data;
2784         int           i;
2785         int           rc;
2786         ENTRY;
2787
2788         CFS_RACE(CFS_FAIL_GNI_SR_DOWN_RACE);
2789
2790         LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_ALL,
2791                 "init %d\n", kgnilnd_data.kgn_init);
2792
2793         /* Serialize with startup. */
2794         mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
2795         CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
2796                atomic_read(&libcfs_kmemory));
2797
2798         if (net == NULL) {
2799                 CERROR("got NULL net for ni %p\n", ni);
2800                 GOTO(out, rc = -EINVAL);
2801         }
2802
2803         LASSERTF(ni == net->gnn_ni,
2804                 "ni %p gnn_ni %p\n", ni, net->gnn_ni);
2805
2806         ni->ni_data = NULL;
2807
2808         LASSERT(!net->gnn_shutdown);
2809         LASSERTF(atomic_read(&net->gnn_refcount) != 0,
2810                 "net %p refcount %d\n",
2811                  net, atomic_read(&net->gnn_refcount));
2812
2813         if (!list_empty(&net->gnn_list)) {
2814                 /* serialize with peer creation */
2815                 down_write(&kgnilnd_data.kgn_net_rw_sem);
2816                 net->gnn_shutdown = 1;
2817                 up_write(&kgnilnd_data.kgn_net_rw_sem);
2818
2819                 kgnilnd_cancel_net_dgrams(net);
2820
2821                 kgnilnd_del_conn_or_peer(net, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
2822
2823                 /* if we are quiesced, need to wake up - we need those threads
2824                  * alive to release peers, etc */
2825                 if (GNILND_IS_QUIESCED) {
2826                         set_mb(kgnilnd_data.kgn_quiesce_trigger, GNILND_QUIESCE_IDLE);
2827                         kgnilnd_quiesce_wait("shutdown");
2828                 }
2829
2830                 kgnilnd_wait_for_canceled_dgrams(net->gnn_dev);
2831
2832                 /* We wait until the net's refcount drops to 1 and then release the
2833                  * final ref, which is ours; this makes sure everything else is done
2834                  * before we free the net.
2835                  */
2836                 i = 4;
2837                 while (atomic_read(&net->gnn_refcount) != 1) {
2838                         i++;
2839                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2840                                 "Waiting for %d references to clear on net %d\n",
2841                                 atomic_read(&net->gnn_refcount),
2842                                 net->gnn_netnum);
2843                         set_current_state(TASK_UNINTERRUPTIBLE);
2844                         schedule_timeout(cfs_time_seconds(1));
2845                 }
2846
2847                 /* release ref from kgnilnd_startup */
2848                 kgnilnd_net_decref(net);
2849                 /* serialize with reaper and conn_task looping */
2850                 down_write(&kgnilnd_data.kgn_net_rw_sem);
2851                 list_del_init(&net->gnn_list);
2852                 up_write(&kgnilnd_data.kgn_net_rw_sem);
2853
2854         }
2855
2856         /* not locking, this can't race with writers */
2857         LASSERTF(atomic_read(&net->gnn_refcount) == 0,
2858                 "net %p refcount %d\n",
2859                  net, atomic_read(&net->gnn_refcount));
2860         LIBCFS_FREE(net, sizeof(*net));
2861
2862 out:
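        /* if every net hash bucket is now empty this was the last net, so tear
         * down the base state too; otherwise leave it up for the other nets */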
2863         down_read(&kgnilnd_data.kgn_net_rw_sem);
2864         for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
2865                 if (!list_empty(&kgnilnd_data.kgn_nets[i])) {
2866                         up_read(&kgnilnd_data.kgn_net_rw_sem);
2867                         break;
2868                 }
2869
2870                 if (i == *kgnilnd_tunables.kgn_net_hash_size - 1) {
2871                         up_read(&kgnilnd_data.kgn_net_rw_sem);
2872                         kgnilnd_base_shutdown();
2873                 }
2874         }
2875         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2876                atomic_read(&libcfs_kmemory));
2877
2878         mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
2879         EXIT;
2880 }
2881
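/* Module unload: unregister the LND from LNet first so no new NIs can start,
 * then remove the proc and sysctl entries. */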
2882 static void __exit kgnilnd_exit(void)
2883 {
2884         lnet_unregister_lnd(&the_kgnilnd);
2885         kgnilnd_proc_fini();
2886         kgnilnd_remove_sysctl();
2887 }
2888
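/* Module load: set up tunables, publish the sysctl and proc entries and
 * register the LND with LNet; per-NI state is created later by
 * kgnilnd_startup(). */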
2889 static int __init kgnilnd_init(void)
2890 {
2891         int    rc;
2892
2893         rc = kgnilnd_tunables_init();
2894         if (rc != 0)
2895                 return rc;
2896
2897         LCONSOLE_INFO("Lustre: kgnilnd build version: "LUSTRE_VERSION_STRING"\n");
2898
2899         kgnilnd_insert_sysctl();
2900         kgnilnd_proc_init();
2901
2902         lnet_register_lnd(&the_kgnilnd);
2903
2904         return 0;
2905 }
2906
2907 MODULE_AUTHOR("Cray, Inc. <nic@cray.com>");
2908 MODULE_DESCRIPTION("Gemini LNet Network Driver");
2909 MODULE_VERSION(LUSTRE_VERSION_STRING);
2910 MODULE_LICENSE("GPL");
2911
2912 module_init(kgnilnd_init);
2913 module_exit(kgnilnd_exit);