/*
 * Copyright (C) 2012 Cray, Inc.
 *
 *   Author: Igor Gorodetsky <iogordet@cray.com>
 *   Author: Nic Henke <nic@cray.com>
 *   Author: James Shimek <jshimek@cray.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */
#include "gnilnd.h"

/* Primary entry points from LNET.  There are no guarantees against reentrance. */
lnd_t the_kgnilnd = {
        .lnd_type       = GNILND,
        .lnd_startup    = kgnilnd_startup,
        .lnd_shutdown   = kgnilnd_shutdown,
        .lnd_ctl        = kgnilnd_ctl,
        .lnd_send       = kgnilnd_send,
        .lnd_recv       = kgnilnd_recv,
        .lnd_eager_recv = kgnilnd_eager_recv,
        .lnd_query      = kgnilnd_query,
};
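
/* NB: this descriptor is handed to LNet in the module init path (presumably
 * via lnet_register_lnd()), which is what makes these entry points live */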

kgn_data_t      kgnilnd_data;
kgn_hssops_t    kgnilnd_hssops;

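/* Close any ESTABLISHED conns on 'peer' made stale by 'newconn';
 * returns the number of conns closed. */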
/* needs write_lock on kgn_peer_conn_lock */
int
kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
        kgn_conn_t         *conn;
        struct list_head   *ctmp, *cnxt;
        int                 loopback;
        int                 count = 0;

        loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;

        list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
                conn = list_entry(ctmp, kgn_conn_t, gnc_list);

                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                if (conn == newconn)
                        continue;

                if (conn->gnc_device != newconn->gnc_device)
                        continue;

                /* This is a two connection loopback - one talking to the other */
                if (loopback &&
                    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
                    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp) {
                        CDEBUG(D_NET, "skipping prune of %p, "
                                "loopback and matching stamps"
                                " connstamp "LPU64"("LPU64")"
                                " peerstamp "LPU64"("LPU64")\n",
                                conn, newconn->gnc_my_connstamp,
                                conn->gnc_peer_connstamp,
                                newconn->gnc_peer_connstamp,
                                conn->gnc_my_connstamp);
                        continue;
                }

                if (conn->gnc_peerstamp != newconn->gnc_peerstamp) {
                        LASSERTF(conn->gnc_peerstamp < newconn->gnc_peerstamp,
                                "conn 0x%p peerstamp "LPU64" >= "
                                "newconn 0x%p peerstamp "LPU64"\n",
                                conn, conn->gnc_peerstamp,
                                newconn, newconn->gnc_peerstamp);

                        CDEBUG(D_NET, "Closing stale conn nid: %s "
                               " peerstamp:"LPX64"("LPX64")\n",
                               libcfs_nid2str(peer->gnp_nid),
                               conn->gnc_peerstamp, newconn->gnc_peerstamp);
                } else {
                        LASSERTF(conn->gnc_peer_connstamp < newconn->gnc_peer_connstamp,
                                "conn 0x%p peer_connstamp "LPU64" >= "
                                "newconn 0x%p peer_connstamp "LPU64"\n",
                                conn, conn->gnc_peer_connstamp,
                                newconn, newconn->gnc_peer_connstamp);

                        CDEBUG(D_NET, "Closing stale conn nid: %s"
                               " connstamp:"LPU64"("LPU64")\n",
                               libcfs_nid2str(peer->gnp_nid),
                               conn->gnc_peer_connstamp, newconn->gnc_peer_connstamp);
                }

                count++;
                kgnilnd_close_conn_locked(conn, -ESTALE);
        }

        if (count != 0) {
                CWARN("Closed %d stale conns to %s\n", count, libcfs_nid2str(peer->gnp_nid));
        }

        RETURN(count);
}

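/* Decide whether 'newconn' duplicates a conn already on 'peer':
 * returns 0 if it is not a dup, 1 if 'newconn' carries a stale
 * peerstamp, 2 if it carries a stale peer connstamp, and 3 if the
 * connstamps match exactly (the peer isn't playing the game). */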
int
kgnilnd_conn_isdup_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
{
        kgn_conn_t       *conn;
        struct list_head *tmp;
        int               loopback;
        ENTRY;

        loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;

        list_for_each(tmp, &peer->gnp_conns) {
                conn = list_entry(tmp, kgn_conn_t, gnc_list);
                CDEBUG(D_NET, "checking conn 0x%p for peer %s"
                        " lo %d new "LPU64" existing "LPU64
                        " new peer "LPU64" existing peer "LPU64
                        " new dev %p existing dev %p\n",
                        conn, libcfs_nid2str(peer->gnp_nid),
                        loopback,
                        newconn->gnc_peerstamp, conn->gnc_peerstamp,
                        newconn->gnc_peer_connstamp, conn->gnc_peer_connstamp,
                        newconn->gnc_device, conn->gnc_device);

                /* conn is in the process of closing */
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                /* 'newconn' is from an earlier version of 'peer'!!! */
                if (newconn->gnc_peerstamp < conn->gnc_peerstamp)
                        RETURN(1);

                /* 'conn' is from an earlier version of 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->gnc_peerstamp > conn->gnc_peerstamp)
                        continue;

                /* Different devices are OK */
                if (conn->gnc_device != newconn->gnc_device)
                        continue;

                /* It's me connecting to myself */
                if (loopback &&
                    newconn->gnc_my_connstamp == conn->gnc_peer_connstamp &&
                    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp)
                        continue;

                /* 'newconn' is an earlier connection from 'peer'!!! */
                if (newconn->gnc_peer_connstamp < conn->gnc_peer_connstamp)
                        RETURN(2);

                /* 'conn' is an earlier connection from 'peer': it will be
                 * removed when we cull stale conns later on... */
                if (newconn->gnc_peer_connstamp > conn->gnc_peer_connstamp)
                        continue;

                /* 'newconn' has the SAME connection stamp; 'peer' isn't
                 * playing the game... */
                RETURN(3);
        }

        RETURN(0);
}

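/* Allocate a new conn on 'dev' and set up its tx ref table, CQ id and
 * EP handle; returns 0 with *connp set, or a negative errno on failure. */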
int
kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
{
        kgn_conn_t    *conn;
        gni_return_t   rrc;
        int            rc = 0;

        LASSERT (!in_interrupt());
        atomic_inc(&kgnilnd_data.kgn_nconns);

        /* divide by 2 to allow for complete reset and immediate reconnect */
        if (atomic_read(&kgnilnd_data.kgn_nconns) >= GNILND_MAX_CQID/2) {
                CERROR("Too many conns are live: %d >= %d\n",
                        atomic_read(&kgnilnd_data.kgn_nconns), GNILND_MAX_CQID/2);
                atomic_dec(&kgnilnd_data.kgn_nconns);
                return -E2BIG;
        }

        LIBCFS_ALLOC(conn, sizeof(*conn));
        if (conn == NULL) {
                atomic_dec(&kgnilnd_data.kgn_nconns);
                return -ENOMEM;
        }

        LIBCFS_ALLOC(conn->gnc_tx_ref_table, GNILND_MAX_MSG_ID * sizeof(void *));
        if (conn->gnc_tx_ref_table == NULL) {
                CERROR("Can't allocate conn tx_ref_table\n");
                rc = -ENOMEM;
                GOTO(failed, rc);
        }

        atomic_set(&conn->gnc_refcount, 1);
        atomic_set(&conn->gnc_reaper_noop, 0);
        atomic_set(&conn->gnc_sched_noop, 0);
        INIT_LIST_HEAD(&conn->gnc_list);
        INIT_LIST_HEAD(&conn->gnc_hashlist);
        INIT_LIST_HEAD(&conn->gnc_schedlist);
        INIT_LIST_HEAD(&conn->gnc_fmaq);
        INIT_LIST_HEAD(&conn->gnc_mdd_list);
        spin_lock_init(&conn->gnc_list_lock);
        spin_lock_init(&conn->gnc_tx_lock);

        /* set tx id to nearly the end to make sure we find wrapping
         * issues soon */
        conn->gnc_next_tx = (int) GNILND_MAX_MSG_ID - 10;

        /* if this fails, we have conflicts and MAX_TX is too large */
        CLASSERT(GNILND_MAX_MSG_ID < GNILND_MSGID_CLOSE);

        /* get a new unique CQ id for this conn */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        conn->gnc_my_connstamp = kgnilnd_data.kgn_connstamp++;
        conn->gnc_cqid = kgnilnd_get_cqid_locked();
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        if (conn->gnc_cqid == 0) {
                CERROR("Could not allocate unique CQ ID for conn 0x%p\n", conn);
                rc = -E2BIG;
                GOTO(failed, rc);
        }

        CDEBUG(D_NET, "alloc cqid %u for conn 0x%p\n",
                conn->gnc_cqid, conn);

        /* need to be set before gnc_ephandle to allow kgnilnd_destroy_conn_ep to
         * check context */
        conn->gnc_device = dev;

        conn->gnc_timeout = MAX(*kgnilnd_tunables.kgn_timeout,
                                GNILND_MIN_TIMEOUT);
        kgnilnd_update_reaper_timeout(conn->gnc_timeout);

        /* this is the ep_handle for doing SMSG & BTE */
        mutex_lock(&dev->gnd_cq_mutex);
        rrc = kgnilnd_ep_create(dev->gnd_handle, dev->gnd_snd_fma_cqh,
                                &conn->gnc_ephandle);
        mutex_unlock(&dev->gnd_cq_mutex);
        if (rrc != GNI_RC_SUCCESS) {
                rc = -ENETDOWN;
                GOTO(failed, rc);
        }

        CDEBUG(D_NET, "created conn 0x%p ep_hndl 0x%p\n",
               conn, conn->gnc_ephandle);

        /* add ref for EP canceling */
        kgnilnd_conn_addref(conn);
        atomic_inc(&dev->gnd_neps);

        *connp = conn;
        return 0;

failed:
        atomic_dec(&kgnilnd_data.kgn_nconns);
        LIBCFS_FREE(conn->gnc_tx_ref_table, GNILND_MAX_MSG_ID * sizeof(void *));
        LIBCFS_FREE(conn, sizeof(*conn));
        return rc;
}

/* needs to be called with kgn_peer_conn_lock held (read or write) */
kgn_conn_t *
kgnilnd_find_conn_locked(kgn_peer_t *peer)
{
        kgn_conn_t      *conn = NULL;
        ENTRY;

        /* if we are in reset, this conn is going to die soon */
        if (unlikely(kgnilnd_data.kgn_in_reset)) {
                RETURN(NULL);
        }

        /* just return the first ESTABLISHED connection */
        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                /* kgnilnd_finish_connect doesn't put connections on the
                 * peer list until they are actually established */
                LASSERTF(conn->gnc_state >= GNILND_CONN_ESTABLISHED,
                        "found conn %p state %s on peer %p (%s)\n",
                        conn, kgnilnd_conn_state2str(conn), peer,
                        libcfs_nid2str(peer->gnp_nid));
                if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
                        continue;

                RETURN(conn);
        }
        RETURN(NULL);
}

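/* Return an ESTABLISHED conn for 'peer' if one exists; otherwise kick off
 * a new connect attempt (when allowed) and return NULL. */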
/* needs write_lock on kgn_peer_conn_lock held */
kgn_conn_t *
kgnilnd_find_or_create_conn_locked(kgn_peer_t *peer)
{
        kgn_device_t    *dev = peer->gnp_net->gnn_dev;
        kgn_conn_t      *conn;

        conn = kgnilnd_find_conn_locked(peer);

        if (conn != NULL) {
                return conn;
        }

        /* if the peer was previously connecting, check if we should
         * trigger another connection attempt yet. */
        if (time_before(jiffies, peer->gnp_reconnect_time)) {
                return NULL;
        }

        /* This check prevents us from creating a new connection to a peer while we are
         * still in the process of closing an existing connection to the peer.
         */
        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_ephandle != NULL) {
                        CDEBUG(D_NET, "Not connecting: non-NULL ephandle found for peer 0x%p->%s\n",
                                peer, libcfs_nid2str(peer->gnp_nid));
                        return NULL;
                }
        }

        if (peer->gnp_connecting != GNILND_PEER_IDLE) {
                /* if the peer is anything but IDLE, a connect attempt is
                 * already in flight (or being torn down) - don't start
                 * another one */
                return NULL;
        }

        CDEBUG(D_NET, "starting connect to %s\n",
                libcfs_nid2str(peer->gnp_nid));
        peer->gnp_connecting = GNILND_PEER_CONNECT;
        kgnilnd_peer_addref(peer); /* extra ref for connd */

        spin_lock(&dev->gnd_connd_lock);
        list_add_tail(&peer->gnp_connd_list, &dev->gnd_connd_peers);
        spin_unlock(&dev->gnd_connd_lock);

        kgnilnd_schedule_dgram(dev);
        CDEBUG(D_NETTRACE, "scheduling new connect\n");

        return NULL;
}


/* Caller is responsible for deciding if/when to call this */
void
kgnilnd_destroy_conn_ep(kgn_conn_t *conn)
{
        gni_return_t    rrc;
        gni_ep_handle_t tmp_ep;

        /* only tear down the EP if we actually initialized it; swapping in
         * NULL tells kgnilnd_destroy_conn to leave it alone */

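        /* xchg() claims the handle atomically, so only one caller can win
         * the race and actually perform the destroy */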
        tmp_ep = xchg(&conn->gnc_ephandle, NULL);
        if (tmp_ep != NULL) {
                /* we never re-use the EP, so unbind is not needed */
                mutex_lock(&conn->gnc_device->gnd_cq_mutex);
                rrc = kgnilnd_ep_destroy(tmp_ep);
                mutex_unlock(&conn->gnc_device->gnd_cq_mutex);

                /* if this fails, it could hork up kgni smsg retransmit and others
                 * since we could free the SMSG mbox memory, etc. */
                LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d conn 0x%p ep 0x%p\n",
                         rrc, conn, conn->gnc_ephandle);

                atomic_dec(&conn->gnc_device->gnd_neps);

                /* clear out count added in kgnilnd_close_conn_locked
                 * conn will have a peer once it hits finish_connect, where it
                 * is the first spot we'll mark it ESTABLISHED as well */
                if (conn->gnc_peer) {
                        kgnilnd_admin_decref(conn->gnc_peer->gnp_dirty_eps);
                }

                /* drop ref for EP */
                kgnilnd_conn_decref(conn);
        }
}

void
kgnilnd_destroy_conn(kgn_conn_t *conn)
{
        LASSERTF(!in_interrupt() &&
                !conn->gnc_scheduled &&
                !conn->gnc_in_purgatory &&
                conn->gnc_ephandle == NULL &&
                list_empty(&conn->gnc_list) &&
                list_empty(&conn->gnc_hashlist) &&
                list_empty(&conn->gnc_schedlist) &&
                list_empty(&conn->gnc_mdd_list),
                "conn 0x%p->%s IRQ %d sched %d purg %d ep 0x%p lists %d/%d/%d/%d\n",
                conn, conn->gnc_peer ? libcfs_nid2str(conn->gnc_peer->gnp_nid)
                                     : "<?>",
                !!in_interrupt(), conn->gnc_scheduled,
                conn->gnc_in_purgatory,
                conn->gnc_ephandle,
                list_empty(&conn->gnc_list),
                list_empty(&conn->gnc_hashlist),
                list_empty(&conn->gnc_schedlist),
                list_empty(&conn->gnc_mdd_list));

        /* Tripping these is especially bad, as it means we have items on the
         *  lists that didn't keep their refcount on the connection - or
         *  somebody evil released their own */
        LASSERTF(list_empty(&conn->gnc_fmaq) &&
                 atomic_read(&conn->gnc_nlive_fma) == 0 &&
                 atomic_read(&conn->gnc_nlive_rdma) == 0,
                 "conn 0x%p fmaq %d@0x%p nfma %d nrdma %d\n",
                 conn, kgnilnd_count_list(&conn->gnc_fmaq), &conn->gnc_fmaq,
                 atomic_read(&conn->gnc_nlive_fma), atomic_read(&conn->gnc_nlive_rdma));

        CDEBUG(D_NET, "destroying conn %p ephandle %p error %d\n",
                conn, conn->gnc_ephandle, conn->gnc_error);

        /* if there is an FMA blk left here, we'll tear it down */
        if (conn->gnc_fma_blk) {
                kgnilnd_release_mbox(conn, 0);
        }

        if (conn->gnc_peer != NULL)
                kgnilnd_peer_decref(conn->gnc_peer);

        if (conn->gnc_tx_ref_table != NULL) {
                LIBCFS_FREE(conn->gnc_tx_ref_table,
                            GNILND_MAX_MSG_ID * sizeof(void *));
        }

        LIBCFS_FREE(conn, sizeof(*conn));
        atomic_dec(&kgnilnd_data.kgn_nconns);
}

/* peer_alive and peer_notify done in the style of the o2iblnd */
void
kgnilnd_peer_alive(kgn_peer_t *peer)
{
        set_mb(peer->gnp_last_alive, jiffies);
}

void
kgnilnd_peer_notify(kgn_peer_t *peer, int error)
{
        int                     tell_lnet = 0;
        int                     nnets = 0;
        int                     rc;
        int                     i, j;
        kgn_conn_t             *conn;
        kgn_net_t             **nets;
        kgn_net_t              *net;

        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DONT_NOTIFY))
                return;

        /* Tell LNet we are giving up on this peer - but only
         * if it isn't already reconnected or trying to reconnect */
        read_lock(&kgnilnd_data.kgn_peer_conn_lock);

        /* use kgnilnd_find_conn_locked to avoid any conns in the process of being nuked
         *
         * don't tell LNet if we are in reset - we assume that everyone will be able to
         * reconnect just fine
         */
        conn = kgnilnd_find_conn_locked(peer);

        CDEBUG(D_NETTRACE, "peer 0x%p->%s connecting %d conn 0x%p, rst %d error %d\n",
               peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting, conn,
               kgnilnd_data.kgn_in_reset, error);

        if ((peer->gnp_connecting == GNILND_PEER_IDLE) &&
            (conn == NULL) &&
            (!kgnilnd_data.kgn_in_reset) &&
            (!kgnilnd_conn_clean_errno(error))) {
                tell_lnet = 1;
        }

        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        if (!tell_lnet) {
                /* short circuit if we don't need to notify LNet */
                return;
        }

        rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);

        if (rc) {
                /* only proceed if the trylock succeeded - a failure means
                 * LNet is in shutdown or something similar */
                for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                        list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
                                /* if gnn_shutdown is set for any net, shutdown is in progress - just return */
                                if (net->gnn_shutdown) {
                                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                                        return;
                                }
                                nnets++;
                        }
                }

                if (nnets == 0) {
                        /* shutdown in progress most likely */
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        return;
                }

                LIBCFS_ALLOC(nets, nnets * sizeof(*nets));

                if (nets == NULL) {
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        CERROR("Failed to allocate nets[%d]\n", nnets);
                        return;
                }

                j = 0;
                for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                        list_for_each_entry(net, &kgnilnd_data.kgn_nets[i], gnn_list) {
                                nets[j] = net;
                                kgnilnd_net_addref(net);
                                j++;
                        }
                }
                up_read(&kgnilnd_data.kgn_net_rw_sem);
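
                /* each net in nets[] holds a reference taken above, so it is
                 * safe to use them (and call lnet_notify) after dropping
                 * kgn_net_rw_sem */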

                for (i = 0; i < nnets; i++) {
                        lnet_nid_t peer_nid;

                        net = nets[i];

                        peer_nid = kgnilnd_lnd2lnetnid(net->gnn_ni->ni_nid,
                                                       peer->gnp_nid);

                        CDEBUG(D_NET, "peer 0x%p->%s last_alive %lu (%lus ago)\n",
                                peer, libcfs_nid2str(peer_nid), peer->gnp_last_alive,
                                cfs_duration_sec(jiffies - peer->gnp_last_alive));

                        lnet_notify(net->gnn_ni, peer_nid, 0, peer->gnp_last_alive);

                        kgnilnd_net_decref(net);
                }

                LIBCFS_FREE(nets, nnets * sizeof(*nets));
        }
}

/* need write_lock on kgn_peer_conn_lock */
void
kgnilnd_close_conn_locked(kgn_conn_t *conn, int error)
{
        kgn_peer_t        *peer = conn->gnc_peer;
        ENTRY;

        LASSERT(!in_interrupt());

        /* store error for tx completion */
        conn->gnc_error = error;
        peer->gnp_last_errno = error;

        /* use real error from peer if possible */
        if (error == -ECONNRESET) {
                error = conn->gnc_peer_error;
        }

        /* if we NETERROR, make sure it is rate limited */
        if (!kgnilnd_conn_clean_errno(error)) {
                CNETERR("closing conn to %s: error %d\n",
                       libcfs_nid2str(peer->gnp_nid), error);
        } else {
                CDEBUG(D_NET, "closing conn to %s: error %d\n",
                       libcfs_nid2str(peer->gnp_nid), error);
        }

        LASSERTF(conn->gnc_state == GNILND_CONN_ESTABLISHED,
                "conn %p to %s with bogus state %s\n", conn,
                libcfs_nid2str(conn->gnc_peer->gnp_nid),
                kgnilnd_conn_state2str(conn));
        LASSERT(!list_empty(&conn->gnc_hashlist));
        LASSERT(!list_empty(&conn->gnc_list));

        /* mark peer count here so any place the EP gets destroyed will
         * open up the peer count so that a new ESTABLISHED conn is then free
         * to send new messages -- sending before the previous EPs are destroyed
         * could end up with messages on the network for the old conn _after_
         * the new conn and break the mbox safety protocol */
        kgnilnd_admin_addref(conn->gnc_peer->gnp_dirty_eps);

        /* Remove from conn hash table: no new callbacks */
        list_del_init(&conn->gnc_hashlist);
        kgnilnd_data.kgn_conn_version++;

        /* if we are in reset, go right to CLOSED as there is no scheduler
         * thread to move from CLOSING to CLOSED */
        if (unlikely(kgnilnd_data.kgn_in_reset)) {
                conn->gnc_state = GNILND_CONN_CLOSED;
        } else {
                conn->gnc_state = GNILND_CONN_CLOSING;
        }

        /* leave on peer->gnp_conns to make sure we don't let the reaper
         * or others try to unlink this peer until the conn is fully
         * processed for closing */

        if (kgnilnd_check_purgatory_conn(conn)) {
                kgnilnd_add_purgatory_locked(conn, conn->gnc_peer);
        }

        /* Reset RX timeout to ensure we wait for an incoming CLOSE
         * for the full timeout.  If we get a CLOSE we know the
         * peer has stopped all RDMA.  Otherwise if we wait for
         * the full timeout we can also be sure all RDMA has stopped. */
        conn->gnc_last_rx = conn->gnc_last_rx_cq = jiffies;
        mb();

        /* schedule sending CLOSE - if we are in quiesce, this adds to
         * gnd_ready_conns and allows us to find it in quiesce processing */
        kgnilnd_schedule_conn(conn);

        /* lose peer's ref */
        kgnilnd_conn_decref(conn);
        /* -1 for conn table */
        kgnilnd_conn_decref(conn);

        EXIT;
}

void
kgnilnd_close_conn(kgn_conn_t *conn, int error)
{
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
        /* need to check the state here - this call is racy and we don't
         * know the state until after the lock is grabbed */
        if (conn->gnc_state == GNILND_CONN_ESTABLISHED) {
                kgnilnd_close_conn_locked(conn, error);
        }
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
}

void
kgnilnd_complete_closed_conn(kgn_conn_t *conn)
{
        LIST_HEAD              (sinners);
        kgn_tx_t               *tx, *txn;
        int                     nlive = 0;
        int                     nrdma = 0;
        int                     nq_rdma = 0;
        int                     logmsg;
        ENTRY;

        /* Dump log on cksum error - wait until complete phase to let
         * RX of error happen */
        if (*kgnilnd_tunables.kgn_checksum_dump &&
            (conn != NULL && conn->gnc_peer_error == -ENOKEY)) {
                libcfs_debug_dumplog();
        }

        /* _CLOSED set in kgnilnd_process_fmaq once we decide to
         * send the CLOSE or not */
        LASSERTF(conn->gnc_state == GNILND_CONN_CLOSED,
                 "conn 0x%p->%s with bad state %s\n",
                 conn, conn->gnc_peer ?
                        libcfs_nid2str(conn->gnc_peer->gnp_nid) :
                        "<?>",
                 kgnilnd_conn_state2str(conn));

        LASSERT(list_empty(&conn->gnc_hashlist));

        /* we've sent the close, start nuking */

        /* we don't use lists to track things that we can get out of the
         * tx_ref table... */

        /* need to hold locks for tx_list_state, sampling it is too racy:
         * - the lock actually protects tx != NULL, but we can't take the proper
         *   lock until we check tx_list_state, which would be too late and
         *   we could have the TX change under us.
         * gnd_rdmaq_lock and gnd_lock are not used together, so taking both
         * should be fine */
        spin_lock(&conn->gnc_device->gnd_rdmaq_lock);
        spin_lock(&conn->gnc_device->gnd_lock);

        for (nrdma = 0; nrdma < GNILND_MAX_MSG_ID; nrdma++) {
                tx = conn->gnc_tx_ref_table[nrdma];

                if (tx != NULL) {
                        /* only print the first error and if not CLOSE, we often don't see
                         * CQ events for that by the time we get here... and really don't care */
                        if (nlive || tx->tx_msg.gnm_type == GNILND_MSG_CLOSE)
                                tx->tx_state |= GNILND_TX_QUIET_ERROR;
                        nlive++;
                        GNIDBG_TX(D_NET, tx, "cleaning up on close, nlive %d", nlive);

                        /* don't worry about gnc_lock here as nobody else should be
                         * touching this conn */
                        kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
                        list_add_tail(&tx->tx_list, &sinners);
                }
        }
        spin_unlock(&conn->gnc_device->gnd_lock);
        spin_unlock(&conn->gnc_device->gnd_rdmaq_lock);

        /* nobody should have marked this as needing scheduling after
         * we called close - so the only ref should be us handling it */
        LASSERTF(conn->gnc_scheduled == GNILND_CONN_PROCESS,
                 "conn 0x%p scheduled %d\n", conn, conn->gnc_scheduled);

        /* now reset a few to actual counters... */
        nrdma = atomic_read(&conn->gnc_nlive_rdma);
        nq_rdma = atomic_read(&conn->gnc_nq_rdma);

        if (!list_empty(&sinners)) {
                list_for_each_entry_safe(tx, txn, &sinners, tx_list) {
                        /* clear tx_list to make tx_add_list_locked happy */
                        list_del_init(&tx->tx_list);
                        /* The error codes determine if we hold onto the MDD */
                        kgnilnd_tx_done(tx, conn->gnc_error);
                }
        }

        logmsg = (nlive + nrdma + nq_rdma);

        if (logmsg) {
                if (conn->gnc_peer_error != 0) {
                        CNETERR("Closed conn 0x%p->%s (errno %d, peer errno %d): "
                                "canceled %d TX, %d/%d RDMA\n",
                                conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                                conn->gnc_error, conn->gnc_peer_error,
                                nlive, nq_rdma, nrdma);
                } else {
                        CNETERR("Closed conn 0x%p->%s (errno %d): "
                                "canceled %d TX, %d/%d RDMA\n",
                                conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                                conn->gnc_error,
                                nlive, nq_rdma, nrdma);
                }
        }

        kgnilnd_destroy_conn_ep(conn);

        /* Bug 765042 - race this with completing a new conn to same peer - we need
         * finish_connect to detach purgatory before we can do it ourselves here */
        CFS_RACE(CFS_FAIL_GNI_FINISH_PURG);

        /* now it is safe to remove from peer list - anyone looking at
         * gnp_conns now is free to unlink if not on purgatory */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);

        conn->gnc_state = GNILND_CONN_DONE;

        /* Decrement counter if we are marked by del_conn_or_peers for closing
         */
        if (conn->gnc_needs_closing)
                kgnilnd_admin_decref(kgnilnd_data.kgn_npending_conns);

        /* Remove from peer's list of valid connections if it's not in purgatory */
        if (!conn->gnc_in_purgatory) {
                list_del_init(&conn->gnc_list);
        }

        /* NB - only unlinking if we set pending in del_peer_locked from admin or
         * shutdown */
        if (kgnilnd_peer_active(conn->gnc_peer) &&
            conn->gnc_peer->gnp_pending_unlink &&
            kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
                kgnilnd_unlink_peer_locked(conn->gnc_peer);
        }

        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        /* I'm telling Mommy! - use peer_error if they initiated close */
        kgnilnd_peer_notify(conn->gnc_peer,
                            conn->gnc_error == -ECONNRESET ? conn->gnc_peer_error
                                                           : conn->gnc_error);

        EXIT;
}

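/* Apply the remote parameters from an incoming connreq to the dgram's conn:
 * bind the EP, set its event data and initialize SMSG. Returns 0 on success
 * or a negative errno after unwinding the EP bind. */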
int
kgnilnd_set_conn_params(kgn_dgram_t *dgram)
{
        kgn_conn_t             *conn = dgram->gndg_conn;
        kgn_connreq_t          *connreq = &dgram->gndg_conn_in;
        kgn_gniparams_t        *rem_param = &connreq->gncr_gnparams;
        gni_return_t            rrc;
        int                     rc = 0;

        /* set timeout vals in conn early so we can use them for the NAK */

        /* use max of the requested and our timeout, peer will do the same */
        conn->gnc_timeout = MAX(conn->gnc_timeout, connreq->gncr_timeout);

        /* only ep_bind really mucks around with the CQ */
        /* only ep_bind if we are not connecting to ourselves and the dstnid is
         * not a wildcard. This check is necessary as you can only bind an EP
         * once, and we must make sure we don't bind when already bound. */
        if (connreq->gncr_dstnid != LNET_NID_ANY &&
            dgram->gndg_conn_out.gncr_dstnid != connreq->gncr_srcnid) {
                mutex_lock(&conn->gnc_device->gnd_cq_mutex);
                rrc = kgnilnd_ep_bind(conn->gnc_ephandle,
                        connreq->gncr_gnparams.gnpr_host_id,
                        conn->gnc_cqid);
                mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                if (rrc != GNI_RC_SUCCESS) {
                        rc = -ECONNABORTED;
                        goto return_out;
                }
        }

        rrc = kgnilnd_ep_set_eventdata(conn->gnc_ephandle, conn->gnc_cqid,
                         connreq->gncr_gnparams.gnpr_cqid);
        if (rrc != GNI_RC_SUCCESS) {
                rc = -ECONNABORTED;
                goto cleanup_out;
        }

        /* Initialize SMSG */
        rrc = kgnilnd_smsg_init(conn->gnc_ephandle, &conn->gnpr_smsg_attr,
                        &connreq->gncr_gnparams.gnpr_smsg_attr);
        if (unlikely(rrc == GNI_RC_INVALID_PARAM)) {
                gni_smsg_attr_t *local = &conn->gnpr_smsg_attr;
                gni_smsg_attr_t *remote = &connreq->gncr_gnparams.gnpr_smsg_attr;
                /* help folks figure out if there is a tunable off, etc. */
                LCONSOLE_ERROR("SMSG attribute mismatch. Data from local/remote:"
                               " type %d/%d msg_maxsize %u/%u"
                               " mbox_maxcredit %u/%u. Please check kgni"
                               " logs for further data\n",
                               local->msg_type, remote->msg_type,
                               local->msg_maxsize, remote->msg_maxsize,
                               local->mbox_maxcredit, remote->mbox_maxcredit);
        }
        if (rrc != GNI_RC_SUCCESS) {
                rc = -ECONNABORTED;
                goto cleanup_out;
        }

        /* log this for help in debugging SMSG buffer re-use */
        CDEBUG(D_NET, "conn %p src %s dst %s smsg %p acquired"
                " local cqid %u SMSG %p->%u hndl "LPX64"."LPX64
                " remote cqid %u SMSG %p->%u hndl "LPX64"."LPX64"\n",
                conn, libcfs_nid2str(connreq->gncr_srcnid),
                libcfs_nid2str(connreq->gncr_dstnid),
                &conn->gnpr_smsg_attr,
                conn->gnc_cqid,
                conn->gnpr_smsg_attr.msg_buffer,
                conn->gnpr_smsg_attr.mbox_offset,
                conn->gnpr_smsg_attr.mem_hndl.qword1,
                conn->gnpr_smsg_attr.mem_hndl.qword2,
                rem_param->gnpr_cqid,
                rem_param->gnpr_smsg_attr.msg_buffer,
                rem_param->gnpr_smsg_attr.mbox_offset,
                rem_param->gnpr_smsg_attr.mem_hndl.qword1,
                rem_param->gnpr_smsg_attr.mem_hndl.qword2);

        conn->gnc_peerstamp = connreq->gncr_peerstamp;
        conn->gnc_peer_connstamp = connreq->gncr_connstamp;

        /* We update the reaper timeout once we have a valid conn and timeout */
        kgnilnd_update_reaper_timeout(GNILND_TO2KA(conn->gnc_timeout));

        return 0;

cleanup_out:
        rrc = kgnilnd_ep_unbind(conn->gnc_ephandle);
        /* not sure I can just let this fly */
        LASSERTF(rrc == GNI_RC_SUCCESS,
                "bad rc from gni_ep_unbind trying to cleanup: %d\n", rrc);

return_out:
        LASSERTF(rc != 0, "SOFTWARE BUG: rc == 0\n");
        CERROR("Error setting connection params from %s: %d\n",
               libcfs_nid2str(connreq->gncr_srcnid), rc);
        return rc;
}

/* needs down_read on kgn_net_rw_sem held from before this call until
 * after the write_lock on kgn_peer_conn_lock - this ensures we stay sane
 * with kgnilnd_shutdown - it'll get the sem and set shutdown, then get the
 * kgn_peer_conn_lock to start del_peer'ing. If we hold the sem until after
 * kgn_peer_conn_lock is held, we guarantee that nobody calls
 * kgnilnd_add_peer_locked without checking gnn_shutdown */
int
kgnilnd_create_peer_safe(kgn_peer_t **peerp, lnet_nid_t nid, kgn_net_t *net)
{
        kgn_peer_t    *peer;
        int            rc;

        LASSERT(nid != LNET_NID_ANY);

        /* We don't pass the net around in the dgram anymore, so here is where
         * we find it. This will work unless it's in shutdown or the nid has a
         * net that is invalid; either way an error code needs to be returned
         * in that case.
         *
         * If the net passed in is not NULL then we can use it; this alleviates
         * looking it up when the calling function already has access to the data.
         */
        if (net == NULL) {
                rc = kgnilnd_find_net(nid, &net);
                if (rc < 0)
                        return rc;
        } else {
                /* find_net adds a reference on the net; since we are not using
                 * it we must take one manually so the net references are
                 * correct when tearing down the net
                 */
                kgnilnd_net_addref(net);
        }

        LIBCFS_ALLOC(peer, sizeof(*peer));
        if (peer == NULL) {
                kgnilnd_net_decref(net);
                return -ENOMEM;
        }
        peer->gnp_nid = nid;

        /* translate from nid to nic addr & store */
        rc = kgnilnd_nid_to_nicaddrs(LNET_NIDADDR(nid), 1, &peer->gnp_host_id);
        if (rc <= 0) {
                kgnilnd_net_decref(net);
                LIBCFS_FREE(peer, sizeof(*peer));
                return -ESRCH;
        }
        CDEBUG(D_NET, "peer 0x%p->%s -> NIC 0x%x\n", peer,
                libcfs_nid2str(nid), peer->gnp_host_id);

        atomic_set(&peer->gnp_refcount, 1);     /* 1 ref for caller */
        atomic_set(&peer->gnp_dirty_eps, 0);

        INIT_LIST_HEAD(&peer->gnp_list);
        INIT_LIST_HEAD(&peer->gnp_connd_list);
        INIT_LIST_HEAD(&peer->gnp_conns);
        INIT_LIST_HEAD(&peer->gnp_tx_queue);

        /* the first reconnect should happen immediately, so we leave
         * gnp_reconnect_interval set to 0 */

        LASSERTF(net != NULL, "peer 0x%p->%s with NULL net\n",
                 peer, libcfs_nid2str(nid));

        /* must have kgn_net_rw_sem held for this...  */
        if (net->gnn_shutdown) {
                /* shutdown has started already */
                kgnilnd_net_decref(net);
                LIBCFS_FREE(peer, sizeof(*peer));
                return -ESHUTDOWN;
        }

        peer->gnp_net = net;

        atomic_inc(&kgnilnd_data.kgn_npeers);

        *peerp = peer;
        return 0;
}

void
kgnilnd_destroy_peer(kgn_peer_t *peer)
{
        CDEBUG(D_NET, "peer %s %p deleted\n",
               libcfs_nid2str(peer->gnp_nid), peer);
        LASSERTF(atomic_read(&peer->gnp_refcount) == 0,
                 "peer 0x%p->%s refs %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid),
                 atomic_read(&peer->gnp_refcount));
        LASSERTF(atomic_read(&peer->gnp_dirty_eps) == 0,
                 "peer 0x%p->%s dirty eps %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid),
                 atomic_read(&peer->gnp_dirty_eps));
        LASSERTF(peer->gnp_net != NULL, "peer %p (%s) with NULL net\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(!kgnilnd_peer_active(peer),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE || peer->gnp_connecting == GNILND_PEER_KILL,
                 "peer 0x%p->%s, connecting %d\n",
                 peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
        LASSERTF(list_empty(&peer->gnp_conns),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_tx_queue),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_connd_list),
                 "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));

        /* NB a peer's connections keep a reference on their peer until
         * they are destroyed, so we can be assured that _all_ state to do
         * with this peer has been cleaned up when its refcount drops to
         * zero. */

        atomic_dec(&kgnilnd_data.kgn_npeers);
        kgnilnd_net_decref(peer->gnp_net);

        LIBCFS_FREE(peer, sizeof(*peer));
}

/* the conn might not have made it all the way through to a connected
 * state - but we need to purgatory any conn that a remote peer might
 * have seen through a posted dgram as well */
void
kgnilnd_add_purgatory_locked(kgn_conn_t *conn, kgn_peer_t *peer)
{
        kgn_mbox_info_t *mbox = NULL;
        ENTRY;

        /* NB - the caller should own the conn by removing it from the
         * scheduler thread when finishing the close */

        LASSERTF(peer != NULL, "conn %p with NULL peer\n", conn);

        /* If this is still true, need to add the calls to unlink back in and
         * figure out how to close the hole on loopback conns */
        LASSERTF(kgnilnd_peer_active(peer), "can't use inactive peer %s (%p)"
                " we'll never recover the resources\n",
                libcfs_nid2str(peer->gnp_nid), peer);

        CDEBUG(D_NET, "conn %p peer %p dev %p\n", conn, peer,
                conn->gnc_device);

        /* add ref for mbox purgatory hold */
        kgnilnd_peer_addref(peer);
        kgnilnd_conn_addref(conn);
        conn->gnc_in_purgatory = 1;

        mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
        mbox->mbx_prev_nid = peer->gnp_nid;
        mbox->mbx_add_purgatory = jiffies;
        kgnilnd_release_mbox(conn, 1);

        LASSERTF(list_empty(&conn->gnc_mdd_list),
                "conn 0x%p->%s with active purgatory hold MDD %d\n",
                conn, libcfs_nid2str(peer->gnp_nid),
                kgnilnd_count_list(&conn->gnc_mdd_list));

        EXIT;
}

/* Instead of detaching everything from purgatory here we just mark the conn
 * as needing detach; when the reaper checks the conn the next time it will
 * detach it.
 * Calling function requires write_lock held on kgn_peer_conn_lock
 */
void
kgnilnd_mark_for_detach_purgatory_all_locked(kgn_peer_t *peer)
{
        kgn_conn_t       *conn;

        list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
                if (conn->gnc_in_purgatory && !conn->gnc_needs_detach) {
                        conn->gnc_needs_detach = 1;
                        kgnilnd_admin_addref(kgnilnd_data.kgn_npending_detach);
                }
        }
}

/* Calling function needs a write_lock held on kgn_peer_conn_lock */
void
kgnilnd_detach_purgatory_locked(kgn_conn_t *conn, struct list_head *conn_list)
{
        kgn_mbox_info_t *mbox = NULL;

        /* if needed, add the conn purgatory data to the list passed in */
        if (conn->gnc_in_purgatory) {
                CDEBUG(D_NET, "peer %p->%s purg_conn %p@%s mdd_list #tx %d\n",
                        conn->gnc_peer, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                        conn, kgnilnd_conn_state2str(conn),
                        kgnilnd_count_list(&conn->gnc_mdd_list));

                mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
                mbox->mbx_detach_of_purgatory = jiffies;

                /* conn->gnc_list is the entry point on peer->gnp_conns, so detaching it
                 * here removes it from the list of 'valid' peer connections.
                 * We put the current conn onto a list of conns to call
                 * kgnilnd_release_purgatory_locked() on, and as such the caller of
                 * kgnilnd_detach_purgatory_locked() now owns that conn, since it's
                 * not on the peer's conn_list anymore.
                 */

                kgnilnd_peer_decref(conn->gnc_peer);
                list_del_init(&conn->gnc_list);

                /* NB - only unlinking if we set pending in del_peer_locked from admin or
                 * shutdown */
                if (kgnilnd_peer_active(conn->gnc_peer) &&
                    conn->gnc_peer->gnp_pending_unlink &&
                    kgnilnd_can_unlink_peer_locked(conn->gnc_peer)) {
                        kgnilnd_unlink_peer_locked(conn->gnc_peer);
                }
                /* The reaper will not call detach unless the conn is fully through
                 * kgnilnd_complete_closed_conn. If the conn is not in a DONE state,
                 * we are attempting to detach even though it has not been fully
                 * cleaned up. If we detach while the conn is still closing we will
                 * end up with an orphaned connection that has a valid ep_handle but
                 * is not on any peer.
                 */

                LASSERTF(conn->gnc_state == GNILND_CONN_DONE, "Conn in invalid state %p@%s\n",
                                conn, kgnilnd_conn_state2str(conn));

                /* move from peer to the delayed release list */
                list_add_tail(&conn->gnc_list, conn_list);
        }
}

void
kgnilnd_release_purgatory_list(struct list_head *conn_list)
{
        kgn_device_t            *dev;
        kgn_conn_t              *conn, *connN;
        kgn_mdd_purgatory_t     *gmp, *gmpN;

        list_for_each_entry_safe(conn, connN, conn_list, gnc_list) {
                dev = conn->gnc_device;

                kgnilnd_release_mbox(conn, -1);
                conn->gnc_in_purgatory = 0;

                list_del_init(&conn->gnc_list);

                /* gnc_needs_detach is set in kgnilnd_del_conn_or_peer. It is used to keep track
                 * of conns that have been marked for detach by kgnilnd_del_conn_or_peer.
                 * The function uses kgn_npending_detach to verify the conn has
                 * actually been detached.
                 */

                if (conn->gnc_needs_detach)
                        kgnilnd_admin_decref(kgnilnd_data.kgn_npending_detach);

                /* if this guy is really dead (we are doing release from reaper),
                 * make sure we tell LNet - if this is from other context,
                 * the checks in the function will prevent an errant
                 * notification */
                kgnilnd_peer_notify(conn->gnc_peer, conn->gnc_error);

                list_for_each_entry_safe(gmp, gmpN, &conn->gnc_mdd_list,
                                         gmp_list) {
                        CDEBUG(D_NET,
                               "dev %p releasing held mdd "LPX64"."LPX64"\n",
                               conn->gnc_device, gmp->gmp_map_key.qword1,
                               gmp->gmp_map_key.qword2);

                        atomic_dec(&dev->gnd_n_mdd_held);
                        kgnilnd_mem_mdd_release(conn->gnc_device->gnd_handle,
                                                &gmp->gmp_map_key);
                        /* ignoring the return code - if kgni/ghal can't find it
                         * it must be released already */

                        list_del_init(&gmp->gmp_list);
                        LIBCFS_FREE(gmp, sizeof(*gmp));
                }
                /* lose conn ref for purgatory */
                kgnilnd_conn_decref(conn);
        }
}

/* needs write_lock on kgnilnd_data.kgn_peer_conn_lock held */
void
kgnilnd_peer_increase_reconnect_locked(kgn_peer_t *peer)
{
        int current_to;

        current_to = peer->gnp_reconnect_interval;

        /* we'll try to reconnect fast the first time, then back-off */
        if (current_to == 0) {
                peer->gnp_reconnect_time = jiffies - 1;
                current_to = *kgnilnd_tunables.kgn_min_reconnect_interval;
        } else {
                peer->gnp_reconnect_time = jiffies + cfs_time_seconds(current_to);
                /* add 50% of min timeout & retry */
                current_to += *kgnilnd_tunables.kgn_min_reconnect_interval / 2;
        }

        current_to = MIN(current_to,
                         *kgnilnd_tunables.kgn_max_reconnect_interval);
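
        /* for illustration (the values are tunables, not defaults asserted
         * here): with kgn_min_reconnect_interval = 60 and
         * kgn_max_reconnect_interval = 300, successive attempts are delayed
         * 0 (immediate), 60, 90, 120, ... seconds, capped at 300 */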

        peer->gnp_reconnect_interval = current_to;
        CDEBUG(D_NET, "peer %s can reconnect at %lu interval %lu\n",
               libcfs_nid2str(peer->gnp_nid), peer->gnp_reconnect_time,
               peer->gnp_reconnect_interval);
}

/* needs kgnilnd_data.kgn_peer_conn_lock held */
kgn_peer_t *
kgnilnd_find_peer_locked(lnet_nid_t nid)
{
        struct list_head *peer_list = kgnilnd_nid2peerlist(nid);
        kgn_peer_t       *peer;

        /* Chopping nid down to only NIDADDR using LNET_NIDADDR so we only
         * have a single peer per device instead of a peer per nid/net combo.
         */

        list_for_each_entry(peer, peer_list, gnp_list) {
                if (LNET_NIDADDR(nid) != LNET_NIDADDR(peer->gnp_nid))
                        continue;

                CDEBUG(D_NET, "got peer [%p] -> %s c %d (%d)\n",
                       peer, libcfs_nid2str(nid),
                       peer->gnp_connecting,
                       atomic_read(&peer->gnp_refcount));
                return peer;
        }
        return NULL;
}

/* need write_lock on kgn_peer_conn_lock */
void
kgnilnd_unlink_peer_locked(kgn_peer_t *peer)
{
        LASSERTF(list_empty(&peer->gnp_conns),
                "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(list_empty(&peer->gnp_tx_queue),
                "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        LASSERTF(kgnilnd_peer_active(peer),
                "peer 0x%p->%s\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        CDEBUG(D_NET, "unlinking peer 0x%p->%s\n",
                peer, libcfs_nid2str(peer->gnp_nid));

        list_del_init(&peer->gnp_list);
        kgnilnd_data.kgn_peer_version++;
        kgnilnd_admin_decref(kgnilnd_data.kgn_npending_unlink);
        /* lose peerlist's ref */
        kgnilnd_peer_decref(peer);
}

int
kgnilnd_get_peer_info(int index,
                      kgn_peer_t **found_peer,
                      lnet_nid_t *id, __u32 *nic_addr,
                      int *refcount, int *connecting)
{
        struct list_head  *ptmp;
        kgn_peer_t        *peer;
        int               i;
        int               rc = -ENOENT;

        read_lock(&kgnilnd_data.kgn_peer_conn_lock);

        for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
                list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
                        peer = list_entry(ptmp, kgn_peer_t, gnp_list);

                        if (peer->gnp_nid != *id)
                                continue;

                        if (index-- > 0)
                                continue;

                        CDEBUG(D_NET, "found peer %p (%s) at index %d\n",
                               peer, libcfs_nid2str(peer->gnp_nid), index);

                        *found_peer  = peer;
                        *id          = peer->gnp_nid;
                        *nic_addr    = peer->gnp_host_id;
                        *refcount    = atomic_read(&peer->gnp_refcount);
                        *connecting  = peer->gnp_connecting;

                        rc = 0;
                        goto out;
                }
        }
out:
        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
        if (rc)
                CDEBUG(D_NET, "no gni peer at index %d\n", index);
        return rc;
}

/* requires write_lock on kgn_peer_conn_lock held */
void
kgnilnd_add_peer_locked(lnet_nid_t nid, kgn_peer_t *new_stub_peer, kgn_peer_t **peerp)
{
        kgn_peer_t        *peer, *peer2;

        LASSERTF(new_stub_peer != NULL, "bad stub peer for nid %s\n",
                 libcfs_nid2str(nid));

        peer2 = kgnilnd_find_peer_locked(nid);
        if (peer2 != NULL) {
                /* A peer was created during the lock transition, so drop
                 * the new one we created */
                kgnilnd_peer_decref(new_stub_peer);
                peer = peer2;
        } else {
                peer = new_stub_peer;
                /* peer table takes existing ref on peer */

                LASSERTF(!kgnilnd_peer_active(peer),
                        "peer 0x%p->%s already in peer table\n",
                        peer, libcfs_nid2str(peer->gnp_nid));
                list_add_tail(&peer->gnp_list,
                              kgnilnd_nid2peerlist(nid));
                kgnilnd_data.kgn_peer_version++;
        }

        LASSERTF(peer->gnp_net != NULL, "peer 0x%p->%s with NULL net\n",
                 peer, libcfs_nid2str(peer->gnp_nid));
        *peerp = peer;
}
1313
1314 int
1315 kgnilnd_add_peer(kgn_net_t *net, lnet_nid_t nid, kgn_peer_t **peerp)
1316 {
1317         kgn_peer_t        *peer;
1318         int                rc;
1319         ENTRY;
1320
1321         if (nid == LNET_NID_ANY)
1322                 RETURN(-EINVAL);
1323
1324         /* NB - this will not block during normal operations -
1325          * the only writer of this is in the startup/shutdown path. */
1326         rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
1327         if (!rc) {
1328                 rc = -ESHUTDOWN;
1329                 RETURN(rc);
1330         }
1331         rc = kgnilnd_create_peer_safe(&peer, nid, net);
1332         if (rc != 0) {
1333                 up_read(&kgnilnd_data.kgn_net_rw_sem);
1334                 RETURN(rc);
1335         }
1336
1337         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1338         up_read(&kgnilnd_data.kgn_net_rw_sem);
1339
1340         kgnilnd_add_peer_locked(nid, peer, peerp);
1341
1342         CDEBUG(D_NET, "peer 0x%p->%s connecting %d\n",
1343                *peerp, libcfs_nid2str((*peerp)->gnp_nid),
1344                (*peerp)->gnp_connecting);
1345
1346         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1347         RETURN(0);
1348 }
1349
1350 /* needs write_lock on kgn_peer_conn_lock */
1351 void
1352 kgnilnd_cancel_peer_connect_locked(kgn_peer_t *peer, struct list_head *zombies)
1353 {
1354         kgn_tx_t        *tx, *txn;
1355
1356         /* we do care about the state of gnp_connecting - we could be between
1357          * reconnect attempts, so try to find the dgram and cancel the TX
1358          * anyway. If we are in the process of posting, DON'T do anything;
1359          * once it fails or succeeds we can nuke the connect attempt.
1360          * We have no idea where in kgnilnd_post_dgram we are, so we can't
1361          * attempt to cancel until the function is done.
1362          */
1363
1364         /* make sure peer isn't in the process of connecting or waiting for connect */
1365         spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1366         if (!(list_empty(&peer->gnp_connd_list))) {
1367                 list_del_init(&peer->gnp_connd_list);
1368                 /* remove connd ref */
1369                 kgnilnd_peer_decref(peer);
1370         }
1371         spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1372
1373         if (peer->gnp_connecting == GNILND_PEER_POSTING || peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH) {
1374                 peer->gnp_connecting = GNILND_PEER_NEEDS_DEATH;
1375                 /* We are in the process of posting right now; the xchg set it
1376                  * up for us to cancel the connect, so we are finished for now */
1377         } else {
1378                 /* no need for an exchange - we hold the peer lock and it's ready for us to nuke */
1379                 LASSERTF(peer->gnp_connecting != GNILND_PEER_POSTING,
1380                         "Peer in invalid state 0x%p->%s, connecting %d\n",
1381                         peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
1382                 peer->gnp_connecting = GNILND_PEER_IDLE;
1383                 set_mb(peer->gnp_last_dgram_errno, -ETIMEDOUT);
1384                 kgnilnd_find_and_cancel_dgram(peer->gnp_net->gnn_dev,
1385                                                       peer->gnp_nid);
1386         }
1387
1388         /* The least we can do is nuke the TXs, no matter what... */
1389         list_for_each_entry_safe(tx, txn, &peer->gnp_tx_queue, tx_list) {
1390                 kgnilnd_tx_del_state_locked(tx, peer, NULL,
1391                                            GNILND_TX_ALLOCD);
1392                 list_add_tail(&tx->tx_list, zombies);
1393         }
1394 }
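
/* Illustrative caller pattern for the 'zombies' list: TXs are collected here
 * under kgn_peer_conn_lock and only completed once the lock is dropped, as
 * kgnilnd_del_conn_or_peer() below does via kgnilnd_txlist_done():
 *
 *	LIST_HEAD(zombies);
 *
 *	write_lock(&kgnilnd_data.kgn_peer_conn_lock);
 *	kgnilnd_cancel_peer_connect_locked(peer, &zombies);
 *	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
 *	kgnilnd_txlist_done(&zombies, -ESHUTDOWN);
 */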
1395
1396 /* needs write_lock on kgn_peer_conn_lock */
1397 void
1398 kgnilnd_del_peer_locked(kgn_peer_t *peer, int error)
1399 {
1400         /* this peer could be passive and only held for purgatory,
1401          * take a ref to ensure it doesn't disappear in this function */
1402         kgnilnd_peer_addref(peer);
1403
1404         CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);
1405
1406         /* if purgatory release cleared it out, don't try again */
1407         if (kgnilnd_peer_active(peer)) {
1408                 /* always do this to allow kgnilnd_start_connect and
1409                  * kgnilnd_finish_connect to catch this before they
1410                  * wrap up their operations */
1411                 if (kgnilnd_can_unlink_peer_locked(peer)) {
1412                         /* already released purgatory, so only active
1413                          * conns hold it */
1414                         kgnilnd_unlink_peer_locked(peer);
1415                 } else {
1416                         kgnilnd_close_peer_conns_locked(peer, error);
1417                         /* peer unlinks itself when last conn is closed */
1418                 }
1419         }
1420
1421         /* we are done, release back to the wild */
1422         kgnilnd_peer_decref(peer);
1423 }
1424
1425 int
1426 kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
1427                           int error)
1428 {
1429         LIST_HEAD               (souls);
1430         LIST_HEAD               (zombies);
1431         struct list_head        *ptmp, *pnxt;
1432         kgn_peer_t              *peer;
1433         int                     lo;
1434         int                     hi;
1435         int                     i;
1436         int                     rc = -ENOENT;
1437
1438         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1439
1440         if (nid != LNET_NID_ANY)
1441                 lo = hi = kgnilnd_nid2peerlist(nid) - kgnilnd_data.kgn_peers;
1442         else {
1443                 lo = 0;
1444                 hi = *kgnilnd_tunables.kgn_peer_hash_size - 1;
1445                 /* wildcards always succeed */
1446                 rc = 0;
1447         }
1448
1449         for (i = lo; i <= hi; i++) {
1450                 list_for_each_safe(ptmp, pnxt, &kgnilnd_data.kgn_peers[i]) {
1451                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1452
1453                         LASSERTF(peer->gnp_net != NULL,
1454                                 "peer %p (%s) with NULL net\n",
1455                                  peer, libcfs_nid2str(peer->gnp_nid));
1456
1457                         if (net != NULL && peer->gnp_net != net)
1458                                 continue;
1459
1460                         if (!(nid == LNET_NID_ANY || LNET_NIDADDR(peer->gnp_nid) == LNET_NIDADDR(nid)))
1461                                 continue;
1462
1463                         /* In all cases, we want to stop any in-flight
1464                          * connect attempts */
1465                         kgnilnd_cancel_peer_connect_locked(peer, &zombies);
1466
1467                         switch (command) {
1468                         case GNILND_DEL_CONN:
1469                                 kgnilnd_close_peer_conns_locked(peer, error);
1470                                 break;
1471                         case GNILND_DEL_PEER:
1472                                 peer->gnp_pending_unlink = 1;
1473                                 kgnilnd_admin_addref(kgnilnd_data.kgn_npending_unlink);
1474                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1475                                 kgnilnd_del_peer_locked(peer, error);
1476                                 break;
1477                         case GNILND_CLEAR_PURGATORY:
1478                                 /* Mark everything ready for detach; the reaper
1479                                  * will clean up once we release the kgn_peer_conn_lock
1480                                  */
1481                                 kgnilnd_mark_for_detach_purgatory_all_locked(peer);
1482                                 peer->gnp_last_errno = -EISCONN;
1483                                 /* clear reconnect state so the peer can reconnect soon */
1484                                 peer->gnp_reconnect_time = 0;
1485                                 peer->gnp_reconnect_interval = 0;
1486                                 break;
1487                         default:
1488                                 CERROR("bad command %d\n", command);
1489                                 LBUG();
1490                         }
1491                         /* we matched something */
1492                         rc = 0;
1493                 }
1494         }
1495
1496         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1497
1498         /* release all of the souls found held in purgatory */
1499         kgnilnd_release_purgatory_list(&souls);
1500
1501         /* nuke peer TX */
1502         kgnilnd_txlist_done(&zombies, error);
1503
1504         /* This function does not return until the commands it initiated have completed,
1505          * since they have to work their way through the other threads. In the case of shutdown,
1506          * threads are not woken up until after this call is initiated, so we cannot wait; we
1507          * just need to return. The same applies to stack reset: we shouldn't wait, as the
1508          * reset thread handles closing.
1509          */
1510
1511         CFS_RACE(CFS_FAIL_GNI_RACE_RESET);
1512
1513         if (error == -ENOTRECOVERABLE || error == -ESHUTDOWN) {
1514                 return rc;
1515         }
1516
1517         i = 4;
1518         while (atomic_read(&kgnilnd_data.kgn_npending_conns)   ||
1519                atomic_read(&kgnilnd_data.kgn_npending_detach)  ||
1520                atomic_read(&kgnilnd_data.kgn_npending_unlink)) {
1521
1522                 cfs_pause(cfs_time_seconds(1));
1523                 i++;
1524
1525                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, "Waiting on %d peers %d closes %d detaches\n",
1526                                 atomic_read(&kgnilnd_data.kgn_npending_unlink),
1527                                 atomic_read(&kgnilnd_data.kgn_npending_conns),
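                /* NB: (i & -i) == i only when i is a power of two, so the
                 * D_WARNING version of this message fires at exponentially
                 * increasing intervals */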
1528                                 atomic_read(&kgnilnd_data.kgn_npending_detach));
1529         }
1530
1531         return rc;
1532 }
1533
1534 kgn_conn_t *
1535 kgnilnd_get_conn_by_idx(int index)
1536 {
1537         kgn_peer_t        *peer;
1538         struct list_head  *ptmp;
1539         kgn_conn_t        *conn;
1540         struct list_head  *ctmp;
1541         int                i;
1542
1543
1544         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1545                 read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1546                 list_for_each(ptmp, &kgnilnd_data.kgn_peers[i]) {
1547
1548                         peer = list_entry(ptmp, kgn_peer_t, gnp_list);
1549
1550                         list_for_each(ctmp, &peer->gnp_conns) {
1551                                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
1552
1553                                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1554                                         continue;
1555
1556                                 if (index-- > 0)
1557                                         continue;
1558
1559                                 CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
1560                                        libcfs_nid2str(conn->gnc_peer->gnp_nid),
1561                                        atomic_read(&conn->gnc_refcount));
1562                                 kgnilnd_conn_addref(conn);
1563                                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1564                                 return conn;
1565                         }
1566                 }
1567                 read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1568         }
1569
1570         return NULL;
1571 }
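
/* NB: the conn returned above carries a reference taken under the read lock;
 * the caller must drop it with kgnilnd_conn_decref(), as the
 * IOC_LIBCFS_GET_CONN case in kgnilnd_ctl() below does. */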
1572
1573 int
1574 kgnilnd_get_conn_info(kgn_peer_t *peer,
1575                       int *device_id, __u64 *peerstamp,
1576                       int *tx_seq, int *rx_seq,
1577                       int *fmaq_len, int *nfma, int *nrdma)
1578 {
1579         kgn_conn_t        *conn;
1580         int               rc = 0;
1581
1582         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1583
1584         conn = kgnilnd_find_conn_locked(peer);
1585         if (conn == NULL) {
1586                 rc = -ENOENT;
1587                 goto out;
1588         }
1589
1590         *device_id = conn->gnc_device->gnd_host_id;
1591         *peerstamp = conn->gnc_peerstamp;
1592         *tx_seq = conn->gnc_tx_seq;
1593         *rx_seq = conn->gnc_rx_seq;
1594         *fmaq_len = kgnilnd_count_list(&conn->gnc_fmaq);
1595         *nfma = atomic_read(&conn->gnc_nlive_fma);
1596         *nrdma = atomic_read(&conn->gnc_nlive_rdma);
1597 out:
1598         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1599         return rc;
1600 }
1601
1602 /* needs write_lock on kgn_peer_conn_lock */
1603 int
1604 kgnilnd_close_peer_conns_locked(kgn_peer_t *peer, int why)
1605 {
1606         kgn_conn_t         *conn;
1607         struct list_head   *ctmp, *cnxt;
1608         int                 count = 0;
1609
1610         list_for_each_safe(ctmp, cnxt, &peer->gnp_conns) {
1611                 conn = list_entry(ctmp, kgn_conn_t, gnc_list);
1612
1613                 if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
1614                         continue;
1615
1616                 count++;
1617                 /* we mark gnc_needs_closing and increment kgn_npending_conns so that
1618                  * kgnilnd_del_conn_or_peer can wait on the other threads closing
1619                  * and cleaning up the connection.
1620                  */
1621                 if (!conn->gnc_needs_closing) {
1622                         conn->gnc_needs_closing = 1;
1623                         kgnilnd_admin_addref(kgnilnd_data.kgn_npending_conns);
1624                 }
1625                 kgnilnd_close_conn_locked(conn, why);
1626         }
1627         return count;
1628 }
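
/* Sketch of the accounting contract above, assuming (per the comment in the
 * loop) that the threads doing the close drop kgn_npending_conns as each conn
 * is cleaned up - a caller that wants to close and wait does:
 *
 *	int count;
 *
 *	write_lock(&kgnilnd_data.kgn_peer_conn_lock);
 *	count = kgnilnd_close_peer_conns_locked(peer, -ENETRESET);
 *	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
 *
 * and then polls kgn_npending_conns, as kgnilnd_del_conn_or_peer() does. */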
1629
1630 int
1631 kgnilnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
1632 {
1633         struct libcfs_ioctl_data *data = arg;
1634         kgn_net_t                *net = ni->ni_data;
1635         int                       rc = -EINVAL;
1636
1637         LASSERT(ni == net->gnn_ni);
1638
1639         switch (cmd) {
1640         case IOC_LIBCFS_GET_PEER: {
1641                 lnet_nid_t   nid = 0;
1642                 kgn_peer_t  *peer = NULL;
1643                 __u32 nic_addr = 0;
1644                 __u64 peerstamp = 0;
1645                 int peer_refcount = 0, peer_connecting = 0;
1646                 int device_id = 0;
1647                 int tx_seq = 0, rx_seq = 0;
1648                 int fmaq_len = 0, nfma = 0, nrdma = 0;
1649
1650                 rc = kgnilnd_get_peer_info(data->ioc_count, &peer,
1651                                            &nid, &nic_addr, &peer_refcount,
1652                                            &peer_connecting);
1653                 if (rc)
1654                         break;
1655
1656                 /* Barf */
1657                 /* LNET_MKNID is used to hide from LNet the multiplexing/demultiplexing of connections and peers.
1658                  * LNet assumes one conn and one peer per net; LNET_MKNID/LNET_NIDADDR lets LNet see what it
1659                  * wants to see instead of the underlying network that is actually carrying the data
1660                  */
1661                 data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(nid));
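                /* e.g. a peer stored internally as 15@gni1 but queried through
                 * an NI on net gni is reported back as 15@gni (hypothetical
                 * values - only the address part of the stored NID survives) */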
1662                 data->ioc_flags  = peer_connecting;
1663                 data->ioc_count  = peer_refcount;
1664
1665                 rc = kgnilnd_get_conn_info(peer, &device_id, &peerstamp,
1666                                            &tx_seq, &rx_seq, &fmaq_len,
1667                                            &nfma, &nrdma);
1668
1669                 /* This is allowable - a persistent peer may not
1670                  * have a connection */
1671                 if (rc) {
1672                         /* flag to indicate we are not connected -
1673                          * need to print as such */
1674                         data->ioc_flags |= (1<<16);
1675                         rc = 0;
1676                 } else {
1677                         /* still barf */
1678                         data->ioc_net = device_id;
1679                         data->ioc_u64[0] = peerstamp;
1680                         data->ioc_u32[0] = fmaq_len;
1681                         data->ioc_u32[1] = nfma;
1682                         data->ioc_u32[2] = tx_seq;
1683                         data->ioc_u32[3] = rx_seq;
1684                         data->ioc_u32[4] = nrdma;
1685                 }
1686                 break;
1687         }
1688         case IOC_LIBCFS_ADD_PEER: {
1689                 /* just a dummy value to allow using the common interface */
1690                 kgn_peer_t      *peer;
1691                 rc = kgnilnd_add_peer(net, data->ioc_nid, &peer);
1692                 break;
1693         }
1694         case IOC_LIBCFS_DEL_PEER: {
1695                 /* NULL is passed in so it affects all peers in existence without regard to network,
1696                  * as the peer may not exist on the network LNet believes it to be on.
1697                  */
1698                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1699                                               GNILND_DEL_PEER, -EUCLEAN);
1700                 break;
1701         }
1702         case IOC_LIBCFS_GET_CONN: {
1703                 kgn_conn_t *conn = kgnilnd_get_conn_by_idx(data->ioc_count);
1704
1705                 if (conn == NULL)
1706                         rc = -ENOENT;
1707                 else {
1708                         rc = 0;
1709                         /* LNET_MKNID is used to build the address LNet wants to see,
1710                          * instead of the underlying connection that is actually used to send the data
1711                          */
1712                         data->ioc_nid    = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(conn->gnc_peer->gnp_nid));
1713                         data->ioc_u32[0] = conn->gnc_device->gnd_id;
1714                         kgnilnd_conn_decref(conn);
1715                 }
1716                 break;
1717         }
1718         case IOC_LIBCFS_CLOSE_CONNECTION: {
1719                 /* use error = -ENETRESET to indicate it was lctl disconnect */
1720                 /* NULL is passed in so it affects all the nets as the connection is virtual
1721                  * and may not exist on the network LNET believes it to be on.
1722                  */
1723                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1724                                               GNILND_DEL_CONN, -ENETRESET);
1725                 break;
1726         }
1727         case IOC_LIBCFS_PUSH_CONNECTION: {
1728                 /* we use this to flush purgatory */
1729                 rc = kgnilnd_del_conn_or_peer(NULL, data->ioc_nid,
1730                                               GNILND_CLEAR_PURGATORY, -EUCLEAN);
1731                 break;
1732         }
1733         case IOC_LIBCFS_REGISTER_MYNID: {
1734                 /* Ignore if this is a noop */
1735                 if (data->ioc_nid == ni->ni_nid) {
1736                         rc = 0;
1737                 } else {
1738                         CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
1739                                libcfs_nid2str(data->ioc_nid),
1740                                libcfs_nid2str(ni->ni_nid));
1741                         rc = -EINVAL;
1742                 }
1743                 break;
1744         }
1745         }
1746
1747         return rc;
1748 }
1749
1750 void
1751 kgnilnd_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
1752 {
1753         kgn_net_t               *net = ni->ni_data;
1754         kgn_tx_t                *tx;
1755         kgn_peer_t              *peer = NULL;
1756         kgn_conn_t              *conn = NULL;
1757         lnet_process_id_t       id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID};
1758         ENTRY;
1759
1760         /* I expect to find him, so only take a read lock */
1761         read_lock(&kgnilnd_data.kgn_peer_conn_lock);
1762         peer = kgnilnd_find_peer_locked(nid);
1763         if (peer != NULL) {
1764                 /* LIE if in a quiesce - we will update the timeouts after,
1765                  * but we don't want sends failing during it */
1766                 if (kgnilnd_data.kgn_quiesce_trigger) {
1767                         *when = jiffies;
1768                         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1769                         GOTO(out, 0);
1770                 }
1771
1772                 /* Update to best guess, might refine on later checks */
1773                 *when = peer->gnp_last_alive;
1774
1775                 /* we have a peer, how about a conn? */
1776                 conn = kgnilnd_find_conn_locked(peer);
1777
1778                 if (conn == NULL)  {
1779                         /* if there is no conn, check peer last errno to see if clean disconnect
1780                          * - if it was, we lie to LNet because we believe a TX would complete
1781                          * on reconnect */
1782                         if (kgnilnd_conn_clean_errno(peer->gnp_last_errno)) {
1783                                 *when = jiffies;
1784                         }
1785                         /* we still want to fire a TX and new conn in this case */
1786                 } else {
1787                         /* gnp_last_alive is valid, run for the hills */
1788                         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1789                         GOTO(out, 0);
1790                 }
1791         }
1792         /* if we get here, either we have no peer or no conn for him, so fire off
1793          * new TX to trigger conn setup */
1794         read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1795
1796         /* if we couldn't find him, we'll fire up a TX and get connected -
1797          * if we don't do this, after ni_peer_timeout, LNet will declare him dead.
1798          * So really we treat kgnilnd_query as a bit of a 'connect now' type
1799          * event because it'll only do this when it wants to send
1800          *
1801          * Use a real TX for this to get the proper gnp_tx_queue behavior, etc
1802          * normally we'd use kgnilnd_send_ctlmsg for this, but we don't really
1803          * care that this goes out quickly since we already know we need a new conn
1804          * formed */
1805         if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
1806                 return;
1807
1808         tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, ni->ni_nid);
1809         if (tx != NULL) {
1810                 kgnilnd_launch_tx(tx, net, &id);
1811         }
1812 out:
1813         CDEBUG(D_NETTRACE, "peer 0x%p->%s when %lu\n", peer,
1814                libcfs_nid2str(nid), *when);
1815         EXIT;
1816 }
1817
1818 int
1819 kgnilnd_dev_init(kgn_device_t *dev)
1820 {
1821         gni_return_t      rrc;
1822         int               rc = 0;
1823         unsigned int      cq_size;
1824         ENTRY;
1825
1826         /* size of these CQs should be able to accommodate the outgoing
1827          * RDMA and SMSG transactions.  Since we don't really know what we
1828          * need here, we'll take credits * 2 * 3 to allow a bunch.
1829          * We need to dig into this more with the performance work. */
1830         cq_size = *kgnilnd_tunables.kgn_credits * 2 * 3;
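        /* worked example (value hypothetical): if kgn_credits were 256, each
         * send CQ would get 256 * 2 * 3 = 1536 entries */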
1831
1832         rrc = kgnilnd_cdm_create(dev->gnd_id, *kgnilnd_tunables.kgn_ptag,
1833                                  GNILND_COOKIE, 0,
1834                                  &dev->gnd_domain);
1835         if (rrc != GNI_RC_SUCCESS) {
1836                 CERROR("Can't create CDM %d (%d)\n", dev->gnd_id, rrc);
1837                 rc = -ENODEV;
1838                 GOTO(failed, rc);
1839         }
1840
1841         rrc = kgnilnd_cdm_attach(dev->gnd_domain, dev->gnd_id,
1842                                  &dev->gnd_host_id, &dev->gnd_handle);
1843         if (rrc != GNI_RC_SUCCESS) {
1844                 CERROR("Can't attach CDM to device %d (%d)\n",
1845                         dev->gnd_id, rrc);
1846                 rc = -ENODEV;
1847                 GOTO(failed, rc);
1848         }
1849
1850         rc = kgnilnd_setup_nic_translation(dev->gnd_host_id);
1851         if (rc != 0) {
1852                 rc = -ENODEV;
1853                 GOTO(failed, rc);
1854         }
1855
1856         /* only dev 0 gets the errors - no need to reset the stack twice
1857          * - this works because we have a single PTAG; if we had more,
1858          * then we'd need to have multiple handlers */
1859         if (dev->gnd_id == 0) {
1860                 rrc = kgnilnd_subscribe_errors(dev->gnd_handle, GNI_ERRMASK_CRITICAL,
1861                                               0, NULL, kgnilnd_critical_error,
1862                                               &dev->gnd_err_handle);
1863                 if (rrc != GNI_RC_SUCCESS) {
1864                         CERROR("Can't subscribe for errors on device %d: rc %d\n",
1865                                 dev->gnd_id, rrc);
1866                         rc = -ENODEV;
1867                         GOTO(failed, rc);
1868                 }
1869
1870                 rc = kgnilnd_set_quiesce_callback(dev->gnd_handle,
1871                                                   kgnilnd_quiesce_end_callback);
1872                 if (rc != GNI_RC_SUCCESS) {
1873                         CERROR("Can't subscribe for quiesce callback on device %d: rc %d\n",
1874                                 dev->gnd_id, rc);
1875                         rc = -ENODEV;
1876                         GOTO(failed, rc);
1877                 }
1878         }
1879
1880         rc = kgnilnd_nicaddr_to_nid(dev->gnd_host_id, &dev->gnd_nid);
1881         if (rc < 0) {
1882                 /* log messages during startup */
1883                 if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
1884                         CERROR("couldn't translate host_id 0x%x to nid. rc %d\n",
1885                                 dev->gnd_host_id, rc);
1886                 }
1887                 rc = -ESRCH;
1888                 GOTO(failed, rc);
1889         }
1890         CDEBUG(D_NET, "NIC %x -> NID %d\n", dev->gnd_host_id, dev->gnd_nid);
1891
1892         rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
1893                                 0, kgnilnd_device_callback,
1894                                 dev->gnd_id, &dev->gnd_snd_rdma_cqh);
1895         if (rrc != GNI_RC_SUCCESS) {
1896                 CERROR("Can't create rdma send cq size %u for device "
1897                        "%d (%d)\n", cq_size, dev->gnd_id, rrc);
1898                 rc = -EINVAL;
1899                 GOTO(failed, rc);
1900         }
1901
1902         rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
1903                         0, kgnilnd_device_callback, dev->gnd_id,
1904                         &dev->gnd_snd_fma_cqh);
1905         if (rrc != GNI_RC_SUCCESS) {
1906                 CERROR("Can't create fma send cq size %u for device %d (%d)\n",
1907                        cq_size, dev->gnd_id, rrc);
1908                 rc = -EINVAL;
1909                 GOTO(failed, rc);
1910         }
1911
1912         /* This one we size differently - overflows are possible and it needs to be
1913          * sized based on machine size */
1914         rrc = kgnilnd_cq_create(dev->gnd_handle,
1915                         *kgnilnd_tunables.kgn_fma_cq_size,
1916                         0, kgnilnd_device_callback, dev->gnd_id,
1917                         &dev->gnd_rcv_fma_cqh);
1918         if (rrc != GNI_RC_SUCCESS) {
1919                 CERROR("Can't create fma cq size %d for device %d (%d)\n",
1920                        *kgnilnd_tunables.kgn_fma_cq_size, dev->gnd_id, rrc);
1921                 rc = -EINVAL;
1922                 GOTO(failed, rc);
1923         }
1924
1925         RETURN(0);
1926
1927 failed:
1928         kgnilnd_dev_fini(dev);
1929         RETURN(rc);
1930 }
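
/* NB: every failure path in kgnilnd_dev_init() funnels through
 * kgnilnd_dev_fini(), so fini below is written to tolerate a partially
 * initialised device - each handle is NULL-checked before it is torn down. */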
1931
1932 void
1933 kgnilnd_dev_fini(kgn_device_t *dev)
1934 {
1935         gni_return_t rrc;
1936         ENTRY;
1937
1938         /* At quiesce or reset time, do we need to loop through and clear gnd_ready_conns? */
1939         LASSERTF(list_empty(&dev->gnd_ready_conns) &&
1940                  list_empty(&dev->gnd_map_tx) &&
1941                  list_empty(&dev->gnd_rdmaq),
1942                  "dev 0x%p ready_conns %d@0x%p map_tx %d@0x%p rdmaq %d@0x%p\n",
1943                  dev, kgnilnd_count_list(&dev->gnd_ready_conns), &dev->gnd_ready_conns,
1944                  kgnilnd_count_list(&dev->gnd_map_tx), &dev->gnd_map_tx,
1945                  kgnilnd_count_list(&dev->gnd_rdmaq), &dev->gnd_rdmaq);
1946
1947         /* These should follow from tearing down all connections */
1948         LASSERTF(dev->gnd_map_nphys == 0 && dev->gnd_map_physnop == 0,
1949                 "%d physical mappings of %d pages still mapped\n",
1950                  dev->gnd_map_nphys, dev->gnd_map_physnop);
1951
1952         LASSERTF(dev->gnd_map_nvirt == 0 && dev->gnd_map_virtnob == 0,
1953                 "%d virtual mappings of "LPU64" bytes still mapped\n",
1954                  dev->gnd_map_nvirt, dev->gnd_map_virtnob);
1955
1956         LASSERTF(atomic_read(&dev->gnd_n_mdd) == 0 &&
1957                  atomic_read(&dev->gnd_n_mdd_held) == 0 &&
1958                  atomic64_read(&dev->gnd_nbytes_map) == 0,
1959                 "%d SMSG mappings of %ld bytes still mapped or held %d\n",
1960                  atomic_read(&dev->gnd_n_mdd),
1961                  atomic64_read(&dev->gnd_nbytes_map), atomic_read(&dev->gnd_n_mdd_held));
1962
1963         LASSERT(list_empty(&dev->gnd_map_list));
1964
1965         /* What other assertions needed to ensure all connections torn down ? */
1966
1967         /* check all counters == 0 (EP, MDD, etc) */
1968
1969         /* if we are resetting due to quiesce (stack reset), don't check
1970          * thread states */
1971         LASSERTF(kgnilnd_data.kgn_quiesce_trigger ||
1972                 atomic_read(&kgnilnd_data.kgn_nthreads) == 0,
1973                 "tried to shutdown with threads active\n");
1974
1975         if (dev->gnd_rcv_fma_cqh) {
1976                 rrc = kgnilnd_cq_destroy(dev->gnd_rcv_fma_cqh);
1977                 LASSERTF(rrc == GNI_RC_SUCCESS,
1978                         "bad rc from gni_cq_destroy on rcv_fma_cqh: %d\n", rrc);
1979                 dev->gnd_rcv_fma_cqh = NULL;
1980         }
1981
1982         if (dev->gnd_snd_rdma_cqh) {
1983                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_rdma_cqh);
1984                 LASSERTF(rrc == GNI_RC_SUCCESS,
1985                         "bad rc from gni_cq_destroy on send_rdma_cqh: %d\n", rrc);
1986                 dev->gnd_snd_rdma_cqh = NULL;
1987         }
1988
1989         if (dev->gnd_snd_fma_cqh) {
1990                 rrc = kgnilnd_cq_destroy(dev->gnd_snd_fma_cqh);
1991                 LASSERTF(rrc == GNI_RC_SUCCESS,
1992                         "bad rc from gni_cq_destroy on snd_fma_cqh: %d\n", rrc);
1993                 dev->gnd_snd_fma_cqh = NULL;
1994         }
1995
1996         if (dev->gnd_err_handle) {
1997                 rrc = kgnilnd_release_errors(dev->gnd_err_handle);
1998                 LASSERTF(rrc == GNI_RC_SUCCESS,
1999                         "bad rc from gni_release_errors: %d\n", rrc);
2000                 dev->gnd_err_handle = NULL;
2001         }
2002
2003         if (dev->gnd_domain) {
2004                 rrc = kgnilnd_cdm_destroy(dev->gnd_domain);
2005                 LASSERTF(rrc == GNI_RC_SUCCESS,
2006                         "bad rc from gni_cdm_destroy: %d\n", rrc);
2007                 dev->gnd_domain = NULL;
2008         }
2009
2010         EXIT;
2011 }
2012
2013
2014 int kgnilnd_base_startup(void)
2015 {
2016         struct timeval       tv;
2017         int                  pkmem = atomic_read(&libcfs_kmemory);
2018         int                  rc;
2019         int                  i, j;
2020         kgn_device_t        *dev;
2021         struct task_struct  *thrd;
2022         ENTRY;
2023
2024         LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_NOTHING,
2025                 "init %d\n", kgnilnd_data.kgn_init);
2026
2027         /* zero pointers, flags etc */
2028         memset(&kgnilnd_data, 0, sizeof(kgnilnd_data));
2029         memset(&kgnilnd_hssops, 0, sizeof(kgnilnd_hssops));
2030
2031         /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
2032          * a unique (for all time) connstamp so we can uniquely identify
2033          * the sender.  The connstamp is an incrementing counter
2034          * initialised with seconds + microseconds at startup time.  So we
2035          * rely on NOT creating connections more frequently on average than
2036          * 1MHz to ensure we don't use old connstamps when we reboot. */
2037         do_gettimeofday(&tv);
2038         kgnilnd_data.kgn_connstamp =
2039                  kgnilnd_data.kgn_peerstamp =
2040                         (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
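
        /* worked example of the 1MHz bound above: a node up for 100 seconds
         * that created 10^8 connections (a 1MHz average) advances connstamp by
         * exactly as much as the wall clock does; any slower rate keeps old
         * stamps below what the next boot will initialise them to */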
2041
2042         init_rwsem(&kgnilnd_data.kgn_net_rw_sem);
2043
2044         for (i = 0; i < GNILND_MAXDEVS; i++) {
2045                 kgn_device_t  *dev = &kgnilnd_data.kgn_devices[i];
2046
2047                 dev->gnd_id = i;
2048                 INIT_LIST_HEAD(&dev->gnd_ready_conns);
2049                 INIT_LIST_HEAD(&dev->gnd_map_tx);
2050                 INIT_LIST_HEAD(&dev->gnd_fma_buffs);
2051                 mutex_init(&dev->gnd_cq_mutex);
2052                 sema_init(&dev->gnd_fmablk_sem, 1);
2053                 spin_lock_init(&dev->gnd_fmablk_lock);
2054                 init_waitqueue_head(&dev->gnd_waitq);
2055                 init_waitqueue_head(&dev->gnd_dgram_waitq);
2056                 init_waitqueue_head(&dev->gnd_dgping_waitq);
2057                 spin_lock_init(&dev->gnd_lock);
2058                 INIT_LIST_HEAD(&dev->gnd_map_list);
2059                 spin_lock_init(&dev->gnd_map_lock);
2060                 atomic_set(&dev->gnd_nfmablk, 0);
2061                 atomic_set(&dev->gnd_fmablk_vers, 1);
2062                 atomic_set(&dev->gnd_neps, 0);
2063                 atomic_set(&dev->gnd_canceled_dgrams, 0);
2064                 INIT_LIST_HEAD(&dev->gnd_connd_peers);
2065                 spin_lock_init(&dev->gnd_connd_lock);
2066                 spin_lock_init(&dev->gnd_dgram_lock);
2067                 spin_lock_init(&dev->gnd_rdmaq_lock);
2068                 INIT_LIST_HEAD(&dev->gnd_rdmaq);
2069
2070                 /* alloc & setup nid based dgram table */
2071                 LIBCFS_ALLOC(dev->gnd_dgrams,
2072                             sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2073
2074                 if (dev->gnd_dgrams == NULL) {
2075                         rc = -ENOMEM;
2076                         GOTO(failed, rc);
2077                 }
2078
2079                 for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++) {
2080                         INIT_LIST_HEAD(&dev->gnd_dgrams[j]);
2081                 }
2082                 atomic_set(&dev->gnd_ndgrams, 0);
2083
2084                 /* setup timer for RDMAQ processing */
2085                 setup_timer(&dev->gnd_rdmaq_timer, kgnilnd_schedule_device_timer,
2086                             (unsigned long)dev);
2087         }
2088
2089         /* CQID 0 isn't allowed, set to MAX_MSG_ID - 1 to check for conflicts early */
2090         kgnilnd_data.kgn_next_cqid = GNILND_MAX_MSG_ID - 1;
2091         kgnilnd_data.kgn_new_min_timeout = *kgnilnd_tunables.kgn_timeout;
2092         init_waitqueue_head(&kgnilnd_data.kgn_reaper_waitq);
2093         init_waitqueue_head(&kgnilnd_data.kgn_ruhroh_waitq);
2094         spin_lock_init(&kgnilnd_data.kgn_reaper_lock);
2095
2096         sema_init(&kgnilnd_data.kgn_quiesce_sem, 1);
2097         atomic_set(&kgnilnd_data.kgn_nquiesce, 0);
2098         atomic_set(&kgnilnd_data.kgn_npending_conns, 0);
2099         atomic_set(&kgnilnd_data.kgn_npending_unlink, 0);
2100         atomic_set(&kgnilnd_data.kgn_npending_detach, 0);
2101         /* OK to call kgnilnd_base_shutdown() to cleanup now */
2102         kgnilnd_data.kgn_init = GNILND_INIT_DATA;
2103         PORTAL_MODULE_USE;
2104
2105         rwlock_init(&kgnilnd_data.kgn_peer_conn_lock);
2106
2107         LIBCFS_ALLOC(kgnilnd_data.kgn_peers,
2108                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2109
2110         if (kgnilnd_data.kgn_peers == NULL) {
2111                 rc = -ENOMEM;
2112                 GOTO(failed, rc);
2113         }
2114
2115         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2116                 INIT_LIST_HEAD(&kgnilnd_data.kgn_peers[i]);
2117         }
2118
2119         LIBCFS_ALLOC(kgnilnd_data.kgn_conns,
2120                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
2121
2122         if (kgnilnd_data.kgn_conns == NULL) {
2123                 rc = -ENOMEM;
2124                 GOTO(failed, rc);
2125         }
2126
2127         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
2128                 INIT_LIST_HEAD(&kgnilnd_data.kgn_conns[i]);
2129         }
2130
2131         LIBCFS_ALLOC(kgnilnd_data.kgn_nets,
2132                     sizeof(struct list_head) * *kgnilnd_tunables.kgn_net_hash_size);
2133
2134         if (kgnilnd_data.kgn_nets == NULL) {
2135                 rc = -ENOMEM;
2136                 GOTO(failed, rc);
2137         }
2138
2139         for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
2140                 INIT_LIST_HEAD(&kgnilnd_data.kgn_nets[i]);
2141         }
2142
2143         kgnilnd_data.kgn_mbox_cache =
2144                 cfs_mem_cache_create("kgn_mbox_block",
2145                                      KMALLOC_MAX_SIZE,
2146                                      0,    /* offset */
2147                                      SLAB_HWCACHE_ALIGN);   /* flags */
2148         if (kgnilnd_data.kgn_mbox_cache == NULL) {
2149                 CERROR("Can't create slab for physical mbox blocks\n");
2150                 rc = -ENOMEM;
2151                 GOTO(failed, rc);
2152         }
2153
2154         kgnilnd_data.kgn_rx_cache =
2155                 cfs_mem_cache_create("kgn_rx_t",
2156                                      sizeof(kgn_rx_t),
2157                                      0,    /* offset */
2158                                      0);   /* flags */
2159         if (kgnilnd_data.kgn_rx_cache == NULL) {
2160                 CERROR("Can't create slab for kgn_rx_t descriptors\n");
2161                 rc = -ENOMEM;
2162                 GOTO(failed, rc);
2163         }
2164
2165         kgnilnd_data.kgn_tx_cache =
2166                 cfs_mem_cache_create("kgn_tx_t",
2167                                      sizeof(kgn_tx_t),
2168                                      0,    /* offset */
2169                                      0);   /* flags */
2170         if (kgnilnd_data.kgn_tx_cache == NULL) {
2171                 CERROR("Can't create slab for kgn_tx_t\n");
2172                 rc = -ENOMEM;
2173                 GOTO(failed, rc);
2174         }
2175
2176         kgnilnd_data.kgn_tx_phys_cache =
2177                 cfs_mem_cache_create("kgn_tx_phys",
2178                                      LNET_MAX_IOV * sizeof(gni_mem_segment_t),
2179                                      0,    /* offset */
2180                                      0);   /* flags */
2181         if (kgnilnd_data.kgn_tx_phys_cache == NULL) {
2182                 CERROR("Can't create slab for kgn_tx_phys\n");
2183                 rc = -ENOMEM;
2184                 GOTO(failed, rc);
2185         }
2186
2187         kgnilnd_data.kgn_dgram_cache =
2188                 cfs_mem_cache_create("kgn_dgram_t",
2189                                      sizeof(kgn_dgram_t),
2190                                      0,    /* offset */
2191                                      0);   /* flags */
2192         if (kgnilnd_data.kgn_dgram_cache == NULL) {
2193                 CERROR("Can't create slab for outgoing datagrams\n");
2194                 rc = -ENOMEM;
2195                 GOTO(failed, rc);
2196         }
2197
2198         /* allocate an LNET_MAX_IOV array of page pointers for each cpu */
2199         kgnilnd_data.kgn_cksum_map_pages = kmalloc(num_possible_cpus() * sizeof (struct page *),
2200                                                    GFP_KERNEL);
2201         if (kgnilnd_data.kgn_cksum_map_pages == NULL) {
2202                 CERROR("Can't allocate vmap cksum pages\n");
2203                 rc = -ENOMEM;
2204                 GOTO(failed, rc);
2205         }
2206         kgnilnd_data.kgn_cksum_npages = num_possible_cpus();
2207         memset(kgnilnd_data.kgn_cksum_map_pages, 0,
2208                 kgnilnd_data.kgn_cksum_npages * sizeof (struct page *));
2209
2210         for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
2211                 kgnilnd_data.kgn_cksum_map_pages[i] = kmalloc(LNET_MAX_IOV * sizeof (struct page *),
2212                                                               GFP_KERNEL);
2213                 if (kgnilnd_data.kgn_cksum_map_pages[i] == NULL) {
2214                         CERROR("Can't allocate vmap cksum pages for cpu %d\n", i);
2215                         rc = -ENOMEM;
2216                         GOTO(failed, rc);
2217                 }
2218         }
2219
2220         LASSERT(kgnilnd_data.kgn_ndevs == 0);
2221
2222         /* Use all available GNI devices */
2223         for (i = 0; i < GNILND_MAXDEVS; i++) {
2224                 dev = &kgnilnd_data.kgn_devices[kgnilnd_data.kgn_ndevs];
2225
2226                 rc = kgnilnd_dev_init(dev);
2227                 if (rc == 0) {
2228                         /* Increment here so base_shutdown cleans it up */
2229                         kgnilnd_data.kgn_ndevs++;
2230
2231                         rc = kgnilnd_allocate_phys_fmablk(dev);
2232                         if (rc) {
2233                                 GOTO(failed, rc);
2234                         }
2235                 }
2236         }
2237
2238         if (kgnilnd_data.kgn_ndevs == 0) {
2239                 CERROR("Can't initialise any GNI devices\n");
2240                 rc = -ENODEV;
2241                 GOTO(failed, rc);
2242         }
2243
2244         rc = kgnilnd_thread_start(kgnilnd_reaper, NULL, "kgnilnd_rpr", 0);
2245         if (rc != 0) {
2246                 CERROR("Can't spawn gnilnd reaper: %d\n", rc);
2247                 GOTO(failed, rc);
2248         }
2249
2250         /*
2251          * Start ruhroh thread.  We can't use kgnilnd_thread_start() because
2252          * we don't want this thread included in kgnilnd_data.kgn_nthreads
2253          * count.  This thread controls quiesce, so it mustn't
2254          * quiesce itself.
2255          */
2256         thrd = kthread_run(kgnilnd_ruhroh_thread, NULL, "%s_%02d", "kgnilnd_rr", 0);
2257         if (IS_ERR(thrd)) {
2258                 rc = PTR_ERR(thrd);
2259                 CERROR("Can't spawn gnilnd ruhroh thread: %d\n", rc);
2260                 GOTO(failed, rc);
2261         }
2262
2263         /* threads will load balance across devs as they are available */
2264         for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
2265                 rc = kgnilnd_thread_start(kgnilnd_scheduler, (void *)((long)i),
2266                                           "kgnilnd_sd", i);
2267                 if (rc != 0) {
2268                         CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
2269                                i, rc);
2270                         GOTO(failed, rc);
2271                 }
2272         }
2273
2274         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2275                 dev = &kgnilnd_data.kgn_devices[i];
2276                 rc = kgnilnd_thread_start(kgnilnd_dgram_mover, dev,
2277                                           "kgnilnd_dg", dev->gnd_id);
2278                 if (rc != 0) {
2279                         CERROR("Can't spawn gnilnd dgram_mover[%d]: %d\n",
2280                                dev->gnd_id, rc);
2281                         GOTO(failed, rc);
2282                 }
2283
2284                 rc = kgnilnd_thread_start(kgnilnd_dgram_waitq, dev,
2285                                           "kgnilnd_dgn", dev->gnd_id);
2286                 if (rc != 0) {
2287                         CERROR("Can't spawn gnilnd dgram_waitq[%d]: %d\n",
2288                                 dev->gnd_id, rc);
2289                         GOTO(failed, rc);
2290                 }
2291
2292                 rc = kgnilnd_setup_wildcard_dgram(dev);
2293
2294                 if (rc != 0) {
2295                         CERROR("Can't create wildcard dgrams[%d]: %d\n",
2296                                 dev->gnd_id, rc);
2297                         GOTO(failed, rc);
2298                 }
2299         }
2300
2301
2302
2303         /* flag everything initialised */
2304         kgnilnd_data.kgn_init = GNILND_INIT_ALL;
2305         /*****************************************************/
2306
2307         CDEBUG(D_MALLOC, "initial kmem %d\n", pkmem);
2308         RETURN(0);
2309
2310 failed:
2311         kgnilnd_base_shutdown();
2312         kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
2313         RETURN(rc);
2314 }
2315
2316 void
2317 kgnilnd_base_shutdown(void)
2318 {
2319         int           i, j;
2320         ENTRY;
2321
2322         while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_PAUSE_SHUTDOWN, 1)) {};
2323
2324         kgnilnd_data.kgn_wc_kill = 1;
2325
2326         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2327                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2328                 kgnilnd_cancel_wc_dgrams(dev);
2329                 kgnilnd_del_conn_or_peer(NULL, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
2330                 kgnilnd_wait_for_canceled_dgrams(dev);
2331         }
2332
2333         /* Peer state all cleaned up BEFORE setting shutdown, so threads don't
2334          * have to worry about shutdown races.  NB connections may be created
2335          * while there are still active connds, but these will be temporary
2336          * since peer creation always fails after the listener has started to
2337          * shut down.
2338          * All peers should have been cleared out on the nets */
2339         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2340                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2341
2342         /* Wait for the ruhroh thread to shut down. */
2343         kgnilnd_data.kgn_ruhroh_shutdown = 1;
2344         wake_up(&kgnilnd_data.kgn_ruhroh_waitq);
2345         i = 2;
2346         while (kgnilnd_data.kgn_ruhroh_running != 0) {
2347                 i++;
2348                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2349                        "Waiting for ruhroh thread to terminate\n");
2350                 cfs_pause(cfs_time_seconds(1));
2351         }
2352
2353         /* Flag threads to terminate */
2354         kgnilnd_data.kgn_shutdown = 1;
2355
2356         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2357                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2358
2359                 /* should clear all the MDDs */
2360                 kgnilnd_unmap_phys_fmablk(dev);
2361
2362                 kgnilnd_schedule_device(dev);
2363                 wake_up_all(&dev->gnd_dgram_waitq);
2364                 wake_up_all(&dev->gnd_dgping_waitq);
2365                 LASSERT(list_empty(&dev->gnd_connd_peers));
2366         }
2367
2368         spin_lock(&kgnilnd_data.kgn_reaper_lock);
2369         wake_up_all(&kgnilnd_data.kgn_reaper_waitq);
2370         spin_unlock(&kgnilnd_data.kgn_reaper_lock);
2371
2372         /* Wait for threads to exit */
2373         i = 2;
2374         while (atomic_read(&kgnilnd_data.kgn_nthreads) != 0) {
2375                 i++;
2376                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2377                        "Waiting for %d threads to terminate\n",
2378                        atomic_read(&kgnilnd_data.kgn_nthreads));
2379                 cfs_pause(cfs_time_seconds(1));
2380         }
2381
2382         LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
2383                 "peers left %d\n", atomic_read(&kgnilnd_data.kgn_npeers));
2384
2385         if (kgnilnd_data.kgn_peers != NULL) {
2386                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
2387                         LASSERT(list_empty(&kgnilnd_data.kgn_peers[i]));
2388
2389                 LIBCFS_FREE(kgnilnd_data.kgn_peers,
2390                             sizeof (struct list_head) *
2391                             *kgnilnd_tunables.kgn_peer_hash_size);
2392         }
2393
2394         down_write(&kgnilnd_data.kgn_net_rw_sem);
2395         if (kgnilnd_data.kgn_nets != NULL) {
2396                 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++)
2397                         LASSERT(list_empty(&kgnilnd_data.kgn_nets[i]));
2398
2399                 LIBCFS_FREE(kgnilnd_data.kgn_nets,
2400                             sizeof (struct list_head) *
2401                             *kgnilnd_tunables.kgn_net_hash_size);
2402         }
2403         up_write(&kgnilnd_data.kgn_net_rw_sem);
2404
2405         LASSERTF(atomic_read(&kgnilnd_data.kgn_nconns) == 0,
2406                 "conns left %d\n", atomic_read(&kgnilnd_data.kgn_nconns));
2407
2408         if (kgnilnd_data.kgn_conns != NULL) {
2409                 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++)
2410                         LASSERT(list_empty(&kgnilnd_data.kgn_conns[i]));
2411
2412                 LIBCFS_FREE(kgnilnd_data.kgn_conns,
2413                             sizeof (struct list_head) *
2414                             *kgnilnd_tunables.kgn_peer_hash_size);
2415         }
2416
2417         for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
2418                 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
2419                 kgnilnd_dev_fini(dev);
2420
2421                 LASSERTF(atomic_read(&dev->gnd_ndgrams) == 0,
2422                         "dgrams left %d\n", atomic_read(&dev->gnd_ndgrams));
2423
2424                 if (dev->gnd_dgrams != NULL) {
2425                         for (j = 0; j < *kgnilnd_tunables.kgn_peer_hash_size; j++)
2426                                 LASSERT(list_empty(&dev->gnd_dgrams[j]));
2427
2428                         LIBCFS_FREE(dev->gnd_dgrams,
2429                                     sizeof (struct list_head) *
2430                                     *kgnilnd_tunables.kgn_peer_hash_size);
2431                 }
2432
2433                 kgnilnd_free_phys_fmablk(dev);
2434         }
2435
2436         if (kgnilnd_data.kgn_mbox_cache != NULL) {
2437                 i = cfs_mem_cache_destroy(kgnilnd_data.kgn_mbox_cache);
2438                 LASSERTF(i == 0, "rc %d destroying kgn_mbox_cache\n", i);
2439         }
2440
2441         if (kgnilnd_data.kgn_rx_cache != NULL) {
2442                 i = cfs_mem_cache_destroy(kgnilnd_data.kgn_rx_cache);
2443                 LASSERTF(i == 0, "rc %d destroying kgn_rx_cache\n", i);
2444         }
2445
2446         if (kgnilnd_data.kgn_tx_cache != NULL) {
2447                 i = cfs_mem_cache_destroy(kgnilnd_data.kgn_tx_cache);
2448                 LASSERTF(i == 0, "rc %d destroying kgn_tx_cache\n", i);
2449         }
2450
2451         if (kgnilnd_data.kgn_tx_phys_cache != NULL) {
2452                 i = cfs_mem_cache_destroy(kgnilnd_data.kgn_tx_phys_cache);
2453                 LASSERTF(i == 0, "rc %d destroying kgn_tx_phys_cache\n", i);
2454         }
2455
2456         if (kgnilnd_data.kgn_dgram_cache != NULL) {
2457                 i = cfs_mem_cache_destroy(kgnilnd_data.kgn_dgram_cache);
2458                 LASSERTF(i == 0, "rc %d destroying kgn_dgram_cache\n", i);
2459         }
2460
2461         if (kgnilnd_data.kgn_cksum_map_pages != NULL) {
2462                 for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
2463                         if (kgnilnd_data.kgn_cksum_map_pages[i] != NULL) {
2464                                 kfree(kgnilnd_data.kgn_cksum_map_pages[i]);
2465                         }
2466                 }
2467                 kfree(kgnilnd_data.kgn_cksum_map_pages);
2468         }
2469
2470         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2471                atomic_read(&libcfs_kmemory));
2472
2473         kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
2474         PORTAL_MODULE_UNUSE;
2475
2476         EXIT;
2477 }
2478
2479 int
2480 kgnilnd_startup(lnet_ni_t *ni)
2481 {
2482         int               rc, devno;
2483         kgn_net_t        *net;
2484         ENTRY;
2485
2486         LASSERTF(ni->ni_lnd == &the_kgnilnd,
2487                 "bad LND 0x%p != the_kgnilnd @ 0x%p\n",
2488                 ni->ni_lnd, &the_kgnilnd);
2489
2490         if (kgnilnd_data.kgn_init == GNILND_INIT_NOTHING) {
2491                 rc = kgnilnd_base_startup();
2492                 if (rc != 0)
2493                         RETURN(rc);
2494         }
2495
2496         /* Serialize with shutdown. */
2497         down(&kgnilnd_data.kgn_quiesce_sem);
2498
2499         LIBCFS_ALLOC(net, sizeof(*net));
2500         if (net == NULL) {
2501                 CERROR("could not allocate net for new interface instance\n");
2502                 rc = -ENOMEM;
2503                 /* no need to cleanup the CDM... */
2504                 GOTO(failed, rc);
2505         }
2506         INIT_LIST_HEAD(&net->gnn_list);
2507         ni->ni_data = net;
2508         net->gnn_ni = ni;
2509         ni->ni_maxtxcredits = *kgnilnd_tunables.kgn_credits;
2510         ni->ni_peertxcredits = *kgnilnd_tunables.kgn_peer_credits;
2511
2512         if (*kgnilnd_tunables.kgn_peer_health) {
2513                 int     fudge;
2514
2515                 /* give this a bit of leeway - we don't have a hard timeout
2516                  * as we only check timeouts periodically - see comment in kgnilnd_reaper */
2517                 fudge = (GNILND_TO2KA(*kgnilnd_tunables.kgn_timeout) / GNILND_REAPER_NCHECKS);
2518
2519                 ni->ni_peertimeout = *kgnilnd_tunables.kgn_timeout + fudge;
2520
2521                 LCONSOLE_INFO("Enabling LNet peer health for gnilnd, timeout %ds\n",
2522                               ni->ni_peertimeout);
2523         }
2524
2525         atomic_set(&net->gnn_refcount, 1);
2526
2527         /* if we have multiple devices, spread the nets around */
2528         net->gnn_netnum = LNET_NETNUM(LNET_NIDNET(ni->ni_nid));
2529
2530         devno = LNET_NIDNET(ni->ni_nid) % GNILND_MAXDEVS;
2531         net->gnn_dev = &kgnilnd_data.kgn_devices[devno];
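        /* e.g. if GNILND_MAXDEVS were 2, odd-numbered LNet nets would land on
         * device 1 and even-numbered nets on device 0; with a single device
         * every net maps to device 0 (illustrative values) */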
2532
2533         /* allocate a 'dummy' cdm for datagram use. We can only have a single
2534          * datagram between a nid:inst_id and nid2:inst_id. The fake cdm
2535          * gives us additional inst_ids to use, allowing the datagrams to flow
2536          * like rivers of honey and beer */
2537
2538         /* the instance id for the cdm is the NETNUM offset by MAXDEVS -
2539          * ensuring we'll have a unique id */
2540
2541
2542         ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), net->gnn_dev->gnd_nid);
2543         CDEBUG(D_NET, "adding net %p nid=%s on dev %d\n",
2544                 net, libcfs_nid2str(ni->ni_nid), net->gnn_dev->gnd_id);
2545         /* until the gnn_list is set, we need to clean up after ourselves, as
2546          * kgnilnd_shutdown is just going to get confused */
2547
2548         down_write(&kgnilnd_data.kgn_net_rw_sem);
2549         list_add_tail(&net->gnn_list, kgnilnd_netnum2netlist(net->gnn_netnum));
2550         up_write(&kgnilnd_data.kgn_net_rw_sem);
2551
2552         /* we need a separate thread to call probe_wait_by_id until
2553          * we get a function callback notifier from kgni */
2554         up(&kgnilnd_data.kgn_quiesce_sem);
2555         RETURN(0);
2556  failed:
2557         up(&kgnilnd_data.kgn_quiesce_sem);
2558         kgnilnd_shutdown(ni);
2559         RETURN(rc);
2560 }
2561
2562 void
2563 kgnilnd_shutdown(lnet_ni_t *ni)
2564 {
2565         kgn_net_t     *net = ni->ni_data;
2566         int           i;
2567         int           rc;
2568         ENTRY;
2569
2570         CFS_RACE(CFS_FAIL_GNI_SR_DOWN_RACE);
2571
2572         LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_ALL,
2573                 "init %d\n", kgnilnd_data.kgn_init);
2574
2575         /* Serialize with startup. */
2576         down(&kgnilnd_data.kgn_quiesce_sem);
2577         CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
2578                atomic_read(&libcfs_kmemory));
2579
2580         if (net == NULL) {
2581                 CERROR("got NULL net for ni %p\n", ni);
2582                 rc = -EINVAL;
2583                 GOTO(out, rc);
2584         }
2585
2586         LASSERTF(ni == net->gnn_ni,
2587                 "ni %p gnn_ni %p\n", ni, net->gnn_ni);
2588
2589         ni->ni_data = NULL;
2590
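        /* shutdown must run at most once per net, and someone must still
         * hold a reference at this point */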
2591         LASSERT(!net->gnn_shutdown);
2592         LASSERTF(atomic_read(&net->gnn_refcount) != 0,
2593                 "net %p refcount %d\n",
2594                  net, atomic_read(&net->gnn_refcount));
2595
2596         if (!list_empty(&net->gnn_list)) {
2597                 /* serialize with peer creation */
2598                 down_write(&kgnilnd_data.kgn_net_rw_sem);
2599                 net->gnn_shutdown = 1;
2600                 up_write(&kgnilnd_data.kgn_net_rw_sem);
2601
2602                 kgnilnd_cancel_net_dgrams(net);
2603
2604                 kgnilnd_del_conn_or_peer(net, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
2605
2606                 /* if we are quiesced, we need to wake up - those threads must
2607                  * be alive to release peers, etc */
2608                 if (GNILND_IS_QUIESCED) {
2609                         set_mb(kgnilnd_data.kgn_quiesce_trigger, GNILND_QUIESCE_IDLE);
2610                         kgnilnd_quiesce_wait("shutdown");
2611                 }
2612
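                /* don't proceed until the datagrams canceled above have
                 * actually drained out of the device */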
2613                 kgnilnd_wait_for_canceled_dgrams(net->gnn_dev);
2614
2615                 /* Wait until the net's refcount drops to 1, then release the
2616                  * final ref, which is ours. This makes sure everything else is
2617                  * done before we free the net.
2618                  */
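                /* (i & -i) == i only when i is a power of two, so the
                 * D_WARNING below fires with exponential backoff (first at
                 * i == 8) while we poll once a second */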
2619                 i = 4;
2620                 while (atomic_read(&net->gnn_refcount) != 1) {
2621                         i++;
2622                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2623                                 "Waiting for %d references to clear on net %d\n",
2624                                 atomic_read(&net->gnn_refcount),
2625                                 net->gnn_netnum);
2626                         cfs_pause(cfs_time_seconds(1));
2627                 }
2628
2629                 /* release ref from kgnilnd_startup */
2630                 kgnilnd_net_decref(net);
2631                 /* serialize with reaper and conn_task looping */
2632                 down_write(&kgnilnd_data.kgn_net_rw_sem);
2633                 list_del_init(&net->gnn_list);
2634                 up_write(&kgnilnd_data.kgn_net_rw_sem);
2635
2636         }
2637
2638         /* not locking, this can't race with writers */
2639         LASSERTF(atomic_read(&net->gnn_refcount) == 0,
2640                 "net %p refcount %d\n",
2641                  net, atomic_read(&net->gnn_refcount));
2642         LIBCFS_FREE(net, sizeof(*net));
2643
2644 out:
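        /* if that was the last net in every hash bucket, the whole LND
         * comes down with it */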
2645         down_read(&kgnilnd_data.kgn_net_rw_sem);
2646         for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
2647                 if (!list_empty(&kgnilnd_data.kgn_nets[i])) {
2648                         up_read(&kgnilnd_data.kgn_net_rw_sem);
2649                         break;
2650                 }
2651
2652                 if (i == *kgnilnd_tunables.kgn_net_hash_size - 1) {
2653                         up_read(&kgnilnd_data.kgn_net_rw_sem);
2654                         kgnilnd_base_shutdown();
2655                 }
2656         }
2657         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2658                atomic_read(&libcfs_kmemory));
2659
2660         up(&kgnilnd_data.kgn_quiesce_sem);
2661         EXIT;
2662         return;
2663 }
2664
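/* module teardown mirrors kgnilnd_module_init in reverse order */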
2665 void __exit
2666 kgnilnd_module_fini(void)
2667 {
2668         lnet_unregister_lnd(&the_kgnilnd);
2669         kgnilnd_proc_fini();
2670         kgnilnd_remove_sysctl();
2671         kgnilnd_tunables_fini();
2672 }
2673
2674 int __init
2675 kgnilnd_module_init(void)
2676 {
2677         int    rc;
2678
2679         rc = kgnilnd_tunables_init();
2680         if (rc != 0)
2681                 return rc;
2682
2683         printk(KERN_INFO "Lustre: kgnilnd build version: "KGNILND_BUILD_REV"\n");
2684
2685         kgnilnd_insert_sysctl();
2686         kgnilnd_proc_init();
2687
2688         lnet_register_lnd(&the_kgnilnd);
2689
2690         return 0;
2691 }
2692
2693 MODULE_AUTHOR("Cray, Inc. <nic@cray.com>");
2694 MODULE_DESCRIPTION("Kernel Gemini LND v"KGNILND_BUILD_REV);
2695 MODULE_LICENSE("GPL");
2696
2697 module_init(kgnilnd_module_init);
2698 module_exit(kgnilnd_module_fini);