1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lnet/klnds/o2iblnd/o2iblnd.c
32  *
33  * Author: Eric Barton <eric@bartonsoftware.com>
34  */
35
36 #include <asm/page.h>
37 #include <linux/inetdevice.h>
38
39 #include "o2iblnd.h"
40
41 static const struct lnet_lnd the_o2iblnd;
42
43 struct kib_data kiblnd_data;
44
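/* Simple rolling checksum: rotate the 32-bit accumulator left by one bit
 * and add each byte of the buffer.  Zero is reserved to mean "no checksum",
 * so 1 is returned in that case instead.
 */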
45 static __u32
46 kiblnd_cksum(void *ptr, int nob)
47 {
48         char  *c  = ptr;
49         __u32  sum = 0;
50
51         while (nob-- > 0)
52                 sum = ((sum << 1) | (sum >> 31)) + *c++;
53
54         /* ensure I don't return 0 (== no checksum) */
55         return (sum == 0) ? 1 : sum;
56 }
57
58 static char *
59 kiblnd_msgtype2str(int type)
60 {
61         switch (type) {
62         case IBLND_MSG_CONNREQ:
63                 return "CONNREQ";
64
65         case IBLND_MSG_CONNACK:
66                 return "CONNACK";
67
68         case IBLND_MSG_NOOP:
69                 return "NOOP";
70
71         case IBLND_MSG_IMMEDIATE:
72                 return "IMMEDIATE";
73
74         case IBLND_MSG_PUT_REQ:
75                 return "PUT_REQ";
76
77         case IBLND_MSG_PUT_NAK:
78                 return "PUT_NAK";
79
80         case IBLND_MSG_PUT_ACK:
81                 return "PUT_ACK";
82
83         case IBLND_MSG_PUT_DONE:
84                 return "PUT_DONE";
85
86         case IBLND_MSG_GET_REQ:
87                 return "GET_REQ";
88
89         case IBLND_MSG_GET_DONE:
90                 return "GET_DONE";
91
92         default:
93                 return "???";
94         }
95 }
96
97 static int
98 kiblnd_msgtype2size(int type)
99 {
100         const int hdr_size = offsetof(struct kib_msg, ibm_u);
101
102         switch (type) {
103         case IBLND_MSG_CONNREQ:
104         case IBLND_MSG_CONNACK:
105                 return hdr_size + sizeof(struct kib_connparams);
106
107         case IBLND_MSG_NOOP:
108                 return hdr_size;
109
110         case IBLND_MSG_IMMEDIATE:
111                 return offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[0]);
112
113         case IBLND_MSG_PUT_REQ:
114                 return hdr_size + sizeof(struct kib_putreq_msg);
115
116         case IBLND_MSG_PUT_ACK:
117                 return hdr_size + sizeof(struct kib_putack_msg);
118
119         case IBLND_MSG_GET_REQ:
120                 return hdr_size + sizeof(struct kib_get_msg);
121
122         case IBLND_MSG_PUT_NAK:
123         case IBLND_MSG_PUT_DONE:
124         case IBLND_MSG_GET_DONE:
125                 return hdr_size + sizeof(struct kib_completion_msg);
126         default:
127                 return -1;
128         }
129 }
130
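/* Validate, and byte-swap when the sender is opposite-endian, the RDMA
 * descriptor embedded in GET_REQ and PUT_ACK messages.  Returns non-zero if
 * the fragment count or the message length is inconsistent.
 */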
131 static int kiblnd_unpack_rd(struct kib_msg *msg, bool flip)
132 {
133         struct kib_rdma_desc *rd;
134         int nob;
135         int n;
136         int i;
137
138         LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ ||
139                 msg->ibm_type == IBLND_MSG_PUT_ACK);
140
141         rd = msg->ibm_type == IBLND_MSG_GET_REQ ?
142                 &msg->ibm_u.get.ibgm_rd :
143                 &msg->ibm_u.putack.ibpam_rd;
144
145         if (flip) {
146                 __swab32s(&rd->rd_key);
147                 __swab32s(&rd->rd_nfrags);
148         }
149
150         n = rd->rd_nfrags;
151
152         if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) {
153                 CERROR("Bad nfrags: %d, should be 0 < n <= %d\n",
154                        n, IBLND_MAX_RDMA_FRAGS);
155                 return 1;
156         }
157
158         nob = offsetof(struct kib_msg, ibm_u) +
159                 kiblnd_rd_msg_size(rd, msg->ibm_type, n);
160
161         if (msg->ibm_nob < nob) {
162                 CERROR("Short %s: %d(%d)\n",
163                        kiblnd_msgtype2str(msg->ibm_type), msg->ibm_nob, nob);
164                 return 1;
165         }
166
167         if (!flip)
168                 return 0;
169
170         for (i = 0; i < n; i++) {
171                 __swab32s(&rd->rd_frags[i].rf_nob);
172                 __swab64s(&rd->rd_frags[i].rf_addr);
173         }
174
175         return 0;
176 }
177
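/* Fill in the common header of an outgoing message.  ibm_type and ibm_nob
 * must already have been set by the caller; the checksum, when enabled by
 * the kib_cksum tunable, is computed last over the whole message while
 * ibm_cksum is still zero.
 */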
178 void kiblnd_pack_msg(struct lnet_ni *ni, struct kib_msg *msg, int version,
179                      int credits, lnet_nid_t dstnid, __u64 dststamp)
180 {
181         struct kib_net *net = ni->ni_data;
182
183         /* CAVEAT EMPTOR! all message fields not set here should have been
184          * initialised previously. */
185         msg->ibm_magic    = IBLND_MSG_MAGIC;
186         msg->ibm_version  = version;
187         /*   ibm_type */
188         msg->ibm_credits  = credits;
189         /*   ibm_nob */
190         msg->ibm_cksum    = 0;
191         msg->ibm_srcnid   = ni->ni_nid;
192         msg->ibm_srcstamp = net->ibn_incarnation;
193         msg->ibm_dstnid   = dstnid;
194         msg->ibm_dststamp = dststamp;
195
196         if (*kiblnd_tunables.kib_cksum) {
197                 /* NB ibm_cksum zero while computing cksum */
198                 msg->ibm_cksum = kiblnd_cksum(msg, msg->ibm_nob);
199         }
200 }
201
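/* Sanity-check a received message: verify magic, version, length and
 * checksum (the checksum is computed with ibm_cksum zeroed, before any
 * byte-swapping), then convert the header and type-specific payload to host
 * byte order if the peer is opposite-endian.  Returns 0 or -EPROTO.
 */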
202 int kiblnd_unpack_msg(struct kib_msg *msg, int nob)
203 {
204         const int hdr_size = offsetof(struct kib_msg, ibm_u);
205         __u32 msg_cksum;
206         __u16 version;
207         int msg_nob;
208         bool flip;
209
210         /* 6 bytes are enough to have received magic + version */
211         if (nob < 6) {
212                 CERROR("Short message: %d\n", nob);
213                 return -EPROTO;
214         }
215
216         if (msg->ibm_magic == IBLND_MSG_MAGIC) {
217                 flip = false;
218         } else if (msg->ibm_magic == __swab32(IBLND_MSG_MAGIC)) {
219                 flip = true;
220         } else {
221                 CERROR("Bad magic: %08x\n", msg->ibm_magic);
222                 return -EPROTO;
223         }
224
225         version = flip ? __swab16(msg->ibm_version) : msg->ibm_version;
226         if (version != IBLND_MSG_VERSION &&
227             version != IBLND_MSG_VERSION_1) {
228                 CERROR("Bad version: %x\n", version);
229                 return -EPROTO;
230         }
231
232         if (nob < hdr_size) {
233                 CERROR("Short message: %d\n", nob);
234                 return -EPROTO;
235         }
236
237         msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob;
238         if (msg_nob > nob) {
239                 CERROR("Short message: got %d, wanted %d\n", nob, msg_nob);
240                 return -EPROTO;
241         }
242
243         /* checksum must be computed with ibm_cksum zero and BEFORE anything
244          * gets flipped
245          */
246         msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
247         msg->ibm_cksum = 0;
248         if (msg_cksum != 0 &&
249             msg_cksum != kiblnd_cksum(msg, msg_nob)) {
250                 CERROR("Bad checksum\n");
251                 return -EPROTO;
252         }
253
254         msg->ibm_cksum = msg_cksum;
255
256         if (flip) {
257                 /* leave magic unflipped as a clue to peer_ni endianness */
258                 msg->ibm_version = version;
259                 BUILD_BUG_ON(sizeof(msg->ibm_type) != 1);
260                 BUILD_BUG_ON(sizeof(msg->ibm_credits) != 1);
261                 msg->ibm_nob     = msg_nob;
262                 __swab64s(&msg->ibm_srcnid);
263                 __swab64s(&msg->ibm_srcstamp);
264                 __swab64s(&msg->ibm_dstnid);
265                 __swab64s(&msg->ibm_dststamp);
266         }
267
268         if (msg->ibm_srcnid == LNET_NID_ANY) {
269                 CERROR("Bad src nid: %s\n", libcfs_nid2str(msg->ibm_srcnid));
270                 return -EPROTO;
271         }
272
273         if (msg_nob < kiblnd_msgtype2size(msg->ibm_type)) {
274                 CERROR("Short %s: %d(%d)\n", kiblnd_msgtype2str(msg->ibm_type),
275                        msg_nob, kiblnd_msgtype2size(msg->ibm_type));
276                 return -EPROTO;
277         }
278
279         switch (msg->ibm_type) {
280         default:
281                 CERROR("Unknown message type %x\n", msg->ibm_type);
282                 return -EPROTO;
283
284         case IBLND_MSG_NOOP:
285         case IBLND_MSG_IMMEDIATE:
286         case IBLND_MSG_PUT_REQ:
287                 break;
288
289         case IBLND_MSG_PUT_ACK:
290         case IBLND_MSG_GET_REQ:
291                 if (kiblnd_unpack_rd(msg, flip))
292                         return -EPROTO;
293                 break;
294
295         case IBLND_MSG_PUT_NAK:
296         case IBLND_MSG_PUT_DONE:
297         case IBLND_MSG_GET_DONE:
298                 if (flip)
299                         __swab32s(&msg->ibm_u.completion.ibcm_status);
300                 break;
301
302         case IBLND_MSG_CONNREQ:
303         case IBLND_MSG_CONNACK:
304                 if (flip) {
305                         __swab16s(&msg->ibm_u.connparams.ibcp_queue_depth);
306                         __swab16s(&msg->ibm_u.connparams.ibcp_max_frags);
307                         __swab32s(&msg->ibm_u.connparams.ibcp_max_msg_size);
308                 }
309                 break;
310         }
311         return 0;
312 }
313
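/* Allocate and initialise a peer_ni on the CPT its NID maps to.  The single
 * initial reference belongs to the caller; the peer_ni is not added to the
 * peer hash table here, only counted in ibn_npeers under the global lock.
 */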
314 int
315 kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer_ni **peerp,
316                    lnet_nid_t nid)
317 {
318         struct kib_peer_ni *peer_ni;
319         struct kib_net *net = ni->ni_data;
320         int cpt = lnet_cpt_of_nid(nid, ni);
321         unsigned long flags;
322
323         LASSERT(net != NULL);
324         LASSERT(nid != LNET_NID_ANY);
325
326         LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
327         if (!peer_ni) {
328                 CERROR("Cannot allocate peer_ni\n");
329                 return -ENOMEM;
330         }
331
332         peer_ni->ibp_ni = ni;
333         peer_ni->ibp_nid = nid;
334         peer_ni->ibp_error = 0;
335         peer_ni->ibp_last_alive = 0;
336         peer_ni->ibp_max_frags = IBLND_MAX_RDMA_FRAGS;
337         peer_ni->ibp_queue_depth = ni->ni_net->net_tunables.lct_peer_tx_credits;
338         peer_ni->ibp_queue_depth_mod = 0;       /* try to use the default */
339         atomic_set(&peer_ni->ibp_refcount, 1);  /* 1 ref for caller */
340
341         INIT_HLIST_NODE(&peer_ni->ibp_list);
342         INIT_LIST_HEAD(&peer_ni->ibp_conns);
343         INIT_LIST_HEAD(&peer_ni->ibp_tx_queue);
344
345         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
346
347         /* always called with a ref on ni, which prevents ni being shutdown */
348         LASSERT(net->ibn_shutdown == 0);
349
350         /* npeers only grows with the global lock held */
351         atomic_inc(&net->ibn_npeers);
352
353         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
354
355         *peerp = peer_ni;
356         return 0;
357 }
358
359 void
360 kiblnd_destroy_peer(struct kib_peer_ni *peer_ni)
361 {
362         struct kib_net *net = peer_ni->ibp_ni->ni_data;
363
364         LASSERT(net != NULL);
365         LASSERT(atomic_read(&peer_ni->ibp_refcount) == 0);
366         LASSERT(!kiblnd_peer_active(peer_ni));
367         LASSERT(kiblnd_peer_idle(peer_ni));
368         LASSERT(list_empty(&peer_ni->ibp_tx_queue));
369
370         LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
371
372         /* NB a peer_ni's connections keep a reference on their peer_ni until
373          * they are destroyed, so we can be assured that _all_ state to do
374          * with this peer_ni has been cleaned up when its refcount drops to
375          * zero.
376          */
377         if (atomic_dec_and_test(&net->ibn_npeers))
378                 wake_up_var(&net->ibn_npeers);
379 }
380
381 struct kib_peer_ni *
382 kiblnd_find_peer_locked(struct lnet_ni *ni, lnet_nid_t nid)
383 {
384         /* the caller is responsible for accounting the additional reference
385          * that this creates
386          */
387         struct kib_peer_ni *peer_ni;
388
389         hash_for_each_possible(kiblnd_data.kib_peers, peer_ni,
390                                ibp_list, nid) {
391                 LASSERT(!kiblnd_peer_idle(peer_ni));
392
393                 /*
394                  * Match a peer if its NID and the NID of the local NI it
395                  * communicates over are the same. Otherwise don't match
396                  * the peer, which will result in a new lnd peer being
397                  * created.
398                  */
399                 if (peer_ni->ibp_nid != nid ||
400                     peer_ni->ibp_ni->ni_nid != ni->ni_nid)
401                         continue;
402
403                 CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d) version: %x\n",
404                        peer_ni, libcfs_nid2str(nid),
405                        atomic_read(&peer_ni->ibp_refcount),
406                        peer_ni->ibp_version);
407                 return peer_ni;
408         }
409         return NULL;
410 }
411
412 void
413 kiblnd_unlink_peer_locked(struct kib_peer_ni *peer_ni)
414 {
415         LASSERT(list_empty(&peer_ni->ibp_conns));
416
417         LASSERT(kiblnd_peer_active(peer_ni));
418         hlist_del_init(&peer_ni->ibp_list);
419         /* lose peerlist's ref */
420         kiblnd_peer_decref(peer_ni);
421 }
422
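/* Report the NID and reference count of the index'th peer_ni on this NI;
 * backs the IOC_LIBCFS_GET_PEER ioctl below.
 */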
423 static int
424 kiblnd_get_peer_info(struct lnet_ni *ni, int index,
425                      lnet_nid_t *nidp, int *count)
426 {
427         struct kib_peer_ni              *peer_ni;
428         int                      i;
429         unsigned long            flags;
430
431         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
432
433         hash_for_each(kiblnd_data.kib_peers, i, peer_ni, ibp_list) {
434                 LASSERT(!kiblnd_peer_idle(peer_ni));
435
436                 if (peer_ni->ibp_ni != ni)
437                         continue;
438
439                 if (index-- > 0)
440                         continue;
441
442                 *nidp = peer_ni->ibp_nid;
443                 *count = atomic_read(&peer_ni->ibp_refcount);
444
445                 read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
446                 return 0;
447         }
448
449         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
450         return -ENOENT;
451 }
452
453 static void
454 kiblnd_del_peer_locked(struct kib_peer_ni *peer_ni)
455 {
456         struct kib_conn *cnxt;
457         struct kib_conn *conn;
458
459         if (list_empty(&peer_ni->ibp_conns)) {
460                 kiblnd_unlink_peer_locked(peer_ni);
461         } else {
462                 list_for_each_entry_safe(conn, cnxt, &peer_ni->ibp_conns,
463                                          ibc_list)
464                         kiblnd_close_conn_locked(conn, 0);
465                 /* NB closing peer_ni's last conn unlinked it. */
466         }
467         /* NB peer_ni now unlinked; might even be freed if the peer_ni
468          * table had the last ref on it. */
469 }
470
471 static int
472 kiblnd_del_peer(struct lnet_ni *ni, lnet_nid_t nid)
473 {
474         LIST_HEAD(zombies);
475         struct hlist_node *pnxt;
476         struct kib_peer_ni *peer_ni;
477         int lo;
478         int hi;
479         int i;
480         unsigned long flags;
481         int rc = -ENOENT;
482
483         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
484
485         if (nid != LNET_NID_ANY) {
486                 lo = hash_min(nid, HASH_BITS(kiblnd_data.kib_peers));
487                 hi = lo;
488         } else {
489                 lo = 0;
490                 hi = HASH_SIZE(kiblnd_data.kib_peers) - 1;
491         }
492
493         for (i = lo; i <= hi; i++) {
494                 hlist_for_each_entry_safe(peer_ni, pnxt,
495                                           &kiblnd_data.kib_peers[i], ibp_list) {
496                         LASSERT(!kiblnd_peer_idle(peer_ni));
497
498                         if (peer_ni->ibp_ni != ni)
499                                 continue;
500
501                         if (!(nid == LNET_NID_ANY || peer_ni->ibp_nid == nid))
502                                 continue;
503
504                         if (!list_empty(&peer_ni->ibp_tx_queue)) {
505                                 LASSERT(list_empty(&peer_ni->ibp_conns));
506
507                                 list_splice_init(&peer_ni->ibp_tx_queue,
508                                                  &zombies);
509                         }
510
511                         kiblnd_del_peer_locked(peer_ni);
512                         rc = 0;         /* matched something */
513                 }
514         }
515
516         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
517
518         kiblnd_txlist_done(&zombies, -EIO, LNET_MSG_STATUS_LOCAL_ERROR);
519
520         return rc;
521 }
522
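/* Return the index'th connection over all peers on this NI with a reference
 * held (the caller must drop it); backs the IOC_LIBCFS_GET_CONN ioctl.
 */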
523 static struct kib_conn *
524 kiblnd_get_conn_by_idx(struct lnet_ni *ni, int index)
525 {
526         struct kib_peer_ni *peer_ni;
527         struct kib_conn *conn;
528         struct list_head *ctmp;
529         int i;
530         unsigned long flags;
531
532         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
533
534         hash_for_each(kiblnd_data.kib_peers, i, peer_ni, ibp_list) {
535                 LASSERT(!kiblnd_peer_idle(peer_ni));
536
537                 if (peer_ni->ibp_ni != ni)
538                         continue;
539
540                 list_for_each(ctmp, &peer_ni->ibp_conns) {
541                         if (index-- > 0)
542                                 continue;
543
544                         conn = list_entry(ctmp, struct kib_conn, ibc_list);
545                         kiblnd_conn_addref(conn);
546                         read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
547                                                flags);
548                         return conn;
549                 }
550         }
551
552         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
553         return NULL;
554 }
555
556 static void
557 kiblnd_debug_rx(struct kib_rx *rx)
558 {
559         CDEBUG(D_CONSOLE, "      %p msg_type %x cred %d\n",
560                rx, rx->rx_msg->ibm_type,
561                rx->rx_msg->ibm_credits);
562 }
563
564 static void
565 kiblnd_debug_tx(struct kib_tx *tx)
566 {
567         CDEBUG(D_CONSOLE, "      %p snd %d q %d w %d rc %d dl %lld "
568                "cookie %#llx msg %s%s type %x cred %d\n",
569                tx, tx->tx_sending, tx->tx_queued, tx->tx_waiting,
570                tx->tx_status, ktime_to_ns(tx->tx_deadline), tx->tx_cookie,
571                tx->tx_lntmsg[0] == NULL ? "-" : "!",
572                tx->tx_lntmsg[1] == NULL ? "-" : "!",
573                tx->tx_msg->ibm_type, tx->tx_msg->ibm_credits);
574 }
575
576 void
577 kiblnd_debug_conn(struct kib_conn *conn)
578 {
579         struct list_head        *tmp;
580         int                     i;
581
582         spin_lock(&conn->ibc_lock);
583
584         CDEBUG(D_CONSOLE, "conn[%d] %p [version %x] -> %s:\n",
585                atomic_read(&conn->ibc_refcount), conn,
586                conn->ibc_version, libcfs_nid2str(conn->ibc_peer->ibp_nid));
587         CDEBUG(D_CONSOLE, "   state %d nposted %d/%d cred %d o_cred %d"
588                " r_cred %d\n", conn->ibc_state, conn->ibc_noops_posted,
589                conn->ibc_nsends_posted, conn->ibc_credits,
590                conn->ibc_outstanding_credits, conn->ibc_reserved_credits);
591         CDEBUG(D_CONSOLE, "   comms_err %d\n", conn->ibc_comms_error);
592
593         CDEBUG(D_CONSOLE, "   early_rxs:\n");
594         list_for_each(tmp, &conn->ibc_early_rxs)
595                 kiblnd_debug_rx(list_entry(tmp, struct kib_rx, rx_list));
596
597         CDEBUG(D_CONSOLE, "   tx_noops:\n");
598         list_for_each(tmp, &conn->ibc_tx_noops)
599                 kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list));
600
601         CDEBUG(D_CONSOLE, "   tx_queue_nocred:\n");
602         list_for_each(tmp, &conn->ibc_tx_queue_nocred)
603                 kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list));
604
605         CDEBUG(D_CONSOLE, "   tx_queue_rsrvd:\n");
606         list_for_each(tmp, &conn->ibc_tx_queue_rsrvd)
607                 kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list));
608
609         CDEBUG(D_CONSOLE, "   tx_queue:\n");
610         list_for_each(tmp, &conn->ibc_tx_queue)
611                 kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list));
612
613         CDEBUG(D_CONSOLE, "   active_txs:\n");
614         list_for_each(tmp, &conn->ibc_active_txs)
615                 kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list));
616
617         CDEBUG(D_CONSOLE, "   rxs:\n");
618         for (i = 0; i < IBLND_RX_MSGS(conn); i++)
619                 kiblnd_debug_rx(&conn->ibc_rxs[i]);
620
621         spin_unlock(&conn->ibc_lock);
622 }
623
624 static void
625 kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid)
626 {
627         /* XXX There is no path record for iWARP, set by netdev->change_mtu? */
628         if (cmid->route.path_rec == NULL)
629                 return;
630
631         if (*kiblnd_tunables.kib_ib_mtu)
632                 cmid->route.path_rec->mtu =
633                         ib_mtu_int_to_enum(*kiblnd_tunables.kib_ib_mtu);
634 }
635
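/* Spread connections over the HCA's completion vectors: hash the peer NID
 * onto one of the CPUs in this CPT's mask, then map that CPU number onto a
 * vector index, so a given peer always ends up on the same vector.
 */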
636 static int
637 kiblnd_get_completion_vector(struct kib_conn *conn, int cpt)
638 {
639         cpumask_var_t   *mask;
640         int             vectors;
641         int             off;
642         int             i;
643         lnet_nid_t      ibp_nid;
644
645         vectors = conn->ibc_cmid->device->num_comp_vectors;
646         if (vectors <= 1)
647                 return 0;
648
649         mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt);
650
651         /* hash NID to CPU id in this partition... */
652         ibp_nid = conn->ibc_peer->ibp_nid;
653         off = do_div(ibp_nid, cpumask_weight(*mask));
654         for_each_cpu(i, *mask) {
655                 if (off-- == 0)
656                         return i % vectors;
657         }
658
659         LBUG();
660         return 1;
661 }
662
663 /*
664  * Get the scheduler bound to this CPT. If the scheduler has no
665  * threads, which means that the CPT has no CPUs, then grab the
666  * next scheduler that we can use.
667  *
668  * This case would be triggered if a NUMA node is configured with
669  * no associated CPUs.
670  */
671 static struct kib_sched_info *
672 kiblnd_get_scheduler(int cpt)
673 {
674         struct kib_sched_info *sched;
675         int i;
676
677         sched = kiblnd_data.kib_scheds[cpt];
678
679         if (sched->ibs_nthreads > 0)
680                 return sched;
681
682         cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
683                 if (sched->ibs_nthreads > 0) {
684                         CDEBUG(D_NET, "scheduler[%d] has no threads. selected scheduler[%d]\n",
685                                         cpt, sched->ibs_cpt);
686                         return sched;
687                 }
688         }
689
690         return NULL;
691 }
692
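/* Send-queue work requests needed for this connection: one WR for each LNet
 * message plus ibc_max_frags transfer WRs (plus two more when FastReg needs
 * map/invalidate WRs), for up to ibc_queue_depth messages in flight, capped
 * at what the device supports.  For example (values illustrative only), with
 * max_frags = 256 and queue depth 8 on a FastReg device this asks for
 * (1 + 256 + 2) * 8 = 2072 send WRs.
 */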
693 static unsigned int kiblnd_send_wrs(struct kib_conn *conn)
694 {
695         /*
696          * One WR for the LNet message
697          * And ibc_max_frags for the transfer WRs
698          */
699         int ret;
700         int multiplier = 1 + conn->ibc_max_frags;
701         enum kib_dev_caps dev_caps = conn->ibc_hdev->ibh_dev->ibd_dev_caps;
702
703         /* FastReg needs two extra WRs for map and invalidate */
704         if (dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED)
705                 multiplier += 2;
706
707         /* account for a maximum of ibc_queue_depth in-flight transfers */
708         ret = multiplier * conn->ibc_queue_depth;
709
710         if (ret > conn->ibc_hdev->ibh_max_qp_wr) {
711                 CDEBUG(D_NET, "peer_credits %u will result in send work "
712                        "request size %d larger than maximum %d device "
713                        "can handle\n", conn->ibc_queue_depth, ret,
714                        conn->ibc_hdev->ibh_max_qp_wr);
715                 conn->ibc_queue_depth =
716                         conn->ibc_hdev->ibh_max_qp_wr / multiplier;
717         }
718
719         /* don't go beyond the maximum the device can handle */
720         return min(ret, conn->ibc_hdev->ibh_max_qp_wr);
721 }
722
723 struct kib_conn *
724 kiblnd_create_conn(struct kib_peer_ni *peer_ni, struct rdma_cm_id *cmid,
725                    int state, int version)
726 {
727         /* CAVEAT EMPTOR:
728          * If the new conn is created successfully it takes over the caller's
729          * ref on 'peer_ni'.  It also "owns" 'cmid' and destroys it when it itself
730          * is destroyed.  On failure, the caller's ref on 'peer_ni' remains and
731          * she must dispose of 'cmid'.  (Actually I'd block forever if I tried
732          * to destroy 'cmid' here since I'm called from the CM which still has
733          * its ref on 'cmid'). */
734         rwlock_t               *glock = &kiblnd_data.kib_global_lock;
735         struct kib_net              *net = peer_ni->ibp_ni->ni_data;
736         struct kib_dev *dev;
737         struct ib_qp_init_attr init_qp_attr = {};
738         struct kib_sched_info   *sched;
739 #ifdef HAVE_IB_CQ_INIT_ATTR
740         struct ib_cq_init_attr  cq_attr = {};
741 #endif
742         struct kib_conn *conn;
743         struct ib_cq            *cq;
744         unsigned long           flags;
745         int                     cpt;
746         int                     rc;
747         int                     i;
748
749         LASSERT(net != NULL);
750         LASSERT(!in_interrupt());
751
752         dev = net->ibn_dev;
753
754         cpt = lnet_cpt_of_nid(peer_ni->ibp_nid, peer_ni->ibp_ni);
755         sched = kiblnd_get_scheduler(cpt);
756
757         if (sched == NULL) {
758                 CERROR("no schedulers available. node is unhealthy\n");
759                 goto failed_0;
760         }
761
762         /*
763          * The cpt might have changed if we ended up selecting a non cpt
764          * native scheduler. So use the scheduler's cpt instead.
765          */
766         cpt = sched->ibs_cpt;
767
768         LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn));
769         if (conn == NULL) {
770                 CERROR("Can't allocate connection for %s\n",
771                        libcfs_nid2str(peer_ni->ibp_nid));
772                 goto failed_0;
773         }
774
775         conn->ibc_state = IBLND_CONN_INIT;
776         conn->ibc_version = version;
777         conn->ibc_peer = peer_ni;                       /* I take the caller's ref */
778         cmid->context = conn;                   /* for future CM callbacks */
779         conn->ibc_cmid = cmid;
780         conn->ibc_max_frags = peer_ni->ibp_max_frags;
781         conn->ibc_queue_depth = peer_ni->ibp_queue_depth;
782         conn->ibc_rxs = NULL;
783         conn->ibc_rx_pages = NULL;
784
785         INIT_LIST_HEAD(&conn->ibc_early_rxs);
786         INIT_LIST_HEAD(&conn->ibc_tx_noops);
787         INIT_LIST_HEAD(&conn->ibc_tx_queue);
788         INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
789         INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
790         INIT_LIST_HEAD(&conn->ibc_active_txs);
791         INIT_LIST_HEAD(&conn->ibc_zombie_txs);
792         spin_lock_init(&conn->ibc_lock);
793
794         LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
795                          sizeof(*conn->ibc_connvars));
796         if (conn->ibc_connvars == NULL) {
797                 CERROR("Can't allocate in-progress connection state\n");
798                 goto failed_2;
799         }
800
801         write_lock_irqsave(glock, flags);
802         if (dev->ibd_failover) {
803                 write_unlock_irqrestore(glock, flags);
804                 CERROR("%s: failover in progress\n", dev->ibd_ifname);
805                 goto failed_2;
806         }
807
808         if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
809                 /* wakeup failover thread and teardown connection */
810                 if (kiblnd_dev_can_failover(dev)) {
811                         list_add_tail(&dev->ibd_fail_list,
812                                       &kiblnd_data.kib_failed_devs);
813                         wake_up(&kiblnd_data.kib_failover_waitq);
814                 }
815
816                 write_unlock_irqrestore(glock, flags);
817                 CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
818                        cmid->device->name, dev->ibd_ifname);
819                 goto failed_2;
820         }
821
822         kiblnd_hdev_addref_locked(dev->ibd_hdev);
823         conn->ibc_hdev = dev->ibd_hdev;
824
825         kiblnd_setup_mtu_locked(cmid);
826
827         write_unlock_irqrestore(glock, flags);
828
829 #ifdef HAVE_IB_CQ_INIT_ATTR
830         cq_attr.cqe = IBLND_CQ_ENTRIES(conn);
831         cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt);
832         cq = ib_create_cq(cmid->device,
833                           kiblnd_cq_completion, kiblnd_cq_event, conn,
834                           &cq_attr);
835 #else
836         cq = ib_create_cq(cmid->device,
837                           kiblnd_cq_completion, kiblnd_cq_event, conn,
838                           IBLND_CQ_ENTRIES(conn),
839                           kiblnd_get_completion_vector(conn, cpt));
840 #endif
841         if (IS_ERR(cq)) {
842                 /*
843                  * on MLX-5 (possibly MLX-4 as well) this error could be
844                  * hit if the concurrent_sends and/or peer_tx_credits is set
845                  * too high. Or due to an MLX-5 bug which tries to
846                  * allocate 256kb via kmalloc for WR cookie array
847                  */
848                 CERROR("Failed to create CQ with %d CQEs: %ld\n",
849                         IBLND_CQ_ENTRIES(conn), PTR_ERR(cq));
850                 goto failed_2;
851         }
852
853         conn->ibc_cq = cq;
854
855         rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
856         if (rc != 0) {
857                 CERROR("Can't request completion notification: %d\n", rc);
858                 goto failed_2;
859         }
860
861         init_qp_attr.event_handler = kiblnd_qp_event;
862         init_qp_attr.qp_context = conn;
863         init_qp_attr.cap.max_send_sge = *kiblnd_tunables.kib_wrq_sge;
864         init_qp_attr.cap.max_recv_sge = 1;
865         init_qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
866         init_qp_attr.qp_type = IB_QPT_RC;
867         init_qp_attr.send_cq = cq;
868         init_qp_attr.recv_cq = cq;
869
870         if (peer_ni->ibp_queue_depth_mod &&
871             peer_ni->ibp_queue_depth_mod < peer_ni->ibp_queue_depth) {
872                 conn->ibc_queue_depth = peer_ni->ibp_queue_depth_mod;
873                 CDEBUG(D_NET, "Use reduced queue depth %u (from %u)\n",
874                        peer_ni->ibp_queue_depth_mod,
875                        peer_ni->ibp_queue_depth);
876         }
877
878         do {
879                 /* kiblnd_send_wrs() can change the connection's queue depth if
880                  * the maximum work requests for the device is maxed out
881                  */
882                 init_qp_attr.cap.max_send_wr = kiblnd_send_wrs(conn);
883                 init_qp_attr.cap.max_recv_wr = IBLND_RECV_WRS(conn);
884                 rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd,
885                                     &init_qp_attr);
886                 if (rc != -ENOMEM || conn->ibc_queue_depth < 2)
887                         break;
888                 conn->ibc_queue_depth--;
889         } while (rc);
890
891         if (rc) {
892                 CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d, "
893                        "send_sge: %d, recv_sge: %d\n",
894                        rc, init_qp_attr.cap.max_send_wr,
895                        init_qp_attr.cap.max_recv_wr,
896                        init_qp_attr.cap.max_send_sge,
897                        init_qp_attr.cap.max_recv_sge);
898                 goto failed_2;
899         }
900
901         conn->ibc_sched = sched;
902
903         if (!peer_ni->ibp_queue_depth_mod &&
904             conn->ibc_queue_depth != peer_ni->ibp_queue_depth) {
905                 CWARN("peer %s - queue depth reduced from %u to %u"
906                       " to allow for qp creation\n",
907                       libcfs_nid2str(peer_ni->ibp_nid),
908                       peer_ni->ibp_queue_depth,
909                       conn->ibc_queue_depth);
910                 peer_ni->ibp_queue_depth_mod = conn->ibc_queue_depth;
911         }
912
913         LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
914                          IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
915         if (conn->ibc_rxs == NULL) {
916                 CERROR("Cannot allocate RX buffers\n");
917                 goto failed_2;
918         }
919
920         rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt,
921                                 IBLND_RX_MSG_PAGES(conn));
922         if (rc != 0)
923                 goto failed_2;
924
925         kiblnd_map_rx_descs(conn);
926
927         /* 1 ref for caller and each rxmsg */
928         atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(conn));
929         conn->ibc_nrx = IBLND_RX_MSGS(conn);
930
931         /* post receives */
932         for (i = 0; i < IBLND_RX_MSGS(conn); i++) {
933                 rc = kiblnd_post_rx(&conn->ibc_rxs[i], IBLND_POSTRX_NO_CREDIT);
934                 if (rc != 0) {
935                         CERROR("Can't post rxmsg: %d\n", rc);
936
937                         /* Make posted receives complete */
938                         kiblnd_abort_receives(conn);
939
940                         /* correct # of posted buffers
941                          * NB locking needed now I'm racing with completion */
942                         spin_lock_irqsave(&sched->ibs_lock, flags);
943                         conn->ibc_nrx -= IBLND_RX_MSGS(conn) - i;
944                         spin_unlock_irqrestore(&sched->ibs_lock, flags);
945
946                         /* cmid will be destroyed by CM(ofed) after cm_callback
947                          * returned, so we can't refer it anymore
948                          * (by kiblnd_connd()->kiblnd_destroy_conn) */
949                         rdma_destroy_qp(conn->ibc_cmid);
950                         conn->ibc_cmid = NULL;
951
952                         /* Drop my own and unused rxbuffer refcounts */
953                         while (i++ <= IBLND_RX_MSGS(conn))
954                                 kiblnd_conn_decref(conn);
955
956                         return NULL;
957                 }
958         }
959
960         /* Init successful! */
961         LASSERT(state == IBLND_CONN_ACTIVE_CONNECT ||
962                 state == IBLND_CONN_PASSIVE_WAIT);
963         conn->ibc_state = state;
964
965         /* 1 more conn */
966         atomic_inc(&net->ibn_nconns);
967         return conn;
968
969  failed_2:
970         kiblnd_destroy_conn(conn);
971         LIBCFS_FREE(conn, sizeof(*conn));
972  failed_0:
973         return NULL;
974 }
975
976 void
977 kiblnd_destroy_conn(struct kib_conn *conn)
978 {
979         struct rdma_cm_id *cmid = conn->ibc_cmid;
980         struct kib_peer_ni *peer_ni = conn->ibc_peer;
981
982         LASSERT(!in_interrupt());
983         LASSERT(atomic_read(&conn->ibc_refcount) == 0);
984         LASSERT(list_empty(&conn->ibc_early_rxs));
985         LASSERT(list_empty(&conn->ibc_tx_noops));
986         LASSERT(list_empty(&conn->ibc_tx_queue));
987         LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd));
988         LASSERT(list_empty(&conn->ibc_tx_queue_nocred));
989         LASSERT(list_empty(&conn->ibc_active_txs));
990         LASSERT(conn->ibc_noops_posted == 0);
991         LASSERT(conn->ibc_nsends_posted == 0);
992
993         switch (conn->ibc_state) {
994         default:
995                 /* conn must be completely disengaged from the network */
996                 LBUG();
997
998         case IBLND_CONN_DISCONNECTED:
999                 /* connvars should have been freed already */
1000                 LASSERT(conn->ibc_connvars == NULL);
1001                 break;
1002
1003         case IBLND_CONN_INIT:
1004                 break;
1005         }
1006
1007         /* conn->ibc_cmid might be destroyed by CM already */
1008         if (cmid != NULL && cmid->qp != NULL)
1009                 rdma_destroy_qp(cmid);
1010
1011         if (conn->ibc_cq)
1012                 ib_destroy_cq(conn->ibc_cq);
1013
1014         kiblnd_txlist_done(&conn->ibc_zombie_txs, -ECONNABORTED,
1015                            LNET_MSG_STATUS_OK);
1016
1017         if (conn->ibc_rx_pages != NULL)
1018                 kiblnd_unmap_rx_descs(conn);
1019
1020         if (conn->ibc_rxs != NULL)
1021                 CFS_FREE_PTR_ARRAY(conn->ibc_rxs, IBLND_RX_MSGS(conn));
1022
1023         if (conn->ibc_connvars != NULL)
1024                 LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
1025
1026         if (conn->ibc_hdev != NULL)
1027                 kiblnd_hdev_decref(conn->ibc_hdev);
1028
1029         /* See CAVEAT EMPTOR above in kiblnd_create_conn */
1030         if (conn->ibc_state != IBLND_CONN_INIT) {
1031                 struct kib_net *net = peer_ni->ibp_ni->ni_data;
1032
1033                 kiblnd_peer_decref(peer_ni);
1034                 rdma_destroy_id(cmid);
1035                 atomic_dec(&net->ibn_nconns);
1036         }
1037 }
1038
1039 int
1040 kiblnd_close_peer_conns_locked(struct kib_peer_ni *peer_ni, int why)
1041 {
1042         struct kib_conn *conn;
1043         struct kib_conn *cnxt;
1044         int count = 0;
1045
1046         list_for_each_entry_safe(conn, cnxt, &peer_ni->ibp_conns,
1047                                  ibc_list) {
1048                 CDEBUG(D_NET, "Closing conn -> %s, "
1049                               "version: %x, reason: %d\n",
1050                        libcfs_nid2str(peer_ni->ibp_nid),
1051                        conn->ibc_version, why);
1052
1053                 kiblnd_close_conn_locked(conn, why);
1054                 count++;
1055         }
1056
1057         return count;
1058 }
1059
1060 int
1061 kiblnd_close_stale_conns_locked(struct kib_peer_ni *peer_ni,
1062                                 int version, __u64 incarnation)
1063 {
1064         struct kib_conn *conn;
1065         struct kib_conn *cnxt;
1066         int count = 0;
1067
1068         list_for_each_entry_safe(conn, cnxt, &peer_ni->ibp_conns,
1069                                  ibc_list) {
1070                 if (conn->ibc_version     == version &&
1071                     conn->ibc_incarnation == incarnation)
1072                         continue;
1073
1074                 CDEBUG(D_NET, "Closing stale conn -> %s version: %x, "
1075                               "incarnation:%#llx(%x, %#llx)\n",
1076                        libcfs_nid2str(peer_ni->ibp_nid),
1077                        conn->ibc_version, conn->ibc_incarnation,
1078                        version, incarnation);
1079
1080                 kiblnd_close_conn_locked(conn, -ESTALE);
1081                 count++;
1082         }
1083
1084         return count;
1085 }
1086
1087 static int
1088 kiblnd_close_matching_conns(struct lnet_ni *ni, lnet_nid_t nid)
1089 {
1090         struct kib_peer_ni *peer_ni;
1091         struct hlist_node *pnxt;
1092         int lo;
1093         int hi;
1094         int i;
1095         unsigned long flags;
1096         int count = 0;
1097
1098         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1099
1100         if (nid != LNET_NID_ANY) {
1101                 lo = hash_min(nid, HASH_BITS(kiblnd_data.kib_peers));
1102                 hi = lo;
1103         } else {
1104                 lo = 0;
1105                 hi = HASH_SIZE(kiblnd_data.kib_peers) - 1;
1106         }
1107
1108         for (i = lo; i <= hi; i++) {
1109                 hlist_for_each_entry_safe(peer_ni, pnxt,
1110                                           &kiblnd_data.kib_peers[i], ibp_list) {
1111                         LASSERT(!kiblnd_peer_idle(peer_ni));
1112
1113                         if (peer_ni->ibp_ni != ni)
1114                                 continue;
1115
1116                         if (!(nid == LNET_NID_ANY || nid == peer_ni->ibp_nid))
1117                                 continue;
1118
1119                         count += kiblnd_close_peer_conns_locked(peer_ni, 0);
1120                 }
1121         }
1122
1123         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1124
1125         /* wildcards always succeed */
1126         if (nid == LNET_NID_ANY)
1127                 return 0;
1128
1129         return (count == 0) ? -ENOENT : 0;
1130 }
1131
1132 static int
1133 kiblnd_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
1134 {
1135         struct libcfs_ioctl_data *data = arg;
1136         int                       rc = -EINVAL;
1137
1138         switch (cmd) {
1139         case IOC_LIBCFS_GET_PEER: {
1140                 lnet_nid_t   nid = 0;
1141                 int          count = 0;
1142
1143                 rc = kiblnd_get_peer_info(ni, data->ioc_count,
1144                                           &nid, &count);
1145                 data->ioc_nid    = nid;
1146                 data->ioc_count  = count;
1147                 break;
1148         }
1149
1150         case IOC_LIBCFS_DEL_PEER: {
1151                 rc = kiblnd_del_peer(ni, data->ioc_nid);
1152                 break;
1153         }
1154         case IOC_LIBCFS_GET_CONN: {
1155                 struct kib_conn *conn;
1156
1157                 rc = 0;
1158                 conn = kiblnd_get_conn_by_idx(ni, data->ioc_count);
1159                 if (conn == NULL) {
1160                         rc = -ENOENT;
1161                         break;
1162                 }
1163
1164                 LASSERT(conn->ibc_cmid != NULL);
1165                 data->ioc_nid = conn->ibc_peer->ibp_nid;
1166                 if (conn->ibc_cmid->route.path_rec == NULL)
1167                         data->ioc_u32[0] = 0; /* iWarp has no path MTU */
1168                 else
1169                         data->ioc_u32[0] =
1170                         ib_mtu_enum_to_int(conn->ibc_cmid->route.path_rec->mtu);
1171                 kiblnd_conn_decref(conn);
1172                 break;
1173         }
1174         case IOC_LIBCFS_CLOSE_CONNECTION: {
1175                 rc = kiblnd_close_matching_conns(ni, data->ioc_nid);
1176                 break;
1177         }
1178
1179         default:
1180                 break;
1181         }
1182
1183         return rc;
1184 }
1185
1186 static void
1187 kiblnd_free_pages(struct kib_pages *p)
1188 {
1189         int     npages = p->ibp_npages;
1190         int     i;
1191
1192         for (i = 0; i < npages; i++) {
1193                 if (p->ibp_pages[i] != NULL)
1194                         __free_page(p->ibp_pages[i]);
1195         }
1196
1197         LIBCFS_FREE(p, offsetof(struct kib_pages, ibp_pages[npages]));
1198 }
1199
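/* Allocate a kib_pages descriptor plus @npages pages on the given CPT.  On
 * partial failure everything allocated so far is freed and -ENOMEM returned.
 */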
1200 int
1201 kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages)
1202 {
1203         struct kib_pages *p;
1204         int i;
1205
1206         LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
1207                          offsetof(struct kib_pages, ibp_pages[npages]));
1208         if (p == NULL) {
1209                 CERROR("Can't allocate descriptor for %d pages\n", npages);
1210                 return -ENOMEM;
1211         }
1212
1213         memset(p, 0, offsetof(struct kib_pages, ibp_pages[npages]));
1214         p->ibp_npages = npages;
1215
1216         for (i = 0; i < npages; i++) {
1217                 p->ibp_pages[i] = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
1218                                                      GFP_NOFS);
1219                 if (p->ibp_pages[i] == NULL) {
1220                         CERROR("Can't allocate page %d of %d\n", i, npages);
1221                         kiblnd_free_pages(p);
1222                         return -ENOMEM;
1223                 }
1224         }
1225
1226         *pp = p;
1227         return 0;
1228 }
1229
1230 void
1231 kiblnd_unmap_rx_descs(struct kib_conn *conn)
1232 {
1233         struct kib_rx *rx;
1234         int       i;
1235
1236         LASSERT(conn->ibc_rxs != NULL);
1237         LASSERT(conn->ibc_hdev != NULL);
1238
1239         for (i = 0; i < IBLND_RX_MSGS(conn); i++) {
1240                 rx = &conn->ibc_rxs[i];
1241
1242                 LASSERT(rx->rx_nob >= 0); /* not posted */
1243
1244                 kiblnd_dma_unmap_single(conn->ibc_hdev->ibh_ibdev,
1245                                         KIBLND_UNMAP_ADDR(rx, rx_msgunmap,
1246                                                           rx->rx_msgaddr),
1247                                         IBLND_MSG_SIZE, DMA_FROM_DEVICE);
1248         }
1249
1250         kiblnd_free_pages(conn->ibc_rx_pages);
1251
1252         conn->ibc_rx_pages = NULL;
1253 }
1254
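/* Carve the pre-allocated RX pages into IBLND_MSG_SIZE buffers (a message
 * never straddles a page) and DMA-map each one for the HCA to receive into.
 */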
1255 void
1256 kiblnd_map_rx_descs(struct kib_conn *conn)
1257 {
1258         struct kib_rx *rx;
1259         struct page    *pg;
1260         int             pg_off;
1261         int             ipg;
1262         int             i;
1263
1264         for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn); i++) {
1265                 pg = conn->ibc_rx_pages->ibp_pages[ipg];
1266                 rx = &conn->ibc_rxs[i];
1267
1268                 rx->rx_conn = conn;
1269                 rx->rx_msg = (struct kib_msg *)(((char *)page_address(pg)) + pg_off);
1270
1271                 rx->rx_msgaddr =
1272                         kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev,
1273                                               rx->rx_msg, IBLND_MSG_SIZE,
1274                                               DMA_FROM_DEVICE);
1275                 LASSERT(!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev,
1276                                                   rx->rx_msgaddr));
1277                 KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);
1278
1279                 CDEBUG(D_NET, "rx %d: %p %#llx(%#llx)\n",
1280                        i, rx->rx_msg, rx->rx_msgaddr,
1281                        (__u64)(page_to_phys(pg) + pg_off));
1282
1283                 pg_off += IBLND_MSG_SIZE;
1284                 LASSERT(pg_off <= PAGE_SIZE);
1285
1286                 if (pg_off == PAGE_SIZE) {
1287                         pg_off = 0;
1288                         ipg++;
1289                         LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn));
1290                 }
1291         }
1292 }
1293
1294 static void
1295 kiblnd_unmap_tx_pool(struct kib_tx_pool *tpo)
1296 {
1297         struct kib_hca_dev *hdev = tpo->tpo_hdev;
1298         struct kib_tx *tx;
1299         int i;
1300
1301         LASSERT(tpo->tpo_pool.po_allocated == 0);
1302
1303         if (hdev == NULL)
1304                 return;
1305
1306         for (i = 0; i < tpo->tpo_pool.po_size; i++) {
1307                 tx = &tpo->tpo_tx_descs[i];
1308                 kiblnd_dma_unmap_single(hdev->ibh_ibdev,
1309                                         KIBLND_UNMAP_ADDR(tx, tx_msgunmap,
1310                                                           tx->tx_msgaddr),
1311                                         IBLND_MSG_SIZE, DMA_TO_DEVICE);
1312         }
1313
1314         kiblnd_hdev_decref(hdev);
1315         tpo->tpo_hdev = NULL;
1316 }
1317
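/* Return the device's current HCA handle with a reference held, sleeping
 * (and logging occasionally) while a failover is in progress.
 */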
1318 static struct kib_hca_dev *
1319 kiblnd_current_hdev(struct kib_dev *dev)
1320 {
1321         struct kib_hca_dev *hdev;
1322         unsigned long  flags;
1323         int            i = 0;
1324
1325         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1326         while (dev->ibd_failover) {
1327                 read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1328                 if (i++ % 50 == 0)
1329                         CDEBUG(D_NET, "%s: Wait for failover\n",
1330                                dev->ibd_ifname);
1331                 schedule_timeout_interruptible(cfs_time_seconds(1) / 100);
1332
1333                 read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1334         }
1335
1336         kiblnd_hdev_addref_locked(dev->ibd_hdev);
1337         hdev = dev->ibd_hdev;
1338
1339         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1340
1341         return hdev;
1342 }
1343
1344 static void
1345 kiblnd_map_tx_pool(struct kib_tx_pool *tpo)
1346 {
1347         struct kib_pages *txpgs = tpo->tpo_tx_pages;
1348         struct kib_pool *pool = &tpo->tpo_pool;
1349         struct kib_net      *net   = pool->po_owner->ps_net;
1350         struct kib_dev *dev;
1351         struct page *page;
1352         struct kib_tx *tx;
1353         int             page_offset;
1354         int             ipage;
1355         int             i;
1356
1357         LASSERT(net != NULL);
1358
1359         dev = net->ibn_dev;
1360
1361         /* pre-mapped messages are not bigger than 1 page */
1362         BUILD_BUG_ON(IBLND_MSG_SIZE > PAGE_SIZE);
1363
1364         /* No fancy arithmetic when we do the buffer calculations */
1365         BUILD_BUG_ON(PAGE_SIZE % IBLND_MSG_SIZE != 0);
1366
1367         tpo->tpo_hdev = kiblnd_current_hdev(dev);
1368
1369         for (ipage = page_offset = i = 0; i < pool->po_size; i++) {
1370                 page = txpgs->ibp_pages[ipage];
1371                 tx = &tpo->tpo_tx_descs[i];
1372
1373                 tx->tx_msg = (struct kib_msg *)(((char *)page_address(page)) +
1374                                                 page_offset);
1375
1376                 tx->tx_msgaddr = kiblnd_dma_map_single(tpo->tpo_hdev->ibh_ibdev,
1377                                                        tx->tx_msg,
1378                                                        IBLND_MSG_SIZE,
1379                                                        DMA_TO_DEVICE);
1380                 LASSERT(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev,
1381                                                   tx->tx_msgaddr));
1382                 KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);
1383
1384                 list_add(&tx->tx_list, &pool->po_free_list);
1385
1386                 page_offset += IBLND_MSG_SIZE;
1387                 LASSERT(page_offset <= PAGE_SIZE);
1388
1389                 if (page_offset == PAGE_SIZE) {
1390                         page_offset = 0;
1391                         ipage++;
1392                         LASSERT(ipage <= txpgs->ibp_npages);
1393                 }
1394         }
1395 }
1396
1397 static void
1398 kiblnd_destroy_fmr_pool(struct kib_fmr_pool *fpo)
1399 {
1400         LASSERT(fpo->fpo_map_count == 0);
1401
1402 #ifdef HAVE_FMR_POOL_API
1403         if (fpo->fpo_is_fmr && fpo->fmr.fpo_fmr_pool) {
1404                 ib_destroy_fmr_pool(fpo->fmr.fpo_fmr_pool);
1405         } else
1406 #endif /* HAVE_FMR_POOL_API */
1407         {
1408                 struct kib_fast_reg_descriptor *frd, *tmp;
1409                 int i = 0;
1410
1411                 list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
1412                                          frd_list) {
1413                         list_del(&frd->frd_list);
1414 #ifndef HAVE_IB_MAP_MR_SG
1415                         ib_free_fast_reg_page_list(frd->frd_frpl);
1416 #endif
1417                         ib_dereg_mr(frd->frd_mr);
1418                         LIBCFS_FREE(frd, sizeof(*frd));
1419                         i++;
1420                 }
1421                 if (i < fpo->fast_reg.fpo_pool_size)
1422                         CERROR("FastReg pool still has %d regions registered\n",
1423                                 fpo->fast_reg.fpo_pool_size - i);
1424         }
1425
1426         if (fpo->fpo_hdev)
1427                 kiblnd_hdev_decref(fpo->fpo_hdev);
1428
1429         LIBCFS_FREE(fpo, sizeof(*fpo));
1430 }
1431
1432 static void
1433 kiblnd_destroy_fmr_pool_list(struct list_head *head)
1434 {
1435         struct kib_fmr_pool *fpo, *tmp;
1436
1437         list_for_each_entry_safe(fpo, tmp, head, fpo_list) {
1438                 list_del(&fpo->fpo_list);
1439                 kiblnd_destroy_fmr_pool(fpo);
1440         }
1441 }
1442
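/* Each CPT gets an equal share of the configured FMR pool size and flush
 * trigger, but never less than the IBLND_FMR_POOL and IBLND_FMR_POOL_FLUSH
 * floors.
 */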
1443 static int
1444 kiblnd_fmr_pool_size(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
1445                      int ncpts)
1446 {
1447         int size = tunables->lnd_fmr_pool_size / ncpts;
1448
1449         return max(IBLND_FMR_POOL, size);
1450 }
1451
1452 static int
1453 kiblnd_fmr_flush_trigger(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
1454                          int ncpts)
1455 {
1456         int size = tunables->lnd_fmr_flush_trigger / ncpts;
1457
1458         return max(IBLND_FMR_POOL_FLUSH, size);
1459 }
1460
1461 #ifdef HAVE_FMR_POOL_API
1462 static int kiblnd_alloc_fmr_pool(struct kib_fmr_poolset *fps,
1463                                  struct kib_fmr_pool *fpo)
1464 {
1465         struct ib_fmr_pool_param param = {
1466                 .max_pages_per_fmr = LNET_MAX_IOV,
1467                 .page_shift        = PAGE_SHIFT,
1468                 .access            = (IB_ACCESS_LOCAL_WRITE |
1469                                       IB_ACCESS_REMOTE_WRITE),
1470                 .pool_size         = fps->fps_pool_size,
1471                 .dirty_watermark   = fps->fps_flush_trigger,
1472                 .flush_function    = NULL,
1473                 .flush_arg         = NULL,
1474                 .cache             = !!fps->fps_cache };
1475         int rc = 0;
1476
1477         fpo->fmr.fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd,
1478                                                    &param);
1479         if (IS_ERR(fpo->fmr.fpo_fmr_pool)) {
1480                 rc = PTR_ERR(fpo->fmr.fpo_fmr_pool);
1481                 if (rc != -ENOSYS)
1482                         CERROR("Failed to create FMR pool: %d\n", rc);
1483                 else
1484                         CERROR("FMRs are not supported\n");
1485         }
1486         fpo->fpo_is_fmr = true;
1487
1488         return rc;
1489 }
1490 #endif /* HAVE_FMR_POOL_API */
1491
1492 static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps,
1493                                   struct kib_fmr_pool *fpo,
1494                                   enum kib_dev_caps dev_caps)
1495 {
1496         struct kib_fast_reg_descriptor *frd, *tmp;
1497         int i, rc;
1498
1499 #ifdef HAVE_FMR_POOL_API
1500         fpo->fpo_is_fmr = false;
1501 #endif
1502
1503         INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list);
1504         fpo->fast_reg.fpo_pool_size = 0;
1505         for (i = 0; i < fps->fps_pool_size; i++) {
1506                 LIBCFS_CPT_ALLOC(frd, lnet_cpt_table(), fps->fps_cpt,
1507                                  sizeof(*frd));
1508                 if (!frd) {
1509                         CERROR("Failed to allocate a new fast_reg descriptor\n");
1510                         rc = -ENOMEM;
1511                         goto out;
1512                 }
1513                 frd->frd_mr = NULL;
1514
1515 #ifndef HAVE_IB_MAP_MR_SG
1516                 frd->frd_frpl = ib_alloc_fast_reg_page_list(fpo->fpo_hdev->ibh_ibdev,
1517                                                             LNET_MAX_IOV);
1518                 if (IS_ERR(frd->frd_frpl)) {
1519                         rc = PTR_ERR(frd->frd_frpl);
1520                         CERROR("Failed to allocate ib_fast_reg_page_list: %d\n",
1521                                 rc);
1522                         frd->frd_frpl = NULL;
1523                         goto out_middle;
1524                 }
1525 #endif
1526
1527 #ifdef HAVE_IB_ALLOC_FAST_REG_MR
1528                 frd->frd_mr = ib_alloc_fast_reg_mr(fpo->fpo_hdev->ibh_pd,
1529                                                    LNET_MAX_IOV);
1530 #else
1531                 /*
1532                  * We only expect to get here on MLX-5 cards: MLX-4 cards
1533                  * always use FMR and MLX-5 cards always use fast_reg.
1534                  * Some MLX-5 cards (possibly with older FW versions) do not
1535                  * natively support gaps, so we need to track that
1536                  * capability here.
1537                  */
1538                 frd->frd_mr = ib_alloc_mr(fpo->fpo_hdev->ibh_pd,
1539 #ifdef IB_MR_TYPE_SG_GAPS
1540                                           ((*kiblnd_tunables.kib_use_fastreg_gaps == 1) &&
1541                                            (dev_caps & IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT)) ?
1542                                                 IB_MR_TYPE_SG_GAPS :
1543                                                 IB_MR_TYPE_MEM_REG,
1544 #else
1545                                                 IB_MR_TYPE_MEM_REG,
1546 #endif
1547                                           LNET_MAX_IOV);
1548                 if ((*kiblnd_tunables.kib_use_fastreg_gaps == 1) &&
1549                     (dev_caps & IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT))
1550                         CWARN("using IB_MR_TYPE_SG_GAPS, expect a performance drop\n");
1551 #endif
1552                 if (IS_ERR(frd->frd_mr)) {
1553                         rc = PTR_ERR(frd->frd_mr);
1554                         CERROR("Failed to allocate ib_fast_reg_mr: %d\n", rc);
1555                         frd->frd_mr = NULL;
1556                         goto out_middle;
1557                 }
1558
1559                 /* There appears to be a bug in MLX5 code where you must
1560                  * invalidate the rkey of a new FastReg pool before first
1561                  * using it. Thus, I am marking the FRD invalid here. */
1562                 frd->frd_valid = false;
1563
1564                 list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
1565                 fpo->fast_reg.fpo_pool_size++;
1566         }
1567
1568         return 0;
1569
1570 out_middle:
1571         if (frd->frd_mr)
1572                 ib_dereg_mr(frd->frd_mr);
1573 #ifndef HAVE_IB_MAP_MR_SG
1574         if (frd->frd_frpl)
1575                 ib_free_fast_reg_page_list(frd->frd_frpl);
1576 #endif
1577         LIBCFS_FREE(frd, sizeof(*frd));
1578
1579 out:
1580         list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
1581                                  frd_list) {
1582                 list_del(&frd->frd_list);
1583 #ifndef HAVE_IB_MAP_MR_SG
1584                 ib_free_fast_reg_page_list(frd->frd_frpl);
1585 #endif
1586                 ib_dereg_mr(frd->frd_mr);
1587                 LIBCFS_FREE(frd, sizeof(*frd));
1588         }
1589
1590         return rc;
1591 }
1592
1593 static int kiblnd_create_fmr_pool(struct kib_fmr_poolset *fps,
1594                                   struct kib_fmr_pool **pp_fpo)
1595 {
1596         struct kib_dev *dev = fps->fps_net->ibn_dev;
1597         struct kib_fmr_pool *fpo;
1598         int rc;
1599
1600         LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
1601         if (!fpo) {
1602                 return -ENOMEM;
1603         }
1604         memset(fpo, 0, sizeof(*fpo));
1605
1606         fpo->fpo_hdev = kiblnd_current_hdev(dev);
1607
1608 #ifdef HAVE_FMR_POOL_API
1609         if (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)
1610                 rc = kiblnd_alloc_fmr_pool(fps, fpo);
1611         else
1612 #endif /* HAVE_FMR_POOL_API */
1613                 rc = kiblnd_alloc_freg_pool(fps, fpo, dev->ibd_dev_caps);
1614         if (rc)
1615                 goto out_fpo;
1616
1617         fpo->fpo_deadline = ktime_get_seconds() + IBLND_POOL_DEADLINE;
1618         fpo->fpo_owner = fps;
1619         *pp_fpo = fpo;
1620
1621         return 0;
1622
1623 out_fpo:
1624         kiblnd_hdev_decref(fpo->fpo_hdev);
1625         LIBCFS_FREE(fpo, sizeof(*fpo));
1626         return rc;
1627 }
1628
1629 static void
1630 kiblnd_fail_fmr_poolset(struct kib_fmr_poolset *fps, struct list_head *zombies)
1631 {
1632         struct kib_fmr_pool *fpo;
1633
1634         if (fps->fps_net == NULL) /* initialized? */
1635                 return;
1636
1637         spin_lock(&fps->fps_lock);
1638
1639         while ((fpo = list_first_entry_or_null(&fps->fps_pool_list,
1640                                                struct kib_fmr_pool,
1641                                                fpo_list)) != NULL) {
1642                 fpo->fpo_failed = 1;
1643                 if (fpo->fpo_map_count == 0)
1644                         list_move(&fpo->fpo_list, zombies);
1645                 else
1646                         list_move(&fpo->fpo_list, &fps->fps_failed_pool_list);
1647         }
1648
1649         spin_unlock(&fps->fps_lock);
1650 }
1651
1652 static void
1653 kiblnd_fini_fmr_poolset(struct kib_fmr_poolset *fps)
1654 {
1655         if (fps->fps_net != NULL) { /* initialized? */
1656                 kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list);
1657                 kiblnd_destroy_fmr_pool_list(&fps->fps_pool_list);
1658         }
1659 }
1660
1661 static int
1662 kiblnd_init_fmr_poolset(struct kib_fmr_poolset *fps, int cpt, int ncpts,
1663                         struct kib_net *net,
1664                         struct lnet_ioctl_config_o2iblnd_tunables *tunables)
1665 {
1666         struct kib_fmr_pool *fpo;
1667         int rc;
1668
1669         memset(fps, 0, sizeof(struct kib_fmr_poolset));
1670
1671         fps->fps_net = net;
1672         fps->fps_cpt = cpt;
1673
1674         fps->fps_pool_size = kiblnd_fmr_pool_size(tunables, ncpts);
1675         fps->fps_flush_trigger = kiblnd_fmr_flush_trigger(tunables, ncpts);
1676         fps->fps_cache = tunables->lnd_fmr_cache;
1677
1678         spin_lock_init(&fps->fps_lock);
1679         INIT_LIST_HEAD(&fps->fps_pool_list);
1680         INIT_LIST_HEAD(&fps->fps_failed_pool_list);
1681
1682         rc = kiblnd_create_fmr_pool(fps, &fpo);
1683         if (rc == 0)
1684                 list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
1685
1686         return rc;
1687 }
1688
1689 static int
1690 kiblnd_fmr_pool_is_idle(struct kib_fmr_pool *fpo, time64_t now)
1691 {
1692         if (fpo->fpo_map_count != 0) /* still in use */
1693                 return 0;
1694         if (fpo->fpo_failed)
1695                 return 1;
1696         return now >= fpo->fpo_deadline;
1697 }
1698
1699 #if defined(HAVE_FMR_POOL_API) || !defined(HAVE_IB_MAP_MR_SG)
1700 static int
1701 kiblnd_map_tx_pages(struct kib_tx *tx, struct kib_rdma_desc *rd)
1702 {
1703         struct kib_hca_dev *hdev;
1704         __u64           *pages = tx->tx_pages;
1705         int             npages;
1706         int             size;
1707         int             i;
1708
1709         hdev = tx->tx_pool->tpo_hdev;
1710
1711         for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
1712                 for (size = 0; size <  rd->rd_frags[i].rf_nob;
1713                         size += hdev->ibh_page_size) {
1714                         pages[npages++] = (rd->rd_frags[i].rf_addr &
1715                                            hdev->ibh_page_mask) + size;
1716                 }
1717         }
1718
1719         return npages;
1720 }
1721 #endif
1722
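/*
 * Return a mapping to its pool.  On the FMR path the pool entry is unmapped
 * (and the pool flushed if @status indicates failure); on the FastReg path
 * the descriptor is marked invalid and put back on the free list.  Idle,
 * non-persistent pools are then reaped.
 */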
1723 void
1724 kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status)
1725 {
1726         LIST_HEAD(zombies);
1727         struct kib_fmr_pool *fpo = fmr->fmr_pool;
1728         struct kib_fmr_poolset *fps;
1729         time64_t now = ktime_get_seconds();
1730         struct kib_fmr_pool *tmp;
1731
1732         if (!fpo)
1733                 return;
1734
1735         fps = fpo->fpo_owner;
1736
1737 #ifdef HAVE_FMR_POOL_API
1738         if (fpo->fpo_is_fmr) {
1739                 if (fmr->fmr_pfmr) {
1740                         ib_fmr_pool_unmap(fmr->fmr_pfmr);
1741                         fmr->fmr_pfmr = NULL;
1742                 }
1743
1744                 if (status) {
1745                         int rc = ib_flush_fmr_pool(fpo->fmr.fpo_fmr_pool);
1746                         LASSERT(!rc);
1747                 }
1748         } else
1749 #endif /* HAVE_FMR_POOL_API */
1750         {
1751                 struct kib_fast_reg_descriptor *frd = fmr->fmr_frd;
1752
1753                 if (frd) {
1754                         frd->frd_valid = false;
1755                         spin_lock(&fps->fps_lock);
1756                         list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
1757                         spin_unlock(&fps->fps_lock);
1758                         fmr->fmr_frd = NULL;
1759                 }
1760         }
1761         fmr->fmr_pool = NULL;
1762
1763         spin_lock(&fps->fps_lock);
1764         fpo->fpo_map_count--;   /* decref the pool */
1765
1766         list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) {
1767                 /* the first pool is persistent */
1768                 if (fps->fps_pool_list.next == &fpo->fpo_list)
1769                         continue;
1770
1771                 if (kiblnd_fmr_pool_is_idle(fpo, now)) {
1772                         list_move(&fpo->fpo_list, &zombies);
1773                         fps->fps_version++;
1774                 }
1775         }
1776         spin_unlock(&fps->fps_lock);
1777
1778         if (!list_empty(&zombies))
1779                 kiblnd_destroy_fmr_pool_list(&zombies);
1780 }
1781
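/*
 * Map the RDMA descriptor @rd for @tx.  Each pool in the set is tried in
 * turn, using either the FMR path or a free FastReg descriptor (re-keying a
 * stale MR and building the registration work request).  If every pool is
 * exhausted the poolset is grown, or we wait for another thread that is
 * already growing it.  Returns 0 with @fmr filled in, or a negative errno.
 */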
1782 int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
1783                         struct kib_rdma_desc *rd, u32 nob, u64 iov,
1784                         struct kib_fmr *fmr)
1785 {
1786         struct kib_fmr_pool *fpo;
1787         __u64 version;
1788         bool is_rx = (rd != tx->tx_rd);
1789 #ifdef HAVE_FMR_POOL_API
1790         __u64 *pages = tx->tx_pages;
1791         bool tx_pages_mapped = false;
1792         int npages = 0;
1793 #endif
1794         int rc;
1795
1796 again:
1797         spin_lock(&fps->fps_lock);
1798         version = fps->fps_version;
1799         list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
1800                 fpo->fpo_deadline = ktime_get_seconds() + IBLND_POOL_DEADLINE;
1801                 fpo->fpo_map_count++;
1802
1803 #ifdef HAVE_FMR_POOL_API
1804                 fmr->fmr_pfmr = NULL;
1805                 if (fpo->fpo_is_fmr) {
1806                         struct ib_pool_fmr *pfmr;
1807
1808                         spin_unlock(&fps->fps_lock);
1809
1810                         if (!tx_pages_mapped) {
1811                                 npages = kiblnd_map_tx_pages(tx, rd);
1812                                 tx_pages_mapped = true;
1813                         }
1814
1815                         pfmr = kib_fmr_pool_map(fpo->fmr.fpo_fmr_pool,
1816                                                 pages, npages, iov);
1817                         if (likely(!IS_ERR(pfmr))) {
1818                                 fmr->fmr_key  = is_rx ? pfmr->fmr->rkey
1819                                         : pfmr->fmr->lkey;
1820                                 fmr->fmr_frd  = NULL;
1821                                 fmr->fmr_pfmr = pfmr;
1822                                 fmr->fmr_pool = fpo;
1823                                 return 0;
1824                         }
1825                         rc = PTR_ERR(pfmr);
1826                 } else
1827 #endif /* HAVE_FMR_POOL_API */
1828                 {
1829                         if (!list_empty(&fpo->fast_reg.fpo_pool_list)) {
1830                                 struct kib_fast_reg_descriptor *frd;
1831 #ifdef HAVE_IB_MAP_MR_SG
1832                                 struct ib_reg_wr *wr;
1833                                 int n;
1834 #else
1835                                 struct ib_rdma_wr *wr;
1836                                 struct ib_fast_reg_page_list *frpl;
1837 #endif
1838                                 struct ib_mr *mr;
1839
1840                                 frd = list_first_entry(
1841                                         &fpo->fast_reg.fpo_pool_list,
1842                                         struct kib_fast_reg_descriptor,
1843                                         frd_list);
1844                                 list_del(&frd->frd_list);
1845                                 spin_unlock(&fps->fps_lock);
1846
1847 #ifndef HAVE_IB_MAP_MR_SG
1848                                 frpl = frd->frd_frpl;
1849 #endif
1850                                 mr   = frd->frd_mr;
1851
1852                                 if (!frd->frd_valid) {
1853                                         struct ib_rdma_wr *inv_wr;
1854                                         __u32 key = is_rx ? mr->rkey : mr->lkey;
1855
1856                                         inv_wr = &frd->frd_inv_wr;
1857                                         memset(inv_wr, 0, sizeof(*inv_wr));
1858
1859                                         inv_wr->wr.opcode = IB_WR_LOCAL_INV;
1860                                         inv_wr->wr.wr_id  = IBLND_WID_MR;
1861                                         inv_wr->wr.ex.invalidate_rkey = key;
1862
1863                                         /* Bump the key */
1864                                         key = ib_inc_rkey(key);
1865                                         ib_update_fast_reg_key(mr, key);
1866                                 }
1867
1868 #ifdef HAVE_IB_MAP_MR_SG
1869 #ifdef HAVE_IB_MAP_MR_SG_5ARGS
1870                                 n = ib_map_mr_sg(mr, tx->tx_frags,
1871                                                  rd->rd_nfrags, NULL, PAGE_SIZE);
1872 #else
1873                                 n = ib_map_mr_sg(mr, tx->tx_frags,
1874                                                  rd->rd_nfrags, PAGE_SIZE);
1875 #endif /* HAVE_IB_MAP_MR_SG_5ARGS */
1876                                 if (unlikely(n != rd->rd_nfrags)) {
1877                                         CERROR("Failed to map mr %d/%d elements\n",
1878                                                n, rd->rd_nfrags);
1879                                         return n < 0 ? n : -EINVAL;
1880                                 }
1881
1882                                 wr = &frd->frd_fastreg_wr;
1883                                 memset(wr, 0, sizeof(*wr));
1884
1885                                 wr->wr.opcode = IB_WR_REG_MR;
1886                                 wr->wr.wr_id  = IBLND_WID_MR;
1887                                 wr->wr.num_sge = 0;
1888                                 wr->wr.send_flags = 0;
1889                                 wr->mr = mr;
1890                                 wr->key = is_rx ? mr->rkey : mr->lkey;
1891                                 wr->access = (IB_ACCESS_LOCAL_WRITE |
1892                                               IB_ACCESS_REMOTE_WRITE);
1893 #else /* HAVE_IB_MAP_MR_SG */
1894                                 if (!tx_pages_mapped) {
1895                                         npages = kiblnd_map_tx_pages(tx, rd);
1896                                         tx_pages_mapped = true;
1897                                 }
1898
1899                                 LASSERT(npages <= frpl->max_page_list_len);
1900                                 memcpy(frpl->page_list, pages,
1901                                        sizeof(*pages) * npages);
1902
1903                                 /* Prepare FastReg WR */
1904                                 wr = &frd->frd_fastreg_wr;
1905                                 memset(wr, 0, sizeof(*wr));
1906
1907                                 wr->wr.opcode = IB_WR_FAST_REG_MR;
1908                                 wr->wr.wr_id  = IBLND_WID_MR;
1909
1910                                 wr->wr.wr.fast_reg.iova_start = iov;
1911                                 wr->wr.wr.fast_reg.page_list  = frpl;
1912                                 wr->wr.wr.fast_reg.page_list_len = npages;
1913                                 wr->wr.wr.fast_reg.page_shift = PAGE_SHIFT;
1914                                 wr->wr.wr.fast_reg.length = nob;
1915                                 wr->wr.wr.fast_reg.rkey =
1916                                         is_rx ? mr->rkey : mr->lkey;
1917                                 wr->wr.wr.fast_reg.access_flags =
1918                                         (IB_ACCESS_LOCAL_WRITE |
1919                                          IB_ACCESS_REMOTE_WRITE);
1920 #endif /* HAVE_IB_MAP_MR_SG */
1921
1922                                 fmr->fmr_key  = is_rx ? mr->rkey : mr->lkey;
1923                                 fmr->fmr_frd  = frd;
1924                                 fmr->fmr_pool = fpo;
1925                                 return 0;
1926                         }
1927                         spin_unlock(&fps->fps_lock);
1928                         rc = -EAGAIN;
1929                 }
1930
1931                 spin_lock(&fps->fps_lock);
1932                 fpo->fpo_map_count--;
1933                 if (rc != -EAGAIN) {
1934                         spin_unlock(&fps->fps_lock);
1935                         return rc;
1936                 }
1937
1938                 /* EAGAIN and ... */
1939                 if (version != fps->fps_version) {
1940                         spin_unlock(&fps->fps_lock);
1941                         goto again;
1942                 }
1943         }
1944
1945         if (fps->fps_increasing) {
1946                 spin_unlock(&fps->fps_lock);
1947                 CDEBUG(D_NET, "Another thread is allocating new "
1948                        "FMR pool, waiting for it to complete\n");
1949                 wait_var_event(fps, !fps->fps_increasing);
1950                 goto again;
1951
1952         }
1953
1954         if (ktime_get_seconds() < fps->fps_next_retry) {
1955                 /* someone failed recently */
1956                 spin_unlock(&fps->fps_lock);
1957                 return -EAGAIN;
1958         }
1959
1960         fps->fps_increasing = 1;
1961         spin_unlock(&fps->fps_lock);
1962
1963         CDEBUG(D_NET, "Allocate new FMR pool\n");
1964         rc = kiblnd_create_fmr_pool(fps, &fpo);
1965         spin_lock(&fps->fps_lock);
1966         fps->fps_increasing = 0;
1967         wake_up_var(fps);
1968         if (rc == 0) {
1969                 fps->fps_version++;
1970                 list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
1971         } else {
1972                 fps->fps_next_retry = ktime_get_seconds() + IBLND_POOL_RETRY;
1973         }
1974         spin_unlock(&fps->fps_lock);
1975
1976         goto again;
1977 }
1978
1979 static void
1980 kiblnd_fini_pool(struct kib_pool *pool)
1981 {
1982         LASSERT(list_empty(&pool->po_free_list));
1983         LASSERT(pool->po_allocated == 0);
1984
1985         CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
1986 }
1987
1988 static void
1989 kiblnd_init_pool(struct kib_poolset *ps, struct kib_pool *pool, int size)
1990 {
1991         CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);
1992
1993         memset(pool, 0, sizeof(struct kib_pool));
1994         INIT_LIST_HEAD(&pool->po_free_list);
1995         pool->po_deadline = ktime_get_seconds() + IBLND_POOL_DEADLINE;
1996         pool->po_owner = ps;
1997         pool->po_size = size;
1998 }
1999
2000 static void
2001 kiblnd_destroy_pool_list(struct list_head *head)
2002 {
2003         struct kib_pool *pool;
2004
2005         while ((pool = list_first_entry_or_null(head,
2006                                                 struct kib_pool,
2007                                                 po_list)) != NULL) {
2008                 list_del(&pool->po_list);
2009
2010                 LASSERT(pool->po_owner != NULL);
2011                 pool->po_owner->ps_pool_destroy(pool);
2012         }
2013 }
2014
2015 static void
2016 kiblnd_fail_poolset(struct kib_poolset *ps, struct list_head *zombies)
2017 {
2018         struct kib_pool *po;
2019
2020         if (ps->ps_net == NULL) /* initialized? */
2021                 return;
2022
2023         spin_lock(&ps->ps_lock);
2024         while ((po = list_first_entry_or_null(&ps->ps_pool_list,
2025                                               struct kib_pool,
2026                                               po_list)) != NULL) {
2027                 po->po_failed = 1;
2028                 if (po->po_allocated == 0)
2029                         list_move(&po->po_list, zombies);
2030                 else
2031                         list_move(&po->po_list, &ps->ps_failed_pool_list);
2032         }
2033         spin_unlock(&ps->ps_lock);
2034 }
2035
2036 static void
2037 kiblnd_fini_poolset(struct kib_poolset *ps)
2038 {
2039         if (ps->ps_net != NULL) { /* initialized? */
2040                 kiblnd_destroy_pool_list(&ps->ps_failed_pool_list);
2041                 kiblnd_destroy_pool_list(&ps->ps_pool_list);
2042         }
2043 }
2044
2045 static int
2046 kiblnd_init_poolset(struct kib_poolset *ps, int cpt,
2047                     struct kib_net *net, char *name, int size,
2048                     kib_ps_pool_create_t po_create,
2049                     kib_ps_pool_destroy_t po_destroy,
2050                     kib_ps_node_init_t nd_init,
2051                     kib_ps_node_fini_t nd_fini)
2052 {
2053         struct kib_pool *pool;
2054         int rc;
2055
2056         memset(ps, 0, sizeof(struct kib_poolset));
2057
2058         ps->ps_cpt          = cpt;
2059         ps->ps_net          = net;
2060         ps->ps_pool_create  = po_create;
2061         ps->ps_pool_destroy = po_destroy;
2062         ps->ps_node_init    = nd_init;
2063         ps->ps_node_fini    = nd_fini;
2064         ps->ps_pool_size    = size;
2065         if (strlcpy(ps->ps_name, name, sizeof(ps->ps_name))
2066             >= sizeof(ps->ps_name))
2067                 return -E2BIG;
2068         spin_lock_init(&ps->ps_lock);
2069         INIT_LIST_HEAD(&ps->ps_pool_list);
2070         INIT_LIST_HEAD(&ps->ps_failed_pool_list);
2071
2072         rc = ps->ps_pool_create(ps, size, &pool);
2073         if (rc == 0)
2074                 list_add(&pool->po_list, &ps->ps_pool_list);
2075         else
2076                 CERROR("Failed to create the first pool for %s\n", ps->ps_name);
2077
2078         return rc;
2079 }
2080
2081 static int
2082 kiblnd_pool_is_idle(struct kib_pool *pool, time64_t now)
2083 {
2084         if (pool->po_allocated != 0) /* still in use */
2085                 return 0;
2086         if (pool->po_failed)
2087                 return 1;
2088         return now >= pool->po_deadline;
2089 }
2090
2091 void
2092 kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node)
2093 {
2094         LIST_HEAD(zombies);
2095         struct kib_poolset *ps = pool->po_owner;
2096         struct kib_pool *tmp;
2097         time64_t now = ktime_get_seconds();
2098
2099         spin_lock(&ps->ps_lock);
2100
2101         if (ps->ps_node_fini != NULL)
2102                 ps->ps_node_fini(pool, node);
2103
2104         LASSERT(pool->po_allocated > 0);
2105         list_add(node, &pool->po_free_list);
2106         pool->po_allocated--;
2107
2108         list_for_each_entry_safe(pool, tmp, &ps->ps_pool_list, po_list) {
2109                 /* the first pool is persistent */
2110                 if (ps->ps_pool_list.next == &pool->po_list)
2111                         continue;
2112
2113                 if (kiblnd_pool_is_idle(pool, now))
2114                         list_move(&pool->po_list, &zombies);
2115         }
2116         spin_unlock(&ps->ps_lock);
2117
2118         if (!list_empty(&zombies))
2119                 kiblnd_destroy_pool_list(&zombies);
2120 }
2121
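/*
 * Take a free node from the poolset, growing the set when every pool is
 * empty.  If another thread is already allocating a new pool, back off with
 * an exponentially increasing sleep and retry; return NULL only if a recent
 * allocation attempt has failed.
 */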
2122 struct list_head *
2123 kiblnd_pool_alloc_node(struct kib_poolset *ps)
2124 {
2125         struct list_head        *node;
2126         struct kib_pool *pool;
2127         int                     rc;
2128         unsigned int            interval = 1;
2129         ktime_t time_before;
2130         unsigned int trips = 0;
2131
2132 again:
2133         spin_lock(&ps->ps_lock);
2134         list_for_each_entry(pool, &ps->ps_pool_list, po_list) {
2135                 if (list_empty(&pool->po_free_list))
2136                         continue;
2137
2138                 pool->po_allocated++;
2139                 pool->po_deadline = ktime_get_seconds() +
2140                                     IBLND_POOL_DEADLINE;
2141                 node = pool->po_free_list.next;
2142                 list_del(node);
2143
2144                 if (ps->ps_node_init != NULL) {
2145                         /* still hold the lock */
2146                         ps->ps_node_init(pool, node);
2147                 }
2148                 spin_unlock(&ps->ps_lock);
2149                 return node;
2150         }
2151
2152         /* no available tx pool and ... */
2153         if (ps->ps_increasing) {
2154                 /* another thread is allocating a new pool */
2155                 spin_unlock(&ps->ps_lock);
2156                 trips++;
2157                 CDEBUG(D_NET,
2158                        "Another thread is allocating new %s pool, waiting %d jiffies for it to complete. trips = %d\n",
2159                        ps->ps_name, interval, trips);
2160
2161                 schedule_timeout_interruptible(interval);
2162                 if (interval < cfs_time_seconds(1))
2163                         interval *= 2;
2164
2165                 goto again;
2166         }
2167
2168         if (ktime_get_seconds() < ps->ps_next_retry) {
2169                 /* someone failed recently */
2170                 spin_unlock(&ps->ps_lock);
2171                 return NULL;
2172         }
2173
2174         ps->ps_increasing = 1;
2175         spin_unlock(&ps->ps_lock);
2176
2177         CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
2178         time_before = ktime_get();
2179         rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
2180         CDEBUG(D_NET, "ps_pool_create took %lld ms to complete\n",
2181                ktime_ms_delta(ktime_get(), time_before));
2182
2183         spin_lock(&ps->ps_lock);
2184         ps->ps_increasing = 0;
2185         if (rc == 0) {
2186                 list_add_tail(&pool->po_list, &ps->ps_pool_list);
2187         } else {
2188                 ps->ps_next_retry = ktime_get_seconds() + IBLND_POOL_RETRY;
2189                 CERROR("Can't allocate new %s pool: out of memory\n",
2190                        ps->ps_name);
2191         }
2192         spin_unlock(&ps->ps_lock);
2193
2194         goto again;
2195 }
2196
2197 static void
2198 kiblnd_destroy_tx_pool(struct kib_pool *pool)
2199 {
2200         struct kib_tx_pool *tpo = container_of(pool, struct kib_tx_pool,
2201                                                tpo_pool);
2202         int i;
2203
2204         LASSERT (pool->po_allocated == 0);
2205
2206         if (tpo->tpo_tx_pages != NULL) {
2207                 kiblnd_unmap_tx_pool(tpo);
2208                 kiblnd_free_pages(tpo->tpo_tx_pages);
2209         }
2210
2211         if (tpo->tpo_tx_descs == NULL)
2212                 goto out;
2213
2214         for (i = 0; i < pool->po_size; i++) {
2215                 struct kib_tx *tx = &tpo->tpo_tx_descs[i];
2216                 int       wrq_sge = *kiblnd_tunables.kib_wrq_sge;
2217
2218                 list_del(&tx->tx_list);
2219                 if (tx->tx_pages != NULL)
2220                         CFS_FREE_PTR_ARRAY(tx->tx_pages, LNET_MAX_IOV);
2221                 if (tx->tx_frags != NULL)
2222                         CFS_FREE_PTR_ARRAY(tx->tx_frags,
2223                                            (1 + IBLND_MAX_RDMA_FRAGS));
2224                 if (tx->tx_wrq != NULL)
2225                         CFS_FREE_PTR_ARRAY(tx->tx_wrq,
2226                                            (1 + IBLND_MAX_RDMA_FRAGS));
2227                 if (tx->tx_sge != NULL)
2228                         CFS_FREE_PTR_ARRAY(tx->tx_sge,
2229                                            (1 + IBLND_MAX_RDMA_FRAGS) *
2230                                            wrq_sge);
2231                 if (tx->tx_rd != NULL)
2232                         LIBCFS_FREE(tx->tx_rd,
2233                                     offsetof(struct kib_rdma_desc,
2234                                              rd_frags[IBLND_MAX_RDMA_FRAGS]));
2235         }
2236
2237         CFS_FREE_PTR_ARRAY(tpo->tpo_tx_descs, pool->po_size);
2238 out:
2239         kiblnd_fini_pool(pool);
2240         CFS_FREE_PTR(tpo);
2241 }
2242
2243 static int kiblnd_tx_pool_size(struct lnet_ni *ni, int ncpts)
2244 {
2245         struct lnet_ioctl_config_o2iblnd_tunables *tunables;
2246         int ntx;
2247
2248         tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
2249         ntx = tunables->lnd_ntx / ncpts;
2250
2251         return max(IBLND_TX_POOL, ntx);
2252 }
2253
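/*
 * Create a TX pool: allocate the message pages and descriptor array, then
 * the per-descriptor fragment, work request, SGE and RDMA descriptor
 * arrays, and finally DMA-map the whole pool.
 */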
2254 static int
2255 kiblnd_create_tx_pool(struct kib_poolset *ps, int size, struct kib_pool **pp_po)
2256 {
2257         int            i;
2258         int            npg;
2259         struct kib_pool *pool;
2260         struct kib_tx_pool *tpo;
2261
2262         LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
2263         if (tpo == NULL) {
2264                 CERROR("Failed to allocate TX pool\n");
2265                 return -ENOMEM;
2266         }
2267
2268         pool = &tpo->tpo_pool;
2269         kiblnd_init_pool(ps, pool, size);
2270         tpo->tpo_tx_descs = NULL;
2271         tpo->tpo_tx_pages = NULL;
2272
2273         npg = (size * IBLND_MSG_SIZE + PAGE_SIZE - 1) / PAGE_SIZE;
2274         if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg) != 0) {
2275                 CERROR("Can't allocate tx pages: %d\n", npg);
2276                 CFS_FREE_PTR(tpo);
2277                 return -ENOMEM;
2278         }
2279
2280         LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt,
2281                          size * sizeof(struct kib_tx));
2282         if (tpo->tpo_tx_descs == NULL) {
2283                 CERROR("Can't allocate %d tx descriptors\n", size);
2284                 ps->ps_pool_destroy(pool);
2285                 return -ENOMEM;
2286         }
2287
2288         memset(tpo->tpo_tx_descs, 0, size * sizeof(struct kib_tx));
2289
2290         for (i = 0; i < size; i++) {
2291                 struct kib_tx *tx = &tpo->tpo_tx_descs[i];
2292                 int       wrq_sge = *kiblnd_tunables.kib_wrq_sge;
2293
2294                 tx->tx_pool = tpo;
2295                 if (ps->ps_net->ibn_fmr_ps != NULL) {
2296                         LIBCFS_CPT_ALLOC(tx->tx_pages,
2297                                          lnet_cpt_table(), ps->ps_cpt,
2298                                          LNET_MAX_IOV * sizeof(*tx->tx_pages));
2299                         if (tx->tx_pages == NULL)
2300                                 break;
2301                 }
2302
2303                 LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
2304                                  (1 + IBLND_MAX_RDMA_FRAGS) *
2305                                  sizeof(*tx->tx_frags));
2306                 if (tx->tx_frags == NULL)
2307                         break;
2308
2309                 sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS + 1);
2310
2311                 LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
2312                                  (1 + IBLND_MAX_RDMA_FRAGS) *
2313                                  sizeof(*tx->tx_wrq));
2314                 if (tx->tx_wrq == NULL)
2315                         break;
2316
2317                 LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt,
2318                                  (1 + IBLND_MAX_RDMA_FRAGS) * wrq_sge *
2319                                  sizeof(*tx->tx_sge));
2320                 if (tx->tx_sge == NULL)
2321                         break;
2322
2323                 LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt,
2324                                  offsetof(struct kib_rdma_desc,
2325                                           rd_frags[IBLND_MAX_RDMA_FRAGS]));
2326                 if (tx->tx_rd == NULL)
2327                         break;
2328         }
2329
2330         if (i == size) {
2331                 kiblnd_map_tx_pool(tpo);
2332                 *pp_po = pool;
2333                 return 0;
2334         }
2335
2336         ps->ps_pool_destroy(pool);
2337         return -ENOMEM;
2338 }
2339
2340 static void
2341 kiblnd_tx_init(struct kib_pool *pool, struct list_head *node)
2342 {
2343         struct kib_tx_poolset *tps = container_of(pool->po_owner,
2344                                                   struct kib_tx_poolset,
2345                                                   tps_poolset);
2346         struct kib_tx *tx  = list_entry(node, struct kib_tx, tx_list);
2347
2348         tx->tx_cookie = tps->tps_next_tx_cookie++;
2349 }
2350
2351 static void
2352 kiblnd_net_fini_pools(struct kib_net *net)
2353 {
2354         int     i;
2355
2356         cfs_cpt_for_each(i, lnet_cpt_table()) {
2357                 struct kib_tx_poolset *tps;
2358                 struct kib_fmr_poolset *fps;
2359
2360                 if (net->ibn_tx_ps != NULL) {
2361                         tps = net->ibn_tx_ps[i];
2362                         kiblnd_fini_poolset(&tps->tps_poolset);
2363                 }
2364
2365                 if (net->ibn_fmr_ps != NULL) {
2366                         fps = net->ibn_fmr_ps[i];
2367                         kiblnd_fini_fmr_poolset(fps);
2368                 }
2369         }
2370
2371         if (net->ibn_tx_ps != NULL) {
2372                 cfs_percpt_free(net->ibn_tx_ps);
2373                 net->ibn_tx_ps = NULL;
2374         }
2375
2376         if (net->ibn_fmr_ps != NULL) {
2377                 cfs_percpt_free(net->ibn_fmr_ps);
2378                 net->ibn_fmr_ps = NULL;
2379         }
2380 }
2381
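/*
 * Set up the per-CPT pools for a net: an FMR/FastReg poolset per CPT
 * (skipped when map-on-demand is disabled and a global DMA MR is used
 * instead), followed by the TX poolsets.  Everything created so far is
 * torn down again if any step fails.
 */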
2382 static int
2383 kiblnd_net_init_pools(struct kib_net *net, struct lnet_ni *ni, __u32 *cpts,
2384                       int ncpts)
2385 {
2386         struct lnet_ioctl_config_o2iblnd_tunables *tunables;
2387 #ifdef HAVE_IB_GET_DMA_MR
2388         unsigned long   flags;
2389 #endif
2390         int             cpt;
2391         int             rc;
2392         int             i;
2393
2394         tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
2395
2396 #ifdef HAVE_IB_GET_DMA_MR
2397         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2398         /*
2399          * if lnd_map_on_demand is zero then we have effectively disabled
2400          * FMR or FastReg and we're using global memory regions
2401          * exclusively.
2402          */
2403         if (!tunables->lnd_map_on_demand) {
2404                 read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
2405                                            flags);
2406                 goto create_tx_pool;
2407         }
2408
2409         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2410 #endif
2411
2412         if (tunables->lnd_fmr_pool_size < tunables->lnd_ntx / 4) {
2413                 CERROR("Can't set fmr pool size (%d) < ntx / 4 (%d)\n",
2414                        tunables->lnd_fmr_pool_size,
2415                        tunables->lnd_ntx / 4);
2416                 rc = -EINVAL;
2417                 goto failed;
2418         }
2419
2420         /* TX pool must be created later than FMR, see LU-2268
2421          * for details */
2422         LASSERT(net->ibn_tx_ps == NULL);
2423
2424         /* premapping can fail if ibd_nmr > 1, so we always create an
2425          * FMR pool and fall back to map-on-demand if premapping fails */
2426
2427         net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
2428                                            sizeof(struct kib_fmr_poolset));
2429         if (net->ibn_fmr_ps == NULL) {
2430                 CERROR("Failed to allocate FMR pool array\n");
2431                 rc = -ENOMEM;
2432                 goto failed;
2433         }
2434
2435         for (i = 0; i < ncpts; i++) {
2436                 cpt = (cpts == NULL) ? i : cpts[i];
2437                 rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, ncpts,
2438                                              net, tunables);
2439                 if (rc != 0) {
2440                         CERROR("Can't initialize FMR pool for CPT %d: %d\n",
2441                                cpt, rc);
2442                         goto failed;
2443                 }
2444         }
2445
2446         if (i > 0)
2447                 LASSERT(i == ncpts);
2448
2449 #ifdef HAVE_IB_GET_DMA_MR
2450  create_tx_pool:
2451 #endif
2452         net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
2453                                           sizeof(struct kib_tx_poolset));
2454         if (net->ibn_tx_ps == NULL) {
2455                 CERROR("Failed to allocate tx pool array\n");
2456                 rc = -ENOMEM;
2457                 goto failed;
2458         }
2459
2460         for (i = 0; i < ncpts; i++) {
2461                 cpt = (cpts == NULL) ? i : cpts[i];
2462                 rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset,
2463                                          cpt, net, "TX",
2464                                          kiblnd_tx_pool_size(ni, ncpts),
2465                                          kiblnd_create_tx_pool,
2466                                          kiblnd_destroy_tx_pool,
2467                                          kiblnd_tx_init, NULL);
2468                 if (rc != 0) {
2469                         CERROR("Can't initialize TX pool for CPT %d: %d\n",
2470                                cpt, rc);
2471                         goto failed;
2472                 }
2473         }
2474
2475         return 0;
2476  failed:
2477         kiblnd_net_fini_pools(net);
2478         LASSERT(rc != 0);
2479         return rc;
2480 }
2481
2482 static int
2483 kiblnd_port_get_attr(struct kib_hca_dev *hdev)
2484 {
2485         struct ib_port_attr *port_attr;
2486         int rc;
2487         unsigned long flags;
2488         rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
2489
2490         LIBCFS_ALLOC(port_attr, sizeof(*port_attr));
2491         if (port_attr == NULL) {
2492                 CDEBUG(D_NETERROR, "Out of memory\n");
2493                 return -ENOMEM;
2494         }
2495
2496         rc = ib_query_port(hdev->ibh_ibdev, hdev->ibh_port, port_attr);
2497
2498         write_lock_irqsave(g_lock, flags);
2499
2500         if (rc == 0)
2501                 hdev->ibh_state = port_attr->state == IB_PORT_ACTIVE
2502                                  ? IBLND_DEV_PORT_ACTIVE
2503                                  : IBLND_DEV_PORT_DOWN;
2504
2505         write_unlock_irqrestore(g_lock, flags);
2506         LIBCFS_FREE(port_attr, sizeof(*port_attr));
2507
2508         if (rc != 0) {
2509                 CDEBUG(D_NETERROR, "Failed to query IB port: %d\n", rc);
2510                 return rc;
2511         }
2512         return 0;
2513 }
2514
2515 static inline void
2516 kiblnd_set_ni_fatal_on(struct kib_hca_dev *hdev, int val)
2517 {
2518         struct kib_net  *net;
2519
2520         /* for health check */
2521         list_for_each_entry(net, &hdev->ibh_dev->ibd_nets, ibn_list) {
2522                 if (val)
2523                         CDEBUG(D_NETERROR, "Fatal device error for NI %s\n",
2524                                         libcfs_nid2str(net->ibn_ni->ni_nid));
2525                 atomic_set(&net->ibn_ni->ni_fatal_error_on, val);
2526         }
2527 }
2528
2529 void
2530 kiblnd_event_handler(struct ib_event_handler *handler, struct ib_event *event)
2531 {
2532         rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
2533         struct kib_hca_dev  *hdev;
2534         unsigned long flags;
2535
2536         hdev = container_of(handler, struct kib_hca_dev, ibh_event_handler);
2537
2538         write_lock_irqsave(g_lock, flags);
2539
2540         switch (event->event) {
2541         case IB_EVENT_DEVICE_FATAL:
2542                 CDEBUG(D_NET, "IB device fatal\n");
2543                 hdev->ibh_state = IBLND_DEV_FATAL;
2544                 kiblnd_set_ni_fatal_on(hdev, 1);
2545                 break;
2546         case IB_EVENT_PORT_ACTIVE:
2547                 CDEBUG(D_NET, "IB port active\n");
2548                 if (event->element.port_num == hdev->ibh_port) {
2549                         hdev->ibh_state = IBLND_DEV_PORT_ACTIVE;
2550                         kiblnd_set_ni_fatal_on(hdev, 0);
2551                 }
2552                 break;
2553         case IB_EVENT_PORT_ERR:
2554                 CDEBUG(D_NET, "IB port err\n");
2555                 if (event->element.port_num == hdev->ibh_port) {
2556                         hdev->ibh_state = IBLND_DEV_PORT_DOWN;
2557                         kiblnd_set_ni_fatal_on(hdev, 1);
2558                 }
2559                 break;
2560         default:
2561                 break;
2562         }
2563         write_unlock_irqrestore(g_lock, flags);
2564 }
2565
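/*
 * Query the device attributes and record which registration scheme the HCA
 * supports: FMR if the verbs are available, otherwise FastReg (noting
 * SG_GAPS support where the kernel exposes it); fail with -ENOSYS if the
 * device supports neither.
 */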
2566 static int
2567 kiblnd_hdev_get_attr(struct kib_hca_dev *hdev)
2568 {
2569         struct ib_device_attr *dev_attr;
2570         int rc = 0;
2571         int rc2 = 0;
2572
2573         /* It's safe to assume an HCA can handle a page size
2574          * matching that of the native system */
2575         hdev->ibh_page_shift = PAGE_SHIFT;
2576         hdev->ibh_page_size  = 1 << PAGE_SHIFT;
2577         hdev->ibh_page_mask  = ~((__u64)hdev->ibh_page_size - 1);
2578
2579 #ifndef HAVE_IB_DEVICE_ATTRS
2580         LIBCFS_ALLOC(dev_attr, sizeof(*dev_attr));
2581         if (dev_attr == NULL) {
2582                 CERROR("Out of memory\n");
2583                 return -ENOMEM;
2584         }
2585
2586         rc = ib_query_device(hdev->ibh_ibdev, dev_attr);
2587         if (rc != 0) {
2588                 CERROR("Failed to query IB device: %d\n", rc);
2589                 goto out_clean_attr;
2590         }
2591 #else
2592         dev_attr = &hdev->ibh_ibdev->attrs;
2593 #endif
2594
2595         hdev->ibh_mr_size = dev_attr->max_mr_size;
2596         hdev->ibh_max_qp_wr = dev_attr->max_qp_wr;
2597
2598         /* Setup device Memory Registration capabilities */
2599 #ifdef HAVE_FMR_POOL_API
2600 #ifdef HAVE_IB_DEVICE_OPS
2601         if (hdev->ibh_ibdev->ops.alloc_fmr &&
2602             hdev->ibh_ibdev->ops.dealloc_fmr &&
2603             hdev->ibh_ibdev->ops.map_phys_fmr &&
2604             hdev->ibh_ibdev->ops.unmap_fmr) {
2605 #else
2606         if (hdev->ibh_ibdev->alloc_fmr &&
2607             hdev->ibh_ibdev->dealloc_fmr &&
2608             hdev->ibh_ibdev->map_phys_fmr &&
2609             hdev->ibh_ibdev->unmap_fmr) {
2610 #endif
2611                 LCONSOLE_INFO("Using FMR for registration\n");
2612                 hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FMR_ENABLED;
2613         } else
2614 #endif /* HAVE_FMR_POOL_API */
2615         if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
2616                 LCONSOLE_INFO("Using FastReg for registration\n");
2617                 hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FASTREG_ENABLED;
2618 #ifndef HAVE_IB_ALLOC_FAST_REG_MR
2619 #ifdef IB_DEVICE_SG_GAPS_REG
2620                 if (dev_attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
2621                         hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT;
2622 #endif
2623 #endif
2624         } else {
2625                 rc = -ENOSYS;
2626         }
2627
2628         rc2 = kiblnd_port_get_attr(hdev);
2629         if (rc2 != 0)
2630                 return rc2;
2631
2632         if (rc != 0)
2633                 rc = -EINVAL;
2634
2635 #ifndef HAVE_IB_DEVICE_ATTRS
2636 out_clean_attr:
2637         LIBCFS_FREE(dev_attr, sizeof(*dev_attr));
2638 #endif
2639
2640         if (rc == -ENOSYS)
2641                 CERROR("IB device does not support FMRs or FastRegs, can't "
2642                        "register memory: %d\n", rc);
2643         else if (rc == -EINVAL)
2644                 CERROR("Invalid mr size: %#llx\n", hdev->ibh_mr_size);
2645         return rc;
2646 }
2647
2648 #ifdef HAVE_IB_GET_DMA_MR
2649 static void
2650 kiblnd_hdev_cleanup_mrs(struct kib_hca_dev *hdev)
2651 {
2652         if (hdev->ibh_mrs == NULL)
2653                 return;
2654
2655         ib_dereg_mr(hdev->ibh_mrs);
2656
2657         hdev->ibh_mrs = NULL;
2658 }
2659 #endif
2660
2661 void
2662 kiblnd_hdev_destroy(struct kib_hca_dev *hdev)
2663 {
2664         if (hdev->ibh_event_handler.device != NULL)
2665                 ib_unregister_event_handler(&hdev->ibh_event_handler);
2666
2667 #ifdef HAVE_IB_GET_DMA_MR
2668         kiblnd_hdev_cleanup_mrs(hdev);
2669 #endif
2670
2671         if (hdev->ibh_pd != NULL)
2672                 ib_dealloc_pd(hdev->ibh_pd);
2673
2674         if (hdev->ibh_cmid != NULL)
2675                 rdma_destroy_id(hdev->ibh_cmid);
2676
2677         LIBCFS_FREE(hdev, sizeof(*hdev));
2678 }
2679
2680 #ifdef HAVE_IB_GET_DMA_MR
2681 static int
2682 kiblnd_hdev_setup_mrs(struct kib_hca_dev *hdev)
2683 {
2684         struct ib_mr *mr;
2685         int           acflags = IB_ACCESS_LOCAL_WRITE |
2686                                 IB_ACCESS_REMOTE_WRITE;
2687
2688         mr = ib_get_dma_mr(hdev->ibh_pd, acflags);
2689         if (IS_ERR(mr)) {
2690                 CERROR("Failed ib_get_dma_mr: %ld\n", PTR_ERR(mr));
2691                 kiblnd_hdev_cleanup_mrs(hdev);
2692                 return PTR_ERR(mr);
2693         }
2694
2695         hdev->ibh_mrs = mr;
2696
2697         return 0;
2698 }
2699 #endif
2700
2701 static int
2702 kiblnd_dummy_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
2703 {       /* DUMMY */
2704         return 0;
2705 }
2706
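/*
 * Return > 0 if the IB device behind this interface has changed (i.e. an
 * ib-bonding failover has happened), 0 if it has not, or a negative errno
 * if the probe itself failed.
 */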
2707 static int
2708 kiblnd_dev_need_failover(struct kib_dev *dev, struct net *ns)
2709 {
2710         struct rdma_cm_id  *cmid;
2711         struct sockaddr_in  srcaddr;
2712         struct sockaddr_in  dstaddr;
2713         int                 rc;
2714
2715         if (dev->ibd_hdev == NULL || /* initializing */
2716             dev->ibd_hdev->ibh_cmid == NULL || /* listener is dead */
2717             *kiblnd_tunables.kib_dev_failover > 1) /* debugging */
2718                 return 1;
2719
2720         /* XXX: it's ugly, but there is no better way to detect
2721          * ib-bonding HCA failover because:
2722          *
2723          * a. there is no reliable CM event for HCA failover...
2724          * b. there is no OFED API to get the ib_device for a net_device...
2725          *
2726          * We have only two choices at this point:
2727          *
2728          * a. rdma_bind_addr(), which conflicts with the listener cmid
2729          * b. rdma_resolve_addr() to a zero address */
2730         cmid = kiblnd_rdma_create_id(ns, kiblnd_dummy_callback, dev,
2731                                      RDMA_PS_TCP, IB_QPT_RC);
2732         if (IS_ERR(cmid)) {
2733                 rc = PTR_ERR(cmid);
2734                 CERROR("Failed to create cmid for failover: %d\n", rc);
2735                 return rc;
2736         }
2737
2738         memset(&srcaddr, 0, sizeof(srcaddr));
2739         srcaddr.sin_family      = AF_INET;
2740         srcaddr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
2741
2742         memset(&dstaddr, 0, sizeof(dstaddr));
2743         dstaddr.sin_family = AF_INET;
2744         rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr,
2745                                (struct sockaddr *)&dstaddr, 1);
2746         if (rc != 0 || cmid->device == NULL) {
2747                 CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
2748                        dev->ibd_ifname, &dev->ibd_ifip,
2749                        cmid->device, rc);
2750                 rdma_destroy_id(cmid);
2751                 return rc;
2752         }
2753
2754         rc = dev->ibd_hdev->ibh_ibdev != cmid->device; /* true for failover */
2755         rdma_destroy_id(cmid);
2756         return rc;
2757 }
2758
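/*
 * Perform the failover: drop the old listener, bind a new cmid to whichever
 * device now backs the interface, allocate a fresh PD and HCA descriptor,
 * swap it in under the global lock, and fail the existing pools so that new
 * ones are created against the new device on demand.
 */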
2759 int
2760 kiblnd_dev_failover(struct kib_dev *dev, struct net *ns)
2761 {
2762         LIST_HEAD(zombie_tpo);
2763         LIST_HEAD(zombie_ppo);
2764         LIST_HEAD(zombie_fpo);
2765         struct rdma_cm_id  *cmid  = NULL;
2766         struct kib_hca_dev *hdev  = NULL;
2767         struct kib_hca_dev *old;
2768         struct ib_pd       *pd;
2769         struct kib_net *net;
2770         struct sockaddr_in  addr;
2771         unsigned long       flags;
2772         int                 rc = 0;
2773         int                 i;
2774
2775         LASSERT (*kiblnd_tunables.kib_dev_failover > 1 ||
2776                  dev->ibd_can_failover ||
2777                  dev->ibd_hdev == NULL);
2778
2779         rc = kiblnd_dev_need_failover(dev, ns);
2780         if (rc <= 0)
2781                 goto out;
2782
2783         if (dev->ibd_hdev != NULL &&
2784             dev->ibd_hdev->ibh_cmid != NULL) {
2785                 /* XXX it's not ideal to close the old listener here,
2786                  * because creating the new listener may still fail.
2787                  * But we have to close it now, otherwise rdma_bind_addr()
2788                  * will return EADDRINUSE. */
2789                 write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2790
2791                 cmid = dev->ibd_hdev->ibh_cmid;
2792                 /* make the next call to kiblnd_dev_need_failover()
2793                  * return 1 for this device */
2794                 dev->ibd_hdev->ibh_cmid  = NULL;
2795                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2796
2797                 rdma_destroy_id(cmid);
2798         }
2799
2800         cmid = kiblnd_rdma_create_id(ns, kiblnd_cm_callback, dev, RDMA_PS_TCP,
2801                                      IB_QPT_RC);
2802         if (IS_ERR(cmid)) {
2803                 rc = PTR_ERR(cmid);
2804                 CERROR("Failed to create cmid for failover: %d\n", rc);
2805                 goto out;
2806         }
2807
2808         memset(&addr, 0, sizeof(addr));
2809         addr.sin_family      = AF_INET;
2810         addr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
2811         addr.sin_port        = htons(*kiblnd_tunables.kib_service);
2812
2813         /* Bind to failover device or port */
2814         rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr);
2815         if (rc != 0 || cmid->device == NULL) {
2816                 CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
2817                        dev->ibd_ifname, &dev->ibd_ifip,
2818                        cmid->device, rc);
2819                 rdma_destroy_id(cmid);
2820                 goto out;
2821         }
2822
2823         LIBCFS_ALLOC(hdev, sizeof(*hdev));
2824         if (hdev == NULL) {
2825                 CERROR("Failed to allocate kib_hca_dev\n");
2826                 rdma_destroy_id(cmid);
2827                 rc = -ENOMEM;
2828                 goto out;
2829         }
2830
2831         atomic_set(&hdev->ibh_ref, 1);
2832         hdev->ibh_dev   = dev;
2833         hdev->ibh_cmid  = cmid;
2834         hdev->ibh_ibdev = cmid->device;
2835         hdev->ibh_port  = cmid->port_num;
2836
2837 #ifdef HAVE_IB_ALLOC_PD_2ARGS
2838         pd = ib_alloc_pd(cmid->device, 0);
2839 #else
2840         pd = ib_alloc_pd(cmid->device);
2841 #endif
2842         if (IS_ERR(pd)) {
2843                 rc = PTR_ERR(pd);
2844                 CERROR("Can't allocate PD: %d\n", rc);
2845                 goto out;
2846         }
2847
2848         hdev->ibh_pd = pd;
2849
2850         rc = rdma_listen(cmid, 0);
2851         if (rc != 0) {
2852                 CERROR("Can't start new listener: %d\n", rc);
2853                 goto out;
2854         }
2855
2856         rc = kiblnd_hdev_get_attr(hdev);
2857         if (rc != 0) {
2858                 CERROR("Can't get device attributes: %d\n", rc);
2859                 goto out;
2860         }
2861
2862 #ifdef HAVE_IB_GET_DMA_MR
2863         rc = kiblnd_hdev_setup_mrs(hdev);
2864         if (rc != 0) {
2865                 CERROR("Can't setup device: %d\n", rc);
2866                 goto out;
2867         }
2868 #endif
2869
2870         INIT_IB_EVENT_HANDLER(&hdev->ibh_event_handler,
2871                                 hdev->ibh_ibdev, kiblnd_event_handler);
2872         ib_register_event_handler(&hdev->ibh_event_handler);
2873
2874         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2875
2876         old = dev->ibd_hdev;
2877         dev->ibd_hdev = hdev;   /* take over the refcount */
2878         hdev = old;
2879
2880         list_for_each_entry(net, &dev->ibd_nets, ibn_list) {
2881                 cfs_cpt_for_each(i, lnet_cpt_table()) {
2882                         kiblnd_fail_poolset(&net->ibn_tx_ps[i]->tps_poolset,
2883                                             &zombie_tpo);
2884
2885                         if (net->ibn_fmr_ps != NULL)
2886                                 kiblnd_fail_fmr_poolset(net->ibn_fmr_ps[i],
2887                                                         &zombie_fpo);
2888                 }
2889         }
2890
2891         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2892  out:
2893         if (!list_empty(&zombie_tpo))
2894                 kiblnd_destroy_pool_list(&zombie_tpo);
2895         if (!list_empty(&zombie_ppo))
2896                 kiblnd_destroy_pool_list(&zombie_ppo);
2897         if (!list_empty(&zombie_fpo))
2898                 kiblnd_destroy_fmr_pool_list(&zombie_fpo);
2899         if (hdev != NULL)
2900                 kiblnd_hdev_decref(hdev);
2901
2902         if (rc != 0)
2903                 dev->ibd_failed_failover++;
2904         else
2905                 dev->ibd_failed_failover = 0;
2906
2907         return rc;
2908 }
2909
2910 void
2911 kiblnd_destroy_dev(struct kib_dev *dev)
2912 {
2913         LASSERT(dev->ibd_nnets == 0);
2914         LASSERT(list_empty(&dev->ibd_nets));
2915
2916         list_del(&dev->ibd_fail_list);
2917         list_del(&dev->ibd_list);
2918
2919         if (dev->ibd_hdev != NULL)
2920                 kiblnd_hdev_decref(dev->ibd_hdev);
2921
2922         LIBCFS_FREE(dev, sizeof(*dev));
2923 }
2924
2925 static void
2926 kiblnd_base_shutdown(void)
2927 {
2928         struct kib_sched_info *sched;
2929         struct kib_peer_ni *peer_ni;
2930         int i;
2931
2932         LASSERT(list_empty(&kiblnd_data.kib_devs));
2933
2934         CDEBUG(D_MALLOC, "before LND base cleanup: kmem %lld\n",
2935                libcfs_kmem_read());
2936
2937         switch (kiblnd_data.kib_init) {
2938         default:
2939                 LBUG();
2940
2941         case IBLND_INIT_ALL:
2942         case IBLND_INIT_DATA:
2943                 hash_for_each(kiblnd_data.kib_peers, i, peer_ni, ibp_list)
2944                         LASSERT(0);
2945                 LASSERT(list_empty(&kiblnd_data.kib_connd_zombies));
2946                 LASSERT(list_empty(&kiblnd_data.kib_connd_conns));
2947                 LASSERT(list_empty(&kiblnd_data.kib_reconn_list));
2948                 LASSERT(list_empty(&kiblnd_data.kib_reconn_wait));
2949
2950                 /* flag threads to terminate; wake and wait for them to die */
2951                 kiblnd_data.kib_shutdown = 1;
2952
2953                 /* NB: we really want to stop scheduler threads net by net
2954                  * instead of for the whole module; this should be improved
2955                  * once LNet supports dynamic configuration.
2956                  */
2957                 cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds)
2958                         wake_up_all(&sched->ibs_waitq);
2959
2960                 wake_up(&kiblnd_data.kib_connd_waitq);
2961                 wake_up(&kiblnd_data.kib_failover_waitq);
2962
2963                 wait_var_event_warning(&kiblnd_data.kib_nthreads,
2964                                        !atomic_read(&kiblnd_data.kib_nthreads),
2965                                        "Waiting for %d threads to terminate\n",
2966                                        atomic_read(&kiblnd_data.kib_nthreads));
2967                 /* fall through */
2968
2969         case IBLND_INIT_NOTHING:
2970                 break;
2971         }
2972
2973         if (kiblnd_data.kib_scheds != NULL)
2974                 cfs_percpt_free(kiblnd_data.kib_scheds);
2975
2976         CDEBUG(D_MALLOC, "after LND base cleanup: kmem %lld\n",
2977                libcfs_kmem_read());
2978
2979         kiblnd_data.kib_init = IBLND_INIT_NOTHING;
2980         module_put(THIS_MODULE);
2981 }
2982
2983 static void
2984 kiblnd_shutdown(struct lnet_ni *ni)
2985 {
2986         struct kib_net *net = ni->ni_data;
2987         rwlock_t     *g_lock = &kiblnd_data.kib_global_lock;
2988         unsigned long     flags;
2989
2990         LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL);
2991
2992         if (net == NULL)
2993                 goto out;
2994
2995         CDEBUG(D_MALLOC, "before LND net cleanup: kmem %lld\n",
2996                libcfs_kmem_read());
2997
2998         write_lock_irqsave(g_lock, flags);
2999         net->ibn_shutdown = 1;
3000         write_unlock_irqrestore(g_lock, flags);
3001
3002         switch (net->ibn_init) {
3003         default:
3004                 LBUG();
3005
3006         case IBLND_INIT_ALL:
3007                 /* nuke all existing peers within this net */
3008                 kiblnd_del_peer(ni, LNET_NID_ANY);
3009
3010                 /* Wait for all peer_ni state to clean up */
3011                 wait_var_event_warning(&net->ibn_npeers,
3012                                        atomic_read(&net->ibn_npeers) == 0,
3013                                        "%s: waiting for %d peers to disconnect\n",
3014                                        libcfs_nid2str(ni->ni_nid),
3015                                        atomic_read(&net->ibn_npeers));
3016
3017                 kiblnd_net_fini_pools(net);
3018
3019                 write_lock_irqsave(g_lock, flags);
3020                 LASSERT(net->ibn_dev->ibd_nnets > 0);
3021                 net->ibn_dev->ibd_nnets--;
3022                 list_del(&net->ibn_list);
3023                 write_unlock_irqrestore(g_lock, flags);
3024
3025                 /* fall through */
3026
3027         case IBLND_INIT_NOTHING:
3028                 LASSERT(atomic_read(&net->ibn_nconns) == 0);
3029
3030                 if (net->ibn_dev != NULL &&
3031                     net->ibn_dev->ibd_nnets == 0)
3032                         kiblnd_destroy_dev(net->ibn_dev);
3033
3034                 break;
3035         }
3036
3037         CDEBUG(D_MALLOC, "after LND net cleanup: kmem %lld\n",
3038                libcfs_kmem_read());
3039
3040         net->ibn_init = IBLND_INIT_NOTHING;
3041         ni->ni_data = NULL;
3042
3043         LIBCFS_FREE(net, sizeof(*net));
3044
3045 out:
3046         if (list_empty(&kiblnd_data.kib_devs))
3047                 kiblnd_base_shutdown();
3048 }
3049
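/* One-time module-wide initialisation: set up the global lists, locks and
 * peer hash, allocate and size the per-CPT scheduler structures, then start
 * the connd thread and, if device failover is enabled, the failover thread. */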
3050 static int
3051 kiblnd_base_startup(struct net *ns)
3052 {
3053         struct kib_sched_info *sched;
3054         int rc;
3055         int i;
3056
3057         LASSERT(kiblnd_data.kib_init == IBLND_INIT_NOTHING);
3058
3059         if (!try_module_get(THIS_MODULE))
3060                 goto failed;
3061
3062         memset(&kiblnd_data, 0, sizeof(kiblnd_data)); /* zero pointers, flags etc */
3063
3064         rwlock_init(&kiblnd_data.kib_global_lock);
3065
3066         INIT_LIST_HEAD(&kiblnd_data.kib_devs);
3067         INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs);
3068
3069         hash_init(kiblnd_data.kib_peers);
3070
3071         spin_lock_init(&kiblnd_data.kib_connd_lock);
3072         INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
3073         INIT_LIST_HEAD(&kiblnd_data.kib_connd_waits);
3074         INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
3075         INIT_LIST_HEAD(&kiblnd_data.kib_reconn_list);
3076         INIT_LIST_HEAD(&kiblnd_data.kib_reconn_wait);
3077
3078         init_waitqueue_head(&kiblnd_data.kib_connd_waitq);
3079         init_waitqueue_head(&kiblnd_data.kib_failover_waitq);
3080
3081         kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(),
3082                                                   sizeof(*sched));
3083         if (kiblnd_data.kib_scheds == NULL)
3084                 goto failed;
3085
3086         cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
3087                 int     nthrs;
3088
3089                 spin_lock_init(&sched->ibs_lock);
3090                 INIT_LIST_HEAD(&sched->ibs_conns);
3091                 init_waitqueue_head(&sched->ibs_waitq);
3092
3093                 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
3094                 if (*kiblnd_tunables.kib_nscheds > 0) {
3095                         nthrs = min(nthrs, *kiblnd_tunables.kib_nscheds);
3096                 } else {
3097                         /* use at most half of the CPUs; the other half
3098                          * is reserved for upper-layer modules */
3099                         nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
3100                 }
3101
3102                 sched->ibs_nthreads_max = nthrs;
3103                 sched->ibs_cpt = i;
3104         }
3105
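        /* QP attribute used when a connection's QP must be moved to the
         * error state (e.g. to flush its work requests during teardown). */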
3106         kiblnd_data.kib_error_qpa.qp_state = IB_QPS_ERR;
3107
3108         /* lists/ptrs/locks initialised */
3109         kiblnd_data.kib_init = IBLND_INIT_DATA;
3110         /*****************************************************/
3111
3112         rc = kiblnd_thread_start(kiblnd_connd, NULL, "kiblnd_connd");
3113         if (rc != 0) {
3114                 CERROR("Can't spawn o2iblnd connd: %d\n", rc);
3115                 goto failed;
3116         }
3117
3118         if (*kiblnd_tunables.kib_dev_failover != 0)
3119                 rc = kiblnd_thread_start(kiblnd_failover_thread, ns,
3120                                          "kiblnd_failover");
3121
3122         if (rc != 0) {
3123                 CERROR("Can't spawn o2iblnd failover thread: %d\n", rc);
3124                 goto failed;
3125         }
3126
3127         /* flag everything initialised */
3128         kiblnd_data.kib_init = IBLND_INIT_ALL;
3129         /*****************************************************/
3130
3131         return 0;
3132
3133  failed:
3134         kiblnd_base_shutdown();
3135         return -ENETDOWN;
3136 }
3137
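/* Start scheduler threads for one CPT: spawn the full complement on first
 * use (bounded by the nscheds tunable, or by half the CPT's CPUs and
 * IBLND_N_SCHED_HIGH), or at most one extra thread when another interface
 * begins using this CPT. */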
3138 static int
3139 kiblnd_start_schedulers(struct kib_sched_info *sched)
3140 {
3141         int     rc = 0;
3142         int     nthrs;
3143         int     i;
3144
3145         if (sched->ibs_nthreads == 0) {
3146                 if (*kiblnd_tunables.kib_nscheds > 0) {
3147                         nthrs = sched->ibs_nthreads_max;
3148                 } else {
3149                         nthrs = cfs_cpt_weight(lnet_cpt_table(),
3150                                                sched->ibs_cpt);
3151                         nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
3152                         nthrs = min(IBLND_N_SCHED_HIGH, nthrs);
3153                 }
3154         } else {
3155                 LASSERT(sched->ibs_nthreads <= sched->ibs_nthreads_max);
3156                 /* start one extra thread if a new interface was added */
3157                 nthrs = (sched->ibs_nthreads < sched->ibs_nthreads_max);
3158         }
3159
3160         for (i = 0; i < nthrs; i++) {
3161                 long    id;
3162                 char    name[20];
3163                 id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i);
3164                 snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld",
3165                          KIB_THREAD_CPT(id), KIB_THREAD_TID(id));
3166                 rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name);
3167                 if (rc == 0)
3168                         continue;
3169
3170                 CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
3171                        sched->ibs_cpt, sched->ibs_nthreads + i, rc);
3172                 break;
3173         }
3174
3175         sched->ibs_nthreads += i;
3176         return rc;
3177 }
3178
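/* Ensure scheduler threads are running on every CPT this device will use;
 * CPTs that already have threads are only topped up when the device is new. */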
3179 static int kiblnd_dev_start_threads(struct kib_dev *dev, bool newdev, u32 *cpts,
3180                                     int ncpts)
3181 {
3182         int     cpt;
3183         int     rc;
3184         int     i;
3185
3186         for (i = 0; i < ncpts; i++) {
3187                 struct kib_sched_info *sched;
3188
3189                 cpt = (cpts == NULL) ? i : cpts[i];
3190                 sched = kiblnd_data.kib_scheds[cpt];
3191
3192                 if (!newdev && sched->ibs_nthreads > 0)
3193                         continue;
3194
3195                 rc = kiblnd_start_schedulers(sched);
3196                 if (rc != 0) {
3197                         CERROR("Failed to start scheduler threads for %s\n",
3198                                dev->ibd_ifname);
3199                         return rc;
3200                 }
3201         }
3202         return 0;
3203 }
3204
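/* Look up an existing kib_dev by interface name.  An exact match is
 * preferred; otherwise the first device whose name matches once any ':'
 * alias suffix is stripped from either name is returned. */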
3205 static struct kib_dev *
3206 kiblnd_dev_search(char *ifname)
3207 {
3208         struct kib_dev *alias = NULL;
3209         struct kib_dev *dev;
3210         char            *colon;
3211         char            *colon2;
3212
3213         colon = strchr(ifname, ':');
3214         list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
3215                 if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
3216                         return dev;
3217
3218                 if (alias != NULL)
3219                         continue;
3220
3221                 colon2 = strchr(dev->ibd_ifname, ':');
3222                 if (colon != NULL)
3223                         *colon = 0;
3224                 if (colon2 != NULL)
3225                         *colon2 = 0;
3226
3227                 if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
3228                         alias = dev;
3229
3230                 if (colon != NULL)
3231                         *colon = ':';
3232                 if (colon2 != NULL)
3233                         *colon2 = ':';
3234         }
3235         return alias;
3236 }
3237
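/* Bring up an NI on this LND: initialise the module-wide state on first
 * use, then allocate the per-net structure that hangs off ni->ni_data. */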
3238 static int
3239 kiblnd_startup(struct lnet_ni *ni)
3240 {
3241         char *ifname = NULL;
3242         struct lnet_inetdev *ifaces = NULL;
3243         struct kib_dev *ibdev = NULL;
3244         struct kib_net *net = NULL;
3245         unsigned long flags;
3246         int rc;
3247         int i;
3248         bool newdev;
3249
3250         LASSERT(ni->ni_net->net_lnd == &the_o2iblnd);
3251
3252         if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
3253                 rc = kiblnd_base_startup(ni->ni_net_ns);
3254                 if (rc != 0)
3255                         return rc;
3256         }
3257
3258         LIBCFS_ALLOC(net, sizeof(*net));
3259         ni->ni_data = net;
3260         if (net == NULL) {