/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/o2iblnd/o2iblnd.c
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#include <asm/page.h>
#include "o2iblnd.h"

static lnd_t the_o2iblnd;

kib_data_t              kiblnd_data;

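/* Simple software checksum over the raw message bytes: rotate the running
 * sum left by one bit, then add each byte.  A zero result is mapped to 1,
 * since ibm_cksum == 0 means "no checksum" on the wire. */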
static __u32
kiblnd_cksum (void *ptr, int nob)
{
        char  *c  = ptr;
        __u32  sum = 0;

        while (nob-- > 0)
                sum = ((sum << 1) | (sum >> 31)) + *c++;

        /* ensure I don't return 0 (== no checksum) */
        return (sum == 0) ? 1 : sum;
}

static char *
kiblnd_msgtype2str(int type)
{
        switch (type) {
        case IBLND_MSG_CONNREQ:
                return "CONNREQ";

        case IBLND_MSG_CONNACK:
                return "CONNACK";

        case IBLND_MSG_NOOP:
                return "NOOP";

        case IBLND_MSG_IMMEDIATE:
                return "IMMEDIATE";

        case IBLND_MSG_PUT_REQ:
                return "PUT_REQ";

        case IBLND_MSG_PUT_NAK:
                return "PUT_NAK";

        case IBLND_MSG_PUT_ACK:
                return "PUT_ACK";

        case IBLND_MSG_PUT_DONE:
                return "PUT_DONE";

        case IBLND_MSG_GET_REQ:
                return "GET_REQ";

        case IBLND_MSG_GET_DONE:
                return "GET_DONE";

        default:
                return "???";
        }
}

static int
kiblnd_msgtype2size(int type)
{
        const int hdr_size = offsetof(kib_msg_t, ibm_u);

        switch (type) {
        case IBLND_MSG_CONNREQ:
        case IBLND_MSG_CONNACK:
                return hdr_size + sizeof(kib_connparams_t);

        case IBLND_MSG_NOOP:
                return hdr_size;

        case IBLND_MSG_IMMEDIATE:
                return offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]);

        case IBLND_MSG_PUT_REQ:
                return hdr_size + sizeof(kib_putreq_msg_t);

        case IBLND_MSG_PUT_ACK:
                return hdr_size + sizeof(kib_putack_msg_t);

        case IBLND_MSG_GET_REQ:
                return hdr_size + sizeof(kib_get_msg_t);

        case IBLND_MSG_PUT_NAK:
        case IBLND_MSG_PUT_DONE:
        case IBLND_MSG_GET_DONE:
                return hdr_size + sizeof(kib_completion_msg_t);
        default:
                return -1;
        }
}

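/* Validate (and, for a peer of opposite endianness, byte-swap) the RDMA
 * descriptor carried by GET_REQ and PUT_ACK messages.  The fragment count
 * is range-checked before it is used to compute the expected message size,
 * so a corrupt count can't index past rd_frags[] or claim more data than
 * the message actually holds.  Returns non-zero on any inconsistency. */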
static int
kiblnd_unpack_rd(kib_msg_t *msg, int flip)
{
        kib_rdma_desc_t   *rd;
        int                nob;
        int                n;
        int                i;

        LASSERT (msg->ibm_type == IBLND_MSG_GET_REQ ||
                 msg->ibm_type == IBLND_MSG_PUT_ACK);

        rd = msg->ibm_type == IBLND_MSG_GET_REQ ?
                              &msg->ibm_u.get.ibgm_rd :
                              &msg->ibm_u.putack.ibpam_rd;

        if (flip) {
                __swab32s(&rd->rd_key);
                __swab32s(&rd->rd_nfrags);
        }

        n = rd->rd_nfrags;

        if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) {
                CERROR("Bad nfrags: %d, should be 0 < n <= %d\n",
                       n, IBLND_MAX_RDMA_FRAGS);
                return 1;
        }

        nob = offsetof (kib_msg_t, ibm_u) +
              kiblnd_rd_msg_size(rd, msg->ibm_type, n);

        if (msg->ibm_nob < nob) {
                CERROR("Short %s: %d(%d)\n",
                       kiblnd_msgtype2str(msg->ibm_type), msg->ibm_nob, nob);
                return 1;
        }

        if (!flip)
                return 0;

        for (i = 0; i < n; i++) {
                __swab32s(&rd->rd_frags[i].rf_nob);
                __swab64s(&rd->rd_frags[i].rf_addr);
        }

        return 0;
}

void
kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg, int version,
                 int credits, lnet_nid_t dstnid, __u64 dststamp)
{
        kib_net_t *net = ni->ni_data;

        /* CAVEAT EMPTOR! all message fields not set here should have been
         * initialised previously. */
        msg->ibm_magic    = IBLND_MSG_MAGIC;
        msg->ibm_version  = version;
        /*   ibm_type */
        msg->ibm_credits  = credits;
        /*   ibm_nob */
        msg->ibm_cksum    = 0;
        msg->ibm_srcnid   = ni->ni_nid;
        msg->ibm_srcstamp = net->ibn_incarnation;
        msg->ibm_dstnid   = dstnid;
        msg->ibm_dststamp = dststamp;

        if (*kiblnd_tunables.kib_cksum) {
                /* NB ibm_cksum zero while computing cksum */
                msg->ibm_cksum = kiblnd_cksum(msg, msg->ibm_nob);
        }
}

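/* Validate an incoming message in stages, cheapest first: magic (which also
 * reveals the sender's endianness), protocol version, header length, total
 * length, then the optional checksum -- computed with ibm_cksum zeroed and
 * before any field is byte-swapped, mirroring kiblnd_pack_msg() above.
 * Only then are the header and the type-specific payload flipped. */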
int
kiblnd_unpack_msg(kib_msg_t *msg, int nob)
{
        const int hdr_size = offsetof(kib_msg_t, ibm_u);
        __u32     msg_cksum;
        __u16     version;
        int       msg_nob;
        int       flip;

        /* 6 bytes are enough to have received magic + version */
        if (nob < 6) {
                CERROR("Short message: %d\n", nob);
                return -EPROTO;
        }

        if (msg->ibm_magic == IBLND_MSG_MAGIC) {
                flip = 0;
        } else if (msg->ibm_magic == __swab32(IBLND_MSG_MAGIC)) {
                flip = 1;
        } else {
                CERROR("Bad magic: %08x\n", msg->ibm_magic);
                return -EPROTO;
        }

        version = flip ? __swab16(msg->ibm_version) : msg->ibm_version;
        if (version != IBLND_MSG_VERSION &&
            version != IBLND_MSG_VERSION_1) {
                CERROR("Bad version: %x\n", version);
                return -EPROTO;
        }

        if (nob < hdr_size) {
                CERROR("Short message: %d\n", nob);
                return -EPROTO;
        }

        msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob;
        if (msg_nob > nob) {
                CERROR("Short message: got %d, wanted %d\n", nob, msg_nob);
                return -EPROTO;
        }

        /* checksum must be computed with ibm_cksum zero and BEFORE anything
         * gets flipped */
        msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
        msg->ibm_cksum = 0;
        if (msg_cksum != 0 &&
            msg_cksum != kiblnd_cksum(msg, msg_nob)) {
                CERROR("Bad checksum\n");
                return -EPROTO;
        }

        msg->ibm_cksum = msg_cksum;

        if (flip) {
                /* leave magic unflipped as a clue to peer endianness */
                msg->ibm_version = version;
                CLASSERT (sizeof(msg->ibm_type) == 1);
                CLASSERT (sizeof(msg->ibm_credits) == 1);
                msg->ibm_nob     = msg_nob;
                __swab64s(&msg->ibm_srcnid);
                __swab64s(&msg->ibm_srcstamp);
                __swab64s(&msg->ibm_dstnid);
                __swab64s(&msg->ibm_dststamp);
        }

        if (msg->ibm_srcnid == LNET_NID_ANY) {
                CERROR("Bad src nid: %s\n", libcfs_nid2str(msg->ibm_srcnid));
                return -EPROTO;
        }

        if (msg_nob < kiblnd_msgtype2size(msg->ibm_type)) {
                CERROR("Short %s: %d(%d)\n", kiblnd_msgtype2str(msg->ibm_type),
                       msg_nob, kiblnd_msgtype2size(msg->ibm_type));
                return -EPROTO;
        }

        switch (msg->ibm_type) {
        default:
                CERROR("Unknown message type %x\n", msg->ibm_type);
                return -EPROTO;

        case IBLND_MSG_NOOP:
        case IBLND_MSG_IMMEDIATE:
        case IBLND_MSG_PUT_REQ:
                break;

        case IBLND_MSG_PUT_ACK:
        case IBLND_MSG_GET_REQ:
                if (kiblnd_unpack_rd(msg, flip))
                        return -EPROTO;
                break;

        case IBLND_MSG_PUT_NAK:
        case IBLND_MSG_PUT_DONE:
        case IBLND_MSG_GET_DONE:
                if (flip)
                        __swab32s(&msg->ibm_u.completion.ibcm_status);
                break;

        case IBLND_MSG_CONNREQ:
        case IBLND_MSG_CONNACK:
                if (flip) {
                        __swab16s(&msg->ibm_u.connparams.ibcp_queue_depth);
                        __swab16s(&msg->ibm_u.connparams.ibcp_max_frags);
                        __swab32s(&msg->ibm_u.connparams.ibcp_max_msg_size);
                }
                break;
        }
        return 0;
}

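/* Allocate and initialise a peer on the CPT its NID hashes to.  The new
 * peer starts with one reference (the caller's) and is not yet in the
 * peer table; net->ibn_npeers is bumped under the global lock so shutdown
 * can rely on an accurate count. */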
int
kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
{
        kib_peer_t      *peer;
        kib_net_t       *net = ni->ni_data;
        int             cpt = lnet_cpt_of_nid(nid);
        unsigned long   flags;

        LASSERT(net != NULL);
        LASSERT(nid != LNET_NID_ANY);

        LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
        if (peer == NULL) {
                CERROR("Cannot allocate peer\n");
                return -ENOMEM;
        }

        peer->ibp_ni = ni;
        peer->ibp_nid = nid;
        peer->ibp_error = 0;
        peer->ibp_last_alive = 0;
        peer->ibp_max_frags = kiblnd_cfg_rdma_frags(peer->ibp_ni);
        peer->ibp_queue_depth = ni->ni_peertxcredits;
        atomic_set(&peer->ibp_refcount, 1);     /* 1 ref for caller */

        INIT_LIST_HEAD(&peer->ibp_list);        /* not in the peer table yet */
        INIT_LIST_HEAD(&peer->ibp_conns);
        INIT_LIST_HEAD(&peer->ibp_tx_queue);

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        /* always called with a ref on ni, which prevents ni being shutdown */
        LASSERT(net->ibn_shutdown == 0);

        /* npeers only grows with the global lock held */
        atomic_inc(&net->ibn_npeers);

        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        *peerp = peer;
        return 0;
}

void
kiblnd_destroy_peer (kib_peer_t *peer)
{
        kib_net_t *net = peer->ibp_ni->ni_data;

        LASSERT(net != NULL);
        LASSERT (atomic_read(&peer->ibp_refcount) == 0);
        LASSERT(!kiblnd_peer_active(peer));
        LASSERT(kiblnd_peer_idle(peer));
        LASSERT(list_empty(&peer->ibp_tx_queue));

        LIBCFS_FREE(peer, sizeof(*peer));

        /* NB a peer's connections keep a reference on their peer until
         * they are destroyed, so we can be assured that _all_ state to do
         * with this peer has been cleaned up when its refcount drops to
         * zero. */
        atomic_dec(&net->ibn_npeers);
}

kib_peer_t *
kiblnd_find_peer_locked (lnet_nid_t nid)
{
        /* the caller is responsible for accounting the additional reference
         * that this creates */
        struct list_head        *peer_list = kiblnd_nid2peerlist(nid);
        struct list_head        *tmp;
        kib_peer_t              *peer;

        list_for_each(tmp, peer_list) {

                peer = list_entry(tmp, kib_peer_t, ibp_list);
                LASSERT(!kiblnd_peer_idle(peer));

                if (peer->ibp_nid != nid)
                        continue;

                CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n",
                       peer, libcfs_nid2str(nid),
                       atomic_read(&peer->ibp_refcount),
                       peer->ibp_version);
                return peer;
        }
        return NULL;
}

void
kiblnd_unlink_peer_locked (kib_peer_t *peer)
{
        LASSERT(list_empty(&peer->ibp_conns));

        LASSERT (kiblnd_peer_active(peer));
        list_del_init(&peer->ibp_list);
        /* lose peerlist's ref */
        kiblnd_peer_decref(peer);
}

static int
kiblnd_get_peer_info(lnet_ni_t *ni, int index,
                     lnet_nid_t *nidp, int *count)
{
        kib_peer_t              *peer;
        struct list_head        *ptmp;
        int                      i;
        unsigned long            flags;

        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {

                list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {

                        peer = list_entry(ptmp, kib_peer_t, ibp_list);
                        LASSERT(!kiblnd_peer_idle(peer));

                        if (peer->ibp_ni != ni)
                                continue;

                        if (index-- > 0)
                                continue;

                        *nidp = peer->ibp_nid;
                        *count = atomic_read(&peer->ibp_refcount);

                        read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
                                               flags);
                        return 0;
                }
        }

        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
        return -ENOENT;
}

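/* Remove a peer from the table.  If it has no connections it can be
 * unlinked immediately; otherwise every connection is closed and the
 * close of the last one unlinks the peer as a side effect. */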
static void
kiblnd_del_peer_locked (kib_peer_t *peer)
{
        struct list_head        *ctmp;
        struct list_head        *cnxt;
        kib_conn_t              *conn;

        if (list_empty(&peer->ibp_conns)) {
                kiblnd_unlink_peer_locked(peer);
        } else {
                list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
                        conn = list_entry(ctmp, kib_conn_t, ibc_list);

                        kiblnd_close_conn_locked(conn, 0);
                }
                /* NB closing peer's last conn unlinked it. */
        }
        /* NB peer now unlinked; might even be freed if the peer table had the
         * last ref on it. */
}

static int
kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid)
{
        struct list_head        zombies = LIST_HEAD_INIT(zombies);
        struct list_head        *ptmp;
        struct list_head        *pnxt;
        kib_peer_t              *peer;
        int                     lo;
        int                     hi;
        int                     i;
        unsigned long           flags;
        int                     rc = -ENOENT;

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        if (nid != LNET_NID_ANY) {
                lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
        } else {
                lo = 0;
                hi = kiblnd_data.kib_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
                        peer = list_entry(ptmp, kib_peer_t, ibp_list);
                        LASSERT(!kiblnd_peer_idle(peer));

                        if (peer->ibp_ni != ni)
                                continue;

                        if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid))
                                continue;

                        if (!list_empty(&peer->ibp_tx_queue)) {
                                LASSERT(list_empty(&peer->ibp_conns));

                                list_splice_init(&peer->ibp_tx_queue,
                                                 &zombies);
                        }

                        kiblnd_del_peer_locked(peer);
                        rc = 0;         /* matched something */
                }
        }

        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        kiblnd_txlist_done(ni, &zombies, -EIO);

        return rc;
}

static kib_conn_t *
kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
{
        kib_peer_t              *peer;
        struct list_head        *ptmp;
        kib_conn_t              *conn;
        struct list_head        *ctmp;
        int                     i;
        unsigned long           flags;

        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
                list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {

                        peer = list_entry(ptmp, kib_peer_t, ibp_list);
                        LASSERT(!kiblnd_peer_idle(peer));

                        if (peer->ibp_ni != ni)
                                continue;

                        list_for_each(ctmp, &peer->ibp_conns) {
                                if (index-- > 0)
                                        continue;

                                conn = list_entry(ctmp, kib_conn_t, ibc_list);
                                kiblnd_conn_addref(conn);
                                read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
                                                       flags);
                                return conn;
                        }
                }
        }

        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
        return NULL;
}

static void
kiblnd_debug_rx (kib_rx_t *rx)
{
        CDEBUG(D_CONSOLE, "      %p status %d msg_type %x cred %d\n",
               rx, rx->rx_status, rx->rx_msg->ibm_type,
               rx->rx_msg->ibm_credits);
}

static void
kiblnd_debug_tx (kib_tx_t *tx)
{
        CDEBUG(D_CONSOLE, "      %p snd %d q %d w %d rc %d dl %lx "
               "cookie "LPX64" msg %s%s type %x cred %d\n",
               tx, tx->tx_sending, tx->tx_queued, tx->tx_waiting,
               tx->tx_status, tx->tx_deadline, tx->tx_cookie,
               tx->tx_lntmsg[0] == NULL ? "-" : "!",
               tx->tx_lntmsg[1] == NULL ? "-" : "!",
               tx->tx_msg->ibm_type, tx->tx_msg->ibm_credits);
}

void
kiblnd_debug_conn (kib_conn_t *conn)
{
        struct list_head        *tmp;
        int                     i;

        spin_lock(&conn->ibc_lock);

        CDEBUG(D_CONSOLE, "conn[%d] %p [version %x] -> %s:\n",
               atomic_read(&conn->ibc_refcount), conn,
               conn->ibc_version, libcfs_nid2str(conn->ibc_peer->ibp_nid));
        CDEBUG(D_CONSOLE, "   state %d nposted %d/%d cred %d o_cred %d "
               " r_cred %d\n", conn->ibc_state, conn->ibc_noops_posted,
               conn->ibc_nsends_posted, conn->ibc_credits,
               conn->ibc_outstanding_credits, conn->ibc_reserved_credits);
        CDEBUG(D_CONSOLE, "   comms_err %d\n", conn->ibc_comms_error);

        CDEBUG(D_CONSOLE, "   early_rxs:\n");
        list_for_each(tmp, &conn->ibc_early_rxs)
                kiblnd_debug_rx(list_entry(tmp, kib_rx_t, rx_list));

        CDEBUG(D_CONSOLE, "   tx_noops:\n");
        list_for_each(tmp, &conn->ibc_tx_noops)
                kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));

        CDEBUG(D_CONSOLE, "   tx_queue_nocred:\n");
        list_for_each(tmp, &conn->ibc_tx_queue_nocred)
                kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));

        CDEBUG(D_CONSOLE, "   tx_queue_rsrvd:\n");
        list_for_each(tmp, &conn->ibc_tx_queue_rsrvd)
                kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));

        CDEBUG(D_CONSOLE, "   tx_queue:\n");
        list_for_each(tmp, &conn->ibc_tx_queue)
                kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));

        CDEBUG(D_CONSOLE, "   active_txs:\n");
        list_for_each(tmp, &conn->ibc_active_txs)
                kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));

        CDEBUG(D_CONSOLE, "   rxs:\n");
        for (i = 0; i < IBLND_RX_MSGS(conn); i++)
                kiblnd_debug_rx(&conn->ibc_rxs[i]);

        spin_unlock(&conn->ibc_lock);
}

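/* Map the ib_mtu tunable (in bytes) to the IB_MTU_* enum used in the path
 * record.  Returns -1 for an unsupported value; 0 means "leave the
 * path-record MTU alone" (see kiblnd_setup_mtu_locked() below). */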
int
kiblnd_translate_mtu(int value)
{
        switch (value) {
        default:
                return -1;
        case 0:
                return 0;
        case 256:
                return IB_MTU_256;
        case 512:
                return IB_MTU_512;
        case 1024:
                return IB_MTU_1024;
        case 2048:
                return IB_MTU_2048;
        case 4096:
                return IB_MTU_4096;
        }
}

static void
kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid)
{
        int           mtu;

        /* XXX There is no path record for iWARP, set by netdev->change_mtu? */
        if (cmid->route.path_rec == NULL)
                return;

        mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu);
        LASSERT (mtu >= 0);
        if (mtu != 0)
                cmid->route.path_rec->mtu = mtu;
}

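/* Pick a completion vector for this connection's CQ by hashing the peer
 * NID onto one of the CPUs in the connection's CPT, then folding that CPU
 * index onto the device's available vectors.  This spreads completion
 * interrupt load across CPUs while keeping a given peer on a stable
 * vector. */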
static int
kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
{
        cpumask_t       *mask;
        int             vectors;
        int             off;
        int             i;
        lnet_nid_t      ibp_nid;

        vectors = conn->ibc_cmid->device->num_comp_vectors;
        if (vectors <= 1)
                return 0;

        mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt);

        /* hash NID to CPU id in this partition... */
        ibp_nid = conn->ibc_peer->ibp_nid;
        off = do_div(ibp_nid, cpumask_weight(mask));
        for_each_cpu(i, mask) {
                if (off-- == 0)
                        return i % vectors;
        }

        LBUG();
        return 1;
}

kib_conn_t *
kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
                   int state, int version)
{
        /* CAVEAT EMPTOR:
         * If the new conn is created successfully it takes over the caller's
         * ref on 'peer'.  It also "owns" 'cmid' and destroys it when it itself
         * is destroyed.  On failure, the caller's ref on 'peer' remains and
         * she must dispose of 'cmid'.  (Actually I'd block forever if I tried
         * to destroy 'cmid' here since I'm called from the CM which still has
         * its ref on 'cmid'). */
        rwlock_t               *glock = &kiblnd_data.kib_global_lock;
        kib_net_t              *net = peer->ibp_ni->ni_data;
        kib_dev_t              *dev;
        struct ib_qp_init_attr *init_qp_attr;
        struct kib_sched_info   *sched;
#ifdef HAVE_IB_CQ_INIT_ATTR
        struct ib_cq_init_attr  cq_attr = {};
#endif
        kib_conn_t              *conn;
        struct ib_cq            *cq;
        unsigned long           flags;
        int                     cpt;
        int                     rc;
        int                     i;

        LASSERT(net != NULL);
        LASSERT(!in_interrupt());

        dev = net->ibn_dev;

        cpt = lnet_cpt_of_nid(peer->ibp_nid);
        sched = kiblnd_data.kib_scheds[cpt];

        LASSERT(sched->ibs_nthreads > 0);

        LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt,
                         sizeof(*init_qp_attr));
        if (init_qp_attr == NULL) {
                CERROR("Can't allocate qp_attr for %s\n",
                       libcfs_nid2str(peer->ibp_nid));
                goto failed_0;
        }

        LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn));
        if (conn == NULL) {
                CERROR("Can't allocate connection for %s\n",
                       libcfs_nid2str(peer->ibp_nid));
                goto failed_1;
        }

        conn->ibc_state = IBLND_CONN_INIT;
        conn->ibc_version = version;
        conn->ibc_peer = peer;                  /* I take the caller's ref */
        cmid->context = conn;                   /* for future CM callbacks */
        conn->ibc_cmid = cmid;
        conn->ibc_max_frags = peer->ibp_max_frags;
        conn->ibc_queue_depth = peer->ibp_queue_depth;

        INIT_LIST_HEAD(&conn->ibc_early_rxs);
        INIT_LIST_HEAD(&conn->ibc_tx_noops);
        INIT_LIST_HEAD(&conn->ibc_tx_queue);
        INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
        INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
        INIT_LIST_HEAD(&conn->ibc_active_txs);
        spin_lock_init(&conn->ibc_lock);

        LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
                         sizeof(*conn->ibc_connvars));
        if (conn->ibc_connvars == NULL) {
                CERROR("Can't allocate in-progress connection state\n");
                goto failed_2;
        }

        write_lock_irqsave(glock, flags);
        if (dev->ibd_failover) {
                write_unlock_irqrestore(glock, flags);
                CERROR("%s: failover in progress\n", dev->ibd_ifname);
                goto failed_2;
        }

        if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
                /* wakeup failover thread and teardown connection */
                if (kiblnd_dev_can_failover(dev)) {
                        list_add_tail(&dev->ibd_fail_list,
                                      &kiblnd_data.kib_failed_devs);
                        wake_up(&kiblnd_data.kib_failover_waitq);
                }

                write_unlock_irqrestore(glock, flags);
                CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
                       cmid->device->name, dev->ibd_ifname);
                goto failed_2;
        }

        kiblnd_hdev_addref_locked(dev->ibd_hdev);
        conn->ibc_hdev = dev->ibd_hdev;

        kiblnd_setup_mtu_locked(cmid);

        write_unlock_irqrestore(glock, flags);

        LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
                         IBLND_RX_MSGS(conn) * sizeof(kib_rx_t));
        if (conn->ibc_rxs == NULL) {
                CERROR("Cannot allocate RX buffers\n");
                goto failed_2;
        }

        rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt,
                                IBLND_RX_MSG_PAGES(conn));
        if (rc != 0)
                goto failed_2;

        kiblnd_map_rx_descs(conn);

#ifdef HAVE_IB_CQ_INIT_ATTR
        cq_attr.cqe = IBLND_CQ_ENTRIES(conn);
        cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt);
        cq = ib_create_cq(cmid->device,
                          kiblnd_cq_completion, kiblnd_cq_event, conn,
                          &cq_attr);
#else
        cq = ib_create_cq(cmid->device,
                          kiblnd_cq_completion, kiblnd_cq_event, conn,
                          IBLND_CQ_ENTRIES(conn),
                          kiblnd_get_completion_vector(conn, cpt));
#endif
        if (IS_ERR(cq)) {
                CERROR("Failed to create CQ with %d CQEs: %ld\n",
                        IBLND_CQ_ENTRIES(conn), PTR_ERR(cq));
                goto failed_2;
        }

        conn->ibc_cq = cq;

        rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
        if (rc != 0) {
                CERROR("Can't request completion notification: %d\n", rc);
                goto failed_2;
        }

        init_qp_attr->event_handler = kiblnd_qp_event;
        init_qp_attr->qp_context = conn;
        init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(conn);
        init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(conn);
        init_qp_attr->cap.max_send_sge = 1;
        init_qp_attr->cap.max_recv_sge = 1;
        init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
        init_qp_attr->qp_type = IB_QPT_RC;
        init_qp_attr->send_cq = cq;
        init_qp_attr->recv_cq = cq;

        conn->ibc_sched = sched;

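        /* If the HCA can't satisfy the requested send queue depth, retry
         * with 25% fewer send work requests each time, giving up once the
         * queue would drop below 16 WRs. */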
        do {
                rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
                if (!rc || init_qp_attr->cap.max_send_wr < 16)
                        break;

                init_qp_attr->cap.max_send_wr -= init_qp_attr->cap.max_send_wr / 4;
        } while (rc);

        if (rc) {
                CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n",
                        rc, init_qp_attr->cap.max_send_wr,
                        init_qp_attr->cap.max_recv_wr);
                goto failed_2;
        }

        if (init_qp_attr->cap.max_send_wr != IBLND_SEND_WRS(conn))
                CDEBUG(D_NET, "original send wr %d, created with %d\n",
                        IBLND_SEND_WRS(conn), init_qp_attr->cap.max_send_wr);

        LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));

        /* 1 ref for caller and each rxmsg */
        atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(conn));
        conn->ibc_nrx = IBLND_RX_MSGS(conn);

        /* post receives */
        for (i = 0; i < IBLND_RX_MSGS(conn); i++) {
                rc = kiblnd_post_rx(&conn->ibc_rxs[i], IBLND_POSTRX_NO_CREDIT);
                if (rc != 0) {
                        CERROR("Can't post rxmsg: %d\n", rc);

                        /* Make posted receives complete */
                        kiblnd_abort_receives(conn);

                        /* correct # of posted buffers
                         * NB locking needed now I'm racing with completion */
                        spin_lock_irqsave(&sched->ibs_lock, flags);
                        conn->ibc_nrx -= IBLND_RX_MSGS(conn) - i;
                        spin_unlock_irqrestore(&sched->ibs_lock, flags);

                        /* cmid will be destroyed by the CM (OFED) after the
                         * cm_callback returns, so we can't refer to it any
                         * more (e.g. by kiblnd_connd()->kiblnd_destroy_conn) */
                        rdma_destroy_qp(conn->ibc_cmid);
                        conn->ibc_cmid = NULL;

                        /* Drop my own and unused rxbuffer refcounts */
                        while (i++ <= IBLND_RX_MSGS(conn))
                                kiblnd_conn_decref(conn);

                        return NULL;
                }
        }

        /* Init successful! */
        LASSERT (state == IBLND_CONN_ACTIVE_CONNECT ||
                 state == IBLND_CONN_PASSIVE_WAIT);
        conn->ibc_state = state;

        /* 1 more conn */
        atomic_inc(&net->ibn_nconns);
        return conn;

 failed_2:
        kiblnd_destroy_conn(conn, true);
 failed_1:
        LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
 failed_0:
        return NULL;
}

void
kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn)
{
        struct rdma_cm_id *cmid = conn->ibc_cmid;
        kib_peer_t        *peer = conn->ibc_peer;
        int                rc;

        LASSERT (!in_interrupt());
        LASSERT (atomic_read(&conn->ibc_refcount) == 0);
        LASSERT(list_empty(&conn->ibc_early_rxs));
        LASSERT(list_empty(&conn->ibc_tx_noops));
        LASSERT(list_empty(&conn->ibc_tx_queue));
        LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd));
        LASSERT(list_empty(&conn->ibc_tx_queue_nocred));
        LASSERT(list_empty(&conn->ibc_active_txs));
        LASSERT (conn->ibc_noops_posted == 0);
        LASSERT (conn->ibc_nsends_posted == 0);

        switch (conn->ibc_state) {
        default:
                /* conn must be completely disengaged from the network */
                LBUG();

        case IBLND_CONN_DISCONNECTED:
                /* connvars should have been freed already */
                LASSERT (conn->ibc_connvars == NULL);
                break;

        case IBLND_CONN_INIT:
                break;
        }

        /* conn->ibc_cmid might be destroyed by CM already */
        if (cmid != NULL && cmid->qp != NULL)
                rdma_destroy_qp(cmid);

        if (conn->ibc_cq != NULL) {
                rc = ib_destroy_cq(conn->ibc_cq);
                if (rc != 0)
                        CWARN("Error destroying CQ: %d\n", rc);
        }

        if (conn->ibc_rx_pages != NULL)
                kiblnd_unmap_rx_descs(conn);

        if (conn->ibc_rxs != NULL) {
                LIBCFS_FREE(conn->ibc_rxs,
                            IBLND_RX_MSGS(conn) * sizeof(kib_rx_t));
        }

        if (conn->ibc_connvars != NULL)
                LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));

        if (conn->ibc_hdev != NULL)
                kiblnd_hdev_decref(conn->ibc_hdev);

        /* See CAVEAT EMPTOR above in kiblnd_create_conn */
        if (conn->ibc_state != IBLND_CONN_INIT) {
                kib_net_t *net = peer->ibp_ni->ni_data;

                kiblnd_peer_decref(peer);
                rdma_destroy_id(cmid);
                atomic_dec(&net->ibn_nconns);
        }

        if (free_conn)
                LIBCFS_FREE(conn, sizeof(*conn));
}

int
kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
{
        kib_conn_t              *conn;
        struct list_head        *ctmp;
        struct list_head        *cnxt;
        int                     count = 0;

        list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
                conn = list_entry(ctmp, kib_conn_t, ibc_list);

                CDEBUG(D_NET, "Closing conn -> %s, "
                              "version: %x, reason: %d\n",
                       libcfs_nid2str(peer->ibp_nid),
                       conn->ibc_version, why);

                kiblnd_close_conn_locked(conn, why);
                count++;
        }

        return count;
}

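/* Close every connection to this peer whose protocol version or
 * incarnation doesn't match the values just negotiated -- i.e. leftovers
 * from before the peer rebooted or reconnected.  Returns the number of
 * connections closed. */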
int
kiblnd_close_stale_conns_locked(kib_peer_t *peer,
                                int version, __u64 incarnation)
{
        kib_conn_t              *conn;
        struct list_head        *ctmp;
        struct list_head        *cnxt;
        int                     count = 0;

        list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
                conn = list_entry(ctmp, kib_conn_t, ibc_list);

                if (conn->ibc_version     == version &&
                    conn->ibc_incarnation == incarnation)
                        continue;

                CDEBUG(D_NET, "Closing stale conn -> %s version: %x, "
                              "incarnation:"LPX64"(%x, "LPX64")\n",
                       libcfs_nid2str(peer->ibp_nid),
                       conn->ibc_version, conn->ibc_incarnation,
                       version, incarnation);

                kiblnd_close_conn_locked(conn, -ESTALE);
                count++;
        }

        return count;
}

static int
kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
{
        kib_peer_t              *peer;
        struct list_head        *ptmp;
        struct list_head        *pnxt;
        int                     lo;
        int                     hi;
        int                     i;
        unsigned long           flags;
        int                     count = 0;

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        if (nid != LNET_NID_ANY)
                lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
        else {
                lo = 0;
                hi = kiblnd_data.kib_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {

                        peer = list_entry(ptmp, kib_peer_t, ibp_list);
                        LASSERT(!kiblnd_peer_idle(peer));

                        if (peer->ibp_ni != ni)
                                continue;

                        if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid))
                                continue;

                        count += kiblnd_close_peer_conns_locked(peer, 0);
                }
        }

        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        /* wildcards always succeed */
        if (nid == LNET_NID_ANY)
                return 0;

        return (count == 0) ? -ENOENT : 0;
}

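/* LND ioctl entry point: supports peer enumeration (IOC_LIBCFS_GET_PEER),
 * peer deletion (IOC_LIBCFS_DEL_PEER), connection enumeration
 * (IOC_LIBCFS_GET_CONN, which also reports the path MTU) and forced
 * connection close (IOC_LIBCFS_CLOSE_CONNECTION). */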
static int
kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
{
        struct libcfs_ioctl_data *data = arg;
        int                       rc = -EINVAL;

        switch(cmd) {
        case IOC_LIBCFS_GET_PEER: {
                lnet_nid_t   nid = 0;
                int          count = 0;

                rc = kiblnd_get_peer_info(ni, data->ioc_count,
                                          &nid, &count);
                data->ioc_nid    = nid;
                data->ioc_count  = count;
                break;
        }

        case IOC_LIBCFS_DEL_PEER: {
                rc = kiblnd_del_peer(ni, data->ioc_nid);
                break;
        }
        case IOC_LIBCFS_GET_CONN: {
                kib_conn_t *conn;

                rc = 0;
                conn = kiblnd_get_conn_by_idx(ni, data->ioc_count);
                if (conn == NULL) {
                        rc = -ENOENT;
                        break;
                }

                LASSERT (conn->ibc_cmid != NULL);
                data->ioc_nid = conn->ibc_peer->ibp_nid;
                if (conn->ibc_cmid->route.path_rec == NULL)
                        data->ioc_u32[0] = 0; /* iWarp has no path MTU */
                else
                        data->ioc_u32[0] =
                        ib_mtu_enum_to_int(conn->ibc_cmid->route.path_rec->mtu);
                kiblnd_conn_decref(conn);
                break;
        }
        case IOC_LIBCFS_CLOSE_CONNECTION: {
                rc = kiblnd_close_matching_conns(ni, data->ioc_nid);
                break;
        }

        default:
                break;
        }

        return rc;
}

static void
kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
{
        cfs_time_t      last_alive = 0;
        cfs_time_t      now = cfs_time_current();
        rwlock_t        *glock = &kiblnd_data.kib_global_lock;
        kib_peer_t      *peer;
        unsigned long   flags;

        read_lock_irqsave(glock, flags);

        peer = kiblnd_find_peer_locked(nid);
        if (peer != NULL)
                last_alive = peer->ibp_last_alive;

        read_unlock_irqrestore(glock, flags);

        if (last_alive != 0)
                *when = last_alive;

        /* peer is not persistent in hash, trigger peer creation
         * and connection establishment with a NULL tx */
        if (peer == NULL)
                kiblnd_launch_tx(ni, NULL, nid);

        CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n",
               libcfs_nid2str(nid), peer,
               last_alive ? cfs_duration_sec(now - last_alive) : -1);
        return;
}

static void
kiblnd_free_pages(kib_pages_t *p)
{
        int     npages = p->ibp_npages;
        int     i;

        for (i = 0; i < npages; i++) {
                if (p->ibp_pages[i] != NULL)
                        __free_page(p->ibp_pages[i]);
        }

        LIBCFS_FREE(p, offsetof(kib_pages_t, ibp_pages[npages]));
}

int
kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
{
        kib_pages_t     *p;
        int             i;

        LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
                         offsetof(kib_pages_t, ibp_pages[npages]));
        if (p == NULL) {
                CERROR("Can't allocate descriptor for %d pages\n", npages);
                return -ENOMEM;
        }

        memset(p, 0, offsetof(kib_pages_t, ibp_pages[npages]));
        p->ibp_npages = npages;

        for (i = 0; i < npages; i++) {
                p->ibp_pages[i] = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
                                                     GFP_NOFS);
                if (p->ibp_pages[i] == NULL) {
                        CERROR("Can't allocate page %d of %d\n", i, npages);
                        kiblnd_free_pages(p);
                        return -ENOMEM;
                }
        }

        *pp = p;
        return 0;
}

void
kiblnd_unmap_rx_descs(kib_conn_t *conn)
{
        kib_rx_t *rx;
        int       i;

        LASSERT (conn->ibc_rxs != NULL);
        LASSERT (conn->ibc_hdev != NULL);

        for (i = 0; i < IBLND_RX_MSGS(conn); i++) {
                rx = &conn->ibc_rxs[i];

                LASSERT(rx->rx_nob >= 0); /* not posted */

                kiblnd_dma_unmap_single(conn->ibc_hdev->ibh_ibdev,
                                        KIBLND_UNMAP_ADDR(rx, rx_msgunmap,
                                                          rx->rx_msgaddr),
                                        IBLND_MSG_SIZE, DMA_FROM_DEVICE);
        }

        kiblnd_free_pages(conn->ibc_rx_pages);

        conn->ibc_rx_pages = NULL;
}

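/* Carve the pre-allocated RX pages into IBLND_MSG_SIZE message buffers and
 * DMA-map each one for the HCA.  IBLND_MSG_SIZE divides PAGE_SIZE evenly
 * (see the CLASSERTs in kiblnd_map_tx_pool() below), so a buffer never
 * straddles a page boundary. */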
void
kiblnd_map_rx_descs(kib_conn_t *conn)
{
        kib_rx_t       *rx;
        struct page    *pg;
        int             pg_off;
        int             ipg;
        int             i;

        for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn); i++) {
                pg = conn->ibc_rx_pages->ibp_pages[ipg];
                rx = &conn->ibc_rxs[i];

                rx->rx_conn = conn;
                rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off);

                rx->rx_msgaddr =
                        kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev,
                                              rx->rx_msg, IBLND_MSG_SIZE,
                                              DMA_FROM_DEVICE);
                LASSERT(!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev,
                                                  rx->rx_msgaddr));
                KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);

                CDEBUG(D_NET, "rx %d: %p "LPX64"("LPX64")\n",
                       i, rx->rx_msg, rx->rx_msgaddr,
                       (__u64)(page_to_phys(pg) + pg_off));

                pg_off += IBLND_MSG_SIZE;
                LASSERT(pg_off <= PAGE_SIZE);

                if (pg_off == PAGE_SIZE) {
                        pg_off = 0;
                        ipg++;
                        LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn));
                }
        }
}

static void
kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo)
{
        kib_hca_dev_t  *hdev = tpo->tpo_hdev;
        kib_tx_t       *tx;
        int             i;

        LASSERT (tpo->tpo_pool.po_allocated == 0);

        if (hdev == NULL)
                return;

        for (i = 0; i < tpo->tpo_pool.po_size; i++) {
                tx = &tpo->tpo_tx_descs[i];
                kiblnd_dma_unmap_single(hdev->ibh_ibdev,
                                        KIBLND_UNMAP_ADDR(tx, tx_msgunmap,
                                                          tx->tx_msgaddr),
                                        IBLND_MSG_SIZE, DMA_TO_DEVICE);
        }

        kiblnd_hdev_decref(hdev);
        tpo->tpo_hdev = NULL;
}

static kib_hca_dev_t *
kiblnd_current_hdev(kib_dev_t *dev)
{
        kib_hca_dev_t *hdev;
        unsigned long  flags;
        int            i = 0;

        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
        while (dev->ibd_failover) {
                read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
                if (i++ % 50 == 0)
                        CDEBUG(D_NET, "%s: Wait for failover\n",
                               dev->ibd_ifname);
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(cfs_time_seconds(1) / 100);

                read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
        }

        kiblnd_hdev_addref_locked(dev->ibd_hdev);
        hdev = dev->ibd_hdev;

        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        return hdev;
}

static void
kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
{
        kib_pages_t    *txpgs = tpo->tpo_tx_pages;
        kib_pool_t     *pool  = &tpo->tpo_pool;
        kib_net_t      *net   = pool->po_owner->ps_net;
        kib_dev_t      *dev;
        struct page    *page;
        kib_tx_t       *tx;
        int             page_offset;
        int             ipage;
        int             i;

        LASSERT (net != NULL);

        dev = net->ibn_dev;

        /* pre-mapped messages are not bigger than 1 page */
        CLASSERT (IBLND_MSG_SIZE <= PAGE_SIZE);

        /* No fancy arithmetic when we do the buffer calculations */
        CLASSERT (PAGE_SIZE % IBLND_MSG_SIZE == 0);

        tpo->tpo_hdev = kiblnd_current_hdev(dev);

        for (ipage = page_offset = i = 0; i < pool->po_size; i++) {
                page = txpgs->ibp_pages[ipage];
                tx = &tpo->tpo_tx_descs[i];

                tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
                                           page_offset);

                tx->tx_msgaddr = kiblnd_dma_map_single(tpo->tpo_hdev->ibh_ibdev,
                                                       tx->tx_msg,
                                                       IBLND_MSG_SIZE,
                                                       DMA_TO_DEVICE);
                LASSERT(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev,
                                                  tx->tx_msgaddr));
                KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);

                list_add(&tx->tx_list, &pool->po_free_list);

                page_offset += IBLND_MSG_SIZE;
                LASSERT(page_offset <= PAGE_SIZE);

                if (page_offset == PAGE_SIZE) {
                        page_offset = 0;
                        ipage++;
                        LASSERT(ipage <= txpgs->ibp_npages);
                }
        }
}

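/* Decide whether this RDMA can use the device's global DMA MR.  When
 * map-on-demand is enabled and the descriptor has at least as many
 * fragments as the negotiated limit, return NULL, which the caller is
 * then expected to treat as "map via FMR/FastReg instead"; otherwise hand
 * back the global MR. */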
struct ib_mr *
kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
                      int negotiated_nfrags)
{
        kib_net_t     *net   = ni->ni_data;
        kib_hca_dev_t *hdev  = net->ibn_dev->ibd_hdev;
        struct lnet_ioctl_config_o2iblnd_tunables *tunables;
        int     mod;
        __u16   nfrags;

        tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
        mod = tunables->lnd_map_on_demand;
        nfrags = (negotiated_nfrags != -1) ? negotiated_nfrags : mod;

        LASSERT(hdev->ibh_mrs != NULL);

        if (mod > 0 && nfrags <= rd->rd_nfrags)
                return NULL;

        return hdev->ibh_mrs;
}

static void
kiblnd_destroy_fmr_pool(kib_fmr_pool_t *fpo)
{
        LASSERT(fpo->fpo_map_count == 0);

        if (fpo->fpo_is_fmr) {
                if (fpo->fmr.fpo_fmr_pool)
                        ib_destroy_fmr_pool(fpo->fmr.fpo_fmr_pool);
        } else {
                struct kib_fast_reg_descriptor *frd, *tmp;
                int i = 0;

                list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
                                         frd_list) {
                        list_del(&frd->frd_list);
#ifndef HAVE_IB_MAP_MR_SG
                        ib_free_fast_reg_page_list(frd->frd_frpl);
#endif
                        ib_dereg_mr(frd->frd_mr);
                        LIBCFS_FREE(frd, sizeof(*frd));
                        i++;
                }
                if (i < fpo->fast_reg.fpo_pool_size)
                        CERROR("FastReg pool still has %d regions registered\n",
                                fpo->fast_reg.fpo_pool_size - i);
        }

        if (fpo->fpo_hdev)
                kiblnd_hdev_decref(fpo->fpo_hdev);

        LIBCFS_FREE(fpo, sizeof(*fpo));
}

static void
kiblnd_destroy_fmr_pool_list(struct list_head *head)
{
        kib_fmr_pool_t *fpo, *tmp;

        list_for_each_entry_safe(fpo, tmp, head, fpo_list) {
                list_del(&fpo->fpo_list);
                kiblnd_destroy_fmr_pool(fpo);
        }
}

static int
kiblnd_fmr_pool_size(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
                     int ncpts)
{
        int size = tunables->lnd_fmr_pool_size / ncpts;

        return max(IBLND_FMR_POOL, size);
}

static int
kiblnd_fmr_flush_trigger(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
                         int ncpts)
{
        int size = tunables->lnd_fmr_flush_trigger / ncpts;

        return max(IBLND_FMR_POOL_FLUSH, size);
}

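/* Build an FMR pool sized for the largest LNet payload: each FMR can map
 * LNET_MAX_PAYLOAD/PAGE_SIZE pages with local and remote write access, and
 * dirty mappings are flushed once fps_flush_trigger of them accumulate. */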
static int kiblnd_alloc_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
{
        struct ib_fmr_pool_param param = {
                .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
                .page_shift        = PAGE_SHIFT,
                .access            = (IB_ACCESS_LOCAL_WRITE |
                                      IB_ACCESS_REMOTE_WRITE),
                .pool_size         = fps->fps_pool_size,
                .dirty_watermark   = fps->fps_flush_trigger,
                .flush_function    = NULL,
                .flush_arg         = NULL,
                .cache             = !!fps->fps_cache };
        int rc = 0;

        fpo->fmr.fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd,
                                                   &param);
        if (IS_ERR(fpo->fmr.fpo_fmr_pool)) {
                rc = PTR_ERR(fpo->fmr.fpo_fmr_pool);
                if (rc != -ENOSYS)
                        CERROR("Failed to create FMR pool: %d\n", rc);
                else
                        CERROR("FMRs are not supported\n");
        }

        return rc;
}

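/* Pre-allocate fps_pool_size FastReg descriptors, each with a memory
 * region (and, on pre-ib_map_mr_sg kernels, a fast_reg page list) big
 * enough for LNET_MAX_PAYLOAD/PAGE_SIZE pages.  On any failure the
 * partially built pool is torn down again before returning. */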
1499 static int kiblnd_alloc_freg_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
1500 {
1501         struct kib_fast_reg_descriptor *frd, *tmp;
1502         int i, rc;
1503
1504         INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list);
1505         fpo->fast_reg.fpo_pool_size = 0;
1506         for (i = 0; i < fps->fps_pool_size; i++) {
1507                 LIBCFS_CPT_ALLOC(frd, lnet_cpt_table(), fps->fps_cpt,
1508                                  sizeof(*frd));
1509                 if (!frd) {
1510                         CERROR("Failed to allocate a new fast_reg descriptor\n");
1511                         rc = -ENOMEM;
1512                         goto out;
1513                 }
1514                 frd->frd_mr = NULL;
1515
1516 #ifndef HAVE_IB_MAP_MR_SG
1517                 frd->frd_frpl = ib_alloc_fast_reg_page_list(fpo->fpo_hdev->ibh_ibdev,
1518                                                             LNET_MAX_PAYLOAD/PAGE_SIZE);
1519                 if (IS_ERR(frd->frd_frpl)) {
1520                         rc = PTR_ERR(frd->frd_frpl);
1521                         CERROR("Failed to allocate ib_fast_reg_page_list: %d\n",
1522                                 rc);
1523                         frd->frd_frpl = NULL;
1524                         goto out_middle;
1525                 }
1526 #endif
1527
1528 #ifdef HAVE_IB_ALLOC_FAST_REG_MR
1529                 frd->frd_mr = ib_alloc_fast_reg_mr(fpo->fpo_hdev->ibh_pd,
1530                                                    LNET_MAX_PAYLOAD/PAGE_SIZE);
1531 #else
1532                 frd->frd_mr = ib_alloc_mr(fpo->fpo_hdev->ibh_pd,
1533                                           IB_MR_TYPE_MEM_REG,
1534                                           LNET_MAX_PAYLOAD/PAGE_SIZE);
1535 #endif
1536                 if (IS_ERR(frd->frd_mr)) {
1537                         rc = PTR_ERR(frd->frd_mr);
1538                         CERROR("Failed to allocate ib_fast_reg_mr: %d\n", rc);
1539                         frd->frd_mr = NULL;
1540                         goto out_middle;
1541                 }
1542
1543                 frd->frd_valid = true;
1544
1545                 list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
1546                 fpo->fast_reg.fpo_pool_size++;
1547         }
1548
1549         return 0;
1550
1551 out_middle:
1552         if (frd->frd_mr)
1553                 ib_dereg_mr(frd->frd_mr);
1554 #ifndef HAVE_IB_MAP_MR_SG
1555         if (frd->frd_frpl)
1556                 ib_free_fast_reg_page_list(frd->frd_frpl);
1557 #endif
1558         LIBCFS_FREE(frd, sizeof(*frd));
1559
1560 out:
1561         list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
1562                                  frd_list) {
1563                 list_del(&frd->frd_list);
1564 #ifndef HAVE_IB_MAP_MR_SG
1565                 ib_free_fast_reg_page_list(frd->frd_frpl);
1566 #endif
1567                 ib_dereg_mr(frd->frd_mr);
1568                 LIBCFS_FREE(frd, sizeof(*frd));
1569         }
1570
1571         return rc;
1572 }
1573
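     /* Allocate one FMR/FastReg pool and pick the registration method for
      * it: FMR if the HCA implements the FMR verbs, otherwise FastReg if it
      * advertises IB_DEVICE_MEM_MGT_EXTENSIONS. */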
1574 static int
1575 kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t **pp_fpo)
1576 {
1577         struct ib_device_attr *dev_attr;
1578         kib_dev_t *dev = fps->fps_net->ibn_dev;
1579         kib_fmr_pool_t *fpo;
1580         int rc;
1581
1582         dev_attr = kmalloc(sizeof(*dev_attr), GFP_KERNEL);
1583         if (!dev_attr)
1584                 return -ENOMEM;
1585
1586         LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
1587         if (!fpo) {
1588                 rc = -ENOMEM;
1589                 goto out_dev_attr;
1590         }
1591
1592         fpo->fpo_hdev = kiblnd_current_hdev(dev);
1593
1594         rc = ib_query_device(fpo->fpo_hdev->ibh_ibdev, dev_attr);
1595         if (rc) {
1596                 CERROR("Query device failed for %s: %d\n",
1597                         fpo->fpo_hdev->ibh_ibdev->name, rc);
1598                 goto out_fpo;
1599         }
1600
1601         /* Check for FMR or FastReg support */
1602         fpo->fpo_is_fmr = 0;
1603         if (fpo->fpo_hdev->ibh_ibdev->alloc_fmr &&
1604             fpo->fpo_hdev->ibh_ibdev->dealloc_fmr &&
1605             fpo->fpo_hdev->ibh_ibdev->map_phys_fmr &&
1606             fpo->fpo_hdev->ibh_ibdev->unmap_fmr) {
1607                 LCONSOLE_INFO("Using FMR for registration\n");
1608                 fpo->fpo_is_fmr = 1;
1609         } else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
1610                 LCONSOLE_INFO("Using FastReg for registration\n");
1611         } else {
1612                 rc = -ENOSYS;
1613                 LCONSOLE_ERROR_MSG(rc, "IB device supports neither FMRs nor FastRegs; can't register memory\n");
1614                 goto out_fpo;
1615         }
1616
1617         if (fpo->fpo_is_fmr)
1618                 rc = kiblnd_alloc_fmr_pool(fps, fpo);
1619         else
1620                 rc = kiblnd_alloc_freg_pool(fps, fpo);
1621         if (rc)
1622                 goto out_fpo;
1623
1624         kfree(dev_attr);
1625         fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
1626         fpo->fpo_owner    = fps;
1627         *pp_fpo = fpo;
1628
1629         return 0;
1630
1631 out_fpo:
1632         kiblnd_hdev_decref(fpo->fpo_hdev);
1633         LIBCFS_FREE(fpo, sizeof(*fpo));
1634
1635 out_dev_attr:
1636         kfree(dev_attr);
1637
1638         return rc;
1639 }
1640
1641 static void
1642 kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, struct list_head *zombies)
1643 {
1644         if (fps->fps_net == NULL) /* initialized? */
1645                 return;
1646
1647         spin_lock(&fps->fps_lock);
1648
1649         while (!list_empty(&fps->fps_pool_list)) {
1650                 kib_fmr_pool_t *fpo = list_entry(fps->fps_pool_list.next,
1651                                                  kib_fmr_pool_t, fpo_list);
1652                 fpo->fpo_failed = 1;
1653                 list_del(&fpo->fpo_list);
1654                 if (fpo->fpo_map_count == 0)
1655                         list_add(&fpo->fpo_list, zombies);
1656                 else
1657                         list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
1658         }
1659
1660         spin_unlock(&fps->fps_lock);
1661 }
1662
1663 static void
1664 kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps)
1665 {
1666         if (fps->fps_net != NULL) { /* initialized? */
1667                 kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list);
1668                 kiblnd_destroy_fmr_pool_list(&fps->fps_pool_list);
1669         }
1670 }
1671
1672 static int
1673 kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, int ncpts,
1674                         kib_net_t *net,
1675                         struct lnet_ioctl_config_o2iblnd_tunables *tunables)
1676 {
1677         kib_fmr_pool_t *fpo;
1678         int             rc;
1679
1680         memset(fps, 0, sizeof(kib_fmr_poolset_t));
1681
1682         fps->fps_net = net;
1683         fps->fps_cpt = cpt;
1684
1685         fps->fps_pool_size = kiblnd_fmr_pool_size(tunables, ncpts);
1686         fps->fps_flush_trigger = kiblnd_fmr_flush_trigger(tunables, ncpts);
1687         fps->fps_cache = tunables->lnd_fmr_cache;
1688
1689         spin_lock_init(&fps->fps_lock);
1690         INIT_LIST_HEAD(&fps->fps_pool_list);
1691         INIT_LIST_HEAD(&fps->fps_failed_pool_list);
1692
1693         rc = kiblnd_create_fmr_pool(fps, &fpo);
1694         if (rc == 0)
1695                 list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
1696
1697         return rc;
1698 }
1699
1700 static int
1701 kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, cfs_time_t now)
1702 {
1703         if (fpo->fpo_map_count != 0) /* still in use */
1704                 return 0;
1705         if (fpo->fpo_failed)
1706                 return 1;
1707         return cfs_time_aftereq(now, fpo->fpo_deadline);
1708 }
1709
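     /* Flatten the fragments of @rd into tx->tx_pages as page-aligned
      * addresses; returns the number of entries filled in. */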
1710 static int
1711 kiblnd_map_tx_pages(kib_tx_t *tx, kib_rdma_desc_t *rd)
1712 {
1713         kib_hca_dev_t   *hdev;
1714         __u64           *pages = tx->tx_pages;
1715         int             npages;
1716         int             size;
1717         int             i;
1718
1719         hdev = tx->tx_pool->tpo_hdev;
1720
1721         for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
1722                 for (size = 0; size <  rd->rd_frags[i].rf_nob;
1723                         size += hdev->ibh_page_size) {
1724                         pages[npages++] = (rd->rd_frags[i].rf_addr &
1725                                            hdev->ibh_page_mask) + size;
1726                 }
1727         }
1728
1729         return npages;
1730 }
1731
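     /* Release a mapping obtained from kiblnd_fmr_pool_map() and drop the
      * pool's map count; idle pools past their deadline are destroyed,
      * except for the first (persistent) pool on the list. */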
1732 void
1733 kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
1734 {
1735         struct list_head   zombies = LIST_HEAD_INIT(zombies);
1736         kib_fmr_pool_t    *fpo = fmr->fmr_pool;
1737         kib_fmr_poolset_t *fps;
1738         cfs_time_t         now = cfs_time_current();
1739         kib_fmr_pool_t    *tmp;
1740         int                rc;
1741
1742         if (!fpo)
1743                 return;
1744
1745         fps = fpo->fpo_owner;
1746         if (fpo->fpo_is_fmr) {
1747                 if (fmr->fmr_pfmr) {
1748                         rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
1749                         LASSERT(!rc);
1750                         fmr->fmr_pfmr = NULL;
1751                 }
1752
1753                 if (status) {
1754                         rc = ib_flush_fmr_pool(fpo->fmr.fpo_fmr_pool);
1755                         LASSERT(!rc);
1756                 }
1757         } else {
1758                 struct kib_fast_reg_descriptor *frd = fmr->fmr_frd;
1759
1760                 if (frd) {
1761                         frd->frd_valid = false;
1762                         spin_lock(&fps->fps_lock);
1763                         list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
1764                         spin_unlock(&fps->fps_lock);
1765                         fmr->fmr_frd = NULL;
1766                 }
1767         }
1768         fmr->fmr_pool = NULL;
1769
1770         spin_lock(&fps->fps_lock);
1771         fpo->fpo_map_count--;   /* decref the pool */
1772
1773         list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) {
1774                 /* the first pool is persistent */
1775                 if (fps->fps_pool_list.next == &fpo->fpo_list)
1776                         continue;
1777
1778                 if (kiblnd_fmr_pool_is_idle(fpo, now)) {
1779                         list_move(&fpo->fpo_list, &zombies);
1780                         fps->fps_version++;
1781                 }
1782         }
1783         spin_unlock(&fps->fps_lock);
1784
1785         if (!list_empty(&zombies))
1786                 kiblnd_destroy_fmr_pool_list(&zombies);
1787 }
1788
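     /* Map the buffer described by @rd for RDMA with FMR or FastReg,
      * whichever the pool was created with.  If every pool is exhausted, a
      * new one is allocated and the scan restarts; fills in @fmr on
      * success. */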
1789 int
1790 kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx, kib_rdma_desc_t *rd,
1791                     __u32 nob, __u64 iov, kib_fmr_t *fmr)
1792 {
1793         kib_fmr_pool_t *fpo;
1794         __u64 *pages = tx->tx_pages;
1795         __u64 version;
1796         bool is_rx = (rd != tx->tx_rd);
1797         bool tx_pages_mapped = false;
1798         int npages = 0;
1799         int rc;
1800
1801 again:
1802         spin_lock(&fps->fps_lock);
1803         version = fps->fps_version;
1804         list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
1805                 fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
1806                 fpo->fpo_map_count++;
1807
1808                 if (fpo->fpo_is_fmr) {
1809                         struct ib_pool_fmr *pfmr;
1810
1811                         spin_unlock(&fps->fps_lock);
1812
1813                         if (!tx_pages_mapped) {
1814                                 npages = kiblnd_map_tx_pages(tx, rd);
1815                                 tx_pages_mapped = true;
1816                         }
1817
1818                         pfmr = ib_fmr_pool_map_phys(fpo->fmr.fpo_fmr_pool,
1819                                                     pages, npages, iov);
1820                         if (likely(!IS_ERR(pfmr))) {
1821                                 fmr->fmr_key  = is_rx ? pfmr->fmr->rkey
1822                                                       : pfmr->fmr->lkey;
1823                                 fmr->fmr_frd  = NULL;
1824                                 fmr->fmr_pfmr = pfmr;
1825                                 fmr->fmr_pool = fpo;
1826                                 return 0;
1827                         }
1828                         rc = PTR_ERR(pfmr);
1829                 } else {
1830                         if (!list_empty(&fpo->fast_reg.fpo_pool_list)) {
1831                                 struct kib_fast_reg_descriptor *frd;
1832 #ifdef HAVE_IB_MAP_MR_SG
1833                                 struct ib_reg_wr *wr;
1834                                 int n;
1835 #else
1836                                 struct ib_send_wr *wr;
1837                                 struct ib_fast_reg_page_list *frpl;
1838 #endif
1839                                 struct ib_mr *mr;
1840
1841                                 frd = list_first_entry(&fpo->fast_reg.fpo_pool_list,
1842                                                         struct kib_fast_reg_descriptor,
1843                                                         frd_list);
1844                                 list_del(&frd->frd_list);
1845                                 spin_unlock(&fps->fps_lock);
1846
1847 #ifndef HAVE_IB_MAP_MR_SG
1848                                 frpl = frd->frd_frpl;
1849 #endif
1850                                 mr   = frd->frd_mr;
1851
1852                                 if (!frd->frd_valid) {
1853                                         struct ib_send_wr *inv_wr;
1854                                         __u32 key = is_rx ? mr->rkey : mr->lkey;
1855
1856                                         inv_wr = &frd->frd_inv_wr;
1857                                         memset(inv_wr, 0, sizeof(*inv_wr));
1858                                         inv_wr->opcode = IB_WR_LOCAL_INV;
1859                                         inv_wr->wr_id = IBLND_WID_MR;
1860                                         inv_wr->ex.invalidate_rkey = key;
1861
1862                                         /* Bump the key */
1863                                         key = ib_inc_rkey(key);
1864                                         ib_update_fast_reg_key(mr, key);
1865                                 }
1866
1867 #ifdef HAVE_IB_MAP_MR_SG
1868                                 n = ib_map_mr_sg(mr, tx->tx_frags,
1869                                                  tx->tx_nfrags, PAGE_SIZE);
1870                                 if (unlikely(n != tx->tx_nfrags)) {
1871                                         CERROR("Failed to map mr %d/%d "
1872                                                "elements\n", n, tx->tx_nfrags);
                                             /* put the descriptor back and
                                              * drop the pool ref taken above
                                              * before failing the mapping */
                                             spin_lock(&fps->fps_lock);
                                             list_add_tail(&frd->frd_list,
                                                 &fpo->fast_reg.fpo_pool_list);
                                             fpo->fpo_map_count--;
                                             spin_unlock(&fps->fps_lock);
1873                                         return n < 0 ? n : -EINVAL;
1874                                 }
1875
1876                                 mr->iova = iov;
1877
1878                                 wr = &frd->frd_fastreg_wr;
1879                                 memset(wr, 0, sizeof(*wr));
1880                                 wr->wr.opcode = IB_WR_REG_MR;
1881                                 wr->wr.wr_id = IBLND_WID_MR;
1882                                 wr->wr.num_sge = 0;
1883                                 wr->wr.send_flags = 0;
1884                                 wr->mr = mr;
1885                                 wr->key = is_rx ? mr->rkey : mr->lkey;
1886                                 wr->access = (IB_ACCESS_LOCAL_WRITE |
1887                                               IB_ACCESS_REMOTE_WRITE);
1888 #else
1889                                 if (!tx_pages_mapped) {
1890                                         npages = kiblnd_map_tx_pages(tx, rd);
1891                                         tx_pages_mapped = true;
1892                                 }
1893
1894                                 LASSERT(npages <= frpl->max_page_list_len);
1895                                 memcpy(frpl->page_list, pages,
1896                                         sizeof(*pages) * npages);
1897
1898                                 /* Prepare FastReg WR */
1899                                 wr = &frd->frd_fastreg_wr;
1900                                 memset(wr, 0, sizeof(*wr));
1901                                 wr->opcode = IB_WR_FAST_REG_MR;
1902                                 wr->wr_id = IBLND_WID_MR;
1903                                 wr->wr.fast_reg.iova_start = iov;
1904                                 wr->wr.fast_reg.page_list  = frpl;
1905                                 wr->wr.fast_reg.page_list_len = npages;
1906                                 wr->wr.fast_reg.page_shift = PAGE_SHIFT;
1907                                 wr->wr.fast_reg.length = nob;
1908                                 wr->wr.fast_reg.rkey = is_rx ? mr->rkey
1909                                                              : mr->lkey;
1910                                 wr->wr.fast_reg.access_flags =
1911                                                 (IB_ACCESS_LOCAL_WRITE |
1912                                                  IB_ACCESS_REMOTE_WRITE);
1913 #endif
1914
1915                                 fmr->fmr_key  = is_rx ? mr->rkey : mr->lkey;
1916                                 fmr->fmr_frd  = frd;
1917                                 fmr->fmr_pfmr = NULL;
1918                                 fmr->fmr_pool = fpo;
1919                                 return 0;
1920                         }
1921                         spin_unlock(&fps->fps_lock);
1922                         rc = -EAGAIN; /* no free descriptor; grow the poolset */
1923                 }
1924
1925                 spin_lock(&fps->fps_lock);
1926                 fpo->fpo_map_count--;
1927                 if (rc != -EAGAIN) {
1928                         spin_unlock(&fps->fps_lock);
1929                         return rc;
1930                 }
1931
1932                 /* -EAGAIN: rescan if the poolset changed, else try the next pool */
1933                 if (version != fps->fps_version) {
1934                         spin_unlock(&fps->fps_lock);
1935                         goto again;
1936                 }
1937         }
1938
1939         if (fps->fps_increasing) {
1940                 spin_unlock(&fps->fps_lock);
1941                 CDEBUG(D_NET, "Another thread is allocating new "
1942                        "FMR pool, waiting for it to complete\n");
1943                 schedule();
1944                 goto again;
1945
1946         }
1947
1948         if (cfs_time_before(cfs_time_current(), fps->fps_next_retry)) {
1949                 /* someone failed recently */
1950                 spin_unlock(&fps->fps_lock);
1951                 return -EAGAIN;
1952         }
1953
1954         fps->fps_increasing = 1;
1955         spin_unlock(&fps->fps_lock);
1956
1957         CDEBUG(D_NET, "Allocate new FMR pool\n");
1958         rc = kiblnd_create_fmr_pool(fps, &fpo);
1959         spin_lock(&fps->fps_lock);
1960         fps->fps_increasing = 0;
1961         if (rc == 0) {
1962                 fps->fps_version++;
1963                 list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
1964         } else {
1965                 fps->fps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
1966         }
1967         spin_unlock(&fps->fps_lock);
1968
1969         goto again;
1970 }
1971
1972 static void
1973 kiblnd_fini_pool(kib_pool_t *pool)
1974 {
1975         LASSERT(list_empty(&pool->po_free_list));
1976         LASSERT(pool->po_allocated == 0);
1977
1978         CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
1979 }
1980
1981 static void
1982 kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
1983 {
1984         CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);
1985
1986         memset(pool, 0, sizeof(kib_pool_t));
1987         INIT_LIST_HEAD(&pool->po_free_list);
1988         pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
1989         pool->po_owner    = ps;
1990         pool->po_size     = size;
1991 }
1992
1993 static void
1994 kiblnd_destroy_pool_list(struct list_head *head)
1995 {
1996         kib_pool_t *pool;
1997
1998         while (!list_empty(head)) {
1999                 pool = list_entry(head->next, kib_pool_t, po_list);
2000                 list_del(&pool->po_list);
2001
2002                 LASSERT(pool->po_owner != NULL);
2003                 pool->po_owner->ps_pool_destroy(pool);
2004         }
2005 }
2006
2007 static void
2008 kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
2009 {
2010         if (ps->ps_net == NULL) /* initialized? */
2011                 return;
2012
2013         spin_lock(&ps->ps_lock);
2014         while (!list_empty(&ps->ps_pool_list)) {
2015                 kib_pool_t *po = list_entry(ps->ps_pool_list.next,
2016                                             kib_pool_t, po_list);
2017                 po->po_failed = 1;
2018                 list_del(&po->po_list);
2019                 if (po->po_allocated == 0)
2020                         list_add(&po->po_list, zombies);
2021                 else
2022                         list_add(&po->po_list, &ps->ps_failed_pool_list);
2023         }
2024         spin_unlock(&ps->ps_lock);
2025 }
2026
2027 static void
2028 kiblnd_fini_poolset(kib_poolset_t *ps)
2029 {
2030         if (ps->ps_net != NULL) { /* initialized? */
2031                 kiblnd_destroy_pool_list(&ps->ps_failed_pool_list);
2032                 kiblnd_destroy_pool_list(&ps->ps_pool_list);
2033         }
2034 }
2035
2036 static int
2037 kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
2038                     kib_net_t *net, char *name, int size,
2039                     kib_ps_pool_create_t po_create,
2040                     kib_ps_pool_destroy_t po_destroy,
2041                     kib_ps_node_init_t nd_init,
2042                     kib_ps_node_fini_t nd_fini)
2043 {
2044         kib_pool_t      *pool;
2045         int             rc;
2046
2047         memset(ps, 0, sizeof(kib_poolset_t));
2048
2049         ps->ps_cpt          = cpt;
2050         ps->ps_net          = net;
2051         ps->ps_pool_create  = po_create;
2052         ps->ps_pool_destroy = po_destroy;
2053         ps->ps_node_init    = nd_init;
2054         ps->ps_node_fini    = nd_fini;
2055         ps->ps_pool_size    = size;
2056         if (strlcpy(ps->ps_name, name, sizeof(ps->ps_name))
2057             >= sizeof(ps->ps_name))
2058                 return -E2BIG;
2059         spin_lock_init(&ps->ps_lock);
2060         INIT_LIST_HEAD(&ps->ps_pool_list);
2061         INIT_LIST_HEAD(&ps->ps_failed_pool_list);
2062
2063         rc = ps->ps_pool_create(ps, size, &pool);
2064         if (rc == 0)
2065                 list_add(&pool->po_list, &ps->ps_pool_list);
2066         else
2067                 CERROR("Failed to create the first pool for %s\n", ps->ps_name);
2068
2069         return rc;
2070 }
2071
2072 static int
2073 kiblnd_pool_is_idle(kib_pool_t *pool, cfs_time_t now)
2074 {
2075         if (pool->po_allocated != 0) /* still in use */
2076                 return 0;
2077         if (pool->po_failed)
2078                 return 1;
2079         return cfs_time_aftereq(now, pool->po_deadline);
2080 }
2081
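     /* Return @node to its pool's free list and destroy any pools that have
      * been idle past their deadline, except the first (persistent) one. */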
2082 void
2083 kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
2084 {
2085         struct list_head zombies = LIST_HEAD_INIT(zombies);
2086         kib_poolset_t   *ps = pool->po_owner;
2087         kib_pool_t      *tmp;
2088         cfs_time_t       now = cfs_time_current();
2089
2090         spin_lock(&ps->ps_lock);
2091
2092         if (ps->ps_node_fini != NULL)
2093                 ps->ps_node_fini(pool, node);
2094
2095         LASSERT(pool->po_allocated > 0);
2096         list_add(node, &pool->po_free_list);
2097         pool->po_allocated--;
2098
2099         list_for_each_entry_safe(pool, tmp, &ps->ps_pool_list, po_list) {
2100                 /* the first pool is persistent */
2101                 if (ps->ps_pool_list.next == &pool->po_list)
2102                         continue;
2103
2104                 if (kiblnd_pool_is_idle(pool, now))
2105                         list_move(&pool->po_list, &zombies);
2106         }
2107         spin_unlock(&ps->ps_lock);
2108
2109         if (!list_empty(&zombies))
2110                 kiblnd_destroy_pool_list(&zombies);
2111 }
2112
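     /* Take a node from the first pool that has one free, growing the
      * poolset when every pool is empty; waits with exponential backoff
      * while another thread grows it.  Returns NULL only after a recent
      * failed grow attempt. */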
2113 struct list_head *
2114 kiblnd_pool_alloc_node(kib_poolset_t *ps)
2115 {
2116         struct list_head        *node;
2117         kib_pool_t              *pool;
2118         int                     rc;
2119         unsigned int            interval = 1;
2120         cfs_time_t              time_before;
2121         unsigned int            trips = 0;
2122
2123 again:
2124         spin_lock(&ps->ps_lock);
2125         list_for_each_entry(pool, &ps->ps_pool_list, po_list) {
2126                 if (list_empty(&pool->po_free_list))
2127                         continue;
2128
2129                 pool->po_allocated++;
2130                 pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
2131                 node = pool->po_free_list.next;
2132                 list_del(node);
2133
2134                 if (ps->ps_node_init != NULL) {
2135                         /* still hold the lock */
2136                         ps->ps_node_init(pool, node);
2137                 }
2138                 spin_unlock(&ps->ps_lock);
2139                 return node;
2140         }
2141
2142         /* no free node in any pool; see if we can grow the poolset */
2143         if (ps->ps_increasing) {
2144                 /* another thread is allocating a new pool */
2145                 spin_unlock(&ps->ps_lock);
2146                 trips++;
2147                 CDEBUG(D_NET, "Another thread is allocating new "
2148                        "%s pool, waiting %d HZs for it to complete. "
2149                        "trips = %d\n",
2150                        ps->ps_name, interval, trips);
2151
2152                 set_current_state(TASK_INTERRUPTIBLE);
2153                 schedule_timeout(interval);
2154                 if (interval < cfs_time_seconds(1))
2155                         interval *= 2;
2156
2157                 goto again;
2158         }
2159
2160         if (cfs_time_before(cfs_time_current(), ps->ps_next_retry)) {
2161                 /* someone failed recently */
2162                 spin_unlock(&ps->ps_lock);
2163                 return NULL;
2164         }
2165
2166         ps->ps_increasing = 1;
2167         spin_unlock(&ps->ps_lock);
2168
2169         CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
2170         time_before = cfs_time_current();
2171         rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
2172         CDEBUG(D_NET, "ps_pool_create took %lu HZ to complete\n",
2173                cfs_time_current() - time_before);
2174
2175         spin_lock(&ps->ps_lock);
2176         ps->ps_increasing = 0;
2177         if (rc == 0) {
2178                 list_add_tail(&pool->po_list, &ps->ps_pool_list);
2179         } else {
2180                 ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
2181                 CERROR("Can't allocate new %s pool: out of memory\n",
2182                        ps->ps_name);
2183         }
2184         spin_unlock(&ps->ps_lock);
2185
2186         goto again;
2187 }
2188
2189 static void
2190 kiblnd_destroy_tx_pool(kib_pool_t *pool)
2191 {
2192         kib_tx_pool_t  *tpo = container_of(pool, kib_tx_pool_t, tpo_pool);
2193         int             i;
2194
2195         LASSERT (pool->po_allocated == 0);
2196
2197         if (tpo->tpo_tx_pages != NULL) {
2198                 kiblnd_unmap_tx_pool(tpo);
2199                 kiblnd_free_pages(tpo->tpo_tx_pages);
2200         }
2201
2202         if (tpo->tpo_tx_descs == NULL)
2203                 goto out;
2204
2205         for (i = 0; i < pool->po_size; i++) {
2206                 kib_tx_t *tx = &tpo->tpo_tx_descs[i];
2207
2208                 list_del(&tx->tx_list);
2209                 if (tx->tx_pages != NULL)
2210                         LIBCFS_FREE(tx->tx_pages,
2211                                     LNET_MAX_IOV *
2212                                     sizeof(*tx->tx_pages));
2213                 if (tx->tx_frags != NULL)
2214                         LIBCFS_FREE(tx->tx_frags,
2215                                     IBLND_MAX_RDMA_FRAGS *
2216                                             sizeof(*tx->tx_frags));
2217                 if (tx->tx_wrq != NULL)
2218                         LIBCFS_FREE(tx->tx_wrq,
2219                                     (1 + IBLND_MAX_RDMA_FRAGS) *
2220                                     sizeof(*tx->tx_wrq));
2221                 if (tx->tx_sge != NULL)
2222                         LIBCFS_FREE(tx->tx_sge,
2223                                     (1 + IBLND_MAX_RDMA_FRAGS) *
2224                                     sizeof(*tx->tx_sge));
2225                 if (tx->tx_rd != NULL)
2226                         LIBCFS_FREE(tx->tx_rd,
2227                                     offsetof(kib_rdma_desc_t,
2228                                              rd_frags[IBLND_MAX_RDMA_FRAGS]));
2229         }
2230
2231         LIBCFS_FREE(tpo->tpo_tx_descs,
2232                     pool->po_size * sizeof(kib_tx_t));
2233 out:
2234         kiblnd_fini_pool(pool);
2235         LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t));
2236 }
2237
2238 static int kiblnd_tx_pool_size(int ncpts)
2239 {
2240         int ntx = *kiblnd_tunables.kib_ntx / ncpts;
2241
2242         return max(IBLND_TX_POOL, ntx);
2243 }
2244
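     /* Allocate a TX pool of @size descriptors together with the pages,
      * fragments, work requests, SGEs and RDMA descriptors each one needs;
      * any allocation failure destroys the partially built pool. */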
2245 static int
2246 kiblnd_create_tx_pool(kib_poolset_t *ps, int size, kib_pool_t **pp_po)
2247 {
2248         int            i;
2249         int            npg;
2250         kib_pool_t    *pool;
2251         kib_tx_pool_t *tpo;
2252
2253         LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
2254         if (tpo == NULL) {
2255                 CERROR("Failed to allocate TX pool\n");
2256                 return -ENOMEM;
2257         }
2258
2259         pool = &tpo->tpo_pool;
2260         kiblnd_init_pool(ps, pool, size);
2261         tpo->tpo_tx_descs = NULL;
2262         tpo->tpo_tx_pages = NULL;
2263
2264         npg = (size * IBLND_MSG_SIZE + PAGE_SIZE - 1) / PAGE_SIZE;
2265         if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg) != 0) {
2266                 CERROR("Can't allocate tx pages: %d\n", npg);
2267                 LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t));
2268                 return -ENOMEM;
2269         }
2270
2271         LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt,
2272                          size * sizeof(kib_tx_t));
2273         if (tpo->tpo_tx_descs == NULL) {
2274                 CERROR("Can't allocate %d tx descriptors\n", size);
2275                 ps->ps_pool_destroy(pool);
2276                 return -ENOMEM;
2277         }
2278
2279         memset(tpo->tpo_tx_descs, 0, size * sizeof(kib_tx_t));
2280
2281         for (i = 0; i < size; i++) {
2282                 kib_tx_t *tx = &tpo->tpo_tx_descs[i];
2283
2284                 tx->tx_pool = tpo;
2285                 if (ps->ps_net->ibn_fmr_ps != NULL) {
2286                         LIBCFS_CPT_ALLOC(tx->tx_pages,
2287                                          lnet_cpt_table(), ps->ps_cpt,
2288                                          LNET_MAX_IOV * sizeof(*tx->tx_pages));
2289                         if (tx->tx_pages == NULL)
2290                                 break;
2291                 }
2292
2293                 LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
2294                                  IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_frags));
2295                 if (tx->tx_frags == NULL)
2296                         break;
2297
2298                 sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS);
2299
2300                 LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
2301                                  (1 + IBLND_MAX_RDMA_FRAGS) *
2302                                  sizeof(*tx->tx_wrq));
2303                 if (tx->tx_wrq == NULL)
2304                         break;
2305
2306                 LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt,
2307                                  (1 + IBLND_MAX_RDMA_FRAGS) *
2308                                  sizeof(*tx->tx_sge));
2309                 if (tx->tx_sge == NULL)
2310                         break;
2311
2312                 LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt,
2313                                  offsetof(kib_rdma_desc_t,
2314                                           rd_frags[IBLND_MAX_RDMA_FRAGS]));
2315                 if (tx->tx_rd == NULL)
2316                         break;
2317         }
2318
2319         if (i == size) {
2320                 kiblnd_map_tx_pool(tpo);
2321                 *pp_po = pool;
2322                 return 0;
2323         }
2324
2325         ps->ps_pool_destroy(pool);
2326         return -ENOMEM;
2327 }
2328
2329 static void
2330 kiblnd_tx_init(kib_pool_t *pool, struct list_head *node)
2331 {
2332         kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t,
2333                                              tps_poolset);
2334         kib_tx_t         *tx  = list_entry(node, kib_tx_t, tx_list);
2335
2336         tx->tx_cookie = tps->tps_next_tx_cookie++;
2337 }
2338
2339 static void
2340 kiblnd_net_fini_pools(kib_net_t *net)
2341 {
2342         int     i;
2343
2344         cfs_cpt_for_each(i, lnet_cpt_table()) {
2345                 kib_tx_poolset_t        *tps;
2346                 kib_fmr_poolset_t       *fps;
2347
2348                 if (net->ibn_tx_ps != NULL) {
2349                         tps = net->ibn_tx_ps[i];
2350                         kiblnd_fini_poolset(&tps->tps_poolset);
2351                 }
2352
2353                 if (net->ibn_fmr_ps != NULL) {
2354                         fps = net->ibn_fmr_ps[i];
2355                         kiblnd_fini_fmr_poolset(fps);
2356                 }
2357         }
2358
2359         if (net->ibn_tx_ps != NULL) {
2360                 cfs_percpt_free(net->ibn_tx_ps);
2361                 net->ibn_tx_ps = NULL;
2362         }
2363
2364         if (net->ibn_fmr_ps != NULL) {
2365                 cfs_percpt_free(net->ibn_fmr_ps);
2366                 net->ibn_fmr_ps = NULL;
2367         }
2368 }
2369
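     /* Create the per-CPT FMR poolsets (skipped when map_on_demand is
      * disabled), then the TX poolsets; FMR pools must exist before the TX
      * pools are mapped, see LU-2268. */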
2370 static int
2371 kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts, int ncpts)
2372 {
2373         struct lnet_ioctl_config_o2iblnd_tunables *tunables;
2374         unsigned long   flags;
2375         int             cpt;
2376         int             rc;
2377         int             i;
2378
2379         tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
2380
2381         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2382         if (tunables->lnd_map_on_demand == 0) {
2383                 read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
2384                                            flags);
2385                 goto create_tx_pool;
2386         }
2387
2388         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2389
2390         if (tunables->lnd_fmr_pool_size < *kiblnd_tunables.kib_ntx / 4) {
2391                 CERROR("Can't set fmr pool size (%d) < ntx / 4 (%d)\n",
2392                        tunables->lnd_fmr_pool_size,
2393                        *kiblnd_tunables.kib_ntx / 4);
2394                 rc = -EINVAL;
2395                 goto failed;
2396         }
2397
2398         /* TX pool must be created later than FMR, see LU-2268
2399          * for details */
2400         LASSERT(net->ibn_tx_ps == NULL);
2401
2402         /* premapping can fail if ibd_nmr > 1, so we always create an
2403          * FMR pool and fall back to map-on-demand if premapping fails */
2404
2405         net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
2406                                            sizeof(kib_fmr_poolset_t));
2407         if (net->ibn_fmr_ps == NULL) {
2408                 CERROR("Failed to allocate FMR pool array\n");
2409                 rc = -ENOMEM;
2410                 goto failed;
2411         }
2412
2413         for (i = 0; i < ncpts; i++) {
2414                 cpt = (cpts == NULL) ? i : cpts[i];
2415                 rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, ncpts,
2416                                              net, tunables);
2417                 if (rc != 0) {
2418                         CERROR("Can't initialize FMR pool for CPT %d: %d\n",
2419                                cpt, rc);
2420                         goto failed;
2421                 }
2422         }
2423
2424         if (i > 0)
2425                 LASSERT(i == ncpts);
2426
2427  create_tx_pool:
2428         net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
2429                                           sizeof(kib_tx_poolset_t));
2430         if (net->ibn_tx_ps == NULL) {
2431                 CERROR("Failed to allocate tx pool array\n");
2432                 rc = -ENOMEM;
2433                 goto failed;
2434         }
2435
2436         for (i = 0; i < ncpts; i++) {
2437                 cpt = (cpts == NULL) ? i : cpts[i];
2438                 rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset,
2439                                          cpt, net, "TX",
2440                                          kiblnd_tx_pool_size(ncpts),
2441                                          kiblnd_create_tx_pool,
2442                                          kiblnd_destroy_tx_pool,
2443                                          kiblnd_tx_init, NULL);
2444                 if (rc != 0) {
2445                         CERROR("Can't initialize TX pool for CPT %d: %d\n",
2446                                cpt, rc);
2447                         goto failed;
2448                 }
2449         }
2450
2451         return 0;
2452  failed:
2453         kiblnd_net_fini_pools(net);
2454         LASSERT(rc != 0);
2455         return rc;
2456 }
2457
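     /* Record the HCA's page-size parameters and maximum MR size; this LND
      * requires one MR covering the whole address space (max_mr_size ==
      * ~0ULL) and rejects anything smaller. */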
2458 static int
2459 kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
2460 {
2461         struct ib_device_attr *attr;
2462         int                    rc;
2463
2464         /* It's safe to assume a HCA can handle a page size
2465          * matching that of the native system */
2466         hdev->ibh_page_shift = PAGE_SHIFT;
2467         hdev->ibh_page_size  = 1 << PAGE_SHIFT;
2468         hdev->ibh_page_mask  = ~((__u64)hdev->ibh_page_size - 1);
2469
2470         LIBCFS_ALLOC(attr, sizeof(*attr));
2471         if (attr == NULL) {
2472                 CERROR("Out of memory\n");
2473                 return -ENOMEM;
2474         }
2475
2476         rc = ib_query_device(hdev->ibh_ibdev, attr);
2477         if (rc == 0)
2478                 hdev->ibh_mr_size = attr->max_mr_size;
2479
2480         LIBCFS_FREE(attr, sizeof(*attr));
2481
2482         if (rc != 0) {
2483                 CERROR("Failed to query IB device: %d\n", rc);
2484                 return rc;
2485         }
2486
2487         if (hdev->ibh_mr_size == ~0ULL) {
2488                 hdev->ibh_mr_shift = 64;
2489                 return 0;
2490         }
2491
2492         CERROR("Invalid mr size: "LPX64"\n", hdev->ibh_mr_size);
2493         return -EINVAL;
2494 }
2495
2496 static void
2497 kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
2498 {
2499         if (hdev->ibh_mrs == NULL)
2500                 return;
2501
2502         ib_dereg_mr(hdev->ibh_mrs);
2503
2504         hdev->ibh_mrs = NULL;
2505 }
2506
2507 void
2508 kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
2509 {
2510         kiblnd_hdev_cleanup_mrs(hdev);
2511
2512         if (hdev->ibh_pd != NULL)
2513                 ib_dealloc_pd(hdev->ibh_pd);
2514
2515         if (hdev->ibh_cmid != NULL)
2516                 rdma_destroy_id(hdev->ibh_cmid);
2517
2518         LIBCFS_FREE(hdev, sizeof(*hdev));
2519 }
2520
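     /* Query device attributes and register a single DMA MR covering the
      * whole address space on the HCA's protection domain. */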
2521 static int
2522 kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
2523 {
2524         struct ib_mr *mr;
2525         int           rc;
2526         int           acflags = IB_ACCESS_LOCAL_WRITE |
2527                                 IB_ACCESS_REMOTE_WRITE;
2528
2529         rc = kiblnd_hdev_get_attr(hdev);
2530         if (rc != 0)
2531                 return rc;
2532
2533         mr = ib_get_dma_mr(hdev->ibh_pd, acflags);
2534         if (IS_ERR(mr)) {
2535                 CERROR("Failed ib_get_dma_mr: %ld\n", PTR_ERR(mr));
2536                 kiblnd_hdev_cleanup_mrs(hdev);
2537                 return PTR_ERR(mr);
2538         }
2539
2540         hdev->ibh_mrs = mr;
2541
2542         return 0;
2543 }
2544
2545 static int
2546 kiblnd_dummy_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
2547 {       /* DUMMY */
2548         return 0;
2549 }
2550
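     /* Return >0 if @dev should fail over to another HCA, 0 if not, or a
      * negative errno; see the XXX comment below for why this is probed
      * with a scratch cmid instead of a CM event. */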
2551 static int
2552 kiblnd_dev_need_failover(kib_dev_t *dev)
2553 {
2554         struct rdma_cm_id  *cmid;
2555         struct sockaddr_in  srcaddr;
2556         struct sockaddr_in  dstaddr;
2557         int                 rc;
2558
2559         if (dev->ibd_hdev == NULL || /* initializing */
2560             dev->ibd_hdev->ibh_cmid == NULL || /* listener is dead */
2561             *kiblnd_tunables.kib_dev_failover > 1) /* debugging */
2562                 return 1;
2563
2564         /* XXX: it's UGLY, but I don't have better way to find
2565          * ib-bonding HCA failover because:
2566          *
2567          * a. no reliable CM event for HCA failover...
2568          * b. no OFED API to get ib_device for current net_device...
2569          *
2570          * We have only two choices at this point:
2571          *
2572          * a. rdma_bind_addr(), it will conflict with listener cmid
2573          * b. rdma_resolve_addr() to zero addr */
2574         cmid = kiblnd_rdma_create_id(kiblnd_dummy_callback, dev, RDMA_PS_TCP,
2575                                      IB_QPT_RC);
2576         if (IS_ERR(cmid)) {
2577                 rc = PTR_ERR(cmid);
2578                 CERROR("Failed to create cmid for failover: %d\n", rc);
2579                 return rc;
2580         }
2581
2582         memset(&srcaddr, 0, sizeof(srcaddr));
2583         srcaddr.sin_family      = AF_INET;
2584         srcaddr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
2585
2586         memset(&dstaddr, 0, sizeof(dstaddr));
2587         dstaddr.sin_family = AF_INET;
2588         rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr,
2589                                (struct sockaddr *)&dstaddr, 1);
2590         if (rc != 0 || cmid->device == NULL) {
2591                 CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
2592                        dev->ibd_ifname, &dev->ibd_ifip,
2593                        cmid->device, rc);
2594                 rdma_destroy_id(cmid);
2595                 return rc;
2596         }
2597
2598         rc = dev->ibd_hdev->ibh_ibdev != cmid->device; /* true for failover */
2599         rdma_destroy_id(cmid);
2600         return rc;
2601 }
2602
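     /* Re-bind @dev to whatever HCA currently owns its interface address:
      * destroy the old listener, set up a new cmid/PD/MR, swap the new
      * kib_hca_dev_t in under the global lock, and fail every pool built on
      * the old HCA so it is rebuilt on demand. */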
2603 int
2604 kiblnd_dev_failover(kib_dev_t *dev)
2605 {
2606         struct list_head    zombie_tpo = LIST_HEAD_INIT(zombie_tpo);
2607         struct list_head    zombie_ppo = LIST_HEAD_INIT(zombie_ppo);
2608         struct list_head    zombie_fpo = LIST_HEAD_INIT(zombie_fpo);
2609         struct rdma_cm_id  *cmid  = NULL;
2610         kib_hca_dev_t      *hdev  = NULL;
2611         kib_hca_dev_t      *old;
2612         struct ib_pd       *pd;
2613         kib_net_t          *net;
2614         struct sockaddr_in  addr;
2615         unsigned long       flags;
2616         int                 rc = 0;
2617         int                 i;
2618
2619         LASSERT (*kiblnd_tunables.kib_dev_failover > 1 ||
2620                  dev->ibd_can_failover ||
2621                  dev->ibd_hdev == NULL);
2622
2623         rc = kiblnd_dev_need_failover(dev);
2624         if (rc <= 0)
2625                 goto out;
2626
2627         if (dev->ibd_hdev != NULL &&
2628             dev->ibd_hdev->ibh_cmid != NULL) {
2629                 /* XXX it's not good to close the old listener here,
2630                  * because creating the new one can still fail.
2631                  * But we have to close it now, otherwise rdma_bind_addr
2632                  * will return EADDRINUSE... what a pain! */
2633                 write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2634
2635                 cmid = dev->ibd_hdev->ibh_cmid;
2636                 /* make next schedule of kiblnd_dev_need_failover()
2637                  * return 1 for me */
2638                 dev->ibd_hdev->ibh_cmid  = NULL;
2639                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2640
2641                 rdma_destroy_id(cmid);
2642         }
2643
2644         cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, dev, RDMA_PS_TCP,
2645                                      IB_QPT_RC);
2646         if (IS_ERR(cmid)) {
2647                 rc = PTR_ERR(cmid);
2648                 CERROR("Failed to create cmid for failover: %d\n", rc);
2649                 goto out;
2650         }
2651
2652         memset(&addr, 0, sizeof(addr));
2653         addr.sin_family      = AF_INET;
2654         addr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
2655         addr.sin_port        = htons(*kiblnd_tunables.kib_service);
2656
2657         /* Bind to failover device or port */
2658         rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr);
2659         if (rc != 0 || cmid->device == NULL) {
2660                 CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
2661                        dev->ibd_ifname, &dev->ibd_ifip,
2662                        cmid->device, rc);
2663                 rdma_destroy_id(cmid);
2664                 goto out;
2665         }
2666
2667         LIBCFS_ALLOC(hdev, sizeof(*hdev));
2668         if (hdev == NULL) {
2669                 CERROR("Failed to allocate kib_hca_dev\n");
2670                 rdma_destroy_id(cmid);
2671                 rc = -ENOMEM;
2672                 goto out;
2673         }
2674
2675         atomic_set(&hdev->ibh_ref, 1);
2676         hdev->ibh_dev   = dev;
2677         hdev->ibh_cmid  = cmid;
2678         hdev->ibh_ibdev = cmid->device;
2679
2680         pd = ib_alloc_pd(cmid->device);
2681         if (IS_ERR(pd)) {
2682                 rc = PTR_ERR(pd);
2683                 CERROR("Can't allocate PD: %d\n", rc);
2684                 goto out;
2685         }
2686
2687         hdev->ibh_pd = pd;
2688
2689         rc = rdma_listen(cmid, 0);
2690         if (rc != 0) {
2691                 CERROR("Can't start new listener: %d\n", rc);
2692                 goto out;
2693         }
2694
2695         rc = kiblnd_hdev_setup_mrs(hdev);
2696         if (rc != 0) {
2697                 CERROR("Can't setup device: %d\n", rc);
2698                 goto out;
2699         }
2700
2701         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2702
2703         old = dev->ibd_hdev;
2704         dev->ibd_hdev = hdev;   /* take over the refcount */
2705         hdev = old;
2706
2707         list_for_each_entry(net, &dev->ibd_nets, ibn_list) {
2708                 cfs_cpt_for_each(i, lnet_cpt_table()) {
2709                         kiblnd_fail_poolset(&net->ibn_tx_ps[i]->tps_poolset,
2710                                             &zombie_tpo);
2711
2712                         if (net->ibn_fmr_ps != NULL)
2713                                 kiblnd_fail_fmr_poolset(net->ibn_fmr_ps[i],
2714                                                         &zombie_fpo);
2715                 }
2716         }
2717
2718         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2719  out:
2720         if (!list_empty(&zombie_tpo))
2721                 kiblnd_destroy_pool_list(&zombie_tpo);
2722         if (!list_empty(&zombie_ppo))
2723                 kiblnd_destroy_pool_list(&zombie_ppo);
2724         if (!list_empty(&zombie_fpo))
2725                 kiblnd_destroy_fmr_pool_list(&zombie_fpo);
2726         if (hdev != NULL)
2727                 kiblnd_hdev_decref(hdev);
2728
2729         if (rc != 0)
2730                 dev->ibd_failed_failover++;
2731         else
2732                 dev->ibd_failed_failover = 0;
2733
2734         return rc;
2735 }
2736
2737 void
2738 kiblnd_destroy_dev (kib_dev_t *dev)
2739 {
2740         LASSERT (dev->ibd_nnets == 0);
2741         LASSERT(list_empty(&dev->ibd_nets));
2742
2743         list_del(&dev->ibd_fail_list);
2744         list_del(&dev->ibd_list);
2745
2746         if (dev->ibd_hdev != NULL)
2747                 kiblnd_hdev_decref(dev->ibd_hdev);
2748
2749         LIBCFS_FREE(dev, sizeof(*dev));
2750 }
2751
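     /* Create a kib_dev_t for IPoIB interface @ifname; the interface must
      * be up, and the device is initialized by a first kiblnd_dev_failover()
      * pass before being added to kib_devs. */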
2752 static kib_dev_t *
2753 kiblnd_create_dev(char *ifname)
2754 {
2755         struct net_device *netdev;
2756         kib_dev_t         *dev;
2757         __u32              netmask;
2758         __u32              ip;
2759         int                up;
2760         int                rc;
2761
2762         rc = lnet_ipif_query(ifname, &up, &ip, &netmask);
2763         if (rc != 0) {
2764                 CERROR("Can't query IPoIB interface %s: %d\n",
2765                        ifname, rc);
2766                 return NULL;
2767         }
2768
2769         if (!up) {
2770                 CERROR("Can't query IPoIB interface %s: it's down\n", ifname);
2771                 return NULL;
2772         }
2773
2774         LIBCFS_ALLOC(dev, sizeof(*dev));
2775         if (dev == NULL)
2776                 return NULL;
2777
2778         netdev = dev_get_by_name(&init_net, ifname);
2779         if (netdev == NULL) {
2780                 dev->ibd_can_failover = 0;
2781         } else {
2782                 dev->ibd_can_failover = !!(netdev->flags & IFF_MASTER);
2783                 dev_put(netdev);
2784         }
2785
2786         INIT_LIST_HEAD(&dev->ibd_nets);
2787         INIT_LIST_HEAD(&dev->ibd_list); /* not yet in kib_devs */
2788         INIT_LIST_HEAD(&dev->ibd_fail_list);
2789         dev->ibd_ifip = ip;
2790         strlcpy(dev->ibd_ifname, ifname, sizeof(dev->ibd_ifname));
2791
2792         /* initialize the device */
2793         rc = kiblnd_dev_failover(dev);
2794         if (rc != 0) {
2795                 CERROR("Can't initialize device: %d\n", rc);
2796                 LIBCFS_FREE(dev, sizeof(*dev));
2797                 return NULL;
2798         }
2799
2800         list_add_tail(&dev->ibd_list,
2801                       &kiblnd_data.kib_devs);
2802         return dev;
2803 }
2804
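     /* Tear down module-global state: flag all threads to exit, wake them
      * and wait until they are gone, then free the peer table and scheduler
      * data. */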
2805 static void
2806 kiblnd_base_shutdown(void)
2807 {
2808         struct kib_sched_info   *sched;
2809         int                     i;
2810
2811         LASSERT(list_empty(&kiblnd_data.kib_devs));
2812
2813         CDEBUG(D_MALLOC, "before LND base cleanup: kmem %d\n",
2814                atomic_read(&libcfs_kmemory));
2815
2816         switch (kiblnd_data.kib_init) {
2817         default:
2818                 LBUG();
2819
2820         case IBLND_INIT_ALL:
2821         case IBLND_INIT_DATA:
2822                 LASSERT (kiblnd_data.kib_peers != NULL);
2823                 for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
2824                         LASSERT(list_empty(&kiblnd_data.kib_peers[i]));
2825                 }
2826                 LASSERT(list_empty(&kiblnd_data.kib_connd_zombies));
2827                 LASSERT(list_empty(&kiblnd_data.kib_connd_conns));
2828                 LASSERT(list_empty(&kiblnd_data.kib_reconn_list));
2829                 LASSERT(list_empty(&kiblnd_data.kib_reconn_wait));
2830
2831                 /* flag threads to terminate; wake and wait for them to die */
2832                 kiblnd_data.kib_shutdown = 1;
2833
2834                 /* NB: we really want to stop scheduler threads net by net
2835                  * instead of for the whole module; this should be improved
2836                  * once LNet supports dynamic configuration */
2837                 cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds)
2838                         wake_up_all(&sched->ibs_waitq);
2839
2840                 wake_up_all(&kiblnd_data.kib_connd_waitq);
2841                 wake_up_all(&kiblnd_data.kib_failover_waitq);
2842
2843                 i = 2;
2844                 while (atomic_read(&kiblnd_data.kib_nthreads) != 0) {
2845                         i++;
2846                         /* power of 2? */
2847                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2848                                "Waiting for %d threads to terminate\n",
2849                                atomic_read(&kiblnd_data.kib_nthreads));
2850                         set_current_state(TASK_UNINTERRUPTIBLE);
2851                         schedule_timeout(cfs_time_seconds(1));
2852                 }
2853
2854                 /* fall through */
2855
2856         case IBLND_INIT_NOTHING:
2857                 break;
2858         }
2859
2860         if (kiblnd_data.kib_peers != NULL) {
2861                 LIBCFS_FREE(kiblnd_data.kib_peers,
2862                             sizeof(struct list_head) *
2863                             kiblnd_data.kib_peer_hash_size);
2864         }
2865
2866         if (kiblnd_data.kib_scheds != NULL)
2867                 cfs_percpt_free(kiblnd_data.kib_scheds);
2868
2869         CDEBUG(D_MALLOC, "after LND base cleanup: kmem %d\n",
2870                atomic_read(&libcfs_kmemory));
2871
2872         kiblnd_data.kib_init = IBLND_INIT_NOTHING;
2873         module_put(THIS_MODULE);
2874 }
2875
2876 static void
2877 kiblnd_shutdown (lnet_ni_t *ni)
2878 {
2879         kib_net_t        *net = ni->ni_data;
2880         rwlock_t     *g_lock = &kiblnd_data.kib_global_lock;
2881         int               i;
2882         unsigned long     flags;
2883
2884         LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL);
2885
2886         if (net == NULL)
2887                 goto out;
2888
2889         CDEBUG(D_MALLOC, "before LND net cleanup: kmem %d\n",
2890                atomic_read(&libcfs_kmemory));
2891
2892         write_lock_irqsave(g_lock, flags);
2893         net->ibn_shutdown = 1;
2894         write_unlock_irqrestore(g_lock, flags);
2895
2896         switch (net->ibn_init) {
2897         default:
2898                 LBUG();
2899
2900         case IBLND_INIT_ALL:
2901                 /* nuke all existing peers within this net */
2902                 kiblnd_del_peer(ni, LNET_NID_ANY);
2903
2904                 /* Wait for all peer state to clean up */
2905                 i = 2;
2906                 while (atomic_read(&net->ibn_npeers) != 0) {
2907                         i++;
2908                         /* power of 2? */
2909                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2910                                "%s: waiting for %d peers to disconnect\n",
2911                                libcfs_nid2str(ni->ni_nid),
2912                                atomic_read(&net->ibn_npeers));
2913                         set_current_state(TASK_UNINTERRUPTIBLE);
2914                         schedule_timeout(cfs_time_seconds(1));
2915                 }
2916
2917                 kiblnd_net_fini_pools(net);
2918
2919                 write_lock_irqsave(g_lock, flags);
2920                 LASSERT(net->ibn_dev->ibd_nnets > 0);
2921                 net->ibn_dev->ibd_nnets--;
2922                 list_del(&net->ibn_list);
2923                 write_unlock_irqrestore(g_lock, flags);
2924
2925                 /* fall through */
2926
2927         case IBLND_INIT_NOTHING:
2928                 LASSERT (atomic_read(&net->ibn_nconns) == 0);
2929
2930                 if (net->ibn_dev != NULL &&
2931                     net->ibn_dev->ibd_nnets == 0)
2932                         kiblnd_destroy_dev(net->ibn_dev);
2933
2934                 break;
2935         }
2936
2937         CDEBUG(D_MALLOC, "after LND net cleanup: kmem %d\n",
2938                atomic_read(&libcfs_kmemory));
2939
2940         net->ibn_init = IBLND_INIT_NOTHING;
2941         ni->ni_data = NULL;
2942
2943         LIBCFS_FREE(net, sizeof(*net));
2944
2945 out:
2946         if (list_empty(&kiblnd_data.kib_devs))
2947                 kiblnd_base_shutdown();
2948         return;
2949 }
2950
2951 static int
2952 kiblnd_base_startup(void)
2953 {
2954         struct kib_sched_info   *sched;
2955         int                     rc;
2956         int                     i;
2957
2958         LASSERT(kiblnd_data.kib_init == IBLND_INIT_NOTHING);
2959
2960         try_module_get(THIS_MODULE);
2961         memset(&kiblnd_data, 0, sizeof(kiblnd_data)); /* zero pointers, flags etc */
2962
2963         rwlock_init(&kiblnd_data.kib_global_lock);
2964
2965         INIT_LIST_HEAD(&kiblnd_data.kib_devs);
2966         INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs);
2967
2968         kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
2969         LIBCFS_ALLOC(kiblnd_data.kib_peers,
2970                      sizeof(struct list_head) *
2971                      kiblnd_data.kib_peer_hash_size);
2972         if (kiblnd_data.kib_peers == NULL)
2973                 goto failed;
2974
2975         for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
2976                 INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
2977
2978         spin_lock_init(&kiblnd_data.kib_connd_lock);
2979         INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
2980         INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
2981         INIT_LIST_HEAD(&kiblnd_data.kib_reconn_list);
2982         INIT_LIST_HEAD(&kiblnd_data.kib_reconn_wait);
2983
2984         init_waitqueue_head(&kiblnd_data.kib_connd_waitq);
2985         init_waitqueue_head(&kiblnd_data.kib_failover_waitq);
2986
2987         kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(),
2988                                                   sizeof(*sched));
2989         if (kiblnd_data.kib_scheds == NULL)
2990                 goto failed;
2991
2992         cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
2993                 int     nthrs;
2994
2995                 spin_lock_init(&sched->ibs_lock);
2996                 INIT_LIST_HEAD(&sched->ibs_conns);
2997                 init_waitqueue_head(&sched->ibs_waitq);
2998
2999                 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
3000                 if (*kiblnd_tunables.kib_nscheds > 0) {
3001                         nthrs = min(nthrs, *kiblnd_tunables.kib_nscheds);
3002                 } else {
3003                         /* use at most half of the CPUs; the other half is
3004                          * reserved for upper-layer modules */
3005                         nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
3006                 }
3007
3008                 sched->ibs_nthreads_max = nthrs;
3009                 sched->ibs_cpt = i;
3010         }
3011
3012         kiblnd_data.kib_error_qpa.qp_state = IB_QPS_ERR;
3013
3014         /* lists/ptrs/locks initialised */
3015         kiblnd_data.kib_init = IBLND_INIT_DATA;
3016         /*****************************************************/
3017
3018         rc = kiblnd_thread_start(kiblnd_connd, NULL, "kiblnd_connd");
3019         if (rc != 0) {
3020                 CERROR("Can't spawn o2iblnd connd: %d\n", rc);
3021                 goto failed;
3022         }
3023
3024         if (*kiblnd_tunables.kib_dev_failover != 0)
3025                 rc = kiblnd_thread_start(kiblnd_failover_thread, NULL,
3026                                          "kiblnd_failover");
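                /* NB: if failover is disabled, rc is still zero from the
                 * connd start above, so the check below fires only when the
                 * failover thread was actually attempted and failed */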
3027
3028         if (rc != 0) {
3029                 CERROR("Can't spawn o2iblnd failover thread: %d\n", rc);
3030                 goto failed;
3031         }
3032
3033         /* flag everything initialised */
3034         kiblnd_data.kib_init = IBLND_INIT_ALL;
3035         /*****************************************************/
3036
3037         return 0;
3038
3039  failed:
3040         kiblnd_base_shutdown();
3041         return -ENETDOWN;
3042 }
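
/*
 * Illustrative sketch (not compiled): the per-CPT scheduler sizing used in
 * kiblnd_base_startup() above.  Assuming IBLND_N_SCHED is 2, as defined in
 * o2iblnd.h at the time of writing, a CPT with 8 CPUs gets
 * min(max(2, 8 >> 1), 8) == 4 threads, i.e. half its CPUs, while a 1-CPU
 * CPT gets min(max(2, 0), 1) == 1; the count never exceeds the CPT weight.
 */
#if 0
static int demo_nscheds(int ncpus)
{
        int nthrs = ncpus;

        /* at least IBLND_N_SCHED, at most half the CPUs, and never more
         * than the CPT actually has */
        nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
        return nthrs;
}
#endif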
3043
3044 static int
3045 kiblnd_start_schedulers(struct kib_sched_info *sched)
3046 {
3047         int     rc = 0;
3048         int     nthrs;
3049         int     i;
3050
3051         if (sched->ibs_nthreads == 0) {
3052                 if (*kiblnd_tunables.kib_nscheds > 0) {
3053                         nthrs = sched->ibs_nthreads_max;
3054                 } else {
3055                         nthrs = cfs_cpt_weight(lnet_cpt_table(),
3056                                                sched->ibs_cpt);
3057                         nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
3058                         nthrs = min(IBLND_N_SCHED_HIGH, nthrs);
3059                 }
3060         } else {
3061                 LASSERT(sched->ibs_nthreads <= sched->ibs_nthreads_max);
3062                 /* grow by at most one thread for the new interface */
3063                 nthrs = (sched->ibs_nthreads < sched->ibs_nthreads_max);
3064         }
3065
3066         for (i = 0; i < nthrs; i++) {
3067                 long    id;
3068                 char    name[20];
3069                 id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i);
3070                 snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld",
3071                          KIB_THREAD_CPT(id), KIB_THREAD_TID(id));
3072                 rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name);
3073                 if (rc == 0)
3074                         continue;
3075
3076                 CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
3077                        sched->ibs_cpt, sched->ibs_nthreads + i, rc);
3078                 break;
3079         }
3080
3081         sched->ibs_nthreads += i;
3082         return rc;
3083 }
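
/*
 * Illustrative sketch (not compiled): kiblnd_start_schedulers() encodes the
 * CPT and the per-CPT thread index into one long with the KIB_THREAD_*
 * macros from o2iblnd.h, so kiblnd_scheduler() can recover both from its
 * opaque (void *) argument.  The exact bit split lives in the header; only
 * the round-trip is relied upon here.
 */
#if 0
        long id = KIB_THREAD_ID(3, 1);          /* CPT 3, thread index 1 */

        LASSERT(KIB_THREAD_CPT(id) == 3);       /* recover the CPT */
        LASSERT(KIB_THREAD_TID(id) == 1);       /* recover the thread index */
#endif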
3084
3085 static int
3086 kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, int ncpts)
3087 {
3088         int     cpt;
3089         int     rc;
3090         int     i;
3091
3092         for (i = 0; i < ncpts; i++) {
3093                 struct kib_sched_info *sched;
3094
3095                 cpt = (cpts == NULL) ? i : cpts[i];
3096                 sched = kiblnd_data.kib_scheds[cpt];
3097
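                /* a CPT whose scheduler pool is already running only grows
                 * when a brand-new device shows up */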
3098                 if (!newdev && sched->ibs_nthreads > 0)
3099                         continue;
3100
3101                 rc = kiblnd_start_schedulers(sched);
3102                 if (rc != 0) {
3103                         CERROR("Failed to start scheduler threads for %s\n",
3104                                dev->ibd_ifname);
3105                         return rc;
3106                 }
3107         }
3108         return 0;
3109 }
3110
3111 static kib_dev_t *
3112 kiblnd_dev_search(char *ifname)
3113 {
3114         kib_dev_t       *alias = NULL;
3115         kib_dev_t       *dev;
3116         char            *colon;
3117         char            *colon2;
3118
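        /* "ib0:1"-style names are IP aliases; to match an alias against its
         * base device (or vice versa) both names are truncated at the colon
         * for the comparison and restored afterwards */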
3119         colon = strchr(ifname, ':');
3120         list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
3121                 if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
3122                         return dev;
3123
3124                 if (alias != NULL)
3125                         continue;
3126
3127                 colon2 = strchr(dev->ibd_ifname, ':');
3128                 if (colon != NULL)
3129                         *colon = 0;
3130                 if (colon2 != NULL)
3131                         *colon2 = 0;
3132
3133                 if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
3134                         alias = dev;
3135
3136                 if (colon != NULL)
3137                         *colon = ':';
3138                 if (colon2 != NULL)
3139                         *colon2 = ':';
3140         }
3141         return alias;
3142 }
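
/*
 * Hypothetical usage sketch (not compiled): an exact name match wins, a
 * base-name (alias) match comes second, and NULL means no device is known.
 */
#if 0
        kib_dev_t *dev = kiblnd_dev_search("ib0:1");
        /* the "ib0:1" device if registered, else one named "ib0", else NULL */
#endif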
3143
3144 static int
3145 kiblnd_startup (lnet_ni_t *ni)
3146 {
3147         char                     *ifname;
3148         kib_dev_t                *ibdev = NULL;
3149         kib_net_t                *net;
3150         struct timeval            tv;
3151         unsigned long             flags;
3152         int                       rc;
3153         int                       newdev;
3154
3155         LASSERT (ni->ni_lnd == &the_o2iblnd);
3156
3157         if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
3158                 rc = kiblnd_base_startup();
3159                 if (rc != 0)
3160                         return rc;
3161         }
3162
3163         LIBCFS_ALLOC(net, sizeof(*net));
3164         ni->ni_data = net;
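        /* ni_data is assigned unconditionally: the failure path calls
         * kiblnd_shutdown(ni), which keys off ni->ni_data */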
3165         if (net == NULL)
3166                 goto failed;
3167
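        /* stamp this net instance with a microseconds-since-epoch
         * incarnation; peers compare incarnations to detect a restart of
         * the same NI */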
3168         do_gettimeofday(&tv);
3169         net->ibn_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
3170
3171         kiblnd_tunables_setup(ni);
3172
3173         if (ni->ni_interfaces[0] != NULL) {
3174                 /* Use the IPoIB interface specified in 'networks=' */
3175
3176                 CLASSERT (LNET_MAX_INTERFACES > 1);
3177                 if (ni->ni_interfaces[1] != NULL) {
3178                         CERROR("Multiple interfaces not supported\n");
3179                         goto failed;
3180                 }
3181
3182                 ifname = ni->ni_interfaces[0];
3183         } else {
3184                 ifname = *kiblnd_tunables.kib_default_ipif;
3185         }
3186
3187         if (strlen(ifname) >= sizeof(ibdev->ibd_ifname)) {
3188                 CERROR("IPoIB interface name too long: %s\n", ifname);
3189                 goto failed;
3190         }
3191
3192         ibdev = kiblnd_dev_search(ifname);
3193
3194         newdev = ibdev == NULL;
3195         /* create a new kib_dev even when only an alias matched */
3196         if (ibdev == NULL || strcmp(&ibdev->ibd_ifname[0], ifname) != 0)
3197                 ibdev = kiblnd_create_dev(ifname);
3198
3199         if (ibdev == NULL)
3200                 goto failed;
3201
3202         net->ibn_dev = ibdev;
3203         ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);
3204
3205         rc = kiblnd_dev_start_threads(ibdev, newdev,
3206                                       ni->ni_cpts, ni->ni_ncpts);
3207         if (rc != 0)
3208                 goto failed;
3209
3210         rc = kiblnd_net_init_pools(net, ni, ni->ni_cpts, ni->ni_ncpts);
3211         if (rc != 0) {
3212                 CERROR("Failed to initialize NI pools: %d\n", rc);
3213                 goto failed;
3214         }
3215
3216         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
3217         ibdev->ibd_nnets++;
3218         list_add_tail(&net->ibn_list, &ibdev->ibd_nets);
3219         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
3220
3221         net->ibn_init = IBLND_INIT_ALL;
3222
3223         return 0;
3224
3225 failed:
3226         if (net != NULL && net->ibn_dev == NULL && ibdev != NULL)
3227                 kiblnd_destroy_dev(ibdev);
3228
3229         kiblnd_shutdown(ni);
3230
3231         CDEBUG(D_NET, "kiblnd_startup failed\n");
3232         return -ENETDOWN;
3233 }
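
/*
 * Illustrative configuration note (not from this file): LNet selects this
 * LND via its standard "networks" module parameter, e.g.
 *
 *      options lnet networks="o2ib0(ib0)"
 *
 * which leads LNet to call kiblnd_startup() for NI "o2ib0" with
 * ni_interfaces[0] set to the IPoIB interface "ib0"; when no interface is
 * given, the kib_default_ipif tunable is used instead (see above).
 */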
3234
3235 static lnd_t the_o2iblnd = {
3236         .lnd_type       = O2IBLND,
3237         .lnd_startup    = kiblnd_startup,
3238         .lnd_shutdown   = kiblnd_shutdown,
3239         .lnd_ctl        = kiblnd_ctl,
3240         .lnd_query      = kiblnd_query,
3241         .lnd_send       = kiblnd_send,
3242         .lnd_recv       = kiblnd_recv,
3243 };
3244
3245 static void __exit ko2iblnd_exit(void)
3246 {
3247         lnet_unregister_lnd(&the_o2iblnd);
3248 }
3249
3250 static int __init ko2iblnd_init(void)
3251 {
3252         int rc;
3253
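        /* compile-time checks: a kib_msg_t carrying a fully-populated RDMA
         * fragment list must still fit in the IBLND_MSG_SIZE wire buffer */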
3254         CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
3255         CLASSERT(offsetof(kib_msg_t,
3256                           ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) <=
3257                  IBLND_MSG_SIZE);
3258         CLASSERT(offsetof(kib_msg_t,
3259                           ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
3260                  <= IBLND_MSG_SIZE);
3261
3262         rc = kiblnd_tunables_init();
3263         if (rc != 0)
3264                 return rc;
3265
3266         lnet_register_lnd(&the_o2iblnd);
3267
3268         return 0;
3269 }
3270
3271 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
3272 MODULE_DESCRIPTION("OpenIB gen2 LNet Network Driver");
3273 MODULE_VERSION("2.8.0");
3274 MODULE_LICENSE("GPL");
3275
3276 module_init(ko2iblnd_init);
3277 module_exit(ko2iblnd_exit);