/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/o2iblnd/o2iblnd.c
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#include <asm/page.h>
#include "o2iblnd.h"

static struct lnet_lnd the_o2iblnd;

struct kib_data kiblnd_data;

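/*
 * Simple rotating checksum over the raw message bytes: rotate the 32-bit
 * accumulator left by one and add each byte in turn.  A result of zero is
 * mapped to 1, since ibm_cksum == 0 means "no checksum".
 */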
static __u32
kiblnd_cksum (void *ptr, int nob)
{
        char  *c  = ptr;
        __u32  sum = 0;

        while (nob-- > 0)
                sum = ((sum << 1) | (sum >> 31)) + *c++;

        /* ensure I don't return 0 (== no checksum) */
        return (sum == 0) ? 1 : sum;
}

static char *
kiblnd_msgtype2str(int type)
{
        switch (type) {
        case IBLND_MSG_CONNREQ:
                return "CONNREQ";

        case IBLND_MSG_CONNACK:
                return "CONNACK";

        case IBLND_MSG_NOOP:
                return "NOOP";

        case IBLND_MSG_IMMEDIATE:
                return "IMMEDIATE";

        case IBLND_MSG_PUT_REQ:
                return "PUT_REQ";

        case IBLND_MSG_PUT_NAK:
                return "PUT_NAK";

        case IBLND_MSG_PUT_ACK:
                return "PUT_ACK";

        case IBLND_MSG_PUT_DONE:
                return "PUT_DONE";

        case IBLND_MSG_GET_REQ:
                return "GET_REQ";

        case IBLND_MSG_GET_DONE:
                return "GET_DONE";

        default:
                return "???";
        }
}

static int
kiblnd_msgtype2size(int type)
{
        const int hdr_size = offsetof(struct kib_msg, ibm_u);

        switch (type) {
        case IBLND_MSG_CONNREQ:
        case IBLND_MSG_CONNACK:
                return hdr_size + sizeof(struct kib_connparams);

        case IBLND_MSG_NOOP:
                return hdr_size;

        case IBLND_MSG_IMMEDIATE:
                return offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[0]);

        case IBLND_MSG_PUT_REQ:
                return hdr_size + sizeof(struct kib_putreq_msg);

        case IBLND_MSG_PUT_ACK:
                return hdr_size + sizeof(struct kib_putack_msg);

        case IBLND_MSG_GET_REQ:
                return hdr_size + sizeof(struct kib_get_msg);

        case IBLND_MSG_PUT_NAK:
        case IBLND_MSG_PUT_DONE:
        case IBLND_MSG_GET_DONE:
                return hdr_size + sizeof(struct kib_completion_msg);
        default:
                return -1;
        }
}

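/*
 * Validate (and, for a peer of opposite endianness, byte-swap) the RDMA
 * descriptor carried by GET_REQ and PUT_ACK messages.  Returns nonzero
 * if the fragment count or the message length is inconsistent.
 */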
static int kiblnd_unpack_rd(struct kib_msg *msg, int flip)
{
        struct kib_rdma_desc *rd;
        int                nob;
        int                n;
        int                i;

        LASSERT (msg->ibm_type == IBLND_MSG_GET_REQ ||
                 msg->ibm_type == IBLND_MSG_PUT_ACK);

        rd = msg->ibm_type == IBLND_MSG_GET_REQ ?
                              &msg->ibm_u.get.ibgm_rd :
                              &msg->ibm_u.putack.ibpam_rd;

        if (flip) {
                __swab32s(&rd->rd_key);
                __swab32s(&rd->rd_nfrags);
        }

        n = rd->rd_nfrags;

        if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) {
                CERROR("Bad nfrags: %d, should be 0 < n <= %d\n",
                       n, IBLND_MAX_RDMA_FRAGS);
                return 1;
        }

        nob = offsetof(struct kib_msg, ibm_u) +
              kiblnd_rd_msg_size(rd, msg->ibm_type, n);

        if (msg->ibm_nob < nob) {
                CERROR("Short %s: %d(%d)\n",
                       kiblnd_msgtype2str(msg->ibm_type), msg->ibm_nob, nob);
                return 1;
        }

        if (!flip)
                return 0;

        for (i = 0; i < n; i++) {
                __swab32s(&rd->rd_frags[i].rf_nob);
                __swab64s(&rd->rd_frags[i].rf_addr);
        }

        return 0;
}

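/*
 * Fill in the common message header for transmission.  The checksum is
 * computed last, over the whole message with ibm_cksum still zero, so
 * the receiver can recompute it the same way.
 */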
void kiblnd_pack_msg(struct lnet_ni *ni, struct kib_msg *msg, int version,
                     int credits, lnet_nid_t dstnid, __u64 dststamp)
{
        struct kib_net *net = ni->ni_data;

        /* CAVEAT EMPTOR! all message fields not set here should have been
         * initialised previously. */
        msg->ibm_magic    = IBLND_MSG_MAGIC;
        msg->ibm_version  = version;
        /*   ibm_type */
        msg->ibm_credits  = credits;
        /*   ibm_nob */
        msg->ibm_cksum    = 0;
        msg->ibm_srcnid   = ni->ni_nid;
        msg->ibm_srcstamp = net->ibn_incarnation;
        msg->ibm_dstnid   = dstnid;
        msg->ibm_dststamp = dststamp;

        if (*kiblnd_tunables.kib_cksum) {
                /* NB ibm_cksum zero while computing cksum */
                msg->ibm_cksum = kiblnd_cksum(msg, msg->ibm_nob);
        }
}

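/*
 * Validate an incoming message and convert it to host byte order.  The
 * magic doubles as an endianness probe: if it arrives byte-swapped, the
 * peer has opposite endianness and every multi-byte field must be
 * flipped.  NB the checksum is verified before anything is swapped.
 */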
int kiblnd_unpack_msg(struct kib_msg *msg, int nob)
{
        const int hdr_size = offsetof(struct kib_msg, ibm_u);
        __u32     msg_cksum;
        __u16     version;
        int       msg_nob;
        int       flip;

        /* 6 bytes are enough to have received magic + version */
        if (nob < 6) {
                CERROR("Short message: %d\n", nob);
                return -EPROTO;
        }

        if (msg->ibm_magic == IBLND_MSG_MAGIC) {
                flip = 0;
        } else if (msg->ibm_magic == __swab32(IBLND_MSG_MAGIC)) {
                flip = 1;
        } else {
                CERROR("Bad magic: %08x\n", msg->ibm_magic);
                return -EPROTO;
        }

        version = flip ? __swab16(msg->ibm_version) : msg->ibm_version;
        if (version != IBLND_MSG_VERSION &&
            version != IBLND_MSG_VERSION_1) {
                CERROR("Bad version: %x\n", version);
                return -EPROTO;
        }

        if (nob < hdr_size) {
                CERROR("Short message: %d\n", nob);
                return -EPROTO;
        }

        msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob;
        if (msg_nob > nob) {
                CERROR("Short message: got %d, wanted %d\n", nob, msg_nob);
                return -EPROTO;
        }

        /* checksum must be computed with ibm_cksum zero and BEFORE anything
         * gets flipped */
        msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
        msg->ibm_cksum = 0;
        if (msg_cksum != 0 &&
            msg_cksum != kiblnd_cksum(msg, msg_nob)) {
                CERROR("Bad checksum\n");
                return -EPROTO;
        }

        msg->ibm_cksum = msg_cksum;

        if (flip) {
                /* leave magic unflipped as a clue to peer_ni endianness */
                msg->ibm_version = version;
                CLASSERT (sizeof(msg->ibm_type) == 1);
                CLASSERT (sizeof(msg->ibm_credits) == 1);
                msg->ibm_nob     = msg_nob;
                __swab64s(&msg->ibm_srcnid);
                __swab64s(&msg->ibm_srcstamp);
                __swab64s(&msg->ibm_dstnid);
                __swab64s(&msg->ibm_dststamp);
        }

        if (msg->ibm_srcnid == LNET_NID_ANY) {
                CERROR("Bad src nid: %s\n", libcfs_nid2str(msg->ibm_srcnid));
                return -EPROTO;
        }

        if (msg_nob < kiblnd_msgtype2size(msg->ibm_type)) {
                CERROR("Short %s: %d(%d)\n", kiblnd_msgtype2str(msg->ibm_type),
                       msg_nob, kiblnd_msgtype2size(msg->ibm_type));
                return -EPROTO;
        }

        switch (msg->ibm_type) {
        default:
                CERROR("Unknown message type %x\n", msg->ibm_type);
                return -EPROTO;

        case IBLND_MSG_NOOP:
        case IBLND_MSG_IMMEDIATE:
        case IBLND_MSG_PUT_REQ:
                break;

        case IBLND_MSG_PUT_ACK:
        case IBLND_MSG_GET_REQ:
                if (kiblnd_unpack_rd(msg, flip))
                        return -EPROTO;
                break;

        case IBLND_MSG_PUT_NAK:
        case IBLND_MSG_PUT_DONE:
        case IBLND_MSG_GET_DONE:
                if (flip)
                        __swab32s(&msg->ibm_u.completion.ibcm_status);
                break;

        case IBLND_MSG_CONNREQ:
        case IBLND_MSG_CONNACK:
                if (flip) {
                        __swab16s(&msg->ibm_u.connparams.ibcp_queue_depth);
                        __swab16s(&msg->ibm_u.connparams.ibcp_max_frags);
                        __swab32s(&msg->ibm_u.connparams.ibcp_max_msg_size);
                }
                break;
        }
        return 0;
}

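/*
 * Allocate and initialise a peer_ni on the CPT that the NID hashes to.
 * The caller receives the single initial reference; the peer_ni is not
 * yet in the peer table.
 */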
int
kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer_ni **peerp,
                   lnet_nid_t nid)
{
        struct kib_peer_ni *peer_ni;
        struct kib_net *net = ni->ni_data;
        int cpt = lnet_cpt_of_nid(nid, ni);
        unsigned long flags;

        LASSERT(net != NULL);
        LASSERT(nid != LNET_NID_ANY);

        LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
        if (peer_ni == NULL) {
                CERROR("Cannot allocate peer_ni\n");
                return -ENOMEM;
        }

        peer_ni->ibp_ni = ni;
        peer_ni->ibp_nid = nid;
        peer_ni->ibp_error = 0;
        peer_ni->ibp_last_alive = 0;
        peer_ni->ibp_max_frags = IBLND_MAX_RDMA_FRAGS;
        peer_ni->ibp_queue_depth = ni->ni_net->net_tunables.lct_peer_tx_credits;
        atomic_set(&peer_ni->ibp_refcount, 1);  /* 1 ref for caller */

        INIT_LIST_HEAD(&peer_ni->ibp_list);     /* not in the peer_ni table yet */
        INIT_LIST_HEAD(&peer_ni->ibp_conns);
        INIT_LIST_HEAD(&peer_ni->ibp_tx_queue);

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        /* always called with a ref on ni, which prevents ni being shutdown */
        LASSERT(net->ibn_shutdown == 0);

        /* npeers only grows with the global lock held */
        atomic_inc(&net->ibn_npeers);

        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        *peerp = peer_ni;
        return 0;
}

void
kiblnd_destroy_peer(struct kib_peer_ni *peer_ni)
{
        struct kib_net *net = peer_ni->ibp_ni->ni_data;

        LASSERT(net != NULL);
        LASSERT (atomic_read(&peer_ni->ibp_refcount) == 0);
        LASSERT(!kiblnd_peer_active(peer_ni));
        LASSERT(kiblnd_peer_idle(peer_ni));
        LASSERT(list_empty(&peer_ni->ibp_tx_queue));

        LIBCFS_FREE(peer_ni, sizeof(*peer_ni));

        /* NB a peer_ni's connections keep a reference on their peer_ni until
         * they are destroyed, so we can be assured that _all_ state to do
         * with this peer_ni has been cleaned up when its refcount drops to
         * zero. */
        atomic_dec(&net->ibn_npeers);
}

struct kib_peer_ni *
kiblnd_find_peer_locked(struct lnet_ni *ni, lnet_nid_t nid)
{
        /* the caller is responsible for accounting the additional reference
         * that this creates */
        struct list_head        *peer_list = kiblnd_nid2peerlist(nid);
        struct list_head        *tmp;
        struct kib_peer_ni      *peer_ni;

        list_for_each(tmp, peer_list) {

                peer_ni = list_entry(tmp, struct kib_peer_ni, ibp_list);
                LASSERT(!kiblnd_peer_idle(peer_ni));

                /*
                 * Match a peer if its NID and the NID of the local NI it
                 * communicates over are the same. Otherwise don't match
                 * the peer, which will result in a new lnd peer being
                 * created.
                 */
                if (peer_ni->ibp_nid != nid ||
                    peer_ni->ibp_ni->ni_nid != ni->ni_nid)
                        continue;

                CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d) version: %x\n",
                       peer_ni, libcfs_nid2str(nid),
                       atomic_read(&peer_ni->ibp_refcount),
                       peer_ni->ibp_version);
                return peer_ni;
        }
        return NULL;
}

void
kiblnd_unlink_peer_locked(struct kib_peer_ni *peer_ni)
{
        LASSERT(list_empty(&peer_ni->ibp_conns));

        LASSERT (kiblnd_peer_active(peer_ni));
        list_del_init(&peer_ni->ibp_list);
        /* lose peerlist's ref */
        kiblnd_peer_decref(peer_ni);
}

static int
kiblnd_get_peer_info(struct lnet_ni *ni, int index,
                     lnet_nid_t *nidp, int *count)
{
        struct kib_peer_ni      *peer_ni;
        struct list_head        *ptmp;
        int                      i;
        unsigned long            flags;

        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {

                list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {

                        peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list);
                        LASSERT(!kiblnd_peer_idle(peer_ni));

                        if (peer_ni->ibp_ni != ni)
                                continue;

                        if (index-- > 0)
                                continue;

                        *nidp = peer_ni->ibp_nid;
                        *count = atomic_read(&peer_ni->ibp_refcount);

                        read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
                                               flags);
                        return 0;
                }
        }

        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
        return -ENOENT;
}

static void
kiblnd_del_peer_locked(struct kib_peer_ni *peer_ni)
{
        struct list_head *ctmp;
        struct list_head *cnxt;
        struct kib_conn *conn;

        if (list_empty(&peer_ni->ibp_conns)) {
                kiblnd_unlink_peer_locked(peer_ni);
        } else {
                list_for_each_safe(ctmp, cnxt, &peer_ni->ibp_conns) {
                        conn = list_entry(ctmp, struct kib_conn, ibc_list);

                        kiblnd_close_conn_locked(conn, 0);
                }
                /* NB closing peer_ni's last conn unlinked it. */
        }
        /* NB peer_ni now unlinked; might even be freed if the peer_ni table
         * had the last ref on it. */
}

static int
kiblnd_del_peer(struct lnet_ni *ni, lnet_nid_t nid)
{
        struct list_head        zombies = LIST_HEAD_INIT(zombies);
        struct list_head        *ptmp;
        struct list_head        *pnxt;
        struct kib_peer_ni      *peer_ni;
        int                     lo;
        int                     hi;
        int                     i;
        unsigned long           flags;
        int                     rc = -ENOENT;

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        if (nid != LNET_NID_ANY) {
                lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
        } else {
                lo = 0;
                hi = kiblnd_data.kib_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
                        peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list);
                        LASSERT(!kiblnd_peer_idle(peer_ni));

                        if (peer_ni->ibp_ni != ni)
                                continue;

                        if (!(nid == LNET_NID_ANY || peer_ni->ibp_nid == nid))
                                continue;

                        if (!list_empty(&peer_ni->ibp_tx_queue)) {
                                LASSERT(list_empty(&peer_ni->ibp_conns));

                                list_splice_init(&peer_ni->ibp_tx_queue,
                                                 &zombies);
                        }

                        kiblnd_del_peer_locked(peer_ni);
                        rc = 0;         /* matched something */
                }
        }

        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        kiblnd_txlist_done(&zombies, -EIO, LNET_MSG_STATUS_LOCAL_ERROR);

        return rc;
}

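/*
 * Walk the peer table to find the index'th connection on this NI.  The
 * returned conn carries a reference that the caller must drop with
 * kiblnd_conn_decref().
 */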
static struct kib_conn *
kiblnd_get_conn_by_idx(struct lnet_ni *ni, int index)
{
        struct kib_peer_ni      *peer_ni;
        struct list_head        *ptmp;
        struct kib_conn         *conn;
        struct list_head        *ctmp;
        int                     i;
        unsigned long           flags;

        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
                list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {

                        peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list);
                        LASSERT(!kiblnd_peer_idle(peer_ni));

                        if (peer_ni->ibp_ni != ni)
                                continue;

                        list_for_each(ctmp, &peer_ni->ibp_conns) {
                                if (index-- > 0)
                                        continue;

                                conn = list_entry(ctmp, struct kib_conn, ibc_list);
                                kiblnd_conn_addref(conn);
                                read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
                                                       flags);
                                return conn;
                        }
                }
        }

        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
        return NULL;
}

static void
kiblnd_debug_rx(struct kib_rx *rx)
{
        CDEBUG(D_CONSOLE, "      %p status %d msg_type %x cred %d\n",
               rx, rx->rx_status, rx->rx_msg->ibm_type,
               rx->rx_msg->ibm_credits);
}

static void
kiblnd_debug_tx(struct kib_tx *tx)
{
        CDEBUG(D_CONSOLE, "      %p snd %d q %d w %d rc %d dl %lld "
               "cookie %#llx msg %s%s type %x cred %d\n",
               tx, tx->tx_sending, tx->tx_queued, tx->tx_waiting,
               tx->tx_status, ktime_to_ns(tx->tx_deadline), tx->tx_cookie,
               tx->tx_lntmsg[0] == NULL ? "-" : "!",
               tx->tx_lntmsg[1] == NULL ? "-" : "!",
               tx->tx_msg->ibm_type, tx->tx_msg->ibm_credits);
}

void
kiblnd_debug_conn(struct kib_conn *conn)
{
        struct list_head        *tmp;
        int                     i;

        spin_lock(&conn->ibc_lock);

        CDEBUG(D_CONSOLE, "conn[%d] %p [version %x] -> %s:\n",
               atomic_read(&conn->ibc_refcount), conn,
               conn->ibc_version, libcfs_nid2str(conn->ibc_peer->ibp_nid));
        CDEBUG(D_CONSOLE, "   state %d nposted %d/%d cred %d o_cred %d "
               "r_cred %d\n", conn->ibc_state, conn->ibc_noops_posted,
               conn->ibc_nsends_posted, conn->ibc_credits,
               conn->ibc_outstanding_credits, conn->ibc_reserved_credits);
        CDEBUG(D_CONSOLE, "   comms_err %d\n", conn->ibc_comms_error);

        CDEBUG(D_CONSOLE, "   early_rxs:\n");
        list_for_each(tmp, &conn->ibc_early_rxs)
                kiblnd_debug_rx(list_entry(tmp, struct kib_rx, rx_list));

        CDEBUG(D_CONSOLE, "   tx_noops:\n");
        list_for_each(tmp, &conn->ibc_tx_noops)
                kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list));

        CDEBUG(D_CONSOLE, "   tx_queue_nocred:\n");
        list_for_each(tmp, &conn->ibc_tx_queue_nocred)
                kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list));

        CDEBUG(D_CONSOLE, "   tx_queue_rsrvd:\n");
        list_for_each(tmp, &conn->ibc_tx_queue_rsrvd)
                kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list));

        CDEBUG(D_CONSOLE, "   tx_queue:\n");
        list_for_each(tmp, &conn->ibc_tx_queue)
                kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list));

        CDEBUG(D_CONSOLE, "   active_txs:\n");
        list_for_each(tmp, &conn->ibc_active_txs)
                kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list));

        CDEBUG(D_CONSOLE, "   rxs:\n");
        for (i = 0; i < IBLND_RX_MSGS(conn); i++)
                kiblnd_debug_rx(&conn->ibc_rxs[i]);

        spin_unlock(&conn->ibc_lock);
}

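/*
 * Translate an MTU in bytes into the corresponding IB_MTU_* enum value;
 * 0 means "leave the path MTU alone" and -1 flags an invalid setting.
 */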
int
kiblnd_translate_mtu(int value)
{
        switch (value) {
        default:
                return -1;
        case 0:
                return 0;
        case 256:
                return IB_MTU_256;
        case 512:
                return IB_MTU_512;
        case 1024:
                return IB_MTU_1024;
        case 2048:
                return IB_MTU_2048;
        case 4096:
                return IB_MTU_4096;
        }
}

static void
kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid)
{
        int           mtu;

        /* XXX There is no path record for iWARP, set by netdev->change_mtu? */
        if (cmid->route.path_rec == NULL)
                return;

        mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu);
        LASSERT (mtu >= 0);
        if (mtu != 0)
                cmid->route.path_rec->mtu = mtu;
}

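/*
 * Spread connections across the device's CQ completion vectors by
 * hashing the peer NID onto a CPU within this CPT's cpumask, so that
 * completions for different peers tend to land on different vectors.
 */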
static int
kiblnd_get_completion_vector(struct kib_conn *conn, int cpt)
{
        cpumask_t       *mask;
        int             vectors;
        int             off;
        int             i;
        lnet_nid_t      ibp_nid;

        vectors = conn->ibc_cmid->device->num_comp_vectors;
        if (vectors <= 1)
                return 0;

        mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt);

        /* hash NID to CPU id in this partition... */
        ibp_nid = conn->ibc_peer->ibp_nid;
        off = do_div(ibp_nid, cpumask_weight(mask));
        for_each_cpu(i, mask) {
                if (off-- == 0)
                        return i % vectors;
        }

        LBUG();
        return 1;
}

/*
 * Get the scheduler bound to this CPT. If the scheduler has no
 * threads, which means that the CPT has no CPUs, then grab the
 * next scheduler that we can use.
 *
 * This case would be triggered if a NUMA node is configured with
 * no associated CPUs.
 */
static struct kib_sched_info *
kiblnd_get_scheduler(int cpt)
{
        struct kib_sched_info *sched;
        int i;

        sched = kiblnd_data.kib_scheds[cpt];

        if (sched->ibs_nthreads > 0)
                return sched;

        cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
                if (sched->ibs_nthreads > 0) {
                        CDEBUG(D_NET, "scheduler[%d] has no threads. selected scheduler[%d]\n",
                               cpt, sched->ibs_cpt);
                        return sched;
                }
        }

        return NULL;
}

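/*
 * Worst-case number of send work requests to size the QP for.  As an
 * illustration (hypothetical numbers): with ibc_max_frags = 256 on a
 * FastReg-capable device, each transfer may take 1 + 256 + 2 = 259 WRs,
 * so a queue depth of 8 sizes the send queue at 259 * 8 = 2072 WRs.
 */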
static unsigned int kiblnd_send_wrs(struct kib_conn *conn)
{
        /*
         * One WR for the LNet message
         * And ibc_max_frags for the transfer WRs
         */
        unsigned int ret = 1 + conn->ibc_max_frags;
        enum kib_dev_caps dev_caps = conn->ibc_hdev->ibh_dev->ibd_dev_caps;

        /* FastReg needs two extra WRs for map and invalidate */
        if (dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED)
                ret += 2;

        /* account for a maximum of ibc_queue_depth in-flight transfers */
        ret *= conn->ibc_queue_depth;
        return ret;
}

struct kib_conn *
kiblnd_create_conn(struct kib_peer_ni *peer_ni, struct rdma_cm_id *cmid,
                   int state, int version)
{
        /* CAVEAT EMPTOR:
         * If the new conn is created successfully it takes over the caller's
         * ref on 'peer_ni'.  It also "owns" 'cmid' and destroys it when it
         * itself is destroyed.  On failure, the caller's ref on 'peer_ni'
         * remains and she must dispose of 'cmid'.  (Actually I'd block
         * forever if I tried to destroy 'cmid' here since I'm called from
         * the CM which still has its ref on 'cmid'). */
        rwlock_t                *glock = &kiblnd_data.kib_global_lock;
        struct kib_net          *net = peer_ni->ibp_ni->ni_data;
        struct kib_dev          *dev;
        struct ib_qp_init_attr  *init_qp_attr;
        struct kib_sched_info   *sched;
#ifdef HAVE_IB_CQ_INIT_ATTR
        struct ib_cq_init_attr  cq_attr = {};
#endif
        struct kib_conn         *conn;
        struct ib_cq            *cq;
        unsigned long           flags;
        int                     cpt;
        int                     rc;
        int                     i;

        LASSERT(net != NULL);
        LASSERT(!in_interrupt());

        dev = net->ibn_dev;

        cpt = lnet_cpt_of_nid(peer_ni->ibp_nid, peer_ni->ibp_ni);
        sched = kiblnd_get_scheduler(cpt);

        if (sched == NULL) {
                CERROR("no schedulers available. node is unhealthy\n");
                goto failed_0;
        }

        /*
         * The cpt might have changed if we ended up selecting a scheduler
         * that is not native to this cpt. So use the scheduler's cpt
         * instead.
         */
        cpt = sched->ibs_cpt;

        LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt,
                         sizeof(*init_qp_attr));
        if (init_qp_attr == NULL) {
                CERROR("Can't allocate qp_attr for %s\n",
                       libcfs_nid2str(peer_ni->ibp_nid));
                goto failed_0;
        }

        LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn));
        if (conn == NULL) {
                CERROR("Can't allocate connection for %s\n",
                       libcfs_nid2str(peer_ni->ibp_nid));
                goto failed_1;
        }

        conn->ibc_state = IBLND_CONN_INIT;
        conn->ibc_version = version;
        conn->ibc_peer = peer_ni;               /* I take the caller's ref */
        cmid->context = conn;                   /* for future CM callbacks */
        conn->ibc_cmid = cmid;
        conn->ibc_max_frags = peer_ni->ibp_max_frags;
        conn->ibc_queue_depth = peer_ni->ibp_queue_depth;

        INIT_LIST_HEAD(&conn->ibc_early_rxs);
        INIT_LIST_HEAD(&conn->ibc_tx_noops);
        INIT_LIST_HEAD(&conn->ibc_tx_queue);
        INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
        INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
        INIT_LIST_HEAD(&conn->ibc_active_txs);
        spin_lock_init(&conn->ibc_lock);

        LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
                         sizeof(*conn->ibc_connvars));
        if (conn->ibc_connvars == NULL) {
                CERROR("Can't allocate in-progress connection state\n");
                goto failed_2;
        }

        write_lock_irqsave(glock, flags);
        if (dev->ibd_failover) {
                write_unlock_irqrestore(glock, flags);
                CERROR("%s: failover in progress\n", dev->ibd_ifname);
                goto failed_2;
        }

        if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
                /* wakeup failover thread and teardown connection */
                if (kiblnd_dev_can_failover(dev)) {
                        list_add_tail(&dev->ibd_fail_list,
                                      &kiblnd_data.kib_failed_devs);
                        wake_up(&kiblnd_data.kib_failover_waitq);
                }

                write_unlock_irqrestore(glock, flags);
                CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
                       cmid->device->name, dev->ibd_ifname);
                goto failed_2;
        }

        kiblnd_hdev_addref_locked(dev->ibd_hdev);
        conn->ibc_hdev = dev->ibd_hdev;

        kiblnd_setup_mtu_locked(cmid);

        write_unlock_irqrestore(glock, flags);

        LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
                         IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
        if (conn->ibc_rxs == NULL) {
                CERROR("Cannot allocate RX buffers\n");
                goto failed_2;
        }

        rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt,
                                IBLND_RX_MSG_PAGES(conn));
        if (rc != 0)
                goto failed_2;

        kiblnd_map_rx_descs(conn);

#ifdef HAVE_IB_CQ_INIT_ATTR
        cq_attr.cqe = IBLND_CQ_ENTRIES(conn);
        cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt);
        cq = ib_create_cq(cmid->device,
                          kiblnd_cq_completion, kiblnd_cq_event, conn,
                          &cq_attr);
#else
        cq = ib_create_cq(cmid->device,
                          kiblnd_cq_completion, kiblnd_cq_event, conn,
                          IBLND_CQ_ENTRIES(conn),
                          kiblnd_get_completion_vector(conn, cpt));
#endif
        if (IS_ERR(cq)) {
                /*
                 * on MLX-5 (possibly MLX-4 as well) this error could be
                 * hit if concurrent_sends and/or peer_tx_credits are set
                 * too high, or due to an MLX-5 bug which tries to
                 * allocate a 256KB WR cookie array via kmalloc
                 */
                CERROR("Failed to create CQ with %d CQEs: %ld\n",
                       IBLND_CQ_ENTRIES(conn), PTR_ERR(cq));
                goto failed_2;
        }

        conn->ibc_cq = cq;

        rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
        if (rc != 0) {
                CERROR("Can't request completion notification: %d\n", rc);
                goto failed_2;
        }

        init_qp_attr->event_handler = kiblnd_qp_event;
        init_qp_attr->qp_context = conn;
        init_qp_attr->cap.max_send_sge = *kiblnd_tunables.kib_wrq_sge;
        init_qp_attr->cap.max_recv_sge = 1;
        init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
        init_qp_attr->qp_type = IB_QPT_RC;
        init_qp_attr->send_cq = cq;
        init_qp_attr->recv_cq = cq;

        conn->ibc_sched = sched;

        do {
                init_qp_attr->cap.max_send_wr = kiblnd_send_wrs(conn);
                init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(conn);

                rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
                if (!rc || conn->ibc_queue_depth < 2)
                        break;

                conn->ibc_queue_depth--;
        } while (rc);

        if (rc) {
                CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d, "
                       "send_sge: %d, recv_sge: %d\n",
                       rc, init_qp_attr->cap.max_send_wr,
                       init_qp_attr->cap.max_recv_wr,
                       init_qp_attr->cap.max_send_sge,
                       init_qp_attr->cap.max_recv_sge);
                goto failed_2;
        }

        if (conn->ibc_queue_depth != peer_ni->ibp_queue_depth)
                CWARN("peer %s - queue depth reduced from %u to %u"
                      " to allow for qp creation\n",
                      libcfs_nid2str(peer_ni->ibp_nid),
                      peer_ni->ibp_queue_depth,
                      conn->ibc_queue_depth);

        LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));

        /* 1 ref for caller and each rxmsg */
        atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(conn));
        conn->ibc_nrx = IBLND_RX_MSGS(conn);

        /* post receives */
        for (i = 0; i < IBLND_RX_MSGS(conn); i++) {
                rc = kiblnd_post_rx(&conn->ibc_rxs[i], IBLND_POSTRX_NO_CREDIT);
                if (rc != 0) {
                        CERROR("Can't post rxmsg: %d\n", rc);

                        /* Make posted receives complete */
                        kiblnd_abort_receives(conn);

                        /* correct # of posted buffers
                         * NB locking needed now I'm racing with completion */
                        spin_lock_irqsave(&sched->ibs_lock, flags);
                        conn->ibc_nrx -= IBLND_RX_MSGS(conn) - i;
                        spin_unlock_irqrestore(&sched->ibs_lock, flags);

                        /* cmid will be destroyed by CM(ofed) after cm_callback
                         * returned, so we can't refer it anymore
                         * (by kiblnd_connd()->kiblnd_destroy_conn) */
                        rdma_destroy_qp(conn->ibc_cmid);
                        conn->ibc_cmid = NULL;

                        /* Drop my own and unused rxbuffer refcounts */
                        while (i++ <= IBLND_RX_MSGS(conn))
                                kiblnd_conn_decref(conn);

                        return NULL;
                }
        }

        /* Init successful! */
        LASSERT (state == IBLND_CONN_ACTIVE_CONNECT ||
                 state == IBLND_CONN_PASSIVE_WAIT);
        conn->ibc_state = state;

        /* 1 more conn */
        atomic_inc(&net->ibn_nconns);
        return conn;

 failed_2:
        kiblnd_destroy_conn(conn);
        LIBCFS_FREE(conn, sizeof(*conn));
 failed_1:
        LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
 failed_0:
        return NULL;
}

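/*
 * Final teardown once the last reference on a conn is dropped: the conn
 * must already be fully disengaged from the network (no queued or
 * posted work) before its QP, CQ and buffers are released.
 */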
void
kiblnd_destroy_conn(struct kib_conn *conn)
{
        struct rdma_cm_id  *cmid = conn->ibc_cmid;
        struct kib_peer_ni *peer_ni = conn->ibc_peer;
        int                 rc;

        LASSERT (!in_interrupt());
        LASSERT (atomic_read(&conn->ibc_refcount) == 0);
        LASSERT(list_empty(&conn->ibc_early_rxs));
        LASSERT(list_empty(&conn->ibc_tx_noops));
        LASSERT(list_empty(&conn->ibc_tx_queue));
        LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd));
        LASSERT(list_empty(&conn->ibc_tx_queue_nocred));
        LASSERT(list_empty(&conn->ibc_active_txs));
        LASSERT (conn->ibc_noops_posted == 0);
        LASSERT (conn->ibc_nsends_posted == 0);

        switch (conn->ibc_state) {
        default:
                /* conn must be completely disengaged from the network */
                LBUG();

        case IBLND_CONN_DISCONNECTED:
                /* connvars should have been freed already */
                LASSERT (conn->ibc_connvars == NULL);
                break;

        case IBLND_CONN_INIT:
                break;
        }

        /* conn->ibc_cmid might be destroyed by CM already */
        if (cmid != NULL && cmid->qp != NULL)
                rdma_destroy_qp(cmid);

        if (conn->ibc_cq != NULL) {
                rc = ib_destroy_cq(conn->ibc_cq);
                if (rc != 0)
                        CWARN("Error destroying CQ: %d\n", rc);
        }

        if (conn->ibc_rx_pages != NULL)
                kiblnd_unmap_rx_descs(conn);

        if (conn->ibc_rxs != NULL) {
                LIBCFS_FREE(conn->ibc_rxs,
                            IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
        }

        if (conn->ibc_connvars != NULL)
                LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));

        if (conn->ibc_hdev != NULL)
                kiblnd_hdev_decref(conn->ibc_hdev);

        /* See CAVEAT EMPTOR above in kiblnd_create_conn */
        if (conn->ibc_state != IBLND_CONN_INIT) {
                struct kib_net *net = peer_ni->ibp_ni->ni_data;

                kiblnd_peer_decref(peer_ni);
                rdma_destroy_id(cmid);
                atomic_dec(&net->ibn_nconns);
        }
}

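/*
 * Close every connection to this peer_ni; 'why' is the reason recorded
 * against each conn.  Returns the number of conns closed.  Called with
 * the global lock held for writing.
 */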
int
kiblnd_close_peer_conns_locked(struct kib_peer_ni *peer_ni, int why)
{
        struct kib_conn         *conn;
        struct list_head        *ctmp;
        struct list_head        *cnxt;
        int                     count = 0;

        list_for_each_safe(ctmp, cnxt, &peer_ni->ibp_conns) {
                conn = list_entry(ctmp, struct kib_conn, ibc_list);

                CDEBUG(D_NET, "Closing conn -> %s, "
                              "version: %x, reason: %d\n",
                       libcfs_nid2str(peer_ni->ibp_nid),
                       conn->ibc_version, why);

                kiblnd_close_conn_locked(conn, why);
                count++;
        }

        return count;
}

int
kiblnd_close_stale_conns_locked(struct kib_peer_ni *peer_ni,
                                int version, __u64 incarnation)
{
        struct kib_conn         *conn;
        struct list_head        *ctmp;
        struct list_head        *cnxt;
        int                     count = 0;

        list_for_each_safe(ctmp, cnxt, &peer_ni->ibp_conns) {
                conn = list_entry(ctmp, struct kib_conn, ibc_list);

                if (conn->ibc_version     == version &&
                    conn->ibc_incarnation == incarnation)
                        continue;

                CDEBUG(D_NET, "Closing stale conn -> %s version: %x, "
                              "incarnation:%#llx(%x, %#llx)\n",
                       libcfs_nid2str(peer_ni->ibp_nid),
                       conn->ibc_version, conn->ibc_incarnation,
                       version, incarnation);

                kiblnd_close_conn_locked(conn, -ESTALE);
                count++;
        }

        return count;
}

static int
kiblnd_close_matching_conns(struct lnet_ni *ni, lnet_nid_t nid)
{
        struct kib_peer_ni      *peer_ni;
        struct list_head        *ptmp;
        struct list_head        *pnxt;
        int                     lo;
        int                     hi;
        int                     i;
        unsigned long           flags;
        int                     count = 0;

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        if (nid != LNET_NID_ANY)
                lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
        else {
                lo = 0;
                hi = kiblnd_data.kib_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {

                        peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list);
                        LASSERT(!kiblnd_peer_idle(peer_ni));

                        if (peer_ni->ibp_ni != ni)
                                continue;

                        if (!(nid == LNET_NID_ANY || nid == peer_ni->ibp_nid))
                                continue;

                        count += kiblnd_close_peer_conns_locked(peer_ni, 0);
                }
        }

        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        /* wildcards always succeed */
        if (nid == LNET_NID_ANY)
                return 0;

        return (count == 0) ? -ENOENT : 0;
}

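/*
 * ioctl handler for userspace (lctl) queries: enumerate peers and
 * connections, delete peers, and close connections by NID.
 */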
static int
kiblnd_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
{
        struct libcfs_ioctl_data *data = arg;
        int                       rc = -EINVAL;

        switch (cmd) {
        case IOC_LIBCFS_GET_PEER: {
                lnet_nid_t   nid = 0;
                int          count = 0;

                rc = kiblnd_get_peer_info(ni, data->ioc_count,
                                          &nid, &count);
                data->ioc_nid    = nid;
                data->ioc_count  = count;
                break;
        }

        case IOC_LIBCFS_DEL_PEER: {
                rc = kiblnd_del_peer(ni, data->ioc_nid);
                break;
        }
        case IOC_LIBCFS_GET_CONN: {
                struct kib_conn *conn;

                rc = 0;
                conn = kiblnd_get_conn_by_idx(ni, data->ioc_count);
                if (conn == NULL) {
                        rc = -ENOENT;
                        break;
                }

                LASSERT(conn->ibc_cmid != NULL);
                data->ioc_nid = conn->ibc_peer->ibp_nid;
                if (conn->ibc_cmid->route.path_rec == NULL)
                        data->ioc_u32[0] = 0; /* iWarp has no path MTU */
                else
                        data->ioc_u32[0] =
                        ib_mtu_enum_to_int(conn->ibc_cmid->route.path_rec->mtu);
                kiblnd_conn_decref(conn);
                break;
        }
        case IOC_LIBCFS_CLOSE_CONNECTION: {
                rc = kiblnd_close_matching_conns(ni, data->ioc_nid);
                break;
        }

        default:
                break;
        }

        return rc;
}

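/*
 * LNet peer-aliveness query.  Reports when the peer_ni was last heard
 * from; if no peer_ni exists yet, a NULL tx is launched to trigger
 * peer_ni creation and connection establishment.
 */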
static void
kiblnd_query(struct lnet_ni *ni, lnet_nid_t nid, time64_t *when)
{
        time64_t last_alive = 0;
        time64_t now = ktime_get_seconds();
        rwlock_t *glock = &kiblnd_data.kib_global_lock;
        struct kib_peer_ni *peer_ni;
        unsigned long flags;

        read_lock_irqsave(glock, flags);

        peer_ni = kiblnd_find_peer_locked(ni, nid);
        if (peer_ni != NULL)
                last_alive = peer_ni->ibp_last_alive;

        read_unlock_irqrestore(glock, flags);

        if (last_alive != 0)
                *when = last_alive;

        /* peer_ni is not persistent in hash, trigger peer_ni creation
         * and connection establishment with a NULL tx */
        if (peer_ni == NULL)
                kiblnd_launch_tx(ni, NULL, nid);

        CDEBUG(D_NET, "peer_ni %s %p, alive %lld secs ago\n",
               libcfs_nid2str(nid), peer_ni,
               last_alive ? now - last_alive : -1);
        return;
}

static void
kiblnd_free_pages(struct kib_pages *p)
{
        int     npages = p->ibp_npages;
        int     i;

        for (i = 0; i < npages; i++) {
                if (p->ibp_pages[i] != NULL)
                        __free_page(p->ibp_pages[i]);
        }

        LIBCFS_FREE(p, offsetof(struct kib_pages, ibp_pages[npages]));
}

int
kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages)
{
        struct kib_pages *p;
        int i;

        LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
                         offsetof(struct kib_pages, ibp_pages[npages]));
        if (p == NULL) {
                CERROR("Can't allocate descriptor for %d pages\n", npages);
                return -ENOMEM;
        }

        memset(p, 0, offsetof(struct kib_pages, ibp_pages[npages]));
        p->ibp_npages = npages;

        for (i = 0; i < npages; i++) {
                p->ibp_pages[i] = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
                                                     GFP_NOFS);
                if (p->ibp_pages[i] == NULL) {
                        CERROR("Can't allocate page %d of %d\n", i, npages);
                        kiblnd_free_pages(p);
                        return -ENOMEM;
                }
        }

        *pp = p;
        return 0;
}

void
kiblnd_unmap_rx_descs(struct kib_conn *conn)
{
        struct kib_rx *rx;
        int       i;

        LASSERT (conn->ibc_rxs != NULL);
        LASSERT (conn->ibc_hdev != NULL);

        for (i = 0; i < IBLND_RX_MSGS(conn); i++) {
                rx = &conn->ibc_rxs[i];

                LASSERT(rx->rx_nob >= 0); /* not posted */

                kiblnd_dma_unmap_single(conn->ibc_hdev->ibh_ibdev,
                                        KIBLND_UNMAP_ADDR(rx, rx_msgunmap,
                                                          rx->rx_msgaddr),
                                        IBLND_MSG_SIZE, DMA_FROM_DEVICE);
        }

        kiblnd_free_pages(conn->ibc_rx_pages);

        conn->ibc_rx_pages = NULL;
}

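/*
 * Carve the pre-allocated RX pages into IBLND_MSG_SIZE message buffers
 * and DMA-map each one for the device.  IBLND_MSG_SIZE divides
 * PAGE_SIZE, so buffers never straddle a page boundary.
 */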
void
kiblnd_map_rx_descs(struct kib_conn *conn)
{
        struct kib_rx   *rx;
        struct page     *pg;
        int             pg_off;
        int             ipg;
        int             i;

        for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn); i++) {
                pg = conn->ibc_rx_pages->ibp_pages[ipg];
                rx = &conn->ibc_rxs[i];

                rx->rx_conn = conn;
                rx->rx_msg = (struct kib_msg *)(((char *)page_address(pg)) + pg_off);

                rx->rx_msgaddr =
                        kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev,
                                              rx->rx_msg, IBLND_MSG_SIZE,
                                              DMA_FROM_DEVICE);
                LASSERT(!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev,
                                                  rx->rx_msgaddr));
                KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);

                CDEBUG(D_NET, "rx %d: %p %#llx(%#llx)\n",
                       i, rx->rx_msg, rx->rx_msgaddr,
                       (__u64)(page_to_phys(pg) + pg_off));

                pg_off += IBLND_MSG_SIZE;
                LASSERT(pg_off <= PAGE_SIZE);

                if (pg_off == PAGE_SIZE) {
                        pg_off = 0;
                        ipg++;
                        LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn));
                }
        }
}

static void
kiblnd_unmap_tx_pool(struct kib_tx_pool *tpo)
{
        struct kib_hca_dev *hdev = tpo->tpo_hdev;
        struct kib_tx *tx;
        int i;

        LASSERT (tpo->tpo_pool.po_allocated == 0);

        if (hdev == NULL)
                return;

        for (i = 0; i < tpo->tpo_pool.po_size; i++) {
                tx = &tpo->tpo_tx_descs[i];
                kiblnd_dma_unmap_single(hdev->ibh_ibdev,
                                        KIBLND_UNMAP_ADDR(tx, tx_msgunmap,
                                                          tx->tx_msgaddr),
                                        IBLND_MSG_SIZE, DMA_TO_DEVICE);
        }

        kiblnd_hdev_decref(hdev);
        tpo->tpo_hdev = NULL;
}

static struct kib_hca_dev *
kiblnd_current_hdev(struct kib_dev *dev)
{
        struct kib_hca_dev *hdev;
        unsigned long  flags;
        int            i = 0;

        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
        while (dev->ibd_failover) {
                read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
                if (i++ % 50 == 0)
                        CDEBUG(D_NET, "%s: Wait for failover\n",
                               dev->ibd_ifname);
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(cfs_time_seconds(1) / 100);

                read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
        }

        kiblnd_hdev_addref_locked(dev->ibd_hdev);
        hdev = dev->ibd_hdev;

        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        return hdev;
}

static void
kiblnd_map_tx_pool(struct kib_tx_pool *tpo)
{
        struct kib_pages *txpgs = tpo->tpo_tx_pages;
        struct kib_pool  *pool = &tpo->tpo_pool;
        struct kib_net   *net = pool->po_owner->ps_net;
        struct kib_dev   *dev;
        struct page      *page;
        struct kib_tx    *tx;
        int               page_offset;
        int               ipage;
        int               i;

        LASSERT (net != NULL);

        dev = net->ibn_dev;

        /* pre-mapped messages are not bigger than 1 page */
        CLASSERT (IBLND_MSG_SIZE <= PAGE_SIZE);

        /* No fancy arithmetic when we do the buffer calculations */
        CLASSERT (PAGE_SIZE % IBLND_MSG_SIZE == 0);

        tpo->tpo_hdev = kiblnd_current_hdev(dev);

        for (ipage = page_offset = i = 0; i < pool->po_size; i++) {
                page = txpgs->ibp_pages[ipage];
                tx = &tpo->tpo_tx_descs[i];

                tx->tx_msg = (struct kib_msg *)(((char *)page_address(page)) +
                                                page_offset);

                tx->tx_msgaddr = kiblnd_dma_map_single(tpo->tpo_hdev->ibh_ibdev,
                                                       tx->tx_msg,
                                                       IBLND_MSG_SIZE,
                                                       DMA_TO_DEVICE);
                LASSERT(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev,
                                                  tx->tx_msgaddr));
                KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);

                list_add(&tx->tx_list, &pool->po_free_list);

                page_offset += IBLND_MSG_SIZE;
                LASSERT(page_offset <= PAGE_SIZE);

                if (page_offset == PAGE_SIZE) {
                        page_offset = 0;
                        ipage++;
                        LASSERT(ipage <= txpgs->ibp_npages);
                }
        }
}

static void
kiblnd_destroy_fmr_pool(struct kib_fmr_pool *fpo)
{
        LASSERT(fpo->fpo_map_count == 0);

        if (fpo->fpo_is_fmr && fpo->fmr.fpo_fmr_pool) {
                ib_destroy_fmr_pool(fpo->fmr.fpo_fmr_pool);
        } else {
                struct kib_fast_reg_descriptor *frd, *tmp;
                int i = 0;

                list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
                                         frd_list) {
                        list_del(&frd->frd_list);
#ifndef HAVE_IB_MAP_MR_SG
                        ib_free_fast_reg_page_list(frd->frd_frpl);
#endif
                        ib_dereg_mr(frd->frd_mr);
                        LIBCFS_FREE(frd, sizeof(*frd));
                        i++;
                }
                if (i < fpo->fast_reg.fpo_pool_size)
                        CERROR("FastReg pool still has %d regions registered\n",
                               fpo->fast_reg.fpo_pool_size - i);
        }

        if (fpo->fpo_hdev)
                kiblnd_hdev_decref(fpo->fpo_hdev);

        LIBCFS_FREE(fpo, sizeof(*fpo));
}

static void
kiblnd_destroy_fmr_pool_list(struct list_head *head)
{
        struct kib_fmr_pool *fpo, *tmp;

        list_for_each_entry_safe(fpo, tmp, head, fpo_list) {
                list_del(&fpo->fpo_list);
                kiblnd_destroy_fmr_pool(fpo);
        }
}

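/*
 * The tunable pool size and flush trigger are totals; divide them
 * evenly across CPTs, never going below the per-CPT minimum.
 */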
static int
kiblnd_fmr_pool_size(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
                     int ncpts)
{
        int size = tunables->lnd_fmr_pool_size / ncpts;

        return max(IBLND_FMR_POOL, size);
}

static int
kiblnd_fmr_flush_trigger(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
                         int ncpts)
{
        int size = tunables->lnd_fmr_flush_trigger / ncpts;

        return max(IBLND_FMR_POOL_FLUSH, size);
}

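/*
 * Create the FMR pool for one poolset via ib_create_fmr_pool();
 * -ENOSYS from the device means FMRs are not supported (e.g. adapters
 * that only do fast registration).
 */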
1518 static int kiblnd_alloc_fmr_pool(struct kib_fmr_poolset *fps,
1519                                  struct kib_fmr_pool *fpo)
1520 {
1521         struct ib_fmr_pool_param param = {
1522                 .max_pages_per_fmr = LNET_MAX_IOV,
1523                 .page_shift        = PAGE_SHIFT,
1524                 .access            = (IB_ACCESS_LOCAL_WRITE |
1525                                       IB_ACCESS_REMOTE_WRITE),
1526                 .pool_size         = fps->fps_pool_size,
1527                 .dirty_watermark   = fps->fps_flush_trigger,
1528                 .flush_function    = NULL,
1529                 .flush_arg         = NULL,
1530                 .cache             = !!fps->fps_cache };
1531         int rc = 0;
1532
1533         fpo->fmr.fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd,
1534                                                    &param);
1535         if (IS_ERR(fpo->fmr.fpo_fmr_pool)) {
1536                 rc = PTR_ERR(fpo->fmr.fpo_fmr_pool);
1537                 if (rc != -ENOSYS)
1538                         CERROR("Failed to create FMR pool: %d\n", rc);
1539                 else
1540                         CERROR("FMRs are not supported\n");
1541         }
1542         fpo->fpo_is_fmr = true;
1543
1544         return rc;
1545 }
1546
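/*
 * Populate a FastReg pool with fps_pool_size descriptors.  Each
 * descriptor owns one MR (on newer kernels IB_MR_TYPE_SG_GAPS is
 * selected when the gaps tunable is set and the device advertises
 * support, otherwise IB_MR_TYPE_MEM_REG) and, on pre-ib_map_mr_sg
 * kernels, a fast-reg page list.  Any failure unwinds everything
 * allocated so far.
 */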
1547 static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps,
1548                                   struct kib_fmr_pool *fpo,
1549                                   enum kib_dev_caps dev_caps)
1550 {
1551         struct kib_fast_reg_descriptor *frd, *tmp;
1552         int i, rc;
1553
1554         fpo->fpo_is_fmr = false;
1555
1556         INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list);
1557         fpo->fast_reg.fpo_pool_size = 0;
1558         for (i = 0; i < fps->fps_pool_size; i++) {
1559                 LIBCFS_CPT_ALLOC(frd, lnet_cpt_table(), fps->fps_cpt,
1560                                  sizeof(*frd));
1561                 if (!frd) {
1562                         CERROR("Failed to allocate a new fast_reg descriptor\n");
1563                         rc = -ENOMEM;
1564                         goto out;
1565                 }
1566                 frd->frd_mr = NULL;
1567
1568 #ifndef HAVE_IB_MAP_MR_SG
1569                 frd->frd_frpl = ib_alloc_fast_reg_page_list(fpo->fpo_hdev->ibh_ibdev,
1570                                                             LNET_MAX_IOV);
1571                 if (IS_ERR(frd->frd_frpl)) {
1572                         rc = PTR_ERR(frd->frd_frpl);
1573                         CERROR("Failed to allocate ib_fast_reg_page_list: %d\n",
1574                                 rc);
1575                         frd->frd_frpl = NULL;
1576                         goto out_middle;
1577                 }
1578 #endif
1579
1580 #ifdef HAVE_IB_ALLOC_FAST_REG_MR
1581                 frd->frd_mr = ib_alloc_fast_reg_mr(fpo->fpo_hdev->ibh_pd,
1582                                                    LNET_MAX_IOV);
1583 #else
1584                 /*
1585                  * it is expected to get here if this is an MLX-5 card.
1586                  * MLX-4 cards will always use FMR and MLX-5 cards will
1587                  * always use fast_reg. It turns out that some MLX-5 cards
1588                  * (possibly due to older FW versions) do not natively support
1589                  * gaps. So we will need to track them here.
1590                  */
1591                 frd->frd_mr = ib_alloc_mr(fpo->fpo_hdev->ibh_pd,
1592 #ifdef IB_MR_TYPE_SG_GAPS
1593                                           ((*kiblnd_tunables.kib_use_fastreg_gaps == 1) &&
1594                                            (dev_caps & IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT)) ?
1595                                                 IB_MR_TYPE_SG_GAPS :
1596                                                 IB_MR_TYPE_MEM_REG,
1597 #else
1598                                                 IB_MR_TYPE_MEM_REG,
1599 #endif
1600                                           LNET_MAX_IOV);
1601                 if ((*kiblnd_tunables.kib_use_fastreg_gaps == 1) &&
1602                     (dev_caps & IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT))
1603                         CWARN("using IB_MR_TYPE_SG_GAPS, expect a performance drop\n");
1604 #endif
1605                 if (IS_ERR(frd->frd_mr)) {
1606                         rc = PTR_ERR(frd->frd_mr);
1607                         CERROR("Failed to allocate ib_fast_reg_mr: %d\n", rc);
1608                         frd->frd_mr = NULL;
1609                         goto out_middle;
1610                 }
1611
1612                 /* There appears to be a bug in MLX5 code where you must
1613                  * invalidate the rkey of a new FastReg pool before first
1614                  * using it. Thus, I am marking the FRD invalid here. */
1615                 frd->frd_valid = false;
1616
1617                 list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
1618                 fpo->fast_reg.fpo_pool_size++;
1619         }
1620
1621         return 0;
1622
1623 out_middle:
1624         if (frd->frd_mr)
1625                 ib_dereg_mr(frd->frd_mr);
1626 #ifndef HAVE_IB_MAP_MR_SG
1627         if (frd->frd_frpl)
1628                 ib_free_fast_reg_page_list(frd->frd_frpl);
1629 #endif
1630         LIBCFS_FREE(frd, sizeof(*frd));
1631
1632 out:
1633         list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
1634                                  frd_list) {
1635                 list_del(&frd->frd_list);
1636 #ifndef HAVE_IB_MAP_MR_SG
1637                 ib_free_fast_reg_page_list(frd->frd_frpl);
1638 #endif
1639                 ib_dereg_mr(frd->frd_mr);
1640                 LIBCFS_FREE(frd, sizeof(*frd));
1641         }
1642
1643         return rc;
1644 }
1645
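/*
 * Allocate one pool for the poolset, choosing FMR or FastReg from the
 * device capability flags detected in kiblnd_hdev_get_attr().
 */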
1646 static int kiblnd_create_fmr_pool(struct kib_fmr_poolset *fps,
1647                                   struct kib_fmr_pool **pp_fpo)
1648 {
1649         struct kib_dev *dev = fps->fps_net->ibn_dev;
1650         struct kib_fmr_pool *fpo;
1651         int rc;
1652
1653         LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
1654         if (!fpo)
1655                 return -ENOMEM;
1656
1657         memset(fpo, 0, sizeof(*fpo));
1658
1659         fpo->fpo_hdev = kiblnd_current_hdev(dev);
1660
1661         if (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)
1662                 rc = kiblnd_alloc_fmr_pool(fps, fpo);
1663         else
1664                 rc = kiblnd_alloc_freg_pool(fps, fpo, dev->ibd_dev_caps);
1665         if (rc)
1666                 goto out_fpo;
1667
1668         fpo->fpo_deadline = ktime_get_seconds() + IBLND_POOL_DEADLINE;
1669         fpo->fpo_owner = fps;
1670         *pp_fpo = fpo;
1671
1672         return 0;
1673
1674 out_fpo:
1675         kiblnd_hdev_decref(fpo->fpo_hdev);
1676         LIBCFS_FREE(fpo, sizeof(*fpo));
1677         return rc;
1678 }
1679
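/*
 * Mark every pool in the poolset failed (e.g. across an HCA failover).
 * Unused pools are handed back on @zombies for immediate destruction;
 * pools with outstanding mappings are parked on the failed list until
 * their last mapping is released.
 */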
1680 static void
1681 kiblnd_fail_fmr_poolset(struct kib_fmr_poolset *fps, struct list_head *zombies)
1682 {
1683         if (fps->fps_net == NULL) /* initialized? */
1684                 return;
1685
1686         spin_lock(&fps->fps_lock);
1687
1688         while (!list_empty(&fps->fps_pool_list)) {
1689                 struct kib_fmr_pool *fpo = list_entry(fps->fps_pool_list.next,
1690                                                       struct kib_fmr_pool,
1691                                                       fpo_list);
1692
1693                 fpo->fpo_failed = 1;
1694                 list_del(&fpo->fpo_list);
1695                 if (fpo->fpo_map_count == 0)
1696                         list_add(&fpo->fpo_list, zombies);
1697                 else
1698                         list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
1699         }
1700
1701         spin_unlock(&fps->fps_lock);
1702 }
1703
1704 static void
1705 kiblnd_fini_fmr_poolset(struct kib_fmr_poolset *fps)
1706 {
1707         if (fps->fps_net != NULL) { /* initialized? */
1708                 kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list);
1709                 kiblnd_destroy_fmr_pool_list(&fps->fps_pool_list);
1710         }
1711 }
1712
1713 static int
1714 kiblnd_init_fmr_poolset(struct kib_fmr_poolset *fps, int cpt, int ncpts,
1715                         struct kib_net *net,
1716                         struct lnet_ioctl_config_o2iblnd_tunables *tunables)
1717 {
1718         struct kib_fmr_pool *fpo;
1719         int rc;
1720
1721         memset(fps, 0, sizeof(struct kib_fmr_poolset));
1722
1723         fps->fps_net = net;
1724         fps->fps_cpt = cpt;
1725
1726         fps->fps_pool_size = kiblnd_fmr_pool_size(tunables, ncpts);
1727         fps->fps_flush_trigger = kiblnd_fmr_flush_trigger(tunables, ncpts);
1728         fps->fps_cache = tunables->lnd_fmr_cache;
1729
1730         spin_lock_init(&fps->fps_lock);
1731         INIT_LIST_HEAD(&fps->fps_pool_list);
1732         INIT_LIST_HEAD(&fps->fps_failed_pool_list);
1733
1734         rc = kiblnd_create_fmr_pool(fps, &fpo);
1735         if (rc == 0)
1736                 list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
1737
1738         return rc;
1739 }
1740
1741 static int
1742 kiblnd_fmr_pool_is_idle(struct kib_fmr_pool *fpo, time64_t now)
1743 {
1744         if (fpo->fpo_map_count != 0) /* still in use */
1745                 return 0;
1746         if (fpo->fpo_failed)
1747                 return 1;
1748         return now >= fpo->fpo_deadline;
1749 }
1750
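/*
 * Flatten the RDMA descriptor into tx->tx_pages as an array of
 * page-aligned DMA addresses suitable for ib_fmr_pool_map_phys().
 * Returns the number of pages written.
 */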
1751 static int
1752 kiblnd_map_tx_pages(struct kib_tx *tx, struct kib_rdma_desc *rd)
1753 {
1754         struct kib_hca_dev *hdev;
1755         __u64           *pages = tx->tx_pages;
1756         int             npages;
1757         int             size;
1758         int             i;
1759
1760         hdev = tx->tx_pool->tpo_hdev;
1761
1762         for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
1763                 for (size = 0; size < rd->rd_frags[i].rf_nob;
1764                         size += hdev->ibh_page_size) {
1765                         pages[npages++] = (rd->rd_frags[i].rf_addr &
1766                                            hdev->ibh_page_mask) + size;
1767                 }
1768         }
1769
1770         return npages;
1771 }
1772
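/*
 * Release a mapping obtained from kiblnd_fmr_pool_map().  FMR mappings
 * are unmapped (and the pool flushed on error status); FastReg
 * descriptors are invalidated and returned to their pool's free list.
 * Afterwards any pool other than the persistent first one that has
 * been idle past its deadline is destroyed.
 */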
1773 void
1774 kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status)
1775 {
1776         struct list_head zombies = LIST_HEAD_INIT(zombies);
1777         struct kib_fmr_pool *fpo = fmr->fmr_pool;
1778         struct kib_fmr_poolset *fps;
1779         time64_t now = ktime_get_seconds();
1780         struct kib_fmr_pool *tmp;
1781         int rc;
1782
1783         if (!fpo)
1784                 return;
1785
1786         fps = fpo->fpo_owner;
1787         if (fpo->fpo_is_fmr) {
1788                 if (fmr->fmr_pfmr) {
1789                         rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
1790                         LASSERT(!rc);
1791                         fmr->fmr_pfmr = NULL;
1792                 }
1793
1794                 if (status) {
1795                         rc = ib_flush_fmr_pool(fpo->fmr.fpo_fmr_pool);
1796                         LASSERT(!rc);
1797                 }
1798         } else {
1799                 struct kib_fast_reg_descriptor *frd = fmr->fmr_frd;
1800
1801                 if (frd) {
1802                         frd->frd_valid = false;
1803                         spin_lock(&fps->fps_lock);
1804                         list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
1805                         spin_unlock(&fps->fps_lock);
1806                         fmr->fmr_frd = NULL;
1807                 }
1808         }
1809         fmr->fmr_pool = NULL;
1810
1811         spin_lock(&fps->fps_lock);
1812         fpo->fpo_map_count--;   /* decref the pool */
1813
1814         list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) {
1815                 /* the first pool is persistent */
1816                 if (fps->fps_pool_list.next == &fpo->fpo_list)
1817                         continue;
1818
1819                 if (kiblnd_fmr_pool_is_idle(fpo, now)) {
1820                         list_move(&fpo->fpo_list, &zombies);
1821                         fps->fps_version++;
1822                 }
1823         }
1824         spin_unlock(&fps->fps_lock);
1825
1826         if (!list_empty(&zombies))
1827                 kiblnd_destroy_fmr_pool_list(&zombies);
1828 }
1829
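/*
 * Map the fragments described by @rd through the poolset's current
 * FMR/FastReg pools.  If no pool has capacity, a single thread grows
 * the poolset while the others reschedule and retry (fps_version
 * detects the change); a recent creation failure (fps_next_retry)
 * makes the call fail fast with -EAGAIN instead.
 */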
1830 int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
1831                         struct kib_rdma_desc *rd, u32 nob, u64 iov,
1832                         struct kib_fmr *fmr)
1833 {
1834         struct kib_fmr_pool *fpo;
1835         __u64 *pages = tx->tx_pages;
1836         __u64 version;
1837         bool is_rx = (rd != tx->tx_rd);
1838         bool tx_pages_mapped = false;
1839         int npages = 0;
1840         int rc;
1841
1842 again:
1843         spin_lock(&fps->fps_lock);
1844         version = fps->fps_version;
1845         list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
1846                 fpo->fpo_deadline = ktime_get_seconds() + IBLND_POOL_DEADLINE;
1847                 fpo->fpo_map_count++;
1848
1849                 if (fpo->fpo_is_fmr) {
1850                         struct ib_pool_fmr *pfmr;
1851
1852                         spin_unlock(&fps->fps_lock);
1853
1854                         if (!tx_pages_mapped) {
1855                                 npages = kiblnd_map_tx_pages(tx, rd);
1856                                 tx_pages_mapped = true;
1857                         }
1858
1859                         pfmr = ib_fmr_pool_map_phys(fpo->fmr.fpo_fmr_pool,
1860                                                     pages, npages, iov);
1861                         if (likely(!IS_ERR(pfmr))) {
1862                                 fmr->fmr_key  = is_rx ? pfmr->fmr->rkey
1863                                                       : pfmr->fmr->lkey;
1864                                 fmr->fmr_frd  = NULL;
1865                                 fmr->fmr_pfmr = pfmr;
1866                                 fmr->fmr_pool = fpo;
1867                                 return 0;
1868                         }
1869                         rc = PTR_ERR(pfmr);
1870                 } else {
1871                         if (!list_empty(&fpo->fast_reg.fpo_pool_list)) {
1872                                 struct kib_fast_reg_descriptor *frd;
1873 #ifdef HAVE_IB_MAP_MR_SG
1874                                 struct ib_reg_wr *wr;
1875                                 int n;
1876 #else
1877                                 struct ib_rdma_wr *wr;
1878                                 struct ib_fast_reg_page_list *frpl;
1879 #endif
1880                                 struct ib_mr *mr;
1881
1882                                 frd = list_first_entry(&fpo->fast_reg.fpo_pool_list,
1883                                                         struct kib_fast_reg_descriptor,
1884                                                         frd_list);
1885                                 list_del(&frd->frd_list);
1886                                 spin_unlock(&fps->fps_lock);
1887
1888 #ifndef HAVE_IB_MAP_MR_SG
1889                                 frpl = frd->frd_frpl;
1890 #endif
1891                                 mr   = frd->frd_mr;
1892
1893                                 if (!frd->frd_valid) {
1894                                         struct ib_rdma_wr *inv_wr;
1895                                         __u32 key = is_rx ? mr->rkey : mr->lkey;
1896
1897                                         inv_wr = &frd->frd_inv_wr;
1898                                         memset(inv_wr, 0, sizeof(*inv_wr));
1899
1900                                         inv_wr->wr.opcode = IB_WR_LOCAL_INV;
1901                                         inv_wr->wr.wr_id  = IBLND_WID_MR;
1902                                         inv_wr->wr.ex.invalidate_rkey = key;
1903
1904                                         /* Bump the key */
1905                                         key = ib_inc_rkey(key);
1906                                         ib_update_fast_reg_key(mr, key);
1907                                 }
1908
1909 #ifdef HAVE_IB_MAP_MR_SG
1910 #ifdef HAVE_IB_MAP_MR_SG_5ARGS
1911                                 n = ib_map_mr_sg(mr, tx->tx_frags,
1912                                                  tx->tx_nfrags, NULL, PAGE_SIZE);
1913 #else
1914                                 n = ib_map_mr_sg(mr, tx->tx_frags,
1915                                                  tx->tx_nfrags, PAGE_SIZE);
1916 #endif
1917                                 if (unlikely(n != tx->tx_nfrags)) {
1918                                         CERROR("Failed to map mr %d/%d "
1919                                                "elements\n", n, tx->tx_nfrags);
1920                                         return n < 0 ? n : -EINVAL;
1921                                 }
1922
1923                                 wr = &frd->frd_fastreg_wr;
1924                                 memset(wr, 0, sizeof(*wr));
1925
1926                                 wr->wr.opcode = IB_WR_REG_MR;
1927                                 wr->wr.wr_id  = IBLND_WID_MR;
1928                                 wr->wr.num_sge = 0;
1929                                 wr->wr.send_flags = 0;
1930                                 wr->mr = mr;
1931                                 wr->key = is_rx ? mr->rkey : mr->lkey;
1932                                 wr->access = (IB_ACCESS_LOCAL_WRITE |
1933                                               IB_ACCESS_REMOTE_WRITE);
1934 #else
1935                                 if (!tx_pages_mapped) {
1936                                         npages = kiblnd_map_tx_pages(tx, rd);
1937                                         tx_pages_mapped = true;
1938                                 }
1939
1940                                 LASSERT(npages <= frpl->max_page_list_len);
1941                                 memcpy(frpl->page_list, pages,
1942                                         sizeof(*pages) * npages);
1943
1944                                 /* Prepare FastReg WR */
1945                                 wr = &frd->frd_fastreg_wr;
1946                                 memset(wr, 0, sizeof(*wr));
1947
1948                                 wr->wr.opcode = IB_WR_FAST_REG_MR;
1949                                 wr->wr.wr_id  = IBLND_WID_MR;
1950
1951                                 wr->wr.wr.fast_reg.iova_start = iov;
1952                                 wr->wr.wr.fast_reg.page_list  = frpl;
1953                                 wr->wr.wr.fast_reg.page_list_len = npages;
1954                                 wr->wr.wr.fast_reg.page_shift = PAGE_SHIFT;
1955                                 wr->wr.wr.fast_reg.length = nob;
1956                                 wr->wr.wr.fast_reg.rkey =
1957                                                 is_rx ? mr->rkey : mr->lkey;
1958                                 wr->wr.wr.fast_reg.access_flags =
1959                                                 (IB_ACCESS_LOCAL_WRITE |
1960                                                  IB_ACCESS_REMOTE_WRITE);
1961 #endif
1962
1963                                 fmr->fmr_key  = is_rx ? mr->rkey : mr->lkey;
1964                                 fmr->fmr_frd  = frd;
1965                                 fmr->fmr_pfmr = NULL;
1966                                 fmr->fmr_pool = fpo;
1967                                 return 0;
1968                         }
1969                         spin_unlock(&fps->fps_lock);
1970                         rc = -EAGAIN;
1971                 }
1972
1973                 spin_lock(&fps->fps_lock);
1974                 fpo->fpo_map_count--;
1975                 if (rc != -EAGAIN) {
1976                         spin_unlock(&fps->fps_lock);
1977                         return rc;
1978                 }
1979
1980                 /* EAGAIN and ... */
1981                 if (version != fps->fps_version) {
1982                         spin_unlock(&fps->fps_lock);
1983                         goto again;
1984                 }
1985         }
1986
1987         if (fps->fps_increasing) {
1988                 spin_unlock(&fps->fps_lock);
1989                 CDEBUG(D_NET, "Another thread is allocating new "
1990                        "FMR pool, waiting for it to complete\n");
1991                 schedule();
1992                 goto again;
1993
1994         }
1995
1996         if (ktime_get_seconds() < fps->fps_next_retry) {
1997                 /* someone failed recently */
1998                 spin_unlock(&fps->fps_lock);
1999                 return -EAGAIN;
2000         }
2001
2002         fps->fps_increasing = 1;
2003         spin_unlock(&fps->fps_lock);
2004
2005         CDEBUG(D_NET, "Allocate new FMR pool\n");
2006         rc = kiblnd_create_fmr_pool(fps, &fpo);
2007         spin_lock(&fps->fps_lock);
2008         fps->fps_increasing = 0;
2009         if (rc == 0) {
2010                 fps->fps_version++;
2011                 list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
2012         } else {
2013                 fps->fps_next_retry = ktime_get_seconds() + IBLND_POOL_RETRY;
2014         }
2015         spin_unlock(&fps->fps_lock);
2016
2017         goto again;
2018 }
2019
2020 static void
2021 kiblnd_fini_pool(struct kib_pool *pool)
2022 {
2023         LASSERT(list_empty(&pool->po_free_list));
2024         LASSERT(pool->po_allocated == 0);
2025
2026         CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
2027 }
2028
2029 static void
2030 kiblnd_init_pool(struct kib_poolset *ps, struct kib_pool *pool, int size)
2031 {
2032         CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);
2033
2034         memset(pool, 0, sizeof(struct kib_pool));
2035         INIT_LIST_HEAD(&pool->po_free_list);
2036         pool->po_deadline = ktime_get_seconds() + IBLND_POOL_DEADLINE;
2037         pool->po_owner = ps;
2038         pool->po_size = size;
2039 }
2040
2041 static void
2042 kiblnd_destroy_pool_list(struct list_head *head)
2043 {
2044         struct kib_pool *pool;
2045
2046         while (!list_empty(head)) {
2047                 pool = list_entry(head->next, struct kib_pool, po_list);
2048                 list_del(&pool->po_list);
2049
2050                 LASSERT(pool->po_owner != NULL);
2051                 pool->po_owner->ps_pool_destroy(pool);
2052         }
2053 }
2054
2055 static void
2056 kiblnd_fail_poolset(struct kib_poolset *ps, struct list_head *zombies)
2057 {
2058         if (ps->ps_net == NULL) /* intialized? */
2059                 return;
2060
2061         spin_lock(&ps->ps_lock);
2062         while (!list_empty(&ps->ps_pool_list)) {
2063                 struct kib_pool *po = list_entry(ps->ps_pool_list.next,
2064                                                  struct kib_pool, po_list);
2065
2066                 po->po_failed = 1;
2067                 list_del(&po->po_list);
2068                 if (po->po_allocated == 0)
2069                         list_add(&po->po_list, zombies);
2070                 else
2071                         list_add(&po->po_list, &ps->ps_failed_pool_list);
2072         }
2073         spin_unlock(&ps->ps_lock);
2074 }
2075
2076 static void
2077 kiblnd_fini_poolset(struct kib_poolset *ps)
2078 {
2079         if (ps->ps_net != NULL) { /* initialized? */
2080                 kiblnd_destroy_pool_list(&ps->ps_failed_pool_list);
2081                 kiblnd_destroy_pool_list(&ps->ps_pool_list);
2082         }
2083 }
2084
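/*
 * Generic poolset initialisation: record the create/destroy/node
 * callbacks supplied by the specific pool type (in this file only the
 * TX pools use this) and eagerly create the first, persistent pool.
 */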
2085 static int
2086 kiblnd_init_poolset(struct kib_poolset *ps, int cpt,
2087                     struct kib_net *net, char *name, int size,
2088                     kib_ps_pool_create_t po_create,
2089                     kib_ps_pool_destroy_t po_destroy,
2090                     kib_ps_node_init_t nd_init,
2091                     kib_ps_node_fini_t nd_fini)
2092 {
2093         struct kib_pool *pool;
2094         int rc;
2095
2096         memset(ps, 0, sizeof(struct kib_poolset));
2097
2098         ps->ps_cpt          = cpt;
2099         ps->ps_net          = net;
2100         ps->ps_pool_create  = po_create;
2101         ps->ps_pool_destroy = po_destroy;
2102         ps->ps_node_init    = nd_init;
2103         ps->ps_node_fini    = nd_fini;
2104         ps->ps_pool_size    = size;
2105         if (strlcpy(ps->ps_name, name, sizeof(ps->ps_name))
2106             >= sizeof(ps->ps_name))
2107                 return -E2BIG;
2108         spin_lock_init(&ps->ps_lock);
2109         INIT_LIST_HEAD(&ps->ps_pool_list);
2110         INIT_LIST_HEAD(&ps->ps_failed_pool_list);
2111
2112         rc = ps->ps_pool_create(ps, size, &pool);
2113         if (rc == 0)
2114                 list_add(&pool->po_list, &ps->ps_pool_list);
2115         else
2116                 CERROR("Failed to create the first pool for %s\n", ps->ps_name);
2117
2118         return rc;
2119 }
2120
2121 static int
2122 kiblnd_pool_is_idle(struct kib_pool *pool, time64_t now)
2123 {
2124         if (pool->po_allocated != 0) /* still in use */
2125                 return 0;
2126         if (pool->po_failed)
2127                 return 1;
2128         return now >= pool->po_deadline;
2129 }
2130
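/*
 * Return @node to its pool, running the optional ps_node_fini hook,
 * then retire any non-persistent pools that have been idle past their
 * deadline.
 */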
2131 void
2132 kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node)
2133 {
2134         struct list_head zombies = LIST_HEAD_INIT(zombies);
2135         struct kib_poolset *ps = pool->po_owner;
2136         struct kib_pool *tmp;
2137         time64_t now = ktime_get_seconds();
2138
2139         spin_lock(&ps->ps_lock);
2140
2141         if (ps->ps_node_fini != NULL)
2142                 ps->ps_node_fini(pool, node);
2143
2144         LASSERT(pool->po_allocated > 0);
2145         list_add(node, &pool->po_free_list);
2146         pool->po_allocated--;
2147
2148         list_for_each_entry_safe(pool, tmp, &ps->ps_pool_list, po_list) {
2149                 /* the first pool is persistent */
2150                 if (ps->ps_pool_list.next == &pool->po_list)
2151                         continue;
2152
2153                 if (kiblnd_pool_is_idle(pool, now))
2154                         list_move(&pool->po_list, &zombies);
2155         }
2156         spin_unlock(&ps->ps_lock);
2157
2158         if (!list_empty(&zombies))
2159                 kiblnd_destroy_pool_list(&zombies);
2160 }
2161
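/*
 * Take a free node from the first pool that has one.  When every pool
 * is empty, a single thread creates a new pool while the others back
 * off with an exponentially growing sleep (capped at one second);
 * returns NULL only if a pool allocation failed recently.
 */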
2162 struct list_head *
2163 kiblnd_pool_alloc_node(struct kib_poolset *ps)
2164 {
2165         struct list_head        *node;
2166         struct kib_pool *pool;
2167         int                     rc;
2168         unsigned int            interval = 1;
2169         ktime_t time_before;
2170         unsigned int trips = 0;
2171
2172 again:
2173         spin_lock(&ps->ps_lock);
2174         list_for_each_entry(pool, &ps->ps_pool_list, po_list) {
2175                 if (list_empty(&pool->po_free_list))
2176                         continue;
2177
2178                 pool->po_allocated++;
2179                 pool->po_deadline = ktime_get_seconds() +
2180                                     IBLND_POOL_DEADLINE;
2181                 node = pool->po_free_list.next;
2182                 list_del(node);
2183
2184                 if (ps->ps_node_init != NULL) {
2185                         /* still hold the lock */
2186                         ps->ps_node_init(pool, node);
2187                 }
2188                 spin_unlock(&ps->ps_lock);
2189                 return node;
2190         }
2191
2192         /* no available tx pool and ... */
2193         if (ps->ps_increasing) {
2194                 /* another thread is allocating a new pool */
2195                 spin_unlock(&ps->ps_lock);
2196                 trips++;
2197                 CDEBUG(D_NET, "Another thread is allocating new "
2198                        "%s pool, waiting %d jiffies for it to complete. "
2199                        "trips = %d\n",
2200                        ps->ps_name, interval, trips);
2201
2202                 set_current_state(TASK_INTERRUPTIBLE);
2203                 schedule_timeout(interval);
2204                 if (interval < cfs_time_seconds(1))
2205                         interval *= 2;
2206
2207                 goto again;
2208         }
2209
2210         if (ktime_get_seconds() < ps->ps_next_retry) {
2211                 /* someone failed recently */
2212                 spin_unlock(&ps->ps_lock);
2213                 return NULL;
2214         }
2215
2216         ps->ps_increasing = 1;
2217         spin_unlock(&ps->ps_lock);
2218
2219         CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
2220         time_before = ktime_get();
2221         rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
2222         CDEBUG(D_NET, "ps_pool_create took %lld ms to complete\n",
2223                ktime_ms_delta(ktime_get(), time_before));
2224
2225         spin_lock(&ps->ps_lock);
2226         ps->ps_increasing = 0;
2227         if (rc == 0) {
2228                 list_add_tail(&pool->po_list, &ps->ps_pool_list);
2229         } else {
2230                 ps->ps_next_retry = ktime_get_seconds() + IBLND_POOL_RETRY;
2231                 CERROR("Can't allocate new %s pool: out of memory\n",
2232                        ps->ps_name);
2233         }
2234         spin_unlock(&ps->ps_lock);
2235
2236         goto again;
2237 }
2238
2239 static void
2240 kiblnd_destroy_tx_pool(struct kib_pool *pool)
2241 {
2242         struct kib_tx_pool *tpo = container_of(pool, struct kib_tx_pool,
2243                                                tpo_pool);
2244         int i;
2245
2246         LASSERT (pool->po_allocated == 0);
2247
2248         if (tpo->tpo_tx_pages != NULL) {
2249                 kiblnd_unmap_tx_pool(tpo);
2250                 kiblnd_free_pages(tpo->tpo_tx_pages);
2251         }
2252
2253         if (tpo->tpo_tx_descs == NULL)
2254                 goto out;
2255
2256         for (i = 0; i < pool->po_size; i++) {
2257                 struct kib_tx *tx = &tpo->tpo_tx_descs[i];
2258                 int       wrq_sge = *kiblnd_tunables.kib_wrq_sge;
2259
2260                 list_del(&tx->tx_list);
2261                 if (tx->tx_pages != NULL)
2262                         LIBCFS_FREE(tx->tx_pages,
2263                                     LNET_MAX_IOV *
2264                                     sizeof(*tx->tx_pages));
2265                 if (tx->tx_frags != NULL)
2266                         LIBCFS_FREE(tx->tx_frags,
2267                                     (1 + IBLND_MAX_RDMA_FRAGS) *
2268                                     sizeof(*tx->tx_frags));
2269                 if (tx->tx_wrq != NULL)
2270                         LIBCFS_FREE(tx->tx_wrq,
2271                                     (1 + IBLND_MAX_RDMA_FRAGS) *
2272                                     sizeof(*tx->tx_wrq));
2273                 if (tx->tx_sge != NULL)
2274                         LIBCFS_FREE(tx->tx_sge,
2275                                     (1 + IBLND_MAX_RDMA_FRAGS) * wrq_sge *
2276                                     sizeof(*tx->tx_sge));
2277                 if (tx->tx_rd != NULL)
2278                         LIBCFS_FREE(tx->tx_rd,
2279                                     offsetof(struct kib_rdma_desc,
2280                                              rd_frags[IBLND_MAX_RDMA_FRAGS]));
2281         }
2282
2283         LIBCFS_FREE(tpo->tpo_tx_descs,
2284                     pool->po_size * sizeof(struct kib_tx));
2285 out:
2286         kiblnd_fini_pool(pool);
2287         LIBCFS_FREE(tpo, sizeof(struct kib_tx_pool));
2288 }
2289
2290 static int kiblnd_tx_pool_size(struct lnet_ni *ni, int ncpts)
2291 {
2292         struct lnet_ioctl_config_o2iblnd_tunables *tunables;
2293         int ntx;
2294
2295         tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
2296         ntx = tunables->lnd_ntx / ncpts;
2297
2298         return max(IBLND_TX_POOL, ntx);
2299 }
2300
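/*
 * Build a TX pool: one page-backed message buffer region plus @size tx
 * descriptors, each with fragment, work-request, SGE and RDMA
 * descriptor arrays sized for IBLND_MAX_RDMA_FRAGS.  tx_pages is only
 * needed (and only allocated) when an FMR/FastReg poolset exists.
 */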
2301 static int
2302 kiblnd_create_tx_pool(struct kib_poolset *ps, int size, struct kib_pool **pp_po)
2303 {
2304         int            i;
2305         int            npg;
2306         struct kib_pool *pool;
2307         struct kib_tx_pool *tpo;
2308
2309         LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
2310         if (tpo == NULL) {
2311                 CERROR("Failed to allocate TX pool\n");
2312                 return -ENOMEM;
2313         }
2314
2315         pool = &tpo->tpo_pool;
2316         kiblnd_init_pool(ps, pool, size);
2317         tpo->tpo_tx_descs = NULL;
2318         tpo->tpo_tx_pages = NULL;
2319
2320         npg = (size * IBLND_MSG_SIZE + PAGE_SIZE - 1) / PAGE_SIZE;
2321         if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg) != 0) {
2322                 CERROR("Can't allocate tx pages: %d\n", npg);
2323                 LIBCFS_FREE(tpo, sizeof(struct kib_tx_pool));
2324                 return -ENOMEM;
2325         }
2326
2327         LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt,
2328                          size * sizeof(struct kib_tx));
2329         if (tpo->tpo_tx_descs == NULL) {
2330                 CERROR("Can't allocate %d tx descriptors\n", size);
2331                 ps->ps_pool_destroy(pool);
2332                 return -ENOMEM;
2333         }
2334
2335         memset(tpo->tpo_tx_descs, 0, size * sizeof(struct kib_tx));
2336
2337         for (i = 0; i < size; i++) {
2338                 struct kib_tx *tx = &tpo->tpo_tx_descs[i];
2339                 int       wrq_sge = *kiblnd_tunables.kib_wrq_sge;
2340
2341                 tx->tx_pool = tpo;
2342                 if (ps->ps_net->ibn_fmr_ps != NULL) {
2343                         LIBCFS_CPT_ALLOC(tx->tx_pages,
2344                                          lnet_cpt_table(), ps->ps_cpt,
2345                                          LNET_MAX_IOV * sizeof(*tx->tx_pages));
2346                         if (tx->tx_pages == NULL)
2347                                 break;
2348                 }
2349
2350                 LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
2351                                  (1 + IBLND_MAX_RDMA_FRAGS) *
2352                                  sizeof(*tx->tx_frags));
2353                 if (tx->tx_frags == NULL)
2354                         break;
2355
2356                 sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS + 1);
2357
2358                 LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
2359                                  (1 + IBLND_MAX_RDMA_FRAGS) *
2360                                  sizeof(*tx->tx_wrq));
2361                 if (tx->tx_wrq == NULL)
2362                         break;
2363
2364                 LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt,
2365                                  (1 + IBLND_MAX_RDMA_FRAGS) * wrq_sge *
2366                                  sizeof(*tx->tx_sge));
2367                 if (tx->tx_sge == NULL)
2368                         break;
2369
2370                 LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt,
2371                                  offsetof(struct kib_rdma_desc,
2372                                           rd_frags[IBLND_MAX_RDMA_FRAGS]));
2373                 if (tx->tx_rd == NULL)
2374                         break;
2375         }
2376
2377         if (i == size) {
2378                 kiblnd_map_tx_pool(tpo);
2379                 *pp_po = pool;
2380                 return 0;
2381         }
2382
2383         ps->ps_pool_destroy(pool);
2384         return -ENOMEM;
2385 }
2386
2387 static void
2388 kiblnd_tx_init(struct kib_pool *pool, struct list_head *node)
2389 {
2390         struct kib_tx_poolset *tps = container_of(pool->po_owner,
2391                                                   struct kib_tx_poolset,
2392                                                   tps_poolset);
2393         struct kib_tx *tx  = list_entry(node, struct kib_tx, tx_list);
2394
2395         tx->tx_cookie = tps->tps_next_tx_cookie++;
2396 }
2397
2398 static void
2399 kiblnd_net_fini_pools(struct kib_net *net)
2400 {
2401         int     i;
2402
2403         cfs_cpt_for_each(i, lnet_cpt_table()) {
2404                 struct kib_tx_poolset *tps;
2405                 struct kib_fmr_poolset *fps;
2406
2407                 if (net->ibn_tx_ps != NULL) {
2408                         tps = net->ibn_tx_ps[i];
2409                         kiblnd_fini_poolset(&tps->tps_poolset);
2410                 }
2411
2412                 if (net->ibn_fmr_ps != NULL) {
2413                         fps = net->ibn_fmr_ps[i];
2414                         kiblnd_fini_fmr_poolset(fps);
2415                 }
2416         }
2417
2418         if (net->ibn_tx_ps != NULL) {
2419                 cfs_percpt_free(net->ibn_tx_ps);
2420                 net->ibn_tx_ps = NULL;
2421         }
2422
2423         if (net->ibn_fmr_ps != NULL) {
2424                 cfs_percpt_free(net->ibn_fmr_ps);
2425                 net->ibn_fmr_ps = NULL;
2426         }
2427 }
2428
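/*
 * Create the per-CPT poolsets for a net.  With global DMA memory
 * regions (lnd_map_on_demand == 0 on kernels that still provide
 * ib_get_dma_mr) the FMR step is skipped entirely; otherwise the FMR
 * poolsets must be created before the TX pools (see LU-2268).
 */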
2429 static int
2430 kiblnd_net_init_pools(struct kib_net *net, struct lnet_ni *ni, __u32 *cpts,
2431                       int ncpts)
2432 {
2433         struct lnet_ioctl_config_o2iblnd_tunables *tunables;
2434 #ifdef HAVE_IB_GET_DMA_MR
2435         unsigned long   flags;
2436 #endif
2437         int             cpt;
2438         int             rc;
2439         int             i;
2440
2441         tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
2442
2443 #ifdef HAVE_IB_GET_DMA_MR
2444         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2445         /*
2446          * if lnd_map_on_demand is zero then we have effectively disabled
2447          * FMR or FastReg and we're using global memory regions
2448          * exclusively.
2449          */
2450         if (!tunables->lnd_map_on_demand) {
2451                 read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
2452                                            flags);
2453                 goto create_tx_pool;
2454         }
2455
2456         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2457 #endif
2458
2459         if (tunables->lnd_fmr_pool_size < tunables->lnd_ntx / 4) {
2460                 CERROR("Can't set fmr pool size (%d) < ntx / 4 (%d)\n",
2461                        tunables->lnd_fmr_pool_size,
2462                        tunables->lnd_ntx / 4);
2463                 rc = -EINVAL;
2464                 goto failed;
2465         }
2466
2467         /* TX pool must be created later than FMR, see LU-2268
2468          * for details */
2469         LASSERT(net->ibn_tx_ps == NULL);
2470
2471         /* premapping can fail if ibd_nmr > 1, so we always create
2472          * FMR pool and map-on-demand if premapping failed */
2473
2474         net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
2475                                            sizeof(struct kib_fmr_poolset));
2476         if (net->ibn_fmr_ps == NULL) {
2477                 CERROR("Failed to allocate FMR pool array\n");
2478                 rc = -ENOMEM;
2479                 goto failed;
2480         }
2481
2482         for (i = 0; i < ncpts; i++) {
2483                 cpt = (cpts == NULL) ? i : cpts[i];
2484                 rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, ncpts,
2485                                              net, tunables);
2486                 if (rc != 0) {
2487                         CERROR("Can't initialize FMR pool for CPT %d: %d\n",
2488                                cpt, rc);
2489                         goto failed;
2490                 }
2491         }
2492
2493         if (i > 0)
2494                 LASSERT(i == ncpts);
2495
2496 #ifdef HAVE_IB_GET_DMA_MR
2497  create_tx_pool:
2498 #endif
2499         net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
2500                                           sizeof(struct kib_tx_poolset));
2501         if (net->ibn_tx_ps == NULL) {
2502                 CERROR("Failed to allocate tx pool array\n");
2503                 rc = -ENOMEM;
2504                 goto failed;
2505         }
2506
2507         for (i = 0; i < ncpts; i++) {
2508                 cpt = (cpts == NULL) ? i : cpts[i];
2509                 rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset,
2510                                          cpt, net, "TX",
2511                                          kiblnd_tx_pool_size(ni, ncpts),
2512                                          kiblnd_create_tx_pool,
2513                                          kiblnd_destroy_tx_pool,
2514                                          kiblnd_tx_init, NULL);
2515                 if (rc != 0) {
2516                         CERROR("Can't initialize TX pool for CPT %d: %d\n",
2517                                cpt, rc);
2518                         goto failed;
2519                 }
2520         }
2521
2522         return 0;
2523  failed:
2524         kiblnd_net_fini_pools(net);
2525         LASSERT(rc != 0);
2526         return rc;
2527 }
2528
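/*
 * Query the HCA and record its registration capabilities: prefer FMR
 * when the device implements the whole FMR verb set, fall back to
 * FastReg (noting SG_GAPS support where the API exposes it), and fail
 * with -ENOSYS when neither is available.
 */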
2529 static int
2530 kiblnd_hdev_get_attr(struct kib_hca_dev *hdev)
2531 {
2532         struct ib_device_attr *dev_attr;
2533         int rc = 0;
2534
2535         /* It's safe to assume an HCA can handle a page size
2536          * matching that of the native system */
2537         hdev->ibh_page_shift = PAGE_SHIFT;
2538         hdev->ibh_page_size  = 1 << PAGE_SHIFT;
2539         hdev->ibh_page_mask  = ~((__u64)hdev->ibh_page_size - 1);
2540
2541 #ifndef HAVE_IB_DEVICE_ATTRS
2542         LIBCFS_ALLOC(dev_attr, sizeof(*dev_attr));
2543         if (dev_attr == NULL) {
2544                 CERROR("Out of memory\n");
2545                 return -ENOMEM;
2546         }
2547
2548         rc = ib_query_device(hdev->ibh_ibdev, dev_attr);
2549         if (rc != 0) {
2550                 CERROR("Failed to query IB device: %d\n", rc);
2551                 goto out_clean_attr;
2552         }
2553 #else
2554         dev_attr = &hdev->ibh_ibdev->attrs;
2555 #endif
2556
2557         hdev->ibh_mr_size = dev_attr->max_mr_size;
2558
2559         /* Setup device Memory Registration capabilities */
2560         if (hdev->ibh_ibdev->alloc_fmr &&
2561             hdev->ibh_ibdev->dealloc_fmr &&
2562             hdev->ibh_ibdev->map_phys_fmr &&
2563             hdev->ibh_ibdev->unmap_fmr) {
2564                 LCONSOLE_INFO("Using FMR for registration\n");
2565                 hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FMR_ENABLED;
2566         } else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
2567                 LCONSOLE_INFO("Using FastReg for registration\n");
2568                 hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FASTREG_ENABLED;
2569 #ifndef HAVE_IB_ALLOC_FAST_REG_MR
2570 #ifdef IB_DEVICE_SG_GAPS_REG
2571                 if (dev_attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
2572                         hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT;
2573 #endif
2574 #endif
2575         } else {
2576                 rc = -ENOSYS;
2577         }
2578
2579         if (rc == 0 && hdev->ibh_mr_size == ~0ULL)
2580                 hdev->ibh_mr_shift = 64;
2581         else if (rc != 0)
2582                 rc = -EINVAL;
2583
2584 #ifndef HAVE_IB_DEVICE_ATTRS
2585 out_clean_attr:
2586         LIBCFS_FREE(dev_attr, sizeof(*dev_attr));
2587 #endif
2588
2589         if (rc == -ENOSYS)
2590                 CERROR("IB device supports neither FMRs nor FastRegs, can't "
2591                        "register memory: %d\n", rc);
2592         else if (rc == -EINVAL)
2593                 CERROR("Invalid mr size: %#llx\n", hdev->ibh_mr_size);
2594         return rc;
2595 }
2596
2597 #ifdef HAVE_IB_GET_DMA_MR
2598 static void
2599 kiblnd_hdev_cleanup_mrs(struct kib_hca_dev *hdev)
2600 {
2601         if (hdev->ibh_mrs == NULL)
2602                 return;
2603
2604         ib_dereg_mr(hdev->ibh_mrs);
2605
2606         hdev->ibh_mrs = NULL;
2607 }
2608 #endif
2609
2610 void
2611 kiblnd_hdev_destroy(struct kib_hca_dev *hdev)
2612 {
2613 #ifdef HAVE_IB_GET_DMA_MR
2614         kiblnd_hdev_cleanup_mrs(hdev);
2615 #endif
2616
2617         if (hdev->ibh_pd != NULL)
2618                 ib_dealloc_pd(hdev->ibh_pd);
2619
2620         if (hdev->ibh_cmid != NULL)
2621                 rdma_destroy_id(hdev->ibh_cmid);
2622
2623         LIBCFS_FREE(hdev, sizeof(*hdev));
2624 }
2625
2626 #ifdef HAVE_IB_GET_DMA_MR
2627 static int
2628 kiblnd_hdev_setup_mrs(struct kib_hca_dev *hdev)
2629 {
2630         struct ib_mr *mr;
2631         int           acflags = IB_ACCESS_LOCAL_WRITE |
2632                                 IB_ACCESS_REMOTE_WRITE;
2633
2634         mr = ib_get_dma_mr(hdev->ibh_pd, acflags);
2635         if (IS_ERR(mr)) {
2636                 CERROR("Failed ib_get_dma_mr: %ld\n", PTR_ERR(mr));
2637                 kiblnd_hdev_cleanup_mrs(hdev);
2638                 return PTR_ERR(mr);
2639         }
2640
2641         hdev->ibh_mrs = mr;
2642
2643         return 0;
2644 }
2645 #endif
2646
2647 static int
2648 kiblnd_dummy_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
2649 {       /* DUMMY */
2650         return 0;
2651 }
2652
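/*
 * Returns > 0 when the device should fail over (no hdev yet, dead
 * listener, failover forced by the tunable, or the bound ib_device no
 * longer matches the one resolved for our interface address), 0 when
 * no failover is needed, and < 0 on error.
 */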
2653 static int
2654 kiblnd_dev_need_failover(struct kib_dev *dev)
2655 {
2656         struct rdma_cm_id  *cmid;
2657         struct sockaddr_in  srcaddr;
2658         struct sockaddr_in  dstaddr;
2659         int                 rc;
2660
2661         if (dev->ibd_hdev == NULL || /* initializing */
2662             dev->ibd_hdev->ibh_cmid == NULL || /* listener is dead */
2663             *kiblnd_tunables.kib_dev_failover > 1) /* debugging */
2664                 return 1;
2665
2666         /* XXX: it's UGLY, but I don't have better way to find
2667          * ib-bonding HCA failover because:
2668          *
2669          * a. no reliable CM event for HCA failover...
2670          * b. no OFED API to get ib_device for current net_device...
2671          *
2672          * We have only two choices at this point:
2673          *
2674          * a. rdma_bind_addr(), it will conflict with listener cmid
2675          * b. rdma_resolve_addr() to zero addr */
2676         cmid = kiblnd_rdma_create_id(kiblnd_dummy_callback, dev, RDMA_PS_TCP,
2677                                      IB_QPT_RC);
2678         if (IS_ERR(cmid)) {
2679                 rc = PTR_ERR(cmid);
2680                 CERROR("Failed to create cmid for failover: %d\n", rc);
2681                 return rc;
2682         }
2683
2684         memset(&srcaddr, 0, sizeof(srcaddr));
2685         srcaddr.sin_family      = AF_INET;
2686         srcaddr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
2687
2688         memset(&dstaddr, 0, sizeof(dstaddr));
2689         dstaddr.sin_family = AF_INET;
2690         rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr,
2691                                (struct sockaddr *)&dstaddr, 1);
2692         if (rc != 0 || cmid->device == NULL) {
2693                 CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
2694                        dev->ibd_ifname, &dev->ibd_ifip,
2695                        cmid->device, rc);
2696                 rdma_destroy_id(cmid);
2697                 return rc;
2698         }
2699
2700         rc = dev->ibd_hdev->ibh_ibdev != cmid->device; /* true for failover */
2701         rdma_destroy_id(cmid);
2702         return rc;
2703 }
2704
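/*
 * Rebind the listener to whatever ib_device currently backs the
 * interface: tear down the old cmid, create and bind a new one,
 * allocate a fresh PD/hdev, then swap it in under kib_global_lock and
 * fail all poolsets so stale MRs are retired.  The old hdev and any
 * unused pools are destroyed outside the lock.
 */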
2705 int
2706 kiblnd_dev_failover(struct kib_dev *dev)
2707 {
2708         struct list_head    zombie_tpo = LIST_HEAD_INIT(zombie_tpo);
2709         struct list_head    zombie_ppo = LIST_HEAD_INIT(zombie_ppo);
2710         struct list_head    zombie_fpo = LIST_HEAD_INIT(zombie_fpo);
2711         struct rdma_cm_id  *cmid  = NULL;
2712         struct kib_hca_dev *hdev  = NULL;
2713         struct kib_hca_dev *old;
2714         struct ib_pd       *pd;
2715         struct kib_net *net;
2716         struct sockaddr_in  addr;
2717         unsigned long       flags;
2718         int                 rc = 0;
2719         int                 i;
2720
2721         LASSERT (*kiblnd_tunables.kib_dev_failover > 1 ||
2722                  dev->ibd_can_failover ||
2723                  dev->ibd_hdev == NULL);
2724
2725         rc = kiblnd_dev_need_failover(dev);
2726         if (rc <= 0)
2727                 goto out;
2728
2729         if (dev->ibd_hdev != NULL &&
2730             dev->ibd_hdev->ibh_cmid != NULL) {
2731                 /* XXX it's not good to close old listener at here,
2732                  * because we can fail to create new listener.
2733                  * But we have to close it now, otherwise rdma_bind_addr
2734                  * will return EADDRINUSE... How crap! */
2735                 write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2736
2737                 cmid = dev->ibd_hdev->ibh_cmid;
2738                 /* make next schedule of kiblnd_dev_need_failover()
2739                  * return 1 for me */
2740                 dev->ibd_hdev->ibh_cmid  = NULL;
2741                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2742
2743                 rdma_destroy_id(cmid);
2744         }
2745
2746         cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, dev, RDMA_PS_TCP,
2747                                      IB_QPT_RC);
2748         if (IS_ERR(cmid)) {
2749                 rc = PTR_ERR(cmid);
2750                 CERROR("Failed to create cmid for failover: %d\n", rc);
2751                 goto out;
2752         }
2753
2754         memset(&addr, 0, sizeof(addr));
2755         addr.sin_family      = AF_INET;
2756         addr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
2757         addr.sin_port        = htons(*kiblnd_tunables.kib_service);
2758
2759         /* Bind to failover device or port */
2760         rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr);
2761         if (rc != 0 || cmid->device == NULL) {
2762                 CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
2763                        dev->ibd_ifname, &dev->ibd_ifip,
2764                        cmid->device, rc);
2765                 rdma_destroy_id(cmid);
2766                 goto out;
2767         }
2768
2769         LIBCFS_ALLOC(hdev, sizeof(*hdev));
2770         if (hdev == NULL) {
2771                 CERROR("Failed to allocate kib_hca_dev\n");
2772                 rdma_destroy_id(cmid);
2773                 rc = -ENOMEM;
2774                 goto out;
2775         }
2776
2777         atomic_set(&hdev->ibh_ref, 1);
2778         hdev->ibh_dev   = dev;
2779         hdev->ibh_cmid  = cmid;
2780         hdev->ibh_ibdev = cmid->device;
2781
2782 #ifdef HAVE_IB_ALLOC_PD_2ARGS
2783         pd = ib_alloc_pd(cmid->device, 0);
2784 #else
2785         pd = ib_alloc_pd(cmid->device);
2786 #endif
2787         if (IS_ERR(pd)) {
2788                 rc = PTR_ERR(pd);
2789                 CERROR("Can't allocate PD: %d\n", rc);
2790                 goto out;
2791         }
2792
2793         hdev->ibh_pd = pd;
2794
2795         rc = rdma_listen(cmid, 0);
2796         if (rc != 0) {
2797                 CERROR("Can't start new listener: %d\n", rc);
2798                 goto out;
2799         }
2800
2801         rc = kiblnd_hdev_get_attr(hdev);
2802         if (rc != 0) {
2803                 CERROR("Can't get device attributes: %d\n", rc);
2804                 goto out;
2805         }
2806
2807 #ifdef HAVE_IB_GET_DMA_MR
2808         rc = kiblnd_hdev_setup_mrs(hdev);
2809         if (rc != 0) {
2810                 CERROR("Can't setup device: %d\n", rc);
2811                 goto out;
2812         }
2813 #endif
2814
2815         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2816
2817         old = dev->ibd_hdev;
2818         dev->ibd_hdev = hdev;   /* take over the refcount */
2819         hdev = old;
2820
2821         list_for_each_entry(net, &dev->ibd_nets, ibn_list) {
2822                 cfs_cpt_for_each(i, lnet_cpt_table()) {
2823                         kiblnd_fail_poolset(&net->ibn_tx_ps[i]->tps_poolset,
2824                                             &zombie_tpo);
2825
2826                         if (net->ibn_fmr_ps != NULL)
2827                                 kiblnd_fail_fmr_poolset(net->ibn_fmr_ps[i],
2828                                                         &zombie_fpo);
2829                 }
2830         }
2831
2832         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2833  out:
2834         if (!list_empty(&zombie_tpo))
2835                 kiblnd_destroy_pool_list(&zombie_tpo);
2836         if (!list_empty(&zombie_ppo))
2837                 kiblnd_destroy_pool_list(&zombie_ppo);
2838         if (!list_empty(&zombie_fpo))
2839                 kiblnd_destroy_fmr_pool_list(&zombie_fpo);
2840         if (hdev != NULL)
2841                 kiblnd_hdev_decref(hdev);
2842
2843         if (rc != 0)
2844                 dev->ibd_failed_failover++;
2845         else
2846                 dev->ibd_failed_failover = 0;
2847
2848         return rc;
2849 }
2850
2851 void
2852 kiblnd_destroy_dev(struct kib_dev *dev)
2853 {
2854         LASSERT(dev->ibd_nnets == 0);
2855         LASSERT(list_empty(&dev->ibd_nets));
2856
2857         list_del(&dev->ibd_fail_list);
2858         list_del(&dev->ibd_list);
2859
2860         if (dev->ibd_hdev != NULL)
2861                 kiblnd_hdev_decref(dev->ibd_hdev);
2862
2863         LIBCFS_FREE(dev, sizeof(*dev));
2864 }
2865
2866 static struct kib_dev *
2867 kiblnd_create_dev(char *ifname)
2868 {
2869         struct net_device *netdev;
2870         struct kib_dev *dev;
2871         __u32              netmask;
2872         __u32              ip;
2873         int                up;
2874         int                rc;
2875
2876         rc = lnet_ipif_query(ifname, &up, &ip, &netmask);
2877         if (rc != 0) {
2878                 CERROR("Can't query IPoIB interface %s: %d\n",
2879                        ifname, rc);
2880                 return NULL;
2881         }
2882
2883         if (!up) {
2884                 CERROR("Can't query IPoIB interface %s: it's down\n", ifname);
2885                 return NULL;
2886         }
2887
2888         LIBCFS_ALLOC(dev, sizeof(*dev));
2889         if (dev == NULL)
2890                 return NULL;
2891
2892         netdev = dev_get_by_name(&init_net, ifname);
2893         if (netdev == NULL) {
2894                 dev->ibd_can_failover = 0;
2895         } else {
2896                 dev->ibd_can_failover = !!(netdev->flags & IFF_MASTER);
2897                 dev_put(netdev);
2898         }
2899
2900         INIT_LIST_HEAD(&dev->ibd_nets);
2901         INIT_LIST_HEAD(&dev->ibd_list); /* not yet in kib_devs */
2902         INIT_LIST_HEAD(&dev->ibd_fail_list);
2903         dev->ibd_ifip = ip;
2904         strlcpy(dev->ibd_ifname, ifname, sizeof(dev->ibd_ifname));
2905
2906         /* initialize the device */
2907         rc = kiblnd_dev_failover(dev);
2908         if (rc != 0) {
2909                 CERROR("Can't initialize device: %d\n", rc);
2910                 LIBCFS_FREE(dev, sizeof(*dev));
2911                 return NULL;
2912         }
2913
2914         list_add_tail(&dev->ibd_list,
2915                       &kiblnd_data.kib_devs);
2916         return dev;
2917 }
2918
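/*
 * Final module-level teardown: flag shutdown, wake every scheduler,
 * connd and failover thread, wait for kib_nthreads to drain, then free
 * the peer table and the per-CPT scheduler data.
 */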
2919 static void
2920 kiblnd_base_shutdown(void)
2921 {
2922         struct kib_sched_info   *sched;
2923         int                     i;
2924
2925         LASSERT(list_empty(&kiblnd_data.kib_devs));
2926
2927         CDEBUG(D_MALLOC, "before LND base cleanup: kmem %d\n",
2928                atomic_read(&libcfs_kmemory));
2929
2930         switch (kiblnd_data.kib_init) {
2931         default:
2932                 LBUG();
2933
2934         case IBLND_INIT_ALL:
2935         case IBLND_INIT_DATA:
2936                 LASSERT (kiblnd_data.kib_peers != NULL);
2937                 for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
2938                         LASSERT(list_empty(&kiblnd_data.kib_peers[i]));
2939                 }
2940                 LASSERT(list_empty(&kiblnd_data.kib_connd_zombies));
2941                 LASSERT(list_empty(&kiblnd_data.kib_connd_conns));
2942                 LASSERT(list_empty(&kiblnd_data.kib_reconn_list));
2943                 LASSERT(list_empty(&kiblnd_data.kib_reconn_wait));
2944
2945                 /* flag threads to terminate; wake and wait for them to die */
2946                 kiblnd_data.kib_shutdown = 1;
2947
2948                 /* NB: we really want to stop scheduler threads net by net
2949                  * instead of the whole module; this should be improved
2950                  * with LNet dynamic configuration */
2951                 cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds)
2952                         wake_up_all(&sched->ibs_waitq);
2953
2954                 wake_up_all(&kiblnd_data.kib_connd_waitq);
2955                 wake_up_all(&kiblnd_data.kib_failover_waitq);
2956
2957                 i = 2;
2958                 while (atomic_read(&kiblnd_data.kib_nthreads) != 0) {
2959                         i++;
2960                         /* power of 2? */
2961                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2962                                "Waiting for %d threads to terminate\n",
2963                                atomic_read(&kiblnd_data.kib_nthreads));
2964                         set_current_state(TASK_UNINTERRUPTIBLE);
2965                         schedule_timeout(cfs_time_seconds(1));
2966                 }
2967
2968                 /* fall through */
2969
2970         case IBLND_INIT_NOTHING:
2971                 break;
2972         }
2973
2974         if (kiblnd_data.kib_peers != NULL) {
2975                 LIBCFS_FREE(kiblnd_data.kib_peers,
2976                             sizeof(struct list_head) *
2977                             kiblnd_data.kib_peer_hash_size);
2978         }
2979
2980         if (kiblnd_data.kib_scheds != NULL)
2981                 cfs_percpt_free(kiblnd_data.kib_scheds);
2982
2983         CDEBUG(D_MALLOC, "after LND base cleanup: kmem %d\n",
2984                atomic_read(&libcfs_kmemory));
2985
2986         kiblnd_data.kib_init = IBLND_INIT_NOTHING;
2987         module_put(THIS_MODULE);
2988 }
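
/*
 * For reference: the wait loop above logs at D_WARNING only when the
 * iteration count is a power of two, so the "waiting for threads"
 * message backs off exponentially instead of flooding the console.
 * A minimal sketch of the test it uses:
 */
#if 0
static int
is_power_of_2_sketch(int i)
{
        /* for i > 0, a power of two has exactly one bit set, so the
         * lowest set bit (i & -i) equals the whole value */
        return (i & (-i)) == i;
}
#endif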
2989
2990 static void
2991 kiblnd_shutdown(struct lnet_ni *ni)
2992 {
2993         struct kib_net *net = ni->ni_data;
2994         rwlock_t     *g_lock = &kiblnd_data.kib_global_lock;
2995         int               i;
2996         unsigned long     flags;
2997
2998         LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL);
2999
3000         if (net == NULL)
3001                 goto out;
3002
3003         CDEBUG(D_MALLOC, "before LND net cleanup: kmem %d\n",
3004                atomic_read(&libcfs_kmemory));
3005
3006         write_lock_irqsave(g_lock, flags);
3007         net->ibn_shutdown = 1;
3008         write_unlock_irqrestore(g_lock, flags);
3009
3010         switch (net->ibn_init) {
3011         default:
3012                 LBUG();
3013
3014         case IBLND_INIT_ALL:
3015                 /* nuke all existing peers within this net */
3016                 kiblnd_del_peer(ni, LNET_NID_ANY);
3017
3018                 /* Wait for all peer_ni state to clean up */
3019                 i = 2;
3020                 while (atomic_read(&net->ibn_npeers) != 0) {
3021                         i++;
3022                         /* warn only at power-of-2 intervals to avoid log spam */
3023                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
3024                                "%s: waiting for %d peers to disconnect\n",
3025                                libcfs_nid2str(ni->ni_nid),
3026                                atomic_read(&net->ibn_npeers));
3027                         set_current_state(TASK_UNINTERRUPTIBLE);
3028                         schedule_timeout(cfs_time_seconds(1));
3029                 }
3030
3031                 kiblnd_net_fini_pools(net);
3032
3033                 write_lock_irqsave(g_lock, flags);
3034                 LASSERT(net->ibn_dev->ibd_nnets > 0);
3035                 net->ibn_dev->ibd_nnets--;
3036                 list_del(&net->ibn_list);
3037                 write_unlock_irqrestore(g_lock, flags);
3038
3039                 /* fall through */
3040
3041         case IBLND_INIT_NOTHING:
3042                 LASSERT(atomic_read(&net->ibn_nconns) == 0);
3043
3044                 if (net->ibn_dev != NULL &&
3045                     net->ibn_dev->ibd_nnets == 0)
3046                         kiblnd_destroy_dev(net->ibn_dev);
3047
3048                 break;
3049         }
3050
3051         CDEBUG(D_MALLOC, "after LND net cleanup: kmem %d\n",
3052                atomic_read(&libcfs_kmemory));
3053
3054         net->ibn_init = IBLND_INIT_NOTHING;
3055         ni->ni_data = NULL;
3056
3057         LIBCFS_FREE(net, sizeof(*net));
3058
3059 out:
3060         if (list_empty(&kiblnd_data.kib_devs))
3061                 kiblnd_base_shutdown();
3062         return;
3063 }
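
/*
 * The teardown above follows a pattern used throughout this LND:
 * publish the shutdown flag under the writer lock so no new users can
 * start, evict existing peers, then poll a reference count until the
 * last user is gone.  A minimal sketch, with hypothetical names:
 */
#if 0
static void
shutdown_pattern_sketch(rwlock_t *lock, int *shutdown_flag,
                        atomic_t *nusers)
{
        unsigned long flags;

        write_lock_irqsave(lock, flags);
        *shutdown_flag = 1;             /* new users now refuse to start */
        write_unlock_irqrestore(lock, flags);

        while (atomic_read(nusers) != 0) {
                /* existing users drain at their own pace */
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(cfs_time_seconds(1));
        }
}
#endif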
3064
3065 static int
3066 kiblnd_base_startup(void)
3067 {
3068         struct kib_sched_info   *sched;
3069         int                     rc;
3070         int                     i;
3071
3072         LASSERT(kiblnd_data.kib_init == IBLND_INIT_NOTHING);
3073
3074         try_module_get(THIS_MODULE);
3075         memset(&kiblnd_data, 0, sizeof(kiblnd_data)); /* zero pointers, flags etc */
3076
3077         rwlock_init(&kiblnd_data.kib_global_lock);
3078
3079         INIT_LIST_HEAD(&kiblnd_data.kib_devs);
3080         INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs);
3081
3082         kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
3083         LIBCFS_ALLOC(kiblnd_data.kib_peers,
3084                      sizeof(struct list_head) *
3085                      kiblnd_data.kib_peer_hash_size);
3086         if (kiblnd_data.kib_peers == NULL)
3087                 goto failed;
3088
3089         for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
3090                 INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
3091
3092         spin_lock_init(&kiblnd_data.kib_connd_lock);
3093         INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
3094         INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
3095         INIT_LIST_HEAD(&kiblnd_data.kib_reconn_list);
3096         INIT_LIST_HEAD(&kiblnd_data.kib_reconn_wait);
3097
3098         init_waitqueue_head(&kiblnd_data.kib_connd_waitq);
3099         init_waitqueue_head(&kiblnd_data.kib_failover_waitq);
3100
3101         kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(),
3102                                                   sizeof(*sched));
3103         if (kiblnd_data.kib_scheds == NULL)
3104                 goto failed;
3105
3106         cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
3107                 int     nthrs;
3108
3109                 spin_lock_init(&sched->ibs_lock);
3110                 INIT_LIST_HEAD(&sched->ibs_conns);
3111                 init_waitqueue_head(&sched->ibs_waitq);
3112
3113                 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
3114                 if (*kiblnd_tunables.kib_nscheds > 0) {
3115                         nthrs = min(nthrs, *kiblnd_tunables.kib_nscheds);
3116                 } else {
3117                         /* cap at half of the CPUs in this partition; the
3118                          * other half is reserved for upper-layer modules */
3119                         nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
3120                 }
3121
3122                 sched->ibs_nthreads_max = nthrs;
3123                 sched->ibs_cpt = i;
3124         }
3125
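        /* canned QP attribute: transitioning a QP to IB_QPS_ERR causes
         * its outstanding work requests to complete with flush errors,
         * which is used when tearing connections down */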
3126         kiblnd_data.kib_error_qpa.qp_state = IB_QPS_ERR;
3127
3128         /* lists/ptrs/locks initialised */
3129         kiblnd_data.kib_init = IBLND_INIT_DATA;
3130         /*****************************************************/
3131
3132         rc = kiblnd_thread_start(kiblnd_connd, NULL, "kiblnd_connd");
3133         if (rc != 0) {
3134                 CERROR("Can't spawn o2iblnd connd: %d\n", rc);
3135                 goto failed;
3136         }
3137
3138         if (*kiblnd_tunables.kib_dev_failover != 0) {
3139                 rc = kiblnd_thread_start(kiblnd_failover_thread, NULL,
3140                                          "kiblnd_failover");
3141                 if (rc != 0) {
3142                         CERROR("Can't spawn o2iblnd failover thread: %d\n", rc);
3143                         goto failed;
3144                 }
3145         }
3146
3147         /* flag everything initialised */
3148         kiblnd_data.kib_init = IBLND_INIT_ALL;
3149         /*****************************************************/
3150
3151         return 0;
3152
3153  failed:
3154         kiblnd_base_shutdown();
3155         return -ENETDOWN;
3156 }
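
/*
 * Worked example of the scheduler sizing above, assuming IBLND_N_SCHED
 * is 2 (its usual value): on a CPT with 8 online CPUs the default is
 * nthrs = min(max(2, 8 >> 1), 8) = 4, i.e. half the partition serves
 * as o2iblnd schedulers and the other half is left to upper layers.
 * The "nscheds" tunable, when set, caps the count directly (but never
 * above the partition's CPU count).
 */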
3157
3158 static int
3159 kiblnd_start_schedulers(struct kib_sched_info *sched)
3160 {
3161         int     rc = 0;
3162         int     nthrs;
3163         int     i;
3164
3165         if (sched->ibs_nthreads == 0) {
3166                 if (*kiblnd_tunables.kib_nscheds > 0) {
3167                         nthrs = sched->ibs_nthreads_max;
3168                 } else {
3169                         nthrs = cfs_cpt_weight(lnet_cpt_table(),
3170                                                sched->ibs_cpt);
3171                         nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
3172                         nthrs = min(IBLND_N_SCHED_HIGH, nthrs);
3173                 }
3174         } else {
3175                 LASSERT(sched->ibs_nthreads <= sched->ibs_nthreads_max);
3176                 /* start one extra thread when a new interface is added */
3177                 nthrs = (sched->ibs_nthreads < sched->ibs_nthreads_max);
3178         }
3179
3180         for (i = 0; i < nthrs; i++) {
3181                 long    id;
3182                 char    name[20];
3183                 id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i);
3184                 snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld",
3185                          KIB_THREAD_CPT(id), KIB_THREAD_TID(id));
3186                 rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name);
3187                 if (rc == 0)
3188                         continue;
3189
3190                 CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
3191                        sched->ibs_cpt, sched->ibs_nthreads + i, rc);
3192                 break;
3193         }
3194
3195         sched->ibs_nthreads += i;
3196         return rc;
3197 }
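
/*
 * The scheduler thread id above packs the CPT number and the per-CPT
 * thread index into a single long so that both survive the (void *)
 * cast of the thread argument.  A sketch of the encoding, assuming the
 * 16-bit split that o2iblnd.h uses for KIB_THREAD_ID() and its
 * decoders:
 */
#if 0
#define SKETCH_THREAD_SHIFT         16
#define SKETCH_THREAD_ID(cpt, tid)  ((cpt) << SKETCH_THREAD_SHIFT | (tid))
#define SKETCH_THREAD_CPT(id)       ((id) >> SKETCH_THREAD_SHIFT)
#define SKETCH_THREAD_TID(id)       ((id) & ((1UL << SKETCH_THREAD_SHIFT) - 1))
#endif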
3198
3199 static int
3200 kiblnd_dev_start_threads(struct kib_dev *dev, int newdev, u32 *cpts, int ncpts)
3201 {
3202         int     cpt;
3203         int     rc;
3204         int     i;
3205
3206         for (i = 0; i < ncpts; i++) {
3207                 struct kib_sched_info *sched;
3208
3209                 cpt = (cpts == NULL) ? i : cpts[i];
3210                 sched = kiblnd_data.kib_scheds[cpt];
3211
3212                 if (!newdev && sched->ibs_nthreads > 0)
3213                         continue;
3214
3215                 rc = kiblnd_start_schedulers(sched);
3216                 if (rc != 0) {
3217                         CERROR("Failed to start scheduler threads for %s\n",
3218                                dev->ibd_ifname);
3219                         return rc;
3220                 }
3221         }
3222         return 0;
3223 }
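
/*
 * Note on the loop above: cpts == NULL means the NI spans every CPT,
 * in which case the loop index itself is the CPT number; an explicit
 * CPT list (e.g. from a "networks" string with a CPT expression)
 * restricts scheduler startup to those partitions.  For an existing
 * device, only CPTs that have no schedulers yet are started.
 */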
3224
3225 static struct kib_dev *
3226 kiblnd_dev_search(char *ifname)
3227 {
3228         struct kib_dev *alias = NULL;
3229         struct kib_dev *dev;
3230         char            *colon;
3231         char            *colon2;
3232
3233         colon = strchr(ifname, ':');
3234         list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
3235                 if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
3236                         return dev;
3237
3238                 if (alias != NULL)
3239                         continue;
3240
3241                 colon2 = strchr(dev->ibd_ifname, ':');
3242                 if (colon != NULL)
3243                         *colon = 0;
3244                 if (colon2 != NULL)
3245                         *colon2 = 0;
3246
3247                 if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
3248                         alias = dev;
3249
3250                 if (colon != NULL)
3251                         *colon = ':';
3252                 if (colon2 != NULL)
3253                         *colon2 = ':';
3254         }
3255         return alias;
3256 }
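
/*
 * Example of the alias matching above: a search for "ib0:1" first
 * tries an exact name match, then compares base names with any ":n"
 * alias suffix stripped from both sides, so an existing "ib0" (or
 * "ib0:0") device is returned as an alias match.  The strings are
 * NUL-terminated in place for the comparison and restored afterwards.
 */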
3257
3258 static int
3259 kiblnd_startup(struct lnet_ni *ni)
3260 {
3261         char                     *ifname;
3262         struct kib_dev *ibdev = NULL;
3263         struct kib_net *net;
3264         unsigned long             flags;
3265         int                       rc;
3266         int                       newdev;
3267         int                       node_id;
3268
3269         LASSERT(ni->ni_net->net_lnd == &the_o2iblnd);
3270
3271         if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
3272                 rc = kiblnd_base_startup();
3273                 if (rc != 0)
3274                         return rc;
3275         }
3276
3277         LIBCFS_ALLOC(net, sizeof(*net));
3278         ni->ni_data = net;
3279         if (net == NULL)
3280                 goto failed;
3281
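        /* stamp this NI instance in microseconds since the epoch so
         * peers can tell a restarted instance from the old one */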
3282         net->ibn_incarnation = ktime_get_real_ns() / NSEC_PER_USEC;
3283
3284         kiblnd_tunables_setup(ni);
3285
3286         if (ni->ni_interfaces[0] != NULL) {
3287                 /* Use the IPoIB interface specified in 'networks=' */
3288
3289                 CLASSERT(LNET_INTERFACES_NUM > 1);
3290                 if (ni->ni_interfaces[1] != NULL) {
3291                         CERROR("Multiple interfaces not supported\n");
3292                         goto failed;
3293                 }
3294
3295                 ifname = ni->ni_interfaces[0];
3296         } else {
3297                 ifname = *kiblnd_tunables.kib_default_ipif;
3298         }
3299
3300         if (strlen(ifname) >= sizeof(ibdev->ibd_ifname)) {
3301                 CERROR("IPoIB interface name too long: %s\n", ifname);
3302                 goto failed;
3303         }
3304
3305         ibdev = kiblnd_dev_search(ifname);
3306
3307         newdev = ibdev == NULL;
3308         /* create a new kib_dev even when ifname only matched an alias */
3309         if (ibdev == NULL || strcmp(&ibdev->ibd_ifname[0], ifname) != 0)
3310                 ibdev = kiblnd_create_dev(ifname);
3311
3312         if (ibdev == NULL)
3313                 goto failed;
3314
3315         node_id = dev_to_node(ibdev->ibd_hdev->ibh_ibdev->dma_device);
3316         ni->ni_dev_cpt = cfs_cpt_of_node(lnet_cpt_table(), node_id);
3317
3318         net->ibn_dev = ibdev;
3319         ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);
3320
3321         rc = kiblnd_dev_start_threads(ibdev, newdev,
3322                                       ni->ni_cpts, ni->ni_ncpts);
3323         if (rc != 0)
3324                 goto failed;
3325
3326         rc = kiblnd_net_init_pools(net, ni, ni->ni_cpts, ni->ni_ncpts);
3327         if (rc != 0) {
3328                 CERROR("Failed to initialize NI pools: %d\n", rc);
3329                 goto failed;
3330         }
3331
3332         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
3333         ibdev->ibd_nnets++;
3334         list_add_tail(&net->ibn_list, &ibdev->ibd_nets);
3335         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
3336
3337         net->ibn_init = IBLND_INIT_ALL;
3338
3339         return 0;
3340
3341 failed:
3342         if (net != NULL && net->ibn_dev == NULL && ibdev != NULL)
3343                 kiblnd_destroy_dev(ibdev);
3344
3345         kiblnd_shutdown(ni);
3346
3347         CDEBUG(D_NET, "kiblnd_startup failed\n");
3348         return -ENETDOWN;
3349 }
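
/*
 * For context: the interface name consumed above normally comes from
 * the LNet module options, e.g.
 *
 *      options lnet networks="o2ib0(ib0)"
 *
 * where "ib0" is an IPoIB interface; when no interface is listed, the
 * kib_default_ipif tunable (normally "ib0") is used instead.
 */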
3350
3351 static struct lnet_lnd the_o2iblnd = {
3352         .lnd_type       = O2IBLND,
3353         .lnd_startup    = kiblnd_startup,
3354         .lnd_shutdown   = kiblnd_shutdown,
3355         .lnd_ctl        = kiblnd_ctl,
3356         .lnd_query      = kiblnd_query,
3357         .lnd_send       = kiblnd_send,
3358         .lnd_recv       = kiblnd_recv,
3359 };
3360
3361 static void __exit ko2iblnd_exit(void)
3362 {
3363         lnet_unregister_lnd(&the_o2iblnd);
3364 }
3365
3366 static int __init ko2iblnd_init(void)
3367 {
3368         int rc;
3369
3370         CLASSERT(sizeof(struct kib_msg) <= IBLND_MSG_SIZE);
3371         CLASSERT(offsetof(struct kib_msg,
3372                           ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) <=
3373                  IBLND_MSG_SIZE);
3374         CLASSERT(offsetof(struct kib_msg,
3375                           ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
3376                  <= IBLND_MSG_SIZE);
3377
3378         rc = kiblnd_tunables_init();
3379         if (rc != 0)
3380                 return rc;
3381
3382         lnet_register_lnd(&the_o2iblnd);
3383
3384         return 0;
3385 }
3386
3387 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
3388 MODULE_DESCRIPTION("OpenIB gen2 LNet Network Driver");
3389 MODULE_VERSION("2.8.0");
3390 MODULE_LICENSE("GPL");
3391
3392 module_init(ko2iblnd_init);
3393 module_exit(ko2iblnd_exit);