/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/o2iblnd/o2iblnd.c
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */
#include "o2iblnd.h"

lnd_t the_kiblnd = {
        .lnd_type     = O2IBLND,
        .lnd_startup  = kiblnd_startup,
        .lnd_shutdown = kiblnd_shutdown,
        .lnd_ctl      = kiblnd_ctl,
        .lnd_send     = kiblnd_send,
        .lnd_recv     = kiblnd_recv,
};

kib_data_t kiblnd_data;
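/* Simple rolling checksum: rotate the 32-bit accumulator left one bit,
 * then add the next byte.  Used to guard wire messages against corruption,
 * e.g. kiblnd_cksum(msg, msg->ibm_nob) in kiblnd_pack_msg() below. */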
static __u32
kiblnd_cksum (void *ptr, int nob)
{
        char  *c  = ptr;
        __u32  sum = 0;

        while (nob-- > 0)
                sum = ((sum << 1) | (sum >> 31)) + *c++;

        /* ensure I don't return 0 (== no checksum) */
        return (sum == 0) ? 1 : sum;
}
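/* Stamp a message with its type and total length (header + body); the
 * remaining header fields are filled in by kiblnd_pack_msg() just before
 * the message goes out on the wire. */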
void
kiblnd_init_msg (kib_msg_t *msg, int type, int body_nob)
{
        msg->ibm_type = type;
        msg->ibm_nob  = offsetof(kib_msg_t, ibm_u) + body_nob;
}

void
kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg,
                 int credits, lnet_nid_t dstnid, __u64 dststamp)
{
        kib_net_t *net = ni->ni_data;

        /* CAVEAT EMPTOR! all message fields not set here should have been
         * initialised previously. */
        msg->ibm_magic    = IBLND_MSG_MAGIC;
        msg->ibm_version  = IBLND_MSG_VERSION;
        /*   ibm_type */
        msg->ibm_credits  = credits;
        /*   ibm_nob */
        msg->ibm_cksum    = 0;
        msg->ibm_srcnid   = ni->ni_nid;
        msg->ibm_srcstamp = net->ibn_incarnation;
        msg->ibm_dstnid   = dstnid;
        msg->ibm_dststamp = dststamp;

        if (*kiblnd_tunables.kib_cksum) {
                /* NB ibm_cksum zero while computing cksum */
                msg->ibm_cksum = kiblnd_cksum(msg, msg->ibm_nob);
        }
}
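/* Validate an incoming message: length, magic, version and (optional)
 * checksum, then byte-swap the header and the per-type payload fields when
 * the sender's endianness differs from ours (detected via flipped magic). */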
int
kiblnd_unpack_msg(kib_msg_t *msg, int nob)
{
        const int hdr_size = offsetof(kib_msg_t, ibm_u);
        __u32     msg_cksum;
        int       flip;
        int       msg_nob;
#if !IBLND_MAP_ON_DEMAND
        int       i;
        int       n;
#endif
        /* 6 bytes are enough to have received magic + version */
        if (nob < 6) {
                CERROR("Short message: %d\n", nob);
                return -EPROTO;
        }

        if (msg->ibm_magic == IBLND_MSG_MAGIC) {
                flip = 0;
        } else if (msg->ibm_magic == __swab32(IBLND_MSG_MAGIC)) {
                flip = 1;
        } else {
                CERROR("Bad magic: %08x\n", msg->ibm_magic);
                return -EPROTO;
        }

        if (msg->ibm_version !=
            (flip ? __swab16(IBLND_MSG_VERSION) : IBLND_MSG_VERSION)) {
                CERROR("Bad version: %d\n", msg->ibm_version);
                return -EPROTO;
        }

        if (nob < hdr_size) {
                CERROR("Short message: %d\n", nob);
                return -EPROTO;
        }

        msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob;
        if (msg_nob > nob) {
                CERROR("Short message: got %d, wanted %d\n", nob, msg_nob);
                return -EPROTO;
        }

        /* checksum must be computed with ibm_cksum zero and BEFORE anything
         * else is flipped */
        msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
        msg->ibm_cksum = 0;
        if (msg_cksum != 0 &&
            msg_cksum != kiblnd_cksum(msg, msg_nob)) {
                CERROR("Bad checksum\n");
                return -EPROTO;
        }
        msg->ibm_cksum = msg_cksum;

        if (flip) {
                /* leave magic unflipped as a clue to peer endianness */
                __swab16s(&msg->ibm_version);
                CLASSERT (sizeof(msg->ibm_type) == 1);
                CLASSERT (sizeof(msg->ibm_credits) == 1);
                msg->ibm_nob = msg_nob;
                __swab64s(&msg->ibm_srcnid);
                __swab64s(&msg->ibm_srcstamp);
                __swab64s(&msg->ibm_dstnid);
                __swab64s(&msg->ibm_dststamp);
        }

        if (msg->ibm_srcnid == LNET_NID_ANY) {
                CERROR("Bad src nid: %s\n", libcfs_nid2str(msg->ibm_srcnid));
                return -EPROTO;
        }

        switch (msg->ibm_type) {
        default:
                CERROR("Unknown message type %x\n", msg->ibm_type);
                return -EPROTO;

        case IBLND_MSG_NOOP:
                break;

        case IBLND_MSG_IMMEDIATE:
                if (msg_nob < offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0])) {
                        CERROR("Short IMMEDIATE: %d(%d)\n", msg_nob,
                               (int)offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]));
                        return -EPROTO;
                }
                break;

        case IBLND_MSG_PUT_REQ:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.putreq)) {
                        CERROR("Short PUT_REQ: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.putreq)));
                        return -EPROTO;
                }
                break;

        case IBLND_MSG_PUT_ACK:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.putack)) {
                        CERROR("Short PUT_ACK: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.putack)));
                        return -EPROTO;
                }
#if IBLND_MAP_ON_DEMAND
                if (flip) {
                        __swab64s(&msg->ibm_u.putack.ibpam_rd.rd_addr);
                        __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_nob);
                        __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_key);
                }
#else
                if (flip) {
                        __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_key);
                        __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_nfrags);
                }

                n = msg->ibm_u.putack.ibpam_rd.rd_nfrags;
                if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) {
                        CERROR("Bad PUT_ACK nfrags: %d, should be 0 < n <= %d\n",
                               n, IBLND_MAX_RDMA_FRAGS);
                        return -EPROTO;
                }

                if (msg_nob < offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[n])) {
                        CERROR("Short PUT_ACK: %d(%d)\n", msg_nob,
                               (int)offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[n]));
                        return -EPROTO;
                }

                if (flip) {
                        for (i = 0; i < n; i++) {
                                __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_frags[i].rf_nob);
                                __swab64s(&msg->ibm_u.putack.ibpam_rd.rd_frags[i].rf_addr);
                        }
                }
#endif
                break;

        case IBLND_MSG_GET_REQ:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.get)) {
                        CERROR("Short GET_REQ: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.get)));
                        return -EPROTO;
                }
#if IBLND_MAP_ON_DEMAND
                if (flip) {
                        __swab64s(&msg->ibm_u.get.ibgm_rd.rd_addr);
                        __swab32s(&msg->ibm_u.get.ibgm_rd.rd_nob);
                        __swab32s(&msg->ibm_u.get.ibgm_rd.rd_key);
                }
#else
                if (flip) {
                        __swab32s(&msg->ibm_u.get.ibgm_rd.rd_key);
                        __swab32s(&msg->ibm_u.get.ibgm_rd.rd_nfrags);
                }

                n = msg->ibm_u.get.ibgm_rd.rd_nfrags;
                if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) {
                        CERROR("Bad GET_REQ nfrags: %d, should be 0 < n <= %d\n",
                               n, IBLND_MAX_RDMA_FRAGS);
                        return -EPROTO;
                }

                if (msg_nob < offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[n])) {
                        CERROR("Short GET_REQ: %d(%d)\n", msg_nob,
                               (int)offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[n]));
                        return -EPROTO;
                }

                if (flip) {
                        for (i = 0; i < msg->ibm_u.get.ibgm_rd.rd_nfrags; i++) {
                                __swab32s(&msg->ibm_u.get.ibgm_rd.rd_frags[i].rf_nob);
                                __swab64s(&msg->ibm_u.get.ibgm_rd.rd_frags[i].rf_addr);
                        }
                }
#endif
                break;

        case IBLND_MSG_PUT_NAK:
        case IBLND_MSG_PUT_DONE:
        case IBLND_MSG_GET_DONE:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.completion)) {
                        CERROR("Short RDMA completion: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.completion)));
                        return -EPROTO;
                }
                if (flip)
                        __swab32s(&msg->ibm_u.completion.ibcm_status);
                break;

        case IBLND_MSG_CONNREQ:
        case IBLND_MSG_CONNACK:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.connparams)) {
                        CERROR("Short connreq/ack: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.connparams)));
                        return -EPROTO;
                }
                if (flip) {
                        __swab16s(&msg->ibm_u.connparams.ibcp_queue_depth);
                        __swab16s(&msg->ibm_u.connparams.ibcp_max_frags);
                        __swab32s(&msg->ibm_u.connparams.ibcp_max_msg_size);
                }
                break;
        }
        return 0;
}
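/* Allocate and initialise a peer for 'nid' on network 'ni'.  Returns with
 * one reference held for the caller and the peer counted in ibn_npeers. */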
int
kiblnd_create_peer (lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
{
        kib_peer_t     *peer;
        kib_net_t      *net = ni->ni_data;
        unsigned long   flags;

        LASSERT (net != NULL);
        LASSERT (nid != LNET_NID_ANY);

        LIBCFS_ALLOC(peer, sizeof(*peer));
        if (peer == NULL) {
                CERROR("Cannot allocate peer\n");
                return -ENOMEM;
        }

        memset(peer, 0, sizeof(*peer));         /* zero flags etc */

        peer->ibp_ni = ni;
        peer->ibp_nid = nid;
        peer->ibp_error = 0;
        peer->ibp_last_alive = cfs_time_current();
        atomic_set(&peer->ibp_refcount, 1);     /* 1 ref for caller */

        INIT_LIST_HEAD(&peer->ibp_list);        /* not in the peer table yet */
        INIT_LIST_HEAD(&peer->ibp_conns);
        INIT_LIST_HEAD(&peer->ibp_tx_queue);

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        /* always called with a ref on ni, which prevents ni being shutdown */
        LASSERT (net->ibn_shutdown == 0);

        /* npeers only grows with the global lock held */
        atomic_inc(&net->ibn_npeers);

        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        *peerp = peer;
        return 0;
}
void
kiblnd_destroy_peer (kib_peer_t *peer)
{
        kib_net_t *net = peer->ibp_ni->ni_data;

        LASSERT (net != NULL);
        LASSERT (atomic_read(&peer->ibp_refcount) == 0);
        LASSERT (!kiblnd_peer_active(peer));
        LASSERT (peer->ibp_connecting == 0);
        LASSERT (peer->ibp_accepting == 0);
        LASSERT (list_empty(&peer->ibp_conns));
        LASSERT (list_empty(&peer->ibp_tx_queue));

        LIBCFS_FREE(peer, sizeof(*peer));

        /* NB a peer's connections keep a reference on their peer until
         * they are destroyed, so we can be assured that _all_ state to do
         * with this peer has been cleaned up when its refcount drops to
         * zero. */
        atomic_dec(&net->ibn_npeers);
}
void
kiblnd_destroy_dev (kib_dev_t *dev)
{
        LASSERT (dev->ibd_nnets == 0);

        if (!list_empty(&dev->ibd_list)) /* on kib_devs? */
                list_del_init(&dev->ibd_list);

        if (dev->ibd_mr != NULL)
                ib_dereg_mr(dev->ibd_mr);

        if (dev->ibd_pd != NULL)
                ib_dealloc_pd(dev->ibd_pd);

        if (dev->ibd_cmid != NULL)
                rdma_destroy_id(dev->ibd_cmid);

        LIBCFS_FREE(dev, sizeof(*dev));
}
kib_peer_t *
kiblnd_find_peer_locked (lnet_nid_t nid)
{
        /* the caller is responsible for accounting the additional reference
         * that this creates */
        struct list_head *peer_list = kiblnd_nid2peerlist(nid);
        struct list_head *tmp;
        kib_peer_t       *peer;

        list_for_each (tmp, peer_list) {

                peer = list_entry(tmp, kib_peer_t, ibp_list);

                LASSERT (peer->ibp_connecting > 0 || /* creating conns */
                         peer->ibp_accepting > 0 ||
                         !list_empty(&peer->ibp_conns)); /* active conn */

                if (peer->ibp_nid != nid)
                        continue;

                CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
                       peer, libcfs_nid2str(nid),
                       atomic_read(&peer->ibp_refcount));
                return peer;
        }
        return NULL;
}
void
kiblnd_unlink_peer_locked (kib_peer_t *peer)
{
        LASSERT (list_empty(&peer->ibp_conns));
        LASSERT (kiblnd_peer_active(peer));

        list_del_init(&peer->ibp_list);
        /* lose peerlist's ref */
        kiblnd_peer_decref(peer);
}
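/* Look up the index'th peer on 'ni' and report its NID and reference
 * count (this backs the IOC_LIBCFS_GET_PEER ioctl below). */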
int
kiblnd_get_peer_info (lnet_ni_t *ni, int index,
                      lnet_nid_t *nidp, int *count)
{
        kib_peer_t       *peer;
        struct list_head *ptmp;
        unsigned long     flags;
        int               i;

        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {

                list_for_each (ptmp, &kiblnd_data.kib_peers[i]) {

                        peer = list_entry(ptmp, kib_peer_t, ibp_list);
                        LASSERT (peer->ibp_connecting > 0 ||
                                 peer->ibp_accepting > 0 ||
                                 !list_empty(&peer->ibp_conns));

                        if (peer->ibp_ni != ni)
                                continue;

                        if (index-- > 0)
                                continue;

                        *nidp = peer->ibp_nid;
                        *count = atomic_read(&peer->ibp_refcount);

                        read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
                                               flags);
                        return 0;
                }
        }

        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
        return -ENOENT;
}
void
kiblnd_del_peer_locked (kib_peer_t *peer)
{
        struct list_head *ctmp;
        struct list_head *cnxt;
        kib_conn_t       *conn;

        if (list_empty(&peer->ibp_conns)) {
                kiblnd_unlink_peer_locked(peer);
        } else {
                list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
                        conn = list_entry(ctmp, kib_conn_t, ibc_list);

                        kiblnd_close_conn_locked(conn, 0);
                }
                /* NB closing peer's last conn unlinked it. */
        }
        /* NB peer now unlinked; might even be freed if the peer table had the
         * last ref on it. */
}
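/* Delete peers matching 'nid' on 'ni' (every peer when LNET_NID_ANY); any
 * txs still queued on a deleted peer are completed with -EIO. */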
int
kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid)
{
        CFS_LIST_HEAD    (zombies);
        struct list_head *ptmp;
        struct list_head *pnxt;
        kib_peer_t       *peer;
        unsigned long     flags;
        int               lo;
        int               hi;
        int               i;
        int               rc = -ENOENT;

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        if (nid != LNET_NID_ANY) {
                lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
        } else {
                lo = 0;
                hi = kiblnd_data.kib_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                list_for_each_safe (ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
                        peer = list_entry(ptmp, kib_peer_t, ibp_list);
                        LASSERT (peer->ibp_connecting > 0 ||
                                 peer->ibp_accepting > 0 ||
                                 !list_empty(&peer->ibp_conns));

                        if (peer->ibp_ni != ni)
                                continue;

                        if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid))
                                continue;

                        if (!list_empty(&peer->ibp_tx_queue)) {
                                LASSERT (list_empty(&peer->ibp_conns));

                                list_splice_init(&peer->ibp_tx_queue, &zombies);
                        }

                        kiblnd_del_peer_locked(peer);
                        rc = 0;         /* matched something */
                }
        }

        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        kiblnd_txlist_done(ni, &zombies, -EIO);

        return rc;
}
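/* Find the index'th connection on 'ni' and return it with a reference
 * added for the caller, who must eventually kiblnd_conn_decref() it. */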
kib_conn_t *
kiblnd_get_conn_by_idx (lnet_ni_t *ni, int index)
{
        kib_peer_t       *peer;
        struct list_head *ptmp;
        kib_conn_t       *conn;
        struct list_head *ctmp;
        unsigned long     flags;
        int               i;

        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
                list_for_each (ptmp, &kiblnd_data.kib_peers[i]) {

                        peer = list_entry(ptmp, kib_peer_t, ibp_list);
                        LASSERT (peer->ibp_connecting > 0 ||
                                 peer->ibp_accepting > 0 ||
                                 !list_empty(&peer->ibp_conns));

                        if (peer->ibp_ni != ni)
                                continue;

                        list_for_each (ctmp, &peer->ibp_conns) {
                                if (index-- > 0)
                                        continue;

                                conn = list_entry(ctmp, kib_conn_t, ibc_list);
                                kiblnd_conn_addref(conn);
                                read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
                                                       flags);
                                return conn;
                        }
                }
        }

        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
        return NULL;
}
void
kiblnd_debug_rx (kib_rx_t *rx)
{
        CDEBUG(D_CONSOLE, "      %p status %d msg_type %x cred %d\n",
               rx, rx->rx_status, rx->rx_msg->ibm_type,
               rx->rx_msg->ibm_credits);
}

void
kiblnd_debug_tx (kib_tx_t *tx)
{
        CDEBUG(D_CONSOLE, "      %p snd %d q %d w %d rc %d dl %lx "
               "cookie "LPX64" msg %s%s type %x cred %d\n",
               tx, tx->tx_sending, tx->tx_queued, tx->tx_waiting,
               tx->tx_status, tx->tx_deadline, tx->tx_cookie,
               tx->tx_lntmsg[0] == NULL ? "-" : "!",
               tx->tx_lntmsg[1] == NULL ? "-" : "!",
               tx->tx_msg->ibm_type, tx->tx_msg->ibm_credits);
}

void
kiblnd_debug_conn (kib_conn_t *conn)
{
        struct list_head *tmp;
        int               i;

        spin_lock(&conn->ibc_lock);

        CDEBUG(D_CONSOLE, "conn[%d] %p -> %s: \n",
               atomic_read(&conn->ibc_refcount), conn,
               libcfs_nid2str(conn->ibc_peer->ibp_nid));
        CDEBUG(D_CONSOLE, "   state %d nposted %d cred %d o_cred %d r_cred %d\n",
               conn->ibc_state, conn->ibc_nsends_posted, conn->ibc_credits,
               conn->ibc_outstanding_credits, conn->ibc_reserved_credits);
        CDEBUG(D_CONSOLE, "   comms_err %d\n", conn->ibc_comms_error);

        CDEBUG(D_CONSOLE, "   early_rxs:\n");
        list_for_each(tmp, &conn->ibc_early_rxs)
                kiblnd_debug_rx(list_entry(tmp, kib_rx_t, rx_list));

        CDEBUG(D_CONSOLE, "   tx_noops:\n");
        list_for_each(tmp, &conn->ibc_tx_noops)
                kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));

        CDEBUG(D_CONSOLE, "   tx_queue_nocred:\n");
        list_for_each(tmp, &conn->ibc_tx_queue_nocred)
                kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));

        CDEBUG(D_CONSOLE, "   tx_queue_rsrvd:\n");
        list_for_each(tmp, &conn->ibc_tx_queue_rsrvd)
                kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));

        CDEBUG(D_CONSOLE, "   tx_queue:\n");
        list_for_each(tmp, &conn->ibc_tx_queue)
                kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));

        CDEBUG(D_CONSOLE, "   active_txs:\n");
        list_for_each(tmp, &conn->ibc_active_txs)
                kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));

        CDEBUG(D_CONSOLE, "   rxs:\n");
        for (i = 0; i < IBLND_RX_MSGS; i++)
                kiblnd_debug_rx(&conn->ibc_rxs[i]);

        spin_unlock(&conn->ibc_lock);
}
kib_conn_t *
kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid, int state)
{
        /* CAVEAT EMPTOR:
         * If the new conn is created successfully it takes over the caller's
         * ref on 'peer'.  It also "owns" 'cmid' and destroys it when it itself
         * is destroyed.  On failure, the caller's ref on 'peer' remains and
         * she must dispose of 'cmid'.  (Actually I'd block forever if I tried
         * to destroy 'cmid' here since I'm called from the CM which still has
         * its ref on 'cmid'). */
        kib_net_t              *net = peer->ibp_ni->ni_data;
        kib_conn_t             *conn;
        struct ib_qp_init_attr *init_qp_attr;
        struct ib_cq           *cq;
        unsigned long           flags;
        int                     rc;
        int                     i;
        int                     page_offset;
        int                     ipage;

        LASSERT (net != NULL);
        LASSERT (!in_interrupt());

        LIBCFS_ALLOC(init_qp_attr, sizeof(*init_qp_attr));
        if (init_qp_attr == NULL) {
                CERROR("Can't allocate qp_attr for %s\n",
                       libcfs_nid2str(peer->ibp_nid));
                goto failed_0;
        }

        LIBCFS_ALLOC(conn, sizeof(*conn));
        if (conn == NULL) {
                CERROR("Can't allocate connection for %s\n",
                       libcfs_nid2str(peer->ibp_nid));
                goto failed_1;
        }

        memset(conn, 0, sizeof(*conn)); /* zero flags, NULL pointers etc... */

        conn->ibc_state = IBLND_CONN_INIT;
        conn->ibc_peer = peer;                  /* I take the caller's ref */
        cmid->context = conn;                   /* for future CM callbacks */
        conn->ibc_cmid = cmid;

        INIT_LIST_HEAD(&conn->ibc_early_rxs);
        INIT_LIST_HEAD(&conn->ibc_tx_noops);
        INIT_LIST_HEAD(&conn->ibc_tx_queue);
        INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
        INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
        INIT_LIST_HEAD(&conn->ibc_active_txs);
        spin_lock_init(&conn->ibc_lock);

        LIBCFS_ALLOC(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
        if (conn->ibc_connvars == NULL) {
                CERROR("Can't allocate in-progress connection state\n");
                goto failed_2;
        }
        memset(conn->ibc_connvars, 0, sizeof(*conn->ibc_connvars));

        LIBCFS_ALLOC(conn->ibc_rxs, IBLND_RX_MSGS * sizeof(kib_rx_t));
        if (conn->ibc_rxs == NULL) {
                CERROR("Cannot allocate RX buffers\n");
                goto failed_2;
        }
        memset(conn->ibc_rxs, 0, IBLND_RX_MSGS * sizeof(kib_rx_t));

        rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, IBLND_RX_MSG_PAGES);
        if (rc != 0)
                goto failed_2;

        for (i = ipage = page_offset = 0; i < IBLND_RX_MSGS; i++) {
                struct page *page = conn->ibc_rx_pages->ibp_pages[ipage];
                kib_rx_t    *rx = &conn->ibc_rxs[i];

                rx->rx_conn = conn;
                rx->rx_msg = (kib_msg_t *)(((char *)page_address(page)) +
                                           page_offset);
                rx->rx_msgaddr = kiblnd_dma_map_single(cmid->device,
                                                       rx->rx_msg, IBLND_MSG_SIZE,
                                                       DMA_FROM_DEVICE);
                KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);

                CDEBUG(D_NET,"rx %d: %p "LPX64"("LPX64")\n",
                       i, rx->rx_msg, rx->rx_msgaddr,
                       lnet_page2phys(page) + page_offset);

                page_offset += IBLND_MSG_SIZE;
                LASSERT (page_offset <= PAGE_SIZE);

                if (page_offset == PAGE_SIZE) {
                        page_offset = 0;
                        ipage++;
                        LASSERT (ipage <= IBLND_RX_MSG_PAGES);
                }
        }

#ifdef HAVE_OFED_IB_COMP_VECTOR
        cq = ib_create_cq(cmid->device,
                          kiblnd_cq_completion, kiblnd_cq_event, conn,
                          IBLND_CQ_ENTRIES(), 0);
#else
        cq = ib_create_cq(cmid->device,
                          kiblnd_cq_completion, kiblnd_cq_event, conn,
                          IBLND_CQ_ENTRIES());
#endif
        if (IS_ERR(cq)) {
                CERROR("Can't create CQ: %ld\n", PTR_ERR(cq));
                goto failed_2;
        }

        conn->ibc_cq = cq;
        rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
        if (rc != 0) {
                CERROR("Can't request completion notification: %d\n", rc);
                goto failed_2;
        }
        memset(init_qp_attr, 0, sizeof(*init_qp_attr));
        init_qp_attr->event_handler = kiblnd_qp_event;
        init_qp_attr->qp_context = conn;
        init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS;
        init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS;
        init_qp_attr->cap.max_send_sge = 1;
        init_qp_attr->cap.max_recv_sge = 1;
        init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
        init_qp_attr->qp_type = IB_QPT_RC;
        init_qp_attr->send_cq = cq;
        init_qp_attr->recv_cq = cq;

        rc = 0;
        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
        switch (*kiblnd_tunables.kib_ib_mtu) {
        default:
                rc = *kiblnd_tunables.kib_ib_mtu;
                /* fall through to... */
        case 0: /* set tunable to the default
                 * CAVEAT EMPTOR! this assumes the default is one of the MTUs
                 * below, otherwise we'll WARN on the next QP create */
                *kiblnd_tunables.kib_ib_mtu =
                        ib_mtu_enum_to_int(cmid->route.path_rec->mtu);
                break;
        case 256:
                cmid->route.path_rec->mtu = IB_MTU_256;
                break;
        case 512:
                cmid->route.path_rec->mtu = IB_MTU_512;
                break;
        case 1024:
                cmid->route.path_rec->mtu = IB_MTU_1024;
                break;
        case 2048:
                cmid->route.path_rec->mtu = IB_MTU_2048;
                break;
        case 4096:
                cmid->route.path_rec->mtu = IB_MTU_4096;
                break;
        }
        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        if (rc != 0)
                CWARN("Invalid IB MTU value %d, using default value %d\n",
                      rc, *kiblnd_tunables.kib_ib_mtu);

        rc = rdma_create_qp(cmid, net->ibn_dev->ibd_pd, init_qp_attr);
        if (rc != 0) {
                CERROR("Can't create QP: %d\n", rc);
                goto failed_2;
        }

        LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));

        /* 1 ref for caller and each rxmsg */
        atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS);
        conn->ibc_nrx = IBLND_RX_MSGS;

        /* post receives */
        for (i = 0; i < IBLND_RX_MSGS; i++) {
                rc = kiblnd_post_rx(&conn->ibc_rxs[i],
                                    IBLND_POSTRX_NO_CREDIT);
                if (rc != 0) {
                        CERROR("Can't post rxmsg: %d\n", rc);

                        /* Make posted receives complete */
                        kiblnd_abort_receives(conn);

                        /* correct # of posted buffers
                         * NB locking needed now I'm racing with completion */
                        spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
                        conn->ibc_nrx -= IBLND_RX_MSGS - i;
                        spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
                                               flags);

                        /* Drop my own and unused rxbuffer refcounts */
                        while (i++ <= IBLND_RX_MSGS)
                                kiblnd_conn_decref(conn);

                        return NULL;
                }
        }

        /* Init successful! */
        LASSERT (state == IBLND_CONN_ACTIVE_CONNECT ||
                 state == IBLND_CONN_PASSIVE_WAIT);
        conn->ibc_state = state;

        /* 1 more conn */
        atomic_inc(&net->ibn_nconns);
        return conn;

 failed_2:
        kiblnd_destroy_conn(conn);
 failed_1:
        LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
 failed_0:
        return NULL;
}
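/* Tear down a connection whose refcount has reached zero: destroy the QP
 * and CQ, unmap and free its rx buffers and, unless it never got past
 * IBLND_CONN_INIT, drop the peer ref and destroy the cmid. */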
void
kiblnd_destroy_conn (kib_conn_t *conn)
{
        struct rdma_cm_id *cmid = conn->ibc_cmid;
        kib_peer_t        *peer = conn->ibc_peer;
        int                rc;
        int                i;

        LASSERT (!in_interrupt());
        LASSERT (atomic_read(&conn->ibc_refcount) == 0);
        LASSERT (list_empty(&conn->ibc_early_rxs));
        LASSERT (list_empty(&conn->ibc_tx_noops));
        LASSERT (list_empty(&conn->ibc_tx_queue));
        LASSERT (list_empty(&conn->ibc_tx_queue_rsrvd));
        LASSERT (list_empty(&conn->ibc_tx_queue_nocred));
        LASSERT (list_empty(&conn->ibc_active_txs));
        LASSERT (conn->ibc_nsends_posted == 0);

        switch (conn->ibc_state) {
        default:
                /* conn must be completely disengaged from the network */
                LBUG();

        case IBLND_CONN_DISCONNECTED:
                /* connvars should have been freed already */
                LASSERT (conn->ibc_connvars == NULL);
                /* fall through */

        case IBLND_CONN_INIT:
                break;
        }

        if (conn->ibc_cmid->qp != NULL)
                rdma_destroy_qp(conn->ibc_cmid);

        if (conn->ibc_cq != NULL) {
                rc = ib_destroy_cq(conn->ibc_cq);
                if (rc != 0)
                        CWARN("Error destroying CQ: %d\n", rc);
        }

        if (conn->ibc_rx_pages != NULL) {
                LASSERT (conn->ibc_rxs != NULL);

                for (i = 0; i < IBLND_RX_MSGS; i++) {
                        kib_rx_t *rx = &conn->ibc_rxs[i];

                        LASSERT (rx->rx_nob >= 0); /* not posted */

                        kiblnd_dma_unmap_single(conn->ibc_cmid->device,
                                                KIBLND_UNMAP_ADDR(rx, rx_msgunmap,
                                                                  rx->rx_msgaddr),
                                                IBLND_MSG_SIZE, DMA_FROM_DEVICE);
                }

                kiblnd_free_pages(conn->ibc_rx_pages);
        }

        if (conn->ibc_rxs != NULL) {
                LIBCFS_FREE(conn->ibc_rxs,
                            IBLND_RX_MSGS * sizeof(kib_rx_t));
        }

        if (conn->ibc_connvars != NULL)
                LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));

        /* See CAVEAT EMPTOR above in kiblnd_create_conn */
        if (conn->ibc_state != IBLND_CONN_INIT) {
                kib_net_t *net = peer->ibp_ni->ni_data;

                kiblnd_peer_decref(peer);
                rdma_destroy_id(cmid);
                atomic_dec(&net->ibn_nconns);
        }

        LIBCFS_FREE(conn, sizeof(*conn));
}
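/* Close every connection of 'peer'; returns the number of connections
 * that were scheduled for close. */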
int
kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why)
{
        kib_conn_t       *conn;
        struct list_head *ctmp;
        struct list_head *cnxt;
        int               count = 0;

        list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
                conn = list_entry(ctmp, kib_conn_t, ibc_list);

                count++;
                kiblnd_close_conn_locked(conn, why);
        }

        return count;
}

int
kiblnd_close_stale_conns_locked (kib_peer_t *peer, __u64 incarnation)
{
        kib_conn_t       *conn;
        struct list_head *ctmp;
        struct list_head *cnxt;
        int               count = 0;

        list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
                conn = list_entry(ctmp, kib_conn_t, ibc_list);

                if (conn->ibc_incarnation == incarnation)
                        continue;

                CDEBUG(D_NET, "Closing stale conn -> %s incarnation:"LPX64"("LPX64")\n",
                       libcfs_nid2str(peer->ibp_nid),
                       conn->ibc_incarnation, incarnation);

                count++;
                kiblnd_close_conn_locked(conn, -ESTALE);
        }

        return count;
}
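/* Close connections to 'nid' on 'ni' (every peer for LNET_NID_ANY).
 * Wildcard requests always succeed; otherwise -ENOENT means no match. */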
int
kiblnd_close_matching_conns (lnet_ni_t *ni, lnet_nid_t nid)
{
        kib_peer_t       *peer;
        struct list_head *ptmp;
        struct list_head *pnxt;
        unsigned long     flags;
        int               lo;
        int               hi;
        int               i;
        int               count = 0;

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        if (nid != LNET_NID_ANY)
                lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
        else {
                lo = 0;
                hi = kiblnd_data.kib_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                list_for_each_safe (ptmp, pnxt, &kiblnd_data.kib_peers[i]) {

                        peer = list_entry(ptmp, kib_peer_t, ibp_list);
                        LASSERT (peer->ibp_connecting > 0 ||
                                 peer->ibp_accepting > 0 ||
                                 !list_empty(&peer->ibp_conns));

                        if (peer->ibp_ni != ni)
                                continue;

                        if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid))
                                continue;

                        count += kiblnd_close_peer_conns_locked(peer, 0);
                }
        }

        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        /* wildcards always succeed */
        if (nid == LNET_NID_ANY)
                return 0;

        return (count == 0) ? -ENOENT : 0;
}
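/* ioctl entry point; handles the IOC_LIBCFS_GET_PEER, DEL_PEER, GET_CONN
 * and CLOSE_CONNECTION requests from userspace. */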
int
kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
{
        struct libcfs_ioctl_data *data = arg;
        int                       rc = -EINVAL;

        switch(cmd) {
        case IOC_LIBCFS_GET_PEER: {
                lnet_nid_t   nid = 0;
                int          count = 0;

                rc = kiblnd_get_peer_info(ni, data->ioc_count,
                                          &nid, &count);
                data->ioc_nid   = nid;
                data->ioc_count = count;
                break;
        }

        case IOC_LIBCFS_DEL_PEER: {
                rc = kiblnd_del_peer(ni, data->ioc_nid);
                break;
        }

        case IOC_LIBCFS_GET_CONN: {
                kib_conn_t *conn = kiblnd_get_conn_by_idx(ni, data->ioc_count);

                if (conn == NULL) {
                        rc = -ENOENT;
                        break;
                }

                // kiblnd_debug_conn(conn);
                rc = 0;
                data->ioc_nid = conn->ibc_peer->ibp_nid;
                kiblnd_conn_decref(conn);
                break;
        }

        case IOC_LIBCFS_CLOSE_CONNECTION: {
                rc = kiblnd_close_matching_conns(ni, data->ioc_nid);
                break;
        }
        }

        return rc;
}
void
kiblnd_free_pages (kib_pages_t *p)
{
        int npages = p->ibp_npages;
        int i;

        for (i = 0; i < npages; i++)
                if (p->ibp_pages[i] != NULL)
                        __free_page(p->ibp_pages[i]);

        LIBCFS_FREE (p, offsetof(kib_pages_t, ibp_pages[npages]));
}
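/* Allocate a page descriptor plus 'npages' kernel pages; on partial
 * failure everything allocated so far is freed again. */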
int
kiblnd_alloc_pages (kib_pages_t **pp, int npages)
{
        kib_pages_t *p;
        int          i;

        LIBCFS_ALLOC(p, offsetof(kib_pages_t, ibp_pages[npages]));
        if (p == NULL) {
                CERROR("Can't allocate descriptor for %d pages\n", npages);
                return -ENOMEM;
        }

        memset(p, 0, offsetof(kib_pages_t, ibp_pages[npages]));
        p->ibp_npages = npages;

        for (i = 0; i < npages; i++) {
                p->ibp_pages[i] = alloc_page(GFP_KERNEL);
                if (p->ibp_pages[i] == NULL) {
                        CERROR("Can't allocate page %d of %d\n", i, npages);
                        kiblnd_free_pages(p);
                        return -ENOMEM;
                }
        }

        *pp = p;
        return 0;
}
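/* Free every tx descriptor together with its work requests, scatter/gather
 * entries, RDMA descriptor and fragment array, then the descriptor block
 * itself and the pre-mapped tx message pages. */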
void
kiblnd_free_tx_descs (lnet_ni_t *ni)
{
        int        i;
        kib_net_t *net = ni->ni_data;

        LASSERT (net != NULL);

        if (net->ibn_tx_descs != NULL) {
                for (i = 0; i < IBLND_TX_MSGS(); i++) {
                        kib_tx_t *tx = &net->ibn_tx_descs[i];

#if IBLND_MAP_ON_DEMAND
                        if (tx->tx_pages != NULL)
                                LIBCFS_FREE(tx->tx_pages, LNET_MAX_IOV *
                                            sizeof(*tx->tx_pages));
#endif
                        if (tx->tx_wrq != NULL)
                                LIBCFS_FREE(tx->tx_wrq,
                                            (1 + IBLND_MAX_RDMA_FRAGS) *
                                            sizeof(*tx->tx_wrq));

                        if (tx->tx_sge != NULL)
                                LIBCFS_FREE(tx->tx_sge,
                                            (1 + IBLND_MAX_RDMA_FRAGS) *
                                            sizeof(*tx->tx_sge));

                        if (tx->tx_rd != NULL)
                                LIBCFS_FREE(tx->tx_rd,
                                            offsetof(kib_rdma_desc_t,
                                                     rd_frags[IBLND_MAX_RDMA_FRAGS]));

                        if (tx->tx_frags != NULL)
                                LIBCFS_FREE(tx->tx_frags,
                                            IBLND_MAX_RDMA_FRAGS *
                                            sizeof(*tx->tx_frags));
                }

                LIBCFS_FREE(net->ibn_tx_descs,
                            IBLND_TX_MSGS() * sizeof(kib_tx_t));
        }

        if (net->ibn_tx_pages != NULL)
                kiblnd_free_pages(net->ibn_tx_pages);
}
int
kiblnd_alloc_tx_descs (lnet_ni_t *ni)
{
        int        i;
        int        rc;
        kib_net_t *net = ni->ni_data;

        LASSERT (net != NULL);

        rc = kiblnd_alloc_pages(&net->ibn_tx_pages, IBLND_TX_MSG_PAGES());
        if (rc != 0) {
                CERROR("Can't allocate tx pages\n");
                return rc;
        }

        LIBCFS_ALLOC (net->ibn_tx_descs,
                      IBLND_TX_MSGS() * sizeof(kib_tx_t));
        if (net->ibn_tx_descs == NULL) {
                CERROR("Can't allocate %d tx descriptors\n", IBLND_TX_MSGS());
                return -ENOMEM;
        }

        memset(net->ibn_tx_descs, 0,
               IBLND_TX_MSGS() * sizeof(kib_tx_t));

        for (i = 0; i < IBLND_TX_MSGS(); i++) {
                kib_tx_t *tx = &net->ibn_tx_descs[i];

#if IBLND_MAP_ON_DEMAND
                LIBCFS_ALLOC(tx->tx_pages, LNET_MAX_IOV *
                             sizeof(*tx->tx_pages));
                if (tx->tx_pages == NULL) {
                        CERROR("Can't allocate phys page vector[%d]\n",
                               i);
                        return -ENOMEM;
                }
#endif
                LIBCFS_ALLOC(tx->tx_wrq,
                             (1 + IBLND_MAX_RDMA_FRAGS) *
                             sizeof(*tx->tx_wrq));
                if (tx->tx_wrq == NULL)
                        return -ENOMEM;

                LIBCFS_ALLOC(tx->tx_sge,
                             (1 + IBLND_MAX_RDMA_FRAGS) *
                             sizeof(*tx->tx_sge));
                if (tx->tx_sge == NULL)
                        return -ENOMEM;

                LIBCFS_ALLOC(tx->tx_rd,
                             offsetof(kib_rdma_desc_t,
                                      rd_frags[IBLND_MAX_RDMA_FRAGS]));
                if (tx->tx_rd == NULL)
                        return -ENOMEM;

                LIBCFS_ALLOC(tx->tx_frags,
                             IBLND_MAX_RDMA_FRAGS *
                             sizeof(*tx->tx_frags));
                if (tx->tx_frags == NULL)
                        return -ENOMEM;
        }

        return 0;
}
void
kiblnd_unmap_tx_descs (lnet_ni_t *ni)
{
        int        i;
        kib_tx_t  *tx;
        kib_net_t *net = ni->ni_data;

        LASSERT (net != NULL);

        for (i = 0; i < IBLND_TX_MSGS(); i++) {
                tx = &net->ibn_tx_descs[i];

                kiblnd_dma_unmap_single(net->ibn_dev->ibd_cmid->device,
                                        KIBLND_UNMAP_ADDR(tx, tx_msgunmap,
                                                          tx->tx_msgaddr),
                                        IBLND_MSG_SIZE, DMA_TO_DEVICE);
        }
}
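/* Carve the pre-allocated tx pages into IBLND_MSG_SIZE message buffers,
 * DMA-map each one and put every tx descriptor on the idle list; the
 * CLASSERTs below guarantee a message never straddles a page boundary. */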
void
kiblnd_map_tx_descs (lnet_ni_t *ni)
{
        int          ipage = 0;
        int          page_offset = 0;
        int          i;
        struct page *page;
        kib_tx_t    *tx;
        kib_net_t   *net = ni->ni_data;

        LASSERT (net != NULL);

        /* pre-mapped messages are not bigger than 1 page */
        CLASSERT (IBLND_MSG_SIZE <= PAGE_SIZE);

        /* No fancy arithmetic when we do the buffer calculations */
        CLASSERT (PAGE_SIZE % IBLND_MSG_SIZE == 0);

        for (i = 0; i < IBLND_TX_MSGS(); i++) {
                page = net->ibn_tx_pages->ibp_pages[ipage];
                tx = &net->ibn_tx_descs[i];

                tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
                                           page_offset);

                tx->tx_msgaddr = kiblnd_dma_map_single(
                        net->ibn_dev->ibd_cmid->device,
                        tx->tx_msg, IBLND_MSG_SIZE, DMA_TO_DEVICE);
                KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);

                list_add(&tx->tx_list, &net->ibn_idle_txs);

                page_offset += IBLND_MSG_SIZE;
                LASSERT (page_offset <= PAGE_SIZE);

                if (page_offset == PAGE_SIZE) {
                        page_offset = 0;
                        ipage++;
                        LASSERT (ipage <= IBLND_TX_MSG_PAGES());
                }
        }
}
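/* Module-wide teardown: flag shutdown, wake the scheduler and connd
 * threads, wait for them all to exit, then free the peer hash table. */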
void
kiblnd_base_shutdown (void)
{
        int i;

        LASSERT (list_empty(&kiblnd_data.kib_devs));

        CDEBUG(D_MALLOC, "before LND base cleanup: kmem %d\n",
               atomic_read(&libcfs_kmemory));

        switch (kiblnd_data.kib_init) {
        default:
                LBUG();

        case IBLND_INIT_ALL:
        case IBLND_INIT_DATA:
                LASSERT (kiblnd_data.kib_peers != NULL);
                for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
                        LASSERT (list_empty(&kiblnd_data.kib_peers[i]));
                }
                LASSERT (list_empty(&kiblnd_data.kib_connd_zombies));
                LASSERT (list_empty(&kiblnd_data.kib_connd_conns));

                /* flag threads to terminate; wake and wait for them to die */
                kiblnd_data.kib_shutdown = 1;
                wake_up_all(&kiblnd_data.kib_sched_waitq);
                wake_up_all(&kiblnd_data.kib_connd_waitq);

                i = 2;
                while (atomic_read(&kiblnd_data.kib_nthreads) != 0) {
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                               "Waiting for %d threads to terminate\n",
                               atomic_read(&kiblnd_data.kib_nthreads));
                        cfs_pause(cfs_time_seconds(1));
                }

                /* fall through */

        case IBLND_INIT_NOTHING:
                break;
        }

        if (kiblnd_data.kib_peers != NULL)
                LIBCFS_FREE(kiblnd_data.kib_peers,
                            sizeof(struct list_head) *
                            kiblnd_data.kib_peer_hash_size);

        CDEBUG(D_MALLOC, "after LND base cleanup: kmem %d\n",
               atomic_read(&libcfs_kmemory));

        kiblnd_data.kib_init = IBLND_INIT_NOTHING;
        PORTAL_MODULE_UNUSE;
}
void
kiblnd_shutdown (lnet_ni_t *ni)
{
        kib_net_t    *net = ni->ni_data;
        rwlock_t     *g_lock = &kiblnd_data.kib_global_lock;
        int           i;
        unsigned long flags;

        LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL);

        if (net == NULL)
                goto out;

        CDEBUG(D_MALLOC, "before LND net cleanup: kmem %d\n",
               atomic_read(&libcfs_kmemory));

        write_lock_irqsave(g_lock, flags);
        net->ibn_shutdown = 1;
        write_unlock_irqrestore(g_lock, flags);

        switch (net->ibn_init) {
        default:
                LBUG();

        case IBLND_INIT_ALL:
                /* nuke all existing peers within this net */
                kiblnd_del_peer(ni, LNET_NID_ANY);

                /* Wait for all peer state to clean up */
                i = 2;
                while (atomic_read(&net->ibn_npeers) != 0) {
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
                               "%s: waiting for %d peers to disconnect\n",
                               libcfs_nid2str(ni->ni_nid),
                               atomic_read(&net->ibn_npeers));
                        cfs_pause(cfs_time_seconds(1));
                }

                kiblnd_unmap_tx_descs(ni);

                LASSERT (net->ibn_dev->ibd_nnets > 0);
                net->ibn_dev->ibd_nnets--;

                /* fall through */

        case IBLND_INIT_NOTHING:
                LASSERT (atomic_read(&net->ibn_nconns) == 0);

#if IBLND_MAP_ON_DEMAND
                if (net->ibn_fmrpool != NULL)
                        ib_destroy_fmr_pool(net->ibn_fmrpool);
#endif
                if (net->ibn_dev != NULL &&
                    net->ibn_dev->ibd_nnets == 0)
                        kiblnd_destroy_dev(net->ibn_dev);

                break;
        }

        kiblnd_free_tx_descs(ni);

        CDEBUG(D_MALLOC, "after LND net cleanup: kmem %d\n",
               atomic_read(&libcfs_kmemory));

        net->ibn_init = IBLND_INIT_NOTHING;
        ni->ni_data = NULL;

        LIBCFS_FREE(net, sizeof(*net));

out:
        if (list_empty(&kiblnd_data.kib_devs))
                kiblnd_base_shutdown();
        return;
}
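/* One-time module-wide initialisation: global locks, the peer hash table,
 * connd/scheduler state and the service threads. */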
int
kiblnd_base_startup (void)
{
        int i;
        int rc;

        LASSERT (kiblnd_data.kib_init == IBLND_INIT_NOTHING);

        if (*kiblnd_tunables.kib_credits > *kiblnd_tunables.kib_ntx) {
                CERROR("Can't set credits(%d) > ntx(%d)\n",
                       *kiblnd_tunables.kib_credits,
                       *kiblnd_tunables.kib_ntx);
                return -EINVAL;
        }

        PORTAL_MODULE_USE;
        memset(&kiblnd_data, 0, sizeof(kiblnd_data)); /* zero pointers, flags etc */

        rwlock_init(&kiblnd_data.kib_global_lock);

        INIT_LIST_HEAD(&kiblnd_data.kib_devs);

        kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
        LIBCFS_ALLOC(kiblnd_data.kib_peers,
                     sizeof(struct list_head) * kiblnd_data.kib_peer_hash_size);
        if (kiblnd_data.kib_peers == NULL) {
                goto failed;
        }
        for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
                INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);

        spin_lock_init(&kiblnd_data.kib_connd_lock);
        INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
        INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
        init_waitqueue_head(&kiblnd_data.kib_connd_waitq);

        spin_lock_init(&kiblnd_data.kib_sched_lock);
        INIT_LIST_HEAD(&kiblnd_data.kib_sched_conns);
        init_waitqueue_head(&kiblnd_data.kib_sched_waitq);

        kiblnd_data.kib_error_qpa.qp_state = IB_QPS_ERR;

        /* lists/ptrs/locks initialised */
        kiblnd_data.kib_init = IBLND_INIT_DATA;
        /*****************************************************/

        for (i = 0; i < IBLND_N_SCHED; i++) {
                rc = kiblnd_thread_start(kiblnd_scheduler, (void *)((long)i));
                if (rc != 0) {
                        CERROR("Can't spawn o2iblnd scheduler[%d]: %d\n",
                               i, rc);
                        goto failed;
                }
        }

        rc = kiblnd_thread_start(kiblnd_connd, NULL);
        if (rc != 0) {
                CERROR("Can't spawn o2iblnd connd: %d\n", rc);
                goto failed;
        }

        /* flag everything initialised */
        kiblnd_data.kib_init = IBLND_INIT_ALL;
        /*****************************************************/

        return 0;

 failed:
        kiblnd_base_shutdown();
        return -ENETDOWN;
}
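/* Per-NI startup: bring up the module-wide state on first use, allocate
 * the net and its tx descriptors, then find (or create, bind and listen
 * on) the IPoIB interface named in 'networks=' or the default one. */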
int
kiblnd_startup (lnet_ni_t *ni)
{
        char             *ifname;
        kib_dev_t        *ibdev = NULL;
        kib_net_t        *net;
        struct list_head *tmp;
        struct timeval    tv;
        int               rc;

        LASSERT (ni->ni_lnd == &the_kiblnd);

        if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
                rc = kiblnd_base_startup();
                if (rc != 0)
                        return rc;
        }

        LIBCFS_ALLOC(net, sizeof(*net));
        if (net == NULL)
                goto failed;

        memset(net, 0, sizeof(*net));

        ni->ni_data = net;

        do_gettimeofday(&tv);
        net->ibn_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;

        ni->ni_maxtxcredits = *kiblnd_tunables.kib_credits;
        ni->ni_peertxcredits = *kiblnd_tunables.kib_peercredits;

        spin_lock_init(&net->ibn_tx_lock);
        INIT_LIST_HEAD(&net->ibn_idle_txs);

        rc = kiblnd_alloc_tx_descs(ni);
        if (rc != 0) {
                CERROR("Can't allocate tx descs\n");
                goto failed;
        }

        if (ni->ni_interfaces[0] != NULL) {
                /* Use the IPoIB interface specified in 'networks=' */

                CLASSERT (LNET_MAX_INTERFACES > 1);
                if (ni->ni_interfaces[1] != NULL) {
                        CERROR("Multiple interfaces not supported\n");
                        goto failed;
                }

                ifname = ni->ni_interfaces[0];
        } else {
                ifname = *kiblnd_tunables.kib_default_ipif;
        }

        if (strlen(ifname) >= sizeof(ibdev->ibd_ifname)) {
                CERROR("IPoIB interface name too long: %s\n", ifname);
                goto failed;
        }

        list_for_each (tmp, &kiblnd_data.kib_devs) {
                ibdev = list_entry(tmp, kib_dev_t, ibd_list);

                if (!strcmp(&ibdev->ibd_ifname[0], ifname))
                        break;

                ibdev = NULL;
        }

        if (ibdev == NULL) {
                __u32                ip;
                __u32                netmask;
                int                  up;
                struct rdma_cm_id   *id;
                struct ib_pd        *pd;
                struct ib_mr        *mr;
                struct sockaddr_in   addr;

                rc = libcfs_ipif_query(ifname, &up, &ip, &netmask);
                if (rc != 0) {
                        CERROR("Can't query IPoIB interface %s: %d\n",
                               ifname, rc);
                        goto failed;
                }

                if (!up) {
                        CERROR("Can't query IPoIB interface %s: it's down\n",
                               ifname);
                        goto failed;
                }

                LIBCFS_ALLOC(ibdev, sizeof(*ibdev));
                if (ibdev == NULL)
                        goto failed;

                memset(ibdev, 0, sizeof(*ibdev));

                INIT_LIST_HEAD(&ibdev->ibd_list); /* not yet in kib_devs */
                ibdev->ibd_ifip = ip;
                strcpy(&ibdev->ibd_ifname[0], ifname);

                id = rdma_create_id(kiblnd_cm_callback, ibdev, RDMA_PS_TCP);
                if (!IS_ERR(id)) {
                        ibdev->ibd_cmid = id;
                } else {
                        CERROR("Can't create listen ID: %ld\n", PTR_ERR(id));
                        goto failed;
                }

                memset(&addr, 0, sizeof(addr));
                addr.sin_family      = AF_INET;
                addr.sin_port        = htons(*kiblnd_tunables.kib_service);
                addr.sin_addr.s_addr = htonl(ip);

                rc = rdma_bind_addr(id, (struct sockaddr *)&addr);
                if (rc != 0) {
                        CERROR("Can't bind to %s: %d\n", ifname, rc);
                        goto failed;
                }

                /* Binding should have assigned me an IB device */
                LASSERT (id->device != NULL);

                pd = ib_alloc_pd(id->device);
                if (!IS_ERR(pd)) {
                        ibdev->ibd_pd = pd;
                } else {
                        CERROR("Can't allocate PD: %ld\n", PTR_ERR(pd));
                        goto failed;
                }

#if IBLND_MAP_ON_DEMAND
                /* MR for sends and receives */
                mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
#else
                /* MR for sends, receives _and_ RDMA...........v */
                mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE |
                                       IB_ACCESS_REMOTE_WRITE);
#endif
                if (!IS_ERR(mr)) {
                        ibdev->ibd_mr = mr;
                } else {
                        CERROR("Can't get MR: %ld\n", PTR_ERR(mr));
                        goto failed;
                }

                rc = rdma_listen(id, 0);
                if (rc != 0) {
                        CERROR("Can't start listener: %d\n", rc);
                        goto failed;
                }

                list_add_tail(&ibdev->ibd_list,
                              &kiblnd_data.kib_devs);
        }

        ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);
        net->ibn_dev = ibdev;

#if IBLND_MAP_ON_DEMAND
        /* FMR pool for RDMA */
        {
                struct ib_fmr_pool      *fmrpool;
                struct ib_fmr_pool_param param = {
                        .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
                        .page_shift        = PAGE_SHIFT,
                        .access            = (IB_ACCESS_LOCAL_WRITE |
                                              IB_ACCESS_REMOTE_WRITE),
                        .pool_size         = *kiblnd_tunables.kib_fmr_pool_size,
                        .dirty_watermark   = *kiblnd_tunables.kib_fmr_flush_trigger,
                        .flush_function    = NULL,
                        .flush_arg         = NULL,
                        .cache             = *kiblnd_tunables.kib_fmr_cache};

                if (*kiblnd_tunables.kib_fmr_pool_size <
                    *kiblnd_tunables.kib_ntx) {
                        CERROR("Can't set fmr pool size (%d) < ntx(%d)\n",
                               *kiblnd_tunables.kib_fmr_pool_size,
                               *kiblnd_tunables.kib_ntx);
                        goto failed;
                }

                fmrpool = ib_create_fmr_pool(ibdev->ibd_pd, &param);
                if (!IS_ERR(fmrpool)) {
                        net->ibn_fmrpool = fmrpool;
                } else {
                        CERROR("Can't create FMR pool: %ld\n",
                               PTR_ERR(fmrpool));
                        goto failed;
                }
        }
#endif

        kiblnd_map_tx_descs(ni);

        net->ibn_init = IBLND_INIT_ALL;

        return 0;

failed:
        kiblnd_shutdown(ni);

        CDEBUG(D_NET, "kiblnd_startup failed\n");
        return -ENETDOWN;
}
void
kiblnd_module_fini (void)
{
        lnet_unregister_lnd(&the_kiblnd);
        kiblnd_tunables_fini();
}

int
kiblnd_module_init (void)
{
        int rc;

        CLASSERT (sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
#if !IBLND_MAP_ON_DEMAND
        CLASSERT (offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
                  <= IBLND_MSG_SIZE);
        CLASSERT (offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
                  <= IBLND_MSG_SIZE);
#endif
        rc = kiblnd_tunables_init();
        if (rc != 0)
                return rc;

        lnet_register_lnd(&the_kiblnd);

        return 0;
}

MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Kernel OpenIB gen2 LND v1.00");
MODULE_LICENSE("GPL");

module_init(kiblnd_module_init);
module_exit(kiblnd_module_fini);