/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2006 Cluster File Systems, Inc.
 *   Author: Eric Barton <eric@bartonsoftware.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "o2iblnd.h"
lnd_t the_kiblnd = {
        .lnd_type       = O2IBLND,
        .lnd_startup    = kiblnd_startup,
        .lnd_shutdown   = kiblnd_shutdown,
        .lnd_ctl        = kiblnd_ctl,
        .lnd_send       = kiblnd_send,
        .lnd_recv       = kiblnd_recv,
};

kib_data_t              kiblnd_data;
__u32
kiblnd_cksum (void *ptr, int nob)
{
        char  *c  = ptr;
        __u32  sum = 0;

        while (nob-- > 0)
                sum = ((sum << 1) | (sum >> 31)) + *c++;

        /* ensure I don't return 0 (== no checksum) */
        return (sum == 0) ? 1 : sum;
}
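/* NB the rotate-and-add above is order-sensitive (unlike a plain byte sum),
 * so transposed bytes change the result; it is still only a weak 32-bit
 * integrity check, not a cryptographic digest. */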
void
kiblnd_init_msg (kib_msg_t *msg, int type, int body_nob)
{
        msg->ibm_type = type;
        msg->ibm_nob  = offsetof(kib_msg_t, ibm_u) + body_nob;
}
void
kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg,
                 int credits, lnet_nid_t dstnid, __u64 dststamp)
{
        kib_net_t *net = ni->ni_data;

        /* CAVEAT EMPTOR! all message fields not set here should have been
         * initialised previously. */
        msg->ibm_magic    = IBLND_MSG_MAGIC;
        msg->ibm_version  = IBLND_MSG_VERSION;
        /*   ibm_type */
        msg->ibm_credits  = credits;
        /*   ibm_nob */
        msg->ibm_cksum    = 0;
        msg->ibm_srcnid   = lnet_ptlcompat_srcnid(ni->ni_nid, dstnid);
        msg->ibm_srcstamp = net->ibn_incarnation;
        msg->ibm_dstnid   = dstnid;
        msg->ibm_dststamp = dststamp;

        if (*kiblnd_tunables.kib_cksum) {
                /* NB ibm_cksum zero while computing cksum */
                msg->ibm_cksum = kiblnd_cksum(msg, msg->ibm_nob);
        }
}
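/* Typical send path (sketch): the caller fills in ibm_u first, calls
 * kiblnd_init_msg() to set ibm_type/ibm_nob, and calls kiblnd_pack_msg()
 * last, immediately before posting, so the optional checksum covers the
 * finished message. */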
int
kiblnd_unpack_msg(kib_msg_t *msg, int nob)
{
        const int hdr_size = offsetof(kib_msg_t, ibm_u);
        __u32     msg_cksum;
        int       flip;
        int       msg_nob;
#if !IBLND_MAP_ON_DEMAND
        int       i;
        int       n;
#endif
        /* 6 bytes are enough to have received magic + version */
        if (nob < 6) {
                CERROR("Short message: %d\n", nob);
                return -EPROTO;
        }

        if (msg->ibm_magic == IBLND_MSG_MAGIC) {
                flip = 0;
        } else if (msg->ibm_magic == __swab32(IBLND_MSG_MAGIC)) {
                flip = 1;
        } else {
                CERROR("Bad magic: %08x\n", msg->ibm_magic);
                return -EPROTO;
        }

        if (msg->ibm_version !=
            (flip ? __swab16(IBLND_MSG_VERSION) : IBLND_MSG_VERSION)) {
                CERROR("Bad version: %d\n", msg->ibm_version);
                return -EPROTO;
        }

        if (nob < hdr_size) {
                CERROR("Short message: %d\n", nob);
                return -EPROTO;
        }

        msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob;
        if (msg_nob > nob) {
                CERROR("Short message: got %d, wanted %d\n", nob, msg_nob);
                return -EPROTO;
        }

        /* checksum must be computed with ibm_cksum zero and BEFORE anything
         * gets flipped */
        msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
        msg->ibm_cksum = 0;
        if (msg_cksum != 0 &&
            msg_cksum != kiblnd_cksum(msg, msg_nob)) {
                CERROR("Bad checksum\n");
                return -EPROTO;
        }
        msg->ibm_cksum = msg_cksum;

        if (flip) {
                /* leave magic unflipped as a clue to peer endianness */
                __swab16s(&msg->ibm_version);
                CLASSERT (sizeof(msg->ibm_type) == 1);
                CLASSERT (sizeof(msg->ibm_credits) == 1);
                msg->ibm_nob = msg_nob;
                __swab64s(&msg->ibm_srcnid);
                __swab64s(&msg->ibm_srcstamp);
                __swab64s(&msg->ibm_dstnid);
                __swab64s(&msg->ibm_dststamp);
        }

        if (msg->ibm_srcnid == LNET_NID_ANY) {
                CERROR("Bad src nid: %s\n", libcfs_nid2str(msg->ibm_srcnid));
                return -EPROTO;
        }
        switch (msg->ibm_type) {
        default:
                CERROR("Unknown message type %x\n", msg->ibm_type);
                return -EPROTO;

        case IBLND_MSG_NOOP:
                break;

        case IBLND_MSG_IMMEDIATE:
                if (msg_nob < offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0])) {
                        CERROR("Short IMMEDIATE: %d(%d)\n", msg_nob,
                               (int)offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]));
                        return -EPROTO;
                }
                break;

        case IBLND_MSG_PUT_REQ:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.putreq)) {
                        CERROR("Short PUT_REQ: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.putreq)));
                        return -EPROTO;
                }
                break;

        case IBLND_MSG_PUT_ACK:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.putack)) {
                        CERROR("Short PUT_ACK: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.putack)));
                        return -EPROTO;
                }
#if IBLND_MAP_ON_DEMAND
                if (flip) {
                        __swab64s(&msg->ibm_u.putack.ibpam_rd.rd_addr);
                        __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_nob);
                        __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_key);
                }
#else
                if (flip) {
                        __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_key);
                        __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_nfrags);
                }

                n = msg->ibm_u.putack.ibpam_rd.rd_nfrags;
                if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) {
                        CERROR("Bad PUT_ACK nfrags: %d, should be 0 < n <= %d\n",
                               n, IBLND_MAX_RDMA_FRAGS);
                        return -EPROTO;
                }

                if (msg_nob < offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[n])) {
                        CERROR("Short PUT_ACK: %d(%d)\n", msg_nob,
                               (int)offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[n]));
                        return -EPROTO;
                }

                if (flip) {
                        for (i = 0; i < n; i++) {
                                __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_frags[i].rf_nob);
                                __swab64s(&msg->ibm_u.putack.ibpam_rd.rd_frags[i].rf_addr);
                        }
                }
#endif
                break;

        case IBLND_MSG_GET_REQ:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.get)) {
                        CERROR("Short GET_REQ: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.get)));
                        return -EPROTO;
                }
#if IBLND_MAP_ON_DEMAND
                if (flip) {
                        __swab64s(&msg->ibm_u.get.ibgm_rd.rd_addr);
                        __swab32s(&msg->ibm_u.get.ibgm_rd.rd_nob);
                        __swab32s(&msg->ibm_u.get.ibgm_rd.rd_key);
                }
#else
                if (flip) {
                        __swab32s(&msg->ibm_u.get.ibgm_rd.rd_key);
                        __swab32s(&msg->ibm_u.get.ibgm_rd.rd_nfrags);
                }

                n = msg->ibm_u.get.ibgm_rd.rd_nfrags;
                if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) {
                        CERROR("Bad GET_REQ nfrags: %d, should be 0 < n <= %d\n",
                               n, IBLND_MAX_RDMA_FRAGS);
                        return -EPROTO;
                }

                if (msg_nob < offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[n])) {
                        CERROR("Short GET_REQ: %d(%d)\n", msg_nob,
                               (int)offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[n]));
                        return -EPROTO;
                }

                if (flip) {
                        for (i = 0; i < msg->ibm_u.get.ibgm_rd.rd_nfrags; i++) {
                                __swab32s(&msg->ibm_u.get.ibgm_rd.rd_frags[i].rf_nob);
                                __swab64s(&msg->ibm_u.get.ibgm_rd.rd_frags[i].rf_addr);
                        }
                }
#endif
                break;

        case IBLND_MSG_PUT_NAK:
        case IBLND_MSG_PUT_DONE:
        case IBLND_MSG_GET_DONE:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.completion)) {
                        CERROR("Short RDMA completion: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.completion)));
                        return -EPROTO;
                }
                if (flip)
                        __swab32s(&msg->ibm_u.completion.ibcm_status);
                break;

        case IBLND_MSG_CONNREQ:
        case IBLND_MSG_CONNACK:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.connparams)) {
                        CERROR("Short connreq/ack: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.connparams)));
                        return -EPROTO;
                }
                if (flip) {
                        __swab16s(&msg->ibm_u.connparams.ibcp_queue_depth);
                        __swab16s(&msg->ibm_u.connparams.ibcp_max_frags);
                        __swab32s(&msg->ibm_u.connparams.ibcp_max_msg_size);
                }
                break;
        }
        return 0;
}
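/* NB every case above validates msg_nob against the variant's minimum length
 * BEFORE byte-swapping its body, so a truncated or malformed message is
 * rejected with -EPROTO without touching bytes beyond the received data. */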
int
kiblnd_create_peer (lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
{
        kib_peer_t    *peer;
        kib_net_t     *net = ni->ni_data;
        unsigned long  flags;

        LASSERT (net != NULL);
        LASSERT (nid != LNET_NID_ANY);

        LIBCFS_ALLOC(peer, sizeof(*peer));
        if (peer == NULL) {
                CERROR("Cannot allocate peer\n");
                return -ENOMEM;
        }

        memset(peer, 0, sizeof(*peer));         /* zero flags etc */

        peer->ibp_ni = ni;
        peer->ibp_nid = nid;
        peer->ibp_error = 0;
        peer->ibp_last_alive = cfs_time_current();
        atomic_set(&peer->ibp_refcount, 1);     /* 1 ref for caller */

        INIT_LIST_HEAD(&peer->ibp_list);        /* not in the peer table yet */
        INIT_LIST_HEAD(&peer->ibp_conns);
        INIT_LIST_HEAD(&peer->ibp_tx_queue);

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        /* always called with a ref on ni, which prevents ni being shutdown */
        LASSERT (net->ibn_shutdown == 0);

        /* npeers only grows with the global lock held */
        atomic_inc(&net->ibn_npeers);

        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        *peerp = peer;
        return 0;
}
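/* Peer lifetime (sketch): the single ref created here belongs to the caller,
 * who typically donates it to the peer table; each conn takes its own ref,
 * so kiblnd_destroy_peer() runs only once the table entry and all conns are
 * gone. */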
void
kiblnd_destroy_peer (kib_peer_t *peer)
{
        kib_net_t *net = peer->ibp_ni->ni_data;

        LASSERT (net != NULL);
        LASSERT (atomic_read(&peer->ibp_refcount) == 0);
        LASSERT (!kiblnd_peer_active(peer));
        LASSERT (peer->ibp_connecting == 0);
        LASSERT (peer->ibp_accepting == 0);
        LASSERT (list_empty(&peer->ibp_conns));
        LASSERT (list_empty(&peer->ibp_tx_queue));

        LIBCFS_FREE(peer, sizeof(*peer));

        /* NB a peer's connections keep a reference on their peer until
         * they are destroyed, so we can be assured that _all_ state to do
         * with this peer has been cleaned up when its refcount drops to
         * zero. */
        atomic_dec(&net->ibn_npeers);
}
void
kiblnd_destroy_dev (kib_dev_t *dev)
{
        LASSERT (dev->ibd_nnets == 0);

        if (!list_empty(&dev->ibd_list)) /* on kib_devs? */
                list_del_init(&dev->ibd_list);

        if (dev->ibd_mr != NULL)
                ib_dereg_mr(dev->ibd_mr);

        if (dev->ibd_pd != NULL)
                ib_dealloc_pd(dev->ibd_pd);

        if (dev->ibd_cmid != NULL)
                rdma_destroy_id(dev->ibd_cmid);

        LIBCFS_FREE(dev, sizeof(*dev));
}
kib_peer_t *
kiblnd_find_peer_locked (lnet_nid_t nid)
{
        /* the caller is responsible for accounting the additional reference
         * that this creates */
        struct list_head *peer_list = kiblnd_nid2peerlist(nid);
        struct list_head *tmp;
        kib_peer_t       *peer;

        list_for_each (tmp, peer_list) {

                peer = list_entry(tmp, kib_peer_t, ibp_list);

                LASSERT (peer->ibp_connecting > 0 || /* creating conns */
                         peer->ibp_accepting > 0 ||
                         !list_empty(&peer->ibp_conns)); /* active conn */

                if (peer->ibp_nid != nid)
                        continue;

                CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
                       peer, libcfs_nid2str(nid),
                       atomic_read(&peer->ibp_refcount));
                return peer;
        }
        return NULL;
}
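/* NB kiblnd_nid2peerlist() hashes the NID into kib_peers[], so a lookup
 * walks a single hash chain rather than every peer. */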
void
kiblnd_unlink_peer_locked (kib_peer_t *peer)
{
        LASSERT (list_empty(&peer->ibp_conns));

        LASSERT (kiblnd_peer_active(peer));
        list_del_init(&peer->ibp_list);
        /* lose peerlist's ref */
        kiblnd_peer_decref(peer);
}
int
kiblnd_get_peer_info (lnet_ni_t *ni, int index,
                      lnet_nid_t *nidp, int *count)
{
        kib_peer_t       *peer;
        struct list_head *ptmp;
        int               i;
        unsigned long     flags;

        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {

                list_for_each (ptmp, &kiblnd_data.kib_peers[i]) {

                        peer = list_entry(ptmp, kib_peer_t, ibp_list);
                        LASSERT (peer->ibp_connecting > 0 ||
                                 peer->ibp_accepting > 0 ||
                                 !list_empty(&peer->ibp_conns));

                        if (peer->ibp_ni != ni)
                                continue;

                        if (index-- > 0)
                                continue;

                        *nidp = peer->ibp_nid;
                        *count = atomic_read(&peer->ibp_refcount);

                        read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
                                               flags);
                        return 0;
                }
        }

        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
        return -ENOENT;
}
void
kiblnd_del_peer_locked (kib_peer_t *peer)
{
        struct list_head *ctmp;
        struct list_head *cnxt;
        kib_conn_t       *conn;

        if (list_empty(&peer->ibp_conns)) {
                kiblnd_unlink_peer_locked(peer);
        } else {
                list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
                        conn = list_entry(ctmp, kib_conn_t, ibc_list);

                        kiblnd_close_conn_locked(conn, 0);
                }
                /* NB closing peer's last conn unlinked it. */
        }
        /* NB peer now unlinked; might even be freed if the peer table had the
         * last ref on it. */
}
int
kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid)
{
        CFS_LIST_HEAD    (zombies);
        struct list_head *ptmp;
        struct list_head *pnxt;
        kib_peer_t       *peer;
        int               lo;
        int               hi;
        int               i;
        unsigned long     flags;
        int               rc = -ENOENT;

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        if (nid != LNET_NID_ANY) {
                lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
        } else {
                lo = 0;
                hi = kiblnd_data.kib_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                list_for_each_safe (ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
                        peer = list_entry(ptmp, kib_peer_t, ibp_list);
                        LASSERT (peer->ibp_connecting > 0 ||
                                 peer->ibp_accepting > 0 ||
                                 !list_empty(&peer->ibp_conns));

                        if (peer->ibp_ni != ni)
                                continue;

                        if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid))
                                continue;

                        if (!list_empty(&peer->ibp_tx_queue)) {
                                LASSERT (list_empty(&peer->ibp_conns));

                                list_splice_init(&peer->ibp_tx_queue, &zombies);
                        }

                        kiblnd_del_peer_locked(peer);
                        rc = 0;         /* matched something */
                }
        }

        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        kiblnd_txlist_done(ni, &zombies, -EIO);

        return rc;
}
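/* NB queued TXs are spliced onto 'zombies' while holding the global lock but
 * completed with -EIO only after dropping it: kiblnd_txlist_done() finalises
 * messages back into LNet and should not run under the lock. */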
kib_conn_t *
kiblnd_get_conn_by_idx (lnet_ni_t *ni, int index)
{
        kib_peer_t       *peer;
        struct list_head *ptmp;
        kib_conn_t       *conn;
        struct list_head *ctmp;
        int               i;
        unsigned long     flags;

        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
                list_for_each (ptmp, &kiblnd_data.kib_peers[i]) {

                        peer = list_entry(ptmp, kib_peer_t, ibp_list);
                        LASSERT (peer->ibp_connecting > 0 ||
                                 peer->ibp_accepting > 0 ||
                                 !list_empty(&peer->ibp_conns));

                        if (peer->ibp_ni != ni)
                                continue;

                        list_for_each (ctmp, &peer->ibp_conns) {
                                if (index-- > 0)
                                        continue;

                                conn = list_entry(ctmp, kib_conn_t, ibc_list);
                                kiblnd_conn_addref(conn);
                                read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
                                                       flags);
                                return conn;
                        }
                }
        }

        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
        return NULL;
}
void
kiblnd_debug_rx (kib_rx_t *rx)
{
        CDEBUG(D_CONSOLE, "      %p status %d msg_type %x cred %d\n",
               rx, rx->rx_status, rx->rx_msg->ibm_type,
               rx->rx_msg->ibm_credits);
}
void
kiblnd_debug_tx (kib_tx_t *tx)
{
        CDEBUG(D_CONSOLE, "      %p snd %d q %d w %d rc %d dl %lx "
               "cookie "LPX64" msg %s%s type %x cred %d\n",
               tx, tx->tx_sending, tx->tx_queued, tx->tx_waiting,
               tx->tx_status, tx->tx_deadline, tx->tx_cookie,
               tx->tx_lntmsg[0] == NULL ? "-" : "!",
               tx->tx_lntmsg[1] == NULL ? "-" : "!",
               tx->tx_msg->ibm_type, tx->tx_msg->ibm_credits);
}
void
kiblnd_debug_conn (kib_conn_t *conn)
{
        struct list_head *tmp;
        int               i;

        spin_lock(&conn->ibc_lock);

        CDEBUG(D_CONSOLE, "conn[%d] %p -> %s: \n",
               atomic_read(&conn->ibc_refcount), conn,
               libcfs_nid2str(conn->ibc_peer->ibp_nid));
        CDEBUG(D_CONSOLE, "   state %d nposted %d cred %d o_cred %d r_cred %d\n",
               conn->ibc_state, conn->ibc_nsends_posted, conn->ibc_credits,
               conn->ibc_outstanding_credits, conn->ibc_reserved_credits);
        CDEBUG(D_CONSOLE, "   comms_err %d\n", conn->ibc_comms_error);

        CDEBUG(D_CONSOLE, "   early_rxs:\n");
        list_for_each(tmp, &conn->ibc_early_rxs)
                kiblnd_debug_rx(list_entry(tmp, kib_rx_t, rx_list));

        CDEBUG(D_CONSOLE, "   tx_queue_nocred:\n");
        list_for_each(tmp, &conn->ibc_tx_queue_nocred)
                kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));

        CDEBUG(D_CONSOLE, "   tx_queue_rsrvd:\n");
        list_for_each(tmp, &conn->ibc_tx_queue_rsrvd)
                kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));

        CDEBUG(D_CONSOLE, "   tx_queue:\n");
        list_for_each(tmp, &conn->ibc_tx_queue)
                kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));

        CDEBUG(D_CONSOLE, "   active_txs:\n");
        list_for_each(tmp, &conn->ibc_active_txs)
                kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));

        CDEBUG(D_CONSOLE, "   rxs:\n");
        for (i = 0; i < IBLND_RX_MSGS; i++)
                kiblnd_debug_rx(&conn->ibc_rxs[i]);

        spin_unlock(&conn->ibc_lock);
}
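/* NB this dump takes ibc_lock and logs at D_CONSOLE, so it is meant for
 * manual diagnosis from thread context only (see the commented-out call in
 * kiblnd_ctl() below). */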
kib_conn_t *
kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid, int state)
{
        /* CAVEAT EMPTOR:
         * If the new conn is created successfully it takes over the caller's
         * ref on 'peer'.  It also "owns" 'cmid' and destroys it when it itself
         * is destroyed.  On failure, the caller's ref on 'peer' remains and
         * she must dispose of 'cmid'.  (Actually I'd block forever if I tried
         * to destroy 'cmid' here since I'm called from the CM which still has
         * its ref on 'cmid'). */
        kib_conn_t             *conn;
        kib_net_t              *net = peer->ibp_ni->ni_data;
        struct ib_cq           *cq;
        struct ib_qp_init_attr *init_qp_attr;
        unsigned long           flags;
        int                     rc;
        int                     i;
        int                     page_offset;
        int                     ipage;

        LASSERT (net != NULL);
        LASSERT (!in_interrupt());

        LIBCFS_ALLOC(init_qp_attr, sizeof(*init_qp_attr));
        if (init_qp_attr == NULL) {
                CERROR("Can't allocate qp_attr for %s\n",
                       libcfs_nid2str(peer->ibp_nid));
                goto failed_0;
        }

        LIBCFS_ALLOC(conn, sizeof(*conn));
        if (conn == NULL) {
                CERROR("Can't allocate connection for %s\n",
                       libcfs_nid2str(peer->ibp_nid));
                goto failed_1;
        }

        memset(conn, 0, sizeof(*conn)); /* zero flags, NULL pointers etc... */

        conn->ibc_state = IBLND_CONN_INIT;
        conn->ibc_peer = peer;                  /* I take the caller's ref */
        cmid->context = conn;                   /* for future CM callbacks */
        conn->ibc_cmid = cmid;

        INIT_LIST_HEAD(&conn->ibc_early_rxs);
        INIT_LIST_HEAD(&conn->ibc_tx_queue);
        INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
        INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
        INIT_LIST_HEAD(&conn->ibc_active_txs);
        spin_lock_init(&conn->ibc_lock);

        LIBCFS_ALLOC(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
        if (conn->ibc_connvars == NULL) {
                CERROR("Can't allocate in-progress connection state\n");
                goto failed_2;
        }
        memset(conn->ibc_connvars, 0, sizeof(*conn->ibc_connvars));

        LIBCFS_ALLOC(conn->ibc_rxs, IBLND_RX_MSGS * sizeof(kib_rx_t));
        if (conn->ibc_rxs == NULL) {
                CERROR("Cannot allocate RX buffers\n");
                goto failed_2;
        }
        memset(conn->ibc_rxs, 0, IBLND_RX_MSGS * sizeof(kib_rx_t));

        rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, IBLND_RX_MSG_PAGES);
        if (rc != 0)
                goto failed_2;

        for (i = ipage = page_offset = 0; i < IBLND_RX_MSGS; i++) {
                struct page *page = conn->ibc_rx_pages->ibp_pages[ipage];
                kib_rx_t    *rx = &conn->ibc_rxs[i];

                rx->rx_conn = conn;
                rx->rx_msg = (kib_msg_t *)(((char *)page_address(page)) +
                                           page_offset);
                rx->rx_msgaddr = kiblnd_dma_map_single(cmid->device,
                                                       rx->rx_msg, IBLND_MSG_SIZE,
                                                       DMA_FROM_DEVICE);
                KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);

                CDEBUG(D_NET,"rx %d: %p "LPX64"("LPX64")\n",
                       i, rx->rx_msg, rx->rx_msgaddr,
                       lnet_page2phys(page) + page_offset);

                page_offset += IBLND_MSG_SIZE;
                LASSERT (page_offset <= PAGE_SIZE);

                if (page_offset == PAGE_SIZE) {
                        page_offset = 0;
                        ipage++;
                        LASSERT (ipage <= IBLND_RX_MSG_PAGES);
                }
        }
#if (IBLND_OFED_VERSION == 1025)
        cq = ib_create_cq(cmid->device,
                          kiblnd_cq_completion, kiblnd_cq_event, conn,
                          IBLND_CQ_ENTRIES(), 0);
#else
        cq = ib_create_cq(cmid->device,
                          kiblnd_cq_completion, kiblnd_cq_event, conn,
                          IBLND_CQ_ENTRIES());
#endif
        if (IS_ERR(cq)) {
                CERROR("Can't create CQ: %ld\n", PTR_ERR(cq));
                goto failed_2;
        }

        conn->ibc_cq = cq;

        rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
        if (rc != 0) {
                CERROR("Can't request completion notification: %d\n", rc);
                goto failed_2;
        }
        memset(init_qp_attr, 0, sizeof(*init_qp_attr));
        init_qp_attr->event_handler = kiblnd_qp_event;
        init_qp_attr->qp_context = conn;
        init_qp_attr->cap.max_send_wr = (*kiblnd_tunables.kib_concurrent_sends) *
                                        (1 + IBLND_MAX_RDMA_FRAGS);
        init_qp_attr->cap.max_recv_wr = IBLND_RX_MSGS;
        init_qp_attr->cap.max_send_sge = 1;
        init_qp_attr->cap.max_recv_sge = 1;
        init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
        init_qp_attr->qp_type = IB_QPT_RC;
        init_qp_attr->send_cq = cq;
        init_qp_attr->recv_cq = cq;
        rc = 0;
        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
        switch (*kiblnd_tunables.kib_ib_mtu) {
        default:
                rc = *kiblnd_tunables.kib_ib_mtu;
                /* fall through to... */
        case 0: /* set tunable to the default
                 * CAVEAT EMPTOR! this assumes the default is one of the MTUs
                 * below, otherwise we'll WARN on the next QP create */
                *kiblnd_tunables.kib_ib_mtu =
                        ib_mtu_enum_to_int(cmid->route.path_rec->mtu);
                break;
        case 256:
                cmid->route.path_rec->mtu = IB_MTU_256;
                break;
        case 512:
                cmid->route.path_rec->mtu = IB_MTU_512;
                break;
        case 1024:
                cmid->route.path_rec->mtu = IB_MTU_1024;
                break;
        case 2048:
                cmid->route.path_rec->mtu = IB_MTU_2048;
                break;
        case 4096:
                cmid->route.path_rec->mtu = IB_MTU_4096;
                break;
        }
        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        if (rc != 0)
                CWARN("Invalid IB MTU value %d, using default value %d\n",
                      rc, *kiblnd_tunables.kib_ib_mtu);
        rc = rdma_create_qp(cmid, net->ibn_dev->ibd_pd, init_qp_attr);
        if (rc != 0) {
                CERROR("Can't create QP: %d\n", rc);
                goto failed_2;
        }

        LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));

        /* 1 ref for caller and each rxmsg */
        atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS);
        conn->ibc_nrx = IBLND_RX_MSGS;

        /* post receives */
        for (i = 0; i < IBLND_RX_MSGS; i++) {
                rc = kiblnd_post_rx(&conn->ibc_rxs[i],
                                    IBLND_POSTRX_NO_CREDIT);
                if (rc != 0) {
                        CERROR("Can't post rxmsg: %d\n", rc);

                        /* Make posted receives complete */
                        kiblnd_abort_receives(conn);

                        /* correct # of posted buffers
                         * NB locking needed now I'm racing with completion */
                        spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
                        conn->ibc_nrx -= IBLND_RX_MSGS - i;
                        spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
                                               flags);

                        /* Drop my own and unused rxbuffer refcounts */
                        while (i++ <= IBLND_RX_MSGS)
                                kiblnd_conn_decref(conn);

                        return NULL;
                }
        }

        /* Init successful! */
        LASSERT (state == IBLND_CONN_ACTIVE_CONNECT ||
                 state == IBLND_CONN_PASSIVE_WAIT);
        conn->ibc_state = state;

        /* 1 more conn */
        atomic_inc(&net->ibn_nconns);
        return conn;

 failed_2:
        kiblnd_destroy_conn(conn);
 failed_1:
        LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
 failed_0:
        return NULL;
}
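/* NB max_send_wr above reserves room for every concurrent send to queue one
 * message plus up to IBLND_MAX_RDMA_FRAGS RDMA fragments; IBLND_CQ_ENTRIES()
 * is expected to be sized to match, since a CQ overflow would be fatal for
 * the connection. */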
void
kiblnd_destroy_conn (kib_conn_t *conn)
{
        struct rdma_cm_id *cmid = conn->ibc_cmid;
        kib_peer_t        *peer = conn->ibc_peer;
        int                rc;
        int                i;

        LASSERT (!in_interrupt());
        LASSERT (atomic_read(&conn->ibc_refcount) == 0);
        LASSERT (list_empty(&conn->ibc_early_rxs));
        LASSERT (list_empty(&conn->ibc_tx_queue));
        LASSERT (list_empty(&conn->ibc_tx_queue_rsrvd));
        LASSERT (list_empty(&conn->ibc_tx_queue_nocred));
        LASSERT (list_empty(&conn->ibc_active_txs));
        LASSERT (conn->ibc_nsends_posted == 0);

        switch (conn->ibc_state) {
        default:
                /* conn must be completely disengaged from the network */
                LBUG();

        case IBLND_CONN_DISCONNECTED:
                /* connvars should have been freed already */
                LASSERT (conn->ibc_connvars == NULL);
                /* fall through */

        case IBLND_CONN_INIT:
                break;
        }

        if (conn->ibc_cmid->qp != NULL)
                rdma_destroy_qp(conn->ibc_cmid);

        if (conn->ibc_cq != NULL) {
                rc = ib_destroy_cq(conn->ibc_cq);
                if (rc != 0)
                        CWARN("Error destroying CQ: %d\n", rc);
        }

        if (conn->ibc_rx_pages != NULL) {
                LASSERT (conn->ibc_rxs != NULL);

                for (i = 0; i < IBLND_RX_MSGS; i++) {
                        kib_rx_t *rx = &conn->ibc_rxs[i];

                        LASSERT (rx->rx_nob >= 0); /* not posted */

                        kiblnd_dma_unmap_single(conn->ibc_cmid->device,
                                                KIBLND_UNMAP_ADDR(rx, rx_msgunmap,
                                                                  rx->rx_msgaddr),
                                                IBLND_MSG_SIZE, DMA_FROM_DEVICE);
                }

                kiblnd_free_pages(conn->ibc_rx_pages);
        }

        if (conn->ibc_rxs != NULL) {
                LIBCFS_FREE(conn->ibc_rxs,
                            IBLND_RX_MSGS * sizeof(kib_rx_t));
        }

        if (conn->ibc_connvars != NULL)
                LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));

        /* See CAVEAT EMPTOR above in kiblnd_create_conn */
        if (conn->ibc_state != IBLND_CONN_INIT) {
                kib_net_t *net = peer->ibp_ni->ni_data;

                kiblnd_peer_decref(peer);
                rdma_destroy_id(cmid);
                atomic_dec(&net->ibn_nconns);
        }

        LIBCFS_FREE(conn, sizeof(*conn));
}
int
kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why)
{
        kib_conn_t       *conn;
        struct list_head *ctmp;
        struct list_head *cnxt;
        int               count = 0;

        list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
                conn = list_entry(ctmp, kib_conn_t, ibc_list);

                kiblnd_close_conn_locked(conn, why);
                count++;
        }

        return count;
}
int
kiblnd_close_stale_conns_locked (kib_peer_t *peer, __u64 incarnation)
{
        kib_conn_t       *conn;
        struct list_head *ctmp;
        struct list_head *cnxt;
        int               count = 0;

        list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
                conn = list_entry(ctmp, kib_conn_t, ibc_list);

                if (conn->ibc_incarnation == incarnation)
                        continue;

                CDEBUG(D_NET, "Closing stale conn -> %s incarnation:"LPX64"("LPX64")\n",
                       libcfs_nid2str(peer->ibp_nid),
                       conn->ibc_incarnation, incarnation);

                kiblnd_close_conn_locked(conn, -ESTALE);
                count++;
        }

        return count;
}
int
kiblnd_close_matching_conns (lnet_ni_t *ni, lnet_nid_t nid)
{
        kib_peer_t       *peer;
        struct list_head *ptmp;
        struct list_head *pnxt;
        int               lo;
        int               hi;
        int               i;
        unsigned long     flags;
        int               count = 0;

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        if (nid != LNET_NID_ANY) {
                lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
        } else {
                lo = 0;
                hi = kiblnd_data.kib_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                list_for_each_safe (ptmp, pnxt, &kiblnd_data.kib_peers[i]) {

                        peer = list_entry(ptmp, kib_peer_t, ibp_list);
                        LASSERT (peer->ibp_connecting > 0 ||
                                 peer->ibp_accepting > 0 ||
                                 !list_empty(&peer->ibp_conns));

                        if (peer->ibp_ni != ni)
                                continue;

                        if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid))
                                continue;

                        count += kiblnd_close_peer_conns_locked(peer, 0);
                }
        }

        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        /* wildcards always succeed */
        if (nid == LNET_NID_ANY)
                return 0;

        return (count == 0) ? -ENOENT : 0;
}
int
kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
{
        struct libcfs_ioctl_data *data = arg;
        int                       rc = -EINVAL;

        switch(cmd) {
        case IOC_LIBCFS_GET_PEER: {
                lnet_nid_t nid = 0;
                int        count = 0;

                rc = kiblnd_get_peer_info(ni, data->ioc_count,
                                          &nid, &count);
                data->ioc_nid   = nid;
                data->ioc_count = count;
                break;
        }

        case IOC_LIBCFS_DEL_PEER: {
                rc = kiblnd_del_peer(ni, data->ioc_nid);
                break;
        }
        case IOC_LIBCFS_GET_CONN: {
                kib_conn_t *conn = kiblnd_get_conn_by_idx(ni, data->ioc_count);

                if (conn == NULL) {
                        rc = -ENOENT;
                        break;
                }

                // kiblnd_debug_conn(conn);
                rc = 0;
                data->ioc_nid = conn->ibc_peer->ibp_nid;
                kiblnd_conn_decref(conn);
                break;
        }
        case IOC_LIBCFS_CLOSE_CONNECTION: {
                rc = kiblnd_close_matching_conns(ni, data->ioc_nid);
                break;
        }

        default:
                break;
        }

        return rc;
}
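/* Control path (sketch, assuming the usual LNet plumbing): userspace issues
 * IOC_LIBCFS_* ioctls (e.g. via lctl), LNet looks up the NI and dispatches
 * to its lnd_ctl hook, which is wired to kiblnd_ctl() in the_kiblnd above. */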
void
kiblnd_free_pages (kib_pages_t *p)
{
        int npages = p->ibp_npages;
        int i;

        for (i = 0; i < npages; i++)
                if (p->ibp_pages[i] != NULL)
                        __free_page(p->ibp_pages[i]);

        LIBCFS_FREE (p, offsetof(kib_pages_t, ibp_pages[npages]));
}
int
kiblnd_alloc_pages (kib_pages_t **pp, int npages)
{
        kib_pages_t *p;
        int          i;

        LIBCFS_ALLOC(p, offsetof(kib_pages_t, ibp_pages[npages]));
        if (p == NULL) {
                CERROR("Can't allocate descriptor for %d pages\n", npages);
                return -ENOMEM;
        }

        memset(p, 0, offsetof(kib_pages_t, ibp_pages[npages]));
        p->ibp_npages = npages;

        for (i = 0; i < npages; i++) {
                p->ibp_pages[i] = alloc_page(GFP_KERNEL);
                if (p->ibp_pages[i] == NULL) {
                        CERROR("Can't allocate page %d of %d\n", i, npages);
                        kiblnd_free_pages(p);
                        return -ENOMEM;
                }
        }

        *pp = p;
        return 0;
}
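/* NB offsetof(kib_pages_t, ibp_pages[npages]) sizes the descriptor and its
 * page-pointer array as a single allocation; kiblnd_free_pages() must use
 * the same expression so the LIBCFS allocation accounting balances. */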
void
kiblnd_free_tx_descs (lnet_ni_t *ni)
{
        int        i;
        kib_net_t *net = ni->ni_data;

        LASSERT (net != NULL);

        if (net->ibn_tx_descs != NULL) {
                for (i = 0; i < IBLND_TX_MSGS(); i++) {
                        kib_tx_t *tx = &net->ibn_tx_descs[i];

#if IBLND_MAP_ON_DEMAND
                        if (tx->tx_pages != NULL)
                                LIBCFS_FREE(tx->tx_pages, LNET_MAX_IOV *
                                            sizeof(*tx->tx_pages));
#else
                        if (tx->tx_wrq != NULL)
                                LIBCFS_FREE(tx->tx_wrq,
                                            (1 + IBLND_MAX_RDMA_FRAGS) *
                                            sizeof(*tx->tx_wrq));

                        if (tx->tx_sge != NULL)
                                LIBCFS_FREE(tx->tx_sge,
                                            (1 + IBLND_MAX_RDMA_FRAGS) *
                                            sizeof(*tx->tx_sge));

                        if (tx->tx_rd != NULL)
                                LIBCFS_FREE(tx->tx_rd,
                                            offsetof(kib_rdma_desc_t,
                                                     rd_frags[IBLND_MAX_RDMA_FRAGS]));

                        if (tx->tx_frags != NULL)
                                LIBCFS_FREE(tx->tx_frags,
                                            IBLND_MAX_RDMA_FRAGS *
                                            sizeof(*tx->tx_frags));
#endif
                }

                LIBCFS_FREE(net->ibn_tx_descs,
                            IBLND_TX_MSGS() * sizeof(kib_tx_t));
        }

        if (net->ibn_tx_pages != NULL)
                kiblnd_free_pages(net->ibn_tx_pages);
}
int
kiblnd_alloc_tx_descs (lnet_ni_t *ni)
{
        int        i;
        int        rc;
        kib_net_t *net = ni->ni_data;

        LASSERT (net != NULL);

        rc = kiblnd_alloc_pages(&net->ibn_tx_pages, IBLND_TX_MSG_PAGES());
        if (rc != 0) {
                CERROR("Can't allocate tx pages\n");
                return rc;
        }

        LIBCFS_ALLOC (net->ibn_tx_descs,
                      IBLND_TX_MSGS() * sizeof(kib_tx_t));
        if (net->ibn_tx_descs == NULL) {
                CERROR("Can't allocate %d tx descriptors\n", IBLND_TX_MSGS());
                return -ENOMEM;
        }

        memset(net->ibn_tx_descs, 0,
               IBLND_TX_MSGS() * sizeof(kib_tx_t));

        for (i = 0; i < IBLND_TX_MSGS(); i++) {
                kib_tx_t *tx = &net->ibn_tx_descs[i];

#if IBLND_MAP_ON_DEMAND
                LIBCFS_ALLOC(tx->tx_pages, LNET_MAX_IOV *
                             sizeof(*tx->tx_pages));
                if (tx->tx_pages == NULL) {
                        CERROR("Can't allocate phys page vector[%d]\n",
                               LNET_MAX_IOV);
                        return -ENOMEM;
                }
#else
                LIBCFS_ALLOC(tx->tx_wrq,
                             (1 + IBLND_MAX_RDMA_FRAGS) *
                             sizeof(*tx->tx_wrq));
                if (tx->tx_wrq == NULL)
                        return -ENOMEM;

                LIBCFS_ALLOC(tx->tx_sge,
                             (1 + IBLND_MAX_RDMA_FRAGS) *
                             sizeof(*tx->tx_sge));
                if (tx->tx_sge == NULL)
                        return -ENOMEM;

                LIBCFS_ALLOC(tx->tx_rd,
                             offsetof(kib_rdma_desc_t,
                                      rd_frags[IBLND_MAX_RDMA_FRAGS]));
                if (tx->tx_rd == NULL)
                        return -ENOMEM;

                LIBCFS_ALLOC(tx->tx_frags,
                             IBLND_MAX_RDMA_FRAGS *
                             sizeof(*tx->tx_frags));
                if (tx->tx_frags == NULL)
                        return -ENOMEM;
#endif
        }

        return 0;
}
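/* NB the -ENOMEM returns above do not unwind partial allocations; the caller
 * (kiblnd_startup) is expected to clean up via kiblnd_shutdown() ->
 * kiblnd_free_tx_descs(), which tolerates NULL members. */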
void
kiblnd_unmap_tx_descs (lnet_ni_t *ni)
{
        int        i;
        kib_tx_t  *tx;
        kib_net_t *net = ni->ni_data;

        LASSERT (net != NULL);

        for (i = 0; i < IBLND_TX_MSGS(); i++) {
                tx = &net->ibn_tx_descs[i];

                kiblnd_dma_unmap_single(net->ibn_dev->ibd_cmid->device,
                                        KIBLND_UNMAP_ADDR(tx, tx_msgunmap,
                                                          tx->tx_msgaddr),
                                        IBLND_MSG_SIZE, DMA_TO_DEVICE);
        }
}
void
kiblnd_map_tx_descs (lnet_ni_t *ni)
{
        int          ipage = 0;
        int          page_offset = 0;
        int          i;
        struct page *page;
        kib_tx_t    *tx;
        kib_net_t   *net = ni->ni_data;

        LASSERT (net != NULL);

        /* pre-mapped messages are not bigger than 1 page */
        CLASSERT (IBLND_MSG_SIZE <= PAGE_SIZE);

        /* No fancy arithmetic when we do the buffer calculations */
        CLASSERT (PAGE_SIZE % IBLND_MSG_SIZE == 0);

        for (i = 0; i < IBLND_TX_MSGS(); i++) {
                page = net->ibn_tx_pages->ibp_pages[ipage];
                tx = &net->ibn_tx_descs[i];

                tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
                                           page_offset);

                tx->tx_msgaddr = kiblnd_dma_map_single(
                        net->ibn_dev->ibd_cmid->device,
                        tx->tx_msg, IBLND_MSG_SIZE, DMA_TO_DEVICE);
                KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);

                list_add(&tx->tx_list, &net->ibn_idle_txs);

                page_offset += IBLND_MSG_SIZE;
                LASSERT (page_offset <= PAGE_SIZE);

                if (page_offset == PAGE_SIZE) {
                        page_offset = 0;
                        ipage++;
                        LASSERT (ipage <= IBLND_TX_MSG_PAGES());
                }
        }
}
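/* Worked example (assuming 4K pages and IBLND_MSG_SIZE == 4K): each page
 * holds exactly one pre-mapped message, so page_offset is always 0 and ipage
 * advances every iteration; with 1K messages, four TXs would share each page
 * at offsets 0K, 1K, 2K and 3K. */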
void
kiblnd_base_shutdown (void)
{
        int i;

        LASSERT (list_empty(&kiblnd_data.kib_devs));

        CDEBUG(D_MALLOC, "before LND base cleanup: kmem %d\n",
               atomic_read(&libcfs_kmemory));

        switch (kiblnd_data.kib_init) {
        default:
                LBUG();

        case IBLND_INIT_ALL:
        case IBLND_INIT_DATA:
                LASSERT (kiblnd_data.kib_peers != NULL);
                for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
                        LASSERT (list_empty(&kiblnd_data.kib_peers[i]));
                }
                LASSERT (list_empty(&kiblnd_data.kib_connd_zombies));
                LASSERT (list_empty(&kiblnd_data.kib_connd_conns));

                /* flag threads to terminate; wake and wait for them to die */
                kiblnd_data.kib_shutdown = 1;
                wake_up_all(&kiblnd_data.kib_sched_waitq);
                wake_up_all(&kiblnd_data.kib_connd_waitq);

                i = 2;
                while (atomic_read(&kiblnd_data.kib_nthreads) != 0) {
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                               "Waiting for %d threads to terminate\n",
                               atomic_read(&kiblnd_data.kib_nthreads));
                        cfs_pause(cfs_time_seconds(1));
                }
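                /* NB ((i & (-i)) == i) is true exactly when i is a power of
                 * 2, so the message above escalates to D_WARNING only on the
                 * 2nd, 4th, 8th... second and doesn't spam the log once per
                 * second while threads drain. */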
                /* fall through */

        case IBLND_INIT_NOTHING:
                break;
        }

        if (kiblnd_data.kib_peers != NULL)
                LIBCFS_FREE(kiblnd_data.kib_peers,
                            sizeof(struct list_head) *
                            kiblnd_data.kib_peer_hash_size);

        CDEBUG(D_MALLOC, "after LND base cleanup: kmem %d\n",
               atomic_read(&libcfs_kmemory));

        kiblnd_data.kib_init = IBLND_INIT_NOTHING;
        PORTAL_MODULE_UNUSE;
}
void
kiblnd_shutdown (lnet_ni_t *ni)
{
        kib_net_t     *net = ni->ni_data;
        rwlock_t      *g_lock = &kiblnd_data.kib_global_lock;
        int            i;
        unsigned long  flags;

        LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL);

        if (net == NULL)
                goto out;

        CDEBUG(D_MALLOC, "before LND net cleanup: kmem %d\n",
               atomic_read(&libcfs_kmemory));

        write_lock_irqsave(g_lock, flags);
        net->ibn_shutdown = 1;
        write_unlock_irqrestore(g_lock, flags);

        switch (net->ibn_init) {
        default:
                LBUG();

        case IBLND_INIT_ALL:
                /* nuke all existing peers within this net */
                kiblnd_del_peer(ni, LNET_NID_ANY);

                /* Wait for all peer state to clean up */
                i = 2;
                while (atomic_read(&net->ibn_npeers) != 0) {
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
                               "%s: waiting for %d peers to disconnect\n",
                               libcfs_nid2str(ni->ni_nid),
                               atomic_read(&net->ibn_npeers));
                        cfs_pause(cfs_time_seconds(1));
                }

                kiblnd_unmap_tx_descs(ni);

                LASSERT (net->ibn_dev->ibd_nnets > 0);
                net->ibn_dev->ibd_nnets--;

                /* fall through */

        case IBLND_INIT_NOTHING:
                LASSERT (atomic_read(&net->ibn_nconns) == 0);

#if IBLND_MAP_ON_DEMAND
                if (net->ibn_fmrpool != NULL)
                        ib_destroy_fmr_pool(net->ibn_fmrpool);
#endif
                if (net->ibn_dev != NULL &&
                    net->ibn_dev->ibd_nnets == 0)
                        kiblnd_destroy_dev(net->ibn_dev);

                break;
        }

        kiblnd_free_tx_descs(ni);

        CDEBUG(D_MALLOC, "after LND net cleanup: kmem %d\n",
               atomic_read(&libcfs_kmemory));

        net->ibn_init = IBLND_INIT_NOTHING;
        ni->ni_data = NULL;

        LIBCFS_FREE(net, sizeof(*net));

 out:
        if (list_empty(&kiblnd_data.kib_devs))
                kiblnd_base_shutdown();
        return;
}
int
kiblnd_base_startup (void)
{
        int i;
        int rc;

        LASSERT (kiblnd_data.kib_init == IBLND_INIT_NOTHING);

        if (*kiblnd_tunables.kib_credits > *kiblnd_tunables.kib_ntx) {
                CERROR("Can't set credits(%d) > ntx(%d)\n",
                       *kiblnd_tunables.kib_credits,
                       *kiblnd_tunables.kib_ntx);
                return -EINVAL;
        }

        PORTAL_MODULE_USE;
        memset(&kiblnd_data, 0, sizeof(kiblnd_data)); /* zero pointers, flags etc */

        rwlock_init(&kiblnd_data.kib_global_lock);

        INIT_LIST_HEAD(&kiblnd_data.kib_devs);

        kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
        LIBCFS_ALLOC(kiblnd_data.kib_peers,
                     sizeof(struct list_head) * kiblnd_data.kib_peer_hash_size);
        if (kiblnd_data.kib_peers == NULL) {
                goto failed;
        }
        for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
                INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);

        spin_lock_init(&kiblnd_data.kib_connd_lock);
        INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
        INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
        init_waitqueue_head(&kiblnd_data.kib_connd_waitq);

        spin_lock_init(&kiblnd_data.kib_sched_lock);
        INIT_LIST_HEAD(&kiblnd_data.kib_sched_conns);
        init_waitqueue_head(&kiblnd_data.kib_sched_waitq);

        kiblnd_data.kib_error_qpa.qp_state = IB_QPS_ERR;

        /* lists/ptrs/locks initialised */
        kiblnd_data.kib_init = IBLND_INIT_DATA;
        /*****************************************************/

        for (i = 0; i < IBLND_N_SCHED; i++) {
                rc = kiblnd_thread_start(kiblnd_scheduler, (void *)((long)i));
                if (rc != 0) {
                        CERROR("Can't spawn o2iblnd scheduler[%d]: %d\n",
                               i, rc);
                        goto failed;
                }
        }

        rc = kiblnd_thread_start(kiblnd_connd, NULL);
        if (rc != 0) {
                CERROR("Can't spawn o2iblnd connd: %d\n", rc);
                goto failed;
        }

        /* flag everything initialised */
        kiblnd_data.kib_init = IBLND_INIT_ALL;
        /*****************************************************/

        return 0;

 failed:
        kiblnd_base_shutdown();
        return -ENETDOWN;
}
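/* NB kib_init is a little state machine (NOTHING -> DATA -> ALL);
 * kiblnd_base_shutdown() switches on it, so failing anywhere above unwinds
 * exactly the state that was set up. */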
int
kiblnd_startup (lnet_ni_t *ni)
{
        char                     *ifname;
        kib_net_t                *net;
        kib_dev_t                *ibdev;
        struct list_head         *tmp;
        struct timeval            tv;
        int                       rc;

        LASSERT (ni->ni_lnd == &the_kiblnd);

        if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
                rc = kiblnd_base_startup();
                if (rc != 0)
                        return rc;
        }

        LIBCFS_ALLOC(net, sizeof(*net));
        ni->ni_data = net;
        if (net == NULL)
                goto failed;

        memset(net, 0, sizeof(*net));

        do_gettimeofday(&tv);
        net->ibn_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
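        /* NB the incarnation is just a boot-time microsecond timestamp; it
         * travels in ibm_srcstamp/ibm_dststamp of every message so peers can
         * detect a reboot and close stale connections (see
         * kiblnd_close_stale_conns_locked). */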
        ni->ni_maxtxcredits = *kiblnd_tunables.kib_credits;
        ni->ni_peertxcredits = *kiblnd_tunables.kib_peercredits;

        spin_lock_init(&net->ibn_tx_lock);
        INIT_LIST_HEAD(&net->ibn_idle_txs);

        rc = kiblnd_alloc_tx_descs(ni);
        if (rc != 0) {
                CERROR("Can't allocate tx descs\n");
                goto failed;
        }

        if (ni->ni_interfaces[0] != NULL) {
                /* Use the IPoIB interface specified in 'networks=' */

                CLASSERT (LNET_MAX_INTERFACES > 1);
                if (ni->ni_interfaces[1] != NULL) {
                        CERROR("Multiple interfaces not supported\n");
                        goto failed;
                }

                ifname = ni->ni_interfaces[0];
        } else {
                ifname = *kiblnd_tunables.kib_default_ipif;
        }

        if (strlen(ifname) >= sizeof(ibdev->ibd_ifname)) {
                CERROR("IPoIB interface name too long: %s\n", ifname);
                goto failed;
        }

        ibdev = NULL;
        list_for_each (tmp, &kiblnd_data.kib_devs) {
                ibdev = list_entry(tmp, kib_dev_t, ibd_list);

                if (!strcmp(&ibdev->ibd_ifname[0], ifname))
                        break;

                ibdev = NULL;
        }
        if (ibdev == NULL) {
                __u32              ip;
                __u32              netmask;
                int                up;
                struct rdma_cm_id *id;
                struct ib_pd      *pd;
                struct ib_mr      *mr;
                struct sockaddr_in addr;

                rc = libcfs_ipif_query(ifname, &up, &ip, &netmask);
                if (rc != 0) {
                        CERROR("Can't query IPoIB interface %s: %d\n",
                               ifname, rc);
                        goto failed;
                }

                if (!up) {
                        CERROR("Can't query IPoIB interface %s: it's down\n",
                               ifname);
                        goto failed;
                }

                LIBCFS_ALLOC(ibdev, sizeof(*ibdev));
                if (ibdev == NULL)
                        goto failed;

                memset(ibdev, 0, sizeof(*ibdev));

                INIT_LIST_HEAD(&ibdev->ibd_list); /* not yet in kib_devs */
                ibdev->ibd_ifip = ip;
                strcpy(&ibdev->ibd_ifname[0], ifname);

                id = rdma_create_id(kiblnd_cm_callback, ibdev, RDMA_PS_TCP);

                ibdev->ibd_cmid = id;
                if (IS_ERR(id)) {
                        CERROR("Can't create listen ID: %ld\n", PTR_ERR(id));
                        goto failed;
                }

                memset(&addr, 0, sizeof(addr));
                addr.sin_family      = AF_INET;
                addr.sin_port        = htons(*kiblnd_tunables.kib_service);
                addr.sin_addr.s_addr = htonl(ip);

                rc = rdma_bind_addr(id, (struct sockaddr *)&addr);
                if (rc != 0) {
                        CERROR("Can't bind to %s: %d\n", ifname, rc);
                        goto failed;
                }

                /* Binding should have assigned me an IB device */
                LASSERT (id->device != NULL);

                pd = ib_alloc_pd(id->device);

                ibdev->ibd_pd = pd;
                if (IS_ERR(pd)) {
                        CERROR("Can't allocate PD: %ld\n", PTR_ERR(pd));
                        goto failed;
                }

#if IBLND_MAP_ON_DEMAND
                /* MR for sends and receives */
                mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
#else
                /* MR for sends, receives _and_ RDMA...........v */
                mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE |
                                       IB_ACCESS_REMOTE_WRITE);
#endif
                ibdev->ibd_mr = mr;
                if (IS_ERR(mr)) {
                        CERROR("Can't get MR: %ld\n", PTR_ERR(mr));
                        goto failed;
                }

                rc = rdma_listen(id, 0);
                if (rc != 0) {
                        CERROR("Can't start listener: %d\n", rc);
                        goto failed;
                }

                list_add_tail(&ibdev->ibd_list,
                              &kiblnd_data.kib_devs);
        }
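        /* NB kib_dev_t is shared: a second LNet network configured on the
         * same IPoIB interface reuses the existing listener/PD/MR (hence the
         * by-ifname search above) instead of binding a second listener. */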
        ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);
        net->ibn_dev = ibdev;

#if IBLND_MAP_ON_DEMAND
        /* FMR pool for RDMA */
        {
                struct ib_fmr_pool      *fmrpool;
                struct ib_fmr_pool_param param = {
                        .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
                        .page_shift        = PAGE_SHIFT,
                        .access            = (IB_ACCESS_LOCAL_WRITE |
                                              IB_ACCESS_REMOTE_WRITE),
                        .pool_size         = *kiblnd_tunables.kib_fmr_pool_size,
                        .dirty_watermark   = *kiblnd_tunables.kib_fmr_flush_trigger,
                        .flush_function    = NULL,
                        .flush_arg         = NULL,
                        .cache             = *kiblnd_tunables.kib_fmr_cache};

                if (*kiblnd_tunables.kib_fmr_pool_size <
                    *kiblnd_tunables.kib_ntx) {
                        CERROR("Can't set fmr pool size (%d) < ntx(%d)\n",
                               *kiblnd_tunables.kib_fmr_pool_size,
                               *kiblnd_tunables.kib_ntx);
                        goto failed;
                }

                fmrpool = ib_create_fmr_pool(ibdev->ibd_pd, &param);
                if (!IS_ERR(fmrpool)) {
                        net->ibn_fmrpool = fmrpool;
                } else {
                        CERROR("Can't create FMR pool: %ld\n",
                               PTR_ERR(fmrpool));
                        goto failed;
                }
        }
#endif

        kiblnd_map_tx_descs(ni);

        ibdev->ibd_nnets++;
        net->ibn_init = IBLND_INIT_ALL;

        return 0;

 failed:
        kiblnd_shutdown(ni);

        CDEBUG(D_NET, "kiblnd_startup failed\n");
        return -ENETDOWN;
}
void __exit
kiblnd_module_fini (void)
{
        lnet_unregister_lnd(&the_kiblnd);
        kiblnd_tunables_fini();
}

int __init
kiblnd_module_init (void)
{
        int rc;

        CLASSERT (sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
#if !IBLND_MAP_ON_DEMAND
        CLASSERT (offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
                  <= IBLND_MSG_SIZE);
        CLASSERT (offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
                  <= IBLND_MSG_SIZE);
#endif

        rc = kiblnd_tunables_init();
        if (rc != 0)
                return rc;

        lnet_register_lnd(&the_kiblnd);

        return 0;
}

MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
MODULE_DESCRIPTION("Kernel OpenIB gen2 LND v1.00");
MODULE_LICENSE("GPL");

module_init(kiblnd_module_init);
module_exit(kiblnd_module_fini);