X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lnet%2Fklnds%2Fo2iblnd%2Fo2iblnd.c;h=3dddc0e0d7b89e58510235d6996eb3cb3388a803;hb=4de2841b30bae846092ed092d388a1b59f459b8b;hp=226d453d84873d645fbd673049f180f99ade4cb2;hpb=8c9b78c42436facd5c163f8968394d9235094a70;p=fs%2Flustre-release.git

diff --git a/lnet/klnds/o2iblnd/o2iblnd.c b/lnet/klnds/o2iblnd/o2iblnd.c
index 226d453..3dddc0e 100644
--- a/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/lnet/klnds/o2iblnd/o2iblnd.c
@@ -1,33 +1,51 @@
 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
  * vim:expandtab:shiftwidth=8:tabstop=8:
  *
- * Copyright (C) 2006 Cluster File Systems, Inc.
- *   Author: Eric Barton
+ * GPL HEADER START
  *
- * This file is part of Lustre, http://www.lustre.org.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
  *
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
  *
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/klnds/o2iblnd/o2iblnd.c
+ *
+ * Author: Eric Barton
  */

 #include "o2iblnd.h"

-lnd_t the_kiblnd = {
+lnd_t the_o2iblnd = {
         .lnd_type       = O2IBLND,
         .lnd_startup    = kiblnd_startup,
         .lnd_shutdown   = kiblnd_shutdown,
         .lnd_ctl        = kiblnd_ctl,
+        .lnd_query      = kiblnd_query,
         .lnd_send       = kiblnd_send,
         .lnd_recv       = kiblnd_recv,
 };
@@ -47,15 +65,129 @@ kiblnd_cksum (void *ptr, int nob)
         return (sum == 0) ? 1 : sum;
 }

-void
-kiblnd_init_msg (kib_msg_t *msg, int type, int body_nob)
+static char *
+kiblnd_msgtype2str(int type)
+{
+        switch (type) {
+        case IBLND_MSG_CONNREQ:
+                return "CONNREQ";
+
+        case IBLND_MSG_CONNACK:
+                return "CONNACK";
+
+        case IBLND_MSG_NOOP:
+                return "NOOP";
+
+        case IBLND_MSG_IMMEDIATE:
+                return "IMMEDIATE";
+
+        case IBLND_MSG_PUT_REQ:
+                return "PUT_REQ";
+
+        case IBLND_MSG_PUT_NAK:
+                return "PUT_NAK";
+
+        case IBLND_MSG_PUT_ACK:
+                return "PUT_ACK";
+
+        case IBLND_MSG_PUT_DONE:
+                return "PUT_DONE";
+
+        case IBLND_MSG_GET_REQ:
+                return "GET_REQ";
+
+        case IBLND_MSG_GET_DONE:
+                return "GET_DONE";
+
+        default:
+                return "???";
+        }
+}
+
+static int
+kiblnd_msgtype2size(int type)
+{
+        const int hdr_size = offsetof(kib_msg_t, ibm_u);
+
+        switch (type) {
+        case IBLND_MSG_CONNREQ:
+        case IBLND_MSG_CONNACK:
+                return hdr_size + sizeof(kib_connparams_t);
+
+        case IBLND_MSG_NOOP:
+                return hdr_size;
+
+        case IBLND_MSG_IMMEDIATE:
+                return offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]);
+
+        case IBLND_MSG_PUT_REQ:
+                return hdr_size + sizeof(kib_putreq_msg_t);
+
+        case IBLND_MSG_PUT_ACK:
+                return hdr_size + sizeof(kib_putack_msg_t);
+
+        case IBLND_MSG_GET_REQ:
+                return hdr_size + sizeof(kib_get_msg_t);
+
+        case IBLND_MSG_PUT_NAK:
+        case IBLND_MSG_PUT_DONE:
+        case IBLND_MSG_GET_DONE:
+                return hdr_size + sizeof(kib_completion_msg_t);
+        default:
+                return -1;
+        }
+}
+
+static int
+kiblnd_unpack_rd(kib_msg_t *msg, int flip)
 {
-        msg->ibm_type = type;
-        msg->ibm_nob  = offsetof(kib_msg_t, ibm_u) + body_nob;
+        kib_rdma_desc_t   *rd;
+        int                nob;
+        int                n;
+        int                i;
+
+        LASSERT (msg->ibm_type == IBLND_MSG_GET_REQ ||
+                 msg->ibm_type == IBLND_MSG_PUT_ACK);
+
+        rd = msg->ibm_type == IBLND_MSG_GET_REQ ?
+             &msg->ibm_u.get.ibgm_rd :
+             &msg->ibm_u.putack.ibpam_rd;
+
+        if (flip) {
+                __swab32s(&rd->rd_key);
+                __swab32s(&rd->rd_nfrags);
+        }
+
+        n = rd->rd_nfrags;
+
+        if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) {
+                CERROR("Bad nfrags: %d, should be 0 < n <= %d\n",
+                       n, IBLND_MAX_RDMA_FRAGS);
+                return 1;
+        }
+
+        nob = offsetof (kib_msg_t, ibm_u) +
+              kiblnd_rd_msg_size(rd, msg->ibm_type, n);
+
+        if (msg->ibm_nob < nob) {
+                CERROR("Short %s: %d(%d)\n",
+                       kiblnd_msgtype2str(msg->ibm_type), msg->ibm_nob, nob);
+                return 1;
+        }
+
+        if (!flip)
+                return 0;
+
+        for (i = 0; i < n; i++) {
+                __swab32s(&rd->rd_frags[i].rf_nob);
+                __swab64s(&rd->rd_frags[i].rf_addr);
+        }
+
+        return 0;
 }

 void
-kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg,
+kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg, int version,
                  int credits, lnet_nid_t dstnid, __u64 dststamp)
 {
         kib_net_t *net = ni->ni_data;
@@ -63,12 +195,12 @@ kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg,
         /* CAVEAT EMPTOR! all message fields not set here should have been
          * initialised previously.
*/ msg->ibm_magic = IBLND_MSG_MAGIC; - msg->ibm_version = IBLND_MSG_VERSION; + msg->ibm_version = version; /* ibm_type */ msg->ibm_credits = credits; /* ibm_nob */ msg->ibm_cksum = 0; - msg->ibm_srcnid = lnet_ptlcompat_srcnid(ni->ni_nid, dstnid); + msg->ibm_srcnid = ni->ni_nid; msg->ibm_srcstamp = net->ibn_incarnation; msg->ibm_dstnid = dstnid; msg->ibm_dststamp = dststamp; @@ -84,12 +216,10 @@ kiblnd_unpack_msg(kib_msg_t *msg, int nob) { const int hdr_size = offsetof(kib_msg_t, ibm_u); __u32 msg_cksum; - int flip; + __u16 version; int msg_nob; -#if !IBLND_MAP_ON_DEMAND - int i; - int n; -#endif + int flip; + /* 6 bytes are enough to have received magic + version */ if (nob < 6) { CERROR("Short message: %d\n", nob); @@ -105,9 +235,10 @@ kiblnd_unpack_msg(kib_msg_t *msg, int nob) return -EPROTO; } - if (msg->ibm_version != - (flip ? __swab16(IBLND_MSG_VERSION) : IBLND_MSG_VERSION)) { - CERROR("Bad version: %d\n", msg->ibm_version); + version = flip ? __swab16(msg->ibm_version) : msg->ibm_version; + if (version != IBLND_MSG_VERSION && + version != IBLND_MSG_VERSION_1) { + CERROR("Bad version: %x\n", version); return -EPROTO; } @@ -131,14 +262,15 @@ kiblnd_unpack_msg(kib_msg_t *msg, int nob) CERROR("Bad checksum\n"); return -EPROTO; } + msg->ibm_cksum = msg_cksum; if (flip) { /* leave magic unflipped as a clue to peer endianness */ - __swab16s(&msg->ibm_version); + msg->ibm_version = version; CLASSERT (sizeof(msg->ibm_type) == 1); CLASSERT (sizeof(msg->ibm_credits) == 1); - msg->ibm_nob = msg_nob; + msg->ibm_nob = msg_nob; __swab64s(&msg->ibm_srcnid); __swab64s(&msg->ibm_srcstamp); __swab64s(&msg->ibm_dstnid); @@ -150,128 +282,37 @@ kiblnd_unpack_msg(kib_msg_t *msg, int nob) return -EPROTO; } + if (msg_nob < kiblnd_msgtype2size(msg->ibm_type)) { + CERROR("Short %s: %d(%d)\n", kiblnd_msgtype2str(msg->ibm_type), + msg_nob, kiblnd_msgtype2size(msg->ibm_type)); + return -EPROTO; + } + switch (msg->ibm_type) { default: CERROR("Unknown message type %x\n", msg->ibm_type); return -EPROTO; case IBLND_MSG_NOOP: - break; - case IBLND_MSG_IMMEDIATE: - if (msg_nob < offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0])) { - CERROR("Short IMMEDIATE: %d(%d)\n", msg_nob, - (int)offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0])); - return -EPROTO; - } - break; - case IBLND_MSG_PUT_REQ: - if (msg_nob < hdr_size + sizeof(msg->ibm_u.putreq)) { - CERROR("Short PUT_REQ: %d(%d)\n", msg_nob, - (int)(hdr_size + sizeof(msg->ibm_u.putreq))); - return -EPROTO; - } break; case IBLND_MSG_PUT_ACK: - if (msg_nob < hdr_size + sizeof(msg->ibm_u.putack)) { - CERROR("Short PUT_ACK: %d(%d)\n", msg_nob, - (int)(hdr_size + sizeof(msg->ibm_u.putack))); - return -EPROTO; - } -#if IBLND_MAP_ON_DEMAND - if (flip) { - __swab64s(&msg->ibm_u.putack.ibpam_rd.rd_addr); - __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_nob); - __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_key); - } -#else - if (flip) { - __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_key); - __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_nfrags); - } - - n = msg->ibm_u.putack.ibpam_rd.rd_nfrags; - if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) { - CERROR("Bad PUT_ACK nfrags: %d, should be 0 < n <= %d\n", - n, IBLND_MAX_RDMA_FRAGS); - return -EPROTO; - } - - if (msg_nob < offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[n])) { - CERROR("Short PUT_ACK: %d(%d)\n", msg_nob, - (int)offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[n])); - return -EPROTO; - } - - if (flip) { - for (i = 0; i < n; i++) { - __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_frags[i].rf_nob); - 
__swab64s(&msg->ibm_u.putack.ibpam_rd.rd_frags[i].rf_addr); - } - } -#endif - break; - case IBLND_MSG_GET_REQ: - if (msg_nob < hdr_size + sizeof(msg->ibm_u.get)) { - CERROR("Short GET_REQ: %d(%d)\n", msg_nob, - (int)(hdr_size + sizeof(msg->ibm_u.get))); - return -EPROTO; - } -#if IBLND_MAP_ON_DEMAND - if (flip) { - __swab64s(&msg->ibm_u.get.ibgm_rd.rd_addr); - __swab32s(&msg->ibm_u.get.ibgm_rd.rd_nob); - __swab32s(&msg->ibm_u.get.ibgm_rd.rd_key); - } -#else - if (flip) { - __swab32s(&msg->ibm_u.get.ibgm_rd.rd_key); - __swab32s(&msg->ibm_u.get.ibgm_rd.rd_nfrags); - } - - n = msg->ibm_u.get.ibgm_rd.rd_nfrags; - if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) { - CERROR("Bad GET_REQ nfrags: %d, should be 0 < n <= %d\n", - n, IBLND_MAX_RDMA_FRAGS); - return -EPROTO; - } - - if (msg_nob < offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[n])) { - CERROR("Short GET_REQ: %d(%d)\n", msg_nob, - (int)offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[n])); + if (kiblnd_unpack_rd(msg, flip)) return -EPROTO; - } - - if (flip) - for (i = 0; i < msg->ibm_u.get.ibgm_rd.rd_nfrags; i++) { - __swab32s(&msg->ibm_u.get.ibgm_rd.rd_frags[i].rf_nob); - __swab64s(&msg->ibm_u.get.ibgm_rd.rd_frags[i].rf_addr); - } -#endif break; case IBLND_MSG_PUT_NAK: case IBLND_MSG_PUT_DONE: case IBLND_MSG_GET_DONE: - if (msg_nob < hdr_size + sizeof(msg->ibm_u.completion)) { - CERROR("Short RDMA completion: %d(%d)\n", msg_nob, - (int)(hdr_size + sizeof(msg->ibm_u.completion))); - return -EPROTO; - } if (flip) __swab32s(&msg->ibm_u.completion.ibcm_status); break; case IBLND_MSG_CONNREQ: case IBLND_MSG_CONNACK: - if (msg_nob < hdr_size + sizeof(msg->ibm_u.connparams)) { - CERROR("Short connreq/ack: %d(%d)\n", msg_nob, - (int)(hdr_size + sizeof(msg->ibm_u.connparams))); - return -EPROTO; - } if (flip) { __swab16s(&msg->ibm_u.connparams.ibcp_queue_depth); __swab16s(&msg->ibm_u.connparams.ibcp_max_frags); @@ -303,7 +344,7 @@ kiblnd_create_peer (lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid) peer->ibp_ni = ni; peer->ibp_nid = nid; peer->ibp_error = 0; - peer->ibp_last_alive = cfs_time_current(); + peer->ibp_last_alive = 0; atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */ INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */ @@ -314,7 +355,7 @@ kiblnd_create_peer (lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid) /* always called with a ref on ni, which prevents ni being shutdown */ LASSERT (net->ibn_shutdown == 0); - + /* npeers only grows with the global lock held */ atomic_inc(&net->ibn_npeers); @@ -346,26 +387,6 @@ kiblnd_destroy_peer (kib_peer_t *peer) atomic_dec(&net->ibn_npeers); } -void -kiblnd_destroy_dev (kib_dev_t *dev) -{ - LASSERT (dev->ibd_nnets == 0); - - if (!list_empty(&dev->ibd_list)) /* on kib_devs? 
*/ - list_del_init(&dev->ibd_list); - - if (dev->ibd_mr != NULL) - ib_dereg_mr(dev->ibd_mr); - - if (dev->ibd_pd != NULL) - ib_dealloc_pd(dev->ibd_pd); - - if (dev->ibd_cmid != NULL) - rdma_destroy_id(dev->ibd_cmid); - - LIBCFS_FREE(dev, sizeof(*dev)); -} - kib_peer_t * kiblnd_find_peer_locked (lnet_nid_t nid) { @@ -386,9 +407,10 @@ kiblnd_find_peer_locked (lnet_nid_t nid) if (peer->ibp_nid != nid) continue; - CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n", + CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n", peer, libcfs_nid2str(nid), - atomic_read(&peer->ibp_refcount)); + atomic_read(&peer->ibp_refcount), + peer->ibp_version); return peer; } return NULL; @@ -586,11 +608,12 @@ kiblnd_debug_conn (kib_conn_t *conn) spin_lock(&conn->ibc_lock); - CDEBUG(D_CONSOLE, "conn[%d] %p -> %s: \n", + CDEBUG(D_CONSOLE, "conn[%d] %p [version %x] -> %s: \n", atomic_read(&conn->ibc_refcount), conn, - libcfs_nid2str(conn->ibc_peer->ibp_nid)); - CDEBUG(D_CONSOLE, " state %d nposted %d cred %d o_cred %d r_cred %d\n", - conn->ibc_state, conn->ibc_nsends_posted, conn->ibc_credits, + conn->ibc_version, libcfs_nid2str(conn->ibc_peer->ibp_nid)); + CDEBUG(D_CONSOLE, " state %d nposted %d/%d cred %d o_cred %d r_cred %d\n", + conn->ibc_state, conn->ibc_noops_posted, + conn->ibc_nsends_posted, conn->ibc_credits, conn->ibc_outstanding_credits, conn->ibc_reserved_credits); CDEBUG(D_CONSOLE, " comms_err %d\n", conn->ibc_comms_error); @@ -615,14 +638,56 @@ kiblnd_debug_conn (kib_conn_t *conn) kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list)); CDEBUG(D_CONSOLE, " rxs:\n"); - for (i = 0; i < IBLND_RX_MSGS; i++) + for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) kiblnd_debug_rx(&conn->ibc_rxs[i]); spin_unlock(&conn->ibc_lock); } +int +kiblnd_translate_mtu(int value) +{ + switch (value) { + default: + return -1; + case 0: + return 0; + case 256: + return IB_MTU_256; + case 512: + return IB_MTU_512; + case 1024: + return IB_MTU_1024; + case 2048: + return IB_MTU_2048; + case 4096: + return IB_MTU_4096; + } +} + +static void +kiblnd_setup_mtu(struct rdma_cm_id *cmid) +{ + unsigned long flags; + int mtu; + + /* XXX There is no path record for iWARP, set by netdev->change_mtu? */ + if (cmid->route.path_rec == NULL) + return; + + write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + + mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu); + LASSERT (mtu >= 0); + if (mtu != 0) + cmid->route.path_rec->mtu = mtu; + + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); +} + kib_conn_t * -kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid, int state) +kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, + int state, int version) { /* CAVEAT EMPTOR: * If the new conn is created successfully it takes over the caller's @@ -631,15 +696,13 @@ kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid, int state) * she must dispose of 'cmid'. (Actually I'd block forever if I tried * to destroy 'cmid' here since I'm called from the CM which still has * its ref on 'cmid'). */ - kib_conn_t *conn; kib_net_t *net = peer->ibp_ni->ni_data; - int i; - int page_offset; - int ipage; - int rc; - struct ib_cq *cq; struct ib_qp_init_attr *init_qp_attr; + kib_conn_t *conn; + struct ib_cq *cq; unsigned long flags; + int rc; + int i; LASSERT (net != NULL); LASSERT (!in_interrupt()); @@ -661,6 +724,7 @@ kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid, int state) memset(conn, 0, sizeof(*conn)); /* zero flags, NULL pointers etc... 
*/ conn->ibc_state = IBLND_CONN_INIT; + conn->ibc_version = version; conn->ibc_peer = peer; /* I take the caller's ref */ cmid->context = conn; /* for future CM callbacks */ conn->ibc_cmid = cmid; @@ -679,65 +743,50 @@ kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid, int state) } memset(conn->ibc_connvars, 0, sizeof(*conn->ibc_connvars)); - LIBCFS_ALLOC(conn->ibc_rxs, IBLND_RX_MSGS * sizeof(kib_rx_t)); + LIBCFS_ALLOC(conn->ibc_rxs, IBLND_RX_MSGS(version) * sizeof(kib_rx_t)); if (conn->ibc_rxs == NULL) { CERROR("Cannot allocate RX buffers\n"); goto failed_2; } - memset(conn->ibc_rxs, 0, IBLND_RX_MSGS * sizeof(kib_rx_t)); + memset(conn->ibc_rxs, 0, IBLND_RX_MSGS(version) * sizeof(kib_rx_t)); - rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, IBLND_RX_MSG_PAGES); + rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, + IBLND_RX_MSG_PAGES(version)); if (rc != 0) goto failed_2; - for (i = ipage = page_offset = 0; i < IBLND_RX_MSGS; i++) { - struct page *page = conn->ibc_rx_pages->ibp_pages[ipage]; - kib_rx_t *rx = &conn->ibc_rxs[i]; - - rx->rx_conn = conn; - rx->rx_msg = (kib_msg_t *)(((char *)page_address(page)) + - page_offset); - rx->rx_msgaddr = kiblnd_dma_map_single(cmid->device, - rx->rx_msg, IBLND_MSG_SIZE, - DMA_FROM_DEVICE); - KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr); - - CDEBUG(D_NET,"rx %d: %p "LPX64"("LPX64")\n", - i, rx->rx_msg, rx->rx_msgaddr, - lnet_page2phys(page) + page_offset); - - page_offset += IBLND_MSG_SIZE; - LASSERT (page_offset <= PAGE_SIZE); - - if (page_offset == PAGE_SIZE) { - page_offset = 0; - ipage++; - LASSERT (ipage <= IBLND_RX_MSG_PAGES); - } - } + kiblnd_map_rx_descs(conn); +#ifdef HAVE_OFED_IB_COMP_VECTOR cq = ib_create_cq(cmid->device, kiblnd_cq_completion, kiblnd_cq_event, conn, - IBLND_CQ_ENTRIES()); - if (!IS_ERR(cq)) { - conn->ibc_cq = cq; - } else { - CERROR("Can't create CQ: %ld\n", PTR_ERR(cq)); + IBLND_CQ_ENTRIES(version), 0); +#else + cq = ib_create_cq(cmid->device, + kiblnd_cq_completion, kiblnd_cq_event, conn, + IBLND_CQ_ENTRIES(version)); +#endif + if (IS_ERR(cq)) { + CERROR("Can't create CQ: %ld, cqe: %d\n", + PTR_ERR(cq), IBLND_CQ_ENTRIES(version)); goto failed_2; } + conn->ibc_cq = cq; + rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); if (rc != 0) { CERROR("Can't request completion notificiation: %d\n", rc); goto failed_2; } - + + kiblnd_setup_mtu(cmid); + memset(init_qp_attr, 0, sizeof(*init_qp_attr)); init_qp_attr->event_handler = kiblnd_qp_event; init_qp_attr->qp_context = conn; - init_qp_attr->cap.max_send_wr = (*kiblnd_tunables.kib_concurrent_sends) * - (1 + IBLND_MAX_RDMA_FRAGS); - init_qp_attr->cap.max_recv_wr = IBLND_RX_MSGS; + init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(version); + init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(version); init_qp_attr->cap.max_send_sge = 1; init_qp_attr->cap.max_recv_sge = 1; init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR; @@ -745,54 +794,22 @@ kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid, int state) init_qp_attr->send_cq = cq; init_qp_attr->recv_cq = cq; - rc = 0; - write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - switch (*kiblnd_tunables.kib_ib_mtu) { - default: - rc = *kiblnd_tunables.kib_ib_mtu; - /* fall through to... */ - case 0: /* set tunable to the default - * CAVEAT EMPTOR! 
this assumes the default is one of the MTUs - * below, otherwise we'll WARN on the next QP create */ - *kiblnd_tunables.kib_ib_mtu = - ib_mtu_enum_to_int(cmid->route.path_rec->mtu); - break; - case 256: - cmid->route.path_rec->mtu = IB_MTU_256; - break; - case 512: - cmid->route.path_rec->mtu = IB_MTU_512; - break; - case 1024: - cmid->route.path_rec->mtu = IB_MTU_1024; - break; - case 2048: - cmid->route.path_rec->mtu = IB_MTU_2048; - break; - case 4096: - cmid->route.path_rec->mtu = IB_MTU_4096; - break; - } - write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - - if (rc != 0) - CWARN("Invalid IB MTU value %d, using default value %d\n", - rc, *kiblnd_tunables.kib_ib_mtu); - rc = rdma_create_qp(cmid, net->ibn_dev->ibd_pd, init_qp_attr); if (rc != 0) { - CERROR("Can't create QP: %d\n", rc); + CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n", + rc, init_qp_attr->cap.max_send_wr, + init_qp_attr->cap.max_recv_wr); goto failed_2; } LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr)); /* 1 ref for caller and each rxmsg */ - atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS); - conn->ibc_nrx = IBLND_RX_MSGS; + atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version)); + conn->ibc_nrx = IBLND_RX_MSGS(version); /* post receives */ - for (i = 0; i < IBLND_RX_MSGS; i++) { + for (i = 0; i < IBLND_RX_MSGS(version); i++) { rc = kiblnd_post_rx(&conn->ibc_rxs[i], IBLND_POSTRX_NO_CREDIT); if (rc != 0) { @@ -804,18 +821,24 @@ kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid, int state) /* correct # of posted buffers * NB locking needed now I'm racing with completion */ spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags); - conn->ibc_nrx -= IBLND_RX_MSGS - i; + conn->ibc_nrx -= IBLND_RX_MSGS(version) - i; spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags); + /* cmid will be destroyed by CM(ofed) after cm_callback + * returned, so we can't refer it anymore + * (by kiblnd_connd()->kiblnd_destroy_conn) */ + rdma_destroy_qp(conn->ibc_cmid); + conn->ibc_cmid = NULL; + /* Drop my own and unused rxbuffer refcounts */ - while (i++ <= IBLND_RX_MSGS) + while (i++ <= IBLND_RX_MSGS(version)) kiblnd_conn_decref(conn); return NULL; } } - + /* Init successful! 
*/ LASSERT (state == IBLND_CONN_ACTIVE_CONNECT || state == IBLND_CONN_PASSIVE_WAIT); @@ -839,7 +862,6 @@ kiblnd_destroy_conn (kib_conn_t *conn) struct rdma_cm_id *cmid = conn->ibc_cmid; kib_peer_t *peer = conn->ibc_peer; int rc; - int i; LASSERT (!in_interrupt()); LASSERT (atomic_read(&conn->ibc_refcount) == 0); @@ -848,6 +870,7 @@ kiblnd_destroy_conn (kib_conn_t *conn) LASSERT (list_empty(&conn->ibc_tx_queue_rsrvd)); LASSERT (list_empty(&conn->ibc_tx_queue_nocred)); LASSERT (list_empty(&conn->ibc_active_txs)); + LASSERT (conn->ibc_noops_posted == 0); LASSERT (conn->ibc_nsends_posted == 0); switch (conn->ibc_state) { @@ -864,8 +887,9 @@ kiblnd_destroy_conn (kib_conn_t *conn) break; } - if (conn->ibc_cmid->qp != NULL) - rdma_destroy_qp(conn->ibc_cmid); + /* conn->ibc_cmid might be destroyed by CM already */ + if (cmid != NULL && cmid->qp != NULL) + rdma_destroy_qp(cmid); if (conn->ibc_cq != NULL) { rc = ib_destroy_cq(conn->ibc_cq); @@ -873,26 +897,12 @@ kiblnd_destroy_conn (kib_conn_t *conn) CWARN("Error destroying CQ: %d\n", rc); } - if (conn->ibc_rx_pages != NULL) { - LASSERT (conn->ibc_rxs != NULL); - - for (i = 0; i < IBLND_RX_MSGS; i++) { - kib_rx_t *rx = &conn->ibc_rxs[i]; - - LASSERT (rx->rx_nob >= 0); /* not posted */ - - kiblnd_dma_unmap_single(conn->ibc_cmid->device, - KIBLND_UNMAP_ADDR(rx, rx_msgunmap, - rx->rx_msgaddr), - IBLND_MSG_SIZE, DMA_FROM_DEVICE); - } - - kiblnd_free_pages(conn->ibc_rx_pages); - } + if (conn->ibc_rx_pages != NULL) + kiblnd_unmap_rx_descs(conn); if (conn->ibc_rxs != NULL) { LIBCFS_FREE(conn->ibc_rxs, - IBLND_RX_MSGS * sizeof(kib_rx_t)); + IBLND_RX_MSGS(conn->ibc_version) * sizeof(kib_rx_t)); } if (conn->ibc_connvars != NULL) @@ -921,15 +931,21 @@ kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why) list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) { conn = list_entry(ctmp, kib_conn_t, ibc_list); - count++; + CDEBUG(D_NET, "Closing conn -> %s, " + "version: %x, reason: %d\n", + libcfs_nid2str(peer->ibp_nid), + conn->ibc_version, why); + kiblnd_close_conn_locked(conn, why); + count++; } return count; } int -kiblnd_close_stale_conns_locked (kib_peer_t *peer, __u64 incarnation) +kiblnd_close_stale_conns_locked (kib_peer_t *peer, + int version, __u64 incarnation) { kib_conn_t *conn; struct list_head *ctmp; @@ -939,15 +955,18 @@ kiblnd_close_stale_conns_locked (kib_peer_t *peer, __u64 incarnation) list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) { conn = list_entry(ctmp, kib_conn_t, ibc_list); - if (conn->ibc_incarnation == incarnation) + if (conn->ibc_version == version && + conn->ibc_incarnation == incarnation) continue; - CDEBUG(D_NET, "Closing stale conn -> %s incarnation:"LPX64"("LPX64")\n", + CDEBUG(D_NET, "Closing stale conn -> %s version: %x, " + "incarnation:"LPX64"(%x, "LPX64")\n", libcfs_nid2str(peer->ibp_nid), - conn->ibc_incarnation, incarnation); + conn->ibc_version, conn->ibc_incarnation, + version, incarnation); - count++; kiblnd_close_conn_locked(conn, -ESTALE); + count++; } return count; @@ -1024,16 +1043,23 @@ kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) break; } case IOC_LIBCFS_GET_CONN: { - kib_conn_t *conn = kiblnd_get_conn_by_idx(ni, data->ioc_count); + kib_conn_t *conn; + rc = 0; + conn = kiblnd_get_conn_by_idx(ni, data->ioc_count); if (conn == NULL) { rc = -ENOENT; - } else { - // kiblnd_debug_conn(conn); - rc = 0; - data->ioc_nid = conn->ibc_peer->ibp_nid; - kiblnd_conn_decref(conn); + break; } + + LASSERT (conn->ibc_cmid != NULL); + data->ioc_nid = conn->ibc_peer->ibp_nid; + if (conn->ibc_cmid->route.path_rec == NULL) 
+ data->ioc_u32[0] = 0; /* iWarp has no path MTU */ + else + data->ioc_u32[0] = + ib_mtu_enum_to_int(conn->ibc_cmid->route.path_rec->mtu); + kiblnd_conn_decref(conn); break; } case IOC_LIBCFS_CLOSE_CONNECTION: { @@ -1049,11 +1075,44 @@ kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) } void +kiblnd_query (lnet_ni_t *ni, lnet_nid_t nid, time_t *when) +{ + cfs_time_t last_alive = 0; + rwlock_t *glock = &kiblnd_data.kib_global_lock; + kib_peer_t *peer; + unsigned long flags; + + read_lock_irqsave(glock, flags); + + peer = kiblnd_find_peer_locked(nid); + if (peer != NULL) { + LASSERT (peer->ibp_connecting > 0 || /* creating conns */ + peer->ibp_accepting > 0 || + !list_empty(&peer->ibp_conns)); /* active conn */ + last_alive = peer->ibp_last_alive; + } + + read_unlock_irqrestore(glock, flags); + + if (last_alive != 0) + *when = cfs_time_current_sec() - + cfs_duration_sec(cfs_time_current() - last_alive); + + /* peer is not persistent in hash, trigger peer creation + * and connection establishment with a NULL tx */ + if (peer == NULL) + kiblnd_launch_tx(ni, NULL, nid); + return; +} + +void kiblnd_free_pages (kib_pages_t *p) { int npages = p->ibp_npages; int i; + LASSERT (p->ibp_device == NULL); + for (i = 0; i < npages; i++) if (p->ibp_pages[i] != NULL) __free_page(p->ibp_pages[i]); @@ -1090,50 +1149,193 @@ kiblnd_alloc_pages (kib_pages_t **pp, int npages) } void -kiblnd_free_tx_descs (lnet_ni_t *ni) +kiblnd_unmap_rx_descs(kib_conn_t *conn) { - int i; - kib_net_t *net = ni->ni_data; + kib_rx_t *rx; + int i; - LASSERT (net != NULL); + LASSERT (conn->ibc_rxs != NULL); + LASSERT (conn->ibc_rx_pages->ibp_device != NULL); - if (net->ibn_tx_descs != NULL) { - for (i = 0; i < IBLND_TX_MSGS(); i++) { - kib_tx_t *tx = &net->ibn_tx_descs[i]; + for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) { + rx = &conn->ibc_rxs[i]; -#if IBLND_MAP_ON_DEMAND - if (tx->tx_pages != NULL) - LIBCFS_FREE(tx->tx_pages, LNET_MAX_IOV * - sizeof(*tx->tx_pages)); -#else - if (tx->tx_wrq != NULL) - LIBCFS_FREE(tx->tx_wrq, - (1 + IBLND_MAX_RDMA_FRAGS) * - sizeof(*tx->tx_wrq)); - - if (tx->tx_sge != NULL) - LIBCFS_FREE(tx->tx_sge, - (1 + IBLND_MAX_RDMA_FRAGS) * - sizeof(*tx->tx_sge)); - - if (tx->tx_rd != NULL) - LIBCFS_FREE(tx->tx_rd, - offsetof(kib_rdma_desc_t, - rd_frags[IBLND_MAX_RDMA_FRAGS])); - - if (tx->tx_frags != NULL) - LIBCFS_FREE(tx->tx_frags, - IBLND_MAX_RDMA_FRAGS * - sizeof(*tx->tx_frags)); -#endif + LASSERT (rx->rx_nob >= 0); /* not posted */ + + kiblnd_dma_unmap_single(conn->ibc_rx_pages->ibp_device, + KIBLND_UNMAP_ADDR(rx, rx_msgunmap, + rx->rx_msgaddr), + IBLND_MSG_SIZE, DMA_FROM_DEVICE); + } + + conn->ibc_rx_pages->ibp_device = NULL; + + kiblnd_free_pages(conn->ibc_rx_pages); + + conn->ibc_rx_pages = NULL; +} + +void +kiblnd_map_rx_descs(kib_conn_t *conn) +{ + kib_rx_t *rx; + struct page *pg; + int pg_off; + int ipg; + int i; + + for (pg_off = ipg = i = 0; + i < IBLND_RX_MSGS(conn->ibc_version); i++) { + pg = conn->ibc_rx_pages->ibp_pages[ipg]; + rx = &conn->ibc_rxs[i]; + + rx->rx_conn = conn; + rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off); + + rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_cmid->device, + rx->rx_msg, IBLND_MSG_SIZE, + DMA_FROM_DEVICE); + LASSERT (!kiblnd_dma_mapping_error(conn->ibc_cmid->device, + rx->rx_msgaddr)); + KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr); + + CDEBUG(D_NET,"rx %d: %p "LPX64"("LPX64")\n", + i, rx->rx_msg, rx->rx_msgaddr, + lnet_page2phys(pg) + pg_off); + + pg_off += IBLND_MSG_SIZE; + LASSERT (pg_off <= PAGE_SIZE); + + if (pg_off 
== PAGE_SIZE) { + pg_off = 0; + ipg++; + LASSERT (ipg <= IBLND_RX_MSG_PAGES(conn->ibc_version)); } + } + + conn->ibc_rx_pages->ibp_device = conn->ibc_cmid->device; +} + +void +kiblnd_unmap_tx_descs(lnet_ni_t *ni) +{ + int i; + kib_tx_t *tx; + kib_net_t *net = ni->ni_data; + + LASSERT (net->ibn_tx_pages != NULL); + LASSERT (net->ibn_tx_pages->ibp_device != NULL); + + for (i = 0; i < IBLND_TX_MSGS(); i++) { + tx = &net->ibn_tx_descs[i]; + + kiblnd_dma_unmap_single(net->ibn_tx_pages->ibp_device, + KIBLND_UNMAP_ADDR(tx, tx_msgunmap, + tx->tx_msgaddr), + IBLND_MSG_SIZE, DMA_TO_DEVICE); + } - LIBCFS_FREE(net->ibn_tx_descs, - IBLND_TX_MSGS() * sizeof(kib_tx_t)); + net->ibn_tx_pages->ibp_device = NULL; +} + +void +kiblnd_map_tx_descs (lnet_ni_t *ni) +{ + kib_net_t *net = ni->ni_data; + struct page *page; + kib_tx_t *tx; + int page_offset; + int ipage; + int i; + + LASSERT (net != NULL); + + /* pre-mapped messages are not bigger than 1 page */ + CLASSERT (IBLND_MSG_SIZE <= PAGE_SIZE); + + /* No fancy arithmetic when we do the buffer calculations */ + CLASSERT (PAGE_SIZE % IBLND_MSG_SIZE == 0); + + + for (ipage = page_offset = i = 0; i < IBLND_TX_MSGS(); i++) { + page = net->ibn_tx_pages->ibp_pages[ipage]; + tx = &net->ibn_tx_descs[i]; + + tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) + + page_offset); + + tx->tx_msgaddr = kiblnd_dma_map_single( + net->ibn_dev->ibd_cmid->device, + tx->tx_msg, IBLND_MSG_SIZE, DMA_TO_DEVICE); + LASSERT (!kiblnd_dma_mapping_error(net->ibn_dev->ibd_cmid->device, + tx->tx_msgaddr)); + KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr); + + list_add(&tx->tx_list, &net->ibn_idle_txs); + + page_offset += IBLND_MSG_SIZE; + LASSERT (page_offset <= PAGE_SIZE); + + if (page_offset == PAGE_SIZE) { + page_offset = 0; + ipage++; + LASSERT (ipage <= IBLND_TX_MSG_PAGES()); + } } + net->ibn_tx_pages->ibp_device = net->ibn_dev->ibd_cmid->device; +} + +void +kiblnd_free_tx_descs (lnet_ni_t *ni) +{ + int i; + kib_net_t *net = ni->ni_data; + + LASSERT (net != NULL); + if (net->ibn_tx_pages != NULL) kiblnd_free_pages(net->ibn_tx_pages); + + if (net->ibn_tx_descs == NULL) + return; + + for (i = 0; i < IBLND_TX_MSGS(); i++) { + kib_tx_t *tx = &net->ibn_tx_descs[i]; + + if (tx->tx_pages != NULL) + LIBCFS_FREE(tx->tx_pages, + LNET_MAX_IOV * + sizeof(*tx->tx_pages)); + + if (tx->tx_ipb != NULL) + LIBCFS_FREE(tx->tx_ipb, + IBLND_MAX_RDMA_FRAGS * + sizeof(*tx->tx_ipb)); + + if (tx->tx_frags != NULL) + LIBCFS_FREE(tx->tx_frags, + IBLND_MAX_RDMA_FRAGS * + sizeof(*tx->tx_frags)); + + if (tx->tx_wrq != NULL) + LIBCFS_FREE(tx->tx_wrq, + (1 + IBLND_MAX_RDMA_FRAGS) * + sizeof(*tx->tx_wrq)); + + if (tx->tx_sge != NULL) + LIBCFS_FREE(tx->tx_sge, + (1 + IBLND_MAX_RDMA_FRAGS) * + sizeof(*tx->tx_sge)); + + if (tx->tx_rd != NULL) + LIBCFS_FREE(tx->tx_rd, + offsetof(kib_rdma_desc_t, + rd_frags[IBLND_MAX_RDMA_FRAGS])); + } + + LIBCFS_FREE(net->ibn_tx_descs, + IBLND_TX_MSGS() * sizeof(kib_tx_t)); } int @@ -1165,104 +1367,450 @@ kiblnd_alloc_tx_descs (lnet_ni_t *ni) for (i = 0; i < IBLND_TX_MSGS(); i++) { kib_tx_t *tx = &net->ibn_tx_descs[i]; -#if IBLND_MAP_ON_DEMAND - LIBCFS_ALLOC(tx->tx_pages, LNET_MAX_IOV * - sizeof(*tx->tx_pages)); - if (tx->tx_pages == NULL) { - CERROR("Can't allocate phys page vector[%d]\n", - LNET_MAX_IOV); - return -ENOMEM; + if (net->ibn_fmrpool != NULL) { + LIBCFS_ALLOC(tx->tx_pages, LNET_MAX_IOV * + sizeof(*tx->tx_pages)); + if (tx->tx_pages == NULL) + return -ENOMEM; } -#else - LIBCFS_ALLOC(tx->tx_wrq, - (1 + IBLND_MAX_RDMA_FRAGS) * + + if (net->ibn_pmrpool != NULL) { + 
LIBCFS_ALLOC(tx->tx_ipb, + IBLND_MAX_RDMA_FRAGS * + sizeof(*tx->tx_ipb)); + if (tx->tx_ipb == NULL) + return -ENOMEM; + } + + LIBCFS_ALLOC(tx->tx_frags, + IBLND_MAX_RDMA_FRAGS * + sizeof(*tx->tx_frags)); + if (tx->tx_frags == NULL) + return -ENOMEM; + + LIBCFS_ALLOC(tx->tx_wrq, + (1 + IBLND_MAX_RDMA_FRAGS) * sizeof(*tx->tx_wrq)); if (tx->tx_wrq == NULL) return -ENOMEM; - - LIBCFS_ALLOC(tx->tx_sge, - (1 + IBLND_MAX_RDMA_FRAGS) * + + LIBCFS_ALLOC(tx->tx_sge, + (1 + IBLND_MAX_RDMA_FRAGS) * sizeof(*tx->tx_sge)); if (tx->tx_sge == NULL) return -ENOMEM; - - LIBCFS_ALLOC(tx->tx_rd, - offsetof(kib_rdma_desc_t, + + LIBCFS_ALLOC(tx->tx_rd, + offsetof(kib_rdma_desc_t, rd_frags[IBLND_MAX_RDMA_FRAGS])); if (tx->tx_rd == NULL) return -ENOMEM; + } - LIBCFS_ALLOC(tx->tx_frags, - IBLND_MAX_RDMA_FRAGS * - sizeof(*tx->tx_frags)); - if (tx->tx_frags == NULL) - return -ENOMEM; -#endif + return 0; +} + +struct ib_mr * +kiblnd_find_dma_mr(kib_net_t *net, __u64 addr, __u64 size) +{ + __u64 index; + + LASSERT (net->ibn_dev->ibd_mrs[0] != NULL); + + if (net->ibn_dev->ibd_nmrs == 1) + return net->ibn_dev->ibd_mrs[0]; + + index = addr >> net->ibn_dev->ibd_mr_shift; + + if (index < net->ibn_dev->ibd_nmrs && + index == ((addr + size - 1) >> net->ibn_dev->ibd_mr_shift)) + return net->ibn_dev->ibd_mrs[index]; + + return NULL; +} + +struct ib_mr * +kiblnd_find_rd_dma_mr(kib_net_t *net, kib_rdma_desc_t *rd) +{ + struct ib_mr *prev_mr; + struct ib_mr *mr; + int i; + + LASSERT (net->ibn_dev->ibd_mrs[0] != NULL); + + if (*kiblnd_tunables.kib_map_on_demand > 0 && + *kiblnd_tunables.kib_map_on_demand <= rd->rd_nfrags) + return NULL; + + if (net->ibn_dev->ibd_nmrs == 1) + return net->ibn_dev->ibd_mrs[0]; + + for (i = 0, mr = prev_mr = NULL; + i < rd->rd_nfrags; i++) { + mr = kiblnd_find_dma_mr(net, + rd->rd_frags[i].rf_addr, + rd->rd_frags[i].rf_nob); + if (prev_mr == NULL) + prev_mr = mr; + + if (mr == NULL || prev_mr != mr) { + /* Can't covered by one single MR */ + mr = NULL; + break; + } + } + + return mr; +} + +void +kiblnd_dev_cleanup(kib_dev_t *ibdev) +{ + int i; + + if (ibdev->ibd_mrs == NULL) + return; + + for (i = 0; i < ibdev->ibd_nmrs; i++) { + if (ibdev->ibd_mrs[i] == NULL) + break; + + ib_dereg_mr(ibdev->ibd_mrs[i]); } + LIBCFS_FREE(ibdev->ibd_mrs, sizeof(*ibdev->ibd_mrs) * ibdev->ibd_nmrs); + ibdev->ibd_mrs = NULL; +} + +int +kiblnd_ib_create_fmr_pool(kib_dev_t *ibdev, struct ib_fmr_pool **fmrpp) +{ + /* FMR pool for RDMA */ + struct ib_fmr_pool *fmrpool; + struct ib_fmr_pool_param param = { + .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE, + .page_shift = PAGE_SHIFT, + .access = (IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE), + .pool_size = *kiblnd_tunables.kib_fmr_pool_size, + .dirty_watermark = *kiblnd_tunables.kib_fmr_flush_trigger, + .flush_function = NULL, + .flush_arg = NULL, + .cache = *kiblnd_tunables.kib_fmr_cache}; + + if (*kiblnd_tunables.kib_fmr_pool_size < + *kiblnd_tunables.kib_ntx) { + CERROR("Can't set fmr pool size (%d) < ntx(%d)\n", + *kiblnd_tunables.kib_fmr_pool_size, + *kiblnd_tunables.kib_ntx); + return -EINVAL; + } + + fmrpool = ib_create_fmr_pool(ibdev->ibd_pd, ¶m); + if (IS_ERR(fmrpool)) + return PTR_ERR(fmrpool); + + *fmrpp = fmrpool; + return 0; } void -kiblnd_unmap_tx_descs (lnet_ni_t *ni) +kiblnd_phys_mr_unmap(kib_net_t *net, kib_phys_mr_t *pmr) { - int i; - kib_tx_t *tx; - kib_net_t *net = ni->ni_data; + kib_phys_mr_pool_t *pool = net->ibn_pmrpool; + struct ib_mr *mr; - LASSERT (net != NULL); + spin_lock(&pool->ibmp_lock); - for (i = 0; i < IBLND_TX_MSGS(); i++) { - tx = 
&net->ibn_tx_descs[i]; + mr = pmr->ibpm_mr; - kiblnd_dma_unmap_single(net->ibn_dev->ibd_cmid->device, - KIBLND_UNMAP_ADDR(tx, tx_msgunmap, - tx->tx_msgaddr), - IBLND_MSG_SIZE, DMA_TO_DEVICE); + list_add(&pmr->ibpm_link, &pool->ibmp_free_list); + pool->ibmp_allocated --; + + spin_unlock(&pool->ibmp_lock); + + if (mr != NULL) + ib_dereg_mr(mr); +} + +kib_phys_mr_t * +kiblnd_phys_mr_map(kib_net_t *net, kib_rdma_desc_t *rd, + struct ib_phys_buf *ipb, __u64 *iova) +{ + kib_phys_mr_pool_t *pool = net->ibn_pmrpool; + kib_phys_mr_t *pmr; + int i; + + LASSERT (ipb != NULL); + + spin_lock(&pool->ibmp_lock); + if (list_empty(&pool->ibmp_free_list)) { + spin_unlock(&pool->ibmp_lock); + CERROR("pre-allocated MRs is not enough\n"); + + return NULL; + } + + pmr = list_entry(pool->ibmp_free_list.next, + kib_phys_mr_t, ibpm_link); + list_del_init(&pmr->ibpm_link); + pool->ibmp_allocated ++; + + spin_unlock(&pool->ibmp_lock); + + for (i = 0; i < rd->rd_nfrags; i ++) { + ipb[i].addr = rd->rd_frags[i].rf_addr; + ipb[i].size = rd->rd_frags[i].rf_nob; + } + + pmr->ibpm_mr = ib_reg_phys_mr(net->ibn_dev->ibd_pd, ipb, + rd->rd_nfrags, + IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE, + iova); + if (!IS_ERR(pmr->ibpm_mr)) { + pmr->ibpm_iova = *iova; + return pmr; } + + CERROR("Failed ib_reg_phys_mr: %ld\n", PTR_ERR(pmr->ibpm_mr)); + pmr->ibpm_mr = NULL; + + spin_lock(&pool->ibmp_lock); + + list_add(&pmr->ibpm_link, &pool->ibmp_free_list); + pool->ibmp_allocated --; + + spin_unlock(&pool->ibmp_lock); + + return NULL; } void -kiblnd_map_tx_descs (lnet_ni_t *ni) +kiblnd_destroy_pmr_pool(kib_phys_mr_pool_t *pool) { - int ipage = 0; - int page_offset = 0; - int i; - struct page *page; - kib_tx_t *tx; - kib_net_t *net = ni->ni_data; + kib_phys_mr_t *pmr; - LASSERT (net != NULL); + LASSERT (pool->ibmp_allocated == 0); - /* pre-mapped messages are not bigger than 1 page */ - CLASSERT (IBLND_MSG_SIZE <= PAGE_SIZE); + while (!list_empty(&pool->ibmp_free_list)) { + pmr = list_entry(pool->ibmp_free_list.next, + kib_phys_mr_t, ibpm_link); - /* No fancy arithmetic when we do the buffer calculations */ - CLASSERT (PAGE_SIZE % IBLND_MSG_SIZE == 0); + LASSERT (pmr->ibpm_mr == NULL); - for (i = 0; i < IBLND_TX_MSGS(); i++) { - page = net->ibn_tx_pages->ibp_pages[ipage]; - tx = &net->ibn_tx_descs[i]; + list_del(&pmr->ibpm_link); - tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) + - page_offset); + LIBCFS_FREE(pmr, sizeof(kib_phys_mr_t)); + } - tx->tx_msgaddr = kiblnd_dma_map_single( - net->ibn_dev->ibd_cmid->device, - tx->tx_msg, IBLND_MSG_SIZE, DMA_TO_DEVICE); - KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr); + LIBCFS_FREE(pool, sizeof(kib_phys_mr_pool_t)); +} - list_add(&tx->tx_list, &net->ibn_idle_txs); +int +kiblnd_create_pmr_pool(kib_dev_t *ibdev, kib_phys_mr_pool_t **poolpp) +{ + kib_phys_mr_pool_t *pool; + kib_phys_mr_t *pmr; + int i; + + if (*kiblnd_tunables.kib_pmr_pool_size < + *kiblnd_tunables.kib_ntx) { + CERROR("Can't set pmr pool size (%d) < ntx(%d)\n", + *kiblnd_tunables.kib_pmr_pool_size, + *kiblnd_tunables.kib_ntx); + return -EINVAL; + } - page_offset += IBLND_MSG_SIZE; - LASSERT (page_offset <= PAGE_SIZE); + LIBCFS_ALLOC(pool, sizeof(kib_phys_mr_pool_t)); + if (pool == NULL) + return -ENOMEM; - if (page_offset == PAGE_SIZE) { - page_offset = 0; - ipage++; - LASSERT (ipage <= IBLND_TX_MSG_PAGES()); + spin_lock_init(&pool->ibmp_lock); + + pool->ibmp_allocated = 0; + CFS_INIT_LIST_HEAD(&pool->ibmp_free_list); + + for (i = 0; i < *kiblnd_tunables.kib_pmr_pool_size; i++) { + LIBCFS_ALLOC(pmr, 
sizeof(kib_phys_mr_t)); + + if (pmr == NULL) { + kiblnd_destroy_pmr_pool(pool); + return -ENOMEM; } + + memset(pmr, 0, sizeof(kib_phys_mr_t)); + + list_add(&pmr->ibpm_link, &pool->ibmp_free_list); } + + *poolpp = pool; + + return 0; +} + +static int +kiblnd_dev_get_attr(kib_dev_t *ibdev) +{ + struct ib_device_attr *attr; + int rc; + + /* It's safe to assume a HCA can handle a page size + * matching that of the native system */ + ibdev->ibd_page_shift = PAGE_SHIFT; + ibdev->ibd_page_size = 1 << PAGE_SHIFT; + ibdev->ibd_page_mask = ~((__u64)ibdev->ibd_page_size - 1); + + LIBCFS_ALLOC(attr, sizeof(*attr)); + if (attr == NULL) { + CERROR("Out of memory\n"); + return -ENOMEM; + } + + rc = ib_query_device(ibdev->ibd_cmid->device, attr); + if (rc == 0) + ibdev->ibd_mr_size = attr->max_mr_size; + + LIBCFS_FREE(attr, sizeof(*attr)); + + if (rc != 0) { + CERROR("Failed to query IB device: %d\n", rc); + return rc; + } + +#ifdef HAVE_OFED_TRANSPORT_IWARP + /* XXX We can't trust this value returned by Chelsio driver, it's wrong + * and we have reported the bug, remove these in the future when Chelsio + * bug got fixed. */ + if (rdma_node_get_transport(ibdev->ibd_cmid->device->node_type) == + RDMA_TRANSPORT_IWARP) + ibdev->ibd_mr_size = (1ULL << 32) - 1; +#endif + + if (ibdev->ibd_mr_size == ~0ULL) { + ibdev->ibd_mr_shift = 64; + return 0; + } + + for (ibdev->ibd_mr_shift = 0; + ibdev->ibd_mr_shift < 64; ibdev->ibd_mr_shift ++) { + if (ibdev->ibd_mr_size == (1ULL << ibdev->ibd_mr_shift) || + ibdev->ibd_mr_size == (1ULL << ibdev->ibd_mr_shift) - 1) + return 0; + } + + CERROR("Invalid mr size: "LPX64"\n", ibdev->ibd_mr_size); + return -EINVAL; +} + +int +kiblnd_dev_setup(kib_dev_t *ibdev) +{ + struct ib_mr *mr; + int i; + int rc; + __u64 mm_size; + __u64 mr_size; + int acflags = IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE; + + rc = kiblnd_dev_get_attr(ibdev); + if (rc != 0) + return rc; + + if (ibdev->ibd_mr_shift == 64) { + LIBCFS_ALLOC(ibdev->ibd_mrs, 1 * sizeof(*ibdev->ibd_mrs)); + if (ibdev->ibd_mrs == NULL) { + CERROR("Failed to allocate MRs table\n"); + return -ENOMEM; + } + + ibdev->ibd_mrs[0] = NULL; + ibdev->ibd_nmrs = 1; + + mr = ib_get_dma_mr(ibdev->ibd_pd, acflags); + if (IS_ERR(mr)) { + CERROR("Failed ib_get_dma_mr : %ld\n", PTR_ERR(mr)); + kiblnd_dev_cleanup(ibdev); + return PTR_ERR(mr); + } + + ibdev->ibd_mrs[0] = mr; + + goto out; + } + + mr_size = (1ULL << ibdev->ibd_mr_shift); + mm_size = (unsigned long)high_memory - PAGE_OFFSET; + + ibdev->ibd_nmrs = (int)((mm_size + mr_size - 1) >> ibdev->ibd_mr_shift); + + if (ibdev->ibd_mr_shift < 32 || ibdev->ibd_nmrs > 1024) { + /* it's 4T..., assume we will re-code at that time */ + CERROR("Can't support memory size: x"LPX64 + " with MR size: x"LPX64"\n", mm_size, mr_size); + return -EINVAL; + } + + /* create an array of MRs to cover all memory */ + LIBCFS_ALLOC(ibdev->ibd_mrs, sizeof(*ibdev->ibd_mrs) * ibdev->ibd_nmrs); + if (ibdev->ibd_mrs == NULL) { + CERROR("Failed to allocate MRs' table\n"); + return -ENOMEM; + } + + memset(ibdev->ibd_mrs, 0, sizeof(*ibdev->ibd_mrs) * ibdev->ibd_nmrs); + + for (i = 0; i < ibdev->ibd_nmrs; i++) { + struct ib_phys_buf ipb; + __u64 iova; + + ipb.size = ibdev->ibd_mr_size; + ipb.addr = i * mr_size; + iova = ipb.addr; + + mr = ib_reg_phys_mr(ibdev->ibd_pd, &ipb, 1, acflags, &iova); + if (IS_ERR(mr)) { + CERROR("Failed ib_reg_phys_mr addr "LPX64 + " size "LPX64" : %ld\n", + ipb.addr, ipb.size, PTR_ERR(mr)); + kiblnd_dev_cleanup(ibdev); + return PTR_ERR(mr); + } + + LASSERT (iova == ipb.addr); + + 
ibdev->ibd_mrs[i] = mr; + } + +out: + CDEBUG(D_CONSOLE, "Register global MR array, MR size: " + LPX64", array size: %d\n", + ibdev->ibd_mr_size, ibdev->ibd_nmrs); + + list_add_tail(&ibdev->ibd_list, + &kiblnd_data.kib_devs); + return 0; +} + +void +kiblnd_destroy_dev (kib_dev_t *dev) +{ + LASSERT (dev->ibd_nnets == 0); + + if (!list_empty(&dev->ibd_list)) /* on kib_devs? */ + list_del_init(&dev->ibd_list); + + kiblnd_dev_cleanup(dev); + + if (dev->ibd_pd != NULL) + ib_dealloc_pd(dev->ibd_pd); + + if (dev->ibd_cmid != NULL) + rdma_destroy_id(dev->ibd_cmid); + + LIBCFS_FREE(dev, sizeof(*dev)); } void @@ -1369,10 +1917,11 @@ kiblnd_shutdown (lnet_ni_t *ni) case IBLND_INIT_NOTHING: LASSERT (atomic_read(&net->ibn_nconns) == 0); -#if IBLND_MAP_ON_DEMAND if (net->ibn_fmrpool != NULL) ib_destroy_fmr_pool(net->ibn_fmrpool); -#endif + if (net->ibn_pmrpool != NULL) + kiblnd_destroy_pmr_pool(net->ibn_pmrpool); + if (net->ibn_dev != NULL && net->ibn_dev->ibd_nnets == 0) kiblnd_destroy_dev(net->ibn_dev); @@ -1399,18 +1948,11 @@ out: int kiblnd_base_startup (void) { - int rc; - int i; + int i; + int rc; LASSERT (kiblnd_data.kib_init == IBLND_INIT_NOTHING); - if (*kiblnd_tunables.kib_credits > *kiblnd_tunables.kib_ntx) { - CERROR("Can't set credits(%d) > ntx(%d)\n", - *kiblnd_tunables.kib_credits, - *kiblnd_tunables.kib_ntx); - return -EINVAL; - } - PORTAL_MODULE_USE; memset(&kiblnd_data, 0, sizeof(kiblnd_data)); /* zero pointers, flags etc */ @@ -1472,13 +2014,13 @@ int kiblnd_startup (lnet_ni_t *ni) { char *ifname; + kib_dev_t *ibdev = NULL; kib_net_t *net; - kib_dev_t *ibdev; struct list_head *tmp; struct timeval tv; int rc; - LASSERT (ni->ni_lnd == &the_kiblnd); + LASSERT (ni->ni_lnd == &the_o2iblnd); if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) { rc = kiblnd_base_startup(); @@ -1496,18 +2038,14 @@ kiblnd_startup (lnet_ni_t *ni) do_gettimeofday(&tv); net->ibn_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec; - ni->ni_maxtxcredits = *kiblnd_tunables.kib_credits; - ni->ni_peertxcredits = *kiblnd_tunables.kib_peercredits; + ni->ni_peertimeout = *kiblnd_tunables.kib_peertimeout; + ni->ni_maxtxcredits = *kiblnd_tunables.kib_credits; + ni->ni_peertxcredits = *kiblnd_tunables.kib_peertxcredits; + ni->ni_peerrtrcredits = *kiblnd_tunables.kib_peerrtrcredits; spin_lock_init(&net->ibn_tx_lock); INIT_LIST_HEAD(&net->ibn_idle_txs); - rc = kiblnd_alloc_tx_descs(ni); - if (rc != 0) { - CERROR("Can't allocate tx descs\n"); - goto failed; - } - if (ni->ni_interfaces[0] != NULL) { /* Use the IPoIB interface specified in 'networks=' */ @@ -1527,7 +2065,6 @@ kiblnd_startup (lnet_ni_t *ni) goto failed; } - ibdev = NULL; list_for_each (tmp, &kiblnd_data.kib_devs) { ibdev = list_entry(tmp, kib_dev_t, ibd_list); @@ -1543,7 +2080,6 @@ kiblnd_startup (lnet_ni_t *ni) int up; struct rdma_cm_id *id; struct ib_pd *pd; - struct ib_mr *mr; struct sockaddr_in addr; rc = libcfs_ipif_query(ifname, &up, &ip, &netmask); @@ -1565,18 +2101,18 @@ kiblnd_startup (lnet_ni_t *ni) memset(ibdev, 0, sizeof(*ibdev)); - INIT_LIST_HEAD(&ibdev->ibd_list); /* not yet in kib_devs */ + CFS_INIT_LIST_HEAD(&ibdev->ibd_list); /* not yet in kib_devs */ ibdev->ibd_ifip = ip; strcpy(&ibdev->ibd_ifname[0], ifname); id = rdma_create_id(kiblnd_cm_callback, ibdev, RDMA_PS_TCP); - if (!IS_ERR(id)) { - ibdev->ibd_cmid = id; - } else { + if (IS_ERR(id)) { CERROR("Can't create listen ID: %ld\n", PTR_ERR(id)); goto failed; } + ibdev->ibd_cmid = id; + memset(&addr, 0, sizeof(addr)); addr.sin_family = AF_INET; addr.sin_port = 
htons(*kiblnd_tunables.kib_service); @@ -1590,76 +2126,59 @@ kiblnd_startup (lnet_ni_t *ni) /* Binding should have assigned me an IB device */ LASSERT (id->device != NULL); + CDEBUG(D_CONSOLE, "Listener bound to %s:%u.%u.%u.%u:%d:%s\n", + ifname, HIPQUAD(ip), *kiblnd_tunables.kib_service, + id->device->name); pd = ib_alloc_pd(id->device); - if (!IS_ERR(pd)) { - ibdev->ibd_pd = pd; - } else { + if (IS_ERR(pd)) { CERROR("Can't allocate PD: %ld\n", PTR_ERR(pd)); goto failed; } -#if IBLND_MAP_ON_DEMAND - /* MR for sends and receives */ - mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE); -#else - /* MR for sends, recieves _and_ RDMA...........v */ - mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE | - IB_ACCESS_REMOTE_WRITE); -#endif - if (!IS_ERR(mr)) { - ibdev->ibd_mr = mr; - } else { - CERROR("Can't get MR: %ld\n", PTR_ERR(pd)); - goto failed; - } + ibdev->ibd_pd = pd; - rc = rdma_listen(id, 0); + rc = rdma_listen(id, 256); if (rc != 0) { CERROR("Can't start listener: %d\n", rc); goto failed; } - list_add_tail(&ibdev->ibd_list, - &kiblnd_data.kib_devs); + rc = kiblnd_dev_setup(ibdev); + if (rc != 0) { + CERROR("Can't setup device: %d\n", rc); + goto failed; + } } ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip); net->ibn_dev = ibdev; -#if IBLND_MAP_ON_DEMAND - /* FMR pool for RDMA */ - { - struct ib_fmr_pool *fmrpool; - struct ib_fmr_pool_param param = { - .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE, - .page_shift = PAGE_SHIFT, - .access = (IB_ACCESS_LOCAL_WRITE | - IB_ACCESS_REMOTE_WRITE), - .pool_size = *kiblnd_tunables.kib_fmr_pool_size, - .dirty_watermark = *kiblnd_tunables.kib_fmr_flush_trigger, - .flush_function = NULL, - .flush_arg = NULL, - .cache = *kiblnd_tunables.kib_fmr_cache}; - - if (*kiblnd_tunables.kib_fmr_pool_size < - *kiblnd_tunables.kib_ntx) { - CERROR("Can't set fmr pool size (%d) < ntx(%d)\n", - *kiblnd_tunables.kib_fmr_pool_size, - *kiblnd_tunables.kib_ntx); - goto failed; + if (*kiblnd_tunables.kib_map_on_demand > 0 || + ibdev->ibd_nmrs > 1) { /* premapping can fail if ibd_nmr > 1, + * so we always create FMR/PMR pool and + * map-on-demand if premapping failed */ + /* Map on demand */ + rc = kiblnd_ib_create_fmr_pool(ibdev, &net->ibn_fmrpool); + if (rc == -ENOSYS) { + CDEBUG(D_CONSOLE, "No FMR, creating physical mapping\n"); + + rc = kiblnd_create_pmr_pool(ibdev, &net->ibn_pmrpool); } - fmrpool = ib_create_fmr_pool(ibdev->ibd_pd, ¶m); - if (!IS_ERR(fmrpool)) { - net->ibn_fmrpool = fmrpool; - } else { - CERROR("Can't create FMR pool: %ld\n", - PTR_ERR(fmrpool)); + if (rc != 0) { + CERROR("Can't create FMR or physical mapping pool: %d, " + "please disable map_on_demand and retry\n", rc); goto failed; } + + } + + rc = kiblnd_alloc_tx_descs(ni); + if (rc != 0) { + CERROR("Can't allocate tx descs\n"); + goto failed; } -#endif kiblnd_map_tx_descs(ni); @@ -1669,6 +2188,9 @@ kiblnd_startup (lnet_ni_t *ni) return 0; failed: + if (net->ibn_dev == NULL && ibdev != NULL) + kiblnd_destroy_dev(ibdev); + kiblnd_shutdown(ni); CDEBUG(D_NET, "kiblnd_startup failed\n"); @@ -1678,7 +2200,7 @@ failed: void __exit kiblnd_module_fini (void) { - lnet_unregister_lnd(&the_kiblnd); + lnet_unregister_lnd(&the_o2iblnd); kiblnd_tunables_fini(); } @@ -1688,23 +2210,22 @@ kiblnd_module_init (void) int rc; CLASSERT (sizeof(kib_msg_t) <= IBLND_MSG_SIZE); -#if !IBLND_MAP_ON_DEMAND CLASSERT (offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) <= IBLND_MSG_SIZE); CLASSERT (offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) <= IBLND_MSG_SIZE); -#endif 
+
         rc = kiblnd_tunables_init();
         if (rc != 0)
                 return rc;

-        lnet_register_lnd(&the_kiblnd);
+        lnet_register_lnd(&the_o2iblnd);

         return 0;
 }

-MODULE_AUTHOR("Cluster File Systems, Inc. ");
-MODULE_DESCRIPTION("Kernel OpenIB gen2 LND v1.00");
+MODULE_AUTHOR("Sun Microsystems, Inc. ");
+MODULE_DESCRIPTION("Kernel OpenIB gen2 LND v2.00");
 MODULE_LICENSE("GPL");

 module_init(kiblnd_module_init);