static void
kiblnd_debug_rx(struct kib_rx *rx)
{
- CDEBUG(D_CONSOLE, " %p status %d msg_type %x cred %d\n",
- rx, rx->rx_status, rx->rx_msg->ibm_type,
- rx->rx_msg->ibm_credits);
+ CDEBUG(D_CONSOLE, " %p msg_type %x cred %d\n",
+ rx, rx->rx_msg->ibm_type,
+ rx->rx_msg->ibm_credits);
}
static void
static int
kiblnd_get_completion_vector(struct kib_conn *conn, int cpt)
{
- cpumask_t *mask;
+ cpumask_var_t *mask;
int vectors;
int off;
int i;
/* hash NID to CPU id in this partition... */
ibp_nid = conn->ibc_peer->ibp_nid;
- off = do_div(ibp_nid, cpumask_weight(mask));
- for_each_cpu(i, mask) {
+ off = do_div(ibp_nid, cpumask_weight(*mask));
+ for_each_cpu(i, *mask) {
if (off-- == 0)
return i % vectors;
}
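/*
 * Illustrative sketch with hypothetical numbers: if the partition's
 * cpumask contains CPUs {2, 3, 6, 7} and the device exposes 4
 * completion vectors, a NID with nid % 4 == 2 makes the loop stop at
 * the third CPU in the mask (CPU 6) and return 6 % 4 == 2, i.e.
 * completion vector 2.  Peers whose NIDs hash to other remainders
 * spread across the remaining vectors.
 */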
* One WR for the LNet message
* And ibc_max_frags for the transfer WRs
*/
- unsigned int ret = 1 + conn->ibc_max_frags;
+ int ret;
+ int multiplier = 1 + conn->ibc_max_frags;
enum kib_dev_caps dev_caps = conn->ibc_hdev->ibh_dev->ibd_dev_caps;
/* FastReg needs two extra WRs for map and invalidate */
if (dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED)
- ret += 2;
+ multiplier += 2;
/* account for a maximum of ibc_queue_depth in-flight transfers */
- ret *= conn->ibc_queue_depth;
- return ret;
+ ret = multiplier * conn->ibc_queue_depth;
+
+ if (ret > conn->ibc_hdev->ibh_max_qp_wr) {
+ CDEBUG(D_NET, "peer_credits %u will result in send work "
+ "request size %d larger than maximum %d device "
+ "can handle\n", conn->ibc_queue_depth, ret,
+ conn->ibc_hdev->ibh_max_qp_wr);
+ conn->ibc_queue_depth =
+ conn->ibc_hdev->ibh_max_qp_wr / multiplier;
+ }
+
+ /* don't go beyond the maximum the device can handle */
+ return min(ret, conn->ibc_hdev->ibh_max_qp_wr);
}
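/*
 * Worked example with hypothetical values: ibc_max_frags == 256 with
 * FastReg enabled gives multiplier = 1 + 256 + 2 = 259; a queue depth
 * of 8 then asks for 8 * 259 = 2072 send WRs.  If the device reports
 * ibh_max_qp_wr == 2048, ibc_queue_depth is clamped to 2048 / 259 = 7
 * and the function returns min(2072, 2048) = 2048.
 */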
struct kib_conn *
INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
INIT_LIST_HEAD(&conn->ibc_active_txs);
+ INIT_LIST_HEAD(&conn->ibc_zombie_txs);
spin_lock_init(&conn->ibc_lock);
LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
init_qp_attr->qp_type = IB_QPT_RC;
init_qp_attr->send_cq = cq;
init_qp_attr->recv_cq = cq;
+ /*
+ * kiblnd_send_wrs() can change the connection's queue depth if
+ * the maximum work requests for the device is maxed out
+ */
+ init_qp_attr->cap.max_send_wr = kiblnd_send_wrs(conn);
+ init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(conn);
- conn->ibc_sched = sched;
-
- do {
- init_qp_attr->cap.max_send_wr = kiblnd_send_wrs(conn);
- init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(conn);
-
- rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
- if (!rc || conn->ibc_queue_depth < 2)
- break;
-
- conn->ibc_queue_depth--;
- } while (rc);
-
+ rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
if (rc) {
CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d, "
"send_sge: %d, recv_sge: %d\n",
goto failed_2;
}
+ conn->ibc_sched = sched;
+
if (conn->ibc_queue_depth != peer_ni->ibp_queue_depth)
CWARN("peer %s - queue depth reduced from %u to %u"
" to allow for qp creation\n",
CWARN("Error destroying CQ: %d\n", rc);
}
+ kiblnd_txlist_done(&conn->ibc_zombie_txs, -ECONNABORTED,
+ LNET_MSG_STATUS_OK);
+
if (conn->ibc_rx_pages != NULL)
kiblnd_unmap_rx_descs(conn);
fps = fpo->fpo_owner;
if (fpo->fpo_is_fmr) {
if (fmr->fmr_pfmr) {
- rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
- LASSERT(!rc);
+ ib_fmr_pool_unmap(fmr->fmr_pfmr);
fmr->fmr_pfmr = NULL;
}
tx_pages_mapped = 1;
}
- pfmr = ib_fmr_pool_map_phys(fpo->fmr.fpo_fmr_pool,
- pages, npages, iov);
+ pfmr = kib_fmr_pool_map(fpo->fmr.fpo_fmr_pool,
+ pages, npages, iov);
if (likely(!IS_ERR(pfmr))) {
fmr->fmr_key = is_rx ? pfmr->fmr->rkey
: pfmr->fmr->lkey;
#endif
hdev->ibh_mr_size = dev_attr->max_mr_size;
+ hdev->ibh_max_qp_wr = dev_attr->max_qp_wr;
/* Setup device Memory Registration capabilities */
+#ifdef HAVE_IB_DEVICE_OPS
+ if (hdev->ibh_ibdev->ops.alloc_fmr &&
+ hdev->ibh_ibdev->ops.dealloc_fmr &&
+ hdev->ibh_ibdev->ops.map_phys_fmr &&
+ hdev->ibh_ibdev->ops.unmap_fmr) {
+#else
if (hdev->ibh_ibdev->alloc_fmr &&
hdev->ibh_ibdev->dealloc_fmr &&
hdev->ibh_ibdev->map_phys_fmr &&
hdev->ibh_ibdev->unmap_fmr) {
+#endif
LCONSOLE_INFO("Using FMR for registration\n");
hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FMR_ENABLED;
} else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
rc = -ENOSYS;
}
- if (rc == 0 && hdev->ibh_mr_size == ~0ULL)
- hdev->ibh_mr_shift = 64;
- else if (rc != 0)
+ if (rc != 0)
rc = -EINVAL;
#ifndef HAVE_IB_DEVICE_ATTRS
}
static int
-kiblnd_dev_need_failover(struct kib_dev *dev)
+kiblnd_dev_need_failover(struct kib_dev *dev, struct net *ns)
{
struct rdma_cm_id *cmid;
struct sockaddr_in srcaddr;
*
* a. rdma_bind_addr(), it will conflict with listener cmid
* b. rdma_resolve_addr() to zero addr */
- cmid = kiblnd_rdma_create_id(kiblnd_dummy_callback, dev, RDMA_PS_TCP,
- IB_QPT_RC);
+ cmid = kiblnd_rdma_create_id(ns, kiblnd_dummy_callback, dev,
+ RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cmid)) {
rc = PTR_ERR(cmid);
CERROR("Failed to create cmid for failover: %d\n", rc);
}
int
-kiblnd_dev_failover(struct kib_dev *dev)
+kiblnd_dev_failover(struct kib_dev *dev, struct net *ns)
{
struct list_head zombie_tpo = LIST_HEAD_INIT(zombie_tpo);
struct list_head zombie_ppo = LIST_HEAD_INIT(zombie_ppo);
dev->ibd_can_failover ||
dev->ibd_hdev == NULL);
- rc = kiblnd_dev_need_failover(dev);
+ rc = kiblnd_dev_need_failover(dev, ns);
if (rc <= 0)
goto out;
rdma_destroy_id(cmid);
}
- cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, dev, RDMA_PS_TCP,
- IB_QPT_RC);
+ cmid = kiblnd_rdma_create_id(ns, kiblnd_cm_callback, dev, RDMA_PS_TCP,
+ IB_QPT_RC);
if (IS_ERR(cmid)) {
rc = PTR_ERR(cmid);
CERROR("Failed to create cmid for failover: %d\n", rc);
LIBCFS_FREE(dev, sizeof(*dev));
}
-static struct kib_dev *
-kiblnd_create_dev(char *ifname)
-{
- struct net_device *netdev;
- struct in_device *in_dev;
- struct kib_dev *dev;
- int flags;
- int rc;
-
- rtnl_lock();
- netdev = dev_get_by_name(&init_net, ifname);
- if (!netdev) {
- CERROR("Can't find IPoIB interface %s\n",
- ifname);
- goto unlock;
- }
-
- flags = dev_get_flags(netdev);
- if (!(flags & IFF_UP)) {
- CERROR("Can't query IPoIB interface %s: it's down\n", ifname);
- goto unlock;
- }
-
- LIBCFS_ALLOC(dev, sizeof(*dev));
- if (!dev)
- goto unlock;
-
- dev->ibd_can_failover = !!(flags & IFF_MASTER);
-
- INIT_LIST_HEAD(&dev->ibd_nets);
- INIT_LIST_HEAD(&dev->ibd_list); /* not yet in kib_devs */
- INIT_LIST_HEAD(&dev->ibd_fail_list);
-
- in_dev = __in_dev_get_rtnl(netdev);
- if (!in_dev) {
- kfree(dev);
- goto unlock;
- }
-
- for_primary_ifa(in_dev)
- if (strcmp(ifa->ifa_label, ifname) == 0) {
- dev->ibd_ifip = ntohl(ifa->ifa_local);
- break;
- }
- endfor_ifa(in_dev);
- rtnl_unlock();
-
- if (dev->ibd_ifip == 0) {
- CERROR("Can't initialize device: no IP address\n");
- LIBCFS_FREE(dev, sizeof(*dev));
- return NULL;
- }
- strcpy(&dev->ibd_ifname[0], ifname);
-
- /* initialize the device */
- rc = kiblnd_dev_failover(dev);
- if (rc != 0) {
- CERROR("Can't initialize device: %d\n", rc);
- LIBCFS_FREE(dev, sizeof(*dev));
- return NULL;
- }
-
- list_add_tail(&dev->ibd_list, &kiblnd_data.kib_devs);
- return dev;
-unlock:
- rtnl_unlock();
- return NULL;
-}
-
static void
kiblnd_base_shutdown(void)
{
}
static int
-kiblnd_base_startup(void)
+kiblnd_base_startup(struct net *ns)
{
struct kib_sched_info *sched;
int rc;
}
if (*kiblnd_tunables.kib_dev_failover != 0)
- rc = kiblnd_thread_start(kiblnd_failover_thread, NULL,
+ rc = kiblnd_thread_start(kiblnd_failover_thread, ns,
"kiblnd_failover");
if (rc != 0) {
return rc;
}
-static int
-kiblnd_dev_start_threads(struct kib_dev *dev, int newdev, u32 *cpts, int ncpts)
+static int kiblnd_dev_start_threads(struct kib_dev *dev, u32 *cpts, int ncpts)
{
int cpt;
int rc;
cpt = (cpts == NULL) ? i : cpts[i];
sched = kiblnd_data.kib_scheds[cpt];
- if (!newdev && sched->ibs_nthreads > 0)
+ if (sched->ibs_nthreads > 0)
continue;
rc = kiblnd_start_schedulers(kiblnd_data.kib_scheds[cpt]);
return 0;
}
-static struct kib_dev *
-kiblnd_dev_search(char *ifname)
-{
- struct kib_dev *alias = NULL;
- struct kib_dev *dev;
- char *colon;
- char *colon2;
-
- colon = strchr(ifname, ':');
- list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
- if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
- return dev;
-
- if (alias != NULL)
- continue;
-
- colon2 = strchr(dev->ibd_ifname, ':');
- if (colon != NULL)
- *colon = 0;
- if (colon2 != NULL)
- *colon2 = 0;
-
- if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
- alias = dev;
-
- if (colon != NULL)
- *colon = ':';
- if (colon2 != NULL)
- *colon2 = ':';
- }
- return alias;
-}
-
static int
kiblnd_startup(struct lnet_ni *ni)
{
char *ifname;
+ struct lnet_inetdev *ifaces = NULL;
struct kib_dev *ibdev = NULL;
struct kib_net *net;
unsigned long flags;
int rc;
- int newdev;
- int node_id;
+ int i;
LASSERT (ni->ni_net->net_lnd == &the_o2iblnd);
if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
- rc = kiblnd_base_startup();
+ rc = kiblnd_base_startup(ni->ni_net_ns);
if (rc != 0)
return rc;
}
kiblnd_tunables_setup(ni);
+ /*
+ * ni_interfaces is only to support legacy pre-Multi-Rail
+ * tcp bonding for ksocklnd. Multi-Rail wants each secondary
+ * IP to be treated as a unique 'struct ni' interface instead.
+ */
if (ni->ni_interfaces[0] != NULL) {
/* Use the IPoIB interface specified in 'networks=' */
-
- CLASSERT(LNET_INTERFACES_NUM > 1);
if (ni->ni_interfaces[1] != NULL) {
- CERROR("Multiple interfaces not supported\n");
+ CERROR("ko2iblnd: Multiple interfaces not supported\n");
goto failed;
}
goto failed;
}
- ibdev = kiblnd_dev_search(ifname);
+ rc = lnet_inet_enumerate(&ifaces, ni->ni_net_ns);
+ if (rc < 0)
+ goto failed;
+
+ for (i = 0; i < rc; i++) {
+ if (strcmp(ifname, ifaces[i].li_name) == 0)
+ break;
+ }
+
+ if (i == rc) {
+ CERROR("ko2iblnd: No matching interfaces\n");
+ rc = -ENOENT;
+ goto failed;
+ }
+
+ LIBCFS_ALLOC(ibdev, sizeof(*ibdev));
+ if (!ibdev) {
+ rc = -ENOMEM;
+ goto failed;
+ }
- newdev = ibdev == NULL;
- /* hmm...create kib_dev even for alias */
- if (ibdev == NULL || strcmp(&ibdev->ibd_ifname[0], ifname) != 0)
- ibdev = kiblnd_create_dev(ifname);
+ ibdev->ibd_ifip = ifaces[i].li_ipaddr;
+ strlcpy(ibdev->ibd_ifname, ifaces[i].li_name,
+ sizeof(ibdev->ibd_ifname));
+ ibdev->ibd_can_failover = !!(ifaces[i].li_flags & IFF_MASTER);
- if (ibdev == NULL)
+ INIT_LIST_HEAD(&ibdev->ibd_nets);
+ INIT_LIST_HEAD(&ibdev->ibd_list); /* not yet in kib_devs */
+ INIT_LIST_HEAD(&ibdev->ibd_fail_list);
+
+ /* initialize the device */
+ rc = kiblnd_dev_failover(ibdev, ni->ni_net_ns);
+ if (rc) {
+ CERROR("ko2iblnd: Can't initialize device: rc = %d\n", rc);
goto failed;
+ }
- node_id = dev_to_node(ibdev->ibd_hdev->ibh_ibdev->dma_device);
- ni->ni_dev_cpt = cfs_cpt_of_node(lnet_cpt_table(), node_id);
+ list_add_tail(&ibdev->ibd_list, &kiblnd_data.kib_devs);
net->ibn_dev = ibdev;
ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);
- rc = kiblnd_dev_start_threads(ibdev, newdev,
- ni->ni_cpts, ni->ni_ncpts);
+ ni->ni_dev_cpt = ifaces[i].li_cpt;
+
+ rc = kiblnd_dev_start_threads(ibdev, ni->ni_cpts, ni->ni_ncpts);
if (rc != 0)
goto failed;
if (net != NULL && net->ibn_dev == NULL && ibdev != NULL)
kiblnd_destroy_dev(ibdev);
+ kfree(ifaces);
kiblnd_shutdown(ni);
CDEBUG(D_NET, "kiblnd_startup failed\n");