return 1;
}
+/*
+ * Get the scheduler bound to this CPT. If the scheduler has no
+ * threads, which means that the CPT has no CPUs, then grab the
+ * next scheduler that we can use.
+ *
+ * This case would be triggered if a NUMA node is configured with
+ * no associated CPUs.
+ *
+ * Returns NULL when no scheduler in the per-CPT table has any
+ * threads at all; the caller must check for this and fail the
+ * operation (there are no usable CPUs on the node).
+ */
+static struct kib_sched_info *
+kiblnd_get_scheduler(int cpt)
+{
+ struct kib_sched_info *sched;
+ int i;
+
+ sched = kiblnd_data.kib_scheds[cpt];
+
+ /* Fast path: the CPT-native scheduler has running threads. */
+ if (sched->ibs_nthreads > 0)
+ return sched;
+
+ /*
+ * Fallback: scan every per-CPT scheduler and take the first one
+ * with threads. Logged at D_NET so the non-native selection is
+ * visible when debugging placement issues.
+ */
+ cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
+ if (sched->ibs_nthreads > 0) {
+ CDEBUG(D_NET, "scheduler[%d] has no threads. selected scheduler[%d]\n",
+ cpt, sched->ibs_cpt);
+ return sched;
+ }
+ }
+
+ return NULL;
+}
+
kib_conn_t *
kiblnd_create_conn(kib_peer_ni_t *peer_ni, struct rdma_cm_id *cmid,
int state, int version)
dev = net->ibn_dev;
cpt = lnet_cpt_of_nid(peer_ni->ibp_nid, peer_ni->ibp_ni);
- sched = kiblnd_data.kib_scheds[cpt];
+ sched = kiblnd_get_scheduler(cpt);
- LASSERT(sched->ibs_nthreads > 0);
+ if (sched == NULL) {
+ CERROR("no schedulers available. node is unhealthy\n");
+ goto failed_0;
+ }
+
+ /*
+ * The cpt might have changed if we ended up selecting a non cpt
+ * native scheduler. So use the scheduler's cpt instead.
+ */
+ cpt = sched->ibs_cpt;
LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt,
sizeof(*init_qp_attr));
break;
}
- LASSERT (conn->ibc_cmid != NULL);
- data->ioc_nid = conn->ibc_peer->ibp_nid;
- if (conn->ibc_cmid->route.path_rec == NULL)
- data->ioc_u32[0] = 0; /* iWarp has no path MTU */
- else
- data->ioc_u32[0] =
- ib_mtu_enum_to_int(conn->ibc_cmid->route.path_rec->mtu);
- kiblnd_conn_decref(conn);
- break;
+ LASSERT(conn->ibc_cmid != NULL);
+ data->ioc_nid = conn->ibc_peer->ibp_nid;
+ if (conn->ibc_cmid->route.path_rec == NULL)
+ data->ioc_u32[0] = 0; /* iWarp has no path MTU */
+ else
+ data->ioc_u32[0] =
+ ib_mtu_enum_to_int(conn->ibc_cmid->route.path_rec->mtu);
+ kiblnd_conn_decref(conn);
+ break;
}
case IOC_LIBCFS_CLOSE_CONNECTION: {
rc = kiblnd_close_matching_conns(ni, data->ioc_nid);
return 0;
}
spin_unlock(&fps->fps_lock);
- rc = -EBUSY;
+ rc = -EAGAIN;
}
spin_lock(&fps->fps_lock);
hdev->ibh_cmid = cmid;
hdev->ibh_ibdev = cmid->device;
-#ifdef HAVE_IB_GET_DMA_MR
- pd = ib_alloc_pd(cmid->device);
-#else
+#ifdef HAVE_IB_ALLOC_PD_2ARGS
pd = ib_alloc_pd(cmid->device, 0);
+#else
+ pd = ib_alloc_pd(cmid->device);
#endif
if (IS_ERR(pd)) {
rc = PTR_ERR(pd);