+/*
+ * Return the scheduler associated with @cpt. A CPT that was configured
+ * with no CPUs (e.g. a CPU-less NUMA node) ends up with a scheduler
+ * that has zero threads; in that case fall back to the first scheduler
+ * in the per-CPT table that does have threads.
+ *
+ * Returns NULL only if no scheduler in the system has any threads.
+ */
+static struct kib_sched_info *
+kiblnd_get_scheduler(int cpt)
+{
+	struct kib_sched_info *sched = kiblnd_data.kib_scheds[cpt];
+	int i;
+
+	/* Fast path: this CPT's own scheduler is usable. */
+	if (sched->ibs_nthreads > 0)
+		return sched;
+
+	/* Fallback: scan all schedulers for one with running threads. */
+	cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
+		if (sched->ibs_nthreads <= 0)
+			continue;
+
+		CDEBUG(D_NET, "scheduler[%d] has no threads. selected scheduler[%d]\n",
+		       cpt, sched->ibs_cpt);
+		return sched;
+	}
+
+	return NULL;
+}
+
+/*
+ * Compute the number of send work requests to size the QP with:
+ * one WR for the LNet message plus ibc_max_frags WRs for the RDMA
+ * transfer, per in-flight transfer (ibc_queue_depth of them).
+ *
+ * Side effect: if the device cannot support that many WRs, the
+ * connection's ibc_queue_depth is scaled down to fit. The returned
+ * value is always capped at the device limit (ibh_max_qp_wr).
+ */
+static unsigned int kiblnd_send_wrs(struct kib_conn *conn)
+{
+	/* One WR for the LNet message + ibc_max_frags transfer WRs */
+	int wrs_per_xfer = 1 + conn->ibc_max_frags;
+	int max_wr = conn->ibc_hdev->ibh_max_qp_wr;
+	int wrs;
+
+	/* FastReg needs two extra WRs for map and invalidate */
+	if (conn->ibc_hdev->ibh_dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED)
+		wrs_per_xfer += 2;
+
+	/* account for a maximum of ibc_queue_depth in-flight transfers */
+	wrs = wrs_per_xfer * conn->ibc_queue_depth;
+	if (wrs > max_wr) {
+		CDEBUG(D_NET, "peer_credits %u will result in send work "
+		       "request size %d larger than maximum %d device "
+		       "can handle\n", conn->ibc_queue_depth, wrs, max_wr);
+		/* shrink the queue depth to what the device can take */
+		conn->ibc_queue_depth = max_wr / wrs_per_xfer;
+	}
+
+	/* don't go beyond the maximum the device can handle */
+	return min(wrs, max_wr);
+}
+
+struct kib_conn *
+kiblnd_create_conn(struct kib_peer_ni *peer_ni, struct rdma_cm_id *cmid,