return 1;
}
+/*
+ * Get the scheduler bound to this CPT. If the scheduler has no
+ * threads, which means that the CPT has no CPUs, then grab the
+ * next scheduler that we can use.
+ *
+ * This case would be triggered if a NUMA node is configured with
+ * no associated CPUs.
+ *
+ * \param cpt	CPT the caller would prefer to be serviced by
+ *
+ * \retval	the CPT-native scheduler when it has threads,
+ *		otherwise the first scheduler found with threads
+ * \retval	NULL when no scheduler anywhere has threads
+ *		(the node cannot service work at all)
+ */
+static struct kib_sched_info *
+kiblnd_get_scheduler(int cpt)
+{
+ struct kib_sched_info *sched;
+ int i;
+
+ sched = kiblnd_data.kib_scheds[cpt];
+
+ /* Fast path: the scheduler bound to this CPT has threads. */
+ if (sched->ibs_nthreads > 0)
+ return sched;
+
+ /*
+ * CPT-native scheduler is empty (CPU-less NUMA node); fall back
+ * to the first scheduler in the percpt array that has threads.
+ */
+ cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
+ if (sched->ibs_nthreads > 0) {
+ CDEBUG(D_NET, "scheduler[%d] has no threads. selected scheduler[%d]\n",
+ cpt, sched->ibs_cpt);
+ return sched;
+ }
+ }
+
+ return NULL;
+}
+
kib_conn_t *
kiblnd_create_conn(kib_peer_ni_t *peer_ni, struct rdma_cm_id *cmid,
int state, int version)
dev = net->ibn_dev;
cpt = lnet_cpt_of_nid(peer_ni->ibp_nid, peer_ni->ibp_ni);
- sched = kiblnd_data.kib_scheds[cpt];
+ sched = kiblnd_get_scheduler(cpt);
+
+ if (sched == NULL) {
+ CERROR("no schedulers available. node is unhealthy\n");
+ goto failed_0;
+ }
- LASSERT(sched->ibs_nthreads > 0);
+ /*
+ * The cpt might have changed if we ended up selecting a non cpt
+ * native scheduler. So use the scheduler's cpt instead.
+ */
+ cpt = sched->ibs_cpt;
LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt,
sizeof(*init_qp_attr));
ksock_sched_t *sched;
int i;
- LASSERT(info->ksi_nthreads > 0);
+ if (info->ksi_nthreads == 0) {
+ cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
+ if (info->ksi_nthreads > 0) {
+ CDEBUG(D_NET, "scheduler[%d] has no threads. selected scheduler[%d]\n",
+ cpt, info->ksi_cpt);
+ goto select_sched;
+ }
+ }
+ return NULL;
+ }
+select_sched:
sched = &info->ksi_scheds[0];
/*
* NB: it's safe so far, but info->ksi_nthreads could be changed
peer_ni->ksnp_error = 0;
sched = ksocknal_choose_scheduler_locked(cpt);
+ if (!sched) {
+ CERROR("no schedulers available. node is unhealthy\n");
+ goto failed_2;
+ }
+ /*
+ * The cpt might have changed if we ended up selecting a non cpt
+ * native scheduler. So use the scheduler's cpt instead.
+ */
+ cpt = sched->kss_info->ksi_cpt;
sched->kss_nconns++;
conn->ksnc_scheduler = sched;
info->ksi_nthreads_max = nthrs;
info->ksi_cpt = i;
- LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
- info->ksi_nthreads_max * sizeof(*sched));
- if (info->ksi_scheds == NULL)
- goto failed;
-
- for (; nthrs > 0; nthrs--) {
- sched = &info->ksi_scheds[nthrs - 1];
-
- sched->kss_info = info;
- spin_lock_init(&sched->kss_lock);
- INIT_LIST_HEAD(&sched->kss_rx_conns);
- INIT_LIST_HEAD(&sched->kss_tx_conns);
- INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
- init_waitqueue_head(&sched->kss_waitq);
+ if (nthrs != 0) {
+ LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
+ info->ksi_nthreads_max *
+ sizeof(*sched));
+ if (info->ksi_scheds == NULL)
+ goto failed;
+
+ for (; nthrs > 0; nthrs--) {
+ sched = &info->ksi_scheds[nthrs - 1];
+
+ sched->kss_info = info;
+ spin_lock_init(&sched->kss_lock);
+ INIT_LIST_HEAD(&sched->kss_rx_conns);
+ INIT_LIST_HEAD(&sched->kss_tx_conns);
+ INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
+ init_waitqueue_head(&sched->kss_waitq);
+ }
}
}