kiblnd_data.kib_init = IBLND_INIT_DATA;
/*****************************************************/
- for (i = 0; i < IBLND_N_SCHED; i++) {
+ for (i = 0; i < *kiblnd_tunables.kib_schedulers; i++) {
rc = kiblnd_thread_start(kiblnd_scheduler, (void *)((long)i));
if (rc != 0) {
CERROR("Can't spawn o2iblnd scheduler[%d]: %d\n",
CFS_MODULE_PARM(use_privileged_port, "i", int, 0644,
"use privileged port when initiating connection");
+static int schedulers; /* initialized to zero by compiler */
+CFS_MODULE_PARM(schedulers, "i", int, 0444,
+ "Schedulers");
+
kib_tunables_t kiblnd_tunables = {
.kib_dev_failover = &dev_failover,
.kib_service = &service,
.kib_fmr_cache = &fmr_cache,
.kib_pmr_pool_size = &pmr_pool_size,
.kib_require_priv_port = &require_privileged_port,
- .kib_use_priv_port = &use_privileged_port
+ .kib_use_priv_port = &use_privileged_port,
+ .kib_schedulers = &schedulers
};
#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
O2IBLND_FMR_FLUSH_TRIGGER,
O2IBLND_FMR_CACHE,
O2IBLND_PMR_POOL_SIZE,
- O2IBLND_DEV_FAILOVER
+ O2IBLND_DEV_FAILOVER,
+ O2IBLND_SCHEDULERS
};
#else
#define O2IBLND_FMR_CACHE CTL_UNNUMBERED
#define O2IBLND_PMR_POOL_SIZE CTL_UNNUMBERED
#define O2IBLND_DEV_FAILOVER CTL_UNNUMBERED
+#define O2IBLND_SCHEDULERS CTL_UNNUMBERED
#endif
.mode = 0444,
.proc_handler = &proc_dointvec
},
+ {
+ .ctl_name = O2IBLND_SCHEDULERS,
+ .procname = "schedulers",
+ .data = &schedulers,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec
+ },
{0}
};
*kiblnd_tunables.kib_concurrent_sends, *kiblnd_tunables.kib_peertxcredits);
}
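+	/* fall back to IBLND_N_SCHED when "schedulers" is unset or invalid,
+	 * and never start more scheduler threads than IBLND_N_SCHED */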
+ if (*kiblnd_tunables.kib_schedulers <= 0) {
+ *kiblnd_tunables.kib_schedulers = IBLND_N_SCHED;
+ } else if (*kiblnd_tunables.kib_schedulers > IBLND_N_SCHED) {
+ CWARN("Number of schedulers %d exceeds machine capacity, lowering to %d .\n",
+ *kiblnd_tunables.kib_schedulers, IBLND_N_SCHED);
+ *kiblnd_tunables.kib_schedulers = IBLND_N_SCHED;
+ }
+
kiblnd_sysctl_init();
return 0;
}