kgn_data_t kgnilnd_data;
+/* Spawn a kernel thread named "<name>_<id>" running fn(arg).
+ *
+ * On success the thread is already running (kthread_run = create + wake),
+ * the global live-thread counter kgn_nthreads is bumped, and 0 is
+ * returned.  On failure the PTR_ERR() value from kthread_run() is
+ * returned and no state is changed. */
+int
+kgnilnd_thread_start(int(*fn)(void *arg), void *arg, char *name, int id)
+{
+ struct task_struct *thrd;
+
+ thrd = kthread_run(fn, arg, "%s_%02d", name, id);
+ if (IS_ERR(thrd))
+ return PTR_ERR(thrd);
+
+ /* counted so shutdown can wait for all threads to exit */
+ atomic_inc(&kgnilnd_data.kgn_nthreads);
+ return 0;
+}
+
+/* bind scheduler threads to cpus */
+/* Bind scheduler threads to cpus.
+ *
+ * Creates up to kgn_sched_threads scheduler threads, pinning each one to
+ * a distinct online cpu (cpu 0 is skipped since all interrupts are
+ * processed there).  If fewer eligible cpus exist than requested
+ * threads, only that many threads are started.  Returns 0 on success or
+ * the kthread_create() PTR_ERR value on failure; threads started before
+ * the failure are left running for the caller's cleanup path. */
+int
+kgnilnd_start_sd_threads(void)
+{
+ int cpu;
+ int i = 0;
+ struct task_struct *task;
+
+ for_each_online_cpu(cpu) {
+ /* don't bind to cpu 0 - all interrupts are processed here */
+ if (cpu == 0)
+ continue;
+
+ task = kthread_create(kgnilnd_scheduler, (void *)((long)i),
+ "%s_%02d", "kgnilnd_sd", i);
+ if (IS_ERR(task)) {
+ /* message format matches the unbound spawn path */
+ CERROR("Can't spawn gnilnd scheduler[%d]: %ld\n", i,
+ PTR_ERR(task));
+ return PTR_ERR(task);
+ }
+
+ /* bind before the first wakeup so the thread never migrates */
+ kthread_bind(task, cpu);
+ wake_up_process(task);
+ atomic_inc(&kgnilnd_data.kgn_nthreads);
+
+ if (++i >= *kgnilnd_tunables.kgn_sched_threads)
+ break;
+ }
+
+ return 0;
+}
+
/* needs write_lock on kgn_peer_conn_lock */
int
kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
}
/* threads will load balance across devs as they are available */
- for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
- rc = kgnilnd_thread_start(kgnilnd_scheduler, (void *)((long)i),
- "kgnilnd_sd", i);
- if (rc != 0) {
- CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
- i, rc);
+ if (*kgnilnd_tunables.kgn_thread_affinity) {
+ rc = kgnilnd_start_sd_threads();
+ if (rc != 0)
GOTO(failed, rc);
+ } else {
+ for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
+ rc = kgnilnd_thread_start(kgnilnd_scheduler,
+ (void *)((long)i),
+ "kgnilnd_sd", i);
+ if (rc != 0) {
+ CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
+ i, rc);
+ GOTO(failed, rc);
+ }
}
}
}
}
-
-
/* flag everything initialised */
kgnilnd_data.kgn_init = GNILND_INIT_ALL;
/*****************************************************/
int *kgn_fast_reconn; /* fast reconnection on conn timeout */
int *kgn_efault_lbug; /* LBUG on receiving an EFAULT */
int *kgn_max_purgatory; /* # conns/peer to keep in purgatory */
+ int *kgn_thread_affinity; /* bind scheduler threads to cpus */
#if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM
cfs_sysctl_table_header_t *kgn_sysctl; /* sysctl interface */
#endif
#define kgnilnd_schedule_conn(conn) \
_kgnilnd_schedule_conn(conn, __func__, __LINE__, 0);
-#define kgnilnd_schedule_conn_refheld(conn, refheld) \
+#define kgnilnd_schedule_conn_refheld(conn, refheld) \
_kgnilnd_schedule_conn(conn, __func__, __LINE__, refheld);
-static inline int
-kgnilnd_thread_start(int(*fn)(void *arg), void *arg, char *name, int id)
-{
- struct task_struct *thrd = kthread_run(fn, arg, "%s_%02d", name, id);
- if (IS_ERR(thrd))
- return PTR_ERR(thrd);
-
- atomic_inc(&kgnilnd_data.kgn_nthreads);
- return 0;
-}
-
static inline void
kgnilnd_thread_fini(void)
{
int kgnilnd_scheduler(void *arg);
int kgnilnd_dgram_mover(void *arg);
int kgnilnd_rca(void *arg);
+int kgnilnd_thread_start(int(*fn)(void *arg), void *arg, char *name, int id);
int kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev);
int kgnilnd_conn_isdup_locked(kgn_peer_t *peer, kgn_conn_t *newconn);
CFS_MODULE_PARM(max_conn_purg, "i", int, 0644,
"Max number of connections per peer in purgatory");
+/* 0 = schedulers float across cpus (default); non-zero = bind each
+ * scheduler thread to its own cpu via kgnilnd_start_sd_threads().
+ * Read-only (0444): must be set at module load time. */
+static int thread_affinity = 0;
+CFS_MODULE_PARM(thread_affinity, "i", int, 0444,
+ "scheduler thread affinity default 0 (disabled)");
+
kgn_tunables_t kgnilnd_tunables = {
.kgn_min_reconnect_interval = &min_reconnect_interval,
.kgn_max_reconnect_interval = &max_reconnect_interval,
.kgn_eager_credits = &eager_credits,
.kgn_fast_reconn = &fast_reconn,
.kgn_efault_lbug = &efault_lbug,
+ .kgn_thread_affinity = &thread_affinity,
.kgn_max_purgatory = &max_conn_purg
};
},
{
INIT_CTL_NAME
+ .procname = "thread_affinity"
+ .data = &thread_affinity,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ INIT_CTL_NAME
.procname = "max_conn_purg"
.data = &max_conn_purg,
.maxlen = sizeof(int),