kgn_data_t kgnilnd_data;
+/* Spawn a kernel thread running fn(arg), named "<name>_<NN>" (two-digit
+ * id, e.g. "kgnilnd_sd_03").  On success, bumps the global live-thread
+ * counter kgn_nthreads (the shutdown path waits for it to drain back to
+ * zero; presumably each thread decrements it on exit — verify in
+ * kgnilnd_thread_fini or equivalent).
+ *
+ * Returns 0 on success, or the negative PTR_ERR() from kthread_run()
+ * on failure (in which case kgn_nthreads is NOT incremented). */
+int
+kgnilnd_thread_start(int(*fn)(void *arg), void *arg, char *name, int id)
+{
+ struct task_struct *thrd;
+
+ thrd = kthread_run(fn, arg, "%s_%02d", name, id);
+ if (IS_ERR(thrd))
+ return PTR_ERR(thrd);
+
+ /* track the live thread so shutdown can wait for it */
+ atomic_inc(&kgnilnd_data.kgn_nthreads);
+ return 0;
+}
+
+/* bind scheduler threads to cpus
+ *
+ * Start up to *kgn_sched_threads scheduler threads, pinning one per
+ * online CPU (skipping CPU 0, which services all interrupts).  Uses
+ * kthread_create() + kthread_bind() + wake_up_process() instead of
+ * kthread_run() so the CPU affinity is in place before the thread
+ * first executes.  Each successful start bumps kgn_nthreads, mirroring
+ * kgnilnd_thread_start().
+ *
+ * Returns 0 on success or the negative PTR_ERR() from a failed
+ * kthread_create().  NOTE(review): on mid-loop failure, threads
+ * already started are left running — presumably the caller's failure
+ * path (base-startup teardown waiting on kgn_nthreads) reaps them;
+ * confirm. */
+int
+kgnilnd_start_sd_threads(void)
+{
+ int cpu;
+ int i = 0;
+ struct task_struct *task;
+
+ for_each_online_cpu(cpu) {
+ /* don't bind to cpu 0 - all interrupts are processed here */
+ if (cpu == 0)
+ continue;
+
+ /* scheduler id is smuggled through the void* arg as a long */
+ task = kthread_create(kgnilnd_scheduler, (void *)((long)i),
+ "%s_%02d", "kgnilnd_sd", i);
+ if (!IS_ERR(task)) {
+ kthread_bind(task, cpu);
+ wake_up_process(task);
+ } else {
+ CERROR("Can't spawn gnilnd scheduler[%d] %ld\n", i,
+ PTR_ERR(task));
+ return PTR_ERR(task);
+ }
+ atomic_inc(&kgnilnd_data.kgn_nthreads);
+
+ /* stop once the tunable thread count is reached, even if
+ * more CPUs remain */
+ if (++i >= *kgnilnd_tunables.kgn_sched_threads) {
+ break;
+ }
+ }
+
+ return 0;
+}
+
/* needs write_lock on kgn_peer_conn_lock */
int
kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
GOTO(failed, rc = -ENOMEM);
}
+ mutex_init(&conn->gnc_smsg_mutex);
+ mutex_init(&conn->gnc_rdma_mutex);
atomic_set(&conn->gnc_refcount, 1);
atomic_set(&conn->gnc_reaper_noop, 0);
atomic_set(&conn->gnc_sched_noop, 0);
atomic_read(&kgnilnd_data.kgn_npending_detach) ||
atomic_read(&kgnilnd_data.kgn_npending_unlink)) {
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, "Waiting on %d peers %d closes %d detaches\n",
*device_id = conn->gnc_device->gnd_host_id;
*peerstamp = conn->gnc_peerstamp;
- *tx_seq = conn->gnc_tx_seq;
- *rx_seq = conn->gnc_rx_seq;
+ *tx_seq = atomic_read(&conn->gnc_tx_seq);
+ *rx_seq = atomic_read(&conn->gnc_rx_seq);
*fmaq_len = kgnilnd_count_list(&conn->gnc_fmaq);
*nfma = atomic_read(&conn->gnc_nlive_fma);
*nrdma = atomic_read(&conn->gnc_nlive_rdma);
cq_size = *kgnilnd_tunables.kgn_credits * 2 * 3;
rrc = kgnilnd_cdm_create(dev->gnd_id, *kgnilnd_tunables.kgn_ptag,
- GNILND_COOKIE, 0,
+ *kgnilnd_tunables.kgn_pkey, 0,
&dev->gnd_domain);
if (rrc != GNI_RC_SUCCESS) {
CERROR("Can't create CDM %d (%d)\n", dev->gnd_id, rrc);
EXIT;
}
-
int kgnilnd_base_startup(void)
{
struct timeval tv;
/* zero pointers, flags etc */
memset(&kgnilnd_data, 0, sizeof(kgnilnd_data));
+ kgnilnd_check_kgni_version();
/* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
* a unique (for all time) connstamp so we can uniquely identify
}
/* threads will load balance across devs as they are available */
- for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
- rc = kgnilnd_thread_start(kgnilnd_scheduler, (void *)((long)i),
- "kgnilnd_sd", i);
- if (rc != 0) {
- CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
- i, rc);
+ if (*kgnilnd_tunables.kgn_thread_affinity) {
+ rc = kgnilnd_start_sd_threads();
+ if (rc != 0)
GOTO(failed, rc);
+ } else {
+ for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
+ rc = kgnilnd_thread_start(kgnilnd_scheduler,
+ (void *)((long)i),
+ "kgnilnd_sd", i);
+ if (rc != 0) {
+ CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
+ i, rc);
+ GOTO(failed, rc);
+ }
}
}
}
}
-
-
/* flag everything initialised */
kgnilnd_data.kgn_init = GNILND_INIT_ALL;
/*****************************************************/
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
"Waiting for conns to be cleaned up %d\n",atomic_read(&kgnilnd_data.kgn_nconns));
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
}
/* Peer state all cleaned up BEFORE setting shutdown, so threads don't
* have to worry about shutdown races. NB connections may be created
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
"Waiting for ruhroh thread to terminate\n");
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
}
/* Flag threads to terminate */
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"Waiting for %d threads to terminate\n",
atomic_read(&kgnilnd_data.kgn_nthreads));
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
}
LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
"Waiting for %d references to clear on net %d\n",
atomic_read(&net->gnn_refcount),
net->gnn_netnum);
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
}
/* release ref from kgnilnd_startup */