}
for (i = 0; i < nthrs; i++) {
- long id;
- char name[20];
- id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i);
- snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld",
- KIB_THREAD_CPT(id), KIB_THREAD_TID(id));
- rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name);
+ long id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i);
+
+ rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id,
+ "kiblnd_sd_%02ld_%02ld",
+ KIB_THREAD_CPT(id), KIB_THREAD_TID(id));
if (rc == 0)
continue;
int kiblnd_connd (void *arg);
int kiblnd_scheduler(void *arg);
-int kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
+/* Spawn a kernel thread running @fn(@data) with a printf-style name
+ * built from @namefmt/@arg, replacing the old helper that required a
+ * pre-formatted name buffer.  On success the module-wide thread count
+ * kib_nthreads is incremented.  Evaluates to 0 on success or the
+ * negative errno encoded in kthread_run()'s ERR_PTR (PTR_ERR_OR_ZERO).
+ * @fn and @data are each expanded exactly once, so side-effecting
+ * arguments are safe; __task follows the usual kernel convention for
+ * macro-local identifiers.
+ */
+#define kiblnd_thread_start(fn, data, namefmt, arg...) \
+ ({ \
+ struct task_struct *__task = kthread_run(fn, data, \
+ namefmt, ##arg); \
+ if (!IS_ERR(__task)) \
+ atomic_inc(&kiblnd_data.kib_nthreads); \
+ PTR_ERR_OR_ZERO(__task); \
+ })
+
int kiblnd_failover_thread (void *arg);
int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages);
return rc;
}
-int
-kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
-{
- struct task_struct *task = kthread_run(fn, arg, "%s", name);
-
- if (IS_ERR(task))
- return PTR_ERR(task);
-
- atomic_inc(&kiblnd_data.kib_nthreads);
- return 0;
-}
-
static void
kiblnd_thread_fini (void)
{
}
for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
- char name[16];
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
ksocknal_data.ksnd_connd_starting++;
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
-
- snprintf(name, sizeof(name), "socknal_cd%02d", i);
rc = ksocknal_thread_start(ksocknal_connd,
- (void *)((uintptr_t)i), name);
+ (void *)((uintptr_t)i),
+ "socknal_cd%02d", i);
if (rc != 0) {
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
ksocknal_data.ksnd_connd_starting--;
for (i = 0; i < nthrs; i++) {
long id;
- char name[20];
id = KSOCK_THREAD_ID(sched->kss_cpt, sched->kss_nthreads + i);
- snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
- sched->kss_cpt, (int)KSOCK_THREAD_SID(id));
-
- rc = ksocknal_thread_start(ksocknal_scheduler,
- (void *)id, name);
+ rc = ksocknal_thread_start(ksocknal_scheduler, (void *)id,
+ "socknal_sd%02d_%02d",
+ sched->kss_cpt,
+ (int)KSOCK_THREAD_SID(id));
if (rc == 0)
continue;
extern void ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn);
extern void ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist,
int error);
-extern int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
+/* Spawn a kernel thread running @fn(@data) named via the printf-style
+ * @namefmt/@arg, replacing the old helper that took a pre-formatted
+ * name string.  On success the socknal thread counter ksnd_nthreads is
+ * incremented.  Evaluates to 0 on success or the negative errno from
+ * kthread_run()'s ERR_PTR (PTR_ERR_OR_ZERO).  @fn and @data are each
+ * expanded exactly once inside the statement expression, so
+ * side-effecting arguments are safe.
+ */
+#define ksocknal_thread_start(fn, data, namefmt, arg...) \
+ ({ \
+ struct task_struct *__task = kthread_run(fn, data, \
+ namefmt, ##arg); \
+ if (!IS_ERR(__task)) \
+ atomic_inc(&ksocknal_data.ksnd_nthreads); \
+ PTR_ERR_OR_ZERO(__task); \
+ })
+
extern void ksocknal_thread_fini(void);
extern void ksocknal_launch_all_connections_locked(struct ksock_peer_ni *peer_ni);
extern struct ksock_conn_cb *ksocknal_find_connectable_conn_cb_locked(struct ksock_peer_ni *peer_ni);
return (-EIO);
}
-int
-ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
-{
- struct task_struct *task = kthread_run(fn, arg, "%s", name);
-
- if (IS_ERR(task))
- return PTR_ERR(task);
-
- atomic_inc(&ksocknal_data.ksnd_nthreads);
- return 0;
-}
-
void
ksocknal_thread_fini (void)
{
static int
ksocknal_connd_check_start(time64_t sec, long *timeout)
{
- char name[16];
int rc;
int total = ksocknal_data.ksnd_connd_starting +
ksocknal_data.ksnd_connd_running;
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
/* NB: total is the next id */
- snprintf(name, sizeof(name), "socknal_cd%02d", total);
- rc = ksocknal_thread_start(ksocknal_connd, NULL, name);
+ rc = ksocknal_thread_start(ksocknal_connd, NULL,
+ "socknal_cd%02d", total);
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
if (rc == 0)