kernel_thread() is a low level kernel function.
Instead of calling kernel_thread() and relying on daemonize()
to create new kernel threads, we should really switch to
kthread_run() and eliminate daemonize().
All callers are converted except for those in
lnet/klnds/gnilnd/gnilnd*, as requested by James.
Signed-off-by: Peng Tao <tao.peng@emc.com>
Signed-off-by: James Simmons <uja.ornl@gmail.com>
Change-Id: I9f977bfc5d58192957d8c8d634204f4910edfa2a
Reviewed-on: http://review.whamcloud.com/5655
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Keith Mannthey <keith.mannthey@intel.com>
Reviewed-by: Nathaniel Clark <nathaniel.l.clark@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
])
])
-AC_DEFUN([LIBCFS_HAVE_OOM_H],
-[LB_CHECK_FILE([$LINUX/include/linux/oom.h], [
- AC_DEFINE(HAVE_LINUX_OOM_H, 1,
- [kernel has include/oom.h])
-],[
- AC_MSG_RESULT([no])
-])
-])
-
AC_DEFUN([LIBCFS_HAVE_KEYTYPE_H],
[LB_CHECK_FILE([$LINUX/include/linux/key-type.h], [
AC_DEFINE(HAVE_LINUX_KEYTYPE_H, 1,
])
])
-# 2.6.18 store oom parameters in task struct.
-# 2.6.32 store oom parameters in signal struct
-AC_DEFUN([LIBCFS_OOMADJ_IN_SIG],
-[AC_MSG_CHECKING([kernel store oom parameters in task])
-LB_LINUX_TRY_COMPILE([
- #include <linux/sched.h>
-],[
- ((struct signal_struct *)0)->oom_adj = 0;
-],[
- AC_MSG_RESULT(yes)
- AC_DEFINE(HAVE_OOMADJ_IN_SIG, 1,
- [kernel store a oom parameters in signal struct])
-],[
- AC_MSG_RESULT(no)
-])
-])
-
#
# 2.6.33 no longer has ctl_name & strategy field in struct ctl_table.
#
# 2.6.32
LIBCFS_STACKTRACE_OPS_HAVE_WALK_STACK
LC_SHRINKER_WANT_SHRINK_PTR
-LIBCFS_HAVE_OOM_H
-LIBCFS_OOMADJ_IN_SIG
# 2.6.33
LIBCFS_SYSCTL_CTLNAME
# 2.6.34
#define CLONE_SIGNAL (CLONE_SIGHAND | CLONE_THREAD)
-#define CFS_DAEMON_FLAGS (CLONE_VM | CLONE_FILES)
-
-extern int cfs_create_thread(cfs_thread_t func, void *arg, unsigned long flag);
-
+extern cfs_task_t kthread_run(cfs_thread_t func, void *arg,
+ const char namefmt[], ...);
/*
* Wait Queue implementation
/*
* Defined by platform
*/
-void cfs_daemonize(char *str);
-int cfs_daemonize_ctxt(char *str);
+int unshare_fs_struct(void);
cfs_sigset_t cfs_get_blocked_sigs(void);
cfs_sigset_t cfs_block_allsigs(void);
cfs_sigset_t cfs_block_sigs(unsigned long sigs);
int cfs_signal_pending(void);
void cfs_clear_sigpending(void);
-/*
- * XXX Liang:
- * these macros should be removed in the future,
- * we keep them just for keeping libcfs compatible
- * with other branches.
- */
-#define libcfs_daemonize(s) cfs_daemonize(s)
-#define cfs_sigmask_lock(f) do { f= 0; } while (0)
-#define cfs_sigmask_unlock(f) do { f= 0; } while (0)
-
int convert_server_error(__u64 ecode);
int convert_client_oflag(int cflag, int *result);
#define CFS_DECL_WAITQ(wq) DECLARE_WAIT_QUEUE_HEAD(wq)
-#define cfs_kthread_run(fn, data, fmt, arg...) kthread_run(fn, data, fmt, ##arg)
-
-/* Kernel thread */
-typedef int (*cfs_thread_t)(void *);
-
-#define CFS_DAEMON_FLAGS (CLONE_VM | CLONE_FILES)
-extern int cfs_create_thread(int (*fn)(void *),
- void *arg, unsigned long flags);
-
/*
* Task struct
*/
typedef unsigned long long cfs_cycles_t;
#define IS_ERR(a) ((unsigned long)(a) > (unsigned long)-1000L)
+#define IS_ERR_VALUE(a) (IS_ERR(a))
#define PTR_ERR(a) ((long)(a))
#define ERR_PTR(a) ((void*)((long)(a)))
#ifdef HAVE_LIBPTHREAD
typedef int (*cfs_thread_t)(void *);
-int cfs_create_thread(cfs_thread_t func, void *arg, unsigned long flags);
+void *kthread_run(cfs_thread_t func, void *arg, const char namefmt[], ...);
#else
-#define cfs_create_thread(l,m) LBUG()
+/* Fine, crash, but stop giving me compile warnings */
+#define kthread_run(f, a, n, ...) LBUG()
#endif
uid_t cfs_curproc_uid(void);
void * arg;
} cfs_thread_context_t;
-int cfs_create_thread(int (*func)(void *), void *arg, unsigned long flag);
-
/*
* thread creation flags from Linux, not used in winnt
*/
#define CLONE_SIGNAL (CLONE_SIGHAND | CLONE_THREAD)
-#define CFS_DAEMON_FLAGS (CLONE_VM|CLONE_FILES)
-
/*
* group_info: linux/sched.h
*/
libcfs-linux-objs := linux-tracefile.o linux-debug.o
libcfs-linux-objs += linux-prim.o linux-mem.o linux-cpu.o
libcfs-linux-objs += linux-fs.o linux-sync.o linux-tcpip.o
-libcfs-linux-objs += linux-lwt.o linux-proc.o linux-curproc.o
+libcfs-linux-objs += linux-proc.o linux-curproc.o
libcfs-linux-objs += linux-utils.o linux-module.o
libcfs-linux-objs += linux-crypto.o linux-crypto-crc32.o
libcfs-linux-objs += linux-crypto-adler.o
libcfs-linux-objs := $(addprefix linux/,$(libcfs-linux-objs))
-libcfs-all-objs := debug.o fail.o nidstrings.o lwt.o module.o tracefile.o \
+libcfs-all-objs := debug.o fail.o nidstrings.o module.o tracefile.o \
watchdog.o libcfs_string.o hash.o kernel_user_comm.o \
prng.o workitem.o upcall_cache.o libcfs_cpu.o \
libcfs_mem.o libcfs_lock.o heap.o
extern thread_t kernel_thread(task_t task, void (*start)(void));
-int
-cfs_create_thread(cfs_thread_t func, void *arg, unsigned long flag)
-{
- int ret = 0;
- thread_t th = NULL;
-
- thread_arg_hold(&cfs_thread_arg, func, arg);
- th = kernel_thread(kernel_task, cfs_thread_agent);
- thread_arg_release(&cfs_thread_arg);
- if (th == THREAD_NULL)
+cfs_task_t
+kthread_run(cfs_thread_t func, void *arg, const char namefmt[], ...)
+{
+ int ret = 0;
+ thread_t th = NULL;
+
+ thread_arg_hold(&cfs_thread_arg, func, arg);
+ th = kernel_thread(kernel_task, cfs_thread_agent);
+ thread_arg_release(&cfs_thread_arg);
+ if (th != THREAD_NULL) {
+ /*
+ * FIXME: change child thread name...
+ * cfs_curproc_comm() is already broken. So it is left as is...
+ va_list args;
+ va_start(args, namefmt);
+ snprintf(cfs_curproc_comm(), CFS_CURPROC_COMM_MAX,
+ namefmt, args);
+ va_end(args);
+ */
+ } else {
ret = -1;
- return ret;
-}
-
-void cfs_daemonize(char *str)
-{
- snprintf(cfs_curproc_comm(), CFS_CURPROC_COMM_MAX, "%s", str);
- return;
+ }
+ return (cfs_task_t)((long)ret);
}
/*
return 1;
else if (is_addr_in_range(addr, trap, syscall_trace))
return 1;
- else if (is_addr_in_range(addr, cfs_thread_agent, cfs_create_thread))
+ else if (is_addr_in_range(addr, cfs_thread_agent, kthread_run))
return 1;
else
return 0;
/* we're being careful to ensure that the kernel thread is
* able to set our state to running as it exits before we
* get to schedule() */
- cfs_waitlink_init(&wait);
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add(&debug_ctlwq, &wait);
-
- dumper = cfs_kthread_run(libcfs_debug_dumplog_thread,
- (void*)(long)cfs_curproc_pid(),
- "libcfs_debug_dumper");
- if (IS_ERR(dumper))
- printk(CFS_KERN_ERR "LustreError: cannot start log dump thread:"
- " %ld\n", PTR_ERR(dumper));
+ cfs_waitlink_init(&wait);
+ cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+ cfs_waitq_add(&debug_ctlwq, &wait);
+
+ dumper = kthread_run(libcfs_debug_dumplog_thread,
+ (void *)(long)cfs_curproc_pid(),
+ "libcfs_debug_dumper");
+ if (IS_ERR(dumper))
+ printk(CFS_KERN_ERR "LustreError: cannot start log dump thread:"
+ " %ld\n", PTR_ERR(dumper));
else
cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
-EXTRA_DIST = linux-debug.c linux-lwt.c linux-prim.c linux-tracefile.c \
+EXTRA_DIST = linux-debug.c linux-prim.c linux-tracefile.c \
linux-fs.c linux-mem.c linux-proc.c linux-utils.c linux-lock.c \
linux-module.c linux-sync.c linux-curproc.c linux-tcpip.c \
linux-cpu.c linux-crypto.c linux-crypto-crc32.c linux-crypto-adler.c \
+++ /dev/null
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-# define DEBUG_SUBSYSTEM S_LNET
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-
-#ifdef HAVE_LINUX_OOM_H
-#include <linux/oom.h>
-#else
-#include <linux/mm.h>
-#endif
-
-int oom_get_adj(struct task_struct *task, int scope)
-{
- int oom_adj;
-#ifdef HAVE_OOMADJ_IN_SIG
- unsigned long flags;
-
- spin_lock_irqsave(&task->sighand->siglock, flags);
- oom_adj = task->signal->oom_adj;
- task->signal->oom_adj = scope;
- spin_unlock_irqrestore(&task->sighand->siglock, flags);
-
-#else
- oom_adj = task->oomkilladj;
- task->oomkilladj = scope;
-#endif
- return oom_adj;
-}
-
-int cfs_create_thread(int (*fn)(void *),
- void *arg, unsigned long flags)
-{
- void *orig_info = current->journal_info;
- int rc;
- int old_oom;
-
- old_oom = oom_get_adj(current, OOM_DISABLE);
- current->journal_info = NULL;
- rc = kernel_thread(fn, arg, flags);
- current->journal_info = orig_info;
- oom_get_adj(current, old_oom);
-
- return rc;
-}
-EXPORT_SYMBOL(cfs_create_thread);
#endif
}
-void cfs_daemonize(char *str) {
- unsigned long flags;
-
- daemonize(str);
- SIGNAL_MASK_LOCK(current, flags);
- sigfillset(¤t->blocked);
- RECALC_SIGPENDING;
- SIGNAL_MASK_UNLOCK(current, flags);
-}
-
-int cfs_daemonize_ctxt(char *str) {
-
- cfs_daemonize(str);
-#ifndef HAVE_UNSHARE_FS_STRUCT
- {
- struct task_struct *tsk = current;
- struct fs_struct *fs = NULL;
- fs = copy_fs_struct(tsk->fs);
- if (fs == NULL)
- return -ENOMEM;
- exit_fs(tsk);
- tsk->fs = fs;
- }
-#else
- unshare_fs_struct();
-#endif
- return 0;
-}
-
sigset_t
cfs_block_allsigs(void)
{
EXPORT_SYMBOL(libcfs_arch_init);
EXPORT_SYMBOL(libcfs_arch_cleanup);
EXPORT_SYMBOL(cfs_enter_debugger);
-EXPORT_SYMBOL(cfs_daemonize);
-EXPORT_SYMBOL(cfs_daemonize_ctxt);
EXPORT_SYMBOL(cfs_block_allsigs);
EXPORT_SYMBOL(cfs_block_sigs);
EXPORT_SYMBOL(cfs_block_sigsinv);
/* we're started late enough that we pick up init's fs context */
/* this is so broken in uml? what on earth is going on? */
- cfs_daemonize("ktracefiled");
spin_lock_init(&pc.pc_lock);
complete(&tctl->tctl_start);
init_completion(&tctl->tctl_start);
init_completion(&tctl->tctl_stop);
- cfs_waitq_init(&tctl->tctl_waitq);
- cfs_atomic_set(&tctl->tctl_shutdown, 0);
+ cfs_waitq_init(&tctl->tctl_waitq);
+ cfs_atomic_set(&tctl->tctl_shutdown, 0);
- if (cfs_create_thread(tracefiled, tctl, 0) < 0) {
- rc = -ECHILD;
- goto out;
- }
+ if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
+ rc = -ECHILD;
+ goto out;
+ }
wait_for_completion(&tctl->tctl_start);
- thread_running = 1;
+ thread_running = 1;
out:
mutex_unlock(&cfs_trace_thread_mutex);
return rc;
return NULL;
}
-int cfs_create_thread(cfs_thread_t func, void *arg, unsigned long flags)
+void *kthread_run(cfs_thread_t func, void *arg, const char namefmt[], ...)
{
- pthread_t tid;
- pthread_attr_t tattr;
- int rc;
- struct lustre_thread_arg *targ_p = malloc(sizeof(struct lustre_thread_arg));
+ pthread_t tid;
+ pthread_attr_t tattr;
+ int rc;
+ struct lustre_thread_arg *targ_p =
+ malloc(sizeof(struct lustre_thread_arg));
- if ( targ_p == NULL )
- return -ENOMEM;
+ if (targ_p == NULL)
+ return ERR_PTR(-ENOMEM);
- targ_p->f = func;
- targ_p->arg = arg;
+ targ_p->f = func;
+ targ_p->arg = arg;
- pthread_attr_init(&tattr);
- pthread_attr_setdetachstate(&tattr, PTHREAD_CREATE_DETACHED);
- rc = pthread_create(&tid, &tattr, cfs_thread_helper, targ_p);
- pthread_attr_destroy(&tattr);
- return -rc;
+ pthread_attr_init(&tattr);
+ pthread_attr_setdetachstate(&tattr, PTHREAD_CREATE_DETACHED);
+ rc = pthread_create(&tid, &tattr, cfs_thread_helper, targ_p);
+ pthread_attr_destroy(&tattr);
+ return ERR_PTR(rc);
}
#endif
*/
}
-void cfs_daemonize(char *str)
+int unshare_fs_struct()
{
- return;
-}
-
-int cfs_daemonize_ctxt(char *str)
-{
- return 0;
+ return 0;
}
cfs_sigset_t cfs_block_allsigs(void)
static int lcw_dispatch_main(void *data)
{
int rc = 0;
- unsigned long flags;
struct lc_watchdog *lcw;
CFS_LIST_HEAD (zombies);
ENTRY;
- cfs_daemonize("lc_watchdogd");
-
- SIGNAL_MASK_LOCK(current, flags);
- sigfillset(¤t->blocked);
- RECALC_SIGPENDING;
- SIGNAL_MASK_UNLOCK(current, flags);
-
complete(&lcw_start_completion);
while (1) {
static void lcw_dispatch_start(void)
{
- int rc;
+ cfs_task_t *task;
ENTRY;
LASSERT(lcw_refcount == 1);
init_completion(&lcw_start_completion);
cfs_waitq_init(&lcw_event_waitq);
- CDEBUG(D_INFO, "starting dispatch thread\n");
- rc = cfs_create_thread(lcw_dispatch_main, NULL, 0);
- if (rc < 0) {
- CERROR("error spawning watchdog dispatch thread: %d\n", rc);
- EXIT;
- return;
- }
+ CDEBUG(D_INFO, "starting dispatch thread\n");
+ task = kthread_run(lcw_dispatch_main, NULL, "lc_watchdogd");
+ if (IS_ERR(task)) {
+ CERROR("error spawning watchdog dispatch thread: %ld\n",
+ PTR_ERR(task));
+ EXIT;
+ return;
+ }
wait_for_completion(&lcw_start_completion);
CDEBUG(D_INFO, "watchdog dispatcher initialization complete.\n");
}
/*
- * cfs_create_thread
+ * kthread_run
* Create a system thread to execute the routine specified
*
* Arguments:
* func: function to be executed in the thread
* arg: argument transferred to func function
- * flag: thread creation flags.
+ * name: thread name to create
*
* Return Value:
- * int: 0 on success or error codes
+ * cfs_task_t: 0 on success or error codes
*
* Notes:
* N/A
*/
-int cfs_create_thread(int (*func)(void *), void *arg, unsigned long flag)
+cfs_task_t kthread_run(int (*func)(void *), void *arg, char *name)
{
cfs_handle_t thread = NULL;
NTSTATUS status;
context = cfs_alloc(sizeof(cfs_thread_context_t), CFS_ALLOC_ZERO);
if (!context) {
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
context->func = func;
/* We need translate the nt status to linux error code */
- return cfs_error_code(status);
+ return ERR_PTR(cfs_error_code(status));
}
//
ZwClose(thread);
- return 0;
+ return (cfs_task_t)0;
}
return timer->deadline;
}
-/*
- * daemonize routine stub
- */
-
-void cfs_daemonize(char *str)
+int unshare_fs_struct()
{
- return;
-}
-
-int cfs_daemonize_ctxt(char *str) {
- cfs_daemonize(str);
- return 0;
+ return 0;
}
/*
}
for (i = 0; i < ks_data.ksnd_engine_nums; i++) {
spin_lock_init(&ks_data.ksnd_engine_mgr[i].lock);
- cfs_init_event(&ks_data.ksnd_engine_mgr[i].start, TRUE, FALSE);
- cfs_init_event(&ks_data.ksnd_engine_mgr[i].exit, TRUE, FALSE);
- CFS_INIT_LIST_HEAD(&ks_data.ksnd_engine_mgr[i].list);
- cfs_create_thread(KsDeliveryEngineThread, &ks_data.ksnd_engine_mgr[i], 0);
+ cfs_init_event(&ks_data.ksnd_engine_mgr[i].start, TRUE, FALSE);
+ cfs_init_event(&ks_data.ksnd_engine_mgr[i].exit, TRUE, FALSE);
+ CFS_INIT_LIST_HEAD(&ks_data.ksnd_engine_mgr[i].list);
+ kthread_run(KsDeliveryEngineThread, &ks_data.ksnd_engine_mgr[i], "");
}
/* register pnp handlers to watch network condition */
cfs_wi_scheduler (void *arg)
{
struct cfs_wi_sched *sched = (cfs_wi_sched_t *)arg;
- char name[16];
-
- if (sched->ws_cptab != NULL && sched->ws_cpt >= 0) {
- snprintf(name, sizeof(name), "%s_%02d_%02d",
- sched->ws_name, sched->ws_cpt, sched->ws_nthreads);
- } else {
- snprintf(name, sizeof(name), "%s_%02d",
- sched->ws_name, sched->ws_nthreads);
- }
- cfs_daemonize(name);
cfs_block_allsigs();
/* CPT affinity scheduler? */
rc = 0;
#ifdef __KERNEL__
while (nthrs > 0) {
+ char name[16];
+ cfs_task_t *task;
spin_lock(&cfs_wi_data.wi_glock);
while (sched->ws_starting > 0) {
spin_unlock(&cfs_wi_data.wi_glock);
sched->ws_starting++;
spin_unlock(&cfs_wi_data.wi_glock);
- rc = cfs_create_thread(cfs_wi_scheduler, sched, 0);
- if (rc >= 0) {
+ if (sched->ws_cptab != NULL && sched->ws_cpt >= 0) {
+ snprintf(name, sizeof(name), "%s_%02d_%02d",
+ sched->ws_name, sched->ws_cpt,
+ sched->ws_nthreads);
+ } else {
+ snprintf(name, sizeof(name), "%s_%02d",
+ sched->ws_name, sched->ws_nthreads);
+ }
+
+ task = kthread_run(cfs_wi_scheduler, sched, name);
+ if (!IS_ERR(task)) {
nthrs--;
continue;
}
+ rc = PTR_ERR(task);
CERROR("Failed to create thread for WI scheduler %s: %d\n",
name, rc);
* mxlnd_thread_start - spawn a kernel thread with this function
* @fn - function pointer
* @arg - pointer to the parameter data
+ * @name - name of new thread
*
* Returns 0 on success and a negative value on failure
*/
int
-mxlnd_thread_start(int (*fn)(void *arg), void *arg)
+mxlnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
- int pid = 0;
+ cfs_task *task;
int i = (int) ((long) arg);
cfs_atomic_inc(&kmxlnd_data.kmx_nthreads);
init_completion(&kmxlnd_data.kmx_completions[i]);
- pid = cfs_create_thread(fn, arg, 0);
- if (pid < 0) {
- CERROR("cfs_create_thread() failed with %d\n", pid);
- cfs_atomic_dec(&kmxlnd_data.kmx_nthreads);
- }
- return pid;
+        task = kthread_run(fn, arg, name);
+        if (IS_ERR(task)) {
+                CERROR("kthread_run() failed with %ld\n", PTR_ERR(task));
+                cfs_atomic_dec(&kmxlnd_data.kmx_nthreads);
+                return PTR_ERR(task);
+        }
+        return 0;
}
/**
*kmxlnd_tunables.kmx_n_waitd == 1 ? "thread" : "threads");
for (i = 0; i < *kmxlnd_tunables.kmx_n_waitd; i++) {
+                char name[24];
+                memset(name, 0, sizeof(name));
+                snprintf(name, sizeof(name), "mxlnd_request_waitd_%02ld",
+                         (long)i);
-                ret = mxlnd_thread_start(mxlnd_request_waitd, (void*)((long)i));
+                ret = mxlnd_thread_start(mxlnd_request_waitd,
+                                         (void *)((long)i), name);
- if (ret < 0) {
- CERROR("Starting mxlnd_request_waitd[%d] failed with %d\n", i, ret);
+ if (ret < 0) {
+ CERROR("Starting mxlnd_request_waitd[%d] "
+ "failed with %d\n", i, ret);
cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
mx_wakeup(kmxlnd_data.kmx_endpt);
for (--i; i >= 0; i--) {
goto failed;
}
}
- ret = mxlnd_thread_start(mxlnd_tx_queued, (void*)((long)i++));
+ ret = mxlnd_thread_start(mxlnd_tx_queued, (void *)((long)i++),
+ "mxlnd_tx_queued");
if (ret < 0) {
CERROR("Starting mxlnd_tx_queued failed with %d\n", ret);
cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
nthreads * sizeof(struct completion));
goto failed;
}
- ret = mxlnd_thread_start(mxlnd_timeoutd, (void*)((long)i++));
+ ret = mxlnd_thread_start(mxlnd_timeoutd, (void *)((long)i++),
+ "mxlnd_timeoutd");
if (ret < 0) {
CERROR("Starting mxlnd_timeoutd failed with %d\n", ret);
cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
nthreads * sizeof(struct completion));
goto failed;
}
- ret = mxlnd_thread_start(mxlnd_connd, (void*)((long)i++));
+ ret = mxlnd_thread_start(mxlnd_connd, (void *)((long)i++),
+ "mxlnd_connd");
if (ret < 0) {
CERROR("Starting mxlnd_connd failed with %d\n", ret);
cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
spinlock_t *tx_q_lock = &kmxlnd_data.kmx_tx_queue_lock;
rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
- cfs_daemonize("mxlnd_tx_queued");
-
while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
ret = down_interruptible(&kmxlnd_data.kmx_tx_queue_sem);
if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown))
mxlnd_request_waitd(void *arg)
{
long id = (long) arg;
- char name[24];
__u32 result = 0;
mx_return_t mxret = MX_SUCCESS;
mx_status_t status;
int count = 0;
#endif
- memset(name, 0, sizeof(name));
- snprintf(name, sizeof(name), "mxlnd_request_waitd_%02ld", id);
- cfs_daemonize(name);
-
        memset(&status, 0, sizeof(status));
-        CDEBUG(D_NET, "%s starting\n", name);
+        CDEBUG(D_NET, "mxlnd_request_waitd starting\n");
{
long id = (long) arg;
- cfs_daemonize("mxlnd_connd");
-
CDEBUG(D_NET, "connd starting\n");
while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
kmx_conn_t *conn = NULL;
rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
- cfs_daemonize("mxlnd_timeoutd");
-
CDEBUG(D_NET, "timeoutd starting\n");
while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
kiblnd_data.kib_init = IBLND_INIT_DATA;
/*****************************************************/
- rc = kiblnd_thread_start(kiblnd_connd, NULL);
+ rc = kiblnd_thread_start(kiblnd_connd, NULL, "kiblnd_connd");
if (rc != 0) {
CERROR("Can't spawn o2iblnd connd: %d\n", rc);
goto failed;
}
- if (*kiblnd_tunables.kib_dev_failover != 0)
- rc = kiblnd_thread_start(kiblnd_failover_thread, NULL);
+ if (*kiblnd_tunables.kib_dev_failover != 0)
+ rc = kiblnd_thread_start(kiblnd_failover_thread, NULL,
+ "kiblnd_failover");
if (rc != 0) {
CERROR("Can't spawn o2iblnd failover thread: %d\n", rc);
for (i = 0; i < nthrs; i++) {
long id;
-
+ char name[20];
id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i);
- rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id);
+ snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld",
+ KIB_THREAD_CPT(id), KIB_THREAD_TID(id));
+ rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name);
if (rc == 0)
continue;
int kiblnd_connd (void *arg);
int kiblnd_scheduler(void *arg);
-int kiblnd_thread_start (int (*fn)(void *arg), void *arg);
+int kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
int kiblnd_failover_thread (void *arg);
int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);
}
int
-kiblnd_thread_start (int (*fn)(void *arg), void *arg)
+kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
- long pid = cfs_create_thread (fn, arg, 0);
+ cfs_task_t *task = kthread_run(fn, arg, name);
- if (pid < 0)
- return ((int)pid);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
- cfs_atomic_inc (&kiblnd_data.kib_nthreads);
- return (0);
+ cfs_atomic_inc(&kiblnd_data.kib_nthreads);
+ return 0;
}
void
int peer_index = 0;
unsigned long deadline = jiffies;
- cfs_daemonize ("kiblnd_connd");
cfs_block_allsigs ();
cfs_waitlink_init (&wait);
cfs_waitlink_t wait;
unsigned long flags;
struct ib_wc wc;
- char name[20];
int did_something;
int busy_loops = 0;
int rc;
- snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld",
- KIB_THREAD_CPT(id), KIB_THREAD_TID(id));
-
- cfs_daemonize(name);
cfs_block_allsigs();
cfs_waitlink_init(&wait);
rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
if (rc != 0) {
- CWARN("Failed to bind %s on CPT %d, please verify whether "
+ CWARN("Failed to bind on CPT %d, please verify whether "
"all CPUs are healthy and reload modules if necessary, "
"otherwise your system might under risk of low "
- "performance\n", name, sched->ibs_cpt);
+ "performance\n", sched->ibs_cpt);
}
spin_lock_irqsave(&sched->ibs_lock, flags);
LASSERT (*kiblnd_tunables.kib_dev_failover != 0);
- cfs_daemonize ("kiblnd_failover");
cfs_block_allsigs ();
cfs_waitlink_init(&wait);
struct timeval tv;
lnet_process_id_t target;
ptl_err_t ptl_rc;
+ char name[16];
if (*kptllnd_tunables.kptl_max_procs_per_node < 1) {
CERROR("max_procs_per_node must be >= 1\n");
* now that PTLLND_INIT_DATA state has been entered */
CDEBUG(D_NET, "starting %d scheduler threads\n", PTLLND_N_SCHED);
for (i = 0; i < PTLLND_N_SCHED; i++) {
+ snprintf(name, sizeof(name), "kptllnd_sd_%02d", i);
-                rc = kptllnd_thread_start(kptllnd_scheduler, (void *)((long)i));
+                rc = kptllnd_thread_start(kptllnd_scheduler,
+                                          (void *)((long)i), name);
if (rc != 0) {
CERROR("Can't spawn scheduler[%d]: %d\n", i, rc);
}
}
- rc = kptllnd_thread_start(kptllnd_watchdog, NULL);
+ snprintf(name, sizeof(name), "kptllnd_wd_%02d", i);
+ rc = kptllnd_thread_start(kptllnd_watchdog, NULL, name);
if (rc != 0) {
CERROR("Can't spawn watchdog: %d\n", rc);
goto failed;
}
int
-kptllnd_thread_start (int (*fn)(void *arg), void *arg)
+kptllnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
- long pid;
+ cfs_task_t *task;
- cfs_atomic_inc(&kptllnd_data.kptl_nthreads);
+ cfs_atomic_inc(&kptllnd_data.kptl_nthreads);
- pid = cfs_create_thread (fn, arg, 0);
- if (pid >= 0)
- return 0;
-
- CERROR("Failed to start thread: error %d\n", (int)pid);
- kptllnd_thread_fini();
- return (int)pid;
+        task = kthread_run(fn, arg, name);
+        if (IS_ERR(task)) {
+                CERROR("Failed to start thread: error %ld\n", PTR_ERR(task));
+                kptllnd_thread_fini();
+                return PTR_ERR(task);
+        }
+        return 0;
}
int
kptllnd_watchdog(void *arg)
{
int id = (long)arg;
- char name[16];
cfs_waitlink_t waitlink;
int stamp = 0;
int peer_index = 0;
int timeout;
int i;
- snprintf(name, sizeof(name), "kptllnd_wd_%02d", id);
- cfs_daemonize(name);
cfs_block_allsigs();
cfs_waitlink_init(&waitlink);
kptllnd_scheduler (void *arg)
{
int id = (long)arg;
- char name[16];
cfs_waitlink_t waitlink;
unsigned long flags;
int did_something;
kptl_rx_buffer_t *rxb;
kptl_tx_t *tx;
- snprintf(name, sizeof(name), "kptllnd_sd_%02d", id);
- cfs_daemonize(name);
cfs_block_allsigs();
cfs_waitlink_init(&waitlink);
/**********************************************************************/
/* Spawn scheduling threads */
for (i = 0; i < cfs_num_online_cpus(); i++) {
- rc = kqswnal_thread_start (kqswnal_scheduler, NULL);
+ rc = kqswnal_thread_start(kqswnal_scheduler, NULL,
+ "kqswnal_sched");
if (rc != 0)
{
CERROR ("failed to spawn scheduling thread: %d\n", rc);
}
int
-kqswnal_thread_start (int (*fn)(void *arg), void *arg)
+kqswnal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
- long pid = cfs_create_thread (fn, arg, 0);
+        cfs_task_t *task = kthread_run(fn, arg, name);
- if (pid < 0)
- return ((int)pid);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
- cfs_atomic_inc (&kqswnal_data.kqn_nthreads);
- return (0);
+ cfs_atomic_inc(&kqswnal_data.kqn_nthreads);
+ return 0;
}
void
int counter = 0;
int did_something;
- cfs_daemonize ("kqswnal_sched");
cfs_block_allsigs ();
spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
int rc;
int i;
kra_device_t *dev;
+ char name[16];
LASSERT (ni->ni_lnd == &the_kralnd);
if (rc != 0)
goto failed;
- rc = kranal_thread_start(kranal_reaper, NULL);
+ rc = kranal_thread_start(kranal_reaper, NULL, "kranal_reaper");
if (rc != 0) {
CERROR("Can't spawn ranal reaper: %d\n", rc);
goto failed;
}
for (i = 0; i < *kranal_tunables.kra_n_connd; i++) {
- rc = kranal_thread_start(kranal_connd, (void *)(unsigned long)i);
+                snprintf(name, sizeof(name), "kranal_connd_%02d", i);
+ rc = kranal_thread_start(kranal_connd,
+ (void *)(unsigned long)i, name);
if (rc != 0) {
CERROR("Can't spawn ranal connd[%d]: %d\n",
i, rc);
for (i = 0; i < kranal_data.kra_ndevs; i++) {
dev = &kranal_data.kra_devices[i];
- rc = kranal_thread_start(kranal_scheduler, dev);
+ snprintf(name, sizeof(name), "kranal_sd_%02d", dev->rad_idx);
+ rc = kranal_thread_start(kranal_scheduler, dev, name);
if (rc != 0) {
CERROR("Can't spawn ranal scheduler[%d]: %d\n",
i, rc);
extern void kranal_post_fma (kra_conn_t *conn, kra_tx_t *tx);
extern int kranal_del_peer (lnet_nid_t nid);
extern void kranal_device_callback (RAP_INT32 devid, RAP_PVOID arg);
-extern int kranal_thread_start (int(*fn)(void *arg), void *arg);
+extern int kranal_thread_start(int(*fn)(void *arg), void *arg, char *name);
extern int kranal_connd (void *arg);
extern int kranal_reaper (void *arg);
extern int kranal_scheduler (void *arg);
}
int
-kranal_thread_start (int(*fn)(void *arg), void *arg)
+kranal_thread_start(int(*fn)(void *arg), void *arg, char *name)
{
- long pid = cfs_create_thread(fn, arg, 0);
+        cfs_task_t *task = kthread_run(fn, arg, name);
- if (pid < 0)
- return(int)pid;
-
- cfs_atomic_inc(&kranal_data.kra_nthreads);
- return 0;
+        if (IS_ERR(task))
+                return PTR_ERR(task);
+
+        cfs_atomic_inc(&kranal_data.kra_nthreads);
+        return 0;
}
void
kranal_connd (void *arg)
{
long id = (long)arg;
- char name[16];
cfs_waitlink_t wait;
unsigned long flags;
kra_peer_t *peer;
kra_acceptsock_t *ras;
int did_something;
- snprintf(name, sizeof(name), "kranal_connd_%02ld", id);
- cfs_daemonize(name);
cfs_block_allsigs();
cfs_waitlink_init(&wait);
long next_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
long current_min_timeout = 1;
- cfs_daemonize("kranal_reaper");
cfs_block_allsigs();
cfs_waitlink_init(&wait);
{
kra_device_t *dev = (kra_device_t *)arg;
cfs_waitlink_t wait;
- char name[16];
kra_conn_t *conn;
unsigned long flags;
unsigned long deadline;
int dropped_lock;
int busy_loops = 0;
- snprintf(name, sizeof(name), "kranal_sd_%02d", dev->rad_idx);
- cfs_daemonize(name);
cfs_block_allsigs();
dev->rad_scheduler = current;
}
for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
+ char name[16];
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
ksocknal_data.ksnd_connd_starting++;
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
+
+ snprintf(name, sizeof(name), "socknal_cd%02d", i);
rc = ksocknal_thread_start(ksocknal_connd,
- (void *)((ulong_ptr_t)i));
+ (void *)((ulong_ptr_t)i), name);
if (rc != 0) {
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
ksocknal_data.ksnd_connd_starting--;
}
}
- rc = ksocknal_thread_start (ksocknal_reaper, NULL);
+ rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
if (rc != 0) {
CERROR ("Can't spawn socknal reaper: %d\n", rc);
goto failed;
}
for (i = 0; i < nthrs; i++) {
- long id;
-
+ long id;
+ char name[20];
+ ksock_sched_t *sched;
id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
- rc = ksocknal_thread_start(ksocknal_scheduler, (void *)id);
+ sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
+ snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
+ info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));
+
+ rc = ksocknal_thread_start(ksocknal_scheduler,
+ (void *)id, name);
if (rc == 0)
continue;
int error);
extern void ksocknal_notify (lnet_ni_t *ni, lnet_nid_t gw_nid, int alive);
extern void ksocknal_query (struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);
-extern int ksocknal_thread_start (int (*fn)(void *arg), void *arg);
+extern int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
extern void ksocknal_thread_fini (void);
extern void ksocknal_launch_all_connections_locked (ksock_peer_t *peer);
extern ksock_route_t *ksocknal_find_connectable_route_locked (ksock_peer_t *peer);
}
int
-ksocknal_thread_start (int (*fn)(void *arg), void *arg)
+ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
- long pid = cfs_create_thread (fn, arg, 0);
+ cfs_task_t *task = kthread_run(fn, arg, name);
- if (pid < 0)
- return ((int)pid);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- ksocknal_data.ksnd_nthreads++;
+ ksocknal_data.ksnd_nthreads++;
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
- return (0);
+ return 0;
}
void
ksock_tx_t *tx;
int rc;
int nloops = 0;
- char name[20];
long id = (long)arg;
info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)];
sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
- snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
- info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));
-
- cfs_daemonize(name);
cfs_block_allsigs();
rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt);
if (rc != 0) {
- CERROR("Can't set CPT affinity for %s to %d: %d\n",
- name, info->ksi_cpt, rc);
+ CERROR("Can't set CPT affinity to %d: %d\n",
+ info->ksi_cpt, rc);
}
spin_lock_bh(&sched->kss_lock);
static int
ksocknal_connd_check_start(long sec, long *timeout)
{
+ char name[16];
int rc;
int total = ksocknal_data.ksnd_connd_starting +
ksocknal_data.ksnd_connd_running;
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
/* NB: total is the next id */
- rc = ksocknal_thread_start(ksocknal_connd, (void *)((long)total));
+ snprintf(name, sizeof(name), "socknal_cd%02d", total);
+ rc = ksocknal_thread_start(ksocknal_connd, NULL, name);
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
if (rc == 0)
ksocknal_connd (void *arg)
{
spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
- long id = (long)(long_ptr_t)arg;
- char name[16];
ksock_connreq_t *cr;
cfs_waitlink_t wait;
int nloops = 0;
int cons_retry = 0;
- snprintf (name, sizeof (name), "socknal_cd%02ld", id);
- cfs_daemonize (name);
cfs_block_allsigs ();
cfs_waitlink_init (&wait);
int peer_index = 0;
cfs_time_t deadline = cfs_time_current();
- cfs_daemonize ("socknal_reaper");
cfs_block_allsigs ();
CFS_INIT_LIST_HEAD(&enomem_conns);
int
lnet_acceptor(void *arg)
{
- char name[16];
cfs_socket_t *newsock;
int rc;
__u32 magic;
LASSERT (lnet_acceptor_state.pta_sock == NULL);
- snprintf(name, sizeof(name), "acceptor_%03d", accept_port);
- cfs_daemonize(name);
cfs_block_allsigs();
rc = libcfs_sock_listen(&lnet_acceptor_state.pta_sock,
if (lnet_count_acceptor_nis() == 0) /* not required */
return 0;
- rc2 = cfs_create_thread(lnet_acceptor, (void *)(ulong_ptr_t)secure, 0);
- if (rc2 < 0) {
- CERROR("Can't start acceptor thread: %d\n", rc);
+ rc2 = PTR_ERR(kthread_run(lnet_acceptor,
+ (void *)(ulong_ptr_t)secure,
+ "acceptor_%03ld", secure));
+ if (IS_ERR_VALUE(rc2)) {
+ CERROR("Can't start acceptor thread: %ld\n", rc2);
fini_completion(&lnet_acceptor_state.pta_signal);
return -ESRCH;
rc = libcfs_register_ioctl(&lnet_ioctl_handler);
LASSERT (rc == 0);
- if (config_on_load) {
- /* Have to schedule a separate thread to avoid deadlocking
- * in modload */
- (void) cfs_create_thread(lnet_configure, NULL, 0);
- }
+ if (config_on_load) {
+ /* Have to schedule a separate thread to avoid deadlocking
+ * in modload */
+ (void) kthread_run(lnet_configure, NULL, "lnet_initd");
+ }
RETURN(0);
}
the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING;
#ifdef __KERNEL__
- rc = cfs_create_thread(lnet_router_checker, NULL, 0);
- if (rc < 0) {
- CERROR("Can't start router checker thread: %d\n", rc);
- /* block until event callback signals exit */
+ rc = PTR_ERR(kthread_run(lnet_router_checker,
+ NULL, "router_checker"));
+ if (IS_ERR_VALUE(rc)) {
+ CERROR("Can't start router checker thread: %d\n", rc);
+ /* block until event callback signals exit */
down(&the_lnet.ln_rc_signal);
- rc = LNetEQFree(the_lnet.ln_rc_eqh);
- LASSERT (rc == 0);
- the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
- return -ENOMEM;
- }
+ rc = LNetEQFree(the_lnet.ln_rc_eqh);
+ LASSERT(rc == 0);
+ the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
+ return -ENOMEM;
+ }
#endif
if (check_routers_before_use) {
lnet_peer_t *rtr;
cfs_list_t *entry;
- cfs_daemonize("router_checker");
cfs_block_allsigs();
LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
SET_BUT_UNUSED(rc);
- cfs_daemonize("st_timer");
cfs_block_allsigs();
while (!stt_data.stt_shuttingdown) {
int
stt_start_timer_thread (void)
{
- long pid;
+ cfs_task_t *task;
- LASSERT (!stt_data.stt_shuttingdown);
+ LASSERT(!stt_data.stt_shuttingdown);
- pid = cfs_create_thread(stt_timer_main, NULL, 0);
- if (pid < 0)
- return (int)pid;
+ task = kthread_run(stt_timer_main, NULL, "st_timer");
+ if (IS_ERR(task))
+ return PTR_ERR(task);
spin_lock(&stt_data.stt_lock);
stt_data.stt_nthreads++;
pthread_rwlock_init(&usock_data.ud_peers_lock, NULL);
/* Spawn poll threads */
- for (i = 0; i < usock_data.ud_npollthreads; i++) {
- rc = cfs_create_thread(usocklnd_poll_thread,
- &usock_data.ud_pollthreads[i], 0);
- if (rc) {
- usocklnd_base_shutdown(i);
- return rc;
- }
- }
+ for (i = 0; i < usock_data.ud_npollthreads; i++) {
+ rc = PTR_ERR(kthread_run(usocklnd_poll_thread,
+ &usock_data.ud_pollthreads[i],
+						 "usocklnd_poll_%d", i));
+ if (IS_ERR_VALUE(rc)) {
+ usocklnd_base_shutdown(i);
+ return rc;
+ }
+ }
usock_data.ud_state = UD_STATE_INITIALIZED;
*/
struct ptlrpc_request_set *pc_set;
/**
- * Thread name used in cfs_daemonize()
+ * Thread name used in kthread_run()
*/
char pc_name[16];
/**
struct ptlrpc_request *req;
struct target_recovery_data *trd = &obd->obd_recovery_data;
unsigned long delta;
- unsigned long flags;
struct lu_env *env;
struct ptlrpc_thread *thread = NULL;
int rc = 0;
ENTRY;
- cfs_daemonize_ctxt("tgt_recov");
-
- SIGNAL_MASK_LOCK(current, flags);
- sigfillset(¤t->blocked);
- RECALC_SIGPENDING;
- SIGNAL_MASK_UNLOCK(current, flags);
-
+ unshare_fs_struct();
OBD_ALLOC_PTR(thread);
if (thread == NULL)
RETURN(-ENOMEM);
static int target_start_recovery_thread(struct lu_target *lut,
svc_handler_t handler)
{
- struct obd_device *obd = lut->lut_obd;
- int rc = 0;
- struct target_recovery_data *trd = &obd->obd_recovery_data;
+ struct obd_device *obd = lut->lut_obd;
+ int rc = 0;
+ struct target_recovery_data *trd = &obd->obd_recovery_data;
- memset(trd, 0, sizeof(*trd));
+ memset(trd, 0, sizeof(*trd));
init_completion(&trd->trd_starting);
init_completion(&trd->trd_finishing);
- trd->trd_recovery_handler = handler;
+ trd->trd_recovery_handler = handler;
- if (cfs_create_thread(target_recovery_thread, lut, 0) > 0) {
+ if (!IS_ERR(kthread_run(target_recovery_thread,
+ lut, "tgt_recov"))) {
wait_for_completion(&trd->trd_starting);
- LASSERT(obd->obd_recovering != 0);
- } else
- rc = -ECHILD;
+ LASSERT(obd->obd_recovering != 0);
+ } else {
+ rc = -ECHILD;
+ }
- return rc;
+ return rc;
}
void target_stop_recovery_thread(struct obd_device *obd)
int do_dump;
ENTRY;
- cfs_daemonize("ldlm_elt");
expired_lock_thread.elt_state = ELT_READY;
cfs_waitq_signal(&expired_lock_thread.elt_waitq);
static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
{
struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
- int rc;
+ cfs_task_t *task;
init_completion(&bltd.bltd_comp);
- rc = cfs_create_thread(ldlm_bl_thread_main, &bltd, 0);
- if (rc < 0) {
- CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %d\n",
- cfs_atomic_read(&blp->blp_num_threads), rc);
- return rc;
+ bltd.bltd_num = cfs_atomic_read(&blp->blp_num_threads);
+ snprintf(bltd.bltd_name, sizeof(bltd.bltd_name) - 1,
+ "ldlm_bl_%02d", bltd.bltd_num);
+ task = kthread_run(ldlm_bl_thread_main, &bltd, bltd.bltd_name);
+ if (IS_ERR(task)) {
+ CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %ld\n",
+ cfs_atomic_read(&blp->blp_num_threads), PTR_ERR(task));
+ return PTR_ERR(task);
}
wait_for_completion(&bltd.bltd_comp);
blp = bltd->bltd_blp;
- bltd->bltd_num =
- cfs_atomic_inc_return(&blp->blp_num_threads) - 1;
+ cfs_atomic_inc(&blp->blp_num_threads);
cfs_atomic_inc(&blp->blp_busy_threads);
- snprintf(bltd->bltd_name, sizeof(bltd->bltd_name) - 1,
- "ldlm_bl_%02d", bltd->bltd_num);
- cfs_daemonize(bltd->bltd_name);
-
complete(&bltd->bltd_comp);
/* cannot use bltd after this, it is only on caller's stack */
}
}
# ifdef HAVE_SERVER_SUPPORT
- CFS_INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
- expired_lock_thread.elt_state = ELT_STOPPED;
- cfs_waitq_init(&expired_lock_thread.elt_waitq);
+ CFS_INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
+ expired_lock_thread.elt_state = ELT_STOPPED;
+ cfs_waitq_init(&expired_lock_thread.elt_waitq);
- CFS_INIT_LIST_HEAD(&waiting_locks_list);
+ CFS_INIT_LIST_HEAD(&waiting_locks_list);
spin_lock_init(&waiting_locks_spinlock);
- cfs_timer_init(&waiting_locks_timer, waiting_locks_callback, 0);
+ cfs_timer_init(&waiting_locks_timer, waiting_locks_callback, 0);
- rc = cfs_create_thread(expired_lock_main, NULL, CFS_DAEMON_FLAGS);
- if (rc < 0) {
+ rc = PTR_ERR(kthread_run(expired_lock_main, NULL, "ldlm_elt"));
+ if (IS_ERR_VALUE(rc)) {
CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
GOTO(out, rc);
}
- cfs_wait_event(expired_lock_thread.elt_waitq,
- expired_lock_thread.elt_state == ELT_READY);
+ cfs_wait_event(expired_lock_thread.elt_waitq,
+ expired_lock_thread.elt_state == ELT_READY);
# endif /* HAVE_SERVER_SUPPORT */
rc = ldlm_pools_init();
static int ldlm_pools_thread_main(void *arg)
{
struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
- char *t_name = "ldlm_poold";
int s_time, c_time;
ENTRY;
- cfs_daemonize(t_name);
thread_set_flags(thread, SVC_RUNNING);
cfs_waitq_signal(&thread->t_ctl_waitq);
CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
- t_name, cfs_curproc_pid());
+ "ldlm_poold", cfs_curproc_pid());
while (1) {
struct l_wait_info lwi;
cfs_waitq_signal(&thread->t_ctl_waitq);
CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
- t_name, cfs_curproc_pid());
+ "ldlm_poold", cfs_curproc_pid());
complete_and_exit(&ldlm_pools_comp, 0);
}
static int ldlm_pools_thread_start(void)
{
- struct l_wait_info lwi = { 0 };
- int rc;
- ENTRY;
+ struct l_wait_info lwi = { 0 };
+ cfs_task_t *task;
+ ENTRY;
- if (ldlm_pools_thread != NULL)
- RETURN(-EALREADY);
+ if (ldlm_pools_thread != NULL)
+ RETURN(-EALREADY);
- OBD_ALLOC_PTR(ldlm_pools_thread);
- if (ldlm_pools_thread == NULL)
- RETURN(-ENOMEM);
+ OBD_ALLOC_PTR(ldlm_pools_thread);
+ if (ldlm_pools_thread == NULL)
+ RETURN(-ENOMEM);
init_completion(&ldlm_pools_comp);
- cfs_waitq_init(&ldlm_pools_thread->t_ctl_waitq);
-
- /*
- * CLONE_VM and CLONE_FILES just avoid a needless copy, because we
- * just drop the VM and FILES in cfs_daemonize() right away.
- */
- rc = cfs_create_thread(ldlm_pools_thread_main, ldlm_pools_thread,
- CFS_DAEMON_FLAGS);
- if (rc < 0) {
- CERROR("Can't start pool thread, error %d\n",
- rc);
- OBD_FREE(ldlm_pools_thread, sizeof(*ldlm_pools_thread));
- ldlm_pools_thread = NULL;
- RETURN(rc);
- }
- l_wait_event(ldlm_pools_thread->t_ctl_waitq,
- thread_is_running(ldlm_pools_thread), &lwi);
- RETURN(0);
+ cfs_waitq_init(&ldlm_pools_thread->t_ctl_waitq);
+
+ task = kthread_run(ldlm_pools_thread_main, ldlm_pools_thread,
+ "ldlm_poold");
+ if (IS_ERR(task)) {
+ CERROR("Can't start pool thread, error %ld\n", PTR_ERR(task));
+ OBD_FREE(ldlm_pools_thread, sizeof(*ldlm_pools_thread));
+ ldlm_pools_thread = NULL;
+ RETURN(PTR_ERR(task));
+ }
+ l_wait_event(ldlm_pools_thread->t_ctl_waitq,
+ thread_is_running(ldlm_pools_thread), &lwi);
+ RETURN(0);
}
static void ldlm_pools_thread_stop(void)
int rc;
ENTRY;
- cfs_daemonize("lfsck_master");
rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
if (rc != 0) {
CERROR("%s: LFSCK, fail to init env, rc = %d\n",
lfsck->li_args_oit = (flags << DT_OTABLE_IT_FLAGS_SHIFT) | valid;
thread_set_flags(thread, 0);
if (lfsck->li_master)
- rc = cfs_create_thread(lfsck_master_engine, lfsck, 0);
+		rc = PTR_ERR(kthread_run(lfsck_master_engine, lfsck, "lfsck"));
-	if (rc < 0)
+	if (IS_ERR_VALUE(rc))
CERROR("%s: cannot start LFSCK thread, rc = %d\n",
lfsck_lfsck2name(lfsck), rc);
int rc;
ENTRY;
- cfs_daemonize("ll_capa");
-
thread_set_flags(&ll_capa_thread, SVC_RUNNING);
cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
int ll_capa_thread_start(void)
{
- int rc;
- ENTRY;
+ cfs_task_t *task;
+ ENTRY;
- cfs_waitq_init(&ll_capa_thread.t_ctl_waitq);
+ cfs_waitq_init(&ll_capa_thread.t_ctl_waitq);
- rc = cfs_create_thread(capa_thread_main, NULL, 0);
- if (rc < 0) {
- CERROR("cannot start expired capa thread: rc %d\n", rc);
- RETURN(rc);
- }
- cfs_wait_event(ll_capa_thread.t_ctl_waitq,
- thread_is_running(&ll_capa_thread));
+ task = kthread_run(capa_thread_main, NULL, "ll_capa");
+ if (IS_ERR(task)) {
+ CERROR("cannot start expired capa thread: rc %ld\n",
+ PTR_ERR(task));
+ RETURN(PTR_ERR(task));
+ }
+ cfs_wait_event(ll_capa_thread.t_ctl_waitq,
+ thread_is_running(&ll_capa_thread));
- RETURN(0);
+ RETURN(0);
}
void ll_capa_thread_stop(void)
struct ll_close_queue *lcq = arg;
ENTRY;
- {
- char name[CFS_CURPROC_COMM_MAX];
- snprintf(name, sizeof(name) - 1, "ll_close");
- cfs_daemonize(name);
- }
-
complete(&lcq->lcq_comp);
while (1) {
int ll_close_thread_start(struct ll_close_queue **lcq_ret)
{
- struct ll_close_queue *lcq;
- pid_t pid;
+ struct ll_close_queue *lcq;
+ cfs_task_t *task;
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CLOSE_THREAD))
- return -EINTR;
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CLOSE_THREAD))
+ return -EINTR;
- OBD_ALLOC(lcq, sizeof(*lcq));
- if (lcq == NULL)
- return -ENOMEM;
+ OBD_ALLOC(lcq, sizeof(*lcq));
+ if (lcq == NULL)
+ return -ENOMEM;
spin_lock_init(&lcq->lcq_lock);
CFS_INIT_LIST_HEAD(&lcq->lcq_head);
cfs_waitq_init(&lcq->lcq_waitq);
init_completion(&lcq->lcq_comp);
- pid = cfs_create_thread(ll_close_thread, lcq, 0);
- if (pid < 0) {
+ task = kthread_run(ll_close_thread, lcq, "ll_close");
+ if (IS_ERR(task)) {
OBD_FREE(lcq, sizeof(*lcq));
- return pid;
+ return PTR_ERR(task);
}
wait_for_completion(&lcq->lcq_comp);
int refcheck;
int ret = 0;
- daemonize("lloop%d", lo->lo_number);
-
set_user_nice(current, -20);
lo->lo_state = LLOOP_BOUND;
set_capacity(disks[lo->lo_number], size);
bd_set_size(bdev, size << 9);
- set_blocksize(bdev, lo->lo_blocksize);
+ set_blocksize(bdev, lo->lo_blocksize);
- cfs_create_thread(loop_thread, lo, CLONE_KERNEL);
+ kthread_run(loop_thread, lo, "lloop%d", lo->lo_number);
down(&lo->lo_sem);
- return 0;
+ return 0;
- out:
+out:
/* This is safe: open() is still holding a reference. */
cfs_module_put(THIS_MODULE);
return error;
struct l_wait_info lwi = { 0 };
ENTRY;
- {
- char pname[16];
- snprintf(pname, 15, "ll_agl_%u", plli->lli_opendir_pid);
- cfs_daemonize(pname);
- }
-
CDEBUG(D_READA, "agl thread started: [pid %d] [parent %.*s]\n",
cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
{
- struct ptlrpc_thread *thread = &sai->sai_agl_thread;
- struct l_wait_info lwi = { 0 };
- int rc;
- ENTRY;
+ struct ptlrpc_thread *thread = &sai->sai_agl_thread;
+ struct l_wait_info lwi = { 0 };
+ struct ll_inode_info *plli;
+ cfs_task_t *task;
+ ENTRY;
- CDEBUG(D_READA, "start agl thread: [pid %d] [parent %.*s]\n",
- cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
+ CDEBUG(D_READA, "start agl thread: [pid %d] [parent %.*s]\n",
+ cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
- rc = cfs_create_thread(ll_agl_thread, parent, 0);
- if (rc < 0) {
- CERROR("can't start ll_agl thread, rc: %d\n", rc);
- thread_set_flags(thread, SVC_STOPPED);
- RETURN_EXIT;
- }
+ plli = ll_i2info(parent->d_inode);
+ task = kthread_run(ll_agl_thread, parent,
+ "ll_agl_%u", plli->lli_opendir_pid);
+ if (IS_ERR(task)) {
+ CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task));
+ thread_set_flags(thread, SVC_STOPPED);
+ RETURN_EXIT;
+ }
- l_wait_event(thread->t_ctl_waitq,
- thread_is_running(thread) || thread_is_stopped(thread),
- &lwi);
- EXIT;
+ l_wait_event(thread->t_ctl_waitq,
+ thread_is_running(thread) || thread_is_stopped(thread),
+ &lwi);
+ EXIT;
}
static int ll_statahead_thread(void *arg)
struct l_wait_info lwi = { 0 };
ENTRY;
- {
- char pname[16];
- snprintf(pname, 15, "ll_sa_%u", plli->lli_opendir_pid);
- cfs_daemonize(pname);
- }
-
CDEBUG(D_READA, "statahead thread started: [pid %d] [parent %.*s]\n",
cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
struct ptlrpc_thread *thread;
struct l_wait_info lwi = { 0 };
int rc = 0;
+ struct ll_inode_info *plli;
ENTRY;
LASSERT(lli->lli_opendir_pid == cfs_curproc_pid());
cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
lli->lli_sai = sai;
- rc = cfs_create_thread(ll_statahead_thread, parent, 0);
- thread = &sai->sai_thread;
- if (rc < 0) {
- CERROR("can't start ll_sa thread, rc: %d\n", rc);
- dput(parent);
+
+ plli = ll_i2info(parent->d_inode);
+ rc = PTR_ERR(kthread_run(ll_statahead_thread, parent,
+ "ll_sa_%u", plli->lli_opendir_pid));
+ thread = &sai->sai_thread;
+ if (IS_ERR_VALUE(rc)) {
+ CERROR("can't start ll_sa thread, rc: %d\n", rc);
+ dput(parent);
lli->lli_opendir_key = NULL;
thread_set_flags(thread, SVC_STOPPED);
thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
CDEBUG(D_CHANGELOG, "changelog to fp=%p start "LPU64"\n",
cs->cs_fp, cs->cs_startrec);
- /*
- * It's important to daemonize here to close unused FDs.
- * The write fd from pipe is already opened by the caller,
- * so it's fine to clear all files here
- */
- cfs_daemonize("mdc_clg_send_thread");
-
OBD_ALLOC(cs->cs_buf, CR_MAXSIZE);
if (cs->cs_buf == NULL)
GOTO(out, rc = -ENOMEM);
llog_cat_close(NULL, llh);
if (ctxt)
llog_ctxt_put(ctxt);
- if (cs->cs_buf)
- OBD_FREE(cs->cs_buf, CR_MAXSIZE);
- OBD_FREE_PTR(cs);
- /* detach from parent process so we get cleaned up */
- cfs_daemonize("cl_send");
- return rc;
+ if (cs->cs_buf)
+ OBD_FREE(cs->cs_buf, CR_MAXSIZE);
+ OBD_FREE_PTR(cs);
+ return rc;
}
static int mdc_ioc_changelog_send(struct obd_device *obd,
cs->cs_fp = fget(icc->icc_id);
cs->cs_flags = icc->icc_flags;
- /* New thread because we should return to user app before
- writing into our pipe */
- rc = cfs_create_thread(mdc_changelog_send_thread, cs, CFS_DAEMON_FLAGS);
- if (rc >= 0) {
- CDEBUG(D_CHANGELOG, "start changelog thread: %d\n", rc);
- return 0;
- }
+ /*
+ * New thread because we should return to user app before
+ * writing into our pipe
+ */
+ rc = PTR_ERR(kthread_run(mdc_changelog_send_thread, cs,
+ "mdc_clg_send_thread"));
+ if (!IS_ERR_VALUE(rc)) {
+ CDEBUG(D_CHANGELOG, "start changelog thread\n");
+ return 0;
+ }
CERROR("Failed to start changelog thread: %d\n", rc);
OBD_FREE_PTR(cs);
int rc;
ENTRY;
- cfs_daemonize_ctxt("mdt_ck");
+ unshare_fs_struct();
cfs_block_allsigs();
thread_set_flags(thread, SVC_RUNNING);
int mdt_ck_thread_start(struct mdt_device *mdt)
{
- struct ptlrpc_thread *thread = &mdt->mdt_ck_thread;
- int rc;
-
- cfs_waitq_init(&thread->t_ctl_waitq);
- rc = cfs_create_thread(mdt_ck_thread_main, mdt, CFS_DAEMON_FLAGS);
- if (rc < 0) {
- CERROR("cannot start mdt_ck thread, rc = %d\n", rc);
- return rc;
- }
-
- l_wait_condition(thread->t_ctl_waitq, thread_is_running(thread));
- return 0;
+ struct ptlrpc_thread *thread = &mdt->mdt_ck_thread;
+ cfs_task_t *task;
+
+ cfs_waitq_init(&thread->t_ctl_waitq);
+ task = kthread_run(mdt_ck_thread_main, mdt, "mdt_ck");
+ if (IS_ERR(task)) {
+ CERROR("cannot start mdt_ck thread, rc = %ld\n", PTR_ERR(task));
+ return PTR_ERR(task);
+ }
+
+ l_wait_condition(thread->t_ctl_waitq, thread_is_running(thread));
+ return 0;
}
void mdt_ck_thread_stop(struct mdt_device *mdt)
static int mgc_requeue_thread(void *data)
{
- char name[] = "ll_cfg_requeue";
int rc = 0;
ENTRY;
- cfs_daemonize(name);
-
CDEBUG(D_MGC, "Starting requeue thread\n");
/* Keep trying failed locks periodically */
sptlrpc_lprocfs_cliobd_attach(obd);
if (cfs_atomic_inc_return(&mgc_count) == 1) {
- rq_state = 0;
- cfs_waitq_init(&rq_waitq);
-
- /* start requeue thread */
- rc = cfs_create_thread(mgc_requeue_thread, NULL,
- CFS_DAEMON_FLAGS);
- if (rc < 0) {
- CERROR("%s: Cannot start requeue thread (%d),"
- "no more log updates!\n",
- obd->obd_name, rc);
- GOTO(err_cleanup, rc);
- }
- /* rc is the pid of mgc_requeue_thread. */
- rc = 0;
+ rq_state = 0;
+ cfs_waitq_init(&rq_waitq);
+
+ /* start requeue thread */
+ rc = PTR_ERR(kthread_run(mgc_requeue_thread, NULL,
+ "ll_cfg_requeue"));
+ if (IS_ERR_VALUE(rc)) {
+ CERROR("%s: Cannot start requeue thread (%d),"
+ "no more log updates!\n",
+ obd->obd_name, rc);
+ GOTO(err_cleanup, rc);
+ }
+		/* on success rc holds PTR_ERR() of a valid task, not an
+		 * error; reset it so the caller sees success */
+ rc = 0;
}
RETURN(rc);
LASSERTF(sizeof(name) < 32, "name is too large to be in stack.\n");
sprintf(name, "mgs_%s_notify", fsdb->fsdb_name);
- cfs_daemonize(name);
complete(&fsdb->fsdb_notify_comp);
int mgs_ir_init_fs(const struct lu_env *env, struct mgs_device *mgs,
struct fs_db *fsdb)
{
- int rc;
+ cfs_task_t *task;
if (!ir_timeout)
ir_timeout = OBD_IR_MGS_TIMEOUT;
cfs_atomic_set(&fsdb->fsdb_notify_phase, 0);
cfs_waitq_init(&fsdb->fsdb_notify_waitq);
init_completion(&fsdb->fsdb_notify_comp);
- rc = cfs_create_thread(mgs_ir_notify, fsdb, CFS_DAEMON_FLAGS);
- if (rc > 0)
+
+ task = kthread_run(mgs_ir_notify, fsdb,
+ "mgs_%s_notify", fsdb->fsdb_name);
+ if (!IS_ERR(task))
wait_for_completion(&fsdb->fsdb_notify_comp);
- else
- CERROR("Start notify thread error %d\n", rc);
+ else
+ CERROR("Start notify thread error %ld\n", PTR_ERR(task));
mgs_nidtbl_init_fs(env, fsdb);
return 0;
*/
static int obd_zombie_impexp_thread(void *unused)
{
- int rc;
-
- rc = cfs_daemonize_ctxt("obd_zombid");
- if (rc != 0) {
- complete(&obd_zombie_start);
- RETURN(rc);
- }
-
+ unshare_fs_struct();
complete(&obd_zombie_start);
obd_zombie_pid = cfs_curproc_pid();
*/
int obd_zombie_impexp_init(void)
{
- int rc;
+#ifdef __KERNEL__
+ cfs_task_t *task;
+#endif
CFS_INIT_LIST_HEAD(&obd_zombie_imports);
CFS_INIT_LIST_HEAD(&obd_zombie_exports);
obd_zombie_pid = 0;
#ifdef __KERNEL__
- rc = cfs_create_thread(obd_zombie_impexp_thread, NULL, 0);
- if (rc < 0)
- RETURN(rc);
+ task = kthread_run(obd_zombie_impexp_thread, NULL, "obd_zombid");
+ if (IS_ERR(task))
+ RETURN(PTR_ERR(task));
wait_for_completion(&obd_zombie_start);
#else
obd_zombie_impexp_idle_cb =
liblustre_register_idle_callback("obd_zombi_impexp_check",
&obd_zombie_impexp_check, NULL);
- rc = 0;
#endif
- RETURN(rc);
+ RETURN(0);
}
/**
* stop destroy zombie import/export thread
struct lu_env env;
int rc;
- cfs_daemonize_ctxt("llog_process_thread");
+ unshare_fs_struct();
/* client env has no keys, tags is just 0 */
rc = lu_env_init(&env, LCT_LOCAL | LCT_MG_THREAD);
* init the new one in llog_process_thread_daemonize. */
lpi->lpi_env = NULL;
init_completion(&lpi->lpi_completion);
- rc = cfs_create_thread(llog_process_thread_daemonize, lpi,
- CFS_DAEMON_FLAGS);
- if (rc < 0) {
+ rc = PTR_ERR(kthread_run(llog_process_thread_daemonize, lpi,
+ "llog_process_thread"));
+ if (IS_ERR_VALUE(rc)) {
CERROR("%s: cannot start thread: rc = %d\n",
loghandle->lgh_ctxt->loc_obd->obd_name, rc);
OBD_FREE_PTR(lpi);
int rc;
ENTRY;
- cfs_daemonize("OI_scrub");
rc = lu_env_init(&env, LCT_DT_THREAD);
if (rc != 0) {
CERROR("%.16s: OI scrub, fail to init env, rc = %d\n",
scrub->os_start_flags = flags;
thread_set_flags(thread, 0);
- rc = cfs_create_thread(osd_scrub_main, dev, 0);
- if (rc < 0) {
+ rc = PTR_ERR(kthread_run(osd_scrub_main, dev, "OI_scrub"));
+ if (IS_ERR_VALUE(rc)) {
CERROR("%.16s: cannot start iteration thread, rc = %d\n",
LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name, rc);
RETURN(rc);
struct osp_device *d = _arg;
struct ptlrpc_thread *thread = &d->opd_pre_thread;
struct l_wait_info lwi = { 0 };
- char pname[16];
struct lu_env env;
int rc;
ENTRY;
- sprintf(pname, "osp-pre-%u", d->opd_index);
- cfs_daemonize(pname);
-
rc = lu_env_init(&env, d->opd_dt_dev.dd_lu_dev.ld_type->ldt_ctx_tags);
if (rc) {
CERROR("%s: init env error: rc = %d\n", d->opd_obd->obd_name,
int osp_init_precreate(struct osp_device *d)
{
struct l_wait_info lwi = { 0 };
- int rc;
+ cfs_task_t *task;
ENTRY;
/*
* start thread handling precreation and statfs updates
*/
- rc = cfs_create_thread(osp_precreate_thread, d, 0);
- if (rc < 0) {
- CERROR("can't start precreate thread %d\n", rc);
- RETURN(rc);
+ task = kthread_run(osp_precreate_thread, d,
+ "osp-pre-%u", d->opd_index);
+ if (IS_ERR(task)) {
+ CERROR("can't start precreate thread %ld\n", PTR_ERR(task));
+ RETURN(PTR_ERR(task));
}
l_wait_event(d->opd_pre_thread.t_ctl_waitq,
struct llog_handle *llh;
struct lu_env env;
int rc, count;
- char pname[16];
ENTRY;
RETURN(rc);
}
- sprintf(pname, "osp-syn-%u", d->opd_index);
- cfs_daemonize(pname);
-
spin_lock(&d->opd_syn_lock);
thread->t_flags = SVC_RUNNING;
spin_unlock(&d->opd_syn_lock);
cfs_waitq_init(&d->opd_syn_thread.t_ctl_waitq);
CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there);
- rc = cfs_create_thread(osp_sync_thread, d, 0);
- if (rc < 0) {
+ rc = PTR_ERR(kthread_run(osp_sync_thread, d,
+ "osp-syn-%u", d->opd_index));
+ if (IS_ERR_VALUE(rc)) {
CERROR("%s: can't start sync thread: rc = %d\n",
d->opd_obd->obd_name, rc);
GOTO(err_llog, rc);
ENTRY;
- cfs_daemonize_ctxt("ll_imp_inval");
+ unshare_fs_struct();
CDEBUG(D_HA, "thread invalidate import %s to %s@%s\n",
imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
spin_unlock(&imp->imp_lock);
#ifdef __KERNEL__
- /* bug 17802: XXX client_disconnect_export vs connect request
- * race. if client will evicted at this time, we start
- * invalidate thread without reference to import and import can
- * be freed at same time. */
- class_import_get(imp);
- rc = cfs_create_thread(ptlrpc_invalidate_import_thread, imp,
- CFS_DAEMON_FLAGS);
- if (rc < 0) {
- class_import_put(imp);
- CERROR("error starting invalidate thread: %d\n", rc);
- } else {
- rc = 0;
- }
- RETURN(rc);
+ {
+ cfs_task_t *task;
+ /* bug 17802: XXX client_disconnect_export vs connect request
+ * race. if client will evicted at this time, we start
+ * invalidate thread without reference to import and import can
+ * be freed at same time. */
+ class_import_get(imp);
+ task = kthread_run(ptlrpc_invalidate_import_thread, imp,
+ "ll_imp_inval");
+		if (IS_ERR(task)) {
+			class_import_put(imp);
+			rc = PTR_ERR(task);
+			CERROR("error starting invalidate thread: %d\n", rc);
+ } else {
+ rc = 0;
+ }
+ RETURN(rc);
+ }
#else
ptlrpc_invalidate_import(imp);
struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
ENTRY;
- cfs_daemonize(thread->t_name);
-
/* Record that the thread is running */
thread_set_flags(thread, SVC_RUNNING);
cfs_waitq_signal(&thread->t_ctl_waitq);
strcpy(pinger_thread.t_name, "ll_ping");
-	/* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
-	 * just drop the VM and FILES in cfs_daemonize_ctxt() right away. */
-	rc = cfs_create_thread(ptlrpc_pinger_main,
-			       &pinger_thread, CFS_DAEMON_FLAGS);
-	if (rc < 0) {
+	rc = PTR_ERR(kthread_run(ptlrpc_pinger_main,
+				 &pinger_thread, pinger_thread.t_name));
+	if (IS_ERR_VALUE(rc)) {
CERROR("cannot start thread: %d\n", rc);
RETURN(rc);
}
time_t expire_time;
ENTRY;
- cfs_daemonize_ctxt("ll_evictor");
+ unshare_fs_struct();
CDEBUG(D_HA, "Starting Ping Evictor\n");
pet_state = PET_READY;
void ping_evictor_start(void)
{
- int rc;
+ cfs_task_t *task;
- if (++pet_refcount > 1)
- return;
+ if (++pet_refcount > 1)
+ return;
- cfs_waitq_init(&pet_waitq);
+ cfs_waitq_init(&pet_waitq);
- rc = cfs_create_thread(ping_evictor_main, NULL, CFS_DAEMON_FLAGS);
- if (rc < 0) {
- pet_refcount--;
- CERROR("Cannot start ping evictor thread: %d\n", rc);
- }
+ task = kthread_run(ping_evictor_main, NULL, "ll_evictor");
+ if (IS_ERR(task)) {
+ pet_refcount--;
+ CERROR("Cannot start ping evictor thread: %ld\n",
+ PTR_ERR(task));
+ }
}
EXPORT_SYMBOL(ping_evictor_start);
int rc, exit = 0;
ENTRY;
- cfs_daemonize_ctxt(pc->pc_name);
+ unshare_fs_struct();
#if defined(CONFIG_SMP) && \
(defined(HAVE_CPUMASK_OF_NODE) || defined(HAVE_NODE_TO_CPUMASK))
if (test_bit(LIOD_BIND, &pc->pc_flags)) {
env = 1;
#ifdef __KERNEL__
- if (index >= 0) {
- rc = ptlrpcd_bind(index, max);
- if (rc < 0)
- GOTO(out, rc);
- }
+ {
+ cfs_task_t *task;
+ if (index >= 0) {
+ rc = ptlrpcd_bind(index, max);
+ if (rc < 0)
+ GOTO(out, rc);
+ }
- rc = cfs_create_thread(ptlrpcd, pc, 0);
- if (rc < 0)
- GOTO(out, rc);
+ task = kthread_run(ptlrpcd, pc, pc->pc_name);
+ if (IS_ERR(task))
+ GOTO(out, rc = PTR_ERR(task));
- rc = 0;
- wait_for_completion(&pc->pc_starting);
+ rc = 0;
+ wait_for_completion(&pc->pc_starting);
+ }
#else
pc->pc_wait_callback =
liblustre_register_wait_callback("ptlrpcd_check_async_rpcs",
struct ptlrpc_thread *thread = (struct ptlrpc_thread *) arg;
struct l_wait_info lwi;
- cfs_daemonize_ctxt("sptlrpc_gc");
+ unshare_fs_struct();
/* Record that the thread is running */
thread_set_flags(thread, SVC_RUNNING);
int sptlrpc_gc_init(void)
{
struct l_wait_info lwi = { 0 };
- int rc;
+ cfs_task_t *task;
mutex_init(&sec_gc_mutex);
spin_lock_init(&sec_gc_list_lock);
memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
cfs_waitq_init(&sec_gc_thread.t_ctl_waitq);
- rc = cfs_create_thread(sec_gc_main, &sec_gc_thread, CFS_DAEMON_FLAGS);
- if (rc < 0) {
- CERROR("can't start gc thread: %d\n", rc);
- return rc;
+ task = kthread_run(sec_gc_main, &sec_gc_thread, "sptlrpc_gc");
+ if (IS_ERR(task)) {
+ CERROR("can't start gc thread: %ld\n", PTR_ERR(task));
+ return PTR_ERR(task);
}
l_wait_event(sec_gc_thread.t_ctl_waitq,
ENTRY;
thread->t_pid = cfs_curproc_pid();
- cfs_daemonize_ctxt(thread->t_name);
+ unshare_fs_struct();
/* NB: we will call cfs_cpt_bind() for all threads, because we
* might want to run lustre server only on a subset of system CPUs,
snprintf(threadname, sizeof(threadname), "ptlrpc_hr%02d_%03d",
hrp->hrp_cpt, hrt->hrt_id);
- cfs_daemonize_ctxt(threadname);
+ unshare_fs_struct();
rc = cfs_cpt_bind(ptlrpc_hr.hr_cpt_table, hrp->hrp_cpt);
if (rc != 0) {
int rc = 0;
for (j = 0; j < hrp->hrp_nthrs; j++) {
- rc = cfs_create_thread(ptlrpc_hr_main,
- &hrp->hrp_thrs[j],
- CLONE_VM | CLONE_FILES);
- if (rc < 0)
+			struct ptlrpc_hr_thread *hrt = &hrp->hrp_thrs[j];
+
+			rc = PTR_ERR(kthread_run(ptlrpc_hr_main, hrt,
+						 "ptlrpc_hr%02d_%03d",
+						 hrp->hrp_cpt, hrt->hrt_id));
+ if (IS_ERR_VALUE(rc))
break;
}
cfs_wait_event(ptlrpc_hr.hr_waitq,
cfs_atomic_read(&hrp->hrp_nstarted) == j);
- if (rc >= 0)
+ if (!IS_ERR_VALUE(rc))
continue;
CERROR("Reply handling thread %d:%d Failed on starting: "
}
CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name);
- /*
- * CLONE_VM and CLONE_FILES just avoid a needless copy, because we
- * just drop the VM and FILES in cfs_daemonize_ctxt() right away.
- */
- rc = cfs_create_thread(ptlrpc_main, thread, CFS_DAEMON_FLAGS);
- if (rc < 0) {
+ rc = PTR_ERR(kthread_run(ptlrpc_main, thread, thread->t_name));
+ if (IS_ERR_VALUE(rc)) {
CERROR("cannot start thread '%s': rc %d\n",
thread->t_name, rc);
spin_lock(&svcpt->scp_lock);
struct l_wait_info lwi = { 0 };
struct lu_env *env;
struct lquota_entry *lqe, *tmp;
- char pname[MTI_NAME_MAXLEN];
int rc;
ENTRY;
RETURN(rc);
}
- snprintf(pname, MTI_NAME_MAXLEN, "qmt_reba_%s", qmt->qmt_svname);
- cfs_daemonize(pname);
-
thread_set_flags(thread, SVC_RUNNING);
cfs_waitq_signal(&thread->t_ctl_waitq);
{
struct ptlrpc_thread *thread = &qmt->qmt_reba_thread;
struct l_wait_info lwi = { 0 };
- int rc;
+ cfs_task_t *task;
ENTRY;
- rc = cfs_create_thread(qmt_reba_thread, (void *)qmt, 0);
- if (rc < 0) {
- CERROR("%s: failed to start rebalance thread (%d)\n",
- qmt->qmt_svname, rc);
+ task = kthread_run(qmt_reba_thread, (void *)qmt,
+ "qmt_reba_%s", qmt->qmt_svname);
+ if (IS_ERR(task)) {
+ CERROR("%s: failed to start rebalance thread (%ld)\n",
+ qmt->qmt_svname, PTR_ERR(task));
thread_set_flags(thread, SVC_STOPPED);
- RETURN(rc);
+ RETURN(PTR_ERR(task));
}
l_wait_event(thread->t_ctl_waitq,
int rc;
ENTRY;
- cfs_daemonize("qsd_reint");
-
CDEBUG(D_QUOTA, "%s: Starting reintegration thread for "DFID"\n",
qsd->qsd_svname, PFID(&qqi->qqi_fid));
RETURN(0);
}
- rc = cfs_create_thread(qsd_reint_main, (void *)qqi, 0);
- if (rc < 0) {
+ rc = PTR_ERR(kthread_run(qsd_reint_main, (void *)qqi, "qsd_reint"));
+ if (IS_ERR_VALUE(rc)) {
thread_set_flags(thread, SVC_STOPPED);
write_lock(&qsd->qsd_lock);
qqi->qqi_reint = 0;
struct l_wait_info lwi;
cfs_list_t queue;
struct qsd_upd_rec *upd, *n;
- char pname[MTI_NAME_MAXLEN];
struct lu_env *env;
int qtype, rc = 0;
bool uptodate;
RETURN(rc);
}
- snprintf(pname, MTI_NAME_MAXLEN, "lquota_wb_%s", qsd->qsd_svname);
- cfs_daemonize(pname);
-
thread_set_flags(thread, SVC_RUNNING);
cfs_waitq_signal(&thread->t_ctl_waitq);
{
struct ptlrpc_thread *thread = &qsd->qsd_upd_thread;
struct l_wait_info lwi = { 0 };
- int rc;
+ cfs_task_t *task;
ENTRY;
- rc = cfs_create_thread(qsd_upd_thread, (void *)qsd, 0);
- if (rc < 0) {
- CERROR("Fail to start quota update thread. rc: %d\n", rc);
+ task = kthread_run(qsd_upd_thread, (void *)qsd,
+ "lquota_wb_%s", qsd->qsd_svname);
+ if (IS_ERR(task)) {
+ CERROR("Fail to start quota update thread. rc: %ld\n",
+ PTR_ERR(task));
thread_set_flags(thread, SVC_STOPPED);
- RETURN(rc);
+ RETURN(PTR_ERR(task));
}
l_wait_event(thread->t_ctl_waitq,