s/\bcfs_schedule_timeout_and_set_state\b/schedule_timeout_and_set_state/g
s/\bCFS_MAX_SCHEDULE_TIMEOUT\b/MAX_SCHEDULE_TIMEOUT/g
s/\bcfs_task_state_t\b/long/g
+
+# Kernel thread
+s/\bcfs_kthread_run\b/kthread_run/g
+/#[ \t]*define[ \t]*\bkthread_run\b.*\bkthread_run\b/d
+#s/\bcfs_thread_t\b/thread_t/g
+s/\bCFS_DAEMON_FLAGS\b/DAEMON_FLAGS/g
+#s/\bcfs_create_thread\b/create_thread/g
+# Task struct
+s/\bcfs_task_t\b/struct task_struct/g
+s/\bcfs_current()/current/g
+/#[ \t]*define[ \t]*\bcurrent\b[ \t]*\bcurrent\b/d
+s/\bcfs_task_lock\b/task_lock/g
+/#[ \t]*define[ \t]*\btask_lock\b *( *\w* *)[ \t]*\btask_lock\b *( *\w* *)/d
+s/\bcfs_task_unlock\b/task_unlock/g
+/#[ \t]*define[ \t]*\btask_unlock\b *( *\w* *)[ \t]*\btask_unlock\b *( *\w* *)/d
+s/\bCFS_DECL_JOURNAL_DATA\b/DECL_JOURNAL_DATA/g
+s/\bCFS_PUSH_JOURNAL\b/PUSH_JOURNAL/g
+s/\bCFS_POP_JOURNAL\b/POP_JOURNAL/g
+# Signal
+s/\bcfs_sigset_t\b/sigset_t/g
+/typedef[ \t]*\bsigset_t\b[ \t]*\bsigset_t\b/d
+# Timer
+s/\bcfs_timer_t\b/struct timer_list/g
+s/\bCFS_MAX_SCHEDULE_TIMEOUT\b/MAX_SCHEDULE_TIMEOUT/g
+/#[ \t]*define[ \t]*\bMAX_SCHEDULE_TIMEOUT\b[ \t]*\bMAX_SCHEDULE_TIMEOUT\b/d
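Applied over the tree (e.g. with sed -f), the rules above collapse each libcfs wrapper to its native kernel name and then delete the compatibility defines that would otherwise expand to themselves. A minimal before/after sketch of the effect on a caller (illustrative only; worker_fn is a placeholder):

	/* before: cfs_task_t *task = cfs_kthread_run(worker_fn, NULL, "wrk_%d", 0); */
	struct task_struct *task = kthread_run(worker_fn, NULL, "wrk_%d", 0);

	if (IS_ERR(task))	/* unchanged: callers already used IS_ERR()/PTR_ERR() */
		return PTR_ERR(task);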
#ifdef __DARWIN8__
-typedef struct {} cfs_task_t;
-#define cfs_current() ((cfs_task_t *)current_thread())
+struct task_struct {};
+#define current ((struct task_struct *)current_thread())
#else /* !__DARWIN8__ */
-typedef struct uthread cfs_task_t;
+#define task_struct uthread
#define current_uthread() ((struct uthread *)get_bsdthread_info(current_act()))
-#define cfs_current() current_uthread()
+#define current current_uthread()
#endif /* !__DARWIN8__ */
-#define cfs_task_lock(t) do {;} while (0)
-#define cfs_task_unlock(t) do {;} while (0)
+#define task_lock(t) do {;} while (0)
+#define task_unlock(t) do {;} while (0)
#define set_current_state(s) do {;} while (0)
-#define CFS_DECL_JOURNAL_DATA
-#define CFS_PUSH_JOURNAL do {;} while(0)
-#define CFS_POP_JOURNAL do {;} while(0)
+#define DECL_JOURNAL_DATA
+#define PUSH_JOURNAL do {;} while(0)
+#define POP_JOURNAL do {;} while(0)
/*
* Kernel thread:
#define CLONE_SIGNAL (CLONE_SIGHAND | CLONE_THREAD)
-extern cfs_task_t kthread_run(cfs_thread_t func, void *arg,
+extern struct task_struct *kthread_run(cfs_thread_t func, void *arg,
const char namefmt[], ...);
/*
/*
* Signal
*/
-typedef sigset_t cfs_sigset_t;
/*
* Timer
*/
-typedef struct cfs_timer {
+struct timer_list {
struct ktimer t;
-} cfs_timer_t;
+};
#define cfs_init_timer(t) do {} while(0)
-void cfs_timer_init(struct cfs_timer *t, void (*func)(unsigned long), void *arg);
-void cfs_timer_done(struct cfs_timer *t);
-void cfs_timer_arm(struct cfs_timer *t, cfs_time_t deadline);
-void cfs_timer_disarm(struct cfs_timer *t);
-int cfs_timer_is_armed(struct cfs_timer *t);
+void cfs_timer_init(struct timer_list *t, void (*func)(unsigned long), void *arg);
+void cfs_timer_done(struct timer_list *t);
+void cfs_timer_arm(struct timer_list *t, cfs_time_t deadline);
+void cfs_timer_disarm(struct timer_list *t);
+int cfs_timer_is_armed(struct timer_list *t);
-cfs_time_t cfs_timer_deadline(struct cfs_timer *t);
+cfs_time_t cfs_timer_deadline(struct timer_list *t);
/*
* Ioctl
* Defined by platform
*/
int unshare_fs_struct(void);
-cfs_sigset_t cfs_get_blocked_sigs(void);
-cfs_sigset_t cfs_block_allsigs(void);
-cfs_sigset_t cfs_block_sigs(unsigned long sigs);
-cfs_sigset_t cfs_block_sigsinv(unsigned long sigs);
-void cfs_restore_sigs(cfs_sigset_t);
+sigset_t cfs_get_blocked_sigs(void);
+sigset_t cfs_block_allsigs(void);
+sigset_t cfs_block_sigs(unsigned long sigs);
+sigset_t cfs_block_sigsinv(unsigned long sigs);
+void cfs_restore_sigs(sigset_t);
int cfs_signal_pending(void);
void cfs_clear_sigpending(void);
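These signal helpers are meant to be used as a save/restore pair around sections that must not be interrupted; lnet_ping_target_fini() further down follows exactly this shape. A minimal sketch (do_uninterruptible_work() is a placeholder):

	sigset_t blocked = cfs_block_allsigs();	/* mask every signal, keep old set */

	do_uninterruptible_work();		/* placeholder critical section */
	cfs_restore_sigs(blocked);		/* reinstate the caller's mask */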
*/
typedef void (cfs_timer_func_t)(ulong_ptr_t);
-void cfs_init_timer(cfs_timer_t *t);
-void cfs_timer_init(cfs_timer_t *t, cfs_timer_func_t *func, void *arg);
-void cfs_timer_done(cfs_timer_t *t);
-void cfs_timer_arm(cfs_timer_t *t, cfs_time_t deadline);
-void cfs_timer_disarm(cfs_timer_t *t);
-int cfs_timer_is_armed(cfs_timer_t *t);
-cfs_time_t cfs_timer_deadline(cfs_timer_t *t);
+void cfs_init_timer(struct timer_list *t);
+void cfs_timer_init(struct timer_list *t, cfs_timer_func_t *func, void *arg);
+void cfs_timer_done(struct timer_list *t);
+void cfs_timer_arm(struct timer_list *t, cfs_time_t deadline);
+void cfs_timer_disarm(struct timer_list *t);
+int cfs_timer_is_armed(struct timer_list *t);
+cfs_time_t cfs_timer_deadline(struct timer_list *t);
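The timer API pairs one-time setup (cfs_timer_init) with arm/disarm around a deadline expressed as a cfs_time_t. A minimal usage sketch, assuming cfs_time_shift() from the same time API and a placeholder callback:

	static void my_timer_cb(ulong_ptr_t data)
	{
		/* runs once the deadline passes */
	}

	static void timer_example(void)
	{
		struct timer_list t;

		cfs_timer_init(&t, my_timer_cb, NULL);
		cfs_timer_arm(&t, cfs_time_shift(10));	/* ~10 seconds out */
		if (cfs_timer_is_armed(&t))
			cfs_timer_disarm(&t);
		cfs_timer_done(&t);
	}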
/*
* Memory
#define ntohs(x) ___ntohs(x)
#endif
-void libcfs_debug_dumpstack(cfs_task_t *tsk);
+void libcfs_debug_dumpstack(struct task_struct *tsk);
void libcfs_run_upcall(char **argv);
void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *);
void libcfs_debug_dumplog(void);
schedule_timeout(ticks); \
}
-/*
- * Task struct
- */
-typedef struct task_struct cfs_task_t;
-#define cfs_current() current
-#define cfs_task_lock(t) task_lock(t)
-#define cfs_task_unlock(t) task_unlock(t)
-#define CFS_DECL_JOURNAL_DATA void *journal_info
-#define CFS_PUSH_JOURNAL do { \
+#define DECL_JOURNAL_DATA void *journal_info
+#define PUSH_JOURNAL do { \
journal_info = current->journal_info; \
current->journal_info = NULL; \
} while(0)
-#define CFS_POP_JOURNAL do { \
+#define POP_JOURNAL do { \
current->journal_info = journal_info; \
} while(0)
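PUSH_JOURNAL/POP_JOURNAL bracket filesystem I/O issued from a context that may already hold a journal handle, parking current->journal_info so the I/O cannot nest inside the running transaction; libcfs_debug_dumplog_internal() below uses this exact shape. A minimal sketch:

	static void dump_with_journal_parked(void)
	{
		DECL_JOURNAL_DATA;

		PUSH_JOURNAL;	/* save and clear the transaction handle */
		/* ... write the debug log to a file ... */
		POP_JOURNAL;	/* reattach it before returning */
	}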
module_exit(fini)
/*
- * Signal
- */
-typedef sigset_t cfs_sigset_t;
-
-/*
- * Timer
- */
-typedef struct timer_list cfs_timer_t;
-
-/*
* atomic
*/
/*
* Signal
*/
-typedef sigset_t cfs_sigset_t;
/*
* Timer
*/
-typedef struct {
- cfs_list_t tl_list;
- void (*function)(ulong_ptr_t unused);
- ulong_ptr_t data;
- long expires;
-} cfs_timer_t;
+struct timer_list {
+ cfs_list_t tl_list;
+ void (*function)(ulong_ptr_t unused);
+ ulong_ptr_t data;
+ long expires;
+};
#define cfs_in_interrupt() (0)
#define cfs_sigfillset(l) do {} while (0)
#define cfs_recalc_sigpending(l) do {} while (0)
-/* Fine, crash, but stop giving me compile warnings */
-#define cfs_kthread_run(fn,d,fmt,...) LBUG()
-#define CFS_DAEMON_FLAGS 0
+#define DAEMON_FLAGS 0
#define L1_CACHE_ALIGN(x) (x)
#define NGROUPS 1
#define CFS_CURPROC_COMM_MAX (16)
-typedef struct task_sruct{
+struct task_struct {
mode_t umask;
sigset_t blocked;
char comm[CFS_CURPROC_COMM_MAX];
void *journal_info;
struct vfsmount *fs;
-} cfs_task_t;
+};
-static inline void task_lock(cfs_task_t *t)
+static inline void task_lock(struct task_struct *t)
{
}
-static inline void task_unlock(cfs_task_t *t)
+static inline void task_unlock(struct task_struct *t)
{
}
KIRQL irql; /* irql for rwlock ... */
- cfs_task_t task; /* linux task part */
+ struct task_struct task; /* linux task part */
} TASK_SLOT, *PTASK_SLOT;
-#define current cfs_current()
#define set_current_state(s) do {;} while (0)
#define wait_event(wq, condition) \
int init_task_manager();
void cleanup_task_manager();
-cfs_task_t * cfs_current();
-int wake_up_process(cfs_task_t * task);
+/* cfs_current() becomes current_task(); keep "current" usable as a bare
+ * expression, matching the Linux API. */
+struct task_struct *current_task(void);
+#define current (current_task())
+int wake_up_process(struct task_struct *task);
void sleep_on(wait_queue_head_t *waitq);
#define cfs_might_sleep() do {} while(0)
-#define CFS_DECL_JOURNAL_DATA
-#define CFS_PUSH_JOURNAL do {;} while(0)
-#define CFS_POP_JOURNAL do {;} while(0)
+#define DECL_JOURNAL_DATA
+#define PUSH_JOURNAL do {;} while(0)
+#define POP_JOURNAL do {;} while(0)
/* module related definitions */
#define CFS_TIMER_FLAG_INITED 0x00000001 // Initialized already
#define CFS_TIMER_FLAG_TIMERED 0x00000002 // KeSetTimer is called
-typedef struct cfs_timer {
+struct timer_list {
KSPIN_LOCK Lock;
void (*proc)(ulong_ptr_t);
void * arg;
-} cfs_timer_t;
+};
/*
* libcfs globals initialization/cleanup
* sigset_t routines
*/
-typedef sigset_t cfs_sigset_t;
#define sigaddset(what,sig) (*(what) |= (1<<(sig)), 0)
#define sigdelset(what,sig) (*(what) &= ~(1<<(sig)), 0)
#define sigemptyset(what) (*(what) = 0, 0)
#define sigismember(what,sig) (((*(what)) & (1<<(sig))) != 0)
static __inline int
-sigprocmask(int sig, cfs_sigset_t *w1, cfs_sigset_t *w2) {
+sigprocmask(int sig, sigset_t *w1, sigset_t *w2) {
return 0;
}
static __inline int
-sigpending(cfs_sigset_t *what) {
+sigpending(sigset_t *what) {
return 0;
}
#include <libcfs/libcfs.h>
#include "tracefile.h"
-void libcfs_debug_dumpstack(cfs_task_t *tsk)
+void libcfs_debug_dumpstack(struct task_struct *tsk)
{
return;
}
extern thread_t kernel_thread(task_t task, void (*start)(void));
-cfs_task_t
+struct task_struct *
kthread_run(cfs_thread_t func, void *arg, const char namefmt[], ...)
{
int ret = 0;
} else {
ret = -1;
}
- return (cfs_task_t)((long)ret);
+	return (struct task_struct *)((long)ret);
}
/*
* without patching kernel.
* Should we provide these functions in xnu?
*
- * These signal functions almost do nothing now, we
+ * These signal functions almost do nothing now, we
* need to investigate more about signal in Darwin.
*/
extern int block_procsigmask(struct proc *p, int bit);
-cfs_sigset_t cfs_block_allsigs()
+sigset_t cfs_block_allsigs()
{
- cfs_sigset_t old = 0;
+ sigset_t old = 0;
#ifdef __DARWIN8__
#else
- block_procsigmask(current_proc(), -1);
+ block_procsigmask(current_proc(), -1);
#endif
- return old;
+ return old;
}
-cfs_sigset_t cfs_block_sigs(unsigned long sigs)
+sigset_t cfs_block_sigs(unsigned long sigs)
{
- cfs_sigset_t old = 0;
+ sigset_t old = 0;
#ifdef __DARWIN8__
#else
block_procsigmask(current_proc(), sigs);
/* Block all signals except for the @sigs. It's only used in
* Linux kernel, just a dummy here. */
-cfs_sigset_t cfs_block_sigsinv(unsigned long sigs)
+sigset_t cfs_block_sigsinv(unsigned long sigs)
{
- cfs_sigset_t old = 0;
- return old;
+ sigset_t old = 0;
+ return old;
}
-void cfs_restore_sigs(cfs_sigset_t old)
+void cfs_restore_sigs(sigset_t old)
{
}
{
#ifdef __DARWIN8__
- extern int thread_issignal(proc_t, thread_t, sigset_t);
- return thread_issignal(current_proc(), current_thread(), (sigset_t)-1);
+ extern int thread_issignal(proc_t, thread_t, sigset_t);
+ return thread_issignal(current_proc(), current_thread(), (sigset_t)-1);
#else
- return SHOULDissignal(current_proc(), current_uthread())
+ return SHOULDissignal(current_proc(), current_uthread())
#endif
}
{
#ifdef __DARWIN8__
#else
- clear_procsiglist(current_proc(), -1);
+ clear_procsiglist(current_proc(), -1);
#endif
}
void lustre_cone_in(boolean_t *state, funnel_t **cone)
{
- *cone = thread_funnel_get();
- if (*cone == network_flock)
- thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
- else if (*cone == NULL)
- *state = thread_funnel_set(kernel_flock, TRUE);
+ *cone = thread_funnel_get();
+ if (*cone == network_flock)
+ thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
+ else if (*cone == NULL)
+ *state = thread_funnel_set(kernel_flock, TRUE);
}
void lustre_cone_ex(boolean_t state, funnel_t *cone)
{
- if (cone == network_flock)
- thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
- else if (cone == NULL)
- (void) thread_funnel_set(kernel_flock, state);
+ if (cone == network_flock)
+ thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
+ else if (cone == NULL)
+ (void) thread_funnel_set(kernel_flock, state);
}
void lustre_net_in(boolean_t *state, funnel_t **cone)
{
- *cone = thread_funnel_get();
- if (*cone == kernel_flock)
- thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
- else if (*cone == NULL)
- *state = thread_funnel_set(network_flock, TRUE);
+ *cone = thread_funnel_get();
+ if (*cone == kernel_flock)
+ thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
+ else if (*cone == NULL)
+ *state = thread_funnel_set(network_flock, TRUE);
}
void lustre_net_ex(boolean_t state, funnel_t *cone)
{
- if (cone == kernel_flock)
- thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
- else if (cone == NULL)
- (void) thread_funnel_set(network_flock, state);
+ if (cone == kernel_flock)
+ thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
+ else if (cone == NULL)
+ (void) thread_funnel_set(network_flock, state);
}
#endif /* !__DARWIN8__ */
}
void remove_wait_queue(struct cfs_waitq *waitq,
- struct cfs_waitlink *link)
+ struct cfs_waitlink *link)
{
ksleep_del(&waitq->wq_ksleep_chan, &link->wl_ksleep_link);
}
cfs_duration_t waitq_timedwait(struct cfs_waitlink *link,
long state,
- cfs_duration_t timeout)
+ cfs_duration_t timeout)
{
- return ksleep_timedwait(&link->wl_waitq->wq_ksleep_chan,
- state, timeout);
+ return ksleep_timedwait(&link->wl_waitq->wq_ksleep_chan,
+ state, timeout);
}
typedef void (*ktimer_func_t)(void *);
-void cfs_timer_init(cfs_timer_t *t, void (* func)(unsigned long), void *arg)
+void cfs_timer_init(struct timer_list *t, void (* func)(unsigned long), void *arg)
{
- ktimer_init(&t->t, (ktimer_func_t)func, arg);
+ ktimer_init(&t->t, (ktimer_func_t)func, arg);
}
-void cfs_timer_done(struct cfs_timer *t)
+void cfs_timer_done(struct timer_list *t)
{
- ktimer_done(&t->t);
+ ktimer_done(&t->t);
}
-void cfs_timer_arm(struct cfs_timer *t, cfs_time_t deadline)
+void cfs_timer_arm(struct timer_list *t, cfs_time_t deadline)
{
- ktimer_arm(&t->t, deadline);
+ ktimer_arm(&t->t, deadline);
}
-void cfs_timer_disarm(struct cfs_timer *t)
+void cfs_timer_disarm(struct timer_list *t)
{
- ktimer_disarm(&t->t);
+ ktimer_disarm(&t->t);
}
-int cfs_timer_is_armed(struct cfs_timer *t)
+int cfs_timer_is_armed(struct timer_list *t)
{
- return ktimer_is_armed(&t->t);
+ return ktimer_is_armed(&t->t);
}
-cfs_time_t cfs_timer_deadline(struct cfs_timer *t)
+cfs_time_t cfs_timer_deadline(struct timer_list *t)
{
- return ktimer_deadline(&t->t);
+ return ktimer_deadline(&t->t);
}
void cfs_enter_debugger(void)
{
#ifdef __DARWIN8__
- extern void Debugger(const char * reason);
- Debugger("CFS");
+ extern void Debugger(const char * reason);
+ Debugger("CFS");
#else
- extern void PE_enter_debugger(char *cause);
- PE_enter_debugger("CFS");
+ extern void PE_enter_debugger(char *cause);
+ PE_enter_debugger("CFS");
#endif
}
int cfs_online_cpus(void)
{
- int activecpu;
- size_t size;
+ int activecpu;
+ size_t size;
-#ifdef __DARWIN8__
- size = sizeof(int);
- sysctlbyname("hw.activecpu", &activecpu, &size, NULL, 0);
- return activecpu;
+#ifdef __DARWIN8__
+ size = sizeof(int);
+ sysctlbyname("hw.activecpu", &activecpu, &size, NULL, 0);
+ return activecpu;
#else
- host_basic_info_data_t hinfo;
- kern_return_t kret;
- int count = HOST_BASIC_INFO_COUNT;
+ host_basic_info_data_t hinfo;
+ kern_return_t kret;
+ int count = HOST_BASIC_INFO_COUNT;
#define BSD_HOST 1
- kret = host_info(BSD_HOST, HOST_BASIC_INFO, &hinfo, &count);
- if (kret == KERN_SUCCESS)
- return (hinfo.avail_cpus);
- return(-EINVAL);
+ kret = host_info(BSD_HOST, HOST_BASIC_INFO, &hinfo, &count);
+ if (kret == KERN_SUCCESS)
+ return (hinfo.avail_cpus);
+ return(-EINVAL);
#endif
}
int cfs_ncpus(void)
{
- int ncpu;
- size_t size;
+ int ncpu;
+ size_t size;
- size = sizeof(int);
+ size = sizeof(int);
- sysctlbyname("hw.ncpu", &ncpu, &size, NULL, 0);
- return ncpu;
+ sysctlbyname("hw.ncpu", &ncpu, &size, NULL, 0);
+ return ncpu;
}
*/
void libcfs_debug_dumplog_internal(void *arg)
{
- CFS_DECL_JOURNAL_DATA;
+ DECL_JOURNAL_DATA;
- CFS_PUSH_JOURNAL;
+ PUSH_JOURNAL;
if (strncmp(libcfs_debug_file_path_arr, "NONE", 4) != 0) {
snprintf(debug_file_name, sizeof(debug_file_name) - 1,
cfs_tracefile_dump_all_pages(debug_file_name);
libcfs_run_debug_log_upcall(debug_file_name);
}
- CFS_POP_JOURNAL;
+ POP_JOURNAL;
}
int libcfs_debug_dumplog_thread(void *arg)
void libcfs_debug_dumplog(void)
{
wait_queue_t wait;
- cfs_task_t *dumper;
+ struct task_struct *dumper;
ENTRY;
/* we're being careful to ensure that the kernel thread is
if (cpu_isset(i, *cpumask))
continue;
- rc = set_cpus_allowed_ptr(cfs_current(), cpumask);
+ rc = set_cpus_allowed_ptr(current, cpumask);
set_mems_allowed(*nodemask);
if (rc == 0)
schedule(); /* switch to allowed CPU */
#endif
}
-cfs_task_t *libcfs_current(void)
+struct task_struct *libcfs_current(void)
{
CWARN("current task struct is %p\n", current);
return current;
#include <asm/kgdb.h>
#endif
-void cfs_init_timer(cfs_timer_t *t)
+void cfs_init_timer(struct timer_list *t)
{
- init_timer(t);
+ init_timer(t);
}
EXPORT_SYMBOL(cfs_init_timer);
-void cfs_timer_init(cfs_timer_t *t, cfs_timer_func_t *func, void *arg)
+void cfs_timer_init(struct timer_list *t, cfs_timer_func_t *func, void *arg)
{
- init_timer(t);
- t->function = func;
- t->data = (unsigned long)arg;
+ init_timer(t);
+ t->function = func;
+ t->data = (unsigned long)arg;
}
EXPORT_SYMBOL(cfs_timer_init);
-void cfs_timer_done(cfs_timer_t *t)
+void cfs_timer_done(struct timer_list *t)
{
- return;
+ return;
}
EXPORT_SYMBOL(cfs_timer_done);
-void cfs_timer_arm(cfs_timer_t *t, cfs_time_t deadline)
+void cfs_timer_arm(struct timer_list *t, cfs_time_t deadline)
{
- mod_timer(t, deadline);
+ mod_timer(t, deadline);
}
EXPORT_SYMBOL(cfs_timer_arm);
-void cfs_timer_disarm(cfs_timer_t *t)
+void cfs_timer_disarm(struct timer_list *t)
{
- del_timer(t);
+ del_timer(t);
}
EXPORT_SYMBOL(cfs_timer_disarm);
-int cfs_timer_is_armed(cfs_timer_t *t)
+int cfs_timer_is_armed(struct timer_list *t)
{
- return timer_pending(t);
+ return timer_pending(t);
}
EXPORT_SYMBOL(cfs_timer_is_armed);
-cfs_time_t cfs_timer_deadline(cfs_timer_t *t)
+cfs_time_t cfs_timer_deadline(struct timer_list *t)
{
- return t->expires;
+ return t->expires;
}
EXPORT_SYMBOL(cfs_timer_deadline);
}
void
-cfs_restore_sigs (cfs_sigset_t old)
+cfs_restore_sigs(sigset_t old)
{
unsigned long flags;
* Timer
*/
-void cfs_init_timer(cfs_timer_t *t)
+void cfs_init_timer(struct timer_list *t)
{
- CFS_INIT_LIST_HEAD(&t->tl_list);
+ CFS_INIT_LIST_HEAD(&t->tl_list);
}
-void cfs_timer_init(cfs_timer_t *l, cfs_timer_func_t *func, void *arg)
+void cfs_timer_init(struct timer_list *l, cfs_timer_func_t *func, void *arg)
{
- CFS_INIT_LIST_HEAD(&l->tl_list);
- l->function = func;
- l->data = (ulong_ptr_t)arg;
- return;
+ CFS_INIT_LIST_HEAD(&l->tl_list);
+ l->function = func;
+ l->data = (ulong_ptr_t)arg;
+ return;
}
-int cfs_timer_is_armed(cfs_timer_t *l)
+int cfs_timer_is_armed(struct timer_list *l)
{
- if (cfs_time_before(cfs_time_current(), l->expires))
- return 1;
- else
- return 0;
+ if (cfs_time_before(cfs_time_current(), l->expires))
+ return 1;
+ else
+ return 0;
}
-void cfs_timer_arm(cfs_timer_t *l, cfs_time_t deadline)
+void cfs_timer_arm(struct timer_list *l, cfs_time_t deadline)
{
- l->expires = deadline;
+ l->expires = deadline;
}
-void cfs_timer_disarm(cfs_timer_t *l)
+void cfs_timer_disarm(struct timer_list *l)
{
}
-cfs_time_t cfs_timer_deadline(cfs_timer_t *l)
+cfs_time_t cfs_timer_deadline(struct timer_list *l)
{
- return l->expires;
+ return l->expires;
}
return 0;
}
-cfs_sigset_t cfs_block_allsigs(void)
+sigset_t cfs_block_allsigs(void)
{
- cfs_sigset_t all;
- cfs_sigset_t old;
+ sigset_t all;
+ sigset_t old;
int rc;
sigfillset(&all);
return old;
}
-cfs_sigset_t cfs_block_sigs(unsigned long sigs)
+sigset_t cfs_block_sigs(unsigned long sigs)
{
- cfs_sigset_t old;
- cfs_sigset_t blocks = { { sigs } }; /* kludge */
+ sigset_t old;
+ sigset_t blocks = { { sigs } }; /* kludge */
int rc;
rc = sigprocmask(SIG_BLOCK, &blocks, &old);
/* Block all signals except for the @sigs. It's only used in
* Linux kernel, just a dummy here. */
-cfs_sigset_t cfs_block_sigsinv(unsigned long sigs)
+sigset_t cfs_block_sigsinv(unsigned long sigs)
{
- cfs_sigset_t old;
- int rc;
+ sigset_t old;
+ int rc;
- /* Return old blocked sigs */
- rc = sigprocmask(SIG_SETMASK, NULL, &old);
- LASSERT(rc == 0);
+ /* Return old blocked sigs */
+ rc = sigprocmask(SIG_SETMASK, NULL, &old);
+ LASSERT(rc == 0);
- return old;
+ return old;
}
-void cfs_restore_sigs(cfs_sigset_t old)
+void cfs_restore_sigs(sigset_t old)
{
- int rc = sigprocmask(SIG_SETMASK, &old, NULL);
+ int rc = sigprocmask(SIG_SETMASK, &old, NULL);
- LASSERT (rc == 0);
+ LASSERT (rc == 0);
}
int cfs_signal_pending(void)
{
- cfs_sigset_t empty;
- cfs_sigset_t set;
- int rc;
+ sigset_t empty;
+ sigset_t set;
+ int rc;
- rc = sigpending(&set);
- LASSERT (rc == 0);
+ rc = sigpending(&set);
+ LASSERT (rc == 0);
- sigemptyset(&empty);
+ sigemptyset(&empty);
- return !memcmp(&empty, &set, sizeof(set));
+	return memcmp(&empty, &set, sizeof(set)) != 0;
}
void cfs_clear_sigpending(void)
struct lc_watchdog {
spinlock_t lcw_lock; /* check or change lcw_list */
- int lcw_refcount; /* must hold lcw_pending_timers_lock */
- cfs_timer_t lcw_timer; /* kernel timer */
- cfs_list_t lcw_list; /* chain on pending list */
- cfs_time_t lcw_last_touched; /* last touched stamp */
- cfs_task_t *lcw_task; /* owner task */
- void (*lcw_callback)(pid_t, void *);
- void *lcw_data;
-
- pid_t lcw_pid;
-
- enum {
- LC_WATCHDOG_DISABLED,
- LC_WATCHDOG_ENABLED,
- LC_WATCHDOG_EXPIRED
- } lcw_state;
+ int lcw_refcount; /* must hold lcw_pending_timers_lock */
+ struct timer_list lcw_timer; /* kernel timer */
+ cfs_list_t lcw_list; /* chain on pending list */
+ cfs_time_t lcw_last_touched; /* last touched stamp */
+ struct task_struct *lcw_task; /* owner task */
+ void (*lcw_callback)(pid_t, void *);
+ void *lcw_data;
+
+ pid_t lcw_pid;
+
+ enum {
+ LC_WATCHDOG_DISABLED,
+ LC_WATCHDOG_ENABLED,
+ LC_WATCHDOG_EXPIRED
+ } lcw_state;
};
#ifdef WITH_WATCHDOG
static void lcw_dispatch_start(void)
{
- cfs_task_t *task;
+ struct task_struct *task;
ENTRY;
LASSERT(lcw_refcount == 1);
spin_lock_init(&lcw->lcw_lock);
lcw->lcw_refcount = 1; /* refcount for owner */
- lcw->lcw_task = cfs_current();
+ lcw->lcw_task = current;
lcw->lcw_pid = current_pid();
lcw->lcw_callback = (callback != NULL) ? callback : lc_watchdog_dumplog;
lcw->lcw_data = data;
* for Linux kernel.
*/
-cfs_task_t this_task =
+struct task_struct this_task =
{ /* umask */ 0,/* blocked*/0, /* pid */ 0, /* pgrp */ 0,
- /* uid,euid,suid,fsuid */ 0, 0, 0, 0,
+ /* uid,euid,suid,fsuid */ 0, 0, 0, 0,
/* gid_t gid,egid,sgid,fsgid */ 0, 0, 0, 0,
/* ngroups*/ 1, /*cgroups*/ 0, /*groups*/ 0,
/* group_info */ NULL,
pid_t current_pid(void)
{
- return cfs_current()->pid;
+ return current->pid;
}
mode_t current_umask(void)
IN BOOLEAN Create
)
{
- PLIST_ENTRY ListEntry = NULL;
+ PLIST_ENTRY ListEntry = NULL;
PTASK_SLOT TaskSlot = NULL;
spin_lock(&(cfs_win_task_manger.Lock));
void
cleanup_task_manager()
{
- PLIST_ENTRY ListEntry = NULL;
+ PLIST_ENTRY ListEntry = NULL;
PTASK_SLOT TaskSlot = NULL;
/* remove ThreadNotifyRoutine: task_manager_notify */
*/
-cfs_task_t *
-cfs_current()
+struct task_struct *
+current_task(void)
{
HANDLE Pid = PsGetCurrentProcessId();
HANDLE Tid = PsGetCurrentThreadId();
PETHREAD Tet = PsGetCurrentThread();
- PLIST_ENTRY ListEntry = NULL;
+ PLIST_ENTRY ListEntry = NULL;
PTASK_SLOT TaskSlot = NULL;
spin_lock(&(cfs_win_task_manger.Lock));
{
PTASK_SLOT Prev = NULL, Curr = NULL;
-
+
ListEntry = cfs_win_task_manger.TaskList.Flink;
while (ListEntry != (&(cfs_win_task_manger.TaskList))) {
void
schedule_timeout_and_set_state(long state, int64_t time)
{
- cfs_task_t * task = cfs_current();
+ struct task_struct * task = current;
PTASK_SLOT slot = NULL;
if (!task) {
int
wake_up_process(
- cfs_task_t * task
+ struct task_struct * task
)
{
PTASK_SLOT slot = NULL;
sleep_on(wait_queue_head_t *waitq)
{
wait_queue_t link;
-
+
init_waitqueue_entry_current(&link);
add_wait_queue(waitq, &link);
waitq_wait(&link, TASK_INTERRUPTIBLE);
#include <libcfs/libcfs.h>
#include "tracefile.h"
-void libcfs_debug_dumpstack(cfs_task_t *tsk)
+void libcfs_debug_dumpstack(struct task_struct *tsk)
{
return;
}
{
}
-cfs_task_t *libcfs_current(void)
+struct task_struct *libcfs_current(void)
{
- return cfs_current();
+ return current;
}
void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *msgdata)
void
read_lock(rwlock_t *rwlock)
{
- cfs_task_t * task = cfs_current();
+ struct task_struct * task = current;
PTASK_SLOT slot = NULL;
if (!task) {
void
read_unlock(rwlock_t *rwlock)
{
- cfs_task_t * task = cfs_current();
+ struct task_struct * task = current;
PTASK_SLOT slot = NULL;
if (!task) {
void
write_lock(rwlock_t *rwlock)
{
- cfs_task_t * task = cfs_current();
+ struct task_struct * task = current;
PTASK_SLOT slot = NULL;
if (!task) {
void
write_unlock(rwlock_t *rwlock)
{
- cfs_task_t * task = cfs_current();
+ struct task_struct * task = current;
PTASK_SLOT slot = NULL;
if (!task) {
spinlock_t shrinker_guard = {0};
CFS_LIST_HEAD(shrinker_hdr);
-cfs_timer_t shrinker_timer = {0};
+struct timer_list shrinker_timer = {0};
struct shrinker *set_shrinker(int seeks, shrink_callback cb)
{
* name: thread name to create
*
* Return Value:
- * cfs_task_t: 0 on success or error codes
+ *   struct task_struct *: the new task on success or an error code
*
* Notes:
* N/A
*/
-cfs_task_t kthread_run(int (*func)(void *), void *arg, char *name)
+struct task_struct *kthread_run(int (*func)(void *), void *arg, char *name)
{
cfs_handle_t thread = NULL;
NTSTATUS status;
ZwClose(thread);
- return (cfs_task_t)0;
+	return (struct task_struct *)0;
}
IN PVOID SystemArgument1,
IN PVOID SystemArgument2)
{
- cfs_timer_t * timer;
+ struct timer_list * timer;
KIRQL Irql;
- timer = (cfs_timer_t *) DeferredContext;
+ timer = (struct timer_list *) DeferredContext;
/* clear the flag */
KeAcquireSpinLock(&(timer->Lock), &Irql);
timer->proc((long_ptr_t)timer->arg);
}
-void cfs_init_timer(cfs_timer_t *timer)
+void cfs_init_timer(struct timer_list *timer)
{
- memset(timer, 0, sizeof(cfs_timer_t));
+ memset(timer, 0, sizeof(struct timer_list));
}
/*
* cfs_timer_init
- * To initialize the cfs_timer_t
+ * To initialize the struct timer_list
*
* Arguments:
* timer: the cfs_timer to be initialized
* N/A
*/
-void cfs_timer_init(cfs_timer_t *timer, void (*func)(ulong_ptr_t), void *arg)
+void cfs_timer_init(struct timer_list *timer, void (*func)(ulong_ptr_t), void *arg)
{
- memset(timer, 0, sizeof(cfs_timer_t));
+ memset(timer, 0, sizeof(struct timer_list));
timer->proc = func;
timer->arg = arg;
/*
* cfs_timer_done
- * To finialize the cfs_timer_t (unused)
+ *   To finalize the struct timer_list (unused)
*
* Arguments:
* timer: the cfs_timer to be cleaned up
* N/A
*/
-void cfs_timer_done(cfs_timer_t *timer)
+void cfs_timer_done(struct timer_list *timer)
{
return;
}
* N/A
*/
-void cfs_timer_arm(cfs_timer_t *timer, cfs_time_t deadline)
+void cfs_timer_arm(struct timer_list *timer, cfs_time_t deadline)
{
LARGE_INTEGER timeout;
KIRQL Irql;
* N/A
*/
-void cfs_timer_disarm(cfs_timer_t *timer)
+void cfs_timer_disarm(struct timer_list *timer)
{
KIRQL Irql;
* N/A
*/
-int cfs_timer_is_armed(cfs_timer_t *timer)
+int cfs_timer_is_armed(struct timer_list *timer)
{
int rc = 0;
KIRQL Irql;
* N/A
*/
-cfs_time_t cfs_timer_deadline(cfs_timer_t * timer)
+cfs_time_t cfs_timer_deadline(struct timer_list * timer)
{
return timer->deadline;
}
* routine related with sigals
*/
-cfs_sigset_t cfs_block_allsigs()
+sigset_t cfs_block_allsigs()
{
return 0;
}
-cfs_sigset_t cfs_block_sigs(sigset_t bit)
+sigset_t cfs_block_sigs(unsigned long sigs)
{
return 0;
}
/* Block all signals except for the @sigs. It's only used in
* Linux kernel, just a dummy here. */
-cfs_sigset_t cfs_block_sigsinv(unsigned long sigs)
+sigset_t cfs_block_sigsinv(unsigned long sigs)
{
return 0;
}
-void cfs_restore_sigs(cfs_sigset_t old)
+void cfs_restore_sigs(sigset_t old)
{
}
void init_waitqueue_entry_current(wait_queue_t *link)
{
- cfs_task_t * task = cfs_current();
+ struct task_struct * task = current;
PTASK_SLOT slot = NULL;
if (!task) {
void cfs_waitlink_fini(wait_queue_t *link)
{
- cfs_task_t * task = cfs_current();
+ struct task_struct * task = current;
PTASK_SLOT slot = NULL;
if (!task) {
#ifdef __KERNEL__
for (; nthrs > 0; nthrs--) {
- char name[16];
- cfs_task_t *task;
+ char name[16];
+ struct task_struct *task;
spin_lock(&cfs_wi_data.wi_glock);
while (sched->ws_starting > 0) {
int
kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
- cfs_task_t *task = kthread_run(fn, arg, name);
+ struct task_struct *task = kthread_run(fn, arg, name);
if (IS_ERR(task))
return PTR_ERR(task);
int
kptllnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
- cfs_task_t *task;
+ struct task_struct *task;
cfs_atomic_inc(&kptllnd_data.kptl_nthreads);
int
kqswnal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
- cfs_task_t *task = cfs_thread_run(fn, arg, name);
+ struct task_struct *task = cfs_thread_run(fn, arg, name);
if (IS_ERR(task))
return PTR_ERR(task);
int
kranal_thread_start(int(*fn)(void *arg), void *arg, char *name)
{
- cfs_task_t *task = cfs_thread_run(fn, arg, name);
+ struct task_struct *task = cfs_thread_run(fn, arg, name);
if (!IS_ERR(task))
cfs_atomic_inc(&kranal_data.kra_nthreads);
int
ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
- cfs_task_t *task = kthread_run(fn, arg, name);
+ struct task_struct *task = kthread_run(fn, arg, name);
if (IS_ERR(task))
return PTR_ERR(task);
void
lnet_ping_target_fini(void)
{
- lnet_event_t event;
- int rc;
- int which;
- int timeout_ms = 1000;
- cfs_sigset_t blocked = cfs_block_allsigs();
+ lnet_event_t event;
+ int rc;
+ int which;
+ int timeout_ms = 1000;
+ sigset_t blocked = cfs_block_allsigs();
- LNetMDUnlink(the_lnet.ln_ping_target_md);
- /* NB md could be busy; this just starts the unlink */
+ LNetMDUnlink(the_lnet.ln_ping_target_md);
+ /* NB md could be busy; this just starts the unlink */
for (;;) {
rc = LNetEQPoll(&the_lnet.ln_ping_target_eq, 1,
int
lnet_ping (lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_ids)
{
- lnet_handle_eq_t eqh;
- lnet_handle_md_t mdh;
- lnet_event_t event;
- lnet_md_t md = {0};
- int which;
- int unlinked = 0;
- int replied = 0;
- const int a_long_time = 60000; /* mS */
- int infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]);
- lnet_ping_info_t *info;
- lnet_process_id_t tmpid;
- int i;
- int nob;
- int rc;
- int rc2;
- cfs_sigset_t blocked;
-
- if (n_ids <= 0 ||
- id.nid == LNET_NID_ANY ||
- timeout_ms > 500000 || /* arbitrary limit! */
- n_ids > 20) /* arbitrary limit! */
- return -EINVAL;
+ lnet_handle_eq_t eqh;
+ lnet_handle_md_t mdh;
+ lnet_event_t event;
+ lnet_md_t md = {0};
+ int which;
+ int unlinked = 0;
+ int replied = 0;
+	const int	  a_long_time = 60000;		/* ms */
+ int infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]);
+ lnet_ping_info_t *info;
+ lnet_process_id_t tmpid;
+ int i;
+ int nob;
+ int rc;
+ int rc2;
+ sigset_t blocked;
+
+ if (n_ids <= 0 ||
+ id.nid == LNET_NID_ANY ||
+ timeout_ms > 500000 || /* arbitrary limit! */
+ n_ids > 20) /* arbitrary limit! */
+ return -EINVAL;
if (id.pid == LNET_PID_ANY)
id.pid = LUSTRE_SRV_LNET_PID;
#ifdef __KERNEL__
my_slot = -1;
for (i = 0; i < container->msc_nfinalizers; i++) {
- if (container->msc_finalizers[i] == cfs_current())
+ if (container->msc_finalizers[i] == current)
break;
if (my_slot < 0 && container->msc_finalizers[i] == NULL)
return;
}
- container->msc_finalizers[my_slot] = cfs_current();
+ container->msc_finalizers[my_slot] = current;
#else
LASSERT(container->msc_nfinalizers == 1);
if (container->msc_finalizers[0] != NULL) {
int
stt_start_timer_thread (void)
{
- cfs_task_t *task;
+ struct task_struct *task;
LASSERT(!stt_data.stt_shuttingdown);
/**
* Debug information, the task is owning the page.
*/
- cfs_task_t *cp_task;
+ struct task_struct *cp_task;
/**
* Owning IO request in cl_page_state::CPS_PAGEOUT and
* cl_page_state::CPS_PAGEIN states. This field is maintained only in
* \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
*/
struct mutex cll_guard;
- cfs_task_t *cll_guarder;
+ struct task_struct *cll_guarder;
int cll_depth;
/**
* the owner for INTRANSIT state
*/
- cfs_task_t *cll_intransit_owner;
+ struct task_struct *cll_intransit_owner;
int cll_error;
/**
* Number of holds on a lock. A hold prevents a lock from being
* @{
*/
struct cl_page_list {
- unsigned pl_nr;
- cfs_list_t pl_pages;
- cfs_task_t *pl_owner;
+ unsigned pl_nr;
+ cfs_list_t pl_pages;
+ struct task_struct *pl_owner;
};
/**
};
-typedef struct task_struct cfs_task_t;
-#define cfs_current() current
#define current_pid() (current->pid)
#define current_comm() (current->comm)
#define current_fsuid() (current->fsuid)
*
* // current thread acquired a temporary reference to foo.
* foo_get(foo);
- * lu_ref_add(&foo->reference, __FUNCTION__, cfs_current());
+ * lu_ref_add(&foo->reference, __FUNCTION__, current);
*
* ...
*
* // temporary reference is released.
- * lu_ref_del(&foo->reference, __FUNCTION__, cfs_current());
+ * lu_ref_del(&foo->reference, __FUNCTION__, current);
* foo_put(foo);
* \endcode
*
}
#define LDLM_LOCK_REF_DEL(lock) \
- lu_ref_del(&lock->l_reference, "handle", cfs_current())
+ lu_ref_del(&lock->l_reference, "handle", current)
static inline struct ldlm_lock *
ldlm_handle2lock_long(const struct lustre_handle *h, __u64 flags)
const struct ldlm_res_id *);
#define LDLM_RESOURCE_ADDREF(res) do { \
- lu_ref_add_atomic(&(res)->lr_reference, __FUNCTION__, cfs_current()); \
+ lu_ref_add_atomic(&(res)->lr_reference, __FUNCTION__, current); \
} while (0)
#define LDLM_RESOURCE_DELREF(res) do { \
- lu_ref_del(&(res)->lr_reference, __FUNCTION__, cfs_current()); \
+ lu_ref_del(&(res)->lr_reference, __FUNCTION__, current); \
} while (0)
/* ldlm_request.c */
/* l_lock.c */
struct lustre_lock {
int l_depth;
- cfs_task_t *l_owner;
+ struct task_struct *l_owner;
struct semaphore l_sem;
spinlock_t l_spin;
};
do { \
wait_queue_t __wait; \
cfs_duration_t __timeout = info->lwi_timeout; \
- cfs_sigset_t __blocked; \
+ sigset_t __blocked; \
int __allow_intr = info->lwi_allow_intr; \
\
ret = 0; \
/** reqs waiting for replies */
struct ptlrpc_at_array scp_at_array;
/** early reply timer */
- cfs_timer_t scp_at_timer;
+ struct timer_list scp_at_timer;
/** debug */
cfs_time_t scp_at_checktime;
/** check early replies */
int obd_requests_queued_for_recovery;
wait_queue_head_t obd_next_transno_waitq;
/* protected by obd_recovery_task_lock */
- cfs_timer_t obd_recovery_timer;
- time_t obd_recovery_start; /* seconds */
- time_t obd_recovery_end; /* seconds, for lprocfs_status */
- int obd_recovery_time_hard;
- int obd_recovery_timeout;
- int obd_recovery_ir_factor;
+ struct timer_list obd_recovery_timer;
+ time_t obd_recovery_start; /* seconds */
+ time_t obd_recovery_end; /* seconds, for lprocfs_status */
+ int obd_recovery_time_hard;
+ int obd_recovery_timeout;
+ int obd_recovery_ir_factor;
/* new recovery stuff from CMD2 */
struct target_recovery_data obd_recovery_data;
if (agl)
descr->cld_enq_flags |= CEF_AGL;
cio->cui_glimpse = 1;
- /*
- * CEF_ASYNC is used because glimpse sub-locks cannot
- * deadlock (because they never conflict with other
- * locks) and, hence, can be enqueued out-of-order.
- *
- * CEF_MUST protects glimpse lock from conversion into
- * a lockless mode.
- */
- lock = cl_lock_request(env, io, descr, "glimpse",
- cfs_current());
- cio->cui_glimpse = 0;
+ /*
+ * CEF_ASYNC is used because glimpse sub-locks cannot
+ * deadlock (because they never conflict with other
+ * locks) and, hence, can be enqueued out-of-order.
+ *
+ * CEF_MUST protects glimpse lock from conversion into
+ * a lockless mode.
+ */
+ lock = cl_lock_request(env, io, descr, "glimpse",
+ current);
+ cio->cui_glimpse = 0;
- if (lock == NULL)
- RETURN(0);
+ if (lock == NULL)
+ RETURN(0);
- if (IS_ERR(lock))
- RETURN(PTR_ERR(lock));
+ if (IS_ERR(lock))
+ RETURN(PTR_ERR(lock));
- LASSERT(agl == 0);
- result = cl_wait(env, lock);
- if (result == 0) {
+ LASSERT(agl == 0);
+ result = cl_wait(env, lock);
+ if (result == 0) {
cl_merge_lvb(env, inode);
- if (cl_isize_read(inode) > 0 &&
- inode->i_blocks == 0) {
- /*
- * LU-417: Add dirty pages block count
- * lest i_blocks reports 0, some "cp" or
- * "tar" may think it's a completely
- * sparse file and skip it.
- */
- inode->i_blocks = dirty_cnt(inode);
- }
- cl_unuse(env, lock);
- }
- cl_lock_release(env, lock, "glimpse", cfs_current());
- } else {
- CDEBUG(D_DLMTRACE, "No objects for inode\n");
+ if (cl_isize_read(inode) > 0 &&
+ inode->i_blocks == 0) {
+ /*
+ * LU-417: Add dirty pages block count
+ * lest i_blocks reports 0, some "cp" or
+ * "tar" may think it's a completely
+ * sparse file and skip it.
+ */
+ inode->i_blocks = dirty_cnt(inode);
+ }
+ cl_unuse(env, lock);
+ }
+ cl_lock_release(env, lock, "glimpse", current);
+ } else {
+ CDEBUG(D_DLMTRACE, "No objects for inode\n");
cl_merge_lvb(env, inode);
- }
- }
+ }
+ }
- RETURN(result);
+ RETURN(result);
}
static int cl_io_get(struct inode *inode, struct lu_env **envout,
result = cl_io_init(env, io, CIT_MISC, clob);
if (result > 0)
result = io->ci_result;
- else if (result == 0) {
- cti = ccc_env_info(env);
- descr = &cti->cti_descr;
+ else if (result == 0) {
+ cti = ccc_env_info(env);
+ descr = &cti->cti_descr;
- *descr = whole_file;
- descr->cld_obj = clob;
- lock = cl_lock_peek(env, io, descr, "localsize", cfs_current());
- if (lock != NULL) {
+ *descr = whole_file;
+ descr->cld_obj = clob;
+ lock = cl_lock_peek(env, io, descr, "localsize", current);
+ if (lock != NULL) {
cl_merge_lvb(env, inode);
- cl_unuse(env, lock);
- cl_lock_release(env, lock, "localsize", cfs_current());
- result = 0;
- } else
- result = -ENODATA;
- }
- cl_io_fini(env, io);
- cl_env_put(env, &refcheck);
- RETURN(result);
+ cl_unuse(env, lock);
+ cl_lock_release(env, lock, "localsize", current);
+ result = 0;
+ } else
+ result = -ENODATA;
+ }
+ cl_io_fini(env, io);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
descr->cld_gid = gid;
descr->cld_mode = CLM_GROUP;
- enqflags = CEF_MUST | (nonblock ? CEF_NONBLOCK : 0);
- descr->cld_enq_flags = enqflags;
-
- lock = cl_lock_request(env, io, descr, GROUPLOCK_SCOPE, cfs_current());
- if (IS_ERR(lock)) {
- cl_io_fini(env, io);
- cl_env_put(env, &refcheck);
- return PTR_ERR(lock);
- }
+ enqflags = CEF_MUST | (nonblock ? CEF_NONBLOCK : 0);
+ descr->cld_enq_flags = enqflags;
+
+ lock = cl_lock_request(env, io, descr, GROUPLOCK_SCOPE, current);
+ if (IS_ERR(lock)) {
+ cl_io_fini(env, io);
+ cl_env_put(env, &refcheck);
+ return PTR_ERR(lock);
+ }
cg->cg_env = cl_env_get(&refcheck);
cg->cg_io = io;
void cl_put_grouplock(struct ccc_grouplock *cg)
{
- struct lu_env *env = cg->cg_env;
- struct cl_io *io = cg->cg_io;
- struct cl_lock *lock = cg->cg_lock;
- int refcheck;
+ struct lu_env *env = cg->cg_env;
+ struct cl_io *io = cg->cg_io;
+ struct cl_lock *lock = cg->cg_lock;
+ int refcheck;
- LASSERT(cg->cg_env);
- LASSERT(cg->cg_gid);
+ LASSERT(cg->cg_env);
+ LASSERT(cg->cg_gid);
- cl_env_implant(env, &refcheck);
- cl_env_put(env, &refcheck);
+ cl_env_implant(env, &refcheck);
+ cl_env_put(env, &refcheck);
- cl_unuse(env, lock);
- cl_lock_release(env, lock, GROUPLOCK_SCOPE, cfs_current());
- cl_io_fini(env, io);
- cl_env_put(env, NULL);
+ cl_unuse(env, lock);
+ cl_lock_release(env, lock, GROUPLOCK_SCOPE, current);
+ cl_io_fini(env, io);
+ cl_env_put(env, NULL);
}
/* Make sure the target isn't cleaned up while we're here. Yes,
* there's still a race between the above check and our incref here.
* Really, class_uuid2obd should take the ref. */
- targref = class_incref(target, __FUNCTION__, cfs_current());
+ targref = class_incref(target, __FUNCTION__, current);
target->obd_conn_inprogress++;
spin_unlock(&target->obd_dev_lock);
target->obd_conn_inprogress--;
spin_unlock(&target->obd_dev_lock);
- class_decref(targref, __func__, cfs_current());
+ class_decref(targref, __func__, current);
}
if (rc)
req->rq_status = rc;
struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
__u64 flags)
{
- struct ldlm_lock *lock;
- ENTRY;
+ struct ldlm_lock *lock;
+ ENTRY;
- LASSERT(handle);
+ LASSERT(handle);
lock = class_handle2object(handle->cookie, NULL);
- if (lock == NULL)
- RETURN(NULL);
+ if (lock == NULL)
+ RETURN(NULL);
- /* It's unlikely but possible that someone marked the lock as
- * destroyed after we did handle2object on it */
+ /* It's unlikely but possible that someone marked the lock as
+ * destroyed after we did handle2object on it */
if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED)== 0)) {
- lu_ref_add(&lock->l_reference, "handle", cfs_current());
- RETURN(lock);
- }
+ lu_ref_add(&lock->l_reference, "handle", current);
+ RETURN(lock);
+ }
- lock_res_and_lock(lock);
+ lock_res_and_lock(lock);
- LASSERT(lock->l_resource != NULL);
+ LASSERT(lock->l_resource != NULL);
- lu_ref_add_atomic(&lock->l_reference, "handle", cfs_current());
+ lu_ref_add_atomic(&lock->l_reference, "handle", current);
if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) {
- unlock_res_and_lock(lock);
- CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
- LDLM_LOCK_PUT(lock);
- RETURN(NULL);
- }
+ unlock_res_and_lock(lock);
+ CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
+ LDLM_LOCK_PUT(lock);
+ RETURN(NULL);
+ }
- if (flags && (lock->l_flags & flags)) {
- unlock_res_and_lock(lock);
- LDLM_LOCK_PUT(lock);
- RETURN(NULL);
- }
+ if (flags && (lock->l_flags & flags)) {
+ unlock_res_and_lock(lock);
+ LDLM_LOCK_PUT(lock);
+ RETURN(NULL);
+ }
- if (flags)
- lock->l_flags |= flags;
+ if (flags)
+ lock->l_flags |= flags;
- unlock_res_and_lock(lock);
- RETURN(lock);
+ unlock_res_and_lock(lock);
+ RETURN(lock);
}
EXPORT_SYMBOL(__ldlm_handle2lock);
/** @} ldlm_handles */
* All access to it should be under waiting_locks_spinlock.
*/
static cfs_list_t waiting_locks_list;
-static cfs_timer_t waiting_locks_timer;
+static struct timer_list waiting_locks_timer;
static struct expired_lock_thread {
wait_queue_head_t elt_waitq;
static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
{
struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
- cfs_task_t *task;
+ struct task_struct *task;
init_completion(&bltd.bltd_comp);
bltd.bltd_num = cfs_atomic_read(&blp->blp_num_threads);
static int ldlm_pools_thread_start(void)
{
struct l_wait_info lwi = { 0 };
- cfs_task_t *task;
+ struct task_struct *task;
ENTRY;
if (ldlm_pools_thread != NULL)
if (&lock->l_lru == &ns->ns_unused_list)
break;
- LDLM_LOCK_GET(lock);
+ LDLM_LOCK_GET(lock);
spin_unlock(&ns->ns_lock);
- lu_ref_add(&lock->l_reference, __FUNCTION__, cfs_current());
+ lu_ref_add(&lock->l_reference, __FUNCTION__, current);
/* Pass the lock through the policy filter and see if it
* should stay in LRU.
* old locks, but additionally choose them by
* their weight. Big extent locks will stay in
* the cache. */
- result = pf(ns, lock, unused, added, count);
- if (result == LDLM_POLICY_KEEP_LOCK) {
- lu_ref_del(&lock->l_reference,
- __FUNCTION__, cfs_current());
- LDLM_LOCK_RELEASE(lock);
+ result = pf(ns, lock, unused, added, count);
+ if (result == LDLM_POLICY_KEEP_LOCK) {
+ lu_ref_del(&lock->l_reference,
+ __FUNCTION__, current);
+ LDLM_LOCK_RELEASE(lock);
spin_lock(&ns->ns_lock);
break;
}
if (result == LDLM_POLICY_SKIP_LOCK) {
lu_ref_del(&lock->l_reference,
- __func__, cfs_current());
+ __func__, current);
LDLM_LOCK_RELEASE(lock);
spin_lock(&ns->ns_lock);
- continue;
- }
+ continue;
+ }
- lock_res_and_lock(lock);
- /* Check flags again under the lock. */
- if ((lock->l_flags & LDLM_FL_CANCELING) ||
- (ldlm_lock_remove_from_lru(lock) == 0)) {
+ lock_res_and_lock(lock);
+ /* Check flags again under the lock. */
+ if ((lock->l_flags & LDLM_FL_CANCELING) ||
+ (ldlm_lock_remove_from_lru(lock) == 0)) {
/* Another thread is removing lock from LRU, or
* somebody is already doing CANCEL, or there
* is a blocking request which will send cancel
* by itself, or the lock is no longer unused. */
- unlock_res_and_lock(lock);
- lu_ref_del(&lock->l_reference,
- __FUNCTION__, cfs_current());
- LDLM_LOCK_RELEASE(lock);
+ unlock_res_and_lock(lock);
+ lu_ref_del(&lock->l_reference,
+ __FUNCTION__, current);
+ LDLM_LOCK_RELEASE(lock);
spin_lock(&ns->ns_lock);
- continue;
- }
- LASSERT(!lock->l_readers && !lock->l_writers);
-
- /* If we have chosen to cancel this lock voluntarily, we
- * better send cancel notification to server, so that it
- * frees appropriate state. This might lead to a race
- * where while we are doing cancel here, server is also
- * silently cancelling this lock. */
- lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
-
- /* Setting the CBPENDING flag is a little misleading,
- * but prevents an important race; namely, once
- * CBPENDING is set, the lock can accumulate no more
- * readers/writers. Since readers and writers are
- * already zero here, ldlm_lock_decref() won't see
- * this flag and call l_blocking_ast */
- lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;
-
- /* We can't re-add to l_lru as it confuses the
- * refcounting in ldlm_lock_remove_from_lru() if an AST
- * arrives after we drop lr_lock below. We use l_bl_ast
- * and can't use l_pending_chain as it is used both on
- * server and client nevertheless bug 5666 says it is
- * used only on server */
- LASSERT(cfs_list_empty(&lock->l_bl_ast));
- cfs_list_add(&lock->l_bl_ast, cancels);
- unlock_res_and_lock(lock);
- lu_ref_del(&lock->l_reference, __FUNCTION__, cfs_current());
+ continue;
+ }
+ LASSERT(!lock->l_readers && !lock->l_writers);
+
+ /* If we have chosen to cancel this lock voluntarily, we
+ * better send cancel notification to server, so that it
+ * frees appropriate state. This might lead to a race
+ * where while we are doing cancel here, server is also
+ * silently cancelling this lock. */
+ lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
+
+ /* Setting the CBPENDING flag is a little misleading,
+ * but prevents an important race; namely, once
+ * CBPENDING is set, the lock can accumulate no more
+ * readers/writers. Since readers and writers are
+ * already zero here, ldlm_lock_decref() won't see
+ * this flag and call l_blocking_ast */
+ lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;
+
+ /* We can't re-add to l_lru as it confuses the
+ * refcounting in ldlm_lock_remove_from_lru() if an AST
+ * arrives after we drop lr_lock below. We use l_bl_ast
+ * and can't use l_pending_chain as it is used both on
+ * server and client nevertheless bug 5666 says it is
+ * used only on server */
+ LASSERT(cfs_list_empty(&lock->l_bl_ast));
+ cfs_list_add(&lock->l_bl_ast, cancels);
+ unlock_res_and_lock(lock);
+ lu_ref_del(&lock->l_reference, __FUNCTION__, current);
spin_lock(&ns->ns_lock);
added++;
unused--;
}
cfs_cap_t cfs_curproc_cap_pack(void) {
- return cfs_current()->cap_effective;
+ return current->cap_effective;
}
void cfs_curproc_cap_unpack(cfs_cap_t cap) {
- cfs_current()->cap_effective = cap;
+ current->cap_effective = cap;
}
int cfs_capable(cfs_cap_t cap)
int ll_capa_thread_start(void)
{
- cfs_task_t *task;
+ struct task_struct *task;
ENTRY;
init_waitqueue_head(&ll_capa_thread.t_ctl_waitq);
int ll_close_thread_start(struct ll_close_queue **lcq_ret)
{
struct ll_close_queue *lcq;
- cfs_task_t *task;
+ struct task_struct *task;
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CLOSE_THREAD))
return -EINTR;
int lustre_check_remote_perm(struct inode *inode, int mask);
/* llite/llite_capa.c */
-extern cfs_timer_t ll_capa_timer;
+extern struct timer_list ll_capa_timer;
int ll_capa_thread_start(void);
void ll_capa_thread_stop(void);
struct vvp_io *vio;
struct cl_env_nest nest;
int result;
- cfs_sigset_t set;
+ sigset_t set;
struct inode *inode;
struct ll_inode_info *lli;
ENTRY;
cl_io_fini(env, io);
cl_env_nested_put(&nest, env);
out:
- CDEBUG(D_MMAP, "%s mkwrite with %d\n", cfs_current()->comm, result);
+ CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
LASSERT(ergo(result == 0, PageLocked(vmpage)));
return result;
vmf->page = NULL;
}
}
- cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
+ cl_io_fini(env, io);
+ cl_env_nested_put(&nest, env);
vma->vm_flags |= ra_flags;
if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
fault_ret |= to_fault_error(result);
- CDEBUG(D_MMAP, "%s fault %d/%d\n",
- cfs_current()->comm, fault_ret, result);
- RETURN(fault_ret);
+ CDEBUG(D_MMAP, "%s fault %d/%d\n",
+ current->comm, fault_ret, result);
+ RETURN(fault_ret);
}
static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
int count = 0;
bool printed = false;
int result;
- cfs_sigset_t set;
+ sigset_t set;
/* Only SIGKILL and SIGTERM is allowed for fault/nopage/mkwrite
* so that it can be killed by admin but not cause segfault by
* page w/o holding inode mutex. This means we can
* add dirty pages into cache during truncate */
CERROR("Proc %s is dirting page w/o inode lock, this"
- "will break truncate.\n", cfs_current()->comm);
+ "will break truncate.\n", current->comm);
libcfs_debug_dumpstack(NULL);
LBUG();
return ERR_PTR(-EIO);
* for every page during write.
*/
int ll_prepare_write(struct file *file, struct page *vmpage, unsigned from,
- unsigned to)
+ unsigned to)
{
- struct ll_cl_context *lcc;
- int result;
- ENTRY;
+ struct ll_cl_context *lcc;
+ int result;
+ ENTRY;
- lcc = ll_cl_init(file, vmpage, 1);
- if (!IS_ERR(lcc)) {
- struct lu_env *env = lcc->lcc_env;
- struct cl_io *io = lcc->lcc_io;
- struct cl_page *page = lcc->lcc_page;
+ lcc = ll_cl_init(file, vmpage, 1);
+ if (!IS_ERR(lcc)) {
+ struct lu_env *env = lcc->lcc_env;
+ struct cl_io *io = lcc->lcc_io;
+ struct cl_page *page = lcc->lcc_page;
+
+ cl_page_assume(env, io, page);
- cl_page_assume(env, io, page);
-
result = cl_io_prepare_write(env, io, page, from, to);
- if (result == 0) {
- /*
- * Add a reference, so that page is not evicted from
- * the cache until ->commit_write() is called.
- */
- cl_page_get(page);
- lu_ref_add(&page->cp_reference, "prepare_write",
- cfs_current());
- } else {
- cl_page_unassume(env, io, page);
- ll_cl_fini(lcc);
- }
- /* returning 0 in prepare assumes commit must be called
- * afterwards */
- } else {
- result = PTR_ERR(lcc);
- }
- RETURN(result);
+ if (result == 0) {
+ /*
+ * Add a reference, so that page is not evicted from
+ * the cache until ->commit_write() is called.
+ */
+ cl_page_get(page);
+ lu_ref_add(&page->cp_reference, "prepare_write",
+ current);
+ } else {
+ cl_page_unassume(env, io, page);
+ ll_cl_fini(lcc);
+ }
+ /* returning 0 in prepare assumes commit must be called
+ * afterwards */
+ } else {
+ result = PTR_ERR(lcc);
+ }
+ RETURN(result);
}
int ll_commit_write(struct file *file, struct page *vmpage, unsigned from,
- unsigned to)
+ unsigned to)
{
- struct ll_cl_context *lcc;
- struct lu_env *env;
- struct cl_io *io;
- struct cl_page *page;
- int result = 0;
- ENTRY;
+ struct ll_cl_context *lcc;
+ struct lu_env *env;
+ struct cl_io *io;
+ struct cl_page *page;
+ int result = 0;
+ ENTRY;
- lcc = ll_cl_get();
- env = lcc->lcc_env;
- page = lcc->lcc_page;
- io = lcc->lcc_io;
-
- LASSERT(cl_page_is_owned(page, io));
- LASSERT(from <= to);
- if (from != to) /* handle short write case. */
- result = cl_io_commit_write(env, io, page, from, to);
- if (cl_page_is_owned(page, io))
- cl_page_unassume(env, io, page);
-
- /*
- * Release reference acquired by ll_prepare_write().
- */
- lu_ref_del(&page->cp_reference, "prepare_write", cfs_current());
- cl_page_put(env, page);
- ll_cl_fini(lcc);
- RETURN(result);
+ lcc = ll_cl_get();
+ env = lcc->lcc_env;
+ page = lcc->lcc_page;
+ io = lcc->lcc_io;
+
+ LASSERT(cl_page_is_owned(page, io));
+ LASSERT(from <= to);
+ if (from != to) /* handle short write case. */
+ result = cl_io_commit_write(env, io, page, from, to);
+ if (cl_page_is_owned(page, io))
+ cl_page_unassume(env, io, page);
+
+ /*
+ * Release reference acquired by ll_prepare_write().
+ */
+ lu_ref_del(&page->cp_reference, "prepare_write", current);
+ cl_page_put(env, page);
+ ll_cl_fini(lcc);
+ RETURN(result);
}
struct obd_capa *cl_capa_lookup(struct inode *inode, enum cl_req_type crt)
}
static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, struct cl_page *page,
- struct page *vmpage)
+ struct cl_page_list *queue, struct cl_page *page,
+ struct page *vmpage)
{
- struct ccc_page *cp;
- int rc;
+ struct ccc_page *cp;
+ int rc;
- ENTRY;
+ ENTRY;
- rc = 0;
- cl_page_assume(env, io, page);
- lu_ref_add(&page->cp_reference, "ra", cfs_current());
+ rc = 0;
+ cl_page_assume(env, io, page);
+ lu_ref_add(&page->cp_reference, "ra", current);
cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
if (!cp->cpg_defer_uptodate && !PageUptodate(vmpage)) {
rc = cl_page_is_under_lock(env, io, page);
- if (rc == -EBUSY) {
- cp->cpg_defer_uptodate = 1;
- cp->cpg_ra_used = 0;
- cl_page_list_add(queue, page);
- rc = 1;
- } else {
- cl_page_delete(env, page);
- rc = -ENOLCK;
- }
+ if (rc == -EBUSY) {
+ cp->cpg_defer_uptodate = 1;
+ cp->cpg_ra_used = 0;
+ cl_page_list_add(queue, page);
+ rc = 1;
+ } else {
+ cl_page_delete(env, page);
+ rc = -ENOLCK;
+ }
} else {
/* skip completed pages */
cl_page_unassume(env, io, page);
}
- lu_ref_del(&page->cp_reference, "ra", cfs_current());
- cl_page_put(env, page);
- RETURN(rc);
+ lu_ref_del(&page->cp_reference, "ra", current);
+ cl_page_put(env, page);
+ RETURN(rc);
}
/**
if (result == 0) {
page = cl_page_find(env, clob, vmpage->index,
vmpage, CPT_CACHEABLE);
- if (!IS_ERR(page)) {
- lu_ref_add(&page->cp_reference, "writepage",
- cfs_current());
- cl_page_assume(env, io, page);
+ if (!IS_ERR(page)) {
+ lu_ref_add(&page->cp_reference, "writepage",
+ current);
+ cl_page_assume(env, io, page);
result = cl_page_flush(env, io, page);
if (result != 0) {
/*
}
cl_page_disown(env, io, page);
unlocked = true;
- lu_ref_del(&page->cp_reference,
- "writepage", cfs_current());
- cl_page_put(env, page);
+ lu_ref_del(&page->cp_reference,
+ "writepage", current);
+ cl_page_put(env, page);
} else {
result = PTR_ERR(page);
}
struct ptlrpc_thread *thread = &sai->sai_agl_thread;
struct l_wait_info lwi = { 0 };
struct ll_inode_info *plli;
- cfs_task_t *task;
+ struct task_struct *task;
ENTRY;
CDEBUG(D_READA, "start agl thread: [pid %d] [parent %.*s]\n",
}
static struct cl_object *vvp_pgcache_obj(const struct lu_env *env,
- struct lu_device *dev,
- struct vvp_pgcache_id *id)
+ struct lu_device *dev,
+ struct vvp_pgcache_id *id)
{
- LASSERT(lu_device_is_cl(dev));
-
- id->vpi_depth &= 0xf;
- id->vpi_obj = NULL;
- id->vpi_curdep = id->vpi_depth;
-
- cfs_hash_hlist_for_each(dev->ld_site->ls_obj_hash, id->vpi_bucket,
- vvp_pgcache_obj_get, id);
- if (id->vpi_obj != NULL) {
- struct lu_object *lu_obj;
-
- lu_obj = lu_object_locate(id->vpi_obj, dev->ld_type);
- if (lu_obj != NULL) {
- lu_object_ref_add(lu_obj, "dump", cfs_current());
- return lu2cl(lu_obj);
- }
- lu_object_put(env, lu_object_top(id->vpi_obj));
-
- } else if (id->vpi_curdep > 0) {
- id->vpi_depth = 0xf;
- }
- return NULL;
+ LASSERT(lu_device_is_cl(dev));
+
+ id->vpi_depth &= 0xf;
+ id->vpi_obj = NULL;
+ id->vpi_curdep = id->vpi_depth;
+
+ cfs_hash_hlist_for_each(dev->ld_site->ls_obj_hash, id->vpi_bucket,
+ vvp_pgcache_obj_get, id);
+ if (id->vpi_obj != NULL) {
+ struct lu_object *lu_obj;
+
+ lu_obj = lu_object_locate(id->vpi_obj, dev->ld_type);
+ if (lu_obj != NULL) {
+ lu_object_ref_add(lu_obj, "dump", current);
+ return lu2cl(lu_obj);
+ }
+ lu_object_put(env, lu_object_top(id->vpi_obj));
+
+ } else if (id->vpi_curdep > 0) {
+ id->vpi_depth = 0xf;
+ }
+ return NULL;
}
static loff_t vvp_pgcache_find(const struct lu_env *env,
- struct lu_device *dev, loff_t pos)
+ struct lu_device *dev, loff_t pos)
{
- struct cl_object *clob;
- struct lu_site *site;
- struct vvp_pgcache_id id;
+ struct cl_object *clob;
+ struct lu_site *site;
+ struct vvp_pgcache_id id;
- site = dev->ld_site;
- vvp_pgcache_id_unpack(pos, &id);
+ site = dev->ld_site;
+ vvp_pgcache_id_unpack(pos, &id);
- while (1) {
- if (id.vpi_bucket >= CFS_HASH_NHLIST(site->ls_obj_hash))
- return ~0ULL;
- clob = vvp_pgcache_obj(env, dev, &id);
- if (clob != NULL) {
- struct cl_object_header *hdr;
- int nr;
- struct cl_page *pg;
+ while (1) {
+ if (id.vpi_bucket >= CFS_HASH_NHLIST(site->ls_obj_hash))
+ return ~0ULL;
+ clob = vvp_pgcache_obj(env, dev, &id);
+ if (clob != NULL) {
+ struct cl_object_header *hdr;
+ int nr;
+ struct cl_page *pg;
- /* got an object. Find next page. */
- hdr = cl_object_header(clob);
+ /* got an object. Find next page. */
+ hdr = cl_object_header(clob);
spin_lock(&hdr->coh_page_guard);
- nr = radix_tree_gang_lookup(&hdr->coh_tree,
- (void **)&pg,
- id.vpi_index, 1);
- if (nr > 0) {
- id.vpi_index = pg->cp_index;
- /* Cant support over 16T file */
- nr = !(pg->cp_index > 0xffffffff);
- }
+ nr = radix_tree_gang_lookup(&hdr->coh_tree,
+ (void **)&pg,
+ id.vpi_index, 1);
+ if (nr > 0) {
+ id.vpi_index = pg->cp_index;
+				/* Can't support files over 16T */
+ nr = !(pg->cp_index > 0xffffffff);
+ }
spin_unlock(&hdr->coh_page_guard);
- lu_object_ref_del(&clob->co_lu, "dump", cfs_current());
- cl_object_put(env, clob);
- if (nr > 0)
- return vvp_pgcache_id_pack(&id);
- }
- /* to the next object. */
- ++id.vpi_depth;
- id.vpi_depth &= 0xf;
- if (id.vpi_depth == 0 && ++id.vpi_bucket == 0)
- return ~0ULL;
- id.vpi_index = 0;
- }
+ lu_object_ref_del(&clob->co_lu, "dump", current);
+ cl_object_put(env, clob);
+ if (nr > 0)
+ return vvp_pgcache_id_pack(&id);
+ }
+ /* to the next object. */
+ ++id.vpi_depth;
+ id.vpi_depth &= 0xf;
+ if (id.vpi_depth == 0 && ++id.vpi_bucket == 0)
+ return ~0ULL;
+ id.vpi_index = 0;
+ }
}
#define seq_page_flag(seq, page, flag, has_flags) do { \
static int vvp_pgcache_show(struct seq_file *f, void *v)
{
- loff_t pos;
- struct ll_sb_info *sbi;
- struct cl_object *clob;
- struct lu_env *env;
- struct cl_page *page;
- struct cl_object_header *hdr;
- struct vvp_pgcache_id id;
- int refcheck;
- int result;
-
- env = cl_env_get(&refcheck);
- if (!IS_ERR(env)) {
- pos = *(loff_t *) v;
- vvp_pgcache_id_unpack(pos, &id);
- sbi = f->private;
- clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id);
- if (clob != NULL) {
- hdr = cl_object_header(clob);
+ loff_t pos;
+ struct ll_sb_info *sbi;
+ struct cl_object *clob;
+ struct lu_env *env;
+ struct cl_page *page;
+ struct cl_object_header *hdr;
+ struct vvp_pgcache_id id;
+ int refcheck;
+ int result;
+
+ env = cl_env_get(&refcheck);
+ if (!IS_ERR(env)) {
+ pos = *(loff_t *) v;
+ vvp_pgcache_id_unpack(pos, &id);
+ sbi = f->private;
+ clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id);
+ if (clob != NULL) {
+ hdr = cl_object_header(clob);
spin_lock(&hdr->coh_page_guard);
page = cl_page_lookup(hdr, id.vpi_index);
- seq_printf(f, "%8x@"DFID": ",
- id.vpi_index, PFID(&hdr->coh_lu.loh_fid));
- if (page != NULL) {
- vvp_pgcache_page_show(env, f, page);
- cl_page_put(env, page);
- } else
- seq_puts(f, "missing\n");
+ seq_printf(f, "%8x@"DFID": ",
+ id.vpi_index, PFID(&hdr->coh_lu.loh_fid));
+ if (page != NULL) {
+ vvp_pgcache_page_show(env, f, page);
+ cl_page_put(env, page);
+ } else
+ seq_puts(f, "missing\n");
spin_unlock(&hdr->coh_page_guard);
- lu_object_ref_del(&clob->co_lu, "dump", cfs_current());
- cl_object_put(env, clob);
- } else
- seq_printf(f, "%llx missing\n", pos);
- cl_env_put(env, &refcheck);
- result = 0;
- } else
- result = PTR_ERR(env);
- return result;
+ lu_object_ref_del(&clob->co_lu, "dump", current);
+ cl_object_put(env, clob);
+ } else
+ seq_printf(f, "%llx missing\n", pos);
+ cl_env_put(env, &refcheck);
+ result = 0;
+ } else
+ result = PTR_ERR(env);
+ return result;
}
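vvp_pgcache_show() follows the standard bracket for borrowing a client
environment. A minimal sketch of that pattern in isolation (the function
name is hypothetical):

static int cl_env_bracket_sketch(void)
{
	struct lu_env *env;
	int refcheck;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);
	/* ... look up objects and pages with env ... */
	cl_env_put(env, &refcheck);
	return 0;
}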
static void *vvp_pgcache_start(struct seq_file *f, loff_t *pos)
* function corresponding to the current layout type.
*/
struct lov_object {
- struct cl_object lo_cl;
- /**
- * Serializes object operations with transitions between layout types.
- *
- * This semaphore is taken in shared mode by all object methods, and
- * is taken in exclusive mode when object type is changed.
- *
- * \see lov_object::lo_type
- */
+ struct cl_object lo_cl;
+ /**
+ * Serializes object operations with transitions between layout types.
+ *
+ * This semaphore is taken in shared mode by all object methods, and
+ * is taken in exclusive mode when object type is changed.
+ *
+ * \see lov_object::lo_type
+ */
struct rw_semaphore lo_type_guard;
/**
* Type of an object. Protected by lov_object::lo_type_guard.
*/
struct lov_stripe_md *lo_lsm;
- union lov_layout_state {
- struct lov_layout_raid0 {
- unsigned lo_nr;
+ union lov_layout_state {
+ struct lov_layout_raid0 {
+ unsigned lo_nr;
/**
* When this is true, lov_object::lo_attr contains
* valid up to date attributes for a top-level
* any sub-object change.
*/
int lo_attr_valid;
- /**
- * Array of sub-objects. Allocated when top-object is
- * created (lov_init_raid0()).
- *
- * Top-object is a strict master of its sub-objects:
- * it is created before them, and outlives its
- * children (this later is necessary so that basic
- * functions like cl_object_top() always
- * work). Top-object keeps a reference on every
- * sub-object.
- *
- * When top-object is destroyed (lov_delete_raid0())
- * it releases its reference to a sub-object and waits
- * until the latter is finally destroyed.
- */
- struct lovsub_object **lo_sub;
- /**
- * protect lo_sub
- */
+ /**
+ * Array of sub-objects. Allocated when top-object is
+ * created (lov_init_raid0()).
+ *
+ * Top-object is a strict master of its sub-objects:
+ * it is created before them, and outlives its
+			 * children (the latter is necessary so that basic
+ * functions like cl_object_top() always
+ * work). Top-object keeps a reference on every
+ * sub-object.
+ *
+ * When top-object is destroyed (lov_delete_raid0())
+ * it releases its reference to a sub-object and waits
+ * until the latter is finally destroyed.
+ */
+ struct lovsub_object **lo_sub;
+ /**
+ * protect lo_sub
+ */
spinlock_t lo_sub_lock;
- /**
- * Cached object attribute, built from sub-object
- * attributes.
- */
- struct cl_attr lo_attr;
- } raid0;
- struct lov_layout_state_empty {
- } empty;
+ /**
+ * Cached object attribute, built from sub-object
+ * attributes.
+ */
+ struct cl_attr lo_attr;
+ } raid0;
+ struct lov_layout_state_empty {
+ } empty;
struct lov_layout_state_released {
} released;
- } u;
- /**
- * Thread that acquired lov_object::lo_type_guard in an exclusive
- * mode.
- */
- cfs_task_t *lo_owner;
+ } u;
+ /**
+ * Thread that acquired lov_object::lo_type_guard in an exclusive
+ * mode.
+ */
+ struct task_struct *lo_owner;
};
/**
static inline void lov_conf_freeze(struct lov_object *lov)
{
- if (lov->lo_owner != cfs_current())
+ if (lov->lo_owner != current)
down_read(&lov->lo_type_guard);
}
static inline void lov_conf_thaw(struct lov_object *lov)
{
- if (lov->lo_owner != cfs_current())
+ if (lov->lo_owner != current)
up_read(&lov->lo_type_guard);
}
static void lov_conf_lock(struct lov_object *lov)
{
- LASSERT(lov->lo_owner != cfs_current());
+ LASSERT(lov->lo_owner != current);
down_write(&lov->lo_type_guard);
LASSERT(lov->lo_owner == NULL);
- lov->lo_owner = cfs_current();
+ lov->lo_owner = current;
}
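The lo_owner checks above make the configuration lock safely re-entrant for
its writer: once lov_conf_lock() records current, nested object methods
that call lov_conf_freeze()/lov_conf_thaw() skip down_read() and cannot
self-deadlock. A hedged sketch of such a caller (the function name is
hypothetical):

static void lov_reconfigure_sketch(struct lov_object *lov)
{
	lov_conf_lock(lov);	/* down_write(); lo_owner = current */
	/* a layout change may re-enter methods that freeze/thaw;
	 * those become no-ops for this thread */
	lov_conf_unlock(lov);
}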
static void lov_conf_unlock(struct lov_object *lov)
lsm = lsm_addref(lov->lo_lsm);
CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n",
lsm, cfs_atomic_read(&lsm->lsm_refc),
- lov->lo_layout_invalid, cfs_current());
+ lov->lo_layout_invalid, current);
}
lov_conf_thaw(lov);
return lsm;
return;
CDEBUG(D_INODE, "lsm %p decref %d by %p.\n",
- lsm, cfs_atomic_read(&lsm->lsm_refc), cfs_current());
+ lsm, cfs_atomic_read(&lsm->lsm_refc), current);
lov_free_memmd(&lsm);
}
static void lovsub_parent_lock(const struct lu_env *env, struct lov_lock *lov)
{
- struct cl_lock *parent;
-
- ENTRY;
- parent = lov->lls_cl.cls_lock;
- cl_lock_get(parent);
- lu_ref_add(&parent->cll_reference, "lovsub-parent", cfs_current());
- cl_lock_mutex_get(env, parent);
- EXIT;
+ struct cl_lock *parent;
+
+ ENTRY;
+ parent = lov->lls_cl.cls_lock;
+ cl_lock_get(parent);
+ lu_ref_add(&parent->cll_reference, "lovsub-parent", current);
+ cl_lock_mutex_get(env, parent);
+ EXIT;
}
static void lovsub_parent_unlock(const struct lu_env *env, struct lov_lock *lov)
{
- struct cl_lock *parent;
-
- ENTRY;
- parent = lov->lls_cl.cls_lock;
- cl_lock_mutex_put(env, lov->lls_cl.cls_lock);
- lu_ref_del(&parent->cll_reference, "lovsub-parent", cfs_current());
- cl_lock_put(env, parent);
- EXIT;
+ struct cl_lock *parent;
+
+ ENTRY;
+ parent = lov->lls_cl.cls_lock;
+ cl_lock_mutex_put(env, lov->lls_cl.cls_lock);
+ lu_ref_del(&parent->cll_reference, "lovsub-parent", current);
+ cl_lock_put(env, parent);
+ EXIT;
}
/**
int mdt_ck_thread_start(struct mdt_device *mdt)
{
struct ptlrpc_thread *thread = &mdt->mdt_ck_thread;
- cfs_task_t *task;
+ struct task_struct *task;
init_waitqueue_head(&thread->t_ctl_waitq);
task = kthread_run(mdt_ck_thread_main, mdt, "mdt_ck");
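kthread_run() returns an ERR_PTR() on failure rather than NULL, so the
natural follow-up (presumably what the elided code does) is the usual
IS_ERR()/PTR_ERR() check:

	task = kthread_run(mdt_ck_thread_main, mdt, "mdt_ck");
	if (IS_ERR(task))
		RETURN(PTR_ERR(task));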
/* Pass the ioc down */
static int mdt_ioc_child(struct lu_env *env, struct mdt_device *mdt,
- unsigned int cmd, int len, void *data)
+ unsigned int cmd, int len, void *data)
{
- struct lu_context ioctl_session;
- struct md_device *next = mdt->mdt_child;
- int rc;
- ENTRY;
+ struct lu_context ioctl_session;
+ struct md_device *next = mdt->mdt_child;
+ int rc;
+ ENTRY;
- rc = lu_context_init(&ioctl_session, LCT_SESSION);
- if (rc)
- RETURN(rc);
- ioctl_session.lc_thread = (struct ptlrpc_thread *)cfs_current();
- lu_context_enter(&ioctl_session);
- env->le_ses = &ioctl_session;
+ rc = lu_context_init(&ioctl_session, LCT_SESSION);
+ if (rc)
+ RETURN(rc);
+ ioctl_session.lc_thread = (struct ptlrpc_thread *)current;
+ lu_context_enter(&ioctl_session);
+ env->le_ses = &ioctl_session;
- LASSERT(next->md_ops->mdo_iocontrol);
- rc = next->md_ops->mdo_iocontrol(env, next, cmd, len, data);
+ LASSERT(next->md_ops->mdo_iocontrol);
+ rc = next->md_ops->mdo_iocontrol(env, next, cmd, len, data);
- lu_context_exit(&ioctl_session);
- lu_context_fini(&ioctl_session);
- RETURN(rc);
+ lu_context_exit(&ioctl_session);
+ lu_context_fini(&ioctl_session);
+ RETURN(rc);
}
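mdt_ioc_child() shows the strict pairing of the lu_context lifecycle:
init, enter, do the work, exit, fini, with enter/exit nested inside
init/fini. The bracket in isolation:

	struct lu_context ses;
	int rc = lu_context_init(&ses, LCT_SESSION);
	if (rc == 0) {
		lu_context_enter(&ses);
		/* ... code that needs a session context ... */
		lu_context_exit(&ses);
		lu_context_fini(&ses);
	}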
static int mdt_ioc_version_get(struct mdt_thread_info *mti, void *karg)
rwlock_t mdt_sptlrpc_lock;
struct sptlrpc_rule_set mdt_sptlrpc_rset;
- /* capability keys */
- unsigned long mdt_capa_timeout;
- __u32 mdt_capa_alg;
- struct dt_object *mdt_ck_obj;
- unsigned long mdt_ck_timeout;
- unsigned long mdt_ck_expiry;
- cfs_timer_t mdt_ck_timer;
- struct ptlrpc_thread mdt_ck_thread;
- struct lustre_capa_key mdt_capa_keys[2];
+ /* capability keys */
+ unsigned long mdt_capa_timeout;
+ __u32 mdt_capa_alg;
+ struct dt_object *mdt_ck_obj;
+ unsigned long mdt_ck_timeout;
+ unsigned long mdt_ck_expiry;
+ struct timer_list mdt_ck_timer;
+ struct ptlrpc_thread mdt_ck_thread;
+ struct lustre_capa_key mdt_capa_keys[2];
unsigned int mdt_capa_conf:1,
mdt_som_conf:1,
/* Enable remote dir on non-MDT0 */
int mgs_ir_init_fs(const struct lu_env *env, struct mgs_device *mgs,
struct fs_db *fsdb)
{
- cfs_task_t *task;
+ struct task_struct *task;
if (!ir_timeout)
ir_timeout = OBD_IR_MGS_TIMEOUT;
*/
void cl_page_list_init(struct cl_page_list *plist)
{
- ENTRY;
- plist->pl_nr = 0;
- CFS_INIT_LIST_HEAD(&plist->pl_pages);
- plist->pl_owner = cfs_current();
- EXIT;
+ ENTRY;
+ plist->pl_nr = 0;
+ CFS_INIT_LIST_HEAD(&plist->pl_pages);
+ plist->pl_owner = current;
+ EXIT;
}
EXPORT_SYMBOL(cl_page_list_init);
*/
void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
{
- ENTRY;
- /* it would be better to check that page is owned by "current" io, but
- * it is not passed here. */
- LASSERT(page->cp_owner != NULL);
- LINVRNT(plist->pl_owner == cfs_current());
+ ENTRY;
+	/* It would be better to check that the page is owned by the
+	 * "current" io, but it is not passed here. */
+ LASSERT(page->cp_owner != NULL);
+ LINVRNT(plist->pl_owner == current);
lockdep_off();
mutex_lock(&page->cp_mutex);
lockdep_on();
- LASSERT(cfs_list_empty(&page->cp_batch));
- cfs_list_add_tail(&page->cp_batch, &plist->pl_pages);
- ++plist->pl_nr;
+ LASSERT(cfs_list_empty(&page->cp_batch));
+ cfs_list_add_tail(&page->cp_batch, &plist->pl_pages);
+ ++plist->pl_nr;
lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
cl_page_get(page);
EXIT;
* Removes a page from a page list.
*/
void cl_page_list_del(const struct lu_env *env,
- struct cl_page_list *plist, struct cl_page *page)
+ struct cl_page_list *plist, struct cl_page *page)
{
- LASSERT(plist->pl_nr > 0);
- LINVRNT(plist->pl_owner == cfs_current());
+ LASSERT(plist->pl_nr > 0);
+ LINVRNT(plist->pl_owner == current);
- ENTRY;
- cfs_list_del_init(&page->cp_batch);
+ ENTRY;
+ cfs_list_del_init(&page->cp_batch);
lockdep_off();
mutex_unlock(&page->cp_mutex);
lockdep_on();
- --plist->pl_nr;
+ --plist->pl_nr;
lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
cl_page_put(env, page);
EXIT;
* Moves a page from one page list to another.
*/
void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
- struct cl_page *page)
+ struct cl_page *page)
{
- LASSERT(src->pl_nr > 0);
- LINVRNT(dst->pl_owner == cfs_current());
- LINVRNT(src->pl_owner == cfs_current());
+ LASSERT(src->pl_nr > 0);
+ LINVRNT(dst->pl_owner == current);
+ LINVRNT(src->pl_owner == current);
- ENTRY;
- cfs_list_move_tail(&page->cp_batch, &dst->pl_pages);
- --src->pl_nr;
- ++dst->pl_nr;
+ ENTRY;
+ cfs_list_move_tail(&page->cp_batch, &dst->pl_pages);
+ --src->pl_nr;
+ ++dst->pl_nr;
lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
src, dst);
EXIT;
*/
void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
{
- struct cl_page *page;
- struct cl_page *tmp;
+ struct cl_page *page;
+ struct cl_page *tmp;
- LINVRNT(list->pl_owner == cfs_current());
- LINVRNT(head->pl_owner == cfs_current());
+ LINVRNT(list->pl_owner == current);
+ LINVRNT(head->pl_owner == current);
- ENTRY;
- cl_page_list_for_each_safe(page, tmp, list)
- cl_page_list_move(head, list, page);
- EXIT;
+ ENTRY;
+ cl_page_list_for_each_safe(page, tmp, list)
+ cl_page_list_move(head, list, page);
+ EXIT;
}
EXPORT_SYMBOL(cl_page_list_splice);
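A hypothetical caller of the list primitives above, showing the ownership
rule (both queues must belong to the calling thread, as the LINVRNTs
enforce) and a wholesale transfer; "in" is assumed to be an owned,
populated queue:

static void transfer_pages_sketch(const struct lu_env *env,
				  struct cl_page_list *in,
				  struct cl_page_list *out)
{
	cl_page_list_init(out);		/* out->pl_owner = current */
	cl_page_list_splice(in, out);	/* moves every page from in to out */
	cl_page_list_fini(env, out);	/* deletes pages, drops references */
}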
* Disowns pages in a queue.
*/
void cl_page_list_disown(const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist)
+ struct cl_io *io, struct cl_page_list *plist)
{
- struct cl_page *page;
- struct cl_page *temp;
+ struct cl_page *page;
+ struct cl_page *temp;
- LINVRNT(plist->pl_owner == cfs_current());
+ LINVRNT(plist->pl_owner == current);
- ENTRY;
- cl_page_list_for_each_safe(page, temp, plist) {
- LASSERT(plist->pl_nr > 0);
+ ENTRY;
+ cl_page_list_for_each_safe(page, temp, plist) {
+ LASSERT(plist->pl_nr > 0);
- cfs_list_del_init(&page->cp_batch);
+ cfs_list_del_init(&page->cp_batch);
lockdep_off();
mutex_unlock(&page->cp_mutex);
lockdep_on();
- --plist->pl_nr;
- /*
- * cl_page_disown0 rather than usual cl_page_disown() is used,
- * because pages are possibly in CPS_FREEING state already due
- * to the call to cl_page_list_discard().
- */
- /*
- * XXX cl_page_disown0() will fail if page is not locked.
- */
- cl_page_disown0(env, io, page);
+ --plist->pl_nr;
+ /*
+ * cl_page_disown0 rather than usual cl_page_disown() is used,
+ * because pages are possibly in CPS_FREEING state already due
+ * to the call to cl_page_list_discard().
+ */
+ /*
+ * XXX cl_page_disown0() will fail if page is not locked.
+ */
+ cl_page_disown0(env, io, page);
lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue",
plist);
- cl_page_put(env, page);
- }
- EXIT;
+ cl_page_put(env, page);
+ }
+ EXIT;
}
EXPORT_SYMBOL(cl_page_list_disown);
*/
void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
{
- struct cl_page *page;
- struct cl_page *temp;
+ struct cl_page *page;
+ struct cl_page *temp;
- LINVRNT(plist->pl_owner == cfs_current());
+ LINVRNT(plist->pl_owner == current);
- ENTRY;
- cl_page_list_for_each_safe(page, temp, plist)
- cl_page_list_del(env, plist, page);
- LASSERT(plist->pl_nr == 0);
- EXIT;
+ ENTRY;
+ cl_page_list_for_each_safe(page, temp, plist)
+ cl_page_list_del(env, plist, page);
+ LASSERT(plist->pl_nr == 0);
+ EXIT;
}
EXPORT_SYMBOL(cl_page_list_fini);
* Owns all pages in a queue.
*/
int cl_page_list_own(const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist)
+ struct cl_io *io, struct cl_page_list *plist)
{
- struct cl_page *page;
- struct cl_page *temp;
- pgoff_t index = 0;
- int result;
+ struct cl_page *page;
+ struct cl_page *temp;
+ pgoff_t index = 0;
+ int result;
- LINVRNT(plist->pl_owner == cfs_current());
+ LINVRNT(plist->pl_owner == current);
- ENTRY;
- result = 0;
- cl_page_list_for_each_safe(page, temp, plist) {
- LASSERT(index <= page->cp_index);
- index = page->cp_index;
- if (cl_page_own(env, io, page) == 0)
- result = result ?: page->cp_error;
- else
- cl_page_list_del(env, plist, page);
- }
- RETURN(result);
+ ENTRY;
+ result = 0;
+ cl_page_list_for_each_safe(page, temp, plist) {
+ LASSERT(index <= page->cp_index);
+ index = page->cp_index;
+ if (cl_page_own(env, io, page) == 0)
+ result = result ?: page->cp_error;
+ else
+ cl_page_list_del(env, plist, page);
+ }
+ RETURN(result);
}
EXPORT_SYMBOL(cl_page_list_own);
* Assumes all pages in a queue.
*/
void cl_page_list_assume(const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist)
+ struct cl_io *io, struct cl_page_list *plist)
{
- struct cl_page *page;
+ struct cl_page *page;
- LINVRNT(plist->pl_owner == cfs_current());
+ LINVRNT(plist->pl_owner == current);
- cl_page_list_for_each(page, plist)
- cl_page_assume(env, io, page);
+ cl_page_list_for_each(page, plist)
+ cl_page_assume(env, io, page);
}
EXPORT_SYMBOL(cl_page_list_assume);
* Discards all pages in a queue.
*/
void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *plist)
+ struct cl_page_list *plist)
{
- struct cl_page *page;
+ struct cl_page *page;
- LINVRNT(plist->pl_owner == cfs_current());
- ENTRY;
- cl_page_list_for_each(page, plist)
- cl_page_discard(env, io, page);
- EXIT;
+ LINVRNT(plist->pl_owner == current);
+ ENTRY;
+ cl_page_list_for_each(page, plist)
+ cl_page_discard(env, io, page);
+ EXIT;
}
EXPORT_SYMBOL(cl_page_list_discard);
* Unmaps all pages in a queue from user virtual memory.
*/
int cl_page_list_unmap(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *plist)
+ struct cl_page_list *plist)
{
- struct cl_page *page;
- int result;
+ struct cl_page *page;
+ int result;
- LINVRNT(plist->pl_owner == cfs_current());
- ENTRY;
- result = 0;
- cl_page_list_for_each(page, plist) {
- result = cl_page_unmap(env, io, page);
- if (result != 0)
- break;
- }
- RETURN(result);
+ LINVRNT(plist->pl_owner == current);
+ ENTRY;
+ result = 0;
+ cl_page_list_for_each(page, plist) {
+ result = cl_page_unmap(env, io, page);
+ if (result != 0)
+ break;
+ }
+ RETURN(result);
}
EXPORT_SYMBOL(cl_page_list_unmap);
* \see CLS_INTRANSIT
*/
enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
- struct cl_lock *lock)
+ struct cl_lock *lock)
{
- enum cl_lock_state state = lock->cll_state;
+ enum cl_lock_state state = lock->cll_state;
- LASSERT(cl_lock_is_mutexed(lock));
- LASSERT(state != CLS_INTRANSIT);
- LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
- "Malformed lock state %d.\n", state);
+ LASSERT(cl_lock_is_mutexed(lock));
+ LASSERT(state != CLS_INTRANSIT);
+ LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
+ "Malformed lock state %d.\n", state);
- cl_lock_state_set(env, lock, CLS_INTRANSIT);
- lock->cll_intransit_owner = cfs_current();
- cl_lock_hold_add(env, lock, "intransit", cfs_current());
- return state;
+ cl_lock_state_set(env, lock, CLS_INTRANSIT);
+ lock->cll_intransit_owner = current;
+ cl_lock_hold_add(env, lock, "intransit", current);
+ return state;
}
EXPORT_SYMBOL(cl_lock_intransit);
* Exit the intransit state and restore the lock state to the original state
*/
void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
- enum cl_lock_state state)
+ enum cl_lock_state state)
{
- LASSERT(cl_lock_is_mutexed(lock));
- LASSERT(lock->cll_state == CLS_INTRANSIT);
- LASSERT(state != CLS_INTRANSIT);
- LASSERT(lock->cll_intransit_owner == cfs_current());
+ LASSERT(cl_lock_is_mutexed(lock));
+ LASSERT(lock->cll_state == CLS_INTRANSIT);
+ LASSERT(state != CLS_INTRANSIT);
+ LASSERT(lock->cll_intransit_owner == current);
- lock->cll_intransit_owner = NULL;
- cl_lock_state_set(env, lock, state);
- cl_lock_unhold(env, lock, "intransit", cfs_current());
+ lock->cll_intransit_owner = NULL;
+ cl_lock_state_set(env, lock, state);
+ cl_lock_unhold(env, lock, "intransit", current);
}
EXPORT_SYMBOL(cl_lock_extransit);
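Putting the two halves together, an intransit section brackets a lengthy
operation during which the lock mutex may be dropped; the saved state is
restored on exit. In miniature (the caller is assumed to hold the lock
mutex):

	enum cl_lock_state saved;

	saved = cl_lock_intransit(env, lock);	/* hold + CLS_INTRANSIT */
	/* ... long operation; cll_guard may be released meanwhile ... */
	cl_lock_extransit(env, lock, saved);	/* restore original state */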
*/
int cl_lock_is_intransit(struct cl_lock *lock)
{
- LASSERT(cl_lock_is_mutexed(lock));
- return lock->cll_state == CLS_INTRANSIT &&
- lock->cll_intransit_owner != cfs_current();
+ LASSERT(cl_lock_is_mutexed(lock));
+ return lock->cll_state == CLS_INTRANSIT &&
+ lock->cll_intransit_owner != current;
}
EXPORT_SYMBOL(cl_lock_is_intransit);
/**
*/
void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
{
- LINVRNT(cl_lock_invariant(env, lock));
-
- if (lock->cll_guarder == cfs_current()) {
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(lock->cll_depth > 0);
- } else {
- struct cl_object_header *hdr;
- struct cl_thread_info *info;
- int i;
+ LINVRNT(cl_lock_invariant(env, lock));
- LINVRNT(lock->cll_guarder != cfs_current());
- hdr = cl_object_header(lock->cll_descr.cld_obj);
- /*
- * Check that mutices are taken in the bottom-to-top order.
- */
- info = cl_env_info(env);
- for (i = 0; i < hdr->coh_nesting; ++i)
- LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
+ if (lock->cll_guarder == current) {
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(lock->cll_depth > 0);
+ } else {
+ struct cl_object_header *hdr;
+ struct cl_thread_info *info;
+ int i;
+
+ LINVRNT(lock->cll_guarder != current);
+ hdr = cl_object_header(lock->cll_descr.cld_obj);
+ /*
+		 * Check that mutexes are taken in the bottom-to-top order.
+ */
+ info = cl_env_info(env);
+ for (i = 0; i < hdr->coh_nesting; ++i)
+ LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
- lock->cll_guarder = cfs_current();
- LINVRNT(lock->cll_depth == 0);
- }
- cl_lock_mutex_tail(env, lock);
+ lock->cll_guarder = current;
+ LINVRNT(lock->cll_depth == 0);
+ }
+ cl_lock_mutex_tail(env, lock);
}
EXPORT_SYMBOL(cl_lock_mutex_get);
*/
int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
{
- int result;
+ int result;
- LINVRNT(cl_lock_invariant_trusted(env, lock));
- ENTRY;
+ LINVRNT(cl_lock_invariant_trusted(env, lock));
+ ENTRY;
- result = 0;
- if (lock->cll_guarder == cfs_current()) {
- LINVRNT(lock->cll_depth > 0);
- cl_lock_mutex_tail(env, lock);
+ result = 0;
+ if (lock->cll_guarder == current) {
+ LINVRNT(lock->cll_depth > 0);
+ cl_lock_mutex_tail(env, lock);
} else if (mutex_trylock(&lock->cll_guard)) {
- LINVRNT(lock->cll_depth == 0);
- lock->cll_guarder = cfs_current();
- cl_lock_mutex_tail(env, lock);
- } else
- result = -EBUSY;
- RETURN(result);
+ LINVRNT(lock->cll_depth == 0);
+ lock->cll_guarder = current;
+ cl_lock_mutex_tail(env, lock);
+ } else
+ result = -EBUSY;
+ RETURN(result);
}
EXPORT_SYMBOL(cl_lock_mutex_try);
*/
void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
{
- struct cl_thread_counters *counters;
+ struct cl_thread_counters *counters;
- LINVRNT(cl_lock_invariant(env, lock));
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(lock->cll_guarder == cfs_current());
- LINVRNT(lock->cll_depth > 0);
+ LINVRNT(cl_lock_invariant(env, lock));
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(lock->cll_guarder == current);
+ LINVRNT(lock->cll_depth > 0);
- counters = cl_lock_counters(env, lock);
- LINVRNT(counters->ctc_nr_locks_locked > 0);
+ counters = cl_lock_counters(env, lock);
+ LINVRNT(counters->ctc_nr_locks_locked > 0);
- cl_lock_trace(D_TRACE, env, "put mutex", lock);
- lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
- counters->ctc_nr_locks_locked--;
- if (--lock->cll_depth == 0) {
- lock->cll_guarder = NULL;
+ cl_lock_trace(D_TRACE, env, "put mutex", lock);
+ lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
+ counters->ctc_nr_locks_locked--;
+ if (--lock->cll_depth == 0) {
+ lock->cll_guarder = NULL;
mutex_unlock(&lock->cll_guard);
- }
+ }
}
EXPORT_SYMBOL(cl_lock_mutex_put);
*/
int cl_lock_is_mutexed(struct cl_lock *lock)
{
- return lock->cll_guarder == cfs_current();
+ return lock->cll_guarder == current;
}
EXPORT_SYMBOL(cl_lock_is_mutexed);
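The guard mutex above is recursive for its owner: once cll_guarder ==
current, further cl_lock_mutex_get() calls only bump cll_depth, and only
the final cl_lock_mutex_put() releases cll_guard. In miniature:

	cl_lock_mutex_get(env, lock);	/* cll_depth = 1, takes cll_guard */
	cl_lock_mutex_get(env, lock);	/* cll_depth = 2, no second lock */
	cl_lock_mutex_put(env, lock);	/* cll_depth = 1 */
	cl_lock_mutex_put(env, lock);	/* cll_depth = 0, releases guard */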
int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
{
wait_queue_t waiter;
- cfs_sigset_t blocked;
+ sigset_t blocked;
int result;
ENTRY;
*/
void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
{
- struct cl_object_header *head;
- struct cl_lock *lock;
+ struct cl_object_header *head;
+ struct cl_lock *lock;
- ENTRY;
- head = cl_object_header(obj);
- /*
- * If locks are destroyed without cancellation, all pages must be
- * already destroyed (as otherwise they will be left unprotected).
- */
- LASSERT(ergo(!cancel,
- head->coh_tree.rnode == NULL && head->coh_pages == 0));
+ ENTRY;
+ head = cl_object_header(obj);
+ /*
+ * If locks are destroyed without cancellation, all pages must be
+ * already destroyed (as otherwise they will be left unprotected).
+ */
+ LASSERT(ergo(!cancel,
+ head->coh_tree.rnode == NULL && head->coh_pages == 0));
spin_lock(&head->coh_lock_guard);
while (!cfs_list_empty(&head->coh_locks)) {
struct cl_lock, cll_linkage);
cl_lock_get_trust(lock);
spin_unlock(&head->coh_lock_guard);
- lu_ref_add(&lock->cll_reference, "prune", cfs_current());
+ lu_ref_add(&lock->cll_reference, "prune", current);
again:
- cl_lock_mutex_get(env, lock);
- if (lock->cll_state < CLS_FREEING) {
- LASSERT(lock->cll_users <= 1);
- if (unlikely(lock->cll_users == 1)) {
- struct l_wait_info lwi = { 0 };
-
- cl_lock_mutex_put(env, lock);
- l_wait_event(lock->cll_wq,
- lock->cll_users == 0,
- &lwi);
- goto again;
- }
-
- if (cancel)
- cl_lock_cancel(env, lock);
- cl_lock_delete(env, lock);
- }
- cl_lock_mutex_put(env, lock);
- lu_ref_del(&lock->cll_reference, "prune", cfs_current());
- cl_lock_put(env, lock);
+ cl_lock_mutex_get(env, lock);
+ if (lock->cll_state < CLS_FREEING) {
+ LASSERT(lock->cll_users <= 1);
+ if (unlikely(lock->cll_users == 1)) {
+ struct l_wait_info lwi = { 0 };
+
+ cl_lock_mutex_put(env, lock);
+ l_wait_event(lock->cll_wq,
+ lock->cll_users == 0,
+ &lwi);
+ goto again;
+ }
+
+ if (cancel)
+ cl_lock_cancel(env, lock);
+ cl_lock_delete(env, lock);
+ }
+ cl_lock_mutex_put(env, lock);
+ lu_ref_del(&lock->cll_reference, "prune", current);
+ cl_lock_put(env, lock);
spin_lock(&head->coh_lock_guard);
}
spin_unlock(&head->coh_lock_guard);
*/
cfs_hlist_node_t ce_node;
#endif
- /**
- * Owner for the current cl_env.
- *
- * If LL_TASK_CL_ENV is defined, this point to the owning cfs_current(),
- * only for debugging purpose ;
- * Otherwise hash is used, and this is the key for cfs_hash.
- * Now current thread pid is stored. Note using thread pointer would
- * lead to unbalanced hash because of its specific allocation locality
- * and could be varied for different platforms and OSes, even different
- * OS versions.
- */
- void *ce_owner;
+ /**
+ * Owner for the current cl_env.
+ *
+	 * If LL_TASK_CL_ENV is defined, this points to the owning task
+	 * (current) and is used only for debugging;
+	 * otherwise a hash is used, and this is the key for cfs_hash.
+	 * Currently the thread pid is stored. Note that using the thread
+	 * pointer would lead to an unbalanced hash because of its specific
+	 * allocation locality, which could vary across platforms and OSes,
+	 * even across OS versions.
+ */
+ void *ce_owner;
/*
* Linkage into global list of all client environments. Used for
static inline struct cl_env *cl_env_fetch(void)
{
- struct cl_env *cle;
+ struct cl_env *cle;
- cle = cfs_hash_lookup(cl_env_hash, (void *) (long) cfs_current()->pid);
- LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
- return cle;
+ cle = cfs_hash_lookup(cl_env_hash, (void *) (long) current->pid);
+ LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
+ return cle;
}
static inline void cl_env_attach(struct cl_env *cle)
{
- if (cle) {
- int rc;
-
- LASSERT(cle->ce_owner == NULL);
- cle->ce_owner = (void *) (long) cfs_current()->pid;
- rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
- &cle->ce_node);
- LASSERT(rc == 0);
- }
+ if (cle) {
+ int rc;
+
+ LASSERT(cle->ce_owner == NULL);
+ cle->ce_owner = (void *) (long) current->pid;
+ rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
+ &cle->ce_node);
+ LASSERT(rc == 0);
+ }
}
static inline void cl_env_do_detach(struct cl_env *cle)
{
- void *cookie;
+ void *cookie;
- LASSERT(cle->ce_owner == (void *) (long) cfs_current()->pid);
- cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
- &cle->ce_node);
- LASSERT(cookie == cle);
- cle->ce_owner = NULL;
+ LASSERT(cle->ce_owner == (void *) (long) current->pid);
+ cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
+ &cle->ce_node);
+ LASSERT(cookie == cle);
+ cle->ce_owner = NULL;
}
static int cl_env_store_init(void) {
static inline struct cl_env *cl_env_fetch(void)
{
- struct cl_env *cle;
+ struct cl_env *cle;
- cle = cfs_current()->LL_TASK_CL_ENV;
- if (cle && cle->ce_magic != &cl_env_init0)
- cle = NULL;
- return cle;
+ cle = current->LL_TASK_CL_ENV;
+ if (cle && cle->ce_magic != &cl_env_init0)
+ cle = NULL;
+ return cle;
}
static inline void cl_env_attach(struct cl_env *cle)
{
- if (cle) {
- LASSERT(cle->ce_owner == NULL);
- cle->ce_owner = cfs_current();
- cle->ce_prev = cfs_current()->LL_TASK_CL_ENV;
- cfs_current()->LL_TASK_CL_ENV = cle;
- }
+ if (cle) {
+ LASSERT(cle->ce_owner == NULL);
+ cle->ce_owner = current;
+ cle->ce_prev = current->LL_TASK_CL_ENV;
+ current->LL_TASK_CL_ENV = cle;
+ }
}
static inline void cl_env_do_detach(struct cl_env *cle)
{
- LASSERT(cle->ce_owner == cfs_current());
- LASSERT(cfs_current()->LL_TASK_CL_ENV == cle);
- cfs_current()->LL_TASK_CL_ENV = cle->ce_prev;
- cle->ce_owner = NULL;
+ LASSERT(cle->ce_owner == current);
+ LASSERT(current->LL_TASK_CL_ENV == cle);
+ current->LL_TASK_CL_ENV = cle->ce_prev;
+ cle->ce_owner = NULL;
}
static int cl_env_store_init(void) { return 0; }
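The two cl_env_fetch() variants above are build-time alternatives:
a pid-keyed cfs_hash lookup versus a direct task_struct field. Condensed
into one hedged sketch (the helper name is hypothetical; LL_TASK_CL_ENV
and cl_env_hash are the tokens used above):

static inline struct cl_env *cl_env_fetch_sketch(void)
{
#ifdef LL_TASK_CL_ENV
	return current->LL_TASK_CL_ENV;		/* direct task field */
#else
	return cfs_hash_lookup(cl_env_hash,	/* pid-keyed hash */
			       (void *)(long)current->pid);
#endif
}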
*/
PASSERT(env, page, slice != NULL);
- page = slice->cpl_page;
- /*
- * Can safely call cl_page_get_trust() under
- * radix-tree spin-lock.
- *
- * XXX not true, because @page is from object another
- * than @hdr and protected by different tree lock.
- */
- cl_page_get_trust(page);
- lu_ref_add_atomic(&page->cp_reference,
- "gang_lookup", cfs_current());
- pvec[j++] = page;
- }
+ page = slice->cpl_page;
+ /*
+ * Can safely call cl_page_get_trust() under
+ * radix-tree spin-lock.
+ *
+		 * XXX not true, because @page may be from an object other
+		 * than @hdr's and is protected by a different tree lock.
+ */
+ cl_page_get_trust(page);
+ lu_ref_add_atomic(&page->cp_reference,
+ "gang_lookup", current);
+ pvec[j++] = page;
+ }
- /*
- * Here a delicate locking dance is performed. Current thread
- * holds a reference to a page, but has to own it before it
- * can be placed into queue. Owning implies waiting, so
- * radix-tree lock is to be released. After a wait one has to
- * check that pages weren't truncated (cl_page_own() returns
- * error in the latter case).
- */
+ /*
+ * Here a delicate locking dance is performed. Current thread
+ * holds a reference to a page, but has to own it before it
+ * can be placed into queue. Owning implies waiting, so
+ * radix-tree lock is to be released. After a wait one has to
+ * check that pages weren't truncated (cl_page_own() returns
+ * error in the latter case).
+ */
spin_unlock(&hdr->coh_page_guard);
- tree_lock = 0;
-
- for (i = 0; i < j; ++i) {
- page = pvec[i];
- if (res == CLP_GANG_OKAY)
- res = (*cb)(env, io, page, cbdata);
- lu_ref_del(&page->cp_reference,
- "gang_lookup", cfs_current());
- cl_page_put(env, page);
- }
+ tree_lock = 0;
+
+ for (i = 0; i < j; ++i) {
+ page = pvec[i];
+ if (res == CLP_GANG_OKAY)
+ res = (*cb)(env, io, page, cbdata);
+ lu_ref_del(&page->cp_reference,
+ "gang_lookup", current);
+ cl_page_put(env, page);
+ }
if (nr < CLT_PVEC_SIZE || end_of_region)
break;
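The "delicate locking dance" in miniature: take a trusted reference while
the radix-tree lock is held, drop the lock, then attempt ownership,
accepting that the page may have been truncated in the window:

	cl_page_get_trust(page);		/* under coh_page_guard */
	spin_unlock(&hdr->coh_page_guard);
	if (cl_page_own(env, io, page) == 0) {
		/* page is ours; queue or process it */
	}					/* nonzero: page was freed */
	cl_page_put(env, page);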
int obd_zombie_impexp_init(void)
{
#ifdef __KERNEL__
- cfs_task_t *task;
+ struct task_struct *task;
#endif
CFS_INIT_LIST_HEAD(&obd_zombie_imports);
static int obd_proc_read_health(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
- int rc = 0, i;
- *eof = 1;
+ int rc = 0, i;
+ *eof = 1;
- if (libcfs_catastrophe)
- rc += snprintf(page + rc, count - rc, "LBUG\n");
+ if (libcfs_catastrophe)
+ rc += snprintf(page + rc, count - rc, "LBUG\n");
read_lock(&obd_dev_lock);
- for (i = 0; i < class_devno_max(); i++) {
- struct obd_device *obd;
+ for (i = 0; i < class_devno_max(); i++) {
+ struct obd_device *obd;
- obd = class_num2obd(i);
- if (obd == NULL || !obd->obd_attached || !obd->obd_set_up)
- continue;
+ obd = class_num2obd(i);
+ if (obd == NULL || !obd->obd_attached || !obd->obd_set_up)
+ continue;
- LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
- if (obd->obd_stopping)
- continue;
+ LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
+ if (obd->obd_stopping)
+ continue;
- class_incref(obd, __FUNCTION__, cfs_current());
+ class_incref(obd, __FUNCTION__, current);
read_unlock(&obd_dev_lock);
- if (obd_health_check(NULL, obd)) {
- rc += snprintf(page + rc, count - rc,
- "device %s reported unhealthy\n",
- obd->obd_name);
- }
- class_decref(obd, __FUNCTION__, cfs_current());
+ if (obd_health_check(NULL, obd)) {
+ rc += snprintf(page + rc, count - rc,
+ "device %s reported unhealthy\n",
+ obd->obd_name);
+ }
+ class_decref(obd, __FUNCTION__, current);
read_lock(&obd_dev_lock);
- }
+ }
read_unlock(&obd_dev_lock);
- if (rc == 0)
- return snprintf(page, count, "healthy\n");
+ if (rc == 0)
+ return snprintf(page, count, "healthy\n");
- rc += snprintf(page + rc, count - rc, "NOT HEALTHY\n");
- return rc;
+ rc += snprintf(page + rc, count - rc, "NOT HEALTHY\n");
+ return rc;
}
static int obd_proc_rd_jobid_var(char *page, char **start, off_t off,
rc = lu_context_init(&test_session, LCT_SESSION);
if (rc)
GOTO(cleanup_env, rc);
- test_session.lc_thread = (struct ptlrpc_thread *)cfs_current();
+ test_session.lc_thread = (struct ptlrpc_thread *)current;
lu_context_enter(&test_session);
env.le_ses = &test_session;
*cookie = el->el_cookie;
spin_unlock(&ec->ec_lock);
} else {
- cl_lock_release(env, lck, "ec enqueue", cfs_current());
+ cl_lock_release(env, lck, "ec enqueue", current);
}
}
RETURN(rc);
cfs_list_del_init(&oap->oap_pending_item);
cl_page_get(page);
- lu_ref_add(&page->cp_reference, "truncate", cfs_current());
+ lu_ref_add(&page->cp_reference, "truncate", current);
if (cl_page_own(env, io, page) == 0) {
cl_page_unmap(env, io, page);
LASSERT(0);
}
- lu_ref_del(&page->cp_reference, "truncate", cfs_current());
+ lu_ref_del(&page->cp_reference, "truncate", current);
cl_page_put(env, page);
--ext->oe_nr_pages;
RETURN(0);
cfs_list_for_each_entry(tmp, rpclist, oe_link) {
- EASSERT(tmp->oe_owner == cfs_current(), tmp);
+ EASSERT(tmp->oe_owner == current, tmp);
#if 0
if (overlapped(tmp, ext)) {
OSC_EXTENT_DUMP(D_ERROR, tmp, "overlapped %p.\n", ext);
*pc += ext->oe_nr_pages;
cfs_list_move_tail(&ext->oe_link, rpclist);
- ext->oe_owner = cfs_current();
+ ext->oe_owner = current;
RETURN(1);
}
cl_object_get(obj);
client_obd_list_unlock(&cli->cl_loi_list_lock);
lu_object_ref_add_at(&obj->co_lu, &link, "check",
- cfs_current());
+ current);
/* attempt some read/write balancing by alternating between
* reads and writes in an object. The makes_rpc checks here
osc_list_maint(cli, osc);
lu_object_ref_del_at(&obj->co_lu, &link, "check",
- cfs_current());
+ current);
cl_object_put(env, obj);
client_obd_list_lock(&cli->cl_loi_list_lock);
EASSERT(ext->oe_start >= start &&
ext->oe_max_end <= end, ext);
osc_extent_state_set(ext, OES_LOCKING);
- ext->oe_owner = cfs_current();
+ ext->oe_owner = current;
cfs_list_move_tail(&ext->oe_link,
&discard_list);
osc_update_pending(obj, OBD_BRW_WRITE,
*/
cfs_list_t ops_inflight;
};
- /**
- * Thread that submitted this page for transfer. For debugging.
- */
- cfs_task_t *ops_submitter;
- /**
- * Submit time - the time when the page is starting RPC. For debugging.
- */
- cfs_time_t ops_submit_time;
+ /**
+ * Thread that submitted this page for transfer. For debugging.
+ */
+ struct task_struct *ops_submitter;
+ /**
+ * Submit time - the time when the page is starting RPC. For debugging.
+ */
+ cfs_time_t ops_submit_time;
/**
* A lock of which we hold a reference covers this page. Only used by
/** lock covering this extent */
struct cl_lock *oe_osclock;
/** terminator of this extent. Must be true if this extent is in IO. */
- cfs_task_t *oe_owner;
+ struct task_struct *oe_owner;
/** return value of writeback. If somebody is waiting for this extent,
* this value can be known by outside world. */
int oe_rc;
* guarantees that all osc references on dlmlock were
* released. osc_dlm_blocking_ast0() relies on that.
*/
- if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
- cl_lock_get_trust(lock);
- lu_ref_add_atomic(&lock->cll_reference,
- "ast", cfs_current());
- } else
- olck = NULL;
+ if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
+ cl_lock_get_trust(lock);
+ lu_ref_add_atomic(&lock->cll_reference,
+ "ast", current);
+ } else
+ olck = NULL;
}
spin_unlock(&osc_ast_guard);
unlock_res_and_lock(dlm_lock);
static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
{
- struct cl_lock *lock;
+ struct cl_lock *lock;
- lock = olck->ols_cl.cls_lock;
- lu_ref_del(&lock->cll_reference, "ast", cfs_current());
- cl_lock_put(env, lock);
+ lock = olck->ols_cl.cls_lock;
+ lu_ref_del(&lock->cll_reference, "ast", current);
+ cl_lock_put(env, lock);
}
/**
spin_lock(&obj->oo_seatbelt);
cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
- opg->ops_submitter = cfs_current();
+ opg->ops_submitter = current;
spin_unlock(&obj->oo_seatbelt);
}
*/
struct obd_statfs opd_statfs;
cfs_time_t opd_statfs_fresh_till;
- cfs_timer_t opd_statfs_timer;
+ struct timer_list opd_statfs_timer;
int opd_statfs_update_in_progress;
/* how often to update statfs data */
int opd_statfs_maxage;
int osp_init_precreate(struct osp_device *d)
{
struct l_wait_info lwi = { 0 };
- cfs_task_t *task;
+ struct task_struct *task;
ENTRY;
opd.opd_extent.end = nb->offset + nb->len - 1;
opd.opd_timeout = prolong_timeout(req);
- DEBUG_REQ(D_RPCTRACE, req,
- "%s %s: refresh rw locks: " LPU64"/"LPU64" ("LPU64"->"LPU64")\n",
- obd->obd_name, cfs_current()->comm,
- opd.opd_resid.name[0], opd.opd_resid.name[1],
- opd.opd_extent.start, opd.opd_extent.end);
+ DEBUG_REQ(D_RPCTRACE, req,
+ "%s %s: refresh rw locks: " LPU64"/"LPU64" ("LPU64"->"LPU64")\n",
+ obd->obd_name, current->comm,
+ opd.opd_resid.name[0], opd.opd_resid.name[1],
+ opd.opd_extent.start, opd.opd_extent.end);
ost_prolong_locks(&opd);
/* LU-769 - if we ignored the signal because it was already
* pending when we started, we need to handle it now or we risk
* it being ignored forever */
- if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
- cfs_signal_pending()) {
- cfs_sigset_t blocked_sigs =
- cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
-
- /* In fact we only interrupt for the "fatal" signals
- * like SIGINT or SIGKILL. We still ignore less
- * important signals since ptlrpc set is not easily
- * reentrant from userspace again */
- if (cfs_signal_pending())
- ptlrpc_interrupted_set(set);
+ if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
+ cfs_signal_pending()) {
+ sigset_t blocked_sigs =
+ cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
+
+ /* In fact we only interrupt for the "fatal" signals
+ * like SIGINT or SIGKILL. We still ignore less
+			 * important signals, since the ptlrpc set is not
+			 * easily reentrant from userspace again */
+ if (cfs_signal_pending())
+ ptlrpc_interrupted_set(set);
cfs_restore_sigs(blocked_sigs);
- }
+ }
LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
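The signal bracket above in isolation: block everything except the fatal
signals, perform the interruptible work, then restore the previous mask:

	sigset_t saved;

	saved = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
	/* ... only SIGINT/SIGKILL-class signals are deliverable here ... */
	cfs_restore_sigs(saved);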
* the session keyring is created upon upcall, and don't change all
* the way until upcall finished, so rcu lock is not needed here.
*/
- LASSERT(key_tgcred(cfs_current())->session_keyring);
+ LASSERT(key_tgcred(current)->session_keyring);
lockdep_off();
- rc = key_link(key_tgcred(cfs_current())->session_keyring, key);
+ rc = key_link(key_tgcred(current)->session_keyring, key);
lockdep_on();
if (unlikely(rc)) {
CERROR("failed to link key %08x to keyring %08x: %d\n",
key->serial,
- key_tgcred(cfs_current())->session_keyring->serial, rc);
+ key_tgcred(current)->session_keyring->serial, rc);
RETURN(rc);
}
#ifdef __KERNEL__
{
- cfs_task_t *task;
+ struct task_struct *task;
/* bug 17802: XXX client_disconnect_export vs connect request
* race. if client will evicted at this time, we start
* invalidate thread without reference to import and import can
goto out;
}
tmpbuf = cfs_firststr(kbuf, min_t(unsigned long, BUFLEN - 1, count));
- class_incref(obd, __FUNCTION__, cfs_current());
+ class_incref(obd, __FUNCTION__, current);
if (strncmp(tmpbuf, "nid:", 4) == 0)
obd_export_evict_by_nid(obd, tmpbuf + 4);
else
obd_export_evict_by_uuid(obd, tmpbuf);
- class_decref(obd, __FUNCTION__, cfs_current());
+ class_decref(obd, __FUNCTION__, current);
out:
OBD_FREE(kbuf, BUFLEN);
void ping_evictor_start(void)
{
- cfs_task_t *task;
+ struct task_struct *task;
if (++pet_refcount > 1)
return;
if (++index >= num_possible_cpus())
index = 0;
}
- set_cpus_allowed_ptr(cfs_current(),
+ set_cpus_allowed_ptr(current,
cpumask_of_node(cpu_to_node(index)));
}
}
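The binding loop above rotates through the possible CPUs and pins the
current thread to the whole NUMA node of the chosen CPU. As a standalone
helper (the name is hypothetical):

static void bind_next_node_sketch(int *index)
{
	if (++(*index) >= num_possible_cpus())
		*index = 0;
	set_cpus_allowed_ptr(current,
			     cpumask_of_node(cpu_to_node(*index)));
}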
GOTO(out_set, rc);
{
- cfs_task_t *task;
+ struct task_struct *task;
if (index >= 0) {
rc = ptlrpcd_bind(index, max);
if (rc < 0)
int sptlrpc_gc_init(void)
{
struct l_wait_info lwi = { 0 };
- cfs_task_t *task;
+ struct task_struct *task;
mutex_init(&sec_gc_mutex);
spin_lock_init(&sec_gc_list_lock);
{
struct ptlrpc_thread *thread = &qmt->qmt_reba_thread;
struct l_wait_info lwi = { 0 };
- cfs_task_t *task;
+ struct task_struct *task;
ENTRY;
task = kthread_run(qmt_reba_thread, (void *)qmt,
{
struct ptlrpc_thread *thread = &qsd->qsd_upd_thread;
struct l_wait_info lwi = { 0 };
- cfs_task_t *task;
+ struct task_struct *task;
ENTRY;
task = kthread_run(qsd_upd_thread, (void *)qsd,