Whamcloud - gitweb
LU-1346 libcfs: cleanup libcfs primitive (linux-prim.h) 56/6956/8
author: Peng Tao <tao.peng@emc.com>
Wed, 11 Sep 2013 17:12:11 +0000 (01:12 +0800)
committer: Oleg Drokin <oleg.drokin@intel.com>
Thu, 26 Sep 2013 04:39:28 +0000 (04:39 +0000)
Affected primitives:

cfs_kthread_run, CFS_DAEMON_FLAGS, cfs_task_t, cfs_current,
cfs_task_lock, cfs_task_unlock, CFS_DECL_JOURNAL_DATA,
CFS_PUSH_JOURNAL, CFS_POP_JOURNAL, cfs_sigset_t,
cfs_timer_t, CFS_MAX_SCHEDULE_TIMEOUT

Signed-off-by: Peng Tao <tao.peng@emc.com>
Signed-off-by: Liu Xuezhao <xuezhao.liu@emc.com>
Signed-off-by: James Simmons <uja.ornl@gmail.com>
Change-Id: Iae0e9a9ccaa39fda71175205e9af451e65b620d1
Reviewed-on: http://review.whamcloud.com/6956
Tested-by: Hudson
Reviewed-by: Keith Mannthey <keith.mannthey@intel.com>
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
84 files changed:
contrib/scripts/libcfs_cleanup.sed
libcfs/include/libcfs/darwin/darwin-prim.h
libcfs/include/libcfs/libcfs.h
libcfs/include/libcfs/libcfs_prim.h
libcfs/include/libcfs/libcfs_private.h
libcfs/include/libcfs/linux/linux-prim.h
libcfs/include/libcfs/user-prim.h
libcfs/include/libcfs/winnt/winnt-prim.h
libcfs/libcfs/darwin/darwin-debug.c
libcfs/libcfs/darwin/darwin-prim.c
libcfs/libcfs/debug.c
libcfs/libcfs/linux/linux-cpu.c
libcfs/libcfs/linux/linux-debug.c
libcfs/libcfs/linux/linux-prim.c
libcfs/libcfs/user-prim.c
libcfs/libcfs/watchdog.c
libcfs/libcfs/winnt/winnt-curproc.c
libcfs/libcfs/winnt/winnt-debug.c
libcfs/libcfs/winnt/winnt-lock.c
libcfs/libcfs/winnt/winnt-mem.c
libcfs/libcfs/winnt/winnt-prim.c
libcfs/libcfs/winnt/winnt-sync.c
libcfs/libcfs/workitem.c
lnet/klnds/o2iblnd/o2iblnd_cb.c
lnet/klnds/ptllnd/ptllnd_cb.c
lnet/klnds/qswlnd/qswlnd_cb.c
lnet/klnds/ralnd/ralnd_cb.c
lnet/klnds/socklnd/socklnd_cb.c
lnet/lnet/api-ni.c
lnet/lnet/lib-msg.c
lnet/selftest/timer.c
lustre/include/cl_object.h
lustre/include/liblustre.h
lustre/include/lu_ref.h
lustre/include/lustre_dlm.h
lustre/include/lustre_lib.h
lustre/include/lustre_net.h
lustre/include/obd.h
lustre/lclient/glimpse.c
lustre/lclient/lcommon_misc.c
lustre/ldlm/ldlm_lib.c
lustre/ldlm/ldlm_lock.c
lustre/ldlm/ldlm_lockd.c
lustre/ldlm/ldlm_pool.c
lustre/ldlm/ldlm_request.c
lustre/liblustre/lutil.c
lustre/llite/llite_capa.c
lustre/llite/llite_close.c
lustre/llite/llite_internal.h
lustre/llite/llite_mmap.c
lustre/llite/rw.c
lustre/llite/statahead.c
lustre/llite/vvp_dev.c
lustre/lov/lov_cl_internal.h
lustre/lov/lov_object.c
lustre/lov/lovsub_lock.c
lustre/mdt/mdt_capa.c
lustre/mdt/mdt_handler.c
lustre/mdt/mdt_internal.h
lustre/mgs/mgs_nids.c
lustre/obdclass/cl_io.c
lustre/obdclass/cl_lock.c
lustre/obdclass/cl_object.c
lustre/obdclass/cl_page.c
lustre/obdclass/genops.c
lustre/obdclass/linux/linux-module.c
lustre/obdclass/llog_test.c
lustre/obdecho/echo_client.c
lustre/osc/osc_cache.c
lustre/osc/osc_cl_internal.h
lustre/osc/osc_lock.c
lustre/osc/osc_page.c
lustre/osp/osp_internal.h
lustre/osp/osp_precreate.c
lustre/ost/ost_handler.c
lustre/ptlrpc/client.c
lustre/ptlrpc/gss/gss_keyring.c
lustre/ptlrpc/import.c
lustre/ptlrpc/lproc_ptlrpc.c
lustre/ptlrpc/pinger.c
lustre/ptlrpc/ptlrpcd.c
lustre/ptlrpc/sec_gc.c
lustre/quota/qmt_lock.c
lustre/quota/qsd_writeback.c

index 5f6af2b..9f0045c 100644 (file)
@@ -543,3 +543,28 @@ s/\bcfs_waitq_add_exclusive_head\b/add_wait_queue_exclusive_head/g
 s/\bcfs_schedule_timeout_and_set_state\b/schedule_timeout_and_set_state/g
 s/\bCFS_MAX_SCHEDULE_TIMEOUT\b/MAX_SCHEDULE_TIMEOUT/g
 s/\bcfs_task_state_t\b/long/g
+
+# Kernel thread
+s/\bcfs_kthread_run\b/kthread_run/g
+/#[ \t]*define[ \t]*\bkthread_run\b.*\bkthread_run\b/d
+#s/\bcfs_thread_t\b/thread_t/g
+s/\bCFS_DAEMON_FLAGS\b/DAEMON_FLAGS/g
+#s/\bcfs_create_thread\b/create_thread/g
+# Task struct
+s/\bcfs_task_t\b/struct task_struct/g
+s/\bcfs_current()/current/g
+/#[ \t]*define[ \t]*\bcurrent\b[ \t]*\bcurrent\b/d
+s/\bcfs_task_lock\b/task_lock/g
+/#[ \t]*define[ \t]*\btask_lock\b *( *\w* *)[ \t]*\btask_lock\b *( *\w* *)/d
+s/\bcfs_task_unlock\b/task_unlock/g
+/#[ \t]*define[ \t]*\btask_unlock\b *( *\w* *)[ \t]*\btask_unlock\b *( *\w* *)/d
+s/\bCFS_DECL_JOURNAL_DATA\b/DECL_JOURNAL_DATA/g
+s/\bCFS_PUSH_JOURNAL\b/PUSH_JOURNAL/g
+s/\bCFS_POP_JOURNAL\b/POP_JOURNAL/g
+# Signal
+s/\bcfs_sigset_t\b/sigset_t/g
+/typedef[ \t]*\bsigset_t\b[ \t]*\bsigset_t\b/d
+# Timer
+s/\bcfs_timer_t\b/struct timer_list/g
+s/\bCFS_MAX_SCHEDULE_TIMEOUT\b/MAX_SCHEDULE_TIMEOUT/g
+/#[ \t]*define[ \t]*\bMAX_SCHEDULE_TIMEOUT\b[ \t]*\bMAX_SCHEDULE_TIMEOUT\b/d
index d3c5410..8385acc 100644 (file)
@@ -156,25 +156,25 @@ extern void             *get_bsdtask_info(task_t);
 
 #ifdef __DARWIN8__
 
-typedef struct {}              cfs_task_t;
-#define cfs_current()          ((cfs_task_t *)current_thread())
+typedef struct task_struct {};
+#define current                ((struct task_struct *)current_thread())
 #else  /* !__DARWIN8__ */
 
-typedef struct uthread         cfs_task_t;
+#define task_struct uthread
 
 #define current_uthread()       ((struct uthread *)get_bsdthread_info(current_act()))
-#define cfs_current()          current_uthread()
+#define current                current_uthread()
 
 #endif /* !__DARWIN8__ */
 
-#define cfs_task_lock(t)       do {;} while (0)
-#define cfs_task_unlock(t)     do {;} while (0)
+#define task_lock(t)   do {;} while (0)
+#define task_unlock(t) do {;} while (0)
 
 #define set_current_state(s)   do {;} while (0)
 
-#define CFS_DECL_JOURNAL_DATA  
-#define CFS_PUSH_JOURNAL       do {;} while(0)
-#define CFS_POP_JOURNAL                do {;} while(0)
+#define DECL_JOURNAL_DATA
+#define PUSH_JOURNAL   do {;} while(0)
+#define POP_JOURNAL            do {;} while(0)
 
 /*
  * Kernel thread:
@@ -204,7 +204,7 @@ extern task_t       kernel_task;
 
 #define CLONE_SIGNAL    (CLONE_SIGHAND | CLONE_THREAD)
 
-extern cfs_task_t kthread_run(cfs_thread_t func, void *arg,
+extern struct task_struct kthread_run(cfs_thread_t func, void *arg,
                              const char namefmt[], ...);
 
 /*
@@ -365,23 +365,22 @@ static inline void sleep_on(wait_queue_head_t *waitq)
 /*
  * Signal
  */
-typedef sigset_t       cfs_sigset_t;
 
 /*
  * Timer
  */
-typedef struct cfs_timer {
+struct timer_list {
        struct ktimer t;
-} cfs_timer_t;
+};
 
 #define cfs_init_timer(t)      do {} while(0)
-void cfs_timer_init(struct cfs_timer *t, void (*func)(unsigned long), void *arg);
-void cfs_timer_done(struct cfs_timer *t);
-void cfs_timer_arm(struct cfs_timer *t, cfs_time_t deadline);
-void cfs_timer_disarm(struct cfs_timer *t);
-int  cfs_timer_is_armed(struct cfs_timer *t);
+void cfs_timer_init(struct timer_list *t, void (*func)(unsigned long), void *arg);
+void cfs_timer_done(struct timer_list *t);
+void cfs_timer_arm(struct timer_list *t, cfs_time_t deadline);
+void cfs_timer_disarm(struct timer_list *t);
+int  cfs_timer_is_armed(struct timer_list *t);
 
-cfs_time_t cfs_timer_deadline(struct cfs_timer *t);
+cfs_time_t cfs_timer_deadline(struct timer_list *t);
 
 /*
  * Ioctl
index 9bce8c8..8136fde 100644 (file)
@@ -194,11 +194,11 @@ void cfs_enter_debugger(void);
  * Defined by platform
  */
 int unshare_fs_struct(void);
-cfs_sigset_t cfs_get_blocked_sigs(void);
-cfs_sigset_t cfs_block_allsigs(void);
-cfs_sigset_t cfs_block_sigs(unsigned long sigs);
-cfs_sigset_t cfs_block_sigsinv(unsigned long sigs);
-void cfs_restore_sigs(cfs_sigset_t);
+sigset_t cfs_get_blocked_sigs(void);
+sigset_t cfs_block_allsigs(void);
+sigset_t cfs_block_sigs(unsigned long sigs);
+sigset_t cfs_block_sigsinv(unsigned long sigs);
+void cfs_restore_sigs(sigset_t);
 int cfs_signal_pending(void);
 void cfs_clear_sigpending(void);
 
index 49254df..74f36f5 100644 (file)
  */
 typedef  void (cfs_timer_func_t)(ulong_ptr_t);
 
-void cfs_init_timer(cfs_timer_t *t);
-void cfs_timer_init(cfs_timer_t *t, cfs_timer_func_t *func, void *arg);
-void cfs_timer_done(cfs_timer_t *t);
-void cfs_timer_arm(cfs_timer_t *t, cfs_time_t deadline);
-void cfs_timer_disarm(cfs_timer_t *t);
-int  cfs_timer_is_armed(cfs_timer_t *t);
-cfs_time_t cfs_timer_deadline(cfs_timer_t *t);
+void cfs_init_timer(struct timer_list *t);
+void cfs_timer_init(struct timer_list *t, cfs_timer_func_t *func, void *arg);
+void cfs_timer_done(struct timer_list *t);
+void cfs_timer_arm(struct timer_list *t, cfs_time_t deadline);
+void cfs_timer_disarm(struct timer_list *t);
+int  cfs_timer_is_armed(struct timer_list *t);
+cfs_time_t cfs_timer_deadline(struct timer_list *t);
 
 /*
  * Memory
index bf75401..2c4d121 100644 (file)
@@ -251,7 +251,7 @@ do {                                                                        \
 #define ntohs(x) ___ntohs(x)
 #endif
 
-void libcfs_debug_dumpstack(cfs_task_t *tsk);
+void libcfs_debug_dumpstack(struct task_struct *tsk);
 void libcfs_run_upcall(char **argv);
 void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *);
 void libcfs_debug_dumplog(void);
index 0d7047a..e099dc3 100644 (file)
@@ -160,19 +160,12 @@ static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
        schedule_timeout(ticks);                                        \
 }
 
-/*
- * Task struct
- */
-typedef struct task_struct              cfs_task_t;
-#define cfs_current()                   current
-#define cfs_task_lock(t)                task_lock(t)
-#define cfs_task_unlock(t)              task_unlock(t)
-#define CFS_DECL_JOURNAL_DATA           void *journal_info
-#define CFS_PUSH_JOURNAL                do {    \
+#define DECL_JOURNAL_DATA           void *journal_info
+#define PUSH_JOURNAL                do {    \
         journal_info = current->journal_info;   \
         current->journal_info = NULL;           \
         } while(0)
-#define CFS_POP_JOURNAL                 do {    \
+#define POP_JOURNAL                 do {    \
         current->journal_info = journal_info;   \
         } while(0)
 
@@ -182,16 +175,6 @@ typedef struct task_struct              cfs_task_t;
         module_exit(fini)
 
 /*
- * Signal
- */
-typedef sigset_t                        cfs_sigset_t;
-
-/*
- * Timer
- */
-typedef struct timer_list cfs_timer_t;
-
-/*
  * atomic
  */
 
index f8fff19..4d81e72 100644 (file)
@@ -125,18 +125,17 @@ typedef int (cfs_write_proc_t)(struct file *file, const char *buffer,
 /*
  * Signal
  */
-typedef sigset_t                        cfs_sigset_t;
 
 /*
  * Timer
  */
 
-typedef struct {
-        cfs_list_t tl_list;
-        void (*function)(ulong_ptr_t unused);
-        ulong_ptr_t data;
-        long expires;
-} cfs_timer_t;
+struct timer_list {
+       cfs_list_t tl_list;
+       void (*function)(ulong_ptr_t unused);
+       ulong_ptr_t data;
+       long expires;
+};
 
 
 #define cfs_in_interrupt()    (0)
@@ -156,10 +155,8 @@ static inline int misc_deregister(struct miscdevice *foo)
 
 #define cfs_sigfillset(l)               do {} while (0)
 #define cfs_recalc_sigpending(l)        do {} while (0)
-/* Fine, crash, but stop giving me compile warnings */
-#define cfs_kthread_run(fn,d,fmt,...)   LBUG()
 
-#define CFS_DAEMON_FLAGS                0
+#define DAEMON_FLAGS                0
 
 #define L1_CACHE_ALIGN(x)              (x)
 
index cc8a3dc..cc07c44 100644 (file)
@@ -545,7 +545,7 @@ struct vfsmount;
 
 #define NGROUPS 1
 #define CFS_CURPROC_COMM_MAX (16)
-typedef struct task_sruct{
+struct task_struct{
     mode_t                umask;
     sigset_t              blocked;
 
@@ -566,13 +566,13 @@ typedef struct task_sruct{
     char                  comm[CFS_CURPROC_COMM_MAX];
     void                 *journal_info;
     struct vfsmount      *fs;
-}  cfs_task_t;
+};
 
-static inline void task_lock(cfs_task_t *t)
+static inline void task_lock(struct task_struct *t)
 {
 }
 
-static inline void task_unlock(cfs_task_t *t)
+static inline void task_unlock(struct task_struct *t)
 {
 }
 
@@ -613,12 +613,11 @@ typedef struct _TASK_SLOT {
 
     KIRQL           irql;       /* irql for rwlock ... */
 
-    cfs_task_t      task;       /* linux task part */
+    struct task_struct      task;       /* linux task part */
 
 } TASK_SLOT, *PTASK_SLOT;
 
 
-#define current                      cfs_current()
 #define set_current_state(s)     do {;} while (0)
 
 #define wait_event(wq, condition)                           \
@@ -685,13 +684,13 @@ do {                                                            \
 
 int     init_task_manager();
 void    cleanup_task_manager();
-cfs_task_t * cfs_current();
-int     wake_up_process(cfs_task_t * task);
+struct task_struct * current;
+int     wake_up_process(struct task_struct * task);
 void sleep_on(wait_queue_head_t *waitq);
 #define cfs_might_sleep() do {} while(0)
-#define CFS_DECL_JOURNAL_DATA  
-#define CFS_PUSH_JOURNAL           do {;} while(0)
-#define CFS_POP_JOURNAL                    do {;} while(0)
+#define DECL_JOURNAL_DATA
+#define PUSH_JOURNAL       do {;} while(0)
+#define POP_JOURNAL                do {;} while(0)
 
 
 /* module related definitions */
@@ -778,7 +777,7 @@ static inline u32 cfs_hash_long(u32 val, unsigned int bits)
 #define CFS_TIMER_FLAG_INITED   0x00000001  // Initialized already
 #define CFS_TIMER_FLAG_TIMERED  0x00000002  // KeSetTimer is called
 
-typedef struct cfs_timer {
+struct timer_list {
 
     KSPIN_LOCK      Lock;
 
@@ -792,7 +791,7 @@ typedef struct cfs_timer {
     void (*proc)(ulong_ptr_t);
     void *          arg;
 
-} cfs_timer_t;
+};
 
 /*
  *  libcfs globals initialization/cleanup
@@ -1081,7 +1080,6 @@ static inline void module_put(struct module *module)
  *  sigset_t routines 
  */
 
-typedef sigset_t cfs_sigset_t;
 #define sigaddset(what,sig) (*(what) |= (1<<(sig)), 0)
 #define sigdelset(what,sig) (*(what) &= ~(1<<(sig)), 0)
 #define sigemptyset(what)   (*(what) = 0, 0)
@@ -1089,11 +1087,11 @@ typedef sigset_t cfs_sigset_t;
 #define sigismember(what,sig) (((*(what)) & (1<<(sig))) != 0)
 
 static __inline int
-sigprocmask(int sig, cfs_sigset_t *w1, cfs_sigset_t *w2) {
+sigprocmask(int sig, sigset_t *w1, sigset_t *w2) {
     return 0;
 }
 static __inline int
-sigpending(cfs_sigset_t *what) {
+sigpending(sigset_t *what) {
     return 0;
 }
 
index 7bf0da6..9b1894a 100644 (file)
@@ -39,7 +39,7 @@
 #include <libcfs/libcfs.h>
 #include "tracefile.h"
 
-void libcfs_debug_dumpstack(cfs_task_t *tsk)
+void libcfs_debug_dumpstack(struct task_struct *tsk)
 {
        return;
 }
index 26ceb58..d9b95ff 100644 (file)
@@ -333,7 +333,7 @@ cfs_thread_agent (void)
 
 extern thread_t kernel_thread(task_t task, void (*start)(void));
 
-cfs_task_t
+struct task_struct
 kthread_run(cfs_thread_t func, void *arg, const char namefmt[], ...)
 {
        int ret = 0;
@@ -355,7 +355,7 @@ kthread_run(cfs_thread_t func, void *arg, const char namefmt[], ...)
        } else {
                 ret = -1;
        }
-       return (cfs_task_t)((long)ret);
+       return (struct task_struct)((long)ret);
 }
 
 /*
@@ -364,25 +364,25 @@ kthread_run(cfs_thread_t func, void *arg, const char namefmt[], ...)
  * without patching kernel.
  * Should we provide these functions in xnu?
  *
- * These signal functions almost do nothing now, we 
+ * These signal functions almost do nothing now, we
  * need to investigate more about signal in Darwin.
  */
 
 extern int block_procsigmask(struct proc *p,  int bit);
 
-cfs_sigset_t cfs_block_allsigs()
+sigset_t cfs_block_allsigs()
 {
-        cfs_sigset_t    old = 0;
+       sigset_t    old = 0;
 #ifdef __DARWIN8__
 #else
-        block_procsigmask(current_proc(), -1);
+       block_procsigmask(current_proc(), -1);
 #endif
-        return old;
+       return old;
 }
 
-cfs_sigset_t cfs_block_sigs(unsigned long sigs)
+sigset_t cfs_block_sigs(unsigned long sigs)
 {
-       cfs_sigset_t    old = 0;
+       sigset_t    old = 0;
 #ifdef __DARWIN8__
 #else
        block_procsigmask(current_proc(), sigs);
@@ -392,13 +392,13 @@ cfs_sigset_t cfs_block_sigs(unsigned long sigs)
 
 /* Block all signals except for the @sigs. It's only used in
  * Linux kernel, just a dummy here. */
-cfs_sigset_t cfs_block_sigsinv(unsigned long sigs)
+sigset_t cfs_block_sigsinv(unsigned long sigs)
 {
-        cfs_sigset_t old = 0;
-        return old;
+       sigset_t old = 0;
+       return old;
 }
 
-void cfs_restore_sigs(cfs_sigset_t old)
+void cfs_restore_sigs(sigset_t old)
 {
 }
 
@@ -406,10 +406,10 @@ int cfs_signal_pending(void)
 
 {
 #ifdef __DARWIN8__
-        extern int thread_issignal(proc_t, thread_t, sigset_t);
-        return thread_issignal(current_proc(), current_thread(), (sigset_t)-1);
+       extern int thread_issignal(proc_t, thread_t, sigset_t);
+       return thread_issignal(current_proc(), current_thread(), (sigset_t)-1);
 #else
-        return SHOULDissignal(current_proc(), current_uthread())
+       return SHOULDissignal(current_proc(), current_uthread())
 #endif
 }
 
@@ -417,7 +417,7 @@ void cfs_clear_sigpending(void)
 {
 #ifdef __DARWIN8__
 #else
-        clear_procsiglist(current_proc(), -1);
+       clear_procsiglist(current_proc(), -1);
 #endif
 }
 
@@ -427,36 +427,36 @@ void cfs_clear_sigpending(void)
 
 void lustre_cone_in(boolean_t *state, funnel_t **cone)
 {
-        *cone = thread_funnel_get();
-        if (*cone == network_flock)
-                thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
-        else if (*cone == NULL)
-                *state = thread_funnel_set(kernel_flock, TRUE);
+       *cone = thread_funnel_get();
+       if (*cone == network_flock)
+               thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
+       else if (*cone == NULL)
+               *state = thread_funnel_set(kernel_flock, TRUE);
 }
 
 void lustre_cone_ex(boolean_t state, funnel_t *cone)
 {
-        if (cone == network_flock)
-                thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
-        else if (cone == NULL)
-                (void) thread_funnel_set(kernel_flock, state);
+       if (cone == network_flock)
+               thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
+       else if (cone == NULL)
+               (void) thread_funnel_set(kernel_flock, state);
 }
 
 void lustre_net_in(boolean_t *state, funnel_t **cone)
 {
-        *cone = thread_funnel_get();
-        if (*cone == kernel_flock)
-                thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
-        else if (*cone == NULL)
-                *state = thread_funnel_set(network_flock, TRUE);
+       *cone = thread_funnel_get();
+       if (*cone == kernel_flock)
+               thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
+       else if (*cone == NULL)
+               *state = thread_funnel_set(network_flock, TRUE);
 }
 
 void lustre_net_ex(boolean_t state, funnel_t *cone)
 {
-        if (cone == kernel_flock)
-                thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
-        else if (cone == NULL)
-                (void) thread_funnel_set(network_flock, state);
+       if (cone == kernel_flock)
+               thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
+       else if (cone == NULL)
+               (void) thread_funnel_set(network_flock, state);
 }
 #endif /* !__DARWIN8__ */
 
@@ -485,7 +485,7 @@ void add_wait_queue_exclusive(struct cfs_waitq *waitq,
 }
 
 void remove_wait_queue(struct cfs_waitq *waitq,
-                   struct cfs_waitlink *link)
+                  struct cfs_waitlink *link)
 {
        ksleep_del(&waitq->wq_ksleep_chan, &link->wl_ksleep_link);
 }
@@ -521,82 +521,82 @@ void waitq_wait(struct cfs_waitlink *link, long state)
 
 cfs_duration_t  waitq_timedwait(struct cfs_waitlink *link,
                                    long state,
-                                    cfs_duration_t timeout)
+                                   cfs_duration_t timeout)
 {
-        return ksleep_timedwait(&link->wl_waitq->wq_ksleep_chan, 
-                                state, timeout);
+       return ksleep_timedwait(&link->wl_waitq->wq_ksleep_chan,
+                               state, timeout);
 }
 
 typedef  void (*ktimer_func_t)(void *);
-void cfs_timer_init(cfs_timer_t *t, void (* func)(unsigned long), void *arg)
+void cfs_timer_init(struct timer_list *t, void (* func)(unsigned long), void *arg)
 {
-        ktimer_init(&t->t, (ktimer_func_t)func, arg);
+       ktimer_init(&t->t, (ktimer_func_t)func, arg);
 }
 
 void cfs_timer_done(struct cfs_timer *t)
 {
-        ktimer_done(&t->t);
+       ktimer_done(&t->t);
 }
 
 void cfs_timer_arm(struct cfs_timer *t, cfs_time_t deadline)
 {
-        ktimer_arm(&t->t, deadline);
+       ktimer_arm(&t->t, deadline);
 }
 
 void cfs_timer_disarm(struct cfs_timer *t)
 {
-        ktimer_disarm(&t->t);
+       ktimer_disarm(&t->t);
 }
 
 int  cfs_timer_is_armed(struct cfs_timer *t)
 {
-        return ktimer_is_armed(&t->t);
+       return ktimer_is_armed(&t->t);
 }
 
 cfs_time_t cfs_timer_deadline(struct cfs_timer *t)
 {
-        return ktimer_deadline(&t->t);
+       return ktimer_deadline(&t->t);
 }
 
 void cfs_enter_debugger(void)
 {
 #ifdef __DARWIN8__
-        extern void Debugger(const char * reason);
-        Debugger("CFS");
+       extern void Debugger(const char * reason);
+       Debugger("CFS");
 #else
-        extern void PE_enter_debugger(char *cause);
-        PE_enter_debugger("CFS");
+       extern void PE_enter_debugger(char *cause);
+       PE_enter_debugger("CFS");
 #endif
 }
 
 int cfs_online_cpus(void)
 {
-        int     activecpu;
-        size_t  size;
+       int     activecpu;
+       size_t  size;
 
-#ifdef __DARWIN8__ 
-        size = sizeof(int);
-        sysctlbyname("hw.activecpu", &activecpu, &size, NULL, 0);
-        return activecpu;
+#ifdef __DARWIN8__
+       size = sizeof(int);
+       sysctlbyname("hw.activecpu", &activecpu, &size, NULL, 0);
+       return activecpu;
 #else
-        host_basic_info_data_t hinfo;
-        kern_return_t kret;
-        int count = HOST_BASIC_INFO_COUNT;
+       host_basic_info_data_t hinfo;
+       kern_return_t kret;
+       int count = HOST_BASIC_INFO_COUNT;
 #define BSD_HOST 1
-        kret = host_info(BSD_HOST, HOST_BASIC_INFO, &hinfo, &count);
-        if (kret == KERN_SUCCESS) 
-                return (hinfo.avail_cpus);
-        return(-EINVAL);
+       kret = host_info(BSD_HOST, HOST_BASIC_INFO, &hinfo, &count);
+       if (kret == KERN_SUCCESS)
+               return (hinfo.avail_cpus);
+       return(-EINVAL);
 #endif
 }
 
 int cfs_ncpus(void)
 {
-        int     ncpu;
-        size_t  size;
+       int     ncpu;
+       size_t  size;
 
-        size = sizeof(int);
+       size = sizeof(int);
 
-        sysctlbyname("hw.ncpu", &ncpu, &size, NULL, 0);
-        return ncpu;
+       sysctlbyname("hw.ncpu", &ncpu, &size, NULL, 0);
+       return ncpu;
 }
index c418fbb..b3fe843 100644 (file)
@@ -229,9 +229,9 @@ libcfs_debug_str2mask(int *mask, const char *str, int is_subsys)
  */
 void libcfs_debug_dumplog_internal(void *arg)
 {
-       CFS_DECL_JOURNAL_DATA;
+       DECL_JOURNAL_DATA;
 
-       CFS_PUSH_JOURNAL;
+       PUSH_JOURNAL;
 
        if (strncmp(libcfs_debug_file_path_arr, "NONE", 4) != 0) {
                snprintf(debug_file_name, sizeof(debug_file_name) - 1,
@@ -242,7 +242,7 @@ void libcfs_debug_dumplog_internal(void *arg)
                cfs_tracefile_dump_all_pages(debug_file_name);
                libcfs_run_debug_log_upcall(debug_file_name);
        }
-       CFS_POP_JOURNAL;
+       POP_JOURNAL;
 }
 
 int libcfs_debug_dumplog_thread(void *arg)
@@ -255,7 +255,7 @@ int libcfs_debug_dumplog_thread(void *arg)
 void libcfs_debug_dumplog(void)
 {
        wait_queue_t wait;
-       cfs_task_t    *dumper;
+       struct task_struct    *dumper;
        ENTRY;
 
        /* we're being careful to ensure that the kernel thread is
index 2a198fb..eea82ee 100644 (file)
@@ -627,7 +627,7 @@ cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
                if (cpu_isset(i, *cpumask))
                        continue;
 
-               rc = set_cpus_allowed_ptr(cfs_current(), cpumask);
+               rc = set_cpus_allowed_ptr(current, cpumask);
                set_mems_allowed(*nodemask);
                if (rc == 0)
                        schedule(); /* switch to allowed CPU */
index 8317f0f..6c27049 100644 (file)
@@ -271,7 +271,7 @@ void libcfs_debug_dumpstack(struct task_struct *tsk)
 #endif
 }
 
-cfs_task_t *libcfs_current(void)
+struct task_struct *libcfs_current(void)
 {
         CWARN("current task struct is %p\n", current);
         return current;
index b6719d1..2f1eec1 100644 (file)
 #include <asm/kgdb.h>
 #endif
 
-void cfs_init_timer(cfs_timer_t *t)
+void cfs_init_timer(struct timer_list *t)
 {
-        init_timer(t);
+       init_timer(t);
 }
 EXPORT_SYMBOL(cfs_init_timer);
 
-void cfs_timer_init(cfs_timer_t *t, cfs_timer_func_t *func, void *arg)
+void cfs_timer_init(struct timer_list *t, cfs_timer_func_t *func, void *arg)
 {
-        init_timer(t);
-        t->function = func;
-        t->data = (unsigned long)arg;
+       init_timer(t);
+       t->function = func;
+       t->data = (unsigned long)arg;
 }
 EXPORT_SYMBOL(cfs_timer_init);
 
-void cfs_timer_done(cfs_timer_t *t)
+void cfs_timer_done(struct timer_list *t)
 {
-        return;
+       return;
 }
 EXPORT_SYMBOL(cfs_timer_done);
 
-void cfs_timer_arm(cfs_timer_t *t, cfs_time_t deadline)
+void cfs_timer_arm(struct timer_list *t, cfs_time_t deadline)
 {
-        mod_timer(t, deadline);
+       mod_timer(t, deadline);
 }
 EXPORT_SYMBOL(cfs_timer_arm);
 
-void cfs_timer_disarm(cfs_timer_t *t)
+void cfs_timer_disarm(struct timer_list *t)
 {
-        del_timer(t);
+       del_timer(t);
 }
 EXPORT_SYMBOL(cfs_timer_disarm);
 
-int  cfs_timer_is_armed(cfs_timer_t *t)
+int  cfs_timer_is_armed(struct timer_list *t)
 {
-        return timer_pending(t);
+       return timer_pending(t);
 }
 EXPORT_SYMBOL(cfs_timer_is_armed);
 
-cfs_time_t cfs_timer_deadline(cfs_timer_t *t)
+cfs_time_t cfs_timer_deadline(struct timer_list *t)
 {
-        return t->expires;
+       return t->expires;
 }
 EXPORT_SYMBOL(cfs_timer_deadline);
 
@@ -141,7 +141,7 @@ sigset_t cfs_block_sigsinv(unsigned long sigs)
 }
 
 void
-cfs_restore_sigs (cfs_sigset_t old)
+cfs_restore_sigs(sigset_t old)
 {
        unsigned long  flags;
 
index 34b0c5e..80bec14 100644 (file)
@@ -166,38 +166,38 @@ void cond_resched(void)
  * Timer
  */
 
-void cfs_init_timer(cfs_timer_t *t)
+void cfs_init_timer(struct timer_list *t)
 {
-        CFS_INIT_LIST_HEAD(&t->tl_list);
+       CFS_INIT_LIST_HEAD(&t->tl_list);
 }
 
-void cfs_timer_init(cfs_timer_t *l, cfs_timer_func_t *func, void *arg)
+void cfs_timer_init(struct timer_list *l, cfs_timer_func_t *func, void *arg)
 {
-        CFS_INIT_LIST_HEAD(&l->tl_list);
-        l->function = func;
-        l->data = (ulong_ptr_t)arg;
-        return;
+       CFS_INIT_LIST_HEAD(&l->tl_list);
+       l->function = func;
+       l->data = (ulong_ptr_t)arg;
+       return;
 }
 
-int cfs_timer_is_armed(cfs_timer_t *l)
+int cfs_timer_is_armed(struct timer_list *l)
 {
-        if (cfs_time_before(cfs_time_current(), l->expires))
-                return 1;
-        else
-                return 0;
+       if (cfs_time_before(cfs_time_current(), l->expires))
+               return 1;
+       else
+               return 0;
 }
 
-void cfs_timer_arm(cfs_timer_t *l, cfs_time_t deadline)
+void cfs_timer_arm(struct timer_list *l, cfs_time_t deadline)
 {
-        l->expires = deadline;
+       l->expires = deadline;
 }
 
-void cfs_timer_disarm(cfs_timer_t *l)
+void cfs_timer_disarm(struct timer_list *l)
 {
 }
-cfs_time_t cfs_timer_deadline(cfs_timer_t *l)
+cfs_time_t cfs_timer_deadline(struct timer_list *l)
 {
-        return l->expires;
+       return l->expires;
 }
 
 
@@ -320,10 +320,10 @@ int unshare_fs_struct()
        return 0;
 }
 
-cfs_sigset_t cfs_block_allsigs(void)
+sigset_t cfs_block_allsigs(void)
 {
-       cfs_sigset_t   all;
-       cfs_sigset_t   old;
+       sigset_t   all;
+       sigset_t   old;
        int            rc;
 
        sigfillset(&all);
@@ -333,10 +333,10 @@ cfs_sigset_t cfs_block_allsigs(void)
        return old;
 }
 
-cfs_sigset_t cfs_block_sigs(unsigned long sigs)
+sigset_t cfs_block_sigs(unsigned long sigs)
 {
-       cfs_sigset_t   old;
-       cfs_sigset_t   blocks = { { sigs } }; /* kludge */
+       sigset_t   old;
+       sigset_t   blocks = { { sigs } }; /* kludge */
        int   rc;
 
        rc = sigprocmask(SIG_BLOCK, &blocks, &old);
@@ -347,37 +347,37 @@ cfs_sigset_t cfs_block_sigs(unsigned long sigs)
 
 /* Block all signals except for the @sigs. It's only used in
  * Linux kernel, just a dummy here. */
-cfs_sigset_t cfs_block_sigsinv(unsigned long sigs)
+sigset_t cfs_block_sigsinv(unsigned long sigs)
 {
-        cfs_sigset_t old;
-        int rc;
+       sigset_t old;
+       int rc;
 
-        /* Return old blocked sigs */
-        rc = sigprocmask(SIG_SETMASK, NULL, &old);
-        LASSERT(rc == 0);
+       /* Return old blocked sigs */
+       rc = sigprocmask(SIG_SETMASK, NULL, &old);
+       LASSERT(rc == 0);
 
-        return old;
+       return old;
 }
 
-void cfs_restore_sigs(cfs_sigset_t old)
+void cfs_restore_sigs(sigset_t old)
 {
-        int   rc = sigprocmask(SIG_SETMASK, &old, NULL);
+       int   rc = sigprocmask(SIG_SETMASK, &old, NULL);
 
-        LASSERT (rc == 0);
+       LASSERT (rc == 0);
 }
 
 int cfs_signal_pending(void)
 {
-        cfs_sigset_t    empty;
-        cfs_sigset_t    set;
-        int  rc;
+       sigset_t    empty;
+       sigset_t    set;
+       int  rc;
 
-        rc = sigpending(&set);
-        LASSERT (rc == 0);
+       rc = sigpending(&set);
+       LASSERT (rc == 0);
 
-        sigemptyset(&empty);
+       sigemptyset(&empty);
 
-        return !memcmp(&empty, &set, sizeof(set));
+       return !memcmp(&empty, &set, sizeof(set));
 }
 
 void cfs_clear_sigpending(void)
index bbad0b1..93802ac 100644 (file)
 
 struct lc_watchdog {
        spinlock_t  lcw_lock;     /* check or change lcw_list */
-        int             lcw_refcount; /* must hold lcw_pending_timers_lock */
-        cfs_timer_t     lcw_timer;    /* kernel timer */
-        cfs_list_t      lcw_list;     /* chain on pending list */
-        cfs_time_t      lcw_last_touched; /* last touched stamp */
-        cfs_task_t     *lcw_task;     /* owner task */
-        void          (*lcw_callback)(pid_t, void *);
-        void           *lcw_data;
-
-        pid_t           lcw_pid;
-
-        enum {
-                LC_WATCHDOG_DISABLED,
-                LC_WATCHDOG_ENABLED,
-                LC_WATCHDOG_EXPIRED
-        } lcw_state;
+       int             lcw_refcount; /* must hold lcw_pending_timers_lock */
+       struct timer_list     lcw_timer;    /* kernel timer */
+       cfs_list_t      lcw_list;     /* chain on pending list */
+       cfs_time_t      lcw_last_touched; /* last touched stamp */
+       struct task_struct     *lcw_task;     /* owner task */
+       void          (*lcw_callback)(pid_t, void *);
+       void           *lcw_data;
+
+       pid_t           lcw_pid;
+
+       enum {
+               LC_WATCHDOG_DISABLED,
+               LC_WATCHDOG_ENABLED,
+               LC_WATCHDOG_EXPIRED
+       } lcw_state;
 };
 
 #ifdef WITH_WATCHDOG
@@ -296,7 +296,7 @@ static int lcw_dispatch_main(void *data)
 
 static void lcw_dispatch_start(void)
 {
-       cfs_task_t *task;
+       struct task_struct *task;
 
        ENTRY;
        LASSERT(lcw_refcount == 1);
@@ -351,7 +351,7 @@ struct lc_watchdog *lc_watchdog_add(int timeout,
 
        spin_lock_init(&lcw->lcw_lock);
        lcw->lcw_refcount = 1; /* refcount for owner */
-       lcw->lcw_task     = cfs_current();
+       lcw->lcw_task     = current;
        lcw->lcw_pid      = current_pid();
        lcw->lcw_callback = (callback != NULL) ? callback : lc_watchdog_dumplog;
        lcw->lcw_data     = data;
index 8ea15b3..6d6ab58 100644 (file)
@@ -47,9 +47,9 @@
  * for Linux kernel.
  */
 
-cfs_task_t this_task =
+struct task_struct this_task =
     { /* umask */ 0,/* blocked*/0, /* pid */ 0, /* pgrp */ 0,
-      /* uid,euid,suid,fsuid */  0, 0, 0, 0, 
+      /* uid,euid,suid,fsuid */  0, 0, 0, 0,
       /* gid_t gid,egid,sgid,fsgid */ 0, 0, 0, 0,
       /* ngroups*/ 1, /*cgroups*/ 0, /*groups*/ 0,
       /* group_info */ NULL,
@@ -81,7 +81,7 @@ gid_t current_fsgid(void)
 
 pid_t current_pid(void)
 {
-    return cfs_current()->pid;
+    return current->pid;
 }
 
 mode_t current_umask(void)
@@ -177,7 +177,7 @@ task_manager_notify(
     IN BOOLEAN  Create
     )
 {
-    PLIST_ENTRY ListEntry = NULL; 
+    PLIST_ENTRY ListEntry = NULL;
     PTASK_SLOT  TaskSlot  = NULL;
 
        spin_lock(&(cfs_win_task_manger.Lock));
@@ -248,7 +248,7 @@ init_task_manager()
 void
 cleanup_task_manager()
 {
-    PLIST_ENTRY ListEntry = NULL; 
+    PLIST_ENTRY ListEntry = NULL;
     PTASK_SLOT  TaskSlot  = NULL;
 
     /* remove ThreadNotifyRoutine: task_manager_notify */
@@ -285,14 +285,14 @@ cleanup_task_manager()
  */
 
 
-cfs_task_t *
-cfs_current()
+struct task_struct *
+current
 {
     HANDLE      Pid = PsGetCurrentProcessId();
     HANDLE      Tid = PsGetCurrentThreadId();
     PETHREAD    Tet = PsGetCurrentThread();
 
-    PLIST_ENTRY ListEntry = NULL; 
+    PLIST_ENTRY ListEntry = NULL;
     PTASK_SLOT  TaskSlot  = NULL;
 
        spin_lock(&(cfs_win_task_manger.Lock));
@@ -367,7 +367,7 @@ cfs_current()
 
     {
         PTASK_SLOT  Prev = NULL, Curr = NULL;
-        
+
         ListEntry = cfs_win_task_manger.TaskList.Flink;
 
         while (ListEntry != (&(cfs_win_task_manger.TaskList))) {
@@ -411,7 +411,7 @@ cfs_pause(cfs_duration_t ticks)
 void
 schedule_timeout_and_set_state(long state, int64_t time)
 {
-    cfs_task_t * task = cfs_current();
+    struct task_struct * task = current;
     PTASK_SLOT   slot = NULL;
 
     if (!task) {
@@ -437,7 +437,7 @@ schedule()
 
 int
 wake_up_process(
-    cfs_task_t * task
+    struct task_struct * task
     )
 {
     PTASK_SLOT   slot = NULL;
@@ -459,7 +459,7 @@ void
 sleep_on(wait_queue_head_t *waitq)
 {
        wait_queue_t link;
-       
+
        init_waitqueue_entry_current(&link);
        add_wait_queue(waitq, &link);
        waitq_wait(&link, TASK_INTERRUPTIBLE);
index 3569c99..d60aa31 100644 (file)
@@ -39,7 +39,7 @@
 #include <libcfs/libcfs.h>
 #include "tracefile.h"
 
-void libcfs_debug_dumpstack(cfs_task_t *tsk)
+void libcfs_debug_dumpstack(struct task_struct *tsk)
 {
        return;
 }
@@ -48,9 +48,9 @@ void libcfs_run_debug_log_upcall(char *file)
 {
 }
 
-cfs_task_t *libcfs_current(void)
+struct task_struct *libcfs_current(void)
 {
-       return cfs_current();
+       return current;
 }
 
 void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *msgdata)
index e8b3489..af32726 100644 (file)
@@ -321,7 +321,7 @@ cfs_rwlock_fini(rwlock_t *rwlock)
 void
 read_lock(rwlock_t *rwlock)
 {
-    cfs_task_t * task = cfs_current();
+    struct task_struct * task = current;
     PTASK_SLOT   slot = NULL;
 
     if (!task) {
@@ -349,7 +349,7 @@ read_lock(rwlock_t *rwlock)
 void
 read_unlock(rwlock_t *rwlock)
 {
-    cfs_task_t * task = cfs_current();
+    struct task_struct * task = current;
     PTASK_SLOT   slot = NULL;
 
     if (!task) {
@@ -374,7 +374,7 @@ read_unlock(rwlock_t *rwlock)
 void
 write_lock(rwlock_t *rwlock)
 {
-    cfs_task_t * task = cfs_current();
+    struct task_struct * task = current;
     PTASK_SLOT   slot = NULL;
 
     if (!task) {
@@ -402,7 +402,7 @@ write_lock(rwlock_t *rwlock)
 void
 write_unlock(rwlock_t *rwlock)
 {
-    cfs_task_t * task = cfs_current();
+    struct task_struct * task = current;
     PTASK_SLOT   slot = NULL;
 
     if (!task) {
index 5dc7958..1c91828 100644 (file)
@@ -368,7 +368,7 @@ void kmem_cache_free(struct kmem_cache *kmc, void *buf)
 
 spinlock_t  shrinker_guard = {0};
 CFS_LIST_HEAD(shrinker_hdr);
-cfs_timer_t shrinker_timer = {0};
+struct timer_list shrinker_timer = {0};
 
 struct shrinker *set_shrinker(int seeks, shrink_callback cb)
 {
index 351e171..077f3a4 100644 (file)
@@ -89,13 +89,13 @@ cfs_thread_proc(void *context)
  *   name:  thread name to create
  *
  * Return Value:
- *   cfs_task_t:   0 on success or error codes
+ *   struct task_struct:   0 on success or error codes
  *
  * Notes:
  *   N/A
  */
 
-cfs_task_t kthread_run(int (*func)(void *), void *arg, char *name)
+struct task_struct kthread_run(int (*func)(void *), void *arg, char *name)
 {
     cfs_handle_t  thread = NULL;
     NTSTATUS      status;
@@ -137,7 +137,7 @@ cfs_task_t kthread_run(int (*func)(void *), void *arg, char *name)
 
     ZwClose(thread);
 
-       return (cfs_task_t)0;
+       return (struct task_struct)0;
 }
 
 
@@ -351,10 +351,10 @@ cfs_timer_dpc_proc (
     IN PVOID SystemArgument1,
     IN PVOID SystemArgument2)
 {
-    cfs_timer_t *   timer;
+    struct timer_list *   timer;
     KIRQL           Irql;
 
-    timer = (cfs_timer_t *) DeferredContext;
+    timer = (struct timer_list *) DeferredContext;
 
     /* clear the flag */
     KeAcquireSpinLock(&(timer->Lock), &Irql);
@@ -365,14 +365,14 @@ cfs_timer_dpc_proc (
     timer->proc((long_ptr_t)timer->arg);
 }
 
-void cfs_init_timer(cfs_timer_t *timer)
+void cfs_init_timer(struct timer_list *timer)
 {
-    memset(timer, 0, sizeof(cfs_timer_t));
+    memset(timer, 0, sizeof(struct timer_list));
 }
 
 /*
  * cfs_timer_init
- *   To initialize the cfs_timer_t
+ *   To initialize the struct timer_list
  *
  * Arguments:
  *   timer:  the cfs_timer to be initialized
@@ -386,9 +386,9 @@ void cfs_init_timer(cfs_timer_t *timer)
  *   N/A
  */
 
-void cfs_timer_init(cfs_timer_t *timer, void (*func)(ulong_ptr_t), void *arg)
+void cfs_timer_init(struct timer_list *timer, void (*func)(ulong_ptr_t), void *arg)
 {
-    memset(timer, 0, sizeof(cfs_timer_t));
+    memset(timer, 0, sizeof(struct timer_list));
 
     timer->proc = func;
     timer->arg  = arg;
@@ -402,7 +402,7 @@ void cfs_timer_init(cfs_timer_t *timer, void (*func)(ulong_ptr_t), void *arg)
 
 /*
  * cfs_timer_done
- *   To finialize the cfs_timer_t (unused)
+ *   To finialize the struct timer_list (unused)
  *
  * Arguments:
  *   timer:  the cfs_timer to be cleaned up
@@ -414,7 +414,7 @@ void cfs_timer_init(cfs_timer_t *timer, void (*func)(ulong_ptr_t), void *arg)
  *   N/A
  */
 
-void cfs_timer_done(cfs_timer_t *timer)
+void cfs_timer_done(struct timer_list *timer)
 {
     return;
 }
@@ -434,7 +434,7 @@ void cfs_timer_done(cfs_timer_t *timer)
  *   N/A
  */
 
-void cfs_timer_arm(cfs_timer_t *timer, cfs_time_t deadline)
+void cfs_timer_arm(struct timer_list *timer, cfs_time_t deadline)
 {
     LARGE_INTEGER   timeout;
     KIRQL           Irql;
@@ -468,7 +468,7 @@ void cfs_timer_arm(cfs_timer_t *timer, cfs_time_t deadline)
  *   N/A
  */
 
-void cfs_timer_disarm(cfs_timer_t *timer)
+void cfs_timer_disarm(struct timer_list *timer)
 {
     KIRQL   Irql;
 
@@ -494,7 +494,7 @@ void cfs_timer_disarm(cfs_timer_t *timer)
  *   N/A
  */
 
-int cfs_timer_is_armed(cfs_timer_t *timer)
+int cfs_timer_is_armed(struct timer_list *timer)
 {
     int     rc = 0;
     KIRQL   Irql;
@@ -522,7 +522,7 @@ int cfs_timer_is_armed(cfs_timer_t *timer)
  *   N/A
  */
 
-cfs_time_t cfs_timer_deadline(cfs_timer_t * timer)
+cfs_time_t cfs_timer_deadline(struct timer_list * timer)
 {
     return timer->deadline;
 }
@@ -536,24 +536,24 @@ int unshare_fs_struct()
  *  routine related with sigals
  */
 
-cfs_sigset_t cfs_block_allsigs()
+sigset_t cfs_block_allsigs()
 {
         return 0;
 }
 
-cfs_sigset_t cfs_block_sigs(sigset_t bit)
+sigset_t cfs_block_sigs(sigset_t bit)
 {
         return 0;
 }
 
 /* Block all signals except for the @sigs. It's only used in
  * Linux kernel, just a dummy here. */
-cfs_sigset_t cfs_block_sigsinv(unsigned long sigs)
+sigset_t cfs_block_sigsinv(unsigned long sigs)
 {
         return 0;
 }
 
-void cfs_restore_sigs(cfs_sigset_t old)
+void cfs_restore_sigs(sigset_t old)
 {
 }
 
index 18817ee..696e8ae 100644 (file)
@@ -81,7 +81,7 @@ void init_waitqueue_head(wait_queue_head_t *waitq)
 
 void init_waitqueue_entry_current(wait_queue_t *link)
 {
-    cfs_task_t * task = cfs_current();
+    struct task_struct * task = current;
     PTASK_SLOT   slot = NULL;
 
     if (!task) {
@@ -126,7 +126,7 @@ void init_waitqueue_entry_current(wait_queue_t *link)
 
 void cfs_waitlink_fini(wait_queue_t *link)
 {
-    cfs_task_t * task = cfs_current();
+    struct task_struct * task = current;
     PTASK_SLOT   slot = NULL;
 
     if (!task) {
index 776a656..ecbe855 100644 (file)
@@ -453,8 +453,8 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
 
 #ifdef __KERNEL__
        for (; nthrs > 0; nthrs--)  {
-               char            name[16];
-               cfs_task_t      *task;
+               char                    name[16];
+               struct task_struct      *task;
 
                spin_lock(&cfs_wi_data.wi_glock);
                while (sched->ws_starting > 0) {
index 9ca2cb0..5c9d1a1 100644 (file)
@@ -1807,7 +1807,7 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
 int
 kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
 {
-       cfs_task_t *task = kthread_run(fn, arg, name);
+       struct task_struct *task = kthread_run(fn, arg, name);
 
        if (IS_ERR(task))
                return PTR_ERR(task);
index 6b47a16..3b66370 100644 (file)
@@ -653,7 +653,7 @@ kptllnd_thread_fini (void)
 int
 kptllnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
 {
-       cfs_task_t *task;
+       struct task_struct *task;
 
        cfs_atomic_inc(&kptllnd_data.kptl_nthreads);
 
index 203cddc..eec1a6b 100644 (file)
@@ -1657,7 +1657,7 @@ kqswnal_recv (lnet_ni_t     *ni,
 int
 kqswnal_thread_start(int (*fn)(void *arg), void *arg, char *name)
 {
-       cfs_task_t *task = cfs_thread_run(fn, arg, name);
+       struct task_struct *task = cfs_thread_run(fn, arg, name);
 
        if (IS_ERR(task))
                return PTR_ERR(task);
index ba744c6..00f1fcb 100644 (file)
@@ -893,7 +893,7 @@ kranal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
 int
 kranal_thread_start(int(*fn)(void *arg), void *arg, char *name)
 {
-       cfs_task_t *task = cfs_thread_run(fn, arg, name);
+       struct task_struct *task = cfs_thread_run(fn, arg, name);
 
        if (!IS_ERR(task))
                cfs_atomic_inc(&kranal_data.kra_nthreads);
index bb6974f..6d9d626 100644 (file)
@@ -1009,7 +1009,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 int
 ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
 {
-       cfs_task_t *task = kthread_run(fn, arg, name);
+       struct task_struct *task = kthread_run(fn, arg, name);
 
        if (IS_ERR(task))
                return PTR_ERR(task);
index 1ae1338..55a7234 100644 (file)
@@ -1888,14 +1888,14 @@ lnet_ping_target_init(void)
 void
 lnet_ping_target_fini(void)
 {
-        lnet_event_t    event;
-        int             rc;
-        int             which;
-        int             timeout_ms = 1000;
-        cfs_sigset_t    blocked = cfs_block_allsigs();
+       lnet_event_t    event;
+       int             rc;
+       int             which;
+       int             timeout_ms = 1000;
+       sigset_t    blocked = cfs_block_allsigs();
 
-        LNetMDUnlink(the_lnet.ln_ping_target_md);
-        /* NB md could be busy; this just starts the unlink */
+       LNetMDUnlink(the_lnet.ln_ping_target_md);
+       /* NB md could be busy; this just starts the unlink */
 
         for (;;) {
                 rc = LNetEQPoll(&the_lnet.ln_ping_target_eq, 1,
@@ -1925,28 +1925,28 @@ lnet_ping_target_fini(void)
 int
 lnet_ping (lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_ids)
 {
-        lnet_handle_eq_t     eqh;
-        lnet_handle_md_t     mdh;
-        lnet_event_t         event;
-        lnet_md_t            md = {0};
-        int                  which;
-        int                  unlinked = 0;
-        int                  replied = 0;
-        const int            a_long_time = 60000; /* mS */
-        int                  infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]);
-        lnet_ping_info_t    *info;
-        lnet_process_id_t    tmpid;
-        int                  i;
-        int                  nob;
-        int                  rc;
-        int                  rc2;
-        cfs_sigset_t         blocked;
-
-        if (n_ids <= 0 ||
-            id.nid == LNET_NID_ANY ||
-            timeout_ms > 500000 ||              /* arbitrary limit! */
-            n_ids > 20)                         /* arbitrary limit! */
-                return -EINVAL;
+       lnet_handle_eq_t     eqh;
+       lnet_handle_md_t     mdh;
+       lnet_event_t         event;
+       lnet_md_t            md = {0};
+       int                  which;
+       int                  unlinked = 0;
+       int                  replied = 0;
+       const int            a_long_time = 60000; /* mS */
+       int                  infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]);
+       lnet_ping_info_t    *info;
+       lnet_process_id_t    tmpid;
+       int                  i;
+       int                  nob;
+       int                  rc;
+       int                  rc2;
+       sigset_t         blocked;
+
+       if (n_ids <= 0 ||
+           id.nid == LNET_NID_ANY ||
+           timeout_ms > 500000 ||              /* arbitrary limit! */
+           n_ids > 20)                         /* arbitrary limit! */
+               return -EINVAL;
 
         if (id.pid == LNET_PID_ANY)
                 id.pid = LUSTRE_SRV_LNET_PID;
index a252718..9869597 100644 (file)
@@ -500,7 +500,7 @@ lnet_finalize (lnet_ni_t *ni, lnet_msg_t *msg, int status)
 #ifdef __KERNEL__
        my_slot = -1;
        for (i = 0; i < container->msc_nfinalizers; i++) {
-               if (container->msc_finalizers[i] == cfs_current())
+               if (container->msc_finalizers[i] == current)
                        break;
 
                if (my_slot < 0 && container->msc_finalizers[i] == NULL)
@@ -512,7 +512,7 @@ lnet_finalize (lnet_ni_t *ni, lnet_msg_t *msg, int status)
                return;
        }
 
-       container->msc_finalizers[my_slot] = cfs_current();
+       container->msc_finalizers[my_slot] = current;
 #else
        LASSERT(container->msc_nfinalizers == 1);
        if (container->msc_finalizers[0] != NULL) {
index aaab0b0..19abf5d 100644 (file)
@@ -199,7 +199,7 @@ stt_timer_main (void *arg)
 int
 stt_start_timer_thread (void)
 {
-       cfs_task_t *task;
+       struct task_struct *task;
 
        LASSERT(!stt_data.stt_shuttingdown);
 
index 9b7b381..08894b0 100644 (file)
@@ -760,7 +760,7 @@ struct cl_page {
         /**
          * Debug information, the task is owning the page.
          */
-        cfs_task_t              *cp_task;
+       struct task_struct      *cp_task;
         /**
          * Owning IO request in cl_page_state::CPS_PAGEOUT and
          * cl_page_state::CPS_PAGEIN states. This field is maintained only in
@@ -1578,13 +1578,13 @@ struct cl_lock {
         * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
         */
        struct mutex            cll_guard;
-        cfs_task_t           *cll_guarder;
+       struct task_struct    *cll_guarder;
         int                   cll_depth;
 
         /**
          * the owner for INTRANSIT state
          */
-        cfs_task_t           *cll_intransit_owner;
+       struct task_struct    *cll_intransit_owner;
         int                   cll_error;
         /**
          * Number of holds on a lock. A hold prevents a lock from being
@@ -1869,9 +1869,9 @@ do {                                                                    \
  * @{
  */
 struct cl_page_list {
-        unsigned             pl_nr;
-        cfs_list_t           pl_pages;
-        cfs_task_t          *pl_owner;
+       unsigned             pl_nr;
+       cfs_list_t           pl_pages;
+       struct task_struct   *pl_owner;
 };
 
 /** 
index 0f713e2..8e46213 100644 (file)
@@ -269,8 +269,6 @@ struct task_struct {
 };
 
 
-typedef struct task_struct cfs_task_t;
-#define cfs_current()           current
 #define current_pid()       (current->pid)
 #define current_comm()      (current->comm)
 #define current_fsuid()     (current->fsuid)
index da589f1..3ff67e0 100644 (file)
  *
  *        // current thread acquired a temporary reference to foo.
  *        foo_get(foo);
- *        lu_ref_add(&foo->reference, __FUNCTION__, cfs_current());
+ *        lu_ref_add(&foo->reference, __FUNCTION__, current);
  *
  *        ...
  *
  *        // temporary reference is released.
- *        lu_ref_del(&foo->reference, __FUNCTION__, cfs_current());
+ *        lu_ref_del(&foo->reference, __FUNCTION__, current);
  *        foo_put(foo);
  * \endcode
  *
index 79f4b05..769b283 100644 (file)
@@ -1217,7 +1217,7 @@ static inline struct ldlm_lock *ldlm_handle2lock(const struct lustre_handle *h)
 }
 
 #define LDLM_LOCK_REF_DEL(lock) \
-        lu_ref_del(&lock->l_reference, "handle", cfs_current())
+       lu_ref_del(&lock->l_reference, "handle", current)
 
 static inline struct ldlm_lock *
 ldlm_handle2lock_long(const struct lustre_handle *h, __u64 flags)
@@ -1358,11 +1358,11 @@ int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
                               const struct ldlm_res_id *);
 
 #define LDLM_RESOURCE_ADDREF(res) do {                                  \
-        lu_ref_add_atomic(&(res)->lr_reference, __FUNCTION__, cfs_current());  \
+       lu_ref_add_atomic(&(res)->lr_reference, __FUNCTION__, current);  \
 } while (0)
 
 #define LDLM_RESOURCE_DELREF(res) do {                                  \
-        lu_ref_del(&(res)->lr_reference, __FUNCTION__, cfs_current());  \
+       lu_ref_del(&(res)->lr_reference, __FUNCTION__, current);  \
 } while (0)
 
 /* ldlm_request.c */
index 022a04c..8a9b62f 100644 (file)
@@ -121,7 +121,7 @@ void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs);
 /* l_lock.c */
 struct lustre_lock {
        int                     l_depth;
-       cfs_task_t              *l_owner;
+       struct task_struct      *l_owner;
        struct semaphore        l_sem;
        spinlock_t              l_spin;
 };
@@ -710,7 +710,7 @@ struct l_wait_info {
 do {                                                                           \
        wait_queue_t __wait;                                                   \
        cfs_duration_t __timeout = info->lwi_timeout;                          \
-       cfs_sigset_t   __blocked;                                              \
+       sigset_t   __blocked;                                              \
        int   __allow_intr = info->lwi_allow_intr;                             \
                                                                               \
        ret = 0;                                                               \
index de65401..1138ee0 100644 (file)
@@ -2645,7 +2645,7 @@ struct ptlrpc_service_part {
        /** reqs waiting for replies */
        struct ptlrpc_at_array          scp_at_array;
        /** early reply timer */
-       cfs_timer_t                     scp_at_timer;
+       struct timer_list               scp_at_timer;
        /** debug */
        cfs_time_t                      scp_at_checktime;
        /** check early replies */
index ad43606..1de0352 100644 (file)
@@ -921,12 +921,12 @@ struct obd_device {
        int                              obd_requests_queued_for_recovery;
        wait_queue_head_t                obd_next_transno_waitq;
        /* protected by obd_recovery_task_lock */
-       cfs_timer_t                      obd_recovery_timer;
-        time_t                           obd_recovery_start; /* seconds */
-        time_t                           obd_recovery_end; /* seconds, for lprocfs_status */
-        int                              obd_recovery_time_hard;
-        int                              obd_recovery_timeout;
-        int                              obd_recovery_ir_factor;
+       struct timer_list                obd_recovery_timer;
+       time_t                           obd_recovery_start; /* seconds */
+       time_t                           obd_recovery_end; /* seconds, for lprocfs_status */
+       int                              obd_recovery_time_hard;
+       int                              obd_recovery_timeout;
+       int                              obd_recovery_ir_factor;
 
         /* new recovery stuff from CMD2 */
         struct target_recovery_data      obd_recovery_data;
index 94b2696..0169cdf 100644 (file)
@@ -136,48 +136,48 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
                         if (agl)
                                 descr->cld_enq_flags |= CEF_AGL;
                         cio->cui_glimpse = 1;
-                        /*
-                         * CEF_ASYNC is used because glimpse sub-locks cannot
-                         * deadlock (because they never conflict with other
-                         * locks) and, hence, can be enqueued out-of-order.
-                         *
-                         * CEF_MUST protects glimpse lock from conversion into
-                         * a lockless mode.
-                         */
-                        lock = cl_lock_request(env, io, descr, "glimpse",
-                                               cfs_current());
-                        cio->cui_glimpse = 0;
+                       /*
+                        * CEF_ASYNC is used because glimpse sub-locks cannot
+                        * deadlock (because they never conflict with other
+                        * locks) and, hence, can be enqueued out-of-order.
+                        *
+                        * CEF_MUST protects glimpse lock from conversion into
+                        * a lockless mode.
+                        */
+                       lock = cl_lock_request(env, io, descr, "glimpse",
+                                              current);
+                       cio->cui_glimpse = 0;
 
-                        if (lock == NULL)
-                                RETURN(0);
+                       if (lock == NULL)
+                               RETURN(0);
 
-                        if (IS_ERR(lock))
-                                RETURN(PTR_ERR(lock));
+                       if (IS_ERR(lock))
+                               RETURN(PTR_ERR(lock));
 
-                        LASSERT(agl == 0);
-                        result = cl_wait(env, lock);
-                        if (result == 0) {
+                       LASSERT(agl == 0);
+                       result = cl_wait(env, lock);
+                       if (result == 0) {
                                cl_merge_lvb(env, inode);
-                                if (cl_isize_read(inode) > 0 &&
-                                    inode->i_blocks == 0) {
-                                        /*
-                                         * LU-417: Add dirty pages block count
-                                         * lest i_blocks reports 0, some "cp" or
-                                         * "tar" may think it's a completely
-                                         * sparse file and skip it.
-                                         */
-                                        inode->i_blocks = dirty_cnt(inode);
-                                }
-                                cl_unuse(env, lock);
-                        }
-                        cl_lock_release(env, lock, "glimpse", cfs_current());
-                } else {
-                        CDEBUG(D_DLMTRACE, "No objects for inode\n");
+                               if (cl_isize_read(inode) > 0 &&
+                                   inode->i_blocks == 0) {
+                                       /*
+                                        * LU-417: Add dirty pages block count
+                                        * lest i_blocks reports 0, some "cp" or
+                                        * "tar" may think it's a completely
+                                        * sparse file and skip it.
+                                        */
+                                       inode->i_blocks = dirty_cnt(inode);
+                               }
+                               cl_unuse(env, lock);
+                       }
+                       cl_lock_release(env, lock, "glimpse", current);
+               } else {
+                       CDEBUG(D_DLMTRACE, "No objects for inode\n");
                        cl_merge_lvb(env, inode);
-                }
-        }
+               }
+       }
 
-        RETURN(result);
+       RETURN(result);
 }
 
 static int cl_io_get(struct inode *inode, struct lu_env **envout,
@@ -271,23 +271,23 @@ int cl_local_size(struct inode *inode)
         result = cl_io_init(env, io, CIT_MISC, clob);
         if (result > 0)
                 result = io->ci_result;
-        else if (result == 0) {
-                cti = ccc_env_info(env);
-                descr = &cti->cti_descr;
+       else if (result == 0) {
+               cti = ccc_env_info(env);
+               descr = &cti->cti_descr;
 
-                *descr = whole_file;
-                descr->cld_obj = clob;
-                lock = cl_lock_peek(env, io, descr, "localsize", cfs_current());
-                if (lock != NULL) {
+               *descr = whole_file;
+               descr->cld_obj = clob;
+               lock = cl_lock_peek(env, io, descr, "localsize", current);
+               if (lock != NULL) {
                        cl_merge_lvb(env, inode);
-                        cl_unuse(env, lock);
-                        cl_lock_release(env, lock, "localsize", cfs_current());
-                        result = 0;
-                } else
-                        result = -ENODATA;
-        }
-        cl_io_fini(env, io);
-        cl_env_put(env, &refcheck);
-        RETURN(result);
+                       cl_unuse(env, lock);
+                       cl_lock_release(env, lock, "localsize", current);
+                       result = 0;
+               } else
+                       result = -ENODATA;
+       }
+       cl_io_fini(env, io);
+       cl_env_put(env, &refcheck);
+       RETURN(result);
 }
 
index 9dd8918..d531507 100644 (file)
@@ -156,15 +156,15 @@ int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
         descr->cld_gid = gid;
         descr->cld_mode = CLM_GROUP;
 
-        enqflags = CEF_MUST | (nonblock ? CEF_NONBLOCK : 0);
-        descr->cld_enq_flags = enqflags;
-
-        lock = cl_lock_request(env, io, descr, GROUPLOCK_SCOPE, cfs_current());
-        if (IS_ERR(lock)) {
-                cl_io_fini(env, io);
-                cl_env_put(env, &refcheck);
-                return PTR_ERR(lock);
-        }
+       enqflags = CEF_MUST | (nonblock ? CEF_NONBLOCK : 0);
+       descr->cld_enq_flags = enqflags;
+
+       lock = cl_lock_request(env, io, descr, GROUPLOCK_SCOPE, current);
+       if (IS_ERR(lock)) {
+               cl_io_fini(env, io);
+               cl_env_put(env, &refcheck);
+               return PTR_ERR(lock);
+       }
 
         cg->cg_env  = cl_env_get(&refcheck);
         cg->cg_io   = io;
@@ -178,20 +178,20 @@ int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
 
 void cl_put_grouplock(struct ccc_grouplock *cg)
 {
-        struct lu_env  *env  = cg->cg_env;
-        struct cl_io   *io   = cg->cg_io;
-        struct cl_lock *lock = cg->cg_lock;
-        int             refcheck;
+       struct lu_env  *env  = cg->cg_env;
+       struct cl_io   *io   = cg->cg_io;
+       struct cl_lock *lock = cg->cg_lock;
+       int             refcheck;
 
-        LASSERT(cg->cg_env);
-        LASSERT(cg->cg_gid);
+       LASSERT(cg->cg_env);
+       LASSERT(cg->cg_gid);
 
-        cl_env_implant(env, &refcheck);
-        cl_env_put(env, &refcheck);
+       cl_env_implant(env, &refcheck);
+       cl_env_put(env, &refcheck);
 
-        cl_unuse(env, lock);
-        cl_lock_release(env, lock, GROUPLOCK_SCOPE, cfs_current());
-        cl_io_fini(env, io);
-        cl_env_put(env, NULL);
+       cl_unuse(env, lock);
+       cl_lock_release(env, lock, GROUPLOCK_SCOPE, current);
+       cl_io_fini(env, io);
+       cl_env_put(env, NULL);
 }
 
index 3364504..a233c3d 100644 (file)
@@ -826,7 +826,7 @@ int target_handle_connect(struct ptlrpc_request *req)
        /* Make sure the target isn't cleaned up while we're here. Yes,
         * there's still a race between the above check and our incref here.
         * Really, class_uuid2obd should take the ref. */
-        targref = class_incref(target, __FUNCTION__, cfs_current());
+       targref = class_incref(target, __FUNCTION__, current);
 
        target->obd_conn_inprogress++;
        spin_unlock(&target->obd_dev_lock);
@@ -1271,7 +1271,7 @@ out:
                target->obd_conn_inprogress--;
                spin_unlock(&target->obd_dev_lock);
 
-               class_decref(targref, __func__, cfs_current());
+               class_decref(targref, __func__, current);
        }
        if (rc)
                req->rq_status = rc;
index 6758410..d7ad151 100644 (file)
@@ -605,45 +605,45 @@ EXPORT_SYMBOL(ldlm_lock2handle);
 struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
                                     __u64 flags)
 {
-        struct ldlm_lock *lock;
-        ENTRY;
+       struct ldlm_lock *lock;
+       ENTRY;
 
-        LASSERT(handle);
+       LASSERT(handle);
 
        lock = class_handle2object(handle->cookie, NULL);
-        if (lock == NULL)
-                RETURN(NULL);
+       if (lock == NULL)
+               RETURN(NULL);
 
-        /* It's unlikely but possible that someone marked the lock as
-         * destroyed after we did handle2object on it */
+       /* It's unlikely but possible that someone marked the lock as
+        * destroyed after we did handle2object on it */
        if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED)== 0)) {
-                lu_ref_add(&lock->l_reference, "handle", cfs_current());
-                RETURN(lock);
-        }
+               lu_ref_add(&lock->l_reference, "handle", current);
+               RETURN(lock);
+       }
 
-        lock_res_and_lock(lock);
+       lock_res_and_lock(lock);
 
-        LASSERT(lock->l_resource != NULL);
+       LASSERT(lock->l_resource != NULL);
 
-        lu_ref_add_atomic(&lock->l_reference, "handle", cfs_current());
+       lu_ref_add_atomic(&lock->l_reference, "handle", current);
        if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) {
-                unlock_res_and_lock(lock);
-                CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
-                LDLM_LOCK_PUT(lock);
-                RETURN(NULL);
-        }
+               unlock_res_and_lock(lock);
+               CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
+               LDLM_LOCK_PUT(lock);
+               RETURN(NULL);
+       }
 
-        if (flags && (lock->l_flags & flags)) {
-                unlock_res_and_lock(lock);
-                LDLM_LOCK_PUT(lock);
-                RETURN(NULL);
-        }
+       if (flags && (lock->l_flags & flags)) {
+               unlock_res_and_lock(lock);
+               LDLM_LOCK_PUT(lock);
+               RETURN(NULL);
+       }
 
-        if (flags)
-                lock->l_flags |= flags;
+       if (flags)
+               lock->l_flags |= flags;
 
-        unlock_res_and_lock(lock);
-        RETURN(lock);
+       unlock_res_and_lock(lock);
+       RETURN(lock);
 }
 EXPORT_SYMBOL(__ldlm_handle2lock);
 /** @} ldlm_handles */
index d13f033..2118653 100644 (file)
@@ -147,7 +147,7 @@ static spinlock_t waiting_locks_spinlock;   /* BH lock (timer) */
  * All access to it should be under waiting_locks_spinlock.
  */
 static cfs_list_t waiting_locks_list;
-static cfs_timer_t waiting_locks_timer;
+static struct timer_list waiting_locks_timer;
 
 static struct expired_lock_thread {
        wait_queue_head_t       elt_waitq;
@@ -2522,7 +2522,7 @@ static int ldlm_bl_thread_main(void *arg);
 static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
 {
        struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
-       cfs_task_t *task;
+       struct task_struct *task;
 
        init_completion(&bltd.bltd_comp);
        bltd.bltd_num = cfs_atomic_read(&blp->blp_num_threads);
index d7347a7..3259ff8 100644 (file)
@@ -1388,7 +1388,7 @@ static int ldlm_pools_thread_main(void *arg)
 static int ldlm_pools_thread_start(void)
 {
        struct l_wait_info lwi = { 0 };
-       cfs_task_t *task;
+       struct task_struct *task;
        ENTRY;
 
        if (ldlm_pools_thread != NULL)
index 066c8c6..a519a85 100644 (file)
@@ -1678,9 +1678,9 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
                 if (&lock->l_lru == &ns->ns_unused_list)
                         break;
 
-                LDLM_LOCK_GET(lock);
+               LDLM_LOCK_GET(lock);
                spin_unlock(&ns->ns_lock);
-                lu_ref_add(&lock->l_reference, __FUNCTION__, cfs_current());
+               lu_ref_add(&lock->l_reference, __FUNCTION__, current);
 
                /* Pass the lock through the policy filter and see if it
                 * should stay in LRU.
@@ -1695,64 +1695,64 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
                 * old locks, but additionally choose them by
                 * their weight. Big extent locks will stay in
                 * the cache. */
-                result = pf(ns, lock, unused, added, count);
-                if (result == LDLM_POLICY_KEEP_LOCK) {
-                        lu_ref_del(&lock->l_reference,
-                                   __FUNCTION__, cfs_current());
-                        LDLM_LOCK_RELEASE(lock);
+               result = pf(ns, lock, unused, added, count);
+               if (result == LDLM_POLICY_KEEP_LOCK) {
+                       lu_ref_del(&lock->l_reference,
+                                  __FUNCTION__, current);
+                       LDLM_LOCK_RELEASE(lock);
                        spin_lock(&ns->ns_lock);
                        break;
                }
                if (result == LDLM_POLICY_SKIP_LOCK) {
                        lu_ref_del(&lock->l_reference,
-                                  __func__, cfs_current());
+                                  __func__, current);
                        LDLM_LOCK_RELEASE(lock);
                        spin_lock(&ns->ns_lock);
-                        continue;
-                }
+                       continue;
+               }
 
-                lock_res_and_lock(lock);
-                /* Check flags again under the lock. */
-                if ((lock->l_flags & LDLM_FL_CANCELING) ||
-                    (ldlm_lock_remove_from_lru(lock) == 0)) {
+               lock_res_and_lock(lock);
+               /* Check flags again under the lock. */
+               if ((lock->l_flags & LDLM_FL_CANCELING) ||
+                   (ldlm_lock_remove_from_lru(lock) == 0)) {
                        /* Another thread is removing lock from LRU, or
                         * somebody is already doing CANCEL, or there
                         * is a blocking request which will send cancel
                         * by itself, or the lock is no longer unused. */
-                        unlock_res_and_lock(lock);
-                        lu_ref_del(&lock->l_reference,
-                                   __FUNCTION__, cfs_current());
-                        LDLM_LOCK_RELEASE(lock);
+                       unlock_res_and_lock(lock);
+                       lu_ref_del(&lock->l_reference,
+                                  __FUNCTION__, current);
+                       LDLM_LOCK_RELEASE(lock);
                        spin_lock(&ns->ns_lock);
-                        continue;
-                }
-                LASSERT(!lock->l_readers && !lock->l_writers);
-
-                /* If we have chosen to cancel this lock voluntarily, we
-                 * better send cancel notification to server, so that it
-                 * frees appropriate state. This might lead to a race
-                 * where while we are doing cancel here, server is also
-                 * silently cancelling this lock. */
-                lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
-
-                /* Setting the CBPENDING flag is a little misleading,
-                 * but prevents an important race; namely, once
-                 * CBPENDING is set, the lock can accumulate no more
-                 * readers/writers. Since readers and writers are
-                 * already zero here, ldlm_lock_decref() won't see
-                 * this flag and call l_blocking_ast */
-                lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;
-
-                /* We can't re-add to l_lru as it confuses the
-                 * refcounting in ldlm_lock_remove_from_lru() if an AST
-                 * arrives after we drop lr_lock below. We use l_bl_ast
-                 * and can't use l_pending_chain as it is used both on
-                 * server and client nevertheless bug 5666 says it is
-                 * used only on server */
-                LASSERT(cfs_list_empty(&lock->l_bl_ast));
-                cfs_list_add(&lock->l_bl_ast, cancels);
-                unlock_res_and_lock(lock);
-                lu_ref_del(&lock->l_reference, __FUNCTION__, cfs_current());
+                       continue;
+               }
+               LASSERT(!lock->l_readers && !lock->l_writers);
+
+               /* If we have chosen to cancel this lock voluntarily, we
+                * better send cancel notification to server, so that it
+                * frees appropriate state. This might lead to a race
+                * where while we are doing cancel here, server is also
+                * silently cancelling this lock. */
+               lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
+
+               /* Setting the CBPENDING flag is a little misleading,
+                * but prevents an important race; namely, once
+                * CBPENDING is set, the lock can accumulate no more
+                * readers/writers. Since readers and writers are
+                * already zero here, ldlm_lock_decref() won't see
+                * this flag and call l_blocking_ast */
+               lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;
+
+               /* We can't re-add to l_lru as it confuses the
+                * refcounting in ldlm_lock_remove_from_lru() if an AST
+                * arrives after we drop lr_lock below. We use l_bl_ast
+                * and can't use l_pending_chain as it is used both on
+                * server and client nevertheless bug 5666 says it is
+                * used only on server */
+               LASSERT(cfs_list_empty(&lock->l_bl_ast));
+               cfs_list_add(&lock->l_bl_ast, cancels);
+               unlock_res_and_lock(lock);
+               lu_ref_del(&lock->l_reference, __FUNCTION__, current);
                spin_lock(&ns->ns_lock);
                added++;
                unused--;
index ce87539..2ec4aa2 100644 (file)
@@ -232,11 +232,11 @@ int cfs_cap_raised(cfs_cap_t cap)
 }
 
 cfs_cap_t cfs_curproc_cap_pack(void) {
-        return cfs_current()->cap_effective;
+       return current->cap_effective;
 }
 
 void cfs_curproc_cap_unpack(cfs_cap_t cap) {
-        cfs_current()->cap_effective = cap;
+       current->cap_effective = cap;
 }
 
 int cfs_capable(cfs_cap_t cap)
index 0ed5e52..7b6302d 100644 (file)
@@ -291,7 +291,7 @@ void ll_capa_timer_callback(unsigned long unused)
 
 int ll_capa_thread_start(void)
 {
-       cfs_task_t *task;
+       struct task_struct *task;
        ENTRY;
 
        init_waitqueue_head(&ll_capa_thread.t_ctl_waitq);
index 5cedfa9..6005f6f 100644 (file)
@@ -377,7 +377,7 @@ static int ll_close_thread(void *arg)
 int ll_close_thread_start(struct ll_close_queue **lcq_ret)
 {
        struct ll_close_queue *lcq;
-       cfs_task_t *task;
+       struct task_struct *task;
 
        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CLOSE_THREAD))
                return -EINTR;
index 4ee8ed3..82d29f8 100644 (file)
@@ -1197,7 +1197,7 @@ int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm);
 int lustre_check_remote_perm(struct inode *inode, int mask);
 
 /* llite/llite_capa.c */
-extern cfs_timer_t ll_capa_timer;
+extern struct timer_list ll_capa_timer;
 
 int ll_capa_thread_start(void);
 void ll_capa_thread_stop(void);
index d17697a..eeeefdb 100644 (file)
@@ -183,7 +183,7 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
        struct vvp_io           *vio;
        struct cl_env_nest       nest;
        int                      result;
-       cfs_sigset_t             set;
+       sigset_t                 set;
        struct inode             *inode;
        struct ll_inode_info     *lli;
        ENTRY;
@@ -262,7 +262,7 @@ out_io:
        cl_io_fini(env, io);
        cl_env_nested_put(&nest, env);
 out:
-       CDEBUG(D_MMAP, "%s mkwrite with %d\n", cfs_current()->comm, result);
+       CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
        LASSERT(ergo(result == 0, PageLocked(vmpage)));
 
        return result;
@@ -330,16 +330,16 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
                        vmf->page = NULL;
                }
         }
-        cl_io_fini(env, io);
-        cl_env_nested_put(&nest, env);
+       cl_io_fini(env, io);
+       cl_env_nested_put(&nest, env);
 
        vma->vm_flags |= ra_flags;
        if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
                fault_ret |= to_fault_error(result);
 
-        CDEBUG(D_MMAP, "%s fault %d/%d\n",
-               cfs_current()->comm, fault_ret, result);
-        RETURN(fault_ret);
+       CDEBUG(D_MMAP, "%s fault %d/%d\n",
+              current->comm, fault_ret, result);
+       RETURN(fault_ret);
 }
 
 static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -347,7 +347,7 @@ static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        int count = 0;
        bool printed = false;
        int result;
-       cfs_sigset_t set;
+       sigset_t set;
 
        /* Only SIGKILL and SIGTERM is allowed for fault/nopage/mkwrite
         * so that it can be killed by admin but not cause segfault by
index 397456a..ec52fc4 100644 (file)
@@ -131,7 +131,7 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
                         * page w/o holding inode mutex. This means we can
                         * add dirty pages into cache during truncate */
                        CERROR("Proc %s is dirting page w/o inode lock, this"
-                              "will break truncate.\n", cfs_current()->comm);
+                              "will break truncate.\n", current->comm);
                        libcfs_debug_dumpstack(NULL);
                        LBUG();
                        return ERR_PTR(-EIO);
@@ -225,70 +225,70 @@ static struct ll_cl_context *ll_cl_get(void)
  * for every page during write.
  */
 int ll_prepare_write(struct file *file, struct page *vmpage, unsigned from,
-                     unsigned to)
+                    unsigned to)
 {
-        struct ll_cl_context *lcc;
-        int result;
-        ENTRY;
+       struct ll_cl_context *lcc;
+       int result;
+       ENTRY;
 
-        lcc = ll_cl_init(file, vmpage, 1);
-        if (!IS_ERR(lcc)) {
-                struct lu_env  *env = lcc->lcc_env;
-                struct cl_io   *io  = lcc->lcc_io;
-                struct cl_page *page = lcc->lcc_page;
+       lcc = ll_cl_init(file, vmpage, 1);
+       if (!IS_ERR(lcc)) {
+               struct lu_env  *env = lcc->lcc_env;
+               struct cl_io   *io  = lcc->lcc_io;
+               struct cl_page *page = lcc->lcc_page;
+
+               cl_page_assume(env, io, page);
 
-                cl_page_assume(env, io, page);
-                
                result = cl_io_prepare_write(env, io, page, from, to);
-                if (result == 0) {
-                        /*
-                         * Add a reference, so that page is not evicted from
-                         * the cache until ->commit_write() is called.
-                         */
-                        cl_page_get(page);
-                        lu_ref_add(&page->cp_reference, "prepare_write",
-                                   cfs_current());
-                } else {
-                        cl_page_unassume(env, io, page);
-                        ll_cl_fini(lcc);
-                }
-                /* returning 0 in prepare assumes commit must be called
-                 * afterwards */
-        } else {
-                result = PTR_ERR(lcc);
-        }
-        RETURN(result);
+               if (result == 0) {
+                       /*
+                        * Add a reference, so that page is not evicted from
+                        * the cache until ->commit_write() is called.
+                        */
+                       cl_page_get(page);
+                       lu_ref_add(&page->cp_reference, "prepare_write",
+                                  current);
+               } else {
+                       cl_page_unassume(env, io, page);
+                       ll_cl_fini(lcc);
+               }
+               /* returning 0 in prepare assumes commit must be called
+                * afterwards */
+       } else {
+               result = PTR_ERR(lcc);
+       }
+       RETURN(result);
 }
 
 int ll_commit_write(struct file *file, struct page *vmpage, unsigned from,
-                    unsigned to)
+                   unsigned to)
 {
-        struct ll_cl_context *lcc;
-        struct lu_env    *env;
-        struct cl_io     *io;
-        struct cl_page   *page;
-        int result = 0;
-        ENTRY;
+       struct ll_cl_context *lcc;
+       struct lu_env    *env;
+       struct cl_io     *io;
+       struct cl_page   *page;
+       int result = 0;
+       ENTRY;
 
-        lcc  = ll_cl_get();
-        env  = lcc->lcc_env;
-        page = lcc->lcc_page;
-        io   = lcc->lcc_io;
-
-        LASSERT(cl_page_is_owned(page, io));
-        LASSERT(from <= to);
-        if (from != to) /* handle short write case. */
-                result = cl_io_commit_write(env, io, page, from, to);
-        if (cl_page_is_owned(page, io))
-                cl_page_unassume(env, io, page);
-
-        /*
-         * Release reference acquired by ll_prepare_write().
-         */
-        lu_ref_del(&page->cp_reference, "prepare_write", cfs_current());
-        cl_page_put(env, page);
-        ll_cl_fini(lcc);
-        RETURN(result);
+       lcc  = ll_cl_get();
+       env  = lcc->lcc_env;
+       page = lcc->lcc_page;
+       io   = lcc->lcc_io;
+
+       LASSERT(cl_page_is_owned(page, io));
+       LASSERT(from <= to);
+       if (from != to) /* handle short write case. */
+               result = cl_io_commit_write(env, io, page, from, to);
+       if (cl_page_is_owned(page, io))
+               cl_page_unassume(env, io, page);
+
+       /*
+        * Release reference acquired by ll_prepare_write().
+        */
+       lu_ref_del(&page->cp_reference, "prepare_write", current);
+       cl_page_put(env, page);
+       ll_cl_fini(lcc);
+       RETURN(result);
 }
 
 struct obd_capa *cl_capa_lookup(struct inode *inode, enum cl_req_type crt)
@@ -463,36 +463,36 @@ struct ll_ra_read *ll_ra_read_get(struct file *f)
 }
 
 static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
-                              struct cl_page_list *queue, struct cl_page *page,
-                              struct page *vmpage)
+                             struct cl_page_list *queue, struct cl_page *page,
+                             struct page *vmpage)
 {
-        struct ccc_page *cp;
-        int              rc;
+       struct ccc_page *cp;
+       int              rc;
 
-        ENTRY;
+       ENTRY;
 
-        rc = 0;
-        cl_page_assume(env, io, page);
-       lu_ref_add(&page->cp_reference, "ra", cfs_current());
+       rc = 0;
+       cl_page_assume(env, io, page);
+       lu_ref_add(&page->cp_reference, "ra", current);
        cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
        if (!cp->cpg_defer_uptodate && !PageUptodate(vmpage)) {
                rc = cl_page_is_under_lock(env, io, page);
-                if (rc == -EBUSY) {
-                        cp->cpg_defer_uptodate = 1;
-                        cp->cpg_ra_used = 0;
-                        cl_page_list_add(queue, page);
-                        rc = 1;
-                } else {
-                        cl_page_delete(env, page);
-                        rc = -ENOLCK;
-                }
+               if (rc == -EBUSY) {
+                       cp->cpg_defer_uptodate = 1;
+                       cp->cpg_ra_used = 0;
+                       cl_page_list_add(queue, page);
+                       rc = 1;
+               } else {
+                       cl_page_delete(env, page);
+                       rc = -ENOLCK;
+               }
        } else {
                /* skip completed pages */
                cl_page_unassume(env, io, page);
        }
-        lu_ref_del(&page->cp_reference, "ra", cfs_current());
-        cl_page_put(env, page);
-        RETURN(rc);
+       lu_ref_del(&page->cp_reference, "ra", current);
+       cl_page_put(env, page);
+       RETURN(rc);
 }
 
 /**
@@ -1176,10 +1176,10 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
         if (result == 0) {
                 page = cl_page_find(env, clob, vmpage->index,
                                     vmpage, CPT_CACHEABLE);
-                if (!IS_ERR(page)) {
-                        lu_ref_add(&page->cp_reference, "writepage",
-                                   cfs_current());
-                        cl_page_assume(env, io, page);
+               if (!IS_ERR(page)) {
+                       lu_ref_add(&page->cp_reference, "writepage",
+                                  current);
+                       cl_page_assume(env, io, page);
                        result = cl_page_flush(env, io, page);
                        if (result != 0) {
                                /*
@@ -1195,9 +1195,9 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
                        }
                        cl_page_disown(env, io, page);
                        unlocked = true;
-                        lu_ref_del(&page->cp_reference,
-                                   "writepage", cfs_current());
-                        cl_page_put(env, page);
+                       lu_ref_del(&page->cp_reference,
+                                  "writepage", current);
+                       cl_page_put(env, page);
                } else {
                        result = PTR_ERR(page);
                }
index 3bb99ac..183eda9 100644 (file)
@@ -1034,7 +1034,7 @@ static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
        struct ptlrpc_thread *thread = &sai->sai_agl_thread;
        struct l_wait_info    lwi    = { 0 };
        struct ll_inode_info  *plli;
-       cfs_task_t            *task;
+       struct task_struct            *task;
        ENTRY;
 
        CDEBUG(D_READA, "start agl thread: [pid %d] [parent %.*s]\n",
index 68ea741..3aadb74 100644 (file)
@@ -319,78 +319,78 @@ static int vvp_pgcache_obj_get(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 }
 
 static struct cl_object *vvp_pgcache_obj(const struct lu_env *env,
-                                         struct lu_device *dev,
-                                         struct vvp_pgcache_id *id)
+                                        struct lu_device *dev,
+                                        struct vvp_pgcache_id *id)
 {
-        LASSERT(lu_device_is_cl(dev));
-
-        id->vpi_depth &= 0xf;
-        id->vpi_obj    = NULL;
-        id->vpi_curdep = id->vpi_depth;
-
-        cfs_hash_hlist_for_each(dev->ld_site->ls_obj_hash, id->vpi_bucket,
-                                vvp_pgcache_obj_get, id);
-        if (id->vpi_obj != NULL) {
-                struct lu_object *lu_obj;
-
-                lu_obj = lu_object_locate(id->vpi_obj, dev->ld_type);
-                if (lu_obj != NULL) {
-                        lu_object_ref_add(lu_obj, "dump", cfs_current());
-                        return lu2cl(lu_obj);
-                }
-                lu_object_put(env, lu_object_top(id->vpi_obj));
-
-        } else if (id->vpi_curdep > 0) {
-                id->vpi_depth = 0xf;
-        }
-        return NULL;
+       LASSERT(lu_device_is_cl(dev));
+
+       id->vpi_depth &= 0xf;
+       id->vpi_obj    = NULL;
+       id->vpi_curdep = id->vpi_depth;
+
+       cfs_hash_hlist_for_each(dev->ld_site->ls_obj_hash, id->vpi_bucket,
+                               vvp_pgcache_obj_get, id);
+       if (id->vpi_obj != NULL) {
+               struct lu_object *lu_obj;
+
+               lu_obj = lu_object_locate(id->vpi_obj, dev->ld_type);
+               if (lu_obj != NULL) {
+                       lu_object_ref_add(lu_obj, "dump", current);
+                       return lu2cl(lu_obj);
+               }
+               lu_object_put(env, lu_object_top(id->vpi_obj));
+
+       } else if (id->vpi_curdep > 0) {
+               id->vpi_depth = 0xf;
+       }
+       return NULL;
 }
 
 static loff_t vvp_pgcache_find(const struct lu_env *env,
-                               struct lu_device *dev, loff_t pos)
+                              struct lu_device *dev, loff_t pos)
 {
-        struct cl_object     *clob;
-        struct lu_site       *site;
-        struct vvp_pgcache_id id;
+       struct cl_object     *clob;
+       struct lu_site       *site;
+       struct vvp_pgcache_id id;
 
-        site = dev->ld_site;
-        vvp_pgcache_id_unpack(pos, &id);
+       site = dev->ld_site;
+       vvp_pgcache_id_unpack(pos, &id);
 
-        while (1) {
-                if (id.vpi_bucket >= CFS_HASH_NHLIST(site->ls_obj_hash))
-                        return ~0ULL;
-                clob = vvp_pgcache_obj(env, dev, &id);
-                if (clob != NULL) {
-                        struct cl_object_header *hdr;
-                        int                      nr;
-                        struct cl_page          *pg;
+       while (1) {
+               if (id.vpi_bucket >= CFS_HASH_NHLIST(site->ls_obj_hash))
+                       return ~0ULL;
+               clob = vvp_pgcache_obj(env, dev, &id);
+               if (clob != NULL) {
+                       struct cl_object_header *hdr;
+                       int                      nr;
+                       struct cl_page          *pg;
 
-                        /* got an object. Find next page. */
-                        hdr = cl_object_header(clob);
+                       /* got an object. Find next page. */
+                       hdr = cl_object_header(clob);
 
                        spin_lock(&hdr->coh_page_guard);
-                        nr = radix_tree_gang_lookup(&hdr->coh_tree,
-                                                    (void **)&pg,
-                                                    id.vpi_index, 1);
-                        if (nr > 0) {
-                                id.vpi_index = pg->cp_index;
-                                /* Cant support over 16T file */
-                                nr = !(pg->cp_index > 0xffffffff);
-                        }
+                       nr = radix_tree_gang_lookup(&hdr->coh_tree,
+                                                   (void **)&pg,
+                                                   id.vpi_index, 1);
+                       if (nr > 0) {
+                               id.vpi_index = pg->cp_index;
+                               /* Cant support over 16T file */
+                               nr = !(pg->cp_index > 0xffffffff);
+                       }
                        spin_unlock(&hdr->coh_page_guard);
 
-                        lu_object_ref_del(&clob->co_lu, "dump", cfs_current());
-                        cl_object_put(env, clob);
-                        if (nr > 0)
-                                return vvp_pgcache_id_pack(&id);
-                }
-                /* to the next object. */
-                ++id.vpi_depth;
-                id.vpi_depth &= 0xf;
-                if (id.vpi_depth == 0 && ++id.vpi_bucket == 0)
-                        return ~0ULL;
-                id.vpi_index = 0;
-        }
+                       lu_object_ref_del(&clob->co_lu, "dump", current);
+                       cl_object_put(env, clob);
+                       if (nr > 0)
+                               return vvp_pgcache_id_pack(&id);
+               }
+               /* to the next object. */
+               ++id.vpi_depth;
+               id.vpi_depth &= 0xf;
+               if (id.vpi_depth == 0 && ++id.vpi_bucket == 0)
+                       return ~0ULL;
+               id.vpi_index = 0;
+       }
 }
 
 #define seq_page_flag(seq, page, flag, has_flags) do {                  \
@@ -432,45 +432,45 @@ static void vvp_pgcache_page_show(const struct lu_env *env,
 
 static int vvp_pgcache_show(struct seq_file *f, void *v)
 {
-        loff_t                   pos;
-        struct ll_sb_info       *sbi;
-        struct cl_object        *clob;
-        struct lu_env           *env;
-        struct cl_page          *page;
-        struct cl_object_header *hdr;
-        struct vvp_pgcache_id    id;
-        int                      refcheck;
-        int                      result;
-
-        env = cl_env_get(&refcheck);
-        if (!IS_ERR(env)) {
-                pos = *(loff_t *) v;
-                vvp_pgcache_id_unpack(pos, &id);
-                sbi = f->private;
-                clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id);
-                if (clob != NULL) {
-                        hdr = cl_object_header(clob);
+       loff_t                   pos;
+       struct ll_sb_info       *sbi;
+       struct cl_object        *clob;
+       struct lu_env           *env;
+       struct cl_page          *page;
+       struct cl_object_header *hdr;
+       struct vvp_pgcache_id    id;
+       int                      refcheck;
+       int                      result;
+
+       env = cl_env_get(&refcheck);
+       if (!IS_ERR(env)) {
+               pos = *(loff_t *) v;
+               vvp_pgcache_id_unpack(pos, &id);
+               sbi = f->private;
+               clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id);
+               if (clob != NULL) {
+                       hdr = cl_object_header(clob);
 
                        spin_lock(&hdr->coh_page_guard);
                        page = cl_page_lookup(hdr, id.vpi_index);
 
-                        seq_printf(f, "%8x@"DFID": ",
-                                   id.vpi_index, PFID(&hdr->coh_lu.loh_fid));
-                        if (page != NULL) {
-                                vvp_pgcache_page_show(env, f, page);
-                                cl_page_put(env, page);
-                        } else
-                                seq_puts(f, "missing\n");
+                       seq_printf(f, "%8x@"DFID": ",
+                                  id.vpi_index, PFID(&hdr->coh_lu.loh_fid));
+                       if (page != NULL) {
+                               vvp_pgcache_page_show(env, f, page);
+                               cl_page_put(env, page);
+                       } else
+                               seq_puts(f, "missing\n");
                        spin_unlock(&hdr->coh_page_guard);
-                        lu_object_ref_del(&clob->co_lu, "dump", cfs_current());
-                        cl_object_put(env, clob);
-                } else
-                        seq_printf(f, "%llx missing\n", pos);
-                cl_env_put(env, &refcheck);
-                result = 0;
-        } else
-                result = PTR_ERR(env);
-        return result;
+                       lu_object_ref_del(&clob->co_lu, "dump", current);
+                       cl_object_put(env, clob);
+               } else
+                       seq_printf(f, "%llx missing\n", pos);
+               cl_env_put(env, &refcheck);
+               result = 0;
+       } else
+               result = PTR_ERR(env);
+       return result;
 }
 
 static void *vvp_pgcache_start(struct seq_file *f, loff_t *pos)
index 4b1f083..2e899ac 100644 (file)
@@ -203,15 +203,15 @@ static inline char *llt2str(enum lov_layout_type llt)
  * function corresponding to the current layout type.
  */
 struct lov_object {
-        struct cl_object       lo_cl;
-        /**
-         * Serializes object operations with transitions between layout types.
-         *
-         * This semaphore is taken in shared mode by all object methods, and
-         * is taken in exclusive mode when object type is changed.
-         *
-         * \see lov_object::lo_type
-         */
+       struct cl_object       lo_cl;
+       /**
+        * Serializes object operations with transitions between layout types.
+        *
+        * This semaphore is taken in shared mode by all object methods, and
+        * is taken in exclusive mode when object type is changed.
+        *
+        * \see lov_object::lo_type
+        */
        struct rw_semaphore     lo_type_guard;
        /**
         * Type of an object. Protected by lov_object::lo_type_guard.
@@ -236,9 +236,9 @@ struct lov_object {
         */
        struct lov_stripe_md  *lo_lsm;
 
-        union lov_layout_state {
-                struct lov_layout_raid0 {
-                        unsigned               lo_nr;
+       union lov_layout_state {
+               struct lov_layout_raid0 {
+                       unsigned               lo_nr;
                        /**
                         * When this is true, lov_object::lo_attr contains
                         * valid up to date attributes for a top-level
@@ -246,42 +246,42 @@ struct lov_object {
                         * any sub-object change.
                         */
                        int                    lo_attr_valid;
-                        /**
-                         * Array of sub-objects. Allocated when top-object is
-                         * created (lov_init_raid0()).
-                         *
-                         * Top-object is a strict master of its sub-objects:
-                         * it is created before them, and outlives its
-                         * children (this later is necessary so that basic
-                         * functions like cl_object_top() always
-                         * work). Top-object keeps a reference on every
-                         * sub-object.
-                         *
-                         * When top-object is destroyed (lov_delete_raid0())
-                         * it releases its reference to a sub-object and waits
-                         * until the latter is finally destroyed.
-                         */
-                        struct lovsub_object **lo_sub;
-                        /**
-                         * protect lo_sub
-                         */
+                       /**
+                        * Array of sub-objects. Allocated when top-object is
+                        * created (lov_init_raid0()).
+                        *
+                        * Top-object is a strict master of its sub-objects:
+                        * it is created before them, and outlives its
+                        * children (this later is necessary so that basic
+                        * functions like cl_object_top() always
+                        * work). Top-object keeps a reference on every
+                        * sub-object.
+                        *
+                        * When top-object is destroyed (lov_delete_raid0())
+                        * it releases its reference to a sub-object and waits
+                        * until the latter is finally destroyed.
+                        */
+                       struct lovsub_object **lo_sub;
+                       /**
+                        * protect lo_sub
+                        */
                        spinlock_t              lo_sub_lock;
-                        /**
-                         * Cached object attribute, built from sub-object
-                         * attributes.
-                         */
-                        struct cl_attr         lo_attr;
-                } raid0;
-                struct lov_layout_state_empty {
-                } empty;
+                       /**
+                        * Cached object attribute, built from sub-object
+                        * attributes.
+                        */
+                       struct cl_attr         lo_attr;
+               } raid0;
+               struct lov_layout_state_empty {
+               } empty;
                struct lov_layout_state_released {
                } released;
-        } u;
-        /**
-         * Thread that acquired lov_object::lo_type_guard in an exclusive
-         * mode.
-         */
-        cfs_task_t            *lo_owner;
+       } u;
+       /**
+        * Thread that acquired lov_object::lo_type_guard in an exclusive
+        * mode.
+        */
+       struct task_struct            *lo_owner;
 };
 
 /**
index 230f9d6..ae80756 100644 (file)
@@ -587,13 +587,13 @@ enum lov_layout_type lov_type(struct lov_stripe_md *lsm)
 
 static inline void lov_conf_freeze(struct lov_object *lov)
 {
-       if (lov->lo_owner != cfs_current())
+       if (lov->lo_owner != current)
                down_read(&lov->lo_type_guard);
 }
 
 static inline void lov_conf_thaw(struct lov_object *lov)
 {
-       if (lov->lo_owner != cfs_current())
+       if (lov->lo_owner != current)
                up_read(&lov->lo_type_guard);
 }
 
@@ -631,10 +631,10 @@ do {                                                                    \
 
 static void lov_conf_lock(struct lov_object *lov)
 {
-       LASSERT(lov->lo_owner != cfs_current());
+       LASSERT(lov->lo_owner != current);
        down_write(&lov->lo_type_guard);
        LASSERT(lov->lo_owner == NULL);
-       lov->lo_owner = cfs_current();
+       lov->lo_owner = current;
 }
 
 static void lov_conf_unlock(struct lov_object *lov)
@@ -933,7 +933,7 @@ struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
                lsm = lsm_addref(lov->lo_lsm);
                CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n",
                        lsm, cfs_atomic_read(&lsm->lsm_refc),
-                       lov->lo_layout_invalid, cfs_current());
+                       lov->lo_layout_invalid, current);
        }
        lov_conf_thaw(lov);
        return lsm;
@@ -945,7 +945,7 @@ void lov_lsm_decref(struct lov_object *lov, struct lov_stripe_md *lsm)
                return;
 
        CDEBUG(D_INODE, "lsm %p decref %d by %p.\n",
-               lsm, cfs_atomic_read(&lsm->lsm_refc), cfs_current());
+               lsm, cfs_atomic_read(&lsm->lsm_refc), current);
 
        lov_free_memmd(&lsm);
 }
index 51b3b22..3dcc6c2 100644 (file)
@@ -66,26 +66,26 @@ static void lovsub_lock_fini(const struct lu_env *env,
 
 static void lovsub_parent_lock(const struct lu_env *env, struct lov_lock *lov)
 {
-        struct cl_lock *parent;
-
-        ENTRY;
-        parent = lov->lls_cl.cls_lock;
-        cl_lock_get(parent);
-        lu_ref_add(&parent->cll_reference, "lovsub-parent", cfs_current());
-        cl_lock_mutex_get(env, parent);
-        EXIT;
+       struct cl_lock *parent;
+
+       ENTRY;
+       parent = lov->lls_cl.cls_lock;
+       cl_lock_get(parent);
+       lu_ref_add(&parent->cll_reference, "lovsub-parent", current);
+       cl_lock_mutex_get(env, parent);
+       EXIT;
 }
 
 static void lovsub_parent_unlock(const struct lu_env *env, struct lov_lock *lov)
 {
-        struct cl_lock *parent;
-
-        ENTRY;
-        parent = lov->lls_cl.cls_lock;
-        cl_lock_mutex_put(env, lov->lls_cl.cls_lock);
-        lu_ref_del(&parent->cll_reference, "lovsub-parent", cfs_current());
-        cl_lock_put(env, parent);
-        EXIT;
+       struct cl_lock *parent;
+
+       ENTRY;
+       parent = lov->lls_cl.cls_lock;
+       cl_lock_mutex_put(env, lov->lls_cl.cls_lock);
+       lu_ref_del(&parent->cll_reference, "lovsub-parent", current);
+       cl_lock_put(env, parent);
+       EXIT;
 }
 
 /**
index 4f6a44c..7c755ce 100644 (file)
@@ -291,7 +291,7 @@ static int mdt_ck_thread_main(void *args)
 int mdt_ck_thread_start(struct mdt_device *mdt)
 {
        struct ptlrpc_thread *thread = &mdt->mdt_ck_thread;
-       cfs_task_t *task;
+       struct task_struct *task;
 
        init_waitqueue_head(&thread->t_ctl_waitq);
        task = kthread_run(mdt_ck_thread_main, mdt, "mdt_ck");
index dd37805..6044ea3 100644 (file)
@@ -6064,26 +6064,26 @@ int mdt_get_info(struct mdt_thread_info *info)
 
 /* Pass the ioc down */
 static int mdt_ioc_child(struct lu_env *env, struct mdt_device *mdt,
-                         unsigned int cmd, int len, void *data)
+                        unsigned int cmd, int len, void *data)
 {
-        struct lu_context ioctl_session;
-        struct md_device *next = mdt->mdt_child;
-        int rc;
-        ENTRY;
+       struct lu_context ioctl_session;
+       struct md_device *next = mdt->mdt_child;
+       int rc;
+       ENTRY;
 
-        rc = lu_context_init(&ioctl_session, LCT_SESSION);
-        if (rc)
-                RETURN(rc);
-        ioctl_session.lc_thread = (struct ptlrpc_thread *)cfs_current();
-        lu_context_enter(&ioctl_session);
-        env->le_ses = &ioctl_session;
+       rc = lu_context_init(&ioctl_session, LCT_SESSION);
+       if (rc)
+               RETURN(rc);
+       ioctl_session.lc_thread = (struct ptlrpc_thread *)current;
+       lu_context_enter(&ioctl_session);
+       env->le_ses = &ioctl_session;
 
-        LASSERT(next->md_ops->mdo_iocontrol);
-        rc = next->md_ops->mdo_iocontrol(env, next, cmd, len, data);
+       LASSERT(next->md_ops->mdo_iocontrol);
+       rc = next->md_ops->mdo_iocontrol(env, next, cmd, len, data);
 
-        lu_context_exit(&ioctl_session);
-        lu_context_fini(&ioctl_session);
-        RETURN(rc);
+       lu_context_exit(&ioctl_session);
+       lu_context_fini(&ioctl_session);
+       RETURN(rc);
 }
 
 static int mdt_ioc_version_get(struct mdt_thread_info *mti, void *karg)
index 7db09e8..f4483de 100644 (file)
@@ -194,15 +194,15 @@ struct mdt_device {
        rwlock_t                   mdt_sptlrpc_lock;
         struct sptlrpc_rule_set    mdt_sptlrpc_rset;
 
-        /* capability keys */
-        unsigned long              mdt_capa_timeout;
-        __u32                      mdt_capa_alg;
-        struct dt_object          *mdt_ck_obj;
-        unsigned long              mdt_ck_timeout;
-        unsigned long              mdt_ck_expiry;
-        cfs_timer_t                mdt_ck_timer;
-        struct ptlrpc_thread       mdt_ck_thread;
-        struct lustre_capa_key     mdt_capa_keys[2];
+       /* capability keys */
+       unsigned long              mdt_capa_timeout;
+       __u32                      mdt_capa_alg;
+       struct dt_object          *mdt_ck_obj;
+       unsigned long              mdt_ck_timeout;
+       unsigned long              mdt_ck_expiry;
+       struct timer_list          mdt_ck_timer;
+       struct ptlrpc_thread       mdt_ck_thread;
+       struct lustre_capa_key     mdt_capa_keys[2];
        unsigned int               mdt_capa_conf:1,
                                   mdt_som_conf:1,
                                   /* Enable remote dir on non-MDT0 */
index 7de8f59..4d53969 100644 (file)
@@ -470,7 +470,7 @@ static int mgs_ir_notify(void *arg)
 int mgs_ir_init_fs(const struct lu_env *env, struct mgs_device *mgs,
                   struct fs_db *fsdb)
 {
-       cfs_task_t *task;
+       struct task_struct *task;
 
        if (!ir_timeout)
                ir_timeout = OBD_IR_MGS_TIMEOUT;
index 03f9e81..af0fca0 100644 (file)
@@ -1080,11 +1080,11 @@ EXPORT_SYMBOL(cl_io_slice_add);
  */
 void cl_page_list_init(struct cl_page_list *plist)
 {
-        ENTRY;
-        plist->pl_nr = 0;
-        CFS_INIT_LIST_HEAD(&plist->pl_pages);
-        plist->pl_owner = cfs_current();
-        EXIT;
+       ENTRY;
+       plist->pl_nr = 0;
+       CFS_INIT_LIST_HEAD(&plist->pl_pages);
+       plist->pl_owner = current;
+       EXIT;
 }
 EXPORT_SYMBOL(cl_page_list_init);
 
@@ -1093,18 +1093,18 @@ EXPORT_SYMBOL(cl_page_list_init);
  */
 void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
 {
-        ENTRY;
-        /* it would be better to check that page is owned by "current" io, but
-         * it is not passed here. */
-        LASSERT(page->cp_owner != NULL);
-        LINVRNT(plist->pl_owner == cfs_current());
+       ENTRY;
+       /* it would be better to check that page is owned by "current" io, but
+        * it is not passed here. */
+       LASSERT(page->cp_owner != NULL);
+       LINVRNT(plist->pl_owner == current);
 
        lockdep_off();
        mutex_lock(&page->cp_mutex);
        lockdep_on();
-        LASSERT(cfs_list_empty(&page->cp_batch));
-        cfs_list_add_tail(&page->cp_batch, &plist->pl_pages);
-        ++plist->pl_nr;
+       LASSERT(cfs_list_empty(&page->cp_batch));
+       cfs_list_add_tail(&page->cp_batch, &plist->pl_pages);
+       ++plist->pl_nr;
        lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
        cl_page_get(page);
        EXIT;
@@ -1115,17 +1115,17 @@ EXPORT_SYMBOL(cl_page_list_add);
  * Removes a page from a page list.
  */
 void cl_page_list_del(const struct lu_env *env,
-                      struct cl_page_list *plist, struct cl_page *page)
+                     struct cl_page_list *plist, struct cl_page *page)
 {
-        LASSERT(plist->pl_nr > 0);
-        LINVRNT(plist->pl_owner == cfs_current());
+       LASSERT(plist->pl_nr > 0);
+       LINVRNT(plist->pl_owner == current);
 
-        ENTRY;
-        cfs_list_del_init(&page->cp_batch);
+       ENTRY;
+       cfs_list_del_init(&page->cp_batch);
        lockdep_off();
        mutex_unlock(&page->cp_mutex);
        lockdep_on();
-        --plist->pl_nr;
+       --plist->pl_nr;
        lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
        cl_page_put(env, page);
        EXIT;
@@ -1136,16 +1136,16 @@ EXPORT_SYMBOL(cl_page_list_del);
  * Moves a page from one page list to another.
  */
 void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
-                       struct cl_page *page)
+                      struct cl_page *page)
 {
-        LASSERT(src->pl_nr > 0);
-        LINVRNT(dst->pl_owner == cfs_current());
-        LINVRNT(src->pl_owner == cfs_current());
+       LASSERT(src->pl_nr > 0);
+       LINVRNT(dst->pl_owner == current);
+       LINVRNT(src->pl_owner == current);
 
-        ENTRY;
-        cfs_list_move_tail(&page->cp_batch, &dst->pl_pages);
-        --src->pl_nr;
-        ++dst->pl_nr;
+       ENTRY;
+       cfs_list_move_tail(&page->cp_batch, &dst->pl_pages);
+       --src->pl_nr;
+       ++dst->pl_nr;
        lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
                      src, dst);
        EXIT;
@@ -1157,16 +1157,16 @@ EXPORT_SYMBOL(cl_page_list_move);
  */
 void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
 {
-        struct cl_page *page;
-        struct cl_page *tmp;
+       struct cl_page *page;
+       struct cl_page *tmp;
 
-        LINVRNT(list->pl_owner == cfs_current());
-        LINVRNT(head->pl_owner == cfs_current());
+       LINVRNT(list->pl_owner == current);
+       LINVRNT(head->pl_owner == current);
 
-        ENTRY;
-        cl_page_list_for_each_safe(page, tmp, list)
-                cl_page_list_move(head, list, page);
-        EXIT;
+       ENTRY;
+       cl_page_list_for_each_safe(page, tmp, list)
+               cl_page_list_move(head, list, page);
+       EXIT;
 }
 EXPORT_SYMBOL(cl_page_list_splice);
 
@@ -1177,36 +1177,36 @@ void cl_page_disown0(const struct lu_env *env,
  * Disowns pages in a queue.
  */
 void cl_page_list_disown(const struct lu_env *env,
-                         struct cl_io *io, struct cl_page_list *plist)
+                        struct cl_io *io, struct cl_page_list *plist)
 {
-        struct cl_page *page;
-        struct cl_page *temp;
+       struct cl_page *page;
+       struct cl_page *temp;
 
-        LINVRNT(plist->pl_owner == cfs_current());
+       LINVRNT(plist->pl_owner == current);
 
-        ENTRY;
-        cl_page_list_for_each_safe(page, temp, plist) {
-                LASSERT(plist->pl_nr > 0);
+       ENTRY;
+       cl_page_list_for_each_safe(page, temp, plist) {
+               LASSERT(plist->pl_nr > 0);
 
-                cfs_list_del_init(&page->cp_batch);
+               cfs_list_del_init(&page->cp_batch);
                lockdep_off();
                mutex_unlock(&page->cp_mutex);
                lockdep_on();
-                --plist->pl_nr;
-                /*
-                 * cl_page_disown0 rather than usual cl_page_disown() is used,
-                 * because pages are possibly in CPS_FREEING state already due
-                 * to the call to cl_page_list_discard().
-                 */
-                /*
-                 * XXX cl_page_disown0() will fail if page is not locked.
-                 */
-                cl_page_disown0(env, io, page);
+               --plist->pl_nr;
+               /*
+                * cl_page_disown0 rather than usual cl_page_disown() is used,
+                * because pages are possibly in CPS_FREEING state already due
+                * to the call to cl_page_list_discard().
+                */
+               /*
+                * XXX cl_page_disown0() will fail if page is not locked.
+                */
+               cl_page_disown0(env, io, page);
                lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue",
                              plist);
-                cl_page_put(env, page);
-        }
-        EXIT;
+               cl_page_put(env, page);
+       }
+       EXIT;
 }
 EXPORT_SYMBOL(cl_page_list_disown);
 
@@ -1215,16 +1215,16 @@ EXPORT_SYMBOL(cl_page_list_disown);
  */
 void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
 {
-        struct cl_page *page;
-        struct cl_page *temp;
+       struct cl_page *page;
+       struct cl_page *temp;
 
-        LINVRNT(plist->pl_owner == cfs_current());
+       LINVRNT(plist->pl_owner == current);
 
-        ENTRY;
-        cl_page_list_for_each_safe(page, temp, plist)
-                cl_page_list_del(env, plist, page);
-        LASSERT(plist->pl_nr == 0);
-        EXIT;
+       ENTRY;
+       cl_page_list_for_each_safe(page, temp, plist)
+               cl_page_list_del(env, plist, page);
+       LASSERT(plist->pl_nr == 0);
+       EXIT;
 }
 EXPORT_SYMBOL(cl_page_list_fini);
 
@@ -1232,26 +1232,26 @@ EXPORT_SYMBOL(cl_page_list_fini);
  * Owns all pages in a queue.
  */
 int cl_page_list_own(const struct lu_env *env,
-                     struct cl_io *io, struct cl_page_list *plist)
+                    struct cl_io *io, struct cl_page_list *plist)
 {
-        struct cl_page *page;
-        struct cl_page *temp;
-        pgoff_t index = 0;
-        int result;
+       struct cl_page *page;
+       struct cl_page *temp;
+       pgoff_t index = 0;
+       int result;
 
-        LINVRNT(plist->pl_owner == cfs_current());
+       LINVRNT(plist->pl_owner == current);
 
-        ENTRY;
-        result = 0;
-        cl_page_list_for_each_safe(page, temp, plist) {
-                LASSERT(index <= page->cp_index);
-                index = page->cp_index;
-                if (cl_page_own(env, io, page) == 0)
-                        result = result ?: page->cp_error;
-                else
-                        cl_page_list_del(env, plist, page);
-        }
-        RETURN(result);
+       ENTRY;
+       result = 0;
+       cl_page_list_for_each_safe(page, temp, plist) {
+               LASSERT(index <= page->cp_index);
+               index = page->cp_index;
+               if (cl_page_own(env, io, page) == 0)
+                       result = result ?: page->cp_error;
+               else
+                       cl_page_list_del(env, plist, page);
+       }
+       RETURN(result);
 }
 EXPORT_SYMBOL(cl_page_list_own);
 
@@ -1259,14 +1259,14 @@ EXPORT_SYMBOL(cl_page_list_own);
  * Assumes all pages in a queue.
  */
 void cl_page_list_assume(const struct lu_env *env,
-                         struct cl_io *io, struct cl_page_list *plist)
+                        struct cl_io *io, struct cl_page_list *plist)
 {
-        struct cl_page *page;
+       struct cl_page *page;
 
-        LINVRNT(plist->pl_owner == cfs_current());
+       LINVRNT(plist->pl_owner == current);
 
-        cl_page_list_for_each(page, plist)
-                cl_page_assume(env, io, page);
+       cl_page_list_for_each(page, plist)
+               cl_page_assume(env, io, page);
 }
 EXPORT_SYMBOL(cl_page_list_assume);
 
@@ -1274,15 +1274,15 @@ EXPORT_SYMBOL(cl_page_list_assume);
  * Discards all pages in a queue.
  */
 void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
-                          struct cl_page_list *plist)
+                         struct cl_page_list *plist)
 {
-        struct cl_page *page;
+       struct cl_page *page;
 
-        LINVRNT(plist->pl_owner == cfs_current());
-        ENTRY;
-        cl_page_list_for_each(page, plist)
-                cl_page_discard(env, io, page);
-        EXIT;
+       LINVRNT(plist->pl_owner == current);
+       ENTRY;
+       cl_page_list_for_each(page, plist)
+               cl_page_discard(env, io, page);
+       EXIT;
 }
 EXPORT_SYMBOL(cl_page_list_discard);
 
@@ -1290,20 +1290,20 @@ EXPORT_SYMBOL(cl_page_list_discard);
  * Unmaps all pages in a queue from user virtual memory.
  */
 int cl_page_list_unmap(const struct lu_env *env, struct cl_io *io,
-                        struct cl_page_list *plist)
+                      struct cl_page_list *plist)
 {
-        struct cl_page *page;
-        int result;
+       struct cl_page *page;
+       int result;
 
-        LINVRNT(plist->pl_owner == cfs_current());
-        ENTRY;
-        result = 0;
-        cl_page_list_for_each(page, plist) {
-                result = cl_page_unmap(env, io, page);
-                if (result != 0)
-                        break;
-        }
-        RETURN(result);
+       LINVRNT(plist->pl_owner == current);
+       ENTRY;
+       result = 0;
+       cl_page_list_for_each(page, plist) {
+               result = cl_page_unmap(env, io, page);
+               if (result != 0)
+                       break;
+       }
+       RETURN(result);
 }
 EXPORT_SYMBOL(cl_page_list_unmap);
 
index 5d7708a..0db9fb6 100644 (file)
@@ -423,19 +423,19 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
  * \see CLS_INTRANSIT
  */
 enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
-                                     struct cl_lock *lock)
+                                    struct cl_lock *lock)
 {
-        enum cl_lock_state state = lock->cll_state;
+       enum cl_lock_state state = lock->cll_state;
 
-        LASSERT(cl_lock_is_mutexed(lock));
-        LASSERT(state != CLS_INTRANSIT);
-        LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
-                 "Malformed lock state %d.\n", state);
+       LASSERT(cl_lock_is_mutexed(lock));
+       LASSERT(state != CLS_INTRANSIT);
+       LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
+                "Malformed lock state %d.\n", state);
 
-        cl_lock_state_set(env, lock, CLS_INTRANSIT);
-        lock->cll_intransit_owner = cfs_current();
-        cl_lock_hold_add(env, lock, "intransit", cfs_current());
-        return state;
+       cl_lock_state_set(env, lock, CLS_INTRANSIT);
+       lock->cll_intransit_owner = current;
+       cl_lock_hold_add(env, lock, "intransit", current);
+       return state;
 }
 EXPORT_SYMBOL(cl_lock_intransit);
 
@@ -443,16 +443,16 @@ EXPORT_SYMBOL(cl_lock_intransit);
  *  Exit the intransit state and restore the lock state to the original state
  */
 void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
-                       enum cl_lock_state state)
+                      enum cl_lock_state state)
 {
-        LASSERT(cl_lock_is_mutexed(lock));
-        LASSERT(lock->cll_state == CLS_INTRANSIT);
-        LASSERT(state != CLS_INTRANSIT);
-        LASSERT(lock->cll_intransit_owner == cfs_current());
+       LASSERT(cl_lock_is_mutexed(lock));
+       LASSERT(lock->cll_state == CLS_INTRANSIT);
+       LASSERT(state != CLS_INTRANSIT);
+       LASSERT(lock->cll_intransit_owner == current);
 
-        lock->cll_intransit_owner = NULL;
-        cl_lock_state_set(env, lock, state);
-        cl_lock_unhold(env, lock, "intransit", cfs_current());
+       lock->cll_intransit_owner = NULL;
+       cl_lock_state_set(env, lock, state);
+       cl_lock_unhold(env, lock, "intransit", current);
 }
 EXPORT_SYMBOL(cl_lock_extransit);
 
@@ -461,9 +461,9 @@ EXPORT_SYMBOL(cl_lock_extransit);
  */
 int cl_lock_is_intransit(struct cl_lock *lock)
 {
-        LASSERT(cl_lock_is_mutexed(lock));
-        return lock->cll_state == CLS_INTRANSIT &&
-               lock->cll_intransit_owner != cfs_current();
+       LASSERT(cl_lock_is_mutexed(lock));
+       return lock->cll_state == CLS_INTRANSIT &&
+              lock->cll_intransit_owner != current;
 }
 EXPORT_SYMBOL(cl_lock_is_intransit);
 /**
@@ -674,29 +674,29 @@ static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
  */
 void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
 {
-        LINVRNT(cl_lock_invariant(env, lock));
-
-        if (lock->cll_guarder == cfs_current()) {
-                LINVRNT(cl_lock_is_mutexed(lock));
-                LINVRNT(lock->cll_depth > 0);
-        } else {
-                struct cl_object_header *hdr;
-                struct cl_thread_info   *info;
-                int i;
+       LINVRNT(cl_lock_invariant(env, lock));
 
-                LINVRNT(lock->cll_guarder != cfs_current());
-                hdr = cl_object_header(lock->cll_descr.cld_obj);
-                /*
-                 * Check that mutices are taken in the bottom-to-top order.
-                 */
-                info = cl_env_info(env);
-                for (i = 0; i < hdr->coh_nesting; ++i)
-                        LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
+       if (lock->cll_guarder == current) {
+               LINVRNT(cl_lock_is_mutexed(lock));
+               LINVRNT(lock->cll_depth > 0);
+       } else {
+               struct cl_object_header *hdr;
+               struct cl_thread_info   *info;
+               int i;
+
+               LINVRNT(lock->cll_guarder != current);
+               hdr = cl_object_header(lock->cll_descr.cld_obj);
+               /*
+                * Check that mutices are taken in the bottom-to-top order.
+                */
+               info = cl_env_info(env);
+               for (i = 0; i < hdr->coh_nesting; ++i)
+                       LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
                mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
-                lock->cll_guarder = cfs_current();
-                LINVRNT(lock->cll_depth == 0);
-        }
-        cl_lock_mutex_tail(env, lock);
+               lock->cll_guarder = current;
+               LINVRNT(lock->cll_depth == 0);
+       }
+       cl_lock_mutex_tail(env, lock);
 }
 EXPORT_SYMBOL(cl_lock_mutex_get);
 
@@ -713,22 +713,22 @@ EXPORT_SYMBOL(cl_lock_mutex_get);
  */
 int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
 {
-        int result;
+       int result;
 
-        LINVRNT(cl_lock_invariant_trusted(env, lock));
-        ENTRY;
+       LINVRNT(cl_lock_invariant_trusted(env, lock));
+       ENTRY;
 
-        result = 0;
-        if (lock->cll_guarder == cfs_current()) {
-                LINVRNT(lock->cll_depth > 0);
-                cl_lock_mutex_tail(env, lock);
+       result = 0;
+       if (lock->cll_guarder == current) {
+               LINVRNT(lock->cll_depth > 0);
+               cl_lock_mutex_tail(env, lock);
        } else if (mutex_trylock(&lock->cll_guard)) {
-                LINVRNT(lock->cll_depth == 0);
-                lock->cll_guarder = cfs_current();
-                cl_lock_mutex_tail(env, lock);
-        } else
-                result = -EBUSY;
-        RETURN(result);
+               LINVRNT(lock->cll_depth == 0);
+               lock->cll_guarder = current;
+               cl_lock_mutex_tail(env, lock);
+       } else
+               result = -EBUSY;
+       RETURN(result);
 }
 EXPORT_SYMBOL(cl_lock_mutex_try);
 
@@ -741,23 +741,23 @@ EXPORT_SYMBOL(cl_lock_mutex_try);
  */
 void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
 {
-        struct cl_thread_counters *counters;
+       struct cl_thread_counters *counters;
 
-        LINVRNT(cl_lock_invariant(env, lock));
-        LINVRNT(cl_lock_is_mutexed(lock));
-        LINVRNT(lock->cll_guarder == cfs_current());
-        LINVRNT(lock->cll_depth > 0);
+       LINVRNT(cl_lock_invariant(env, lock));
+       LINVRNT(cl_lock_is_mutexed(lock));
+       LINVRNT(lock->cll_guarder == current);
+       LINVRNT(lock->cll_depth > 0);
 
-        counters = cl_lock_counters(env, lock);
-        LINVRNT(counters->ctc_nr_locks_locked > 0);
+       counters = cl_lock_counters(env, lock);
+       LINVRNT(counters->ctc_nr_locks_locked > 0);
 
-        cl_lock_trace(D_TRACE, env, "put mutex", lock);
-        lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
-        counters->ctc_nr_locks_locked--;
-        if (--lock->cll_depth == 0) {
-                lock->cll_guarder = NULL;
+       cl_lock_trace(D_TRACE, env, "put mutex", lock);
+       lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
+       counters->ctc_nr_locks_locked--;
+       if (--lock->cll_depth == 0) {
+               lock->cll_guarder = NULL;
                mutex_unlock(&lock->cll_guard);
-        }
+       }
 }
 EXPORT_SYMBOL(cl_lock_mutex_put);
 
@@ -766,7 +766,7 @@ EXPORT_SYMBOL(cl_lock_mutex_put);
  */
 int cl_lock_is_mutexed(struct cl_lock *lock)
 {
-        return lock->cll_guarder == cfs_current();
+       return lock->cll_guarder == current;
 }
 EXPORT_SYMBOL(cl_lock_is_mutexed);
 
@@ -947,7 +947,7 @@ EXPORT_SYMBOL(cl_lock_hold_release);
 int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
 {
        wait_queue_t waiter;
-       cfs_sigset_t blocked;
+       sigset_t blocked;
        int result;
 
        ENTRY;
@@ -2026,17 +2026,17 @@ EXPORT_SYMBOL(cl_lock_discard_pages);
  */
 void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
 {
-        struct cl_object_header *head;
-        struct cl_lock          *lock;
+       struct cl_object_header *head;
+       struct cl_lock          *lock;
 
-        ENTRY;
-        head = cl_object_header(obj);
-        /*
-         * If locks are destroyed without cancellation, all pages must be
-         * already destroyed (as otherwise they will be left unprotected).
-         */
-        LASSERT(ergo(!cancel,
-                     head->coh_tree.rnode == NULL && head->coh_pages == 0));
+       ENTRY;
+       head = cl_object_header(obj);
+       /*
+        * If locks are destroyed without cancellation, all pages must be
+        * already destroyed (as otherwise they will be left unprotected).
+        */
+       LASSERT(ergo(!cancel,
+                    head->coh_tree.rnode == NULL && head->coh_pages == 0));
 
        spin_lock(&head->coh_lock_guard);
        while (!cfs_list_empty(&head->coh_locks)) {
@@ -2044,29 +2044,29 @@ void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
                                    struct cl_lock, cll_linkage);
                cl_lock_get_trust(lock);
                spin_unlock(&head->coh_lock_guard);
-                lu_ref_add(&lock->cll_reference, "prune", cfs_current());
+               lu_ref_add(&lock->cll_reference, "prune", current);
 
 again:
-                cl_lock_mutex_get(env, lock);
-                if (lock->cll_state < CLS_FREEING) {
-                        LASSERT(lock->cll_users <= 1);
-                        if (unlikely(lock->cll_users == 1)) {
-                                struct l_wait_info lwi = { 0 };
-
-                                cl_lock_mutex_put(env, lock);
-                                l_wait_event(lock->cll_wq,
-                                             lock->cll_users == 0,
-                                             &lwi);
-                                goto again;
-                        }
-
-                        if (cancel)
-                                cl_lock_cancel(env, lock);
-                        cl_lock_delete(env, lock);
-                }
-                cl_lock_mutex_put(env, lock);
-                lu_ref_del(&lock->cll_reference, "prune", cfs_current());
-                cl_lock_put(env, lock);
+               cl_lock_mutex_get(env, lock);
+               if (lock->cll_state < CLS_FREEING) {
+                       LASSERT(lock->cll_users <= 1);
+                       if (unlikely(lock->cll_users == 1)) {
+                               struct l_wait_info lwi = { 0 };
+
+                               cl_lock_mutex_put(env, lock);
+                               l_wait_event(lock->cll_wq,
+                                            lock->cll_users == 0,
+                                            &lwi);
+                               goto again;
+                       }
+
+                       if (cancel)
+                               cl_lock_cancel(env, lock);
+                       cl_lock_delete(env, lock);
+               }
+               cl_lock_mutex_put(env, lock);
+               lu_ref_del(&lock->cll_reference, "prune", current);
+               cl_lock_put(env, lock);
                spin_lock(&head->coh_lock_guard);
        }
        spin_unlock(&head->coh_lock_guard);
index 6073df5..662caff 100644 (file)
@@ -547,18 +547,18 @@ struct cl_env {
          */
         cfs_hlist_node_t  ce_node;
 #endif
-        /**
-         * Owner for the current cl_env.
-         *
-         * If LL_TASK_CL_ENV is defined, this point to the owning cfs_current(),
-         * only for debugging purpose ;
-         * Otherwise hash is used, and this is the key for cfs_hash.
-         * Now current thread pid is stored. Note using thread pointer would
-         * lead to unbalanced hash because of its specific allocation locality
-         * and could be varied for different platforms and OSes, even different
-         * OS versions.
-         */
-        void             *ce_owner;
+       /**
+        * Owner for the current cl_env.
+        *
+        * If LL_TASK_CL_ENV is defined, this point to the owning current,
+        * only for debugging purpose ;
+        * Otherwise hash is used, and this is the key for cfs_hash.
+        * Now current thread pid is stored. Note using thread pointer would
+        * lead to unbalanced hash because of its specific allocation locality
+        * and could be varied for different platforms and OSes, even different
+        * OS versions.
+        */
+       void             *ce_owner;
 
         /*
          * Linkage into global list of all client environments. Used for
@@ -649,35 +649,35 @@ static cfs_hash_ops_t cl_env_hops = {
 
 static inline struct cl_env *cl_env_fetch(void)
 {
-        struct cl_env *cle;
+       struct cl_env *cle;
 
-        cle = cfs_hash_lookup(cl_env_hash, (void *) (long) cfs_current()->pid);
-        LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
-        return cle;
+       cle = cfs_hash_lookup(cl_env_hash, (void *) (long) current->pid);
+       LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
+       return cle;
 }
 
 static inline void cl_env_attach(struct cl_env *cle)
 {
-        if (cle) {
-                int rc;
-
-                LASSERT(cle->ce_owner == NULL);
-                cle->ce_owner = (void *) (long) cfs_current()->pid;
-                rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
-                                         &cle->ce_node);
-                LASSERT(rc == 0);
-        }
+       if (cle) {
+               int rc;
+
+               LASSERT(cle->ce_owner == NULL);
+               cle->ce_owner = (void *) (long) current->pid;
+               rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
+                                        &cle->ce_node);
+               LASSERT(rc == 0);
+       }
 }
 
 static inline void cl_env_do_detach(struct cl_env *cle)
 {
-        void *cookie;
+       void *cookie;
 
-        LASSERT(cle->ce_owner == (void *) (long) cfs_current()->pid);
-        cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
-                              &cle->ce_node);
-        LASSERT(cookie == cle);
-        cle->ce_owner = NULL;
+       LASSERT(cle->ce_owner == (void *) (long) current->pid);
+       cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
+                             &cle->ce_node);
+       LASSERT(cookie == cle);
+       cle->ce_owner = NULL;
 }
 
 static int cl_env_store_init(void) {
@@ -702,30 +702,30 @@ static void cl_env_store_fini(void) {
 
 static inline struct cl_env *cl_env_fetch(void)
 {
-        struct cl_env *cle;
+       struct cl_env *cle;
 
-        cle = cfs_current()->LL_TASK_CL_ENV;
-        if (cle && cle->ce_magic != &cl_env_init0)
-                cle = NULL;
-        return cle;
+       cle = current->LL_TASK_CL_ENV;
+       if (cle && cle->ce_magic != &cl_env_init0)
+               cle = NULL;
+       return cle;
 }
 
 static inline void cl_env_attach(struct cl_env *cle)
 {
-        if (cle) {
-                LASSERT(cle->ce_owner == NULL);
-                cle->ce_owner = cfs_current();
-                cle->ce_prev = cfs_current()->LL_TASK_CL_ENV;
-                cfs_current()->LL_TASK_CL_ENV = cle;
-        }
+       if (cle) {
+               LASSERT(cle->ce_owner == NULL);
+               cle->ce_owner = current;
+               cle->ce_prev = current->LL_TASK_CL_ENV;
+               current->LL_TASK_CL_ENV = cle;
+       }
 }
 
 static inline void cl_env_do_detach(struct cl_env *cle)
 {
-        LASSERT(cle->ce_owner == cfs_current());
-        LASSERT(cfs_current()->LL_TASK_CL_ENV == cle);
-        cfs_current()->LL_TASK_CL_ENV = cle->ce_prev;
-        cle->ce_owner = NULL;
+       LASSERT(cle->ce_owner == current);
+       LASSERT(current->LL_TASK_CL_ENV == cle);
+       current->LL_TASK_CL_ENV = cle->ce_prev;
+       cle->ce_owner = NULL;
 }
 
 static int cl_env_store_init(void) { return 0; }
index 3bc95c8..5db41e8 100644 (file)
@@ -223,39 +223,39 @@ int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
                          */
                         PASSERT(env, page, slice != NULL);
 
-                        page = slice->cpl_page;
-                        /*
-                         * Can safely call cl_page_get_trust() under
-                         * radix-tree spin-lock.
-                         *
-                         * XXX not true, because @page is from object another
-                         * than @hdr and protected by different tree lock.
-                         */
-                        cl_page_get_trust(page);
-                        lu_ref_add_atomic(&page->cp_reference,
-                                          "gang_lookup", cfs_current());
-                        pvec[j++] = page;
-                }
+                       page = slice->cpl_page;
+                       /*
+                        * Can safely call cl_page_get_trust() under
+                        * radix-tree spin-lock.
+                        *
+                        * XXX not true, because @page is from object another
+                        * than @hdr and protected by different tree lock.
+                        */
+                       cl_page_get_trust(page);
+                       lu_ref_add_atomic(&page->cp_reference,
+                                         "gang_lookup", current);
+                       pvec[j++] = page;
+               }
 
-                /*
-                 * Here a delicate locking dance is performed. Current thread
-                 * holds a reference to a page, but has to own it before it
-                 * can be placed into queue. Owning implies waiting, so
-                 * radix-tree lock is to be released. After a wait one has to
-                 * check that pages weren't truncated (cl_page_own() returns
-                 * error in the latter case).
-                 */
+               /*
+                * Here a delicate locking dance is performed. Current thread
+                * holds a reference to a page, but has to own it before it
+                * can be placed into queue. Owning implies waiting, so
+                * radix-tree lock is to be released. After a wait one has to
+                * check that pages weren't truncated (cl_page_own() returns
+                * error in the latter case).
+                */
                spin_unlock(&hdr->coh_page_guard);
-                tree_lock = 0;
-
-                for (i = 0; i < j; ++i) {
-                        page = pvec[i];
-                        if (res == CLP_GANG_OKAY)
-                                res = (*cb)(env, io, page, cbdata);
-                        lu_ref_del(&page->cp_reference,
-                                   "gang_lookup", cfs_current());
-                        cl_page_put(env, page);
-                }
+               tree_lock = 0;
+
+               for (i = 0; i < j; ++i) {
+                       page = pvec[i];
+                       if (res == CLP_GANG_OKAY)
+                               res = (*cb)(env, io, page, cbdata);
+                       lu_ref_del(&page->cp_reference,
+                                  "gang_lookup", current);
+                       cl_page_put(env, page);
+               }
                if (nr < CLT_PVEC_SIZE || end_of_region)
                        break;
 
index ac2b54c..9213b20 100644 (file)
@@ -1788,7 +1788,7 @@ int obd_zombie_impexp_kill(void *arg)
 int obd_zombie_impexp_init(void)
 {
 #ifdef __KERNEL__
-       cfs_task_t *task;
+       struct task_struct *task;
 #endif
 
        CFS_INIT_LIST_HEAD(&obd_zombie_imports);
index 9df58a3..37d2d3a 100644 (file)
@@ -265,42 +265,42 @@ int obd_proc_read_pinger(char *page, char **start, off_t off, int count,
 static int obd_proc_read_health(char *page, char **start, off_t off,
                                 int count, int *eof, void *data)
 {
-        int rc = 0, i;
-        *eof = 1;
+       int rc = 0, i;
+       *eof = 1;
 
-        if (libcfs_catastrophe)
-                rc += snprintf(page + rc, count - rc, "LBUG\n");
+       if (libcfs_catastrophe)
+               rc += snprintf(page + rc, count - rc, "LBUG\n");
 
        read_lock(&obd_dev_lock);
-        for (i = 0; i < class_devno_max(); i++) {
-                struct obd_device *obd;
+       for (i = 0; i < class_devno_max(); i++) {
+               struct obd_device *obd;
 
-                obd = class_num2obd(i);
-                if (obd == NULL || !obd->obd_attached || !obd->obd_set_up)
-                        continue;
+               obd = class_num2obd(i);
+               if (obd == NULL || !obd->obd_attached || !obd->obd_set_up)
+                       continue;
 
-                LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
-                if (obd->obd_stopping)
-                        continue;
+               LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
+               if (obd->obd_stopping)
+                       continue;
 
-                class_incref(obd, __FUNCTION__, cfs_current());
+               class_incref(obd, __FUNCTION__, current);
                read_unlock(&obd_dev_lock);
 
-                if (obd_health_check(NULL, obd)) {
-                        rc += snprintf(page + rc, count - rc,
-                                       "device %s reported unhealthy\n",
-                                       obd->obd_name);
-                }
-                class_decref(obd, __FUNCTION__, cfs_current());
+               if (obd_health_check(NULL, obd)) {
+                       rc += snprintf(page + rc, count - rc,
+                                      "device %s reported unhealthy\n",
+                                      obd->obd_name);
+               }
+               class_decref(obd, __FUNCTION__, current);
                read_lock(&obd_dev_lock);
-        }
+       }
        read_unlock(&obd_dev_lock);
 
-        if (rc == 0)
-                return snprintf(page, count, "healthy\n");
+       if (rc == 0)
+               return snprintf(page, count, "healthy\n");
 
-        rc += snprintf(page + rc, count - rc, "NOT HEALTHY\n");
-        return rc;
+       rc += snprintf(page + rc, count - rc, "NOT HEALTHY\n");
+       return rc;
 }
 
 static int obd_proc_rd_jobid_var(char *page, char **start, off_t off,
index 3925f74..480333a 100644 (file)
@@ -1020,7 +1020,7 @@ static int llog_test_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
        rc = lu_context_init(&test_session, LCT_SESSION);
        if (rc)
                GOTO(cleanup_env, rc);
-       test_session.lc_thread = (struct ptlrpc_thread *)cfs_current();
+       test_session.lc_thread = (struct ptlrpc_thread *)current;
        lu_context_enter(&test_session);
        env.le_ses = &test_session;
 
index cf5f7fa..b4344ec 100644 (file)
@@ -1201,7 +1201,7 @@ static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
                        *cookie = el->el_cookie;
                        spin_unlock(&ec->ec_lock);
                } else {
-                       cl_lock_release(env, lck, "ec enqueue", cfs_current());
+                       cl_lock_release(env, lck, "ec enqueue", current);
                }
        }
        RETURN(rc);
index d11ff0e..1096d03 100644 (file)
@@ -964,7 +964,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
                cfs_list_del_init(&oap->oap_pending_item);
 
                cl_page_get(page);
-               lu_ref_add(&page->cp_reference, "truncate", cfs_current());
+               lu_ref_add(&page->cp_reference, "truncate", current);
 
                if (cl_page_own(env, io, page) == 0) {
                        cl_page_unmap(env, io, page);
@@ -975,7 +975,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
                        LASSERT(0);
                }
 
-               lu_ref_del(&page->cp_reference, "truncate", cfs_current());
+               lu_ref_del(&page->cp_reference, "truncate", current);
                cl_page_put(env, page);
 
                --ext->oe_nr_pages;
@@ -1832,7 +1832,7 @@ static int try_to_add_extent_for_io(struct client_obd *cli,
                RETURN(0);
 
        cfs_list_for_each_entry(tmp, rpclist, oe_link) {
-               EASSERT(tmp->oe_owner == cfs_current(), tmp);
+               EASSERT(tmp->oe_owner == current, tmp);
 #if 0
                if (overlapped(tmp, ext)) {
                        OSC_EXTENT_DUMP(D_ERROR, tmp, "overlapped %p.\n", ext);
@@ -1850,7 +1850,7 @@ static int try_to_add_extent_for_io(struct client_obd *cli,
 
        *pc += ext->oe_nr_pages;
        cfs_list_move_tail(&ext->oe_link, rpclist);
-       ext->oe_owner = cfs_current();
+       ext->oe_owner = current;
        RETURN(1);
 }
 
@@ -2103,7 +2103,7 @@ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli,
                cl_object_get(obj);
                client_obd_list_unlock(&cli->cl_loi_list_lock);
                lu_object_ref_add_at(&obj->co_lu, &link, "check",
-                                    cfs_current());
+                                    current);
 
                /* attempt some read/write balancing by alternating between
                 * reads and writes in an object.  The makes_rpc checks here
@@ -2145,7 +2145,7 @@ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli,
 
                osc_list_maint(cli, osc);
                lu_object_ref_del_at(&obj->co_lu, &link, "check",
-                                    cfs_current());
+                                    current);
                cl_object_put(env, obj);
 
                client_obd_list_lock(&cli->cl_loi_list_lock);
@@ -2879,7 +2879,7 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
                                EASSERT(ext->oe_start >= start &&
                                        ext->oe_max_end <= end, ext);
                                osc_extent_state_set(ext, OES_LOCKING);
-                               ext->oe_owner = cfs_current();
+                               ext->oe_owner = current;
                                cfs_list_move_tail(&ext->oe_link,
                                                   &discard_list);
                                osc_update_pending(obj, OBD_BRW_WRITE,
index a2e1867..a8c2ec5 100644 (file)
@@ -383,14 +383,14 @@ struct osc_page {
                 */
                cfs_list_t            ops_inflight;
        };
-        /**
-         * Thread that submitted this page for transfer. For debugging.
-         */
-        cfs_task_t           *ops_submitter;
-        /**
-         * Submit time - the time when the page is starting RPC. For debugging.
-         */
-        cfs_time_t            ops_submit_time;
+       /**
+        * Thread that submitted this page for transfer. For debugging.
+        */
+       struct task_struct           *ops_submitter;
+       /**
+        * Submit time - the time when the page is starting RPC. For debugging.
+        */
+       cfs_time_t            ops_submit_time;
 
         /**
          * A lock of which we hold a reference covers this page. Only used by
@@ -672,7 +672,7 @@ struct osc_extent {
        /** lock covering this extent */
        struct cl_lock    *oe_osclock;
        /** terminator of this extent. Must be true if this extent is in IO. */
-       cfs_task_t        *oe_owner;
+       struct task_struct        *oe_owner;
        /** return value of writeback. If somebody is waiting for this extent,
         * this value can be known by outside world. */
        int                oe_rc;
index 7791e89..a03a1d9 100644 (file)
@@ -307,12 +307,12 @@ static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
                  * guarantees that all osc references on dlmlock were
                  * released. osc_dlm_blocking_ast0() relies on that.
                  */
-                if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
-                        cl_lock_get_trust(lock);
-                        lu_ref_add_atomic(&lock->cll_reference,
-                                          "ast", cfs_current());
-                } else
-                        olck = NULL;
+               if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
+                       cl_lock_get_trust(lock);
+                       lu_ref_add_atomic(&lock->cll_reference,
+                                         "ast", current);
+               } else
+                       olck = NULL;
         }
        spin_unlock(&osc_ast_guard);
        unlock_res_and_lock(dlm_lock);
@@ -321,11 +321,11 @@ static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
 
 static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
 {
-        struct cl_lock *lock;
+       struct cl_lock *lock;
 
-        lock = olck->ols_cl.cls_lock;
-        lu_ref_del(&lock->cll_reference, "ast", cfs_current());
-        cl_lock_put(env, lock);
+       lock = olck->ols_cl.cls_lock;
+       lu_ref_del(&lock->cll_reference, "ast", current);
+       cl_lock_put(env, lock);
 }
 
 /**
index 2bf7e9e..8985109 100644 (file)
@@ -208,7 +208,7 @@ static void osc_page_transfer_add(const struct lu_env *env,
 
        spin_lock(&obj->oo_seatbelt);
        cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
-       opg->ops_submitter = cfs_current();
+       opg->ops_submitter = current;
        spin_unlock(&obj->oo_seatbelt);
 }
 
index 1557a65..799ef20 100644 (file)
@@ -173,7 +173,7 @@ struct osp_device {
         */
        struct obd_statfs                opd_statfs;
        cfs_time_t                       opd_statfs_fresh_till;
-       cfs_timer_t                      opd_statfs_timer;
+       struct timer_list                        opd_statfs_timer;
        int                              opd_statfs_update_in_progress;
        /* how often to update statfs data */
        int                              opd_statfs_maxage;
index 7f68596..5b8065a 100644 (file)
@@ -1240,7 +1240,7 @@ out:
 int osp_init_precreate(struct osp_device *d)
 {
        struct l_wait_info       lwi = { 0 };
-       cfs_task_t              *task;
+       struct task_struct              *task;
 
        ENTRY;
 
index 906421b..ae982d0 100644 (file)
@@ -2070,11 +2070,11 @@ static int ost_rw_hpreq_check(struct ptlrpc_request *req)
         opd.opd_extent.end = nb->offset + nb->len - 1;
         opd.opd_timeout = prolong_timeout(req);
 
-        DEBUG_REQ(D_RPCTRACE, req,
-               "%s %s: refresh rw locks: " LPU64"/"LPU64" ("LPU64"->"LPU64")\n",
-               obd->obd_name, cfs_current()->comm,
-               opd.opd_resid.name[0], opd.opd_resid.name[1],
-               opd.opd_extent.start, opd.opd_extent.end);
+       DEBUG_REQ(D_RPCTRACE, req,
+              "%s %s: refresh rw locks: " LPU64"/"LPU64" ("LPU64"->"LPU64")\n",
+              obd->obd_name, current->comm,
+              opd.opd_resid.name[0], opd.opd_resid.name[1],
+              opd.opd_extent.start, opd.opd_extent.end);
 
         ost_prolong_locks(&opd);
 
index 7889620..f6d7251 100644 (file)
@@ -2140,19 +2140,19 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
                 /* LU-769 - if we ignored the signal because it was already
                  * pending when we started, we need to handle it now or we risk
                  * it being ignored forever */
-                if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
-                    cfs_signal_pending()) {
-                        cfs_sigset_t blocked_sigs =
-                                           cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
-
-                        /* In fact we only interrupt for the "fatal" signals
-                         * like SIGINT or SIGKILL. We still ignore less
-                         * important signals since ptlrpc set is not easily
-                         * reentrant from userspace again */
-                        if (cfs_signal_pending())
-                                ptlrpc_interrupted_set(set);
+               if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
+                   cfs_signal_pending()) {
+                       sigset_t blocked_sigs =
+                                          cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
+
+                       /* In fact we only interrupt for the "fatal" signals
+                        * like SIGINT or SIGKILL. We still ignore less
+                        * important signals since ptlrpc set is not easily
+                        * reentrant from userspace again */
+                       if (cfs_signal_pending())
+                               ptlrpc_interrupted_set(set);
                        cfs_restore_sigs(blocked_sigs);
-                }
+               }
 
                 LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
 
index 82e1c63..14e7983 100644 (file)
@@ -1247,15 +1247,15 @@ int gss_kt_instantiate(struct key *key, const void *data, size_t datalen)
          * the session keyring is created upon upcall, and don't change all
          * the way until upcall finished, so rcu lock is not needed here.
          */
-       LASSERT(key_tgcred(cfs_current())->session_keyring);
+       LASSERT(key_tgcred(current)->session_keyring);
 
        lockdep_off();
-       rc = key_link(key_tgcred(cfs_current())->session_keyring, key);
+       rc = key_link(key_tgcred(current)->session_keyring, key);
        lockdep_on();
        if (unlikely(rc)) {
                CERROR("failed to link key %08x to keyring %08x: %d\n",
                       key->serial,
-                      key_tgcred(cfs_current())->session_keyring->serial, rc);
+                      key_tgcred(current)->session_keyring->serial, rc);
                RETURN(rc);
        }
 
index 2d5d4b4..30e8471 100644 (file)
@@ -1336,7 +1336,7 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
 
 #ifdef __KERNEL__
                {
-               cfs_task_t *task;
+               struct task_struct *task;
                /* bug 17802:  XXX client_disconnect_export vs connect request
                 * race. if client will evicted at this time, we start
                 * invalidate thread without reference to import and import can
index e2ba1ee..da5ff2a 100644 (file)
@@ -1240,7 +1240,7 @@ int lprocfs_wr_evict_client(struct file *file, const char *buffer,
                 goto out;
         }
         tmpbuf = cfs_firststr(kbuf, min_t(unsigned long, BUFLEN - 1, count));
-       class_incref(obd, __FUNCTION__, cfs_current());
+       class_incref(obd, __FUNCTION__, current);
 
         if (strncmp(tmpbuf, "nid:", 4) == 0)
                 obd_export_evict_by_nid(obd, tmpbuf + 4);
@@ -1249,7 +1249,7 @@ int lprocfs_wr_evict_client(struct file *file, const char *buffer,
         else
                 obd_export_evict_by_uuid(obd, tmpbuf);
 
-       class_decref(obd, __FUNCTION__, cfs_current());
+       class_decref(obd, __FUNCTION__, current);
 
 out:
         OBD_FREE(kbuf, BUFLEN);
index b55d5bc..f5241f2 100644 (file)
@@ -679,7 +679,7 @@ static int ping_evictor_main(void *arg)
 
 void ping_evictor_start(void)
 {
-       cfs_task_t *task;
+       struct task_struct *task;
 
        if (++pet_refcount > 1)
                return;
index ee2c15f..ecebfc9 100644 (file)
@@ -420,7 +420,7 @@ static int ptlrpcd(void *arg)
                                if (++index >= num_possible_cpus())
                                        index = 0;
                        }
-                       set_cpus_allowed_ptr(cfs_current(),
+                       set_cpus_allowed_ptr(current,
                                     cpumask_of_node(cpu_to_node(index)));
                }
        }
@@ -713,7 +713,7 @@ int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
                GOTO(out_set, rc);
 
        {
-               cfs_task_t *task;
+               struct task_struct *task;
                if (index >= 0) {
                        rc = ptlrpcd_bind(index, max);
                        if (rc < 0)
index 5e99ca8..c2763dc 100644 (file)
@@ -222,7 +222,7 @@ again:
 int sptlrpc_gc_init(void)
 {
        struct l_wait_info lwi = { 0 };
-       cfs_task_t *task;
+       struct task_struct *task;
 
        mutex_init(&sec_gc_mutex);
        spin_lock_init(&sec_gc_list_lock);
index 9dd2929..f722617 100644 (file)
@@ -764,7 +764,7 @@ int qmt_start_reba_thread(struct qmt_device *qmt)
 {
        struct ptlrpc_thread    *thread = &qmt->qmt_reba_thread;
        struct l_wait_info       lwi    = { 0 };
-       cfs_task_t              *task;
+       struct task_struct              *task;
        ENTRY;
 
        task = kthread_run(qmt_reba_thread, (void *)qmt,
index a615877..19f9f17 100644 (file)
@@ -495,7 +495,7 @@ int qsd_start_upd_thread(struct qsd_instance *qsd)
 {
        struct ptlrpc_thread    *thread = &qsd->qsd_upd_thread;
        struct l_wait_info       lwi = { 0 };
-       cfs_task_t              *task;
+       struct task_struct              *task;
        ENTRY;
 
        task = kthread_run(qsd_upd_thread, (void *)qsd,