LU-1346 libcfs: cleanup waitq related primitives 55/6955/8
author Peng Tao <tao.peng@emc.com>
Wed, 11 Sep 2013 17:01:45 +0000 (01:01 +0800)
committer Oleg Drokin <oleg.drokin@intel.com>
Fri, 13 Sep 2013 05:06:26 +0000 (05:06 +0000)
Plus some manual changes:

1. Remove the __wait_event_timeout definition
2. Change cfs_waitq_wait_event_timeout and
   cfs_waitq_wait_event_interruptible_timeout to the Linux kernel API
   (sketched below, after the sign-offs)
3. Replace some function definitions in linux-prim.c with macros in
   linux-prim.h

Signed-off-by: Peng Tao <tao.peng@emc.com>
Signed-off-by: Liu Xuezhao <xuezhao.liu@emc.com>
Signed-off-by: James Simmons <uja.ornl@gmail.com>
Change-Id: I7e53f3deac9e4076e78c109662ff9d1e90239e8d
Reviewed-on: http://review.whamcloud.com/6955
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Keith Mannthey <keith.mannthey@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
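For illustration of manual change 2: the old libcfs wrapper assigned its result through an out-parameter, while the Linux macro evaluates to the result itself. The caller below is hypothetical (data_ready(), waitq and timeout are not from this patch); note that wait_event_timeout() returns 0 if the timeout elapsed with the condition still false, and the remaining jiffies (at least 1) otherwise.

    /* Before: libcfs wrapper with an out-parameter. */
    long rc;
    cfs_waitq_wait_event_timeout(waitq, data_ready(), timeout, rc);

    /* After: stock Linux API; the macro's value is the result. */
    rc = wait_event_timeout(waitq, data_ready(), timeout);
    if (rc == 0)
            CERROR("timed out\n");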
147 files changed:
contrib/scripts/libcfs_cleanup.sed
libcfs/include/libcfs/darwin/darwin-prim.h
libcfs/include/libcfs/libcfs_fail.h
libcfs/include/libcfs/libcfs_prim.h
libcfs/include/libcfs/linux/linux-prim.h
libcfs/include/libcfs/lucache.h
libcfs/include/libcfs/user-lock.h
libcfs/include/libcfs/user-prim.h
libcfs/include/libcfs/winnt/winnt-prim.h
libcfs/libcfs/darwin/darwin-debug.c
libcfs/libcfs/darwin/darwin-prim.c
libcfs/libcfs/darwin/darwin-proc.c
libcfs/libcfs/darwin/darwin-sync.c
libcfs/libcfs/debug.c
libcfs/libcfs/fail.c
libcfs/libcfs/hash.c
libcfs/libcfs/linux/linux-cpu.c
libcfs/libcfs/linux/linux-prim.c
libcfs/libcfs/linux/linux-proc.c
libcfs/libcfs/lwt.c
libcfs/libcfs/module.c
libcfs/libcfs/tracefile.c
libcfs/libcfs/tracefile.h
libcfs/libcfs/upcall_cache.c
libcfs/libcfs/user-lock.c
libcfs/libcfs/user-prim.c
libcfs/libcfs/watchdog.c
libcfs/libcfs/winnt/winnt-curproc.c
libcfs/libcfs/winnt/winnt-prim.c
libcfs/libcfs/winnt/winnt-sync.c
libcfs/libcfs/workitem.c
lnet/include/lnet/lib-types.h
lnet/klnds/mxlnd/mxlnd_cb.c
lnet/klnds/o2iblnd/o2iblnd.c
lnet/klnds/o2iblnd/o2iblnd.h
lnet/klnds/o2iblnd/o2iblnd_cb.c
lnet/klnds/ptllnd/ptllnd.c
lnet/klnds/ptllnd/ptllnd.h
lnet/klnds/ptllnd/ptllnd_cb.c
lnet/klnds/ptllnd/ptllnd_peer.c
lnet/klnds/ptllnd/ptllnd_rx_buf.c
lnet/klnds/ptllnd/ptllnd_tx.c
lnet/klnds/qswlnd/qswlnd.c
lnet/klnds/qswlnd/qswlnd.h
lnet/klnds/qswlnd/qswlnd_cb.c
lnet/klnds/ralnd/ralnd.c
lnet/klnds/ralnd/ralnd.h
lnet/klnds/ralnd/ralnd_cb.c
lnet/klnds/socklnd/socklnd.c
lnet/klnds/socklnd/socklnd.h
lnet/klnds/socklnd/socklnd_cb.c
lnet/lnet/api-ni.c
lnet/lnet/lib-eq.c
lnet/lnet/router.c
lnet/selftest/conrpc.c
lnet/selftest/conrpc.h
lnet/selftest/rpc.c
lnet/selftest/timer.c
lustre/fid/fid_request.c
lustre/fld/fld_request.c
lustre/include/cl_object.h
lustre/include/liblustre.h
lustre/include/lu_object.h
lustre/include/lustre_dlm.h
lustre/include/lustre_fid.h
lustre/include/lustre_import.h
lustre/include/lustre_lib.h
lustre/include/lustre_log.h
lustre/include/lustre_mdc.h
lustre/include/lustre_net.h
lustre/include/obd.h
lustre/lclient/lcommon_cl.c
lustre/ldlm/ldlm_flock.c
lustre/ldlm/ldlm_lib.c
lustre/ldlm/ldlm_lock.c
lustre/ldlm/ldlm_lockd.c
lustre/ldlm/ldlm_pool.c
lustre/ldlm/ldlm_request.c
lustre/ldlm/ldlm_resource.c
lustre/lfsck/lfsck_engine.c
lustre/lfsck/lfsck_lib.c
lustre/llite/llite_capa.c
lustre/llite/llite_close.c
lustre/llite/llite_internal.h
lustre/llite/llite_lib.c
lustre/llite/lloop.c
lustre/llite/statahead.c
lustre/lod/lod_lov.c
lustre/lov/lov_cl_internal.h
lustre/lov/lov_internal.h
lustre/lov/lov_io.c
lustre/lov/lov_object.c
lustre/lov/lov_request.c
lustre/mdc/mdc_lib.c
lustre/mdc/mdc_request.c
lustre/mdt/mdt_capa.c
lustre/mdt/mdt_coordinator.c
lustre/mgc/mgc_request.c
lustre/mgs/mgs_internal.h
lustre/mgs/mgs_nids.c
lustre/obdclass/cl_io.c
lustre/obdclass/cl_lock.c
lustre/obdclass/cl_page.c
lustre/obdclass/genops.c
lustre/obdclass/llog_obd.c
lustre/obdclass/lprocfs_status.c
lustre/obdclass/lu_object.c
lustre/obdclass/obd_config.c
lustre/obdecho/echo.c
lustre/obdecho/echo_client.c
lustre/osc/osc_cache.c
lustre/osc/osc_cl_internal.h
lustre/osc/osc_internal.h
lustre/osc/osc_lock.c
lustre/osc/osc_page.c
lustre/osc/osc_request.c
lustre/osd-ldiskfs/osd_handler.c
lustre/osd-ldiskfs/osd_internal.h
lustre/osd-ldiskfs/osd_io.c
lustre/osd-ldiskfs/osd_scrub.c
lustre/osp/osp_dev.c
lustre/osp/osp_internal.h
lustre/osp/osp_precreate.c
lustre/osp/osp_sync.c
lustre/ost/ost_handler.c
lustre/ptlrpc/client.c
lustre/ptlrpc/events.c
lustre/ptlrpc/gss/gss_svc_upcall.c
lustre/ptlrpc/import.c
lustre/ptlrpc/niobuf.c
lustre/ptlrpc/pack_generic.c
lustre/ptlrpc/pinger.c
lustre/ptlrpc/ptlrpcd.c
lustre/ptlrpc/sec.c
lustre/ptlrpc/sec_bulk.c
lustre/ptlrpc/sec_gc.c
lustre/ptlrpc/service.c
lustre/quota/lquota_entry.c
lustre/quota/lquota_internal.h
lustre/quota/qmt_dev.c
lustre/quota/qmt_lock.c
lustre/quota/qsd_entry.c
lustre/quota/qsd_handler.c
lustre/quota/qsd_lib.c
lustre/quota/qsd_lock.c
lustre/quota/qsd_reint.c
lustre/quota/qsd_writeback.c

contrib/scripts/libcfs_cleanup.sed
index 12d5389..5f6af2b 100644
@@ -496,3 +496,50 @@ s/\bcfs_module_t\b/struct module/g
 # s/\bcfs_module\b/declare_module/g
 s/\bcfs_request_module\b/request_module/g
 /#[ \t]*define[ \t]*\brequest_module\b[ \t]*\brequest_module\b/d
+# Wait Queue
+s/\bCFS_TASK_INTERRUPTIBLE\b/TASK_INTERRUPTIBLE/g
+/#[ \t]*define[ \t]*\bTASK_INTERRUPTIBLE\b[ \t]*\bTASK_INTERRUPTIBLE\b/d
+s/\bCFS_TASK_UNINT\b/TASK_UNINTERRUPTIBLE/g
+/#[ \t]*define[ \t]*\bTASK_UNINTERRUPTIBLE\b[ \t]*\bTASK_UNINTERRUPTIBLE\b/d
+s/\bCFS_TASK_RUNNING\b/TASK_RUNNING/g
+/#[ \t]*define[ \t]*\bTASK_RUNNING\b[ \t]*\bTASK_RUNNING\b/d
+s/\bcfs_set_current_state\b/set_current_state/g
+/#[ \t]*define[ \t]*\bset_current_state\b *( *\w* *)[ \t]*\bset_current_state\b *( *\w* *)/d
+s/\bcfs_wait_event\b/wait_event/g
+/#[ \t]*define[ \t]*\bwait_event\b *( *\w* *, *\w* *)[ \t]*\bwait_event\b *( *\w* *, *\w* *)/d
+s/\bcfs_waitlink_t\b/wait_queue_t/g
+/typedef[ \t]*\bwait_queue_t\b[ \t]*\bwait_queue_t\b/d
+s/\bcfs_waitq_t\b/wait_queue_head_t/g
+/typedef[ \t]*\bwait_queue_head_t\b[ \t]*\bwait_queue_head_t\b/d
+#s/\bcfs_task_state_t\b/task_state_t/g
+s/\bcfs_waitq_init\b/init_waitqueue_head/g
+/#[ \t]*define[ \t]*\binit_waitqueue_head\b *( *\w* *)[ \t]*\binit_waitqueue_head\b *( *\w* *)/d
+s/\bcfs_waitlink_init\b/init_waitqueue_entry_current/g
+s/\bcfs_waitq_add\b/add_wait_queue/g
+/#[ \t]*define[ \t]*\badd_wait_queue\b *( *\w* *, *\w* *)[ \t]*\badd_wait_queue\b *( *\w* *, *\w* *)/d
+s/\bcfs_waitq_add_exclusive\b/add_wait_queue_exclusive/g
+/#[ \t]*define[ \t]*\badd_wait_queue_exclusive\b *( *\w* *, *\w* *)[ \t]*\badd_wait_queue_exclusive\b *( *\w* *, *\w* *)/d
+s/\bcfs_waitq_del\b/remove_wait_queue/g
+/#[ \t]*define[ \t]*\bremove_wait_queue\b *( *\w* *, *\w* *)[ \t]*\bremove_wait_queue\b *( *\w* *, *\w* *)/d
+s/\bcfs_waitq_active\b/waitqueue_active/g
+/#[ \t]*define[ \t]*\bwaitqueue_active\b *( *\w* *)[ \t]*\bwaitqueue_active\b *( *\w* *)/d
+s/\bcfs_waitq_signal\b/wake_up/g
+/#[ \t]*define[ \t]*\bwake_up\b *( *\w* *)[ \t]*\bwake_up\b *( *\w* *)/d
+s/\bcfs_waitq_signal_nr\b/wake_up_nr/g
+/#[ \t]*define[ \t]*\bwake_up_nr\b *( *\w* *, *\w* *)[ \t]*\bwake_up_nr\b *( *\w* *, *\w* *)/d
+s/\bcfs_waitq_broadcast\b/wake_up_all/g
+/#[ \t]*define[ \t]*\bwake_up_all\b *( *\w* *)[ \t]*\bwake_up_all\b *( *\w* *)/d
+s/\bcfs_waitq_wait\b/waitq_wait/g
+s/\bcfs_waitq_timedwait\b/waitq_timedwait/g
+s/\bcfs_schedule_timeout\b/schedule_timeout/g
+/#[ \t]*define[ \t]*\bschedule_timeout\b *( *\w* *)[ \t]*\bschedule_timeout\b *( *\w* *)/d
+s/\bcfs_schedule\b/schedule/g
+/#[ \t]*define[ \t]*\bschedule\b *( *)[ \t]*\bschedule\b *( *)/d
+s/\bcfs_need_resched\b/need_resched/g
+/#[ \t]*define[ \t]*\bneed_resched\b *( *)[ \t]*\bneed_resched\b *( *)/d
+s/\bcfs_cond_resched\b/cond_resched/g
+/#[ \t]*define[ \t]*\bcond_resched\b *( *)[ \t]*\bcond_resched\b *( *)/d
+s/\bcfs_waitq_add_exclusive_head\b/add_wait_queue_exclusive_head/g
+s/\bcfs_schedule_timeout_and_set_state\b/schedule_timeout_and_set_state/g
+s/\bCFS_MAX_SCHEDULE_TIMEOUT\b/MAX_SCHEDULE_TIMEOUT/g
+s/\bcfs_task_state_t\b/long/g
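Each rename added above is a whole-word substitution paired with a /d rule that deletes the now self-referential compatibility #define from the headers. As a rough before/after sketch (a hypothetical snippet, not a file from the tree), the script rewrites

    cfs_waitq_t     waitq;
    cfs_waitlink_t  link;

    cfs_waitq_init(&waitq);
    cfs_waitlink_init(&link);
    cfs_waitq_add(&waitq, &link);
    cfs_waitq_wait(&link, CFS_TASK_UNINT);
    cfs_waitq_del(&waitq, &link);

into

    wait_queue_head_t waitq;
    wait_queue_t      link;

    init_waitqueue_head(&waitq);
    init_waitqueue_entry_current(&link);
    add_wait_queue(&waitq, &link);
    waitq_wait(&link, TASK_UNINTERRUPTIBLE);
    remove_wait_queue(&waitq, &link);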
libcfs/include/libcfs/darwin/darwin-prim.h
index d2118e7..d3c5410 100644
@@ -214,34 +214,32 @@ extern cfs_task_t kthread_run(cfs_thread_t func, void *arg,
  */
 typedef struct cfs_waitq {
        struct ksleep_chan wq_ksleep_chan;
-} cfs_waitq_t;
+} wait_queue_head_t;
 
 typedef struct cfs_waitlink {
        struct cfs_waitq   *wl_waitq;
        struct ksleep_link  wl_ksleep_link;
-} cfs_waitlink_t;
+} wait_queue_t;
 
-typedef int cfs_task_state_t;
+#define TASK_INTERRUPTIBLE     THREAD_ABORTSAFE
+#define TASK_UNINTERRUPTIBLE           THREAD_UNINT
 
-#define CFS_TASK_INTERRUPTIBLE THREAD_ABORTSAFE
-#define CFS_TASK_UNINT         THREAD_UNINT
+void init_waitqueue_head(struct cfs_waitq *waitq);
+void init_waitqueue_entry_current(struct cfs_waitlink *link);
 
-void cfs_waitq_init(struct cfs_waitq *waitq);
-void cfs_waitlink_init(struct cfs_waitlink *link);
-
-void cfs_waitq_add(struct cfs_waitq *waitq, struct cfs_waitlink *link);
-void cfs_waitq_add_exclusive(struct cfs_waitq *waitq,
+void add_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+void add_wait_queue_exclusive(struct cfs_waitq *waitq,
                             struct cfs_waitlink *link);
-void cfs_waitq_del(struct cfs_waitq *waitq, struct cfs_waitlink *link);
-int  cfs_waitq_active(struct cfs_waitq *waitq);
+void remove_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+int  waitqueue_active(struct cfs_waitq *waitq);
 
-void cfs_waitq_signal(struct cfs_waitq *waitq);
-void cfs_waitq_signal_nr(struct cfs_waitq *waitq, int nr);
-void cfs_waitq_broadcast(struct cfs_waitq *waitq);
+void wake_up(struct cfs_waitq *waitq);
+void wake_up_nr(struct cfs_waitq *waitq, int nr);
+void wake_up_all(struct cfs_waitq *waitq);
 
-void cfs_waitq_wait(struct cfs_waitlink *link, cfs_task_state_t state);
-cfs_duration_t cfs_waitq_timedwait(struct cfs_waitlink *link,
-                                  cfs_task_state_t state, 
+void waitq_wait(struct cfs_waitlink *link, long state);
+cfs_duration_t waitq_timedwait(struct cfs_waitlink *link,
+                                  long state,
                                   cfs_duration_t timeout);
 
 /*
@@ -251,7 +249,7 @@ cfs_duration_t cfs_waitq_timedwait(struct cfs_waitlink *link,
 extern void thread_set_timer_deadline(__u64 deadline);
 extern void thread_cancel_timer(void);
 
-static inline int cfs_schedule_timeout(int state, int64_t timeout)
+static inline int schedule_timeout(int state, int64_t timeout)
 {
        int          result;
        
@@ -277,22 +275,22 @@ static inline int cfs_schedule_timeout(int state, int64_t timeout)
        return result;
 }
 
-#define cfs_schedule() cfs_schedule_timeout(CFS_TASK_UNINT, CFS_TICK)
-#define cfs_pause(tick)        cfs_schedule_timeout(CFS_TASK_UNINT, tick)
+#define schedule()     schedule_timeout(TASK_UNINTERRUPTIBLE, CFS_TICK)
+#define cfs_pause(tick)        schedule_timeout(TASK_UNINTERRUPTIBLE, tick)
 
 #define __wait_event(wq, condition)                            \
 do {                                                           \
        struct cfs_waitlink __wait;                             \
                                                                \
-       cfs_waitlink_init(&__wait);                             \
+       init_waitqueue_entry_current(&__wait);                  \
        for (;;) {                                              \
-               cfs_waitq_add(&wq, &__wait);                    \
+               add_wait_queue(&wq, &__wait);                   \
                if (condition)                                  \
                        break;                                  \
-               cfs_waitq_wait(&__wait, CFS_TASK_UNINT);        \
-               cfs_waitq_del(&wq, &__wait);                    \
+               waitq_wait(&__wait, TASK_UNINTERRUPTIBLE);      \
+               remove_wait_queue(&wq, &__wait);                \
        }                                                       \
-       cfs_waitq_del(&wq, &__wait);                            \
+       remove_wait_queue(&wq, &__wait);                        \
 } while (0)
 
 #define wait_event(wq, condition)                              \
@@ -306,24 +304,24 @@ do {                                                              \
 do {                                                           \
        struct cfs_waitlink __wait;                             \
                                                                \
-       cfs_waitlink_init(&__wait);                             \
+       init_waitqueue_entry_current(&__wait);                  \
        for (;;) {                                              \
                if (ex == 0)                                    \
-                       cfs_waitq_add(&wq, &__wait);            \
+                       add_wait_queue(&wq, &__wait);           \
                else                                            \
-                       cfs_waitq_add_exclusive(&wq, &__wait);  \
+                       add_wait_queue_exclusive(&wq, &__wait); \
                if (condition)                                  \
                        break;                                  \
                if (!cfs_signal_pending()) {                    \
-                       cfs_waitq_wait(&__wait,                 \
-                                      CFS_TASK_INTERRUPTIBLE); \
-                       cfs_waitq_del(&wq, &__wait);            \
+                       waitq_wait(&__wait,                     \
+                                      TASK_INTERRUPTIBLE);     \
+                       remove_wait_queue(&wq, &__wait);        \
                        continue;                               \
                }                                               \
                ret = -ERESTARTSYS;                             \
                break;                                          \
        }                                                       \
-       cfs_waitq_del(&wq, &__wait);                            \
+       remove_wait_queue(&wq, &__wait);                        \
 } while (0)
 
 #define wait_event_interruptible(wq, condition)                        \
@@ -354,14 +352,14 @@ extern void       wakeup_one __P((void * chan));
        } while (0)
        
 /* used in couple of places */
-static inline void sleep_on(cfs_waitq_t *waitq)
+static inline void sleep_on(wait_queue_head_t *waitq)
 {
-       cfs_waitlink_t link;
+       wait_queue_t link;
        
-       cfs_waitlink_init(&link);
-       cfs_waitq_add(waitq, &link);
-       cfs_waitq_wait(&link, CFS_TASK_UNINT);
-       cfs_waitq_del(waitq, &link);
+       init_waitqueue_entry_current(&link);
+       add_wait_queue(waitq, &link);
+       waitq_wait(&link, TASK_UNINTERRUPTIBLE);
+       remove_wait_queue(waitq, &link);
 }
 
 /*
libcfs/include/libcfs/libcfs_fail.h
index 19ade49..89d0b97 100644
@@ -39,7 +39,7 @@
 extern unsigned long cfs_fail_loc;
 extern unsigned int cfs_fail_val;
 
-extern cfs_waitq_t cfs_race_waitq;
+extern wait_queue_head_t cfs_race_waitq;
 extern int cfs_race_state;
 
 int __cfs_fail_check_set(__u32 id, __u32 value, int set);
@@ -150,21 +150,20 @@ static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
  * the first and continues. */
 static inline void cfs_race(__u32 id)
 {
-
-        if (CFS_FAIL_PRECHECK(id)) {
-                if (unlikely(__cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET))) {
-                        int rc;
-                        cfs_race_state = 0;
-                        CERROR("cfs_race id %x sleeping\n", id);
+       if (CFS_FAIL_PRECHECK(id)) {
+               if (unlikely(__cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET))) {
+                       int rc;
+                       cfs_race_state = 0;
+                       CERROR("cfs_race id %x sleeping\n", id);
                        rc = wait_event_interruptible(cfs_race_waitq,
                                                      cfs_race_state != 0);
-                        CERROR("cfs_fail_race id %x awake, rc=%d\n", id, rc);
-                } else {
-                        CERROR("cfs_fail_race id %x waking\n", id);
-                        cfs_race_state = 1;
-                        cfs_waitq_signal(&cfs_race_waitq);
-                }
-        }
+                       CERROR("cfs_fail_race id %x awake, rc=%d\n", id, rc);
+               } else {
+                       CERROR("cfs_fail_race id %x waking\n", id);
+                       cfs_race_state = 1;
+                       wake_up(&cfs_race_waitq);
+               }
+       }
 }
 #define CFS_RACE(id) cfs_race(id)
 #else
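cfs_race() above deliberately opens a race window for testing: the first thread to hit a matching fail_loc id sets cfs_race_state to 0 and sleeps on cfs_race_waitq; the second thread sets cfs_race_state to 1 and wakes it, so both sides of a suspected race then proceed concurrently. A hypothetical pairing (the id name is made up):

    /* thread A, first to arrive: logs "sleeping" and blocks */
    CFS_RACE(OBD_FAIL_EXAMPLE_RACE);
    touch_shared_state();

    /* thread B, second to arrive: sets cfs_race_state = 1, calls
     * wake_up(&cfs_race_waitq), and releases thread A */
    CFS_RACE(OBD_FAIL_EXAMPLE_RACE);
    touch_shared_state();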
libcfs/include/libcfs/libcfs_prim.h
index 497b98a..49254df 100644
 #define __LIBCFS_PRIM_H__
 
 /*
- * Schedule
- */
-void cfs_schedule_timeout_and_set_state(cfs_task_state_t state,
-                                        int64_t timeout);
-void cfs_schedule_timeout(int64_t timeout);
-void cfs_schedule(void);
-void cfs_pause(cfs_duration_t ticks);
-int  cfs_need_resched(void);
-void cfs_cond_resched(void);
-
-/*
  * Wait Queues
  */
-void cfs_waitq_init(cfs_waitq_t *waitq);
-void cfs_waitlink_init(cfs_waitlink_t *link);
-void cfs_waitq_add(cfs_waitq_t *waitq, cfs_waitlink_t *link);
-void cfs_waitq_add_exclusive(cfs_waitq_t *waitq,
-                             cfs_waitlink_t *link);
-void cfs_waitq_add_exclusive_head(cfs_waitq_t *waitq,
-                                  cfs_waitlink_t *link);
-void cfs_waitq_del(cfs_waitq_t *waitq, cfs_waitlink_t *link);
-int  cfs_waitq_active(cfs_waitq_t *waitq);
-void cfs_waitq_signal(cfs_waitq_t *waitq);
-void cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr);
-void cfs_waitq_broadcast(cfs_waitq_t *waitq);
-void cfs_waitq_wait(cfs_waitlink_t *link, cfs_task_state_t state);
-int64_t cfs_waitq_timedwait(cfs_waitlink_t *link, cfs_task_state_t state, 
-                           int64_t timeout);
-
 /*
  * Timer
  */
libcfs/include/libcfs/linux/linux-prim.h
index 75c064f..0d7047a 100644
@@ -107,19 +107,59 @@ typedef struct proc_dir_entry           cfs_proc_dir_entry_t;
 /*
  * Wait Queue
  */
-#define CFS_TASK_INTERRUPTIBLE          TASK_INTERRUPTIBLE
-#define CFS_TASK_UNINT                  TASK_UNINTERRUPTIBLE
-#define CFS_TASK_RUNNING                TASK_RUNNING
 
-#define cfs_set_current_state(state)    set_current_state(state)
-#define cfs_wait_event(wq, cond)        wait_event(wq, cond)
-
-typedef wait_queue_t                   cfs_waitlink_t;
-typedef wait_queue_head_t              cfs_waitq_t;
-typedef long                            cfs_task_state_t;
 
 #define CFS_DECL_WAITQ(wq)             DECLARE_WAIT_QUEUE_HEAD(wq)
 
+#define LIBCFS_WQITQ_MACROS           1
+#define init_waitqueue_entry_current(w)          init_waitqueue_entry(w, current)
+#define waitq_wait(w, s)          schedule()
+#define waitq_timedwait(w, s, t)  schedule_timeout(t)
+
+#ifndef HAVE___ADD_WAIT_QUEUE_EXCLUSIVE
+static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
+                                             wait_queue_t *wait)
+{
+       wait->flags |= WQ_FLAG_EXCLUSIVE;
+       __add_wait_queue(q, wait);
+}
+#endif /* HAVE___ADD_WAIT_QUEUE_EXCLUSIVE */
+
+/**
+ * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
+ * waiting threads, which is not always desirable because all threads will
+ * be woken up again and again, even when the user only needs a few of them
+ * to be active most of the time. This hurts performance because the cache
+ * can be polluted by different threads.
+ *
+ * A LIFO list can resolve this problem because we always wake up the most
+ * recently active thread by default.
+ *
+ * NB: please don't call non-exclusive & exclusive wait on the same
+ * waitq if add_wait_queue_exclusive_head is used.
+ */
+#define add_wait_queue_exclusive_head(waitq, link)                     \
+{                                                                      \
+       unsigned long flags;                                            \
+                                                                       \
+       spin_lock_irqsave(&((waitq)->lock), flags);                     \
+       __add_wait_queue_exclusive(waitq, link);                        \
+       spin_unlock_irqrestore(&((waitq)->lock), flags);                \
+}
+
+#define schedule_timeout_and_set_state(state, timeout)                 \
+{                                                                      \
+       set_current_state(state);                                       \
+       schedule_timeout(timeout);                                      \
+}
+
+/* deschedule for a bit... */
+#define cfs_pause(ticks)                                               \
+{                                                                      \
+       set_current_state(TASK_UNINTERRUPTIBLE);                        \
+       schedule_timeout(ticks);                                        \
+}
+
 /*
  * Task struct
  */
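A minimal sketch of how an exclusive worker thread might use the LIFO variant above; work_ready() is a hypothetical condition, and the loop shape follows the __wait_event-style macros elsewhere in this patch:

    static void worker_idle(wait_queue_head_t *wq, int (*work_ready)(void))
    {
            wait_queue_t wait;

            init_waitqueue_entry_current(&wait);
            /* LIFO: the most recently idled thread sits at the head, so
             * wake_up() picks it first and colder threads stay asleep. */
            add_wait_queue_exclusive_head(wq, &wait);
            for (;;) {
                    set_current_state(TASK_INTERRUPTIBLE);
                    if (work_ready())
                            break;
                    schedule();
            }
            set_current_state(TASK_RUNNING);
            remove_wait_queue(wq, &wait);
    }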
@@ -151,51 +191,6 @@ typedef sigset_t                        cfs_sigset_t;
  */
 typedef struct timer_list cfs_timer_t;
 
-#define CFS_MAX_SCHEDULE_TIMEOUT MAX_SCHEDULE_TIMEOUT
-
-#ifndef wait_event_timeout /* Only for RHEL3 2.4.21 kernel */
-#define __wait_event_timeout(wq, condition, timeout, ret)        \
-do {                                                             \
-       int __ret = 0;                                           \
-       if (!(condition)) {                                      \
-               wait_queue_t __wait;                             \
-               unsigned long expire;                            \
-                                                                 \
-               init_waitqueue_entry(&__wait, current);          \
-               expire = timeout + jiffies;                      \
-               add_wait_queue(&wq, &__wait);                    \
-               for (;;) {                                       \
-                       set_current_state(TASK_UNINTERRUPTIBLE); \
-                       if (condition)                           \
-                               break;                           \
-                       if (jiffies > expire) {                  \
-                               ret = jiffies - expire;          \
-                               break;                           \
-                       }                                        \
-                       schedule_timeout(timeout);               \
-               }                                                \
-               current->state = TASK_RUNNING;                   \
-               remove_wait_queue(&wq, &__wait);                 \
-       }                                                        \
-} while (0)
-/*
-   retval == 0; condition met; we're good.
-   retval > 0; timed out.
-*/
-#define cfs_waitq_wait_event_timeout(wq, condition, timeout, ret)    \
-do {                                                                 \
-       ret = 0;                                                     \
-       if (!(condition))                                            \
-               __wait_event_timeout(wq, condition, timeout, ret);   \
-} while (0)
-#else
-#define cfs_waitq_wait_event_timeout(wq, condition, timeout, ret)    \
-        ret = wait_event_timeout(wq, condition, timeout)
-#endif
-
-#define cfs_waitq_wait_event_interruptible_timeout(wq, c, timeout, ret) \
-        ret = wait_event_interruptible_timeout(wq, c, timeout)
-
 /*
  * atomic
  */
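With the compatibility wrappers above removed, callers invoke wait_event_interruptible_timeout() directly and handle its three-way result themselves; the old cfs_waitq_wait_event_interruptible_timeout() merely assigned this value to its ret argument. A hedged sketch of a hypothetical caller:

    long rc;

    /* Returns the remaining jiffies (> 0) if the condition became true,
     * 0 if the timeout elapsed first, or -ERESTARTSYS on a signal. */
    rc = wait_event_interruptible_timeout(waitq, data_ready(),
                                          cfs_time_seconds(10));
    if (rc == 0)
            CERROR("timed out\n");
    else if (rc < 0)
            CERROR("interrupted: %ld\n", rc);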
libcfs/include/libcfs/lucache.h
index d9a285b..6904315 100644
@@ -83,16 +83,16 @@ struct md_identity {
 };
 
 struct upcall_cache_entry {
-        cfs_list_t              ue_hash;
-        __u64                   ue_key;
-        cfs_atomic_t            ue_refcount;
-        int                     ue_flags;
-        cfs_waitq_t             ue_waitq;
-        cfs_time_t              ue_acquire_expire;
-        cfs_time_t              ue_expire;
-        union {
-                struct md_identity     identity;
-        } u;
+       cfs_list_t              ue_hash;
+       __u64                   ue_key;
+       cfs_atomic_t            ue_refcount;
+       int                     ue_flags;
+       wait_queue_head_t       ue_waitq;
+       cfs_time_t              ue_acquire_expire;
+       cfs_time_t              ue_expire;
+       union {
+               struct md_identity     identity;
+       } u;
 };
 
 #define UC_CACHE_HASH_SIZE        (128)
libcfs/include/libcfs/user-lock.h
index 8c6b27a..0cac240 100644
@@ -157,7 +157,7 @@ struct completion {
 
 struct completion {
        unsigned int    done;
-       cfs_waitq_t     wait;
+       wait_queue_head_t       wait;
 };
 #endif /* HAVE_LIBPTHREAD */
 
libcfs/include/libcfs/user-prim.h
index 5885728..f8fff19 100644
@@ -78,25 +78,39 @@ typedef struct proc_dir_entry           cfs_proc_dir_entry_t;
 typedef struct cfs_waitlink {
         cfs_list_t sleeping;
         void *process;
-} cfs_waitlink_t;
+} wait_queue_t;
 
 typedef struct cfs_waitq {
         cfs_list_t sleepers;
-} cfs_waitq_t;
-
-#define CFS_DECL_WAITQ(wq) cfs_waitq_t wq
+} wait_queue_head_t;
+
+#define CFS_DECL_WAITQ(wq) wait_queue_head_t wq
+void init_waitqueue_head(struct cfs_waitq *waitq);
+void init_waitqueue_entry_current(struct cfs_waitlink *link);
+void add_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+void add_wait_queue_exclusive(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+void add_wait_queue_exclusive_head(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+void remove_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+int waitqueue_active(struct cfs_waitq *waitq);
+void wake_up(struct cfs_waitq *waitq);
+void wake_up_nr(struct cfs_waitq *waitq, int nr);
+void wake_up_all(struct cfs_waitq *waitq);
+void waitq_wait(struct cfs_waitlink *link, long state);
+int64_t waitq_timedwait(struct cfs_waitlink *link, long state, int64_t timeout);
+void schedule_timeout_and_set_state(long state, int64_t timeout);
+void cfs_pause(cfs_duration_t d);
+int need_resched(void);
+void cond_resched(void);
 
 /*
  * Task states
  */
-typedef long cfs_task_state_t;
-
-#define CFS_TASK_INTERRUPTIBLE  (0)
-#define CFS_TASK_UNINT          (1)
-#define CFS_TASK_RUNNING        (2)
+#define TASK_INTERRUPTIBLE  (0)
+#define TASK_UNINTERRUPTIBLE          (1)
+#define TASK_RUNNING        (2)
 
-static inline void cfs_schedule(void)                  {}
-static inline void cfs_schedule_timeout(int64_t t)     {}
+static inline void schedule(void)                      {}
+static inline void schedule_timeout(int64_t t) {}
 
 /*
  * Lproc
libcfs/include/libcfs/winnt/winnt-prim.h
index c1b487d..cc8a3dc 100644
@@ -374,12 +374,10 @@ size_t lustre_write_file(struct file *fh, loff_t off, size_t size, char *buf);
  */
 
 
-typedef int cfs_task_state_t;
-
-#define CFS_TASK_INTERRUPTIBLE  0x00000001
-#define CFS_TASK_UNINT          0x00000002
-#define CFS_TASK_RUNNING         0x00000003
-#define CFS_TASK_UNINTERRUPTIBLE CFS_TASK_UNINT
+#define TASK_INTERRUPTIBLE      0x00000001
+#define TASK_UNINTERRUPTIBLE            0x00000002
+#define TASK_RUNNING         0x00000003
+#define CFS_TASK_UNINTERRUPTIBLE TASK_UNINTERRUPTIBLE
 
 #define CFS_WAITQ_MAGIC     'CWQM'
 #define CFS_WAITLINK_MAGIC  'CWLM'
@@ -391,10 +389,10 @@ typedef struct cfs_waitq {
        spinlock_t              guard;
        cfs_list_t              waiters;
 
-} cfs_waitq_t;
+} wait_queue_head_t;
 
 
-typedef struct cfs_waitlink cfs_waitlink_t;
+typedef struct cfs_waitlink wait_queue_t;
 
 #define CFS_WAITQ_CHANNELS     (2)
 
@@ -405,8 +403,8 @@ typedef struct cfs_waitlink cfs_waitlink_t;
 
 typedef struct cfs_waitlink_channel {
     cfs_list_t              link;
-    cfs_waitq_t *           waitq;
-    cfs_waitlink_t *        waitl;
+    wait_queue_head_t *           waitq;
+    wait_queue_t *        waitl;
 } cfs_waitlink_channel_t;
 
 struct cfs_waitlink {
@@ -423,7 +421,7 @@ enum {
        CFS_WAITQ_EXCLUSIVE = 1
 };
 
-#define CFS_DECL_WAITQ(name) cfs_waitq_t name
+#define CFS_DECL_WAITQ(name) wait_queue_head_t name
 
 /* Kernel thread */
 
@@ -540,8 +538,8 @@ typedef __u32 kernel_cap_t;
  * Task struct
  */
 
-#define CFS_MAX_SCHEDULE_TIMEOUT     ((long_ptr_t)(~0UL>>12))
-#define cfs_schedule_timeout(t)      cfs_schedule_timeout_and_set_state(0, t)
+#define MAX_SCHEDULE_TIMEOUT     ((long_ptr_t)(~0UL>>12))
+#define schedule_timeout(t)      schedule_timeout_and_set_state(0, t)
 
 struct vfsmount;
 
@@ -621,40 +619,39 @@ typedef struct _TASK_SLOT {
 
 
 #define current                      cfs_current()
-#define cfs_set_current_state(s)     do {;} while (0)
-#define cfs_set_current_state(state) cfs_set_current_state(state)
+#define set_current_state(s)     do {;} while (0)
 
-#define cfs_wait_event(wq, condition)                           \
+#define wait_event(wq, condition)                           \
 do {                                                            \
-        cfs_waitlink_t __wait;                                  \
-                                                                \
-        cfs_waitlink_init(&__wait);                             \
-        while (TRUE) {                                          \
-            cfs_waitq_add(&wq, &__wait);                        \
-            if (condition) {                                    \
-                break;                                          \
-            }                                                   \
-            cfs_waitq_wait(&__wait, CFS_TASK_INTERRUPTIBLE);    \
-            cfs_waitq_del(&wq, &__wait);                       \
-        }                                                      \
-        cfs_waitq_del(&wq, &__wait);                           \
+       wait_queue_t __wait;                                    \
+                                                               \
+       init_waitqueue_entry_current(&__wait);                  \
+       while (TRUE) {                                          \
+           add_wait_queue(&wq, &__wait);                        \
+           if (condition) {                                    \
+               break;                                          \
+           }                                                   \
+           waitq_wait(&__wait, TASK_INTERRUPTIBLE);            \
+           remove_wait_queue(&wq, &__wait);                    \
+       }                                                       \
+       remove_wait_queue(&wq, &__wait);                        \
 } while(0)
 
 #define wait_event_interruptible(wq, condition)                 \
 {                                                               \
-       cfs_waitlink_t __wait;                                  \
+       wait_queue_t __wait;                                    \
                                                                \
        __ret = 0;                                              \
-       cfs_waitlink_init(&__wait);                             \
+       init_waitqueue_entry_current(&__wait);                             \
        while (TRUE) {                                          \
-               cfs_waitq_add(&wq, &__wait);                    \
+               add_wait_queue(&wq, &__wait);                   \
                if (condition) {                                \
                        break;                                  \
                }                                               \
-               cfs_waitq_wait(&__wait, CFS_TASK_INTERRUPTIBLE);\
-               cfs_waitq_del(&wq, &__wait);                    \
+               waitq_wait(&__wait, TASK_INTERRUPTIBLE);\
+               remove_wait_queue(&wq, &__wait);                        \
        }                                                       \
-       cfs_waitq_del(&wq, &__wait);                            \
+       remove_wait_queue(&wq, &__wait);                            \
        __ret;                                                  \
 }
 
@@ -667,37 +664,30 @@ do {                                                            \
    retval > 0; timed out.
 */
 
-#define cfs_waitq_wait_event_interruptible_timeout(             \
-                        wq, condition, timeout, rc)             \
+#define wait_event_interruptible_timeout(wq, condition, timeout)\
 do {                                                            \
-        cfs_waitlink_t __wait;                                  \
-                                                                \
-        rc = 0;                                                 \
-        cfs_waitlink_init(&__wait);                            \
-        while (TRUE) {                                          \
-            cfs_waitq_add(&wq, &__wait);                        \
-            if (condition) {                                    \
-                break;                                          \
-            }                                                   \
-            if (cfs_waitq_timedwait(&__wait,                    \
-                CFS_TASK_INTERRUPTIBLE, timeout) == 0) {        \
-                rc = TRUE;                                      \
-                break;                                          \
-            }                                                   \
-            cfs_waitq_del(&wq, &__wait);                       \
-        }                                                      \
-        cfs_waitq_del(&wq, &__wait);                           \
+       wait_queue_t __wait;                                    \
+                                                               \
+       init_waitqueue_entry_current(&__wait);                  \
+       while (TRUE) {                                          \
+           add_wait_queue(&wq, &__wait);                       \
+           if (condition) {                                    \
+               break;                                          \
+           }                                                   \
+           if (waitq_timedwait(&__wait,                        \
+               TASK_INTERRUPTIBLE, timeout) == 0) {            \
+               break;                                          \
+           }                                                   \
+           remove_wait_queue(&wq, &__wait);                    \
+       }                                                       \
+       remove_wait_queue(&wq, &__wait);                        \
 } while(0)
 
-
-#define cfs_waitq_wait_event_timeout                            \
-        cfs_waitq_wait_event_interruptible_timeout
-
 int     init_task_manager();
 void    cleanup_task_manager();
 cfs_task_t * cfs_current();
 int     wake_up_process(cfs_task_t * task);
-void sleep_on(cfs_waitq_t *waitq);
+void sleep_on(wait_queue_head_t *waitq);
 #define cfs_might_sleep() do {} while(0)
 #define CFS_DECL_JOURNAL_DATA  
 #define CFS_PUSH_JOURNAL           do {;} while(0)
libcfs/libcfs/darwin/darwin-debug.c
index bcf477a..7bf0da6 100644
@@ -50,13 +50,13 @@ void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *msgdata)
 
 void lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
 {
-        libcfs_catastrophe = 1;
-        CEMERG("LBUG: pid: %u thread: %#x\n",
+       libcfs_catastrophe = 1;
+       CEMERG("LBUG: pid: %u thread: %#x\n",
               (unsigned)current_pid(), (unsigned)current_thread());
-        libcfs_debug_dumplog();
-        libcfs_run_lbug_upcall(msgdata);
-        while (1)
-                cfs_schedule();
+       libcfs_debug_dumplog();
+       libcfs_run_lbug_upcall(msgdata);
+       while (1)
+               schedule();
 
        /* panic("lbug_with_loc(%s, %s, %d)", file, func, line) */
 }
libcfs/libcfs/darwin/darwin-prim.c
index e91dfab..26ceb58 100644
@@ -235,7 +235,7 @@ struct kernel_thread_arg cfs_thread_arg;
                        break;                                  \
                }                                               \
                spin_unlock(&(pta)->lock);                      \
-               cfs_schedule();                                 \
+               schedule();                                     \
        } while(1);                                             \
 
 /*
@@ -257,7 +257,7 @@ struct kernel_thread_arg cfs_thread_arg;
                        break;                                  \
                }                                               \
                spin_unlock(&(pta)->lock);                      \
-               cfs_schedule();                                 \
+               schedule();                                     \
        } while(1)
 
 /*
@@ -276,7 +276,7 @@ struct kernel_thread_arg cfs_thread_arg;
                        break;                                  \
                }                                               \
                spin_unlock(&(pta)->lock);                      \
-               cfs_schedule();                                 \
+               schedule();                                     \
        } while (1);                                            \
 
 /*
@@ -460,42 +460,42 @@ void lustre_net_ex(boolean_t state, funnel_t *cone)
 }
 #endif /* !__DARWIN8__ */
 
-void cfs_waitq_init(struct cfs_waitq *waitq)
+void init_waitqueue_head(struct cfs_waitq *waitq)
 {
        ksleep_chan_init(&waitq->wq_ksleep_chan);
 }
 
-void cfs_waitlink_init(struct cfs_waitlink *link)
+void init_waitqueue_entry_current(struct cfs_waitlink *link)
 {
        ksleep_link_init(&link->wl_ksleep_link);
 }
 
-void cfs_waitq_add(struct cfs_waitq *waitq, struct cfs_waitlink *link)
+void add_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link)
 {
-        link->wl_waitq = waitq;
+       link->wl_waitq = waitq;
        ksleep_add(&waitq->wq_ksleep_chan, &link->wl_ksleep_link);
 }
 
-void cfs_waitq_add_exclusive(struct cfs_waitq *waitq,
-                             struct cfs_waitlink *link)
+void add_wait_queue_exclusive(struct cfs_waitq *waitq,
+                             struct cfs_waitlink *link)
 {
-        link->wl_waitq = waitq;
+       link->wl_waitq = waitq;
        link->wl_ksleep_link.flags |= KSLEEP_EXCLUSIVE;
        ksleep_add(&waitq->wq_ksleep_chan, &link->wl_ksleep_link);
 }
 
-void cfs_waitq_del(struct cfs_waitq *waitq,
+void remove_wait_queue(struct cfs_waitq *waitq,
                    struct cfs_waitlink *link)
 {
        ksleep_del(&waitq->wq_ksleep_chan, &link->wl_ksleep_link);
 }
 
-int cfs_waitq_active(struct cfs_waitq *waitq)
+int waitqueue_active(struct cfs_waitq *waitq)
 {
        return (1);
 }
 
-void cfs_waitq_signal(struct cfs_waitq *waitq)
+void wake_up(struct cfs_waitq *waitq)
 {
        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
@@ -504,23 +504,23 @@ void cfs_waitq_signal(struct cfs_waitq *waitq)
        ksleep_wake(&waitq->wq_ksleep_chan);
 }
 
-void cfs_waitq_signal_nr(struct cfs_waitq *waitq, int nr)
+void wake_up_nr(struct cfs_waitq *waitq, int nr)
 {
        ksleep_wake_nr(&waitq->wq_ksleep_chan, nr);
 }
 
-void cfs_waitq_broadcast(struct cfs_waitq *waitq)
+void wake_up_all(struct cfs_waitq *waitq)
 {
        ksleep_wake_all(&waitq->wq_ksleep_chan);
 }
 
-void cfs_waitq_wait(struct cfs_waitlink *link, cfs_task_state_t state)
+void waitq_wait(struct cfs_waitlink *link, long state)
 {
-        ksleep_wait(&link->wl_waitq->wq_ksleep_chan, state);
+       ksleep_wait(&link->wl_waitq->wq_ksleep_chan, state);
 }
 
-cfs_duration_t  cfs_waitq_timedwait(struct cfs_waitlink *link,
-                                    cfs_task_state_t state,
+cfs_duration_t  waitq_timedwait(struct cfs_waitlink *link,
+                                   long state,
                                     cfs_duration_t timeout)
 {
         return ksleep_timedwait(&link->wl_waitq->wq_ksleep_chan, 
libcfs/libcfs/darwin/darwin-proc.c
index 1780f5b..aacc66b 100644
@@ -144,21 +144,21 @@ static int sysctl_debug_mb SYSCTL_HANDLER_ARGS
 
 static int proc_fail_loc SYSCTL_HANDLER_ARGS
 {
-        int error = 0;
-        long old_fail_loc = cfs_fail_loc;
-
-        error = sysctl_handle_long(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
-        if (!error && req->newptr != USER_ADDR_NULL) {
-                if (old_fail_loc != cfs_fail_loc)
-                        cfs_waitq_signal(&cfs_race_waitq);
-        } else  if (req->newptr != USER_ADDR_NULL) {
-                /* Something was wrong with the write request */
-                printf ("sysctl fail loc fault: %d.\n", error);
-        } else {
-                /* Read request */
-                error = SYSCTL_OUT(req, &cfs_fail_loc, sizeof cfs_fail_loc);
-        }
-        return error;
+       int error = 0;
+       long old_fail_loc = cfs_fail_loc;
+
+       error = sysctl_handle_long(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
+       if (!error && req->newptr != USER_ADDR_NULL) {
+               if (old_fail_loc != cfs_fail_loc)
+                       wake_up(&cfs_race_waitq);
+       } else  if (req->newptr != USER_ADDR_NULL) {
+               /* Something was wrong with the write request */
+               printf ("sysctl fail loc fault: %d.\n", error);
+       } else {
+               /* Read request */
+               error = SYSCTL_OUT(req, &cfs_fail_loc, sizeof cfs_fail_loc);
+       }
+       return error;
 }
 
 /*
libcfs/libcfs/darwin/darwin-sync.c
index 872ca00..81110c7 100644
@@ -753,7 +753,7 @@ static void add_hit(struct ksleep_chan *chan, event_t event)
        }
 }
 
-void ksleep_wait(struct ksleep_chan *chan, cfs_task_state_t state)
+void ksleep_wait(struct ksleep_chan *chan, long state)
 {
        event_t event;
        int     result;
@@ -783,8 +783,8 @@ void ksleep_wait(struct ksleep_chan *chan, cfs_task_state_t state)
  * implemented), or waitq was already in the "signalled" state).
  */
 int64_t ksleep_timedwait(struct ksleep_chan *chan, 
-                         cfs_task_state_t state,
-                         __u64 timeout)
+                        long state,
+                        __u64 timeout)
 {
        event_t event;
 
libcfs/libcfs/debug.c
index 6818a20..c418fbb 100644
@@ -110,7 +110,7 @@ EXPORT_SYMBOL(libcfs_panic_on_lbug);
 cfs_atomic_t libcfs_kmemory = CFS_ATOMIC_INIT(0);
 EXPORT_SYMBOL(libcfs_kmemory);
 
-static cfs_waitq_t debug_ctlwq;
+static wait_queue_head_t debug_ctlwq;
 
 char libcfs_debug_file_path_arr[PATH_MAX] = LIBCFS_DEBUG_FILE_PATH_DEFAULT;
 
@@ -247,23 +247,23 @@ void libcfs_debug_dumplog_internal(void *arg)
 
 int libcfs_debug_dumplog_thread(void *arg)
 {
-        libcfs_debug_dumplog_internal(arg);
-        cfs_waitq_signal(&debug_ctlwq);
-        return 0;
+       libcfs_debug_dumplog_internal(arg);
+       wake_up(&debug_ctlwq);
+       return 0;
 }
 
 void libcfs_debug_dumplog(void)
 {
-        cfs_waitlink_t wait;
-        cfs_task_t    *dumper;
-        ENTRY;
+       wait_queue_t wait;
+       cfs_task_t    *dumper;
+       ENTRY;
 
-        /* we're being careful to ensure that the kernel thread is
-         * able to set our state to running as it exits before we
-         * get to schedule() */
-       cfs_waitlink_init(&wait);
-       cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-       cfs_waitq_add(&debug_ctlwq, &wait);
+       /* we're being careful to ensure that the kernel thread is
+        * able to set our state to running as it exits before we
+        * get to schedule() */
+       init_waitqueue_entry_current(&wait);
+       set_current_state(TASK_INTERRUPTIBLE);
+       add_wait_queue(&debug_ctlwq, &wait);
 
        dumper = kthread_run(libcfs_debug_dumplog_thread,
                             (void *)(long)current_pid(),
@@ -271,28 +271,28 @@ void libcfs_debug_dumplog(void)
        if (IS_ERR(dumper))
                printk(KERN_ERR "LustreError: cannot start log dump thread:"
                       " %ld\n", PTR_ERR(dumper));
-        else
-                cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
+       else
+               waitq_wait(&wait, TASK_INTERRUPTIBLE);
 
-        /* be sure to teardown if cfs_create_thread() failed */
-        cfs_waitq_del(&debug_ctlwq, &wait);
-        cfs_set_current_state(CFS_TASK_RUNNING);
+       /* be sure to teardown if cfs_create_thread() failed */
+       remove_wait_queue(&debug_ctlwq, &wait);
+       set_current_state(TASK_RUNNING);
 }
 EXPORT_SYMBOL(libcfs_debug_dumplog);
 
 int libcfs_debug_init(unsigned long bufsize)
 {
-        int    rc = 0;
-        unsigned int max = libcfs_debug_mb;
+       int    rc = 0;
+       unsigned int max = libcfs_debug_mb;
 
-        cfs_waitq_init(&debug_ctlwq);
+       init_waitqueue_head(&debug_ctlwq);
 
-        if (libcfs_console_max_delay <= 0 || /* not set by user or */
-            libcfs_console_min_delay <= 0 || /* set to invalid values */
-            libcfs_console_min_delay >= libcfs_console_max_delay) {
-                libcfs_console_max_delay = CDEBUG_DEFAULT_MAX_DELAY;
-                libcfs_console_min_delay = CDEBUG_DEFAULT_MIN_DELAY;
-        }
+       if (libcfs_console_max_delay <= 0 || /* not set by user or */
+           libcfs_console_min_delay <= 0 || /* set to invalid values */
+           libcfs_console_min_delay >= libcfs_console_max_delay) {
+               libcfs_console_max_delay = CDEBUG_DEFAULT_MAX_DELAY;
+               libcfs_console_min_delay = CDEBUG_DEFAULT_MIN_DELAY;
+       }
 
         if (libcfs_debug_file_path != NULL) {
                 memset(libcfs_debug_file_path_arr, 0, PATH_MAX);
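The ordering in libcfs_debug_dumplog() above is a lost-wakeup guard: the caller marks itself TASK_INTERRUPTIBLE and queues itself on debug_ctlwq before the dumper thread exists, so the wake_up() issued as that thread exits cannot fall between the check and the sleep. Condensed, using the names from the hunk (kthread_run() arguments abbreviated):

    init_waitqueue_entry_current(&wait);
    set_current_state(TASK_INTERRUPTIBLE);   /* declare intent to sleep */
    add_wait_queue(&debug_ctlwq, &wait);     /* become wakeable first */

    dumper = kthread_run(...);               /* may wake us from here on */
    if (!IS_ERR(dumper))
            waitq_wait(&wait, TASK_INTERRUPTIBLE); /* an earlier wake_up()
                                                    * already set us runnable,
                                                    * so nothing is lost */

    remove_wait_queue(&debug_ctlwq, &wait);
    set_current_state(TASK_RUNNING);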
libcfs/libcfs/fail.c
index f84e03b..e11caff 100644
@@ -41,7 +41,7 @@
 
 unsigned long cfs_fail_loc = 0;
 unsigned int cfs_fail_val = 0;
-cfs_waitq_t cfs_race_waitq;
+wait_queue_head_t cfs_race_waitq;
 int cfs_race_state;
 
 EXPORT_SYMBOL(cfs_fail_loc);
@@ -125,17 +125,17 @@ EXPORT_SYMBOL(__cfs_fail_check_set);
 
 int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
 {
-        int ret = 0;
-
-        ret = __cfs_fail_check_set(id, value, set);
-        if (ret) {
-                CERROR("cfs_fail_timeout id %x sleeping for %dms\n",
-                       id, ms);
-                cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
-                                                   cfs_time_seconds(ms) / 1000);
-                cfs_set_current_state(CFS_TASK_RUNNING);
-                CERROR("cfs_fail_timeout id %x awake\n", id);
-        }
-        return ret;
+       int ret = 0;
+
+       ret = __cfs_fail_check_set(id, value, set);
+       if (ret) {
+               CERROR("cfs_fail_timeout id %x sleeping for %dms\n",
+                      id, ms);
+               schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
+                                                  cfs_time_seconds(ms) / 1000);
+               set_current_state(TASK_RUNNING);
+               CERROR("cfs_fail_timeout id %x awake\n", id);
+       }
+       return ret;
 }
 EXPORT_SYMBOL(__cfs_fail_timeout_set);
libcfs/libcfs/hash.c
index c1232b7..b8aaa12 100644
@@ -1003,7 +1003,7 @@ static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs)
        spin_lock(&hs->hs_dep_lock);
        while (hs->hs_dep_bits != 0) {
                spin_unlock(&hs->hs_dep_lock);
-               cfs_cond_resched();
+               cond_resched();
                spin_lock(&hs->hs_dep_lock);
        }
        spin_unlock(&hs->hs_dep_lock);
@@ -1139,10 +1139,10 @@ cfs_hash_destroy(cfs_hash_t *hs)
                                 cfs_hash_exit(hs, hnode);
                         }
                 }
-                LASSERT(bd.bd_bucket->hsb_count == 0);
-                cfs_hash_bd_unlock(hs, &bd, 1);
-                cfs_cond_resched();
-        }
+               LASSERT(bd.bd_bucket->hsb_count == 0);
+               cfs_hash_bd_unlock(hs, &bd, 1);
+               cond_resched();
+       }
 
         LASSERT(cfs_atomic_read(&hs->hs_count) == 0);
 
@@ -1479,11 +1479,11 @@ cfs_hash_for_each_tight(cfs_hash_t *hs, cfs_hash_for_each_cb_t func,
                 cfs_hash_bd_unlock(hs, &bd, excl);
                 if (loop < CFS_HASH_LOOP_HOG)
                         continue;
-                loop = 0;
-                cfs_hash_unlock(hs, 0);
-                cfs_cond_resched();
-                cfs_hash_lock(hs, 0);
-        }
+               loop = 0;
+               cfs_hash_unlock(hs, 0);
+               cond_resched();
+               cfs_hash_lock(hs, 0);
+       }
  out:
         cfs_hash_unlock(hs, 0);
 
@@ -1614,11 +1614,11 @@ cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data)
                                 cfs_hash_bd_unlock(hs, &bd, 0);
                                 cfs_hash_unlock(hs, 0);
 
-                                rc = func(hs, &bd, hnode, data);
-                                if (stop_on_change)
-                                        cfs_hash_put(hs, hnode);
-                                cfs_cond_resched();
-                                count++;
+                               rc = func(hs, &bd, hnode, data);
+                               if (stop_on_change)
+                                       cfs_hash_put(hs, hnode);
+                               cond_resched();
+                               count++;
 
                 cfs_hash_lock(hs, 0);
                 cfs_hash_bd_lock(hs, &bd, 0);
@@ -1798,14 +1798,14 @@ cfs_hash_rehash_cancel_locked(cfs_hash_t *hs)
         }
 
         for (i = 2; cfs_hash_is_rehashing(hs); i++) {
-                cfs_hash_unlock(hs, 1);
-                /* raise console warning while waiting too long */
-                CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
-                       "hash %s is still rehashing, rescheded %d\n",
-                       hs->hs_name, i - 1);
-                cfs_cond_resched();
-                cfs_hash_lock(hs, 1);
-        }
+               cfs_hash_unlock(hs, 1);
+               /* raise console warning while waiting too long */
+               CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
+                      "hash %s is still rehashing, rescheded %d\n",
+                      hs->hs_name, i - 1);
+               cond_resched();
+               cfs_hash_lock(hs, 1);
+       }
 }
 EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);
 
@@ -1951,11 +1951,11 @@ cfs_hash_rehash_worker(cfs_workitem_t *wi)
                         continue;
                 }
 
-                count = 0;
-                cfs_hash_unlock(hs, 1);
-                cfs_cond_resched();
-                cfs_hash_lock(hs, 1);
-        }
+               count = 0;
+               cfs_hash_unlock(hs, 1);
+               cond_resched();
+               cfs_hash_lock(hs, 1);
+       }
 
          hs->hs_rehash_count++;
 
index a708148..2a198fb 100644
@@ -630,7 +630,7 @@ cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
                rc = set_cpus_allowed_ptr(cfs_current(), cpumask);
                set_mems_allowed(*nodemask);
                if (rc == 0)
-                       cfs_schedule(); /* switch to allowed CPU */
+                       schedule(); /* switch to allowed CPU */
 
                return rc;
        }
index 27d95e9..b6719d1 100644
 #include <asm/kgdb.h>
 #endif
 
-#define LINUX_WAITQ(w) ((wait_queue_t *) w)
-#define LINUX_WAITQ_HEAD(w) ((wait_queue_head_t *) w)
-
-void
-cfs_waitq_init(cfs_waitq_t *waitq)
-{
-        init_waitqueue_head(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_init);
-
-void
-cfs_waitlink_init(cfs_waitlink_t *link)
-{
-        init_waitqueue_entry(LINUX_WAITQ(link), current);
-}
-EXPORT_SYMBOL(cfs_waitlink_init);
-
-void
-cfs_waitq_add(cfs_waitq_t *waitq, cfs_waitlink_t *link)
-{
-        add_wait_queue(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-}
-EXPORT_SYMBOL(cfs_waitq_add);
-
-#ifndef HAVE___ADD_WAIT_QUEUE_EXCLUSIVE
-
-static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
-                                              wait_queue_t *wait)
-{
-        wait->flags |= WQ_FLAG_EXCLUSIVE;
-        __add_wait_queue(q, wait);
-}
-
-#endif /* HAVE___ADD_WAIT_QUEUE_EXCLUSIVE */
-
-void
-cfs_waitq_add_exclusive(cfs_waitq_t *waitq,
-                        cfs_waitlink_t *link)
-{
-        add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-}
-EXPORT_SYMBOL(cfs_waitq_add_exclusive);
-
-/**
- * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
- * waiting threads, which is not always desirable because all threads will
- * be waken up again and again, even user only needs a few of them to be
- * active most time. This is not good for performance because cache can
- * be polluted by different threads.
- *
- * LIFO list can resolve this problem because we always wakeup the most
- * recent active thread by default.
- *
- * NB: please don't call non-exclusive & exclusive wait on the same
- * waitq if cfs_waitq_add_exclusive_head is used.
- */
-void
-cfs_waitq_add_exclusive_head(cfs_waitq_t *waitq, cfs_waitlink_t *link)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
-       __add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-       spin_unlock_irqrestore(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
-}
-EXPORT_SYMBOL(cfs_waitq_add_exclusive_head);
-
-void
-cfs_waitq_del(cfs_waitq_t *waitq, cfs_waitlink_t *link)
-{
-        remove_wait_queue(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-}
-EXPORT_SYMBOL(cfs_waitq_del);
-
-int
-cfs_waitq_active(cfs_waitq_t *waitq)
-{
-        return waitqueue_active(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_active);
-
-void
-cfs_waitq_signal(cfs_waitq_t *waitq)
-{
-        wake_up(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_signal);
-
-void
-cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr)
-{
-        wake_up_nr(LINUX_WAITQ_HEAD(waitq), nr);
-}
-EXPORT_SYMBOL(cfs_waitq_signal_nr);
-
-void
-cfs_waitq_broadcast(cfs_waitq_t *waitq)
-{
-        wake_up_all(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_broadcast);
-
-void
-cfs_waitq_wait(cfs_waitlink_t *link, cfs_task_state_t state)
-{
-        schedule();
-}
-EXPORT_SYMBOL(cfs_waitq_wait);
-
-int64_t
-cfs_waitq_timedwait(cfs_waitlink_t *link, cfs_task_state_t state,
-                    int64_t timeout)
-{
-        return schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(cfs_waitq_timedwait);
-
-void
-cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t timeout)
-{
-        set_current_state(state);
-        schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(cfs_schedule_timeout_and_set_state);
-
-void
-cfs_schedule_timeout(int64_t timeout)
-{
-        schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(cfs_schedule_timeout);
-
-void
-cfs_schedule(void)
-{
-        schedule();
-}
-EXPORT_SYMBOL(cfs_schedule);
-
-/* deschedule for a bit... */
-void
-cfs_pause(cfs_duration_t ticks)
-{
-        set_current_state(TASK_UNINTERRUPTIBLE);
-        schedule_timeout(ticks);
-}
-EXPORT_SYMBOL(cfs_pause);
-
-int cfs_need_resched(void)
-{
-        return need_resched();
-}
-EXPORT_SYMBOL(cfs_need_resched);
-
-void cfs_cond_resched(void)
-{
-        cond_resched();
-}
-EXPORT_SYMBOL(cfs_cond_resched);
-
 void cfs_init_timer(cfs_timer_t *t)
 {
         init_timer(t);
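
Every wrapper deleted above was a one- or two-line veneer over the native
scheduler and waitqueue API, so callers can use the kernel calls directly.
A minimal sketch of the open-coded wait that the cfs_waitq_* sequence
expanded to on Linux (the waitq, condition and helper name here are
illustrative only):

    #include <linux/wait.h>
    #include <linux/sched.h>

    static wait_queue_head_t example_waitq; /* init_waitqueue_head() at setup */
    static int example_cond;

    static void example_wait(void)
    {
            wait_queue_t wait;

            init_waitqueue_entry(&wait, current);     /* was cfs_waitlink_init() */
            add_wait_queue(&example_waitq, &wait);    /* was cfs_waitq_add() */
            set_current_state(TASK_INTERRUPTIBLE);
            if (!example_cond)
                    schedule();                       /* was cfs_waitq_wait() */
            __set_current_state(TASK_RUNNING);
            remove_wait_queue(&example_waitq, &wait); /* was cfs_waitq_del() */
    }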
index 027802c..bbc363a 100644
@@ -342,13 +342,13 @@ int LL_PROC_PROTO(libcfs_force_lbug)
 
 int LL_PROC_PROTO(proc_fail_loc)
 {
-        int rc;
-        long old_fail_loc = cfs_fail_loc;
+       int rc;
+       long old_fail_loc = cfs_fail_loc;
 
-        rc = ll_proc_dolongvec(table, write, filp, buffer, lenp, ppos);
-        if (old_fail_loc != cfs_fail_loc)
-                cfs_waitq_signal(&cfs_race_waitq);
-        return rc;
+       rc = ll_proc_dolongvec(table, write, filp, buffer, lenp, ppos);
+       if (old_fail_loc != cfs_fail_loc)
+               wake_up(&cfs_race_waitq);
+       return rc;
 }
 
 static int __proc_cpt_table(void *data, int write,
index 72db8c8..2fa7a5d 100644
@@ -97,13 +97,13 @@ lwt_control (int enable, int clear)
         if (!cfs_capable(CFS_CAP_SYS_ADMIN))
                 return (-EPERM);
 
-        if (!enable) {
-                LWT_EVENT(0,0,0,0);
-                lwt_enabled = 0;
-                cfs_mb();
-                /* give people some time to stop adding traces */
-                cfs_schedule_timeout(10);
-        }
+       if (!enable) {
+               LWT_EVENT(0,0,0,0);
+               lwt_enabled = 0;
+               cfs_mb();
+               /* give people some time to stop adding traces */
+               schedule_timeout(10);
+       }
 
        for (i = 0; i < num_online_cpus(); i++) {
                p = lwt_cpus[i].lwtc_current_page;
index 8ab7e6c..04c9ea5 100644
@@ -385,7 +385,7 @@ static int init_libcfs_module(void)
        mutex_init(&cfs_trace_thread_mutex);
        init_rwsem(&ioctl_list_sem);
        CFS_INIT_LIST_HEAD(&ioctl_list);
-       cfs_waitq_init(&cfs_race_waitq);
+       init_waitqueue_head(&cfs_race_waitq);
 
        rc = libcfs_debug_init(5 * 1024 * 1024);
        if (rc < 0) {
index 49acdf2..57aefc8 100644
@@ -171,14 +171,14 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
                cfs_list_add_tail(&tage->linkage, &tcd->tcd_pages);
                tcd->tcd_cur_pages++;
 
-                if (tcd->tcd_cur_pages > 8 && thread_running) {
-                        struct tracefiled_ctl *tctl = &trace_tctl;
-                        /*
-                         * wake up tracefiled to process some pages.
-                         */
-                        cfs_waitq_signal(&tctl->tctl_waitq);
-                }
-                return tage;
+               if (tcd->tcd_cur_pages > 8 && thread_running) {
+                       struct tracefiled_ctl *tctl = &trace_tctl;
+                       /*
+                        * wake up tracefiled to process some pages.
+                        */
+                       wake_up(&tctl->tctl_waitq);
+               }
+               return tage;
         }
         return NULL;
 }
@@ -996,8 +996,8 @@ static int tracefiled(void *arg)
        spin_lock_init(&pc.pc_lock);
        complete(&tctl->tctl_start);
 
-        while (1) {
-                cfs_waitlink_t __wait;
+       while (1) {
+               wait_queue_t __wait;
 
                 pc.pc_want_daemon_pages = 0;
                 collect_pages(&pc);
@@ -1083,12 +1083,12 @@ end_loop:
                                 break;
                         }
                 }
-                cfs_waitlink_init(&__wait);
-                cfs_waitq_add(&tctl->tctl_waitq, &__wait);
-                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-                cfs_waitq_timedwait(&__wait, CFS_TASK_INTERRUPTIBLE,
-                                    cfs_time_seconds(1));
-                cfs_waitq_del(&tctl->tctl_waitq, &__wait);
+               init_waitqueue_entry_current(&__wait);
+               add_wait_queue(&tctl->tctl_waitq, &__wait);
+               set_current_state(TASK_INTERRUPTIBLE);
+               waitq_timedwait(&__wait, TASK_INTERRUPTIBLE,
+                               cfs_time_seconds(1));
+               remove_wait_queue(&tctl->tctl_waitq, &__wait);
         }
        complete(&tctl->tctl_stop);
         return 0;
@@ -1105,7 +1105,7 @@ int cfs_trace_start_thread(void)
 
        init_completion(&tctl->tctl_start);
        init_completion(&tctl->tctl_stop);
-       cfs_waitq_init(&tctl->tctl_waitq);
+       init_waitqueue_head(&tctl->tctl_waitq);
        cfs_atomic_set(&tctl->tctl_shutdown, 0);
 
        if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
index 12c9ce9..d9dfb9f 100644
@@ -225,7 +225,7 @@ struct page_collection {
 struct tracefiled_ctl {
        struct completion       tctl_start;
        struct completion       tctl_stop;
-       cfs_waitq_t             tctl_waitq;
+       wait_queue_head_t       tctl_waitq;
        pid_t                   tctl_pid;
        cfs_atomic_t            tctl_shutdown;
 };
index 0604790..43014e2 100644
@@ -50,14 +50,14 @@ static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache,
         if (!entry)
                 return NULL;
 
-        UC_CACHE_SET_NEW(entry);
-        CFS_INIT_LIST_HEAD(&entry->ue_hash);
-        entry->ue_key = key;
-        cfs_atomic_set(&entry->ue_refcount, 0);
-        cfs_waitq_init(&entry->ue_waitq);
-        if (cache->uc_ops->init_entry)
-                cache->uc_ops->init_entry(entry, args);
-        return entry;
+       UC_CACHE_SET_NEW(entry);
+       CFS_INIT_LIST_HEAD(&entry->ue_hash);
+       entry->ue_key = key;
+       cfs_atomic_set(&entry->ue_refcount, 0);
+       init_waitqueue_head(&entry->ue_waitq);
+       if (cache->uc_ops->init_entry)
+               cache->uc_ops->init_entry(entry, args);
+       return entry;
 }
 
 /* protected by cache lock */
@@ -126,11 +126,11 @@ static int check_unlink_entry(struct upcall_cache *cache,
                                     entry->ue_acquire_expire))
                         return 0;
 
-                UC_CACHE_SET_EXPIRED(entry);
-                cfs_waitq_broadcast(&entry->ue_waitq);
-        } else if (!UC_CACHE_IS_INVALID(entry)) {
-                UC_CACHE_SET_EXPIRED(entry);
-        }
+               UC_CACHE_SET_EXPIRED(entry);
+               wake_up_all(&entry->ue_waitq);
+       } else if (!UC_CACHE_IS_INVALID(entry)) {
+               UC_CACHE_SET_EXPIRED(entry);
+       }
 
          cfs_list_del_init(&entry->ue_hash);
          if (!cfs_atomic_read(&entry->ue_refcount))
@@ -148,11 +148,11 @@ static inline int refresh_entry(struct upcall_cache *cache,
 struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
                                                   __u64 key, void *args)
 {
-        struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
-        cfs_list_t *head;
-        cfs_waitlink_t wait;
-        int rc, found;
-        ENTRY;
+       struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
+       cfs_list_t *head;
+       wait_queue_t wait;
+       int rc, found;
+       ENTRY;
 
          LASSERT(cache);
 
@@ -202,13 +202,13 @@ find_again:
                 entry->ue_acquire_expire =
                         cfs_time_shift(cache->uc_acquire_expire);
                 if (rc < 0) {
-                        UC_CACHE_CLEAR_ACQUIRING(entry);
-                        UC_CACHE_SET_INVALID(entry);
-                        cfs_waitq_broadcast(&entry->ue_waitq);
-                        if (unlikely(rc == -EREMCHG)) {
-                                put_entry(cache, entry);
-                                GOTO(out, entry = ERR_PTR(rc));
-                        }
+                       UC_CACHE_CLEAR_ACQUIRING(entry);
+                       UC_CACHE_SET_INVALID(entry);
+                       wake_up_all(&entry->ue_waitq);
+                       if (unlikely(rc == -EREMCHG)) {
+                               put_entry(cache, entry);
+                               GOTO(out, entry = ERR_PTR(rc));
+                       }
                 }
         }
         /* someone (and only one) is doing upcall upon this item,
@@ -216,27 +216,27 @@ find_again:
         if (UC_CACHE_IS_ACQUIRING(entry)) {
                 long expiry = (entry == new) ?
                               cfs_time_seconds(cache->uc_acquire_expire) :
-                              CFS_MAX_SCHEDULE_TIMEOUT;
-                long left;
+                             MAX_SCHEDULE_TIMEOUT;
+               long left;
 
-                cfs_waitlink_init(&wait);
-                cfs_waitq_add(&entry->ue_waitq, &wait);
-                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+               init_waitqueue_entry_current(&wait);
+               add_wait_queue(&entry->ue_waitq, &wait);
+               set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock(&cache->uc_lock);
 
-               left = cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
+               left = waitq_timedwait(&wait, TASK_INTERRUPTIBLE,
                                           expiry);
 
                spin_lock(&cache->uc_lock);
-                cfs_waitq_del(&entry->ue_waitq, &wait);
-                if (UC_CACHE_IS_ACQUIRING(entry)) {
-                        /* we're interrupted or upcall failed in the middle */
-                        rc = left > 0 ? -EINTR : -ETIMEDOUT;
-                        CERROR("acquire for key "LPU64": error %d\n",
-                               entry->ue_key, rc);
-                        put_entry(cache, entry);
-                        GOTO(out, entry = ERR_PTR(rc));
-                }
+               remove_wait_queue(&entry->ue_waitq, &wait);
+               if (UC_CACHE_IS_ACQUIRING(entry)) {
+                       /* we're interrupted or upcall failed in the middle */
+                       rc = left > 0 ? -EINTR : -ETIMEDOUT;
+                       CERROR("acquire for key "LPU64": error %d\n",
+                              entry->ue_key, rc);
+                       put_entry(cache, entry);
+                       GOTO(out, entry = ERR_PTR(rc));
+               }
         }
 
         /* invalid means error, don't need to try again */
@@ -353,7 +353,7 @@ out:
         }
         UC_CACHE_CLEAR_ACQUIRING(entry);
        spin_unlock(&cache->uc_lock);
-       cfs_waitq_broadcast(&entry->ue_waitq);
+       wake_up_all(&entry->ue_waitq);
        put_entry(cache, entry);
 
        RETURN(rc);
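
The wait in upcall_cache_get_entry() stays open-coded because it must tell
an early wakeup apart from a timeout; waitq_timedwait() is a thin wrapper
over schedule_timeout(), whose return value carries exactly that. A sketch
of the pattern with native calls (the helper name is illustrative):

    #include <linux/wait.h>
    #include <linux/sched.h>

    /* For a finite timeout: returns > 0 if woken early, 0 if it expired. */
    static long example_timed_wait(wait_queue_head_t *wq, long timeout)
    {
            wait_queue_t wait;
            long left;

            init_waitqueue_entry(&wait, current);
            add_wait_queue(wq, &wait);
            set_current_state(TASK_INTERRUPTIBLE);
            left = schedule_timeout(timeout); /* jiffies remaining on return */
            remove_wait_queue(wq, &wait);
            return left;
    }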
index 4337e12..fcba2cd 100644
@@ -172,7 +172,7 @@ void init_completion(struct completion *c)
 {
        LASSERT(c != NULL);
        c->done = 0;
-       cfs_waitq_init(&c->wait);
+       init_waitqueue_head(&c->wait);
 }
 
 void fini_completion(struct completion *c)
@@ -183,7 +183,7 @@ void complete(struct completion *c)
 {
        LASSERT(c != NULL);
        c->done  = 1;
-       cfs_waitq_signal(&c->wait);
+       wake_up(&c->wait);
 }
 
 void wait_for_completion(struct completion *c)
index 7d7c059..34b0c5e 100644
  * Wait queue. No-op implementation.
  */
 
-void cfs_waitq_init(struct cfs_waitq *waitq)
+void init_waitqueue_head(struct cfs_waitq *waitq)
 {
-        LASSERT(waitq != NULL);
-        (void)waitq;
+       LASSERT(waitq != NULL);
+       (void)waitq;
 }
 
-void cfs_waitlink_init(struct cfs_waitlink *link)
+void init_waitqueue_entry_current(struct cfs_waitlink *link)
 {
-        LASSERT(link != NULL);
-        (void)link;
+       LASSERT(link != NULL);
+       (void)link;
 }
 
-void cfs_waitq_add(struct cfs_waitq *waitq, struct cfs_waitlink *link)
+void add_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link)
 {
-        LASSERT(waitq != NULL);
-        LASSERT(link != NULL);
-        (void)waitq;
-        (void)link;
+       LASSERT(waitq != NULL);
+       LASSERT(link != NULL);
+       (void)waitq;
+       (void)link;
 }
 
-void cfs_waitq_add_exclusive(struct cfs_waitq *waitq, struct cfs_waitlink *link)
+void add_wait_queue_exclusive(struct cfs_waitq *waitq, struct cfs_waitlink *link)
 {
-        LASSERT(waitq != NULL);
-        LASSERT(link != NULL);
-        (void)waitq;
-        (void)link;
+       LASSERT(waitq != NULL);
+       LASSERT(link != NULL);
+       (void)waitq;
+       (void)link;
 }
 
-void cfs_waitq_add_exclusive_head(struct cfs_waitq *waitq, struct cfs_waitlink *link)
+void add_wait_queue_exclusive_head(struct cfs_waitq *waitq, struct cfs_waitlink *link)
 {
-        cfs_waitq_add_exclusive(waitq, link);
+       add_wait_queue_exclusive(waitq, link);
 }
 
-void cfs_waitq_del(struct cfs_waitq *waitq, struct cfs_waitlink *link)
+void remove_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link)
 {
-        LASSERT(waitq != NULL);
-        LASSERT(link != NULL);
-        (void)waitq;
-        (void)link;
+       LASSERT(waitq != NULL);
+       LASSERT(link != NULL);
+       (void)waitq;
+       (void)link;
 }
 
-int cfs_waitq_active(struct cfs_waitq *waitq)
+int waitqueue_active(struct cfs_waitq *waitq)
 {
-        LASSERT(waitq != NULL);
-        (void)waitq;
-        return 0;
+       LASSERT(waitq != NULL);
+       (void)waitq;
+       return 0;
 }
 
-void cfs_waitq_signal(struct cfs_waitq *waitq)
+void wake_up(struct cfs_waitq *waitq)
 {
-        LASSERT(waitq != NULL);
-        (void)waitq;
+       LASSERT(waitq != NULL);
+       (void)waitq;
 }
 
-void cfs_waitq_signal_nr(struct cfs_waitq *waitq, int nr)
+void wake_up_nr(struct cfs_waitq *waitq, int nr)
 {
-        LASSERT(waitq != NULL);
-        (void)waitq;
+       LASSERT(waitq != NULL);
+       (void)waitq;
 }
 
-void cfs_waitq_broadcast(struct cfs_waitq *waitq)
+void wake_up_all(struct cfs_waitq *waitq)
 {
-        LASSERT(waitq != NULL);
-        (void)waitq;
+       LASSERT(waitq != NULL);
+       (void)waitq;
 }
 
-void cfs_waitq_wait(struct cfs_waitlink *link, cfs_task_state_t state)
+void waitq_wait(struct cfs_waitlink *link, long state)
 {
-        LASSERT(link != NULL);
-        (void)link;
+       LASSERT(link != NULL);
+       (void)link;
 
-        /* well, wait for something to happen */
+       /* well, wait for something to happen */
        call_wait_handler(0);
 }
 
-int64_t cfs_waitq_timedwait(struct cfs_waitlink *link, cfs_task_state_t state,
-                            int64_t timeout)
+int64_t waitq_timedwait(struct cfs_waitlink *link, long state,
+                       int64_t timeout)
 {
-        LASSERT(link != NULL);
-        (void)link;
+       LASSERT(link != NULL);
+       (void)link;
        call_wait_handler(timeout);
-        return 0;
+       return 0;
 }
 
-void cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t timeout)
+void schedule_timeout_and_set_state(long state, int64_t timeout)
 {
-        cfs_waitlink_t    l;
-        /* sleep(timeout) here instead? */
-        cfs_waitq_timedwait(&l, state, timeout);
+       wait_queue_t    l;
+       /* sleep(timeout) here instead? */
+       waitq_timedwait(&l, state, timeout);
 }
 
 void
 cfs_pause(cfs_duration_t d)
 {
-        struct timespec s;
+       struct timespec s;
 
-        cfs_duration_nsec(d, &s);
-        nanosleep(&s, NULL);
+       cfs_duration_nsec(d, &s);
+       nanosleep(&s, NULL);
 }
 
-int cfs_need_resched(void)
+int need_resched(void)
 {
-        return 0;
+       return 0;
 }
 
-void cfs_cond_resched(void)
+void cond_resched(void)
 {
 }
 
index 0e32954..bbad0b1 100644
@@ -70,7 +70,7 @@ struct lc_watchdog {
  */
 static struct completion lcw_start_completion;
 static struct completion  lcw_stop_completion;
-static cfs_waitq_t lcw_event_waitq;
+static wait_queue_head_t lcw_event_waitq;
 
 /*
  * Set this and wake lcw_event_waitq to stop the dispatcher.
@@ -135,7 +135,7 @@ static void lcw_cb(ulong_ptr_t data)
        spin_lock_bh(&lcw_pending_timers_lock);
        lcw->lcw_refcount++; /* +1 for pending list */
        cfs_list_add(&lcw->lcw_list, &lcw_pending_timers);
-       cfs_waitq_signal(&lcw_event_waitq);
+       wake_up(&lcw_event_waitq);
 
        spin_unlock_bh(&lcw_pending_timers_lock);
        spin_unlock_bh(&lcw->lcw_lock);
@@ -303,7 +303,7 @@ static void lcw_dispatch_start(void)
 
        init_completion(&lcw_stop_completion);
        init_completion(&lcw_start_completion);
-        cfs_waitq_init(&lcw_event_waitq);
+       init_waitqueue_head(&lcw_event_waitq);
 
        CDEBUG(D_INFO, "starting dispatch thread\n");
        task = kthread_run(lcw_dispatch_main, NULL, "lc_watchdogd");
@@ -327,7 +327,7 @@ static void lcw_dispatch_stop(void)
        CDEBUG(D_INFO, "trying to stop watchdog dispatcher.\n");
 
        set_bit(LCW_FLAG_STOP, &lcw_flags);
-       cfs_waitq_signal(&lcw_event_waitq);
+       wake_up(&lcw_event_waitq);
 
        wait_for_completion(&lcw_stop_completion);
 
index 9f6a7cf..8ea15b3 100644
@@ -405,11 +405,11 @@ errorout:
 void
 cfs_pause(cfs_duration_t ticks)
 {
-    cfs_schedule_timeout_and_set_state(CFS_TASK_UNINTERRUPTIBLE, ticks);
+    schedule_timeout_and_set_state(CFS_TASK_UNINTERRUPTIBLE, ticks);
 }
 
 void
-cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t time)
+schedule_timeout_and_set_state(long state, int64_t time)
 {
     cfs_task_t * task = cfs_current();
     PTASK_SLOT   slot = NULL;
@@ -422,7 +422,7 @@ cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t time)
     slot = CONTAINING_RECORD(task, TASK_SLOT, task);
     cfs_assert(slot->Magic == TASKSLT_MAGIC);
 
-    if (time == CFS_MAX_SCHEDULE_TIMEOUT) {
+    if (time == MAX_SCHEDULE_TIMEOUT) {
         time = 0;
     }
 
@@ -430,9 +430,9 @@ cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t time)
 }
 
 void
-cfs_schedule()
+schedule()
 {
-    cfs_schedule_timeout_and_set_state(CFS_TASK_UNINTERRUPTIBLE, 0);
+    schedule_timeout_and_set_state(CFS_TASK_UNINTERRUPTIBLE, 0);
 }
 
 int
@@ -456,14 +456,14 @@ wake_up_process(
 }
 
 void
-sleep_on(cfs_waitq_t *waitq)
+sleep_on(wait_queue_head_t *waitq)
 {
-       cfs_waitlink_t link;
+       wait_queue_t link;
        
-       cfs_waitlink_init(&link);
-       cfs_waitq_add(waitq, &link);
-       cfs_waitq_wait(&link, CFS_TASK_INTERRUPTIBLE);
-       cfs_waitq_del(waitq, &link);
+       init_waitqueue_entry_current(&link);
+       add_wait_queue(waitq, &link);
+       waitq_wait(&link, TASK_INTERRUPTIBLE);
+       remove_wait_queue(waitq, &link);
 }
 
 EXPORT_SYMBOL(current_uid);
index d62bd01..351e171 100644
@@ -732,12 +732,12 @@ errorout:
     return NT_SUCCESS(status);
 }
 
-int cfs_need_resched(void)
+int need_resched(void)
 {
         return 0;
 }
 
-void cfs_cond_resched(void)
+void cond_resched(void)
 {
 }
 
index a6353bc..18817ee 100644
  */
 
 /*
- * cfs_waitq_init
+ * init_waitqueue_head
  *   To initialize the wait queue
  *
  * Arguments:
- *   waitq:  pointer to the cfs_waitq_t structure
+ *   waitq:  pointer to the wait_queue_head_t structure
  *
  * Return Value:
  *   N/A
@@ -57,7 +57,7 @@
  *   N/A
  */
 
-void cfs_waitq_init(cfs_waitq_t *waitq)
+void init_waitqueue_head(wait_queue_head_t *waitq)
 {
     waitq->magic = CFS_WAITQ_MAGIC;
     waitq->flags = 0;
@@ -66,11 +66,11 @@ void cfs_waitq_init(cfs_waitq_t *waitq)
 }
 
 /*
- * cfs_waitlink_init
+ * init_waitqueue_entry_current
  *   To initialize the wake link node
  *
  * Arguments:
- *   link:  pointer to the cfs_waitlink_t structure
+ *   link:  pointer to the wait_queue_t structure
  *
  * Return Value:
  *   N/A
@@ -79,7 +79,7 @@ void cfs_waitq_init(cfs_waitq_t *waitq)
  *   N/A
  */
 
-void cfs_waitlink_init(cfs_waitlink_t *link)
+void init_waitqueue_entry_current(wait_queue_t *link)
 {
     cfs_task_t * task = cfs_current();
     PTASK_SLOT   slot = NULL;
@@ -93,7 +93,7 @@ void cfs_waitlink_init(cfs_waitlink_t *link)
     slot = CONTAINING_RECORD(task, TASK_SLOT, task);
     cfs_assert(slot->Magic == TASKSLT_MAGIC);
 
-    memset(link, 0, sizeof(cfs_waitlink_t));
+    memset(link, 0, sizeof(wait_queue_t));
 
     link->magic = CFS_WAITLINK_MAGIC;
     link->flags = 0;
@@ -115,7 +115,7 @@ void cfs_waitlink_init(cfs_waitlink_t *link)
  *   To finilize the wake link node
  *
  * Arguments:
- *   link:  pointer to the cfs_waitlink_t structure
+ *   link:  pointer to the wait_queue_t structure
  *
  * Return Value:
  *   N/A
@@ -124,7 +124,7 @@ void cfs_waitlink_init(cfs_waitlink_t *link)
  *   N/A
  */
 
-void cfs_waitlink_fini(cfs_waitlink_t *link)
+void cfs_waitlink_fini(wait_queue_t *link)
 {
     cfs_task_t * task = cfs_current();
     PTASK_SLOT   slot = NULL;
@@ -150,8 +150,8 @@ void cfs_waitlink_fini(cfs_waitlink_t *link)
  *   To queue the wait link node to the wait queue
  *
  * Arguments:
- *   waitq:  pointer to the cfs_waitq_t structure
- *   link:   pointer to the cfs_waitlink_t structure
+ *   waitq:  pointer to the wait_queue_head_t structure
+ *   link:   pointer to the wait_queue_t structure
  *   int:    queue no (Normal or Forward waitq)
  *
  * Return Value:
@@ -161,8 +161,8 @@ void cfs_waitlink_fini(cfs_waitlink_t *link)
  *   N/A
  */
 
-void cfs_waitq_add_internal(cfs_waitq_t *waitq,
-                            cfs_waitlink_t *link,
+void cfs_waitq_add_internal(wait_queue_head_t *waitq,
+                           wait_queue_t *link,
                             __u32 waitqid )
 { 
     LASSERT(waitq != NULL);
@@ -182,12 +182,12 @@ void cfs_waitq_add_internal(cfs_waitq_t *waitq,
        spin_unlock(&(waitq->guard));
 }
 /*
- * cfs_waitq_add
+ * add_wait_queue
  *   To queue the wait link node to the wait queue
  *
  * Arguments:
- *   waitq:  pointer to the cfs_waitq_t structure
- *   link:  pointer to the cfs_waitlink_t structure
+ *   waitq:  pointer to the wait_queue_head_t structure
+ *   link:  pointer to the wait_queue_t structure
  *
  * Return Value:
  *   N/A
@@ -196,19 +196,19 @@ void cfs_waitq_add_internal(cfs_waitq_t *waitq,
  *   N/A
  */
 
-void cfs_waitq_add(cfs_waitq_t *waitq,
-                   cfs_waitlink_t *link)
+void add_wait_queue(wait_queue_head_t *waitq,
+                  wait_queue_t *link)
 { 
     cfs_waitq_add_internal(waitq, link, CFS_WAITQ_CHAN_NORMAL);
 }
 
 /*
- * cfs_waitq_add_exclusive
+ * add_wait_queue_exclusive
  *   To set the wait link node to exclusive mode
  *   and queue it to the wait queue
  *
  * Arguments:
- *   waitq:  pointer to the cfs_waitq_t structure
+ *   waitq:  pointer to the wait_queue_head_t structure
  *   link:  pointer to the cfs_wait_link structure
  *
  * Return Value:
@@ -218,8 +218,8 @@ void cfs_waitq_add(cfs_waitq_t *waitq,
  *   N/A
  */
 
-void cfs_waitq_add_exclusive( cfs_waitq_t *waitq,
-                              cfs_waitlink_t *link)
+void add_wait_queue_exclusive( wait_queue_head_t *waitq,
+                             wait_queue_t *link)
 {
     LASSERT(waitq != NULL);
     LASSERT(link != NULL);
@@ -227,16 +227,16 @@ void cfs_waitq_add_exclusive( cfs_waitq_t *waitq,
     LASSERT(link->magic == CFS_WAITLINK_MAGIC);
 
        link->flags |= CFS_WAITQ_EXCLUSIVE;
-    cfs_waitq_add(waitq, link);
+    add_wait_queue(waitq, link);
 }
 
 /*
- * cfs_waitq_del
+ * remove_wait_queue
  *   To remove the wait link node from the waitq
  *
  * Arguments:
  *   waitq:  pointer to the cfs_ waitq_t structure
- *   link:  pointer to the cfs_waitlink_t structure
+ *   link:  pointer to the wait_queue_t structure
  *
  * Return Value:
  *   N/A
@@ -245,8 +245,8 @@ void cfs_waitq_add_exclusive( cfs_waitq_t *waitq,
  *   N/A
  */
 
-void cfs_waitq_del( cfs_waitq_t *waitq,
-                    cfs_waitlink_t *link)
+void remove_wait_queue( wait_queue_head_t *waitq,
+                   wait_queue_t *link)
 {
     int i = 0;
 
@@ -274,7 +274,7 @@ void cfs_waitq_del( cfs_waitq_t *waitq,
 }
 
 /*
- * cfs_waitq_active
+ * waitqueue_active
  *   Is the waitq active (not empty) ?
  *
  * Arguments:
@@ -288,7 +288,7 @@ void cfs_waitq_del( cfs_waitq_t *waitq,
  *   We always returns TRUE here, the same to Darwin.
  */
 
-int cfs_waitq_active(cfs_waitq_t *waitq)
+int waitqueue_active(wait_queue_head_t *waitq)
 {
     LASSERT(waitq != NULL);
     LASSERT(waitq->magic == CFS_WAITQ_MAGIC);
@@ -297,12 +297,12 @@ int cfs_waitq_active(cfs_waitq_t *waitq)
 }
 
 /*
- * cfs_waitq_signal_nr
+ * wake_up_nr
  *   To wake up all the non-exclusive tasks plus nr exclusive
  *   ones in the waitq
  *
  * Arguments:
- *   waitq:  pointer to the cfs_waitq_t structure
+ *   waitq:  pointer to the wait_queue_head_t structure
  *   nr:    number of exclusive tasks to be woken up
  *
  * Return Value:
@@ -313,7 +313,7 @@ int cfs_waitq_active(cfs_waitq_t *waitq)
  */
 
 
-void cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr)
+void wake_up_nr(wait_queue_head_t *waitq, int nr)
 {
     int     result;
     cfs_waitlink_channel_t * scan;
@@ -326,7 +326,7 @@ void cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr)
                             cfs_waitlink_channel_t,
                             link) {
 
-        cfs_waitlink_t *waitl = scan->waitl;
+       wait_queue_t *waitl = scan->waitl;
 
         result = cfs_wake_event(waitl->event);
         LASSERT( result == FALSE || result == TRUE );
@@ -344,11 +344,11 @@ void cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr)
 }
 
 /*
- * cfs_waitq_signal
+ * wake_up
  *   To wake up all the non-exclusive tasks and 1 exclusive
  *
  * Arguments:
- *   waitq:  pointer to the cfs_waitq_t structure
+ *   waitq:  pointer to the wait_queue_head_t structure
  *
  * Return Value:
  *   N/A
@@ -357,18 +357,18 @@ void cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr)
  *   N/A
  */
 
-void cfs_waitq_signal(cfs_waitq_t *waitq)
+void wake_up(wait_queue_head_t *waitq)
 {
-    cfs_waitq_signal_nr(waitq, 1);
+    wake_up_nr(waitq, 1);
 }
 
 
 /*
- * cfs_waitq_broadcast
+ * wake_up_all
  *   To wake up all the tasks in the waitq
  *
  * Arguments:
- *   waitq:  pointer to the cfs_waitq_t structure
+ *   waitq:  pointer to the wait_queue_head_t structure
  *
  * Return Value:
  *   N/A
@@ -377,20 +377,20 @@ void cfs_waitq_signal(cfs_waitq_t *waitq)
  *   N/A
  */
 
-void cfs_waitq_broadcast(cfs_waitq_t *waitq)
+void wake_up_all(wait_queue_head_t *waitq)
 {
     LASSERT(waitq != NULL);
     LASSERT(waitq->magic ==CFS_WAITQ_MAGIC);
 
-       cfs_waitq_signal_nr(waitq, 0);
+       wake_up_nr(waitq, 0);
 }
 
 /*
- * cfs_waitq_wait
+ * waitq_wait
  *   To wait on the link node until it is signaled.
  *
  * Arguments:
- *   link:  pointer to the cfs_waitlink_t structure
+ *   link:  pointer to the wait_queue_t structure
  *
  * Return Value:
  *   N/A
@@ -399,7 +399,7 @@ void cfs_waitq_broadcast(cfs_waitq_t *waitq)
  *   N/A
  */
 
-void cfs_waitq_wait(cfs_waitlink_t *link, cfs_task_state_t state)
+void waitq_wait(wait_queue_t *link, long state)
 { 
     LASSERT(link != NULL);
     LASSERT(link->magic == CFS_WAITLINK_MAGIC);
@@ -413,11 +413,11 @@ void cfs_waitq_wait(cfs_waitlink_t *link, cfs_task_state_t state)
 }
 
 /*
- * cfs_waitq_timedwait
+ * waitq_timedwait
  *   To wait the link node to be signaled with a timeout limit
  *
  * Arguments:
- *   link:   pointer to the cfs_waitlink_t structure
+ *   link:   pointer to the wait_queue_t structure
  *   timeout: the timeout limitation
  *
  * Return Value:
@@ -429,8 +429,8 @@ void cfs_waitq_wait(cfs_waitlink_t *link, cfs_task_state_t state)
  *   What if it happens to be woken up at the just timeout time !?
  */
 
-int64_t cfs_waitq_timedwait( cfs_waitlink_t *link,
-                             cfs_task_state_t state,
+int64_t waitq_timedwait( wait_queue_t *link,
+                            long state,
                              int64_t timeout)
 { 
 
index b56d266..776a656 100644
@@ -51,7 +51,7 @@ typedef struct cfs_wi_sched {
        /** serialised workitems */
        spinlock_t              ws_lock;
        /** where schedulers sleep */
-       cfs_waitq_t             ws_waitq;
+       wait_queue_head_t               ws_waitq;
 #endif
        /** concurrent workitems */
        cfs_list_t              ws_runq;
@@ -216,26 +216,26 @@ cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
        LASSERT(!cfs_in_interrupt()); /* because we use plain spinlock */
        LASSERT(!sched->ws_stopping);
 
-        cfs_wi_sched_lock(sched);
+       cfs_wi_sched_lock(sched);
 
-        if (!wi->wi_scheduled) {
-                LASSERT (cfs_list_empty(&wi->wi_list));
+       if (!wi->wi_scheduled) {
+               LASSERT (cfs_list_empty(&wi->wi_list));
 
-                wi->wi_scheduled = 1;
+               wi->wi_scheduled = 1;
                sched->ws_nscheduled++;
-                if (!wi->wi_running) {
-                        cfs_list_add_tail(&wi->wi_list, &sched->ws_runq);
+               if (!wi->wi_running) {
+                       cfs_list_add_tail(&wi->wi_list, &sched->ws_runq);
 #ifdef __KERNEL__
-                        cfs_waitq_signal(&sched->ws_waitq);
+                       wake_up(&sched->ws_waitq);
 #endif
-                } else {
-                        cfs_list_add(&wi->wi_list, &sched->ws_rerunq);
-                }
-        }
+               } else {
+                       cfs_list_add(&wi->wi_list, &sched->ws_rerunq);
+               }
+       }
 
-        LASSERT (!cfs_list_empty(&wi->wi_list));
-        cfs_wi_sched_unlock(sched);
-        return;
+       LASSERT (!cfs_list_empty(&wi->wi_list));
+       cfs_wi_sched_unlock(sched);
+       return;
 }
 EXPORT_SYMBOL(cfs_wi_schedule);
 
@@ -303,14 +303,14 @@ cfs_wi_scheduler (void *arg)
                         cfs_list_move_tail(&wi->wi_list, &sched->ws_runq);
                 }
 
-                if (!cfs_list_empty(&sched->ws_runq)) {
-                        cfs_wi_sched_unlock(sched);
-                        /* don't sleep because some workitems still
-                         * expect me to come back soon */
-                        cfs_cond_resched();
-                        cfs_wi_sched_lock(sched);
-                        continue;
-                }
+               if (!cfs_list_empty(&sched->ws_runq)) {
+                       cfs_wi_sched_unlock(sched);
+                       /* don't sleep because some workitems still
+                        * expect me to come back soon */
+                       cond_resched();
+                       cfs_wi_sched_lock(sched);
+                       continue;
+               }
 
                cfs_wi_sched_unlock(sched);
                rc = wait_event_interruptible_exclusive(sched->ws_waitq,
@@ -396,7 +396,7 @@ cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
        spin_unlock(&cfs_wi_data.wi_glock);
 
 #ifdef __KERNEL__
-       cfs_waitq_broadcast(&sched->ws_waitq);
+       wake_up_all(&sched->ws_waitq);
 
        spin_lock(&cfs_wi_data.wi_glock);
        {
@@ -445,7 +445,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
 
 #ifdef __KERNEL__
        spin_lock_init(&sched->ws_lock);
-       cfs_waitq_init(&sched->ws_waitq);
+       init_waitqueue_head(&sched->ws_waitq);
 #endif
        CFS_INIT_LIST_HEAD(&sched->ws_runq);
        CFS_INIT_LIST_HEAD(&sched->ws_rerunq);
@@ -459,7 +459,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
                spin_lock(&cfs_wi_data.wi_glock);
                while (sched->ws_starting > 0) {
                        spin_unlock(&cfs_wi_data.wi_glock);
-                       cfs_schedule();
+                       schedule();
                        spin_lock(&cfs_wi_data.wi_glock);
                }
 
@@ -529,7 +529,7 @@ cfs_wi_shutdown (void)
        /* nobody should contend on this list */
        cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
                sched->ws_stopping = 1;
-               cfs_waitq_broadcast(&sched->ws_waitq);
+               wake_up_all(&sched->ws_waitq);
        }
 
        cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
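
The scheduler loop in this file already sleeps through the native
wait_event_interruptible_exclusive(), so only the wakeup side needed
renaming. A standalone sketch of that producer/consumer pairing (all names
are illustrative):

    #include <linux/wait.h>

    static wait_queue_head_t example_ws_waitq; /* init_waitqueue_head() at setup */
    static int example_have_work;

    /* Consumer: exclusive waiters are woken one per wake_up(). */
    static int example_scheduler_wait(void)
    {
            return wait_event_interruptible_exclusive(example_ws_waitq,
                                                      example_have_work);
    }

    /* Producer: a plain wake_up() rouses a single exclusive waiter. */
    static void example_queue_work(void)
    {
            example_have_work = 1;
            wake_up(&example_ws_waitq);
    }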
index 8aaa239..079ea62 100644
@@ -725,7 +725,7 @@ typedef struct
        /* Event Queue container */
        struct lnet_res_container       ln_eq_container;
 #ifdef __KERNEL__
-       cfs_waitq_t                     ln_eq_waitq;
+       wait_queue_head_t                       ln_eq_waitq;
        spinlock_t                      ln_eq_wait_lock;
 #else
 # ifndef HAVE_LIBPTHREAD
index db839ca..f80347c 100644
@@ -2529,9 +2529,9 @@ mxlnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
 void
 mxlnd_sleep(unsigned long timeout)
 {
-        cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-        cfs_schedule_timeout(timeout);
-        return;
+       set_current_state(TASK_INTERRUPTIBLE);
+       schedule_timeout(timeout);
+       return;
 }
 
 /**
index 461eaaf..2da50e2 100644
@@ -790,19 +790,19 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
                goto failed_2;
        }
 
-        if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
-                /* wakeup failover thread and teardown connection */
-                if (kiblnd_dev_can_failover(dev)) {
-                        cfs_list_add_tail(&dev->ibd_fail_list,
-                                      &kiblnd_data.kib_failed_devs);
-                        cfs_waitq_signal(&kiblnd_data.kib_failover_waitq);
-                }
+       if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
+               /* wakeup failover thread and teardown connection */
+               if (kiblnd_dev_can_failover(dev)) {
+                       cfs_list_add_tail(&dev->ibd_fail_list,
+                                     &kiblnd_data.kib_failed_devs);
+                       wake_up(&kiblnd_data.kib_failover_waitq);
+               }
 
                write_unlock_irqrestore(glock, flags);
-                CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
-                       cmid->device->name, dev->ibd_ifname);
-                goto failed_2;
-        }
+               CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
+                      cmid->device->name, dev->ibd_ifname);
+               goto failed_2;
+       }
 
          kiblnd_hdev_addref_locked(dev->ibd_hdev);
          conn->ibc_hdev = dev->ibd_hdev;
@@ -1325,7 +1325,7 @@ kiblnd_current_hdev(kib_dev_t *dev)
                if (i++ % 50 == 0)
                        CDEBUG(D_NET, "%s: Wait for failover\n",
                               dev->ibd_ifname);
-               cfs_schedule_timeout(cfs_time_seconds(1) / 100);
+               schedule_timeout(cfs_time_seconds(1) / 100);
 
                read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
        }
@@ -1672,7 +1672,7 @@ kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
                spin_unlock(&fps->fps_lock);
                CDEBUG(D_NET, "Another thread is allocating new "
                       "FMR pool, waiting for her to complete\n");
-               cfs_schedule();
+               schedule();
                goto again;
 
        }
@@ -1875,7 +1875,7 @@ kiblnd_pool_alloc_node(kib_poolset_t *ps)
                 CDEBUG(D_NET, "Another thread is allocating new "
                        "%s pool, waiting for her to complete\n",
                        ps->ps_name);
-                cfs_schedule();
+               schedule();
                 goto again;
         }
 
@@ -2831,20 +2831,20 @@ kiblnd_base_shutdown(void)
                 LASSERT (cfs_list_empty(&kiblnd_data.kib_connd_zombies));
                 LASSERT (cfs_list_empty(&kiblnd_data.kib_connd_conns));
 
-                /* flag threads to terminate; wake and wait for them to die */
-                kiblnd_data.kib_shutdown = 1;
+               /* flag threads to terminate; wake and wait for them to die */
+               kiblnd_data.kib_shutdown = 1;
 
                /* NB: we really want to stop scheduler threads net by net
                 * instead of the whole module, this should be improved
                 * with dynamic configuration LNet */
                cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds)
-                       cfs_waitq_broadcast(&sched->ibs_waitq);
+                       wake_up_all(&sched->ibs_waitq);
 
-                cfs_waitq_broadcast(&kiblnd_data.kib_connd_waitq);
-                cfs_waitq_broadcast(&kiblnd_data.kib_failover_waitq);
+               wake_up_all(&kiblnd_data.kib_connd_waitq);
+               wake_up_all(&kiblnd_data.kib_failover_waitq);
 
-                i = 2;
-                while (cfs_atomic_read(&kiblnd_data.kib_nthreads) != 0) {
+               i = 2;
+               while (cfs_atomic_read(&kiblnd_data.kib_nthreads) != 0) {
                         i++;
                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                                "Waiting for %d threads to terminate\n",
@@ -2975,10 +2975,10 @@ kiblnd_base_startup(void)
                 CFS_INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
 
        spin_lock_init(&kiblnd_data.kib_connd_lock);
-        CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
-        CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
-        cfs_waitq_init(&kiblnd_data.kib_connd_waitq);
-       cfs_waitq_init(&kiblnd_data.kib_failover_waitq);
+       CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
+       CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
+       init_waitqueue_head(&kiblnd_data.kib_connd_waitq);
+       init_waitqueue_head(&kiblnd_data.kib_failover_waitq);
 
        kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(),
                                                  sizeof(*sched));
@@ -2990,7 +2990,7 @@ kiblnd_base_startup(void)
 
                spin_lock_init(&sched->ibs_lock);
                CFS_INIT_LIST_HEAD(&sched->ibs_conns);
-               cfs_waitq_init(&sched->ibs_waitq);
+               init_waitqueue_head(&sched->ibs_waitq);
 
                nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
                if (*kiblnd_tunables.kib_nscheds > 0) {
index 95e60db..661756c 100644
@@ -378,7 +378,7 @@ struct kib_sched_info {
        /* serialise */
        spinlock_t              ibs_lock;
        /* schedulers sleep here */
-       cfs_waitq_t             ibs_waitq;
+       wait_queue_head_t               ibs_waitq;
        /* conns to check for rx completions */
        cfs_list_t              ibs_conns;
        /* number of scheduler threads */
@@ -396,7 +396,7 @@ typedef struct
        /* list head of failed devices */
        cfs_list_t              kib_failed_devs;
        /* schedulers sleep here */
-       cfs_waitq_t             kib_failover_waitq;
+       wait_queue_head_t               kib_failover_waitq;
        cfs_atomic_t            kib_nthreads;   /* # live threads */
        /* stabilize net/dev/peer/conn ops */
        rwlock_t                kib_global_lock;
@@ -411,7 +411,7 @@ typedef struct
        /* connections with zero refcount */
        cfs_list_t              kib_connd_zombies;
        /* connection daemon sleeps here */
-       cfs_waitq_t             kib_connd_waitq;
+       wait_queue_head_t               kib_connd_waitq;
        spinlock_t              kib_connd_lock; /* serialise */
        struct ib_qp_attr       kib_error_qpa;  /* QP->ERROR */
        /* percpt data for schedulers */
@@ -713,7 +713,7 @@ do {                                                                        \
                spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);  \
                cfs_list_add_tail(&(conn)->ibc_list,                    \
                                  &kiblnd_data.kib_connd_zombies);      \
-               cfs_waitq_signal(&kiblnd_data.kib_connd_waitq);         \
+               wake_up(&kiblnd_data.kib_connd_waitq);          \
                spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
        }                                                               \
 } while (0)
index 4d9bada..9ca2cb0 100644 (file)
@@ -1909,17 +1909,17 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error)
 
         kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
 
-        if (error != 0 &&
-            kiblnd_dev_can_failover(dev)) {
-                cfs_list_add_tail(&dev->ibd_fail_list,
-                              &kiblnd_data.kib_failed_devs);
-                cfs_waitq_signal(&kiblnd_data.kib_failover_waitq);
-        }
+       if (error != 0 &&
+           kiblnd_dev_can_failover(dev)) {
+               cfs_list_add_tail(&dev->ibd_fail_list,
+                             &kiblnd_data.kib_failed_devs);
+               wake_up(&kiblnd_data.kib_failover_waitq);
+       }
 
        spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
 
        cfs_list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns);
-       cfs_waitq_signal(&kiblnd_data.kib_connd_waitq);
+       wake_up(&kiblnd_data.kib_connd_waitq);
 
        spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
 }
@@ -3126,19 +3126,19 @@ kiblnd_disconnect_conn (kib_conn_t *conn)
 int
 kiblnd_connd (void *arg)
 {
-        cfs_waitlink_t     wait;
-        unsigned long      flags;
-        kib_conn_t        *conn;
-        int                timeout;
-        int                i;
-        int                dropped_lock;
-        int                peer_index = 0;
-        unsigned long      deadline = jiffies;
+       wait_queue_t     wait;
+       unsigned long      flags;
+       kib_conn_t        *conn;
+       int                timeout;
+       int                i;
+       int                dropped_lock;
+       int                peer_index = 0;
+       unsigned long      deadline = jiffies;
 
-        cfs_block_allsigs ();
+       cfs_block_allsigs ();
 
-        cfs_waitlink_init (&wait);
-        kiblnd_data.kib_connd = current;
+       init_waitqueue_entry_current (&wait);
+       kiblnd_data.kib_connd = current;
 
        spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
 
@@ -3214,14 +3214,14 @@ kiblnd_connd (void *arg)
                        continue;
 
                /* Nothing to do for 'timeout'  */
-               cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-               cfs_waitq_add(&kiblnd_data.kib_connd_waitq, &wait);
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
                spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
 
-               cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);
+               waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
 
-               cfs_set_current_state(CFS_TASK_RUNNING);
-               cfs_waitq_del(&kiblnd_data.kib_connd_waitq, &wait);
+               set_current_state(TASK_RUNNING);
+               remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
                spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
        }
 
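
kiblnd_connd() keeps its open-coded sleep, now spelled with the native entry points; init_waitqueue_entry_current() and waitq_timedwait() are the transitional libcfs wrappers this patch maps onto init_waitqueue_entry(..., current) and schedule_timeout(). A rough sketch of the idiom under those assumptions, with hypothetical demo_* names:

    #include <linux/sched.h>
    #include <linux/wait.h>

    /* Sleep on q until woken or until timeout jiffies pass.  Real callers
     * set the task state and queue the entry before dropping the lock that
     * guards the wakeup condition, so no wakeup can be lost. */
    static void demo_timed_sleep(wait_queue_head_t *q, long timeout)
    {
            wait_queue_t wait;                      /* on-stack wait entry */

            init_waitqueue_entry(&wait, current);
            set_current_state(TASK_INTERRUPTIBLE);
            add_wait_queue(q, &wait);

            schedule_timeout(timeout);              /* returns early if woken */

            set_current_state(TASK_RUNNING);
            remove_wait_queue(q, &wait);
    }
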
@@ -3303,8 +3303,8 @@ kiblnd_cq_completion(struct ib_cq *cq, void *arg)
                conn->ibc_scheduled = 1;
                cfs_list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);
 
-               if (cfs_waitq_active(&sched->ibs_waitq))
-                       cfs_waitq_signal(&sched->ibs_waitq);
+               if (waitqueue_active(&sched->ibs_waitq))
+                       wake_up(&sched->ibs_waitq);
        }
 
        spin_unlock_irqrestore(&sched->ibs_lock, flags);
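
cfs_waitq_active()/cfs_waitq_signal() map to waitqueue_active()/wake_up(); the test skips a needless wakeup when no scheduler is sleeping, and is only safe because sleeper and waker both manipulate the queue under ibs_lock. A sketch of the pattern with hypothetical names:

    static void demo_guarded_wake(spinlock_t *lock, wait_queue_head_t *q)
    {
            unsigned long flags;

            spin_lock_irqsave(lock, flags);
            /* ... queue new work under the lock ... */
            if (waitqueue_active(q))        /* anyone sleeping at all? */
                    wake_up(q);             /* wakes one exclusive waiter */
            spin_unlock_irqrestore(lock, flags);
    }
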
@@ -3325,7 +3325,7 @@ kiblnd_scheduler(void *arg)
        long                    id = (long)arg;
        struct kib_sched_info   *sched;
        kib_conn_t              *conn;
-       cfs_waitlink_t          wait;
+       wait_queue_t            wait;
        unsigned long           flags;
        struct ib_wc            wc;
        int                     did_something;
@@ -3334,7 +3334,7 @@ kiblnd_scheduler(void *arg)
 
        cfs_block_allsigs();
 
-       cfs_waitlink_init(&wait);
+       init_waitqueue_entry_current(&wait);
 
        sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
 
@@ -3352,7 +3352,7 @@ kiblnd_scheduler(void *arg)
                if (busy_loops++ >= IBLND_RESCHED) {
                        spin_unlock_irqrestore(&sched->ibs_lock, flags);
 
-                       cfs_cond_resched();
+                       cond_resched();
                        busy_loops = 0;
 
                        spin_lock_irqsave(&sched->ibs_lock, flags);
@@ -3409,8 +3409,8 @@ kiblnd_scheduler(void *arg)
                                kiblnd_conn_addref(conn);
                                cfs_list_add_tail(&conn->ibc_sched_list,
                                                  &sched->ibs_conns);
-                               if (cfs_waitq_active(&sched->ibs_waitq))
-                                       cfs_waitq_signal(&sched->ibs_waitq);
+                               if (waitqueue_active(&sched->ibs_waitq))
+                                       wake_up(&sched->ibs_waitq);
                        } else {
                                conn->ibc_scheduled = 0;
                        }
@@ -3429,15 +3429,15 @@ kiblnd_scheduler(void *arg)
                 if (did_something)
                         continue;
 
-               cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-               cfs_waitq_add_exclusive(&sched->ibs_waitq, &wait);
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
                spin_unlock_irqrestore(&sched->ibs_lock, flags);
 
-               cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
+               waitq_wait(&wait, TASK_INTERRUPTIBLE);
                busy_loops = 0;
 
-               cfs_waitq_del(&sched->ibs_waitq, &wait);
-               cfs_set_current_state(CFS_TASK_RUNNING);
+               remove_wait_queue(&sched->ibs_waitq, &wait);
+               set_current_state(TASK_RUNNING);
                spin_lock_irqsave(&sched->ibs_lock, flags);
        }
 
@@ -3451,16 +3451,16 @@ int
 kiblnd_failover_thread(void *arg)
 {
        rwlock_t                *glock = &kiblnd_data.kib_global_lock;
-        kib_dev_t         *dev;
-        cfs_waitlink_t     wait;
-        unsigned long      flags;
-        int                rc;
+       kib_dev_t         *dev;
+       wait_queue_t     wait;
+       unsigned long      flags;
+       int                rc;
 
-        LASSERT (*kiblnd_tunables.kib_dev_failover != 0);
+       LASSERT (*kiblnd_tunables.kib_dev_failover != 0);
 
-        cfs_block_allsigs ();
+       cfs_block_allsigs ();
 
-        cfs_waitlink_init(&wait);
+       init_waitqueue_entry_current(&wait);
        write_lock_irqsave(glock, flags);
 
         while (!kiblnd_data.kib_shutdown) {
@@ -3506,14 +3506,14 @@ kiblnd_failover_thread(void *arg)
                 /* long sleep if no more pending failover */
                 long_sleep = cfs_list_empty(&kiblnd_data.kib_failed_devs);
 
-                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-                cfs_waitq_add(&kiblnd_data.kib_failover_waitq, &wait);
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
                write_unlock_irqrestore(glock, flags);
 
                rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
                                                   cfs_time_seconds(1));
-               cfs_set_current_state(CFS_TASK_RUNNING);
-               cfs_waitq_del(&kiblnd_data.kib_failover_waitq, &wait);
+               set_current_state(TASK_RUNNING);
+               remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
                write_lock_irqsave(glock, flags);
 
                 if (!long_sleep || rc != 0)
index ee12cd8..960e938 100644 (file)
@@ -571,18 +571,18 @@ kptllnd_base_shutdown (void)
                 kptllnd_data.kptl_shutdown = 2;
                 cfs_mb();
 
-                i = 2;
-                while (cfs_atomic_read (&kptllnd_data.kptl_nthreads) != 0) {
-                        /* Wake up all threads*/
-                        cfs_waitq_broadcast(&kptllnd_data.kptl_sched_waitq);
-                        cfs_waitq_broadcast(&kptllnd_data.kptl_watchdog_waitq);
-
-                        i++;
-                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
-                               "Waiting for %d threads to terminate\n",
-                               cfs_atomic_read(&kptllnd_data.kptl_nthreads));
-                        cfs_pause(cfs_time_seconds(1));
-                }
+               i = 2;
+               while (cfs_atomic_read (&kptllnd_data.kptl_nthreads) != 0) {
+                       /* Wake up all threads*/
+                       wake_up_all(&kptllnd_data.kptl_sched_waitq);
+                       wake_up_all(&kptllnd_data.kptl_watchdog_waitq);
+
+                       i++;
+                       CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
+                              "Waiting for %d threads to terminate\n",
+                              cfs_atomic_read(&kptllnd_data.kptl_nthreads));
+                       cfs_pause(cfs_time_seconds(1));
+               }
 
                 CDEBUG(D_NET, "All Threads stopped\n");
                 LASSERT(cfs_list_empty(&kptllnd_data.kptl_sched_txq));
@@ -678,12 +678,12 @@ kptllnd_base_startup (void)
        rwlock_init(&kptllnd_data.kptl_net_rw_lock);
         CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_nets);
 
-        /* Setup the sched locks/lists/waitq */
+       /* Setup the sched locks/lists/waitq */
        spin_lock_init(&kptllnd_data.kptl_sched_lock);
-        cfs_waitq_init(&kptllnd_data.kptl_sched_waitq);
-        CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_txq);
-        CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxq);
-        CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxbq);
+       init_waitqueue_head(&kptllnd_data.kptl_sched_waitq);
+       CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_txq);
+       CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxq);
+       CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxbq);
 
         /* Init kptl_ptlid2str_lock before any call to kptllnd_ptlid2str */
        spin_lock_init(&kptllnd_data.kptl_ptlid2str_lock);
@@ -775,9 +775,9 @@ kptllnd_base_startup (void)
         kptllnd_data.kptl_nak_msg->ptlm_srcstamp = kptllnd_data.kptl_incarnation;
 
        rwlock_init(&kptllnd_data.kptl_peer_rw_lock);
-        cfs_waitq_init(&kptllnd_data.kptl_watchdog_waitq);
-        CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_closing_peers);
-        CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_zombie_peers);
+       init_waitqueue_head(&kptllnd_data.kptl_watchdog_waitq);
+       CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_closing_peers);
+       CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_zombie_peers);
 
         /* Allocate and setup the peer hash table */
         kptllnd_data.kptl_peer_hash_size =
index 682367c..32a5509 100644 (file)
@@ -262,14 +262,14 @@ struct kptl_data
        cfs_list_t              kptl_nets;              /* kptl_net instance*/
 
        spinlock_t              kptl_sched_lock;        /* serialise... */
-        cfs_waitq_t             kptl_sched_waitq;      /* schedulers sleep here */
-        cfs_list_t              kptl_sched_txq;        /* tx requiring attention */
-        cfs_list_t              kptl_sched_rxq;        /* rx requiring attention */
-        cfs_list_t              kptl_sched_rxbq;       /* rxb requiring reposting */
+       wait_queue_head_t       kptl_sched_waitq;      /* schedulers sleep here */
+       cfs_list_t              kptl_sched_txq;        /* tx requiring attention */
+       cfs_list_t              kptl_sched_rxq;        /* rx requiring attention */
+       cfs_list_t              kptl_sched_rxbq;       /* rxb requiring reposting */
 
-        cfs_waitq_t             kptl_watchdog_waitq;   /* watchdog sleeps here */
+       wait_queue_head_t       kptl_watchdog_waitq;   /* watchdog sleeps here */
 
-        kptl_rx_buffer_pool_t   kptl_rx_buffer_pool;   /* rx buffer pool */
+       kptl_rx_buffer_pool_t   kptl_rx_buffer_pool;   /* rx buffer pool */
        struct kmem_cache       *kptl_rx_cache;         /* rx descripter cache */
 
         cfs_atomic_t            kptl_ntx;              /* # tx descs allocated */
@@ -399,7 +399,7 @@ kptllnd_rx_buffer_decref_locked(kptl_rx_buffer_t *rxb)
 
                cfs_list_add_tail(&rxb->rxb_repost_list,
                                  &kptllnd_data.kptl_sched_rxbq);
-               cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
+               wake_up(&kptllnd_data.kptl_sched_waitq);
 
                spin_unlock(&kptllnd_data.kptl_sched_lock);
        }
index caf90a6..6b47a16 100644 (file)
@@ -668,17 +668,17 @@ kptllnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
 int
 kptllnd_watchdog(void *arg)
 {
-        int                 id = (long)arg;
-        cfs_waitlink_t      waitlink;
-        int                 stamp = 0;
-        int                 peer_index = 0;
-        unsigned long       deadline = jiffies;
-        int                 timeout;
-        int                 i;
+       int                 id = (long)arg;
+       wait_queue_t        waitlink;
+       int                 stamp = 0;
+       int                 peer_index = 0;
+       unsigned long       deadline = jiffies;
+       int                 timeout;
+       int                 i;
 
-        cfs_block_allsigs();
+       cfs_block_allsigs();
 
-        cfs_waitlink_init(&waitlink);
+       init_waitqueue_entry_current(&waitlink);
 
         /* threads shut down in phase 2 after all peers have been destroyed */
         while (kptllnd_data.kptl_shutdown < 2) {
@@ -717,36 +717,36 @@ kptllnd_watchdog(void *arg)
 
                 kptllnd_handle_closing_peers();
 
-                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-                cfs_waitq_add_exclusive(&kptllnd_data.kptl_watchdog_waitq,
-                                        &waitlink);
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue_exclusive(&kptllnd_data.kptl_watchdog_waitq,
+                                       &waitlink);
 
-                cfs_waitq_timedwait(&waitlink, CFS_TASK_INTERRUPTIBLE, timeout);
+               waitq_timedwait(&waitlink, TASK_INTERRUPTIBLE, timeout);
 
-                cfs_set_current_state (CFS_TASK_RUNNING);
-                cfs_waitq_del(&kptllnd_data.kptl_watchdog_waitq, &waitlink);
-        }
+               set_current_state (TASK_RUNNING);
+               remove_wait_queue(&kptllnd_data.kptl_watchdog_waitq, &waitlink);
+       }
 
-        kptllnd_thread_fini();
-        CDEBUG(D_NET, "<<<\n");
-        return (0);
+       kptllnd_thread_fini();
+       CDEBUG(D_NET, "<<<\n");
+       return (0);
 };
 
 int
 kptllnd_scheduler (void *arg)
 {
-        int                 id = (long)arg;
-        cfs_waitlink_t      waitlink;
-        unsigned long       flags;
-        int                 did_something;
-        int                 counter = 0;
-        kptl_rx_t          *rx;
-        kptl_rx_buffer_t   *rxb;
-        kptl_tx_t          *tx;
+       int                 id = (long)arg;
+       wait_queue_t        waitlink;
+       unsigned long       flags;
+       int                 did_something;
+       int                 counter = 0;
+       kptl_rx_t          *rx;
+       kptl_rx_buffer_t   *rxb;
+       kptl_tx_t          *tx;
 
-        cfs_block_allsigs();
+       cfs_block_allsigs();
 
-        cfs_waitlink_init(&waitlink);
+       init_waitqueue_entry_current(&waitlink);
 
        spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
 
@@ -808,24 +808,24 @@ kptllnd_scheduler (void *arg)
                                 continue;
                 }
 
-                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-                cfs_waitq_add_exclusive(&kptllnd_data.kptl_sched_waitq,
-                                        &waitlink);
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue_exclusive(&kptllnd_data.kptl_sched_waitq,
+                                       &waitlink);
                spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
-                                           flags);
+                                          flags);
 
-                if (!did_something)
-                        cfs_waitq_wait(&waitlink, CFS_TASK_INTERRUPTIBLE);
-                else
-                        cfs_cond_resched();
+               if (!did_something)
+                       waitq_wait(&waitlink, TASK_INTERRUPTIBLE);
+               else
+                       cond_resched();
 
-                cfs_set_current_state(CFS_TASK_RUNNING);
-                cfs_waitq_del(&kptllnd_data.kptl_sched_waitq, &waitlink);
+               set_current_state(TASK_RUNNING);
+               remove_wait_queue(&kptllnd_data.kptl_sched_waitq, &waitlink);
 
                spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
 
-                counter = 0;
-        }
+               counter = 0;
+       }
 
        spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
 
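
kptllnd_scheduler() keeps its shape: a pass that did work yields with cond_resched() and loops again, and only a pass that found nothing sleeps on the queue (waitq_wait() being the transitional libcfs spelling of schedule()). A reduced sketch with a hypothetical name:

    static void demo_sched_idle(wait_queue_head_t *q, int did_something)
    {
            wait_queue_t wait;

            init_waitqueue_entry(&wait, current);
            set_current_state(TASK_INTERRUPTIBLE);
            add_wait_queue_exclusive(q, &wait);     /* wake_up() rouses one */

            if (!did_something)
                    schedule();                     /* idle: sleep until woken */
            else
                    cond_resched();                 /* busy: just yield the CPU */

            set_current_state(TASK_RUNNING);
            remove_wait_queue(q, &wait);
    }
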
index 6cc44f0..ebed094 100644 (file)
@@ -424,37 +424,37 @@ kptllnd_handle_closing_peers ()
 void
 kptllnd_peer_close_locked(kptl_peer_t *peer, int why)
 {
-        switch (peer->peer_state) {
-        default:
-                LBUG();
-
-        case PEER_STATE_WAITING_HELLO:
-        case PEER_STATE_ACTIVE:
-                /* Ensure new peers see a new incarnation of me */
-                LASSERT(peer->peer_myincarnation <= kptllnd_data.kptl_incarnation);
-                if (peer->peer_myincarnation == kptllnd_data.kptl_incarnation)
-                        kptllnd_data.kptl_incarnation++;
-
-                /* Removing from peer table */
-                kptllnd_data.kptl_n_active_peers--;
-                LASSERT (kptllnd_data.kptl_n_active_peers >= 0);
-
-                cfs_list_del(&peer->peer_list);
-                kptllnd_peer_unreserve_buffers();
-
-                peer->peer_error = why; /* stash 'why' only on first close */
-                peer->peer_state = PEER_STATE_CLOSING;
-
-                /* Schedule for immediate attention, taking peer table's ref */
-                cfs_list_add_tail(&peer->peer_list,
-                                 &kptllnd_data.kptl_closing_peers);
-                cfs_waitq_signal(&kptllnd_data.kptl_watchdog_waitq);
-                break;
-
-        case PEER_STATE_ZOMBIE:
-        case PEER_STATE_CLOSING:
-                break;
-        }
+       switch (peer->peer_state) {
+       default:
+               LBUG();
+
+       case PEER_STATE_WAITING_HELLO:
+       case PEER_STATE_ACTIVE:
+               /* Ensure new peers see a new incarnation of me */
+               LASSERT(peer->peer_myincarnation <= kptllnd_data.kptl_incarnation);
+               if (peer->peer_myincarnation == kptllnd_data.kptl_incarnation)
+                       kptllnd_data.kptl_incarnation++;
+
+               /* Removing from peer table */
+               kptllnd_data.kptl_n_active_peers--;
+               LASSERT (kptllnd_data.kptl_n_active_peers >= 0);
+
+               cfs_list_del(&peer->peer_list);
+               kptllnd_peer_unreserve_buffers();
+
+               peer->peer_error = why; /* stash 'why' only on first close */
+               peer->peer_state = PEER_STATE_CLOSING;
+
+               /* Schedule for immediate attention, taking peer table's ref */
+               cfs_list_add_tail(&peer->peer_list,
+                                &kptllnd_data.kptl_closing_peers);
+               wake_up(&kptllnd_data.kptl_watchdog_waitq);
+               break;
+
+       case PEER_STATE_ZOMBIE:
+       case PEER_STATE_CLOSING:
+               break;
+       }
 }
 
 void
index 094326c..f49c7eb 100644 (file)
@@ -478,14 +478,14 @@ kptllnd_rx_buffer_callback (ptl_event_t *ev)
                         rx->rx_treceived = jiffies;
                         /* Queue for attention */
                        spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
-                                              flags);
+                                             flags);
 
-                        cfs_list_add_tail(&rx->rx_list,
-                                          &kptllnd_data.kptl_sched_rxq);
-                        cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
+                       cfs_list_add_tail(&rx->rx_list,
+                                         &kptllnd_data.kptl_sched_rxq);
+                       wake_up(&kptllnd_data.kptl_sched_waitq);
 
                        spin_unlock_irqrestore(&kptllnd_data. \
-                                                   kptl_sched_lock, flags);
+                                                  kptl_sched_lock, flags);
                 }
         }
 
index fa2b392..9151d45 100644 (file)
@@ -512,15 +512,15 @@ kptllnd_tx_callback(ptl_event_t *ev)
 
        spin_unlock_irqrestore(&peer->peer_lock, flags);
 
-        /* drop peer's ref, but if it was the last one... */
-        if (cfs_atomic_dec_and_test(&tx->tx_refcount)) {
-                /* ...finalize it in thread context! */
+       /* drop peer's ref, but if it was the last one... */
+       if (cfs_atomic_dec_and_test(&tx->tx_refcount)) {
+               /* ...finalize it in thread context! */
                spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
 
-                cfs_list_add_tail(&tx->tx_list, &kptllnd_data.kptl_sched_txq);
-                cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
+               cfs_list_add_tail(&tx->tx_list, &kptllnd_data.kptl_sched_txq);
+               wake_up(&kptllnd_data.kptl_sched_waitq);
 
                spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
-                                           flags);
-        }
+                                          flags);
+       }
 }
index 1dce8d4..e62b094 100644 (file)
@@ -174,7 +174,7 @@ kqswnal_shutdown(lnet_ni_t *ni)
        /**********************************************************************/
        /* flag threads to terminate, wake them and wait for them to die */
        kqswnal_data.kqn_shuttingdown = 2;
-       cfs_waitq_broadcast (&kqswnal_data.kqn_sched_waitq);
+       wake_up_all (&kqswnal_data.kqn_sched_waitq);
 
        while (cfs_atomic_read (&kqswnal_data.kqn_nthreads) != 0) {
                CDEBUG(D_NET, "waiting for %d threads to terminate\n",
@@ -307,7 +307,7 @@ kqswnal_startup (lnet_ni_t *ni)
        CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_readyrxds);
 
        spin_lock_init(&kqswnal_data.kqn_sched_lock);
-       cfs_waitq_init (&kqswnal_data.kqn_sched_waitq);
+       init_waitqueue_head (&kqswnal_data.kqn_sched_waitq);
 
        /* pointers/lists/locks initialised */
        kqswnal_data.kqn_init = KQN_INIT_DATA;
index 49059d2..a55ba12 100644 (file)
@@ -254,41 +254,41 @@ typedef struct
 
 typedef struct
 {
-        char                 kqn_init;        /* what's been initialised */
-        char                 kqn_shuttingdown;/* I'm trying to shut down */
-        cfs_atomic_t         kqn_nthreads;    /* # threads running */
-        lnet_ni_t           *kqn_ni;          /* _the_ instance of me */
+       char                 kqn_init;        /* what's been initialised */
+       char                 kqn_shuttingdown;/* I'm trying to shut down */
+       cfs_atomic_t         kqn_nthreads;    /* # threads running */
+       lnet_ni_t           *kqn_ni;          /* _the_ instance of me */
 
-        kqswnal_rx_t        *kqn_rxds;        /* stack of all the receive descriptors */
-        kqswnal_tx_t        *kqn_txds;        /* stack of all the transmit descriptors */
+       kqswnal_rx_t        *kqn_rxds;        /* stack of all the receive descriptors */
+       kqswnal_tx_t        *kqn_txds;        /* stack of all the transmit descriptors */
 
-        cfs_list_t           kqn_idletxds;    /* transmit descriptors free to use */
-        cfs_list_t           kqn_activetxds;  /* transmit descriptors being used */
+       cfs_list_t           kqn_idletxds;    /* transmit descriptors free to use */
+       cfs_list_t           kqn_activetxds;  /* transmit descriptors being used */
        spinlock_t      kqn_idletxd_lock;    /* serialise idle txd access */
        cfs_atomic_t    kqn_pending_txs;     /* # transmits being prepped */
 
        spinlock_t      kqn_sched_lock;      /* serialise packet schedulers */
-        cfs_waitq_t          kqn_sched_waitq;/* scheduler blocks here */
-
-        cfs_list_t           kqn_readyrxds;  /* rxds full of data */
-        cfs_list_t           kqn_donetxds;   /* completed transmits */
-        cfs_list_t           kqn_delayedtxds;/* delayed transmits */
-
-        EP_SYS              *kqn_ep;         /* elan system */
-        EP_NMH              *kqn_ep_tx_nmh;  /* elan reserved tx vaddrs */
-        EP_NMH              *kqn_ep_rx_nmh;  /* elan reserved rx vaddrs */
-        EP_XMTR             *kqn_eptx;       /* elan transmitter */
-        EP_RCVR             *kqn_eprx_small; /* elan receiver (small messages) */
-        EP_RCVR             *kqn_eprx_large; /* elan receiver (large messages) */
-
-        int                  kqn_nnodes;     /* this cluster's size */
-        int                  kqn_elanid;     /* this nodes's elan ID */
-
-        EP_STATUSBLK         kqn_rpc_success;/* preset RPC reply status blocks */
-        EP_STATUSBLK         kqn_rpc_failed;
-        EP_STATUSBLK         kqn_rpc_version;/* reply to future version query */
-        EP_STATUSBLK         kqn_rpc_magic;  /* reply to future version query */
-}  kqswnal_data_t;
+       wait_queue_head_t    kqn_sched_waitq;/* scheduler blocks here */
+
+       cfs_list_t           kqn_readyrxds;  /* rxds full of data */
+       cfs_list_t           kqn_donetxds;   /* completed transmits */
+       cfs_list_t           kqn_delayedtxds;/* delayed transmits */
+
+       EP_SYS              *kqn_ep;         /* elan system */
+       EP_NMH              *kqn_ep_tx_nmh;  /* elan reserved tx vaddrs */
+       EP_NMH              *kqn_ep_rx_nmh;  /* elan reserved rx vaddrs */
+       EP_XMTR             *kqn_eptx;       /* elan transmitter */
+       EP_RCVR             *kqn_eprx_small; /* elan receiver (small messages) */
+       EP_RCVR             *kqn_eprx_large; /* elan receiver (large messages) */
+
+       int                  kqn_nnodes;     /* this cluster's size */
+       int                  kqn_elanid;     /* this nodes's elan ID */
+
+       EP_STATUSBLK         kqn_rpc_success;/* preset RPC reply status blocks */
+       EP_STATUSBLK         kqn_rpc_failed;
+       EP_STATUSBLK         kqn_rpc_version;/* reply to future version query */
+       EP_STATUSBLK         kqn_rpc_magic;  /* reply to future version query */
+} kqswnal_data_t;
 
 /* kqn_init state */
 #define KQN_INIT_NOTHING        0               /* MUST BE ZERO so zeroed state is initialised OK */
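
In the data-structure declarations the change is a pure type swap, cfs_waitq_t to wait_queue_head_t; each embedded queue must still be initialised with init_waitqueue_head() before the first wait or wake_up(), as kqswnal_startup() does above. A minimal sketch; the struct and its fields are hypothetical:

    #include <linux/spinlock.h>
    #include <linux/wait.h>

    struct demo_data {
            spinlock_t              lock;   /* serialise */
            wait_queue_head_t       waitq;  /* was cfs_waitq_t */
    };

    static void demo_data_init(struct demo_data *d)
    {
            spin_lock_init(&d->lock);
            init_waitqueue_head(&d->waitq); /* was cfs_waitq_init() */
    }
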
index a469d3d..203cddc 100644 (file)
@@ -518,7 +518,7 @@ kqswnal_tx_done (kqswnal_tx_t *ktx, int status)
 
        cfs_list_add_tail(&ktx->ktx_schedlist,
                           &kqswnal_data.kqn_donetxds);
-       cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
+       wake_up(&kqswnal_data.kqn_sched_waitq);
 
        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
 }
@@ -669,7 +669,7 @@ kqswnal_launch (kqswnal_tx_t *ktx)
 
                cfs_list_add_tail(&ktx->ktx_schedlist,
                                  &kqswnal_data.kqn_delayedtxds);
-               cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
+               wake_up(&kqswnal_data.kqn_sched_waitq);
 
                spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
                                             flags);
@@ -1542,7 +1542,7 @@ kqswnal_rxhandler(EP_RXD *rxd)
        spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
 
        cfs_list_add_tail(&krx->krx_list, &kqswnal_data.kqn_readyrxds);
-       cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
+       wake_up(&kqswnal_data.kqn_sched_waitq);
 
        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
 }
@@ -1764,15 +1764,15 @@ kqswnal_scheduler (void *arg)
                                                        kqn_donetxds) ||
                                        !cfs_list_empty(&kqswnal_data. \
                                                        kqn_delayedtxds));
-                                LASSERT (rc == 0);
-                        } else if (need_resched())
-                                cfs_schedule ();
+                               LASSERT (rc == 0);
+                       } else if (need_resched())
+                               schedule ();
 
                        spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
-                                               flags);
-                }
-        }
+                                              flags);
+               }
+       }
 
-        kqswnal_thread_fini ();
-        return (0);
+       kqswnal_thread_fini ();
+       return 0;
 }
index 189db2e..91ff1b1 100644 (file)
@@ -430,44 +430,44 @@ int
 kranal_set_conn_params(kra_conn_t *conn, kra_connreq_t *connreq,
                        __u32 peer_ip, int peer_port)
 {
-        kra_device_t  *dev = conn->rac_device;
-        unsigned long  flags;
-        RAP_RETURN     rrc;
-
-        /* CAVEAT EMPTOR: we're really overloading rac_last_tx + rac_keepalive
-         * to do RapkCompleteSync() timekeeping (see kibnal_scheduler). */
-        conn->rac_last_tx = jiffies;
-        conn->rac_keepalive = 0;
-
-        rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
-        if (rrc != RAP_SUCCESS) {
-                CERROR("Error setting riparams from %u.%u.%u.%u/%d: %d\n",
-                       HIPQUAD(peer_ip), peer_port, rrc);
-                return -ECONNABORTED;
-        }
-
-        /* Schedule conn on rad_new_conns */
-        kranal_conn_addref(conn);
+       kra_device_t  *dev = conn->rac_device;
+       unsigned long  flags;
+       RAP_RETURN     rrc;
+
+       /* CAVEAT EMPTOR: we're really overloading rac_last_tx + rac_keepalive
+        * to do RapkCompleteSync() timekeeping (see kibnal_scheduler). */
+       conn->rac_last_tx = jiffies;
+       conn->rac_keepalive = 0;
+
+       rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
+       if (rrc != RAP_SUCCESS) {
+               CERROR("Error setting riparams from %u.%u.%u.%u/%d: %d\n",
+                      HIPQUAD(peer_ip), peer_port, rrc);
+               return -ECONNABORTED;
+       }
+
+       /* Schedule conn on rad_new_conns */
+       kranal_conn_addref(conn);
        spin_lock_irqsave(&dev->rad_lock, flags);
-        cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns);
-        cfs_waitq_signal(&dev->rad_waitq);
+       cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns);
+       wake_up(&dev->rad_waitq);
        spin_unlock_irqrestore(&dev->rad_lock, flags);
 
-        rrc = RapkWaitToConnect(conn->rac_rihandle);
-        if (rrc != RAP_SUCCESS) {
-                CERROR("Error waiting to connect to %u.%u.%u.%u/%d: %d\n",
-                       HIPQUAD(peer_ip), peer_port, rrc);
-                return -ECONNABORTED;
-        }
-
-        /* Scheduler doesn't touch conn apart from to deschedule and decref it
-         * after RapkCompleteSync() return success, so conn is all mine */
-
-        conn->rac_peerstamp = connreq->racr_peerstamp;
-        conn->rac_peer_connstamp = connreq->racr_connstamp;
-        conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq->racr_timeout);
-        kranal_update_reaper_timeout(conn->rac_keepalive);
-        return 0;
+       rrc = RapkWaitToConnect(conn->rac_rihandle);
+       if (rrc != RAP_SUCCESS) {
+               CERROR("Error waiting to connect to %u.%u.%u.%u/%d: %d\n",
+                      HIPQUAD(peer_ip), peer_port, rrc);
+               return -ECONNABORTED;
+       }
+
+       /* Scheduler doesn't touch conn apart from to deschedule and decref it
+        * after RapkCompleteSync() return success, so conn is all mine */
+
+       conn->rac_peerstamp = connreq->racr_peerstamp;
+       conn->rac_peer_connstamp = connreq->racr_connstamp;
+       conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq->racr_timeout);
+       kranal_update_reaper_timeout(conn->rac_keepalive);
+       return 0;
 }
 
 int
@@ -871,31 +871,31 @@ kranal_free_acceptsock (kra_acceptsock_t *ras)
 int
 kranal_accept (lnet_ni_t *ni, struct socket *sock)
 {
-        kra_acceptsock_t  *ras;
-        int                rc;
-        __u32              peer_ip;
-        int                peer_port;
-        unsigned long      flags;
+       kra_acceptsock_t  *ras;
+       int                rc;
+       __u32              peer_ip;
+       int                peer_port;
+       unsigned long      flags;
 
-        rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
-        LASSERT (rc == 0);                      /* we succeeded before */
+       rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
+       LASSERT (rc == 0);                      /* we succeeded before */
 
-        LIBCFS_ALLOC(ras, sizeof(*ras));
-        if (ras == NULL) {
-                CERROR("ENOMEM allocating connection request from "
-                       "%u.%u.%u.%u\n", HIPQUAD(peer_ip));
-                return -ENOMEM;
-        }
+       LIBCFS_ALLOC(ras, sizeof(*ras));
+       if (ras == NULL) {
+               CERROR("ENOMEM allocating connection request from "
+                      "%u.%u.%u.%u\n", HIPQUAD(peer_ip));
+               return -ENOMEM;
+       }
 
-        ras->ras_sock = sock;
+       ras->ras_sock = sock;
 
        spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
 
-        cfs_list_add_tail(&ras->ras_list, &kranal_data.kra_connd_acceptq);
-        cfs_waitq_signal(&kranal_data.kra_connd_waitq);
+       cfs_list_add_tail(&ras->ras_list, &kranal_data.kra_connd_acceptq);
+       wake_up(&kranal_data.kra_connd_waitq);
 
        spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
-        return 0;
+       return 0;
 }
 
 int
@@ -1498,21 +1498,21 @@ kranal_shutdown (lnet_ni_t *ni)
         /* Flag threads to terminate */
         kranal_data.kra_shutdown = 1;
 
-        for (i = 0; i < kranal_data.kra_ndevs; i++) {
-                kra_device_t *dev = &kranal_data.kra_devices[i];
+       for (i = 0; i < kranal_data.kra_ndevs; i++) {
+               kra_device_t *dev = &kranal_data.kra_devices[i];
 
                spin_lock_irqsave(&dev->rad_lock, flags);
-                cfs_waitq_signal(&dev->rad_waitq);
+               wake_up(&dev->rad_waitq);
                spin_unlock_irqrestore(&dev->rad_lock, flags);
-        }
+       }
 
        spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
-        cfs_waitq_broadcast(&kranal_data.kra_reaper_waitq);
+       wake_up_all(&kranal_data.kra_reaper_waitq);
        spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
 
-        LASSERT (cfs_list_empty(&kranal_data.kra_connd_peers));
+       LASSERT (cfs_list_empty(&kranal_data.kra_connd_peers));
        spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
-        cfs_waitq_broadcast(&kranal_data.kra_connd_waitq);
+       wake_up_all(&kranal_data.kra_connd_waitq);
        spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
 
         /* Wait for threads to exit */
@@ -1607,23 +1607,23 @@ kranal_startup (lnet_ni_t *ni)
 
        rwlock_init(&kranal_data.kra_global_lock);
 
-        for (i = 0; i < RANAL_MAXDEVS; i++ ) {
-                kra_device_t  *dev = &kranal_data.kra_devices[i];
+       for (i = 0; i < RANAL_MAXDEVS; i++ ) {
+               kra_device_t  *dev = &kranal_data.kra_devices[i];
 
-                dev->rad_idx = i;
-                CFS_INIT_LIST_HEAD(&dev->rad_ready_conns);
-                CFS_INIT_LIST_HEAD(&dev->rad_new_conns);
-                cfs_waitq_init(&dev->rad_waitq);
+               dev->rad_idx = i;
+               CFS_INIT_LIST_HEAD(&dev->rad_ready_conns);
+               CFS_INIT_LIST_HEAD(&dev->rad_new_conns);
+               init_waitqueue_head(&dev->rad_waitq);
                spin_lock_init(&dev->rad_lock);
-        }
+       }
 
-        kranal_data.kra_new_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
-        cfs_waitq_init(&kranal_data.kra_reaper_waitq);
+       kranal_data.kra_new_min_timeout = MAX_SCHEDULE_TIMEOUT;
+       init_waitqueue_head(&kranal_data.kra_reaper_waitq);
        spin_lock_init(&kranal_data.kra_reaper_lock);
 
-        CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
-        CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
-        cfs_waitq_init(&kranal_data.kra_connd_waitq);
+       CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
+       CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
+       init_waitqueue_head(&kranal_data.kra_connd_waitq);
        spin_lock_init(&kranal_data.kra_connd_lock);
 
         CFS_INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
index afec808..bfc863f 100644 (file)
@@ -104,58 +104,58 @@ typedef struct
 
 typedef struct
 {
-        RAP_PVOID              rad_handle;    /* device handle */
-        RAP_PVOID              rad_fma_cqh;   /* FMA completion queue handle */
-        RAP_PVOID              rad_rdma_cqh;  /* rdma completion queue handle */
-        int                    rad_id;        /* device id */
-        int                    rad_idx;       /* index in kra_devices */
-        int                    rad_ready;     /* set by device callback */
-        cfs_list_t             rad_ready_conns;/* connections ready to tx/rx */
-        cfs_list_t             rad_new_conns; /* new connections to complete */
-        cfs_waitq_t            rad_waitq;     /* scheduler waits here */
-       spinlock_t              rad_lock;       /* serialise */
-        void                  *rad_scheduler; /* scheduling thread */
-        unsigned int           rad_nphysmap;  /* # phys mappings */
-        unsigned int           rad_nppphysmap;/* # phys pages mapped */
-        unsigned int           rad_nvirtmap;  /* # virt mappings */
-        unsigned long          rad_nobvirtmap;/* # virt bytes mapped */
+       RAP_PVOID              rad_handle;    /* device handle */
+       RAP_PVOID              rad_fma_cqh;   /* FMA completion queue handle */
+       RAP_PVOID              rad_rdma_cqh;  /* rdma completion queue handle */
+       int                    rad_id;        /* device id */
+       int                    rad_idx;       /* index in kra_devices */
+       int                    rad_ready;     /* set by device callback */
+       cfs_list_t             rad_ready_conns;/* connections ready to tx/rx */
+       cfs_list_t             rad_new_conns; /* new connections to complete */
+       wait_queue_head_t      rad_waitq;     /* scheduler waits here */
+       spinlock_t             rad_lock;        /* serialise */
+       void                   *rad_scheduler; /* scheduling thread */
+       unsigned int           rad_nphysmap;  /* # phys mappings */
+       unsigned int           rad_nppphysmap;/* # phys pages mapped */
+       unsigned int           rad_nvirtmap;  /* # virt mappings */
+       unsigned long          rad_nobvirtmap;/* # virt bytes mapped */
 } kra_device_t;
 
 typedef struct
 {
-        int               kra_init;            /* initialisation state */
-        int               kra_shutdown;        /* shut down? */
-        cfs_atomic_t      kra_nthreads;        /* # live threads */
-        lnet_ni_t        *kra_ni;              /* _the_ nal instance */
+       int               kra_init;            /* initialisation state */
+       int               kra_shutdown;        /* shut down? */
+       cfs_atomic_t      kra_nthreads;        /* # live threads */
+       lnet_ni_t        *kra_ni;              /* _the_ nal instance */
 
-        kra_device_t      kra_devices[RANAL_MAXDEVS]; /* device/ptag/cq */
-        int               kra_ndevs;           /* # devices */
+       kra_device_t      kra_devices[RANAL_MAXDEVS]; /* device/ptag/cq */
+       int               kra_ndevs;           /* # devices */
 
        rwlock_t          kra_global_lock;      /* stabilize peer/conn ops */
 
-        cfs_list_t       *kra_peers;           /* hash table of all my known peers */
-        int               kra_peer_hash_size;  /* size of kra_peers */
-        cfs_atomic_t      kra_npeers;          /* # peers extant */
-        int               kra_nonewpeers;      /* prevent new peers */
+       cfs_list_t       *kra_peers;           /* hash table of all my known peers */
+       int               kra_peer_hash_size;  /* size of kra_peers */
+       cfs_atomic_t      kra_npeers;          /* # peers extant */
+       int               kra_nonewpeers;      /* prevent new peers */
 
-        cfs_list_t       *kra_conns;           /* conns hashed by cqid */
-        int               kra_conn_hash_size;  /* size of kra_conns */
-        __u64             kra_peerstamp;       /* when I started up */
-        __u64             kra_connstamp;       /* conn stamp generator */
-        int               kra_next_cqid;       /* cqid generator */
-        cfs_atomic_t      kra_nconns;          /* # connections extant */
+       cfs_list_t       *kra_conns;           /* conns hashed by cqid */
+       int               kra_conn_hash_size;  /* size of kra_conns */
+       __u64             kra_peerstamp;       /* when I started up */
+       __u64             kra_connstamp;       /* conn stamp generator */
+       int               kra_next_cqid;       /* cqid generator */
+       cfs_atomic_t      kra_nconns;          /* # connections extant */
 
-        long              kra_new_min_timeout; /* minimum timeout on any new conn */
-        cfs_waitq_t       kra_reaper_waitq;    /* reaper sleeps here */
+       long              kra_new_min_timeout; /* minimum timeout on any new conn */
+       wait_queue_head_t       kra_reaper_waitq;    /* reaper sleeps here */
        spinlock_t        kra_reaper_lock;     /* serialise */
 
-        cfs_list_t        kra_connd_peers;     /* peers waiting for a connection */
-        cfs_list_t        kra_connd_acceptq;   /* accepted sockets to handshake */
-        cfs_waitq_t       kra_connd_waitq;     /* connection daemons sleep here */
+       cfs_list_t        kra_connd_peers;     /* peers waiting for a connection */
+       cfs_list_t        kra_connd_acceptq;   /* accepted sockets to handshake */
+       wait_queue_head_t       kra_connd_waitq;     /* connection daemons sleep here */
        spinlock_t        kra_connd_lock;       /* serialise */
 
-        cfs_list_t        kra_idle_txs;        /* idle tx descriptors */
-        __u64             kra_next_tx_cookie;  /* RDMA completion cookie */
+       cfs_list_t        kra_idle_txs;        /* idle tx descriptors */
+       __u64             kra_next_tx_cookie;  /* RDMA completion cookie */
        spinlock_t        kra_tx_lock;          /* serialise */
 } kra_data_t;
 
index d9ca41b..ba744c6 100644 (file)
@@ -57,10 +57,10 @@ kranal_device_callback(RAP_INT32 devid, RAP_PVOID arg)
 
                spin_lock_irqsave(&dev->rad_lock, flags);
 
-                if (!dev->rad_ready) {
-                        dev->rad_ready = 1;
-                        cfs_waitq_signal(&dev->rad_waitq);
-                }
+               if (!dev->rad_ready) {
+                       dev->rad_ready = 1;
+                       wake_up(&dev->rad_waitq);
+               }
 
                spin_unlock_irqrestore(&dev->rad_lock, flags);
                 return;
@@ -77,12 +77,12 @@ kranal_schedule_conn(kra_conn_t *conn)
 
        spin_lock_irqsave(&dev->rad_lock, flags);
 
-        if (!conn->rac_scheduled) {
-                kranal_conn_addref(conn);       /* +1 ref for scheduler */
-                conn->rac_scheduled = 1;
-                cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_ready_conns);
-                cfs_waitq_signal(&dev->rad_waitq);
-        }
+       if (!conn->rac_scheduled) {
+               kranal_conn_addref(conn);       /* +1 ref for scheduler */
+               conn->rac_scheduled = 1;
+               cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_ready_conns);
+               wake_up(&dev->rad_waitq);
+       }
 
        spin_unlock_irqrestore(&dev->rad_lock, flags);
 }
@@ -523,9 +523,9 @@ kranal_launch_tx (kra_tx_t *tx, lnet_nid_t nid)
 
                spin_lock(&kranal_data.kra_connd_lock);
 
-                cfs_list_add_tail(&peer->rap_connd_list,
-                              &kranal_data.kra_connd_peers);
-                cfs_waitq_signal(&kranal_data.kra_connd_waitq);
+               cfs_list_add_tail(&peer->rap_connd_list,
+                             &kranal_data.kra_connd_peers);
+               wake_up(&kranal_data.kra_connd_waitq);
 
                spin_unlock(&kranal_data.kra_connd_lock);
         }
@@ -1051,78 +1051,78 @@ kranal_reaper_check (int idx, unsigned long *min_timeoutp)
 int
 kranal_connd (void *arg)
 {
-        long               id = (long)arg;
-        cfs_waitlink_t     wait;
-        unsigned long      flags;
-        kra_peer_t        *peer;
-        kra_acceptsock_t  *ras;
-        int                did_something;
+       long               id = (long)arg;
+       wait_queue_t     wait;
+       unsigned long      flags;
+       kra_peer_t        *peer;
+       kra_acceptsock_t  *ras;
+       int                did_something;
 
-        cfs_block_allsigs();
+       cfs_block_allsigs();
 
-        cfs_waitlink_init(&wait);
+       init_waitqueue_entry_current(&wait);
 
        spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
 
-        while (!kranal_data.kra_shutdown) {
-                did_something = 0;
+       while (!kranal_data.kra_shutdown) {
+               did_something = 0;
 
-                if (!cfs_list_empty(&kranal_data.kra_connd_acceptq)) {
-                        ras = cfs_list_entry(kranal_data.kra_connd_acceptq.next,
-                                             kra_acceptsock_t, ras_list);
-                        cfs_list_del(&ras->ras_list);
+               if (!cfs_list_empty(&kranal_data.kra_connd_acceptq)) {
+                       ras = cfs_list_entry(kranal_data.kra_connd_acceptq.next,
+                                            kra_acceptsock_t, ras_list);
+                       cfs_list_del(&ras->ras_list);
 
                        spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
-                                                   flags);
+                                                  flags);
 
-                        CDEBUG(D_NET,"About to handshake someone\n");
+                       CDEBUG(D_NET,"About to handshake someone\n");
 
-                        kranal_conn_handshake(ras->ras_sock, NULL);
-                        kranal_free_acceptsock(ras);
+                       kranal_conn_handshake(ras->ras_sock, NULL);
+                       kranal_free_acceptsock(ras);
 
-                        CDEBUG(D_NET,"Finished handshaking someone\n");
+                       CDEBUG(D_NET,"Finished handshaking someone\n");
 
                        spin_lock_irqsave(&kranal_data.kra_connd_lock,
-                                              flags);
-                        did_something = 1;
-                }
+                                             flags);
+                       did_something = 1;
+               }
 
-                if (!cfs_list_empty(&kranal_data.kra_connd_peers)) {
-                        peer = cfs_list_entry(kranal_data.kra_connd_peers.next,
-                                              kra_peer_t, rap_connd_list);
+               if (!cfs_list_empty(&kranal_data.kra_connd_peers)) {
+                       peer = cfs_list_entry(kranal_data.kra_connd_peers.next,
+                                             kra_peer_t, rap_connd_list);
 
-                        cfs_list_del_init(&peer->rap_connd_list);
+                       cfs_list_del_init(&peer->rap_connd_list);
                        spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
-                                                   flags);
+                                                  flags);
 
-                        kranal_connect(peer);
-                        kranal_peer_decref(peer);
+                       kranal_connect(peer);
+                       kranal_peer_decref(peer);
 
                        spin_lock_irqsave(&kranal_data.kra_connd_lock,
-                                              flags);
-                        did_something = 1;
-                }
+                                             flags);
+                       did_something = 1;
+               }
 
-                if (did_something)
-                        continue;
+               if (did_something)
+                       continue;
 
-                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-                cfs_waitq_add_exclusive(&kranal_data.kra_connd_waitq, &wait);
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue_exclusive(&kranal_data.kra_connd_waitq, &wait);
 
                spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
 
-                cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
+               waitq_wait(&wait, TASK_INTERRUPTIBLE);
 
-                cfs_set_current_state(CFS_TASK_RUNNING);
-                cfs_waitq_del(&kranal_data.kra_connd_waitq, &wait);
+               set_current_state(TASK_RUNNING);
+               remove_wait_queue(&kranal_data.kra_connd_waitq, &wait);
 
                spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
-        }
+       }
 
        spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
 
-        kranal_thread_fini();
-        return 0;
+       kranal_thread_fini();
+       return 0;
 }
 
 void
@@ -1143,120 +1143,120 @@ kranal_update_reaper_timeout(long timeout)
 int
 kranal_reaper (void *arg)
 {
-        cfs_waitlink_t     wait;
-        unsigned long      flags;
-        long               timeout;
-        int                i;
-        int                conn_entries = kranal_data.kra_conn_hash_size;
-        int                conn_index = 0;
-        int                base_index = conn_entries - 1;
-        unsigned long      next_check_time = jiffies;
-        long               next_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
-        long               current_min_timeout = 1;
+       wait_queue_t     wait;
+       unsigned long      flags;
+       long               timeout;
+       int                i;
+       int                conn_entries = kranal_data.kra_conn_hash_size;
+       int                conn_index = 0;
+       int                base_index = conn_entries - 1;
+       unsigned long      next_check_time = jiffies;
+       long               next_min_timeout = MAX_SCHEDULE_TIMEOUT;
+       long               current_min_timeout = 1;
 
-        cfs_block_allsigs();
+       cfs_block_allsigs();
 
-        cfs_waitlink_init(&wait);
+       init_waitqueue_entry_current(&wait);
 
        spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
 
-        while (!kranal_data.kra_shutdown) {
-                /* I wake up every 'p' seconds to check for timeouts on some
-                 * more peers.  I try to check every connection 'n' times
-                 * within the global minimum of all keepalive and timeout
-                 * intervals, to ensure I attend to every connection within
-                 * (n+1)/n times its timeout intervals. */
-                const int     p = 1;
-                const int     n = 3;
-                unsigned long min_timeout;
-                int           chunk;
-
-                /* careful with the jiffy wrap... */
-                timeout = (long)(next_check_time - jiffies);
-                if (timeout > 0) {
-                        cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-                        cfs_waitq_add(&kranal_data.kra_reaper_waitq, &wait);
+       while (!kranal_data.kra_shutdown) {
+               /* I wake up every 'p' seconds to check for timeouts on some
+                * more peers.  I try to check every connection 'n' times
+                * within the global minimum of all keepalive and timeout
+                * intervals, to ensure I attend to every connection within
+                * (n+1)/n times its timeout intervals. */
+               const int     p = 1;
+               const int     n = 3;
+               unsigned long min_timeout;
+               int           chunk;
+
+               /* careful with the jiffy wrap... */
+               timeout = (long)(next_check_time - jiffies);
+               if (timeout > 0) {
+                       set_current_state(TASK_INTERRUPTIBLE);
+                       add_wait_queue(&kranal_data.kra_reaper_waitq, &wait);
 
                        spin_unlock_irqrestore(&kranal_data.kra_reaper_lock,
-                                                   flags);
+                                                  flags);
 
-                        cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
-                                            timeout);
+                       waitq_timedwait(&wait, TASK_INTERRUPTIBLE,
+                                           timeout);
 
                        spin_lock_irqsave(&kranal_data.kra_reaper_lock,
-                                              flags);
+                                             flags);
 
-                        cfs_set_current_state(CFS_TASK_RUNNING);
-                        cfs_waitq_del(&kranal_data.kra_reaper_waitq, &wait);
-                        continue;
-                }
+                       set_current_state(TASK_RUNNING);
+                       remove_wait_queue(&kranal_data.kra_reaper_waitq, &wait);
+                       continue;
+               }
 
-                if (kranal_data.kra_new_min_timeout !=
-                    CFS_MAX_SCHEDULE_TIMEOUT) {
-                        /* new min timeout set: restart min timeout scan */
-                        next_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
-                        base_index = conn_index - 1;
-                        if (base_index < 0)
-                                base_index = conn_entries - 1;
-
-                        if (kranal_data.kra_new_min_timeout <
-                            current_min_timeout) {
-                                current_min_timeout =
-                                        kranal_data.kra_new_min_timeout;
-                                CDEBUG(D_NET, "Set new min timeout %ld\n",
-                                       current_min_timeout);
-                        }
+               if (kranal_data.kra_new_min_timeout !=
+                   MAX_SCHEDULE_TIMEOUT) {
+                       /* new min timeout set: restart min timeout scan */
+                       next_min_timeout = MAX_SCHEDULE_TIMEOUT;
+                       base_index = conn_index - 1;
+                       if (base_index < 0)
+                               base_index = conn_entries - 1;
+
+                       if (kranal_data.kra_new_min_timeout <
+                           current_min_timeout) {
+                               current_min_timeout =
+                                       kranal_data.kra_new_min_timeout;
+                               CDEBUG(D_NET, "Set new min timeout %ld\n",
+                                      current_min_timeout);
+                       }
 
-                        kranal_data.kra_new_min_timeout =
-                                CFS_MAX_SCHEDULE_TIMEOUT;
-                }
-                min_timeout = current_min_timeout;
+                       kranal_data.kra_new_min_timeout =
+                               MAX_SCHEDULE_TIMEOUT;
+               }
+               min_timeout = current_min_timeout;
 
                spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
 
-                LASSERT (min_timeout > 0);
-
-                /* Compute how many table entries to check now so I get round
-                 * the whole table fast enough given that I do this at fixed
-                 * intervals of 'p' seconds) */
-                chunk = conn_entries;
-                if (min_timeout > n * p)
-                        chunk = (chunk * n * p) / min_timeout;
-                if (chunk == 0)
-                        chunk = 1;
-
-                for (i = 0; i < chunk; i++) {
-                        kranal_reaper_check(conn_index,
-                                            &next_min_timeout);
-                        conn_index = (conn_index + 1) % conn_entries;
-                }
+               LASSERT (min_timeout > 0);
+
+               /* Compute how many table entries to check now so I get round
+                * the whole table fast enough given that I do this at fixed
+                * intervals of 'p' seconds) */
+               chunk = conn_entries;
+               if (min_timeout > n * p)
+                       chunk = (chunk * n * p) / min_timeout;
+               if (chunk == 0)
+                       chunk = 1;
+
+               for (i = 0; i < chunk; i++) {
+                       kranal_reaper_check(conn_index,
+                                           &next_min_timeout);
+                       conn_index = (conn_index + 1) % conn_entries;
+               }
 
                next_check_time += p * HZ;
 
                spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
 
-                if (((conn_index - chunk <= base_index &&
-                      base_index < conn_index) ||
-                     (conn_index - conn_entries - chunk <= base_index &&
-                      base_index < conn_index - conn_entries))) {
+               if (((conn_index - chunk <= base_index &&
+                     base_index < conn_index) ||
+                    (conn_index - conn_entries - chunk <= base_index &&
+                     base_index < conn_index - conn_entries))) {
 
-                        /* Scanned all conns: set current_min_timeout... */
-                        if (current_min_timeout != next_min_timeout) {
-                                current_min_timeout = next_min_timeout;
-                                CDEBUG(D_NET, "Set new min timeout %ld\n",
-                                       current_min_timeout);
-                        }
+                       /* Scanned all conns: set current_min_timeout... */
+                       if (current_min_timeout != next_min_timeout) {
+                               current_min_timeout = next_min_timeout;
+                               CDEBUG(D_NET, "Set new min timeout %ld\n",
+                                      current_min_timeout);
+                       }
 
-                        /* ...and restart min timeout scan */
-                        next_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
-                        base_index = conn_index - 1;
-                        if (base_index < 0)
-                                base_index = conn_entries - 1;
-                }
-        }
+                       /* ...and restart min timeout scan */
+                       next_min_timeout = MAX_SCHEDULE_TIMEOUT;
+                       base_index = conn_index - 1;
+                       if (base_index < 0)
+                               base_index = conn_entries - 1;
+               }
+       }
 
-        kranal_thread_fini();
-        return 0;
+       kranal_thread_fini();
+       return 0;
 }
 
 void
@@ -1923,9 +1923,9 @@ int kranal_process_new_conn (kra_conn_t *conn)
 int
 kranal_scheduler (void *arg)
 {
-        kra_device_t     *dev = (kra_device_t *)arg;
-        cfs_waitlink_t    wait;
-        kra_conn_t       *conn;
+       kra_device_t     *dev = (kra_device_t *)arg;
+       wait_queue_t    wait;
+       kra_conn_t       *conn;
         unsigned long     flags;
         unsigned long     deadline;
         unsigned long     soonest;
@@ -1939,8 +1939,8 @@ kranal_scheduler (void *arg)
 
         cfs_block_allsigs();
 
-        dev->rad_scheduler = current;
-        cfs_waitlink_init(&wait);
+       dev->rad_scheduler = current;
+       init_waitqueue_entry_current(&wait);
 
        spin_lock_irqsave(&dev->rad_lock, flags);
 
@@ -1950,8 +1950,8 @@ kranal_scheduler (void *arg)
                 if (busy_loops++ >= RANAL_RESCHED) {
                        spin_unlock_irqrestore(&dev->rad_lock, flags);
 
-                        cfs_cond_resched();
-                        busy_loops = 0;
+                       cond_resched();
+                       busy_loops = 0;
 
                        spin_lock_irqsave(&dev->rad_lock, flags);
                 }
@@ -2039,27 +2039,27 @@ kranal_scheduler (void *arg)
                 if (dropped_lock)               /* may sleep iff I didn't drop the lock */
                         continue;
 
-                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-                cfs_waitq_add_exclusive(&dev->rad_waitq, &wait);
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue_exclusive(&dev->rad_waitq, &wait);
                spin_unlock_irqrestore(&dev->rad_lock, flags);
 
-                if (nsoonest == 0) {
-                        busy_loops = 0;
-                        cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
-                } else {
-                        timeout = (long)(soonest - jiffies);
-                        if (timeout > 0) {
-                                busy_loops = 0;
-                                cfs_waitq_timedwait(&wait,
-                                                    CFS_TASK_INTERRUPTIBLE,
-                                                    timeout);
-                        }
-                }
+               if (nsoonest == 0) {
+                       busy_loops = 0;
+                       waitq_wait(&wait, TASK_INTERRUPTIBLE);
+               } else {
+                       timeout = (long)(soonest - jiffies);
+                       if (timeout > 0) {
+                               busy_loops = 0;
+                               waitq_timedwait(&wait,
+                                                   TASK_INTERRUPTIBLE,
+                                                   timeout);
+                       }
+               }
 
-                cfs_waitq_del(&dev->rad_waitq, &wait);
-                cfs_set_current_state(CFS_TASK_RUNNING);
+               remove_wait_queue(&dev->rad_waitq, &wait);
+               set_current_state(TASK_RUNNING);
                spin_lock_irqsave(&dev->rad_lock, flags);
-        }
+       }
 
        spin_unlock_irqrestore(&dev->rad_lock, flags);
 
index 37fbe28..8783fe7 100644 (file)
@@ -993,8 +993,8 @@ ksocknal_accept (lnet_ni_t *ni, cfs_socket_t *sock)
 
        spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
 
-        cfs_list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
-        cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
+       cfs_list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
+       wake_up(&ksocknal_data.ksnd_connd_waitq);
 
        spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
         return 0;
@@ -1484,7 +1484,7 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
 
        cfs_list_add_tail(&conn->ksnc_list,
                          &ksocknal_data.ksnd_deathrow_conns);
-       cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);
+       wake_up(&ksocknal_data.ksnd_reaper_waitq);
 
        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 }
@@ -1578,10 +1578,10 @@ ksocknal_terminate_conn (ksock_conn_t *conn)
                                &sched->kss_tx_conns);
                 conn->ksnc_tx_scheduled = 1;
                 /* extra ref for scheduler */
-                ksocknal_conn_addref(conn);
+               ksocknal_conn_addref(conn);
 
-                cfs_waitq_signal (&sched->kss_waitq);
-        }
+               wake_up (&sched->kss_waitq);
+       }
 
        spin_unlock_bh(&sched->kss_lock);
 
@@ -1623,7 +1623,7 @@ ksocknal_queue_zombie_conn (ksock_conn_t *conn)
        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 
        cfs_list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
-       cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);
+       wake_up(&ksocknal_data.ksnd_reaper_waitq);
 
        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 }
@@ -2311,8 +2311,8 @@ ksocknal_base_shutdown(void)
 
                /* flag threads to terminate; wake and wait for them to die */
                ksocknal_data.ksnd_shuttingdown = 1;
-               cfs_waitq_broadcast(&ksocknal_data.ksnd_connd_waitq);
-               cfs_waitq_broadcast(&ksocknal_data.ksnd_reaper_waitq);
+               wake_up_all(&ksocknal_data.ksnd_connd_waitq);
+               wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
 
                if (ksocknal_data.ksnd_sched_info != NULL) {
                        cfs_percpt_for_each(info, i,
@@ -2322,7 +2322,7 @@ ksocknal_base_shutdown(void)
 
                                for (j = 0; j < info->ksi_nthreads_max; j++) {
                                        sched = &info->ksi_scheds[j];
-                                       cfs_waitq_broadcast(&sched->kss_waitq);
+                                       wake_up_all(&sched->kss_waitq);
                                }
                        }
                }
@@ -2392,15 +2392,15 @@ ksocknal_base_startup(void)
        CFS_INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
 
        spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
-        CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_enomem_conns);
-        CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_zombie_conns);
-        CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_deathrow_conns);
-        cfs_waitq_init(&ksocknal_data.ksnd_reaper_waitq);
+       CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_enomem_conns);
+       CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_zombie_conns);
+       CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_deathrow_conns);
+       init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
 
        spin_lock_init(&ksocknal_data.ksnd_connd_lock);
-        CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_connreqs);
-        CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_routes);
-        cfs_waitq_init(&ksocknal_data.ksnd_connd_waitq);
+       CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_connreqs);
+       CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_routes);
+       init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
 
        spin_lock_init(&ksocknal_data.ksnd_tx_lock);
         CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_idle_noop_txs);
@@ -2445,7 +2445,7 @@ ksocknal_base_startup(void)
                        CFS_INIT_LIST_HEAD(&sched->kss_rx_conns);
                        CFS_INIT_LIST_HEAD(&sched->kss_tx_conns);
                        CFS_INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
-                       cfs_waitq_init(&sched->kss_waitq);
+                       init_waitqueue_head(&sched->kss_waitq);
                }
         }
 
index 951927b..9c0c5de 100644 (file)
@@ -72,7 +72,7 @@ typedef struct                                  /* per scheduler state */
        cfs_list_t              kss_tx_conns;
        /* zombie noop tx list */
        cfs_list_t              kss_zombie_noop_txs;
-       cfs_waitq_t             kss_waitq;      /* where scheduler sleeps */
+       wait_queue_head_t       kss_waitq;      /* where scheduler sleeps */
        /* # connections assigned to this scheduler */
        int                     kss_nconns;
        struct ksock_sched_info *kss_info;      /* owner of it */
@@ -183,31 +183,31 @@ typedef struct
        /* schedulers information */
        struct ksock_sched_info **ksnd_sched_info;
 
-        cfs_atomic_t      ksnd_nactive_txs;    /* #active txs */
+       cfs_atomic_t      ksnd_nactive_txs;    /* #active txs */
 
-        cfs_list_t        ksnd_deathrow_conns; /* conns to close: reaper_lock*/
-        cfs_list_t        ksnd_zombie_conns;   /* conns to free: reaper_lock */
-        cfs_list_t        ksnd_enomem_conns;   /* conns to retry: reaper_lock*/
-        cfs_waitq_t       ksnd_reaper_waitq;   /* reaper sleeps here */
-        cfs_time_t        ksnd_reaper_waketime;/* when reaper will wake */
+       cfs_list_t        ksnd_deathrow_conns; /* conns to close: reaper_lock*/
+       cfs_list_t        ksnd_zombie_conns;   /* conns to free: reaper_lock */
+       cfs_list_t        ksnd_enomem_conns;   /* conns to retry: reaper_lock*/
+       wait_queue_head_t       ksnd_reaper_waitq;   /* reaper sleeps here */
+       cfs_time_t        ksnd_reaper_waketime;/* when reaper will wake */
        spinlock_t        ksnd_reaper_lock;     /* serialise */
 
-        int               ksnd_enomem_tx;      /* test ENOMEM sender */
-        int               ksnd_stall_tx;       /* test sluggish sender */
-        int               ksnd_stall_rx;       /* test sluggish receiver */
-
-        cfs_list_t        ksnd_connd_connreqs; /* incoming connection requests */
-        cfs_list_t        ksnd_connd_routes;   /* routes waiting to be connected */
-        cfs_waitq_t       ksnd_connd_waitq;    /* connds sleep here */
-        int               ksnd_connd_connecting;/* # connds connecting */
-        /** time stamp of the last failed connecting attempt */
-        long              ksnd_connd_failed_stamp;
-        /** # starting connd */
-        unsigned          ksnd_connd_starting;
-        /** time stamp of the last starting connd */
-        long              ksnd_connd_starting_stamp;
-        /** # running connd */
-        unsigned          ksnd_connd_running;
+       int               ksnd_enomem_tx;      /* test ENOMEM sender */
+       int               ksnd_stall_tx;       /* test sluggish sender */
+       int               ksnd_stall_rx;       /* test sluggish receiver */
+
+       cfs_list_t        ksnd_connd_connreqs; /* incoming connection requests */
+       cfs_list_t        ksnd_connd_routes;   /* routes waiting to be connected */
+       wait_queue_head_t       ksnd_connd_waitq;    /* connds sleep here */
+       int               ksnd_connd_connecting;/* # connds connecting */
+       /** time stamp of the last failed connecting attempt */
+       long              ksnd_connd_failed_stamp;
+       /** # starting connd */
+       unsigned          ksnd_connd_starting;
+       /** time stamp of the last starting connd */
+       long              ksnd_connd_starting_stamp;
+       /** # running connd */
+       unsigned          ksnd_connd_running;
        spinlock_t        ksnd_connd_lock;      /* serialise */
 
        cfs_list_t        ksnd_idle_noop_txs;   /* list head for freed noop tx */
index c97cf2b..bb6974f 100644 (file)
@@ -538,14 +538,14 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
                 LASSERT (conn->ksnc_tx_scheduled);
                 cfs_list_add_tail(&conn->ksnc_tx_list,
                                   &ksocknal_data.ksnd_enomem_conns);
-                if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
-                                                   SOCKNAL_ENOMEM_RETRY),
-                                   ksocknal_data.ksnd_reaper_waketime))
-                        cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq);
+               if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
+                                       SOCKNAL_ENOMEM_RETRY),
+                                       ksocknal_data.ksnd_reaper_waketime))
+                       wake_up(&ksocknal_data.ksnd_reaper_waitq);
 
                spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
-                return (rc);
-        }
+               return (rc);
+       }
 
         /* Actual error */
         LASSERT (rc < 0);
@@ -598,7 +598,7 @@ ksocknal_launch_connection_locked (ksock_route_t *route)
 
        cfs_list_add_tail(&route->ksnr_connd_list,
                          &ksocknal_data.ksnd_connd_routes);
-       cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
+       wake_up(&ksocknal_data.ksnd_connd_waitq);
 
        spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
 }
@@ -765,15 +765,15 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
                 cfs_list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
         }
 
-        if (conn->ksnc_tx_ready &&      /* able to send */
-            !conn->ksnc_tx_scheduled) { /* not scheduled to send */
-                /* +1 ref for scheduler */
-                ksocknal_conn_addref(conn);
-                cfs_list_add_tail (&conn->ksnc_tx_list,
-                                   &sched->kss_tx_conns);
-                conn->ksnc_tx_scheduled = 1;
-                cfs_waitq_signal (&sched->kss_waitq);
-        }
+       if (conn->ksnc_tx_ready &&      /* able to send */
+           !conn->ksnc_tx_scheduled) { /* not scheduled to send */
+               /* +1 ref for scheduler */
+               ksocknal_conn_addref(conn);
+               cfs_list_add_tail (&conn->ksnc_tx_list,
+                                  &sched->kss_tx_conns);
+               conn->ksnc_tx_scheduled = 1;
+               wake_up(&sched->kss_waitq);
+       }
 
        spin_unlock_bh(&sched->kss_lock);
 }
@@ -1354,12 +1354,12 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
 
        spin_lock_bh(&sched->kss_lock);
 
-        switch (conn->ksnc_rx_state) {
-        case SOCKNAL_RX_PARSE_WAIT:
-                cfs_list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
-                cfs_waitq_signal (&sched->kss_waitq);
-                LASSERT (conn->ksnc_rx_ready);
-                break;
+       switch (conn->ksnc_rx_state) {
+       case SOCKNAL_RX_PARSE_WAIT:
+               cfs_list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
+               wake_up(&sched->kss_waitq);
+               LASSERT(conn->ksnc_rx_ready);
+               break;
 
         case SOCKNAL_RX_PARSE:
                 /* scheduler hasn't noticed I'm parsing yet */
@@ -1542,9 +1542,9 @@ int ksocknal_scheduler(void *arg)
                                        sched->kss_waitq,
                                        !ksocknal_sched_cansleep(sched));
                                LASSERT (rc == 0);
-                        } else {
-                                cfs_cond_resched();
-                        }
+                       } else {
+                               cond_resched();
+                       }
 
                        spin_lock_bh(&sched->kss_lock);
                }
@@ -1568,17 +1568,17 @@ void ksocknal_read_callback (ksock_conn_t *conn)
 
        spin_lock_bh(&sched->kss_lock);
 
-        conn->ksnc_rx_ready = 1;
+       conn->ksnc_rx_ready = 1;
 
-        if (!conn->ksnc_rx_scheduled) {  /* not being progressed */
-                cfs_list_add_tail(&conn->ksnc_rx_list,
-                                  &sched->kss_rx_conns);
-                conn->ksnc_rx_scheduled = 1;
-                /* extra ref for scheduler */
-                ksocknal_conn_addref(conn);
+       if (!conn->ksnc_rx_scheduled) {  /* not being progressed */
+               cfs_list_add_tail(&conn->ksnc_rx_list,
+                                 &sched->kss_rx_conns);
+               conn->ksnc_rx_scheduled = 1;
+               /* extra ref for scheduler */
+               ksocknal_conn_addref(conn);
 
-                cfs_waitq_signal (&sched->kss_waitq);
-        }
+               wake_up (&sched->kss_waitq);
+       }
        spin_unlock_bh(&sched->kss_lock);
 
        EXIT;
@@ -1588,7 +1588,7 @@ void ksocknal_read_callback (ksock_conn_t *conn)
  * Add connection to kss_tx_conns of scheduler
  * and wakeup the scheduler.
  */
-void ksocknal_write_callback (ksock_conn_t *conn)
+void ksocknal_write_callback(ksock_conn_t *conn)
 {
        ksock_sched_t *sched;
        ENTRY;
@@ -1597,18 +1597,17 @@ void ksocknal_write_callback (ksock_conn_t *conn)
 
        spin_lock_bh(&sched->kss_lock);
 
-        conn->ksnc_tx_ready = 1;
+       conn->ksnc_tx_ready = 1;
 
-        if (!conn->ksnc_tx_scheduled && // not being progressed
-            !cfs_list_empty(&conn->ksnc_tx_queue)){//packets to send
-                cfs_list_add_tail (&conn->ksnc_tx_list,
-                                   &sched->kss_tx_conns);
-                conn->ksnc_tx_scheduled = 1;
-                /* extra ref for scheduler */
-                ksocknal_conn_addref(conn);
+       if (!conn->ksnc_tx_scheduled && // not being progressed
+           !cfs_list_empty(&conn->ksnc_tx_queue)){//packets to send
+               cfs_list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
+               conn->ksnc_tx_scheduled = 1;
+               /* extra ref for scheduler */
+               ksocknal_conn_addref(conn);
 
 
-                cfs_waitq_signal (&sched->kss_waitq);
-        }
+               wake_up(&sched->kss_waitq);
+       }
 
        spin_unlock_bh(&sched->kss_lock);
 
@@ -2117,57 +2116,57 @@ ksocknal_connd_check_stop(long sec, long *timeout)
 static ksock_route_t *
 ksocknal_connd_get_route_locked(signed long *timeout_p)
 {
-        ksock_route_t *route;
-        cfs_time_t     now;
+       ksock_route_t *route;
+       cfs_time_t     now;
 
-        now = cfs_time_current();
+       now = cfs_time_current();
 
-        /* connd_routes can contain both pending and ordinary routes */
-        cfs_list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes,
-                                 ksnr_connd_list) {
+       /* connd_routes can contain both pending and ordinary routes */
+       cfs_list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes,
+                                ksnr_connd_list) {
 
-                if (route->ksnr_retry_interval == 0 ||
-                    cfs_time_aftereq(now, route->ksnr_timeout))
-                        return route;
+               if (route->ksnr_retry_interval == 0 ||
+                   cfs_time_aftereq(now, route->ksnr_timeout))
+                       return route;
 
-                if (*timeout_p == CFS_MAX_SCHEDULE_TIMEOUT ||
-                    (int)*timeout_p > (int)(route->ksnr_timeout - now))
-                        *timeout_p = (int)(route->ksnr_timeout - now);
-        }
+               if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
+                   (int)*timeout_p > (int)(route->ksnr_timeout - now))
+                       *timeout_p = (int)(route->ksnr_timeout - now);
+       }
 
-        return NULL;
+       return NULL;
 }
 
 int
 ksocknal_connd (void *arg)
 {
        spinlock_t    *connd_lock = &ksocknal_data.ksnd_connd_lock;
-        ksock_connreq_t   *cr;
-        cfs_waitlink_t     wait;
-        int                nloops = 0;
-        int                cons_retry = 0;
+       ksock_connreq_t   *cr;
+       wait_queue_t     wait;
+       int                nloops = 0;
+       int                cons_retry = 0;
 
-        cfs_block_allsigs ();
+       cfs_block_allsigs ();
 
-        cfs_waitlink_init (&wait);
+       init_waitqueue_entry_current(&wait);
 
        spin_lock_bh(connd_lock);
 
-        LASSERT(ksocknal_data.ksnd_connd_starting > 0);
-        ksocknal_data.ksnd_connd_starting--;
-        ksocknal_data.ksnd_connd_running++;
+       LASSERT(ksocknal_data.ksnd_connd_starting > 0);
+       ksocknal_data.ksnd_connd_starting--;
+       ksocknal_data.ksnd_connd_running++;
 
-        while (!ksocknal_data.ksnd_shuttingdown) {
-                ksock_route_t *route = NULL;
-                long sec = cfs_time_current_sec();
-                long timeout = CFS_MAX_SCHEDULE_TIMEOUT;
-                int  dropped_lock = 0;
-
-                if (ksocknal_connd_check_stop(sec, &timeout)) {
-                        /* wakeup another one to check stop */
-                        cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
-                        break;
-                }
+       while (!ksocknal_data.ksnd_shuttingdown) {
+               ksock_route_t *route = NULL;
+               long sec = cfs_time_current_sec();
+               long timeout = MAX_SCHEDULE_TIMEOUT;
+               int  dropped_lock = 0;
+
+               if (ksocknal_connd_check_stop(sec, &timeout)) {
+                       /* wakeup another one to check stop */
+                       wake_up(&ksocknal_data.ksnd_connd_waitq);
+                       break;
+               }
 
                 if (ksocknal_connd_check_start(sec, &timeout)) {
                         /* created new thread */
@@ -2227,21 +2226,21 @@ ksocknal_connd (void *arg)
                                continue;
                        spin_unlock_bh(connd_lock);
                        nloops = 0;
-                       cfs_cond_resched();
+                       cond_resched();
                        spin_lock_bh(connd_lock);
                        continue;
                }
 
                /* Nothing to do for 'timeout'  */
-               cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-               cfs_waitq_add_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
                spin_unlock_bh(connd_lock);
 
                nloops = 0;
-               cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);
+               waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
 
-               cfs_set_current_state(CFS_TASK_RUNNING);
-               cfs_waitq_del(&ksocknal_data.ksnd_connd_waitq, &wait);
+               set_current_state(TASK_RUNNING);
+               remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
                spin_lock_bh(connd_lock);
        }
        ksocknal_data.ksnd_connd_running--;
@@ -2526,13 +2525,12 @@ ksocknal_check_peer_timeouts (int idx)
        read_unlock(&ksocknal_data.ksnd_global_lock);
 }
 
-int
-ksocknal_reaper (void *arg)
+int ksocknal_reaper(void *arg)
 {
 {
-        cfs_waitlink_t     wait;
-        ksock_conn_t      *conn;
-        ksock_sched_t     *sched;
-        cfs_list_t         enomem_conns;
+       wait_queue_t     wait;
+       ksock_conn_t      *conn;
+       ksock_sched_t     *sched;
+       cfs_list_t         enomem_conns;
         int                nenomem_conns;
         cfs_duration_t     timeout;
         int                i;
@@ -2541,8 +2539,8 @@ ksocknal_reaper (void *arg)
 
         cfs_block_allsigs ();
 
-        CFS_INIT_LIST_HEAD(&enomem_conns);
-        cfs_waitlink_init (&wait);
+       CFS_INIT_LIST_HEAD(&enomem_conns);
+       init_waitqueue_entry_current(&wait);
 
        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 
@@ -2599,7 +2597,7 @@ ksocknal_reaper (void *arg)
                        conn->ksnc_tx_ready = 1;
                        cfs_list_add_tail(&conn->ksnc_tx_list,
                                          &sched->kss_tx_conns);
-                       cfs_waitq_signal(&sched->kss_waitq);
+                       wake_up(&sched->kss_waitq);
 
                        spin_unlock_bh(&sched->kss_lock);
                         nenomem_conns++;
@@ -2643,17 +2641,16 @@ ksocknal_reaper (void *arg)
                 ksocknal_data.ksnd_reaper_waketime =
                         cfs_time_add(cfs_time_current(), timeout);
 
-                cfs_set_current_state (CFS_TASK_INTERRUPTIBLE);
-                cfs_waitq_add (&ksocknal_data.ksnd_reaper_waitq, &wait);
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
 
-                if (!ksocknal_data.ksnd_shuttingdown &&
-                    cfs_list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
-                    cfs_list_empty (&ksocknal_data.ksnd_zombie_conns))
-                        cfs_waitq_timedwait (&wait, CFS_TASK_INTERRUPTIBLE,
-                                             timeout);
+               if (!ksocknal_data.ksnd_shuttingdown &&
+                   cfs_list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
+                   cfs_list_empty(&ksocknal_data.ksnd_zombie_conns))
+                       waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
 
-                cfs_set_current_state (CFS_TASK_RUNNING);
-                cfs_waitq_del (&ksocknal_data.ksnd_reaper_waitq, &wait);
+               set_current_state(TASK_RUNNING);
+               remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
 
                spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
        }
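
The daemon conversions above (kranal_connd, kranal_reaper, kranal_scheduler and the ksocknal threads) all reduce to the same stock-kernel sleep/wake handshake. The following is a minimal sketch of that pattern, not code from this patch: my_waitq, my_lock, my_work_pending, my_daemon_wait and my_daemon_kick are hypothetical names, and it assumes the pre-3.13 wait_queue_t API used throughout these hunks. The init_waitqueue_entry_current() calls in the hunks appear to be the libcfs compatibility spelling of init_waitqueue_entry(&wait, current), with waitq_wait()/waitq_timedwait() wrapping schedule()/schedule_timeout().

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_waitq);
static DEFINE_SPINLOCK(my_lock);        /* guards my_work_pending */
static int my_work_pending;

static void my_daemon_wait(long timeout)
{
        wait_queue_t  wait;
        unsigned long flags;

        init_waitqueue_entry(&wait, current);   /* bind the entry to this task */

        spin_lock_irqsave(&my_lock, flags);
        while (!my_work_pending) {
                /* exclusive queueing: one wake_up() wakes exactly one daemon */
                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue_exclusive(&my_waitq, &wait);
                spin_unlock_irqrestore(&my_lock, flags);

                schedule_timeout(timeout);      /* schedule() to wait forever */

                set_current_state(TASK_RUNNING);
                remove_wait_queue(&my_waitq, &wait);
                spin_lock_irqsave(&my_lock, flags);
        }
        spin_unlock_irqrestore(&my_lock, flags);
}

static void my_daemon_kick(void)
{
        unsigned long flags;

        /* the waker takes the same lock before flagging work, so the sleeper
         * cannot miss a wakeup between its condition check and schedule() */
        spin_lock_irqsave(&my_lock, flags);
        my_work_pending = 1;
        wake_up(&my_waitq);
        spin_unlock_irqrestore(&my_lock, flags);
}

Setting the task state before dropping the lock is what makes the handshake race-free: a wake_up() issued after the unlock still finds the daemon on the queue and marks it runnable, so schedule_timeout() returns promptly instead of sleeping through the event.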
index 50b4043..1ae1338 100644 (file)
@@ -100,7 +100,7 @@ void
 lnet_init_locks(void)
 {
        spin_lock_init(&the_lnet.ln_eq_wait_lock);
-       cfs_waitq_init(&the_lnet.ln_eq_waitq);
+       init_waitqueue_head(&the_lnet.ln_eq_waitq);
        mutex_init(&the_lnet.ln_lnd_mutex);
        mutex_init(&the_lnet.ln_api_mutex);
 }
index c3fce64..f9dcaa1 100644 (file)
@@ -234,8 +234,8 @@ lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev)
 
 #ifdef __KERNEL__
        /* Wake anyone waiting in LNetEQPoll() */
-       if (cfs_waitq_active(&the_lnet.ln_eq_waitq))
-               cfs_waitq_broadcast(&the_lnet.ln_eq_waitq);
+       if (waitqueue_active(&the_lnet.ln_eq_waitq))
+               wake_up_all(&the_lnet.ln_eq_waitq);
 #else
 # ifndef HAVE_LIBPTHREAD
        /* LNetEQPoll() calls into _the_ LND to wait for action */
@@ -339,26 +339,26 @@ lnet_eq_wait_locked(int *timeout_ms)
 {
        int             tms = *timeout_ms;
        int             wait;
-       cfs_waitlink_t  wl;
+       wait_queue_t  wl;
        cfs_time_t      now;
 
        if (tms == 0)
                return -1; /* don't want to wait and no new event */
 
-       cfs_waitlink_init(&wl);
-       cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-       cfs_waitq_add(&the_lnet.ln_eq_waitq, &wl);
+       init_waitqueue_entry_current(&wl);
+       set_current_state(TASK_INTERRUPTIBLE);
+       add_wait_queue(&the_lnet.ln_eq_waitq, &wl);
 
        lnet_eq_wait_unlock();
 
        if (tms < 0) {
-               cfs_waitq_wait(&wl, CFS_TASK_INTERRUPTIBLE);
+               waitq_wait(&wl, TASK_INTERRUPTIBLE);
 
        } else {
                struct timeval tv;
 
                now = cfs_time_current();
-               cfs_waitq_timedwait(&wl, CFS_TASK_INTERRUPTIBLE,
+               waitq_timedwait(&wl, TASK_INTERRUPTIBLE,
                                    cfs_time_seconds(tms) / 1000);
                cfs_duration_usec(cfs_time_sub(cfs_time_current(), now), &tv);
                tms -= (int)(tv.tv_sec * 1000 + tv.tv_usec / 1000);
@@ -370,7 +370,7 @@ lnet_eq_wait_locked(int *timeout_ms)
        *timeout_ms = tms;
 
        lnet_eq_wait_lock();
-       cfs_waitq_del(&the_lnet.ln_eq_waitq, &wl);
+       remove_wait_queue(&the_lnet.ln_eq_waitq, &wl);
 
        return wait;
 }
index 9423278..551e4b3 100644 (file)
@@ -1276,12 +1276,12 @@ rescan:
 
                lnet_prune_rc_data(0); /* don't wait for UNLINK */
 
-                /* Call cfs_pause() here always adds 1 to load average 
-                 * because kernel counts # active tasks as nr_running 
-                 * + nr_uninterruptible. */
-                cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
-                                                   cfs_time_seconds(1));
-        }
+               /* Call cfs_pause() here always adds 1 to load average
+                * because kernel counts # active tasks as nr_running
+                * + nr_uninterruptible. */
+               schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
+                                                  cfs_time_seconds(1));
+       }
 
        LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING);
 
index a7729ed..72d6add 100644 (file)
@@ -66,25 +66,25 @@ lstcon_rpc_done(srpc_client_rpc_t *rpc)
                 * I'm just a poor body and nobody loves me */
                spin_unlock(&rpc->crpc_lock);
 
-                /* release it */
-                lstcon_rpc_put(crpc);
-                return;
-        }
+               /* release it */
+               lstcon_rpc_put(crpc);
+               return;
+       }
 
-        /* not an orphan RPC */
-        crpc->crp_finished = 1;
+       /* not an orphan RPC */
+       crpc->crp_finished = 1;
 
-        if (crpc->crp_stamp == 0) {
-                /* not aborted */
-                LASSERT (crpc->crp_status == 0);
+       if (crpc->crp_stamp == 0) {
+               /* not aborted */
+               LASSERT (crpc->crp_status == 0);
 
 
-                crpc->crp_stamp  = cfs_time_current();
-                crpc->crp_status = rpc->crpc_status;
-        }
+               crpc->crp_stamp  = cfs_time_current();
+               crpc->crp_status = rpc->crpc_status;
+       }
 
-        /* wakeup (transaction)thread if I'm the last RPC in the transaction */
-        if (cfs_atomic_dec_and_test(&crpc->crp_trans->tas_remaining))
-                cfs_waitq_signal(&crpc->crp_trans->tas_waitq);
+       /* wakeup (transaction)thread if I'm the last RPC in the transaction */
+       if (cfs_atomic_dec_and_test(&crpc->crp_trans->tas_remaining))
+               wake_up(&crpc->crp_trans->tas_waitq);
 
        spin_unlock(&rpc->crpc_lock);
 }
@@ -265,9 +265,9 @@ lstcon_rpc_trans_prep(cfs_list_t *translist,
 
         cfs_list_add_tail(&trans->tas_link, &console_session.ses_trans_list);
 
-        CFS_INIT_LIST_HEAD(&trans->tas_rpcs_list);
-        cfs_atomic_set(&trans->tas_remaining, 0);
-        cfs_waitq_init(&trans->tas_waitq);
+       CFS_INIT_LIST_HEAD(&trans->tas_rpcs_list);
+       cfs_atomic_set(&trans->tas_remaining, 0);
+       init_waitqueue_head(&trans->tas_waitq);
 
        spin_lock(&console_session.ses_rpc_lock);
        trans->tas_features = console_session.ses_features;
@@ -361,9 +361,9 @@ lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
 
        mutex_unlock(&console_session.ses_mutex);
 
-        cfs_waitq_wait_event_interruptible_timeout(trans->tas_waitq,
-                                              lstcon_rpc_trans_check(trans),
-                                              cfs_time_seconds(timeout), rc);
+       rc = wait_event_interruptible_timeout(trans->tas_waitq,
+                                             lstcon_rpc_trans_check(trans),
+                                             cfs_time_seconds(timeout));
 
         rc = (rc > 0)? 0: ((rc < 0)? -EINTR: -ETIMEDOUT);
 
@@ -1354,11 +1354,11 @@ lstcon_rpc_cleanup_wait(void)
                         trans = cfs_list_entry(pacer, lstcon_rpc_trans_t,
                                                tas_link);
 
-                        CDEBUG(D_NET, "Session closed, wakeup transaction %s\n",
-                               lstcon_rpc_trans_name(trans->tas_opc));
+                       CDEBUG(D_NET, "Session closed, wakeup transaction %s\n",
+                              lstcon_rpc_trans_name(trans->tas_opc));
 
-                        cfs_waitq_signal(&trans->tas_waitq);
-                }
+                       wake_up(&trans->tas_waitq);
+               }
 
                mutex_unlock(&console_session.ses_mutex);
 
index 51b9f6c..59aead3 100644 (file)
@@ -80,16 +80,16 @@ typedef struct lstcon_rpc {
 } lstcon_rpc_t;
 
 typedef struct lstcon_rpc_trans {
-        cfs_list_t            tas_olink;     /* link chain on owner list */
-        cfs_list_t            tas_link;      /* link chain on global list */
-        int                   tas_opc;       /* operation code of transaction */
+       cfs_list_t            tas_olink;     /* link chain on owner list */
+       cfs_list_t            tas_link;      /* link chain on global list */
+       int                   tas_opc;       /* operation code of transaction */
        /* features mask is uptodate */
        unsigned              tas_feats_updated;
        /* test features mask */
        unsigned              tas_features;
-        cfs_waitq_t           tas_waitq;     /* wait queue head */
-        cfs_atomic_t          tas_remaining; /* # of un-scheduled rpcs */
-        cfs_list_t            tas_rpcs_list; /* queued requests */
+       wait_queue_head_t     tas_waitq;     /* wait queue head */
+       cfs_atomic_t          tas_remaining; /* # of un-scheduled rpcs */
+       cfs_list_t            tas_rpcs_list; /* queued requests */
 } lstcon_rpc_trans_t;
 
 #define LST_TRANS_PRIVATE       0x1000
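
Note the calling-convention change visible in the lstcon_rpc_trans_postwait() hunk above: the old cfs_waitq_wait_event_interruptible_timeout() returned its result through a trailing out-parameter, while the kernel's wait_event_interruptible_timeout() returns it directly. A hedged sketch of the return-value mapping the console code now depends on; wait_for_flag, wq and done are hypothetical names, not part of the patch:

/*
 * wait_event_interruptible_timeout() returns:
 *   > 0  condition became true; value is the jiffies left before timeout
 *   = 0  timed out with the condition still false
 *   < 0  -ERESTARTSYS, the sleep was interrupted by a signal
 */
static int wait_for_flag(wait_queue_head_t *wq, int *done, long max_jiffies)
{
        int rc = wait_event_interruptible_timeout(*wq, *done != 0,
                                                  max_jiffies);

        /* collapse to the 0 / -EINTR / -ETIMEDOUT convention used above */
        return (rc > 0) ? 0 : ((rc < 0) ? -EINTR : -ETIMEDOUT);
}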
index 60fb45a..bd917e8 100644 (file)
@@ -1167,7 +1167,7 @@ srpc_del_client_rpc_timer (srpc_client_rpc_t *rpc)
        while (rpc->crpc_timeout != 0) {
                spin_unlock(&rpc->crpc_lock);
 
-               cfs_schedule();
+               schedule();
 
                spin_lock(&rpc->crpc_lock);
        }
index e98dbbf..aaab0b0 100644 (file)
                                                     (STTIMER_NSLOTS - 1))])
 
 struct st_timer_data {
-       spinlock_t       stt_lock;
-        /* start time of the slot processed previously */
-        cfs_time_t       stt_prev_slot;
-        cfs_list_t       stt_hash[STTIMER_NSLOTS];
-        int              stt_shuttingdown;
+       spinlock_t              stt_lock;
+       /* start time of the slot processed previously */
+       cfs_time_t              stt_prev_slot;
+       cfs_list_t              stt_hash[STTIMER_NSLOTS];
+       int                     stt_shuttingdown;
 #ifdef __KERNEL__
-        cfs_waitq_t      stt_waitq;
-        int              stt_nthreads;
+       wait_queue_head_t       stt_waitq;
+       int                     stt_nthreads;
 #endif
 } stt_data;
 
@@ -182,15 +182,13 @@ stt_timer_main (void *arg)
 
         cfs_block_allsigs();
 
-        while (!stt_data.stt_shuttingdown) {
-                stt_check_timers(&stt_data.stt_prev_slot);
+       while (!stt_data.stt_shuttingdown) {
+               stt_check_timers(&stt_data.stt_prev_slot);
 
-                cfs_waitq_wait_event_timeout(stt_data.stt_waitq,
-                                   stt_data.stt_shuttingdown,
-                                   cfs_time_seconds(STTIMER_SLOTTIME),
-                                   rc);
-                rc = 0; /* Discard jiffies remaining before timeout. */
-        }
+               rc = wait_event_timeout(stt_data.stt_waitq,
+                                       stt_data.stt_shuttingdown,
+                                       cfs_time_seconds(STTIMER_SLOTTIME));
+       }
 
        spin_lock(&stt_data.stt_lock);
        stt_data.stt_nthreads--;
@@ -245,11 +243,11 @@ stt_startup (void)
                 CFS_INIT_LIST_HEAD(&stt_data.stt_hash[i]);
 
 #ifdef __KERNEL__
-        stt_data.stt_nthreads = 0;
-        cfs_waitq_init(&stt_data.stt_waitq);
-        rc = stt_start_timer_thread();
-        if (rc != 0)
-                CERROR ("Can't spawn timer thread: %d\n", rc);
+       stt_data.stt_nthreads = 0;
+       init_waitqueue_head(&stt_data.stt_waitq);
+       rc = stt_start_timer_thread();
+       if (rc != 0)
+               CERROR ("Can't spawn timer thread: %d\n", rc);
 #endif
 
         return rc;
@@ -268,10 +266,10 @@ stt_shutdown (void)
         stt_data.stt_shuttingdown = 1;
 
 #ifdef __KERNEL__
-        cfs_waitq_signal(&stt_data.stt_waitq);
-        lst_wait_until(stt_data.stt_nthreads == 0, stt_data.stt_lock,
-                       "waiting for %d threads to terminate\n",
-                       stt_data.stt_nthreads);
+       wake_up(&stt_data.stt_waitq);
+       lst_wait_until(stt_data.stt_nthreads == 0, stt_data.stt_lock,
+                      "waiting for %d threads to terminate\n",
+                      stt_data.stt_nthreads);
 #endif
 
        spin_unlock(&stt_data.stt_lock);
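Taken together, the timer.c hunks show the standard worker/shutdown pairing
after the cleanup: the thread loops in wait_event_timeout() on a shutdown
flag, and the shutdown path sets the flag and calls wake_up(). Unlike the old
cfs_waitq_wait_event_timeout() wrapper, which reported its status through an
out-parameter, wait_event_timeout() returns the remaining jiffies directly,
so the explicit "rc = 0" discard could simply be dropped. A condensed,
self-contained sketch of the pattern (names hypothetical):

    #include <linux/wait.h>
    #include <linux/sched.h>

    static wait_queue_head_t demo_waitq;
    static int demo_shuttingdown;

    static int demo_timer_main(void *arg)
    {
            while (!demo_shuttingdown) {
                    /* ... check and fire expired timers ... */
                    wait_event_timeout(demo_waitq, demo_shuttingdown, HZ);
            }
            return 0;
    }

    static void demo_shutdown(void)
    {
            demo_shuttingdown = 1;          /* publish the flag first */
            wake_up(&demo_waitq);           /* then kick the worker */
    }
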
index d0da85e..4487fee 100644 (file)
@@ -238,45 +238,45 @@ static int seq_client_alloc_seq(const struct lu_env *env,
 }
 
 static int seq_fid_alloc_prep(struct lu_client_seq *seq,
-                              cfs_waitlink_t *link)
+                             wait_queue_t *link)
 {
-        if (seq->lcs_update) {
-                cfs_waitq_add(&seq->lcs_waitq, link);
-                cfs_set_current_state(CFS_TASK_UNINT);
+       if (seq->lcs_update) {
+               add_wait_queue(&seq->lcs_waitq, link);
+               set_current_state(TASK_UNINTERRUPTIBLE);
                mutex_unlock(&seq->lcs_mutex);
 
-                cfs_waitq_wait(link, CFS_TASK_UNINT);
+               waitq_wait(link, TASK_UNINTERRUPTIBLE);
 
                mutex_lock(&seq->lcs_mutex);
-                cfs_waitq_del(&seq->lcs_waitq, link);
-                cfs_set_current_state(CFS_TASK_RUNNING);
-                return -EAGAIN;
-        }
-        ++seq->lcs_update;
+               remove_wait_queue(&seq->lcs_waitq, link);
+               set_current_state(TASK_RUNNING);
+               return -EAGAIN;
+       }
+       ++seq->lcs_update;
        mutex_unlock(&seq->lcs_mutex);
-        return 0;
+       return 0;
 }
 
 static void seq_fid_alloc_fini(struct lu_client_seq *seq)
 {
-        LASSERT(seq->lcs_update == 1);
+       LASSERT(seq->lcs_update == 1);
        mutex_lock(&seq->lcs_mutex);
-        --seq->lcs_update;
-        cfs_waitq_signal(&seq->lcs_waitq);
+       --seq->lcs_update;
+       wake_up(&seq->lcs_waitq);
 }
 
 /**
  * Allocate the whole seq to the caller.
  **/
 int seq_client_get_seq(const struct lu_env *env,
-                       struct lu_client_seq *seq, seqno_t *seqnr)
+                      struct lu_client_seq *seq, seqno_t *seqnr)
 {
-        cfs_waitlink_t link;
-        int rc;
+       wait_queue_t link;
+       int rc;
 
-        LASSERT(seqnr != NULL);
+       LASSERT(seqnr != NULL);
        mutex_lock(&seq->lcs_mutex);
-        cfs_waitlink_init(&link);
+       init_waitqueue_entry_current(&link);
 
         while (1) {
                 rc = seq_fid_alloc_prep(seq, &link);
@@ -318,16 +318,16 @@ EXPORT_SYMBOL(seq_client_get_seq);
 
 /* Allocate new fid on passed client @seq and save it to @fid. */
 int seq_client_alloc_fid(const struct lu_env *env,
-                         struct lu_client_seq *seq, struct lu_fid *fid)
+                        struct lu_client_seq *seq, struct lu_fid *fid)
 {
-        cfs_waitlink_t link;
-        int rc;
-        ENTRY;
+       wait_queue_t link;
+       int rc;
+       ENTRY;
 
-        LASSERT(seq != NULL);
-        LASSERT(fid != NULL);
+       LASSERT(seq != NULL);
+       LASSERT(fid != NULL);
 
-        cfs_waitlink_init(&link);
+       init_waitqueue_entry_current(&link);
        mutex_lock(&seq->lcs_mutex);
 
        if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_EXHAUST))
@@ -388,23 +388,23 @@ EXPORT_SYMBOL(seq_client_alloc_fid);
  */
 void seq_client_flush(struct lu_client_seq *seq)
 {
-        cfs_waitlink_t link;
+       wait_queue_t link;
 
-        LASSERT(seq != NULL);
-        cfs_waitlink_init(&link);
+       LASSERT(seq != NULL);
+       init_waitqueue_entry_current(&link);
        mutex_lock(&seq->lcs_mutex);
 
-        while (seq->lcs_update) {
-                cfs_waitq_add(&seq->lcs_waitq, &link);
-                cfs_set_current_state(CFS_TASK_UNINT);
+       while (seq->lcs_update) {
+               add_wait_queue(&seq->lcs_waitq, &link);
+               set_current_state(TASK_UNINTERRUPTIBLE);
                mutex_unlock(&seq->lcs_mutex);
 
-                cfs_waitq_wait(&link, CFS_TASK_UNINT);
+               waitq_wait(&link, TASK_UNINTERRUPTIBLE);
 
                mutex_lock(&seq->lcs_mutex);
-                cfs_waitq_del(&seq->lcs_waitq, &link);
-                cfs_set_current_state(CFS_TASK_RUNNING);
-        }
+               remove_wait_queue(&seq->lcs_waitq, &link);
+               set_current_state(TASK_RUNNING);
+       }
 
         fid_zero(&seq->lcs_fid);
         /**
@@ -489,7 +489,7 @@ int seq_client_init(struct lu_client_seq *seq,
        else
                seq->lcs_width = LUSTRE_DATA_SEQ_MAX_WIDTH;
 
-       cfs_waitq_init(&seq->lcs_waitq);
+       init_waitqueue_head(&seq->lcs_waitq);
        /* Make sure that things are clear before work is started. */
        seq_client_flush(seq);
 
index d9f2237..93d1c1e 100644 (file)
@@ -74,41 +74,41 @@ static int fld_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
 
 static void fld_enter_request(struct client_obd *cli)
 {
-        struct mdc_cache_waiter mcw;
-        struct l_wait_info lwi = { 0 };
-
-        client_obd_list_lock(&cli->cl_loi_list_lock);
-        if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
-                cfs_list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
-                cfs_waitq_init(&mcw.mcw_waitq);
-                client_obd_list_unlock(&cli->cl_loi_list_lock);
-                l_wait_event(mcw.mcw_waitq, fld_req_avail(cli, &mcw), &lwi);
-        } else {
-                cli->cl_r_in_flight++;
-                client_obd_list_unlock(&cli->cl_loi_list_lock);
-        }
+       struct mdc_cache_waiter mcw;
+       struct l_wait_info lwi = { 0 };
+
+       client_obd_list_lock(&cli->cl_loi_list_lock);
+       if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
+               cfs_list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
+               init_waitqueue_head(&mcw.mcw_waitq);
+               client_obd_list_unlock(&cli->cl_loi_list_lock);
+               l_wait_event(mcw.mcw_waitq, fld_req_avail(cli, &mcw), &lwi);
+       } else {
+               cli->cl_r_in_flight++;
+               client_obd_list_unlock(&cli->cl_loi_list_lock);
+       }
 }
 
 static void fld_exit_request(struct client_obd *cli)
 {
-        cfs_list_t *l, *tmp;
-        struct mdc_cache_waiter *mcw;
-
-        client_obd_list_lock(&cli->cl_loi_list_lock);
-        cli->cl_r_in_flight--;
-        cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
-
-                if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
-                        /* No free request slots anymore */
-                        break;
-                }
-
-                mcw = cfs_list_entry(l, struct mdc_cache_waiter, mcw_entry);
-                cfs_list_del_init(&mcw->mcw_entry);
-                cli->cl_r_in_flight++;
-                cfs_waitq_signal(&mcw->mcw_waitq);
-        }
-        client_obd_list_unlock(&cli->cl_loi_list_lock);
+       cfs_list_t *l, *tmp;
+       struct mdc_cache_waiter *mcw;
+
+       client_obd_list_lock(&cli->cl_loi_list_lock);
+       cli->cl_r_in_flight--;
+       cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
+
+               if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
+                       /* No free request slots anymore */
+                       break;
+               }
+
+               mcw = cfs_list_entry(l, struct mdc_cache_waiter, mcw_entry);
+               cfs_list_del_init(&mcw->mcw_entry);
+               cli->cl_r_in_flight++;
+               wake_up(&mcw->mcw_waitq);
+       }
+       client_obd_list_unlock(&cli->cl_loi_list_lock);
 }
 
 static int fld_rrb_hash(struct lu_client_fld *fld,
index 03ee887..9b7b381 100644 (file)
@@ -1561,22 +1561,22 @@ struct cl_lock {
          */
         struct cl_lock_descr  cll_descr;
         /** Protected by cl_lock::cll_guard. */
-        enum cl_lock_state    cll_state;
-        /** signals state changes. */
-        cfs_waitq_t           cll_wq;
-        /**
-         * Recursive lock, most fields in cl_lock{} are protected by this.
-         *
-         * Locking rules: this mutex is never held across network
-         * communication, except when lock is being canceled.
-         *
-         * Lock ordering: a mutex of a sub-lock is taken first, then a mutex
-         * on a top-lock. Other direction is implemented through a
-         * try-lock-repeat loop. Mutices of unrelated locks can be taken only
-         * by try-locking.
-         *
-         * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
-         */
+       enum cl_lock_state    cll_state;
+       /** signals state changes. */
+       wait_queue_head_t     cll_wq;
+       /**
+        * Recursive lock, most fields in cl_lock{} are protected by this.
+        *
+        * Locking rules: this mutex is never held across network
+        * communication, except when lock is being canceled.
+        *
+        * Lock ordering: a mutex of a sub-lock is taken first, then a mutex
+        * on a top-lock. Other direction is implemented through a
+        * try-lock-repeat loop. Mutices of unrelated locks can be taken only
+        * by try-locking.
+        *
+        * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
+        */
        struct mutex            cll_guard;
         cfs_task_t           *cll_guarder;
         int                   cll_depth;
@@ -3206,7 +3206,7 @@ struct cl_sync_io {
        /** barrier of destroy this structure */
        cfs_atomic_t            csi_barrier;
        /** completion to be signaled when transfer is complete. */
-       cfs_waitq_t             csi_waitq;
+       wait_queue_head_t       csi_waitq;
 };
 
 void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages);
index 8d9915d..0f713e2 100644 (file)
@@ -280,7 +280,7 @@ typedef struct task_struct cfs_task_t;
 extern struct task_struct *current;
 int in_group_p(gid_t gid);
 
-#define cfs_set_current_state(foo) do { current->state = foo; } while (0)
+#define set_current_state(foo) do { current->state = foo; } while (0)
 
 #define wait_event_interruptible(wq, condition)                         \
 {                                                                       \
@@ -378,10 +378,10 @@ struct file_lock {
          struct file_lock *fl_next;  /* singly linked list for this inode  */
          cfs_list_t fl_link;   /* doubly linked list of all locks */
          cfs_list_t fl_block;  /* circular list of blocked processes */
-        void *fl_owner;
-        unsigned int fl_pid;
-        cfs_waitq_t fl_wait;
-        struct file *fl_file;
+       void *fl_owner;
+       unsigned int fl_pid;
+       wait_queue_head_t fl_wait;
+       struct file *fl_file;
          unsigned char fl_flags;
          unsigned char fl_type;
          loff_t fl_start;
index 366fe5a..549ef33 100644 (file)
@@ -555,28 +555,28 @@ struct lu_object_header {
 struct fld;
 
 struct lu_site_bkt_data {
-        /**
-         * number of busy object on this bucket
-         */
-        long                      lsb_busy;
-        /**
-         * LRU list, updated on each access to object. Protected by
-         * bucket lock of lu_site::ls_obj_hash.
-         *
-         * "Cold" end of LRU is lu_site::ls_lru.next. Accessed object are
-         * moved to the lu_site::ls_lru.prev (this is due to the non-existence
-         * of list_for_each_entry_safe_reverse()).
-         */
-        cfs_list_t                lsb_lru;
-        /**
-         * Wait-queue signaled when an object in this site is ultimately
-         * destroyed (lu_object_free()). It is used by lu_object_find() to
-         * wait before re-trying when object in the process of destruction is
-         * found in the hash table.
-         *
-         * \see htable_lookup().
-         */
-        cfs_waitq_t               lsb_marche_funebre;
+       /**
+        * number of busy object on this bucket
+        */
+       long                      lsb_busy;
+       /**
+        * LRU list, updated on each access to object. Protected by
+        * bucket lock of lu_site::ls_obj_hash.
+        *
+        * "Cold" end of LRU is lu_site::ls_lru.next. Accessed object are
+        * moved to the lu_site::ls_lru.prev (this is due to the non-existence
+        * of list_for_each_entry_safe_reverse()).
+        */
+       cfs_list_t                lsb_lru;
+       /**
+        * Wait-queue signaled when an object in this site is ultimately
+        * destroyed (lu_object_free()). It is used by lu_object_find() to
+        * wait before re-trying when object in the process of destruction is
+        * found in the hash table.
+        *
+        * \see htable_lookup().
+        */
+       wait_queue_head_t               lsb_marche_funebre;
 };
 
 enum {
index 9d376c9..79f4b05 100644 (file)
@@ -467,7 +467,7 @@ struct ldlm_namespace {
          * Wait queue used by __ldlm_namespace_free. Gets woken up every time
          * a resource is removed.
          */
-       cfs_waitq_t             ns_waitq;
+       wait_queue_head_t       ns_waitq;
        /** LDLM pool structure for this namespace */
        struct ldlm_pool        ns_pool;
        /** Definition of how eagerly unused locks will be released from LRU */
@@ -782,7 +782,7 @@ struct ldlm_lock {
          * it's no longer in use.  If the lock is not granted, a process sleeps
          * on this waitq to learn when it becomes granted.
          */
-       cfs_waitq_t             l_waitq;
+       wait_queue_head_t       l_waitq;
 
        /**
         * Seconds. It will be updated if there is any activity related to
index 2ce75b2..8d221e8 100644 (file)
@@ -369,9 +369,9 @@ struct lu_client_seq {
          /* Seq-server for direct talking */
          struct lu_server_seq   *lcs_srv;
 
-        /* wait queue for fid allocation and update indicator */
-        cfs_waitq_t             lcs_waitq;
-        int                     lcs_update;
+       /* wait queue for fid allocation and update indicator */
+       wait_queue_head_t       lcs_waitq;
+       int                     lcs_update;
 };
 
 /* server sequence manager interface */
index acb82fd..d86f0e7 100644 (file)
@@ -192,8 +192,8 @@ struct obd_import {
          cfs_time_t                imp_sec_expire;
          /** @} */
 
-        /** Wait queue for those who need to wait for recovery completion */
-        cfs_waitq_t               imp_recovery_waitq;
+       /** Wait queue for those who need to wait for recovery completion */
+       wait_queue_head_t         imp_recovery_waitq;
 
          /** Number of requests currently in-flight */
          cfs_atomic_t              imp_inflight;
index adeb287..022a04c 100644 (file)
@@ -708,59 +708,59 @@ struct l_wait_info {
  */
 #define __l_wait_event(wq, condition, info, ret, l_add_wait)                   \
 do {                                                                           \
-        cfs_waitlink_t __wait;                                                 \
-        cfs_duration_t __timeout = info->lwi_timeout;                          \
-        cfs_sigset_t   __blocked;                                              \
-        int   __allow_intr = info->lwi_allow_intr;                             \
-                                                                               \
-        ret = 0;                                                               \
-        if (condition)                                                         \
-                break;                                                         \
-                                                                               \
-        cfs_waitlink_init(&__wait);                                            \
-        l_add_wait(&wq, &__wait);                                              \
-                                                                               \
-        /* Block all signals (just the non-fatal ones if no timeout). */       \
-        if (info->lwi_on_signal != NULL && (__timeout == 0 || __allow_intr))   \
-                __blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);              \
-        else                                                                   \
-                __blocked = cfs_block_sigsinv(0);                              \
-                                                                               \
-        for (;;) {                                                             \
-                unsigned       __wstate;                                       \
-                                                                               \
-                __wstate = info->lwi_on_signal != NULL &&                      \
-                           (__timeout == 0 || __allow_intr) ?                  \
-                        CFS_TASK_INTERRUPTIBLE : CFS_TASK_UNINT;               \
-                                                                               \
-                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);                 \
-                                                                               \
-                if (condition)                                                 \
-                        break;                                                 \
-                                                                               \
-                if (__timeout == 0) {                                          \
-                        cfs_waitq_wait(&__wait, __wstate);                     \
-                } else {                                                       \
-                        cfs_duration_t interval = info->lwi_interval?          \
-                                             min_t(cfs_duration_t,             \
-                                                 info->lwi_interval,__timeout):\
-                                             __timeout;                        \
-                        cfs_duration_t remaining = cfs_waitq_timedwait(&__wait,\
-                                                   __wstate,                   \
-                                                   interval);                  \
-                        __timeout = cfs_time_sub(__timeout,                    \
-                                            cfs_time_sub(interval, remaining));\
-                        if (__timeout == 0) {                                  \
-                                if (info->lwi_on_timeout == NULL ||            \
-                                    info->lwi_on_timeout(info->lwi_cb_data)) { \
-                                        ret = -ETIMEDOUT;                      \
-                                        break;                                 \
-                                }                                              \
-                                /* Take signals after the timeout expires. */  \
-                                if (info->lwi_on_signal != NULL)               \
-                                    (void)cfs_block_sigsinv(LUSTRE_FATAL_SIGS);\
-                        }                                                      \
-                }                                                              \
+       wait_queue_t __wait;                                                   \
+       cfs_duration_t __timeout = info->lwi_timeout;                          \
+       cfs_sigset_t   __blocked;                                              \
+       int   __allow_intr = info->lwi_allow_intr;                             \
+                                                                              \
+       ret = 0;                                                               \
+       if (condition)                                                         \
+               break;                                                         \
+                                                                              \
+       init_waitqueue_entry_current(&__wait);                                 \
+       l_add_wait(&wq, &__wait);                                              \
+                                                                              \
+       /* Block all signals (just the non-fatal ones if no timeout). */       \
+       if (info->lwi_on_signal != NULL && (__timeout == 0 || __allow_intr))   \
+               __blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);              \
+       else                                                                   \
+               __blocked = cfs_block_sigsinv(0);                              \
+                                                                              \
+       for (;;) {                                                             \
+               unsigned       __wstate;                                       \
+                                                                              \
+               __wstate = info->lwi_on_signal != NULL &&                      \
+                          (__timeout == 0 || __allow_intr) ?                  \
+                       TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;             \
+                                                                              \
+               set_current_state(TASK_INTERRUPTIBLE);                         \
+                                                                              \
+               if (condition)                                                 \
+                       break;                                                 \
+                                                                              \
+               if (__timeout == 0) {                                          \
+                       waitq_wait(&__wait, __wstate);                         \
+               } else {                                                       \
+                       cfs_duration_t interval = info->lwi_interval?          \
+                                            min_t(cfs_duration_t,             \
+                                                info->lwi_interval,__timeout):\
+                                            __timeout;                        \
+                       cfs_duration_t remaining = waitq_timedwait(&__wait,    \
+                                                  __wstate,                   \
+                                                  interval);                  \
+                       __timeout = cfs_time_sub(__timeout,                    \
+                                           cfs_time_sub(interval, remaining));\
+                       if (__timeout == 0) {                                  \
+                               if (info->lwi_on_timeout == NULL ||            \
+                                   info->lwi_on_timeout(info->lwi_cb_data)) { \
+                                       ret = -ETIMEDOUT;                      \
+                                       break;                                 \
+                               }                                              \
+                               /* Take signals after the timeout expires. */  \
+                               if (info->lwi_on_signal != NULL)               \
+                                   (void)cfs_block_sigsinv(LUSTRE_FATAL_SIGS);\
+                       }                                                      \
+               }                                                              \
                                                                                \
                 if (condition)                                                 \
                         break;                                                 \
@@ -785,8 +785,8 @@ do {                                                                           \
                                                                                \
        cfs_restore_sigs(__blocked);                                           \
                                                                                \
-        cfs_set_current_state(CFS_TASK_RUNNING);                               \
-        cfs_waitq_del(&wq, &__wait);                                           \
+       set_current_state(TASK_RUNNING);                                       \
+       remove_wait_queue(&wq, &__wait);                                       \
 } while (0)
 
 #else /* !__KERNEL__ */
@@ -840,32 +840,32 @@ do {                                                                    \
 
 #define l_wait_event(wq, condition, info)                       \
 ({                                                              \
-        int                 __ret;                              \
-        struct l_wait_info *__info = (info);                    \
-                                                                \
-        __l_wait_event(wq, condition, __info,                   \
-                       __ret, cfs_waitq_add);                   \
-        __ret;                                                  \
+       int                 __ret;                              \
+       struct l_wait_info *__info = (info);                    \
+                                                               \
+       __l_wait_event(wq, condition, __info,                   \
+                      __ret, add_wait_queue);                  \
+       __ret;                                                  \
 })
 
 #define l_wait_event_exclusive(wq, condition, info)             \
 ({                                                              \
-        int                 __ret;                              \
-        struct l_wait_info *__info = (info);                    \
-                                                                \
-        __l_wait_event(wq, condition, __info,                   \
-                       __ret, cfs_waitq_add_exclusive);         \
-        __ret;                                                  \
+       int                 __ret;                              \
+       struct l_wait_info *__info = (info);                    \
+                                                               \
+       __l_wait_event(wq, condition, __info,                   \
+                      __ret, add_wait_queue_exclusive);        \
+       __ret;                                                  \
 })
 
 #define l_wait_event_exclusive_head(wq, condition, info)        \
 ({                                                              \
-        int                 __ret;                              \
-        struct l_wait_info *__info = (info);                    \
-                                                                \
-        __l_wait_event(wq, condition, __info,                   \
-                       __ret, cfs_waitq_add_exclusive_head);    \
-        __ret;                                                  \
+       int                 __ret;                              \
+       struct l_wait_info *__info = (info);                    \
+                                                               \
+       __l_wait_event(wq, condition, __info,                   \
+                      __ret, add_wait_queue_exclusive_head);   \
+       __ret;                                                  \
 })
 
 #define l_wait_condition(wq, condition)                         \
index 708d7c8..b56f970 100644 (file)
@@ -421,7 +421,7 @@ static inline void llog_ctxt_put(struct llog_ctxt *ctxt)
 
 static inline void llog_group_init(struct obd_llog_group *olg, int group)
 {
-       cfs_waitq_init(&olg->olg_waitq);
+       init_waitqueue_head(&olg->olg_waitq);
        spin_lock_init(&olg->olg_lock);
        mutex_init(&olg->olg_cat_processing);
        olg->olg_seq = group;
index bc04678..e7152d8 100644 (file)
@@ -133,7 +133,7 @@ static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck,
          * the common case when it isn't true. */
         while (unlikely(lck->rpcl_it == MDC_FAKE_RPCL_IT)) {
                 mutex_unlock(&lck->rpcl_mutex);
-               cfs_schedule_timeout(cfs_time_seconds(1) / 4);
+               schedule_timeout(cfs_time_seconds(1) / 4);
                goto again;
        }
 
@@ -183,8 +183,8 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
 
 
 struct mdc_cache_waiter {
-        cfs_list_t              mcw_entry;
-        cfs_waitq_t             mcw_waitq;
+       cfs_list_t              mcw_entry;
+       wait_queue_head_t             mcw_waitq;
 };
 
 /* mdc/mdc_locks.c */
index 80d5d13..37f1974 100644 (file)
@@ -580,8 +580,8 @@ struct ptlrpc_request_set {
        /** number of uncompleted requests */
        cfs_atomic_t          set_remaining;
        /** wait queue to wait on for request events */
-       cfs_waitq_t           set_waitq;
-       cfs_waitq_t          *set_wakeup_ptr;
+       wait_queue_head_t           set_waitq;
+       wait_queue_head_t          *set_wakeup_ptr;
        /** List of requests in the set */
        cfs_list_t            set_requests;
        /**
@@ -1966,10 +1966,10 @@ struct ptlrpc_request {
         /** incoming request buffer */
         struct ptlrpc_request_buffer_desc *rq_rqbd;
 
-        /** client-only incoming reply */
-        lnet_handle_md_t     rq_reply_md_h;
-        cfs_waitq_t          rq_reply_waitq;
-        struct ptlrpc_cb_id  rq_reply_cbid;
+       /** client-only incoming reply */
+       lnet_handle_md_t     rq_reply_md_h;
+       wait_queue_head_t    rq_reply_waitq;
+       struct ptlrpc_cb_id  rq_reply_cbid;
 
         /** our LNet NID */
         lnet_nid_t           rq_self;
@@ -2017,7 +2017,7 @@ struct ptlrpc_request {
 
         /** Multi-rpc bits */
         /** Per-request waitq introduced by bug 21938 for recovery waiting */
-        cfs_waitq_t rq_set_waitq;
+       wait_queue_head_t rq_set_waitq;
        /** Link item for request set lists */
        cfs_list_t  rq_set_chain;
         /** Link back to the request set */
@@ -2276,7 +2276,7 @@ struct ptlrpc_bulk_desc {
        struct obd_import *bd_import;
        /** Back pointer to the request */
        struct ptlrpc_request *bd_req;
-       cfs_waitq_t            bd_waitq;        /* server side only WQ */
+       wait_queue_head_t      bd_waitq;        /* server side only WQ */
        int                    bd_iov_count;    /* # entries in bd_iov */
        int                    bd_max_iov;      /* allocated size of bd_iov */
        int                    bd_nob;          /* # bytes covered */
@@ -2342,7 +2342,7 @@ struct ptlrpc_thread {
          * the svc this thread belonged to b=18582
          */
        struct ptlrpc_service_part      *t_svcpt;
-       cfs_waitq_t                     t_ctl_waitq;
+       wait_queue_head_t               t_ctl_waitq;
        struct lu_env                   *t_env;
        char                            t_name[PTLRPC_THR_NAME_LEN];
 };
@@ -2601,7 +2601,7 @@ struct ptlrpc_service_part {
          * all threads sleep on this. This wait-queue is signalled when new
          * incoming request arrives and when difficult reply has to be handled.
          */
-       cfs_waitq_t                     scp_waitq;
+       wait_queue_head_t               scp_waitq;
 
        /** request history */
        cfs_list_t                      scp_hist_reqs;
@@ -2666,7 +2666,7 @@ struct ptlrpc_service_part {
        /** List of free reply_states */
        cfs_list_t                      scp_rep_idle;
        /** waitq to run, when adding stuff to srv_free_rs_list */
-       cfs_waitq_t                     scp_rep_waitq;
+       wait_queue_head_t               scp_rep_waitq;
        /** # 'difficult' replies */
        cfs_atomic_t                    scp_nreps_difficult;
 };
@@ -3336,10 +3336,10 @@ ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
 static inline void
 ptlrpc_client_wake_req(struct ptlrpc_request *req)
 {
-        if (req->rq_set == NULL)
-                cfs_waitq_signal(&req->rq_reply_waitq);
-        else
-                cfs_waitq_signal(&req->rq_set->set_waitq);
+       if (req->rq_set == NULL)
+               wake_up(&req->rq_reply_waitq);
+       else
+               wake_up(&req->rq_set->set_waitq);
 }
 
 static inline void
index 31a92a5..ad43606 100644 (file)
@@ -404,7 +404,7 @@ struct client_obd {
 
         /* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
         cfs_atomic_t             cl_destroy_in_flight;
-        cfs_waitq_t              cl_destroy_waitq;
+       wait_queue_head_t        cl_destroy_waitq;
 
         struct mdc_rpc_lock     *cl_rpc_lock;
         struct mdc_rpc_lock     *cl_close_lock;
@@ -517,10 +517,10 @@ struct lov_qos {
                             lq_reset:1,     /* zero current penalties */
                             lq_statfs_in_progress:1; /* statfs op in
                                                         progress */
-        /* qos statfs data */
-        struct lov_statfs_data *lq_statfs_data;
-        cfs_waitq_t         lq_statfs_waitq; /* waitqueue to notify statfs
-                                              * requests completion */
+       /* qos statfs data */
+       struct lov_statfs_data *lq_statfs_data;
+       wait_queue_head_t   lq_statfs_waitq; /* waitqueue to notify statfs
+                                             * requests completion */
 };
 
 struct lov_tgt_desc {
@@ -829,9 +829,9 @@ struct target_recovery_data {
 };
 
 struct obd_llog_group {
-        int                olg_seq;
-        struct llog_ctxt  *olg_ctxts[LLOG_MAX_CTXTS];
-        cfs_waitq_t        olg_waitq;
+       int                olg_seq;
+       struct llog_ctxt   *olg_ctxts[LLOG_MAX_CTXTS];
+       wait_queue_head_t  olg_waitq;
        spinlock_t         olg_lock;
        struct mutex       olg_cat_processing;
 };
@@ -917,11 +917,11 @@ struct obd_device {
          * obd_next_recovery_transno value */
        spinlock_t                       obd_recovery_task_lock;
         __u64                            obd_next_recovery_transno;
-        int                              obd_replayed_requests;
-        int                              obd_requests_queued_for_recovery;
-        cfs_waitq_t                      obd_next_transno_waitq;
-        /* protected by obd_recovery_task_lock */
-        cfs_timer_t                      obd_recovery_timer;
+       int                              obd_replayed_requests;
+       int                              obd_requests_queued_for_recovery;
+       wait_queue_head_t                obd_next_transno_waitq;
+       /* protected by obd_recovery_task_lock */
+       cfs_timer_t                      obd_recovery_timer;
         time_t                           obd_recovery_start; /* seconds */
         time_t                           obd_recovery_end; /* seconds, for lprocfs_status */
         int                              obd_recovery_time_hard;
@@ -961,9 +961,9 @@ struct obd_device {
         cfs_proc_dir_entry_t  *obd_proc_exports_entry;
         cfs_proc_dir_entry_t  *obd_svc_procroot;
         struct lprocfs_stats  *obd_svc_stats;
-        cfs_atomic_t           obd_evict_inprogress;
-        cfs_waitq_t            obd_evict_inprogress_waitq;
-        cfs_list_t             obd_evict_list; /* protected with pet_lock */
+       cfs_atomic_t           obd_evict_inprogress;
+       wait_queue_head_t      obd_evict_inprogress_waitq;
+       cfs_list_t             obd_evict_list; /* protected with pet_lock */
 
         /**
          * Ldlm pool part. Save last calculated SLV and Limit.
index e0cc4ad..1eb6b95 100644 (file)
@@ -1221,30 +1221,30 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
  */
 static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
 {
-        struct lu_object_header *header = obj->co_lu.lo_header;
-        cfs_waitlink_t           waiter;
+       struct lu_object_header *header = obj->co_lu.lo_header;
+       wait_queue_t           waiter;
 
-        if (unlikely(cfs_atomic_read(&header->loh_ref) != 1)) {
-                struct lu_site *site = obj->co_lu.lo_dev->ld_site;
-                struct lu_site_bkt_data *bkt;
+       if (unlikely(cfs_atomic_read(&header->loh_ref) != 1)) {
+               struct lu_site *site = obj->co_lu.lo_dev->ld_site;
+               struct lu_site_bkt_data *bkt;
 
-                bkt = lu_site_bkt_from_fid(site, &header->loh_fid);
+               bkt = lu_site_bkt_from_fid(site, &header->loh_fid);
 
-                cfs_waitlink_init(&waiter);
-                cfs_waitq_add(&bkt->lsb_marche_funebre, &waiter);
+               init_waitqueue_entry_current(&waiter);
+               add_wait_queue(&bkt->lsb_marche_funebre, &waiter);
 
-                while (1) {
-                        cfs_set_current_state(CFS_TASK_UNINT);
-                        if (cfs_atomic_read(&header->loh_ref) == 1)
-                                break;
-                        cfs_waitq_wait(&waiter, CFS_TASK_UNINT);
-                }
+               while (1) {
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       if (cfs_atomic_read(&header->loh_ref) == 1)
+                               break;
+                       waitq_wait(&waiter, TASK_UNINTERRUPTIBLE);
+               }
 
-                cfs_set_current_state(CFS_TASK_RUNNING);
-                cfs_waitq_del(&bkt->lsb_marche_funebre, &waiter);
-        }
+               set_current_state(TASK_RUNNING);
+               remove_wait_queue(&bkt->lsb_marche_funebre, &waiter);
+       }
 
-        cl_object_put(env, obj);
+       cl_object_put(env, obj);
 }
 
 void cl_inode_fini(struct inode *inode)
index d9cc416..2fb4a99 100644 (file)
@@ -644,29 +644,29 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
          * references being held, so that it can go away. No point in
          * holding the lock even if app still believes it has it, since
          * server already dropped it anyway. Only for granted locks too. */
-        if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
-            (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
-                if (lock->l_req_mode == lock->l_granted_mode &&
-                    lock->l_granted_mode != LCK_NL &&
-                    NULL == data)
-                        ldlm_lock_decref_internal(lock, lock->l_req_mode);
-
-                /* Need to wake up the waiter if we were evicted */
-                cfs_waitq_signal(&lock->l_waitq);
-                RETURN(0);
-        }
+       if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
+           (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
+               if (lock->l_req_mode == lock->l_granted_mode &&
+                   lock->l_granted_mode != LCK_NL &&
+                   NULL == data)
+                       ldlm_lock_decref_internal(lock, lock->l_req_mode);
+
+               /* Need to wake up the waiter if we were evicted */
+               wake_up(&lock->l_waitq);
+               RETURN(0);
+       }
 
-        LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
+       LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
 
-        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
-                       LDLM_FL_BLOCK_CONV))) {
-                if (NULL == data)
-                        /* mds granted the lock in the reply */
-                        goto granted;
-                /* CP AST RPC: lock get granted, wake it up */
-                cfs_waitq_signal(&lock->l_waitq);
-                RETURN(0);
-        }
+       if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
+                      LDLM_FL_BLOCK_CONV))) {
+               if (NULL == data)
+                       /* mds granted the lock in the reply */
+                       goto granted;
+               /* CP AST RPC: lock get granted, wake it up */
+               wake_up(&lock->l_waitq);
+               RETURN(0);
+       }
 
         LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                    "sleeping");
index c50448c..fc570c3 100644 (file)
@@ -371,11 +371,11 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
        CFS_INIT_LIST_HEAD(&cli->cl_lru_list);
        client_obd_list_lock_init(&cli->cl_lru_list_lock);
 
-        cfs_waitq_init(&cli->cl_destroy_waitq);
-        cfs_atomic_set(&cli->cl_destroy_in_flight, 0);
+       init_waitqueue_head(&cli->cl_destroy_waitq);
+       cfs_atomic_set(&cli->cl_destroy_in_flight, 0);
 #ifdef ENABLE_CHECKSUM
-        /* Turn on checksumming by default. */
-        cli->cl_checksum = 1;
+       /* Turn on checksumming by default. */
+       cli->cl_checksum = 1;
         /*
          * The supported checksum types will be worked out at connect time
          * Set cl_chksum* to CRC32 for now to avoid returning screwed info
@@ -1193,12 +1193,12 @@ dont_check_exports:
                        spin_unlock(&target->obd_recovery_task_lock);
                 }
 
-                cfs_atomic_inc(&target->obd_req_replay_clients);
-                cfs_atomic_inc(&target->obd_lock_replay_clients);
-                if (cfs_atomic_inc_return(&target->obd_connected_clients) ==
-                    target->obd_max_recoverable_clients)
-                        cfs_waitq_signal(&target->obd_next_transno_waitq);
-        }
+               cfs_atomic_inc(&target->obd_req_replay_clients);
+               cfs_atomic_inc(&target->obd_lock_replay_clients);
+               if (cfs_atomic_inc_return(&target->obd_connected_clients) ==
+                   target->obd_max_recoverable_clients)
+                       wake_up(&target->obd_next_transno_waitq);
+       }
 
         /* Tell the client we're in recovery, when client is involved in it. */
        if (target->obd_recovering && !lw_client)
@@ -1787,36 +1787,36 @@ static int check_for_next_lock(struct obd_device *obd)
  * evict dead clients via health_check
  */
 static int target_recovery_overseer(struct obd_device *obd,
-                                    int (*check_routine)(struct obd_device *),
-                                    int (*health_check)(struct obd_export *))
+                                   int (*check_routine)(struct obd_device *),
+                                   int (*health_check)(struct obd_export *))
 {
 repeat:
-        cfs_wait_event(obd->obd_next_transno_waitq, check_routine(obd));
-        if (obd->obd_abort_recovery) {
-                CWARN("recovery is aborted, evict exports in recovery\n");
-                /** evict exports which didn't finish recovery yet */
-                class_disconnect_stale_exports(obd, exp_finished);
-                return 1;
-        } else if (obd->obd_recovery_expired) {
-                obd->obd_recovery_expired = 0;
-                /** If some clients died being recovered, evict them */
-                LCONSOLE_WARN("%s: recovery is timed out, "
-                              "evict stale exports\n", obd->obd_name);
-                /** evict cexports with no replay in queue, they are stalled */
-                class_disconnect_stale_exports(obd, health_check);
-                /** continue with VBR */
+       wait_event(obd->obd_next_transno_waitq, check_routine(obd));
+       if (obd->obd_abort_recovery) {
+               CWARN("recovery is aborted, evict exports in recovery\n");
+               /** evict exports which didn't finish recovery yet */
+               class_disconnect_stale_exports(obd, exp_finished);
+               return 1;
+       } else if (obd->obd_recovery_expired) {
+               obd->obd_recovery_expired = 0;
+               /** If some clients died being recovered, evict them */
+               LCONSOLE_WARN("%s: recovery is timed out, "
+                             "evict stale exports\n", obd->obd_name);
+               /** evict cexports with no replay in queue, they are stalled */
+               class_disconnect_stale_exports(obd, health_check);
+               /** continue with VBR */
                spin_lock(&obd->obd_dev_lock);
                obd->obd_version_recov = 1;
                spin_unlock(&obd->obd_dev_lock);
-                /**
-                 * reset timer, recovery will proceed with versions now,
-                 * timeout is set just to handle reconnection delays
-                 */
-                extend_recovery_timer(obd, RECONNECT_DELAY_MAX, true);
-                /** Wait for recovery events again, after evicting bad clients */
-                goto repeat;
-        }
-        return 0;
+               /**
+                * reset timer, recovery will proceed with versions now,
+                * timeout is set just to handle reconnection delays
+                */
+               extend_recovery_timer(obd, RECONNECT_DELAY_MAX, true);
+               /** Wait for recovery events again, after evicting bad clients */
+               goto repeat;
+       }
+       return 0;
 }
 
 static struct ptlrpc_request *target_next_replay_req(struct obd_device *obd)
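
The hunk above shows the core substitution of this cleanup: the libcfs wrapper cfs_wait_event() becomes the kernel's native wait_event() macro, and cfs_waitq_signal() becomes wake_up(). A minimal sketch of that pairing, assuming only <linux/wait.h>; the demo_* names are illustrative and not from the patch:

    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
    static int demo_ready;

    static void demo_wake(void)
    {
            demo_ready = 1;
            wake_up(&demo_waitq);   /* wake TASK_NORMAL sleepers */
    }

    static void demo_wait(void)
    {
            /* sleeps in TASK_UNINTERRUPTIBLE; the condition is re-checked
             * after every wakeup, so spurious wakeups are harmless */
            wait_event(demo_waitq, demo_ready != 0);
    }
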
@@ -2126,7 +2126,7 @@ void target_stop_recovery_thread(struct obd_device *obd)
                if (obd->obd_recovering) {
                        CERROR("%s: Aborting recovery\n", obd->obd_name);
                        obd->obd_abort_recovery = 1;
-                       cfs_waitq_signal(&obd->obd_next_transno_waitq);
+                       wake_up(&obd->obd_next_transno_waitq);
                }
                spin_unlock(&obd->obd_dev_lock);
                wait_for_completion(&trd->trd_finishing);
@@ -2144,15 +2144,15 @@ EXPORT_SYMBOL(target_recovery_fini);
 
 static void target_recovery_expired(unsigned long castmeharder)
 {
-        struct obd_device *obd = (struct obd_device *)castmeharder;
-        CDEBUG(D_HA, "%s: recovery timed out; %d clients are still in recovery"
-               " after %lds (%d clients connected)\n",
-               obd->obd_name, cfs_atomic_read(&obd->obd_lock_replay_clients),
-               cfs_time_current_sec()- obd->obd_recovery_start,
-               cfs_atomic_read(&obd->obd_connected_clients));
-
-        obd->obd_recovery_expired = 1;
-        cfs_waitq_signal(&obd->obd_next_transno_waitq);
+       struct obd_device *obd = (struct obd_device *)castmeharder;
+       CDEBUG(D_HA, "%s: recovery timed out; %d clients are still in recovery"
+              " after %lds (%d clients connected)\n",
+              obd->obd_name, cfs_atomic_read(&obd->obd_lock_replay_clients),
+              cfs_time_current_sec()- obd->obd_recovery_start,
+              cfs_atomic_read(&obd->obd_connected_clients));
+
+       obd->obd_recovery_expired = 1;
+       wake_up(&obd->obd_next_transno_waitq);
 }
 
 void target_recovery_init(struct lu_target *lut, svc_handler_t handler)
@@ -2232,9 +2232,9 @@ int target_queue_recovery_request(struct ptlrpc_request *req,
         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LOCK_REPLAY_DONE) {
                 /* client declares he's ready to complete recovery
                  * so, we put the request on th final queue */
-                target_request_copy_get(req);
-                DEBUG_REQ(D_HA, req, "queue final req");
-                cfs_waitq_signal(&obd->obd_next_transno_waitq);
+               target_request_copy_get(req);
+               DEBUG_REQ(D_HA, req, "queue final req");
+               wake_up(&obd->obd_next_transno_waitq);
                spin_lock(&obd->obd_recovery_task_lock);
                if (obd->obd_recovering) {
                        cfs_list_add_tail(&req->rq_list,
@@ -2251,7 +2251,7 @@ int target_queue_recovery_request(struct ptlrpc_request *req,
                /* client declares he's ready to replay locks */
                target_request_copy_get(req);
                DEBUG_REQ(D_HA, req, "queue lock replay req");
-               cfs_waitq_signal(&obd->obd_next_transno_waitq);
+               wake_up(&obd->obd_next_transno_waitq);
                spin_lock(&obd->obd_recovery_task_lock);
                LASSERT(obd->obd_recovering);
                /* usually due to recovery abort */
@@ -2343,7 +2343,7 @@ int target_queue_recovery_request(struct ptlrpc_request *req,
 
         obd->obd_requests_queued_for_recovery++;
        spin_unlock(&obd->obd_recovery_task_lock);
-       cfs_waitq_signal(&obd->obd_next_transno_waitq);
+       wake_up(&obd->obd_next_transno_waitq);
        RETURN(0);
 }
 EXPORT_SYMBOL(target_queue_recovery_request);
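
The substitutions in this file, as in the rest of the series, are mechanical one-to-one renamings from the libcfs wrappers to native Linux primitives. The comment table below only restates the mapping the hunks themselves show:

    /* libcfs wrapper (removed)      native kernel primitive (now used)
     * cfs_waitq_t                   wait_queue_head_t
     * cfs_waitlink_t                wait_queue_t
     * cfs_waitq_init()              init_waitqueue_head()
     * cfs_waitq_signal()            wake_up()
     * cfs_waitq_broadcast()         wake_up_all()
     * cfs_waitq_active()            waitqueue_active()
     * cfs_wait_event()              wait_event()
     * cfs_schedule()                schedule()
     */
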
index a0238b7..70e09be 100644 (file)
@@ -483,18 +483,18 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
         lock->l_resource = resource;
         lu_ref_add(&resource->lr_reference, "lock", lock);
 
-        cfs_atomic_set(&lock->l_refc, 2);
-        CFS_INIT_LIST_HEAD(&lock->l_res_link);
-        CFS_INIT_LIST_HEAD(&lock->l_lru);
-        CFS_INIT_LIST_HEAD(&lock->l_pending_chain);
-        CFS_INIT_LIST_HEAD(&lock->l_bl_ast);
-        CFS_INIT_LIST_HEAD(&lock->l_cp_ast);
-        CFS_INIT_LIST_HEAD(&lock->l_rk_ast);
-        cfs_waitq_init(&lock->l_waitq);
-        lock->l_blocking_lock = NULL;
-        CFS_INIT_LIST_HEAD(&lock->l_sl_mode);
-        CFS_INIT_LIST_HEAD(&lock->l_sl_policy);
-        CFS_INIT_HLIST_NODE(&lock->l_exp_hash);
+       cfs_atomic_set(&lock->l_refc, 2);
+       CFS_INIT_LIST_HEAD(&lock->l_res_link);
+       CFS_INIT_LIST_HEAD(&lock->l_lru);
+       CFS_INIT_LIST_HEAD(&lock->l_pending_chain);
+       CFS_INIT_LIST_HEAD(&lock->l_bl_ast);
+       CFS_INIT_LIST_HEAD(&lock->l_cp_ast);
+       CFS_INIT_LIST_HEAD(&lock->l_rk_ast);
+       init_waitqueue_head(&lock->l_waitq);
+       lock->l_blocking_lock = NULL;
+       CFS_INIT_LIST_HEAD(&lock->l_sl_mode);
+       CFS_INIT_LIST_HEAD(&lock->l_sl_policy);
+       CFS_INIT_HLIST_NODE(&lock->l_exp_hash);
        CFS_INIT_HLIST_NODE(&lock->l_exp_flock_hash);
 
         lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats,
@@ -1252,7 +1252,7 @@ void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
 {
        if ((lock->l_flags & LDLM_FL_FAIL_NOTIFIED) == 0) {
                lock->l_flags |= LDLM_FL_FAIL_NOTIFIED;
-               cfs_waitq_broadcast(&lock->l_waitq);
+               wake_up_all(&lock->l_waitq);
        }
 }
 EXPORT_SYMBOL(ldlm_lock_fail_match_locked);
@@ -1275,7 +1275,7 @@ EXPORT_SYMBOL(ldlm_lock_fail_match);
 void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
 {
        lock->l_flags |= LDLM_FL_LVB_READY;
-       cfs_waitq_broadcast(&lock->l_waitq);
+       wake_up_all(&lock->l_waitq);
 }
 EXPORT_SYMBOL(ldlm_lock_allow_match_locked);
 
index 2f7fb32..d13f033 100644 (file)
@@ -95,25 +95,25 @@ static inline unsigned int ldlm_get_rq_timeout(void)
 struct ldlm_bl_pool {
        spinlock_t              blp_lock;
 
-        /*
-         * blp_prio_list is used for callbacks that should be handled
-         * as a priority. It is used for LDLM_FL_DISCARD_DATA requests.
-         * see bug 13843
-         */
-        cfs_list_t              blp_prio_list;
-
-        /*
-         * blp_list is used for all other callbacks which are likely
-         * to take longer to process.
-         */
-        cfs_list_t              blp_list;
-
-        cfs_waitq_t             blp_waitq;
-       struct completion        blp_comp;
-        cfs_atomic_t            blp_num_threads;
-        cfs_atomic_t            blp_busy_threads;
-        int                     blp_min_threads;
-        int                     blp_max_threads;
+       /*
+        * blp_prio_list is used for callbacks that should be handled
+        * as a priority. It is used for LDLM_FL_DISCARD_DATA requests.
+        * see bug 13843
+        */
+       cfs_list_t              blp_prio_list;
+
+       /*
+        * blp_list is used for all other callbacks which are likely
+        * to take longer to process.
+        */
+       cfs_list_t              blp_list;
+
+       wait_queue_head_t       blp_waitq;
+       struct completion       blp_comp;
+       cfs_atomic_t            blp_num_threads;
+       cfs_atomic_t            blp_busy_threads;
+       int                     blp_min_threads;
+       int                     blp_max_threads;
 };
 
 struct ldlm_bl_work_item {
@@ -150,7 +150,7 @@ static cfs_list_t waiting_locks_list;
 static cfs_timer_t waiting_locks_timer;
 
 static struct expired_lock_thread {
-       cfs_waitq_t             elt_waitq;
+       wait_queue_head_t       elt_waitq;
        int                     elt_state;
        int                     elt_dump;
        cfs_list_t              elt_expired_locks;
@@ -173,20 +173,20 @@ static inline int have_expired_locks(void)
  */
 static int expired_lock_main(void *arg)
 {
-        cfs_list_t *expired = &expired_lock_thread.elt_expired_locks;
-        struct l_wait_info lwi = { 0 };
-        int do_dump;
+       cfs_list_t *expired = &expired_lock_thread.elt_expired_locks;
+       struct l_wait_info lwi = { 0 };
+       int do_dump;
 
-        ENTRY;
+       ENTRY;
 
-        expired_lock_thread.elt_state = ELT_READY;
-        cfs_waitq_signal(&expired_lock_thread.elt_waitq);
+       expired_lock_thread.elt_state = ELT_READY;
+       wake_up(&expired_lock_thread.elt_waitq);
 
-        while (1) {
-                l_wait_event(expired_lock_thread.elt_waitq,
-                             have_expired_locks() ||
-                             expired_lock_thread.elt_state == ELT_TERMINATE,
-                             &lwi);
+       while (1) {
+               l_wait_event(expired_lock_thread.elt_waitq,
+                            have_expired_locks() ||
+                            expired_lock_thread.elt_state == ELT_TERMINATE,
+                            &lwi);
 
                spin_lock_bh(&waiting_locks_spinlock);
                if (expired_lock_thread.elt_dump) {
@@ -201,17 +201,17 @@ static int expired_lock_main(void *arg)
                        libcfs_run_lbug_upcall(&msgdata);
 
                        spin_lock_bh(&waiting_locks_spinlock);
-                        expired_lock_thread.elt_dump = 0;
-                }
+                       expired_lock_thread.elt_dump = 0;
+               }
 
-                do_dump = 0;
+               do_dump = 0;
 
-                while (!cfs_list_empty(expired)) {
-                        struct obd_export *export;
-                        struct ldlm_lock *lock;
+               while (!cfs_list_empty(expired)) {
+                       struct obd_export *export;
+                       struct ldlm_lock *lock;
 
-                        lock = cfs_list_entry(expired->next, struct ldlm_lock,
-                                          l_pending_chain);
+                       lock = cfs_list_entry(expired->next, struct ldlm_lock,
+                                         l_pending_chain);
                        if ((void *)lock < LP_POISON + PAGE_CACHE_SIZE &&
                            (void *)lock >= LP_POISON) {
                                spin_unlock_bh(&waiting_locks_spinlock);
@@ -221,17 +221,17 @@ static int expired_lock_main(void *arg)
                        cfs_list_del_init(&lock->l_pending_chain);
                        if ((void *)lock->l_export <
                             LP_POISON + PAGE_CACHE_SIZE &&
-                            (void *)lock->l_export >= LP_POISON) {
-                                CERROR("lock with free export on elt list %p\n",
-                                       lock->l_export);
-                                lock->l_export = NULL;
-                                LDLM_ERROR(lock, "free export");
-                                /* release extra ref grabbed by
-                                 * ldlm_add_waiting_lock() or
-                                 * ldlm_failed_ast() */
-                                LDLM_LOCK_RELEASE(lock);
-                                continue;
-                        }
+                           (void *)lock->l_export >= LP_POISON) {
+                               CERROR("lock with free export on elt list %p\n",
+                                      lock->l_export);
+                               lock->l_export = NULL;
+                               LDLM_ERROR(lock, "free export");
+                               /* release extra ref grabbed by
+                                * ldlm_add_waiting_lock() or
+                                * ldlm_failed_ast() */
+                               LDLM_LOCK_RELEASE(lock);
+                               continue;
+                       }
 
                        if (lock->l_flags & LDLM_FL_DESTROYED) {
                                /* release the lock refcount where
@@ -254,18 +254,18 @@ static int expired_lock_main(void *arg)
                }
                spin_unlock_bh(&waiting_locks_spinlock);
 
-                if (do_dump && obd_dump_on_eviction) {
-                        CERROR("dump the log upon eviction\n");
-                        libcfs_debug_dumplog();
-                }
+               if (do_dump && obd_dump_on_eviction) {
+                       CERROR("dump the log upon eviction\n");
+                       libcfs_debug_dumplog();
+               }
 
-                if (expired_lock_thread.elt_state == ELT_TERMINATE)
-                        break;
-        }
+               if (expired_lock_thread.elt_state == ELT_TERMINATE)
+                       break;
+       }
 
-        expired_lock_thread.elt_state = ELT_STOPPED;
-        cfs_waitq_signal(&expired_lock_thread.elt_waitq);
-        RETURN(0);
+       expired_lock_thread.elt_state = ELT_STOPPED;
+       wake_up(&expired_lock_thread.elt_waitq);
+       RETURN(0);
 }
 
 static int ldlm_add_waiting_lock(struct ldlm_lock *lock);
@@ -356,7 +356,7 @@ static void waiting_locks_callback(unsigned long unused)
                if (obd_dump_on_timeout && need_dump)
                        expired_lock_thread.elt_dump = __LINE__;
 
-               cfs_waitq_signal(&expired_lock_thread.elt_waitq);
+               wake_up(&expired_lock_thread.elt_waitq);
        }
 
         /*
@@ -603,7 +603,7 @@ static void ldlm_failed_ast(struct ldlm_lock *lock, int rc,
                LDLM_LOCK_GET(lock);
        cfs_list_add(&lock->l_pending_chain,
                     &expired_lock_thread.elt_expired_locks);
-       cfs_waitq_signal(&expired_lock_thread.elt_waitq);
+       wake_up(&expired_lock_thread.elt_waitq);
        spin_unlock_bh(&waiting_locks_spinlock);
 #else
        class_fail_export(lock->l_export);
@@ -1699,8 +1699,8 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
                int to = cfs_time_seconds(1);
                while (to > 0) {
-                       cfs_schedule_timeout_and_set_state(
-                               CFS_TASK_INTERRUPTIBLE, to);
+                       schedule_timeout_and_set_state(
+                               TASK_INTERRUPTIBLE, to);
                        if (lock->l_granted_mode == lock->l_req_mode ||
                            lock->l_flags & LDLM_FL_DESTROYED)
                                break;
@@ -1818,7 +1818,7 @@ out:
                lock_res_and_lock(lock);
                lock->l_flags |= LDLM_FL_FAILED;
                unlock_res_and_lock(lock);
-               cfs_waitq_signal(&lock->l_waitq);
+               wake_up(&lock->l_waitq);
        }
        LDLM_LOCK_RELEASE(lock);
 }
@@ -1900,7 +1900,7 @@ static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
        }
        spin_unlock(&blp->blp_lock);
 
-       cfs_waitq_signal(&blp->blp_waitq);
+       wake_up(&blp->blp_waitq);
 
        /* can not check blwi->blwi_flags as blwi could be already freed in
           LCF_ASYNC mode */
@@ -2883,11 +2883,11 @@ static int ldlm_setup(void)
        ldlm_state->ldlm_bl_pool = blp;
 
        spin_lock_init(&blp->blp_lock);
-        CFS_INIT_LIST_HEAD(&blp->blp_list);
-        CFS_INIT_LIST_HEAD(&blp->blp_prio_list);
-        cfs_waitq_init(&blp->blp_waitq);
-        cfs_atomic_set(&blp->blp_num_threads, 0);
-        cfs_atomic_set(&blp->blp_busy_threads, 0);
+       CFS_INIT_LIST_HEAD(&blp->blp_list);
+       CFS_INIT_LIST_HEAD(&blp->blp_prio_list);
+       init_waitqueue_head(&blp->blp_waitq);
+       cfs_atomic_set(&blp->blp_num_threads, 0);
+       cfs_atomic_set(&blp->blp_busy_threads, 0);
 
 #ifdef __KERNEL__
        if (ldlm_num_threads == 0) {
@@ -2908,7 +2908,7 @@ static int ldlm_setup(void)
 # ifdef HAVE_SERVER_SUPPORT
        CFS_INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
        expired_lock_thread.elt_state = ELT_STOPPED;
-       cfs_waitq_init(&expired_lock_thread.elt_waitq);
+       init_waitqueue_head(&expired_lock_thread.elt_waitq);
 
        CFS_INIT_LIST_HEAD(&waiting_locks_list);
        spin_lock_init(&waiting_locks_spinlock);
@@ -2920,7 +2920,7 @@ static int ldlm_setup(void)
                GOTO(out, rc);
        }
 
-       cfs_wait_event(expired_lock_thread.elt_waitq,
+       wait_event(expired_lock_thread.elt_waitq,
                       expired_lock_thread.elt_state == ELT_READY);
 # endif /* HAVE_SERVER_SUPPORT */
 
@@ -2962,7 +2962,7 @@ static int ldlm_cleanup(void)
 
                        spin_lock(&blp->blp_lock);
                        cfs_list_add_tail(&blwi.blwi_entry, &blp->blp_list);
-                       cfs_waitq_signal(&blp->blp_waitq);
+                       wake_up(&blp->blp_waitq);
                        spin_unlock(&blp->blp_lock);
 
                        wait_for_completion(&blp->blp_comp);
@@ -2985,8 +2985,8 @@ static int ldlm_cleanup(void)
 # ifdef HAVE_SERVER_SUPPORT
        if (expired_lock_thread.elt_state != ELT_STOPPED) {
                expired_lock_thread.elt_state = ELT_TERMINATE;
-               cfs_waitq_signal(&expired_lock_thread.elt_waitq);
-               cfs_wait_event(expired_lock_thread.elt_waitq,
+               wake_up(&expired_lock_thread.elt_waitq);
+               wait_event(expired_lock_thread.elt_waitq,
                               expired_lock_thread.elt_state == ELT_STOPPED);
        }
 # endif
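
expired_lock_main() above also illustrates the start/stop handshake these service threads share: the thread publishes an elt_state transition and wakes the control waitqueue, while ldlm_setup() and ldlm_cleanup() block in wait_event() until the expected state appears. A hedged sketch of the same handshake; the demo_* names are illustrative, not from the patch:

    #include <linux/wait.h>
    #include <linux/kthread.h>

    enum { DEMO_STOPPED, DEMO_READY, DEMO_TERMINATE };

    static DECLARE_WAIT_QUEUE_HEAD(demo_ctl_waitq);
    static int demo_state = DEMO_STOPPED;

    static int demo_thread(void *arg)
    {
            demo_state = DEMO_READY;
            wake_up(&demo_ctl_waitq);       /* unblock the starter */
            wait_event(demo_ctl_waitq, demo_state == DEMO_TERMINATE);
            demo_state = DEMO_STOPPED;
            wake_up(&demo_ctl_waitq);       /* unblock the stopper */
            return 0;
    }

    static void demo_start(void)
    {
            kthread_run(demo_thread, NULL, "demo");
            wait_event(demo_ctl_waitq, demo_state == DEMO_READY);
    }

    static void demo_stop(void)
    {
            demo_state = DEMO_TERMINATE;
            wake_up(&demo_ctl_waitq);
            wait_event(demo_ctl_waitq, demo_state == DEMO_STOPPED);
    }
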
index 723bc2b..d7347a7 100644 (file)
@@ -1340,14 +1340,14 @@ EXPORT_SYMBOL(ldlm_pools_recalc);
 
 static int ldlm_pools_thread_main(void *arg)
 {
-        struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
+       struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
        int s_time, c_time;
-        ENTRY;
+       ENTRY;
 
-        thread_set_flags(thread, SVC_RUNNING);
-        cfs_waitq_signal(&thread->t_ctl_waitq);
+       thread_set_flags(thread, SVC_RUNNING);
+       wake_up(&thread->t_ctl_waitq);
 
-        CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
+       CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
               "ldlm_poold", current_pid());
 
         while (1) {
               "ldlm_poold", current_pid());
 
         while (1) {
@@ -1376,10 +1376,10 @@ static int ldlm_pools_thread_main(void *arg)
                         thread_test_and_clear_flags(thread, SVC_EVENT);
         }
 
-        thread_set_flags(thread, SVC_STOPPED);
-        cfs_waitq_signal(&thread->t_ctl_waitq);
+       thread_set_flags(thread, SVC_STOPPED);
+       wake_up(&thread->t_ctl_waitq);
 
-        CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
+       CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
                "ldlm_poold", current_pid());
 
        complete_and_exit(&ldlm_pools_comp, 0);
                "ldlm_poold", current_pid());
 
        complete_and_exit(&ldlm_pools_comp, 0);
@@ -1399,7 +1399,7 @@ static int ldlm_pools_thread_start(void)
                RETURN(-ENOMEM);
 
        init_completion(&ldlm_pools_comp);
-       cfs_waitq_init(&ldlm_pools_thread->t_ctl_waitq);
+       init_waitqueue_head(&ldlm_pools_thread->t_ctl_waitq);
 
        task = kthread_run(ldlm_pools_thread_main, ldlm_pools_thread,
                           "ldlm_poold");
 
        task = kthread_run(ldlm_pools_thread_main, ldlm_pools_thread,
                           "ldlm_poold");
@@ -1416,25 +1416,25 @@ static int ldlm_pools_thread_start(void)
 
 static void ldlm_pools_thread_stop(void)
 {
-        ENTRY;
+       ENTRY;
 
-        if (ldlm_pools_thread == NULL) {
-                EXIT;
-                return;
-        }
+       if (ldlm_pools_thread == NULL) {
+               EXIT;
+               return;
+       }
 
-        thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
-        cfs_waitq_signal(&ldlm_pools_thread->t_ctl_waitq);
+       thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
+       wake_up(&ldlm_pools_thread->t_ctl_waitq);
 
-        /*
-         * Make sure that pools thread is finished before freeing @thread.
-         * This fixes possible race and oops due to accessing freed memory
-         * in pools thread.
-         */
+       /*
+        * Make sure that pools thread is finished before freeing @thread.
+        * This fixes possible race and oops due to accessing freed memory
+        * in pools thread.
+        */
        wait_for_completion(&ldlm_pools_comp);
-        OBD_FREE_PTR(ldlm_pools_thread);
-        ldlm_pools_thread = NULL;
-        EXIT;
+       OBD_FREE_PTR(ldlm_pools_thread);
+       ldlm_pools_thread = NULL;
+       EXIT;
 }
 
 int ldlm_pools_init(void)
index ce51910..066c8c6 100644 (file)
@@ -185,23 +185,23 @@ static int ldlm_completion_tail(struct ldlm_lock *lock)
  */
 int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data)
 {
-        ENTRY;
+       ENTRY;
 
-        if (flags == LDLM_FL_WAIT_NOREPROC) {
-                LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
-                RETURN(0);
-        }
+       if (flags == LDLM_FL_WAIT_NOREPROC) {
+               LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
+               RETURN(0);
+       }
 
-        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
-                       LDLM_FL_BLOCK_CONV))) {
-                cfs_waitq_signal(&lock->l_waitq);
-                RETURN(ldlm_completion_tail(lock));
-        }
+       if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
+                      LDLM_FL_BLOCK_CONV))) {
+               wake_up(&lock->l_waitq);
+               RETURN(ldlm_completion_tail(lock));
+       }
 
-        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
-                   "going forward");
-        ldlm_reprocess_all(lock->l_resource);
-        RETURN(0);
+       LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
+                  "going forward");
+       ldlm_reprocess_all(lock->l_resource);
+       RETURN(0);
 }
 EXPORT_SYMBOL(ldlm_completion_ast_async);
 
@@ -242,11 +242,11 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
                 goto noreproc;
         }
 
-        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
-                       LDLM_FL_BLOCK_CONV))) {
-                cfs_waitq_signal(&lock->l_waitq);
-                RETURN(0);
-        }
+       if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
+                      LDLM_FL_BLOCK_CONV))) {
+               wake_up(&lock->l_waitq);
+               RETURN(0);
+       }
 
         LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                    "sleeping");
 
         LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                    "sleeping");
index afb26fd..69f8e6d 100644 (file)
@@ -655,15 +655,15 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
         ns->ns_appetite = apt;
         ns->ns_client   = client;
 
-        CFS_INIT_LIST_HEAD(&ns->ns_list_chain);
-        CFS_INIT_LIST_HEAD(&ns->ns_unused_list);
+       CFS_INIT_LIST_HEAD(&ns->ns_list_chain);
+       CFS_INIT_LIST_HEAD(&ns->ns_unused_list);
        spin_lock_init(&ns->ns_lock);
-        cfs_atomic_set(&ns->ns_bref, 0);
-        cfs_waitq_init(&ns->ns_waitq);
+       cfs_atomic_set(&ns->ns_bref, 0);
+       init_waitqueue_head(&ns->ns_waitq);
 
-        ns->ns_max_nolock_size    = NS_DEFAULT_MAX_NOLOCK_BYTES;
-        ns->ns_contention_time    = NS_DEFAULT_CONTENTION_SECONDS;
-        ns->ns_contended_locks    = NS_DEFAULT_CONTENDED_LOCKS;
+       ns->ns_max_nolock_size    = NS_DEFAULT_MAX_NOLOCK_BYTES;
+       ns->ns_contention_time    = NS_DEFAULT_CONTENTION_SECONDS;
+       ns->ns_contended_locks    = NS_DEFAULT_CONTENDED_LOCKS;
 
         ns->ns_max_parallel_ast   = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
         ns->ns_nr_unused          = 0;
@@ -999,7 +999,7 @@ int ldlm_namespace_get_return(struct ldlm_namespace *ns)
 void ldlm_namespace_put(struct ldlm_namespace *ns)
 {
        if (cfs_atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
-               cfs_waitq_signal(&ns->ns_waitq);
+               wake_up(&ns->ns_waitq);
                spin_unlock(&ns->ns_lock);
        }
 }
index 623e8b5..041e7f4 100644 (file)
@@ -323,7 +323,7 @@ int lfsck_master_engine(void *args)
        spin_lock(&lfsck->li_lock);
        thread_set_flags(thread, SVC_RUNNING);
        spin_unlock(&lfsck->li_lock);
-       cfs_waitq_broadcast(&thread->t_ctl_waitq);
+       wake_up_all(&thread->t_ctl_waitq);
 
        if (!cfs_list_empty(&lfsck->li_list_scan) ||
            cfs_list_empty(&lfsck->li_list_double_scan))
@@ -362,7 +362,7 @@ fini_env:
 noenv:
        spin_lock(&lfsck->li_lock);
        thread_set_flags(thread, SVC_STOPPED);
-       cfs_waitq_broadcast(&thread->t_ctl_waitq);
+       wake_up_all(&thread->t_ctl_waitq);
        spin_unlock(&lfsck->li_lock);
        return rc;
 }
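
Note that the lfsck hunks map cfs_waitq_broadcast() to wake_up_all() rather than wake_up(): wake_up() wakes every non-exclusive waiter but at most one exclusive waiter, while wake_up_all() wakes all waiters regardless, so a control queue that may have several sleepers needs the broadcast form. Illustrative contrast only; demo_waitq is not from the patch:

    wake_up(&demo_waitq);     /* non-exclusive waiters + at most one exclusive */
    wake_up_all(&demo_waitq); /* every waiter, exclusive ones included */
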
index b5def07..337bd8c 100644 (file)
@@ -1061,7 +1061,7 @@ int lfsck_stop(const struct lu_env *env, struct dt_device *key, bool pause)
        thread_set_flags(thread, SVC_STOPPING);
        spin_unlock(&lfsck->li_lock);
 
-       cfs_waitq_broadcast(&thread->t_ctl_waitq);
+       wake_up_all(&thread->t_ctl_waitq);
        l_wait_event(thread->t_ctl_waitq,
                     thread_is_stopped(thread),
                     &lwi);
@@ -1098,7 +1098,7 @@ int lfsck_register(const struct lu_env *env, struct dt_device *key,
        CFS_INIT_LIST_HEAD(&lfsck->li_list_double_scan);
        CFS_INIT_LIST_HEAD(&lfsck->li_list_idle);
        atomic_set(&lfsck->li_ref, 1);
-       cfs_waitq_init(&lfsck->li_thread.t_ctl_waitq);
+       init_waitqueue_head(&lfsck->li_thread.t_ctl_waitq);
        lfsck->li_next = next;
        lfsck->li_bottom = key;
 
index 1d1cded..0ed5e52 100644 (file)
@@ -167,20 +167,20 @@ static void ll_delete_capa(struct obd_capa *ocapa)
  */
 static int capa_thread_main(void *unused)
 {
-        struct obd_capa *ocapa, *tmp, *next;
-        struct inode *inode = NULL;
-        struct l_wait_info lwi = { 0 };
-        int rc;
-        ENTRY;
+       struct obd_capa *ocapa, *tmp, *next;
+       struct inode *inode = NULL;
+       struct l_wait_info lwi = { 0 };
+       int rc;
+       ENTRY;
 
-        thread_set_flags(&ll_capa_thread, SVC_RUNNING);
-        cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
+       thread_set_flags(&ll_capa_thread, SVC_RUNNING);
+       wake_up(&ll_capa_thread.t_ctl_waitq);
 
-        while (1) {
-                l_wait_event(ll_capa_thread.t_ctl_waitq,
-                             !thread_is_running(&ll_capa_thread) ||
-                             have_expired_capa(),
-                             &lwi);
+       while (1) {
+               l_wait_event(ll_capa_thread.t_ctl_waitq,
+                            !thread_is_running(&ll_capa_thread) ||
+                            have_expired_capa(),
+                            &lwi);
 
                 if (!thread_is_running(&ll_capa_thread))
                         break;
@@ -280,13 +280,13 @@ static int capa_thread_main(void *unused)
        }
 
        thread_set_flags(&ll_capa_thread, SVC_STOPPED);
-       cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
+       wake_up(&ll_capa_thread.t_ctl_waitq);
        RETURN(0);
 }
 
 void ll_capa_timer_callback(unsigned long unused)
 {
-        cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
+       wake_up(&ll_capa_thread.t_ctl_waitq);
 }
 
 int ll_capa_thread_start(void)
@@ -294,7 +294,7 @@ int ll_capa_thread_start(void)
        cfs_task_t *task;
        ENTRY;
 
-       cfs_waitq_init(&ll_capa_thread.t_ctl_waitq);
+       init_waitqueue_head(&ll_capa_thread.t_ctl_waitq);
 
        task = kthread_run(capa_thread_main, NULL, "ll_capa");
        if (IS_ERR(task)) {
@@ -302,7 +302,7 @@ int ll_capa_thread_start(void)
                        PTR_ERR(task));
                RETURN(PTR_ERR(task));
        }
-       cfs_wait_event(ll_capa_thread.t_ctl_waitq,
+       wait_event(ll_capa_thread.t_ctl_waitq,
                       thread_is_running(&ll_capa_thread));
 
        RETURN(0);
@@ -310,10 +310,10 @@ int ll_capa_thread_start(void)
 
 void ll_capa_thread_stop(void)
 {
-        thread_set_flags(&ll_capa_thread, SVC_STOPPING);
-        cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
-        cfs_wait_event(ll_capa_thread.t_ctl_waitq,
-                       thread_is_stopped(&ll_capa_thread));
+       thread_set_flags(&ll_capa_thread, SVC_STOPPING);
+       wake_up(&ll_capa_thread.t_ctl_waitq);
+       wait_event(ll_capa_thread.t_ctl_waitq,
+                      thread_is_stopped(&ll_capa_thread));
 }
 
 struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc)
index f1a1415..5cedfa9 100644 (file)
@@ -107,15 +107,15 @@ void ll_queue_done_writing(struct inode *inode, unsigned long flags)
                        inode->i_ino, inode->i_generation);
                 cfs_list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
 
-                /* Avoid a concurrent insertion into the close thread queue:
-                 * an inode is already in the close thread, open(), write(),
-                 * close() happen, epoch is closed as the inode is marked as
-                 * LLIF_EPOCH_PENDING. When pages are written inode should not
-                 * be inserted into the queue again, clear this flag to avoid
-                 * it. */
-                lli->lli_flags &= ~LLIF_DONE_WRITING;
-
-                cfs_waitq_signal(&lcq->lcq_waitq);
+               /* Avoid a concurrent insertion into the close thread queue:
+                * an inode is already in the close thread, open(), write(),
+                * close() happen, epoch is closed as the inode is marked as
+                * LLIF_EPOCH_PENDING. When pages are written inode should not
+                * be inserted into the queue again, clear this flag to avoid
+                * it. */
+               lli->lli_flags &= ~LLIF_DONE_WRITING;
+
+               wake_up(&lcq->lcq_waitq);
                spin_unlock(&lcq->lcq_lock);
        }
        spin_unlock(&lli->lli_lock);
@@ -388,7 +388,7 @@ int ll_close_thread_start(struct ll_close_queue **lcq_ret)
 
        spin_lock_init(&lcq->lcq_lock);
        CFS_INIT_LIST_HEAD(&lcq->lcq_head);
-       cfs_waitq_init(&lcq->lcq_waitq);
+       init_waitqueue_head(&lcq->lcq_waitq);
        init_completion(&lcq->lcq_comp);
 
        task = kthread_run(ll_close_thread, lcq, "ll_close");
@@ -406,7 +406,7 @@ void ll_close_thread_shutdown(struct ll_close_queue *lcq)
 {
        init_completion(&lcq->lcq_comp);
        cfs_atomic_inc(&lcq->lcq_stop);
-       cfs_waitq_signal(&lcq->lcq_waitq);
+       wake_up(&lcq->lcq_waitq);
        wait_for_completion(&lcq->lcq_comp);
        OBD_FREE(lcq, sizeof(*lcq));
 }
index af73c05..d8f64b1 100644 (file)
@@ -929,7 +929,7 @@ extern struct inode_operations ll_fast_symlink_inode_operations;
 struct ll_close_queue {
        spinlock_t              lcq_lock;
        cfs_list_t              lcq_head;
-       cfs_waitq_t             lcq_waitq;
+       wait_queue_head_t       lcq_waitq;
        struct completion       lcq_comp;
        cfs_atomic_t            lcq_stop;
 };
@@ -1289,13 +1289,13 @@ struct ll_statahead_info {
         unsigned int            sai_miss_hidden;/* "ls -al", but first dentry
                                                  * is not a hidden one */
         unsigned int            sai_skip_hidden;/* skipped hidden dentry count */
-        unsigned int            sai_ls_all:1,   /* "ls -al", do stat-ahead for
-                                                 * hidden entries */
-                                sai_in_readpage:1,/* statahead is in readdir()*/
-                                sai_agl_valid:1;/* AGL is valid for the dir */
-        cfs_waitq_t             sai_waitq;      /* stat-ahead wait queue */
-        struct ptlrpc_thread    sai_thread;     /* stat-ahead thread */
-        struct ptlrpc_thread    sai_agl_thread; /* AGL thread */
+       unsigned int            sai_ls_all:1,   /* "ls -al", do stat-ahead for
+                                                * hidden entries */
+                               sai_in_readpage:1,/* statahead is in readdir()*/
+                               sai_agl_valid:1;/* AGL is valid for the dir */
+       wait_queue_head_t       sai_waitq;      /* stat-ahead wait queue */
+       struct ptlrpc_thread    sai_thread;     /* stat-ahead thread */
+       struct ptlrpc_thread    sai_agl_thread; /* AGL thread */
        cfs_list_t              sai_entries;    /* entry list */
         cfs_list_t              sai_entries_received; /* entries returned */
         cfs_list_t              sai_entries_stated;   /* entries stated */
index a8622d7..1a9d1b4 100644 (file)
@@ -2091,12 +2091,12 @@ void ll_umount_begin(struct super_block *sb)
                OBD_FREE_PTR(ioc_data);
        }
 
-        /* Really, we'd like to wait until there are no requests outstanding,
-         * and then continue.  For now, we just invalidate the requests,
-         * schedule() and sleep one second if needed, and hope.
-         */
-        cfs_schedule();
-        EXIT;
+       /* Really, we'd like to wait until there are no requests outstanding,
+        * and then continue.  For now, we just invalidate the requests,
+        * schedule() and sleep one second if needed, and hope.
+        */
+       schedule();
+       EXIT;
 }
 
 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
index ad3f230..9854d84 100644 (file)
@@ -118,19 +118,19 @@ enum {
 };
 
 struct lloop_device {
-        int                  lo_number;
-        int                  lo_refcnt;
-        loff_t               lo_offset;
-        loff_t               lo_sizelimit;
-        int                  lo_flags;
-        int                (*ioctl)(struct lloop_device *, int cmd,
-                                    unsigned long arg);
+       int                  lo_number;
+       int                  lo_refcnt;
+       loff_t               lo_offset;
+       loff_t               lo_sizelimit;
+       int                  lo_flags;
+       int                (*ioctl)(struct lloop_device *, int cmd,
+                                   unsigned long arg);
 
-        struct file         *lo_backing_file;
-        struct block_device *lo_device;
-        unsigned             lo_blocksize;
+       struct file         *lo_backing_file;
+       struct block_device *lo_device;
+       unsigned             lo_blocksize;
 
-        int                  old_gfp_mask;
+       int                  old_gfp_mask;
 
        spinlock_t              lo_lock;
        struct bio              *lo_bio;
@@ -138,20 +138,20 @@ struct lloop_device {
        int                     lo_state;
        struct semaphore        lo_sem;
        struct mutex            lo_ctl_mutex;
-        cfs_atomic_t         lo_pending;
-        cfs_waitq_t          lo_bh_wait;
+       cfs_atomic_t            lo_pending;
+       wait_queue_head_t       lo_bh_wait;
 
-        struct request_queue *lo_queue;
+       struct request_queue *lo_queue;
 
-        const struct lu_env *lo_env;
-        struct cl_io         lo_io;
-        struct ll_dio_pages  lo_pvec;
+       const struct lu_env *lo_env;
+       struct cl_io         lo_io;
+       struct ll_dio_pages  lo_pvec;
 
-        /* data to handle bio for lustre. */
-        struct lo_request_data {
-                struct page *lrd_pages[LLOOP_MAX_SEGMENTS];
-                loff_t       lrd_offsets[LLOOP_MAX_SEGMENTS];
-        } lo_requests[1];
+       /* data to handle bio for lustre. */
+       struct lo_request_data {
+               struct page *lrd_pages[LLOOP_MAX_SEGMENTS];
+               loff_t       lrd_offsets[LLOOP_MAX_SEGMENTS];
+       } lo_requests[1];
 };
 
 /*
@@ -285,8 +285,8 @@ static void loop_add_bio(struct lloop_device *lo, struct bio *bio)
        spin_unlock_irqrestore(&lo->lo_lock, flags);
 
        cfs_atomic_inc(&lo->lo_pending);
-       if (cfs_waitq_active(&lo->lo_bh_wait))
-               cfs_waitq_signal(&lo->lo_bh_wait);
+       if (waitqueue_active(&lo->lo_bh_wait))
+               wake_up(&lo->lo_bh_wait);
 }
 
 /*
@@ -439,7 +439,7 @@ static int loop_thread(void *data)
        up(&lo->lo_sem);
 
        for (;;) {
-               cfs_wait_event(lo->lo_bh_wait, loop_active(lo));
+               wait_event(lo->lo_bh_wait, loop_active(lo));
                if (!cfs_atomic_read(&lo->lo_pending)) {
                        int exiting = 0;
                        spin_lock_irq(&lo->lo_lock);
@@ -580,7 +580,7 @@ static int loop_clr_fd(struct lloop_device *lo, struct block_device *bdev,
        spin_lock_irq(&lo->lo_lock);
        lo->lo_state = LLOOP_RUNDOWN;
        spin_unlock_irq(&lo->lo_lock);
-       cfs_waitq_signal(&lo->lo_bh_wait);
+       wake_up(&lo->lo_bh_wait);
 
        down(&lo->lo_sem);
         lo->lo_backing_file = NULL;
@@ -823,7 +823,7 @@ static int __init lloop_init(void)
 
                mutex_init(&lo->lo_ctl_mutex);
                sema_init(&lo->lo_sem, 0);
-               cfs_waitq_init(&lo->lo_bh_wait);
+               init_waitqueue_head(&lo->lo_bh_wait);
                lo->lo_number = i;
                spin_lock_init(&lo->lo_lock);
                 disk->major = lloop_major;
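
loop_add_bio() above preserves the old fast path, with cfs_waitq_active() turning into waitqueue_active(): the check skips the wakeup (and its spinlock) when nothing sleeps on lo_bh_wait. In general the idiom needs a memory barrier between updating the wait condition and testing the queue, or a waiter can miss the update and sleep forever. A hedged sketch with illustrative demo_* names:

    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_bh_wait);
    static int demo_pending;

    static void demo_add_work(void)
    {
            demo_pending = 1;
            /* order the store above against the emptiness test below;
             * pairs with the barrier in the waiter's prepare_to_wait() */
            smp_mb();
            if (waitqueue_active(&demo_bh_wait))
                    wake_up(&demo_bh_wait);
    }
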
index 26efc32..3bb99ac 100644 (file)
@@ -458,20 +458,20 @@ static void ll_agl_add(struct ll_statahead_info *sai,
        }
 
        if (added > 0)
-               cfs_waitq_signal(&sai->sai_agl_thread.t_ctl_waitq);
+               wake_up(&sai->sai_agl_thread.t_ctl_waitq);
 }
 
 static struct ll_statahead_info *ll_sai_alloc(void)
 {
-        struct ll_statahead_info *sai;
-        int                       i;
-        ENTRY;
+       struct ll_statahead_info *sai;
+       int                       i;
+       ENTRY;
 
-        OBD_ALLOC_PTR(sai);
-        if (!sai)
-                RETURN(NULL);
+       OBD_ALLOC_PTR(sai);
+       if (!sai)
+               RETURN(NULL);
 
-        cfs_atomic_set(&sai->sai_refcount, 1);
+       cfs_atomic_set(&sai->sai_refcount, 1);
 
        spin_lock(&sai_generation_lock);
        sai->sai_generation = ++sai_generation;
@@ -479,24 +479,24 @@ static struct ll_statahead_info *ll_sai_alloc(void)
                sai->sai_generation = ++sai_generation;
        spin_unlock(&sai_generation_lock);
 
-        sai->sai_max = LL_SA_RPC_MIN;
-        sai->sai_index = 1;
-        cfs_waitq_init(&sai->sai_waitq);
-        cfs_waitq_init(&sai->sai_thread.t_ctl_waitq);
-        cfs_waitq_init(&sai->sai_agl_thread.t_ctl_waitq);
+       sai->sai_max = LL_SA_RPC_MIN;
+       sai->sai_index = 1;
+       init_waitqueue_head(&sai->sai_waitq);
+       init_waitqueue_head(&sai->sai_thread.t_ctl_waitq);
+       init_waitqueue_head(&sai->sai_agl_thread.t_ctl_waitq);
 
        CFS_INIT_LIST_HEAD(&sai->sai_entries);
-        CFS_INIT_LIST_HEAD(&sai->sai_entries_received);
-        CFS_INIT_LIST_HEAD(&sai->sai_entries_stated);
-        CFS_INIT_LIST_HEAD(&sai->sai_entries_agl);
+       CFS_INIT_LIST_HEAD(&sai->sai_entries_received);
+       CFS_INIT_LIST_HEAD(&sai->sai_entries_stated);
+       CFS_INIT_LIST_HEAD(&sai->sai_entries_agl);
 
-        for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
-                CFS_INIT_LIST_HEAD(&sai->sai_cache[i]);
+       for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
+               CFS_INIT_LIST_HEAD(&sai->sai_cache[i]);
                spin_lock_init(&sai->sai_cache_lock[i]);
-        }
-        cfs_atomic_set(&sai->sai_cache_count, 0);
+       }
+       cfs_atomic_set(&sai->sai_cache_count, 0);
 
-        RETURN(sai);
+       RETURN(sai);
 }
 
 static inline struct ll_statahead_info *
@@ -693,15 +693,15 @@ static void ll_post_statahead(struct ll_statahead_info *sai)
         EXIT;
 
 out:
-        /* The "ll_sa_entry_to_stated()" will drop related ldlm ibits lock
-         * reference count by calling "ll_intent_drop_lock()" in spite of the
-         * above operations failed or not. Do not worry about calling
-         * "ll_intent_drop_lock()" more than once. */
+       /* The "ll_sa_entry_to_stated()" will drop related ldlm ibits lock
+        * reference count by calling "ll_intent_drop_lock()" in spite of the
+        * above operations failed or not. Do not worry about calling
+        * "ll_intent_drop_lock()" more than once. */
        rc = ll_sa_entry_to_stated(sai, entry,
                                   rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
        if (rc == 0 && entry->se_index == sai->sai_index_wait)
-                cfs_waitq_signal(&sai->sai_waitq);
-        ll_sa_entry_put(sai, entry);
+               wake_up(&sai->sai_waitq);
+       ll_sa_entry_put(sai, entry);
 }
 
 static int ll_statahead_interpret(struct ptlrpc_request *req,
@@ -760,7 +760,7 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
 
                ll_sa_entry_put(sai, entry);
                if (wakeup)
-                       cfs_waitq_signal(&sai->sai_thread.t_ctl_waitq);
+                       wake_up(&sai->sai_thread.t_ctl_waitq);
         }
 
         EXIT;
@@ -950,14 +950,14 @@ static void ll_statahead_one(struct dentry *parent, const char* entry_name,
         if (dentry != NULL)
                 dput(dentry);
 
-        if (rc) {
-                rc1 = ll_sa_entry_to_stated(sai, entry,
-                                        rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
-                if (rc1 == 0 && entry->se_index == sai->sai_index_wait)
-                        cfs_waitq_signal(&sai->sai_waitq);
-        } else {
-                sai->sai_sent++;
-        }
+       if (rc) {
+               rc1 = ll_sa_entry_to_stated(sai, entry,
+                                       rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
+               if (rc1 == 0 && entry->se_index == sai->sai_index_wait)
+                       wake_up(&sai->sai_waitq);
+       } else {
+               sai->sai_sent++;
+       }
 
         sai->sai_index++;
         /* drop one refcount on entry by ll_sa_entry_alloc */
@@ -986,7 +986,7 @@ static int ll_agl_thread(void *arg)
        sai->sai_agl_valid = 1;
        thread_set_flags(thread, SVC_RUNNING);
        spin_unlock(&plli->lli_agl_lock);
-       cfs_waitq_signal(&thread->t_ctl_waitq);
+       wake_up(&thread->t_ctl_waitq);
 
         while (1) {
                 l_wait_event(thread->t_ctl_waitq,
@@ -1022,7 +1022,7 @@ static int ll_agl_thread(void *arg)
        }
        thread_set_flags(thread, SVC_STOPPED);
        spin_unlock(&plli->lli_agl_lock);
-       cfs_waitq_signal(&thread->t_ctl_waitq);
+       wake_up(&thread->t_ctl_waitq);
        ll_sai_put(sai);
        CDEBUG(D_READA, "agl thread stopped: [pid %d] [parent %.*s]\n",
               current_pid(), parent->d_name.len, parent->d_name.name);
@@ -1083,7 +1083,7 @@ static int ll_statahead_thread(void *arg)
        spin_lock(&plli->lli_sa_lock);
        thread_set_flags(thread, SVC_RUNNING);
        spin_unlock(&plli->lli_sa_lock);
-       cfs_waitq_signal(&thread->t_ctl_waitq);
+       wake_up(&thread->t_ctl_waitq);
 
        ll_dir_chain_init(&chain);
        page = ll_get_dir_page(dir, pos, &chain);
@@ -1261,7 +1261,7 @@ out:
                spin_lock(&plli->lli_agl_lock);
                thread_set_flags(agl_thread, SVC_STOPPING);
                spin_unlock(&plli->lli_agl_lock);
-                cfs_waitq_signal(&agl_thread->t_ctl_waitq);
+               wake_up(&agl_thread->t_ctl_waitq);
 
                CDEBUG(D_READA, "stop agl thread: [pid %d]\n",
                       current_pid());
@@ -1286,8 +1286,8 @@ out:
        }
        thread_set_flags(thread, SVC_STOPPED);
        spin_unlock(&plli->lli_sa_lock);
-        cfs_waitq_signal(&sai->sai_waitq);
-        cfs_waitq_signal(&thread->t_ctl_waitq);
+       wake_up(&sai->sai_waitq);
+       wake_up(&thread->t_ctl_waitq);
         ll_sai_put(sai);
         dput(parent);
        CDEBUG(D_READA, "statahead thread stopped: [pid %d] [parent %.*s]\n",
@@ -1320,7 +1320,7 @@ void ll_stop_statahead(struct inode *dir, void *key)
                 if (!thread_is_stopped(thread)) {
                         thread_set_flags(thread, SVC_STOPPING);
                        spin_unlock(&lli->lli_sa_lock);
-                       cfs_waitq_signal(&thread->t_ctl_waitq);
+                       wake_up(&thread->t_ctl_waitq);
 
                        CDEBUG(D_READA, "stop statahead thread: [pid %d]\n",
                               current_pid());
@@ -1512,7 +1512,7 @@ ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
        }
 
        if (!thread_is_stopped(thread))
-               cfs_waitq_signal(&thread->t_ctl_waitq);
+               wake_up(&thread->t_ctl_waitq);
 
        EXIT;
 }
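
The statahead and AGL hunks above are mechanical: every cfs_waitq_signal() on a thread control queue becomes the stock wake_up(). The stop handshake those calls participate in reduces to the sketch below (svc_thread and the SVC_* values here are hypothetical stand-ins, not the actual Lustre ptlrpc_thread machinery):

    #include <linux/wait.h>
    #include <linux/spinlock.h>

    struct svc_thread {
            unsigned int            t_flags;        /* SVC_* state bits */
            spinlock_t              t_lock;
            wait_queue_head_t       t_ctl_waitq;
    };

    #define SVC_STOPPING    0x1
    #define SVC_STOPPED     0x2

    static void svc_thread_stop(struct svc_thread *thread)
    {
            spin_lock(&thread->t_lock);
            thread->t_flags = SVC_STOPPING;
            spin_unlock(&thread->t_lock);
            wake_up(&thread->t_ctl_waitq);          /* was cfs_waitq_signal() */

            /* the thread sets SVC_STOPPED and wakes this queue on exit */
            wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
    }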
index 823f37c..57c5023 100644 (file)
@@ -1063,7 +1063,7 @@ int lod_pools_init(struct lod_device *lod, struct lustre_cfg *lcfg)
        OBD_ALLOC_PTR(lod->lod_qos.lq_statfs_data);
        if (NULL == lod->lod_qos.lq_statfs_data)
                RETURN(-ENOMEM);
-       cfs_waitq_init(&lod->lod_qos.lq_statfs_waitq);
+       init_waitqueue_head(&lod->lod_qos.lq_statfs_waitq);
 
        /* Set up OST pool environment */
        lod->lod_pools_hash_body = cfs_hash_create("POOLS", HASH_POOLS_CUR_BITS,
index 2698495..4b1f083 100644 (file)
@@ -230,7 +230,7 @@ struct lov_object {
        /**
         * Waitq - wait for no one else is using lo_lsm
         */
-       cfs_waitq_t            lo_waitq;
+       wait_queue_head_t       lo_waitq;
        /**
         * Layout metadata. NULL if empty layout.
         */
@@ -444,13 +444,13 @@ struct lovsub_page {
 
 
 struct lov_thread_info {
-        struct cl_object_conf   lti_stripe_conf;
-        struct lu_fid           lti_fid;
-        struct cl_lock_descr    lti_ldescr;
-        struct ost_lvb          lti_lvb;
-        struct cl_2queue        lti_cl2q;
-        struct cl_lock_closure  lti_closure;
-        cfs_waitlink_t          lti_waiter;
+       struct cl_object_conf   lti_stripe_conf;
+       struct lu_fid           lti_fid;
+       struct cl_lock_descr    lti_ldescr;
+       struct ost_lvb          lti_lvb;
+       struct cl_2queue        lti_cl2q;
+       struct cl_lock_closure  lti_closure;
+       wait_queue_t            lti_waiter;
 };
 
 /**
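
Two type renames do all the work in this header: cfs_waitq_t becomes wait_queue_head_t (the queue itself) and cfs_waitlink_t becomes wait_queue_t (one waiter's link on that queue). How the renamed pair is used together, as a minimal sketch against the stock kernel API of this era:

    #include <linux/wait.h>
    #include <linux/sched.h>

    /* sleep on wq until *done is set; DEFINE_WAIT() builds the
     * wait_queue_t entry (the old cfs_waitlink_t) on the stack */
    static void wait_until_done(wait_queue_head_t *wq, int *done)
    {
            DEFINE_WAIT(wait);

            for (;;) {
                    prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
                    if (*done)
                            break;
                    schedule();
            }
            finish_wait(wq, &wait);
    }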
index 2c7f854..7fa6b86 100644 (file)
@@ -83,7 +83,7 @@ struct lov_request_set {
        struct brw_page                 *set_pga;
        struct lov_lock_handles         *set_lockh;
        cfs_list_t                      set_list;
-       cfs_waitq_t                     set_waitq;
+       wait_queue_head_t               set_waitq;
        spinlock_t                      set_lock;
 };
 
index 21e7da7..c163cfa 100644 (file)
@@ -376,7 +376,7 @@ static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
 
        LASSERT(cfs_atomic_read(&lov->lo_active_ios) > 0);
        if (cfs_atomic_dec_and_test(&lov->lo_active_ios))
-               cfs_waitq_broadcast(&lov->lo_waitq);
+               wake_up_all(&lov->lo_waitq);
        EXIT;
 }
 
@@ -848,7 +848,7 @@ static void lov_empty_io_fini(const struct lu_env *env,
         ENTRY;
 
        if (cfs_atomic_dec_and_test(&lov->lo_active_ios))
-               cfs_waitq_broadcast(&lov->lo_waitq);
+               wake_up_all(&lov->lo_waitq);
         EXIT;
 }
 
index 1d10ef5..c5eeb32 100644 (file)
@@ -285,13 +285,13 @@ static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
 }
 
 static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
-                               struct lovsub_object *los, int idx)
+                              struct lovsub_object *los, int idx)
 {
-        struct cl_object        *sub;
-        struct lov_layout_raid0 *r0;
-        struct lu_site          *site;
-        struct lu_site_bkt_data *bkt;
-        cfs_waitlink_t          *waiter;
+       struct cl_object        *sub;
+       struct lov_layout_raid0 *r0;
+       struct lu_site          *site;
+       struct lu_site_bkt_data *bkt;
+       wait_queue_t          *waiter;
 
         r0  = &lov->u.raid0;
         LASSERT(r0->lo_sub[idx] == los);
@@ -307,28 +307,28 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
 
         /* ... wait until it is actually destroyed---sub-object clears its
          * ->lo_sub[] slot in lovsub_object_fini() */
-        if (r0->lo_sub[idx] == los) {
-                waiter = &lov_env_info(env)->lti_waiter;
-                cfs_waitlink_init(waiter);
-                cfs_waitq_add(&bkt->lsb_marche_funebre, waiter);
-                cfs_set_current_state(CFS_TASK_UNINT);
-                while (1) {
-                        /* this wait-queue is signaled at the end of
-                         * lu_object_free(). */
-                        cfs_set_current_state(CFS_TASK_UNINT);
+       if (r0->lo_sub[idx] == los) {
+               waiter = &lov_env_info(env)->lti_waiter;
+               init_waitqueue_entry_current(waiter);
+               add_wait_queue(&bkt->lsb_marche_funebre, waiter);
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               while (1) {
+                       /* this wait-queue is signaled at the end of
+                        * lu_object_free(). */
+                       set_current_state(TASK_UNINTERRUPTIBLE);
                        spin_lock(&r0->lo_sub_lock);
                        if (r0->lo_sub[idx] == los) {
                                spin_unlock(&r0->lo_sub_lock);
-                               cfs_waitq_wait(waiter, CFS_TASK_UNINT);
+                               waitq_wait(waiter, TASK_UNINTERRUPTIBLE);
                        } else {
                                spin_unlock(&r0->lo_sub_lock);
-                                cfs_set_current_state(CFS_TASK_RUNNING);
-                                break;
-                        }
-                }
-                cfs_waitq_del(&bkt->lsb_marche_funebre, waiter);
-        }
-        LASSERT(r0->lo_sub[idx] == NULL);
+                               set_current_state(TASK_RUNNING);
+                               break;
+                       }
+               }
+               remove_wait_queue(&bkt->lsb_marche_funebre, waiter);
+       }
+       LASSERT(r0->lo_sub[idx] == NULL);
 }
 
 static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
@@ -736,7 +736,7 @@ int lov_object_init(const struct lu_env *env, struct lu_object *obj,
         ENTRY;
        init_rwsem(&lov->lo_type_guard);
        cfs_atomic_set(&lov->lo_active_ios, 0);
-       cfs_waitq_init(&lov->lo_waitq);
+       init_waitqueue_head(&lov->lo_waitq);
 
        cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));
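
init_waitqueue_entry_current() and waitq_wait() in lov_subobject_kill() above are libcfs compatibility wrappers introduced by this series; on a mainline kernel they expand to roughly the open-coded wait below (slot_busy() is a hypothetical stand-in for the r0->lo_sub[idx] re-check done under lo_sub_lock):

    #include <linux/wait.h>
    #include <linux/sched.h>

    static void wait_for_sub_death(wait_queue_head_t *wq,
                                   int (*slot_busy)(void *), void *arg)
    {
            wait_queue_t waiter;

            init_waitqueue_entry(&waiter, current);
            add_wait_queue(wq, &waiter);
            for (;;) {
                    /* re-arm before re-checking to avoid a lost wakeup */
                    set_current_state(TASK_UNINTERRUPTIBLE);
                    if (!slot_busy(arg))
                            break;
                    schedule();     /* what waitq_wait() boils down to */
            }
            set_current_state(TASK_RUNNING);
            remove_wait_queue(wq, &waiter);
    }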
 
index 43b52b1..5e5da33 100644 (file)
@@ -57,7 +57,7 @@ static void lov_init_set(struct lov_request_set *set)
        set->set_cookies = 0;
        CFS_INIT_LIST_HEAD(&set->set_list);
        cfs_atomic_set(&set->set_refcount, 1);
-       cfs_waitq_init(&set->set_waitq);
+       init_waitqueue_head(&set->set_waitq);
        spin_lock_init(&set->set_lock);
 }
 
@@ -110,16 +110,16 @@ int lov_set_finished(struct lov_request_set *set, int idempotent)
 }
 
 void lov_update_set(struct lov_request_set *set,
-                    struct lov_request *req, int rc)
+                   struct lov_request *req, int rc)
 {
-        req->rq_complete = 1;
-        req->rq_rc = rc;
+       req->rq_complete = 1;
+       req->rq_rc = rc;
 
-        cfs_atomic_inc(&set->set_completes);
-        if (rc == 0)
-                cfs_atomic_inc(&set->set_success);
+       cfs_atomic_inc(&set->set_completes);
+       if (rc == 0)
+               cfs_atomic_inc(&set->set_success);
 
-        cfs_waitq_signal(&set->set_waitq);
+       wake_up(&set->set_waitq);
 }
 
 int lov_update_common_set(struct lov_request_set *set,
@@ -167,7 +167,7 @@ static int lov_check_set(struct lov_obd *lov, int idx)
  */
 int lov_check_and_wait_active(struct lov_obd *lov, int ost_idx)
 {
-       cfs_waitq_t waitq;
+       wait_queue_head_t waitq;
        struct l_wait_info lwi;
        struct lov_tgt_desc *tgt;
        int rc = 0;
@@ -187,7 +187,7 @@ int lov_check_and_wait_active(struct lov_obd *lov, int ost_idx)
 
        mutex_unlock(&lov->lov_lock);
 
-       cfs_waitq_init(&waitq);
+       init_waitqueue_head(&waitq);
        lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(obd_timeout),
                                   cfs_time_seconds(1), NULL, NULL);
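
Note that the waitq in lov_check_and_wait_active() is never signalled; together with LWI_TIMEOUT_INTERVAL it only gives l_wait_event() something to sleep on while polling lov_check_set() once a second. With stock primitives the same polling sleep could be written as below (check() is a hypothetical condition callback, not a Lustre function):

    #include <linux/wait.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    static int poll_until(int (*check)(void *), void *arg, unsigned long timeout)
    {
            wait_queue_head_t waitq;        /* on-stack, never woken */
            unsigned long deadline = jiffies + timeout;

            init_waitqueue_head(&waitq);
            while (!check(arg)) {
                    if (time_after(jiffies, deadline))
                            return -ETIMEDOUT;
                    /* sleeps up to one second; only the timeout ends it */
                    wait_event_timeout(waitq, check(arg), HZ);
            }
            return 0;
    }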
 
index ca85704..e195e1d 100644 (file)
@@ -555,49 +555,49 @@ static int mdc_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
  * in the future - the code may need to be revisited. */
 int mdc_enter_request(struct client_obd *cli)
 {
-        int rc = 0;
-        struct mdc_cache_waiter mcw;
-        struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
-
-        client_obd_list_lock(&cli->cl_loi_list_lock);
-        if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
-                cfs_list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
-                cfs_waitq_init(&mcw.mcw_waitq);
-                client_obd_list_unlock(&cli->cl_loi_list_lock);
-                rc = l_wait_event(mcw.mcw_waitq, mdc_req_avail(cli, &mcw), &lwi);
-                if (rc) {
-                        client_obd_list_lock(&cli->cl_loi_list_lock);
-                        if (cfs_list_empty(&mcw.mcw_entry))
-                                cli->cl_r_in_flight--;
-                        cfs_list_del_init(&mcw.mcw_entry);
-                        client_obd_list_unlock(&cli->cl_loi_list_lock);
-                }
-        } else {
-                cli->cl_r_in_flight++;
-                client_obd_list_unlock(&cli->cl_loi_list_lock);
-        }
-        return rc;
+       int rc = 0;
+       struct mdc_cache_waiter mcw;
+       struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+
+       client_obd_list_lock(&cli->cl_loi_list_lock);
+       if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
+               cfs_list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
+               init_waitqueue_head(&mcw.mcw_waitq);
+               client_obd_list_unlock(&cli->cl_loi_list_lock);
+               rc = l_wait_event(mcw.mcw_waitq, mdc_req_avail(cli, &mcw), &lwi);
+               if (rc) {
+                       client_obd_list_lock(&cli->cl_loi_list_lock);
+                       if (cfs_list_empty(&mcw.mcw_entry))
+                               cli->cl_r_in_flight--;
+                       cfs_list_del_init(&mcw.mcw_entry);
+                       client_obd_list_unlock(&cli->cl_loi_list_lock);
+               }
+       } else {
+               cli->cl_r_in_flight++;
+               client_obd_list_unlock(&cli->cl_loi_list_lock);
+       }
+       return rc;
 }
 
 void mdc_exit_request(struct client_obd *cli)
 {
-        cfs_list_t *l, *tmp;
-        struct mdc_cache_waiter *mcw;
+       cfs_list_t *l, *tmp;
+       struct mdc_cache_waiter *mcw;
+
+       client_obd_list_lock(&cli->cl_loi_list_lock);
+       cli->cl_r_in_flight--;
+       cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
+               if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
+                       /* No free request slots anymore */
+                       break;
+               }
 
-        client_obd_list_lock(&cli->cl_loi_list_lock);
-        cli->cl_r_in_flight--;
-        cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
-                if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
-                        /* No free request slots anymore */
-                        break;
-                }
-
-                mcw = cfs_list_entry(l, struct mdc_cache_waiter, mcw_entry);
-                cfs_list_del_init(&mcw->mcw_entry);
-                cli->cl_r_in_flight++;
-                cfs_waitq_signal(&mcw->mcw_waitq);
-        }
-        /* Empty waiting list? Decrease reqs in-flight number */
+               mcw = cfs_list_entry(l, struct mdc_cache_waiter, mcw_entry);
+               cfs_list_del_init(&mcw->mcw_entry);
+               cli->cl_r_in_flight++;
+               wake_up(&mcw->mcw_waitq);
+       }
+       /* Empty waiting list? Decrease reqs in-flight number */
 
-        client_obd_list_unlock(&cli->cl_loi_list_lock);
+       client_obd_list_unlock(&cli->cl_loi_list_lock);
 }
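
mdc_enter_request()/mdc_exit_request() above implement a request-slot limiter in which every blocked requester parks on its own on-stack waitqueue head, linked onto cl_cache_waiters, so a freed slot wakes exactly one waiter. The shape of the release path, as a condensed hypothetical sketch:

    #include <linux/wait.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct slot_waiter {
            struct list_head        sw_entry;
            wait_queue_head_t       sw_waitq;       /* one head per waiter */
    };

    static void slot_release(spinlock_t *lock, struct list_head *waiters,
                             int *in_flight)
    {
            struct slot_waiter *sw;

            spin_lock(lock);
            (*in_flight)--;
            if (!list_empty(waiters)) {
                    sw = list_entry(waiters->next, struct slot_waiter, sw_entry);
                    list_del_init(&sw->sw_entry);
                    (*in_flight)++;         /* hand the slot straight over */
                    wake_up(&sw->sw_waitq);
            }
            spin_unlock(lock);
    }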
index 55a0254..0f7a9c9 100644 (file)
@@ -1071,19 +1071,19 @@ EXPORT_SYMBOL(mdc_sendpage);
 #endif
 
 int mdc_readpage(struct obd_export *exp, struct md_op_data *op_data,
-                 struct page **pages, struct ptlrpc_request **request)
-{
-        struct ptlrpc_request   *req;
-        struct ptlrpc_bulk_desc *desc;
-        int                      i;
-        cfs_waitq_t              waitq;
-        int                      resends = 0;
-        struct l_wait_info       lwi;
-        int                      rc;
-        ENTRY;
+                struct page **pages, struct ptlrpc_request **request)
+{
+       struct ptlrpc_request   *req;
+       struct ptlrpc_bulk_desc *desc;
+       int                      i;
+       wait_queue_head_t        waitq;
+       int                      resends = 0;
+       struct l_wait_info       lwi;
+       int                      rc;
+       ENTRY;
 
-        *request = NULL;
-        cfs_waitq_init(&waitq);
+       *request = NULL;
+       init_waitqueue_head(&waitq);
 
 restart_bulk:
         req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_READPAGE);
@@ -194,68 +194,68 @@ int mdt_capa_keys_init(const struct lu_env *env, struct mdt_device *mdt)
 
 void mdt_ck_timer_callback(unsigned long castmeharder)
 {
 
-        struct ptlrpc_thread *thread = &mdt->mdt_ck_thread;
+       struct mdt_device *mdt = (struct mdt_device *)castmeharder;
+       struct ptlrpc_thread *thread = &mdt->mdt_ck_thread;
 
 
-        thread_add_flags(thread, SVC_EVENT);
-        cfs_waitq_signal(&thread->t_ctl_waitq);
-        EXIT;
+       ENTRY;
+       thread_add_flags(thread, SVC_EVENT);
+       wake_up(&thread->t_ctl_waitq);
+       EXIT;
 }
 
 static int mdt_ck_thread_main(void *args)
 {
 }
 
 static int mdt_ck_thread_main(void *args)
 {
-        struct mdt_device      *mdt = args;
-        struct ptlrpc_thread   *thread = &mdt->mdt_ck_thread;
-        struct lustre_capa_key *bkey = &mdt->mdt_capa_keys[0],
-                               *rkey = &mdt->mdt_capa_keys[1];
-        struct lustre_capa_key *tmp;
-        struct lu_env           env;
-        struct mdt_thread_info *info;
-        struct md_device       *next;
-        struct l_wait_info      lwi = { 0 };
-        mdsno_t                 mdsnum;
-        int                     rc;
-        ENTRY;
+       struct mdt_device      *mdt = args;
+       struct ptlrpc_thread   *thread = &mdt->mdt_ck_thread;
+       struct lustre_capa_key *bkey = &mdt->mdt_capa_keys[0],
+                              *rkey = &mdt->mdt_capa_keys[1];
+       struct lustre_capa_key *tmp;
+       struct lu_env           env;
+       struct mdt_thread_info *info;
+       struct md_device       *next;
+       struct l_wait_info      lwi = { 0 };
+       mdsno_t                 mdsnum;
+       int                     rc;
+       ENTRY;
 
        unshare_fs_struct();
-        cfs_block_allsigs();
+       cfs_block_allsigs();
 
-        thread_set_flags(thread, SVC_RUNNING);
-        cfs_waitq_signal(&thread->t_ctl_waitq);
+       thread_set_flags(thread, SVC_RUNNING);
+       wake_up(&thread->t_ctl_waitq);
 
-        rc = lu_env_init(&env, LCT_MD_THREAD|LCT_REMEMBER|LCT_NOREF);
-        if (rc)
-                RETURN(rc);
+       rc = lu_env_init(&env, LCT_MD_THREAD|LCT_REMEMBER|LCT_NOREF);
+       if (rc)
+               RETURN(rc);
 
-        thread->t_env = &env;
-        env.le_ctx.lc_thread = thread;
-        env.le_ctx.lc_cookie = 0x1;
+       thread->t_env = &env;
+       env.le_ctx.lc_thread = thread;
+       env.le_ctx.lc_cookie = 0x1;
 
-        info = lu_context_key_get(&env.le_ctx, &mdt_thread_key);
-        LASSERT(info != NULL);
+       info = lu_context_key_get(&env.le_ctx, &mdt_thread_key);
+       LASSERT(info != NULL);
 
        tmp = &info->mti_capa_key;
        mdsnum = mdt_seq_site(mdt)->ss_node_id;
-        while (1) {
-                l_wait_event(thread->t_ctl_waitq,
-                             thread_is_stopping(thread) ||
-                             thread_is_event(thread),
-                             &lwi);
+       while (1) {
+               l_wait_event(thread->t_ctl_waitq,
+                            thread_is_stopping(thread) ||
+                            thread_is_event(thread),
+                            &lwi);
 
-                if (thread_is_stopping(thread))
-                        break;
-                thread_clear_flags(thread, SVC_EVENT);
+               if (thread_is_stopping(thread))
+                       break;
+               thread_clear_flags(thread, SVC_EVENT);
 
-                if (cfs_time_before(cfs_time_current(), mdt->mdt_ck_expiry))
-                        break;
+               if (cfs_time_before(cfs_time_current(), mdt->mdt_ck_expiry))
+                       break;
 
-                *tmp = *rkey;
-                make_capa_key(tmp, mdsnum, rkey->lk_keyid);
+               *tmp = *rkey;
+               make_capa_key(tmp, mdsnum, rkey->lk_keyid);
 
-                next = mdt->mdt_child;
-                rc = next->md_ops->mdo_update_capa_key(&env, next, tmp);
-                if (!rc) {
+               next = mdt->mdt_child;
+               rc = next->md_ops->mdo_update_capa_key(&env, next, tmp);
+               if (!rc) {
                        spin_lock(&capa_lock);
                        *bkey = *rkey;
                        *rkey = *tmp;
@@ -267,25 +267,25 @@ static int mdt_ck_thread_main(void *args)
                                *rkey = *bkey;
                                memset(bkey, 0, sizeof(*bkey));
                                spin_unlock(&capa_lock);
-                        } else {
-                                set_capa_key_expiry(mdt);
-                                DEBUG_CAPA_KEY(D_SEC, rkey, "new");
-                        }
-                }
+                       } else {
+                               set_capa_key_expiry(mdt);
+                               DEBUG_CAPA_KEY(D_SEC, rkey, "new");
+                       }
+               }
                if (rc) {
                        DEBUG_CAPA_KEY(D_ERROR, rkey, "update failed for");
                        /* next retry is in 300 sec */
                        mdt->mdt_ck_expiry = jiffies + 300 * HZ;
                }
 
-                cfs_timer_arm(&mdt->mdt_ck_timer, mdt->mdt_ck_expiry);
-                CDEBUG(D_SEC, "mdt_ck_timer %lu\n", mdt->mdt_ck_expiry);
-        }
-        lu_env_fini(&env);
+               cfs_timer_arm(&mdt->mdt_ck_timer, mdt->mdt_ck_expiry);
+               CDEBUG(D_SEC, "mdt_ck_timer %lu\n", mdt->mdt_ck_expiry);
+       }
+       lu_env_fini(&env);
 
-        thread_set_flags(thread, SVC_STOPPED);
-        cfs_waitq_signal(&thread->t_ctl_waitq);
-        RETURN(0);
+       thread_set_flags(thread, SVC_STOPPED);
+       wake_up(&thread->t_ctl_waitq);
+       RETURN(0);
 }
 
 int mdt_ck_thread_start(struct mdt_device *mdt)
@@ -293,7 +293,7 @@ int mdt_ck_thread_start(struct mdt_device *mdt)
        struct ptlrpc_thread *thread = &mdt->mdt_ck_thread;
        cfs_task_t *task;
 
-       cfs_waitq_init(&thread->t_ctl_waitq);
+       init_waitqueue_head(&thread->t_ctl_waitq);
        task = kthread_run(mdt_ck_thread_main, mdt, "mdt_ck");
        if (IS_ERR(task)) {
                CERROR("cannot start mdt_ck thread, rc = %ld\n", PTR_ERR(task));
@@ -306,12 +306,12 @@ int mdt_ck_thread_start(struct mdt_device *mdt)
 
 void mdt_ck_thread_stop(struct mdt_device *mdt)
 {
-        struct ptlrpc_thread *thread = &mdt->mdt_ck_thread;
+       struct ptlrpc_thread *thread = &mdt->mdt_ck_thread;
 
-        if (!thread_is_running(thread))
-                return;
+       if (!thread_is_running(thread))
+               return;
 
-        thread_set_flags(thread, SVC_STOPPING);
-        cfs_waitq_signal(&thread->t_ctl_waitq);
-        l_wait_condition(thread->t_ctl_waitq, thread_is_stopped(thread));
+       thread_set_flags(thread, SVC_STOPPING);
+       wake_up(&thread->t_ctl_waitq);
+       l_wait_condition(thread->t_ctl_waitq, thread_is_stopped(thread));
 }
index 95437e2..f98ec0e 100644 (file)
@@ -422,7 +422,7 @@ static int mdt_coordinator(void *data)
        ENTRY;
 
        cdt->cdt_thread.t_flags = SVC_RUNNING;
-       cfs_waitq_signal(&cdt->cdt_thread.t_ctl_waitq);
+       wake_up(&cdt->cdt_thread.t_ctl_waitq);
 
        CDEBUG(D_HSM, "%s: coordinator thread starting, pid=%d\n",
               mdt_obd_name(mdt), current_pid());
@@ -652,7 +652,7 @@ out:
                 * and cdt cleaning will be done by event sender
                 */
                cdt->cdt_thread.t_flags = SVC_STOPPED;
-               cfs_waitq_signal(&cdt->cdt_thread.t_ctl_waitq);
+               wake_up(&cdt->cdt_thread.t_ctl_waitq);
        }
 
        if (rc != 0)
@@ -820,7 +820,7 @@ int mdt_hsm_cdt_wakeup(struct mdt_device *mdt)
 
        /* wake up coordinator */
        cdt->cdt_thread.t_flags = SVC_EVENT;
-       cfs_waitq_signal(&cdt->cdt_thread.t_ctl_waitq);
+       wake_up(&cdt->cdt_thread.t_ctl_waitq);
 
        RETURN(0);
 }
@@ -840,7 +840,7 @@ int mdt_hsm_cdt_init(struct mdt_device *mdt)
 
        cdt->cdt_state = CDT_STOPPED;
 
-       cfs_waitq_init(&cdt->cdt_thread.t_ctl_waitq);
+       init_waitqueue_head(&cdt->cdt_thread.t_ctl_waitq);
        mutex_init(&cdt->cdt_llog_lock);
        init_rwsem(&cdt->cdt_agent_lock);
        init_rwsem(&cdt->cdt_request_lock);
@@ -956,7 +956,7 @@ int mdt_hsm_cdt_start(struct mdt_device *mdt)
                rc = 0;
        }
 
-       cfs_wait_event(cdt->cdt_thread.t_ctl_waitq,
+       wait_event(cdt->cdt_thread.t_ctl_waitq,
                       (cdt->cdt_thread.t_flags & SVC_RUNNING));
 
        cdt->cdt_state = CDT_RUNNING;
@@ -990,9 +990,9 @@ int mdt_hsm_cdt_stop(struct mdt_device *mdt)
        if (cdt->cdt_state != CDT_STOPPING) {
                /* stop coordinator thread before cleaning */
                cdt->cdt_thread.t_flags = SVC_STOPPING;
-               cfs_waitq_signal(&cdt->cdt_thread.t_ctl_waitq);
-               cfs_wait_event(cdt->cdt_thread.t_ctl_waitq,
-                              cdt->cdt_thread.t_flags & SVC_STOPPED);
+               wake_up(&cdt->cdt_thread.t_ctl_waitq);
+               wait_event(cdt->cdt_thread.t_ctl_waitq,
+                          cdt->cdt_thread.t_flags & SVC_STOPPED);
        }
        cdt->cdt_state = CDT_STOPPED;
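
cfs_wait_event() maps onto the stock wait_event(): an uninterruptible sleep that re-tests its condition after every wakeup. That re-test, plus writing the flag before calling wake_up(), is what keeps the coordinator handshake race-free. A reduced model of the ordering:

    #include <linux/wait.h>

    struct ctl {
            unsigned int            t_flags;
            wait_queue_head_t       t_ctl_waitq;
    };

    static void ctl_publish(struct ctl *c, unsigned int flag)
    {
            c->t_flags = flag;              /* write the condition first */
            wake_up(&c->t_ctl_waitq);       /* then kick the sleeper */
    }

    static void ctl_wait(struct ctl *c, unsigned int flag)
    {
            /* re-tests t_flags after every wakeup, so a wakeup racing
             * with the test is never lost */
            wait_event(c->t_ctl_waitq, c->t_flags & flag);
    }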
 
index f2e3026..4ea58a0 100644 (file)
@@ -496,7 +496,7 @@ int lprocfs_mgc_rd_ir_state(char *page, char **start, off_t off,
 #define RQ_LATER   0x4
 #define RQ_STOP    0x8
 static int                    rq_state = 0;
-static cfs_waitq_t            rq_waitq;
+static wait_queue_head_t      rq_waitq;
 static DECLARE_COMPLETION(rq_exit);
 
 static void do_requeue(struct config_llog_data *cld)
@@ -645,7 +645,7 @@ static void mgc_requeue_add(struct config_llog_data *cld)
        } else {
                rq_state |= RQ_NOW;
                spin_unlock(&config_list_lock);
-               cfs_waitq_signal(&rq_waitq);
+               wake_up(&rq_waitq);
        }
        EXIT;
 }
@@ -855,7 +855,7 @@ static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
                                rq_state |= RQ_STOP;
                        spin_unlock(&config_list_lock);
                        if (running) {
-                               cfs_waitq_signal(&rq_waitq);
+                               wake_up(&rq_waitq);
                                wait_for_completion(&rq_exit);
                         }
                 }
@@ -910,7 +910,7 @@ static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
 
         if (cfs_atomic_inc_return(&mgc_count) == 1) {
                rq_state = 0;
-               cfs_waitq_init(&rq_waitq);
+               init_waitqueue_head(&rq_waitq);
 
                /* start requeue thread */
                rc = PTR_ERR(kthread_run(mgc_requeue_thread, NULL,
@@ -1100,7 +1100,7 @@ static void mgc_notify_active(struct obd_device *unused)
        spin_lock(&config_list_lock);
        rq_state |= RQ_NOW;
        spin_unlock(&config_list_lock);
-       cfs_waitq_signal(&rq_waitq);
+       wake_up(&rq_waitq);
 
        /* TODO: Help the MGS rebuild nidtbl. -jay */
 }
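
rq_waitq above stays a file-scope queue initialised at runtime because mgc_setup() re-arms the requeue state when the first MGC attaches. For a static queue with no such setup hook, the stock compile-time initialiser would do the same job; a small sketch with hypothetical names:

    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(req_waitq);      /* no init call needed */
    static int req_pending;

    static void post_request(void)
    {
            req_pending = 1;
            wake_up(&req_waitq);    /* same call as mgc_requeue_add() */
    }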
index 06bcc99..514dc23 100644 (file)
@@ -143,12 +143,12 @@ struct fs_db {
         /* Target NIDs Table */
         struct mgs_nidtbl    fsdb_nidtbl;
 
-        /* async thread to notify clients */
-       struct mgs_device   *fsdb_mgs;
-        cfs_waitq_t          fsdb_notify_waitq;
-       struct completion       fsdb_notify_comp;
-        cfs_time_t           fsdb_notify_start;
-        cfs_atomic_t         fsdb_notify_phase;
+       /* async thread to notify clients */
+       struct mgs_device    *fsdb_mgs;
+       wait_queue_head_t    fsdb_notify_waitq;
+       struct completion    fsdb_notify_comp;
+       cfs_time_t           fsdb_notify_start;
+       cfs_atomic_t         fsdb_notify_phase;
        volatile unsigned int fsdb_notify_async:1,
                              fsdb_notify_stop:1;
         /* statistic data */
index 391f925..7de8f59 100644 (file)
@@ -472,20 +472,20 @@ int mgs_ir_init_fs(const struct lu_env *env, struct mgs_device *mgs,
 {
        cfs_task_t *task;
 
-        if (!ir_timeout)
-                ir_timeout = OBD_IR_MGS_TIMEOUT;
+       if (!ir_timeout)
+               ir_timeout = OBD_IR_MGS_TIMEOUT;
 
-        fsdb->fsdb_ir_state = IR_FULL;
-        if (cfs_time_before(cfs_time_current_sec(),
-                            mgs->mgs_start_time + ir_timeout))
-                fsdb->fsdb_ir_state = IR_STARTUP;
-        fsdb->fsdb_nonir_clients = 0;
-        CFS_INIT_LIST_HEAD(&fsdb->fsdb_clients);
+       fsdb->fsdb_ir_state = IR_FULL;
+       if (cfs_time_before(cfs_time_current_sec(),
+                           mgs->mgs_start_time + ir_timeout))
+               fsdb->fsdb_ir_state = IR_STARTUP;
+       fsdb->fsdb_nonir_clients = 0;
+       CFS_INIT_LIST_HEAD(&fsdb->fsdb_clients);
 
-        /* start notify thread */
+       /* start notify thread */
        fsdb->fsdb_mgs = mgs;
-        cfs_atomic_set(&fsdb->fsdb_notify_phase, 0);
-        cfs_waitq_init(&fsdb->fsdb_notify_waitq);
+       cfs_atomic_set(&fsdb->fsdb_notify_phase, 0);
+       init_waitqueue_head(&fsdb->fsdb_notify_waitq);
        init_completion(&fsdb->fsdb_notify_comp);
 
        task = kthread_run(mgs_ir_notify, fsdb,
@@ -496,22 +496,22 @@ int mgs_ir_init_fs(const struct lu_env *env, struct mgs_device *mgs,
                CERROR("Start notify thread error %ld\n", PTR_ERR(task));
 
        mgs_nidtbl_init_fs(env, fsdb);
-        return 0;
+       return 0;
 }
 
 void mgs_ir_fini_fs(struct mgs_device *mgs, struct fs_db *fsdb)
 {
        if (test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags))
-                return;
+               return;
 
-        mgs_fsc_cleanup_by_fsdb(fsdb);
+       mgs_fsc_cleanup_by_fsdb(fsdb);
 
-        mgs_nidtbl_fini_fs(fsdb);
+       mgs_nidtbl_fini_fs(fsdb);
 
-        LASSERT(cfs_list_empty(&fsdb->fsdb_clients));
+       LASSERT(cfs_list_empty(&fsdb->fsdb_clients));
 
-        fsdb->fsdb_notify_stop = 1;
-        cfs_waitq_signal(&fsdb->fsdb_notify_waitq);
+       fsdb->fsdb_notify_stop = 1;
+       wake_up(&fsdb->fsdb_notify_waitq);
        wait_for_completion(&fsdb->fsdb_notify_comp);
 }
 
@@ -563,14 +563,14 @@ int mgs_ir_update(const struct lu_env *env, struct mgs_device *mgs,
         }
        mutex_unlock(&fsdb->fsdb_mutex);
 
-        LASSERT(ergo(mti->mti_flags & LDD_F_IR_CAPABLE, notify));
-        if (notify) {
-                CDEBUG(D_MGS, "Try to revoke recover lock of %s\n",
-                       fsdb->fsdb_name);
-                cfs_atomic_inc(&fsdb->fsdb_notify_phase);
-                cfs_waitq_signal(&fsdb->fsdb_notify_waitq);
-        }
-        return 0;
+       LASSERT(ergo(mti->mti_flags & LDD_F_IR_CAPABLE, notify));
+       if (notify) {
+               CDEBUG(D_MGS, "Try to revoke recover lock of %s\n",
+                      fsdb->fsdb_name);
+               cfs_atomic_inc(&fsdb->fsdb_notify_phase);
+               wake_up(&fsdb->fsdb_notify_waitq);
+       }
+       return 0;
 }
 
 /* NID table can be cached by two entities: Clients and MDTs */
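
The fsdb notify thread above sleeps on fsdb_notify_waitq until either fsdb_notify_phase advances or fsdb_notify_stop is raised, and the stop path pairs the wake_up() with a completion. The handshake in miniature (hypothetical names, stock API):

    #include <linux/wait.h>
    #include <linux/atomic.h>

    static atomic_t phase = ATOMIC_INIT(0);
    static int stopping;
    static DECLARE_WAIT_QUEUE_HEAD(notify_waitq);

    static void post_notification(void)
    {
            atomic_inc(&phase);
            wake_up(&notify_waitq);
    }

    /* returns the phase the caller should treat as "seen" next time */
    static int wait_for_work(int seen)
    {
            wait_event(notify_waitq,
                       stopping || atomic_read(&phase) != seen);
            return atomic_read(&phase);
    }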
index d1bda56..03f9e81 100644 (file)
@@ -1681,7 +1681,7 @@ EXPORT_SYMBOL(cl_req_attr_set);
 void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
 {
        ENTRY;
-       cfs_waitq_init(&anchor->csi_waitq);
+       init_waitqueue_head(&anchor->csi_waitq);
        cfs_atomic_set(&anchor->csi_sync_nr, nrpages);
        cfs_atomic_set(&anchor->csi_barrier, nrpages > 0);
        anchor->csi_sync_rc = 0;
@@ -1751,7 +1751,7 @@ void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
          */
         LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) > 0);
        if (cfs_atomic_dec_and_test(&anchor->csi_sync_nr)) {
-               cfs_waitq_broadcast(&anchor->csi_waitq);
+               wake_up_all(&anchor->csi_waitq);
                /* it's safe to nuke or reuse anchor now */
                cfs_atomic_set(&anchor->csi_barrier, 0);
        }
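
cl_sync_io is a completion barrier: a counter of outstanding pages plus one queue, where the last completion must use wake_up_all() because every sleeper, not just one, has to observe the zero. In miniature (a hypothetical reduction over the stock API):

    #include <linux/wait.h>
    #include <linux/atomic.h>

    struct sync_anchor {
            atomic_t                nr;     /* outstanding pages */
            wait_queue_head_t       waitq;
    };

    static void anchor_init(struct sync_anchor *a, int n)
    {
            init_waitqueue_head(&a->waitq);
            atomic_set(&a->nr, n);
    }

    static void anchor_note(struct sync_anchor *a)
    {
            if (atomic_dec_and_test(&a->nr))
                    wake_up_all(&a->waitq); /* every waiter must see it */
    }

    static void anchor_wait(struct sync_anchor *a)
    {
            wait_event(a->waitq, atomic_read(&a->nr) == 0);
    }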
index c6a36fe..5d7708a 100644 (file)
@@ -386,21 +386,21 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
                 cl_object_get(obj);
                lu_object_ref_add_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock",
                                     lock);
-                CFS_INIT_LIST_HEAD(&lock->cll_layers);
-                CFS_INIT_LIST_HEAD(&lock->cll_linkage);
-                CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
-                lu_ref_init(&lock->cll_reference);
-                lu_ref_init(&lock->cll_holders);
+               CFS_INIT_LIST_HEAD(&lock->cll_layers);
+               CFS_INIT_LIST_HEAD(&lock->cll_linkage);
+               CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
+               lu_ref_init(&lock->cll_reference);
+               lu_ref_init(&lock->cll_holders);
                mutex_init(&lock->cll_guard);
                lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
-                cfs_waitq_init(&lock->cll_wq);
-                head = obj->co_lu.lo_header;
+               init_waitqueue_head(&lock->cll_wq);
+               head = obj->co_lu.lo_header;
                CS_LOCKSTATE_INC(obj, CLS_NEW);
                CS_LOCK_INC(obj, total);
                CS_LOCK_INC(obj, create);
-                cl_lock_lockdep_init(lock);
-                cfs_list_for_each_entry(obj, &head->loh_layers,
-                                        co_lu.lo_linkage) {
+               cl_lock_lockdep_init(lock);
+               cfs_list_for_each_entry(obj, &head->loh_layers,
+                                       co_lu.lo_linkage) {
                         int err;
 
                         err = obj->co_ops->coo_lock_init(env, obj, lock, io);
@@ -946,65 +946,65 @@ EXPORT_SYMBOL(cl_lock_hold_release);
  */
 int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
 {
-        cfs_waitlink_t waiter;
-        cfs_sigset_t blocked;
-        int result;
-
-        ENTRY;
-        LINVRNT(cl_lock_is_mutexed(lock));
-        LINVRNT(cl_lock_invariant(env, lock));
-        LASSERT(lock->cll_depth == 1);
-        LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
-
-        cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
-        result = lock->cll_error;
-        if (result == 0) {
-                /* To avoid being interrupted by the 'non-fatal' signals
-                 * (SIGCHLD, for instance), we'd block them temporarily.
-                 * LU-305 */
-                blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
-
-                cfs_waitlink_init(&waiter);
-                cfs_waitq_add(&lock->cll_wq, &waiter);
-                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-                cl_lock_mutex_put(env, lock);
+       wait_queue_t waiter;
+       cfs_sigset_t blocked;
+       int result;
+
+       ENTRY;
+       LINVRNT(cl_lock_is_mutexed(lock));
+       LINVRNT(cl_lock_invariant(env, lock));
+       LASSERT(lock->cll_depth == 1);
+       LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
+
+       cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
+       result = lock->cll_error;
+       if (result == 0) {
+               /* To avoid being interrupted by the 'non-fatal' signals
+                * (SIGCHLD, for instance), we'd block them temporarily.
+                * LU-305 */
+               blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
+
+               init_waitqueue_entry_current(&waiter);
+               add_wait_queue(&lock->cll_wq, &waiter);
+               set_current_state(TASK_INTERRUPTIBLE);
+               cl_lock_mutex_put(env, lock);
 
-                LASSERT(cl_lock_nr_mutexed(env) == 0);
+               LASSERT(cl_lock_nr_mutexed(env) == 0);
 
                /* Returning ERESTARTSYS instead of EINTR so syscalls
                 * can be restarted if signals are pending here */
                result = -ERESTARTSYS;
                if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
-                       cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);
+                       waitq_wait(&waiter, TASK_INTERRUPTIBLE);
                        if (!cfs_signal_pending())
                                result = 0;
                }
 
-                cl_lock_mutex_get(env, lock);
-                cfs_set_current_state(CFS_TASK_RUNNING);
-                cfs_waitq_del(&lock->cll_wq, &waiter);
+               cl_lock_mutex_get(env, lock);
+               set_current_state(TASK_RUNNING);
+               remove_wait_queue(&lock->cll_wq, &waiter);
 
-                /* Restore old blocked signals */
-                cfs_restore_sigs(blocked);
-        }
-        RETURN(result);
+               /* Restore old blocked signals */
+               cfs_restore_sigs(blocked);
+       }
+       RETURN(result);
 }
 EXPORT_SYMBOL(cl_lock_state_wait);
 
 static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
-                                 enum cl_lock_state state)
+                                enum cl_lock_state state)
 {
-        const struct cl_lock_slice *slice;
+       const struct cl_lock_slice *slice;
 
-        ENTRY;
-        LINVRNT(cl_lock_is_mutexed(lock));
-        LINVRNT(cl_lock_invariant(env, lock));
+       ENTRY;
+       LINVRNT(cl_lock_is_mutexed(lock));
+       LINVRNT(cl_lock_invariant(env, lock));
 
-        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
-                if (slice->cls_ops->clo_state != NULL)
-                        slice->cls_ops->clo_state(env, slice, state);
-        cfs_waitq_broadcast(&lock->cll_wq);
-        EXIT;
+       cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
+               if (slice->cls_ops->clo_state != NULL)
+                       slice->cls_ops->clo_state(env, slice, state);
+       wake_up_all(&lock->cll_wq);
+       EXIT;
 }
 
 /**
@@ -2007,12 +2007,12 @@ int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock)
                 if (info->clt_next_index > descr->cld_end)
                         break;
 
-                if (res == CLP_GANG_RESCHED)
-                        cfs_cond_resched();
-        } while (res != CLP_GANG_OKAY);
+               if (res == CLP_GANG_RESCHED)
+                       cond_resched();
+       } while (res != CLP_GANG_OKAY);
 out:
-        cl_io_fini(env, io);
-        RETURN(result);
+       cl_io_fini(env, io);
+       RETURN(result);
 }
 EXPORT_SYMBOL(cl_lock_discard_pages);
 
@@ -2237,15 +2237,15 @@ EXPORT_SYMBOL(cl_lock_user_add);
 
 void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
 {
-        LINVRNT(cl_lock_is_mutexed(lock));
-        LINVRNT(cl_lock_invariant(env, lock));
-        LASSERT(lock->cll_users > 0);
+       LINVRNT(cl_lock_is_mutexed(lock));
+       LINVRNT(cl_lock_invariant(env, lock));
+       LASSERT(lock->cll_users > 0);
 
-        ENTRY;
-        cl_lock_used_mod(env, lock, -1);
-        if (lock->cll_users == 0)
-                cfs_waitq_broadcast(&lock->cll_wq);
-        EXIT;
+       ENTRY;
+       cl_lock_used_mod(env, lock, -1);
+       if (lock->cll_users == 0)
+               wake_up_all(&lock->cll_wq);
+       EXIT;
 }
 EXPORT_SYMBOL(cl_lock_user_del);
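
cl_lock_state_wait() above open-codes its sleep because the lock mutex must be dropped between queueing the waiter and scheduling, and because it wants -ERESTARTSYS rather than -EINTR on a signal. Its shape with stock calls (waitq_wait() again being the libcfs wrapper around schedule()):

    #include <linux/wait.h>
    #include <linux/sched.h>
    #include <linux/mutex.h>
    #include <linux/errno.h>

    static int interruptible_state_wait(wait_queue_head_t *wq,
                                        struct mutex *guard)
    {
            wait_queue_t waiter;
            int result = -ERESTARTSYS;      /* restartable, unlike -EINTR */

            init_waitqueue_entry(&waiter, current);
            add_wait_queue(wq, &waiter);
            set_current_state(TASK_INTERRUPTIBLE);
            mutex_unlock(guard);            /* never sleep holding the guard */

            schedule();
            if (!signal_pending(current))
                    result = 0;

            mutex_lock(guard);
            set_current_state(TASK_RUNNING);
            remove_wait_queue(wq, &waiter);
            return result;
    }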
 
index a05489f..3bc95c8 100644 (file)
@@ -179,8 +179,8 @@ EXPORT_SYMBOL(cl_page_lookup);
  * Return at least one page in @queue unless there is no covered page.
  */
 int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
-                        struct cl_io *io, pgoff_t start, pgoff_t end,
-                        cl_page_gang_cb_t cb, void *cbdata)
+                       struct cl_io *io, pgoff_t start, pgoff_t end,
+                       cl_page_gang_cb_t cb, void *cbdata)
 {
         struct cl_object_header *hdr;
         struct cl_page          *page;
@@ -256,13 +256,13 @@ int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
                                    "gang_lookup", cfs_current());
                         cl_page_put(env, page);
                 }
-                if (nr < CLT_PVEC_SIZE || end_of_region)
-                        break;
+               if (nr < CLT_PVEC_SIZE || end_of_region)
+                       break;
 
-                if (res == CLP_GANG_OKAY && cfs_need_resched())
-                        res = CLP_GANG_RESCHED;
-                if (res != CLP_GANG_OKAY)
-                        break;
+               if (res == CLP_GANG_OKAY && need_resched())
+                       res = CLP_GANG_RESCHED;
+               if (res != CLP_GANG_OKAY)
+                       break;
 
                spin_lock(&hdr->coh_page_guard);
                tree_lock = 1;
@@ -1475,36 +1475,36 @@ static int page_prune_cb(const struct lu_env *env, struct cl_io *io,
  */
 int cl_pages_prune(const struct lu_env *env, struct cl_object *clobj)
 {
-        struct cl_thread_info   *info;
-        struct cl_object        *obj = cl_object_top(clobj);
-        struct cl_io            *io;
-        int                      result;
+       struct cl_thread_info   *info;
+       struct cl_object        *obj = cl_object_top(clobj);
+       struct cl_io            *io;
+       int                      result;
 
-        ENTRY;
-        info  = cl_env_info(env);
-        io    = &info->clt_io;
+       ENTRY;
+       info  = cl_env_info(env);
+       io    = &info->clt_io;
 
-        /*
-         * initialize the io. This is ugly since we never do IO in this
-         * function, we just make cl_page_list functions happy. -jay
-         */
-        io->ci_obj = obj;
+       /*
+        * initialize the io. This is ugly since we never do IO in this
+        * function, we just make cl_page_list functions happy. -jay
+        */
+       io->ci_obj = obj;
        io->ci_ignore_layout = 1;
-        result = cl_io_init(env, io, CIT_MISC, obj);
-        if (result != 0) {
-                cl_io_fini(env, io);
-                RETURN(io->ci_result);
-        }
+       result = cl_io_init(env, io, CIT_MISC, obj);
+       if (result != 0) {
+               cl_io_fini(env, io);
+               RETURN(io->ci_result);
+       }
 
-        do {
-                result = cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF,
-                                             page_prune_cb, NULL);
-                if (result == CLP_GANG_RESCHED)
-                        cfs_cond_resched();
-        } while (result != CLP_GANG_OKAY);
+       do {
+               result = cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF,
+                                            page_prune_cb, NULL);
+               if (result == CLP_GANG_RESCHED)
+                       cond_resched();
+       } while (result != CLP_GANG_OKAY);
 
-        cl_io_fini(env, io);
-        RETURN(result);
+       cl_io_fini(env, io);
+       RETURN(result);
 }
 EXPORT_SYMBOL(cl_pages_prune);
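
cfs_need_resched()/cfs_cond_resched() in the gang loops map directly onto need_resched()/cond_resched(): do a batch of work, then voluntarily yield between batches when the scheduler wants the CPU back. The idiom, reduced (process_batch() and the BATCH_* codes are hypothetical):

    #include <linux/sched.h>

    #define BATCH_OK        0
    #define BATCH_RESCHED   1

    static void drain(int (*process_batch)(void *), void *arg)
    {
            int res;

            do {
                    res = process_batch(arg);
                    if (res == BATCH_OK && need_resched())
                            res = BATCH_RESCHED;    /* let others run */
                    if (res == BATCH_RESCHED)
                            cond_resched();
            } while (res != BATCH_OK);
    }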
 
index c79386e..ac2b54c 100644 (file)
@@ -1024,39 +1024,39 @@ static void init_imp_at(struct imp_at *at) {
 
 struct obd_import *class_new_import(struct obd_device *obd)
 {
-        struct obd_import *imp;
+       struct obd_import *imp;
 
-        OBD_ALLOC(imp, sizeof(*imp));
-        if (imp == NULL)
-                return NULL;
+       OBD_ALLOC(imp, sizeof(*imp));
+       if (imp == NULL)
+               return NULL;
 
        CFS_INIT_LIST_HEAD(&imp->imp_pinger_chain);
-        CFS_INIT_LIST_HEAD(&imp->imp_zombie_chain);
-        CFS_INIT_LIST_HEAD(&imp->imp_replay_list);
-        CFS_INIT_LIST_HEAD(&imp->imp_sending_list);
-        CFS_INIT_LIST_HEAD(&imp->imp_delayed_list);
+       CFS_INIT_LIST_HEAD(&imp->imp_zombie_chain);
+       CFS_INIT_LIST_HEAD(&imp->imp_replay_list);
+       CFS_INIT_LIST_HEAD(&imp->imp_sending_list);
+       CFS_INIT_LIST_HEAD(&imp->imp_delayed_list);
        spin_lock_init(&imp->imp_lock);
        imp->imp_last_success_conn = 0;
        imp->imp_state = LUSTRE_IMP_NEW;
        imp->imp_obd = class_incref(obd, "import", imp);
        mutex_init(&imp->imp_sec_mutex);
-        cfs_waitq_init(&imp->imp_recovery_waitq);
-
-        cfs_atomic_set(&imp->imp_refcount, 2);
-        cfs_atomic_set(&imp->imp_unregistering, 0);
-        cfs_atomic_set(&imp->imp_inflight, 0);
-        cfs_atomic_set(&imp->imp_replay_inflight, 0);
-        cfs_atomic_set(&imp->imp_inval_count, 0);
-        CFS_INIT_LIST_HEAD(&imp->imp_conn_list);
-        CFS_INIT_LIST_HEAD(&imp->imp_handle.h_link);
+       init_waitqueue_head(&imp->imp_recovery_waitq);
+
+       cfs_atomic_set(&imp->imp_refcount, 2);
+       cfs_atomic_set(&imp->imp_unregistering, 0);
+       cfs_atomic_set(&imp->imp_inflight, 0);
+       cfs_atomic_set(&imp->imp_replay_inflight, 0);
+       cfs_atomic_set(&imp->imp_inval_count, 0);
+       CFS_INIT_LIST_HEAD(&imp->imp_conn_list);
+       CFS_INIT_LIST_HEAD(&imp->imp_handle.h_link);
        class_handle_hash(&imp->imp_handle, &import_handle_ops);
-        init_imp_at(&imp->imp_at);
+       init_imp_at(&imp->imp_at);
 
-        /* the default magic is V2, will be used in connect RPC, and
-         * then adjusted according to the flags in request/reply. */
-        imp->imp_msg_magic = LUSTRE_MSG_MAGIC_V2;
+       /* the default magic is V2, will be used in connect RPC, and
+        * then adjusted according to the flags in request/reply. */
+       imp->imp_msg_magic = LUSTRE_MSG_MAGIC_V2;
 
-        return imp;
+       return imp;
 }
 EXPORT_SYMBOL(class_new_import);
 
@@ -1565,17 +1565,17 @@ void obd_exports_barrier(struct obd_device *obd)
        spin_lock(&obd->obd_dev_lock);
        while (!cfs_list_empty(&obd->obd_unlinked_exports)) {
                spin_unlock(&obd->obd_dev_lock);
-                cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
-                                                   cfs_time_seconds(waited));
-                if (waited > 5 && IS_PO2(waited)) {
-                        LCONSOLE_WARN("%s is waiting for obd_unlinked_exports "
-                                      "more than %d seconds. "
-                                      "The obd refcount = %d. Is it stuck?\n",
-                                      obd->obd_name, waited,
-                                      cfs_atomic_read(&obd->obd_refcount));
-                        dump_exports(obd, 1);
-                }
-                waited *= 2;
+               schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
+                                                  cfs_time_seconds(waited));
+               if (waited > 5 && IS_PO2(waited)) {
+                       LCONSOLE_WARN("%s is waiting for obd_unlinked_exports "
+                                     "more than %d seconds. "
+                                     "The obd refcount = %d. Is it stuck?\n",
+                                     obd->obd_name, waited,
+                                     cfs_atomic_read(&obd->obd_refcount));
+                       dump_exports(obd, 1);
+               }
+               waited *= 2;
                spin_lock(&obd->obd_dev_lock);
        }
        spin_unlock(&obd->obd_dev_lock);
@@ -1629,7 +1629,7 @@ void obd_zombie_impexp_cull(void)
                        spin_unlock(&obd_zombie_impexp_lock);
                }
 
-               cfs_cond_resched();
+               cond_resched();
        } while (import != NULL || export != NULL);
        EXIT;
 }
@@ -1637,7 +1637,7 @@ void obd_zombie_impexp_cull(void)
 static struct completion       obd_zombie_start;
 static struct completion       obd_zombie_stop;
 static unsigned long           obd_zombie_flags;
-static cfs_waitq_t             obd_zombie_waitq;
+static wait_queue_head_t       obd_zombie_waitq;
 static pid_t                   obd_zombie_pid;
 
 enum {
@@ -1695,12 +1695,12 @@ static void obd_zombie_import_add(struct obd_import *imp) {
  */
 static void obd_zombie_impexp_notify(void)
 {
-        /*
-         * Make sure obd_zomebie_impexp_thread get this notification.
-         * It is possible this signal only get by obd_zombie_barrier, and
-         * barrier gulps this notification and sleeps away and hangs ensues
-         */
-        cfs_waitq_broadcast(&obd_zombie_waitq);
+       /*
+        * Make sure obd_zomebie_impexp_thread get this notification.
+        * It is possible this signal only get by obd_zombie_barrier, and
+        * barrier gulps this notification and sleeps away and hangs ensues
+        */
+       wake_up_all(&obd_zombie_waitq);
 }
 
 /**
@@ -1744,18 +1744,18 @@ static int obd_zombie_impexp_thread(void *unused)
        obd_zombie_pid = current_pid();
 
        while (!test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags)) {
-                struct l_wait_info lwi = { 0 };
+               struct l_wait_info lwi = { 0 };
 
-                l_wait_event(obd_zombie_waitq,
-                             !obd_zombie_impexp_check(NULL), &lwi);
-                obd_zombie_impexp_cull();
+               l_wait_event(obd_zombie_waitq,
+                            !obd_zombie_impexp_check(NULL), &lwi);
+               obd_zombie_impexp_cull();
 
-                /*
-                 * Notify obd_zombie_barrier callers that queues
-                 * may be empty.
-                 */
-                cfs_waitq_signal(&obd_zombie_waitq);
-        }
+               /*
+                * Notify obd_zombie_barrier callers that queues
+                * may be empty.
+                */
+               wake_up(&obd_zombie_waitq);
+       }
 
        complete(&obd_zombie_stop);
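
The comment kept in obd_zombie_impexp_notify() explains the wake_up_all() choice: the cull thread and obd_zombie_barrier() callers share one queue, and a single wake_up() could be swallowed by the wrong class of waiter, leaving the other asleep. A reduced model (queue_empty() is a hypothetical predicate):

    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(zombie_waitq);

    static void zombie_notify(void)
    {
            wake_up_all(&zombie_waitq);     /* reach both waiter classes */
    }

    static void zombie_barrier(int (*queue_empty)(void))
    {
            wait_event(zombie_waitq, queue_empty());
    }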
 
@@ -1796,7 +1796,7 @@ int obd_zombie_impexp_init(void)
        spin_lock_init(&obd_zombie_impexp_lock);
        init_completion(&obd_zombie_start);
        init_completion(&obd_zombie_stop);
-       cfs_waitq_init(&obd_zombie_waitq);
+       init_waitqueue_head(&obd_zombie_waitq);
        obd_zombie_pid = 0;
 
 #ifdef __KERNEL__
index 606c22a..ded9dc6 100644
@@ -103,7 +103,7 @@ int __llog_ctxt_put(const struct lu_env *env, struct llog_ctxt *ctxt)
                rc = CTXTP(ctxt, cleanup)(env, ctxt);
 
        llog_ctxt_destroy(ctxt);
-       cfs_waitq_signal(&olg->olg_waitq);
+       wake_up(&olg->olg_waitq);
        return rc;
 }
 EXPORT_SYMBOL(__llog_ctxt_put);
index 46eb347..5dd607b 100644
@@ -272,13 +272,13 @@ int lprocfs_evict_client_open(struct inode *inode, struct file *f)
 
 int lprocfs_evict_client_release(struct inode *inode, struct file *f)
 {
-        struct proc_dir_entry *dp = PDE(f->f_dentry->d_inode);
-        struct obd_device *obd = dp->data;
+       struct proc_dir_entry *dp = PDE(f->f_dentry->d_inode);
+       struct obd_device *obd = dp->data;
 
-        cfs_atomic_dec(&obd->obd_evict_inprogress);
-        cfs_waitq_signal(&obd->obd_evict_inprogress_waitq);
+       cfs_atomic_dec(&obd->obd_evict_inprogress);
+       wake_up(&obd->obd_evict_inprogress_waitq);
 
-        return 0;
+       return 0;
 }
 
 struct file_operations lprocfs_evict_client_fops = {
index 1546068..31c4d72 100644
@@ -103,17 +103,17 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
         cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
         bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
 
-        if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
-                if (lu_object_is_dying(top)) {
+       if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
+               if (lu_object_is_dying(top)) {
 
 
-                        /*
-                         * somebody may be waiting for this, currently only
-                         * used for cl_object, see cl_object_put_last().
-                         */
-                        cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
-                }
-                return;
-        }
+                       /*
+                        * somebody may be waiting for this, currently only
+                        * used for cl_object, see cl_object_put_last().
+                        */
+                       wake_up_all(&bkt->lsb_marche_funebre);
+               }
+               return;
+       }
 
         LASSERT(bkt->lsb_busy > 0);
         bkt->lsb_busy--;
@@ -291,20 +291,20 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o)
          */
         CFS_INIT_LIST_HEAD(&splice);
         cfs_list_splice_init(layers, &splice);
-        while (!cfs_list_empty(&splice)) {
-                /*
-                 * Free layers in bottom-to-top order, so that object header
-                 * lives as long as possible and ->loo_object_free() methods
-                 * can look at its contents.
-                 */
-                o = container_of0(splice.prev, struct lu_object, lo_linkage);
-                cfs_list_del_init(&o->lo_linkage);
-                LASSERT(o->lo_ops->loo_object_free != NULL);
-                o->lo_ops->loo_object_free(env, o);
-        }
+       while (!cfs_list_empty(&splice)) {
+               /*
+                * Free layers in bottom-to-top order, so that object header
+                * lives as long as possible and ->loo_object_free() methods
+                * can look at its contents.
+                */
+               o = container_of0(splice.prev, struct lu_object, lo_linkage);
+               cfs_list_del_init(&o->lo_linkage);
+               LASSERT(o->lo_ops->loo_object_free != NULL);
+               o->lo_ops->loo_object_free(env, o);
+       }
 
-        if (cfs_waitq_active(&bkt->lsb_marche_funebre))
-                cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
+       if (waitqueue_active(&bkt->lsb_marche_funebre))
+               wake_up_all(&bkt->lsb_marche_funebre);
 }
 
 /**
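Note: waitqueue_active() is an unlocked peek that lets lu_object_free() skip the wake-up path when nobody sleeps on lsb_marche_funebre. The idiom is only race-free when waiter registration and this check are ordered by a common lock (here, the hash-bucket lock around htable_lookup()) or by explicit barriers; a hedged sketch:

#include <linux/wait.h>

static void wake_if_waited_on(wait_queue_head_t *wq)
{
        /* Without external ordering against add_wait_queue() this
         * unlocked test can miss a late waiter; use it only when a
         * shared lock serializes sleepers and wakers, otherwise call
         * wake_up_all() unconditionally. */
        if (waitqueue_active(wq))
                wake_up_all(wq);
}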
@@ -361,13 +361,13 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
                         if (count > 0 && --count == 0)
                                 break;
 
-                }
-                cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
-                cfs_cond_resched();
-                /*
-                 * Free everything on the dispose list. This is safe against
-                 * races due to the reasons described in lu_object_put().
-                 */
+               }
+               cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
+               cond_resched();
+               /*
+                * Free everything on the dispose list. This is safe against
+                * races due to the reasons described in lu_object_put().
+                */
                 while (!cfs_list_empty(&dispose)) {
                         h = container_of0(dispose.next,
                                           struct lu_object_header, loh_lru);
@@ -537,10 +537,10 @@ int lu_object_invariant(const struct lu_object *o)
 EXPORT_SYMBOL(lu_object_invariant);
 
 static struct lu_object *htable_lookup(struct lu_site *s,
-                                       cfs_hash_bd_t *bd,
-                                       const struct lu_fid *f,
-                                       cfs_waitlink_t *waiter,
-                                       __u64 *version)
+                                      cfs_hash_bd_t *bd,
+                                      const struct lu_fid *f,
+                                      wait_queue_t *waiter,
+                                      __u64 *version)
 {
         struct lu_site_bkt_data *bkt;
         struct lu_object_header *h;
@@ -574,11 +574,11 @@ static struct lu_object *htable_lookup(struct lu_site *s,
          * drained), and moreover, lookup has to wait until object is freed.
          */
 
-        cfs_waitlink_init(waiter);
-        cfs_waitq_add(&bkt->lsb_marche_funebre, waiter);
-        cfs_set_current_state(CFS_TASK_UNINT);
-        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
-        return ERR_PTR(-EAGAIN);
+       init_waitqueue_entry_current(waiter);
+       add_wait_queue(&bkt->lsb_marche_funebre, waiter);
+       set_current_state(TASK_UNINTERRUPTIBLE);
+       lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
+       return ERR_PTR(-EAGAIN);
 }
 
 static struct lu_object *htable_lookup_nowait(struct lu_site *s,
@@ -646,17 +646,17 @@ static struct lu_object *lu_object_new(const struct lu_env *env,
  * Core logic of lu_object_find*() functions.
  */
 static struct lu_object *lu_object_find_try(const struct lu_env *env,
-                                            struct lu_device *dev,
-                                            const struct lu_fid *f,
-                                            const struct lu_object_conf *conf,
-                                            cfs_waitlink_t *waiter)
-{
-        struct lu_object      *o;
-        struct lu_object      *shadow;
-        struct lu_site        *s;
-        cfs_hash_t            *hs;
-        cfs_hash_bd_t          bd;
-        __u64                  version = 0;
+                                           struct lu_device *dev,
+                                           const struct lu_fid *f,
+                                           const struct lu_object_conf *conf,
+                                           wait_queue_t *waiter)
+{
+       struct lu_object      *o;
+       struct lu_object      *shadow;
+       struct lu_site        *s;
+       cfs_hash_t            *hs;
+       cfs_hash_bd_t          bd;
+       __u64                  version = 0;
 
         /*
          * This uses standard index maintenance protocol:
@@ -724,26 +724,26 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
  * objects of different "stacking" to be created within the same site.
  */
 struct lu_object *lu_object_find_at(const struct lu_env *env,
-                                    struct lu_device *dev,
-                                    const struct lu_fid *f,
-                                    const struct lu_object_conf *conf)
-{
-        struct lu_site_bkt_data *bkt;
-        struct lu_object        *obj;
-        cfs_waitlink_t           wait;
-
-        while (1) {
-                obj = lu_object_find_try(env, dev, f, conf, &wait);
-                if (obj != ERR_PTR(-EAGAIN))
-                        return obj;
-                /*
-                 * lu_object_find_try() already added waiter into the
-                 * wait queue.
-                 */
-                cfs_waitq_wait(&wait, CFS_TASK_UNINT);
-                bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
-                cfs_waitq_del(&bkt->lsb_marche_funebre, &wait);
-        }
+                                   struct lu_device *dev,
+                                   const struct lu_fid *f,
+                                   const struct lu_object_conf *conf)
+{
+       struct lu_site_bkt_data *bkt;
+       struct lu_object        *obj;
+       wait_queue_t           wait;
+
+       while (1) {
+               obj = lu_object_find_try(env, dev, f, conf, &wait);
+               if (obj != ERR_PTR(-EAGAIN))
+                       return obj;
+               /*
+                * lu_object_find_try() already added waiter into the
+                * wait queue.
+                */
+               waitq_wait(&wait, TASK_UNINTERRUPTIBLE);
+               bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
+               remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
+       }
 }
 EXPORT_SYMBOL(lu_object_find_at);
 
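Note: the manual waiter pattern is deliberately split: htable_lookup() registers the entry and sets the task state, while lu_object_find_at() sleeps and deregisters after the retry. init_waitqueue_entry_current() and waitq_wait() are libcfs compatibility shims, presumably wrapping init_waitqueue_entry(..., current) and schedule(). Folded into a single function, the lifecycle looks roughly like this (the 'dying' flag stands in for the real condition):

#include <linux/wait.h>
#include <linux/sched.h>

static void wait_for_object_death(wait_queue_head_t *wq, int *dying)
{
        wait_queue_t wait;      /* wait_queue_entry_t on kernels >= 4.13 */

        init_waitqueue_entry(&wait, current);
        add_wait_queue(wq, &wait);
        set_current_state(TASK_UNINTERRUPTIBLE);
        if (*dying)             /* re-check after queueing */
                schedule();     /* woken by wake_up_all() on the queue */
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(wq, &wait);
}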
@@ -1063,11 +1063,11 @@ int lu_site_init(struct lu_site *s, struct lu_device *top)
                 return -ENOMEM;
         }
 
-        cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
-                bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
-                CFS_INIT_LIST_HEAD(&bkt->lsb_lru);
-                cfs_waitq_init(&bkt->lsb_marche_funebre);
-        }
+       cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
+               bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
+               CFS_INIT_LIST_HEAD(&bkt->lsb_lru);
+               init_waitqueue_head(&bkt->lsb_marche_funebre);
+       }
 
         s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
         if (s->ls_stats == NULL) {
@@ -2159,7 +2159,7 @@ void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
        struct lu_fid           *old = &o->lo_header->loh_fid;
        struct lu_site_bkt_data *bkt;
        struct lu_object        *shadow;
-       cfs_waitlink_t           waiter;
+       wait_queue_t             waiter;
        cfs_hash_t              *hs;
        cfs_hash_bd_t            bd;
        __u64                    version = 0;
index ecaa4a9..43b79b8 100644
@@ -404,12 +404,12 @@ int class_attach(struct lustre_cfg *lcfg)
        /* recovery data */
        cfs_init_timer(&obd->obd_recovery_timer);
        spin_lock_init(&obd->obd_recovery_task_lock);
-        cfs_waitq_init(&obd->obd_next_transno_waitq);
-        cfs_waitq_init(&obd->obd_evict_inprogress_waitq);
-        CFS_INIT_LIST_HEAD(&obd->obd_req_replay_queue);
-        CFS_INIT_LIST_HEAD(&obd->obd_lock_replay_queue);
-        CFS_INIT_LIST_HEAD(&obd->obd_final_req_queue);
-        CFS_INIT_LIST_HEAD(&obd->obd_evict_list);
+       init_waitqueue_head(&obd->obd_next_transno_waitq);
+       init_waitqueue_head(&obd->obd_evict_inprogress_waitq);
+       CFS_INIT_LIST_HEAD(&obd->obd_req_replay_queue);
+       CFS_INIT_LIST_HEAD(&obd->obd_lock_replay_queue);
+       CFS_INIT_LIST_HEAD(&obd->obd_final_req_queue);
+       CFS_INIT_LIST_HEAD(&obd->obd_evict_list);
 
         llog_group_init(&obd->obd_olg, FID_SEQ_LLOG);
 
@@ -633,7 +633,7 @@ int class_cleanup(struct obd_device *obd, struct lustre_cfg *lcfg)
        while (obd->obd_conn_inprogress > 0) {
                spin_unlock(&obd->obd_dev_lock);
 
-               cfs_cond_resched();
+               cond_resched();
 
                spin_lock(&obd->obd_dev_lock);
        }
index dcddc39..f6bb399 100644
@@ -606,26 +606,26 @@ static int echo_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
 
 static int echo_cleanup(struct obd_device *obd)
 {
-        int leaked;
-        ENTRY;
+       int leaked;
+       ENTRY;
 
-        lprocfs_obd_cleanup(obd);
-        lprocfs_free_obd_stats(obd);
+       lprocfs_obd_cleanup(obd);
+       lprocfs_free_obd_stats(obd);
 
-        ldlm_lock_decref(&obd->u.echo.eo_nl_lock, LCK_NL);
+       ldlm_lock_decref(&obd->u.echo.eo_nl_lock, LCK_NL);
 
-        /* XXX Bug 3413; wait for a bit to ensure the BL callback has
-         * happened before calling ldlm_namespace_free() */
-        cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT, cfs_time_seconds(1));
+       /* XXX Bug 3413; wait for a bit to ensure the BL callback has
+        * happened before calling ldlm_namespace_free() */
+       schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE, cfs_time_seconds(1));
 
-        ldlm_namespace_free(obd->obd_namespace, NULL, obd->obd_force);
-        obd->obd_namespace = NULL;
+       ldlm_namespace_free(obd->obd_namespace, NULL, obd->obd_force);
+       obd->obd_namespace = NULL;
 
-        leaked = cfs_atomic_read(&obd->u.echo.eo_prep);
-        if (leaked != 0)
-                CERROR("%d prep/commitrw pages leaked\n", leaked);
+       leaked = cfs_atomic_read(&obd->u.echo.eo_prep);
+       if (leaked != 0)
+               CERROR("%d prep/commitrw pages leaked\n", leaked);
 
-        RETURN(0);
+       RETURN(0);
 }
 
 struct obd_ops echo_obd_ops = {
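Note: schedule_timeout_and_set_state() is not a mainline kernel symbol; it is presumably the libcfs spelling of "set the task state, then schedule_timeout()". Mainline provides schedule_timeout_uninterruptible() for exactly this pairing. A sketch assuming that mapping:

#include <linux/sched.h>

static void pause_uninterruptible(long timeout_jiffies)
{
        /* presumed expansion of the libcfs helper */
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(timeout_jiffies);

        /* idiomatic mainline equivalent:
         *      schedule_timeout_uninterruptible(timeout_jiffies);
         */
}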
index afacf52..cf5f7fa 100644
@@ -1014,7 +1014,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
                spin_unlock(&ec->ec_lock);
                CERROR("echo_client still has objects at cleanup time, "
                       "wait for 1 second\n");
-               cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
+               schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
                                                   cfs_time_seconds(1));
                lu_site_purge(env, &ed->ed_site->cs_lu, -1);
                spin_lock(&ec->ec_lock);
index 11a5e0e..019109b 100644
@@ -120,7 +120,7 @@ static const char *oes_strings[] = {
                /* ----- part 2 ----- */                                      \
                __ext->oe_grants, __ext->oe_nr_pages,                         \
                list_empty_marker(&__ext->oe_pages),                          \
-               cfs_waitq_active(&__ext->oe_waitq) ? '+' : '-',               \
+               waitqueue_active(&__ext->oe_waitq) ? '+' : '-',               \
                __ext->oe_osclock, __ext->oe_mppr, __ext->oe_owner,           \
                /* ----- part 4 ----- */                                      \
                ## __VA_ARGS__);                                              \
@@ -302,7 +302,7 @@ static void osc_extent_state_set(struct osc_extent *ext, int state)
 
        /* TODO: validate the state machine */
        ext->oe_state = state;
-       cfs_waitq_broadcast(&ext->oe_waitq);
+       wake_up_all(&ext->oe_waitq);
 }
 
 static struct osc_extent *osc_extent_alloc(struct osc_object *obj)
@@ -320,7 +320,7 @@ static struct osc_extent *osc_extent_alloc(struct osc_object *obj)
        CFS_INIT_LIST_HEAD(&ext->oe_link);
        ext->oe_state = OES_INV;
        CFS_INIT_LIST_HEAD(&ext->oe_pages);
-       cfs_waitq_init(&ext->oe_waitq);
+       init_waitqueue_head(&ext->oe_waitq);
        ext->oe_osclock = NULL;
 
        return ext;
@@ -1536,7 +1536,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
         * RPC size will be.
         * The exiting condition is no avail grants and no dirty pages caching,
         * that really means there is no space on the OST. */
-       cfs_waitq_init(&ocw.ocw_waitq);
+       init_waitqueue_head(&ocw.ocw_waitq);
        ocw.ocw_oap   = oap;
        ocw.ocw_grant = bytes;
        while (cli->cl_dirty > 0 || cli->cl_w_in_flight > 0) {
@@ -1622,7 +1622,7 @@ wakeup:
                CDEBUG(D_CACHE, "wake up %p for oap %p, avail grant %ld, %d\n",
                       ocw, ocw->ocw_oap, cli->cl_avail_grant, ocw->ocw_rc);
 
-               cfs_waitq_signal(&ocw->ocw_waitq);
+               wake_up(&ocw->ocw_waitq);
        }
 
        EXIT;
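Note: struct osc_cache_waiter lives on the sleeper's stack: osc_enter_cache() initializes a private wait_queue_head_t per call, queues the struct on the client's waiter list, and the grant-freeing path above fills ocw_rc before waking it. A minimal sketch of that per-call waiter pattern (field names illustrative):

#include <linux/wait.h>
#include <linux/list.h>

struct cache_waiter {                   /* lives on the sleeper's stack */
        struct list_head  cw_entry;
        wait_queue_head_t cw_waitq;
        int               cw_rc;
};

static void cache_waiter_init(struct cache_waiter *cw)
{
        INIT_LIST_HEAD(&cw->cw_entry);
        init_waitqueue_head(&cw->cw_waitq);     /* one head per waiter */
        cw->cw_rc = 0;
}

/* Grant-releasing side: publish the result first, then wake the one
 * dedicated sleeper. */
static void cache_waiter_wake(struct cache_waiter *cw, int rc)
{
        cw->cw_rc = rc;
        wake_up(&cw->cw_waitq);
}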
index 33b06e7..a2e1867 100644
@@ -668,7 +668,7 @@ struct osc_extent {
        pgoff_t            oe_max_end;
        /** waitqueue - for those who want to be notified if this extent's
         * state has changed. */
-       cfs_waitq_t        oe_waitq;
+       wait_queue_head_t        oe_waitq;
        /** lock covering this extent */
        struct cl_lock    *oe_osclock;
        /** terminator of this extent. Must be true if this extent is in IO. */
index 31fe062..7066322 100644
@@ -78,11 +78,11 @@ struct osc_async_page {
 #define oap_brw_flags   oap_brw_page.flag
 
 struct osc_cache_waiter {
-        cfs_list_t              ocw_entry;
-        cfs_waitq_t             ocw_waitq;
-        struct osc_async_page  *ocw_oap;
+       cfs_list_t              ocw_entry;
+       wait_queue_head_t             ocw_waitq;
+       struct osc_async_page  *ocw_oap;
        int                     ocw_grant;
-        int                     ocw_rc;
+       int                     ocw_rc;
 };
 
 int osc_create(const struct lu_env *env, struct obd_export *exp,
index 8a9989d..7791e89 100644
@@ -1410,20 +1410,20 @@ static int osc_lock_has_pages(struct osc_lock *olck)
         io->ci_obj = cl_object_top(obj);
        io->ci_ignore_layout = 1;
         cl_io_init(env, io, CIT_MISC, io->ci_obj);
-        do {
-                result = cl_page_gang_lookup(env, obj, io,
-                                             descr->cld_start, descr->cld_end,
-                                             check_cb, (void *)lock);
-                if (result == CLP_GANG_ABORT)
-                        break;
-                if (result == CLP_GANG_RESCHED)
-                        cfs_cond_resched();
-        } while (result != CLP_GANG_OKAY);
-        cl_io_fini(env, io);
+       do {
+               result = cl_page_gang_lookup(env, obj, io,
+                                            descr->cld_start, descr->cld_end,
+                                            check_cb, (void *)lock);
+               if (result == CLP_GANG_ABORT)
+                       break;
+               if (result == CLP_GANG_RESCHED)
+                       cond_resched();
+       } while (result != CLP_GANG_OKAY);
+       cl_io_fini(env, io);
        mutex_unlock(&oob->oo_debug_mutex);
-        cl_env_nested_put(&nest, env);
+       cl_env_nested_put(&nest, env);
 
-        return (result == CLP_GANG_ABORT);
+       return (result == CLP_GANG_ABORT);
 }
 #else
 static int osc_lock_has_pages(struct osc_lock *olck)
index 0691f7b..2bf7e9e 100644
@@ -781,7 +781,7 @@ static void osc_lru_add(struct client_obd *cli, struct osc_page *opg)
 
        if (wakeup) {
                osc_lru_shrink(cli, osc_cache_too_much(cli));
-               cfs_waitq_broadcast(&osc_lru_waitq);
+               wake_up_all(&osc_lru_waitq);
        }
 }
 
@@ -812,7 +812,7 @@ static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del)
                        if (cfs_atomic_read(&cli->cl_lru_shrinkers) == 0 &&
                            !memory_pressure_get())
                                osc_lru_shrink(cli, osc_cache_too_much(cli));
-                       cfs_waitq_signal(&osc_lru_waitq);
+                       wake_up(&osc_lru_waitq);
                }
        } else {
                LASSERT(cfs_list_empty(&opg->ops_lru));
@@ -900,7 +900,7 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
                if (rc > 0)
                        continue;
 
-               cfs_cond_resched();
+               cond_resched();
 
                /* slowest case, all of caching pages are busy, notifying
                 * other OSCs that we're lack of LRU slots. */
index 0103e53..d5be348 100644
@@ -690,32 +690,32 @@ static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
 }
 
 static int osc_destroy_interpret(const struct lu_env *env,
-                                 struct ptlrpc_request *req, void *data,
-                                 int rc)
+                                struct ptlrpc_request *req, void *data,
+                                int rc)
 {
 {
-        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+       struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
 
 
-        cfs_atomic_dec(&cli->cl_destroy_in_flight);
-        cfs_waitq_signal(&cli->cl_destroy_waitq);
-        return 0;
+       cfs_atomic_dec(&cli->cl_destroy_in_flight);
+       wake_up(&cli->cl_destroy_waitq);
+       return 0;
 }
 
 static int osc_can_send_destroy(struct client_obd *cli)
 {
-        if (cfs_atomic_inc_return(&cli->cl_destroy_in_flight) <=
-            cli->cl_max_rpcs_in_flight) {
-                /* The destroy request can be sent */
-                return 1;
-        }
-        if (cfs_atomic_dec_return(&cli->cl_destroy_in_flight) <
-            cli->cl_max_rpcs_in_flight) {
-                /*
-                 * The counter has been modified between the two atomic
-                 * operations.
-                 */
-                cfs_waitq_signal(&cli->cl_destroy_waitq);
-        }
-        return 0;
+       if (cfs_atomic_inc_return(&cli->cl_destroy_in_flight) <=
+           cli->cl_max_rpcs_in_flight) {
+               /* The destroy request can be sent */
+               return 1;
+       }
+       if (cfs_atomic_dec_return(&cli->cl_destroy_in_flight) <
+           cli->cl_max_rpcs_in_flight) {
+               /*
+                * The counter has been modified between the two atomic
+                * operations.
+                */
+               wake_up(&cli->cl_destroy_waitq);
+       }
+       return 0;
 }
 
 int osc_create(const struct lu_env *env, struct obd_export *exp,
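Note: osc_can_send_destroy() is a lock-free admission gate: increment optimistically, roll back on overshoot, and wake a sleeper if the rollback itself re-opened a slot (the "counter has been modified between the two atomic operations" case). The same pattern in isolation:

#include <linux/atomic.h>
#include <linux/wait.h>

static atomic_t in_flight = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(gate_waitq);

/* Returns 1 if a slot below 'limit' was taken, 0 otherwise. */
static int gate_try_enter(int limit)
{
        if (atomic_inc_return(&in_flight) <= limit)
                return 1;
        /* Overshot: undo.  If the decrement lands below the limit, a
         * slot was freed concurrently, so make a sleeper re-check. */
        if (atomic_dec_return(&in_flight) < limit)
                wake_up(&gate_waitq);
        return 0;
}

static void gate_exit(void)
{
        atomic_dec(&in_flight);
        wake_up(&gate_waitq);
}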
@@ -1657,16 +1657,16 @@ static int osc_brw_internal(int cmd, struct obd_export *exp, struct obdo *oa,
                             obd_count page_count, struct brw_page **pga,
                             struct obd_capa *ocapa)
 {
-        struct ptlrpc_request *req;
-        int                    rc;
-        cfs_waitq_t            waitq;
-        int                    generation, resends = 0;
-        struct l_wait_info     lwi;
+       struct ptlrpc_request *req;
+       int                    rc;
+       wait_queue_head_t            waitq;
+       int                    generation, resends = 0;
+       struct l_wait_info     lwi;
 
-        ENTRY;
+       ENTRY;
 
-        cfs_waitq_init(&waitq);
-        generation = exp->exp_obd->u.cli.cl_import->imp_generation;
+       init_waitqueue_head(&waitq);
+       generation = exp->exp_obd->u.cli.cl_import->imp_generation;
 
 restart_bulk:
         rc = osc_brw_prep_request(cmd, &exp->exp_obd->u.cli, oa, lsm,
index fefad46..d657021 100644
@@ -948,21 +948,21 @@ static int osd_trans_stop(const struct lu_env *env, struct thandle *th)
                 OBD_FREE_PTR(oh);
         }
 
-        /* as we want IO to journal and data IO be concurrent, we don't block
-         * awaiting data IO completion in osd_do_bio(), instead we wait here
-         * once transaction is submitted to the journal. all reqular requests
-         * don't do direct IO (except read/write), thus this wait_event becomes
-         * no-op for them.
-         *
-         * IMPORTANT: we have to wait till any IO submited by the thread is
-         * completed otherwise iobuf may be corrupted by different request
-         */
-        cfs_wait_event(iobuf->dr_wait,
-                       cfs_atomic_read(&iobuf->dr_numreqs) == 0);
-        if (!rc)
-                rc = iobuf->dr_error;
+       /* as we want IO to journal and data IO be concurrent, we don't block
+        * awaiting data IO completion in osd_do_bio(), instead we wait here
+        * once transaction is submitted to the journal. all reqular requests
+        * don't do direct IO (except read/write), thus this wait_event becomes
+        * no-op for them.
+        *
+        * IMPORTANT: we have to wait till any IO submited by the thread is
+        * completed otherwise iobuf may be corrupted by different request
+        */
+       wait_event(iobuf->dr_wait,
+                      cfs_atomic_read(&iobuf->dr_numreqs) == 0);
+       if (!rc)
+               rc = iobuf->dr_error;
 
-        RETURN(rc);
+       RETURN(rc);
 }
 
 static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
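Note: dr_wait and dr_numreqs form a hand-rolled completion counter: submitters bump dr_numreqs before issuing each bio, the completion callback decrements it and wakes the queue on the final drop, and osd_trans_stop() parks in wait_event() until the count reads zero. Condensed sketch:

#include <linux/atomic.h>
#include <linux/wait.h>

struct io_tracker {
        atomic_t          nr_inflight;
        wait_queue_head_t wait;
};

static void io_submitted(struct io_tracker *t)
{
        atomic_inc(&t->nr_inflight);    /* before the bio is issued */
}

static void io_completed(struct io_tracker *t)
{
        /* the last completion wakes the drainer */
        if (atomic_dec_and_test(&t->nr_inflight))
                wake_up(&t->wait);
}

static void io_drain(struct io_tracker *t)
{
        /* wait_event() re-checks the expression after every wake-up */
        wait_event(t->wait, atomic_read(&t->nr_inflight) == 0);
}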
index c831e51..cb390c4 100644
@@ -459,7 +459,7 @@ struct osd_it_quota {
 #define MAX_BLOCKS_PER_PAGE (PAGE_CACHE_SIZE / 512)
 
 struct osd_iobuf {
-       cfs_waitq_t        dr_wait;
+       wait_queue_head_t  dr_wait;
        cfs_atomic_t       dr_numreqs;  /* number of reqs being processed */
        int                dr_max_pages;
        int                dr_npages;
index eb6b2a0..fde470a 100644
@@ -77,15 +77,15 @@ static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
                 iobuf->dr_init_at);
        LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);
 
-        cfs_waitq_init(&iobuf->dr_wait);
-        cfs_atomic_set(&iobuf->dr_numreqs, 0);
-        iobuf->dr_npages = 0;
-        iobuf->dr_error = 0;
-        iobuf->dr_dev = d;
-        iobuf->dr_frags = 0;
-        iobuf->dr_elapsed = 0;
-        /* must be counted before, so assert */
-        iobuf->dr_rw = rw;
+       init_waitqueue_head(&iobuf->dr_wait);
+       cfs_atomic_set(&iobuf->dr_numreqs, 0);
+       iobuf->dr_npages = 0;
+       iobuf->dr_error = 0;
+       iobuf->dr_dev = d;
+       iobuf->dr_frags = 0;
+       iobuf->dr_elapsed = 0;
+       /* must be counted before, so assert */
+       iobuf->dr_rw = rw;
        iobuf->dr_init_at = line;
 
        blocks = pages * (PAGE_CACHE_SIZE >> osd_sb(d)->s_blocksize_bits);
@@ -221,7 +221,7 @@ static int dio_complete_routine(struct bio *bio, unsigned int done, int error)
                iobuf->dr_elapsed_valid = 1;
        }
        if (cfs_atomic_dec_and_test(&iobuf->dr_numreqs))
-               cfs_waitq_signal(&iobuf->dr_wait);
+               wake_up(&iobuf->dr_wait);
 
         /* Completed bios used to be chained off iobuf->dr_bios and freed in
          * filter_clear_dreq().  It was then possible to exhaust the biovec-256
@@ -402,7 +402,7 @@ static int osd_do_bio(struct osd_device *osd, struct inode *inode,
          * parallel and wait for IO completion once transaction is stopped
          * see osd_trans_stop() for more details -bzzz */
         if (iobuf->dr_rw == 0) {
-                cfs_wait_event(iobuf->dr_wait,
+               wait_event(iobuf->dr_wait,
                                cfs_atomic_read(&iobuf->dr_numreqs) == 0);
         }
 
index ca40fe2..669cd95 100644
@@ -414,7 +414,7 @@ static int osd_scrub_prep(struct osd_device *dev)
                spin_lock(&scrub->os_lock);
                thread_set_flags(thread, SVC_RUNNING);
                spin_unlock(&scrub->os_lock);
-               cfs_waitq_broadcast(&thread->t_ctl_waitq);
+               wake_up_all(&thread->t_ctl_waitq);
        }
        up_write(&scrub->os_rwsem);
 
@@ -1078,7 +1078,7 @@ wait:
            ooc->ooc_pos_preload < scrub->os_pos_current) {
                spin_lock(&scrub->os_lock);
                it->ooi_waiting = 0;
-               cfs_waitq_broadcast(&thread->t_ctl_waitq);
+               wake_up_all(&thread->t_ctl_waitq);
                spin_unlock(&scrub->os_lock);
        }
 
@@ -1218,7 +1218,7 @@ static int osd_otable_it_preload(const struct lu_env *env,
        if (scrub->os_waiting && osd_scrub_has_window(scrub, ooc)) {
                spin_lock(&scrub->os_lock);
                scrub->os_waiting = 0;
-               cfs_waitq_broadcast(&scrub->os_thread.t_ctl_waitq);
+               wake_up_all(&scrub->os_thread.t_ctl_waitq);
                spin_unlock(&scrub->os_lock);
        }
 
@@ -1290,7 +1290,7 @@ out:
 noenv:
        spin_lock(&scrub->os_lock);
        thread_set_flags(thread, SVC_STOPPED);
-       cfs_waitq_broadcast(&thread->t_ctl_waitq);
+       wake_up_all(&thread->t_ctl_waitq);
        spin_unlock(&scrub->os_lock);
        return rc;
 }
@@ -1973,7 +1973,7 @@ static void do_osd_scrub_stop(struct osd_scrub *scrub)
        if (!thread_is_init(thread) && !thread_is_stopped(thread)) {
                thread_set_flags(thread, SVC_STOPPING);
                spin_unlock(&scrub->os_lock);
-               cfs_waitq_broadcast(&thread->t_ctl_waitq);
+               wake_up_all(&thread->t_ctl_waitq);
                l_wait_event(thread->t_ctl_waitq,
                             thread_is_stopped(thread),
                             &lwi);
@@ -2019,7 +2019,7 @@ int osd_scrub_setup(const struct lu_env *env, struct osd_device *dev)
        ctxt->pwd = dev->od_mnt->mnt_root;
        ctxt->fs = get_ds();
 
-       cfs_waitq_init(&scrub->os_thread.t_ctl_waitq);
+       init_waitqueue_head(&scrub->os_thread.t_ctl_waitq);
        init_rwsem(&scrub->os_rwsem);
        spin_lock_init(&scrub->os_lock);
        CFS_INIT_LIST_HEAD(&scrub->os_inconsistent_items);
@@ -2282,7 +2282,7 @@ again:
        if (scrub->os_waiting && osd_scrub_has_window(scrub, ooc)) {
                spin_lock(&scrub->os_lock);
                scrub->os_waiting = 0;
-               cfs_waitq_broadcast(&scrub->os_thread.t_ctl_waitq);
+               wake_up_all(&scrub->os_thread.t_ctl_waitq);
                spin_unlock(&scrub->os_lock);
        }
 
@@ -2370,7 +2370,7 @@ static int osd_otable_it_load(const struct lu_env *env,
 
        it->ooi_user_ready = 1;
        if (!scrub->os_full_speed)
-               cfs_waitq_broadcast(&scrub->os_thread.t_ctl_waitq);
+               wake_up_all(&scrub->os_thread.t_ctl_waitq);
 
        /* Unplug OSD layer iteration by the first next() call. */
        rc = osd_otable_it_next(env, (struct dt_it *)it);
@@ -2432,7 +2432,7 @@ int osd_oii_insert(struct osd_device *dev, struct osd_idmap_cache *oic,
        spin_unlock(&scrub->os_lock);
 
        if (wakeup != 0)
-               cfs_waitq_broadcast(&thread->t_ctl_waitq);
+               wake_up_all(&thread->t_ctl_waitq);
 
        RETURN(0);
 }
index 7e9e206..8e71157 100644
@@ -422,7 +422,7 @@ static int osp_recovery_complete(const struct lu_env *env,
        ENTRY;
        osp->opd_recovery_completed = 1;
        if (!osp->opd_connect_mdt)
-               cfs_waitq_signal(&osp->opd_pre_waitq);
+               wake_up(&osp->opd_pre_waitq);
        RETURN(rc);
 }
 
@@ -1027,7 +1027,7 @@ static int osp_import_event(struct obd_device *obd, struct obd_import *imp,
                if (d->opd_connect_mdt)
                        break;
                osp_pre_update_status(d, -ENODEV);
-               cfs_waitq_signal(&d->opd_pre_waitq);
+               wake_up(&d->opd_pre_waitq);
                CDEBUG(D_HA, "got disconnected\n");
                break;
        case IMP_EVENT_INACTIVE:
@@ -1035,7 +1035,7 @@ static int osp_import_event(struct obd_device *obd, struct obd_import *imp,
                if (d->opd_connect_mdt)
                        break;
                osp_pre_update_status(d, -ENODEV);
-               cfs_waitq_signal(&d->opd_pre_waitq);
+               wake_up(&d->opd_pre_waitq);
                CDEBUG(D_HA, "got inactive\n");
                break;
        case IMP_EVENT_ACTIVE:
@@ -1046,7 +1046,7 @@ static int osp_import_event(struct obd_device *obd, struct obd_import *imp,
                d->opd_imp_seen_connected = 1;
                if (d->opd_connect_mdt)
                        break;
-               cfs_waitq_signal(&d->opd_pre_waitq);
+               wake_up(&d->opd_pre_waitq);
                __osp_sync_check_for_work(d);
                CDEBUG(D_HA, "got connected\n");
                break;
index 847a100..1557a65 100644
@@ -119,9 +119,9 @@ struct osp_device {
        /* dedicate precreate thread */
        struct ptlrpc_thread             opd_pre_thread;
        /* thread waits for signals about pool going empty */
-       cfs_waitq_t                      opd_pre_waitq;
+       wait_queue_head_t                opd_pre_waitq;
        /* consumers (who needs new ids) wait here */
-       cfs_waitq_t                      opd_pre_user_waitq;
+       wait_queue_head_t                opd_pre_user_waitq;
        /* current precreation status: working, failed, stopping? */
        int                              opd_pre_status;
        /* how many to precreate next time */
@@ -145,7 +145,7 @@ struct osp_device {
        int                              opd_syn_prev_done;
        /* found records */
        struct ptlrpc_thread             opd_syn_thread;
-       cfs_waitq_t                      opd_syn_waitq;
+       wait_queue_head_t                opd_syn_waitq;
        /* list of remotely committed rpc */
        cfs_list_t                       opd_syn_committed_there;
        /* number of changes being under sync */
index 208d912..7f68596 100644
@@ -70,7 +70,7 @@ static void osp_statfs_timer_cb(unsigned long _d)
        struct osp_device *d = (struct osp_device *) _d;
 
        LASSERT(d);
-       cfs_waitq_signal(&d->opd_pre_waitq);
+       wake_up(&d->opd_pre_waitq);
 }
 
 static int osp_statfs_interpret(const struct lu_env *env,
@@ -108,7 +108,7 @@ static int osp_statfs_interpret(const struct lu_env *env,
        RETURN(0);
 out:
        /* couldn't update statfs, try again as soon as possible */
-       cfs_waitq_signal(&d->opd_pre_waitq);
+       wake_up(&d->opd_pre_waitq);
        if (req->rq_import_generation == imp->imp_generation)
                CDEBUG(D_CACHE, "%s: couldn't update statfs: rc = %d\n",
                       d->opd_obd->obd_name, rc);
@@ -174,7 +174,7 @@ void osp_statfs_need_now(struct osp_device *d)
                 */
                d->opd_statfs_fresh_till = cfs_time_shift(-1);
                cfs_timer_disarm(&d->opd_statfs_timer);
-               cfs_waitq_signal(&d->opd_pre_waitq);
+               wake_up(&d->opd_pre_waitq);
        }
 }
 
@@ -459,7 +459,7 @@ static int osp_precreate_send(const struct lu_env *env, struct osp_device *d)
                        osp_pre_update_status(d, -ENOSPC);
                        rc = -ENOSPC;
                }
-               cfs_waitq_signal(&d->opd_pre_waitq);
+               wake_up(&d->opd_pre_waitq);
                GOTO(out_req, rc);
        }
 
@@ -516,7 +516,7 @@ static int osp_precreate_send(const struct lu_env *env, struct osp_device *d)
 out_req:
        /* now we can wakeup all users awaiting for objects */
        osp_pre_update_status(d, rc);
-       cfs_waitq_signal(&d->opd_pre_user_waitq);
+       wake_up(&d->opd_pre_user_waitq);
 
        ptlrpc_req_finished(req);
        RETURN(rc);
@@ -741,7 +741,7 @@ out:
                         * this OSP isn't quite functional yet */
                        osp_pre_update_status(d, rc);
                } else {
-                       cfs_waitq_signal(&d->opd_pre_user_waitq);
+                       wake_up(&d->opd_pre_user_waitq);
                }
        }
 
@@ -801,7 +801,7 @@ void osp_pre_update_status(struct osp_device *d, int rc)
                        d->opd_pre_grow_slow = 0;
                        d->opd_pre_grow_count = OST_MIN_PRECREATE;
                        spin_unlock(&d->opd_pre_lock);
-                       cfs_waitq_signal(&d->opd_pre_waitq);
+                       wake_up(&d->opd_pre_waitq);
                        CDEBUG(D_INFO, "%s: no space: "LPU64" blocks, "LPU64
                               " free, "LPU64" used, "LPU64" avail -> %d: "
                               "rc = %d\n", d->opd_obd->obd_name,
@@ -811,7 +811,7 @@ void osp_pre_update_status(struct osp_device *d, int rc)
        }
 
 out:
-       cfs_waitq_signal(&d->opd_pre_user_waitq);
+       wake_up(&d->opd_pre_user_waitq);
 }
 
 static int osp_init_pre_fid(struct osp_device *osp)
@@ -888,7 +888,7 @@ static int osp_precreate_thread(void *_arg)
        spin_lock(&d->opd_pre_lock);
        thread->t_flags = SVC_RUNNING;
        spin_unlock(&d->opd_pre_lock);
-       cfs_waitq_signal(&thread->t_ctl_waitq);
+       wake_up(&thread->t_ctl_waitq);
 
        while (osp_precreate_running(d)) {
                /*
@@ -990,7 +990,7 @@ static int osp_precreate_thread(void *_arg)
 
        thread->t_flags = SVC_STOPPED;
        lu_env_fini(&env);
-       cfs_waitq_signal(&thread->t_ctl_waitq);
+       wake_up(&thread->t_ctl_waitq);
 
        RETURN(0);
 }
@@ -1086,7 +1086,7 @@ int osp_precreate_reserve(const struct lu_env *env, struct osp_device *d)
                        /* XXX: don't wake up if precreation is in progress */
                        if (osp_precreate_near_empty_nolock(env, d) &&
                           !osp_precreate_end_seq_nolock(env, d))
-                               cfs_waitq_signal(&d->opd_pre_waitq);
+                               wake_up(&d->opd_pre_waitq);
 
                        break;
                }
@@ -1115,7 +1115,7 @@ int osp_precreate_reserve(const struct lu_env *env, struct osp_device *d)
                }
 
                /* XXX: don't wake up if precreation is in progress */
-               cfs_waitq_signal(&d->opd_pre_waitq);
+               wake_up(&d->opd_pre_waitq);
 
                lwi = LWI_TIMEOUT(expire - cfs_time_current(),
                                osp_precreate_timeout_condition, d);
@@ -1162,7 +1162,7 @@ int osp_precreate_get_fid(const struct lu_env *env, struct osp_device *d,
         * osp_precreate_thread() just before orphan cleanup
         */
        if (unlikely(d->opd_pre_reserved == 0 && d->opd_pre_status))
-               cfs_waitq_signal(&d->opd_pre_waitq);
+               wake_up(&d->opd_pre_waitq);
 
        return 0;
 }
@@ -1258,9 +1258,9 @@ int osp_init_precreate(struct osp_device *d)
        d->opd_pre_max_grow_count = OST_MAX_PRECREATE;
 
        spin_lock_init(&d->opd_pre_lock);
-       cfs_waitq_init(&d->opd_pre_waitq);
-       cfs_waitq_init(&d->opd_pre_user_waitq);
-       cfs_waitq_init(&d->opd_pre_thread.t_ctl_waitq);
+       init_waitqueue_head(&d->opd_pre_waitq);
+       init_waitqueue_head(&d->opd_pre_user_waitq);
+       init_waitqueue_head(&d->opd_pre_thread.t_ctl_waitq);
 
        /*
         * Initialize statfs-related things
@@ -1298,9 +1298,9 @@ void osp_precreate_fini(struct osp_device *d)
        cfs_timer_disarm(&d->opd_statfs_timer);
 
        thread->t_flags = SVC_STOPPING;
-       cfs_waitq_signal(&d->opd_pre_waitq);
+       wake_up(&d->opd_pre_waitq);
 
-       cfs_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
+       wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
 
        EXIT;
 }
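Note: osp_precreate_fini() shows the thread-stop handshake this patch only respells: raise SVC_STOPPING, kick the worker's wait queue, then wait_event() until the worker posts SVC_STOPPED on its way out. A generic sketch of the handshake (illustrative; the real code guards t_flags with a spinlock in some paths):

#include <linux/wait.h>

#define SVC_STOPPING    0x1
#define SVC_STOPPED     0x2

struct worker_ctl {
        unsigned long     flags;
        wait_queue_head_t waitq;
};

static void worker_stop(struct worker_ctl *ctl)
{
        ctl->flags = SVC_STOPPING;
        wake_up(&ctl->waitq);           /* kick the worker loop */
        wait_event(ctl->waitq, ctl->flags & SVC_STOPPED);
}

/* worker thread, just before returning: */
static void worker_exit(struct worker_ctl *ctl)
{
        ctl->flags = SVC_STOPPED;
        wake_up(&ctl->waitq);           /* release worker_stop() */
}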
index 9591742..14a5566 100644
@@ -137,7 +137,7 @@ static inline int osp_sync_has_work(struct osp_device *d)
 #define osp_sync_check_for_work(d)                      \
 {                                                       \
        if (osp_sync_has_work(d)) {                     \
-               cfs_waitq_signal(&d->opd_syn_waitq);    \
+               wake_up(&d->opd_syn_waitq);    \
        }                                               \
 }
 
@@ -330,7 +330,7 @@ static void osp_sync_request_commit_cb(struct ptlrpc_request *req)
        spin_unlock(&d->opd_syn_lock);
 
        /* XXX: some batching wouldn't hurt */
-       cfs_waitq_signal(&d->opd_syn_waitq);
+       wake_up(&d->opd_syn_waitq);
 }
 
 static int osp_sync_interpret(const struct lu_env *env,
@@ -362,7 +362,7 @@ static int osp_sync_interpret(const struct lu_env *env,
                cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
                spin_unlock(&d->opd_syn_lock);
 
-               cfs_waitq_signal(&d->opd_syn_waitq);
+               wake_up(&d->opd_syn_waitq);
        } else if (rc) {
                struct obd_import *imp = req->rq_import;
                /*
@@ -383,7 +383,7 @@ static int osp_sync_interpret(const struct lu_env *env,
                        spin_unlock(&d->opd_syn_lock);
                }
 
-               cfs_waitq_signal(&d->opd_syn_waitq);
+               wake_up(&d->opd_syn_waitq);
        } else if (unlikely(d->opd_pre_status == -ENOSPC)) {
                /*
                 * if current status is -ENOSPC (lack of free space on OST)
@@ -723,7 +723,7 @@ static void osp_sync_process_committed(const struct lu_env *env,
        /* wake up the thread if requested to stop:
         * it might be waiting for in-progress to complete */
        if (unlikely(osp_sync_running(d) == 0))
-               cfs_waitq_signal(&d->opd_syn_waitq);
+               wake_up(&d->opd_syn_waitq);
 
        EXIT;
 }
@@ -833,7 +833,7 @@ static int osp_sync_thread(void *_arg)
        spin_lock(&d->opd_syn_lock);
        thread->t_flags = SVC_RUNNING;
        spin_unlock(&d->opd_syn_lock);
-       cfs_waitq_signal(&thread->t_ctl_waitq);
+       wake_up(&thread->t_ctl_waitq);
 
        ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
        if (ctxt == NULL) {
@@ -891,7 +891,7 @@ out:
 
        thread->t_flags = SVC_STOPPED;
 
-       cfs_waitq_signal(&thread->t_ctl_waitq);
+       wake_up(&thread->t_ctl_waitq);
 
        lu_env_fini(&env);
 
@@ -1032,8 +1032,8 @@ int osp_sync_init(const struct lu_env *env, struct osp_device *d)
        d->opd_syn_max_rpc_in_flight = OSP_MAX_IN_FLIGHT;
        d->opd_syn_max_rpc_in_progress = OSP_MAX_IN_PROGRESS;
        spin_lock_init(&d->opd_syn_lock);
-       cfs_waitq_init(&d->opd_syn_waitq);
-       cfs_waitq_init(&d->opd_syn_thread.t_ctl_waitq);
+       init_waitqueue_head(&d->opd_syn_waitq);
+       init_waitqueue_head(&d->opd_syn_thread.t_ctl_waitq);
        CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there);
 
        rc = PTR_ERR(kthread_run(osp_sync_thread, d,
@@ -1062,8 +1062,8 @@ int osp_sync_fini(struct osp_device *d)
        ENTRY;
 
        thread->t_flags = SVC_STOPPING;
-       cfs_waitq_signal(&d->opd_syn_waitq);
-       cfs_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
+       wake_up(&d->opd_syn_waitq);
+       wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
 
        /*
         * unregister transaction callbacks only when sync thread
@@ -1098,7 +1098,7 @@ static void osp_sync_tracker_commit_cb(struct thandle *th, void *cookie)
                cfs_list_for_each_entry(d, &tr->otr_wakeup_list,
                                        opd_syn_ontrack) {
                        d->opd_syn_last_committed_id = tr->otr_committed_id;
-                       cfs_waitq_signal(&d->opd_syn_waitq);
+                       wake_up(&d->opd_syn_waitq);
                }
        }
        spin_unlock(&tr->otr_lock);
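
The osp_sync hunks above all follow one shape: the notifier calls wake_up() where it used to call cfs_waitq_signal(), and the stop path pairs wake_up() with wait_event(). As a reference, a minimal standard-kernel sketch of that stop handshake; the names (my_dev, md_*) are invented for illustration and not taken from the patch:

#include <linux/wait.h>
#include <linux/sched.h>

struct my_dev {
        wait_queue_head_t md_waitq;     /* init_waitqueue_head() at setup */
        int               md_stopping;
        int               md_stopped;
};

static int my_sync_thread(void *arg)
{
        struct my_dev *d = arg;

        /* sleep until asked to stop; re-check the flag on each wakeup */
        while (!d->md_stopping)
                wait_event_interruptible(d->md_waitq, d->md_stopping);

        d->md_stopped = 1;
        wake_up(&d->md_waitq);          /* answer the stopper */
        return 0;
}

static void my_dev_fini(struct my_dev *d)
{
        d->md_stopping = 1;
        wake_up(&d->md_waitq);                  /* like SVC_STOPPING + wake_up() */
        wait_event(d->md_waitq, d->md_stopped); /* like waiting for SVC_STOPPED */
}
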
index 38c25e5..a3ee3dc 100644
@@ -959,18 +959,18 @@ out:
         }
         /* send a bulk after reply to simulate a network delay or reordering
          * by a router */
-        if (unlikely(CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2))) {
-                cfs_waitq_t              waitq;
-                struct l_wait_info       lwi1;
+       if (unlikely(CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2))) {
+               wait_queue_head_t              waitq;
+               struct l_wait_info       lwi1;
 
-                CDEBUG(D_INFO, "reorder BULK\n");
-                cfs_waitq_init(&waitq);
+               CDEBUG(D_INFO, "reorder BULK\n");
+               init_waitqueue_head(&waitq);
 
-                lwi1 = LWI_TIMEOUT_INTR(cfs_time_seconds(3), NULL, NULL, NULL);
-                l_wait_event(waitq, 0, &lwi1);
-                rc = target_bulk_io(exp, desc, &lwi);
+               lwi1 = LWI_TIMEOUT_INTR(cfs_time_seconds(3), NULL, NULL, NULL);
+               l_wait_event(waitq, 0, &lwi1);
+               rc = target_bulk_io(exp, desc, &lwi);
                ptlrpc_free_bulk_nopin(desc);
-        }
+       }
 
         RETURN(rc);
 }
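
The fail-injection branch above only needs a 3-second pause; the private wait queue exists solely so l_wait_event() has something to sleep on. With stock primitives the equivalent pause could be written as below (a sketch, not the patch's code; the interruptible variant mirrors LWI_TIMEOUT_INTR):

#include <linux/wait.h>
#include <linux/jiffies.h>

static void bulk_reorder_delay(void)
{
        wait_queue_head_t waitq;

        init_waitqueue_head(&waitq);
        /* the condition is constant-false, so this sleeps the full 3s
         * unless the task is interrupted by a signal */
        wait_event_interruptible_timeout(waitq, 0, 3 * HZ);
}
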
index db3db53..7ab33c7 100644
@@ -112,7 +112,7 @@ struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
                return NULL;
 
        spin_lock_init(&desc->bd_lock);
-       cfs_waitq_init(&desc->bd_waitq);
+       init_waitqueue_head(&desc->bd_waitq);
        desc->bd_max_iov = npages;
        desc->bd_iov_count = 0;
        desc->bd_portal = portal;
@@ -620,26 +620,26 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
         ptlrpc_at_set_req_timeout(request);
 
        spin_lock_init(&request->rq_lock);
-        CFS_INIT_LIST_HEAD(&request->rq_list);
-        CFS_INIT_LIST_HEAD(&request->rq_timed_list);
-        CFS_INIT_LIST_HEAD(&request->rq_replay_list);
-        CFS_INIT_LIST_HEAD(&request->rq_ctx_chain);
-        CFS_INIT_LIST_HEAD(&request->rq_set_chain);
-        CFS_INIT_LIST_HEAD(&request->rq_history_list);
-        CFS_INIT_LIST_HEAD(&request->rq_exp_list);
-        cfs_waitq_init(&request->rq_reply_waitq);
-        cfs_waitq_init(&request->rq_set_waitq);
-        request->rq_xid = ptlrpc_next_xid();
-        cfs_atomic_set(&request->rq_refcount, 1);
-
-        lustre_msg_set_opc(request->rq_reqmsg, opcode);
-
-        RETURN(0);
+       CFS_INIT_LIST_HEAD(&request->rq_list);
+       CFS_INIT_LIST_HEAD(&request->rq_timed_list);
+       CFS_INIT_LIST_HEAD(&request->rq_replay_list);
+       CFS_INIT_LIST_HEAD(&request->rq_ctx_chain);
+       CFS_INIT_LIST_HEAD(&request->rq_set_chain);
+       CFS_INIT_LIST_HEAD(&request->rq_history_list);
+       CFS_INIT_LIST_HEAD(&request->rq_exp_list);
+       init_waitqueue_head(&request->rq_reply_waitq);
+       init_waitqueue_head(&request->rq_set_waitq);
+       request->rq_xid = ptlrpc_next_xid();
+       cfs_atomic_set(&request->rq_refcount, 1);
+
+       lustre_msg_set_opc(request->rq_reqmsg, opcode);
+
+       RETURN(0);
 out_ctx:
-        sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
+       sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
 out_free:
-        class_import_put(imp);
-        return rc;
+       class_import_put(imp);
+       return rc;
 }
 
 int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
@@ -861,7 +861,7 @@ struct ptlrpc_request_set *ptlrpc_prep_set(void)
                RETURN(NULL);
        cfs_atomic_set(&set->set_refcount, 1);
        CFS_INIT_LIST_HEAD(&set->set_requests);
-       cfs_waitq_init(&set->set_waitq);
+       init_waitqueue_head(&set->set_waitq);
        cfs_atomic_set(&set->set_new_count, 0);
        cfs_atomic_set(&set->set_remaining, 0);
        spin_lock_init(&set->set_new_req_lock);
@@ -1033,16 +1033,16 @@ void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
        count = cfs_atomic_inc_return(&set->set_new_count);
        spin_unlock(&set->set_new_req_lock);
 
-        /* Only need to call wakeup once for the first entry. */
-        if (count == 1) {
-                cfs_waitq_signal(&set->set_waitq);
+       /* Only need to call wakeup once for the first entry. */
+       if (count == 1) {
+               wake_up(&set->set_waitq);
 
-                /* XXX: It maybe unnecessary to wakeup all the partners. But to
-                 *      guarantee the async RPC can be processed ASAP, we have
-                 *      no other better choice. It maybe fixed in future. */
-                for (i = 0; i < pc->pc_npartners; i++)
-                        cfs_waitq_signal(&pc->pc_partners[i]->pc_set->set_waitq);
-        }
+               /* XXX: It maybe unnecessary to wakeup all the partners. But to
+                *      guarantee the async RPC can be processed ASAP, we have
+                *      no other better choice. It maybe fixed in future. */
+               for (i = 0; i < pc->pc_npartners; i++)
+                       wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
+       }
 }
 EXPORT_SYMBOL(ptlrpc_set_add_new_req);
 
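ptlrpc_set_add_new_req() above wakes the set only when count == 1: a wakeup is needed solely on the empty-to-non-empty transition, since a running consumer drains everything it finds. A sketch of that idiom with invented names (my_queue, q_*):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct my_queue {                       /* q_lock/q_waitq initialized at setup */
        spinlock_t        q_lock;
        struct list_head  q_items;
        int               q_count;
        wait_queue_head_t q_waitq;
};

static void my_queue_add(struct my_queue *q, struct list_head *item)
{
        int first;

        spin_lock(&q->q_lock);
        list_add_tail(item, &q->q_items);
        first = (++q->q_count == 1);
        spin_unlock(&q->q_lock);

        /* only the empty->non-empty edge needs a wakeup;
         * the consumer re-checks q_count itself */
        if (first)
                wake_up(&q->q_waitq);
}
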
@@ -1843,8 +1843,8 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                }
                spin_unlock(&imp->imp_lock);
 
-                cfs_atomic_dec(&set->set_remaining);
-                cfs_waitq_broadcast(&imp->imp_recovery_waitq);
+               cfs_atomic_dec(&set->set_remaining);
+               wake_up_all(&imp->imp_recovery_waitq);
 
                if (set->set_producer) {
                        /* produce a new request if possible */
@@ -2391,9 +2391,9 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
         for (;;) {
 #ifdef __KERNEL__
                /* The wq argument is ignored by user-space wait_event macros */
-               cfs_waitq_t *wq = (request->rq_set != NULL) ?
-                                 &request->rq_set->set_waitq :
-                                 &request->rq_reply_waitq;
+               wait_queue_head_t *wq = (request->rq_set != NULL) ?
+                                       &request->rq_set->set_waitq :
+                                       &request->rq_reply_waitq;
 #endif
                 /* Network access will complete in finite time but the HUGE
                  * timeout lets us CWARN for visibility of sluggish NALs */
@@ -3024,22 +3024,22 @@ void *ptlrpcd_alloc_work(struct obd_import *imp,
         req->rq_no_delay = req->rq_no_resend = 1;
 
        spin_lock_init(&req->rq_lock);
-        CFS_INIT_LIST_HEAD(&req->rq_list);
-        CFS_INIT_LIST_HEAD(&req->rq_replay_list);
-        CFS_INIT_LIST_HEAD(&req->rq_set_chain);
-        CFS_INIT_LIST_HEAD(&req->rq_history_list);
-        CFS_INIT_LIST_HEAD(&req->rq_exp_list);
-        cfs_waitq_init(&req->rq_reply_waitq);
-        cfs_waitq_init(&req->rq_set_waitq);
-        cfs_atomic_set(&req->rq_refcount, 1);
-
-        CLASSERT (sizeof(*args) <= sizeof(req->rq_async_args));
-        args = ptlrpc_req_async_args(req);
-        args->magic  = PTLRPC_WORK_MAGIC;
-        args->cb     = cb;
-        args->cbdata = cbdata;
-
-        RETURN(req);
+       CFS_INIT_LIST_HEAD(&req->rq_list);
+       CFS_INIT_LIST_HEAD(&req->rq_replay_list);
+       CFS_INIT_LIST_HEAD(&req->rq_set_chain);
+       CFS_INIT_LIST_HEAD(&req->rq_history_list);
+       CFS_INIT_LIST_HEAD(&req->rq_exp_list);
+       init_waitqueue_head(&req->rq_reply_waitq);
+       init_waitqueue_head(&req->rq_set_waitq);
+       cfs_atomic_set(&req->rq_refcount, 1);
+
+       CLASSERT (sizeof(*args) <= sizeof(req->rq_async_args));
+       args = ptlrpc_req_async_args(req);
+       args->magic  = PTLRPC_WORK_MAGIC;
+       args->cb     = cb;
+       args->cbdata = cbdata;
+
+       RETURN(req);
 }
 EXPORT_SYMBOL(ptlrpcd_alloc_work);
 
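Note that ptlrpcd_alloc_work(), like __ptlrpc_request_bufs_pack() earlier in this file, initializes every embedded wait_queue_head_t before the request can be reached by another thread; init_waitqueue_head() sets up the head's internal lock and list, so waking an uninitialized head is a bug. A hedged sketch of the constructor rule (struct my_req is invented):

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/wait.h>

struct my_req {
        struct list_head  mr_link;
        wait_queue_head_t mr_reply_waitq;
};

static struct my_req *my_req_alloc(void)
{
        struct my_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

        if (req == NULL)
                return NULL;
        INIT_LIST_HEAD(&req->mr_link);
        init_waitqueue_head(&req->mr_reply_waitq); /* before publishing req */
        return req;
}
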
index b25c72f..b2c7338 100644
@@ -379,7 +379,7 @@ void request_in_callback(lnet_event_t *ev)
 
        /* NB everything can disappear under us once the request
         * has been queued and we unlock, so do the wake now... */
-       cfs_waitq_signal(&svcpt->scp_waitq);
+       wake_up(&svcpt->scp_waitq);
 
        spin_unlock(&svcpt->scp_lock);
        EXIT;
@@ -470,7 +470,7 @@ void server_bulk_callback (lnet_event_t *ev)
                desc->bd_md_count--;
                /* This is the last callback no matter what... */
                if (desc->bd_md_count == 0)
-                       cfs_waitq_signal(&desc->bd_waitq);
+                       wake_up(&desc->bd_waitq);
        }
 
        spin_unlock(&desc->bd_lock);
@@ -553,38 +553,38 @@ int ptlrpc_uuid_to_peer (struct obd_uuid *uuid,
 
 void ptlrpc_ni_fini(void)
 {
-        cfs_waitq_t         waitq;
-        struct l_wait_info  lwi;
-        int                 rc;
-        int                 retries;
-
-        /* Wait for the event queue to become idle since there may still be
-         * messages in flight with pending events (i.e. the fire-and-forget
-         * messages == client requests and "non-difficult" server
-         * replies */
-
-        for (retries = 0;; retries++) {
-                rc = LNetEQFree(ptlrpc_eq_h);
-                switch (rc) {
-                default:
-                        LBUG();
-
-                case 0:
-                        LNetNIFini();
-                        return;
-
-                case -EBUSY:
-                        if (retries != 0)
-                                CWARN("Event queue still busy\n");
-
-                        /* Wait for a bit */
-                        cfs_waitq_init(&waitq);
-                        lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
-                        l_wait_event(waitq, 0, &lwi);
-                        break;
-                }
-        }
-        /* notreached */
+       wait_queue_head_t         waitq;
+       struct l_wait_info  lwi;
+       int                 rc;
+       int                 retries;
+
+       /* Wait for the event queue to become idle since there may still be
+        * messages in flight with pending events (i.e. the fire-and-forget
+        * messages == client requests and "non-difficult" server
+        * replies */
+
+       for (retries = 0;; retries++) {
+               rc = LNetEQFree(ptlrpc_eq_h);
+               switch (rc) {
+               default:
+                       LBUG();
+
+               case 0:
+                       LNetNIFini();
+                       return;
+
+               case -EBUSY:
+                       if (retries != 0)
+                               CWARN("Event queue still busy\n");
+
+                       /* Wait for a bit */
+                       init_waitqueue_head(&waitq);
+                       lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
+                       l_wait_event(waitq, 0, &lwi);
+                       break;
+               }
+       }
+       /* notreached */
 }
 
 lnet_pid_t ptl_get_pid(void)
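
ptlrpc_ni_fini() above keeps its LNetEQFree() retry loop, sleeping two seconds per -EBUSY round on a freshly initialized local wait queue. Reduced to stock primitives the same backoff loop looks roughly like this sketch (ptlrpc_eq_h and the LNet calls are those visible in the hunk; msleep() stands in for the l_wait_event() timeout):

#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/printk.h>

static void my_ni_fini(void)
{
        int retries;

        for (retries = 0;; retries++) {
                int rc = LNetEQFree(ptlrpc_eq_h); /* module-global EQ handle */

                if (rc == 0) {
                        LNetNIFini();
                        return;
                }
                if (rc != -EBUSY)
                        BUG();          /* stands in for LBUG() */
                if (retries != 0)
                        pr_warn("Event queue still busy\n");
                msleep(2000);           /* let in-flight events drain */
        }
}
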
index ded347f..0b18ff9 100644
@@ -121,13 +121,13 @@ static inline unsigned long hash_mem(char *buf, int length, int bits)
 #define RSI_HASHMASK    (RSI_HASHMAX - 1)
 
 struct rsi {
-        struct cache_head       h;
-        __u32                   lustre_svc;
-        __u64                   nid;
-        cfs_waitq_t             waitq;
-        rawobj_t                in_handle, in_token;
-        rawobj_t                out_handle, out_token;
-        int                     major_status, minor_status;
+       struct cache_head       h;
+       __u32                   lustre_svc;
+       __u64                   nid;
+       wait_queue_head_t       waitq;
+       rawobj_t                in_handle, in_token;
+       rawobj_t                out_handle, out_token;
+       int                     major_status, minor_status;
 };
 
 static struct cache_head *rsi_table[RSI_HASHMAX];
@@ -184,17 +184,17 @@ static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
 
 static inline void __rsi_init(struct rsi *new, struct rsi *item)
 {
-        new->out_handle = RAWOBJ_EMPTY;
-        new->out_token = RAWOBJ_EMPTY;
+       new->out_handle = RAWOBJ_EMPTY;
+       new->out_token = RAWOBJ_EMPTY;
 
-        new->in_handle = item->in_handle;
-        item->in_handle = RAWOBJ_EMPTY;
-        new->in_token = item->in_token;
-        item->in_token = RAWOBJ_EMPTY;
+       new->in_handle = item->in_handle;
+       item->in_handle = RAWOBJ_EMPTY;
+       new->in_token = item->in_token;
+       item->in_token = RAWOBJ_EMPTY;
 
-        new->lustre_svc = item->lustre_svc;
-        new->nid = item->nid;
-        cfs_waitq_init(&new->waitq);
+       new->lustre_svc = item->lustre_svc;
+       new->nid = item->nid;
+       init_waitqueue_head(&new->waitq);
 }
 
 static inline void __rsi_update(struct rsi *new, struct rsi *item)
@@ -335,17 +335,17 @@ static int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
         rsip = rsi_update(&rsii, rsip);
         status = 0;
 out:
-        rsi_free(&rsii);
-        if (rsip) {
-                cfs_waitq_broadcast(&rsip->waitq);
-                cache_put(&rsip->h, &rsi_cache);
-        } else {
-                status = -ENOMEM;
-        }
+       rsi_free(&rsii);
+       if (rsip) {
+               wake_up_all(&rsip->waitq);
+               cache_put(&rsip->h, &rsi_cache);
+       } else {
+               status = -ENOMEM;
+       }
 
-        if (status)
-                CERROR("rsi parse error %d\n", status);
-        RETURN(status);
+       if (status)
+               CERROR("rsi parse error %d\n", status);
+       RETURN(status);
 }
 
 static struct cache_detail rsi_cache = {
@@ -844,22 +844,22 @@ static struct cache_deferred_req* cache_upcall_defer(struct cache_req *req)
 static struct cache_req cache_upcall_chandle = { cache_upcall_defer };
 
 int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
-                               struct gss_svc_reqctx *grctx,
-                               struct gss_wire_ctx *gw,
-                               struct obd_device *target,
-                               __u32 lustre_svc,
-                               rawobj_t *rvs_hdl,
-                               rawobj_t *in_token)
+                              struct gss_svc_reqctx *grctx,
+                              struct gss_wire_ctx *gw,
+                              struct obd_device *target,
+                              __u32 lustre_svc,
+                              rawobj_t *rvs_hdl,
+                              rawobj_t *in_token)
 {
 {
-        struct ptlrpc_reply_state *rs;
-        struct rsc                *rsci = NULL;
-        struct rsi                *rsip = NULL, rsikey;
-        cfs_waitlink_t             wait;
-        int                        replen = sizeof(struct ptlrpc_body);
-        struct gss_rep_header     *rephdr;
-        int                        first_check = 1;
-        int                        rc = SECSVC_DROP;
-        ENTRY;
+       struct ptlrpc_reply_state *rs;
+       struct rsc                *rsci = NULL;
+       struct rsi                *rsip = NULL, rsikey;
+       wait_queue_t             wait;
+       int                        replen = sizeof(struct ptlrpc_body);
+       struct gss_rep_header     *rephdr;
+       int                        first_check = 1;
+       int                        rc = SECSVC_DROP;
+       ENTRY;
 
         memset(&rsikey, 0, sizeof(rsikey));
         rsikey.lustre_svc = lustre_svc;
@@ -888,18 +888,18 @@ int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
                 GOTO(out, rc);
         }
 
-        cache_get(&rsip->h); /* take an extra ref */
-        cfs_waitq_init(&rsip->waitq);
-        cfs_waitlink_init(&wait);
-        cfs_waitq_add(&rsip->waitq, &wait);
+       cache_get(&rsip->h); /* take an extra ref */
+       init_waitqueue_head(&rsip->waitq);
+       init_waitqueue_entry_current(&wait);
+       add_wait_queue(&rsip->waitq, &wait);
 
 cache_check:
-        /* Note each time cache_check() will drop a reference if return
-         * non-zero. We hold an extra reference on initial rsip, but must
-         * take care of following calls. */
-        rc = cache_check(&rsi_cache, &rsip->h, &cache_upcall_chandle);
-        switch (rc) {
-        case -EAGAIN: {
+       /* Note each time cache_check() will drop a reference if return
+        * non-zero. We hold an extra reference on initial rsip, but must
+        * take care of following calls. */
+       rc = cache_check(&rsi_cache, &rsip->h, &cache_upcall_chandle);
+       switch (rc) {
+       case -EAGAIN: {
                 int valid;
 
                 if (first_check) {
@@ -908,11 +908,11 @@ cache_check:
                         read_lock(&rsi_cache.hash_lock);
                        valid = test_bit(CACHE_VALID, &rsip->h.flags);
                         if (valid == 0)
-                                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+                               set_current_state(TASK_INTERRUPTIBLE);
                         read_unlock(&rsi_cache.hash_lock);
 
                        if (valid == 0)
-                               cfs_schedule_timeout(GSS_SVC_UPCALL_TIMEOUT *
+                               schedule_timeout(GSS_SVC_UPCALL_TIMEOUT *
                                                     HZ);
 
                        cache_get(&rsip->h);
@@ -927,17 +927,17 @@ cache_check:
         case 0:
                 /* if not the first check, we have to release the extra
                  * reference we just added on it. */
-                if (!first_check)
-                        cache_put(&rsip->h, &rsi_cache);
-                CDEBUG(D_SEC, "cache_check is good\n");
-                break;
-        }
+               if (!first_check)
+                       cache_put(&rsip->h, &rsi_cache);
+               CDEBUG(D_SEC, "cache_check is good\n");
+               break;
+       }
 
-        cfs_waitq_del(&rsip->waitq, &wait);
-        cache_put(&rsip->h, &rsi_cache);
+       remove_wait_queue(&rsip->waitq, &wait);
+       cache_put(&rsip->h, &rsi_cache);
 
-        if (rc)
-                GOTO(out, rc = SECSVC_DROP);
+       if (rc)
+               GOTO(out, rc = SECSVC_DROP);
 
         rc = SECSVC_DROP;
         rsci = gss_svc_searchbyctx(&rsip->out_handle);
@@ -1081,9 +1081,9 @@ int __init gss_init_svc_upcall(void)
        for (i = 0; i < 6; i++) {
                if (atomic_read(&rsi_cache.readers) > 0)
                        break;
-               cfs_set_current_state(TASK_UNINTERRUPTIBLE);
+               set_current_state(TASK_UNINTERRUPTIBLE);
                LASSERT(HZ >= 4);
-               cfs_schedule_timeout(HZ / 4);
+               schedule_timeout(HZ / 4);
        }
 
         if (atomic_read(&rsi_cache.readers) == 0)
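
The -EAGAIN path above is one of the few places that still open-codes a wait instead of using wait_event(): the task must sample CACHE_VALID and mark itself TASK_INTERRUPTIBLE under rsi_cache.hash_lock before sleeping, so no wake_up_all() from rsi_parse() can be lost. The bare-kernel shape of that pattern, as a sketch (DECLARE_WAITQUEUE replaces the libcfs init_waitqueue_entry_current() wrapper; names invented):

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static void wait_for_flag(wait_queue_head_t *waitq, rwlock_t *lock,
                          int *flag, long timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int seen;

        add_wait_queue(waitq, &wait);

        /* sample the flag and set our state while holding the lock,
         * so a concurrent wake_up_all() cannot be missed */
        read_lock(lock);
        seen = *flag;
        if (!seen)
                set_current_state(TASK_INTERRUPTIBLE);
        read_unlock(lock);

        if (!seen)
                schedule_timeout(timeout);      /* returns early on wakeup */

        remove_wait_queue(waitq, &wait);
        __set_current_state(TASK_RUNNING);
}
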
index bc65d15..2d5d4b4 100644
@@ -368,16 +368,16 @@ void ptlrpc_invalidate_import(struct obd_import *imp)
                   }
         } while (rc != 0);
 
-        /*
-         * Let's additionally check that no new rpcs added to import in
-         * "invalidate" state.
-         */
-        LASSERT(cfs_atomic_read(&imp->imp_inflight) == 0);
-        obd_import_event(imp->imp_obd, imp, IMP_EVENT_INVALIDATE);
-        sptlrpc_import_flush_all_ctx(imp);
-
-        cfs_atomic_dec(&imp->imp_inval_count);
-        cfs_waitq_broadcast(&imp->imp_recovery_waitq);
+       /*
+        * Let's additionally check that no new rpcs added to import in
+        * "invalidate" state.
+        */
+       LASSERT(cfs_atomic_read(&imp->imp_inflight) == 0);
+       obd_import_event(imp->imp_obd, imp, IMP_EVENT_INVALIDATE);
+       sptlrpc_import_flush_all_ctx(imp);
+
+       cfs_atomic_dec(&imp->imp_inval_count);
+       wake_up_all(&imp->imp_recovery_waitq);
 }
 EXPORT_SYMBOL(ptlrpc_invalidate_import);
 
 }
 EXPORT_SYMBOL(ptlrpc_invalidate_import);
 
@@ -1178,15 +1178,15 @@ out:
                         RETURN(-EPROTO);
                 }
 
                         RETURN(-EPROTO);
                 }
 
-                ptlrpc_maybe_ping_import_soon(imp);
+               ptlrpc_maybe_ping_import_soon(imp);
 
-                CDEBUG(D_HA, "recovery of %s on %s failed (%d)\n",
-                       obd2cli_tgt(imp->imp_obd),
-                       (char *)imp->imp_connection->c_remote_uuid.uuid, rc);
-        }
+               CDEBUG(D_HA, "recovery of %s on %s failed (%d)\n",
+                      obd2cli_tgt(imp->imp_obd),
+                      (char *)imp->imp_connection->c_remote_uuid.uuid, rc);
+       }
 
-        cfs_waitq_broadcast(&imp->imp_recovery_waitq);
-        RETURN(rc);
+       wake_up_all(&imp->imp_recovery_waitq);
+       RETURN(rc);
 }
 
 /**
@@ -1409,13 +1409,13 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
                               libcfs_nid2str(imp->imp_connection->c_peer.nid));
         }
 
-        if (imp->imp_state == LUSTRE_IMP_FULL) {
-                cfs_waitq_broadcast(&imp->imp_recovery_waitq);
-                ptlrpc_wake_delayed(imp);
-        }
+       if (imp->imp_state == LUSTRE_IMP_FULL) {
+               wake_up_all(&imp->imp_recovery_waitq);
+               ptlrpc_wake_delayed(imp);
+       }
 
 out:
-        RETURN(rc);
+       RETURN(rc);
 }
 
 int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
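
Every import-state transition above broadcasts with wake_up_all() rather than wake_up(): an arbitrary number of senders may be blocked on imp_recovery_waitq, each with its own wake-up condition, and all of them must get the chance to re-evaluate. The waiter side reduces to the usual wait_event() shape; a sketch (my_import_ready() is an invented predicate standing in for the real state checks):

#include <linux/wait.h>

/* each waiter re-checks its own condition after every broadcast */
static int my_wait_recovery(struct obd_import *imp)
{
        return wait_event_interruptible(imp->imp_recovery_waitq,
                                        my_import_ready(imp));
}
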
index 6f2cebb..a6eb8c9 100644
@@ -460,9 +460,9 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
         for (;;) {
 #ifdef __KERNEL__
                /* The wq argument is ignored by user-space wait_event macros */
-               cfs_waitq_t *wq = (req->rq_set != NULL) ?
-                                 &req->rq_set->set_waitq :
-                                 &req->rq_reply_waitq;
+               wait_queue_head_t *wq = (req->rq_set != NULL) ?
+                                       &req->rq_set->set_waitq :
+                                       &req->rq_reply_waitq;
 #endif
                 /* Network access will complete in finite time but the HUGE
                  * timeout lets us CWARN for visibility of sluggish NALs */
index 431fb90..4c91135 100644
@@ -324,7 +324,7 @@ void lustre_put_emerg_rs(struct ptlrpc_reply_state *rs)
        spin_lock(&svcpt->scp_rep_lock);
        cfs_list_add(&rs->rs_list, &svcpt->scp_rep_idle);
        spin_unlock(&svcpt->scp_rep_lock);
-       cfs_waitq_signal(&svcpt->scp_rep_waitq);
+       wake_up(&svcpt->scp_rep_waitq);
 }
 
 int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
index 805c487..b55d5bc 100644
@@ -240,20 +240,20 @@ static void ptlrpc_pinger_process_import(struct obd_import *imp,
 
 static int ptlrpc_pinger_main(void *arg)
 {
-        struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
+       struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
        ENTRY;
 
-        /* Record that the thread is running */
-        thread_set_flags(thread, SVC_RUNNING);
-        cfs_waitq_signal(&thread->t_ctl_waitq);
+       /* Record that the thread is running */
+       thread_set_flags(thread, SVC_RUNNING);
+       wake_up(&thread->t_ctl_waitq);
 
-        /* And now, loop forever, pinging as needed. */
-        while (1) {
-                cfs_time_t this_ping = cfs_time_current();
-                struct l_wait_info lwi;
-                cfs_duration_t time_to_next_wake;
-                struct timeout_item *item;
-                cfs_list_t *iter;
+       /* And now, loop forever, pinging as needed. */
+       while (1) {
+               cfs_time_t this_ping = cfs_time_current();
+               struct l_wait_info lwi;
+               cfs_duration_t time_to_next_wake;
+               struct timeout_item *item;
+               cfs_list_t *iter;
 
                mutex_lock(&pinger_mutex);
                 cfs_list_for_each_entry(item, &timeout_list, ti_chain) {
@@ -307,7 +307,7 @@ static int ptlrpc_pinger_main(void *arg)
         }
 
        thread_set_flags(thread, SVC_STOPPED);
-       cfs_waitq_signal(&thread->t_ctl_waitq);
+       wake_up(&thread->t_ctl_waitq);
 
        CDEBUG(D_NET, "pinger thread exiting, process %d\n", current_pid());
        return 0;
@@ -328,7 +328,7 @@ int ptlrpc_start_pinger(void)
            !thread_is_stopped(&pinger_thread))
                RETURN(-EALREADY);
 
-       cfs_waitq_init(&pinger_thread.t_ctl_waitq);
+       init_waitqueue_head(&pinger_thread.t_ctl_waitq);
 
        strcpy(pinger_thread.t_name, "ll_ping");
 
@@ -370,7 +370,7 @@ int ptlrpc_stop_pinger(void)
        ptlrpc_pinger_remove_timeouts();
 
        thread_set_flags(&pinger_thread, SVC_STOPPING);
-       cfs_waitq_signal(&pinger_thread.t_ctl_waitq);
+       wake_up(&pinger_thread.t_ctl_waitq);
 
        l_wait_event(pinger_thread.t_ctl_waitq,
                     thread_is_stopped(&pinger_thread), &lwi);
@@ -560,7 +560,7 @@ void ptlrpc_pinger_wake_up()
 {
 #ifdef ENABLE_PINGER
        thread_add_flags(&pinger_thread, SVC_EVENT);
-       cfs_waitq_signal(&pinger_thread.t_ctl_waitq);
+       wake_up(&pinger_thread.t_ctl_waitq);
 #endif
 }
 
@@ -570,7 +570,7 @@ void ptlrpc_pinger_wake_up()
 
 static int               pet_refcount = 0;
 static int               pet_state;
-static cfs_waitq_t       pet_waitq;
+static wait_queue_head_t       pet_waitq;
 CFS_LIST_HEAD(pet_list);
 static DEFINE_SPINLOCK(pet_lock);
 
@@ -592,7 +592,7 @@ int ping_evictor_wake(struct obd_export *exp)
        }
        spin_unlock(&pet_lock);
 
-       cfs_waitq_signal(&pet_waitq);
+       wake_up(&pet_waitq);
        return 0;
 }
 
@@ -684,7 +684,7 @@ void ping_evictor_start(void)
        if (++pet_refcount > 1)
                return;
 
-       cfs_waitq_init(&pet_waitq);
+       init_waitqueue_head(&pet_waitq);
 
        task = kthread_run(ping_evictor_main, NULL, "ll_evictor");
        if (IS_ERR(task)) {
@@ -701,7 +701,7 @@ void ping_evictor_stop(void)
                 return;
 
         pet_state = PET_TERMINATE;
-        cfs_waitq_signal(&pet_waitq);
+       wake_up(&pet_waitq);
 }
 EXPORT_SYMBOL(ping_evictor_stop);
 #else /* !__KERNEL__ */
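
The pinger above sleeps on a timed l_wait_event(), and ptlrpc_pinger_wake_up() cuts that sleep short by setting SVC_EVENT and waking the control queue. The same nudge pattern with stock primitives, as a sketch (names invented; the real thread recomputes its deadline each cycle):

#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_pinger_waitq);
static int my_pinger_event;

static int my_pinger_main(void *arg)
{
        while (!kthread_should_stop()) {
                /* ... ping whatever is due ... */
                wait_event_interruptible_timeout(my_pinger_waitq,
                                                 my_pinger_event ||
                                                 kthread_should_stop(),
                                                 30 * HZ);
                my_pinger_event = 0;
        }
        return 0;
}

static void my_pinger_nudge(void)
{
        my_pinger_event = 1;
        wake_up(&my_pinger_waitq);      /* end the timed sleep early */
}
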
index 6631a6d..56f56ff 100644
@@ -101,7 +101,7 @@ void ptlrpcd_wake(struct ptlrpc_request *req)
 
         LASSERT(rq_set != NULL);
 
-        cfs_waitq_signal(&rq_set->set_waitq);
+       wake_up(&rq_set->set_waitq);
 }
 EXPORT_SYMBOL(ptlrpcd_wake);
 
@@ -189,15 +189,15 @@ void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
        count = cfs_atomic_add_return(i, &new->set_new_count);
        cfs_atomic_set(&set->set_remaining, 0);
        spin_unlock(&new->set_new_req_lock);
-        if (count == i) {
-                cfs_waitq_signal(&new->set_waitq);
-
-                /* XXX: It maybe unnecessary to wakeup all the partners. But to
-                 *      guarantee the async RPC can be processed ASAP, we have
-                 *      no other better choice. It maybe fixed in future. */
-                for (i = 0; i < pc->pc_npartners; i++)
-                        cfs_waitq_signal(&pc->pc_partners[i]->pc_set->set_waitq);
-        }
+       if (count == i) {
+               wake_up(&new->set_waitq);
+
+               /* XXX: It maybe unnecessary to wakeup all the partners. But to
+                *      guarantee the async RPC can be processed ASAP, we have
+                *      no other better choice. It maybe fixed in future. */
+               for (i = 0; i < pc->pc_npartners; i++)
+                       wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
+       }
 #endif
 }
 EXPORT_SYMBOL(ptlrpcd_add_rqset);
@@ -259,7 +259,7 @@ void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx)
                 /* ptlrpc_check_set will decrease the count */
                 cfs_atomic_inc(&req->rq_set->set_remaining);
                spin_unlock(&req->rq_lock);
-               cfs_waitq_signal(&req->rq_set->set_waitq);
+               wake_up(&req->rq_set->set_waitq);
                return;
        } else {
                spin_unlock(&req->rq_lock);
@@ -759,7 +759,7 @@ void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
        set_bit(LIOD_STOP, &pc->pc_flags);
        if (force)
                set_bit(LIOD_FORCE, &pc->pc_flags);
-       cfs_waitq_signal(&pc->pc_set->set_waitq);
+       wake_up(&pc->pc_set->set_waitq);
 
 out:
        EXIT;
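
ptlrpcd_stop() above is the flag-then-wake ordering in miniature: LIOD_STOP is set with set_bit() before the wake_up(), so the worker either sees the bit before it sleeps or is woken to see it. A sketch with invented names:

#include <linux/bitops.h>
#include <linux/wait.h>

#define MY_STOP 0       /* bit number, analogous to LIOD_STOP */

struct my_ctl {
        unsigned long     mc_flags;
        wait_queue_head_t mc_waitq;     /* the worker sleeps here */
};

static void my_ctl_stop(struct my_ctl *pc)
{
        set_bit(MY_STOP, &pc->mc_flags);        /* publish the request first */
        wake_up(&pc->mc_waitq);                 /* then kick the sleeper */
}
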
index 857eb2b..b6f7d07 100644
@@ -553,7 +553,7 @@ int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
                        "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
                        newctx, newctx->cc_flags);
 
-               cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
+               schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
                                                   HZ);
        } else {
                 /*
@@ -921,13 +921,13 @@ int sptlrpc_import_check_ctx(struct obd_import *imp)
                RETURN(-ENOMEM);
 
        spin_lock_init(&req->rq_lock);
-        cfs_atomic_set(&req->rq_refcount, 10000);
-        CFS_INIT_LIST_HEAD(&req->rq_ctx_chain);
-        cfs_waitq_init(&req->rq_reply_waitq);
-        cfs_waitq_init(&req->rq_set_waitq);
-        req->rq_import = imp;
-        req->rq_flvr = sec->ps_flvr;
-        req->rq_cli_ctx = ctx;
+       cfs_atomic_set(&req->rq_refcount, 10000);
+       CFS_INIT_LIST_HEAD(&req->rq_ctx_chain);
+       init_waitqueue_head(&req->rq_reply_waitq);
+       init_waitqueue_head(&req->rq_set_waitq);
+       req->rq_import = imp;
+       req->rq_flvr = sec->ps_flvr;
+       req->rq_cli_ctx = ctx;
 
         rc = sptlrpc_req_refresh_ctx(req, 0);
         LASSERT(cfs_list_empty(&req->rq_ctx_chain));
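
schedule_timeout_and_set_state() in the first hunk above is a libcfs compatibility wrapper rather than a stock kernel symbol; upstream, the same one-second relax is spelled either as the classic two-step or with the one-call helper. Both variants below are standard API, shown as a sketch:

#include <linux/jiffies.h>
#include <linux/sched.h>

static void relax_a_little(void)
{
        /* two-step form: the state must be set before schedule_timeout() */
        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(HZ);

        /* equivalent one-call helper */
        schedule_timeout_interruptible(HZ);
}
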
index 77fdaee..c4f45aa 100644
@@ -80,13 +80,13 @@ static struct ptlrpc_enc_page_pool {
         unsigned long    epp_max_pages;   /* maximum pages can hold, const */
         unsigned int     epp_max_pools;   /* number of pools, const */
 
-        /*
-         * wait queue in case of not enough free pages.
-         */
-        cfs_waitq_t      epp_waitq;       /* waiting threads */
-        unsigned int     epp_waitqlen;    /* wait queue length */
-        unsigned long    epp_pages_short; /* # of pages wanted of in-q users */
-        unsigned int     epp_growing:1;   /* during adding pages */
+       /*
+        * wait queue in case of not enough free pages.
+        */
+       wait_queue_head_t    epp_waitq;       /* waiting threads */
+       unsigned int     epp_waitqlen;    /* wait queue length */
+       unsigned long    epp_pages_short; /* # of pages wanted of in-q users */
+       unsigned int     epp_growing:1;   /* during adding pages */
 
         /*
          * indicating how idle the pools are, from 0 to MAX_IDLE_IDX
@@ -452,8 +452,8 @@ static inline void enc_pools_wakeup(void)
        LASSERT(spin_is_locked(&page_pools.epp_lock));
 
        if (unlikely(page_pools.epp_waitqlen)) {
-               LASSERT(cfs_waitq_active(&page_pools.epp_waitq));
-               cfs_waitq_broadcast(&page_pools.epp_waitq);
+               LASSERT(waitqueue_active(&page_pools.epp_waitq));
+               wake_up_all(&page_pools.epp_waitq);
        }
 }
 
@@ -494,72 +494,72 @@ static int enc_pools_should_grow(int page_needed, long now)
  */
 int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
 {
-        cfs_waitlink_t  waitlink;
-        unsigned long   this_idle = -1;
-        cfs_time_t      tick = 0;
-        long            now;
-        int             p_idx, g_idx;
-        int             i;
+       wait_queue_t  waitlink;
+       unsigned long   this_idle = -1;
+       cfs_time_t      tick = 0;
+       long            now;
+       int             p_idx, g_idx;
+       int             i;
 
-        LASSERT(desc->bd_iov_count > 0);
-        LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);
+       LASSERT(desc->bd_iov_count > 0);
+       LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);
 
-        /* resent bulk, enc iov might have been allocated previously */
-        if (desc->bd_enc_iov != NULL)
-                return 0;
+       /* resent bulk, enc iov might have been allocated previously */
+       if (desc->bd_enc_iov != NULL)
+               return 0;
 
-        OBD_ALLOC(desc->bd_enc_iov,
-                  desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
-        if (desc->bd_enc_iov == NULL)
-                return -ENOMEM;
+       OBD_ALLOC(desc->bd_enc_iov,
+                 desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
+       if (desc->bd_enc_iov == NULL)
+               return -ENOMEM;
 
        spin_lock(&page_pools.epp_lock);
 
-        page_pools.epp_st_access++;
+       page_pools.epp_st_access++;
 again:
 again:
-        if (unlikely(page_pools.epp_free_pages < desc->bd_iov_count)) {
-                if (tick == 0)
-                        tick = cfs_time_current();
+       if (unlikely(page_pools.epp_free_pages < desc->bd_iov_count)) {
+               if (tick == 0)
+                       tick = cfs_time_current();
 
-                now = cfs_time_current_sec();
+               now = cfs_time_current_sec();
 
-                page_pools.epp_st_missings++;
-                page_pools.epp_pages_short += desc->bd_iov_count;
+               page_pools.epp_st_missings++;
+               page_pools.epp_pages_short += desc->bd_iov_count;
 
-                if (enc_pools_should_grow(desc->bd_iov_count, now)) {
-                        page_pools.epp_growing = 1;
+               if (enc_pools_should_grow(desc->bd_iov_count, now)) {
+                       page_pools.epp_growing = 1;
 
                        spin_unlock(&page_pools.epp_lock);
                        enc_pools_add_pages(page_pools.epp_pages_short / 2);
                        spin_lock(&page_pools.epp_lock);
 
-                        page_pools.epp_growing = 0;
+                       page_pools.epp_growing = 0;
 
-                        enc_pools_wakeup();
-                } else {
-                        if (++page_pools.epp_waitqlen >
-                            page_pools.epp_st_max_wqlen)
-                                page_pools.epp_st_max_wqlen =
-                                                page_pools.epp_waitqlen;
+                       enc_pools_wakeup();
+               } else {
+                       if (++page_pools.epp_waitqlen >
+                           page_pools.epp_st_max_wqlen)
+                               page_pools.epp_st_max_wqlen =
+                                               page_pools.epp_waitqlen;
 
-                        cfs_set_current_state(CFS_TASK_UNINT);
-                        cfs_waitlink_init(&waitlink);
-                        cfs_waitq_add(&page_pools.epp_waitq, &waitlink);
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       init_waitqueue_entry_current(&waitlink);
+                       add_wait_queue(&page_pools.epp_waitq, &waitlink);
 
                        spin_unlock(&page_pools.epp_lock);
-                       cfs_waitq_wait(&waitlink, CFS_TASK_UNINT);
-                       cfs_waitq_del(&page_pools.epp_waitq, &waitlink);
+                       waitq_wait(&waitlink, TASK_UNINTERRUPTIBLE);
+                       remove_wait_queue(&page_pools.epp_waitq, &waitlink);
                        LASSERT(page_pools.epp_waitqlen > 0);
                        spin_lock(&page_pools.epp_lock);
-                        page_pools.epp_waitqlen--;
-                }
+                       page_pools.epp_waitqlen--;
+               }
 
-                LASSERT(page_pools.epp_pages_short >= desc->bd_iov_count);
-                page_pools.epp_pages_short -= desc->bd_iov_count;
+               LASSERT(page_pools.epp_pages_short >= desc->bd_iov_count);
+               page_pools.epp_pages_short -= desc->bd_iov_count;
 
-                this_idle = 0;
-                goto again;
-        }
+               this_idle = 0;
+               goto again;
+       }
 
         /* record max wait time */
         if (unlikely(tick != 0)) {
@@ -707,16 +707,16 @@ static inline void enc_pools_free(void)
 
 int sptlrpc_enc_pool_init(void)
 {
-        /*
-         * maximum capacity is 1/8 of total physical memory.
-         * is the 1/8 a good number?
-         */
+       /*
+        * maximum capacity is 1/8 of total physical memory.
+        * is the 1/8 a good number?
+        */
        page_pools.epp_max_pages = num_physpages / 8;
-        page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);
+       page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);
 
 
-        cfs_waitq_init(&page_pools.epp_waitq);
-        page_pools.epp_waitqlen = 0;
-        page_pools.epp_pages_short = 0;
+       init_waitqueue_head(&page_pools.epp_waitq);
+       page_pools.epp_waitqlen = 0;
+       page_pools.epp_pages_short = 0;
 
         page_pools.epp_growing = 0;
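
sptlrpc_enc_pool_get_pages() above keeps its open-coded wait (only the primitive names changed) because wait_event() cannot express it: epp_lock must be dropped across the sleep, and the epp_waitqlen accounting happens under that same lock. The bare shape of the lock-dropping wait, as a sketch:

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* caller holds 'lock'; returns with 'lock' re-acquired */
static void wait_for_resource(wait_queue_head_t *waitq, spinlock_t *lock)
{
        DECLARE_WAITQUEUE(wait, current);

        /* state is set before the lock is dropped, so a wake_up_all()
         * issued in the window cannot be lost */
        set_current_state(TASK_UNINTERRUPTIBLE);
        add_wait_queue(waitq, &wait);
        spin_unlock(lock);

        schedule();                     /* until the pool signals growth */

        remove_wait_queue(waitq, &wait);
        spin_lock(lock);                /* re-acquire and re-check */
}
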
 
index 022f749..5e99ca8 100644
@@ -117,7 +117,7 @@ void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx)
        spin_unlock(&sec_gc_ctx_list_lock);
 
        thread_add_flags(&sec_gc_thread, SVC_SIGNAL);
-       cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
+       wake_up(&sec_gc_thread.t_ctl_waitq);
 }
 EXPORT_SYMBOL(sptlrpc_gc_add_ctx);
 
@@ -166,39 +166,39 @@ static void sec_do_gc(struct ptlrpc_sec *sec)
 
 static int sec_gc_main(void *arg)
 {
-        struct ptlrpc_thread *thread = (struct ptlrpc_thread *) arg;
-        struct l_wait_info    lwi;
+       struct ptlrpc_thread *thread = (struct ptlrpc_thread *) arg;
+       struct l_wait_info    lwi;
 
        unshare_fs_struct();
 
-        /* Record that the thread is running */
-        thread_set_flags(thread, SVC_RUNNING);
-        cfs_waitq_signal(&thread->t_ctl_waitq);
+       /* Record that the thread is running */
+       thread_set_flags(thread, SVC_RUNNING);
+       wake_up(&thread->t_ctl_waitq);
 
-        while (1) {
-                struct ptlrpc_sec *sec;
+       while (1) {
+               struct ptlrpc_sec *sec;
 
-                thread_clear_flags(thread, SVC_SIGNAL);
-                sec_process_ctx_list();
+               thread_clear_flags(thread, SVC_SIGNAL);
+               sec_process_ctx_list();
 again:
-                /* go through sec list do gc.
-                 * FIXME here we iterate through the whole list each time which
-                 * is not optimal. we perhaps want to use balanced binary tree
-                 * to trace each sec as order of expiry time.
-                 * another issue here is we wakeup as fixed interval instead of
-                 * according to each sec's expiry time */
+               /* go through sec list do gc.
+                * FIXME here we iterate through the whole list each time which
+                * is not optimal. we perhaps want to use balanced binary tree
+                * to trace each sec as order of expiry time.
+                * another issue here is we wakeup as fixed interval instead of
+                * according to each sec's expiry time */
                mutex_lock(&sec_gc_mutex);
-                cfs_list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
-                        /* if someone is waiting to be deleted, let it
-                         * proceed as soon as possible. */
-                        if (cfs_atomic_read(&sec_gc_wait_del)) {
-                                CDEBUG(D_SEC, "deletion pending, start over\n");
+               cfs_list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
+                       /* if someone is waiting to be deleted, let it
+                        * proceed as soon as possible. */
+                       if (cfs_atomic_read(&sec_gc_wait_del)) {
+                               CDEBUG(D_SEC, "deletion pending, start over\n");
                                mutex_unlock(&sec_gc_mutex);
-                                goto again;
-                        }
+                               goto again;
+                       }
 
-                        sec_do_gc(sec);
-                }
+                       sec_do_gc(sec);
+               }
                mutex_unlock(&sec_gc_mutex);
 
                /* check ctx list again before sleep */
@@ -210,13 +210,13 @@ again:
                             thread_is_signal(thread),
                             &lwi);
 
-                if (thread_test_and_clear_flags(thread, SVC_STOPPING))
-                        break;
-        }
+               if (thread_test_and_clear_flags(thread, SVC_STOPPING))
+                       break;
+       }
 
-        thread_set_flags(thread, SVC_STOPPED);
-        cfs_waitq_signal(&thread->t_ctl_waitq);
-        return 0;
+       thread_set_flags(thread, SVC_STOPPED);
+       wake_up(&thread->t_ctl_waitq);
+       return 0;
 }
 
 int sptlrpc_gc_init(void)
@@ -228,30 +228,30 @@ int sptlrpc_gc_init(void)
        spin_lock_init(&sec_gc_list_lock);
        spin_lock_init(&sec_gc_ctx_list_lock);
 
-        /* initialize thread control */
-        memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
-        cfs_waitq_init(&sec_gc_thread.t_ctl_waitq);
+       /* initialize thread control */
+       memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
+       init_waitqueue_head(&sec_gc_thread.t_ctl_waitq);
 
        task = kthread_run(sec_gc_main, &sec_gc_thread, "sptlrpc_gc");
        if (IS_ERR(task)) {
                CERROR("can't start gc thread: %ld\n", PTR_ERR(task));
                return PTR_ERR(task);
-        }
+       }
 
-        l_wait_event(sec_gc_thread.t_ctl_waitq,
-                     thread_is_running(&sec_gc_thread), &lwi);
-        return 0;
+       l_wait_event(sec_gc_thread.t_ctl_waitq,
+                    thread_is_running(&sec_gc_thread), &lwi);
+       return 0;
 }
 
 void sptlrpc_gc_fini(void)
 {
-        struct l_wait_info lwi = { 0 };
+       struct l_wait_info lwi = { 0 };
 
-        thread_set_flags(&sec_gc_thread, SVC_STOPPING);
-        cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
+       thread_set_flags(&sec_gc_thread, SVC_STOPPING);
+       wake_up(&sec_gc_thread.t_ctl_waitq);
 
-        l_wait_event(sec_gc_thread.t_ctl_waitq,
-                     thread_is_stopped(&sec_gc_thread), &lwi);
+       l_wait_event(sec_gc_thread.t_ctl_waitq,
+                    thread_is_stopped(&sec_gc_thread), &lwi);
 }
 
 #else /* !__KERNEL__ */
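
One detail worth noting across this patch: cfs_waitq_signal() maps to wake_up(), which wakes all non-exclusive waiters but at most one exclusive waiter, while cfs_waitq_broadcast() maps to wake_up_all(), which wakes exclusive waiters too. Since wait_event()-style waiters are non-exclusive, the two are equivalent for the GC thread's single-waiter handshakes; the distinction only bites where exclusive waits are used, as with l_wait_event_exclusive_head() in the service code below. A toy illustration:

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
static int demo_done;

static void demo_finish(void)
{
        demo_done = 1;
        /* wake_up() would stop after one exclusive waiter;
         * wake_up_all() wakes every exclusive waiter as well */
        wake_up_all(&demo_waitq);
}
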
index a66367e..a4e887b 100644
@@ -211,7 +211,7 @@ struct ptlrpc_hr_partition;
 struct ptlrpc_hr_thread {
        int                             hrt_id;         /* thread ID */
        spinlock_t                      hrt_lock;
-       cfs_waitq_t                     hrt_waitq;
+       wait_queue_head_t               hrt_waitq;
        cfs_list_t                      hrt_queue;      /* RS queue */
        struct ptlrpc_hr_partition      *hrt_partition;
 };
@@ -238,7 +238,7 @@ struct ptlrpc_hr_service {
        /* CPU partition table, it's just cfs_cpt_table for now */
        struct cfs_cpt_table            *hr_cpt_table;
        /** controller sleep waitq */
-       cfs_waitq_t                     hr_waitq;
+       wait_queue_head_t               hr_waitq;
         unsigned int                   hr_stopping;
        /** roundrobin rotor for non-affinity service */
        unsigned int                    hr_rotor;
@@ -313,7 +313,7 @@ static void rs_batch_dispatch(struct rs_batch *b)
                cfs_list_splice_init(&b->rsb_replies, &hrt->hrt_queue);
                spin_unlock(&hrt->hrt_lock);
 
-               cfs_waitq_signal(&hrt->hrt_waitq);
+               wake_up(&hrt->hrt_waitq);
                b->rsb_n_replies = 0;
        }
 }
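
rs_batch_dispatch() above shows the producer discipline used throughout the service code: splice the batch onto the handler's queue under hrt_lock, release the lock, and only then wake_up() the handler thread, so the woken thread does not immediately contend for the lock it needs. Sketch with invented names:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct my_hrt {                         /* stand-in for ptlrpc_hr_thread */
        spinlock_t        ht_lock;      /* initialized at thread setup */
        struct list_head  ht_queue;
        wait_queue_head_t ht_waitq;
};

static void my_dispatch(struct my_hrt *hrt, struct list_head *batch)
{
        spin_lock(&hrt->ht_lock);
        list_splice_init(batch, &hrt->ht_queue);
        spin_unlock(&hrt->ht_lock);

        wake_up(&hrt->ht_waitq);        /* after unlock: no instant contention */
}
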
@@ -392,7 +392,7 @@ void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs)
        cfs_list_add_tail(&rs->rs_list, &hrt->hrt_queue);
        spin_unlock(&hrt->hrt_lock);
 
-       cfs_waitq_signal(&hrt->hrt_waitq);
+       wake_up(&hrt->hrt_waitq);
        EXIT;
 #else
        cfs_list_add_tail(&rs->rs_list, &rs->rs_svcpt->scp_rep_queue);
@@ -504,7 +504,7 @@ static void ptlrpc_at_timer(unsigned long castmeharder)
 
        svcpt->scp_at_check = 1;
        svcpt->scp_at_checktime = cfs_time_current();
-       cfs_waitq_signal(&svcpt->scp_waitq);
+       wake_up(&svcpt->scp_waitq);
 }
 
 static void
@@ -631,7 +631,7 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
        CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_idle);
        CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_posted);
        CFS_INIT_LIST_HEAD(&svcpt->scp_req_incoming);
-       cfs_waitq_init(&svcpt->scp_waitq);
+       init_waitqueue_head(&svcpt->scp_waitq);
        /* history request & rqbd list */
        CFS_INIT_LIST_HEAD(&svcpt->scp_hist_reqs);
        CFS_INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);
@@ -646,7 +646,7 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
        CFS_INIT_LIST_HEAD(&svcpt->scp_rep_queue);
 #endif
        CFS_INIT_LIST_HEAD(&svcpt->scp_rep_idle);
-       cfs_waitq_init(&svcpt->scp_rep_waitq);
+       init_waitqueue_head(&svcpt->scp_rep_waitq);
        cfs_atomic_set(&svcpt->scp_nreps_difficult, 0);
 
        /* adaptive timeout */
@@ -1907,7 +1907,7 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
        if (rc)
                GOTO(err_req, rc);
 
-       cfs_waitq_signal(&svcpt->scp_waitq);
+       wake_up(&svcpt->scp_waitq);
        RETURN(1);
 
 err_req:
@@ -2173,7 +2173,7 @@ ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
                 ptlrpc_rs_decref (rs);
                if (cfs_atomic_dec_and_test(&svcpt->scp_nreps_difficult) &&
                    svc->srv_is_stopping)
-                       cfs_waitq_broadcast(&svcpt->scp_waitq);
+                       wake_up_all(&svcpt->scp_waitq);
                RETURN(1);
        }
 
@@ -2362,7 +2362,7 @@ ptlrpc_wait_event(struct ptlrpc_service_part *svcpt,
 
        lc_watchdog_disable(thread->t_watchdog);
 
-       cfs_cond_resched();
+       cond_resched();
 
        l_wait_event_exclusive_head(svcpt->scp_waitq,
                                ptlrpc_thread_stopping(thread) ||
@@ -2476,14 +2476,14 @@ static int ptlrpc_main(void *arg)
        spin_unlock(&svcpt->scp_lock);
 
        /* wake up our creator in case he's still waiting. */
-       cfs_waitq_signal(&thread->t_ctl_waitq);
+       wake_up(&thread->t_ctl_waitq);
 
        thread->t_watchdog = lc_watchdog_add(ptlrpc_server_get_timeout(svcpt),
                                             NULL, NULL);
 
        spin_lock(&svcpt->scp_rep_lock);
        cfs_list_add(&rs->rs_list, &svcpt->scp_rep_idle);
-       cfs_waitq_signal(&svcpt->scp_rep_waitq);
+       wake_up(&svcpt->scp_rep_waitq);
        spin_unlock(&svcpt->scp_rep_lock);
 
        CDEBUG(D_NET, "service thread %d (#%d) started\n", thread->t_id,
@@ -2564,7 +2564,7 @@ out:
        thread->t_id = rc;
        thread_add_flags(thread, SVC_STOPPED);
 
-       cfs_waitq_signal(&thread->t_ctl_waitq);
+       wake_up(&thread->t_ctl_waitq);
        spin_unlock(&svcpt->scp_lock);
 
        return rc;
@@ -2607,7 +2607,7 @@ static int ptlrpc_hr_main(void *arg)
        }
 
        cfs_atomic_inc(&hrp->hrp_nstarted);
-       cfs_waitq_signal(&ptlrpc_hr.hr_waitq);
+       wake_up(&ptlrpc_hr.hr_waitq);
 
        while (!ptlrpc_hr.hr_stopping) {
                l_wait_condition(hrt->hrt_waitq, hrt_dont_sleep(hrt, &replies));
@@ -2624,7 +2624,7 @@ static int ptlrpc_hr_main(void *arg)
         }
 
        cfs_atomic_inc(&hrp->hrp_nstopped);
-       cfs_waitq_signal(&ptlrpc_hr.hr_waitq);
+       wake_up(&ptlrpc_hr.hr_waitq);
 
        return 0;
 }
@@ -2641,13 +2641,13 @@ static void ptlrpc_stop_hr_threads(void)
                if (hrp->hrp_thrs == NULL)
                        continue; /* uninitialized */
                for (j = 0; j < hrp->hrp_nthrs; j++)
-                       cfs_waitq_broadcast(&hrp->hrp_thrs[j].hrt_waitq);
+                       wake_up_all(&hrp->hrp_thrs[j].hrt_waitq);
        }
 
        cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
                if (hrp->hrp_thrs == NULL)
                        continue; /* uninitialized */
-               cfs_wait_event(ptlrpc_hr.hr_waitq,
+               wait_event(ptlrpc_hr.hr_waitq,
                               cfs_atomic_read(&hrp->hrp_nstopped) ==
                               cfs_atomic_read(&hrp->hrp_nstarted));
        }
@@ -2673,7 +2673,7 @@ static int ptlrpc_start_hr_threads(void)
                        if (IS_ERR_VALUE(rc))
                                break;
                }
-               cfs_wait_event(ptlrpc_hr.hr_waitq,
+               wait_event(ptlrpc_hr.hr_waitq,
                               cfs_atomic_read(&hrp->hrp_nstarted) == j);
                if (!IS_ERR_VALUE(rc))
                        continue;
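
The two hunks above also show why the plain kernel wait_event() suffices here:
ptlrpc_start_hr_threads() and ptlrpc_stop_hr_threads() use hr_waitq as a
startup/shutdown barrier, where each handler thread bumps a counter and
signals before the coordinator proceeds. A minimal sketch of that barrier,
with hypothetical demo_* names standing in for the ptlrpc_hr fields:

	#include <linux/atomic.h>
	#include <linux/wait.h>

	static atomic_t demo_nstarted = ATOMIC_INIT(0);
	static DECLARE_WAIT_QUEUE_HEAD(demo_barrier);

	/* Worker side: announce readiness, then signal the coordinator. */
	static void demo_worker_ready(void)
	{
		atomic_inc(&demo_nstarted);
		wake_up(&demo_barrier);
	}

	/* Coordinator side: block until all nthreads workers checked in.
	 * wait_event() re-evaluates the condition on every wakeup, so a
	 * wakeup from an unrelated worker is harmless. */
	static void demo_wait_all_started(int nthreads)
	{
		wait_event(demo_barrier,
			   atomic_read(&demo_nstarted) == nthreads);
	}
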
@@ -2705,7 +2705,7 @@ static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt)
                thread_add_flags(thread, SVC_STOPPING);
        }
 
-       cfs_waitq_broadcast(&svcpt->scp_waitq);
+       wake_up_all(&svcpt->scp_waitq);
 
        while (!cfs_list_empty(&svcpt->scp_threads)) {
                thread = cfs_list_entry(svcpt->scp_threads.next,
@@ -2814,7 +2814,7 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
        OBD_CPT_ALLOC_PTR(thread, svc->srv_cptable, svcpt->scp_cpt);
        if (thread == NULL)
                RETURN(-ENOMEM);
-       cfs_waitq_init(&thread->t_ctl_waitq);
+       init_waitqueue_head(&thread->t_ctl_waitq);
 
        spin_lock(&svcpt->scp_lock);
        if (!ptlrpc_threads_increasable(svcpt)) {
@@ -2832,7 +2832,7 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
                if (wait) {
                        CDEBUG(D_INFO, "Waiting for creating thread %s #%d\n",
                               svc->srv_thread_name, svcpt->scp_thr_nextid);
-                       cfs_schedule();
+                       schedule();
                        goto again;
                }
 
@@ -2869,7 +2869,7 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
                         * by ptlrpc_svcpt_stop_threads now
                         */
                        thread_add_flags(thread, SVC_STOPPED);
-                       cfs_waitq_signal(&thread->t_ctl_waitq);
+                       wake_up(&thread->t_ctl_waitq);
                        spin_unlock(&svcpt->scp_lock);
                } else {
                        cfs_list_del(&thread->t_link);
@@ -2907,7 +2907,7 @@ int ptlrpc_hr_init(void)
        if (ptlrpc_hr.hr_partitions == NULL)
                RETURN(-ENOMEM);
 
-       cfs_waitq_init(&ptlrpc_hr.hr_waitq);
+       init_waitqueue_head(&ptlrpc_hr.hr_waitq);
 
        cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
                hrp->hrp_cpt = i;
@@ -2929,7 +2929,7 @@ int ptlrpc_hr_init(void)
 
                        hrt->hrt_id = j;
                        hrt->hrt_partition = hrp;
-                       cfs_waitq_init(&hrt->hrt_waitq);
+                       init_waitqueue_head(&hrt->hrt_waitq);
                        spin_lock_init(&hrt->hrt_lock);
                        CFS_INIT_LIST_HEAD(&hrt->hrt_queue);
                }
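
Every ptlrpc/service.c change above is a one-to-one rename of a libcfs
wrapper onto the native kernel primitive, with no behavioural change. The
full mapping, sketched with a hypothetical demo waitqueue rather than the
ptlrpc fields touched above:

	#include <linux/sched.h>
	#include <linux/wait.h>

	static wait_queue_head_t demo_waitq; /* cfs_waitq_t -> wait_queue_head_t */
	static int demo_cond;

	static void demo_setup(void)
	{
		init_waitqueue_head(&demo_waitq); /* cfs_waitq_init() */
	}

	static void demo_waiter(void)
	{
		/* cfs_wait_event(): sleep until the condition holds. */
		wait_event(demo_waitq, demo_cond != 0);
	}

	static void demo_waker(void)
	{
		demo_cond = 1;
		wake_up(&demo_waitq);     /* cfs_waitq_signal()    */
		wake_up_all(&demo_waitq); /* cfs_waitq_broadcast() */
		cond_resched();           /* cfs_cond_resched()    */
		schedule();               /* cfs_schedule(): yield */
	}
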
index 5fb7905..16f703d 100644 (file)
@@ -172,7 +172,7 @@ retry:
                        "freed:%lu, repeat:%u\n", hash,
                        d.lid_inuse, d.lid_freed, repeat);
                repeat++;
-               cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
+               schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
                                                cfs_time_seconds(1));
                goto retry;
        }
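
The hunk above keeps the schedule_timeout_and_set_state() compatibility
macro and only renames the task-state constant. A hedged open-coded sketch
of its effect, under the assumption that the macro is a thin wrapper over
the usual set-state-then-sleep pair:

	#include <linux/sched.h>
	#include <libcfs/libcfs.h>	/* for the cfs_time_seconds() helper */

	/* Assumed-equivalent form of
	 * schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
	 *                                cfs_time_seconds(1)); */
	static void demo_backoff_one_second(void)
	{
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1)); /* ~1s in jiffies */
	}
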
index f16a5e2..e80b25b 100644 (file)
@@ -116,7 +116,7 @@ struct lquota_slv_entry {
        rwlock_t                lse_lock;
 
        /* waiter for pending request done */
-       cfs_waitq_t             lse_waiters;
+       wait_queue_head_t       lse_waiters;
 
        /* hint on current on-disk usage, in inodes or kbytes */
        __u64                   lse_usage;
index 8401030..7dcce4a 100644 (file)
@@ -236,7 +236,7 @@ static int qmt_device_init0(const struct lu_env *env, struct qmt_device *qmt,
 
        /* set up and start rebalance thread */
        thread_set_flags(&qmt->qmt_reba_thread, SVC_STOPPED);
-       cfs_waitq_init(&qmt->qmt_reba_thread.t_ctl_waitq);
+       init_waitqueue_head(&qmt->qmt_reba_thread.t_ctl_waitq);
        CFS_INIT_LIST_HEAD(&qmt->qmt_reba_list);
        spin_lock_init(&qmt->qmt_reba_lock);
        rc = qmt_start_reba_thread(qmt);
index 635a314..9dd2929 100644 (file)
@@ -688,7 +688,7 @@ void qmt_id_lock_notify(struct qmt_device *qmt, struct lquota_entry *lqe)
        spin_unlock(&qmt->qmt_reba_lock);
 
        if (added)
-               cfs_waitq_signal(&qmt->qmt_reba_thread.t_ctl_waitq);
+               wake_up(&qmt->qmt_reba_thread.t_ctl_waitq);
        else
                lqe_putref(lqe);
        EXIT;
@@ -726,7 +726,7 @@ static int qmt_reba_thread(void *arg)
        }
 
        thread_set_flags(thread, SVC_RUNNING);
-       cfs_waitq_signal(&thread->t_ctl_waitq);
+       wake_up(&thread->t_ctl_waitq);
 
        while (1) {
                l_wait_event(thread->t_ctl_waitq,
@@ -753,7 +753,7 @@ static int qmt_reba_thread(void *arg)
        lu_env_fini(env);
        OBD_FREE_PTR(env);
        thread_set_flags(thread, SVC_STOPPED);
-       cfs_waitq_signal(&thread->t_ctl_waitq);
+       wake_up(&thread->t_ctl_waitq);
        RETURN(rc);
 }
 
@@ -794,7 +794,7 @@ void qmt_stop_reba_thread(struct qmt_device *qmt)
                struct l_wait_info lwi = { 0 };
 
                thread_set_flags(thread, SVC_STOPPING);
-               cfs_waitq_signal(&thread->t_ctl_waitq);
+               wake_up(&thread->t_ctl_waitq);
 
                l_wait_event(thread->t_ctl_waitq, thread_is_stopped(thread),
                             &lwi);
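
The qmt_reba_thread hunks above preserve the control-queue handshake used
throughout these files: the thread signals t_ctl_waitq on each state
transition, and the stopper flags SVC_STOPPING, wakes the thread, then waits
for SVC_STOPPED. Condensed from the lines above (thread_set_flags() and
thread_is_stopped() are Lustre's ptlrpc_thread flag helpers; lwi is the
zeroed struct l_wait_info declared above):

	/* Stopper, as in qmt_stop_reba_thread(): */
	thread_set_flags(thread, SVC_STOPPING);
	wake_up(&thread->t_ctl_waitq);
	l_wait_event(thread->t_ctl_waitq, thread_is_stopped(thread), &lwi);

	/* Thread, as in qmt_reba_thread(): acknowledge each transition. */
	thread_set_flags(thread, SVC_RUNNING);
	wake_up(&thread->t_ctl_waitq);
	/* ... main loop ... */
	thread_set_flags(thread, SVC_STOPPED);
	wake_up(&thread->t_ctl_waitq);
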
index 5ebdfeb..f4e26a0 100644 (file)
@@ -47,7 +47,7 @@ static void qsd_lqe_init(struct lquota_entry *lqe, void *arg)
        memset(&lqe->lqe_lockh, 0, sizeof(lqe->lqe_lockh));
        lqe->lqe_pending_write = 0;
        lqe->lqe_pending_req   = 0;
-       cfs_waitq_init(&lqe->lqe_waiters);
+       init_waitqueue_head(&lqe->lqe_waiters);
        lqe->lqe_usage    = 0;
        lqe->lqe_nopreacq = false;
 }
index 6bef54e..627f897 100644 (file)
@@ -65,7 +65,7 @@ static inline void qsd_request_exit(struct lquota_entry *lqe)
        }
        lqe->lqe_pending_req--;
        lqe->lqe_pending_rel = 0;
-       cfs_waitq_broadcast(&lqe->lqe_waiters);
+       wake_up_all(&lqe->lqe_waiters);
 }
 
 /**
index dfbf8a2..253f6b4 100644 (file)
@@ -247,7 +247,7 @@ static int qsd_conn_callback(void *data)
         * step 3) will have to wait for qsd_start() to be called */
        for (type = USRQUOTA; type < MAXQUOTAS; type++) {
                struct qsd_qtype_info *qqi = qsd->qsd_type_array[type];
-               cfs_waitq_signal(&qqi->qqi_reint_thread.t_ctl_waitq);
+               wake_up(&qqi->qqi_reint_thread.t_ctl_waitq);
        }
 
        RETURN(0);
@@ -352,7 +352,7 @@ static int qsd_qtype_init(const struct lu_env *env, struct qsd_instance *qsd,
        qqi->qqi_glb_uptodate = false;
        qqi->qqi_slv_uptodate = false;
        qqi->qqi_reint        = false;
-       cfs_waitq_init(&qqi->qqi_reint_thread.t_ctl_waitq);
+       init_waitqueue_head(&qqi->qqi_reint_thread.t_ctl_waitq);
        thread_set_flags(&qqi->qqi_reint_thread, SVC_STOPPED);
        CFS_INIT_LIST_HEAD(&qqi->qqi_deferred_glb);
        CFS_INIT_LIST_HEAD(&qqi->qqi_deferred_slv);
@@ -546,7 +546,7 @@ struct qsd_instance *qsd_init(const struct lu_env *env, char *svname,
        rwlock_init(&qsd->qsd_lock);
        CFS_INIT_LIST_HEAD(&qsd->qsd_link);
        thread_set_flags(&qsd->qsd_upd_thread, SVC_STOPPED);
-       cfs_waitq_init(&qsd->qsd_upd_thread.t_ctl_waitq);
+       init_waitqueue_head(&qsd->qsd_upd_thread.t_ctl_waitq);
        CFS_INIT_LIST_HEAD(&qsd->qsd_upd_list);
        spin_lock_init(&qsd->qsd_adjust_lock);
        CFS_INIT_LIST_HEAD(&qsd->qsd_adjust_list);
@@ -762,7 +762,7 @@ int qsd_start(const struct lu_env *env, struct qsd_instance *qsd)
         * up to usage; If usage < granted, release down to usage.  */
        for (type = USRQUOTA; type < MAXQUOTAS; type++) {
                struct qsd_qtype_info   *qqi = qsd->qsd_type_array[type];
-               cfs_waitq_signal(&qqi->qqi_reint_thread.t_ctl_waitq);
+               wake_up(&qqi->qqi_reint_thread.t_ctl_waitq);
        }
 
        RETURN(rc);
index 47b0ac1..75ce7ea 100644 (file)
@@ -447,7 +447,7 @@ static int qsd_id_glimpse_ast(struct ldlm_lock *lock, void *data)
        lqe_write_unlock(lqe);
 
        if (wakeup)
-               cfs_waitq_broadcast(&lqe->lqe_waiters);
+               wake_up_all(&lqe->lqe_waiters);
        lqe_putref(lqe);
 out:
        req->rq_status = rc;
index c914f18..08e8f93 100644 (file)
@@ -420,7 +420,7 @@ static int qsd_reint_main(void *args)
        lu_ref_add(&qqi->qqi_reference, "reint_thread", thread);
 
        thread_set_flags(thread, SVC_RUNNING);
-       cfs_waitq_signal(&thread->t_ctl_waitq);
+       wake_up(&thread->t_ctl_waitq);
 
        OBD_ALLOC_PTR(env);
        if (env == NULL)
@@ -534,7 +534,7 @@ out:
        lu_ref_del(&qqi->qqi_reference, "reint_thread", thread);
 
        thread_set_flags(thread, SVC_STOPPED);
-       cfs_waitq_signal(&thread->t_ctl_waitq);
+       wake_up(&thread->t_ctl_waitq);
        return rc;
 }
 
@@ -545,7 +545,7 @@ void qsd_stop_reint_thread(struct qsd_qtype_info *qqi)
 
        if (!thread_is_stopped(thread)) {
                thread_set_flags(thread, SVC_STOPPING);
-               cfs_waitq_signal(&thread->t_ctl_waitq);
+               wake_up(&thread->t_ctl_waitq);
 
                l_wait_event(thread->t_ctl_waitq,
                             thread_is_stopped(thread), &lwi);
index 61518d0..a615877 100644 (file)
@@ -87,7 +87,7 @@ static void qsd_upd_add(struct qsd_instance *qsd, struct qsd_upd_rec *upd)
        if (!qsd->qsd_stopping) {
                list_add_tail(&upd->qur_link, &qsd->qsd_upd_list);
                /* wake up the upd thread */
-               cfs_waitq_signal(&qsd->qsd_upd_thread.t_ctl_waitq);
+               wake_up(&qsd->qsd_upd_thread.t_ctl_waitq);
        } else {
                CWARN("%s: discard update.\n", qsd->qsd_svname);
                if (upd->qur_lqe)
@@ -352,7 +352,7 @@ void qsd_adjust_schedule(struct lquota_entry *lqe, bool defer, bool cancel)
        spin_unlock(&qsd->qsd_adjust_lock);
 
        if (added)
-               cfs_waitq_signal(&qsd->qsd_upd_thread.t_ctl_waitq);
+               wake_up(&qsd->qsd_upd_thread.t_ctl_waitq);
        else
                lqe_putref(lqe);
 }
@@ -435,7 +435,7 @@ static int qsd_upd_thread(void *arg)
        }
 
        thread_set_flags(thread, SVC_RUNNING);
-       cfs_waitq_signal(&thread->t_ctl_waitq);
+       wake_up(&thread->t_ctl_waitq);
 
        CFS_INIT_LIST_HEAD(&queue);
        lwi = LWI_TIMEOUT(cfs_time_seconds(QSD_WB_INTERVAL), NULL, NULL);
@@ -487,7 +487,7 @@ static int qsd_upd_thread(void *arg)
        lu_env_fini(env);
        OBD_FREE_PTR(env);
        thread_set_flags(thread, SVC_STOPPED);
-       cfs_waitq_signal(&thread->t_ctl_waitq);
+       wake_up(&thread->t_ctl_waitq);
        RETURN(rc);
 }
 
@@ -568,7 +568,7 @@ void qsd_stop_upd_thread(struct qsd_instance *qsd)
 
        if (!thread_is_stopped(thread)) {
                thread_set_flags(thread, SVC_STOPPING);
-               cfs_waitq_signal(&thread->t_ctl_waitq);
+               wake_up(&thread->t_ctl_waitq);
 
                l_wait_event(thread->t_ctl_waitq, thread_is_stopped(thread),
                             &lwi);
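
A final pattern shared by the producer paths above (rs_batch_dispatch(),
ptlrpc_dispatch_difficult_reply(), qsd_upd_add(), qsd_adjust_schedule()):
work is published to a list under a spinlock and the consumer thread is
then woken. A self-contained sketch with hypothetical demo_* names; the
consumer's condition is re-evaluated on every wakeup, so a racy emptiness
test only costs a harmless extra pass:

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/wait.h>

	struct demo_queue {
		spinlock_t		dq_lock;
		struct list_head	dq_items;
		wait_queue_head_t	dq_waitq;
	};

	static void demo_queue_init(struct demo_queue *q)
	{
		spin_lock_init(&q->dq_lock);
		INIT_LIST_HEAD(&q->dq_items);
		init_waitqueue_head(&q->dq_waitq);
	}

	/* Producer: publish under the lock, then wake the consumer. */
	static void demo_enqueue(struct demo_queue *q, struct list_head *item)
	{
		spin_lock(&q->dq_lock);
		list_add_tail(item, &q->dq_items);
		spin_unlock(&q->dq_lock);
		wake_up(&q->dq_waitq);
	}

	/* Consumer: sleep until work appears; dequeueing (not shown)
	 * must re-check and remove items under dq_lock. */
	static void demo_wait_for_work(struct demo_queue *q)
	{
		wait_event(q->dq_waitq, !list_empty(&q->dq_items));
	}
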