LU-1346 libcfs: cleanup waitq related primitives
author     Peng Tao <tao.peng@emc.com>
           Wed, 11 Sep 2013 17:01:45 +0000 (01:01 +0800)
committer  Oleg Drokin <oleg.drokin@intel.com>
           Fri, 13 Sep 2013 05:06:26 +0000 (05:06 +0000)
Plus some manual changes:

1. Remove the __wait_event_timeout definition
2. Convert cfs_waitq_wait_event_timeout and
   cfs_waitq_wait_event_interruptible_timeout to the native Linux
   kernel APIs (see the example below)
3. Replace some function definitions in linux-prim.c with macros in
   linux-prim.h
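
   As an illustration of item 2 (the caller shown here is hypothetical;
   the wrapper's signature is the one visible in the removed
   linux-prim.h block below):

       /* before: libcfs wrapper, result returned via an out parameter */
       cfs_waitq_wait_event_timeout(waitq, cond, timeout, rc);

       /* after: native kernel primitive, result is the return value */
       rc = wait_event_timeout(waitq, cond, timeout);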

Signed-off-by: Peng Tao <tao.peng@emc.com>
Signed-off-by: Liu Xuezhao <xuezhao.liu@emc.com>
Signed-off-by: James Simmons <uja.ornl@gmail.com>
Change-Id: I7e53f3deac9e4076e78c109662ff9d1e90239e8d
Reviewed-on: http://review.whamcloud.com/6955
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Keith Mannthey <keith.mannthey@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
147 files changed:
contrib/scripts/libcfs_cleanup.sed
libcfs/include/libcfs/darwin/darwin-prim.h
libcfs/include/libcfs/libcfs_fail.h
libcfs/include/libcfs/libcfs_prim.h
libcfs/include/libcfs/linux/linux-prim.h
libcfs/include/libcfs/lucache.h
libcfs/include/libcfs/user-lock.h
libcfs/include/libcfs/user-prim.h
libcfs/include/libcfs/winnt/winnt-prim.h
libcfs/libcfs/darwin/darwin-debug.c
libcfs/libcfs/darwin/darwin-prim.c
libcfs/libcfs/darwin/darwin-proc.c
libcfs/libcfs/darwin/darwin-sync.c
libcfs/libcfs/debug.c
libcfs/libcfs/fail.c
libcfs/libcfs/hash.c
libcfs/libcfs/linux/linux-cpu.c
libcfs/libcfs/linux/linux-prim.c
libcfs/libcfs/linux/linux-proc.c
libcfs/libcfs/lwt.c
libcfs/libcfs/module.c
libcfs/libcfs/tracefile.c
libcfs/libcfs/tracefile.h
libcfs/libcfs/upcall_cache.c
libcfs/libcfs/user-lock.c
libcfs/libcfs/user-prim.c
libcfs/libcfs/watchdog.c
libcfs/libcfs/winnt/winnt-curproc.c
libcfs/libcfs/winnt/winnt-prim.c
libcfs/libcfs/winnt/winnt-sync.c
libcfs/libcfs/workitem.c
lnet/include/lnet/lib-types.h
lnet/klnds/mxlnd/mxlnd_cb.c
lnet/klnds/o2iblnd/o2iblnd.c
lnet/klnds/o2iblnd/o2iblnd.h
lnet/klnds/o2iblnd/o2iblnd_cb.c
lnet/klnds/ptllnd/ptllnd.c
lnet/klnds/ptllnd/ptllnd.h
lnet/klnds/ptllnd/ptllnd_cb.c
lnet/klnds/ptllnd/ptllnd_peer.c
lnet/klnds/ptllnd/ptllnd_rx_buf.c
lnet/klnds/ptllnd/ptllnd_tx.c
lnet/klnds/qswlnd/qswlnd.c
lnet/klnds/qswlnd/qswlnd.h
lnet/klnds/qswlnd/qswlnd_cb.c
lnet/klnds/ralnd/ralnd.c
lnet/klnds/ralnd/ralnd.h
lnet/klnds/ralnd/ralnd_cb.c
lnet/klnds/socklnd/socklnd.c
lnet/klnds/socklnd/socklnd.h
lnet/klnds/socklnd/socklnd_cb.c
lnet/lnet/api-ni.c
lnet/lnet/lib-eq.c
lnet/lnet/router.c
lnet/selftest/conrpc.c
lnet/selftest/conrpc.h
lnet/selftest/rpc.c
lnet/selftest/timer.c
lustre/fid/fid_request.c
lustre/fld/fld_request.c
lustre/include/cl_object.h
lustre/include/liblustre.h
lustre/include/lu_object.h
lustre/include/lustre_dlm.h
lustre/include/lustre_fid.h
lustre/include/lustre_import.h
lustre/include/lustre_lib.h
lustre/include/lustre_log.h
lustre/include/lustre_mdc.h
lustre/include/lustre_net.h
lustre/include/obd.h
lustre/lclient/lcommon_cl.c
lustre/ldlm/ldlm_flock.c
lustre/ldlm/ldlm_lib.c
lustre/ldlm/ldlm_lock.c
lustre/ldlm/ldlm_lockd.c
lustre/ldlm/ldlm_pool.c
lustre/ldlm/ldlm_request.c
lustre/ldlm/ldlm_resource.c
lustre/lfsck/lfsck_engine.c
lustre/lfsck/lfsck_lib.c
lustre/llite/llite_capa.c
lustre/llite/llite_close.c
lustre/llite/llite_internal.h
lustre/llite/llite_lib.c
lustre/llite/lloop.c
lustre/llite/statahead.c
lustre/lod/lod_lov.c
lustre/lov/lov_cl_internal.h
lustre/lov/lov_internal.h
lustre/lov/lov_io.c
lustre/lov/lov_object.c
lustre/lov/lov_request.c
lustre/mdc/mdc_lib.c
lustre/mdc/mdc_request.c
lustre/mdt/mdt_capa.c
lustre/mdt/mdt_coordinator.c
lustre/mgc/mgc_request.c
lustre/mgs/mgs_internal.h
lustre/mgs/mgs_nids.c
lustre/obdclass/cl_io.c
lustre/obdclass/cl_lock.c
lustre/obdclass/cl_page.c
lustre/obdclass/genops.c
lustre/obdclass/llog_obd.c
lustre/obdclass/lprocfs_status.c
lustre/obdclass/lu_object.c
lustre/obdclass/obd_config.c
lustre/obdecho/echo.c
lustre/obdecho/echo_client.c
lustre/osc/osc_cache.c
lustre/osc/osc_cl_internal.h
lustre/osc/osc_internal.h
lustre/osc/osc_lock.c
lustre/osc/osc_page.c
lustre/osc/osc_request.c
lustre/osd-ldiskfs/osd_handler.c
lustre/osd-ldiskfs/osd_internal.h
lustre/osd-ldiskfs/osd_io.c
lustre/osd-ldiskfs/osd_scrub.c
lustre/osp/osp_dev.c
lustre/osp/osp_internal.h
lustre/osp/osp_precreate.c
lustre/osp/osp_sync.c
lustre/ost/ost_handler.c
lustre/ptlrpc/client.c
lustre/ptlrpc/events.c
lustre/ptlrpc/gss/gss_svc_upcall.c
lustre/ptlrpc/import.c
lustre/ptlrpc/niobuf.c
lustre/ptlrpc/pack_generic.c
lustre/ptlrpc/pinger.c
lustre/ptlrpc/ptlrpcd.c
lustre/ptlrpc/sec.c
lustre/ptlrpc/sec_bulk.c
lustre/ptlrpc/sec_gc.c
lustre/ptlrpc/service.c
lustre/quota/lquota_entry.c
lustre/quota/lquota_internal.h
lustre/quota/qmt_dev.c
lustre/quota/qmt_lock.c
lustre/quota/qsd_entry.c
lustre/quota/qsd_handler.c
lustre/quota/qsd_lib.c
lustre/quota/qsd_lock.c
lustre/quota/qsd_reint.c
lustre/quota/qsd_writeback.c

diff --git a/contrib/scripts/libcfs_cleanup.sed b/contrib/scripts/libcfs_cleanup.sed
index 12d5389..5f6af2b 100644
@@ -496,3 +496,50 @@ s/\bcfs_module_t\b/struct module/g
 # s/\bcfs_module\b/declare_module/g
 s/\bcfs_request_module\b/request_module/g
 /#[ \t]*define[ \t]*\brequest_module\b[ \t]*\brequest_module\b/d
+# Wait Queue
+s/\bCFS_TASK_INTERRUPTIBLE\b/TASK_INTERRUPTIBLE/g
+/#[ \t]*define[ \t]*\bTASK_INTERRUPTIBLE\b[ \t]*\bTASK_INTERRUPTIBLE\b/d
+s/\bCFS_TASK_UNINT\b/TASK_UNINTERRUPTIBLE/g
+/#[ \t]*define[ \t]*\bTASK_UNINTERRUPTIBLE\b[ \t]*\bTASK_UNINTERRUPTIBLE\b/d
+s/\bCFS_TASK_RUNNING\b/TASK_RUNNING/g
+/#[ \t]*define[ \t]*\bTASK_RUNNING\b[ \t]*\bTASK_RUNNING\b/d
+s/\bcfs_set_current_state\b/set_current_state/g
+/#[ \t]*define[ \t]*\bset_current_state\b *( *\w* *)[ \t]*\bset_current_state\b *( *\w* *)/d
+s/\bcfs_wait_event\b/wait_event/g
+/#[ \t]*define[ \t]*\bwait_event\b *( *\w* *, *\w* *)[ \t]*\bwait_event\b *( *\w* *, *\w* *)/d
+s/\bcfs_waitlink_t\b/wait_queue_t/g
+/typedef[ \t]*\bwait_queue_t\b[ \t]*\bwait_queue_t\b/d
+s/\bcfs_waitq_t\b/wait_queue_head_t/g
+/typedef[ \t]*\bwait_queue_head_t\b[ \t]*\bwait_queue_head_t\b/d
+#s/\bcfs_task_state_t\b/task_state_t/g
+s/\bcfs_waitq_init\b/init_waitqueue_head/g
+/#[ \t]*define[ \t]*\binit_waitqueue_head\b *( *\w* *)[ \t]*\binit_waitqueue_head\b *( *\w* *)/d
+s/\bcfs_waitlink_init\b/init_waitqueue_entry_current/g
+s/\bcfs_waitq_add\b/add_wait_queue/g
+/#[ \t]*define[ \t]*\badd_wait_queue\b *( *\w* *, *\w* *)[ \t]*\badd_wait_queue\b *( *\w* *, *\w* *)/d
+s/\bcfs_waitq_add_exclusive\b/add_wait_queue_exclusive/g
+/#[ \t]*define[ \t]*\badd_wait_queue_exclusive\b *( *\w* *, *\w* *)[ \t]*\badd_wait_queue_exclusive\b *( *\w* *, *\w* *)/d
+s/\bcfs_waitq_del\b/remove_wait_queue/g
+/#[ \t]*define[ \t]*\bremove_wait_queue\b *( *\w* *, *\w* *)[ \t]*\bremove_wait_queue\b *( *\w* *, *\w* *)/d
+s/\bcfs_waitq_active\b/waitqueue_active/g
+/#[ \t]*define[ \t]*\bwaitqueue_active\b *( *\w* *)[ \t]*\bwaitqueue_active\b *( *\w* *)/d
+s/\bcfs_waitq_signal\b/wake_up/g
+/#[ \t]*define[ \t]*\bwake_up\b *( *\w* *)[ \t]*\bwake_up\b *( *\w* *)/d
+s/\bcfs_waitq_signal_nr\b/wake_up_nr/g
+/#[ \t]*define[ \t]*\bwake_up_nr\b *( *\w* *, *\w* *)[ \t]*\bwake_up_nr\b *( *\w* *, *\w* *)/d
+s/\bcfs_waitq_broadcast\b/wake_up_all/g
+/#[ \t]*define[ \t]*\bwake_up_all\b *( *\w* *)[ \t]*\bwake_up_all\b *( *\w* *)/d
+s/\bcfs_waitq_wait\b/waitq_wait/g
+s/\bcfs_waitq_timedwait\b/waitq_timedwait/g
+s/\bcfs_schedule_timeout\b/schedule_timeout/g
+/#[ \t]*define[ \t]*\bschedule_timeout\b *( *\w* *)[ \t]*\bschedule_timeout\b *( *\w* *)/d
+s/\bcfs_schedule\b/schedule/g
+/#[ \t]*define[ \t]*\bschedule\b *( *)[ \t]*\bschedule\b *( *)/d
+s/\bcfs_need_resched\b/need_resched/g
+/#[ \t]*define[ \t]*\bneed_resched\b *( *)[ \t]*\bneed_resched\b *( *)/d
+s/\bcfs_cond_resched\b/cond_resched/g
+/#[ \t]*define[ \t]*\bcond_resched\b *( *)[ \t]*\bcond_resched\b *( *)/d
+s/\bcfs_waitq_add_exclusive_head\b/add_wait_queue_exclusive_head/g
+s/\bcfs_schedule_timeout_and_set_state\b/schedule_timeout_and_set_state/g
+s/\bCFS_MAX_SCHEDULE_TIMEOUT\b/MAX_SCHEDULE_TIMEOUT/g
+s/\bcfs_task_state_t\b/long/g
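
Each rename rule above is paired with a /.../d expression that deletes the
old compatibility #define once the substitution has turned it into a
self-referential "#define wake_up(wq) wake_up(wq)". The net effect on a
hypothetical caller (identifiers invented for illustration):

        /* before running contrib/scripts/libcfs_cleanup.sed */
        cfs_waitq_init(&conn->c_waitq);
        cfs_waitq_signal(&conn->c_waitq);

        /* after: native Linux wait-queue API */
        init_waitqueue_head(&conn->c_waitq);
        wake_up(&conn->c_waitq);
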
diff --git a/libcfs/include/libcfs/darwin/darwin-prim.h b/libcfs/include/libcfs/darwin/darwin-prim.h
index d2118e7..d3c5410 100644
@@ -214,34 +214,32 @@ extern cfs_task_t kthread_run(cfs_thread_t func, void *arg,
  */
 typedef struct cfs_waitq {
        struct ksleep_chan wq_ksleep_chan;
-} cfs_waitq_t;
+} wait_queue_head_t;
 
 typedef struct cfs_waitlink {
        struct cfs_waitq   *wl_waitq;
        struct ksleep_link  wl_ksleep_link;
-} cfs_waitlink_t;
+} wait_queue_t;
 
-typedef int cfs_task_state_t;
+#define TASK_INTERRUPTIBLE     THREAD_ABORTSAFE
+#define TASK_UNINTERRUPTIBLE           THREAD_UNINT
 
-#define CFS_TASK_INTERRUPTIBLE THREAD_ABORTSAFE
-#define CFS_TASK_UNINT         THREAD_UNINT
+void init_waitqueue_head(struct cfs_waitq *waitq);
+void init_waitqueue_entry_current(struct cfs_waitlink *link);
 
-void cfs_waitq_init(struct cfs_waitq *waitq);
-void cfs_waitlink_init(struct cfs_waitlink *link);
-
-void cfs_waitq_add(struct cfs_waitq *waitq, struct cfs_waitlink *link);
-void cfs_waitq_add_exclusive(struct cfs_waitq *waitq,
+void add_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+void add_wait_queue_exclusive(struct cfs_waitq *waitq,
                             struct cfs_waitlink *link);
-void cfs_waitq_del(struct cfs_waitq *waitq, struct cfs_waitlink *link);
-int  cfs_waitq_active(struct cfs_waitq *waitq);
+void remove_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+int  waitqueue_active(struct cfs_waitq *waitq);
 
-void cfs_waitq_signal(struct cfs_waitq *waitq);
-void cfs_waitq_signal_nr(struct cfs_waitq *waitq, int nr);
-void cfs_waitq_broadcast(struct cfs_waitq *waitq);
+void wake_up(struct cfs_waitq *waitq);
+void wake_up_nr(struct cfs_waitq *waitq, int nr);
+void wake_up_all(struct cfs_waitq *waitq);
 
-void cfs_waitq_wait(struct cfs_waitlink *link, cfs_task_state_t state);
-cfs_duration_t cfs_waitq_timedwait(struct cfs_waitlink *link,
-                                  cfs_task_state_t state, 
+void waitq_wait(struct cfs_waitlink *link, long state);
+cfs_duration_t waitq_timedwait(struct cfs_waitlink *link,
+                                  long state,
                                   cfs_duration_t timeout);
 
 /*
@@ -251,7 +249,7 @@ cfs_duration_t cfs_waitq_timedwait(struct cfs_waitlink *link,
 extern void thread_set_timer_deadline(__u64 deadline);
 extern void thread_cancel_timer(void);
 
-static inline int cfs_schedule_timeout(int state, int64_t timeout)
+static inline int schedule_timeout(int state, int64_t timeout)
 {
        int          result;
        
@@ -277,22 +275,22 @@ static inline int cfs_schedule_timeout(int state, int64_t timeout)
        return result;
 }
 
-#define cfs_schedule() cfs_schedule_timeout(CFS_TASK_UNINT, CFS_TICK)
-#define cfs_pause(tick)        cfs_schedule_timeout(CFS_TASK_UNINT, tick)
+#define schedule()     schedule_timeout(TASK_UNINTERRUPTIBLE, CFS_TICK)
+#define cfs_pause(tick)        schedule_timeout(TASK_UNINTERRUPTIBLE, tick)
 
 #define __wait_event(wq, condition)                            \
 do {                                                           \
        struct cfs_waitlink __wait;                             \
                                                                \
-       cfs_waitlink_init(&__wait);                             \
+       init_waitqueue_entry_current(&__wait);                  \
        for (;;) {                                              \
-               cfs_waitq_add(&wq, &__wait);                    \
+               add_wait_queue(&wq, &__wait);                   \
                if (condition)                                  \
                        break;                                  \
-               cfs_waitq_wait(&__wait, CFS_TASK_UNINT);        \
-               cfs_waitq_del(&wq, &__wait);                    \
+               waitq_wait(&__wait, TASK_UNINTERRUPTIBLE);      \
+               remove_wait_queue(&wq, &__wait);                \
        }                                                       \
-       cfs_waitq_del(&wq, &__wait);                            \
+       remove_wait_queue(&wq, &__wait);                        \
 } while (0)
 
 #define wait_event(wq, condition)                              \
@@ -306,24 +304,24 @@ do {                                                              \
 do {                                                           \
        struct cfs_waitlink __wait;                             \
                                                                \
-       cfs_waitlink_init(&__wait);                             \
+       init_waitqueue_entry_current(&__wait);                  \
        for (;;) {                                              \
                if (ex == 0)                                    \
-                       cfs_waitq_add(&wq, &__wait);            \
+                       add_wait_queue(&wq, &__wait);           \
                else                                            \
-                       cfs_waitq_add_exclusive(&wq, &__wait);  \
+                       add_wait_queue_exclusive(&wq, &__wait); \
                if (condition)                                  \
                        break;                                  \
                if (!cfs_signal_pending()) {                    \
-                       cfs_waitq_wait(&__wait,                 \
-                                      CFS_TASK_INTERRUPTIBLE); \
-                       cfs_waitq_del(&wq, &__wait);            \
+                       waitq_wait(&__wait,                     \
+                                      TASK_INTERRUPTIBLE);     \
+                       remove_wait_queue(&wq, &__wait);        \
                        continue;                               \
                }                                               \
                ret = -ERESTARTSYS;                             \
                break;                                          \
        }                                                       \
-       cfs_waitq_del(&wq, &__wait);                            \
+       remove_wait_queue(&wq, &__wait);                        \
 } while (0)
 
 #define wait_event_interruptible(wq, condition)                        \
@@ -354,14 +352,14 @@ extern void       wakeup_one __P((void * chan));
        } while (0)
        
 /* used in couple of places */
-static inline void sleep_on(cfs_waitq_t *waitq)
+static inline void sleep_on(wait_queue_head_t *waitq)
 {
-       cfs_waitlink_t link;
+       wait_queue_t link;
        
-       cfs_waitlink_init(&link);
-       cfs_waitq_add(waitq, &link);
-       cfs_waitq_wait(&link, CFS_TASK_UNINT);
-       cfs_waitq_del(waitq, &link);
+       init_waitqueue_entry_current(&link);
+       add_wait_queue(waitq, &link);
+       waitq_wait(&link, TASK_UNINTERRUPTIBLE);
+       remove_wait_queue(waitq, &link);
 }
 
 /*
diff --git a/libcfs/include/libcfs/libcfs_fail.h b/libcfs/include/libcfs/libcfs_fail.h
index 19ade49..89d0b97 100644
@@ -39,7 +39,7 @@
 extern unsigned long cfs_fail_loc;
 extern unsigned int cfs_fail_val;
 
-extern cfs_waitq_t cfs_race_waitq;
+extern wait_queue_head_t cfs_race_waitq;
 extern int cfs_race_state;
 
 int __cfs_fail_check_set(__u32 id, __u32 value, int set);
@@ -150,21 +150,20 @@ static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
  * the first and continues. */
 static inline void cfs_race(__u32 id)
 {
-
-        if (CFS_FAIL_PRECHECK(id)) {
-                if (unlikely(__cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET))) {
-                        int rc;
-                        cfs_race_state = 0;
-                        CERROR("cfs_race id %x sleeping\n", id);
+       if (CFS_FAIL_PRECHECK(id)) {
+               if (unlikely(__cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET))) {
+                       int rc;
+                       cfs_race_state = 0;
+                       CERROR("cfs_race id %x sleeping\n", id);
                        rc = wait_event_interruptible(cfs_race_waitq,
                                                      cfs_race_state != 0);
-                        CERROR("cfs_fail_race id %x awake, rc=%d\n", id, rc);
-                } else {
-                        CERROR("cfs_fail_race id %x waking\n", id);
-                        cfs_race_state = 1;
-                        cfs_waitq_signal(&cfs_race_waitq);
-                }
-        }
+                       CERROR("cfs_fail_race id %x awake, rc=%d\n", id, rc);
+               } else {
+                       CERROR("cfs_fail_race id %x waking\n", id);
+                       cfs_race_state = 1;
+                       wake_up(&cfs_race_waitq);
+               }
+       }
 }
 #define CFS_RACE(id) cfs_race(id)
 #else
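
As cfs_race() above shows, the primitive synchronizes two racing call sites
that pass the same fail id: the first thread to arrive sets cfs_race_state
to 0 and sleeps on cfs_race_waitq; the second sets cfs_race_state to 1 and
wakes it, forcing a deterministic ordering for the race under test. A
minimal usage sketch (the fail id and the two call sites are hypothetical):

        /* thread A -- first to reach the fail point, blocks */
        CFS_RACE(OBD_FAIL_SOME_RACE);

        /* thread B -- second to reach it, wakes thread A */
        CFS_RACE(OBD_FAIL_SOME_RACE);
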
diff --git a/libcfs/include/libcfs/libcfs_prim.h b/libcfs/include/libcfs/libcfs_prim.h
index 497b98a..49254df 100644
 #define __LIBCFS_PRIM_H__
 
 /*
- * Schedule
- */
-void cfs_schedule_timeout_and_set_state(cfs_task_state_t state,
-                                        int64_t timeout);
-void cfs_schedule_timeout(int64_t timeout);
-void cfs_schedule(void);
-void cfs_pause(cfs_duration_t ticks);
-int  cfs_need_resched(void);
-void cfs_cond_resched(void);
-
-/*
  * Wait Queues
  */
-void cfs_waitq_init(cfs_waitq_t *waitq);
-void cfs_waitlink_init(cfs_waitlink_t *link);
-void cfs_waitq_add(cfs_waitq_t *waitq, cfs_waitlink_t *link);
-void cfs_waitq_add_exclusive(cfs_waitq_t *waitq,
-                             cfs_waitlink_t *link);
-void cfs_waitq_add_exclusive_head(cfs_waitq_t *waitq,
-                                  cfs_waitlink_t *link);
-void cfs_waitq_del(cfs_waitq_t *waitq, cfs_waitlink_t *link);
-int  cfs_waitq_active(cfs_waitq_t *waitq);
-void cfs_waitq_signal(cfs_waitq_t *waitq);
-void cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr);
-void cfs_waitq_broadcast(cfs_waitq_t *waitq);
-void cfs_waitq_wait(cfs_waitlink_t *link, cfs_task_state_t state);
-int64_t cfs_waitq_timedwait(cfs_waitlink_t *link, cfs_task_state_t state, 
-                           int64_t timeout);
-
 /*
  * Timer
  */
diff --git a/libcfs/include/libcfs/linux/linux-prim.h b/libcfs/include/libcfs/linux/linux-prim.h
index 75c064f..0d7047a 100644
@@ -107,19 +107,59 @@ typedef struct proc_dir_entry           cfs_proc_dir_entry_t;
 /*
  * Wait Queue
  */
-#define CFS_TASK_INTERRUPTIBLE          TASK_INTERRUPTIBLE
-#define CFS_TASK_UNINT                  TASK_UNINTERRUPTIBLE
-#define CFS_TASK_RUNNING                TASK_RUNNING
 
-#define cfs_set_current_state(state)    set_current_state(state)
-#define cfs_wait_event(wq, cond)        wait_event(wq, cond)
-
-typedef wait_queue_t                   cfs_waitlink_t;
-typedef wait_queue_head_t              cfs_waitq_t;
-typedef long                            cfs_task_state_t;
 
 #define CFS_DECL_WAITQ(wq)             DECLARE_WAIT_QUEUE_HEAD(wq)
 
+#define LIBCFS_WQITQ_MACROS           1
+#define init_waitqueue_entry_current(w)          init_waitqueue_entry(w, current)
+#define waitq_wait(w, s)          schedule()
+#define waitq_timedwait(w, s, t)  schedule_timeout(t)
+
+#ifndef HAVE___ADD_WAIT_QUEUE_EXCLUSIVE
+static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
+                                             wait_queue_t *wait)
+{
+       wait->flags |= WQ_FLAG_EXCLUSIVE;
+       __add_wait_queue(q, wait);
+}
+#endif /* HAVE___ADD_WAIT_QUEUE_EXCLUSIVE */
+
+/**
+ * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
+ * waiting threads, which is not always desirable because all threads will
+ * be waken up again and again, even user only needs a few of them to be
+ * active most time. This is not good for performance because cache can
+ * be polluted by different threads.
+ *
+ * LIFO list can resolve this problem because we always wakeup the most
+ * recent active thread by default.
+ *
+ * NB: please don't call non-exclusive & exclusive wait on the same
+ * waitq if add_wait_queue_exclusive_head is used.
+ */
+#define add_wait_queue_exclusive_head(waitq, link)                     \
+{                                                                      \
+       unsigned long flags;                                            \
+                                                                       \
+       spin_lock_irqsave(&((waitq)->lock), flags);                     \
+       __add_wait_queue_exclusive(waitq, link);                        \
+       spin_unlock_irqrestore(&((waitq)->lock), flags);                \
+}
+
+#define schedule_timeout_and_set_state(state, timeout)                 \
+{                                                                      \
+       set_current_state(state);                                       \
+       schedule_timeout(timeout);                                      \
+}
+
+/* deschedule for a bit... */
+#define cfs_pause(ticks)                                               \
+{                                                                      \
+       set_current_state(TASK_UNINTERRUPTIBLE);                        \
+       schedule_timeout(ticks);                                        \
+}
+
 /*
  * Task struct
  */
@@ -151,51 +191,6 @@ typedef sigset_t                        cfs_sigset_t;
  */
 typedef struct timer_list cfs_timer_t;
 
-#define CFS_MAX_SCHEDULE_TIMEOUT MAX_SCHEDULE_TIMEOUT
-
-#ifndef wait_event_timeout /* Only for RHEL3 2.4.21 kernel */
-#define __wait_event_timeout(wq, condition, timeout, ret)        \
-do {                                                             \
-       int __ret = 0;                                           \
-       if (!(condition)) {                                      \
-               wait_queue_t __wait;                             \
-               unsigned long expire;                            \
-                                                                 \
-               init_waitqueue_entry(&__wait, current);          \
-               expire = timeout + jiffies;                      \
-               add_wait_queue(&wq, &__wait);                    \
-               for (;;) {                                       \
-                       set_current_state(TASK_UNINTERRUPTIBLE); \
-                       if (condition)                           \
-                               break;                           \
-                       if (jiffies > expire) {                  \
-                               ret = jiffies - expire;          \
-                               break;                           \
-                       }                                        \
-                       schedule_timeout(timeout);               \
-               }                                                \
-               current->state = TASK_RUNNING;                   \
-               remove_wait_queue(&wq, &__wait);                 \
-       }                                                        \
-} while (0)
-/*
-   retval == 0; condition met; we're good.
-   retval > 0; timed out.
-*/
-#define cfs_waitq_wait_event_timeout(wq, condition, timeout, ret)    \
-do {                                                                 \
-       ret = 0;                                                     \
-       if (!(condition))                                            \
-               __wait_event_timeout(wq, condition, timeout, ret);   \
-} while (0)
-#else
-#define cfs_waitq_wait_event_timeout(wq, condition, timeout, ret)    \
-        ret = wait_event_timeout(wq, condition, timeout)
-#endif
-
-#define cfs_waitq_wait_event_interruptible_timeout(wq, c, timeout, ret) \
-        ret = wait_event_interruptible_timeout(wq, c, timeout)
-
 /*
  * atomic
  */
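
A minimal sketch of how add_wait_queue_exclusive_head() above is meant to
be used (hypothetical kernel-style code, assuming only the definitions from
this header): a pool of service threads parks on one queue, and because
each exclusive waiter is inserted at the head, wake_up() picks the most
recently idled thread, the one most likely to still have a warm cache:

        static wait_queue_head_t svc_waitq;  /* initialized elsewhere with
                                              * init_waitqueue_head() */

        static void svc_thread_idle(void)
        {
                wait_queue_t wait;

                init_waitqueue_entry_current(&wait);
                add_wait_queue_exclusive_head(&svc_waitq, &wait);
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();     /* woken LIFO by wake_up(&svc_waitq) */
                set_current_state(TASK_RUNNING);
                remove_wait_queue(&svc_waitq, &wait);
        }
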
diff --git a/libcfs/include/libcfs/lucache.h b/libcfs/include/libcfs/lucache.h
index d9a285b..6904315 100644
@@ -83,16 +83,16 @@ struct md_identity {
 };
 
 struct upcall_cache_entry {
-        cfs_list_t              ue_hash;
-        __u64                   ue_key;
-        cfs_atomic_t            ue_refcount;
-        int                     ue_flags;
-        cfs_waitq_t             ue_waitq;
-        cfs_time_t              ue_acquire_expire;
-        cfs_time_t              ue_expire;
-        union {
-                struct md_identity     identity;
-        } u;
+       cfs_list_t              ue_hash;
+       __u64                   ue_key;
+       cfs_atomic_t            ue_refcount;
+       int                     ue_flags;
+       wait_queue_head_t       ue_waitq;
+       cfs_time_t              ue_acquire_expire;
+       cfs_time_t              ue_expire;
+       union {
+               struct md_identity     identity;
+       } u;
 };
 
 #define UC_CACHE_HASH_SIZE        (128)
diff --git a/libcfs/include/libcfs/user-lock.h b/libcfs/include/libcfs/user-lock.h
index 8c6b27a..0cac240 100644
@@ -157,7 +157,7 @@ struct completion {
 
 struct completion {
        unsigned int    done;
-       cfs_waitq_t     wait;
+       wait_queue_head_t       wait;
 };
 #endif /* HAVE_LIBPTHREAD */
 
diff --git a/libcfs/include/libcfs/user-prim.h b/libcfs/include/libcfs/user-prim.h
index 5885728..f8fff19 100644
@@ -78,25 +78,39 @@ typedef struct proc_dir_entry           cfs_proc_dir_entry_t;
 typedef struct cfs_waitlink {
         cfs_list_t sleeping;
         void *process;
-} cfs_waitlink_t;
+} wait_queue_t;
 
 typedef struct cfs_waitq {
         cfs_list_t sleepers;
-} cfs_waitq_t;
-
-#define CFS_DECL_WAITQ(wq) cfs_waitq_t wq
+} wait_queue_head_t;
+
+#define CFS_DECL_WAITQ(wq) wait_queue_head_t wq
+void init_waitqueue_head(struct cfs_waitq *waitq);
+void init_waitqueue_entry_current(struct cfs_waitlink *link);
+void add_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+void add_wait_queue_exclusive(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+void add_wait_queue_exclusive_head(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+void remove_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+int waitqueue_active(struct cfs_waitq *waitq);
+void wake_up(struct cfs_waitq *waitq);
+void wake_up_nr(struct cfs_waitq *waitq, int nr);
+void wake_up_all(struct cfs_waitq *waitq);
+void waitq_wait(struct cfs_waitlink *link, long state);
+int64_t waitq_timedwait(struct cfs_waitlink *link, long state, int64_t timeout);
+void schedule_timeout_and_set_state(long state, int64_t timeout);
+void cfs_pause(cfs_duration_t d);
+int need_resched(void);
+void cond_resched(void);
 
 /*
  * Task states
  */
-typedef long cfs_task_state_t;
-
-#define CFS_TASK_INTERRUPTIBLE  (0)
-#define CFS_TASK_UNINT          (1)
-#define CFS_TASK_RUNNING        (2)
+#define TASK_INTERRUPTIBLE  (0)
+#define TASK_UNINTERRUPTIBLE          (1)
+#define TASK_RUNNING        (2)
 
-static inline void cfs_schedule(void)                  {}
-static inline void cfs_schedule_timeout(int64_t t)     {}
+static inline void schedule(void)                      {}
+static inline void schedule_timeout(int64_t t) {}
 
 /*
  * Lproc
diff --git a/libcfs/include/libcfs/winnt/winnt-prim.h b/libcfs/include/libcfs/winnt/winnt-prim.h
index c1b487d..cc8a3dc 100644
@@ -374,12 +374,10 @@ size_t lustre_write_file(struct file *fh, loff_t off, size_t size, char *buf);
  */
 
 
-typedef int cfs_task_state_t;
-
-#define CFS_TASK_INTERRUPTIBLE  0x00000001
-#define CFS_TASK_UNINT          0x00000002
-#define CFS_TASK_RUNNING         0x00000003
-#define CFS_TASK_UNINTERRUPTIBLE CFS_TASK_UNINT
+#define TASK_INTERRUPTIBLE      0x00000001
+#define TASK_UNINTERRUPTIBLE            0x00000002
+#define TASK_RUNNING         0x00000003
+#define CFS_TASK_UNINTERRUPTIBLE TASK_UNINTERRUPTIBLE
 
 #define CFS_WAITQ_MAGIC     'CWQM'
 #define CFS_WAITLINK_MAGIC  'CWLM'
@@ -391,10 +389,10 @@ typedef struct cfs_waitq {
        spinlock_t              guard;
        cfs_list_t              waiters;
 
-} cfs_waitq_t;
+} wait_queue_head_t;
 
 
-typedef struct cfs_waitlink cfs_waitlink_t;
+typedef struct cfs_waitlink wait_queue_t;
 
 #define CFS_WAITQ_CHANNELS     (2)
 
@@ -405,8 +403,8 @@ typedef struct cfs_waitlink cfs_waitlink_t;
 
 typedef struct cfs_waitlink_channel {
     cfs_list_t              link;
-    cfs_waitq_t *           waitq;
-    cfs_waitlink_t *        waitl;
+    wait_queue_head_t *           waitq;
+    wait_queue_t *        waitl;
 } cfs_waitlink_channel_t;
 
 struct cfs_waitlink {
@@ -423,7 +421,7 @@ enum {
        CFS_WAITQ_EXCLUSIVE = 1
 };
 
-#define CFS_DECL_WAITQ(name) cfs_waitq_t name
+#define CFS_DECL_WAITQ(name) wait_queue_head_t name
 
 /* Kernel thread */
 
@@ -540,8 +538,8 @@ typedef __u32 kernel_cap_t;
  * Task struct
  */
 
-#define CFS_MAX_SCHEDULE_TIMEOUT     ((long_ptr_t)(~0UL>>12))
-#define cfs_schedule_timeout(t)      cfs_schedule_timeout_and_set_state(0, t)
+#define MAX_SCHEDULE_TIMEOUT     ((long_ptr_t)(~0UL>>12))
+#define schedule_timeout(t)      schedule_timeout_and_set_state(0, t)
 
 struct vfsmount;
 
@@ -621,40 +619,39 @@ typedef struct _TASK_SLOT {
 
 
 #define current                      cfs_current()
-#define cfs_set_current_state(s)     do {;} while (0)
-#define cfs_set_current_state(state) cfs_set_current_state(state)
+#define set_current_state(s)     do {;} while (0)
 
-#define cfs_wait_event(wq, condition)                           \
+#define wait_event(wq, condition)                           \
 do {                                                            \
-        cfs_waitlink_t __wait;                                  \
-                                                                \
-        cfs_waitlink_init(&__wait);                             \
-        while (TRUE) {                                          \
-            cfs_waitq_add(&wq, &__wait);                        \
-            if (condition) {                                    \
-                break;                                          \
-            }                                                   \
-            cfs_waitq_wait(&__wait, CFS_TASK_INTERRUPTIBLE);    \
-            cfs_waitq_del(&wq, &__wait);                       \
-        }                                                      \
-        cfs_waitq_del(&wq, &__wait);                           \
+       wait_queue_t __wait;                                    \
+                                                               \
+       init_waitqueue_entry_current(&__wait);                  \
+       while (TRUE) {                                          \
+           add_wait_queue(&wq, &__wait);                        \
+           if (condition) {                                    \
+               break;                                          \
+           }                                                   \
+           waitq_wait(&__wait, TASK_INTERRUPTIBLE);            \
+           remove_wait_queue(&wq, &__wait);                    \
+       }                                                       \
+       remove_wait_queue(&wq, &__wait);                        \
 } while(0)
 
 #define wait_event_interruptible(wq, condition)                 \
 {                                                               \
-       cfs_waitlink_t __wait;                                  \
+       wait_queue_t __wait;                                    \
                                                                \
        __ret = 0;                                              \
-       cfs_waitlink_init(&__wait);                             \
+       init_waitqueue_entry_current(&__wait);                             \
        while (TRUE) {                                          \
-               cfs_waitq_add(&wq, &__wait);                    \
+               add_wait_queue(&wq, &__wait);                   \
                if (condition) {                                \
                        break;                                  \
                }                                               \
-               cfs_waitq_wait(&__wait, CFS_TASK_INTERRUPTIBLE);\
-               cfs_waitq_del(&wq, &__wait);                    \
+               waitq_wait(&__wait, TASK_INTERRUPTIBLE);\
+               remove_wait_queue(&wq, &__wait);                        \
        }                                                       \
-       cfs_waitq_del(&wq, &__wait);                            \
+       remove_wait_queue(&wq, &__wait);                            \
        __ret;                                                  \
 }
 
@@ -667,37 +664,30 @@ do {                                                            \
    retval > 0; timed out.
 */
 
-#define cfs_waitq_wait_event_interruptible_timeout(             \
-                        wq, condition, timeout, rc)             \
+#define wait_event_interruptible_timeout(wq, condition, timeout)\
 do {                                                            \
-        cfs_waitlink_t __wait;                                  \
-                                                                \
-        rc = 0;                                                 \
-        cfs_waitlink_init(&__wait);                            \
-        while (TRUE) {                                          \
-            cfs_waitq_add(&wq, &__wait);                        \
-            if (condition) {                                    \
-                break;                                          \
-            }                                                   \
-            if (cfs_waitq_timedwait(&__wait,                    \
-                CFS_TASK_INTERRUPTIBLE, timeout) == 0) {        \
-                rc = TRUE;                                      \
-                break;                                          \
-            }                                                   \
-            cfs_waitq_del(&wq, &__wait);                       \
-        }                                                      \
-        cfs_waitq_del(&wq, &__wait);                           \
+       wait_queue_t __wait;                                    \
+                                                               \
+       init_waitqueue_entry_current(&__wait);                  \
+       while (TRUE) {                                          \
+           add_wait_queue(&wq, &__wait);                       \
+           if (condition) {                                    \
+               break;                                          \
+           }                                                   \
+           if (waitq_timedwait(&__wait,                        \
+               TASK_INTERRUPTIBLE, timeout) == 0) {            \
+               break;                                          \
+           }                                                   \
+           remove_wait_queue(&wq, &__wait);                    \
+       }                                                       \
+       remove_wait_queue(&wq, &__wait);                        \
 } while(0)
 
-
-#define cfs_waitq_wait_event_timeout                            \
-        cfs_waitq_wait_event_interruptible_timeout
-
 int     init_task_manager();
 void    cleanup_task_manager();
 cfs_task_t * cfs_current();
 int     wake_up_process(cfs_task_t * task);
-void sleep_on(cfs_waitq_t *waitq);
+void sleep_on(wait_queue_head_t *waitq);
 #define cfs_might_sleep() do {} while(0)
 #define CFS_DECL_JOURNAL_DATA  
 #define CFS_PUSH_JOURNAL           do {;} while(0)
diff --git a/libcfs/libcfs/darwin/darwin-debug.c b/libcfs/libcfs/darwin/darwin-debug.c
index bcf477a..7bf0da6 100644
@@ -50,13 +50,13 @@ void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *msgdata)
 
 void lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
 {
-        libcfs_catastrophe = 1;
-        CEMERG("LBUG: pid: %u thread: %#x\n",
+       libcfs_catastrophe = 1;
+       CEMERG("LBUG: pid: %u thread: %#x\n",
               (unsigned)current_pid(), (unsigned)current_thread());
-        libcfs_debug_dumplog();
-        libcfs_run_lbug_upcall(msgdata);
-        while (1)
-                cfs_schedule();
+       libcfs_debug_dumplog();
+       libcfs_run_lbug_upcall(msgdata);
+       while (1)
+               schedule();
 
        /* panic("lbug_with_loc(%s, %s, %d)", file, func, line) */
 }
diff --git a/libcfs/libcfs/darwin/darwin-prim.c b/libcfs/libcfs/darwin/darwin-prim.c
index e91dfab..26ceb58 100644
@@ -235,7 +235,7 @@ struct kernel_thread_arg cfs_thread_arg;
                        break;                                  \
                }                                               \
                spin_unlock(&(pta)->lock);                      \
-               cfs_schedule();                                 \
+               schedule();                                     \
        } while(1);                                             \
 
 /*
@@ -257,7 +257,7 @@ struct kernel_thread_arg cfs_thread_arg;
                        break;                                  \
                }                                               \
                spin_unlock(&(pta)->lock);                      \
-               cfs_schedule();                                 \
+               schedule();                                     \
        } while(1)
 
 /*
@@ -276,7 +276,7 @@ struct kernel_thread_arg cfs_thread_arg;
                        break;                                  \
                }                                               \
                spin_unlock(&(pta)->lock);                      \
-               cfs_schedule();                                 \
+               schedule();                                     \
        } while (1);                                            \
 
 /*
@@ -460,42 +460,42 @@ void lustre_net_ex(boolean_t state, funnel_t *cone)
 }
 #endif /* !__DARWIN8__ */
 
-void cfs_waitq_init(struct cfs_waitq *waitq)
+void init_waitqueue_head(struct cfs_waitq *waitq)
 {
        ksleep_chan_init(&waitq->wq_ksleep_chan);
 }
 
-void cfs_waitlink_init(struct cfs_waitlink *link)
+void init_waitqueue_entry_current(struct cfs_waitlink *link)
 {
        ksleep_link_init(&link->wl_ksleep_link);
 }
 
-void cfs_waitq_add(struct cfs_waitq *waitq, struct cfs_waitlink *link)
+void add_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link)
 {
-        link->wl_waitq = waitq;
+       link->wl_waitq = waitq;
        ksleep_add(&waitq->wq_ksleep_chan, &link->wl_ksleep_link);
 }
 
-void cfs_waitq_add_exclusive(struct cfs_waitq *waitq,
-                             struct cfs_waitlink *link)
+void add_wait_queue_exclusive(struct cfs_waitq *waitq,
+                             struct cfs_waitlink *link)
 {
-        link->wl_waitq = waitq;
+       link->wl_waitq = waitq;
        link->wl_ksleep_link.flags |= KSLEEP_EXCLUSIVE;
        ksleep_add(&waitq->wq_ksleep_chan, &link->wl_ksleep_link);
 }
 
-void cfs_waitq_del(struct cfs_waitq *waitq,
+void remove_wait_queue(struct cfs_waitq *waitq,
                    struct cfs_waitlink *link)
 {
        ksleep_del(&waitq->wq_ksleep_chan, &link->wl_ksleep_link);
 }
 
-int cfs_waitq_active(struct cfs_waitq *waitq)
+int waitqueue_active(struct cfs_waitq *waitq)
 {
        return (1);
 }
 
-void cfs_waitq_signal(struct cfs_waitq *waitq)
+void wake_up(struct cfs_waitq *waitq)
 {
        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
@@ -504,23 +504,23 @@ void cfs_waitq_signal(struct cfs_waitq *waitq)
        ksleep_wake(&waitq->wq_ksleep_chan);
 }
 
-void cfs_waitq_signal_nr(struct cfs_waitq *waitq, int nr)
+void wake_up_nr(struct cfs_waitq *waitq, int nr)
 {
        ksleep_wake_nr(&waitq->wq_ksleep_chan, nr);
 }
 
-void cfs_waitq_broadcast(struct cfs_waitq *waitq)
+void wake_up_all(struct cfs_waitq *waitq)
 {
        ksleep_wake_all(&waitq->wq_ksleep_chan);
 }
 
-void cfs_waitq_wait(struct cfs_waitlink *link, cfs_task_state_t state)
+void waitq_wait(struct cfs_waitlink *link, long state)
 {
-        ksleep_wait(&link->wl_waitq->wq_ksleep_chan, state);
+       ksleep_wait(&link->wl_waitq->wq_ksleep_chan, state);
 }
 
-cfs_duration_t  cfs_waitq_timedwait(struct cfs_waitlink *link,
-                                    cfs_task_state_t state,
+cfs_duration_t  waitq_timedwait(struct cfs_waitlink *link,
+                                   long state,
                                     cfs_duration_t timeout)
 {
         return ksleep_timedwait(&link->wl_waitq->wq_ksleep_chan, 
diff --git a/libcfs/libcfs/darwin/darwin-proc.c b/libcfs/libcfs/darwin/darwin-proc.c
index 1780f5b..aacc66b 100644
@@ -144,21 +144,21 @@ static int sysctl_debug_mb SYSCTL_HANDLER_ARGS
 
 static int proc_fail_loc SYSCTL_HANDLER_ARGS
 {
-        int error = 0;
-        long old_fail_loc = cfs_fail_loc;
-
-        error = sysctl_handle_long(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
-        if (!error && req->newptr != USER_ADDR_NULL) {
-                if (old_fail_loc != cfs_fail_loc)
-                        cfs_waitq_signal(&cfs_race_waitq);
-        } else  if (req->newptr != USER_ADDR_NULL) {
-                /* Something was wrong with the write request */
-                printf ("sysctl fail loc fault: %d.\n", error);
-        } else {
-                /* Read request */
-                error = SYSCTL_OUT(req, &cfs_fail_loc, sizeof cfs_fail_loc);
-        }
-        return error;
+       int error = 0;
+       long old_fail_loc = cfs_fail_loc;
+
+       error = sysctl_handle_long(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
+       if (!error && req->newptr != USER_ADDR_NULL) {
+               if (old_fail_loc != cfs_fail_loc)
+                       wake_up(&cfs_race_waitq);
+       } else  if (req->newptr != USER_ADDR_NULL) {
+               /* Something was wrong with the write request */
+               printf ("sysctl fail loc fault: %d.\n", error);
+       } else {
+               /* Read request */
+               error = SYSCTL_OUT(req, &cfs_fail_loc, sizeof cfs_fail_loc);
+       }
+       return error;
 }
 
 /*
diff --git a/libcfs/libcfs/darwin/darwin-sync.c b/libcfs/libcfs/darwin/darwin-sync.c
index 872ca00..81110c7 100644
@@ -753,7 +753,7 @@ static void add_hit(struct ksleep_chan *chan, event_t event)
        }
 }
 
-void ksleep_wait(struct ksleep_chan *chan, cfs_task_state_t state)
+void ksleep_wait(struct ksleep_chan *chan, long state)
 {
        event_t event;
        int     result;
@@ -783,8 +783,8 @@ void ksleep_wait(struct ksleep_chan *chan, cfs_task_state_t state)
  * implemented), or waitq was already in the "signalled" state).
  */
 int64_t ksleep_timedwait(struct ksleep_chan *chan, 
-                         cfs_task_state_t state,
-                         __u64 timeout)
+                        long state,
+                        __u64 timeout)
 {
        event_t event;
 
diff --git a/libcfs/libcfs/debug.c b/libcfs/libcfs/debug.c
index 6818a20..c418fbb 100644
@@ -110,7 +110,7 @@ EXPORT_SYMBOL(libcfs_panic_on_lbug);
 cfs_atomic_t libcfs_kmemory = CFS_ATOMIC_INIT(0);
 EXPORT_SYMBOL(libcfs_kmemory);
 
-static cfs_waitq_t debug_ctlwq;
+static wait_queue_head_t debug_ctlwq;
 
 char libcfs_debug_file_path_arr[PATH_MAX] = LIBCFS_DEBUG_FILE_PATH_DEFAULT;
 
@@ -247,23 +247,23 @@ void libcfs_debug_dumplog_internal(void *arg)
 
 int libcfs_debug_dumplog_thread(void *arg)
 {
-        libcfs_debug_dumplog_internal(arg);
-        cfs_waitq_signal(&debug_ctlwq);
-        return 0;
+       libcfs_debug_dumplog_internal(arg);
+       wake_up(&debug_ctlwq);
+       return 0;
 }
 
 void libcfs_debug_dumplog(void)
 {
-        cfs_waitlink_t wait;
-        cfs_task_t    *dumper;
-        ENTRY;
+       wait_queue_t wait;
+       cfs_task_t    *dumper;
+       ENTRY;
 
-        /* we're being careful to ensure that the kernel thread is
-         * able to set our state to running as it exits before we
-         * get to schedule() */
-       cfs_waitlink_init(&wait);
-       cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-       cfs_waitq_add(&debug_ctlwq, &wait);
+       /* we're being careful to ensure that the kernel thread is
+        * able to set our state to running as it exits before we
+        * get to schedule() */
+       init_waitqueue_entry_current(&wait);
+       set_current_state(TASK_INTERRUPTIBLE);
+       add_wait_queue(&debug_ctlwq, &wait);
 
        dumper = kthread_run(libcfs_debug_dumplog_thread,
                             (void *)(long)current_pid(),
@@ -271,28 +271,28 @@ void libcfs_debug_dumplog(void)
        if (IS_ERR(dumper))
                printk(KERN_ERR "LustreError: cannot start log dump thread:"
                       " %ld\n", PTR_ERR(dumper));
-        else
-                cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
+       else
+               waitq_wait(&wait, TASK_INTERRUPTIBLE);
 
-        /* be sure to teardown if cfs_create_thread() failed */
-        cfs_waitq_del(&debug_ctlwq, &wait);
-        cfs_set_current_state(CFS_TASK_RUNNING);
+       /* be sure to teardown if cfs_create_thread() failed */
+       remove_wait_queue(&debug_ctlwq, &wait);
+       set_current_state(TASK_RUNNING);
 }
 EXPORT_SYMBOL(libcfs_debug_dumplog);
 
 int libcfs_debug_init(unsigned long bufsize)
 {
-        int    rc = 0;
-        unsigned int max = libcfs_debug_mb;
+       int    rc = 0;
+       unsigned int max = libcfs_debug_mb;
 
-        cfs_waitq_init(&debug_ctlwq);
+       init_waitqueue_head(&debug_ctlwq);
 
-        if (libcfs_console_max_delay <= 0 || /* not set by user or */
-            libcfs_console_min_delay <= 0 || /* set to invalid values */
-            libcfs_console_min_delay >= libcfs_console_max_delay) {
-                libcfs_console_max_delay = CDEBUG_DEFAULT_MAX_DELAY;
-                libcfs_console_min_delay = CDEBUG_DEFAULT_MIN_DELAY;
-        }
+       if (libcfs_console_max_delay <= 0 || /* not set by user or */
+           libcfs_console_min_delay <= 0 || /* set to invalid values */
+           libcfs_console_min_delay >= libcfs_console_max_delay) {
+               libcfs_console_max_delay = CDEBUG_DEFAULT_MAX_DELAY;
+               libcfs_console_min_delay = CDEBUG_DEFAULT_MIN_DELAY;
+       }
 
         if (libcfs_debug_file_path != NULL) {
                 memset(libcfs_debug_file_path_arr, 0, PATH_MAX);
diff --git a/libcfs/libcfs/fail.c b/libcfs/libcfs/fail.c
index f84e03b..e11caff 100644
@@ -41,7 +41,7 @@
 
 unsigned long cfs_fail_loc = 0;
 unsigned int cfs_fail_val = 0;
-cfs_waitq_t cfs_race_waitq;
+wait_queue_head_t cfs_race_waitq;
 int cfs_race_state;
 
 EXPORT_SYMBOL(cfs_fail_loc);
@@ -125,17 +125,17 @@ EXPORT_SYMBOL(__cfs_fail_check_set);
 
 int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
 {
-        int ret = 0;
-
-        ret = __cfs_fail_check_set(id, value, set);
-        if (ret) {
-                CERROR("cfs_fail_timeout id %x sleeping for %dms\n",
-                       id, ms);
-                cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
-                                                   cfs_time_seconds(ms) / 1000);
-                cfs_set_current_state(CFS_TASK_RUNNING);
-                CERROR("cfs_fail_timeout id %x awake\n", id);
-        }
-        return ret;
+       int ret = 0;
+
+       ret = __cfs_fail_check_set(id, value, set);
+       if (ret) {
+               CERROR("cfs_fail_timeout id %x sleeping for %dms\n",
+                      id, ms);
+               schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
+                                                  cfs_time_seconds(ms) / 1000);
+               set_current_state(TASK_RUNNING);
+               CERROR("cfs_fail_timeout id %x awake\n", id);
+       }
+       return ret;
 }
 EXPORT_SYMBOL(__cfs_fail_timeout_set);
diff --git a/libcfs/libcfs/hash.c b/libcfs/libcfs/hash.c
index c1232b7..b8aaa12 100644
@@ -1003,7 +1003,7 @@ static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs)
        spin_lock(&hs->hs_dep_lock);
        while (hs->hs_dep_bits != 0) {
                spin_unlock(&hs->hs_dep_lock);
-               cfs_cond_resched();
+               cond_resched();
                spin_lock(&hs->hs_dep_lock);
        }
        spin_unlock(&hs->hs_dep_lock);
@@ -1139,10 +1139,10 @@ cfs_hash_destroy(cfs_hash_t *hs)
                                 cfs_hash_exit(hs, hnode);
                         }
                 }
-                LASSERT(bd.bd_bucket->hsb_count == 0);
-                cfs_hash_bd_unlock(hs, &bd, 1);
-                cfs_cond_resched();
-        }
+               LASSERT(bd.bd_bucket->hsb_count == 0);
+               cfs_hash_bd_unlock(hs, &bd, 1);
+               cond_resched();
+       }
 
         LASSERT(cfs_atomic_read(&hs->hs_count) == 0);
 
@@ -1479,11 +1479,11 @@ cfs_hash_for_each_tight(cfs_hash_t *hs, cfs_hash_for_each_cb_t func,
                 cfs_hash_bd_unlock(hs, &bd, excl);
                 if (loop < CFS_HASH_LOOP_HOG)
                         continue;
-                loop = 0;
-                cfs_hash_unlock(hs, 0);
-                cfs_cond_resched();
-                cfs_hash_lock(hs, 0);
-        }
+               loop = 0;
+               cfs_hash_unlock(hs, 0);
+               cond_resched();
+               cfs_hash_lock(hs, 0);
+       }
  out:
         cfs_hash_unlock(hs, 0);
 
@@ -1614,11 +1614,11 @@ cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data)
                                 cfs_hash_bd_unlock(hs, &bd, 0);
                                 cfs_hash_unlock(hs, 0);
 
-                                rc = func(hs, &bd, hnode, data);
-                                if (stop_on_change)
-                                        cfs_hash_put(hs, hnode);
-                                cfs_cond_resched();
-                                count++;
+                               rc = func(hs, &bd, hnode, data);
+                               if (stop_on_change)
+                                       cfs_hash_put(hs, hnode);
+                               cond_resched();
+                               count++;
 
                                 cfs_hash_lock(hs, 0);
                                 cfs_hash_bd_lock(hs, &bd, 0);
@@ -1798,14 +1798,14 @@ cfs_hash_rehash_cancel_locked(cfs_hash_t *hs)
         }
 
         for (i = 2; cfs_hash_is_rehashing(hs); i++) {
-                cfs_hash_unlock(hs, 1);
-                /* raise console warning while waiting too long */
-                CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
-                       "hash %s is still rehashing, rescheded %d\n",
-                       hs->hs_name, i - 1);
-                cfs_cond_resched();
-                cfs_hash_lock(hs, 1);
-        }
+               cfs_hash_unlock(hs, 1);
+               /* raise console warning while waiting too long */
+               CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
+                      "hash %s is still rehashing, rescheded %d\n",
+                      hs->hs_name, i - 1);
+               cond_resched();
+               cfs_hash_lock(hs, 1);
+       }
 }
 EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);
 
@@ -1951,11 +1951,11 @@ cfs_hash_rehash_worker(cfs_workitem_t *wi)
                         continue;
                 }
 
-                count = 0;
-                cfs_hash_unlock(hs, 1);
-                cfs_cond_resched();
-                cfs_hash_lock(hs, 1);
-        }
+               count = 0;
+               cfs_hash_unlock(hs, 1);
+               cond_resched();
+               cfs_hash_lock(hs, 1);
+       }
 
         hs->hs_rehash_count++;
 
diff --git a/libcfs/libcfs/linux/linux-cpu.c b/libcfs/libcfs/linux/linux-cpu.c
index a708148..2a198fb 100644
@@ -630,7 +630,7 @@ cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
                rc = set_cpus_allowed_ptr(cfs_current(), cpumask);
                set_mems_allowed(*nodemask);
                if (rc == 0)
-                       cfs_schedule(); /* switch to allowed CPU */
+                       schedule(); /* switch to allowed CPU */
 
                return rc;
        }
diff --git a/libcfs/libcfs/linux/linux-prim.c b/libcfs/libcfs/linux/linux-prim.c
index 27d95e9..b6719d1 100644
 #include <asm/kgdb.h>
 #endif
 
-#define LINUX_WAITQ(w) ((wait_queue_t *) w)
-#define LINUX_WAITQ_HEAD(w) ((wait_queue_head_t *) w)
-
-void
-cfs_waitq_init(cfs_waitq_t *waitq)
-{
-        init_waitqueue_head(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_init);
-
-void
-cfs_waitlink_init(cfs_waitlink_t *link)
-{
-        init_waitqueue_entry(LINUX_WAITQ(link), current);
-}
-EXPORT_SYMBOL(cfs_waitlink_init);
-
-void
-cfs_waitq_add(cfs_waitq_t *waitq, cfs_waitlink_t *link)
-{
-        add_wait_queue(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-}
-EXPORT_SYMBOL(cfs_waitq_add);
-
-#ifndef HAVE___ADD_WAIT_QUEUE_EXCLUSIVE
-
-static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
-                                              wait_queue_t *wait)
-{
-        wait->flags |= WQ_FLAG_EXCLUSIVE;
-        __add_wait_queue(q, wait);
-}
-
-#endif /* HAVE___ADD_WAIT_QUEUE_EXCLUSIVE */
-
-void
-cfs_waitq_add_exclusive(cfs_waitq_t *waitq,
-                        cfs_waitlink_t *link)
-{
-        add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-}
-EXPORT_SYMBOL(cfs_waitq_add_exclusive);
-
-/**
- * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
- * waiting threads, which is not always desirable because all threads will
- * be waken up again and again, even user only needs a few of them to be
- * active most time. This is not good for performance because cache can
- * be polluted by different threads.
- *
- * LIFO list can resolve this problem because we always wakeup the most
- * recent active thread by default.
- *
- * NB: please don't call non-exclusive & exclusive wait on the same
- * waitq if cfs_waitq_add_exclusive_head is used.
- */
-void
-cfs_waitq_add_exclusive_head(cfs_waitq_t *waitq, cfs_waitlink_t *link)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
-       __add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-       spin_unlock_irqrestore(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
-}
-EXPORT_SYMBOL(cfs_waitq_add_exclusive_head);
-
-void
-cfs_waitq_del(cfs_waitq_t *waitq, cfs_waitlink_t *link)
-{
-        remove_wait_queue(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-}
-EXPORT_SYMBOL(cfs_waitq_del);
-
-int
-cfs_waitq_active(cfs_waitq_t *waitq)
-{
-        return waitqueue_active(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_active);
-
-void
-cfs_waitq_signal(cfs_waitq_t *waitq)
-{
-        wake_up(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_signal);
-
-void
-cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr)
-{
-        wake_up_nr(LINUX_WAITQ_HEAD(waitq), nr);
-}
-EXPORT_SYMBOL(cfs_waitq_signal_nr);
-
-void
-cfs_waitq_broadcast(cfs_waitq_t *waitq)
-{
-        wake_up_all(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_broadcast);
-
-void
-cfs_waitq_wait(cfs_waitlink_t *link, cfs_task_state_t state)
-{
-        schedule();
-}
-EXPORT_SYMBOL(cfs_waitq_wait);
-
-int64_t
-cfs_waitq_timedwait(cfs_waitlink_t *link, cfs_task_state_t state,
-                    int64_t timeout)
-{
-        return schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(cfs_waitq_timedwait);
-
-void
-cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t timeout)
-{
-        set_current_state(state);
-        schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(cfs_schedule_timeout_and_set_state);
-
-void
-cfs_schedule_timeout(int64_t timeout)
-{
-        schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(cfs_schedule_timeout);
-
-void
-cfs_schedule(void)
-{
-        schedule();
-}
-EXPORT_SYMBOL(cfs_schedule);
-
-/* deschedule for a bit... */
-void
-cfs_pause(cfs_duration_t ticks)
-{
-        set_current_state(TASK_UNINTERRUPTIBLE);
-        schedule_timeout(ticks);
-}
-EXPORT_SYMBOL(cfs_pause);
-
-int cfs_need_resched(void)
-{
-        return need_resched();
-}
-EXPORT_SYMBOL(cfs_need_resched);
-
-void cfs_cond_resched(void)
-{
-        cond_resched();
-}
-EXPORT_SYMBOL(cfs_cond_resched);
-
 void cfs_init_timer(cfs_timer_t *t)
 {
         init_timer(t);
diff --git a/libcfs/libcfs/linux/linux-proc.c b/libcfs/libcfs/linux/linux-proc.c
index 027802c..bbc363a 100644
@@ -342,13 +342,13 @@ int LL_PROC_PROTO(libcfs_force_lbug)
 
 int LL_PROC_PROTO(proc_fail_loc)
 {
-        int rc;
-        long old_fail_loc = cfs_fail_loc;
+       int rc;
+       long old_fail_loc = cfs_fail_loc;
 
-        rc = ll_proc_dolongvec(table, write, filp, buffer, lenp, ppos);
-        if (old_fail_loc != cfs_fail_loc)
-                cfs_waitq_signal(&cfs_race_waitq);
-        return rc;
+       rc = ll_proc_dolongvec(table, write, filp, buffer, lenp, ppos);
+       if (old_fail_loc != cfs_fail_loc)
+               wake_up(&cfs_race_waitq);
+       return rc;
 }
 
 static int __proc_cpt_table(void *data, int write,
diff --git a/libcfs/libcfs/lwt.c b/libcfs/libcfs/lwt.c
index 72db8c8..2fa7a5d 100644
@@ -97,13 +97,13 @@ lwt_control (int enable, int clear)
         if (!cfs_capable(CFS_CAP_SYS_ADMIN))
                 return (-EPERM);
 
-        if (!enable) {
-                LWT_EVENT(0,0,0,0);
-                lwt_enabled = 0;
-                cfs_mb();
-                /* give people some time to stop adding traces */
-                cfs_schedule_timeout(10);
-        }
+       if (!enable) {
+               LWT_EVENT(0,0,0,0);
+               lwt_enabled = 0;
+               cfs_mb();
+               /* give people some time to stop adding traces */
+               schedule_timeout(10);
+       }
 
        for (i = 0; i < num_online_cpus(); i++) {
                p = lwt_cpus[i].lwtc_current_page;
diff --git a/libcfs/libcfs/module.c b/libcfs/libcfs/module.c
index 8ab7e6c..04c9ea5 100644
@@ -385,7 +385,7 @@ static int init_libcfs_module(void)
        mutex_init(&cfs_trace_thread_mutex);
        init_rwsem(&ioctl_list_sem);
        CFS_INIT_LIST_HEAD(&ioctl_list);
-       cfs_waitq_init(&cfs_race_waitq);
+       init_waitqueue_head(&cfs_race_waitq);
 
        rc = libcfs_debug_init(5 * 1024 * 1024);
        if (rc < 0) {
diff --git a/libcfs/libcfs/tracefile.c b/libcfs/libcfs/tracefile.c
index 49acdf2..57aefc8 100644
@@ -171,14 +171,14 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
                cfs_list_add_tail(&tage->linkage, &tcd->tcd_pages);
                tcd->tcd_cur_pages++;
 
-                if (tcd->tcd_cur_pages > 8 && thread_running) {
-                        struct tracefiled_ctl *tctl = &trace_tctl;
-                        /*
-                         * wake up tracefiled to process some pages.
-                         */
-                        cfs_waitq_signal(&tctl->tctl_waitq);
-                }
-                return tage;
+               if (tcd->tcd_cur_pages > 8 && thread_running) {
+                       struct tracefiled_ctl *tctl = &trace_tctl;
+                       /*
+                        * wake up tracefiled to process some pages.
+                        */
+                       wake_up(&tctl->tctl_waitq);
+               }
+               return tage;
         }
         return NULL;
 }
@@ -996,8 +996,8 @@ static int tracefiled(void *arg)
        spin_lock_init(&pc.pc_lock);
        complete(&tctl->tctl_start);
 
-        while (1) {
-                cfs_waitlink_t __wait;
+       while (1) {
+               wait_queue_t __wait;
 
                 pc.pc_want_daemon_pages = 0;
                 collect_pages(&pc);
@@ -1083,12 +1083,12 @@ end_loop:
                                 break;
                         }
                 }
-                cfs_waitlink_init(&__wait);
-                cfs_waitq_add(&tctl->tctl_waitq, &__wait);
-                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-                cfs_waitq_timedwait(&__wait, CFS_TASK_INTERRUPTIBLE,
-                                    cfs_time_seconds(1));
-                cfs_waitq_del(&tctl->tctl_waitq, &__wait);
+               init_waitqueue_entry_current(&__wait);
+               add_wait_queue(&tctl->tctl_waitq, &__wait);
+               set_current_state(TASK_INTERRUPTIBLE);
+               waitq_timedwait(&__wait, TASK_INTERRUPTIBLE,
+                               cfs_time_seconds(1));
+               remove_wait_queue(&tctl->tctl_waitq, &__wait);
         }
        complete(&tctl->tctl_stop);
         return 0;
@@ -1105,7 +1105,7 @@ int cfs_trace_start_thread(void)
 
        init_completion(&tctl->tctl_start);
        init_completion(&tctl->tctl_stop);
-       cfs_waitq_init(&tctl->tctl_waitq);
+       init_waitqueue_head(&tctl->tctl_waitq);
        cfs_atomic_set(&tctl->tctl_shutdown, 0);
 
        if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
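
For reference, the open-coded init_waitqueue_entry_current() / add_wait_queue() / waitq_timedwait() / remove_wait_queue() sequence in tracefiled() above is the expanded form of the kernel's wait_event macros. A sketch of the compact equivalent, where pages_pending() is a hypothetical predicate standing in for the loop's own recheck:

/* pages_pending() is illustrative only, not part of this change */
wait_event_interruptible_timeout(tctl->tctl_waitq,
                                 pages_pending(tctl),
                                 cfs_time_seconds(1));
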
index 12c9ce9..d9dfb9f 100644
@@ -225,7 +225,7 @@ struct page_collection {
 struct tracefiled_ctl {
        struct completion       tctl_start;
        struct completion       tctl_stop;
-       cfs_waitq_t             tctl_waitq;
+       wait_queue_head_t       tctl_waitq;
        pid_t                   tctl_pid;
        cfs_atomic_t            tctl_shutdown;
 };
index 0604790..43014e2 100644
@@ -50,14 +50,14 @@ static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache,
         if (!entry)
                 return NULL;
 
-        UC_CACHE_SET_NEW(entry);
-        CFS_INIT_LIST_HEAD(&entry->ue_hash);
-        entry->ue_key = key;
-        cfs_atomic_set(&entry->ue_refcount, 0);
-        cfs_waitq_init(&entry->ue_waitq);
-        if (cache->uc_ops->init_entry)
-                cache->uc_ops->init_entry(entry, args);
-        return entry;
+       UC_CACHE_SET_NEW(entry);
+       CFS_INIT_LIST_HEAD(&entry->ue_hash);
+       entry->ue_key = key;
+       cfs_atomic_set(&entry->ue_refcount, 0);
+       init_waitqueue_head(&entry->ue_waitq);
+       if (cache->uc_ops->init_entry)
+               cache->uc_ops->init_entry(entry, args);
+       return entry;
 }
 
 /* protected by cache lock */
@@ -126,11 +126,11 @@ static int check_unlink_entry(struct upcall_cache *cache,
                                     entry->ue_acquire_expire))
                         return 0;
 
-                UC_CACHE_SET_EXPIRED(entry);
-                cfs_waitq_broadcast(&entry->ue_waitq);
-        } else if (!UC_CACHE_IS_INVALID(entry)) {
-                UC_CACHE_SET_EXPIRED(entry);
-        }
+               UC_CACHE_SET_EXPIRED(entry);
+               wake_up_all(&entry->ue_waitq);
+       } else if (!UC_CACHE_IS_INVALID(entry)) {
+               UC_CACHE_SET_EXPIRED(entry);
+       }
 
         cfs_list_del_init(&entry->ue_hash);
         if (!cfs_atomic_read(&entry->ue_refcount))
@@ -148,11 +148,11 @@ static inline int refresh_entry(struct upcall_cache *cache,
 struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
                                                   __u64 key, void *args)
 {
-        struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
-        cfs_list_t *head;
-        cfs_waitlink_t wait;
-        int rc, found;
-        ENTRY;
+       struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
+       cfs_list_t *head;
+       wait_queue_t wait;
+       int rc, found;
+       ENTRY;
 
         LASSERT(cache);
 
@@ -202,13 +202,13 @@ find_again:
                 entry->ue_acquire_expire =
                         cfs_time_shift(cache->uc_acquire_expire);
                 if (rc < 0) {
-                        UC_CACHE_CLEAR_ACQUIRING(entry);
-                        UC_CACHE_SET_INVALID(entry);
-                        cfs_waitq_broadcast(&entry->ue_waitq);
-                        if (unlikely(rc == -EREMCHG)) {
-                                put_entry(cache, entry);
-                                GOTO(out, entry = ERR_PTR(rc));
-                        }
+                       UC_CACHE_CLEAR_ACQUIRING(entry);
+                       UC_CACHE_SET_INVALID(entry);
+                       wake_up_all(&entry->ue_waitq);
+                       if (unlikely(rc == -EREMCHG)) {
+                               put_entry(cache, entry);
+                               GOTO(out, entry = ERR_PTR(rc));
+                       }
                 }
         }
         /* someone (and only one) is doing upcall upon this item,
@@ -216,27 +216,27 @@ find_again:
         if (UC_CACHE_IS_ACQUIRING(entry)) {
                 long expiry = (entry == new) ?
                               cfs_time_seconds(cache->uc_acquire_expire) :
-                              CFS_MAX_SCHEDULE_TIMEOUT;
-                long left;
+                             MAX_SCHEDULE_TIMEOUT;
+               long left;
 
-                cfs_waitlink_init(&wait);
-                cfs_waitq_add(&entry->ue_waitq, &wait);
-                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+               init_waitqueue_entry_current(&wait);
+               add_wait_queue(&entry->ue_waitq, &wait);
+               set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock(&cache->uc_lock);
 
-               left = cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
+               left = waitq_timedwait(&wait, TASK_INTERRUPTIBLE,
                                           expiry);
 
                spin_lock(&cache->uc_lock);
-                cfs_waitq_del(&entry->ue_waitq, &wait);
-                if (UC_CACHE_IS_ACQUIRING(entry)) {
-                        /* we're interrupted or upcall failed in the middle */
-                        rc = left > 0 ? -EINTR : -ETIMEDOUT;
-                        CERROR("acquire for key "LPU64": error %d\n",
-                               entry->ue_key, rc);
-                        put_entry(cache, entry);
-                        GOTO(out, entry = ERR_PTR(rc));
-                }
+               remove_wait_queue(&entry->ue_waitq, &wait);
+               if (UC_CACHE_IS_ACQUIRING(entry)) {
+                       /* we're interrupted or upcall failed in the middle */
+                       rc = left > 0 ? -EINTR : -ETIMEDOUT;
+                       CERROR("acquire for key "LPU64": error %d\n",
+                              entry->ue_key, rc);
+                       put_entry(cache, entry);
+                       GOTO(out, entry = ERR_PTR(rc));
+               }
         }
 
         /* invalid means error, don't need to try again */
@@ -353,7 +353,7 @@ out:
         }
         UC_CACHE_CLEAR_ACQUIRING(entry);
        spin_unlock(&cache->uc_lock);
-       cfs_waitq_broadcast(&entry->ue_waitq);
+       wake_up_all(&entry->ue_waitq);
        put_entry(cache, entry);
 
        RETURN(rc);
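
The "left > 0 ? -EINTR : -ETIMEDOUT" logic above leans on schedule_timeout() semantics: the return value is the number of jiffies remaining, so a positive value means the sleeper was woken (or signalled) before the interval expired, zero means it timed out, and MAX_SCHEDULE_TIMEOUT sleeps until an explicit wake-up. In sketch form:

set_current_state(TASK_INTERRUPTIBLE);
left = schedule_timeout(expiry);        /* jiffies left on return */
if (left > 0)
        ; /* woken early: wake_up*() or a signal */
else
        ; /* the full timeout elapsed */
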
index 4337e12..fcba2cd 100644
@@ -172,7 +172,7 @@ void init_completion(struct completion *c)
 {
        LASSERT(c != NULL);
        c->done = 0;
-       cfs_waitq_init(&c->wait);
+       init_waitqueue_head(&c->wait);
 }
 
 void fini_completion(struct completion *c)
@@ -183,7 +183,7 @@ void complete(struct completion *c)
 {
        LASSERT(c != NULL);
        c->done  = 1;
-       cfs_waitq_signal(&c->wait);
+       wake_up(&c->wait);
 }
 
 void wait_for_completion(struct completion *c)
index 7d7c059..34b0c5e 100644
  * Wait queue. No-op implementation.
  */
 
-void cfs_waitq_init(struct cfs_waitq *waitq)
+void init_waitqueue_head(struct cfs_waitq *waitq)
 {
-        LASSERT(waitq != NULL);
-        (void)waitq;
+       LASSERT(waitq != NULL);
+       (void)waitq;
 }
 
-void cfs_waitlink_init(struct cfs_waitlink *link)
+void init_waitqueue_entry_current(struct cfs_waitlink *link)
 {
-        LASSERT(link != NULL);
-        (void)link;
+       LASSERT(link != NULL);
+       (void)link;
 }
 
-void cfs_waitq_add(struct cfs_waitq *waitq, struct cfs_waitlink *link)
+void add_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link)
 {
-        LASSERT(waitq != NULL);
-        LASSERT(link != NULL);
-        (void)waitq;
-        (void)link;
+       LASSERT(waitq != NULL);
+       LASSERT(link != NULL);
+       (void)waitq;
+       (void)link;
 }
 
-void cfs_waitq_add_exclusive(struct cfs_waitq *waitq, struct cfs_waitlink *link)
+void add_wait_queue_exclusive(struct cfs_waitq *waitq, struct cfs_waitlink *link)
 {
-        LASSERT(waitq != NULL);
-        LASSERT(link != NULL);
-        (void)waitq;
-        (void)link;
+       LASSERT(waitq != NULL);
+       LASSERT(link != NULL);
+       (void)waitq;
+       (void)link;
 }
 
-void cfs_waitq_add_exclusive_head(struct cfs_waitq *waitq, struct cfs_waitlink *link)
+void add_wait_queue_exclusive_head(struct cfs_waitq *waitq, struct cfs_waitlink *link)
 {
-        cfs_waitq_add_exclusive(waitq, link);
+       add_wait_queue_exclusive(waitq, link);
 }
 
-void cfs_waitq_del(struct cfs_waitq *waitq, struct cfs_waitlink *link)
+void remove_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link)
 {
-        LASSERT(waitq != NULL);
-        LASSERT(link != NULL);
-        (void)waitq;
-        (void)link;
+       LASSERT(waitq != NULL);
+       LASSERT(link != NULL);
+       (void)waitq;
+       (void)link;
 }
 
-int cfs_waitq_active(struct cfs_waitq *waitq)
+int waitqueue_active(struct cfs_waitq *waitq)
 {
-        LASSERT(waitq != NULL);
-        (void)waitq;
-        return 0;
+       LASSERT(waitq != NULL);
+       (void)waitq;
+       return 0;
 }
 
-void cfs_waitq_signal(struct cfs_waitq *waitq)
+void wake_up(struct cfs_waitq *waitq)
 {
-        LASSERT(waitq != NULL);
-        (void)waitq;
+       LASSERT(waitq != NULL);
+       (void)waitq;
 }
 
-void cfs_waitq_signal_nr(struct cfs_waitq *waitq, int nr)
+void wake_up_nr(struct cfs_waitq *waitq, int nr)
 {
-        LASSERT(waitq != NULL);
-        (void)waitq;
+       LASSERT(waitq != NULL);
+       (void)waitq;
 }
 
-void cfs_waitq_broadcast(struct cfs_waitq *waitq)
+void wake_up_all(struct cfs_waitq *waitq)
 {
-        LASSERT(waitq != NULL);
-        (void)waitq;
+       LASSERT(waitq != NULL);
+       (void)waitq;
 }
 
-void cfs_waitq_wait(struct cfs_waitlink *link, cfs_task_state_t state)
+void waitq_wait(struct cfs_waitlink *link, long state)
 {
-        LASSERT(link != NULL);
-        (void)link;
+       LASSERT(link != NULL);
+       (void)link;
 
-        /* well, wait for something to happen */
+       /* well, wait for something to happen */
        call_wait_handler(0);
 }
 
-int64_t cfs_waitq_timedwait(struct cfs_waitlink *link, cfs_task_state_t state,
-                            int64_t timeout)
+int64_t waitq_timedwait(struct cfs_waitlink *link, long state,
+                       int64_t timeout)
 {
-        LASSERT(link != NULL);
-        (void)link;
+       LASSERT(link != NULL);
+       (void)link;
        call_wait_handler(timeout);
-        return 0;
+       return 0;
 }
 
-void cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t timeout)
+void schedule_timeout_and_set_state(long state, int64_t timeout)
 {
-        cfs_waitlink_t    l;
-        /* sleep(timeout) here instead? */
-        cfs_waitq_timedwait(&l, state, timeout);
+       wait_queue_t    l;
+       /* sleep(timeout) here instead? */
+       waitq_timedwait(&l, state, timeout);
 }
 
 void
 cfs_pause(cfs_duration_t d)
 {
-        struct timespec s;
+       struct timespec s;
 
-        cfs_duration_nsec(d, &s);
-        nanosleep(&s, NULL);
+       cfs_duration_nsec(d, &s);
+       nanosleep(&s, NULL);
 }
 
-int cfs_need_resched(void)
+int need_resched(void)
 {
-        return 0;
+       return 0;
 }
 
-void cfs_cond_resched(void)
+void cond_resched(void)
 {
 }
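
These userspace stubs keep the kernel names and signatures while doing (almost) nothing: the file's own comment bills them as a no-op implementation, and any real blocking is funnelled through call_wait_handler(). The payoff is that shared code can use one idiom on both sides; a sketch of such a portable caller, assuming some wait queue head q declared elsewhere:

wait_queue_t link;

init_waitqueue_entry_current(&link);
add_wait_queue(&q, &link);              /* no-op here, real queuing in the kernel */
waitq_timedwait(&link, TASK_INTERRUPTIBLE, timeout);
remove_wait_queue(&q, &link);
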
 
index 0e32954..bbad0b1 100644
@@ -70,7 +70,7 @@ struct lc_watchdog {
  */
 static struct completion lcw_start_completion;
 static struct completion  lcw_stop_completion;
-static cfs_waitq_t lcw_event_waitq;
+static wait_queue_head_t lcw_event_waitq;
 
 /*
  * Set this and wake lcw_event_waitq to stop the dispatcher.
@@ -135,7 +135,7 @@ static void lcw_cb(ulong_ptr_t data)
        spin_lock_bh(&lcw_pending_timers_lock);
        lcw->lcw_refcount++; /* +1 for pending list */
        cfs_list_add(&lcw->lcw_list, &lcw_pending_timers);
-       cfs_waitq_signal(&lcw_event_waitq);
+       wake_up(&lcw_event_waitq);
 
        spin_unlock_bh(&lcw_pending_timers_lock);
        spin_unlock_bh(&lcw->lcw_lock);
@@ -303,7 +303,7 @@ static void lcw_dispatch_start(void)
 
        init_completion(&lcw_stop_completion);
        init_completion(&lcw_start_completion);
-        cfs_waitq_init(&lcw_event_waitq);
+       init_waitqueue_head(&lcw_event_waitq);
 
        CDEBUG(D_INFO, "starting dispatch thread\n");
        task = kthread_run(lcw_dispatch_main, NULL, "lc_watchdogd");
@@ -327,7 +327,7 @@ static void lcw_dispatch_stop(void)
        CDEBUG(D_INFO, "trying to stop watchdog dispatcher.\n");
 
        set_bit(LCW_FLAG_STOP, &lcw_flags);
-       cfs_waitq_signal(&lcw_event_waitq);
+       wake_up(&lcw_event_waitq);
 
        wait_for_completion(&lcw_stop_completion);
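
The stop path above is a compact three-step handshake: publish a stop flag, wake the dispatcher, then block until the dispatcher announces it is gone. Condensed sketch; the complete() on the dispatcher side is not shown in this hunk, but the handshake implies it:

/* controller */
set_bit(LCW_FLAG_STOP, &lcw_flags);
wake_up(&lcw_event_waitq);
wait_for_completion(&lcw_stop_completion);

/* dispatcher, once it observes LCW_FLAG_STOP */
complete(&lcw_stop_completion);
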
 
index 9f6a7cf..8ea15b3 100644
@@ -405,11 +405,11 @@ errorout:
 void
 cfs_pause(cfs_duration_t ticks)
 {
-    cfs_schedule_timeout_and_set_state(CFS_TASK_UNINTERRUPTIBLE, ticks);
+    schedule_timeout_and_set_state(CFS_TASK_UNINTERRUPTIBLE, ticks);
 }
 
 void
-cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t time)
+schedule_timeout_and_set_state(long state, int64_t time)
 {
     cfs_task_t * task = cfs_current();
     PTASK_SLOT   slot = NULL;
@@ -422,7 +422,7 @@ cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t time)
     slot = CONTAINING_RECORD(task, TASK_SLOT, task);
     cfs_assert(slot->Magic == TASKSLT_MAGIC);
 
-    if (time == CFS_MAX_SCHEDULE_TIMEOUT) {
+    if (time == MAX_SCHEDULE_TIMEOUT) {
         time = 0;
     }
 
@@ -430,9 +430,9 @@ cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t time)
 }
 
 void
-cfs_schedule()
+schedule()
 {
-    cfs_schedule_timeout_and_set_state(CFS_TASK_UNINTERRUPTIBLE, 0);
+    schedule_timeout_and_set_state(CFS_TASK_UNINTERRUPTIBLE, 0);
 }
 
 int
@@ -456,14 +456,14 @@ wake_up_process(
 }
 
 void
-sleep_on(cfs_waitq_t *waitq)
+sleep_on(wait_queue_head_t *waitq)
 {
-       cfs_waitlink_t link;
+       wait_queue_t link;
        
-       cfs_waitlink_init(&link);
-       cfs_waitq_add(waitq, &link);
-       cfs_waitq_wait(&link, CFS_TASK_INTERRUPTIBLE);
-       cfs_waitq_del(waitq, &link);
+       init_waitqueue_entry_current(&link);
+       add_wait_queue(waitq, &link);
+       waitq_wait(&link, TASK_INTERRUPTIBLE);
+       remove_wait_queue(waitq, &link);
 }
 
 EXPORT_SYMBOL(current_uid);
index d62bd01..351e171 100644
@@ -732,12 +732,12 @@ errorout:
     return NT_SUCCESS(status);
 }
 
-int cfs_need_resched(void)
+int need_resched(void)
 {
         return 0;
 }
 
-void cfs_cond_resched(void)
+void cond_resched(void)
 {
 }
 
index a6353bc..18817ee 100644
  */
 
 /*
- * cfs_waitq_init
+ * init_waitqueue_head
  *   To initialize the wait queue
  *
  * Arguments:
- *   waitq:  pointer to the cfs_waitq_t structure
+ *   waitq:  pointer to the wait_queue_head_t structure
  *
  * Return Value:
  *   N/A
@@ -57,7 +57,7 @@
  *   N/A
  */
 
-void cfs_waitq_init(cfs_waitq_t *waitq)
+void init_waitqueue_head(wait_queue_head_t *waitq)
 {
     waitq->magic = CFS_WAITQ_MAGIC;
     waitq->flags = 0;
@@ -66,11 +66,11 @@ void cfs_waitq_init(cfs_waitq_t *waitq)
 }
 
 /*
- * cfs_waitlink_init
+ * init_waitqueue_entry_current
  *   To initialize the wake link node
  *
  * Arguments:
- *   link:  pointer to the cfs_waitlink_t structure
+ *   link:  pointer to the wait_queue_t structure
  *
  * Return Value:
  *   N/A
@@ -79,7 +79,7 @@ void cfs_waitq_init(cfs_waitq_t *waitq)
  *   N/A
  */
 
-void cfs_waitlink_init(cfs_waitlink_t *link)
+void init_waitqueue_entry_current(wait_queue_t *link)
 {
     cfs_task_t * task = cfs_current();
     PTASK_SLOT   slot = NULL;
@@ -93,7 +93,7 @@ void cfs_waitlink_init(cfs_waitlink_t *link)
     slot = CONTAINING_RECORD(task, TASK_SLOT, task);
     cfs_assert(slot->Magic == TASKSLT_MAGIC);
 
-    memset(link, 0, sizeof(cfs_waitlink_t));
+    memset(link, 0, sizeof(wait_queue_t));
 
     link->magic = CFS_WAITLINK_MAGIC;
     link->flags = 0;
@@ -115,7 +115,7 @@ void cfs_waitlink_init(cfs_waitlink_t *link)
 *   To finalize the wake link node
  *
  * Arguments:
- *   link:  pointer to the cfs_waitlink_t structure
+ *   link:  pointer to the wait_queue_t structure
  *
  * Return Value:
  *   N/A
@@ -124,7 +124,7 @@ void cfs_waitlink_init(cfs_waitlink_t *link)
  *   N/A
  */
 
-void cfs_waitlink_fini(cfs_waitlink_t *link)
+void cfs_waitlink_fini(wait_queue_t *link)
 {
     cfs_task_t * task = cfs_current();
     PTASK_SLOT   slot = NULL;
@@ -150,8 +150,8 @@ void cfs_waitlink_fini(cfs_waitlink_t *link)
  *   To queue the wait link node to the wait queue
  *
  * Arguments:
- *   waitq:  pointer to the cfs_waitq_t structure
- *   link:   pointer to the cfs_waitlink_t structure
+ *   waitq:  pointer to the wait_queue_head_t structure
+ *   link:   pointer to the wait_queue_t structure
  *   int:    queue no (Normal or Forward waitq)
  *
  * Return Value:
@@ -161,8 +161,8 @@ void cfs_waitlink_fini(cfs_waitlink_t *link)
  *   N/A
  */
 
-void cfs_waitq_add_internal(cfs_waitq_t *waitq,
-                            cfs_waitlink_t *link,
+void cfs_waitq_add_internal(wait_queue_head_t *waitq,
+                           wait_queue_t *link,
                             __u32 waitqid )
 { 
     LASSERT(waitq != NULL);
@@ -182,12 +182,12 @@ void cfs_waitq_add_internal(cfs_waitq_t *waitq,
        spin_unlock(&(waitq->guard));
 }
 /*
- * cfs_waitq_add
+ * add_wait_queue
  *   To queue the wait link node to the wait queue
  *
  * Arguments:
- *   waitq:  pointer to the cfs_waitq_t structure
- *   link:  pointer to the cfs_waitlink_t structure
+ *   waitq:  pointer to the wait_queue_head_t structure
+ *   link:  pointer to the wait_queue_t structure
  *
  * Return Value:
  *   N/A
@@ -196,19 +196,19 @@ void cfs_waitq_add_internal(cfs_waitq_t *waitq,
  *   N/A
  */
 
-void cfs_waitq_add(cfs_waitq_t *waitq,
-                   cfs_waitlink_t *link)
+void add_wait_queue(wait_queue_head_t *waitq,
+                  wait_queue_t *link)
 { 
     cfs_waitq_add_internal(waitq, link, CFS_WAITQ_CHAN_NORMAL);
 }
 
 /*
- * cfs_waitq_add_exclusive
+ * add_wait_queue_exclusive
  *   To set the wait link node to exclusive mode
  *   and queue it to the wait queue
  *
  * Arguments:
- *   waitq:  pointer to the cfs_waitq_t structure
+ *   waitq:  pointer to the wait_queue_head_t structure
  *   link:  pointer to the cfs_wait_link structure
  *
  * Return Value:
@@ -218,8 +218,8 @@ void cfs_waitq_add(cfs_waitq_t *waitq,
  *   N/A
  */
 
-void cfs_waitq_add_exclusive( cfs_waitq_t *waitq,
-                              cfs_waitlink_t *link)
+void add_wait_queue_exclusive( wait_queue_head_t *waitq,
+                             wait_queue_t *link)
 {
     LASSERT(waitq != NULL);
     LASSERT(link != NULL);
@@ -227,16 +227,16 @@ void cfs_waitq_add_exclusive( cfs_waitq_t *waitq,
     LASSERT(link->magic == CFS_WAITLINK_MAGIC);
 
        link->flags |= CFS_WAITQ_EXCLUSIVE;
-    cfs_waitq_add(waitq, link);
+    add_wait_queue(waitq, link);
 }
 
 /*
- * cfs_waitq_del
+ * remove_wait_queue
  *   To remove the wait link node from the waitq
  *
  * Arguments:
 *   waitq:  pointer to the cfs_waitq_t structure
- *   link:  pointer to the cfs_waitlink_t structure
+ *   link:  pointer to the wait_queue_t structure
  *
  * Return Value:
  *   N/A
@@ -245,8 +245,8 @@ void cfs_waitq_add_exclusive( cfs_waitq_t *waitq,
  *   N/A
  */
 
-void cfs_waitq_del( cfs_waitq_t *waitq,
-                    cfs_waitlink_t *link)
+void remove_wait_queue( wait_queue_head_t *waitq,
+                   wait_queue_t *link)
 {
     int i = 0;
 
@@ -274,7 +274,7 @@ void cfs_waitq_del( cfs_waitq_t *waitq,
 }
 
 /*
- * cfs_waitq_active
+ * waitqueue_active
  *   Is the waitq active (not empty) ?
  *
  * Arguments:
@@ -288,7 +288,7 @@ void cfs_waitq_del( cfs_waitq_t *waitq,
 *   We always return TRUE here, the same as Darwin.
  */
 
-int cfs_waitq_active(cfs_waitq_t *waitq)
+int waitqueue_active(wait_queue_head_t *waitq)
 {
     LASSERT(waitq != NULL);
     LASSERT(waitq->magic == CFS_WAITQ_MAGIC);
@@ -297,12 +297,12 @@ int cfs_waitq_active(cfs_waitq_t *waitq)
 }
 
 /*
- * cfs_waitq_signal_nr
+ * wake_up_nr
  *   To wake up all the non-exclusive tasks plus nr exclusive
  *   ones in the waitq
  *
  * Arguments:
- *   waitq:  pointer to the cfs_waitq_t structure
+ *   waitq:  pointer to the wait_queue_head_t structure
  *   nr:    number of exclusive tasks to be woken up
  *
  * Return Value:
@@ -313,7 +313,7 @@ int cfs_waitq_active(cfs_waitq_t *waitq)
  */
 
 
-void cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr)
+void wake_up_nr(wait_queue_head_t *waitq, int nr)
 {
     int     result;
     cfs_waitlink_channel_t * scan;
@@ -326,7 +326,7 @@ void cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr)
                             cfs_waitlink_channel_t,
                             link) {
 
-        cfs_waitlink_t *waitl = scan->waitl;
+       wait_queue_t *waitl = scan->waitl;
 
         result = cfs_wake_event(waitl->event);
         LASSERT( result == FALSE || result == TRUE );
@@ -344,11 +344,11 @@ void cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr)
 }
 
 /*
- * cfs_waitq_signal
+ * wake_up
  *   To wake up all the non-exclusive tasks and 1 exclusive
  *
  * Arguments:
- *   waitq:  pointer to the cfs_waitq_t structure
+ *   waitq:  pointer to the wait_queue_head_t structure
  *
  * Return Value:
  *   N/A
@@ -357,18 +357,18 @@ void cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr)
  *   N/A
  */
 
-void cfs_waitq_signal(cfs_waitq_t *waitq)
+void wake_up(wait_queue_head_t *waitq)
 {
-    cfs_waitq_signal_nr(waitq, 1);
+    wake_up_nr(waitq, 1);
 }
 
 
 /*
- * cfs_waitq_broadcast
+ * wake_up_all
  *   To wake up all the tasks in the waitq
  *
  * Arguments:
- *   waitq:  pointer to the cfs_waitq_t structure
+ *   waitq:  pointer to the wait_queue_head_t structure
  *
  * Return Value:
  *   N/A
@@ -377,20 +377,20 @@ void cfs_waitq_signal(cfs_waitq_t *waitq)
  *   N/A
  */
 
-void cfs_waitq_broadcast(cfs_waitq_t *waitq)
+void wake_up_all(wait_queue_head_t *waitq)
 {
     LASSERT(waitq != NULL);
     LASSERT(waitq->magic ==CFS_WAITQ_MAGIC);
 
-       cfs_waitq_signal_nr(waitq, 0);
+       wake_up_nr(waitq, 0);
 }
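
So in this emulation the three wake-up entry points differ only in how many exclusive waiters they release; non-exclusive waiters are always woken (q below stands for any wait_queue_head_t):

wake_up(&q);            /* non-exclusive waiters + 1 exclusive  */
wake_up_nr(&q, nr);     /* non-exclusive waiters + nr exclusive */
wake_up_all(&q);        /* everyone: nr == 0 imposes no limit   */
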
 
 /*
- * cfs_waitq_wait
+ * waitq_wait
  *   To wait on the link node until it is signaled.
  *
  * Arguments:
- *   link:  pointer to the cfs_waitlink_t structure
+ *   link:  pointer to the wait_queue_t structure
  *
  * Return Value:
  *   N/A
@@ -399,7 +399,7 @@ void cfs_waitq_broadcast(cfs_waitq_t *waitq)
  *   N/A
  */
 
-void cfs_waitq_wait(cfs_waitlink_t *link, cfs_task_state_t state)
+void waitq_wait(wait_queue_t *link, long state)
 { 
     LASSERT(link != NULL);
     LASSERT(link->magic == CFS_WAITLINK_MAGIC);
@@ -413,11 +413,11 @@ void cfs_waitq_wait(cfs_waitlink_t *link, cfs_task_state_t state)
 }
 
 /*
- * cfs_waitq_timedwait
+ * waitq_timedwait
  *   To wait the link node to be signaled with a timeout limit
  *
  * Arguments:
- *   link:   pointer to the cfs_waitlink_t structure
+ *   link:   pointer to the wait_queue_t structure
  *   timeout: the timeout limitation
  *
  * Return Value:
@@ -429,8 +429,8 @@ void cfs_waitq_wait(cfs_waitlink_t *link, cfs_task_state_t state)
 *   What if it happens to be woken up just as the timeout expires!?
  */
 
-int64_t cfs_waitq_timedwait( cfs_waitlink_t *link,
-                             cfs_task_state_t state,
+int64_t waitq_timedwait( wait_queue_t *link,
+                            long state,
                              int64_t timeout)
 { 
 
index b56d266..776a656 100644
@@ -51,7 +51,7 @@ typedef struct cfs_wi_sched {
        /** serialised workitems */
        spinlock_t              ws_lock;
        /** where schedulers sleep */
-       cfs_waitq_t             ws_waitq;
+       wait_queue_head_t               ws_waitq;
 #endif
        /** concurrent workitems */
        cfs_list_t              ws_runq;
@@ -216,26 +216,26 @@ cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
        LASSERT(!cfs_in_interrupt()); /* because we use plain spinlock */
        LASSERT(!sched->ws_stopping);
 
-        cfs_wi_sched_lock(sched);
+       cfs_wi_sched_lock(sched);
 
-        if (!wi->wi_scheduled) {
-                LASSERT (cfs_list_empty(&wi->wi_list));
+       if (!wi->wi_scheduled) {
+               LASSERT (cfs_list_empty(&wi->wi_list));
 
-                wi->wi_scheduled = 1;
+               wi->wi_scheduled = 1;
                sched->ws_nscheduled++;
-                if (!wi->wi_running) {
-                        cfs_list_add_tail(&wi->wi_list, &sched->ws_runq);
+               if (!wi->wi_running) {
+                       cfs_list_add_tail(&wi->wi_list, &sched->ws_runq);
 #ifdef __KERNEL__
-                        cfs_waitq_signal(&sched->ws_waitq);
+                       wake_up(&sched->ws_waitq);
 #endif
-                } else {
-                        cfs_list_add(&wi->wi_list, &sched->ws_rerunq);
-                }
-        }
+               } else {
+                       cfs_list_add(&wi->wi_list, &sched->ws_rerunq);
+               }
+       }
 
-        LASSERT (!cfs_list_empty(&wi->wi_list));
-        cfs_wi_sched_unlock(sched);
-        return;
+       LASSERT (!cfs_list_empty(&wi->wi_list));
+       cfs_wi_sched_unlock(sched);
+       return;
 }
 EXPORT_SYMBOL(cfs_wi_schedule);
 
@@ -303,14 +303,14 @@ cfs_wi_scheduler (void *arg)
                         cfs_list_move_tail(&wi->wi_list, &sched->ws_runq);
                 }
 
-                if (!cfs_list_empty(&sched->ws_runq)) {
-                        cfs_wi_sched_unlock(sched);
-                        /* don't sleep because some workitems still
-                         * expect me to come back soon */
-                        cfs_cond_resched();
-                        cfs_wi_sched_lock(sched);
-                        continue;
-                }
+               if (!cfs_list_empty(&sched->ws_runq)) {
+                       cfs_wi_sched_unlock(sched);
+                       /* don't sleep because some workitems still
+                        * expect me to come back soon */
+                       cond_resched();
+                       cfs_wi_sched_lock(sched);
+                       continue;
+               }
 
                cfs_wi_sched_unlock(sched);
                rc = wait_event_interruptible_exclusive(sched->ws_waitq,
@@ -396,7 +396,7 @@ cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
        spin_unlock(&cfs_wi_data.wi_glock);
 
 #ifdef __KERNEL__
-       cfs_waitq_broadcast(&sched->ws_waitq);
+       wake_up_all(&sched->ws_waitq);
 
        spin_lock(&cfs_wi_data.wi_glock);
        {
@@ -445,7 +445,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
 
 #ifdef __KERNEL__
        spin_lock_init(&sched->ws_lock);
-       cfs_waitq_init(&sched->ws_waitq);
+       init_waitqueue_head(&sched->ws_waitq);
 #endif
        CFS_INIT_LIST_HEAD(&sched->ws_runq);
        CFS_INIT_LIST_HEAD(&sched->ws_rerunq);
@@ -459,7 +459,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
                spin_lock(&cfs_wi_data.wi_glock);
                while (sched->ws_starting > 0) {
                        spin_unlock(&cfs_wi_data.wi_glock);
-                       cfs_schedule();
+                       schedule();
                        spin_lock(&cfs_wi_data.wi_glock);
                }
 
@@ -529,7 +529,7 @@ cfs_wi_shutdown (void)
        /* nobody should contend on this list */
        cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
                sched->ws_stopping = 1;
-               cfs_waitq_broadcast(&sched->ws_waitq);
+               wake_up_all(&sched->ws_waitq);
        }
 
        cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
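
The scheduler sleep seen earlier (wait_event_interruptible_exclusive() on ws_waitq) pairs with the two wake-up flavours used in this file: queuing a single workitem calls wake_up(), which releases just one exclusive waiter, while the shutdown paths call wake_up_all() so every scheduler thread observes ws_stopping. Sketch of the waiter side; the condition is an assumption, since the macro invocation continues past what this hunk shows:

rc = wait_event_interruptible_exclusive(sched->ws_waitq,
                !cfs_list_empty(&sched->ws_runq) ||
                sched->ws_stopping);
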
index 8aaa239..079ea62 100644
@@ -725,7 +725,7 @@ typedef struct
        /* Event Queue container */
        struct lnet_res_container       ln_eq_container;
 #ifdef __KERNEL__
-       cfs_waitq_t                     ln_eq_waitq;
+       wait_queue_head_t                       ln_eq_waitq;
        spinlock_t                      ln_eq_wait_lock;
 #else
 # ifndef HAVE_LIBPTHREAD
index db839ca..f80347c 100644
@@ -2529,9 +2529,9 @@ mxlnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
 void
 mxlnd_sleep(unsigned long timeout)
 {
-        cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-        cfs_schedule_timeout(timeout);
-        return;
+       set_current_state(TASK_INTERRUPTIBLE);
+       schedule_timeout(timeout);
+       return;
 }
 
 /**
index 461eaaf..2da50e2 100644
@@ -790,19 +790,19 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
                goto failed_2;
        }
 
-        if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
-                /* wakeup failover thread and teardown connection */
-                if (kiblnd_dev_can_failover(dev)) {
-                        cfs_list_add_tail(&dev->ibd_fail_list,
-                                      &kiblnd_data.kib_failed_devs);
-                        cfs_waitq_signal(&kiblnd_data.kib_failover_waitq);
-                }
+       if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
+               /* wakeup failover thread and teardown connection */
+               if (kiblnd_dev_can_failover(dev)) {
+                       cfs_list_add_tail(&dev->ibd_fail_list,
+                                     &kiblnd_data.kib_failed_devs);
+                       wake_up(&kiblnd_data.kib_failover_waitq);
+               }
 
                write_unlock_irqrestore(glock, flags);
-                CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
-                       cmid->device->name, dev->ibd_ifname);
-                goto failed_2;
-        }
+               CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
+                      cmid->device->name, dev->ibd_ifname);
+               goto failed_2;
+       }
 
         kiblnd_hdev_addref_locked(dev->ibd_hdev);
         conn->ibc_hdev = dev->ibd_hdev;
@@ -1325,7 +1325,7 @@ kiblnd_current_hdev(kib_dev_t *dev)
                if (i++ % 50 == 0)
                        CDEBUG(D_NET, "%s: Wait for failover\n",
                               dev->ibd_ifname);
-               cfs_schedule_timeout(cfs_time_seconds(1) / 100);
+               schedule_timeout(cfs_time_seconds(1) / 100);
 
                read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
        }
@@ -1672,7 +1672,7 @@ kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
                spin_unlock(&fps->fps_lock);
                CDEBUG(D_NET, "Another thread is allocating new "
                       "FMR pool, waiting for her to complete\n");
-               cfs_schedule();
+               schedule();
                goto again;
 
        }
@@ -1875,7 +1875,7 @@ kiblnd_pool_alloc_node(kib_poolset_t *ps)
                 CDEBUG(D_NET, "Another thread is allocating new "
                        "%s pool, waiting for her to complete\n",
                        ps->ps_name);
-                cfs_schedule();
+               schedule();
                 goto again;
         }
 
@@ -2831,20 +2831,20 @@ kiblnd_base_shutdown(void)
                 LASSERT (cfs_list_empty(&kiblnd_data.kib_connd_zombies));
                 LASSERT (cfs_list_empty(&kiblnd_data.kib_connd_conns));
 
-                /* flag threads to terminate; wake and wait for them to die */
-                kiblnd_data.kib_shutdown = 1;
+               /* flag threads to terminate; wake and wait for them to die */
+               kiblnd_data.kib_shutdown = 1;
 
                /* NB: we really want to stop scheduler threads net by net
                 * instead of the whole module, this should be improved
                 * with dynamic configuration LNet */
                cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds)
-                       cfs_waitq_broadcast(&sched->ibs_waitq);
+                       wake_up_all(&sched->ibs_waitq);
 
-                cfs_waitq_broadcast(&kiblnd_data.kib_connd_waitq);
-                cfs_waitq_broadcast(&kiblnd_data.kib_failover_waitq);
+               wake_up_all(&kiblnd_data.kib_connd_waitq);
+               wake_up_all(&kiblnd_data.kib_failover_waitq);
 
-                i = 2;
-                while (cfs_atomic_read(&kiblnd_data.kib_nthreads) != 0) {
+               i = 2;
+               while (cfs_atomic_read(&kiblnd_data.kib_nthreads) != 0) {
                         i++;
                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                                "Waiting for %d threads to terminate\n",
@@ -2975,10 +2975,10 @@ kiblnd_base_startup(void)
                 CFS_INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
 
        spin_lock_init(&kiblnd_data.kib_connd_lock);
-        CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
-        CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
-        cfs_waitq_init(&kiblnd_data.kib_connd_waitq);
-       cfs_waitq_init(&kiblnd_data.kib_failover_waitq);
+       CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
+       CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
+       init_waitqueue_head(&kiblnd_data.kib_connd_waitq);
+       init_waitqueue_head(&kiblnd_data.kib_failover_waitq);
 
        kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(),
                                                  sizeof(*sched));
@@ -2990,7 +2990,7 @@ kiblnd_base_startup(void)
 
                spin_lock_init(&sched->ibs_lock);
                CFS_INIT_LIST_HEAD(&sched->ibs_conns);
-               cfs_waitq_init(&sched->ibs_waitq);
+               init_waitqueue_head(&sched->ibs_waitq);
 
                nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
                if (*kiblnd_tunables.kib_nscheds > 0) {
index 95e60db..661756c 100644
@@ -378,7 +378,7 @@ struct kib_sched_info {
        /* serialise */
        spinlock_t              ibs_lock;
        /* schedulers sleep here */
-       cfs_waitq_t             ibs_waitq;
+       wait_queue_head_t               ibs_waitq;
        /* conns to check for rx completions */
        cfs_list_t              ibs_conns;
        /* number of scheduler threads */
@@ -396,7 +396,7 @@ typedef struct
        /* list head of failed devices */
        cfs_list_t              kib_failed_devs;
        /* schedulers sleep here */
-       cfs_waitq_t             kib_failover_waitq;
+       wait_queue_head_t               kib_failover_waitq;
        cfs_atomic_t            kib_nthreads;   /* # live threads */
        /* stabilize net/dev/peer/conn ops */
        rwlock_t                kib_global_lock;
@@ -411,7 +411,7 @@ typedef struct
        /* connections with zero refcount */
        cfs_list_t              kib_connd_zombies;
        /* connection daemon sleeps here */
-       cfs_waitq_t             kib_connd_waitq;
+       wait_queue_head_t               kib_connd_waitq;
        spinlock_t              kib_connd_lock; /* serialise */
        struct ib_qp_attr       kib_error_qpa;  /* QP->ERROR */
        /* percpt data for schedulers */
@@ -713,7 +713,7 @@ do {                                                                        \
                spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);  \
                cfs_list_add_tail(&(conn)->ibc_list,                    \
                                  &kiblnd_data.kib_connd_zombies);      \
-               cfs_waitq_signal(&kiblnd_data.kib_connd_waitq);         \
+               wake_up(&kiblnd_data.kib_connd_waitq);          \
                spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
        }                                                               \
 } while (0)
index 4d9bada..9ca2cb0 100644
@@ -1909,17 +1909,17 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error)
 
         kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
 
-        if (error != 0 &&
-            kiblnd_dev_can_failover(dev)) {
-                cfs_list_add_tail(&dev->ibd_fail_list,
-                              &kiblnd_data.kib_failed_devs);
-                cfs_waitq_signal(&kiblnd_data.kib_failover_waitq);
-        }
+       if (error != 0 &&
+           kiblnd_dev_can_failover(dev)) {
+               cfs_list_add_tail(&dev->ibd_fail_list,
+                             &kiblnd_data.kib_failed_devs);
+               wake_up(&kiblnd_data.kib_failover_waitq);
+       }
 
        spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
 
        cfs_list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns);
-       cfs_waitq_signal(&kiblnd_data.kib_connd_waitq);
+       wake_up(&kiblnd_data.kib_connd_waitq);
 
        spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
 }
@@ -3126,19 +3126,19 @@ kiblnd_disconnect_conn (kib_conn_t *conn)
 int
 kiblnd_connd (void *arg)
 {
-        cfs_waitlink_t     wait;
-        unsigned long      flags;
-        kib_conn_t        *conn;
-        int                timeout;
-        int                i;
-        int                dropped_lock;
-        int                peer_index = 0;
-        unsigned long      deadline = jiffies;
+       wait_queue_t     wait;
+       unsigned long      flags;
+       kib_conn_t        *conn;
+       int                timeout;
+       int                i;
+       int                dropped_lock;
+       int                peer_index = 0;
+       unsigned long      deadline = jiffies;
 
-        cfs_block_allsigs ();
+       cfs_block_allsigs ();
 
-        cfs_waitlink_init (&wait);
-        kiblnd_data.kib_connd = current;
+       init_waitqueue_entry_current (&wait);
+       kiblnd_data.kib_connd = current;
 
        spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
 
@@ -3214,14 +3214,14 @@ kiblnd_connd (void *arg)
                        continue;
 
                /* Nothing to do for 'timeout'  */
-               cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-               cfs_waitq_add(&kiblnd_data.kib_connd_waitq, &wait);
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
                spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
 
-               cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);
+               waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
 
-               cfs_set_current_state(CFS_TASK_RUNNING);
-               cfs_waitq_del(&kiblnd_data.kib_connd_waitq, &wait);
+               set_current_state(TASK_RUNNING);
+               remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
                spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
        }
 
@@ -3303,8 +3303,8 @@ kiblnd_cq_completion(struct ib_cq *cq, void *arg)
                conn->ibc_scheduled = 1;
                cfs_list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);
 
-               if (cfs_waitq_active(&sched->ibs_waitq))
-                       cfs_waitq_signal(&sched->ibs_waitq);
+               if (waitqueue_active(&sched->ibs_waitq))
+                       wake_up(&sched->ibs_waitq);
        }
 
        spin_unlock_irqrestore(&sched->ibs_lock, flags);
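
The waitqueue_active() test above is an optimisation that skips the wake-up when nobody is sleeping. It is only safe because the queue check and the sleeper's enqueue and state change are ordered by the same lock; without such ordering the classic lost-wakeup race applies. Generic shape of the guarded producer:

spin_lock_irqsave(&sched->ibs_lock, flags);
cfs_list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);
if (waitqueue_active(&sched->ibs_waitq))   /* waiters enqueue under ibs_lock too */
        wake_up(&sched->ibs_waitq);
spin_unlock_irqrestore(&sched->ibs_lock, flags);
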
@@ -3325,7 +3325,7 @@ kiblnd_scheduler(void *arg)
        long                    id = (long)arg;
        struct kib_sched_info   *sched;
        kib_conn_t              *conn;
-       cfs_waitlink_t          wait;
+       wait_queue_t            wait;
        unsigned long           flags;
        struct ib_wc            wc;
        int                     did_something;
@@ -3334,7 +3334,7 @@ kiblnd_scheduler(void *arg)
 
        cfs_block_allsigs();
 
-       cfs_waitlink_init(&wait);
+       init_waitqueue_entry_current(&wait);
 
        sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
 
@@ -3352,7 +3352,7 @@ kiblnd_scheduler(void *arg)
                if (busy_loops++ >= IBLND_RESCHED) {
                        spin_unlock_irqrestore(&sched->ibs_lock, flags);
 
-                       cfs_cond_resched();
+                       cond_resched();
                        busy_loops = 0;
 
                        spin_lock_irqsave(&sched->ibs_lock, flags);
@@ -3409,8 +3409,8 @@ kiblnd_scheduler(void *arg)
                                kiblnd_conn_addref(conn);
                                cfs_list_add_tail(&conn->ibc_sched_list,
                                                  &sched->ibs_conns);
-                               if (cfs_waitq_active(&sched->ibs_waitq))
-                                       cfs_waitq_signal(&sched->ibs_waitq);
+                               if (waitqueue_active(&sched->ibs_waitq))
+                                       wake_up(&sched->ibs_waitq);
                        } else {
                                conn->ibc_scheduled = 0;
                        }
@@ -3429,15 +3429,15 @@ kiblnd_scheduler(void *arg)
                 if (did_something)
                         continue;
 
-               cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-               cfs_waitq_add_exclusive(&sched->ibs_waitq, &wait);
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
                spin_unlock_irqrestore(&sched->ibs_lock, flags);
 
-               cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
+               waitq_wait(&wait, TASK_INTERRUPTIBLE);
                busy_loops = 0;
 
-               cfs_waitq_del(&sched->ibs_waitq, &wait);
-               cfs_set_current_state(CFS_TASK_RUNNING);
+               remove_wait_queue(&sched->ibs_waitq, &wait);
+               set_current_state(TASK_RUNNING);
                spin_lock_irqsave(&sched->ibs_lock, flags);
        }
 
@@ -3451,16 +3451,16 @@ int
 kiblnd_failover_thread(void *arg)
 {
        rwlock_t                *glock = &kiblnd_data.kib_global_lock;
-        kib_dev_t         *dev;
-        cfs_waitlink_t     wait;
-        unsigned long      flags;
-        int                rc;
+       kib_dev_t         *dev;
+       wait_queue_t     wait;
+       unsigned long      flags;
+       int                rc;
 
-        LASSERT (*kiblnd_tunables.kib_dev_failover != 0);
+       LASSERT (*kiblnd_tunables.kib_dev_failover != 0);
 
-        cfs_block_allsigs ();
+       cfs_block_allsigs ();
 
-        cfs_waitlink_init(&wait);
+       init_waitqueue_entry_current(&wait);
        write_lock_irqsave(glock, flags);
 
         while (!kiblnd_data.kib_shutdown) {
@@ -3506,14 +3506,14 @@ kiblnd_failover_thread(void *arg)
                 /* long sleep if no more pending failover */
                 long_sleep = cfs_list_empty(&kiblnd_data.kib_failed_devs);
 
-                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-                cfs_waitq_add(&kiblnd_data.kib_failover_waitq, &wait);
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
                write_unlock_irqrestore(glock, flags);
 
                rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
                                                   cfs_time_seconds(1));
-               cfs_set_current_state(CFS_TASK_RUNNING);
-               cfs_waitq_del(&kiblnd_data.kib_failover_waitq, &wait);
+               set_current_state(TASK_RUNNING);
+               remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
                write_lock_irqsave(glock, flags);
 
                 if (!long_sleep || rc != 0)
index ee12cd8..960e938 100644
@@ -571,18 +571,18 @@ kptllnd_base_shutdown (void)
                 kptllnd_data.kptl_shutdown = 2;
                 cfs_mb();
 
-                i = 2;
-                while (cfs_atomic_read (&kptllnd_data.kptl_nthreads) != 0) {
-                        /* Wake up all threads*/
-                        cfs_waitq_broadcast(&kptllnd_data.kptl_sched_waitq);
-                        cfs_waitq_broadcast(&kptllnd_data.kptl_watchdog_waitq);
-
-                        i++;
-                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
-                               "Waiting for %d threads to terminate\n",
-                               cfs_atomic_read(&kptllnd_data.kptl_nthreads));
-                        cfs_pause(cfs_time_seconds(1));
-                }
+               i = 2;
+               while (cfs_atomic_read (&kptllnd_data.kptl_nthreads) != 0) {
+                       /* Wake up all threads*/
+                       wake_up_all(&kptllnd_data.kptl_sched_waitq);
+                       wake_up_all(&kptllnd_data.kptl_watchdog_waitq);
+
+                       i++;
+                       CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
+                              "Waiting for %d threads to terminate\n",
+                              cfs_atomic_read(&kptllnd_data.kptl_nthreads));
+                       cfs_pause(cfs_time_seconds(1));
+               }
 
                 CDEBUG(D_NET, "All Threads stopped\n");
                 LASSERT(cfs_list_empty(&kptllnd_data.kptl_sched_txq));
@@ -678,12 +678,12 @@ kptllnd_base_startup (void)
        rwlock_init(&kptllnd_data.kptl_net_rw_lock);
         CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_nets);
 
-        /* Setup the sched locks/lists/waitq */
+       /* Setup the sched locks/lists/waitq */
        spin_lock_init(&kptllnd_data.kptl_sched_lock);
-        cfs_waitq_init(&kptllnd_data.kptl_sched_waitq);
-        CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_txq);
-        CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxq);
-        CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxbq);
+       init_waitqueue_head(&kptllnd_data.kptl_sched_waitq);
+       CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_txq);
+       CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxq);
+       CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxbq);
 
         /* Init kptl_ptlid2str_lock before any call to kptllnd_ptlid2str */
        spin_lock_init(&kptllnd_data.kptl_ptlid2str_lock);
@@ -775,9 +775,9 @@ kptllnd_base_startup (void)
         kptllnd_data.kptl_nak_msg->ptlm_srcstamp = kptllnd_data.kptl_incarnation;
 
        rwlock_init(&kptllnd_data.kptl_peer_rw_lock);
-        cfs_waitq_init(&kptllnd_data.kptl_watchdog_waitq);
-        CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_closing_peers);
-        CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_zombie_peers);
+       init_waitqueue_head(&kptllnd_data.kptl_watchdog_waitq);
+       CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_closing_peers);
+       CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_zombie_peers);
 
         /* Allocate and setup the peer hash table */
         kptllnd_data.kptl_peer_hash_size =
index 682367c..32a5509 100644
@@ -262,14 +262,14 @@ struct kptl_data
        cfs_list_t              kptl_nets;              /* kptl_net instance*/
 
        spinlock_t              kptl_sched_lock;        /* serialise... */
-        cfs_waitq_t             kptl_sched_waitq;      /* schedulers sleep here */
-        cfs_list_t              kptl_sched_txq;        /* tx requiring attention */
-        cfs_list_t              kptl_sched_rxq;        /* rx requiring attention */
-        cfs_list_t              kptl_sched_rxbq;       /* rxb requiring reposting */
+       wait_queue_head_t       kptl_sched_waitq;      /* schedulers sleep here */
+       cfs_list_t              kptl_sched_txq;        /* tx requiring attention */
+       cfs_list_t              kptl_sched_rxq;        /* rx requiring attention */
+       cfs_list_t              kptl_sched_rxbq;       /* rxb requiring reposting */
 
-        cfs_waitq_t             kptl_watchdog_waitq;   /* watchdog sleeps here */
+       wait_queue_head_t       kptl_watchdog_waitq;   /* watchdog sleeps here */
 
-        kptl_rx_buffer_pool_t   kptl_rx_buffer_pool;   /* rx buffer pool */
+       kptl_rx_buffer_pool_t   kptl_rx_buffer_pool;   /* rx buffer pool */
       struct kmem_cache       *kptl_rx_cache;         /* rx descriptor cache */
 
         cfs_atomic_t            kptl_ntx;              /* # tx descs allocated */
@@ -399,7 +399,7 @@ kptllnd_rx_buffer_decref_locked(kptl_rx_buffer_t *rxb)
 
                cfs_list_add_tail(&rxb->rxb_repost_list,
                                  &kptllnd_data.kptl_sched_rxbq);
-               cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
+               wake_up(&kptllnd_data.kptl_sched_waitq);
 
                spin_unlock(&kptllnd_data.kptl_sched_lock);
        }
index caf90a6..6b47a16 100644
@@ -668,17 +668,17 @@ kptllnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
 int
 kptllnd_watchdog(void *arg)
 {
-        int                 id = (long)arg;
-        cfs_waitlink_t      waitlink;
-        int                 stamp = 0;
-        int                 peer_index = 0;
-        unsigned long       deadline = jiffies;
-        int                 timeout;
-        int                 i;
+       int                 id = (long)arg;
+       wait_queue_t        waitlink;
+       int                 stamp = 0;
+       int                 peer_index = 0;
+       unsigned long       deadline = jiffies;
+       int                 timeout;
+       int                 i;
 
-        cfs_block_allsigs();
+       cfs_block_allsigs();
 
-        cfs_waitlink_init(&waitlink);
+       init_waitqueue_entry_current(&waitlink);
 
         /* threads shut down in phase 2 after all peers have been destroyed */
         while (kptllnd_data.kptl_shutdown < 2) {
@@ -717,36 +717,36 @@ kptllnd_watchdog(void *arg)
 
                 kptllnd_handle_closing_peers();
 
-                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-                cfs_waitq_add_exclusive(&kptllnd_data.kptl_watchdog_waitq,
-                                        &waitlink);
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue_exclusive(&kptllnd_data.kptl_watchdog_waitq,
+                                       &waitlink);
 
-                cfs_waitq_timedwait(&waitlink, CFS_TASK_INTERRUPTIBLE, timeout);
+               waitq_timedwait(&waitlink, TASK_INTERRUPTIBLE, timeout);
 
-                cfs_set_current_state (CFS_TASK_RUNNING);
-                cfs_waitq_del(&kptllnd_data.kptl_watchdog_waitq, &waitlink);
-        }
+               set_current_state (TASK_RUNNING);
+               remove_wait_queue(&kptllnd_data.kptl_watchdog_waitq, &waitlink);
+       }
 
-        kptllnd_thread_fini();
-        CDEBUG(D_NET, "<<<\n");
-        return (0);
+       kptllnd_thread_fini();
+       CDEBUG(D_NET, "<<<\n");
+       return (0);
 };
 
 int
 kptllnd_scheduler (void *arg)
 {
-        int                 id = (long)arg;
-        cfs_waitlink_t      waitlink;
-        unsigned long       flags;
-        int                 did_something;
-        int                 counter = 0;
-        kptl_rx_t          *rx;
-        kptl_rx_buffer_t   *rxb;
-        kptl_tx_t          *tx;
+       int                 id = (long)arg;
+       wait_queue_t        waitlink;
+       unsigned long       flags;
+       int                 did_something;
+       int                 counter = 0;
+       kptl_rx_t          *rx;
+       kptl_rx_buffer_t   *rxb;
+       kptl_tx_t          *tx;
 
-        cfs_block_allsigs();
+       cfs_block_allsigs();
 
-        cfs_waitlink_init(&waitlink);
+       init_waitqueue_entry_current(&waitlink);
 
        spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
 
@@ -808,24 +808,24 @@ kptllnd_scheduler (void *arg)
                                 continue;
                 }
 
-                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-                cfs_waitq_add_exclusive(&kptllnd_data.kptl_sched_waitq,
-                                        &waitlink);
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue_exclusive(&kptllnd_data.kptl_sched_waitq,
+                                       &waitlink);
                spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
-                                           flags);
+                                          flags);
 
-                if (!did_something)
-                        cfs_waitq_wait(&waitlink, CFS_TASK_INTERRUPTIBLE);
-                else
-                        cfs_cond_resched();
+               if (!did_something)
+                       waitq_wait(&waitlink, TASK_INTERRUPTIBLE);
+               else
+                       cond_resched();
 
-                cfs_set_current_state(CFS_TASK_RUNNING);
-                cfs_waitq_del(&kptllnd_data.kptl_sched_waitq, &waitlink);
+               set_current_state(TASK_RUNNING);
+               remove_wait_queue(&kptllnd_data.kptl_sched_waitq, &waitlink);
 
                spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
 
-                counter = 0;
-        }
+               counter = 0;
+       }
 
        spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
 
index 6cc44f0..ebed094 100644 (file)
@@ -424,37 +424,37 @@ kptllnd_handle_closing_peers ()
 void
 kptllnd_peer_close_locked(kptl_peer_t *peer, int why)
 {
-        switch (peer->peer_state) {
-        default:
-                LBUG();
-
-        case PEER_STATE_WAITING_HELLO:
-        case PEER_STATE_ACTIVE:
-                /* Ensure new peers see a new incarnation of me */
-                LASSERT(peer->peer_myincarnation <= kptllnd_data.kptl_incarnation);
-                if (peer->peer_myincarnation == kptllnd_data.kptl_incarnation)
-                        kptllnd_data.kptl_incarnation++;
-
-                /* Removing from peer table */
-                kptllnd_data.kptl_n_active_peers--;
-                LASSERT (kptllnd_data.kptl_n_active_peers >= 0);
-
-                cfs_list_del(&peer->peer_list);
-                kptllnd_peer_unreserve_buffers();
-
-                peer->peer_error = why; /* stash 'why' only on first close */
-                peer->peer_state = PEER_STATE_CLOSING;
-
-                /* Schedule for immediate attention, taking peer table's ref */
-                cfs_list_add_tail(&peer->peer_list,
-                                 &kptllnd_data.kptl_closing_peers);
-                cfs_waitq_signal(&kptllnd_data.kptl_watchdog_waitq);
-                break;
-
-        case PEER_STATE_ZOMBIE:
-        case PEER_STATE_CLOSING:
-                break;
-        }
+       switch (peer->peer_state) {
+       default:
+               LBUG();
+
+       case PEER_STATE_WAITING_HELLO:
+       case PEER_STATE_ACTIVE:
+               /* Ensure new peers see a new incarnation of me */
+               LASSERT(peer->peer_myincarnation <= kptllnd_data.kptl_incarnation);
+               if (peer->peer_myincarnation == kptllnd_data.kptl_incarnation)
+                       kptllnd_data.kptl_incarnation++;
+
+               /* Removing from peer table */
+               kptllnd_data.kptl_n_active_peers--;
+               LASSERT (kptllnd_data.kptl_n_active_peers >= 0);
+
+               cfs_list_del(&peer->peer_list);
+               kptllnd_peer_unreserve_buffers();
+
+               peer->peer_error = why; /* stash 'why' only on first close */
+               peer->peer_state = PEER_STATE_CLOSING;
+
+               /* Schedule for immediate attention, taking peer table's ref */
+               cfs_list_add_tail(&peer->peer_list,
+                                &kptllnd_data.kptl_closing_peers);
+               wake_up(&kptllnd_data.kptl_watchdog_waitq);
+               break;
+
+       case PEER_STATE_ZOMBIE:
+       case PEER_STATE_CLOSING:
+               break;
+       }
 }
 
 void
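The wake side of that pattern is symmetric: queue the item and signal while still holding the lock the sleeper rechecks under. cfs_waitq_signal() becomes wake_up(), which wakes every non-exclusive waiter but at most one waiter registered with add_wait_queue_exclusive(), so each queued item disturbs a single scheduler thread. A sketch with hypothetical names:

    spin_lock_irqsave(&sched_lock, flags);
    cfs_list_add_tail(&item->it_list, &sched_queue);
    wake_up(&sched_waitq);          /* at most one exclusive waiter runs */
    spin_unlock_irqrestore(&sched_lock, flags);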
index 094326c..f49c7eb 100644
@@ -478,14 +478,14 @@ kptllnd_rx_buffer_callback (ptl_event_t *ev)
                         rx->rx_treceived = jiffies;
                         /* Queue for attention */
                        spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
-                                              flags);
+                                             flags);
 
-                        cfs_list_add_tail(&rx->rx_list,
-                                          &kptllnd_data.kptl_sched_rxq);
-                        cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
+                       cfs_list_add_tail(&rx->rx_list,
+                                         &kptllnd_data.kptl_sched_rxq);
+                       wake_up(&kptllnd_data.kptl_sched_waitq);
 
                        spin_unlock_irqrestore(&kptllnd_data. \
-                                                   kptl_sched_lock, flags);
+                                                  kptl_sched_lock, flags);
                 }
         }
 
index fa2b392..9151d45 100644
@@ -512,15 +512,15 @@ kptllnd_tx_callback(ptl_event_t *ev)
 
        spin_unlock_irqrestore(&peer->peer_lock, flags);
 
-        /* drop peer's ref, but if it was the last one... */
-        if (cfs_atomic_dec_and_test(&tx->tx_refcount)) {
-                /* ...finalize it in thread context! */
+       /* drop peer's ref, but if it was the last one... */
+       if (cfs_atomic_dec_and_test(&tx->tx_refcount)) {
+               /* ...finalize it in thread context! */
                spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
 
-                cfs_list_add_tail(&tx->tx_list, &kptllnd_data.kptl_sched_txq);
-                cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
+               cfs_list_add_tail(&tx->tx_list, &kptllnd_data.kptl_sched_txq);
+               wake_up(&kptllnd_data.kptl_sched_waitq);
 
                spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
-                                           flags);
-        }
+                                          flags);
+       }
 }
index 1dce8d4..e62b094 100644
@@ -174,7 +174,7 @@ kqswnal_shutdown(lnet_ni_t *ni)
        /**********************************************************************/
        /* flag threads to terminate, wake them and wait for them to die */
        kqswnal_data.kqn_shuttingdown = 2;
-       cfs_waitq_broadcast (&kqswnal_data.kqn_sched_waitq);
+       wake_up_all (&kqswnal_data.kqn_sched_waitq);
 
        while (cfs_atomic_read (&kqswnal_data.kqn_nthreads) != 0) {
                CDEBUG(D_NET, "waiting for %d threads to terminate\n",
@@ -307,7 +307,7 @@ kqswnal_startup (lnet_ni_t *ni)
        CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_readyrxds);
 
        spin_lock_init(&kqswnal_data.kqn_sched_lock);
-       cfs_waitq_init (&kqswnal_data.kqn_sched_waitq);
+       init_waitqueue_head (&kqswnal_data.kqn_sched_waitq);
 
        /* pointers/lists/locks initialised */
        kqswnal_data.kqn_init = KQN_INIT_DATA;
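Shutdown paths use the broadcast form instead: cfs_waitq_broadcast() becomes wake_up_all(), which ignores the exclusive flag and wakes every waiter. With plain wake_up() only one exclusive scheduler thread would notice the shutdown flag and the rest would keep sleeping. A sketch of the handshake, with hypothetical names (the one-second poll is an assumption, not copied from this patch):

    kqn->shuttingdown = 2;              /* threads test this on wakeup */
    wake_up_all(&kqn->sched_waitq);     /* reach exclusive waiters too */

    while (cfs_atomic_read(&kqn->nthreads) != 0)
            schedule_timeout_uninterruptible(cfs_time_seconds(1));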
index 49059d2..a55ba12 100644
@@ -254,41 +254,41 @@ typedef struct
 
 typedef struct
 {
-        char                 kqn_init;        /* what's been initialised */
-        char                 kqn_shuttingdown;/* I'm trying to shut down */
-        cfs_atomic_t         kqn_nthreads;    /* # threads running */
-        lnet_ni_t           *kqn_ni;          /* _the_ instance of me */
+       char                 kqn_init;        /* what's been initialised */
+       char                 kqn_shuttingdown;/* I'm trying to shut down */
+       cfs_atomic_t         kqn_nthreads;    /* # threads running */
+       lnet_ni_t           *kqn_ni;          /* _the_ instance of me */
 
-        kqswnal_rx_t        *kqn_rxds;        /* stack of all the receive descriptors */
-        kqswnal_tx_t        *kqn_txds;        /* stack of all the transmit descriptors */
+       kqswnal_rx_t        *kqn_rxds;        /* stack of all the receive descriptors */
+       kqswnal_tx_t        *kqn_txds;        /* stack of all the transmit descriptors */
 
-        cfs_list_t           kqn_idletxds;    /* transmit descriptors free to use */
-        cfs_list_t           kqn_activetxds;  /* transmit descriptors being used */
+       cfs_list_t           kqn_idletxds;    /* transmit descriptors free to use */
+       cfs_list_t           kqn_activetxds;  /* transmit descriptors being used */
        spinlock_t      kqn_idletxd_lock;    /* serialise idle txd access */
        cfs_atomic_t    kqn_pending_txs;     /* # transmits being prepped */
 
        spinlock_t      kqn_sched_lock;      /* serialise packet schedulers */
-        cfs_waitq_t          kqn_sched_waitq;/* scheduler blocks here */
-
-        cfs_list_t           kqn_readyrxds;  /* rxds full of data */
-        cfs_list_t           kqn_donetxds;   /* completed transmits */
-        cfs_list_t           kqn_delayedtxds;/* delayed transmits */
-
-        EP_SYS              *kqn_ep;         /* elan system */
-        EP_NMH              *kqn_ep_tx_nmh;  /* elan reserved tx vaddrs */
-        EP_NMH              *kqn_ep_rx_nmh;  /* elan reserved rx vaddrs */
-        EP_XMTR             *kqn_eptx;       /* elan transmitter */
-        EP_RCVR             *kqn_eprx_small; /* elan receiver (small messages) */
-        EP_RCVR             *kqn_eprx_large; /* elan receiver (large messages) */
-
-        int                  kqn_nnodes;     /* this cluster's size */
-        int                  kqn_elanid;     /* this nodes's elan ID */
-
-        EP_STATUSBLK         kqn_rpc_success;/* preset RPC reply status blocks */
-        EP_STATUSBLK         kqn_rpc_failed;
-        EP_STATUSBLK         kqn_rpc_version;/* reply to future version query */
-        EP_STATUSBLK         kqn_rpc_magic;  /* reply to future version query */
-}  kqswnal_data_t;
+       wait_queue_head_t    kqn_sched_waitq;/* scheduler blocks here */
+
+       cfs_list_t           kqn_readyrxds;  /* rxds full of data */
+       cfs_list_t           kqn_donetxds;   /* completed transmits */
+       cfs_list_t           kqn_delayedtxds;/* delayed transmits */
+
+       EP_SYS              *kqn_ep;         /* elan system */
+       EP_NMH              *kqn_ep_tx_nmh;  /* elan reserved tx vaddrs */
+       EP_NMH              *kqn_ep_rx_nmh;  /* elan reserved rx vaddrs */
+       EP_XMTR             *kqn_eptx;       /* elan transmitter */
+       EP_RCVR             *kqn_eprx_small; /* elan receiver (small messages) */
+       EP_RCVR             *kqn_eprx_large; /* elan receiver (large messages) */
+
+       int                  kqn_nnodes;     /* this cluster's size */
+       int                  kqn_elanid;     /* this node's elan ID */
+
+       EP_STATUSBLK         kqn_rpc_success;/* preset RPC reply status blocks */
+       EP_STATUSBLK         kqn_rpc_failed;
+       EP_STATUSBLK         kqn_rpc_version;/* reply to future version query */
+       EP_STATUSBLK         kqn_rpc_magic;  /* reply to future version query */
+} kqswnal_data_t;
 
 /* kqn_init state */
 #define KQN_INIT_NOTHING        0               /* MUST BE ZERO so zeroed state is initialised OK */
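The struct hunks are the type half of the same cleanup: cfs_waitq_t fields become wait_queue_head_t, and each embedded head still needs runtime initialisation with init_waitqueue_head() where cfs_waitq_init() used to be called (a statically allocated head could use DECLARE_WAIT_QUEUE_HEAD() instead). A minimal sketch with a hypothetical type:

    #include <linux/spinlock.h>
    #include <linux/wait.h>

    struct sched_state {
            spinlock_t              ss_lock;        /* serialise */
            wait_queue_head_t       ss_waitq;       /* scheduler blocks here */
    };

    static void sched_state_init(struct sched_state *ss)
    {
            spin_lock_init(&ss->ss_lock);
            init_waitqueue_head(&ss->ss_waitq);     /* was cfs_waitq_init() */
    }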
index a469d3d..203cddc 100644
@@ -518,7 +518,7 @@ kqswnal_tx_done (kqswnal_tx_t *ktx, int status)
 
        cfs_list_add_tail(&ktx->ktx_schedlist,
                           &kqswnal_data.kqn_donetxds);
-       cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
+       wake_up(&kqswnal_data.kqn_sched_waitq);
 
        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
 }
@@ -669,7 +669,7 @@ kqswnal_launch (kqswnal_tx_t *ktx)
 
                cfs_list_add_tail(&ktx->ktx_schedlist,
                                  &kqswnal_data.kqn_delayedtxds);
-               cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
+               wake_up(&kqswnal_data.kqn_sched_waitq);
 
                spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
                                             flags);
@@ -1542,7 +1542,7 @@ kqswnal_rxhandler(EP_RXD *rxd)
        spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
 
        cfs_list_add_tail(&krx->krx_list, &kqswnal_data.kqn_readyrxds);
-       cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
+       wake_up(&kqswnal_data.kqn_sched_waitq);
 
        spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
 }
@@ -1764,15 +1764,15 @@ kqswnal_scheduler (void *arg)
                                                        kqn_donetxds) ||
                                        !cfs_list_empty(&kqswnal_data. \
                                                        kqn_delayedtxds));
-                                LASSERT (rc == 0);
-                        } else if (need_resched())
-                                cfs_schedule ();
+                               LASSERT (rc == 0);
+                       } else if (need_resched())
+                               schedule ();
 
                        spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
-                                               flags);
-                }
-        }
+                                              flags);
+               }
+       }
 
-        kqswnal_thread_fini ();
-        return (0);
+       kqswnal_thread_fini ();
+       return 0;
 }
index 189db2e..91ff1b1 100644
@@ -430,44 +430,44 @@ int
 kranal_set_conn_params(kra_conn_t *conn, kra_connreq_t *connreq,
                        __u32 peer_ip, int peer_port)
 {
-        kra_device_t  *dev = conn->rac_device;
-        unsigned long  flags;
-        RAP_RETURN     rrc;
-
-        /* CAVEAT EMPTOR: we're really overloading rac_last_tx + rac_keepalive
-         * to do RapkCompleteSync() timekeeping (see kibnal_scheduler). */
-        conn->rac_last_tx = jiffies;
-        conn->rac_keepalive = 0;
-
-        rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
-        if (rrc != RAP_SUCCESS) {
-                CERROR("Error setting riparams from %u.%u.%u.%u/%d: %d\n",
-                       HIPQUAD(peer_ip), peer_port, rrc);
-                return -ECONNABORTED;
-        }
-
-        /* Schedule conn on rad_new_conns */
-        kranal_conn_addref(conn);
+       kra_device_t  *dev = conn->rac_device;
+       unsigned long  flags;
+       RAP_RETURN     rrc;
+
+       /* CAVEAT EMPTOR: we're really overloading rac_last_tx + rac_keepalive
+        * to do RapkCompleteSync() timekeeping (see kibnal_scheduler). */
+       conn->rac_last_tx = jiffies;
+       conn->rac_keepalive = 0;
+
+       rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
+       if (rrc != RAP_SUCCESS) {
+               CERROR("Error setting riparams from %u.%u.%u.%u/%d: %d\n",
+                      HIPQUAD(peer_ip), peer_port, rrc);
+               return -ECONNABORTED;
+       }
+
+       /* Schedule conn on rad_new_conns */
+       kranal_conn_addref(conn);
        spin_lock_irqsave(&dev->rad_lock, flags);
-        cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns);
-        cfs_waitq_signal(&dev->rad_waitq);
+       cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns);
+       wake_up(&dev->rad_waitq);
        spin_unlock_irqrestore(&dev->rad_lock, flags);
 
-        rrc = RapkWaitToConnect(conn->rac_rihandle);
-        if (rrc != RAP_SUCCESS) {
-                CERROR("Error waiting to connect to %u.%u.%u.%u/%d: %d\n",
-                       HIPQUAD(peer_ip), peer_port, rrc);
-                return -ECONNABORTED;
-        }
-
-        /* Scheduler doesn't touch conn apart from to deschedule and decref it
-         * after RapkCompleteSync() return success, so conn is all mine */
-
-        conn->rac_peerstamp = connreq->racr_peerstamp;
-        conn->rac_peer_connstamp = connreq->racr_connstamp;
-        conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq->racr_timeout);
-        kranal_update_reaper_timeout(conn->rac_keepalive);
-        return 0;
+       rrc = RapkWaitToConnect(conn->rac_rihandle);
+       if (rrc != RAP_SUCCESS) {
+               CERROR("Error waiting to connect to %u.%u.%u.%u/%d: %d\n",
+                      HIPQUAD(peer_ip), peer_port, rrc);
+               return -ECONNABORTED;
+       }
+
+       /* Scheduler doesn't touch conn apart from to deschedule and decref it
+        * after RapkCompleteSync() returns success, so conn is all mine */
+
+       conn->rac_peerstamp = connreq->racr_peerstamp;
+       conn->rac_peer_connstamp = connreq->racr_connstamp;
+       conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq->racr_timeout);
+       kranal_update_reaper_timeout(conn->rac_keepalive);
+       return 0;
 }
 
 int
@@ -871,31 +871,31 @@ kranal_free_acceptsock (kra_acceptsock_t *ras)
 int
 kranal_accept (lnet_ni_t *ni, struct socket *sock)
 {
-        kra_acceptsock_t  *ras;
-        int                rc;
-        __u32              peer_ip;
-        int                peer_port;
-        unsigned long      flags;
+       kra_acceptsock_t  *ras;
+       int                rc;
+       __u32              peer_ip;
+       int                peer_port;
+       unsigned long      flags;
 
-        rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
-        LASSERT (rc == 0);                      /* we succeeded before */
+       rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
+       LASSERT (rc == 0);                      /* we succeeded before */
 
-        LIBCFS_ALLOC(ras, sizeof(*ras));
-        if (ras == NULL) {
-                CERROR("ENOMEM allocating connection request from "
-                       "%u.%u.%u.%u\n", HIPQUAD(peer_ip));
-                return -ENOMEM;
-        }
+       LIBCFS_ALLOC(ras, sizeof(*ras));
+       if (ras == NULL) {
+               CERROR("ENOMEM allocating connection request from "
+                      "%u.%u.%u.%u\n", HIPQUAD(peer_ip));
+               return -ENOMEM;
+       }
 
-        ras->ras_sock = sock;
+       ras->ras_sock = sock;
 
        spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
 
-        cfs_list_add_tail(&ras->ras_list, &kranal_data.kra_connd_acceptq);
-        cfs_waitq_signal(&kranal_data.kra_connd_waitq);
+       cfs_list_add_tail(&ras->ras_list, &kranal_data.kra_connd_acceptq);
+       wake_up(&kranal_data.kra_connd_waitq);
 
        spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
-        return 0;
+       return 0;
 }
 
 int
@@ -1498,21 +1498,21 @@ kranal_shutdown (lnet_ni_t *ni)
         /* Flag threads to terminate */
         kranal_data.kra_shutdown = 1;
 
-        for (i = 0; i < kranal_data.kra_ndevs; i++) {
-                kra_device_t *dev = &kranal_data.kra_devices[i];
+       for (i = 0; i < kranal_data.kra_ndevs; i++) {
+               kra_device_t *dev = &kranal_data.kra_devices[i];
 
                spin_lock_irqsave(&dev->rad_lock, flags);
-                cfs_waitq_signal(&dev->rad_waitq);
+               wake_up(&dev->rad_waitq);
                spin_unlock_irqrestore(&dev->rad_lock, flags);
-        }
+       }
 
        spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
-        cfs_waitq_broadcast(&kranal_data.kra_reaper_waitq);
+       wake_up_all(&kranal_data.kra_reaper_waitq);
        spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
 
-        LASSERT (cfs_list_empty(&kranal_data.kra_connd_peers));
+       LASSERT (cfs_list_empty(&kranal_data.kra_connd_peers));
        spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
-        cfs_waitq_broadcast(&kranal_data.kra_connd_waitq);
+       wake_up_all(&kranal_data.kra_connd_waitq);
        spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
 
         /* Wait for threads to exit */
@@ -1607,23 +1607,23 @@ kranal_startup (lnet_ni_t *ni)
 
        rwlock_init(&kranal_data.kra_global_lock);
 
-        for (i = 0; i < RANAL_MAXDEVS; i++ ) {
-                kra_device_t  *dev = &kranal_data.kra_devices[i];
+       for (i = 0; i < RANAL_MAXDEVS; i++ ) {
+               kra_device_t  *dev = &kranal_data.kra_devices[i];
 
-                dev->rad_idx = i;
-                CFS_INIT_LIST_HEAD(&dev->rad_ready_conns);
-                CFS_INIT_LIST_HEAD(&dev->rad_new_conns);
-                cfs_waitq_init(&dev->rad_waitq);
+               dev->rad_idx = i;
+               CFS_INIT_LIST_HEAD(&dev->rad_ready_conns);
+               CFS_INIT_LIST_HEAD(&dev->rad_new_conns);
+               init_waitqueue_head(&dev->rad_waitq);
                spin_lock_init(&dev->rad_lock);
-        }
+       }
 
-        kranal_data.kra_new_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
-        cfs_waitq_init(&kranal_data.kra_reaper_waitq);
+       kranal_data.kra_new_min_timeout = MAX_SCHEDULE_TIMEOUT;
+       init_waitqueue_head(&kranal_data.kra_reaper_waitq);
        spin_lock_init(&kranal_data.kra_reaper_lock);
 
-        CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
-        CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
-        cfs_waitq_init(&kranal_data.kra_connd_waitq);
+       CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
+       CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
+       init_waitqueue_head(&kranal_data.kra_connd_waitq);
        spin_lock_init(&kranal_data.kra_connd_lock);
 
         CFS_INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
index afec808..bfc863f 100644
@@ -104,58 +104,58 @@ typedef struct
 
 typedef struct
 {
-        RAP_PVOID              rad_handle;    /* device handle */
-        RAP_PVOID              rad_fma_cqh;   /* FMA completion queue handle */
-        RAP_PVOID              rad_rdma_cqh;  /* rdma completion queue handle */
-        int                    rad_id;        /* device id */
-        int                    rad_idx;       /* index in kra_devices */
-        int                    rad_ready;     /* set by device callback */
-        cfs_list_t             rad_ready_conns;/* connections ready to tx/rx */
-        cfs_list_t             rad_new_conns; /* new connections to complete */
-        cfs_waitq_t            rad_waitq;     /* scheduler waits here */
-       spinlock_t              rad_lock;       /* serialise */
-        void                  *rad_scheduler; /* scheduling thread */
-        unsigned int           rad_nphysmap;  /* # phys mappings */
-        unsigned int           rad_nppphysmap;/* # phys pages mapped */
-        unsigned int           rad_nvirtmap;  /* # virt mappings */
-        unsigned long          rad_nobvirtmap;/* # virt bytes mapped */
+       RAP_PVOID              rad_handle;    /* device handle */
+       RAP_PVOID              rad_fma_cqh;   /* FMA completion queue handle */
+       RAP_PVOID              rad_rdma_cqh;  /* rdma completion queue handle */
+       int                    rad_id;        /* device id */
+       int                    rad_idx;       /* index in kra_devices */
+       int                    rad_ready;     /* set by device callback */
+       cfs_list_t             rad_ready_conns;/* connections ready to tx/rx */
+       cfs_list_t             rad_new_conns; /* new connections to complete */
+       wait_queue_head_t      rad_waitq;     /* scheduler waits here */
+       spinlock_t             rad_lock;        /* serialise */
+       void                   *rad_scheduler; /* scheduling thread */
+       unsigned int           rad_nphysmap;  /* # phys mappings */
+       unsigned int           rad_nppphysmap;/* # phys pages mapped */
+       unsigned int           rad_nvirtmap;  /* # virt mappings */
+       unsigned long          rad_nobvirtmap;/* # virt bytes mapped */
 } kra_device_t;
 
 typedef struct
 {
-        int               kra_init;            /* initialisation state */
-        int               kra_shutdown;        /* shut down? */
-        cfs_atomic_t      kra_nthreads;        /* # live threads */
-        lnet_ni_t        *kra_ni;              /* _the_ nal instance */
+       int               kra_init;            /* initialisation state */
+       int               kra_shutdown;        /* shut down? */
+       cfs_atomic_t      kra_nthreads;        /* # live threads */
+       lnet_ni_t        *kra_ni;              /* _the_ nal instance */
 
-        kra_device_t      kra_devices[RANAL_MAXDEVS]; /* device/ptag/cq */
-        int               kra_ndevs;           /* # devices */
+       kra_device_t      kra_devices[RANAL_MAXDEVS]; /* device/ptag/cq */
+       int               kra_ndevs;           /* # devices */
 
        rwlock_t          kra_global_lock;      /* stabilize peer/conn ops */
 
-        cfs_list_t       *kra_peers;           /* hash table of all my known peers */
-        int               kra_peer_hash_size;  /* size of kra_peers */
-        cfs_atomic_t      kra_npeers;          /* # peers extant */
-        int               kra_nonewpeers;      /* prevent new peers */
+       cfs_list_t       *kra_peers;           /* hash table of all my known peers */
+       int               kra_peer_hash_size;  /* size of kra_peers */
+       cfs_atomic_t      kra_npeers;          /* # peers extant */
+       int               kra_nonewpeers;      /* prevent new peers */
 
-        cfs_list_t       *kra_conns;           /* conns hashed by cqid */
-        int               kra_conn_hash_size;  /* size of kra_conns */
-        __u64             kra_peerstamp;       /* when I started up */
-        __u64             kra_connstamp;       /* conn stamp generator */
-        int               kra_next_cqid;       /* cqid generator */
-        cfs_atomic_t      kra_nconns;          /* # connections extant */
+       cfs_list_t       *kra_conns;           /* conns hashed by cqid */
+       int               kra_conn_hash_size;  /* size of kra_conns */
+       __u64             kra_peerstamp;       /* when I started up */
+       __u64             kra_connstamp;       /* conn stamp generator */
+       int               kra_next_cqid;       /* cqid generator */
+       cfs_atomic_t      kra_nconns;          /* # connections extant */
 
-        long              kra_new_min_timeout; /* minimum timeout on any new conn */
-        cfs_waitq_t       kra_reaper_waitq;    /* reaper sleeps here */
+       long              kra_new_min_timeout; /* minimum timeout on any new conn */
+       wait_queue_head_t       kra_reaper_waitq;    /* reaper sleeps here */
        spinlock_t        kra_reaper_lock;     /* serialise */
 
-        cfs_list_t        kra_connd_peers;     /* peers waiting for a connection */
-        cfs_list_t        kra_connd_acceptq;   /* accepted sockets to handshake */
-        cfs_waitq_t       kra_connd_waitq;     /* connection daemons sleep here */
+       cfs_list_t        kra_connd_peers;     /* peers waiting for a connection */
+       cfs_list_t        kra_connd_acceptq;   /* accepted sockets to handshake */
+       wait_queue_head_t       kra_connd_waitq;     /* connection daemons sleep here */
        spinlock_t        kra_connd_lock;       /* serialise */
 
-        cfs_list_t        kra_idle_txs;        /* idle tx descriptors */
-        __u64             kra_next_tx_cookie;  /* RDMA completion cookie */
+       cfs_list_t        kra_idle_txs;        /* idle tx descriptors */
+       __u64             kra_next_tx_cookie;  /* RDMA completion cookie */
        spinlock_t        kra_tx_lock;          /* serialise */
 } kra_data_t;
 
index d9ca41b..ba744c6 100644
@@ -57,10 +57,10 @@ kranal_device_callback(RAP_INT32 devid, RAP_PVOID arg)
 
                spin_lock_irqsave(&dev->rad_lock, flags);
 
-                if (!dev->rad_ready) {
-                        dev->rad_ready = 1;
-                        cfs_waitq_signal(&dev->rad_waitq);
-                }
+               if (!dev->rad_ready) {
+                       dev->rad_ready = 1;
+                       wake_up(&dev->rad_waitq);
+               }
 
                spin_unlock_irqrestore(&dev->rad_lock, flags);
                 return;
@@ -77,12 +77,12 @@ kranal_schedule_conn(kra_conn_t *conn)
 
        spin_lock_irqsave(&dev->rad_lock, flags);
 
-        if (!conn->rac_scheduled) {
-                kranal_conn_addref(conn);       /* +1 ref for scheduler */
-                conn->rac_scheduled = 1;
-                cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_ready_conns);
-                cfs_waitq_signal(&dev->rad_waitq);
-        }
+       if (!conn->rac_scheduled) {
+               kranal_conn_addref(conn);       /* +1 ref for scheduler */
+               conn->rac_scheduled = 1;
+               cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_ready_conns);
+               wake_up(&dev->rad_waitq);
+       }
 
        spin_unlock_irqrestore(&dev->rad_lock, flags);
 }
@@ -523,9 +523,9 @@ kranal_launch_tx (kra_tx_t *tx, lnet_nid_t nid)
 
                spin_lock(&kranal_data.kra_connd_lock);
 
-                cfs_list_add_tail(&peer->rap_connd_list,
-                              &kranal_data.kra_connd_peers);
-                cfs_waitq_signal(&kranal_data.kra_connd_waitq);
+               cfs_list_add_tail(&peer->rap_connd_list,
+                             &kranal_data.kra_connd_peers);
+               wake_up(&kranal_data.kra_connd_waitq);
 
                spin_unlock(&kranal_data.kra_connd_lock);
         }
@@ -1051,78 +1051,78 @@ kranal_reaper_check (int idx, unsigned long *min_timeoutp)
 int
 kranal_connd (void *arg)
 {
-        long               id = (long)arg;
-        cfs_waitlink_t     wait;
-        unsigned long      flags;
-        kra_peer_t        *peer;
-        kra_acceptsock_t  *ras;
-        int                did_something;
+       long               id = (long)arg;
+       wait_queue_t     wait;
+       unsigned long      flags;
+       kra_peer_t        *peer;
+       kra_acceptsock_t  *ras;
+       int                did_something;
 
-        cfs_block_allsigs();
+       cfs_block_allsigs();
 
-        cfs_waitlink_init(&wait);
+       init_waitqueue_entry_current(&wait);
 
        spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
 
-        while (!kranal_data.kra_shutdown) {
-                did_something = 0;
+       while (!kranal_data.kra_shutdown) {
+               did_something = 0;
 
-                if (!cfs_list_empty(&kranal_data.kra_connd_acceptq)) {
-                        ras = cfs_list_entry(kranal_data.kra_connd_acceptq.next,
-                                             kra_acceptsock_t, ras_list);
-                        cfs_list_del(&ras->ras_list);
+               if (!cfs_list_empty(&kranal_data.kra_connd_acceptq)) {
+                       ras = cfs_list_entry(kranal_data.kra_connd_acceptq.next,
+                                            kra_acceptsock_t, ras_list);
+                       cfs_list_del(&ras->ras_list);
 
                        spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
-                                                   flags);
+                                                  flags);
 
-                        CDEBUG(D_NET,"About to handshake someone\n");
+                       CDEBUG(D_NET,"About to handshake someone\n");
 
-                        kranal_conn_handshake(ras->ras_sock, NULL);
-                        kranal_free_acceptsock(ras);
+                       kranal_conn_handshake(ras->ras_sock, NULL);
+                       kranal_free_acceptsock(ras);
 
-                        CDEBUG(D_NET,"Finished handshaking someone\n");
+                       CDEBUG(D_NET,"Finished handshaking someone\n");
 
                        spin_lock_irqsave(&kranal_data.kra_connd_lock,
-                                              flags);
-                        did_something = 1;
-                }
+                                             flags);
+                       did_something = 1;
+               }
 
-                if (!cfs_list_empty(&kranal_data.kra_connd_peers)) {
-                        peer = cfs_list_entry(kranal_data.kra_connd_peers.next,
-                                              kra_peer_t, rap_connd_list);
+               if (!cfs_list_empty(&kranal_data.kra_connd_peers)) {
+                       peer = cfs_list_entry(kranal_data.kra_connd_peers.next,
+                                             kra_peer_t, rap_connd_list);
 
-                        cfs_list_del_init(&peer->rap_connd_list);
+                       cfs_list_del_init(&peer->rap_connd_list);
                        spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
-                                                   flags);
+                                                  flags);
 
-                        kranal_connect(peer);
-                        kranal_peer_decref(peer);
+                       kranal_connect(peer);
+                       kranal_peer_decref(peer);
 
                        spin_lock_irqsave(&kranal_data.kra_connd_lock,
-                                              flags);
-                        did_something = 1;
-                }
+                                             flags);
+                       did_something = 1;
+               }
 
-                if (did_something)
-                        continue;
+               if (did_something)
+                       continue;
 
-                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-                cfs_waitq_add_exclusive(&kranal_data.kra_connd_waitq, &wait);
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue_exclusive(&kranal_data.kra_connd_waitq, &wait);
 
                spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
 
-                cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
+               waitq_wait(&wait, TASK_INTERRUPTIBLE);
 
-                cfs_set_current_state(CFS_TASK_RUNNING);
-                cfs_waitq_del(&kranal_data.kra_connd_waitq, &wait);
+               set_current_state(TASK_RUNNING);
+               remove_wait_queue(&kranal_data.kra_connd_waitq, &wait);
 
                spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
-        }
+       }
 
        spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
 
-        kranal_thread_fini();
-        return 0;
+       kranal_thread_fini();
+       return 0;
 }
 
 void
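Note that init_waitqueue_entry_current(), waitq_wait() and waitq_timedwait(), used throughout these threads, are not upstream kernel symbols; they read as libcfs transitional macros kept so that one spelling works across the remaining ports. A plausible expansion, stated as an assumption rather than quoted from this change:

    /* hypothetical linux-prim.h mappings; an assumption, not from this patch */
    #define init_waitqueue_entry_current(w) init_waitqueue_entry(w, current)
    #define waitq_wait(w, state)            schedule()
    #define waitq_timedwait(w, state, t)    schedule_timeout(t)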
@@ -1143,120 +1143,120 @@ kranal_update_reaper_timeout(long timeout)
 int
 kranal_reaper (void *arg)
 {
-        cfs_waitlink_t     wait;
-        unsigned long      flags;
-        long               timeout;
-        int                i;
-        int                conn_entries = kranal_data.kra_conn_hash_size;
-        int                conn_index = 0;
-        int                base_index = conn_entries - 1;
-        unsigned long      next_check_time = jiffies;
-        long               next_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
-        long               current_min_timeout = 1;
+       wait_queue_t     wait;
+       unsigned long      flags;
+       long               timeout;
+       int                i;
+       int                conn_entries = kranal_data.kra_conn_hash_size;
+       int                conn_index = 0;
+       int                base_index = conn_entries - 1;
+       unsigned long      next_check_time = jiffies;
+       long               next_min_timeout = MAX_SCHEDULE_TIMEOUT;
+       long               current_min_timeout = 1;
 
-        cfs_block_allsigs();
+       cfs_block_allsigs();
 
-        cfs_waitlink_init(&wait);
+       init_waitqueue_entry_current(&wait);
 
        spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
 
-        while (!kranal_data.kra_shutdown) {
-                /* I wake up every 'p' seconds to check for timeouts on some
-                 * more peers.  I try to check every connection 'n' times
-                 * within the global minimum of all keepalive and timeout
-                 * intervals, to ensure I attend to every connection within
-                 * (n+1)/n times its timeout intervals. */
-                const int     p = 1;
-                const int     n = 3;
-                unsigned long min_timeout;
-                int           chunk;
-
-                /* careful with the jiffy wrap... */
-                timeout = (long)(next_check_time - jiffies);
-                if (timeout > 0) {
-                        cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-                        cfs_waitq_add(&kranal_data.kra_reaper_waitq, &wait);
+       while (!kranal_data.kra_shutdown) {
+               /* I wake up every 'p' seconds to check for timeouts on some
+                * more peers.  I try to check every connection 'n' times
+                * within the global minimum of all keepalive and timeout
+                * intervals, to ensure I attend to every connection within
+                * (n+1)/n times its timeout intervals. */
+               const int     p = 1;
+               const int     n = 3;
+               unsigned long min_timeout;
+               int           chunk;
+
+               /* careful with the jiffy wrap... */
+               timeout = (long)(next_check_time - jiffies);
+               if (timeout > 0) {
+                       set_current_state(TASK_INTERRUPTIBLE);
+                       add_wait_queue(&kranal_data.kra_reaper_waitq, &wait);
 
                        spin_unlock_irqrestore(&kranal_data.kra_reaper_lock,
-                                                   flags);
+                                                  flags);
 
-                        cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
-                                            timeout);
+                       waitq_timedwait(&wait, TASK_INTERRUPTIBLE,
+                                           timeout);
 
                        spin_lock_irqsave(&kranal_data.kra_reaper_lock,
-                                              flags);
+                                             flags);
 
-                        cfs_set_current_state(CFS_TASK_RUNNING);
-                        cfs_waitq_del(&kranal_data.kra_reaper_waitq, &wait);
-                        continue;
-                }
+                       set_current_state(TASK_RUNNING);
+                       remove_wait_queue(&kranal_data.kra_reaper_waitq, &wait);
+                       continue;
+               }
 
-                if (kranal_data.kra_new_min_timeout !=
-                    CFS_MAX_SCHEDULE_TIMEOUT) {
-                        /* new min timeout set: restart min timeout scan */
-                        next_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
-                        base_index = conn_index - 1;
-                        if (base_index < 0)
-                                base_index = conn_entries - 1;
-
-                        if (kranal_data.kra_new_min_timeout <
-                            current_min_timeout) {
-                                current_min_timeout =
-                                        kranal_data.kra_new_min_timeout;
-                                CDEBUG(D_NET, "Set new min timeout %ld\n",
-                                       current_min_timeout);
-                        }
+               if (kranal_data.kra_new_min_timeout !=
+                   MAX_SCHEDULE_TIMEOUT) {
+                       /* new min timeout set: restart min timeout scan */
+                       next_min_timeout = MAX_SCHEDULE_TIMEOUT;
+                       base_index = conn_index - 1;
+                       if (base_index < 0)
+                               base_index = conn_entries - 1;
+
+                       if (kranal_data.kra_new_min_timeout <
+                           current_min_timeout) {
+                               current_min_timeout =
+                                       kranal_data.kra_new_min_timeout;
+                               CDEBUG(D_NET, "Set new min timeout %ld\n",
+                                      current_min_timeout);
+                       }
 
-                        kranal_data.kra_new_min_timeout =
-                                CFS_MAX_SCHEDULE_TIMEOUT;
-                }
-                min_timeout = current_min_timeout;
+                       kranal_data.kra_new_min_timeout =
+                               MAX_SCHEDULE_TIMEOUT;
+               }
+               min_timeout = current_min_timeout;
 
                spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
 
-                LASSERT (min_timeout > 0);
-
-                /* Compute how many table entries to check now so I get round
-                 * the whole table fast enough given that I do this at fixed
-                 * intervals of 'p' seconds) */
-                chunk = conn_entries;
-                if (min_timeout > n * p)
-                        chunk = (chunk * n * p) / min_timeout;
-                if (chunk == 0)
-                        chunk = 1;
-
-                for (i = 0; i < chunk; i++) {
-                        kranal_reaper_check(conn_index,
-                                            &next_min_timeout);
-                        conn_index = (conn_index + 1) % conn_entries;
-                }
+               LASSERT (min_timeout > 0);
+
+               /* Compute how many table entries to check now so I get round
+                * the whole table fast enough given that I do this at fixed
+                * intervals of 'p' seconds */
+               chunk = conn_entries;
+               if (min_timeout > n * p)
+                       chunk = (chunk * n * p) / min_timeout;
+               if (chunk == 0)
+                       chunk = 1;
+
+               for (i = 0; i < chunk; i++) {
+                       kranal_reaper_check(conn_index,
+                                           &next_min_timeout);
+                       conn_index = (conn_index + 1) % conn_entries;
+               }
 
                next_check_time += p * HZ;
 
                spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
 
-                if (((conn_index - chunk <= base_index &&
-                      base_index < conn_index) ||
-                     (conn_index - conn_entries - chunk <= base_index &&
-                      base_index < conn_index - conn_entries))) {
+               if (((conn_index - chunk <= base_index &&
+                     base_index < conn_index) ||
+                    (conn_index - conn_entries - chunk <= base_index &&
+                     base_index < conn_index - conn_entries))) {
 
-                        /* Scanned all conns: set current_min_timeout... */
-                        if (current_min_timeout != next_min_timeout) {
-                                current_min_timeout = next_min_timeout;
-                                CDEBUG(D_NET, "Set new min timeout %ld\n",
-                                       current_min_timeout);
-                        }
+                       /* Scanned all conns: set current_min_timeout... */
+                       if (current_min_timeout != next_min_timeout) {
+                               current_min_timeout = next_min_timeout;
+                               CDEBUG(D_NET, "Set new min timeout %ld\n",
+                                      current_min_timeout);
+                       }
 
-                        /* ...and restart min timeout scan */
-                        next_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
-                        base_index = conn_index - 1;
-                        if (base_index < 0)
-                                base_index = conn_entries - 1;
-                }
-        }
+                       /* ...and restart min timeout scan */
+                       next_min_timeout = MAX_SCHEDULE_TIMEOUT;
+                       base_index = conn_index - 1;
+                       if (base_index < 0)
+                               base_index = conn_entries - 1;
+               }
+       }
 
-        kranal_thread_fini();
-        return 0;
+       kranal_thread_fini();
+       return 0;
 }
 
 void
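The reaper shows the timed variant of the open-coded wait. Three details carry over: the signed jiffy delta survives counter wrap, MAX_SCHEDULE_TIMEOUT (replacing CFS_MAX_SCHEDULE_TIMEOUT) is the "no timeout" sentinel, and the entry is added with plain add_wait_queue() because the reaper must be reachable by broadcasts rather than competing as an exclusive waiter. A sketch with hypothetical names; reaper_lock is held on entry, as in the loop above:

    wait_queue_t wait;
    unsigned long flags;
    long timeout;

    init_waitqueue_entry_current(&wait);

    timeout = (long)(next_check_time - jiffies);    /* wrap-safe delta */
    if (timeout > 0) {
            set_current_state(TASK_INTERRUPTIBLE);
            add_wait_queue(&reaper_waitq, &wait);   /* non-exclusive */
            spin_unlock_irqrestore(&reaper_lock, flags);

            /* presumably schedule_timeout(timeout) underneath */
            waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);

            spin_lock_irqsave(&reaper_lock, flags);
            set_current_state(TASK_RUNNING);
            remove_wait_queue(&reaper_waitq, &wait);
    }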
@@ -1923,9 +1923,9 @@ int kranal_process_new_conn (kra_conn_t *conn)
 int
 kranal_scheduler (void *arg)
 {
-        kra_device_t     *dev = (kra_device_t *)arg;
-        cfs_waitlink_t    wait;
-        kra_conn_t       *conn;
+       kra_device_t     *dev = (kra_device_t *)arg;
+       wait_queue_t    wait;
+       kra_conn_t       *conn;
         unsigned long     flags;
         unsigned long     deadline;
         unsigned long     soonest;
@@ -1939,8 +1939,8 @@ kranal_scheduler (void *arg)
 
         cfs_block_allsigs();
 
-        dev->rad_scheduler = current;
-        cfs_waitlink_init(&wait);
+       dev->rad_scheduler = current;
+       init_waitqueue_entry_current(&wait);
 
        spin_lock_irqsave(&dev->rad_lock, flags);
 
@@ -1950,8 +1950,8 @@ kranal_scheduler (void *arg)
                 if (busy_loops++ >= RANAL_RESCHED) {
                        spin_unlock_irqrestore(&dev->rad_lock, flags);
 
-                        cfs_cond_resched();
-                        busy_loops = 0;
+                       cond_resched();
+                       busy_loops = 0;
 
                        spin_lock_irqsave(&dev->rad_lock, flags);
                 }
@@ -2039,27 +2039,27 @@ kranal_scheduler (void *arg)
                 if (dropped_lock)               /* may sleep iff I didn't drop the lock */
                         continue;
 
-                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-                cfs_waitq_add_exclusive(&dev->rad_waitq, &wait);
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue_exclusive(&dev->rad_waitq, &wait);
                spin_unlock_irqrestore(&dev->rad_lock, flags);
 
-                if (nsoonest == 0) {
-                        busy_loops = 0;
-                        cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
-                } else {
-                        timeout = (long)(soonest - jiffies);
-                        if (timeout > 0) {
-                                busy_loops = 0;
-                                cfs_waitq_timedwait(&wait,
-                                                    CFS_TASK_INTERRUPTIBLE,
-                                                    timeout);
-                        }
-                }
+               if (nsoonest == 0) {
+                       busy_loops = 0;
+                       waitq_wait(&wait, TASK_INTERRUPTIBLE);
+               } else {
+                       timeout = (long)(soonest - jiffies);
+                       if (timeout > 0) {
+                               busy_loops = 0;
+                               waitq_timedwait(&wait,
+                                                   TASK_INTERRUPTIBLE,
+                                                   timeout);
+                       }
+               }
 
-                cfs_waitq_del(&dev->rad_waitq, &wait);
-                cfs_set_current_state(CFS_TASK_RUNNING);
+               remove_wait_queue(&dev->rad_waitq, &wait);
+               set_current_state(TASK_RUNNING);
                spin_lock_irqsave(&dev->rad_lock, flags);
-        }
+       }
 
        spin_unlock_irqrestore(&dev->rad_lock, flags);
 
index 37fbe28..8783fe7 100644
@@ -993,8 +993,8 @@ ksocknal_accept (lnet_ni_t *ni, cfs_socket_t *sock)
 
        spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
 
-        cfs_list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
-        cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
+       cfs_list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
+       wake_up(&ksocknal_data.ksnd_connd_waitq);
 
        spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
         return 0;
@@ -1484,7 +1484,7 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
 
        cfs_list_add_tail(&conn->ksnc_list,
                          &ksocknal_data.ksnd_deathrow_conns);
-       cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);
+       wake_up(&ksocknal_data.ksnd_reaper_waitq);
 
        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 }
@@ -1578,10 +1578,10 @@ ksocknal_terminate_conn (ksock_conn_t *conn)
                                &sched->kss_tx_conns);
                 conn->ksnc_tx_scheduled = 1;
                 /* extra ref for scheduler */
-                ksocknal_conn_addref(conn);
+               ksocknal_conn_addref(conn);
 
-                cfs_waitq_signal (&sched->kss_waitq);
-        }
+               wake_up (&sched->kss_waitq);
+       }
 
        spin_unlock_bh(&sched->kss_lock);
 
@@ -1623,7 +1623,7 @@ ksocknal_queue_zombie_conn (ksock_conn_t *conn)
        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 
        cfs_list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
-       cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);
+       wake_up(&ksocknal_data.ksnd_reaper_waitq);
 
        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 }
@@ -2311,8 +2311,8 @@ ksocknal_base_shutdown(void)
 
                /* flag threads to terminate; wake and wait for them to die */
                ksocknal_data.ksnd_shuttingdown = 1;
-               cfs_waitq_broadcast(&ksocknal_data.ksnd_connd_waitq);
-               cfs_waitq_broadcast(&ksocknal_data.ksnd_reaper_waitq);
+               wake_up_all(&ksocknal_data.ksnd_connd_waitq);
+               wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
 
                if (ksocknal_data.ksnd_sched_info != NULL) {
                        cfs_percpt_for_each(info, i,
@@ -2322,7 +2322,7 @@ ksocknal_base_shutdown(void)
 
                                for (j = 0; j < info->ksi_nthreads_max; j++) {
                                        sched = &info->ksi_scheds[j];
-                                       cfs_waitq_broadcast(&sched->kss_waitq);
+                                       wake_up_all(&sched->kss_waitq);
                                }
                        }
                }
@@ -2392,15 +2392,15 @@ ksocknal_base_startup(void)
        CFS_INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
 
        spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
-        CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_enomem_conns);
-        CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_zombie_conns);
-        CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_deathrow_conns);
-        cfs_waitq_init(&ksocknal_data.ksnd_reaper_waitq);
+       CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_enomem_conns);
+       CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_zombie_conns);
+       CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_deathrow_conns);
+       init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
 
        spin_lock_init(&ksocknal_data.ksnd_connd_lock);
-        CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_connreqs);
-        CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_routes);
-        cfs_waitq_init(&ksocknal_data.ksnd_connd_waitq);
+       CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_connreqs);
+       CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_routes);
+       init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
 
        spin_lock_init(&ksocknal_data.ksnd_tx_lock);
         CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_idle_noop_txs);
@@ -2445,7 +2445,7 @@ ksocknal_base_startup(void)
                        CFS_INIT_LIST_HEAD(&sched->kss_rx_conns);
                        CFS_INIT_LIST_HEAD(&sched->kss_tx_conns);
                        CFS_INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
-                       cfs_waitq_init(&sched->kss_waitq);
+                       init_waitqueue_head(&sched->kss_waitq);
                }
         }
 
index 951927b..9c0c5de 100644
@@ -72,7 +72,7 @@ typedef struct                                  /* per scheduler state */
        cfs_list_t              kss_tx_conns;
        /* zombie noop tx list */
        cfs_list_t              kss_zombie_noop_txs;
-       cfs_waitq_t             kss_waitq;      /* where scheduler sleeps */
+       wait_queue_head_t       kss_waitq;      /* where scheduler sleeps */
        /* # connections assigned to this scheduler */
        int                     kss_nconns;
        struct ksock_sched_info *kss_info;      /* owner of it */
@@ -183,31 +183,31 @@ typedef struct
        /* schedulers information */
        struct ksock_sched_info **ksnd_sched_info;
 
-        cfs_atomic_t      ksnd_nactive_txs;    /* #active txs */
+       cfs_atomic_t      ksnd_nactive_txs;    /* #active txs */
 
-        cfs_list_t        ksnd_deathrow_conns; /* conns to close: reaper_lock*/
-        cfs_list_t        ksnd_zombie_conns;   /* conns to free: reaper_lock */
-        cfs_list_t        ksnd_enomem_conns;   /* conns to retry: reaper_lock*/
-        cfs_waitq_t       ksnd_reaper_waitq;   /* reaper sleeps here */
-        cfs_time_t        ksnd_reaper_waketime;/* when reaper will wake */
+       cfs_list_t        ksnd_deathrow_conns; /* conns to close: reaper_lock*/
+       cfs_list_t        ksnd_zombie_conns;   /* conns to free: reaper_lock */
+       cfs_list_t        ksnd_enomem_conns;   /* conns to retry: reaper_lock*/
+       wait_queue_head_t       ksnd_reaper_waitq;   /* reaper sleeps here */
+       cfs_time_t        ksnd_reaper_waketime;/* when reaper will wake */
        spinlock_t        ksnd_reaper_lock;     /* serialise */
 
-        int               ksnd_enomem_tx;      /* test ENOMEM sender */
-        int               ksnd_stall_tx;       /* test sluggish sender */
-        int               ksnd_stall_rx;       /* test sluggish receiver */
-
-        cfs_list_t        ksnd_connd_connreqs; /* incoming connection requests */
-        cfs_list_t        ksnd_connd_routes;   /* routes waiting to be connected */
-        cfs_waitq_t       ksnd_connd_waitq;    /* connds sleep here */
-        int               ksnd_connd_connecting;/* # connds connecting */
-        /** time stamp of the last failed connecting attempt */
-        long              ksnd_connd_failed_stamp;
-        /** # starting connd */
-        unsigned          ksnd_connd_starting;
-        /** time stamp of the last starting connd */
-        long              ksnd_connd_starting_stamp;
-        /** # running connd */
-        unsigned          ksnd_connd_running;
+       int               ksnd_enomem_tx;      /* test ENOMEM sender */
+       int               ksnd_stall_tx;       /* test sluggish sender */
+       int               ksnd_stall_rx;       /* test sluggish receiver */
+
+       cfs_list_t        ksnd_connd_connreqs; /* incoming connection requests */
+       cfs_list_t        ksnd_connd_routes;   /* routes waiting to be connected */
+       wait_queue_head_t       ksnd_connd_waitq;    /* connds sleep here */
+       int               ksnd_connd_connecting;/* # connds connecting */
+       /** time stamp of the last failed connecting attempt */
+       long              ksnd_connd_failed_stamp;
+       /** # starting connd */
+       unsigned          ksnd_connd_starting;
+       /** time stamp of the last starting connd */
+       long              ksnd_connd_starting_stamp;
+       /** # running connd */
+       unsigned          ksnd_connd_running;
        spinlock_t        ksnd_connd_lock;      /* serialise */
 
        cfs_list_t        ksnd_idle_noop_txs;   /* list head for freed noop tx */
index c97cf2b..bb6974f 100644
@@ -538,14 +538,14 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
                 LASSERT (conn->ksnc_tx_scheduled);
                 cfs_list_add_tail(&conn->ksnc_tx_list,
                                   &ksocknal_data.ksnd_enomem_conns);
-                if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
-                                                   SOCKNAL_ENOMEM_RETRY),
-                                   ksocknal_data.ksnd_reaper_waketime))
-                        cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq);
+               if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
+                                       SOCKNAL_ENOMEM_RETRY),
+                                       ksocknal_data.ksnd_reaper_waketime))
+                       wake_up(&ksocknal_data.ksnd_reaper_waitq);
 
                spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
-                return (rc);
-        }
+               return (rc);
+       }
 
         /* Actual error */
         LASSERT (rc < 0);
@@ -598,7 +598,7 @@ ksocknal_launch_connection_locked (ksock_route_t *route)
 
        cfs_list_add_tail(&route->ksnr_connd_list,
                          &ksocknal_data.ksnd_connd_routes);
-       cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
+       wake_up(&ksocknal_data.ksnd_connd_waitq);
 
        spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
 }
@@ -765,15 +765,15 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
                 cfs_list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
         }
 
-        if (conn->ksnc_tx_ready &&      /* able to send */
-            !conn->ksnc_tx_scheduled) { /* not scheduled to send */
-                /* +1 ref for scheduler */
-                ksocknal_conn_addref(conn);
-                cfs_list_add_tail (&conn->ksnc_tx_list,
-                                   &sched->kss_tx_conns);
-                conn->ksnc_tx_scheduled = 1;
-                cfs_waitq_signal (&sched->kss_waitq);
-        }
+       if (conn->ksnc_tx_ready &&      /* able to send */
+           !conn->ksnc_tx_scheduled) { /* not scheduled to send */
+               /* +1 ref for scheduler */
+               ksocknal_conn_addref(conn);
+               cfs_list_add_tail (&conn->ksnc_tx_list,
+                                  &sched->kss_tx_conns);
+               conn->ksnc_tx_scheduled = 1;
+               wake_up(&sched->kss_waitq);
+       }
 
        spin_unlock_bh(&sched->kss_lock);
 }
@@ -1354,12 +1354,12 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
 
        spin_lock_bh(&sched->kss_lock);
 
-        switch (conn->ksnc_rx_state) {
-        case SOCKNAL_RX_PARSE_WAIT:
-                cfs_list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
-                cfs_waitq_signal (&sched->kss_waitq);
-                LASSERT (conn->ksnc_rx_ready);
-                break;
+       switch (conn->ksnc_rx_state) {
+       case SOCKNAL_RX_PARSE_WAIT:
+               cfs_list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
+               wake_up(&sched->kss_waitq);
+               LASSERT(conn->ksnc_rx_ready);
+               break;
 
         case SOCKNAL_RX_PARSE:
                 /* scheduler hasn't noticed I'm parsing yet */
@@ -1542,9 +1542,9 @@ int ksocknal_scheduler(void *arg)
                                        sched->kss_waitq,
                                        !ksocknal_sched_cansleep(sched));
                                LASSERT (rc == 0);
-                        } else {
-                                cfs_cond_resched();
-                        }
+                       } else {
+                               cond_resched();
+                       }
 
                        spin_lock_bh(&sched->kss_lock);
                }
@@ -1568,17 +1568,17 @@ void ksocknal_read_callback (ksock_conn_t *conn)
 
        spin_lock_bh(&sched->kss_lock);
 
-        conn->ksnc_rx_ready = 1;
+       conn->ksnc_rx_ready = 1;
 
-        if (!conn->ksnc_rx_scheduled) {  /* not being progressed */
-                cfs_list_add_tail(&conn->ksnc_rx_list,
-                                  &sched->kss_rx_conns);
-                conn->ksnc_rx_scheduled = 1;
-                /* extra ref for scheduler */
-                ksocknal_conn_addref(conn);
+       if (!conn->ksnc_rx_scheduled) {  /* not being progressed */
+               cfs_list_add_tail(&conn->ksnc_rx_list,
+                                 &sched->kss_rx_conns);
+               conn->ksnc_rx_scheduled = 1;
+               /* extra ref for scheduler */
+               ksocknal_conn_addref(conn);
 
-                cfs_waitq_signal (&sched->kss_waitq);
-        }
+               wake_up (&sched->kss_waitq);
+       }
        spin_unlock_bh(&sched->kss_lock);
 
        EXIT;
@@ -1588,7 +1588,7 @@ void ksocknal_read_callback (ksock_conn_t *conn)
  * Add connection to kss_tx_conns of scheduler
  * and wakeup the scheduler.
  */
-void ksocknal_write_callback (ksock_conn_t *conn)
+void ksocknal_write_callback(ksock_conn_t *conn)
 {
        ksock_sched_t *sched;
        ENTRY;
@@ -1597,18 +1597,17 @@ void ksocknal_write_callback (ksock_conn_t *conn)
 
        spin_lock_bh(&sched->kss_lock);
 
-        conn->ksnc_tx_ready = 1;
+       conn->ksnc_tx_ready = 1;
 
-        if (!conn->ksnc_tx_scheduled && // not being progressed
-            !cfs_list_empty(&conn->ksnc_tx_queue)){//packets to send
-                cfs_list_add_tail (&conn->ksnc_tx_list,
-                                   &sched->kss_tx_conns);
-                conn->ksnc_tx_scheduled = 1;
-                /* extra ref for scheduler */
-                ksocknal_conn_addref(conn);
+       if (!conn->ksnc_tx_scheduled &&               /* not being progressed */
+           !cfs_list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
+               cfs_list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
+               conn->ksnc_tx_scheduled = 1;
+               /* extra ref for scheduler */
+               ksocknal_conn_addref(conn);
 
-                cfs_waitq_signal (&sched->kss_waitq);
-        }
+               wake_up(&sched->kss_waitq);
+       }
 
        spin_unlock_bh(&sched->kss_lock);
 
@@ -2117,57 +2116,57 @@ ksocknal_connd_check_stop(long sec, long *timeout)
 static ksock_route_t *
 ksocknal_connd_get_route_locked(signed long *timeout_p)
 {
-        ksock_route_t *route;
-        cfs_time_t     now;
+       ksock_route_t *route;
+       cfs_time_t     now;
 
-        now = cfs_time_current();
+       now = cfs_time_current();
 
-        /* connd_routes can contain both pending and ordinary routes */
-        cfs_list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes,
-                                 ksnr_connd_list) {
+       /* connd_routes can contain both pending and ordinary routes */
+       cfs_list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
+                               ksnr_connd_list) {
 
-                if (route->ksnr_retry_interval == 0 ||
-                    cfs_time_aftereq(now, route->ksnr_timeout))
-                        return route;
+               if (route->ksnr_retry_interval == 0 ||
+                   cfs_time_aftereq(now, route->ksnr_timeout))
+                       return route;
 
-                if (*timeout_p == CFS_MAX_SCHEDULE_TIMEOUT ||
-                    (int)*timeout_p > (int)(route->ksnr_timeout - now))
-                        *timeout_p = (int)(route->ksnr_timeout - now);
-        }
+               if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
+                   (int)*timeout_p > (int)(route->ksnr_timeout - now))
+                       *timeout_p = (int)(route->ksnr_timeout - now);
+       }
 
-        return NULL;
+       return NULL;
 }
 
 int
 ksocknal_connd (void *arg)
 {
        spinlock_t    *connd_lock = &ksocknal_data.ksnd_connd_lock;
-        ksock_connreq_t   *cr;
-        cfs_waitlink_t     wait;
-        int                nloops = 0;
-        int                cons_retry = 0;
+       ksock_connreq_t   *cr;
+       wait_queue_t     wait;
+       int                nloops = 0;
+       int                cons_retry = 0;
 
-        cfs_block_allsigs ();
+       cfs_block_allsigs();
 
-        cfs_waitlink_init (&wait);
+       init_waitqueue_entry_current(&wait);
 
        spin_lock_bh(connd_lock);
 
-        LASSERT(ksocknal_data.ksnd_connd_starting > 0);
-        ksocknal_data.ksnd_connd_starting--;
-        ksocknal_data.ksnd_connd_running++;
+       LASSERT(ksocknal_data.ksnd_connd_starting > 0);
+       ksocknal_data.ksnd_connd_starting--;
+       ksocknal_data.ksnd_connd_running++;
 
-        while (!ksocknal_data.ksnd_shuttingdown) {
-                ksock_route_t *route = NULL;
-                long sec = cfs_time_current_sec();
-                long timeout = CFS_MAX_SCHEDULE_TIMEOUT;
-                int  dropped_lock = 0;
-
-                if (ksocknal_connd_check_stop(sec, &timeout)) {
-                        /* wakeup another one to check stop */
-                        cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
-                        break;
-                }
+       while (!ksocknal_data.ksnd_shuttingdown) {
+               ksock_route_t *route = NULL;
+               long sec = cfs_time_current_sec();
+               long timeout = MAX_SCHEDULE_TIMEOUT;
+               int  dropped_lock = 0;
+
+               if (ksocknal_connd_check_stop(sec, &timeout)) {
+                       /* wakeup another one to check stop */
+                       wake_up(&ksocknal_data.ksnd_connd_waitq);
+                       break;
+               }
 
                 if (ksocknal_connd_check_start(sec, &timeout)) {
                         /* created new thread */
@@ -2227,21 +2226,21 @@ ksocknal_connd (void *arg)
                                continue;
                        spin_unlock_bh(connd_lock);
                        nloops = 0;
-                       cfs_cond_resched();
+                       cond_resched();
                        spin_lock_bh(connd_lock);
                        continue;
                }
 
                /* Nothing to do for 'timeout'  */
-               cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-               cfs_waitq_add_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
                spin_unlock_bh(connd_lock);
 
                nloops = 0;
-               cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);
+               waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
 
-               cfs_set_current_state(CFS_TASK_RUNNING);
-               cfs_waitq_del(&ksocknal_data.ksnd_connd_waitq, &wait);
+               set_current_state(TASK_RUNNING);
+               remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
                spin_lock_bh(connd_lock);
        }
        ksocknal_data.ksnd_connd_running--;
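
Where no simple condition fits the wait_event*() family, the commit open-codes the sleep with the transitional libcfs helpers this change introduces: init_waitqueue_entry_current() stands in for init_waitqueue_entry(&wait, current), and waitq_timedwait() for a schedule_timeout() call; neither is upstream kernel API. The pattern, shared by connd above and the reaper below (a sketch, with locking exactly as in the hunk):

        wait_queue_t wait;

        init_waitqueue_entry_current(&wait);    /* was cfs_waitlink_init() */

        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
        spin_unlock_bh(connd_lock);

        waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);

        set_current_state(TASK_RUNNING);
        remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
        spin_lock_bh(connd_lock);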
@@ -2526,13 +2525,12 @@ ksocknal_check_peer_timeouts (int idx)
        read_unlock(&ksocknal_data.ksnd_global_lock);
 }
 
-int
-ksocknal_reaper (void *arg)
+int ksocknal_reaper(void *arg)
 {
-        cfs_waitlink_t     wait;
-        ksock_conn_t      *conn;
-        ksock_sched_t     *sched;
-        cfs_list_t         enomem_conns;
+       wait_queue_t     wait;
+       ksock_conn_t      *conn;
+       ksock_sched_t     *sched;
+       cfs_list_t         enomem_conns;
         int                nenomem_conns;
         cfs_duration_t     timeout;
         int                i;
@@ -2541,8 +2539,8 @@ ksocknal_reaper (void *arg)
 
         cfs_block_allsigs ();
 
-        CFS_INIT_LIST_HEAD(&enomem_conns);
-        cfs_waitlink_init (&wait);
+       CFS_INIT_LIST_HEAD(&enomem_conns);
+       init_waitqueue_entry_current(&wait);
 
        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 
@@ -2599,7 +2597,7 @@ ksocknal_reaper (void *arg)
                        conn->ksnc_tx_ready = 1;
                        cfs_list_add_tail(&conn->ksnc_tx_list,
                                          &sched->kss_tx_conns);
-                       cfs_waitq_signal(&sched->kss_waitq);
+                       wake_up(&sched->kss_waitq);
 
                        spin_unlock_bh(&sched->kss_lock);
                         nenomem_conns++;
@@ -2643,17 +2641,16 @@ ksocknal_reaper (void *arg)
                 ksocknal_data.ksnd_reaper_waketime =
                         cfs_time_add(cfs_time_current(), timeout);
 
-                cfs_set_current_state (CFS_TASK_INTERRUPTIBLE);
-                cfs_waitq_add (&ksocknal_data.ksnd_reaper_waitq, &wait);
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
 
-                if (!ksocknal_data.ksnd_shuttingdown &&
-                    cfs_list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
-                    cfs_list_empty (&ksocknal_data.ksnd_zombie_conns))
-                        cfs_waitq_timedwait (&wait, CFS_TASK_INTERRUPTIBLE,
-                                             timeout);
+               if (!ksocknal_data.ksnd_shuttingdown &&
+                   cfs_list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
+                   cfs_list_empty(&ksocknal_data.ksnd_zombie_conns))
+                       waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
 
-                cfs_set_current_state (CFS_TASK_RUNNING);
-                cfs_waitq_del (&ksocknal_data.ksnd_reaper_waitq, &wait);
+               set_current_state(TASK_RUNNING);
+               remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
 
                spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
        }
index 50b4043..1ae1338 100644 (file)
@@ -100,7 +100,7 @@ void
 lnet_init_locks(void)
 {
        spin_lock_init(&the_lnet.ln_eq_wait_lock);
-       cfs_waitq_init(&the_lnet.ln_eq_waitq);
+       init_waitqueue_head(&the_lnet.ln_eq_waitq);
        mutex_init(&the_lnet.ln_lnd_mutex);
        mutex_init(&the_lnet.ln_api_mutex);
 }
index c3fce64..f9dcaa1 100644 (file)
@@ -234,8 +234,8 @@ lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev)
 
 #ifdef __KERNEL__
        /* Wake anyone waiting in LNetEQPoll() */
-       if (cfs_waitq_active(&the_lnet.ln_eq_waitq))
-               cfs_waitq_broadcast(&the_lnet.ln_eq_waitq);
+       if (waitqueue_active(&the_lnet.ln_eq_waitq))
+               wake_up_all(&the_lnet.ln_eq_waitq);
 #else
 # ifndef HAVE_LIBPTHREAD
        /* LNetEQPoll() calls into _the_ LND to wait for action */
@@ -339,26 +339,26 @@ lnet_eq_wait_locked(int *timeout_ms)
 {
        int             tms = *timeout_ms;
        int             wait;
-       cfs_waitlink_t  wl;
+       wait_queue_t  wl;
        cfs_time_t      now;
 
        if (tms == 0)
                return -1; /* don't want to wait and no new event */
 
-       cfs_waitlink_init(&wl);
-       cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-       cfs_waitq_add(&the_lnet.ln_eq_waitq, &wl);
+       init_waitqueue_entry_current(&wl);
+       set_current_state(TASK_INTERRUPTIBLE);
+       add_wait_queue(&the_lnet.ln_eq_waitq, &wl);
 
        lnet_eq_wait_unlock();
 
        if (tms < 0) {
-               cfs_waitq_wait(&wl, CFS_TASK_INTERRUPTIBLE);
+               waitq_wait(&wl, TASK_INTERRUPTIBLE);
 
        } else {
                struct timeval tv;
 
                now = cfs_time_current();
-               cfs_waitq_timedwait(&wl, CFS_TASK_INTERRUPTIBLE,
-                                   cfs_time_seconds(tms) / 1000);
+               waitq_timedwait(&wl, TASK_INTERRUPTIBLE,
+                               cfs_time_seconds(tms) / 1000);
                cfs_duration_usec(cfs_time_sub(cfs_time_current(), now), &tv);
                tms -= (int)(tv.tv_sec * 1000 + tv.tv_usec / 1000);
@@ -370,7 +370,7 @@ lnet_eq_wait_locked(int *timeout_ms)
        *timeout_ms = tms;
 
        lnet_eq_wait_lock();
-       cfs_waitq_del(&the_lnet.ln_eq_waitq, &wl);
+       remove_wait_queue(&the_lnet.ln_eq_waitq, &wl);
 
        return wait;
 }
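
The waiter above adds itself to ln_eq_waitq while still holding the eq wait lock, and the enqueue side (previous file) tests waitqueue_active() before broadcasting; since both sides appear to serialize on ln_eq_wait_lock, the lockless waitqueue_active() race the kernel documentation warns about should not arise here. The waker half, for reference:

        /* skip the broadcast when nobody is sleeping in LNetEQPoll() */
        if (waitqueue_active(&the_lnet.ln_eq_waitq))
                wake_up_all(&the_lnet.ln_eq_waitq);     /* was cfs_waitq_broadcast() */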
index 9423278..551e4b3 100644 (file)
@@ -1276,12 +1276,12 @@ rescan:
 
                lnet_prune_rc_data(0); /* don't wait for UNLINK */
 
-                /* Call cfs_pause() here always adds 1 to load average 
-                 * because kernel counts # active tasks as nr_running 
-                 * + nr_uninterruptible. */
-                cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
-                                                   cfs_time_seconds(1));
-        }
+               /* Call cfs_pause() here always adds 1 to load average
+                * because kernel counts # active tasks as nr_running
+                * + nr_uninterruptible. */
+               schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
+                                              cfs_time_seconds(1));
+       }
 
        LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING);
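
The preserved comment is worth unpacking: the load average samples nr_running plus nr_uninterruptible, so a thread that naps in TASK_UNINTERRUPTIBLE (as cfs_pause() did) inflates loadavg by one even though it does no work. schedule_timeout_and_set_state() is a libcfs macro; in plain kernel terms the call amounts to:

        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(cfs_time_seconds(1));  /* ~1s nap, invisible to loadavg;
                                                 * a signal may end it early */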
 
index a7729ed..72d6add 100644 (file)
@@ -66,25 +66,25 @@ lstcon_rpc_done(srpc_client_rpc_t *rpc)
                 * I'm just a poor body and nobody loves me */
                spin_unlock(&rpc->crpc_lock);
 
-                /* release it */
-                lstcon_rpc_put(crpc);
-                return;
-        }
+               /* release it */
+               lstcon_rpc_put(crpc);
+               return;
+       }
 
-        /* not an orphan RPC */
-        crpc->crp_finished = 1;
+       /* not an orphan RPC */
+       crpc->crp_finished = 1;
 
-        if (crpc->crp_stamp == 0) {
-                /* not aborted */
-                LASSERT (crpc->crp_status == 0);
+       if (crpc->crp_stamp == 0) {
+               /* not aborted */
+               LASSERT(crpc->crp_status == 0);
 
-                crpc->crp_stamp  = cfs_time_current();
-                crpc->crp_status = rpc->crpc_status;
-        }
+               crpc->crp_stamp  = cfs_time_current();
+               crpc->crp_status = rpc->crpc_status;
+       }
 
-        /* wakeup (transaction)thread if I'm the last RPC in the transaction */
-        if (cfs_atomic_dec_and_test(&crpc->crp_trans->tas_remaining))
-                cfs_waitq_signal(&crpc->crp_trans->tas_waitq);
+       /* wake up the transaction thread if I'm the last RPC in the transaction */
+       if (cfs_atomic_dec_and_test(&crpc->crp_trans->tas_remaining))
+               wake_up(&crpc->crp_trans->tas_waitq);
 
        spin_unlock(&rpc->crpc_lock);
 }
@@ -265,9 +265,9 @@ lstcon_rpc_trans_prep(cfs_list_t *translist,
 
         cfs_list_add_tail(&trans->tas_link, &console_session.ses_trans_list);
 
-        CFS_INIT_LIST_HEAD(&trans->tas_rpcs_list);
-        cfs_atomic_set(&trans->tas_remaining, 0);
-        cfs_waitq_init(&trans->tas_waitq);
+       CFS_INIT_LIST_HEAD(&trans->tas_rpcs_list);
+       cfs_atomic_set(&trans->tas_remaining, 0);
+       init_waitqueue_head(&trans->tas_waitq);
 
        spin_lock(&console_session.ses_rpc_lock);
        trans->tas_features = console_session.ses_features;
@@ -361,9 +361,9 @@ lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
 
        mutex_unlock(&console_session.ses_mutex);
 
-        cfs_waitq_wait_event_interruptible_timeout(trans->tas_waitq,
-                                              lstcon_rpc_trans_check(trans),
-                                              cfs_time_seconds(timeout), rc);
+       rc = wait_event_interruptible_timeout(trans->tas_waitq,
+                                             lstcon_rpc_trans_check(trans),
+                                             cfs_time_seconds(timeout));
 
         rc = (rc > 0)? 0: ((rc < 0)? -EINTR: -ETIMEDOUT);
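
The conversion also changes the return convention: the old cfs_waitq_wait_event_interruptible_timeout() reported through an out parameter, while the kernel's wait_event_interruptible_timeout() returns the result directly: positive (the jiffies remaining) when the condition came true, 0 on timeout, and negative (-ERESTARTSYS) when a signal arrived. The line above folds that into lstcon's error codes; spelled out:

        rc = wait_event_interruptible_timeout(trans->tas_waitq,
                                              lstcon_rpc_trans_check(trans),
                                              cfs_time_seconds(timeout));
        if (rc > 0)
                rc = 0;                 /* condition satisfied in time */
        else if (rc == 0)
                rc = -ETIMEDOUT;        /* timer expired first */
        else
                rc = -EINTR;            /* interrupted by a signal */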
 
@@ -1354,11 +1354,11 @@ lstcon_rpc_cleanup_wait(void)
                         trans = cfs_list_entry(pacer, lstcon_rpc_trans_t,
                                                tas_link);
 
-                        CDEBUG(D_NET, "Session closed, wakeup transaction %s\n",
-                               lstcon_rpc_trans_name(trans->tas_opc));
+                       CDEBUG(D_NET, "Session closed, wakeup transaction %s\n",
+                              lstcon_rpc_trans_name(trans->tas_opc));
 
-                        cfs_waitq_signal(&trans->tas_waitq);
-                }
+                       wake_up(&trans->tas_waitq);
+               }
 
                mutex_unlock(&console_session.ses_mutex);
 
index 51b9f6c..59aead3 100644 (file)
@@ -80,16 +80,16 @@ typedef struct lstcon_rpc {
 } lstcon_rpc_t;
 
 typedef struct lstcon_rpc_trans {
-        cfs_list_t            tas_olink;     /* link chain on owner list */
-        cfs_list_t            tas_link;      /* link chain on global list */
-        int                   tas_opc;       /* operation code of transaction */
+       cfs_list_t            tas_olink;     /* link chain on owner list */
+       cfs_list_t            tas_link;      /* link chain on global list */
+       int                   tas_opc;       /* operation code of transaction */
        /* features mask is uptodate */
        unsigned              tas_feats_updated;
        /* test features mask */
        unsigned              tas_features;
-        cfs_waitq_t           tas_waitq;     /* wait queue head */
-        cfs_atomic_t          tas_remaining; /* # of un-scheduled rpcs */
-        cfs_list_t            tas_rpcs_list; /* queued requests */
+       wait_queue_head_t     tas_waitq;     /* wait queue head */
+       cfs_atomic_t          tas_remaining; /* # of un-scheduled rpcs */
+       cfs_list_t            tas_rpcs_list; /* queued requests */
 } lstcon_rpc_trans_t;
 
 #define LST_TRANS_PRIVATE       0x1000
index 60fb45a..bd917e8 100644 (file)
@@ -1167,7 +1167,7 @@ srpc_del_client_rpc_timer (srpc_client_rpc_t *rpc)
        while (rpc->crpc_timeout != 0) {
                spin_unlock(&rpc->crpc_lock);
 
-               cfs_schedule();
+               schedule();
 
                spin_lock(&rpc->crpc_lock);
        }
index e98dbbf..aaab0b0 100644 (file)
                                                     (STTIMER_NSLOTS - 1))])
 
 struct st_timer_data {
-       spinlock_t       stt_lock;
-        /* start time of the slot processed previously */
-        cfs_time_t       stt_prev_slot;
-        cfs_list_t       stt_hash[STTIMER_NSLOTS];
-        int              stt_shuttingdown;
+       spinlock_t              stt_lock;
+       /* start time of the slot processed previously */
+       cfs_time_t              stt_prev_slot;
+       cfs_list_t              stt_hash[STTIMER_NSLOTS];
+       int                     stt_shuttingdown;
 #ifdef __KERNEL__
-        cfs_waitq_t      stt_waitq;
-        int              stt_nthreads;
+       wait_queue_head_t       stt_waitq;
+       int                     stt_nthreads;
 #endif
 } stt_data;
 
@@ -182,15 +182,13 @@ stt_timer_main (void *arg)
 
         cfs_block_allsigs();
 
-        while (!stt_data.stt_shuttingdown) {
-                stt_check_timers(&stt_data.stt_prev_slot);
+       while (!stt_data.stt_shuttingdown) {
+               stt_check_timers(&stt_data.stt_prev_slot);
 
-                cfs_waitq_wait_event_timeout(stt_data.stt_waitq,
-                                   stt_data.stt_shuttingdown,
-                                   cfs_time_seconds(STTIMER_SLOTTIME),
-                                   rc);
-                rc = 0; /* Discard jiffies remaining before timeout. */
-        }
+               rc = wait_event_timeout(stt_data.stt_waitq,
+                                       stt_data.stt_shuttingdown,
+                                       cfs_time_seconds(STTIMER_SLOTTIME));
+       }
 
        spin_lock(&stt_data.stt_lock);
        stt_data.stt_nthreads--;
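
wait_event_timeout() likewise returns the jiffies remaining (0 on timeout), which is why the old "rc = 0; /* Discard jiffies remaining before timeout. */" reset disappears together with the out-parameter macro. Since the loop condition re-tests stt_shuttingdown anyway, the return value could even be ignored; a sketch of the reduced loop:

        while (!stt_data.stt_shuttingdown) {
                stt_check_timers(&stt_data.stt_prev_slot);
                /* >0 if stt_shuttingdown became true, 0 if the slot timed out */
                wait_event_timeout(stt_data.stt_waitq,
                                   stt_data.stt_shuttingdown,
                                   cfs_time_seconds(STTIMER_SLOTTIME));
        }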
@@ -245,11 +243,11 @@ stt_startup (void)
                 CFS_INIT_LIST_HEAD(&stt_data.stt_hash[i]);
 
 #ifdef __KERNEL__
-        stt_data.stt_nthreads = 0;
-        cfs_waitq_init(&stt_data.stt_waitq);
-        rc = stt_start_timer_thread();
-        if (rc != 0)
-                CERROR ("Can't spawn timer thread: %d\n", rc);
+       stt_data.stt_nthreads = 0;
+       init_waitqueue_head(&stt_data.stt_waitq);
+       rc = stt_start_timer_thread();
+       if (rc != 0)
+               CERROR("Can't spawn timer thread: %d\n", rc);
 #endif
 
         return rc;
@@ -268,10 +266,10 @@ stt_shutdown (void)
         stt_data.stt_shuttingdown = 1;
 
 #ifdef __KERNEL__
-        cfs_waitq_signal(&stt_data.stt_waitq);
-        lst_wait_until(stt_data.stt_nthreads == 0, stt_data.stt_lock,
-                       "waiting for %d threads to terminate\n",
-                       stt_data.stt_nthreads);
+       wake_up(&stt_data.stt_waitq);
+       lst_wait_until(stt_data.stt_nthreads == 0, stt_data.stt_lock,
+                      "waiting for %d threads to terminate\n",
+                      stt_data.stt_nthreads);
 #endif
 
        spin_unlock(&stt_data.stt_lock);
index d0da85e..4487fee 100644 (file)
@@ -238,45 +238,45 @@ static int seq_client_alloc_seq(const struct lu_env *env,
 }
 
 static int seq_fid_alloc_prep(struct lu_client_seq *seq,
-                              cfs_waitlink_t *link)
+                             wait_queue_t *link)
 {
-        if (seq->lcs_update) {
-                cfs_waitq_add(&seq->lcs_waitq, link);
-                cfs_set_current_state(CFS_TASK_UNINT);
+       if (seq->lcs_update) {
+               add_wait_queue(&seq->lcs_waitq, link);
+               set_current_state(TASK_UNINTERRUPTIBLE);
                mutex_unlock(&seq->lcs_mutex);
 
-                cfs_waitq_wait(link, CFS_TASK_UNINT);
+               waitq_wait(link, TASK_UNINTERRUPTIBLE);
 
                mutex_lock(&seq->lcs_mutex);
-                cfs_waitq_del(&seq->lcs_waitq, link);
-                cfs_set_current_state(CFS_TASK_RUNNING);
-                return -EAGAIN;
-        }
-        ++seq->lcs_update;
+               remove_wait_queue(&seq->lcs_waitq, link);
+               set_current_state(TASK_RUNNING);
+               return -EAGAIN;
+       }
+       ++seq->lcs_update;
        mutex_unlock(&seq->lcs_mutex);
-        return 0;
+       return 0;
 }
 
 static void seq_fid_alloc_fini(struct lu_client_seq *seq)
 {
-        LASSERT(seq->lcs_update == 1);
+       LASSERT(seq->lcs_update == 1);
        mutex_lock(&seq->lcs_mutex);
-        --seq->lcs_update;
-        cfs_waitq_signal(&seq->lcs_waitq);
+       --seq->lcs_update;
+       wake_up(&seq->lcs_waitq);
 }
 
 /**
  * Allocate the whole seq to the caller.
  **/
 int seq_client_get_seq(const struct lu_env *env,
-                       struct lu_client_seq *seq, seqno_t *seqnr)
+                      struct lu_client_seq *seq, seqno_t *seqnr)
 {
-        cfs_waitlink_t link;
-        int rc;
+       wait_queue_t link;
+       int rc;
 
-        LASSERT(seqnr != NULL);
+       LASSERT(seqnr != NULL);
        mutex_lock(&seq->lcs_mutex);
-        cfs_waitlink_init(&link);
+       init_waitqueue_entry_current(&link);
 
         while (1) {
                 rc = seq_fid_alloc_prep(seq, &link);
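
seq_fid_alloc_prep()/seq_fid_alloc_fini() form a hand-rolled single-updater gate: a caller that finds lcs_update set parks itself on lcs_waitq in TASK_UNINTERRUPTIBLE and gets -EAGAIN to retry, while the winner returns 0 with the flag claimed and lcs_mutex dropped. A sketch of the caller side, matching seq_client_get_seq() above (mutex handling follows the hunks; the allocation step is elided):

        wait_queue_t link;
        int rc;

        init_waitqueue_entry_current(&link);
        mutex_lock(&seq->lcs_mutex);
        while (1) {
                rc = seq_fid_alloc_prep(seq, &link);    /* -EAGAIN: lost the race */
                if (rc == 0)
                        break;                          /* gate held, mutex dropped */
        }
        /* ... allocate a sequence under the gate ... */
        seq_fid_alloc_fini(seq);        /* re-takes the mutex, wakes waiters */
        mutex_unlock(&seq->lcs_mutex);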
@@ -318,16 +318,16 @@ EXPORT_SYMBOL(seq_client_get_seq);
 
 /* Allocate new fid on passed client @seq and save it to @fid. */
 int seq_client_alloc_fid(const struct lu_env *env,
-                         struct lu_client_seq *seq, struct lu_fid *fid)
+                        struct lu_client_seq *seq, struct lu_fid *fid)
 {
-        cfs_waitlink_t link;
-        int rc;
-        ENTRY;
+       wait_queue_t link;
+       int rc;
+       ENTRY;
 
-        LASSERT(seq != NULL);
-        LASSERT(fid != NULL);
+       LASSERT(seq != NULL);
+       LASSERT(fid != NULL);
 
-        cfs_waitlink_init(&link);
+       init_waitqueue_entry_current(&link);
        mutex_lock(&seq->lcs_mutex);
 
        if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_EXHAUST))
@@ -388,23 +388,23 @@ EXPORT_SYMBOL(seq_client_alloc_fid);
  */
 void seq_client_flush(struct lu_client_seq *seq)
 {
-        cfs_waitlink_t link;
+       wait_queue_t link;
 
-        LASSERT(seq != NULL);
-        cfs_waitlink_init(&link);
+       LASSERT(seq != NULL);
+       init_waitqueue_entry_current(&link);
        mutex_lock(&seq->lcs_mutex);
 
-        while (seq->lcs_update) {
-                cfs_waitq_add(&seq->lcs_waitq, &link);
-                cfs_set_current_state(CFS_TASK_UNINT);
+       while (seq->lcs_update) {
+               add_wait_queue(&seq->lcs_waitq, &link);
+               set_current_state(TASK_UNINTERRUPTIBLE);
                mutex_unlock(&seq->lcs_mutex);
 
-                cfs_waitq_wait(&link, CFS_TASK_UNINT);
+               waitq_wait(&link, TASK_UNINTERRUPTIBLE);
 
                mutex_lock(&seq->lcs_mutex);
-                cfs_waitq_del(&seq->lcs_waitq, &link);
-                cfs_set_current_state(CFS_TASK_RUNNING);
-        }
+               remove_wait_queue(&seq->lcs_waitq, &link);
+               set_current_state(TASK_RUNNING);
+       }
 
         fid_zero(&seq->lcs_fid);
         /**
@@ -489,7 +489,7 @@ int seq_client_init(struct lu_client_seq *seq,
        else
                seq->lcs_width = LUSTRE_DATA_SEQ_MAX_WIDTH;
 
-       cfs_waitq_init(&seq->lcs_waitq);
+       init_waitqueue_head(&seq->lcs_waitq);
        /* Make sure that things are clear before work is started. */
        seq_client_flush(seq);
 
index d9f2237..93d1c1e 100644 (file)
@@ -74,41 +74,41 @@ static int fld_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
 
 static void fld_enter_request(struct client_obd *cli)
 {
-        struct mdc_cache_waiter mcw;
-        struct l_wait_info lwi = { 0 };
-
-        client_obd_list_lock(&cli->cl_loi_list_lock);
-        if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
-                cfs_list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
-                cfs_waitq_init(&mcw.mcw_waitq);
-                client_obd_list_unlock(&cli->cl_loi_list_lock);
-                l_wait_event(mcw.mcw_waitq, fld_req_avail(cli, &mcw), &lwi);
-        } else {
-                cli->cl_r_in_flight++;
-                client_obd_list_unlock(&cli->cl_loi_list_lock);
-        }
+       struct mdc_cache_waiter mcw;
+       struct l_wait_info lwi = { 0 };
+
+       client_obd_list_lock(&cli->cl_loi_list_lock);
+       if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
+               cfs_list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
+               init_waitqueue_head(&mcw.mcw_waitq);
+               client_obd_list_unlock(&cli->cl_loi_list_lock);
+               l_wait_event(mcw.mcw_waitq, fld_req_avail(cli, &mcw), &lwi);
+       } else {
+               cli->cl_r_in_flight++;
+               client_obd_list_unlock(&cli->cl_loi_list_lock);
+       }
 }
 
 static void fld_exit_request(struct client_obd *cli)
 {
-        cfs_list_t *l, *tmp;
-        struct mdc_cache_waiter *mcw;
-
-        client_obd_list_lock(&cli->cl_loi_list_lock);
-        cli->cl_r_in_flight--;
-        cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
-
-                if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
-                        /* No free request slots anymore */
-                        break;
-                }
-
-                mcw = cfs_list_entry(l, struct mdc_cache_waiter, mcw_entry);
-                cfs_list_del_init(&mcw->mcw_entry);
-                cli->cl_r_in_flight++;
-                cfs_waitq_signal(&mcw->mcw_waitq);
-        }
-        client_obd_list_unlock(&cli->cl_loi_list_lock);
+       cfs_list_t *l, *tmp;
+       struct mdc_cache_waiter *mcw;
+
+       client_obd_list_lock(&cli->cl_loi_list_lock);
+       cli->cl_r_in_flight--;
+       cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
+
+               if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
+                       /* No free request slots anymore */
+                       break;
+               }
+
+               mcw = cfs_list_entry(l, struct mdc_cache_waiter, mcw_entry);
+               cfs_list_del_init(&mcw->mcw_entry);
+               cli->cl_r_in_flight++;
+               wake_up(&mcw->mcw_waitq);
+       }
+       client_obd_list_unlock(&cli->cl_loi_list_lock);
 }
 
 static int fld_rrb_hash(struct lu_client_fld *fld,
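
Together these two functions throttle FLD RPCs at cl_max_rpcs_in_flight: an over-limit caller links a stack-allocated mdc_cache_waiter onto cl_cache_waiters and sleeps in l_wait_event(), and each completing request hands its slot to exactly one waiter before waking it. Usage is a simple bracket (fld_client_rpc() stands in for the actual RPC call; illustrative only):

        fld_enter_request(cli);         /* may sleep until a slot frees up */
        rc = fld_client_rpc(...);       /* the throttled work */
        fld_exit_request(cli);          /* release the slot, wake one waiter */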
index 03ee887..9b7b381 100644 (file)
@@ -1561,22 +1561,22 @@ struct cl_lock {
          */
         struct cl_lock_descr  cll_descr;
         /** Protected by cl_lock::cll_guard. */
-        enum cl_lock_state    cll_state;
-        /** signals state changes. */
-        cfs_waitq_t           cll_wq;
-        /**
-         * Recursive lock, most fields in cl_lock{} are protected by this.
-         *
-         * Locking rules: this mutex is never held across network
-         * communication, except when lock is being canceled.
-         *
-         * Lock ordering: a mutex of a sub-lock is taken first, then a mutex
-         * on a top-lock. Other direction is implemented through a
-         * try-lock-repeat loop. Mutices of unrelated locks can be taken only
-         * by try-locking.
-         *
-         * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
-         */
+       enum cl_lock_state    cll_state;
+       /** signals state changes. */
+       wait_queue_head_t     cll_wq;
+       /**
+        * Recursive lock, most fields in cl_lock{} are protected by this.
+        *
+        * Locking rules: this mutex is never held across network
+        * communication, except when lock is being canceled.
+        *
+        * Lock ordering: a mutex of a sub-lock is taken first, then a mutex
+        * on a top-lock. Other direction is implemented through a
+        * try-lock-repeat loop. Mutices of unrelated locks can be taken only
+        * by try-locking.
+        *
+        * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
+        */
        struct mutex            cll_guard;
         cfs_task_t           *cll_guarder;
         int                   cll_depth;
@@ -3206,7 +3206,7 @@ struct cl_sync_io {
        /** barrier of destroy this structure */
        cfs_atomic_t            csi_barrier;
        /** completion to be signaled when transfer is complete. */
-       cfs_waitq_t             csi_waitq;
+       wait_queue_head_t       csi_waitq;
 };
 
 void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages);
index 8d9915d..0f713e2 100644 (file)
@@ -280,7 +280,7 @@ typedef struct task_struct cfs_task_t;
 extern struct task_struct *current;
 int in_group_p(gid_t gid);
 
-#define cfs_set_current_state(foo) do { current->state = foo; } while (0)
+#define set_current_state(foo) do { current->state = foo; } while (0)
 
 #define wait_event_interruptible(wq, condition)                         \
 {                                                                       \
@@ -378,10 +378,10 @@ struct file_lock {
         struct file_lock *fl_next;  /* singly linked list for this inode  */
         cfs_list_t fl_link;   /* doubly linked list of all locks */
         cfs_list_t fl_block;  /* circular list of blocked processes */
-        void *fl_owner;
-        unsigned int fl_pid;
-        cfs_waitq_t fl_wait;
-        struct file *fl_file;
+       void *fl_owner;
+       unsigned int fl_pid;
+       wait_queue_head_t fl_wait;
+       struct file *fl_file;
         unsigned char fl_flags;
         unsigned char fl_type;
         loff_t fl_start;
index 366fe5a..549ef33 100644 (file)
@@ -555,28 +555,28 @@ struct lu_object_header {
 struct fld;
 
 struct lu_site_bkt_data {
-        /**
-         * number of busy object on this bucket
-         */
-        long                      lsb_busy;
-        /**
-         * LRU list, updated on each access to object. Protected by
-         * bucket lock of lu_site::ls_obj_hash.
-         *
-         * "Cold" end of LRU is lu_site::ls_lru.next. Accessed object are
-         * moved to the lu_site::ls_lru.prev (this is due to the non-existence
-         * of list_for_each_entry_safe_reverse()).
-         */
-        cfs_list_t                lsb_lru;
-        /**
-         * Wait-queue signaled when an object in this site is ultimately
-         * destroyed (lu_object_free()). It is used by lu_object_find() to
-         * wait before re-trying when object in the process of destruction is
-         * found in the hash table.
-         *
-         * \see htable_lookup().
-         */
-        cfs_waitq_t               lsb_marche_funebre;
+       /**
+        * number of busy object on this bucket
+        */
+       long                      lsb_busy;
+       /**
+        * LRU list, updated on each access to object. Protected by
+        * bucket lock of lu_site::ls_obj_hash.
+        *
+        * "Cold" end of LRU is lu_site::ls_lru.next. Accessed object are
+        * moved to the lu_site::ls_lru.prev (this is due to the non-existence
+        * of list_for_each_entry_safe_reverse()).
+        */
+       cfs_list_t                lsb_lru;
+       /**
+        * Wait-queue signaled when an object in this site is ultimately
+        * destroyed (lu_object_free()). It is used by lu_object_find() to
+        * wait before re-trying when object in the process of destruction is
+        * found in the hash table.
+        *
+        * \see htable_lookup().
+        */
+       wait_queue_head_t         lsb_marche_funebre;
 };
 
 enum {
index 9d376c9..79f4b05 100644 (file)
@@ -467,7 +467,7 @@ struct ldlm_namespace {
         * Wait queue used by __ldlm_namespace_free. Gets woken up every time
         * a resource is removed.
         */
-       cfs_waitq_t             ns_waitq;
+       wait_queue_head_t       ns_waitq;
        /** LDLM pool structure for this namespace */
        struct ldlm_pool        ns_pool;
        /** Definition of how eagerly unused locks will be released from LRU */
@@ -782,7 +782,7 @@ struct ldlm_lock {
         * it's no longer in use.  If the lock is not granted, a process sleeps
         * on this waitq to learn when it becomes granted.
         */
-       cfs_waitq_t             l_waitq;
+       wait_queue_head_t       l_waitq;
 
        /**
         * Seconds. It will be updated if there is any activity related to
index 2ce75b2..8d221e8 100644 (file)
@@ -369,9 +369,9 @@ struct lu_client_seq {
         /* Seq-server for direct talking */
         struct lu_server_seq   *lcs_srv;
 
-        /* wait queue for fid allocation and update indicator */
-        cfs_waitq_t             lcs_waitq;
-        int                     lcs_update;
+       /* wait queue for fid allocation and update indicator */
+       wait_queue_head_t       lcs_waitq;
+       int                     lcs_update;
 };
 
 /* server sequence manager interface */
index acb82fd..d86f0e7 100644 (file)
@@ -192,8 +192,8 @@ struct obd_import {
         cfs_time_t                imp_sec_expire;
         /** @} */
 
-        /** Wait queue for those who need to wait for recovery completion */
-        cfs_waitq_t               imp_recovery_waitq;
+       /** Wait queue for those who need to wait for recovery completion */
+       wait_queue_head_t         imp_recovery_waitq;
 
         /** Number of requests currently in-flight */
         cfs_atomic_t              imp_inflight;
index adeb287..022a04c 100644 (file)
@@ -708,59 +708,59 @@ struct l_wait_info {
  */
 #define __l_wait_event(wq, condition, info, ret, l_add_wait)                   \
 do {                                                                           \
-        cfs_waitlink_t __wait;                                                 \
-        cfs_duration_t __timeout = info->lwi_timeout;                          \
-        cfs_sigset_t   __blocked;                                              \
-        int   __allow_intr = info->lwi_allow_intr;                             \
-                                                                               \
-        ret = 0;                                                               \
-        if (condition)                                                         \
-                break;                                                         \
-                                                                               \
-        cfs_waitlink_init(&__wait);                                            \
-        l_add_wait(&wq, &__wait);                                              \
-                                                                               \
-        /* Block all signals (just the non-fatal ones if no timeout). */       \
-        if (info->lwi_on_signal != NULL && (__timeout == 0 || __allow_intr))   \
-                __blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);              \
-        else                                                                   \
-                __blocked = cfs_block_sigsinv(0);                              \
-                                                                               \
-        for (;;) {                                                             \
-                unsigned       __wstate;                                       \
-                                                                               \
-                __wstate = info->lwi_on_signal != NULL &&                      \
-                           (__timeout == 0 || __allow_intr) ?                  \
-                        CFS_TASK_INTERRUPTIBLE : CFS_TASK_UNINT;               \
-                                                                               \
-                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);                 \
-                                                                               \
-                if (condition)                                                 \
-                        break;                                                 \
-                                                                               \
-                if (__timeout == 0) {                                          \
-                        cfs_waitq_wait(&__wait, __wstate);                     \
-                } else {                                                       \
-                        cfs_duration_t interval = info->lwi_interval?          \
-                                             min_t(cfs_duration_t,             \
-                                                 info->lwi_interval,__timeout):\
-                                             __timeout;                        \
-                        cfs_duration_t remaining = cfs_waitq_timedwait(&__wait,\
-                                                   __wstate,                   \
-                                                   interval);                  \
-                        __timeout = cfs_time_sub(__timeout,                    \
-                                            cfs_time_sub(interval, remaining));\
-                        if (__timeout == 0) {                                  \
-                                if (info->lwi_on_timeout == NULL ||            \
-                                    info->lwi_on_timeout(info->lwi_cb_data)) { \
-                                        ret = -ETIMEDOUT;                      \
-                                        break;                                 \
-                                }                                              \
-                                /* Take signals after the timeout expires. */  \
-                                if (info->lwi_on_signal != NULL)               \
-                                    (void)cfs_block_sigsinv(LUSTRE_FATAL_SIGS);\
-                        }                                                      \
-                }                                                              \
+       wait_queue_t __wait;                                                   \
+       cfs_duration_t __timeout = info->lwi_timeout;                          \
+       cfs_sigset_t   __blocked;                                              \
+       int   __allow_intr = info->lwi_allow_intr;                             \
+                                                                              \
+       ret = 0;                                                               \
+       if (condition)                                                         \
+               break;                                                         \
+                                                                              \
+       init_waitqueue_entry_current(&__wait);                                 \
+       l_add_wait(&wq, &__wait);                                              \
+                                                                              \
+       /* Block all signals (just the non-fatal ones if no timeout). */       \
+       if (info->lwi_on_signal != NULL && (__timeout == 0 || __allow_intr))   \
+               __blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);              \
+       else                                                                   \
+               __blocked = cfs_block_sigsinv(0);                              \
+                                                                              \
+       for (;;) {                                                             \
+               unsigned       __wstate;                                       \
+                                                                              \
+               __wstate = info->lwi_on_signal != NULL &&                      \
+                          (__timeout == 0 || __allow_intr) ?                  \
+                       TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;             \
+                                                                              \
+               set_current_state(TASK_INTERRUPTIBLE);                         \
+                                                                              \
+               if (condition)                                                 \
+                       break;                                                 \
+                                                                              \
+               if (__timeout == 0) {                                          \
+                       waitq_wait(&__wait, __wstate);                         \
+               } else {                                                       \
+                       cfs_duration_t interval = info->lwi_interval?          \
+                                            min_t(cfs_duration_t,             \
+                                                info->lwi_interval,__timeout):\
+                                            __timeout;                        \
+                       cfs_duration_t remaining = waitq_timedwait(&__wait,    \
+                                                  __wstate,                   \
+                                                  interval);                  \
+                       __timeout = cfs_time_sub(__timeout,                    \
+                                           cfs_time_sub(interval, remaining));\
+                       if (__timeout == 0) {                                  \
+                               if (info->lwi_on_timeout == NULL ||            \
+                                   info->lwi_on_timeout(info->lwi_cb_data)) { \
+                                       ret = -ETIMEDOUT;                      \
+                                       break;                                 \
+                               }                                              \
+                               /* Take signals after the timeout expires. */  \
+                               if (info->lwi_on_signal != NULL)               \
+                                   (void)cfs_block_sigsinv(LUSTRE_FATAL_SIGS);\
+                       }                                                      \
+               }                                                              \
                                                                                \
                 if (condition)                                                 \
                         break;                                                 \
@@ -785,8 +785,8 @@ do {                                                                           \
                                                                                \
        cfs_restore_sigs(__blocked);                                           \
                                                                                \
-        cfs_set_current_state(CFS_TASK_RUNNING);                               \
-        cfs_waitq_del(&wq, &__wait);                                           \
+       set_current_state(TASK_RUNNING);                                       \
+       remove_wait_queue(&wq, &__wait);                                       \
 } while (0)
 
 #else /* !__KERNEL__ */
@@ -840,32 +840,32 @@ do {                                                                    \
 
 #define l_wait_event(wq, condition, info)                       \
 ({                                                              \
-        int                 __ret;                              \
-        struct l_wait_info *__info = (info);                    \
-                                                                \
-        __l_wait_event(wq, condition, __info,                   \
-                       __ret, cfs_waitq_add);                   \
-        __ret;                                                  \
+       int                 __ret;                              \
+       struct l_wait_info *__info = (info);                    \
+                                                               \
+       __l_wait_event(wq, condition, __info,                   \
+                      __ret, add_wait_queue);                  \
+       __ret;                                                  \
 })
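
A typical caller builds the l_wait_info with one of the LWI_* constructors defined elsewhere in this header. A hedged usage sketch (LWI_TIMEOUT takes a timeout, an on-timeout callback, and its callback argument; wq and flag are illustrative names):

        struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(10), NULL, NULL);
        int rc;

        /* no lwi_on_signal, so the sleep is uninterruptible; returns 0 once
         * the flag is set, or -ETIMEDOUT after 10 seconds */
        rc = l_wait_event(wq, flag != 0, &lwi);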
 
 #define l_wait_event_exclusive(wq, condition, info)             \
 ({                                                              \
-        int                 __ret;                              \
-        struct l_wait_info *__info = (info);                    \
-                                                                \
-        __l_wait_event(wq, condition, __info,                   \
-                       __ret, cfs_waitq_add_exclusive);         \
-        __ret;                                                  \
+       int                 __ret;                              \
+       struct l_wait_info *__info = (info);                    \
+                                                               \
+       __l_wait_event(wq, condition, __info,                   \
+                      __ret, add_wait_queue_exclusive);        \
+       __ret;                                                  \
 })
 
 #define l_wait_event_exclusive_head(wq, condition, info)        \
 ({                                                              \
+       int                 __ret;                              \