# s/\bcfs_module\b/declare_module/g
s/\bcfs_request_module\b/request_module/g
/#[ \t]*define[ \t]*\brequest_module\b[ \t]*\brequest_module\b/d
+# Wait Queue
+s/\bCFS_TASK_INTERRUPTIBLE\b/TASK_INTERRUPTIBLE/g
+/#[ \t]*define[ \t]*\bTASK_INTERRUPTIBLE\b[ \t]*\bTASK_INTERRUPTIBLE\b/d
+s/\bCFS_TASK_UNINT\b/TASK_UNINTERRUPTIBLE/g
+/#[ \t]*define[ \t]*\bTASK_UNINTERRUPTIBLE\b[ \t]*\bTASK_UNINTERRUPTIBLE\b/d
+s/\bCFS_TASK_RUNNING\b/TASK_RUNNING/g
+/#[ \t]*define[ \t]*\bTASK_RUNNING\b[ \t]*\bTASK_RUNNING\b/d
+s/\bcfs_set_current_state\b/set_current_state/g
+/#[ \t]*define[ \t]*\bset_current_state\b *( *\w* *)[ \t]*\bset_current_state\b *( *\w* *)/d
+s/\bcfs_wait_event\b/wait_event/g
+/#[ \t]*define[ \t]*\bwait_event\b *( *\w* *, *\w* *)[ \t]*\bwait_event\b *( *\w* *, *\w* *)/d
+s/\bcfs_waitlink_t\b/wait_queue_t/g
+/typedef[ \t]*\bwait_queue_t\b[ \t]*\bwait_queue_t\b/d
+s/\bcfs_waitq_t\b/wait_queue_head_t/g
+/typedef[ \t]*\bwait_queue_head_t\b[ \t]*\bwait_queue_head_t\b/d
+#s/\bcfs_task_state_t\b/task_state_t/g
+s/\bcfs_waitq_init\b/init_waitqueue_head/g
+/#[ \t]*define[ \t]*\binit_waitqueue_head\b *( *\w* *)[ \t]*\binit_waitqueue_head\b *( *\w* *)/d
+s/\bcfs_waitlink_init\b/init_waitqueue_entry_current/g
+s/\bcfs_waitq_add\b/add_wait_queue/g
+/#[ \t]*define[ \t]*\badd_wait_queue\b *( *\w* *, *\w* *)[ \t]*\badd_wait_queue\b *( *\w* *, *\w* *)/d
+s/\bcfs_waitq_add_exclusive\b/add_wait_queue_exclusive/g
+/#[ \t]*define[ \t]*\badd_wait_queue_exclusive\b *( *\w* *, *\w* *)[ \t]*\badd_wait_queue_exclusive\b *( *\w* *, *\w* *)/d
+s/\bcfs_waitq_del\b/remove_wait_queue/g
+/#[ \t]*define[ \t]*\bremove_wait_queue\b *( *\w* *, *\w* *)[ \t]*\bremove_wait_queue\b *( *\w* *, *\w* *)/d
+s/\bcfs_waitq_active\b/waitqueue_active/g
+/#[ \t]*define[ \t]*\bwaitqueue_active\b *( *\w* *)[ \t]*\bwaitqueue_active\b *( *\w* *)/d
+s/\bcfs_waitq_signal\b/wake_up/g
+/#[ \t]*define[ \t]*\bwake_up\b *( *\w* *)[ \t]*\bwake_up\b *( *\w* *)/d
+s/\bcfs_waitq_signal_nr\b/wake_up_nr/g
+/#[ \t]*define[ \t]*\bwake_up_nr\b *( *\w* *, *\w* *)[ \t]*\bwake_up_nr\b *( *\w* *, *\w* *)/d
+s/\bcfs_waitq_broadcast\b/wake_up_all/g
+/#[ \t]*define[ \t]*\bwake_up_all\b *( *\w* *)[ \t]*\bwake_up_all\b *( *\w* *)/d
+s/\bcfs_waitq_wait\b/waitq_wait/g
+s/\bcfs_waitq_timedwait\b/waitq_timedwait/g
+s/\bcfs_schedule_timeout\b/schedule_timeout/g
+/#[ \t]*define[ \t]*\bschedule_timeout\b *( *\w* *)[ \t]*\bschedule_timeout\b *( *\w* *)/d
+s/\bcfs_schedule\b/schedule/g
+/#[ \t]*define[ \t]*\bschedule\b *( *)[ \t]*\bschedule\b *( *)/d
+s/\bcfs_need_resched\b/need_resched/g
+/#[ \t]*define[ \t]*\bneed_resched\b *( *)[ \t]*\bneed_resched\b *( *)/d
+s/\bcfs_cond_resched\b/cond_resched/g
+/#[ \t]*define[ \t]*\bcond_resched\b *( *)[ \t]*\bcond_resched\b *( *)/d
+s/\bcfs_waitq_add_exclusive_head\b/add_wait_queue_exclusive_head/g
+s/\bcfs_schedule_timeout_and_set_state\b/schedule_timeout_and_set_state/g
+s/\bCFS_MAX_SCHEDULE_TIMEOUT\b/MAX_SCHEDULE_TIMEOUT/g
+s/\bcfs_task_state_t\b/long/g
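# Editorial note (not part of the original script): the rules above are
# meant to be applied with "sed -f" across the tree being converted.
# A hypothetical invocation — paths and the script name are assumptions:
#   find libcfs lnet -name '*.[ch]' | xargs sed -i -f libcfs_cleanup.sed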
*/
typedef struct cfs_waitq {
struct ksleep_chan wq_ksleep_chan;
-} cfs_waitq_t;
+} wait_queue_head_t;
typedef struct cfs_waitlink {
struct cfs_waitq *wl_waitq;
struct ksleep_link wl_ksleep_link;
-} cfs_waitlink_t;
+} wait_queue_t;
-typedef int cfs_task_state_t;
+#define TASK_INTERRUPTIBLE THREAD_ABORTSAFE
+#define TASK_UNINTERRUPTIBLE THREAD_UNINT
-#define CFS_TASK_INTERRUPTIBLE THREAD_ABORTSAFE
-#define CFS_TASK_UNINT THREAD_UNINT
+void init_waitqueue_head(struct cfs_waitq *waitq);
+void init_waitqueue_entry_current(struct cfs_waitlink *link);
-void cfs_waitq_init(struct cfs_waitq *waitq);
-void cfs_waitlink_init(struct cfs_waitlink *link);
-
-void cfs_waitq_add(struct cfs_waitq *waitq, struct cfs_waitlink *link);
-void cfs_waitq_add_exclusive(struct cfs_waitq *waitq,
+void add_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+void add_wait_queue_exclusive(struct cfs_waitq *waitq,
struct cfs_waitlink *link);
-void cfs_waitq_del(struct cfs_waitq *waitq, struct cfs_waitlink *link);
-int cfs_waitq_active(struct cfs_waitq *waitq);
+void remove_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+int waitqueue_active(struct cfs_waitq *waitq);
-void cfs_waitq_signal(struct cfs_waitq *waitq);
-void cfs_waitq_signal_nr(struct cfs_waitq *waitq, int nr);
-void cfs_waitq_broadcast(struct cfs_waitq *waitq);
+void wake_up(struct cfs_waitq *waitq);
+void wake_up_nr(struct cfs_waitq *waitq, int nr);
+void wake_up_all(struct cfs_waitq *waitq);
-void cfs_waitq_wait(struct cfs_waitlink *link, cfs_task_state_t state);
-cfs_duration_t cfs_waitq_timedwait(struct cfs_waitlink *link,
- cfs_task_state_t state,
+void waitq_wait(struct cfs_waitlink *link, long state);
+cfs_duration_t waitq_timedwait(struct cfs_waitlink *link,
+ long state,
cfs_duration_t timeout);
/*
extern void thread_set_timer_deadline(__u64 deadline);
extern void thread_cancel_timer(void);
-static inline int cfs_schedule_timeout(int state, int64_t timeout)
+static inline int schedule_timeout(int state, int64_t timeout)
{
int result;
return result;
}
-#define cfs_schedule() cfs_schedule_timeout(CFS_TASK_UNINT, CFS_TICK)
-#define cfs_pause(tick) cfs_schedule_timeout(CFS_TASK_UNINT, tick)
+#define schedule() schedule_timeout(TASK_UNINTERRUPTIBLE, CFS_TICK)
+#define cfs_pause(tick) schedule_timeout(TASK_UNINTERRUPTIBLE, tick)
#define __wait_event(wq, condition) \
do { \
struct cfs_waitlink __wait; \
\
- cfs_waitlink_init(&__wait); \
+ init_waitqueue_entry_current(&__wait); \
for (;;) { \
- cfs_waitq_add(&wq, &__wait); \
+ add_wait_queue(&wq, &__wait); \
if (condition) \
break; \
- cfs_waitq_wait(&__wait, CFS_TASK_UNINT); \
- cfs_waitq_del(&wq, &__wait); \
+ waitq_wait(&__wait, TASK_UNINTERRUPTIBLE); \
+ remove_wait_queue(&wq, &__wait); \
} \
- cfs_waitq_del(&wq, &__wait); \
+ remove_wait_queue(&wq, &__wait); \
} while (0)
#define wait_event(wq, condition) \
do { \
struct cfs_waitlink __wait; \
\
- cfs_waitlink_init(&__wait); \
+ init_waitqueue_entry_current(&__wait); \
for (;;) { \
if (ex == 0) \
- cfs_waitq_add(&wq, &__wait); \
+ add_wait_queue(&wq, &__wait); \
else \
- cfs_waitq_add_exclusive(&wq, &__wait); \
+ add_wait_queue_exclusive(&wq, &__wait); \
if (condition) \
break; \
if (!cfs_signal_pending()) { \
- cfs_waitq_wait(&__wait, \
- CFS_TASK_INTERRUPTIBLE); \
- cfs_waitq_del(&wq, &__wait); \
+ waitq_wait(&__wait, \
+ TASK_INTERRUPTIBLE); \
+ remove_wait_queue(&wq, &__wait); \
continue; \
} \
ret = -ERESTARTSYS; \
break; \
} \
- cfs_waitq_del(&wq, &__wait); \
+ remove_wait_queue(&wq, &__wait); \
} while (0)
#define wait_event_interruptible(wq, condition) \
} while (0)
/* used in couple of places */
-static inline void sleep_on(cfs_waitq_t *waitq)
+static inline void sleep_on(wait_queue_head_t *waitq)
{
- cfs_waitlink_t link;
+ wait_queue_t link;
- cfs_waitlink_init(&link);
- cfs_waitq_add(waitq, &link);
- cfs_waitq_wait(&link, CFS_TASK_UNINT);
- cfs_waitq_del(waitq, &link);
+ init_waitqueue_entry_current(&link);
+ add_wait_queue(waitq, &link);
+ waitq_wait(&link, TASK_UNINTERRUPTIBLE);
+ remove_wait_queue(waitq, &link);
}
/*
extern unsigned long cfs_fail_loc;
extern unsigned int cfs_fail_val;
-extern cfs_waitq_t cfs_race_waitq;
+extern wait_queue_head_t cfs_race_waitq;
extern int cfs_race_state;
int __cfs_fail_check_set(__u32 id, __u32 value, int set);
* the first and continues. */
static inline void cfs_race(__u32 id)
{
-
- if (CFS_FAIL_PRECHECK(id)) {
- if (unlikely(__cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET))) {
- int rc;
- cfs_race_state = 0;
- CERROR("cfs_race id %x sleeping\n", id);
+ if (CFS_FAIL_PRECHECK(id)) {
+ if (unlikely(__cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET))) {
+ int rc;
+ cfs_race_state = 0;
+ CERROR("cfs_race id %x sleeping\n", id);
rc = wait_event_interruptible(cfs_race_waitq,
cfs_race_state != 0);
- CERROR("cfs_fail_race id %x awake, rc=%d\n", id, rc);
- } else {
- CERROR("cfs_fail_race id %x waking\n", id);
- cfs_race_state = 1;
- cfs_waitq_signal(&cfs_race_waitq);
- }
- }
+ CERROR("cfs_fail_race id %x awake, rc=%d\n", id, rc);
+ } else {
+ CERROR("cfs_fail_race id %x waking\n", id);
+ cfs_race_state = 1;
+ wake_up(&cfs_race_waitq);
+ }
+ }
}
#define CFS_RACE(id) cfs_race(id)
#else
#define __LIBCFS_PRIM_H__
/*
- * Schedule
- */
-void cfs_schedule_timeout_and_set_state(cfs_task_state_t state,
- int64_t timeout);
-void cfs_schedule_timeout(int64_t timeout);
-void cfs_schedule(void);
-void cfs_pause(cfs_duration_t ticks);
-int cfs_need_resched(void);
-void cfs_cond_resched(void);
-
-/*
* Wait Queues
*/
-void cfs_waitq_init(cfs_waitq_t *waitq);
-void cfs_waitlink_init(cfs_waitlink_t *link);
-void cfs_waitq_add(cfs_waitq_t *waitq, cfs_waitlink_t *link);
-void cfs_waitq_add_exclusive(cfs_waitq_t *waitq,
- cfs_waitlink_t *link);
-void cfs_waitq_add_exclusive_head(cfs_waitq_t *waitq,
- cfs_waitlink_t *link);
-void cfs_waitq_del(cfs_waitq_t *waitq, cfs_waitlink_t *link);
-int cfs_waitq_active(cfs_waitq_t *waitq);
-void cfs_waitq_signal(cfs_waitq_t *waitq);
-void cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr);
-void cfs_waitq_broadcast(cfs_waitq_t *waitq);
-void cfs_waitq_wait(cfs_waitlink_t *link, cfs_task_state_t state);
-int64_t cfs_waitq_timedwait(cfs_waitlink_t *link, cfs_task_state_t state,
- int64_t timeout);
-
/*
* Timer
*/
/*
* Wait Queue
*/
-#define CFS_TASK_INTERRUPTIBLE TASK_INTERRUPTIBLE
-#define CFS_TASK_UNINT TASK_UNINTERRUPTIBLE
-#define CFS_TASK_RUNNING TASK_RUNNING
-#define cfs_set_current_state(state) set_current_state(state)
-#define cfs_wait_event(wq, cond) wait_event(wq, cond)
-
-typedef wait_queue_t cfs_waitlink_t;
-typedef wait_queue_head_t cfs_waitq_t;
-typedef long cfs_task_state_t;
#define CFS_DECL_WAITQ(wq) DECLARE_WAIT_QUEUE_HEAD(wq)
+#define LIBCFS_WAITQ_MACROS 1
+#define init_waitqueue_entry_current(w) init_waitqueue_entry(w, current)
+#define waitq_wait(w, s) schedule()
+#define waitq_timedwait(w, s, t) schedule_timeout(t)
+
+#ifndef HAVE___ADD_WAIT_QUEUE_EXCLUSIVE
+static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
+ wait_queue_t *wait)
+{
+ wait->flags |= WQ_FLAG_EXCLUSIVE;
+ __add_wait_queue(q, wait);
+}
+#endif /* HAVE___ADD_WAIT_QUEUE_EXCLUSIVE */
+
+/**
+ * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
+ * waiting threads, which is not always desirable because all threads are
+ * woken up again and again, even if the user only needs a few of them to
+ * be active most of the time. This hurts performance because the cache
+ * can be polluted by different threads.
+ *
+ * A LIFO list resolves this problem because by default it always wakes up
+ * the most recently active thread.
+ *
+ * NB: please don't mix non-exclusive and exclusive waits on the same
+ * waitq if add_wait_queue_exclusive_head is used.
+ */
+#define add_wait_queue_exclusive_head(waitq, link) \
+{ \
+ unsigned long flags; \
+ \
+ spin_lock_irqsave(&((waitq)->lock), flags); \
+ __add_wait_queue_exclusive(waitq, link); \
+ spin_unlock_irqrestore(&((waitq)->lock), flags); \
+}
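/*
 * Editorial sketch, not part of the patch: a minimal exclusive waiter
 * built from the renamed primitives above, assuming the caller provides
 * "waitq" (a wait_queue_head_t) and "condition"; the names are
 * illustrative only.
 *
 *	wait_queue_t link;
 *
 *	init_waitqueue_entry_current(&link);
 *	add_wait_queue_exclusive_head(&waitq, &link);
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	if (!condition)
 *		waitq_wait(&link, TASK_INTERRUPTIBLE);
 *	set_current_state(TASK_RUNNING);
 *	remove_wait_queue(&waitq, &link);
 */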
+
+#define schedule_timeout_and_set_state(state, timeout) \
+{ \
+ set_current_state(state); \
+ schedule_timeout(timeout); \
+}
+
+/* deschedule for a bit... */
+#define cfs_pause(ticks) \
+{ \
+ set_current_state(TASK_UNINTERRUPTIBLE); \
+ schedule_timeout(ticks); \
+}
+
/*
* Task struct
*/
*/
typedef struct timer_list cfs_timer_t;
-#define CFS_MAX_SCHEDULE_TIMEOUT MAX_SCHEDULE_TIMEOUT
-
-#ifndef wait_event_timeout /* Only for RHEL3 2.4.21 kernel */
-#define __wait_event_timeout(wq, condition, timeout, ret) \
-do { \
- int __ret = 0; \
- if (!(condition)) { \
- wait_queue_t __wait; \
- unsigned long expire; \
- \
- init_waitqueue_entry(&__wait, current); \
- expire = timeout + jiffies; \
- add_wait_queue(&wq, &__wait); \
- for (;;) { \
- set_current_state(TASK_UNINTERRUPTIBLE); \
- if (condition) \
- break; \
- if (jiffies > expire) { \
- ret = jiffies - expire; \
- break; \
- } \
- schedule_timeout(timeout); \
- } \
- current->state = TASK_RUNNING; \
- remove_wait_queue(&wq, &__wait); \
- } \
-} while (0)
-/*
- retval == 0; condition met; we're good.
- retval > 0; timed out.
-*/
-#define cfs_waitq_wait_event_timeout(wq, condition, timeout, ret) \
-do { \
- ret = 0; \
- if (!(condition)) \
- __wait_event_timeout(wq, condition, timeout, ret); \
-} while (0)
-#else
-#define cfs_waitq_wait_event_timeout(wq, condition, timeout, ret) \
- ret = wait_event_timeout(wq, condition, timeout)
-#endif
-
-#define cfs_waitq_wait_event_interruptible_timeout(wq, c, timeout, ret) \
- ret = wait_event_interruptible_timeout(wq, c, timeout)
-
/*
* atomic
*/
};
struct upcall_cache_entry {
- cfs_list_t ue_hash;
- __u64 ue_key;
- cfs_atomic_t ue_refcount;
- int ue_flags;
- cfs_waitq_t ue_waitq;
- cfs_time_t ue_acquire_expire;
- cfs_time_t ue_expire;
- union {
- struct md_identity identity;
- } u;
+ cfs_list_t ue_hash;
+ __u64 ue_key;
+ cfs_atomic_t ue_refcount;
+ int ue_flags;
+ wait_queue_head_t ue_waitq;
+ cfs_time_t ue_acquire_expire;
+ cfs_time_t ue_expire;
+ union {
+ struct md_identity identity;
+ } u;
};
#define UC_CACHE_HASH_SIZE (128)
struct completion {
unsigned int done;
- cfs_waitq_t wait;
+ wait_queue_head_t wait;
};
#endif /* HAVE_LIBPTHREAD */
typedef struct cfs_waitlink {
cfs_list_t sleeping;
void *process;
-} cfs_waitlink_t;
+} wait_queue_t;
typedef struct cfs_waitq {
cfs_list_t sleepers;
-} cfs_waitq_t;
-
-#define CFS_DECL_WAITQ(wq) cfs_waitq_t wq
+} wait_queue_head_t;
+
+#define CFS_DECL_WAITQ(wq) wait_queue_head_t wq
+void init_waitqueue_head(struct cfs_waitq *waitq);
+void init_waitqueue_entry_current(struct cfs_waitlink *link);
+void add_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+void add_wait_queue_exclusive(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+void add_wait_queue_exclusive_head(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+void remove_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+int waitqueue_active(struct cfs_waitq *waitq);
+void wake_up(struct cfs_waitq *waitq);
+void wake_up_nr(struct cfs_waitq *waitq, int nr);
+void wake_up_all(struct cfs_waitq *waitq);
+void waitq_wait(struct cfs_waitlink *link, long state);
+int64_t waitq_timedwait(struct cfs_waitlink *link, long state, int64_t timeout);
+void schedule_timeout_and_set_state(long state, int64_t timeout);
+void cfs_pause(cfs_duration_t d);
+int need_resched(void);
+void cond_resched(void);
/*
* Task states
*/
-typedef long cfs_task_state_t;
-
-#define CFS_TASK_INTERRUPTIBLE (0)
-#define CFS_TASK_UNINT (1)
-#define CFS_TASK_RUNNING (2)
+#define TASK_INTERRUPTIBLE (0)
+#define TASK_UNINTERRUPTIBLE (1)
+#define TASK_RUNNING (2)
-static inline void cfs_schedule(void) {}
-static inline void cfs_schedule_timeout(int64_t t) {}
+static inline void schedule(void) {}
+static inline void schedule_timeout(int64_t t) {}
/*
* Lproc
*/
-typedef int cfs_task_state_t;
-
-#define CFS_TASK_INTERRUPTIBLE 0x00000001
-#define CFS_TASK_UNINT 0x00000002
-#define CFS_TASK_RUNNING 0x00000003
-#define CFS_TASK_UNINTERRUPTIBLE CFS_TASK_UNINT
+#define TASK_INTERRUPTIBLE 0x00000001
+#define TASK_UNINTERRUPTIBLE 0x00000002
+#define TASK_RUNNING 0x00000003
+#define CFS_TASK_UNINTERRUPTIBLE TASK_UNINTERRUPTIBLE
#define CFS_WAITQ_MAGIC 'CWQM'
#define CFS_WAITLINK_MAGIC 'CWLM'
spinlock_t guard;
cfs_list_t waiters;
-} cfs_waitq_t;
+} wait_queue_head_t;
-typedef struct cfs_waitlink cfs_waitlink_t;
+typedef struct cfs_waitlink wait_queue_t;
#define CFS_WAITQ_CHANNELS (2)
typedef struct cfs_waitlink_channel {
cfs_list_t link;
- cfs_waitq_t * waitq;
- cfs_waitlink_t * waitl;
+ wait_queue_head_t * waitq;
+ wait_queue_t * waitl;
} cfs_waitlink_channel_t;
struct cfs_waitlink {
CFS_WAITQ_EXCLUSIVE = 1
};
-#define CFS_DECL_WAITQ(name) cfs_waitq_t name
+#define CFS_DECL_WAITQ(name) wait_queue_head_t name
/* Kernel thread */
* Task struct
*/
-#define CFS_MAX_SCHEDULE_TIMEOUT ((long_ptr_t)(~0UL>>12))
-#define cfs_schedule_timeout(t) cfs_schedule_timeout_and_set_state(0, t)
+#define MAX_SCHEDULE_TIMEOUT ((long_ptr_t)(~0UL>>12))
+#define schedule_timeout(t) schedule_timeout_and_set_state(0, t)
struct vfsmount;
#define current cfs_current()
-#define cfs_set_current_state(s) do {;} while (0)
-#define cfs_set_current_state(state) cfs_set_current_state(state)
+#define set_current_state(s) do {;} while (0)
-#define cfs_wait_event(wq, condition) \
+#define wait_event(wq, condition) \
do { \
- cfs_waitlink_t __wait; \
- \
- cfs_waitlink_init(&__wait); \
- while (TRUE) { \
- cfs_waitq_add(&wq, &__wait); \
- if (condition) { \
- break; \
- } \
- cfs_waitq_wait(&__wait, CFS_TASK_INTERRUPTIBLE); \
- cfs_waitq_del(&wq, &__wait); \
- } \
- cfs_waitq_del(&wq, &__wait); \
+ wait_queue_t __wait; \
+ \
+ init_waitqueue_entry_current(&__wait); \
+ while (TRUE) { \
+ add_wait_queue(&wq, &__wait); \
+ if (condition) { \
+ break; \
+ } \
+ waitq_wait(&__wait, TASK_INTERRUPTIBLE); \
+ remove_wait_queue(&wq, &__wait); \
+ } \
+ remove_wait_queue(&wq, &__wait); \
} while(0)
#define wait_event_interruptible(wq, condition) \
{ \
- cfs_waitlink_t __wait; \
+ wait_queue_t __wait; \
\
__ret = 0; \
- cfs_waitlink_init(&__wait); \
+ init_waitqueue_entry_current(&__wait); \
while (TRUE) { \
- cfs_waitq_add(&wq, &__wait); \
+ add_wait_queue(&wq, &__wait); \
if (condition) { \
break; \
} \
- cfs_waitq_wait(&__wait, CFS_TASK_INTERRUPTIBLE);\
- cfs_waitq_del(&wq, &__wait); \
+ waitq_wait(&__wait, TASK_INTERRUPTIBLE);\
+ remove_wait_queue(&wq, &__wait); \
} \
- cfs_waitq_del(&wq, &__wait); \
+ remove_wait_queue(&wq, &__wait); \
__ret; \
}
retval > 0; timed out.
*/
-#define cfs_waitq_wait_event_interruptible_timeout( \
- wq, condition, timeout, rc) \
+#define wait_event_interruptible_timeout(wq, condition, timeout)\
do { \
- cfs_waitlink_t __wait; \
- \
- rc = 0; \
- cfs_waitlink_init(&__wait); \
- while (TRUE) { \
- cfs_waitq_add(&wq, &__wait); \
- if (condition) { \
- break; \
- } \
- if (cfs_waitq_timedwait(&__wait, \
- CFS_TASK_INTERRUPTIBLE, timeout) == 0) { \
- rc = TRUE; \
- break; \
- } \
- cfs_waitq_del(&wq, &__wait); \
- } \
- cfs_waitq_del(&wq, &__wait); \
+ wait_queue_t __wait; \
+ \
+ init_waitqueue_entry_current(&__wait); \
+ while (TRUE) { \
+ add_wait_queue(&wq, &__wait); \
+ if (condition) { \
+ break; \
+ } \
+ if (waitq_timedwait(&__wait, \
+ TASK_INTERRUPTIBLE, timeout) == 0) { \
+ break; \
+ } \
+ remove_wait_queue(&wq, &__wait); \
+ } \
+ remove_wait_queue(&wq, &__wait); \
} while(0)
-
-#define cfs_waitq_wait_event_timeout \
- cfs_waitq_wait_event_interruptible_timeout
-
int init_task_manager();
void cleanup_task_manager();
cfs_task_t * cfs_current();
int wake_up_process(cfs_task_t * task);
-void sleep_on(cfs_waitq_t *waitq);
+void sleep_on(wait_queue_head_t *waitq);
#define cfs_might_sleep() do {} while(0)
#define CFS_DECL_JOURNAL_DATA
#define CFS_PUSH_JOURNAL do {;} while(0)
void lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
{
- libcfs_catastrophe = 1;
- CEMERG("LBUG: pid: %u thread: %#x\n",
+ libcfs_catastrophe = 1;
+ CEMERG("LBUG: pid: %u thread: %#x\n",
(unsigned)current_pid(), (unsigned)current_thread());
- libcfs_debug_dumplog();
- libcfs_run_lbug_upcall(msgdata);
- while (1)
- cfs_schedule();
+ libcfs_debug_dumplog();
+ libcfs_run_lbug_upcall(msgdata);
+ while (1)
+ schedule();
/* panic("lbug_with_loc(%s, %s, %d)", file, func, line) */
}
break; \
} \
spin_unlock(&(pta)->lock); \
- cfs_schedule(); \
+ schedule(); \
} while(1); \
/*
break; \
} \
spin_unlock(&(pta)->lock); \
- cfs_schedule(); \
+ schedule(); \
} while(1)
/*
break; \
} \
spin_unlock(&(pta)->lock); \
- cfs_schedule(); \
+ schedule(); \
} while (1); \
/*
}
#endif /* !__DARWIN8__ */
-void cfs_waitq_init(struct cfs_waitq *waitq)
+void init_waitqueue_head(struct cfs_waitq *waitq)
{
ksleep_chan_init(&waitq->wq_ksleep_chan);
}
-void cfs_waitlink_init(struct cfs_waitlink *link)
+void init_waitqueue_entry_current(struct cfs_waitlink *link)
{
ksleep_link_init(&link->wl_ksleep_link);
}
-void cfs_waitq_add(struct cfs_waitq *waitq, struct cfs_waitlink *link)
+void add_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link)
{
- link->wl_waitq = waitq;
+ link->wl_waitq = waitq;
ksleep_add(&waitq->wq_ksleep_chan, &link->wl_ksleep_link);
}
-void cfs_waitq_add_exclusive(struct cfs_waitq *waitq,
- struct cfs_waitlink *link)
+void add_wait_queue_exclusive(struct cfs_waitq *waitq,
+ struct cfs_waitlink *link)
{
- link->wl_waitq = waitq;
+ link->wl_waitq = waitq;
link->wl_ksleep_link.flags |= KSLEEP_EXCLUSIVE;
ksleep_add(&waitq->wq_ksleep_chan, &link->wl_ksleep_link);
}
-void cfs_waitq_del(struct cfs_waitq *waitq,
+void remove_wait_queue(struct cfs_waitq *waitq,
struct cfs_waitlink *link)
{
ksleep_del(&waitq->wq_ksleep_chan, &link->wl_ksleep_link);
}
-int cfs_waitq_active(struct cfs_waitq *waitq)
+int waitqueue_active(struct cfs_waitq *waitq)
{
return (1);
}
-void cfs_waitq_signal(struct cfs_waitq *waitq)
+void wake_up(struct cfs_waitq *waitq)
{
/*
* XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
ksleep_wake(&waitq->wq_ksleep_chan);
}
-void cfs_waitq_signal_nr(struct cfs_waitq *waitq, int nr)
+void wake_up_nr(struct cfs_waitq *waitq, int nr)
{
ksleep_wake_nr(&waitq->wq_ksleep_chan, nr);
}
-void cfs_waitq_broadcast(struct cfs_waitq *waitq)
+void wake_up_all(struct cfs_waitq *waitq)
{
ksleep_wake_all(&waitq->wq_ksleep_chan);
}
-void cfs_waitq_wait(struct cfs_waitlink *link, cfs_task_state_t state)
+void waitq_wait(struct cfs_waitlink *link, long state)
{
- ksleep_wait(&link->wl_waitq->wq_ksleep_chan, state);
+ ksleep_wait(&link->wl_waitq->wq_ksleep_chan, state);
}
-cfs_duration_t cfs_waitq_timedwait(struct cfs_waitlink *link,
- cfs_task_state_t state,
+cfs_duration_t waitq_timedwait(struct cfs_waitlink *link,
+ long state,
cfs_duration_t timeout)
{
return ksleep_timedwait(&link->wl_waitq->wq_ksleep_chan,
static int proc_fail_loc SYSCTL_HANDLER_ARGS
{
- int error = 0;
- long old_fail_loc = cfs_fail_loc;
-
- error = sysctl_handle_long(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
- if (!error && req->newptr != USER_ADDR_NULL) {
- if (old_fail_loc != cfs_fail_loc)
- cfs_waitq_signal(&cfs_race_waitq);
- } else if (req->newptr != USER_ADDR_NULL) {
- /* Something was wrong with the write request */
- printf ("sysctl fail loc fault: %d.\n", error);
- } else {
- /* Read request */
- error = SYSCTL_OUT(req, &cfs_fail_loc, sizeof cfs_fail_loc);
- }
- return error;
+ int error = 0;
+ long old_fail_loc = cfs_fail_loc;
+
+ error = sysctl_handle_long(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
+ if (!error && req->newptr != USER_ADDR_NULL) {
+ if (old_fail_loc != cfs_fail_loc)
+ wake_up(&cfs_race_waitq);
+ } else if (req->newptr != USER_ADDR_NULL) {
+ /* Something was wrong with the write request */
+ printf ("sysctl fail loc fault: %d.\n", error);
+ } else {
+ /* Read request */
+ error = SYSCTL_OUT(req, &cfs_fail_loc, sizeof cfs_fail_loc);
+ }
+ return error;
}
/*
}
}
-void ksleep_wait(struct ksleep_chan *chan, cfs_task_state_t state)
+void ksleep_wait(struct ksleep_chan *chan, long state)
{
event_t event;
int result;
* implemented), or waitq was already in the "signalled" state).
*/
int64_t ksleep_timedwait(struct ksleep_chan *chan,
- cfs_task_state_t state,
- __u64 timeout)
+ long state,
+ __u64 timeout)
{
event_t event;
cfs_atomic_t libcfs_kmemory = CFS_ATOMIC_INIT(0);
EXPORT_SYMBOL(libcfs_kmemory);
-static cfs_waitq_t debug_ctlwq;
+static wait_queue_head_t debug_ctlwq;
char libcfs_debug_file_path_arr[PATH_MAX] = LIBCFS_DEBUG_FILE_PATH_DEFAULT;
int libcfs_debug_dumplog_thread(void *arg)
{
- libcfs_debug_dumplog_internal(arg);
- cfs_waitq_signal(&debug_ctlwq);
- return 0;
+ libcfs_debug_dumplog_internal(arg);
+ wake_up(&debug_ctlwq);
+ return 0;
}
void libcfs_debug_dumplog(void)
{
- cfs_waitlink_t wait;
- cfs_task_t *dumper;
- ENTRY;
+ wait_queue_t wait;
+ cfs_task_t *dumper;
+ ENTRY;
- /* we're being careful to ensure that the kernel thread is
- * able to set our state to running as it exits before we
- * get to schedule() */
- cfs_waitlink_init(&wait);
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add(&debug_ctlwq, &wait);
+ /* we're being careful to ensure that the kernel thread is
+ * able to set our state to running as it exits before we
+ * get to schedule() */
+ init_waitqueue_entry_current(&wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&debug_ctlwq, &wait);
dumper = kthread_run(libcfs_debug_dumplog_thread,
(void *)(long)current_pid(),
if (IS_ERR(dumper))
printk(KERN_ERR "LustreError: cannot start log dump thread:"
" %ld\n", PTR_ERR(dumper));
- else
- cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
+ else
+ waitq_wait(&wait, TASK_INTERRUPTIBLE);
- /* be sure to teardown if cfs_create_thread() failed */
- cfs_waitq_del(&debug_ctlwq, &wait);
- cfs_set_current_state(CFS_TASK_RUNNING);
+ /* be sure to teardown if cfs_create_thread() failed */
+ remove_wait_queue(&debug_ctlwq, &wait);
+ set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL(libcfs_debug_dumplog);
int libcfs_debug_init(unsigned long bufsize)
{
- int rc = 0;
- unsigned int max = libcfs_debug_mb;
+ int rc = 0;
+ unsigned int max = libcfs_debug_mb;
- cfs_waitq_init(&debug_ctlwq);
+ init_waitqueue_head(&debug_ctlwq);
- if (libcfs_console_max_delay <= 0 || /* not set by user or */
- libcfs_console_min_delay <= 0 || /* set to invalid values */
- libcfs_console_min_delay >= libcfs_console_max_delay) {
- libcfs_console_max_delay = CDEBUG_DEFAULT_MAX_DELAY;
- libcfs_console_min_delay = CDEBUG_DEFAULT_MIN_DELAY;
- }
+ if (libcfs_console_max_delay <= 0 || /* not set by user or */
+ libcfs_console_min_delay <= 0 || /* set to invalid values */
+ libcfs_console_min_delay >= libcfs_console_max_delay) {
+ libcfs_console_max_delay = CDEBUG_DEFAULT_MAX_DELAY;
+ libcfs_console_min_delay = CDEBUG_DEFAULT_MIN_DELAY;
+ }
if (libcfs_debug_file_path != NULL) {
memset(libcfs_debug_file_path_arr, 0, PATH_MAX);
unsigned long cfs_fail_loc = 0;
unsigned int cfs_fail_val = 0;
-cfs_waitq_t cfs_race_waitq;
+wait_queue_head_t cfs_race_waitq;
int cfs_race_state;
EXPORT_SYMBOL(cfs_fail_loc);
int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
{
- int ret = 0;
-
- ret = __cfs_fail_check_set(id, value, set);
- if (ret) {
- CERROR("cfs_fail_timeout id %x sleeping for %dms\n",
- id, ms);
- cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
- cfs_time_seconds(ms) / 1000);
- cfs_set_current_state(CFS_TASK_RUNNING);
- CERROR("cfs_fail_timeout id %x awake\n", id);
- }
- return ret;
+ int ret = 0;
+
+ ret = __cfs_fail_check_set(id, value, set);
+ if (ret) {
+ CERROR("cfs_fail_timeout id %x sleeping for %dms\n",
+ id, ms);
+ schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
+ cfs_time_seconds(ms) / 1000);
+ set_current_state(TASK_RUNNING);
+ CERROR("cfs_fail_timeout id %x awake\n", id);
+ }
+ return ret;
}
EXPORT_SYMBOL(__cfs_fail_timeout_set);
spin_lock(&hs->hs_dep_lock);
while (hs->hs_dep_bits != 0) {
spin_unlock(&hs->hs_dep_lock);
- cfs_cond_resched();
+ cond_resched();
spin_lock(&hs->hs_dep_lock);
}
spin_unlock(&hs->hs_dep_lock);
cfs_hash_exit(hs, hnode);
}
}
- LASSERT(bd.bd_bucket->hsb_count == 0);
- cfs_hash_bd_unlock(hs, &bd, 1);
- cfs_cond_resched();
- }
+ LASSERT(bd.bd_bucket->hsb_count == 0);
+ cfs_hash_bd_unlock(hs, &bd, 1);
+ cond_resched();
+ }
LASSERT(cfs_atomic_read(&hs->hs_count) == 0);
cfs_hash_bd_unlock(hs, &bd, excl);
if (loop < CFS_HASH_LOOP_HOG)
continue;
- loop = 0;
- cfs_hash_unlock(hs, 0);
- cfs_cond_resched();
- cfs_hash_lock(hs, 0);
- }
+ loop = 0;
+ cfs_hash_unlock(hs, 0);
+ cond_resched();
+ cfs_hash_lock(hs, 0);
+ }
out:
cfs_hash_unlock(hs, 0);
cfs_hash_bd_unlock(hs, &bd, 0);
cfs_hash_unlock(hs, 0);
- rc = func(hs, &bd, hnode, data);
- if (stop_on_change)
- cfs_hash_put(hs, hnode);
- cfs_cond_resched();
- count++;
+ rc = func(hs, &bd, hnode, data);
+ if (stop_on_change)
+ cfs_hash_put(hs, hnode);
+ cond_resched();
+ count++;
cfs_hash_lock(hs, 0);
cfs_hash_bd_lock(hs, &bd, 0);
}
for (i = 2; cfs_hash_is_rehashing(hs); i++) {
- cfs_hash_unlock(hs, 1);
- /* raise console warning while waiting too long */
- CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
- "hash %s is still rehashing, rescheded %d\n",
- hs->hs_name, i - 1);
- cfs_cond_resched();
- cfs_hash_lock(hs, 1);
- }
+ cfs_hash_unlock(hs, 1);
+ /* raise console warning while waiting too long */
+ CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
+ "hash %s is still rehashing, rescheded %d\n",
+ hs->hs_name, i - 1);
+ cond_resched();
+ cfs_hash_lock(hs, 1);
+ }
}
EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);
continue;
}
- count = 0;
- cfs_hash_unlock(hs, 1);
- cfs_cond_resched();
- cfs_hash_lock(hs, 1);
- }
+ count = 0;
+ cfs_hash_unlock(hs, 1);
+ cond_resched();
+ cfs_hash_lock(hs, 1);
+ }
hs->hs_rehash_count++;
rc = set_cpus_allowed_ptr(cfs_current(), cpumask);
set_mems_allowed(*nodemask);
if (rc == 0)
- cfs_schedule(); /* switch to allowed CPU */
+ schedule(); /* switch to allowed CPU */
return rc;
}
#include <asm/kgdb.h>
#endif
-#define LINUX_WAITQ(w) ((wait_queue_t *) w)
-#define LINUX_WAITQ_HEAD(w) ((wait_queue_head_t *) w)
-
-void
-cfs_waitq_init(cfs_waitq_t *waitq)
-{
- init_waitqueue_head(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_init);
-
-void
-cfs_waitlink_init(cfs_waitlink_t *link)
-{
- init_waitqueue_entry(LINUX_WAITQ(link), current);
-}
-EXPORT_SYMBOL(cfs_waitlink_init);
-
-void
-cfs_waitq_add(cfs_waitq_t *waitq, cfs_waitlink_t *link)
-{
- add_wait_queue(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-}
-EXPORT_SYMBOL(cfs_waitq_add);
-
-#ifndef HAVE___ADD_WAIT_QUEUE_EXCLUSIVE
-
-static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
- wait_queue_t *wait)
-{
- wait->flags |= WQ_FLAG_EXCLUSIVE;
- __add_wait_queue(q, wait);
-}
-
-#endif /* HAVE___ADD_WAIT_QUEUE_EXCLUSIVE */
-
-void
-cfs_waitq_add_exclusive(cfs_waitq_t *waitq,
- cfs_waitlink_t *link)
-{
- add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-}
-EXPORT_SYMBOL(cfs_waitq_add_exclusive);
-
-/**
- * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
- * waiting threads, which is not always desirable because all threads will
- * be waken up again and again, even user only needs a few of them to be
- * active most time. This is not good for performance because cache can
- * be polluted by different threads.
- *
- * LIFO list can resolve this problem because we always wakeup the most
- * recent active thread by default.
- *
- * NB: please don't call non-exclusive & exclusive wait on the same
- * waitq if cfs_waitq_add_exclusive_head is used.
- */
-void
-cfs_waitq_add_exclusive_head(cfs_waitq_t *waitq, cfs_waitlink_t *link)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
- __add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
- spin_unlock_irqrestore(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
-}
-EXPORT_SYMBOL(cfs_waitq_add_exclusive_head);
-
-void
-cfs_waitq_del(cfs_waitq_t *waitq, cfs_waitlink_t *link)
-{
- remove_wait_queue(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-}
-EXPORT_SYMBOL(cfs_waitq_del);
-
-int
-cfs_waitq_active(cfs_waitq_t *waitq)
-{
- return waitqueue_active(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_active);
-
-void
-cfs_waitq_signal(cfs_waitq_t *waitq)
-{
- wake_up(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_signal);
-
-void
-cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr)
-{
- wake_up_nr(LINUX_WAITQ_HEAD(waitq), nr);
-}
-EXPORT_SYMBOL(cfs_waitq_signal_nr);
-
-void
-cfs_waitq_broadcast(cfs_waitq_t *waitq)
-{
- wake_up_all(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_broadcast);
-
-void
-cfs_waitq_wait(cfs_waitlink_t *link, cfs_task_state_t state)
-{
- schedule();
-}
-EXPORT_SYMBOL(cfs_waitq_wait);
-
-int64_t
-cfs_waitq_timedwait(cfs_waitlink_t *link, cfs_task_state_t state,
- int64_t timeout)
-{
- return schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(cfs_waitq_timedwait);
-
-void
-cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t timeout)
-{
- set_current_state(state);
- schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(cfs_schedule_timeout_and_set_state);
-
-void
-cfs_schedule_timeout(int64_t timeout)
-{
- schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(cfs_schedule_timeout);
-
-void
-cfs_schedule(void)
-{
- schedule();
-}
-EXPORT_SYMBOL(cfs_schedule);
-
-/* deschedule for a bit... */
-void
-cfs_pause(cfs_duration_t ticks)
-{
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(ticks);
-}
-EXPORT_SYMBOL(cfs_pause);
-
-int cfs_need_resched(void)
-{
- return need_resched();
-}
-EXPORT_SYMBOL(cfs_need_resched);
-
-void cfs_cond_resched(void)
-{
- cond_resched();
-}
-EXPORT_SYMBOL(cfs_cond_resched);
-
void cfs_init_timer(cfs_timer_t *t)
{
init_timer(t);
int LL_PROC_PROTO(proc_fail_loc)
{
- int rc;
- long old_fail_loc = cfs_fail_loc;
+ int rc;
+ long old_fail_loc = cfs_fail_loc;
- rc = ll_proc_dolongvec(table, write, filp, buffer, lenp, ppos);
- if (old_fail_loc != cfs_fail_loc)
- cfs_waitq_signal(&cfs_race_waitq);
- return rc;
+ rc = ll_proc_dolongvec(table, write, filp, buffer, lenp, ppos);
+ if (old_fail_loc != cfs_fail_loc)
+ wake_up(&cfs_race_waitq);
+ return rc;
}
static int __proc_cpt_table(void *data, int write,
if (!cfs_capable(CFS_CAP_SYS_ADMIN))
return (-EPERM);
- if (!enable) {
- LWT_EVENT(0,0,0,0);
- lwt_enabled = 0;
- cfs_mb();
- /* give people some time to stop adding traces */
- cfs_schedule_timeout(10);
- }
+ if (!enable) {
+ LWT_EVENT(0,0,0,0);
+ lwt_enabled = 0;
+ cfs_mb();
+ /* give people some time to stop adding traces */
+ schedule_timeout(10);
+ }
for (i = 0; i < num_online_cpus(); i++) {
p = lwt_cpus[i].lwtc_current_page;
mutex_init(&cfs_trace_thread_mutex);
init_rwsem(&ioctl_list_sem);
CFS_INIT_LIST_HEAD(&ioctl_list);
- cfs_waitq_init(&cfs_race_waitq);
+ init_waitqueue_head(&cfs_race_waitq);
rc = libcfs_debug_init(5 * 1024 * 1024);
if (rc < 0) {
cfs_list_add_tail(&tage->linkage, &tcd->tcd_pages);
tcd->tcd_cur_pages++;
- if (tcd->tcd_cur_pages > 8 && thread_running) {
- struct tracefiled_ctl *tctl = &trace_tctl;
- /*
- * wake up tracefiled to process some pages.
- */
- cfs_waitq_signal(&tctl->tctl_waitq);
- }
- return tage;
+ if (tcd->tcd_cur_pages > 8 && thread_running) {
+ struct tracefiled_ctl *tctl = &trace_tctl;
+ /*
+ * wake up tracefiled to process some pages.
+ */
+ wake_up(&tctl->tctl_waitq);
+ }
+ return tage;
}
return NULL;
}
spin_lock_init(&pc.pc_lock);
complete(&tctl->tctl_start);
- while (1) {
- cfs_waitlink_t __wait;
+ while (1) {
+ wait_queue_t __wait;
pc.pc_want_daemon_pages = 0;
collect_pages(&pc);
break;
}
}
- cfs_waitlink_init(&__wait);
- cfs_waitq_add(&tctl->tctl_waitq, &__wait);
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_timedwait(&__wait, CFS_TASK_INTERRUPTIBLE,
- cfs_time_seconds(1));
- cfs_waitq_del(&tctl->tctl_waitq, &__wait);
+ init_waitqueue_entry_current(&__wait);
+ add_wait_queue(&tctl->tctl_waitq, &__wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ waitq_timedwait(&__wait, TASK_INTERRUPTIBLE,
+ cfs_time_seconds(1));
+ remove_wait_queue(&tctl->tctl_waitq, &__wait);
}
complete(&tctl->tctl_stop);
return 0;
init_completion(&tctl->tctl_start);
init_completion(&tctl->tctl_stop);
- cfs_waitq_init(&tctl->tctl_waitq);
+ init_waitqueue_head(&tctl->tctl_waitq);
cfs_atomic_set(&tctl->tctl_shutdown, 0);
if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
struct tracefiled_ctl {
struct completion tctl_start;
struct completion tctl_stop;
- cfs_waitq_t tctl_waitq;
+ wait_queue_head_t tctl_waitq;
pid_t tctl_pid;
cfs_atomic_t tctl_shutdown;
};
if (!entry)
return NULL;
- UC_CACHE_SET_NEW(entry);
- CFS_INIT_LIST_HEAD(&entry->ue_hash);
- entry->ue_key = key;
- cfs_atomic_set(&entry->ue_refcount, 0);
- cfs_waitq_init(&entry->ue_waitq);
- if (cache->uc_ops->init_entry)
- cache->uc_ops->init_entry(entry, args);
- return entry;
+ UC_CACHE_SET_NEW(entry);
+ CFS_INIT_LIST_HEAD(&entry->ue_hash);
+ entry->ue_key = key;
+ cfs_atomic_set(&entry->ue_refcount, 0);
+ init_waitqueue_head(&entry->ue_waitq);
+ if (cache->uc_ops->init_entry)
+ cache->uc_ops->init_entry(entry, args);
+ return entry;
}
/* protected by cache lock */
entry->ue_acquire_expire))
return 0;
- UC_CACHE_SET_EXPIRED(entry);
- cfs_waitq_broadcast(&entry->ue_waitq);
- } else if (!UC_CACHE_IS_INVALID(entry)) {
- UC_CACHE_SET_EXPIRED(entry);
- }
+ UC_CACHE_SET_EXPIRED(entry);
+ wake_up_all(&entry->ue_waitq);
+ } else if (!UC_CACHE_IS_INVALID(entry)) {
+ UC_CACHE_SET_EXPIRED(entry);
+ }
cfs_list_del_init(&entry->ue_hash);
if (!cfs_atomic_read(&entry->ue_refcount))
struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
__u64 key, void *args)
{
- struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
- cfs_list_t *head;
- cfs_waitlink_t wait;
- int rc, found;
- ENTRY;
+ struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
+ cfs_list_t *head;
+ wait_queue_t wait;
+ int rc, found;
+ ENTRY;
LASSERT(cache);
entry->ue_acquire_expire =
cfs_time_shift(cache->uc_acquire_expire);
if (rc < 0) {
- UC_CACHE_CLEAR_ACQUIRING(entry);
- UC_CACHE_SET_INVALID(entry);
- cfs_waitq_broadcast(&entry->ue_waitq);
- if (unlikely(rc == -EREMCHG)) {
- put_entry(cache, entry);
- GOTO(out, entry = ERR_PTR(rc));
- }
+ UC_CACHE_CLEAR_ACQUIRING(entry);
+ UC_CACHE_SET_INVALID(entry);
+ wake_up_all(&entry->ue_waitq);
+ if (unlikely(rc == -EREMCHG)) {
+ put_entry(cache, entry);
+ GOTO(out, entry = ERR_PTR(rc));
+ }
}
}
/* someone (and only one) is doing upcall upon this item,
if (UC_CACHE_IS_ACQUIRING(entry)) {
long expiry = (entry == new) ?
cfs_time_seconds(cache->uc_acquire_expire) :
- CFS_MAX_SCHEDULE_TIMEOUT;
- long left;
+ MAX_SCHEDULE_TIMEOUT;
+ long left;
- cfs_waitlink_init(&wait);
- cfs_waitq_add(&entry->ue_waitq, &wait);
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+ init_waitqueue_entry_current(&wait);
+ add_wait_queue(&entry->ue_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
spin_unlock(&cache->uc_lock);
- left = cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
+ left = waitq_timedwait(&wait, TASK_INTERRUPTIBLE,
expiry);
spin_lock(&cache->uc_lock);
- cfs_waitq_del(&entry->ue_waitq, &wait);
- if (UC_CACHE_IS_ACQUIRING(entry)) {
- /* we're interrupted or upcall failed in the middle */
- rc = left > 0 ? -EINTR : -ETIMEDOUT;
- CERROR("acquire for key "LPU64": error %d\n",
- entry->ue_key, rc);
- put_entry(cache, entry);
- GOTO(out, entry = ERR_PTR(rc));
- }
+ remove_wait_queue(&entry->ue_waitq, &wait);
+ if (UC_CACHE_IS_ACQUIRING(entry)) {
+ /* we're interrupted or upcall failed in the middle */
+ rc = left > 0 ? -EINTR : -ETIMEDOUT;
+ CERROR("acquire for key "LPU64": error %d\n",
+ entry->ue_key, rc);
+ put_entry(cache, entry);
+ GOTO(out, entry = ERR_PTR(rc));
+ }
}
/* invalid means error, don't need to try again */
}
UC_CACHE_CLEAR_ACQUIRING(entry);
spin_unlock(&cache->uc_lock);
- cfs_waitq_broadcast(&entry->ue_waitq);
+ wake_up_all(&entry->ue_waitq);
put_entry(cache, entry);
RETURN(rc);
{
LASSERT(c != NULL);
c->done = 0;
- cfs_waitq_init(&c->wait);
+ init_waitqueue_head(&c->wait);
}
void fini_completion(struct completion *c)
{
LASSERT(c != NULL);
c->done = 1;
- cfs_waitq_signal(&c->wait);
+ wake_up(&c->wait);
}
void wait_for_completion(struct completion *c)
* Wait queue. No-op implementation.
*/
-void cfs_waitq_init(struct cfs_waitq *waitq)
+void init_waitqueue_head(struct cfs_waitq *waitq)
{
- LASSERT(waitq != NULL);
- (void)waitq;
+ LASSERT(waitq != NULL);
+ (void)waitq;
}
-void cfs_waitlink_init(struct cfs_waitlink *link)
+void init_waitqueue_entry_current(struct cfs_waitlink *link)
{
- LASSERT(link != NULL);
- (void)link;
+ LASSERT(link != NULL);
+ (void)link;
}
-void cfs_waitq_add(struct cfs_waitq *waitq, struct cfs_waitlink *link)
+void add_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link)
{
- LASSERT(waitq != NULL);
- LASSERT(link != NULL);
- (void)waitq;
- (void)link;
+ LASSERT(waitq != NULL);
+ LASSERT(link != NULL);
+ (void)waitq;
+ (void)link;
}
-void cfs_waitq_add_exclusive(struct cfs_waitq *waitq, struct cfs_waitlink *link)
+void add_wait_queue_exclusive(struct cfs_waitq *waitq, struct cfs_waitlink *link)
{
- LASSERT(waitq != NULL);
- LASSERT(link != NULL);
- (void)waitq;
- (void)link;
+ LASSERT(waitq != NULL);
+ LASSERT(link != NULL);
+ (void)waitq;
+ (void)link;
}
-void cfs_waitq_add_exclusive_head(struct cfs_waitq *waitq, struct cfs_waitlink *link)
+void add_wait_queue_exclusive_head(struct cfs_waitq *waitq, struct cfs_waitlink *link)
{
- cfs_waitq_add_exclusive(waitq, link);
+ add_wait_queue_exclusive(waitq, link);
}
-void cfs_waitq_del(struct cfs_waitq *waitq, struct cfs_waitlink *link)
+void remove_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link)
{
- LASSERT(waitq != NULL);
- LASSERT(link != NULL);
- (void)waitq;
- (void)link;
+ LASSERT(waitq != NULL);
+ LASSERT(link != NULL);
+ (void)waitq;
+ (void)link;
}
-int cfs_waitq_active(struct cfs_waitq *waitq)
+int waitqueue_active(struct cfs_waitq *waitq)
{
- LASSERT(waitq != NULL);
- (void)waitq;
- return 0;
+ LASSERT(waitq != NULL);
+ (void)waitq;
+ return 0;
}
-void cfs_waitq_signal(struct cfs_waitq *waitq)
+void wake_up(struct cfs_waitq *waitq)
{
- LASSERT(waitq != NULL);
- (void)waitq;
+ LASSERT(waitq != NULL);
+ (void)waitq;
}
-void cfs_waitq_signal_nr(struct cfs_waitq *waitq, int nr)
+void wake_up_nr(struct cfs_waitq *waitq, int nr)
{
- LASSERT(waitq != NULL);
- (void)waitq;
+ LASSERT(waitq != NULL);
+ (void)waitq;
}
-void cfs_waitq_broadcast(struct cfs_waitq *waitq)
+void wake_up_all(struct cfs_waitq *waitq)
{
- LASSERT(waitq != NULL);
- (void)waitq;
+ LASSERT(waitq != NULL);
+ (void)waitq;
}
-void cfs_waitq_wait(struct cfs_waitlink *link, cfs_task_state_t state)
+void waitq_wait(struct cfs_waitlink *link, long state)
{
- LASSERT(link != NULL);
- (void)link;
+ LASSERT(link != NULL);
+ (void)link;
- /* well, wait for something to happen */
+ /* well, wait for something to happen */
call_wait_handler(0);
}
-int64_t cfs_waitq_timedwait(struct cfs_waitlink *link, cfs_task_state_t state,
- int64_t timeout)
+int64_t waitq_timedwait(struct cfs_waitlink *link, long state,
+ int64_t timeout)
{
- LASSERT(link != NULL);
- (void)link;
+ LASSERT(link != NULL);
+ (void)link;
call_wait_handler(timeout);
- return 0;
+ return 0;
}
-void cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t timeout)
+void schedule_timeout_and_set_state(long state, int64_t timeout)
{
- cfs_waitlink_t l;
- /* sleep(timeout) here instead? */
- cfs_waitq_timedwait(&l, state, timeout);
+ wait_queue_t l;
+ /* sleep(timeout) here instead? */
+ waitq_timedwait(&l, state, timeout);
}
void
cfs_pause(cfs_duration_t d)
{
- struct timespec s;
+ struct timespec s;
- cfs_duration_nsec(d, &s);
- nanosleep(&s, NULL);
+ cfs_duration_nsec(d, &s);
+ nanosleep(&s, NULL);
}
-int cfs_need_resched(void)
+int need_resched(void)
{
- return 0;
+ return 0;
}
-void cfs_cond_resched(void)
+void cond_resched(void)
{
}
*/
static struct completion lcw_start_completion;
static struct completion lcw_stop_completion;
-static cfs_waitq_t lcw_event_waitq;
+static wait_queue_head_t lcw_event_waitq;
/*
* Set this and wake lcw_event_waitq to stop the dispatcher.
spin_lock_bh(&lcw_pending_timers_lock);
lcw->lcw_refcount++; /* +1 for pending list */
cfs_list_add(&lcw->lcw_list, &lcw_pending_timers);
- cfs_waitq_signal(&lcw_event_waitq);
+ wake_up(&lcw_event_waitq);
spin_unlock_bh(&lcw_pending_timers_lock);
spin_unlock_bh(&lcw->lcw_lock);
init_completion(&lcw_stop_completion);
init_completion(&lcw_start_completion);
- cfs_waitq_init(&lcw_event_waitq);
+ init_waitqueue_head(&lcw_event_waitq);
CDEBUG(D_INFO, "starting dispatch thread\n");
task = kthread_run(lcw_dispatch_main, NULL, "lc_watchdogd");
CDEBUG(D_INFO, "trying to stop watchdog dispatcher.\n");
set_bit(LCW_FLAG_STOP, &lcw_flags);
- cfs_waitq_signal(&lcw_event_waitq);
+ wake_up(&lcw_event_waitq);
wait_for_completion(&lcw_stop_completion);
void
cfs_pause(cfs_duration_t ticks)
{
- cfs_schedule_timeout_and_set_state(CFS_TASK_UNINTERRUPTIBLE, ticks);
+ schedule_timeout_and_set_state(CFS_TASK_UNINTERRUPTIBLE, ticks);
}
void
-cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t time)
+schedule_timeout_and_set_state(long state, int64_t time)
{
cfs_task_t * task = cfs_current();
PTASK_SLOT slot = NULL;
slot = CONTAINING_RECORD(task, TASK_SLOT, task);
cfs_assert(slot->Magic == TASKSLT_MAGIC);
- if (time == CFS_MAX_SCHEDULE_TIMEOUT) {
+ if (time == MAX_SCHEDULE_TIMEOUT) {
time = 0;
}
}
void
-cfs_schedule()
+schedule()
{
- cfs_schedule_timeout_and_set_state(CFS_TASK_UNINTERRUPTIBLE, 0);
+ schedule_timeout_and_set_state(CFS_TASK_UNINTERRUPTIBLE, 0);
}
int
}
void
-sleep_on(cfs_waitq_t *waitq)
+sleep_on(wait_queue_head_t *waitq)
{
- cfs_waitlink_t link;
+ wait_queue_t link;
- cfs_waitlink_init(&link);
- cfs_waitq_add(waitq, &link);
- cfs_waitq_wait(&link, CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_del(waitq, &link);
+ init_waitqueue_entry_current(&link);
+ add_wait_queue(waitq, &link);
+ waitq_wait(&link, TASK_INTERRUPTIBLE);
+ remove_wait_queue(waitq, &link);
}
EXPORT_SYMBOL(current_uid);
return NT_SUCCESS(status);
}
-int cfs_need_resched(void)
+int need_resched(void)
{
return 0;
}
-void cfs_cond_resched(void)
+void cond_resched(void)
{
}
*/
/*
- * cfs_waitq_init
+ * init_waitqueue_head
* To initialize the wait queue
*
* Arguments:
- * waitq: pointer to the cfs_waitq_t structure
+ * waitq: pointer to the wait_queue_head_t structure
*
* Return Value:
* N/A
* N/A
*/
-void cfs_waitq_init(cfs_waitq_t *waitq)
+void init_waitqueue_head(wait_queue_head_t *waitq)
{
waitq->magic = CFS_WAITQ_MAGIC;
waitq->flags = 0;
}
/*
- * cfs_waitlink_init
+ * init_waitqueue_entry_current
* To initialize the wake link node
*
* Arguments:
- * link: pointer to the cfs_waitlink_t structure
+ * link: pointer to the wait_queue_t structure
*
* Return Value:
* N/A
* N/A
*/
-void cfs_waitlink_init(cfs_waitlink_t *link)
+void init_waitqueue_entry_current(wait_queue_t *link)
{
cfs_task_t * task = cfs_current();
PTASK_SLOT slot = NULL;
slot = CONTAINING_RECORD(task, TASK_SLOT, task);
cfs_assert(slot->Magic == TASKSLT_MAGIC);
- memset(link, 0, sizeof(cfs_waitlink_t));
+ memset(link, 0, sizeof(wait_queue_t));
link->magic = CFS_WAITLINK_MAGIC;
link->flags = 0;
* To finilize the wake link node
*
* Arguments:
- * link: pointer to the cfs_waitlink_t structure
+ * link: pointer to the wait_queue_t structure
*
* Return Value:
* N/A
* N/A
*/
-void cfs_waitlink_fini(cfs_waitlink_t *link)
+void cfs_waitlink_fini(wait_queue_t *link)
{
cfs_task_t * task = cfs_current();
PTASK_SLOT slot = NULL;
* To queue the wait link node to the wait queue
*
* Arguments:
- * waitq: pointer to the cfs_waitq_t structure
- * link: pointer to the cfs_waitlink_t structure
+ * waitq: pointer to the wait_queue_head_t structure
+ * link: pointer to the wait_queue_t structure
* int: queue no (Normal or Forward waitq)
*
* Return Value:
* N/A
*/
-void cfs_waitq_add_internal(cfs_waitq_t *waitq,
- cfs_waitlink_t *link,
+void cfs_waitq_add_internal(wait_queue_head_t *waitq,
+ wait_queue_t *link,
__u32 waitqid )
{
LASSERT(waitq != NULL);
spin_unlock(&(waitq->guard));
}
/*
- * cfs_waitq_add
+ * add_wait_queue
* To queue the wait link node to the wait queue
*
* Arguments:
- * waitq: pointer to the cfs_waitq_t structure
- * link: pointer to the cfs_waitlink_t structure
+ * waitq: pointer to the wait_queue_head_t structure
+ * link: pointer to the wait_queue_t structure
*
* Return Value:
* N/A
* N/A
*/
-void cfs_waitq_add(cfs_waitq_t *waitq,
- cfs_waitlink_t *link)
+void add_wait_queue(wait_queue_head_t *waitq,
+ wait_queue_t *link)
{
cfs_waitq_add_internal(waitq, link, CFS_WAITQ_CHAN_NORMAL);
}
/*
- * cfs_waitq_add_exclusive
+ * add_wait_queue_exclusive
* To set the wait link node to exclusive mode
* and queue it to the wait queue
*
* Arguments:
- * waitq: pointer to the cfs_waitq_t structure
+ * waitq: pointer to the wait_queue_head_t structure
* link: pointer to the cfs_wait_link structure
*
* Return Value:
* N/A
*/
-void cfs_waitq_add_exclusive( cfs_waitq_t *waitq,
- cfs_waitlink_t *link)
+void add_wait_queue_exclusive( wait_queue_head_t *waitq,
+ wait_queue_t *link)
{
LASSERT(waitq != NULL);
LASSERT(link != NULL);
LASSERT(link->magic == CFS_WAITLINK_MAGIC);
link->flags |= CFS_WAITQ_EXCLUSIVE;
- cfs_waitq_add(waitq, link);
+ add_wait_queue(waitq, link);
}
/*
- * cfs_waitq_del
+ * remove_wait_queue
* To remove the wait link node from the waitq
*
* Arguments:
* waitq: pointer to the cfs_ waitq_t structure
- * link: pointer to the cfs_waitlink_t structure
+ * link: pointer to the wait_queue_t structure
*
* Return Value:
* N/A
* N/A
*/
-void cfs_waitq_del( cfs_waitq_t *waitq,
- cfs_waitlink_t *link)
+void remove_wait_queue( wait_queue_head_t *waitq,
+ wait_queue_t *link)
{
int i = 0;
}
/*
- * cfs_waitq_active
+ * waitqueue_active
* Is the waitq active (not empty) ?
*
* Arguments:
* We always returns TRUE here, the same to Darwin.
*/
-int cfs_waitq_active(cfs_waitq_t *waitq)
+int waitqueue_active(wait_queue_head_t *waitq)
{
LASSERT(waitq != NULL);
LASSERT(waitq->magic == CFS_WAITQ_MAGIC);
}
/*
- * cfs_waitq_signal_nr
+ * wake_up_nr
* To wake up all the non-exclusive tasks plus nr exclusive
* ones in the waitq
*
* Arguments:
- * waitq: pointer to the cfs_waitq_t structure
+ * waitq: pointer to the wait_queue_head_t structure
* nr: number of exclusive tasks to be woken up
*
* Return Value:
*/
-void cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr)
+void wake_up_nr(wait_queue_head_t *waitq, int nr)
{
int result;
cfs_waitlink_channel_t * scan;
cfs_waitlink_channel_t,
link) {
- cfs_waitlink_t *waitl = scan->waitl;
+ wait_queue_t *waitl = scan->waitl;
result = cfs_wake_event(waitl->event);
LASSERT( result == FALSE || result == TRUE );
}
/*
- * cfs_waitq_signal
+ * wake_up
* To wake up all the non-exclusive tasks and 1 exclusive
*
* Arguments:
- * waitq: pointer to the cfs_waitq_t structure
+ * waitq: pointer to the wait_queue_head_t structure
*
* Return Value:
* N/A
* N/A
*/
-void cfs_waitq_signal(cfs_waitq_t *waitq)
+void wake_up(wait_queue_head_t *waitq)
{
- cfs_waitq_signal_nr(waitq, 1);
+ wake_up_nr(waitq, 1);
}
/*
- * cfs_waitq_broadcast
+ * wake_up_all
* To wake up all the tasks in the waitq
*
* Arguments:
- * waitq: pointer to the cfs_waitq_t structure
+ * waitq: pointer to the wait_queue_head_t structure
*
* Return Value:
* N/A
* N/A
*/
-void cfs_waitq_broadcast(cfs_waitq_t *waitq)
+void wake_up_all(wait_queue_head_t *waitq)
{
LASSERT(waitq != NULL);
LASSERT(waitq->magic ==CFS_WAITQ_MAGIC);
- cfs_waitq_signal_nr(waitq, 0);
+ wake_up_nr(waitq, 0);
}
/*
- * cfs_waitq_wait
+ * waitq_wait
* To wait on the link node until it is signaled.
*
* Arguments:
- * link: pointer to the cfs_waitlink_t structure
+ * link: pointer to the wait_queue_t structure
*
* Return Value:
* N/A
* N/A
*/
-void cfs_waitq_wait(cfs_waitlink_t *link, cfs_task_state_t state)
+void waitq_wait(wait_queue_t *link, long state)
{
LASSERT(link != NULL);
LASSERT(link->magic == CFS_WAITLINK_MAGIC);
}
/*
- * cfs_waitq_timedwait
+ * waitq_timedwait
* To wait the link node to be signaled with a timeout limit
*
* Arguments:
- * link: pointer to the cfs_waitlink_t structure
+ * link: pointer to the wait_queue_t structure
* timeout: the timeout limitation
*
* Return Value:
* What if it happens to be woken up at the just timeout time !?
*/
-int64_t cfs_waitq_timedwait( cfs_waitlink_t *link,
- cfs_task_state_t state,
+int64_t waitq_timedwait( wait_queue_t *link,
+ long state,
int64_t timeout)
{
/** serialised workitems */
spinlock_t ws_lock;
/** where schedulers sleep */
- cfs_waitq_t ws_waitq;
+ wait_queue_head_t ws_waitq;
#endif
/** concurrent workitems */
cfs_list_t ws_runq;
LASSERT(!cfs_in_interrupt()); /* because we use plain spinlock */
LASSERT(!sched->ws_stopping);
- cfs_wi_sched_lock(sched);
+ cfs_wi_sched_lock(sched);
- if (!wi->wi_scheduled) {
- LASSERT (cfs_list_empty(&wi->wi_list));
+ if (!wi->wi_scheduled) {
+ LASSERT (cfs_list_empty(&wi->wi_list));
- wi->wi_scheduled = 1;
+ wi->wi_scheduled = 1;
sched->ws_nscheduled++;
- if (!wi->wi_running) {
- cfs_list_add_tail(&wi->wi_list, &sched->ws_runq);
+ if (!wi->wi_running) {
+ cfs_list_add_tail(&wi->wi_list, &sched->ws_runq);
#ifdef __KERNEL__
- cfs_waitq_signal(&sched->ws_waitq);
+ wake_up(&sched->ws_waitq);
#endif
- } else {
- cfs_list_add(&wi->wi_list, &sched->ws_rerunq);
- }
- }
+ } else {
+ cfs_list_add(&wi->wi_list, &sched->ws_rerunq);
+ }
+ }
- LASSERT (!cfs_list_empty(&wi->wi_list));
- cfs_wi_sched_unlock(sched);
- return;
+ LASSERT (!cfs_list_empty(&wi->wi_list));
+ cfs_wi_sched_unlock(sched);
+ return;
}
EXPORT_SYMBOL(cfs_wi_schedule);
cfs_list_move_tail(&wi->wi_list, &sched->ws_runq);
}
- if (!cfs_list_empty(&sched->ws_runq)) {
- cfs_wi_sched_unlock(sched);
- /* don't sleep because some workitems still
- * expect me to come back soon */
- cfs_cond_resched();
- cfs_wi_sched_lock(sched);
- continue;
- }
+ if (!cfs_list_empty(&sched->ws_runq)) {
+ cfs_wi_sched_unlock(sched);
+ /* don't sleep because some workitems still
+ * expect me to come back soon */
+ cond_resched();
+ cfs_wi_sched_lock(sched);
+ continue;
+ }
cfs_wi_sched_unlock(sched);
rc = wait_event_interruptible_exclusive(sched->ws_waitq,
spin_unlock(&cfs_wi_data.wi_glock);
#ifdef __KERNEL__
- cfs_waitq_broadcast(&sched->ws_waitq);
+ wake_up_all(&sched->ws_waitq);
spin_lock(&cfs_wi_data.wi_glock);
{
#ifdef __KERNEL__
spin_lock_init(&sched->ws_lock);
- cfs_waitq_init(&sched->ws_waitq);
+ init_waitqueue_head(&sched->ws_waitq);
#endif
CFS_INIT_LIST_HEAD(&sched->ws_runq);
CFS_INIT_LIST_HEAD(&sched->ws_rerunq);
spin_lock(&cfs_wi_data.wi_glock);
while (sched->ws_starting > 0) {
spin_unlock(&cfs_wi_data.wi_glock);
- cfs_schedule();
+ schedule();
spin_lock(&cfs_wi_data.wi_glock);
}
/* nobody should contend on this list */
cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
sched->ws_stopping = 1;
- cfs_waitq_broadcast(&sched->ws_waitq);
+ wake_up_all(&sched->ws_waitq);
}
cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
/* Event Queue container */
struct lnet_res_container ln_eq_container;
#ifdef __KERNEL__
- cfs_waitq_t ln_eq_waitq;
+ wait_queue_head_t ln_eq_waitq;
spinlock_t ln_eq_wait_lock;
#else
# ifndef HAVE_LIBPTHREAD
void
mxlnd_sleep(unsigned long timeout)
{
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_schedule_timeout(timeout);
- return;
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(timeout);
+ return;
}
/**
goto failed_2;
}
- if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
- /* wakeup failover thread and teardown connection */
- if (kiblnd_dev_can_failover(dev)) {
- cfs_list_add_tail(&dev->ibd_fail_list,
- &kiblnd_data.kib_failed_devs);
- cfs_waitq_signal(&kiblnd_data.kib_failover_waitq);
- }
+ if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
+ /* wakeup failover thread and teardown connection */
+ if (kiblnd_dev_can_failover(dev)) {
+ cfs_list_add_tail(&dev->ibd_fail_list,
+ &kiblnd_data.kib_failed_devs);
+ wake_up(&kiblnd_data.kib_failover_waitq);
+ }
write_unlock_irqrestore(glock, flags);
- CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
- cmid->device->name, dev->ibd_ifname);
- goto failed_2;
- }
+ CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
+ cmid->device->name, dev->ibd_ifname);
+ goto failed_2;
+ }
kiblnd_hdev_addref_locked(dev->ibd_hdev);
conn->ibc_hdev = dev->ibd_hdev;
if (i++ % 50 == 0)
CDEBUG(D_NET, "%s: Wait for failover\n",
dev->ibd_ifname);
- cfs_schedule_timeout(cfs_time_seconds(1) / 100);
+ schedule_timeout(cfs_time_seconds(1) / 100);
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
}
spin_unlock(&fps->fps_lock);
CDEBUG(D_NET, "Another thread is allocating new "
"FMR pool, waiting for her to complete\n");
- cfs_schedule();
+ schedule();
goto again;
}
CDEBUG(D_NET, "Another thread is allocating new "
"%s pool, waiting for her to complete\n",
ps->ps_name);
- cfs_schedule();
+ schedule();
goto again;
}
LASSERT (cfs_list_empty(&kiblnd_data.kib_connd_zombies));
LASSERT (cfs_list_empty(&kiblnd_data.kib_connd_conns));
- /* flag threads to terminate; wake and wait for them to die */
- kiblnd_data.kib_shutdown = 1;
+ /* flag threads to terminate; wake and wait for them to die */
+ kiblnd_data.kib_shutdown = 1;
/* NB: we really want to stop scheduler threads net by net
* instead of the whole module, this should be improved
* with dynamic configuration LNet */
cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds)
- cfs_waitq_broadcast(&sched->ibs_waitq);
+ wake_up_all(&sched->ibs_waitq);
- cfs_waitq_broadcast(&kiblnd_data.kib_connd_waitq);
- cfs_waitq_broadcast(&kiblnd_data.kib_failover_waitq);
+ wake_up_all(&kiblnd_data.kib_connd_waitq);
+ wake_up_all(&kiblnd_data.kib_failover_waitq);
- i = 2;
- while (cfs_atomic_read(&kiblnd_data.kib_nthreads) != 0) {
+ i = 2;
+ while (cfs_atomic_read(&kiblnd_data.kib_nthreads) != 0) {
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"Waiting for %d threads to terminate\n",
CFS_INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
spin_lock_init(&kiblnd_data.kib_connd_lock);
- CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
- CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
- cfs_waitq_init(&kiblnd_data.kib_connd_waitq);
- cfs_waitq_init(&kiblnd_data.kib_failover_waitq);
+ CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
+ CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
+ init_waitqueue_head(&kiblnd_data.kib_connd_waitq);
+ init_waitqueue_head(&kiblnd_data.kib_failover_waitq);
kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(*sched));
spin_lock_init(&sched->ibs_lock);
CFS_INIT_LIST_HEAD(&sched->ibs_conns);
- cfs_waitq_init(&sched->ibs_waitq);
+ init_waitqueue_head(&sched->ibs_waitq);
nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
if (*kiblnd_tunables.kib_nscheds > 0) {
/* serialise */
spinlock_t ibs_lock;
/* schedulers sleep here */
- cfs_waitq_t ibs_waitq;
+ wait_queue_head_t ibs_waitq;
/* conns to check for rx completions */
cfs_list_t ibs_conns;
/* number of scheduler threads */
/* list head of failed devices */
cfs_list_t kib_failed_devs;
/* schedulers sleep here */
- cfs_waitq_t kib_failover_waitq;
+ wait_queue_head_t kib_failover_waitq;
cfs_atomic_t kib_nthreads; /* # live threads */
/* stabilize net/dev/peer/conn ops */
rwlock_t kib_global_lock;
/* connections with zero refcount */
cfs_list_t kib_connd_zombies;
/* connection daemon sleeps here */
- cfs_waitq_t kib_connd_waitq;
+ wait_queue_head_t kib_connd_waitq;
spinlock_t kib_connd_lock; /* serialise */
struct ib_qp_attr kib_error_qpa; /* QP->ERROR */
/* percpt data for schedulers */
spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); \
cfs_list_add_tail(&(conn)->ibc_list, \
&kiblnd_data.kib_connd_zombies); \
- cfs_waitq_signal(&kiblnd_data.kib_connd_waitq); \
+ wake_up(&kiblnd_data.kib_connd_waitq); \
spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
} \
} while (0)
kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
- if (error != 0 &&
- kiblnd_dev_can_failover(dev)) {
- cfs_list_add_tail(&dev->ibd_fail_list,
- &kiblnd_data.kib_failed_devs);
- cfs_waitq_signal(&kiblnd_data.kib_failover_waitq);
- }
+ if (error != 0 &&
+ kiblnd_dev_can_failover(dev)) {
+ cfs_list_add_tail(&dev->ibd_fail_list,
+ &kiblnd_data.kib_failed_devs);
+ wake_up(&kiblnd_data.kib_failover_waitq);
+ }
spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
cfs_list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns);
- cfs_waitq_signal(&kiblnd_data.kib_connd_waitq);
+ wake_up(&kiblnd_data.kib_connd_waitq);
spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
}
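/*
 * Illustrative sketch (not part of the patch): the producer side keeps the
 * same shape in every daemon converted by this series -- queue the work item
 * under the daemon's lock, then wake_up() its waitqueue head.  The type and
 * field names below are hypothetical stand-ins for the kib/kptl/kra daemons.
 */
struct example_daemon {
	spinlock_t		ed_lock;	/* serialises ed_queue */
	cfs_list_t		ed_queue;	/* items awaiting the daemon */
	wait_queue_head_t	ed_waitq;	/* daemon sleeps here */
};

static void example_queue_work(struct example_daemon *d, cfs_list_t *item)
{
	unsigned long flags;

	spin_lock_irqsave(&d->ed_lock, flags);
	cfs_list_add_tail(item, &d->ed_queue);	/* hand the item over */
	wake_up(&d->ed_waitq);			/* rouse one sleeping daemon */
	spin_unlock_irqrestore(&d->ed_lock, flags);
}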
int
kiblnd_connd (void *arg)
{
- cfs_waitlink_t wait;
- unsigned long flags;
- kib_conn_t *conn;
- int timeout;
- int i;
- int dropped_lock;
- int peer_index = 0;
- unsigned long deadline = jiffies;
+ wait_queue_t wait;
+ unsigned long flags;
+ kib_conn_t *conn;
+ int timeout;
+ int i;
+ int dropped_lock;
+ int peer_index = 0;
+ unsigned long deadline = jiffies;
- cfs_block_allsigs ();
+ cfs_block_allsigs ();
- cfs_waitlink_init (&wait);
- kiblnd_data.kib_connd = current;
+ init_waitqueue_entry_current (&wait);
+ kiblnd_data.kib_connd = current;
spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
continue;
/* Nothing to do for 'timeout' */
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add(&kiblnd_data.kib_connd_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
- cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);
+ waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
- cfs_set_current_state(CFS_TASK_RUNNING);
- cfs_waitq_del(&kiblnd_data.kib_connd_waitq, &wait);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
}
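/*
 * Illustrative sketch (not part of the patch): the matching consumer side,
 * i.e. the open-coded prepare/sleep/finish sequence kiblnd_connd() uses above.
 * waitq_timedwait() and init_waitqueue_entry_current() are still libcfs
 * wrappers after the rename; only the names changed.  example_daemon is the
 * hypothetical type from the previous sketch, and this loop simply sleeps
 * until an item arrives (a real daemon would also test a shutdown flag).
 */
static void example_wait_for_work(struct example_daemon *d, long timeout)
{
	wait_queue_t	wait;
	unsigned long	flags;

	init_waitqueue_entry_current(&wait);

	spin_lock_irqsave(&d->ed_lock, flags);
	while (cfs_list_empty(&d->ed_queue)) {
		set_current_state(TASK_INTERRUPTIBLE);	/* before dropping the lock */
		add_wait_queue(&d->ed_waitq, &wait);
		spin_unlock_irqrestore(&d->ed_lock, flags);

		waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&d->ed_waitq, &wait);
		spin_lock_irqsave(&d->ed_lock, flags);	/* re-check the queue */
	}
	spin_unlock_irqrestore(&d->ed_lock, flags);
}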
conn->ibc_scheduled = 1;
cfs_list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);
- if (cfs_waitq_active(&sched->ibs_waitq))
- cfs_waitq_signal(&sched->ibs_waitq);
+ if (waitqueue_active(&sched->ibs_waitq))
+ wake_up(&sched->ibs_waitq);
}
spin_unlock_irqrestore(&sched->ibs_lock, flags);
long id = (long)arg;
struct kib_sched_info *sched;
kib_conn_t *conn;
- cfs_waitlink_t wait;
+ wait_queue_t wait;
unsigned long flags;
struct ib_wc wc;
int did_something;
cfs_block_allsigs();
- cfs_waitlink_init(&wait);
+ init_waitqueue_entry_current(&wait);
sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
if (busy_loops++ >= IBLND_RESCHED) {
spin_unlock_irqrestore(&sched->ibs_lock, flags);
- cfs_cond_resched();
+ cond_resched();
busy_loops = 0;
spin_lock_irqsave(&sched->ibs_lock, flags);
kiblnd_conn_addref(conn);
cfs_list_add_tail(&conn->ibc_sched_list,
&sched->ibs_conns);
- if (cfs_waitq_active(&sched->ibs_waitq))
- cfs_waitq_signal(&sched->ibs_waitq);
+ if (waitqueue_active(&sched->ibs_waitq))
+ wake_up(&sched->ibs_waitq);
} else {
conn->ibc_scheduled = 0;
}
if (did_something)
continue;
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add_exclusive(&sched->ibs_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
spin_unlock_irqrestore(&sched->ibs_lock, flags);
- cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
+ waitq_wait(&wait, TASK_INTERRUPTIBLE);
busy_loops = 0;
- cfs_waitq_del(&sched->ibs_waitq, &wait);
- cfs_set_current_state(CFS_TASK_RUNNING);
+ remove_wait_queue(&sched->ibs_waitq, &wait);
+ set_current_state(TASK_RUNNING);
spin_lock_irqsave(&sched->ibs_lock, flags);
}
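/*
 * Note (not part of the patch): the scheduler threads above register with
 * add_wait_queue_exclusive(), so on Linux a plain wake_up() on ibs_waitq
 * rouses at most one of them per event, while wake_up_all() at shutdown
 * still drains every exclusive waiter.  Non-exclusive waiters added with
 * add_wait_queue() are always woken by either call.
 */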
kiblnd_failover_thread(void *arg)
{
rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_dev_t *dev;
- cfs_waitlink_t wait;
- unsigned long flags;
- int rc;
+ kib_dev_t *dev;
+ wait_queue_t wait;
+ unsigned long flags;
+ int rc;
- LASSERT (*kiblnd_tunables.kib_dev_failover != 0);
+ LASSERT (*kiblnd_tunables.kib_dev_failover != 0);
- cfs_block_allsigs ();
+ cfs_block_allsigs ();
- cfs_waitlink_init(&wait);
+ init_waitqueue_entry_current(&wait);
write_lock_irqsave(glock, flags);
while (!kiblnd_data.kib_shutdown) {
/* long sleep if no more pending failover */
long_sleep = cfs_list_empty(&kiblnd_data.kib_failed_devs);
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add(&kiblnd_data.kib_failover_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
write_unlock_irqrestore(glock, flags);
rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
cfs_time_seconds(1));
- cfs_set_current_state(CFS_TASK_RUNNING);
- cfs_waitq_del(&kiblnd_data.kib_failover_waitq, &wait);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
write_lock_irqsave(glock, flags);
if (!long_sleep || rc != 0)
kptllnd_data.kptl_shutdown = 2;
cfs_mb();
- i = 2;
- while (cfs_atomic_read (&kptllnd_data.kptl_nthreads) != 0) {
- /* Wake up all threads*/
- cfs_waitq_broadcast(&kptllnd_data.kptl_sched_waitq);
- cfs_waitq_broadcast(&kptllnd_data.kptl_watchdog_waitq);
-
- i++;
- CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
- "Waiting for %d threads to terminate\n",
- cfs_atomic_read(&kptllnd_data.kptl_nthreads));
- cfs_pause(cfs_time_seconds(1));
- }
+ i = 2;
+ while (cfs_atomic_read (&kptllnd_data.kptl_nthreads) != 0) {
+ /* Wake up all threads */
+ wake_up_all(&kptllnd_data.kptl_sched_waitq);
+ wake_up_all(&kptllnd_data.kptl_watchdog_waitq);
+
+ i++;
+ CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
+ "Waiting for %d threads to terminate\n",
+ cfs_atomic_read(&kptllnd_data.kptl_nthreads));
+ cfs_pause(cfs_time_seconds(1));
+ }
CDEBUG(D_NET, "All Threads stopped\n");
LASSERT(cfs_list_empty(&kptllnd_data.kptl_sched_txq));
rwlock_init(&kptllnd_data.kptl_net_rw_lock);
CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_nets);
- /* Setup the sched locks/lists/waitq */
+ /* Setup the sched locks/lists/waitq */
spin_lock_init(&kptllnd_data.kptl_sched_lock);
- cfs_waitq_init(&kptllnd_data.kptl_sched_waitq);
- CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_txq);
- CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxq);
- CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxbq);
+ init_waitqueue_head(&kptllnd_data.kptl_sched_waitq);
+ CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_txq);
+ CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxq);
+ CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxbq);
/* Init kptl_ptlid2str_lock before any call to kptllnd_ptlid2str */
spin_lock_init(&kptllnd_data.kptl_ptlid2str_lock);
kptllnd_data.kptl_nak_msg->ptlm_srcstamp = kptllnd_data.kptl_incarnation;
rwlock_init(&kptllnd_data.kptl_peer_rw_lock);
- cfs_waitq_init(&kptllnd_data.kptl_watchdog_waitq);
- CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_closing_peers);
- CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_zombie_peers);
+ init_waitqueue_head(&kptllnd_data.kptl_watchdog_waitq);
+ CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_closing_peers);
+ CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_zombie_peers);
/* Allocate and setup the peer hash table */
kptllnd_data.kptl_peer_hash_size =
cfs_list_t kptl_nets; /* kptl_net instance*/
spinlock_t kptl_sched_lock; /* serialise... */
- cfs_waitq_t kptl_sched_waitq; /* schedulers sleep here */
- cfs_list_t kptl_sched_txq; /* tx requiring attention */
- cfs_list_t kptl_sched_rxq; /* rx requiring attention */
- cfs_list_t kptl_sched_rxbq; /* rxb requiring reposting */
+ wait_queue_head_t kptl_sched_waitq; /* schedulers sleep here */
+ cfs_list_t kptl_sched_txq; /* tx requiring attention */
+ cfs_list_t kptl_sched_rxq; /* rx requiring attention */
+ cfs_list_t kptl_sched_rxbq; /* rxb requiring reposting */
- cfs_waitq_t kptl_watchdog_waitq; /* watchdog sleeps here */
+ wait_queue_head_t kptl_watchdog_waitq; /* watchdog sleeps here */
- kptl_rx_buffer_pool_t kptl_rx_buffer_pool; /* rx buffer pool */
+ kptl_rx_buffer_pool_t kptl_rx_buffer_pool; /* rx buffer pool */
struct kmem_cache *kptl_rx_cache; /* rx descripter cache */
cfs_atomic_t kptl_ntx; /* # tx descs allocated */
cfs_list_add_tail(&rxb->rxb_repost_list,
&kptllnd_data.kptl_sched_rxbq);
- cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
+ wake_up(&kptllnd_data.kptl_sched_waitq);
spin_unlock(&kptllnd_data.kptl_sched_lock);
}
int
kptllnd_watchdog(void *arg)
{
- int id = (long)arg;
- cfs_waitlink_t waitlink;
- int stamp = 0;
- int peer_index = 0;
- unsigned long deadline = jiffies;
- int timeout;
- int i;
+ int id = (long)arg;
+ wait_queue_t waitlink;
+ int stamp = 0;
+ int peer_index = 0;
+ unsigned long deadline = jiffies;
+ int timeout;
+ int i;
- cfs_block_allsigs();
+ cfs_block_allsigs();
- cfs_waitlink_init(&waitlink);
+ init_waitqueue_entry_current(&waitlink);
/* threads shut down in phase 2 after all peers have been destroyed */
while (kptllnd_data.kptl_shutdown < 2) {
kptllnd_handle_closing_peers();
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add_exclusive(&kptllnd_data.kptl_watchdog_waitq,
- &waitlink);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue_exclusive(&kptllnd_data.kptl_watchdog_waitq,
+ &waitlink);
- cfs_waitq_timedwait(&waitlink, CFS_TASK_INTERRUPTIBLE, timeout);
+ waitq_timedwait(&waitlink, TASK_INTERRUPTIBLE, timeout);
- cfs_set_current_state (CFS_TASK_RUNNING);
- cfs_waitq_del(&kptllnd_data.kptl_watchdog_waitq, &waitlink);
- }
+ set_current_state (TASK_RUNNING);
+ remove_wait_queue(&kptllnd_data.kptl_watchdog_waitq, &waitlink);
+ }
- kptllnd_thread_fini();
- CDEBUG(D_NET, "<<<\n");
- return (0);
+ kptllnd_thread_fini();
+ CDEBUG(D_NET, "<<<\n");
+ return (0);
};
int
kptllnd_scheduler (void *arg)
{
- int id = (long)arg;
- cfs_waitlink_t waitlink;
- unsigned long flags;
- int did_something;
- int counter = 0;
- kptl_rx_t *rx;
- kptl_rx_buffer_t *rxb;
- kptl_tx_t *tx;
+ int id = (long)arg;
+ wait_queue_t waitlink;
+ unsigned long flags;
+ int did_something;
+ int counter = 0;
+ kptl_rx_t *rx;
+ kptl_rx_buffer_t *rxb;
+ kptl_tx_t *tx;
- cfs_block_allsigs();
+ cfs_block_allsigs();
- cfs_waitlink_init(&waitlink);
+ init_waitqueue_entry_current(&waitlink);
spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
continue;
}
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add_exclusive(&kptllnd_data.kptl_sched_waitq,
- &waitlink);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue_exclusive(&kptllnd_data.kptl_sched_waitq,
+ &waitlink);
spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
- flags);
+ flags);
- if (!did_something)
- cfs_waitq_wait(&waitlink, CFS_TASK_INTERRUPTIBLE);
- else
- cfs_cond_resched();
+ if (!did_something)
+ waitq_wait(&waitlink, TASK_INTERRUPTIBLE);
+ else
+ cond_resched();
- cfs_set_current_state(CFS_TASK_RUNNING);
- cfs_waitq_del(&kptllnd_data.kptl_sched_waitq, &waitlink);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&kptllnd_data.kptl_sched_waitq, &waitlink);
spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
- counter = 0;
- }
+ counter = 0;
+ }
spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
void
kptllnd_peer_close_locked(kptl_peer_t *peer, int why)
{
- switch (peer->peer_state) {
- default:
- LBUG();
-
- case PEER_STATE_WAITING_HELLO:
- case PEER_STATE_ACTIVE:
- /* Ensure new peers see a new incarnation of me */
- LASSERT(peer->peer_myincarnation <= kptllnd_data.kptl_incarnation);
- if (peer->peer_myincarnation == kptllnd_data.kptl_incarnation)
- kptllnd_data.kptl_incarnation++;
-
- /* Removing from peer table */
- kptllnd_data.kptl_n_active_peers--;
- LASSERT (kptllnd_data.kptl_n_active_peers >= 0);
-
- cfs_list_del(&peer->peer_list);
- kptllnd_peer_unreserve_buffers();
-
- peer->peer_error = why; /* stash 'why' only on first close */
- peer->peer_state = PEER_STATE_CLOSING;
-
- /* Schedule for immediate attention, taking peer table's ref */
- cfs_list_add_tail(&peer->peer_list,
- &kptllnd_data.kptl_closing_peers);
- cfs_waitq_signal(&kptllnd_data.kptl_watchdog_waitq);
- break;
-
- case PEER_STATE_ZOMBIE:
- case PEER_STATE_CLOSING:
- break;
- }
+ switch (peer->peer_state) {
+ default:
+ LBUG();
+
+ case PEER_STATE_WAITING_HELLO:
+ case PEER_STATE_ACTIVE:
+ /* Ensure new peers see a new incarnation of me */
+ LASSERT(peer->peer_myincarnation <= kptllnd_data.kptl_incarnation);
+ if (peer->peer_myincarnation == kptllnd_data.kptl_incarnation)
+ kptllnd_data.kptl_incarnation++;
+
+ /* Removing from peer table */
+ kptllnd_data.kptl_n_active_peers--;
+ LASSERT (kptllnd_data.kptl_n_active_peers >= 0);
+
+ cfs_list_del(&peer->peer_list);
+ kptllnd_peer_unreserve_buffers();
+
+ peer->peer_error = why; /* stash 'why' only on first close */
+ peer->peer_state = PEER_STATE_CLOSING;
+
+ /* Schedule for immediate attention, taking peer table's ref */
+ cfs_list_add_tail(&peer->peer_list,
+ &kptllnd_data.kptl_closing_peers);
+ wake_up(&kptllnd_data.kptl_watchdog_waitq);
+ break;
+
+ case PEER_STATE_ZOMBIE:
+ case PEER_STATE_CLOSING:
+ break;
+ }
}
void
rx->rx_treceived = jiffies;
/* Queue for attention */
spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
- flags);
+ flags);
- cfs_list_add_tail(&rx->rx_list,
- &kptllnd_data.kptl_sched_rxq);
- cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
+ cfs_list_add_tail(&rx->rx_list,
+ &kptllnd_data.kptl_sched_rxq);
+ wake_up(&kptllnd_data.kptl_sched_waitq);
spin_unlock_irqrestore(&kptllnd_data. \
- kptl_sched_lock, flags);
+ kptl_sched_lock, flags);
}
}
spin_unlock_irqrestore(&peer->peer_lock, flags);
- /* drop peer's ref, but if it was the last one... */
- if (cfs_atomic_dec_and_test(&tx->tx_refcount)) {
- /* ...finalize it in thread context! */
+ /* drop peer's ref, but if it was the last one... */
+ if (cfs_atomic_dec_and_test(&tx->tx_refcount)) {
+ /* ...finalize it in thread context! */
spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
- cfs_list_add_tail(&tx->tx_list, &kptllnd_data.kptl_sched_txq);
- cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
+ cfs_list_add_tail(&tx->tx_list, &kptllnd_data.kptl_sched_txq);
+ wake_up(&kptllnd_data.kptl_sched_waitq);
spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
- flags);
- }
+ flags);
+ }
}
/**********************************************************************/
/* flag threads to terminate, wake them and wait for them to die */
kqswnal_data.kqn_shuttingdown = 2;
- cfs_waitq_broadcast (&kqswnal_data.kqn_sched_waitq);
+ wake_up_all (&kqswnal_data.kqn_sched_waitq);
while (cfs_atomic_read (&kqswnal_data.kqn_nthreads) != 0) {
CDEBUG(D_NET, "waiting for %d threads to terminate\n",
CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_readyrxds);
spin_lock_init(&kqswnal_data.kqn_sched_lock);
- cfs_waitq_init (&kqswnal_data.kqn_sched_waitq);
+ init_waitqueue_head (&kqswnal_data.kqn_sched_waitq);
/* pointers/lists/locks initialised */
kqswnal_data.kqn_init = KQN_INIT_DATA;
typedef struct
{
- char kqn_init; /* what's been initialised */
- char kqn_shuttingdown;/* I'm trying to shut down */
- cfs_atomic_t kqn_nthreads; /* # threads running */
- lnet_ni_t *kqn_ni; /* _the_ instance of me */
+ char kqn_init; /* what's been initialised */
+ char kqn_shuttingdown;/* I'm trying to shut down */
+ cfs_atomic_t kqn_nthreads; /* # threads running */
+ lnet_ni_t *kqn_ni; /* _the_ instance of me */
- kqswnal_rx_t *kqn_rxds; /* stack of all the receive descriptors */
- kqswnal_tx_t *kqn_txds; /* stack of all the transmit descriptors */
+ kqswnal_rx_t *kqn_rxds; /* stack of all the receive descriptors */
+ kqswnal_tx_t *kqn_txds; /* stack of all the transmit descriptors */
- cfs_list_t kqn_idletxds; /* transmit descriptors free to use */
- cfs_list_t kqn_activetxds; /* transmit descriptors being used */
+ cfs_list_t kqn_idletxds; /* transmit descriptors free to use */
+ cfs_list_t kqn_activetxds; /* transmit descriptors being used */
spinlock_t kqn_idletxd_lock; /* serialise idle txd access */
cfs_atomic_t kqn_pending_txs; /* # transmits being prepped */
spinlock_t kqn_sched_lock; /* serialise packet schedulers */
- cfs_waitq_t kqn_sched_waitq;/* scheduler blocks here */
-
- cfs_list_t kqn_readyrxds; /* rxds full of data */
- cfs_list_t kqn_donetxds; /* completed transmits */
- cfs_list_t kqn_delayedtxds;/* delayed transmits */
-
- EP_SYS *kqn_ep; /* elan system */
- EP_NMH *kqn_ep_tx_nmh; /* elan reserved tx vaddrs */
- EP_NMH *kqn_ep_rx_nmh; /* elan reserved rx vaddrs */
- EP_XMTR *kqn_eptx; /* elan transmitter */
- EP_RCVR *kqn_eprx_small; /* elan receiver (small messages) */
- EP_RCVR *kqn_eprx_large; /* elan receiver (large messages) */
-
- int kqn_nnodes; /* this cluster's size */
- int kqn_elanid; /* this nodes's elan ID */
-
- EP_STATUSBLK kqn_rpc_success;/* preset RPC reply status blocks */
- EP_STATUSBLK kqn_rpc_failed;
- EP_STATUSBLK kqn_rpc_version;/* reply to future version query */
- EP_STATUSBLK kqn_rpc_magic; /* reply to future version query */
-} kqswnal_data_t;
+ wait_queue_head_t kqn_sched_waitq;/* scheduler blocks here */
+
+ cfs_list_t kqn_readyrxds; /* rxds full of data */
+ cfs_list_t kqn_donetxds; /* completed transmits */
+ cfs_list_t kqn_delayedtxds;/* delayed transmits */
+
+ EP_SYS *kqn_ep; /* elan system */
+ EP_NMH *kqn_ep_tx_nmh; /* elan reserved tx vaddrs */
+ EP_NMH *kqn_ep_rx_nmh; /* elan reserved rx vaddrs */
+ EP_XMTR *kqn_eptx; /* elan transmitter */
+ EP_RCVR *kqn_eprx_small; /* elan receiver (small messages) */
+ EP_RCVR *kqn_eprx_large; /* elan receiver (large messages) */
+
+ int kqn_nnodes; /* this cluster's size */
+ int kqn_elanid; /* this node's elan ID */
+
+ EP_STATUSBLK kqn_rpc_success;/* preset RPC reply status blocks */
+ EP_STATUSBLK kqn_rpc_failed;
+ EP_STATUSBLK kqn_rpc_version;/* reply to future version query */
+ EP_STATUSBLK kqn_rpc_magic; /* reply to future version query */
+} kqswnal_data_t;
/* kqn_init state */
#define KQN_INIT_NOTHING 0 /* MUST BE ZERO so zeroed state is initialised OK */
cfs_list_add_tail(&ktx->ktx_schedlist,
&kqswnal_data.kqn_donetxds);
- cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
+ wake_up(&kqswnal_data.kqn_sched_waitq);
spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
}
cfs_list_add_tail(&ktx->ktx_schedlist,
&kqswnal_data.kqn_delayedtxds);
- cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
+ wake_up(&kqswnal_data.kqn_sched_waitq);
spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
flags);
spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
cfs_list_add_tail(&krx->krx_list, &kqswnal_data.kqn_readyrxds);
- cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
+ wake_up(&kqswnal_data.kqn_sched_waitq);
spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
}
kqn_donetxds) ||
!cfs_list_empty(&kqswnal_data. \
kqn_delayedtxds));
- LASSERT (rc == 0);
- } else if (need_resched())
- cfs_schedule ();
+ LASSERT (rc == 0);
+ } else if (need_resched())
+ schedule ();
spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
- flags);
- }
- }
+ flags);
+ }
+ }
- kqswnal_thread_fini ();
- return (0);
+ kqswnal_thread_fini ();
+ return 0;
}
kranal_set_conn_params(kra_conn_t *conn, kra_connreq_t *connreq,
__u32 peer_ip, int peer_port)
{
- kra_device_t *dev = conn->rac_device;
- unsigned long flags;
- RAP_RETURN rrc;
-
- /* CAVEAT EMPTOR: we're really overloading rac_last_tx + rac_keepalive
- * to do RapkCompleteSync() timekeeping (see kibnal_scheduler). */
- conn->rac_last_tx = jiffies;
- conn->rac_keepalive = 0;
-
- rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
- if (rrc != RAP_SUCCESS) {
- CERROR("Error setting riparams from %u.%u.%u.%u/%d: %d\n",
- HIPQUAD(peer_ip), peer_port, rrc);
- return -ECONNABORTED;
- }
-
- /* Schedule conn on rad_new_conns */
- kranal_conn_addref(conn);
+ kra_device_t *dev = conn->rac_device;
+ unsigned long flags;
+ RAP_RETURN rrc;
+
+ /* CAVEAT EMPTOR: we're really overloading rac_last_tx + rac_keepalive
+ * to do RapkCompleteSync() timekeeping (see kranal_scheduler). */
+ conn->rac_last_tx = jiffies;
+ conn->rac_keepalive = 0;
+
+ rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
+ if (rrc != RAP_SUCCESS) {
+ CERROR("Error setting riparams from %u.%u.%u.%u/%d: %d\n",
+ HIPQUAD(peer_ip), peer_port, rrc);
+ return -ECONNABORTED;
+ }
+
+ /* Schedule conn on rad_new_conns */
+ kranal_conn_addref(conn);
spin_lock_irqsave(&dev->rad_lock, flags);
- cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns);
- cfs_waitq_signal(&dev->rad_waitq);
+ cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns);
+ wake_up(&dev->rad_waitq);
spin_unlock_irqrestore(&dev->rad_lock, flags);
- rrc = RapkWaitToConnect(conn->rac_rihandle);
- if (rrc != RAP_SUCCESS) {
- CERROR("Error waiting to connect to %u.%u.%u.%u/%d: %d\n",
- HIPQUAD(peer_ip), peer_port, rrc);
- return -ECONNABORTED;
- }
-
- /* Scheduler doesn't touch conn apart from to deschedule and decref it
- * after RapkCompleteSync() return success, so conn is all mine */
-
- conn->rac_peerstamp = connreq->racr_peerstamp;
- conn->rac_peer_connstamp = connreq->racr_connstamp;
- conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq->racr_timeout);
- kranal_update_reaper_timeout(conn->rac_keepalive);
- return 0;
+ rrc = RapkWaitToConnect(conn->rac_rihandle);
+ if (rrc != RAP_SUCCESS) {
+ CERROR("Error waiting to connect to %u.%u.%u.%u/%d: %d\n",
+ HIPQUAD(peer_ip), peer_port, rrc);
+ return -ECONNABORTED;
+ }
+
+ /* Scheduler doesn't touch conn except to deschedule and decref it
+ * after RapkCompleteSync() returns success, so conn is all mine */
+
+ conn->rac_peerstamp = connreq->racr_peerstamp;
+ conn->rac_peer_connstamp = connreq->racr_connstamp;
+ conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq->racr_timeout);
+ kranal_update_reaper_timeout(conn->rac_keepalive);
+ return 0;
}
int
int
kranal_accept (lnet_ni_t *ni, struct socket *sock)
{
- kra_acceptsock_t *ras;
- int rc;
- __u32 peer_ip;
- int peer_port;
- unsigned long flags;
+ kra_acceptsock_t *ras;
+ int rc;
+ __u32 peer_ip;
+ int peer_port;
+ unsigned long flags;
- rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
- LASSERT (rc == 0); /* we succeeded before */
+ rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
+ LASSERT (rc == 0); /* we succeeded before */
- LIBCFS_ALLOC(ras, sizeof(*ras));
- if (ras == NULL) {
- CERROR("ENOMEM allocating connection request from "
- "%u.%u.%u.%u\n", HIPQUAD(peer_ip));
- return -ENOMEM;
- }
+ LIBCFS_ALLOC(ras, sizeof(*ras));
+ if (ras == NULL) {
+ CERROR("ENOMEM allocating connection request from "
+ "%u.%u.%u.%u\n", HIPQUAD(peer_ip));
+ return -ENOMEM;
+ }
- ras->ras_sock = sock;
+ ras->ras_sock = sock;
spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
- cfs_list_add_tail(&ras->ras_list, &kranal_data.kra_connd_acceptq);
- cfs_waitq_signal(&kranal_data.kra_connd_waitq);
+ cfs_list_add_tail(&ras->ras_list, &kranal_data.kra_connd_acceptq);
+ wake_up(&kranal_data.kra_connd_waitq);
spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
- return 0;
+ return 0;
}
int
/* Flag threads to terminate */
kranal_data.kra_shutdown = 1;
- for (i = 0; i < kranal_data.kra_ndevs; i++) {
- kra_device_t *dev = &kranal_data.kra_devices[i];
+ for (i = 0; i < kranal_data.kra_ndevs; i++) {
+ kra_device_t *dev = &kranal_data.kra_devices[i];
spin_lock_irqsave(&dev->rad_lock, flags);
- cfs_waitq_signal(&dev->rad_waitq);
+ wake_up(&dev->rad_waitq);
spin_unlock_irqrestore(&dev->rad_lock, flags);
- }
+ }
spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
- cfs_waitq_broadcast(&kranal_data.kra_reaper_waitq);
+ wake_up_all(&kranal_data.kra_reaper_waitq);
spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
- LASSERT (cfs_list_empty(&kranal_data.kra_connd_peers));
+ LASSERT (cfs_list_empty(&kranal_data.kra_connd_peers));
spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
- cfs_waitq_broadcast(&kranal_data.kra_connd_waitq);
+ wake_up_all(&kranal_data.kra_connd_waitq);
spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
/* Wait for threads to exit */
rwlock_init(&kranal_data.kra_global_lock);
- for (i = 0; i < RANAL_MAXDEVS; i++ ) {
- kra_device_t *dev = &kranal_data.kra_devices[i];
+ for (i = 0; i < RANAL_MAXDEVS; i++ ) {
+ kra_device_t *dev = &kranal_data.kra_devices[i];
- dev->rad_idx = i;
- CFS_INIT_LIST_HEAD(&dev->rad_ready_conns);
- CFS_INIT_LIST_HEAD(&dev->rad_new_conns);
- cfs_waitq_init(&dev->rad_waitq);
+ dev->rad_idx = i;
+ CFS_INIT_LIST_HEAD(&dev->rad_ready_conns);
+ CFS_INIT_LIST_HEAD(&dev->rad_new_conns);
+ init_waitqueue_head(&dev->rad_waitq);
spin_lock_init(&dev->rad_lock);
- }
+ }
- kranal_data.kra_new_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
- cfs_waitq_init(&kranal_data.kra_reaper_waitq);
+ kranal_data.kra_new_min_timeout = MAX_SCHEDULE_TIMEOUT;
+ init_waitqueue_head(&kranal_data.kra_reaper_waitq);
spin_lock_init(&kranal_data.kra_reaper_lock);
- CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
- CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
- cfs_waitq_init(&kranal_data.kra_connd_waitq);
+ CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
+ CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
+ init_waitqueue_head(&kranal_data.kra_connd_waitq);
spin_lock_init(&kranal_data.kra_connd_lock);
CFS_INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
typedef struct
{
- RAP_PVOID rad_handle; /* device handle */
- RAP_PVOID rad_fma_cqh; /* FMA completion queue handle */
- RAP_PVOID rad_rdma_cqh; /* rdma completion queue handle */
- int rad_id; /* device id */
- int rad_idx; /* index in kra_devices */
- int rad_ready; /* set by device callback */
- cfs_list_t rad_ready_conns;/* connections ready to tx/rx */
- cfs_list_t rad_new_conns; /* new connections to complete */
- cfs_waitq_t rad_waitq; /* scheduler waits here */
- spinlock_t rad_lock; /* serialise */
- void *rad_scheduler; /* scheduling thread */
- unsigned int rad_nphysmap; /* # phys mappings */
- unsigned int rad_nppphysmap;/* # phys pages mapped */
- unsigned int rad_nvirtmap; /* # virt mappings */
- unsigned long rad_nobvirtmap;/* # virt bytes mapped */
+ RAP_PVOID rad_handle; /* device handle */
+ RAP_PVOID rad_fma_cqh; /* FMA completion queue handle */
+ RAP_PVOID rad_rdma_cqh; /* rdma completion queue handle */
+ int rad_id; /* device id */
+ int rad_idx; /* index in kra_devices */
+ int rad_ready; /* set by device callback */
+ cfs_list_t rad_ready_conns;/* connections ready to tx/rx */
+ cfs_list_t rad_new_conns; /* new connections to complete */
+ wait_queue_head_t rad_waitq; /* scheduler waits here */
+ spinlock_t rad_lock; /* serialise */
+ void *rad_scheduler; /* scheduling thread */
+ unsigned int rad_nphysmap; /* # phys mappings */
+ unsigned int rad_nppphysmap;/* # phys pages mapped */
+ unsigned int rad_nvirtmap; /* # virt mappings */
+ unsigned long rad_nobvirtmap;/* # virt bytes mapped */
} kra_device_t;
typedef struct
{
- int kra_init; /* initialisation state */
- int kra_shutdown; /* shut down? */
- cfs_atomic_t kra_nthreads; /* # live threads */
- lnet_ni_t *kra_ni; /* _the_ nal instance */
+ int kra_init; /* initialisation state */
+ int kra_shutdown; /* shut down? */
+ cfs_atomic_t kra_nthreads; /* # live threads */
+ lnet_ni_t *kra_ni; /* _the_ nal instance */
- kra_device_t kra_devices[RANAL_MAXDEVS]; /* device/ptag/cq */
- int kra_ndevs; /* # devices */
+ kra_device_t kra_devices[RANAL_MAXDEVS]; /* device/ptag/cq */
+ int kra_ndevs; /* # devices */
rwlock_t kra_global_lock; /* stabilize peer/conn ops */
- cfs_list_t *kra_peers; /* hash table of all my known peers */
- int kra_peer_hash_size; /* size of kra_peers */
- cfs_atomic_t kra_npeers; /* # peers extant */
- int kra_nonewpeers; /* prevent new peers */
+ cfs_list_t *kra_peers; /* hash table of all my known peers */
+ int kra_peer_hash_size; /* size of kra_peers */
+ cfs_atomic_t kra_npeers; /* # peers extant */
+ int kra_nonewpeers; /* prevent new peers */
- cfs_list_t *kra_conns; /* conns hashed by cqid */
- int kra_conn_hash_size; /* size of kra_conns */
- __u64 kra_peerstamp; /* when I started up */
- __u64 kra_connstamp; /* conn stamp generator */
- int kra_next_cqid; /* cqid generator */
- cfs_atomic_t kra_nconns; /* # connections extant */
+ cfs_list_t *kra_conns; /* conns hashed by cqid */
+ int kra_conn_hash_size; /* size of kra_conns */
+ __u64 kra_peerstamp; /* when I started up */
+ __u64 kra_connstamp; /* conn stamp generator */
+ int kra_next_cqid; /* cqid generator */
+ cfs_atomic_t kra_nconns; /* # connections extant */
- long kra_new_min_timeout; /* minimum timeout on any new conn */
- cfs_waitq_t kra_reaper_waitq; /* reaper sleeps here */
+ long kra_new_min_timeout; /* minimum timeout on any new conn */
+ wait_queue_head_t kra_reaper_waitq; /* reaper sleeps here */
spinlock_t kra_reaper_lock; /* serialise */
- cfs_list_t kra_connd_peers; /* peers waiting for a connection */
- cfs_list_t kra_connd_acceptq; /* accepted sockets to handshake */
- cfs_waitq_t kra_connd_waitq; /* connection daemons sleep here */
+ cfs_list_t kra_connd_peers; /* peers waiting for a connection */
+ cfs_list_t kra_connd_acceptq; /* accepted sockets to handshake */
+ wait_queue_head_t kra_connd_waitq; /* connection daemons sleep here */
spinlock_t kra_connd_lock; /* serialise */
- cfs_list_t kra_idle_txs; /* idle tx descriptors */
- __u64 kra_next_tx_cookie; /* RDMA completion cookie */
+ cfs_list_t kra_idle_txs; /* idle tx descriptors */
+ __u64 kra_next_tx_cookie; /* RDMA completion cookie */
spinlock_t kra_tx_lock; /* serialise */
} kra_data_t;
spin_lock_irqsave(&dev->rad_lock, flags);
- if (!dev->rad_ready) {
- dev->rad_ready = 1;
- cfs_waitq_signal(&dev->rad_waitq);
- }
+ if (!dev->rad_ready) {
+ dev->rad_ready = 1;
+ wake_up(&dev->rad_waitq);
+ }
spin_unlock_irqrestore(&dev->rad_lock, flags);
return;
spin_lock_irqsave(&dev->rad_lock, flags);
- if (!conn->rac_scheduled) {
- kranal_conn_addref(conn); /* +1 ref for scheduler */
- conn->rac_scheduled = 1;
- cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_ready_conns);
- cfs_waitq_signal(&dev->rad_waitq);
- }
+ if (!conn->rac_scheduled) {
+ kranal_conn_addref(conn); /* +1 ref for scheduler */
+ conn->rac_scheduled = 1;
+ cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_ready_conns);
+ wake_up(&dev->rad_waitq);
+ }
spin_unlock_irqrestore(&dev->rad_lock, flags);
}
spin_lock(&kranal_data.kra_connd_lock);
- cfs_list_add_tail(&peer->rap_connd_list,
- &kranal_data.kra_connd_peers);
- cfs_waitq_signal(&kranal_data.kra_connd_waitq);
+ cfs_list_add_tail(&peer->rap_connd_list,
+ &kranal_data.kra_connd_peers);
+ wake_up(&kranal_data.kra_connd_waitq);
spin_unlock(&kranal_data.kra_connd_lock);
}
int
kranal_connd (void *arg)
{
- long id = (long)arg;
- cfs_waitlink_t wait;
- unsigned long flags;
- kra_peer_t *peer;
- kra_acceptsock_t *ras;
- int did_something;
+ long id = (long)arg;
+ wait_queue_t wait;
+ unsigned long flags;
+ kra_peer_t *peer;
+ kra_acceptsock_t *ras;
+ int did_something;
- cfs_block_allsigs();
+ cfs_block_allsigs();
- cfs_waitlink_init(&wait);
+ init_waitqueue_entry_current(&wait);
spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
- while (!kranal_data.kra_shutdown) {
- did_something = 0;
+ while (!kranal_data.kra_shutdown) {
+ did_something = 0;
- if (!cfs_list_empty(&kranal_data.kra_connd_acceptq)) {
- ras = cfs_list_entry(kranal_data.kra_connd_acceptq.next,
- kra_acceptsock_t, ras_list);
- cfs_list_del(&ras->ras_list);
+ if (!cfs_list_empty(&kranal_data.kra_connd_acceptq)) {
+ ras = cfs_list_entry(kranal_data.kra_connd_acceptq.next,
+ kra_acceptsock_t, ras_list);
+ cfs_list_del(&ras->ras_list);
spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
- flags);
+ flags);
- CDEBUG(D_NET,"About to handshake someone\n");
+ CDEBUG(D_NET,"About to handshake someone\n");
- kranal_conn_handshake(ras->ras_sock, NULL);
- kranal_free_acceptsock(ras);
+ kranal_conn_handshake(ras->ras_sock, NULL);
+ kranal_free_acceptsock(ras);
- CDEBUG(D_NET,"Finished handshaking someone\n");
+ CDEBUG(D_NET,"Finished handshaking someone\n");
spin_lock_irqsave(&kranal_data.kra_connd_lock,
- flags);
- did_something = 1;
- }
+ flags);
+ did_something = 1;
+ }
- if (!cfs_list_empty(&kranal_data.kra_connd_peers)) {
- peer = cfs_list_entry(kranal_data.kra_connd_peers.next,
- kra_peer_t, rap_connd_list);
+ if (!cfs_list_empty(&kranal_data.kra_connd_peers)) {
+ peer = cfs_list_entry(kranal_data.kra_connd_peers.next,
+ kra_peer_t, rap_connd_list);
- cfs_list_del_init(&peer->rap_connd_list);
+ cfs_list_del_init(&peer->rap_connd_list);
spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
- flags);
+ flags);
- kranal_connect(peer);
- kranal_peer_decref(peer);
+ kranal_connect(peer);
+ kranal_peer_decref(peer);
spin_lock_irqsave(&kranal_data.kra_connd_lock,
- flags);
- did_something = 1;
- }
+ flags);
+ did_something = 1;
+ }
- if (did_something)
- continue;
+ if (did_something)
+ continue;
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add_exclusive(&kranal_data.kra_connd_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue_exclusive(&kranal_data.kra_connd_waitq, &wait);
spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
- cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
+ waitq_wait(&wait, TASK_INTERRUPTIBLE);
- cfs_set_current_state(CFS_TASK_RUNNING);
- cfs_waitq_del(&kranal_data.kra_connd_waitq, &wait);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&kranal_data.kra_connd_waitq, &wait);
spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
- }
+ }
spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
- kranal_thread_fini();
- return 0;
+ kranal_thread_fini();
+ return 0;
}
void
int
kranal_reaper (void *arg)
{
- cfs_waitlink_t wait;
- unsigned long flags;
- long timeout;
- int i;
- int conn_entries = kranal_data.kra_conn_hash_size;
- int conn_index = 0;
- int base_index = conn_entries - 1;
- unsigned long next_check_time = jiffies;
- long next_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
- long current_min_timeout = 1;
+ wait_queue_t wait;
+ unsigned long flags;
+ long timeout;
+ int i;
+ int conn_entries = kranal_data.kra_conn_hash_size;
+ int conn_index = 0;
+ int base_index = conn_entries - 1;
+ unsigned long next_check_time = jiffies;
+ long next_min_timeout = MAX_SCHEDULE_TIMEOUT;
+ long current_min_timeout = 1;
- cfs_block_allsigs();
+ cfs_block_allsigs();
- cfs_waitlink_init(&wait);
+ init_waitqueue_entry_current(&wait);
spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
- while (!kranal_data.kra_shutdown) {
- /* I wake up every 'p' seconds to check for timeouts on some
- * more peers. I try to check every connection 'n' times
- * within the global minimum of all keepalive and timeout
- * intervals, to ensure I attend to every connection within
- * (n+1)/n times its timeout intervals. */
- const int p = 1;
- const int n = 3;
- unsigned long min_timeout;
- int chunk;
-
- /* careful with the jiffy wrap... */
- timeout = (long)(next_check_time - jiffies);
- if (timeout > 0) {
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add(&kranal_data.kra_reaper_waitq, &wait);
+ while (!kranal_data.kra_shutdown) {
+ /* I wake up every 'p' seconds to check for timeouts on some
+ * more peers. I try to check every connection 'n' times
+ * within the global minimum of all keepalive and timeout
+ * intervals, to ensure I attend to every connection within
+ * (n+1)/n times its timeout intervals. */
+ const int p = 1;
+ const int n = 3;
+ unsigned long min_timeout;
+ int chunk;
+
+ /* careful with the jiffy wrap... */
+ timeout = (long)(next_check_time - jiffies);
+ if (timeout > 0) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&kranal_data.kra_reaper_waitq, &wait);
spin_unlock_irqrestore(&kranal_data.kra_reaper_lock,
- flags);
+ flags);
- cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
- timeout);
+ waitq_timedwait(&wait, TASK_INTERRUPTIBLE,
+ timeout);
spin_lock_irqsave(&kranal_data.kra_reaper_lock,
- flags);
+ flags);
- cfs_set_current_state(CFS_TASK_RUNNING);
- cfs_waitq_del(&kranal_data.kra_reaper_waitq, &wait);
- continue;
- }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&kranal_data.kra_reaper_waitq, &wait);
+ continue;
+ }
- if (kranal_data.kra_new_min_timeout !=
- CFS_MAX_SCHEDULE_TIMEOUT) {
- /* new min timeout set: restart min timeout scan */
- next_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
- base_index = conn_index - 1;
- if (base_index < 0)
- base_index = conn_entries - 1;
-
- if (kranal_data.kra_new_min_timeout <
- current_min_timeout) {
- current_min_timeout =
- kranal_data.kra_new_min_timeout;
- CDEBUG(D_NET, "Set new min timeout %ld\n",
- current_min_timeout);
- }
+ if (kranal_data.kra_new_min_timeout !=
+ MAX_SCHEDULE_TIMEOUT) {
+ /* new min timeout set: restart min timeout scan */
+ next_min_timeout = MAX_SCHEDULE_TIMEOUT;
+ base_index = conn_index - 1;
+ if (base_index < 0)
+ base_index = conn_entries - 1;
+
+ if (kranal_data.kra_new_min_timeout <
+ current_min_timeout) {
+ current_min_timeout =
+ kranal_data.kra_new_min_timeout;
+ CDEBUG(D_NET, "Set new min timeout %ld\n",
+ current_min_timeout);
+ }
- kranal_data.kra_new_min_timeout =
- CFS_MAX_SCHEDULE_TIMEOUT;
- }
- min_timeout = current_min_timeout;
+ kranal_data.kra_new_min_timeout =
+ MAX_SCHEDULE_TIMEOUT;
+ }
+ min_timeout = current_min_timeout;
spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
- LASSERT (min_timeout > 0);
-
- /* Compute how many table entries to check now so I get round
- * the whole table fast enough given that I do this at fixed
- * intervals of 'p' seconds) */
- chunk = conn_entries;
- if (min_timeout > n * p)
- chunk = (chunk * n * p) / min_timeout;
- if (chunk == 0)
- chunk = 1;
-
- for (i = 0; i < chunk; i++) {
- kranal_reaper_check(conn_index,
- &next_min_timeout);
- conn_index = (conn_index + 1) % conn_entries;
- }
+ LASSERT (min_timeout > 0);
+
+ /* Compute how many table entries to check now so I get round
+ * the whole table fast enough given that I do this at fixed
+ * intervals of 'p' seconds */
+ chunk = conn_entries;
+ if (min_timeout > n * p)
+ chunk = (chunk * n * p) / min_timeout;
+ if (chunk == 0)
+ chunk = 1;
+
+ for (i = 0; i < chunk; i++) {
+ kranal_reaper_check(conn_index,
+ &next_min_timeout);
+ conn_index = (conn_index + 1) % conn_entries;
+ }
next_check_time += p * HZ;
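/*
 * Worked example (not part of the patch): with conn_entries = 256, p = 1,
 * n = 3 and a 64-second minimum timeout, chunk = 256 * 3 * 1 / 64 = 12 hash
 * buckets per wakeup, so the whole table is swept in ~22 one-second wakeups,
 * i.e. roughly n = 3 sweeps within any 64-second timeout, as intended above.
 */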
spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
- if (((conn_index - chunk <= base_index &&
- base_index < conn_index) ||
- (conn_index - conn_entries - chunk <= base_index &&
- base_index < conn_index - conn_entries))) {
+ if (((conn_index - chunk <= base_index &&
+ base_index < conn_index) ||
+ (conn_index - conn_entries - chunk <= base_index &&
+ base_index < conn_index - conn_entries))) {
- /* Scanned all conns: set current_min_timeout... */
- if (current_min_timeout != next_min_timeout) {
- current_min_timeout = next_min_timeout;
- CDEBUG(D_NET, "Set new min timeout %ld\n",
- current_min_timeout);
- }
+ /* Scanned all conns: set current_min_timeout... */
+ if (current_min_timeout != next_min_timeout) {
+ current_min_timeout = next_min_timeout;
+ CDEBUG(D_NET, "Set new min timeout %ld\n",
+ current_min_timeout);
+ }
- /* ...and restart min timeout scan */
- next_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
- base_index = conn_index - 1;
- if (base_index < 0)
- base_index = conn_entries - 1;
- }
- }
+ /* ...and restart min timeout scan */
+ next_min_timeout = MAX_SCHEDULE_TIMEOUT;
+ base_index = conn_index - 1;
+ if (base_index < 0)
+ base_index = conn_entries - 1;
+ }
+ }
- kranal_thread_fini();
- return 0;
+ kranal_thread_fini();
+ return 0;
}
void
int
kranal_scheduler (void *arg)
{
- kra_device_t *dev = (kra_device_t *)arg;
- cfs_waitlink_t wait;
- kra_conn_t *conn;
+ kra_device_t *dev = (kra_device_t *)arg;
+ wait_queue_t wait;
+ kra_conn_t *conn;
unsigned long flags;
unsigned long deadline;
unsigned long soonest;
cfs_block_allsigs();
- dev->rad_scheduler = current;
- cfs_waitlink_init(&wait);
+ dev->rad_scheduler = current;
+ init_waitqueue_entry_current(&wait);
spin_lock_irqsave(&dev->rad_lock, flags);
if (busy_loops++ >= RANAL_RESCHED) {
spin_unlock_irqrestore(&dev->rad_lock, flags);
- cfs_cond_resched();
- busy_loops = 0;
+ cond_resched();
+ busy_loops = 0;
spin_lock_irqsave(&dev->rad_lock, flags);
}
if (dropped_lock) /* may sleep iff I didn't drop the lock */
continue;
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add_exclusive(&dev->rad_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue_exclusive(&dev->rad_waitq, &wait);
spin_unlock_irqrestore(&dev->rad_lock, flags);
- if (nsoonest == 0) {
- busy_loops = 0;
- cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
- } else {
- timeout = (long)(soonest - jiffies);
- if (timeout > 0) {
- busy_loops = 0;
- cfs_waitq_timedwait(&wait,
- CFS_TASK_INTERRUPTIBLE,
- timeout);
- }
- }
+ if (nsoonest == 0) {
+ busy_loops = 0;
+ waitq_wait(&wait, TASK_INTERRUPTIBLE);
+ } else {
+ timeout = (long)(soonest - jiffies);
+ if (timeout > 0) {
+ busy_loops = 0;
+ waitq_timedwait(&wait,
+ TASK_INTERRUPTIBLE,
+ timeout);
+ }
+ }
- cfs_waitq_del(&dev->rad_waitq, &wait);
- cfs_set_current_state(CFS_TASK_RUNNING);
+ remove_wait_queue(&dev->rad_waitq, &wait);
+ set_current_state(TASK_RUNNING);
spin_lock_irqsave(&dev->rad_lock, flags);
- }
+ }
spin_unlock_irqrestore(&dev->rad_lock, flags);
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
- cfs_list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
- cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
+ cfs_list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
+ wake_up(&ksocknal_data.ksnd_connd_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
return 0;
cfs_list_add_tail(&conn->ksnc_list,
&ksocknal_data.ksnd_deathrow_conns);
- cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);
+ wake_up(&ksocknal_data.ksnd_reaper_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
}
&sched->kss_tx_conns);
conn->ksnc_tx_scheduled = 1;
/* extra ref for scheduler */
- ksocknal_conn_addref(conn);
+ ksocknal_conn_addref(conn);
- cfs_waitq_signal (&sched->kss_waitq);
- }
+ wake_up (&sched->kss_waitq);
+ }
spin_unlock_bh(&sched->kss_lock);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
cfs_list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
- cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);
+ wake_up(&ksocknal_data.ksnd_reaper_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
}
/* flag threads to terminate; wake and wait for them to die */
ksocknal_data.ksnd_shuttingdown = 1;
- cfs_waitq_broadcast(&ksocknal_data.ksnd_connd_waitq);
- cfs_waitq_broadcast(&ksocknal_data.ksnd_reaper_waitq);
+ wake_up_all(&ksocknal_data.ksnd_connd_waitq);
+ wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
if (ksocknal_data.ksnd_sched_info != NULL) {
cfs_percpt_for_each(info, i,
for (j = 0; j < info->ksi_nthreads_max; j++) {
sched = &info->ksi_scheds[j];
- cfs_waitq_broadcast(&sched->kss_waitq);
+ wake_up_all(&sched->kss_waitq);
}
}
}
CFS_INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
- CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_enomem_conns);
- CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_zombie_conns);
- CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_deathrow_conns);
- cfs_waitq_init(&ksocknal_data.ksnd_reaper_waitq);
+ CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_enomem_conns);
+ CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_zombie_conns);
+ CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_deathrow_conns);
+ init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
spin_lock_init(&ksocknal_data.ksnd_connd_lock);
- CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_connreqs);
- CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_routes);
- cfs_waitq_init(&ksocknal_data.ksnd_connd_waitq);
+ CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_connreqs);
+ CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_routes);
+ init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
spin_lock_init(&ksocknal_data.ksnd_tx_lock);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_idle_noop_txs);
CFS_INIT_LIST_HEAD(&sched->kss_rx_conns);
CFS_INIT_LIST_HEAD(&sched->kss_tx_conns);
CFS_INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
- cfs_waitq_init(&sched->kss_waitq);
+ init_waitqueue_head(&sched->kss_waitq);
}
}
cfs_list_t kss_tx_conns;
/* zombie noop tx list */
cfs_list_t kss_zombie_noop_txs;
- cfs_waitq_t kss_waitq; /* where scheduler sleeps */
+ wait_queue_head_t kss_waitq; /* where scheduler sleeps */
/* # connections assigned to this scheduler */
int kss_nconns;
struct ksock_sched_info *kss_info; /* owner of it */
/* schedulers information */
struct ksock_sched_info **ksnd_sched_info;
- cfs_atomic_t ksnd_nactive_txs; /* #active txs */
+ cfs_atomic_t ksnd_nactive_txs; /* #active txs */
- cfs_list_t ksnd_deathrow_conns; /* conns to close: reaper_lock*/
- cfs_list_t ksnd_zombie_conns; /* conns to free: reaper_lock */
- cfs_list_t ksnd_enomem_conns; /* conns to retry: reaper_lock*/
- cfs_waitq_t ksnd_reaper_waitq; /* reaper sleeps here */
- cfs_time_t ksnd_reaper_waketime;/* when reaper will wake */
+ cfs_list_t ksnd_deathrow_conns; /* conns to close: reaper_lock*/
+ cfs_list_t ksnd_zombie_conns; /* conns to free: reaper_lock */
+ cfs_list_t ksnd_enomem_conns; /* conns to retry: reaper_lock*/
+ wait_queue_head_t ksnd_reaper_waitq; /* reaper sleeps here */
+ cfs_time_t ksnd_reaper_waketime;/* when reaper will wake */
spinlock_t ksnd_reaper_lock; /* serialise */
- int ksnd_enomem_tx; /* test ENOMEM sender */
- int ksnd_stall_tx; /* test sluggish sender */
- int ksnd_stall_rx; /* test sluggish receiver */
-
- cfs_list_t ksnd_connd_connreqs; /* incoming connection requests */
- cfs_list_t ksnd_connd_routes; /* routes waiting to be connected */
- cfs_waitq_t ksnd_connd_waitq; /* connds sleep here */
- int ksnd_connd_connecting;/* # connds connecting */
- /** time stamp of the last failed connecting attempt */
- long ksnd_connd_failed_stamp;
- /** # starting connd */
- unsigned ksnd_connd_starting;
- /** time stamp of the last starting connd */
- long ksnd_connd_starting_stamp;
- /** # running connd */
- unsigned ksnd_connd_running;
+ int ksnd_enomem_tx; /* test ENOMEM sender */
+ int ksnd_stall_tx; /* test sluggish sender */
+ int ksnd_stall_rx; /* test sluggish receiver */
+
+ cfs_list_t ksnd_connd_connreqs; /* incoming connection requests */
+ cfs_list_t ksnd_connd_routes; /* routes waiting to be connected */
+ wait_queue_head_t ksnd_connd_waitq; /* connds sleep here */
+ int ksnd_connd_connecting;/* # connds connecting */
+ /** time stamp of the last failed connecting attempt */
+ long ksnd_connd_failed_stamp;
+ /** # starting connd */
+ unsigned ksnd_connd_starting;
+ /** time stamp of the last starting connd */
+ long ksnd_connd_starting_stamp;
+ /** # running connd */
+ unsigned ksnd_connd_running;
spinlock_t ksnd_connd_lock; /* serialise */
cfs_list_t ksnd_idle_noop_txs; /* list head for freed noop tx */
LASSERT (conn->ksnc_tx_scheduled);
cfs_list_add_tail(&conn->ksnc_tx_list,
&ksocknal_data.ksnd_enomem_conns);
- if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
- SOCKNAL_ENOMEM_RETRY),
- ksocknal_data.ksnd_reaper_waketime))
- cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq);
+ if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
+ SOCKNAL_ENOMEM_RETRY),
+ ksocknal_data.ksnd_reaper_waketime))
+ wake_up(&ksocknal_data.ksnd_reaper_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
- return (rc);
- }
+ return (rc);
+ }
/* Actual error */
LASSERT (rc < 0);
cfs_list_add_tail(&route->ksnr_connd_list,
&ksocknal_data.ksnd_connd_routes);
- cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
+ wake_up(&ksocknal_data.ksnd_connd_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
}
cfs_list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
}
- if (conn->ksnc_tx_ready && /* able to send */
- !conn->ksnc_tx_scheduled) { /* not scheduled to send */
- /* +1 ref for scheduler */
- ksocknal_conn_addref(conn);
- cfs_list_add_tail (&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
- conn->ksnc_tx_scheduled = 1;
- cfs_waitq_signal (&sched->kss_waitq);
- }
+ if (conn->ksnc_tx_ready && /* able to send */
+ !conn->ksnc_tx_scheduled) { /* not scheduled to send */
+ /* +1 ref for scheduler */
+ ksocknal_conn_addref(conn);
+ cfs_list_add_tail (&conn->ksnc_tx_list,
+ &sched->kss_tx_conns);
+ conn->ksnc_tx_scheduled = 1;
+ wake_up(&sched->kss_waitq);
+ }
spin_unlock_bh(&sched->kss_lock);
}
spin_lock_bh(&sched->kss_lock);
- switch (conn->ksnc_rx_state) {
- case SOCKNAL_RX_PARSE_WAIT:
- cfs_list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
- cfs_waitq_signal (&sched->kss_waitq);
- LASSERT (conn->ksnc_rx_ready);
- break;
+ switch (conn->ksnc_rx_state) {
+ case SOCKNAL_RX_PARSE_WAIT:
+ cfs_list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
+ wake_up(&sched->kss_waitq);
+ LASSERT(conn->ksnc_rx_ready);
+ break;
case SOCKNAL_RX_PARSE:
/* scheduler hasn't noticed I'm parsing yet */
sched->kss_waitq,
!ksocknal_sched_cansleep(sched));
LASSERT (rc == 0);
- } else {
- cfs_cond_resched();
- }
+ } else {
+ cond_resched();
+ }
spin_lock_bh(&sched->kss_lock);
}
spin_lock_bh(&sched->kss_lock);
- conn->ksnc_rx_ready = 1;
+ conn->ksnc_rx_ready = 1;
- if (!conn->ksnc_rx_scheduled) { /* not being progressed */
- cfs_list_add_tail(&conn->ksnc_rx_list,
- &sched->kss_rx_conns);
- conn->ksnc_rx_scheduled = 1;
- /* extra ref for scheduler */
- ksocknal_conn_addref(conn);
+ if (!conn->ksnc_rx_scheduled) { /* not being progressed */
+ cfs_list_add_tail(&conn->ksnc_rx_list,
+ &sched->kss_rx_conns);
+ conn->ksnc_rx_scheduled = 1;
+ /* extra ref for scheduler */
+ ksocknal_conn_addref(conn);
- cfs_waitq_signal (&sched->kss_waitq);
- }
+ wake_up (&sched->kss_waitq);
+ }
spin_unlock_bh(&sched->kss_lock);
EXIT;
* Add connection to kss_tx_conns of scheduler
* and wakeup the scheduler.
*/
-void ksocknal_write_callback (ksock_conn_t *conn)
+void ksocknal_write_callback(ksock_conn_t *conn)
{
ksock_sched_t *sched;
ENTRY;
spin_lock_bh(&sched->kss_lock);
- conn->ksnc_tx_ready = 1;
+ conn->ksnc_tx_ready = 1;
- if (!conn->ksnc_tx_scheduled && // not being progressed
- !cfs_list_empty(&conn->ksnc_tx_queue)){//packets to send
- cfs_list_add_tail (&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
- conn->ksnc_tx_scheduled = 1;
- /* extra ref for scheduler */
- ksocknal_conn_addref(conn);
+ if (!conn->ksnc_tx_scheduled && // not being progressed
+ !cfs_list_empty(&conn->ksnc_tx_queue)){//packets to send
+ cfs_list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
+ conn->ksnc_tx_scheduled = 1;
+ /* extra ref for scheduler */
+ ksocknal_conn_addref(conn);
- cfs_waitq_signal (&sched->kss_waitq);
- }
+ wake_up(&sched->kss_waitq);
+ }
spin_unlock_bh(&sched->kss_lock);
static ksock_route_t *
ksocknal_connd_get_route_locked(signed long *timeout_p)
{
- ksock_route_t *route;
- cfs_time_t now;
+ ksock_route_t *route;
+ cfs_time_t now;
- now = cfs_time_current();
+ now = cfs_time_current();
- /* connd_routes can contain both pending and ordinary routes */
- cfs_list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes,
- ksnr_connd_list) {
+ /* connd_routes can contain both pending and ordinary routes */
+ cfs_list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes,
+ ksnr_connd_list) {
- if (route->ksnr_retry_interval == 0 ||
- cfs_time_aftereq(now, route->ksnr_timeout))
- return route;
+ if (route->ksnr_retry_interval == 0 ||
+ cfs_time_aftereq(now, route->ksnr_timeout))
+ return route;
- if (*timeout_p == CFS_MAX_SCHEDULE_TIMEOUT ||
- (int)*timeout_p > (int)(route->ksnr_timeout - now))
- *timeout_p = (int)(route->ksnr_timeout - now);
- }
+ if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
+ (int)*timeout_p > (int)(route->ksnr_timeout - now))
+ *timeout_p = (int)(route->ksnr_timeout - now);
+ }
- return NULL;
+ return NULL;
}
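/*
 * Note (not part of the patch): MAX_SCHEDULE_TIMEOUT is the kernel's
 * LONG_MAX sentinel for "no deadline"; ksocknal_connd() passes it in and
 * this routine only shortens *timeout_p when a retrying route imposes an
 * earlier deadline.
 */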
int
ksocknal_connd (void *arg)
{
spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
- ksock_connreq_t *cr;
- cfs_waitlink_t wait;
- int nloops = 0;
- int cons_retry = 0;
+ ksock_connreq_t *cr;
+ wait_queue_t wait;
+ int nloops = 0;
+ int cons_retry = 0;
- cfs_block_allsigs ();
+ cfs_block_allsigs ();
- cfs_waitlink_init (&wait);
+ init_waitqueue_entry_current(&wait);
spin_lock_bh(connd_lock);
- LASSERT(ksocknal_data.ksnd_connd_starting > 0);
- ksocknal_data.ksnd_connd_starting--;
- ksocknal_data.ksnd_connd_running++;
+ LASSERT(ksocknal_data.ksnd_connd_starting > 0);
+ ksocknal_data.ksnd_connd_starting--;
+ ksocknal_data.ksnd_connd_running++;
- while (!ksocknal_data.ksnd_shuttingdown) {
- ksock_route_t *route = NULL;
- long sec = cfs_time_current_sec();
- long timeout = CFS_MAX_SCHEDULE_TIMEOUT;
- int dropped_lock = 0;
-
- if (ksocknal_connd_check_stop(sec, &timeout)) {
- /* wakeup another one to check stop */
- cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
- break;
- }
+ while (!ksocknal_data.ksnd_shuttingdown) {
+ ksock_route_t *route = NULL;
+ long sec = cfs_time_current_sec();
+ long timeout = MAX_SCHEDULE_TIMEOUT;
+ int dropped_lock = 0;
+
+ if (ksocknal_connd_check_stop(sec, &timeout)) {
+ /* wakeup another one to check stop */
+ wake_up(&ksocknal_data.ksnd_connd_waitq);
+ break;
+ }
if (ksocknal_connd_check_start(sec, &timeout)) {
/* created new thread */
continue;
spin_unlock_bh(connd_lock);
nloops = 0;
- cfs_cond_resched();
+ cond_resched();
spin_lock_bh(connd_lock);
continue;
}
/* Nothing to do for 'timeout' */
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
spin_unlock_bh(connd_lock);
nloops = 0;
- cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);
+ waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
- cfs_set_current_state(CFS_TASK_RUNNING);
- cfs_waitq_del(&ksocknal_data.ksnd_connd_waitq, &wait);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
spin_lock_bh(connd_lock);
}
ksocknal_data.ksnd_connd_running--;
read_unlock(&ksocknal_data.ksnd_global_lock);
}
-int
-ksocknal_reaper (void *arg)
+int ksocknal_reaper(void *arg)
{
- cfs_waitlink_t wait;
- ksock_conn_t *conn;
- ksock_sched_t *sched;
- cfs_list_t enomem_conns;
+ wait_queue_t wait;
+ ksock_conn_t *conn;
+ ksock_sched_t *sched;
+ cfs_list_t enomem_conns;
int nenomem_conns;
cfs_duration_t timeout;
int i;
cfs_block_allsigs ();
- CFS_INIT_LIST_HEAD(&enomem_conns);
- cfs_waitlink_init (&wait);
+ CFS_INIT_LIST_HEAD(&enomem_conns);
+ init_waitqueue_entry_current(&wait);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
conn->ksnc_tx_ready = 1;
cfs_list_add_tail(&conn->ksnc_tx_list,
&sched->kss_tx_conns);
- cfs_waitq_signal(&sched->kss_waitq);
+ wake_up(&sched->kss_waitq);
spin_unlock_bh(&sched->kss_lock);
nenomem_conns++;
ksocknal_data.ksnd_reaper_waketime =
cfs_time_add(cfs_time_current(), timeout);
- cfs_set_current_state (CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add (&ksocknal_data.ksnd_reaper_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
- if (!ksocknal_data.ksnd_shuttingdown &&
- cfs_list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
- cfs_list_empty (&ksocknal_data.ksnd_zombie_conns))
- cfs_waitq_timedwait (&wait, CFS_TASK_INTERRUPTIBLE,
- timeout);
+ if (!ksocknal_data.ksnd_shuttingdown &&
+ cfs_list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
+ cfs_list_empty(&ksocknal_data.ksnd_zombie_conns))
+ waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
- cfs_set_current_state (CFS_TASK_RUNNING);
- cfs_waitq_del (&ksocknal_data.ksnd_reaper_waitq, &wait);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
}
lnet_init_locks(void)
{
spin_lock_init(&the_lnet.ln_eq_wait_lock);
- cfs_waitq_init(&the_lnet.ln_eq_waitq);
+ init_waitqueue_head(&the_lnet.ln_eq_waitq);
mutex_init(&the_lnet.ln_lnd_mutex);
mutex_init(&the_lnet.ln_api_mutex);
}
#ifdef __KERNEL__
/* Wake anyone waiting in LNetEQPoll() */
- if (cfs_waitq_active(&the_lnet.ln_eq_waitq))
- cfs_waitq_broadcast(&the_lnet.ln_eq_waitq);
+ if (waitqueue_active(&the_lnet.ln_eq_waitq))
+ wake_up_all(&the_lnet.ln_eq_waitq);
#else
# ifndef HAVE_LIBPTHREAD
/* LNetEQPoll() calls into _the_ LND to wait for action */
{
int tms = *timeout_ms;
int wait;
- cfs_waitlink_t wl;
+ wait_queue_t wl;
cfs_time_t now;
if (tms == 0)
return -1; /* don't want to wait and no new event */
- cfs_waitlink_init(&wl);
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add(&the_lnet.ln_eq_waitq, &wl);
+ init_waitqueue_entry_current(&wl);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&the_lnet.ln_eq_waitq, &wl);
lnet_eq_wait_unlock();
if (tms < 0) {
- cfs_waitq_wait(&wl, CFS_TASK_INTERRUPTIBLE);
+ waitq_wait(&wl, TASK_INTERRUPTIBLE);
} else {
struct timeval tv;
now = cfs_time_current();
- cfs_waitq_timedwait(&wl, CFS_TASK_INTERRUPTIBLE,
+ waitq_timedwait(&wl, TASK_INTERRUPTIBLE,
cfs_time_seconds(tms) / 1000);
cfs_duration_usec(cfs_time_sub(cfs_time_current(), now), &tv);
tms -= (int)(tv.tv_sec * 1000 + tv.tv_usec / 1000);
*timeout_ms = tms;
lnet_eq_wait_lock();
- cfs_waitq_del(&the_lnet.ln_eq_waitq, &wl);
+ remove_wait_queue(&the_lnet.ln_eq_waitq, &wl);
return wait;
}
lnet_prune_rc_data(0); /* don't wait for UNLINK */
- /* Call cfs_pause() here always adds 1 to load average
- * because kernel counts # active tasks as nr_running
- * + nr_uninterruptible. */
- cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
- cfs_time_seconds(1));
- }
+	/* Calling cfs_pause() here always adds 1 to the load average
+	 * because the kernel counts # active tasks as nr_running
+	 * + nr_uninterruptible. */
+ schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
+ cfs_time_seconds(1));
+ }
LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING);
* I'm just a poor body and nobody loves me */
spin_unlock(&rpc->crpc_lock);
- /* release it */
- lstcon_rpc_put(crpc);
- return;
- }
+ /* release it */
+ lstcon_rpc_put(crpc);
+ return;
+ }
- /* not an orphan RPC */
- crpc->crp_finished = 1;
+ /* not an orphan RPC */
+ crpc->crp_finished = 1;
- if (crpc->crp_stamp == 0) {
- /* not aborted */
- LASSERT (crpc->crp_status == 0);
+ if (crpc->crp_stamp == 0) {
+ /* not aborted */
+ LASSERT (crpc->crp_status == 0);
- crpc->crp_stamp = cfs_time_current();
- crpc->crp_status = rpc->crpc_status;
- }
+ crpc->crp_stamp = cfs_time_current();
+ crpc->crp_status = rpc->crpc_status;
+ }
- /* wakeup (transaction)thread if I'm the last RPC in the transaction */
- if (cfs_atomic_dec_and_test(&crpc->crp_trans->tas_remaining))
- cfs_waitq_signal(&crpc->crp_trans->tas_waitq);
+	/* wakeup the (transaction) thread if I'm the last RPC in the transaction */
+ if (cfs_atomic_dec_and_test(&crpc->crp_trans->tas_remaining))
+ wake_up(&crpc->crp_trans->tas_waitq);
spin_unlock(&rpc->crpc_lock);
}
cfs_list_add_tail(&trans->tas_link, &console_session.ses_trans_list);
- CFS_INIT_LIST_HEAD(&trans->tas_rpcs_list);
- cfs_atomic_set(&trans->tas_remaining, 0);
- cfs_waitq_init(&trans->tas_waitq);
+ CFS_INIT_LIST_HEAD(&trans->tas_rpcs_list);
+ cfs_atomic_set(&trans->tas_remaining, 0);
+ init_waitqueue_head(&trans->tas_waitq);
spin_lock(&console_session.ses_rpc_lock);
trans->tas_features = console_session.ses_features;
mutex_unlock(&console_session.ses_mutex);
- cfs_waitq_wait_event_interruptible_timeout(trans->tas_waitq,
- lstcon_rpc_trans_check(trans),
- cfs_time_seconds(timeout), rc);
+ rc = wait_event_interruptible_timeout(trans->tas_waitq,
+ lstcon_rpc_trans_check(trans),
+ cfs_time_seconds(timeout));
rc = (rc > 0)? 0: ((rc < 0)? -EINTR: -ETIMEDOUT);
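
The libcfs macro returned its status through the trailing rc argument; the upstream wait_event_interruptible_timeout() returns it instead, so the call becomes an assignment and the line above folds the kernel convention back into the errno-style result the caller expects. Written out, assuming only the standard kernel semantics of that macro:

	rc = wait_event_interruptible_timeout(trans->tas_waitq,
					      lstcon_rpc_trans_check(trans),
					      cfs_time_seconds(timeout));
	if (rc > 0)		/* condition became true; rc is the time left */
		rc = 0;
	else if (rc == 0)	/* timeout elapsed with the condition still false */
		rc = -ETIMEDOUT;
	else			/* -ERESTARTSYS: interrupted by a signal */
		rc = -EINTR;
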
trans = cfs_list_entry(pacer, lstcon_rpc_trans_t,
tas_link);
- CDEBUG(D_NET, "Session closed, wakeup transaction %s\n",
- lstcon_rpc_trans_name(trans->tas_opc));
+ CDEBUG(D_NET, "Session closed, wakeup transaction %s\n",
+ lstcon_rpc_trans_name(trans->tas_opc));
- cfs_waitq_signal(&trans->tas_waitq);
- }
+ wake_up(&trans->tas_waitq);
+ }
mutex_unlock(&console_session.ses_mutex);
} lstcon_rpc_t;
typedef struct lstcon_rpc_trans {
- cfs_list_t tas_olink; /* link chain on owner list */
- cfs_list_t tas_link; /* link chain on global list */
- int tas_opc; /* operation code of transaction */
+ cfs_list_t tas_olink; /* link chain on owner list */
+ cfs_list_t tas_link; /* link chain on global list */
+ int tas_opc; /* operation code of transaction */
/* features mask is uptodate */
unsigned tas_feats_updated;
/* test features mask */
unsigned tas_features;
- cfs_waitq_t tas_waitq; /* wait queue head */
- cfs_atomic_t tas_remaining; /* # of un-scheduled rpcs */
- cfs_list_t tas_rpcs_list; /* queued requests */
+ wait_queue_head_t tas_waitq; /* wait queue head */
+ cfs_atomic_t tas_remaining; /* # of un-scheduled rpcs */
+ cfs_list_t tas_rpcs_list; /* queued requests */
} lstcon_rpc_trans_t;
#define LST_TRANS_PRIVATE 0x1000
while (rpc->crpc_timeout != 0) {
spin_unlock(&rpc->crpc_lock);
- cfs_schedule();
+ schedule();
spin_lock(&rpc->crpc_lock);
}
(STTIMER_NSLOTS - 1))])
struct st_timer_data {
- spinlock_t stt_lock;
- /* start time of the slot processed previously */
- cfs_time_t stt_prev_slot;
- cfs_list_t stt_hash[STTIMER_NSLOTS];
- int stt_shuttingdown;
+ spinlock_t stt_lock;
+ /* start time of the slot processed previously */
+ cfs_time_t stt_prev_slot;
+ cfs_list_t stt_hash[STTIMER_NSLOTS];
+ int stt_shuttingdown;
#ifdef __KERNEL__
- cfs_waitq_t stt_waitq;
- int stt_nthreads;
+ wait_queue_head_t stt_waitq;
+ int stt_nthreads;
#endif
} stt_data;
cfs_block_allsigs();
- while (!stt_data.stt_shuttingdown) {
- stt_check_timers(&stt_data.stt_prev_slot);
+ while (!stt_data.stt_shuttingdown) {
+ stt_check_timers(&stt_data.stt_prev_slot);
- cfs_waitq_wait_event_timeout(stt_data.stt_waitq,
- stt_data.stt_shuttingdown,
- cfs_time_seconds(STTIMER_SLOTTIME),
- rc);
- rc = 0; /* Discard jiffies remaining before timeout. */
- }
+ rc = wait_event_timeout(stt_data.stt_waitq,
+ stt_data.stt_shuttingdown,
+ cfs_time_seconds(STTIMER_SLOTTIME));
+ }
spin_lock(&stt_data.stt_lock);
stt_data.stt_nthreads--;
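
The timer thread's conversion is the same shape for the non-interruptible macro: wait_event_timeout() returns 0 when the slot time expires and the remaining jiffies (at least 1) when stt_shuttingdown becomes true first, so the old line that zeroed the out-parameter can simply be dropped; the loop condition re-checks the flag anyway. Equivalently:

	rc = wait_event_timeout(stt_data.stt_waitq,
				stt_data.stt_shuttingdown,
				cfs_time_seconds(STTIMER_SLOTTIME));
	/* rc == 0: slot time elapsed, scan the next timer slot;
	 * rc  > 0: woken early because shutdown started, and the
	 *          while (!stt_data.stt_shuttingdown) test exits the loop. */
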
CFS_INIT_LIST_HEAD(&stt_data.stt_hash[i]);
#ifdef __KERNEL__
- stt_data.stt_nthreads = 0;
- cfs_waitq_init(&stt_data.stt_waitq);
- rc = stt_start_timer_thread();
- if (rc != 0)
- CERROR ("Can't spawn timer thread: %d\n", rc);
+ stt_data.stt_nthreads = 0;
+ init_waitqueue_head(&stt_data.stt_waitq);
+ rc = stt_start_timer_thread();
+ if (rc != 0)
+ CERROR ("Can't spawn timer thread: %d\n", rc);
#endif
return rc;
stt_data.stt_shuttingdown = 1;
#ifdef __KERNEL__
- cfs_waitq_signal(&stt_data.stt_waitq);
- lst_wait_until(stt_data.stt_nthreads == 0, stt_data.stt_lock,
- "waiting for %d threads to terminate\n",
- stt_data.stt_nthreads);
+ wake_up(&stt_data.stt_waitq);
+ lst_wait_until(stt_data.stt_nthreads == 0, stt_data.stt_lock,
+ "waiting for %d threads to terminate\n",
+ stt_data.stt_nthreads);
#endif
spin_unlock(&stt_data.stt_lock);
}
static int seq_fid_alloc_prep(struct lu_client_seq *seq,
- cfs_waitlink_t *link)
+ wait_queue_t *link)
{
- if (seq->lcs_update) {
- cfs_waitq_add(&seq->lcs_waitq, link);
- cfs_set_current_state(CFS_TASK_UNINT);
+ if (seq->lcs_update) {
+ add_wait_queue(&seq->lcs_waitq, link);
+ set_current_state(TASK_UNINTERRUPTIBLE);
mutex_unlock(&seq->lcs_mutex);
- cfs_waitq_wait(link, CFS_TASK_UNINT);
+ waitq_wait(link, TASK_UNINTERRUPTIBLE);
mutex_lock(&seq->lcs_mutex);
- cfs_waitq_del(&seq->lcs_waitq, link);
- cfs_set_current_state(CFS_TASK_RUNNING);
- return -EAGAIN;
- }
- ++seq->lcs_update;
+ remove_wait_queue(&seq->lcs_waitq, link);
+ set_current_state(TASK_RUNNING);
+ return -EAGAIN;
+ }
+ ++seq->lcs_update;
mutex_unlock(&seq->lcs_mutex);
- return 0;
+ return 0;
}
static void seq_fid_alloc_fini(struct lu_client_seq *seq)
{
- LASSERT(seq->lcs_update == 1);
+ LASSERT(seq->lcs_update == 1);
mutex_lock(&seq->lcs_mutex);
- --seq->lcs_update;
- cfs_waitq_signal(&seq->lcs_waitq);
+ --seq->lcs_update;
+ wake_up(&seq->lcs_waitq);
}
/**
* Allocate the whole seq to the caller.
**/
int seq_client_get_seq(const struct lu_env *env,
- struct lu_client_seq *seq, seqno_t *seqnr)
+ struct lu_client_seq *seq, seqno_t *seqnr)
{
- cfs_waitlink_t link;
- int rc;
+ wait_queue_t link;
+ int rc;
- LASSERT(seqnr != NULL);
+ LASSERT(seqnr != NULL);
mutex_lock(&seq->lcs_mutex);
- cfs_waitlink_init(&link);
+ init_waitqueue_entry_current(&link);
while (1) {
rc = seq_fid_alloc_prep(seq, &link);
/* Allocate new fid on passed client @seq and save it to @fid. */
int seq_client_alloc_fid(const struct lu_env *env,
- struct lu_client_seq *seq, struct lu_fid *fid)
+ struct lu_client_seq *seq, struct lu_fid *fid)
{
- cfs_waitlink_t link;
- int rc;
- ENTRY;
+ wait_queue_t link;
+ int rc;
+ ENTRY;
- LASSERT(seq != NULL);
- LASSERT(fid != NULL);
+ LASSERT(seq != NULL);
+ LASSERT(fid != NULL);
- cfs_waitlink_init(&link);
+ init_waitqueue_entry_current(&link);
mutex_lock(&seq->lcs_mutex);
if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_EXHAUST))
*/
void seq_client_flush(struct lu_client_seq *seq)
{
- cfs_waitlink_t link;
+ wait_queue_t link;
- LASSERT(seq != NULL);
- cfs_waitlink_init(&link);
+ LASSERT(seq != NULL);
+ init_waitqueue_entry_current(&link);
mutex_lock(&seq->lcs_mutex);
- while (seq->lcs_update) {
- cfs_waitq_add(&seq->lcs_waitq, &link);
- cfs_set_current_state(CFS_TASK_UNINT);
+ while (seq->lcs_update) {
+ add_wait_queue(&seq->lcs_waitq, &link);
+ set_current_state(TASK_UNINTERRUPTIBLE);
mutex_unlock(&seq->lcs_mutex);
- cfs_waitq_wait(&link, CFS_TASK_UNINT);
+ waitq_wait(&link, TASK_UNINTERRUPTIBLE);
mutex_lock(&seq->lcs_mutex);
- cfs_waitq_del(&seq->lcs_waitq, &link);
- cfs_set_current_state(CFS_TASK_RUNNING);
- }
+ remove_wait_queue(&seq->lcs_waitq, &link);
+ set_current_state(TASK_RUNNING);
+ }
fid_zero(&seq->lcs_fid);
/**
else
seq->lcs_width = LUSTRE_DATA_SEQ_MAX_WIDTH;
- cfs_waitq_init(&seq->lcs_waitq);
+ init_waitqueue_head(&seq->lcs_waitq);
/* Make sure that things are clear before work is started. */
seq_client_flush(seq);
static void fld_enter_request(struct client_obd *cli)
{
- struct mdc_cache_waiter mcw;
- struct l_wait_info lwi = { 0 };
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
- cfs_list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
- cfs_waitq_init(&mcw.mcw_waitq);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- l_wait_event(mcw.mcw_waitq, fld_req_avail(cli, &mcw), &lwi);
- } else {
- cli->cl_r_in_flight++;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- }
+ struct mdc_cache_waiter mcw;
+ struct l_wait_info lwi = { 0 };
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
+ cfs_list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
+ init_waitqueue_head(&mcw.mcw_waitq);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ l_wait_event(mcw.mcw_waitq, fld_req_avail(cli, &mcw), &lwi);
+ } else {
+ cli->cl_r_in_flight++;
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ }
}
static void fld_exit_request(struct client_obd *cli)
{
- cfs_list_t *l, *tmp;
- struct mdc_cache_waiter *mcw;
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- cli->cl_r_in_flight--;
- cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
-
- if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
- /* No free request slots anymore */
- break;
- }
-
- mcw = cfs_list_entry(l, struct mdc_cache_waiter, mcw_entry);
- cfs_list_del_init(&mcw->mcw_entry);
- cli->cl_r_in_flight++;
- cfs_waitq_signal(&mcw->mcw_waitq);
- }
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ cfs_list_t *l, *tmp;
+ struct mdc_cache_waiter *mcw;
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ cli->cl_r_in_flight--;
+ cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
+
+ if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
+ /* No free request slots anymore */
+ break;
+ }
+
+ mcw = cfs_list_entry(l, struct mdc_cache_waiter, mcw_entry);
+ cfs_list_del_init(&mcw->mcw_entry);
+ cli->cl_r_in_flight++;
+ wake_up(&mcw->mcw_waitq);
+ }
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
}
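
After conversion, fld_enter_request()/fld_exit_request() remain a small in-flight throttle: enter either claims one of the cl_max_rpcs_in_flight slots or queues its mdc_cache_waiter and sleeps on the per-waiter queue, and exit frees a slot and wakes the oldest waiter. A typical caller brackets the RPC like this; the lookup call in the middle is a placeholder, not a function from this patch:

	fld_enter_request(cli);			/* may sleep until an RPC slot is free */
	rc = my_fld_lookup_rpc(fld, seq);	/* placeholder for the actual FLD RPC */
	fld_exit_request(cli);			/* free the slot, wake one queued waiter */
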
static int fld_rrb_hash(struct lu_client_fld *fld,
*/
struct cl_lock_descr cll_descr;
/** Protected by cl_lock::cll_guard. */
- enum cl_lock_state cll_state;
- /** signals state changes. */
- cfs_waitq_t cll_wq;
- /**
- * Recursive lock, most fields in cl_lock{} are protected by this.
- *
- * Locking rules: this mutex is never held across network
- * communication, except when lock is being canceled.
- *
- * Lock ordering: a mutex of a sub-lock is taken first, then a mutex
- * on a top-lock. Other direction is implemented through a
- * try-lock-repeat loop. Mutices of unrelated locks can be taken only
- * by try-locking.
- *
- * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
- */
+ enum cl_lock_state cll_state;
+ /** signals state changes. */
+ wait_queue_head_t cll_wq;
+ /**
+ * Recursive lock, most fields in cl_lock{} are protected by this.
+ *
+ * Locking rules: this mutex is never held across network
+ * communication, except when lock is being canceled.
+ *
+ * Lock ordering: a mutex of a sub-lock is taken first, then a mutex
+ * on a top-lock. Other direction is implemented through a
+ * try-lock-repeat loop. Mutices of unrelated locks can be taken only
+ * by try-locking.
+ *
+ * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
+ */
struct mutex cll_guard;
cfs_task_t *cll_guarder;
int cll_depth;
/** barrier of destroy this structure */
cfs_atomic_t csi_barrier;
/** completion to be signaled when transfer is complete. */
- cfs_waitq_t csi_waitq;
+ wait_queue_head_t csi_waitq;
};
void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages);
extern struct task_struct *current;
int in_group_p(gid_t gid);
-#define cfs_set_current_state(foo) do { current->state = foo; } while (0)
+#define set_current_state(foo) do { current->state = foo; } while (0)
#define wait_event_interruptible(wq, condition) \
{ \
struct file_lock *fl_next; /* singly linked list for this inode */
cfs_list_t fl_link; /* doubly linked list of all locks */
cfs_list_t fl_block; /* circular list of blocked processes */
- void *fl_owner;
- unsigned int fl_pid;
- cfs_waitq_t fl_wait;
- struct file *fl_file;
+ void *fl_owner;
+ unsigned int fl_pid;
+ wait_queue_head_t fl_wait;
+ struct file *fl_file;
unsigned char fl_flags;
unsigned char fl_type;
loff_t fl_start;
struct fld;
struct lu_site_bkt_data {
- /**
- * number of busy object on this bucket
- */
- long lsb_busy;
- /**
- * LRU list, updated on each access to object. Protected by
- * bucket lock of lu_site::ls_obj_hash.
- *
- * "Cold" end of LRU is lu_site::ls_lru.next. Accessed object are
- * moved to the lu_site::ls_lru.prev (this is due to the non-existence
- * of list_for_each_entry_safe_reverse()).
- */
- cfs_list_t lsb_lru;
- /**
- * Wait-queue signaled when an object in this site is ultimately
- * destroyed (lu_object_free()). It is used by lu_object_find() to
- * wait before re-trying when object in the process of destruction is
- * found in the hash table.
- *
- * \see htable_lookup().
- */
- cfs_waitq_t lsb_marche_funebre;
+ /**
+	 * number of busy objects on this bucket
+ */
+ long lsb_busy;
+ /**
+ * LRU list, updated on each access to object. Protected by
+ * bucket lock of lu_site::ls_obj_hash.
+ *
+	 * "Cold" end of LRU is lu_site::ls_lru.next. Accessed objects are
+ * moved to the lu_site::ls_lru.prev (this is due to the non-existence
+ * of list_for_each_entry_safe_reverse()).
+ */
+ cfs_list_t lsb_lru;
+ /**
+ * Wait-queue signaled when an object in this site is ultimately
+ * destroyed (lu_object_free()). It is used by lu_object_find() to
+ * wait before re-trying when object in the process of destruction is
+ * found in the hash table.
+ *
+ * \see htable_lookup().
+ */
+ wait_queue_head_t lsb_marche_funebre;
};
enum {
* Wait queue used by __ldlm_namespace_free. Gets woken up every time
* a resource is removed.
*/
- cfs_waitq_t ns_waitq;
+ wait_queue_head_t ns_waitq;
/** LDLM pool structure for this namespace */
struct ldlm_pool ns_pool;
/** Definition of how eagerly unused locks will be released from LRU */
* it's no longer in use. If the lock is not granted, a process sleeps
* on this waitq to learn when it becomes granted.
*/
- cfs_waitq_t l_waitq;
+ wait_queue_head_t l_waitq;
/**
* Seconds. It will be updated if there is any activity related to
/* Seq-server for direct talking */
struct lu_server_seq *lcs_srv;
- /* wait queue for fid allocation and update indicator */
- cfs_waitq_t lcs_waitq;
- int lcs_update;
+ /* wait queue for fid allocation and update indicator */
+ wait_queue_head_t lcs_waitq;
+ int lcs_update;
};
/* server sequence manager interface */
cfs_time_t imp_sec_expire;
/** @} */
- /** Wait queue for those who need to wait for recovery completion */
- cfs_waitq_t imp_recovery_waitq;
+ /** Wait queue for those who need to wait for recovery completion */
+ wait_queue_head_t imp_recovery_waitq;
/** Number of requests currently in-flight */
cfs_atomic_t imp_inflight;
*/
#define __l_wait_event(wq, condition, info, ret, l_add_wait) \
do { \
- cfs_waitlink_t __wait; \
- cfs_duration_t __timeout = info->lwi_timeout; \
- cfs_sigset_t __blocked; \
- int __allow_intr = info->lwi_allow_intr; \
- \
- ret = 0; \
- if (condition) \
- break; \
- \
- cfs_waitlink_init(&__wait); \
- l_add_wait(&wq, &__wait); \
- \
- /* Block all signals (just the non-fatal ones if no timeout). */ \
- if (info->lwi_on_signal != NULL && (__timeout == 0 || __allow_intr)) \
- __blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS); \
- else \
- __blocked = cfs_block_sigsinv(0); \
- \
- for (;;) { \
- unsigned __wstate; \
- \
- __wstate = info->lwi_on_signal != NULL && \
- (__timeout == 0 || __allow_intr) ? \
- CFS_TASK_INTERRUPTIBLE : CFS_TASK_UNINT; \
- \
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE); \
- \
- if (condition) \
- break; \
- \
- if (__timeout == 0) { \
- cfs_waitq_wait(&__wait, __wstate); \
- } else { \
- cfs_duration_t interval = info->lwi_interval? \
- min_t(cfs_duration_t, \
- info->lwi_interval,__timeout):\
- __timeout; \
- cfs_duration_t remaining = cfs_waitq_timedwait(&__wait,\
- __wstate, \
- interval); \
- __timeout = cfs_time_sub(__timeout, \
- cfs_time_sub(interval, remaining));\
- if (__timeout == 0) { \
- if (info->lwi_on_timeout == NULL || \
- info->lwi_on_timeout(info->lwi_cb_data)) { \
- ret = -ETIMEDOUT; \
- break; \
- } \
- /* Take signals after the timeout expires. */ \
- if (info->lwi_on_signal != NULL) \
- (void)cfs_block_sigsinv(LUSTRE_FATAL_SIGS);\
- } \
- } \
+ wait_queue_t __wait; \
+ cfs_duration_t __timeout = info->lwi_timeout; \
+ cfs_sigset_t __blocked; \
+ int __allow_intr = info->lwi_allow_intr; \
+ \
+ ret = 0; \
+ if (condition) \
+ break; \
+ \
+ init_waitqueue_entry_current(&__wait); \
+ l_add_wait(&wq, &__wait); \
+ \
+ /* Block all signals (just the non-fatal ones if no timeout). */ \
+ if (info->lwi_on_signal != NULL && (__timeout == 0 || __allow_intr)) \
+ __blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS); \
+ else \
+ __blocked = cfs_block_sigsinv(0); \
+ \
+ for (;;) { \
+ unsigned __wstate; \
+ \
+ __wstate = info->lwi_on_signal != NULL && \
+ (__timeout == 0 || __allow_intr) ? \
+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; \
+ \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ \
+ if (condition) \
+ break; \
+ \
+ if (__timeout == 0) { \
+ waitq_wait(&__wait, __wstate); \
+ } else { \
+ cfs_duration_t interval = info->lwi_interval? \
+ min_t(cfs_duration_t, \
+ info->lwi_interval,__timeout):\
+ __timeout; \
+ cfs_duration_t remaining = waitq_timedwait(&__wait, \
+ __wstate, \
+ interval); \
+ __timeout = cfs_time_sub(__timeout, \
+ cfs_time_sub(interval, remaining));\
+ if (__timeout == 0) { \
+ if (info->lwi_on_timeout == NULL || \
+ info->lwi_on_timeout(info->lwi_cb_data)) { \
+ ret = -ETIMEDOUT; \
+ break; \
+ } \
+ /* Take signals after the timeout expires. */ \
+ if (info->lwi_on_signal != NULL) \
+ (void)cfs_block_sigsinv(LUSTRE_FATAL_SIGS);\
+ } \
+ } \
\
if (condition) \
break; \
\
cfs_restore_sigs(__blocked); \
\
- cfs_set_current_state(CFS_TASK_RUNNING); \
- cfs_waitq_del(&wq, &__wait); \
+ set_current_state(TASK_RUNNING); \
+ remove_wait_queue(&wq, &__wait); \
} while (0)
#else /* !__KERNEL__ */
#define l_wait_event(wq, condition, info) \
({ \
- int __ret; \
- struct l_wait_info *__info = (info); \
- \
- __l_wait_event(wq, condition, __info, \
- __ret, cfs_waitq_add); \
- __ret; \
+ int __ret; \
+ struct l_wait_info *__info = (info); \
+ \
+ __l_wait_event(wq, condition, __info, \
+ __ret, add_wait_queue); \
+ __ret; \
})
#define l_wait_event_exclusive(wq, condition, info) \
({ \
- int __ret; \
- struct l_wait_info *__info = (info); \
- \
- __l_wait_event(wq, condition, __info, \
- __ret, cfs_waitq_add_exclusive); \
- __ret; \
+ int __ret; \
+ struct l_wait_info *__info = (info); \
+ \
+ __l_wait_event(wq, condition, __info, \
+ __ret, add_wait_queue_exclusive); \
+ __ret; \
})
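
Callers of these wrappers are untouched by the rename; the usual shape, visible in the fld_enter_request() hunk above, is a zeroed l_wait_info plus a condition expression. A minimal sketch with placeholder names:

	wait_queue_head_t my_waitq;		/* placeholder queue */
	struct l_wait_info lwi = { 0 };		/* no timeout, no signal callback */
	int rc;

	init_waitqueue_head(&my_waitq);

	/* With a zeroed l_wait_info the macro sleeps with all signals
	 * blocked and no timeout (effectively uninterruptible), returning
	 * 0 once the condition evaluates true; -ETIMEDOUT is only possible
	 * when lwi_timeout is set. */
	rc = l_wait_event(my_waitq, my_condition_is_true(), &lwi);
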
#define l_wait_event_exclusive_head(wq, condition, info) \
({ \
-	int __ret;							\