# s/\bcfs_module\b/declare_module/g
s/\bcfs_request_module\b/request_module/g
/#[ \t]*define[ \t]*\brequest_module\b[ \t]*\brequest_module\b/d
+# Wait Queue
+s/\bCFS_TASK_INTERRUPTIBLE\b/TASK_INTERRUPTIBLE/g
+/#[ \t]*define[ \t]*\bTASK_INTERRUPTIBLE\b[ \t]*\bTASK_INTERRUPTIBLE\b/d
+s/\bCFS_TASK_UNINT\b/TASK_UNINTERRUPTIBLE/g
+/#[ \t]*define[ \t]*\bTASK_UNINTERRUPTIBLE\b[ \t]*\bTASK_UNINTERRUPTIBLE\b/d
+s/\bCFS_TASK_RUNNING\b/TASK_RUNNING/g
+/#[ \t]*define[ \t]*\bTASK_RUNNING\b[ \t]*\bTASK_RUNNING\b/d
+s/\bcfs_set_current_state\b/set_current_state/g
+/#[ \t]*define[ \t]*\bset_current_state\b *( *\w* *)[ \t]*\bset_current_state\b *( *\w* *)/d
+s/\bcfs_wait_event\b/wait_event/g
+/#[ \t]*define[ \t]*\bwait_event\b *( *\w* *, *\w* *)[ \t]*\bwait_event\b *( *\w* *, *\w* *)/d
+s/\bcfs_waitlink_t\b/wait_queue_t/g
+/typedef[ \t]*\bwait_queue_t\b[ \t]*\bwait_queue_t\b/d
+s/\bcfs_waitq_t\b/wait_queue_head_t/g
+/typedef[ \t]*\bwait_queue_head_t\b[ \t]*\bwait_queue_head_t\b/d
+#s/\bcfs_task_state_t\b/task_state_t/g
+s/\bcfs_waitq_init\b/init_waitqueue_head/g
+/#[ \t]*define[ \t]*\binit_waitqueue_head\b *( *\w* *)[ \t]*\binit_waitqueue_head\b *( *\w* *)/d
+s/\bcfs_waitlink_init\b/init_waitqueue_entry_current/g
+s/\bcfs_waitq_add\b/add_wait_queue/g
+/#[ \t]*define[ \t]*\badd_wait_queue\b *( *\w* *, *\w* *)[ \t]*\badd_wait_queue\b *( *\w* *, *\w* *)/d
+s/\bcfs_waitq_add_exclusive\b/add_wait_queue_exclusive/g
+/#[ \t]*define[ \t]*\badd_wait_queue_exclusive\b *( *\w* *, *\w* *)[ \t]*\badd_wait_queue_exclusive\b *( *\w* *, *\w* *)/d
+s/\bcfs_waitq_del\b/remove_wait_queue/g
+/#[ \t]*define[ \t]*\bremove_wait_queue\b *( *\w* *, *\w* *)[ \t]*\bremove_wait_queue\b *( *\w* *, *\w* *)/d
+s/\bcfs_waitq_active\b/waitqueue_active/g
+/#[ \t]*define[ \t]*\bwaitqueue_active\b *( *\w* *)[ \t]*\bwaitqueue_active\b *( *\w* *)/d
+s/\bcfs_waitq_signal\b/wake_up/g
+/#[ \t]*define[ \t]*\bwake_up\b *( *\w* *)[ \t]*\bwake_up\b *( *\w* *)/d
+s/\bcfs_waitq_signal_nr\b/wake_up_nr/g
+/#[ \t]*define[ \t]*\bwake_up_nr\b *( *\w* *, *\w* *)[ \t]*\bwake_up_nr\b *( *\w* *, *\w* *)/d
+s/\bcfs_waitq_broadcast\b/wake_up_all/g
+/#[ \t]*define[ \t]*\bwake_up_all\b *( *\w* *)[ \t]*\bwake_up_all\b *( *\w* *)/d
+s/\bcfs_waitq_wait\b/waitq_wait/g
+s/\bcfs_waitq_timedwait\b/waitq_timedwait/g
+s/\bcfs_schedule_timeout\b/schedule_timeout/g
+/#[ \t]*define[ \t]*\bschedule_timeout\b *( *\w* *)[ \t]*\bschedule_timeout\b *( *\w* *)/d
+s/\bcfs_schedule\b/schedule/g
+/#[ \t]*define[ \t]*\bschedule\b *( *)[ \t]*\bschedule\b *( *)/d
+s/\bcfs_need_resched\b/need_resched/g
+/#[ \t]*define[ \t]*\bneed_resched\b *( *)[ \t]*\bneed_resched\b *( *)/d
+s/\bcfs_cond_resched\b/cond_resched/g
+/#[ \t]*define[ \t]*\bcond_resched\b *( *)[ \t]*\bcond_resched\b *( *)/d
+s/\bcfs_waitq_add_exclusive_head\b/add_wait_queue_exclusive_head/g
+s/\bcfs_schedule_timeout_and_set_state\b/schedule_timeout_and_set_state/g
+s/\bCFS_MAX_SCHEDULE_TIMEOUT\b/MAX_SCHEDULE_TIMEOUT/g
+s/\bcfs_task_state_t\b/long/g
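For reference, the net effect of the wait-queue substitutions above on a typical libcfs wait loop is sketched below. This is illustrative only and not part of the script or of the patch: the queue name example_waitq is hypothetical, but the before/after pattern matches the hunks later in this change.

    /* before: libcfs wrapper API (illustrative only) */
    cfs_waitlink_t wait;

    cfs_waitlink_init(&wait);
    cfs_waitq_add(&example_waitq, &wait);
    cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
    cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, cfs_time_seconds(1));
    cfs_waitq_del(&example_waitq, &wait);
    cfs_set_current_state(CFS_TASK_RUNNING);

    /* after: native kernel names produced by the substitutions above */
    wait_queue_t wait;

    init_waitqueue_entry_current(&wait);
    add_wait_queue(&example_waitq, &wait);
    set_current_state(TASK_INTERRUPTIBLE);
    waitq_timedwait(&wait, TASK_INTERRUPTIBLE, cfs_time_seconds(1));
    remove_wait_queue(&example_waitq, &wait);
    set_current_state(TASK_RUNNING);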
*/
typedef struct cfs_waitq {
struct ksleep_chan wq_ksleep_chan;
-} cfs_waitq_t;
+} wait_queue_head_t;
typedef struct cfs_waitlink {
struct cfs_waitq *wl_waitq;
struct ksleep_link wl_ksleep_link;
-} cfs_waitlink_t;
+} wait_queue_t;
-typedef int cfs_task_state_t;
+#define TASK_INTERRUPTIBLE THREAD_ABORTSAFE
+#define TASK_UNINTERRUPTIBLE THREAD_UNINT
-#define CFS_TASK_INTERRUPTIBLE THREAD_ABORTSAFE
-#define CFS_TASK_UNINT THREAD_UNINT
+void init_waitqueue_head(struct cfs_waitq *waitq);
+void init_waitqueue_entry_current(struct cfs_waitlink *link);
-void cfs_waitq_init(struct cfs_waitq *waitq);
-void cfs_waitlink_init(struct cfs_waitlink *link);
-
-void cfs_waitq_add(struct cfs_waitq *waitq, struct cfs_waitlink *link);
-void cfs_waitq_add_exclusive(struct cfs_waitq *waitq,
+void add_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+void add_wait_queue_exclusive(struct cfs_waitq *waitq,
struct cfs_waitlink *link);
-void cfs_waitq_del(struct cfs_waitq *waitq, struct cfs_waitlink *link);
-int cfs_waitq_active(struct cfs_waitq *waitq);
+void remove_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+int waitqueue_active(struct cfs_waitq *waitq);
-void cfs_waitq_signal(struct cfs_waitq *waitq);
-void cfs_waitq_signal_nr(struct cfs_waitq *waitq, int nr);
-void cfs_waitq_broadcast(struct cfs_waitq *waitq);
+void wake_up(struct cfs_waitq *waitq);
+void wake_up_nr(struct cfs_waitq *waitq, int nr);
+void wake_up_all(struct cfs_waitq *waitq);
-void cfs_waitq_wait(struct cfs_waitlink *link, cfs_task_state_t state);
-cfs_duration_t cfs_waitq_timedwait(struct cfs_waitlink *link,
- cfs_task_state_t state,
+void waitq_wait(struct cfs_waitlink *link, long state);
+cfs_duration_t waitq_timedwait(struct cfs_waitlink *link,
+ long state,
cfs_duration_t timeout);
/*
extern void thread_set_timer_deadline(__u64 deadline);
extern void thread_cancel_timer(void);
-static inline int cfs_schedule_timeout(int state, int64_t timeout)
+static inline int schedule_timeout(int state, int64_t timeout)
{
int result;
return result;
}
-#define cfs_schedule() cfs_schedule_timeout(CFS_TASK_UNINT, CFS_TICK)
-#define cfs_pause(tick) cfs_schedule_timeout(CFS_TASK_UNINT, tick)
+#define schedule() schedule_timeout(TASK_UNINTERRUPTIBLE, CFS_TICK)
+#define cfs_pause(tick) schedule_timeout(TASK_UNINTERRUPTIBLE, tick)
#define __wait_event(wq, condition) \
do { \
struct cfs_waitlink __wait; \
\
- cfs_waitlink_init(&__wait); \
+ init_waitqueue_entry_current(&__wait); \
for (;;) { \
- cfs_waitq_add(&wq, &__wait); \
+ add_wait_queue(&wq, &__wait); \
if (condition) \
break; \
- cfs_waitq_wait(&__wait, CFS_TASK_UNINT); \
- cfs_waitq_del(&wq, &__wait); \
+ waitq_wait(&__wait, TASK_UNINTERRUPTIBLE); \
+ remove_wait_queue(&wq, &__wait); \
} \
- cfs_waitq_del(&wq, &__wait); \
+ remove_wait_queue(&wq, &__wait); \
} while (0)
#define wait_event(wq, condition) \
do { \
struct cfs_waitlink __wait; \
\
- cfs_waitlink_init(&__wait); \
+ init_waitqueue_entry_current(&__wait); \
for (;;) { \
if (ex == 0) \
- cfs_waitq_add(&wq, &__wait); \
+ add_wait_queue(&wq, &__wait); \
else \
- cfs_waitq_add_exclusive(&wq, &__wait); \
+ add_wait_queue_exclusive(&wq, &__wait); \
if (condition) \
break; \
if (!cfs_signal_pending()) { \
- cfs_waitq_wait(&__wait, \
- CFS_TASK_INTERRUPTIBLE); \
- cfs_waitq_del(&wq, &__wait); \
+ waitq_wait(&__wait, \
+ TASK_INTERRUPTIBLE); \
+ remove_wait_queue(&wq, &__wait); \
continue; \
} \
ret = -ERESTARTSYS; \
break; \
} \
- cfs_waitq_del(&wq, &__wait); \
+ remove_wait_queue(&wq, &__wait); \
} while (0)
#define wait_event_interruptible(wq, condition) \
} while (0)
/* used in couple of places */
-static inline void sleep_on(cfs_waitq_t *waitq)
+static inline void sleep_on(wait_queue_head_t *waitq)
{
- cfs_waitlink_t link;
+ wait_queue_t link;
- cfs_waitlink_init(&link);
- cfs_waitq_add(waitq, &link);
- cfs_waitq_wait(&link, CFS_TASK_UNINT);
- cfs_waitq_del(waitq, &link);
+ init_waitqueue_entry_current(&link);
+ add_wait_queue(waitq, &link);
+ waitq_wait(&link, TASK_UNINTERRUPTIBLE);
+ remove_wait_queue(waitq, &link);
}
/*
extern unsigned long cfs_fail_loc;
extern unsigned int cfs_fail_val;
-extern cfs_waitq_t cfs_race_waitq;
+extern wait_queue_head_t cfs_race_waitq;
extern int cfs_race_state;
int __cfs_fail_check_set(__u32 id, __u32 value, int set);
* the first and continues. */
static inline void cfs_race(__u32 id)
{
-
- if (CFS_FAIL_PRECHECK(id)) {
- if (unlikely(__cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET))) {
- int rc;
- cfs_race_state = 0;
- CERROR("cfs_race id %x sleeping\n", id);
+ if (CFS_FAIL_PRECHECK(id)) {
+ if (unlikely(__cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET))) {
+ int rc;
+ cfs_race_state = 0;
+ CERROR("cfs_race id %x sleeping\n", id);
rc = wait_event_interruptible(cfs_race_waitq,
cfs_race_state != 0);
- CERROR("cfs_fail_race id %x awake, rc=%d\n", id, rc);
- } else {
- CERROR("cfs_fail_race id %x waking\n", id);
- cfs_race_state = 1;
- cfs_waitq_signal(&cfs_race_waitq);
- }
- }
+ CERROR("cfs_fail_race id %x awake, rc=%d\n", id, rc);
+ } else {
+ CERROR("cfs_fail_race id %x waking\n", id);
+ cfs_race_state = 1;
+ wake_up(&cfs_race_waitq);
+ }
+ }
}
#define CFS_RACE(id) cfs_race(id)
#else
#define __LIBCFS_PRIM_H__
/*
- * Schedule
- */
-void cfs_schedule_timeout_and_set_state(cfs_task_state_t state,
- int64_t timeout);
-void cfs_schedule_timeout(int64_t timeout);
-void cfs_schedule(void);
-void cfs_pause(cfs_duration_t ticks);
-int cfs_need_resched(void);
-void cfs_cond_resched(void);
-
-/*
* Wait Queues
*/
-void cfs_waitq_init(cfs_waitq_t *waitq);
-void cfs_waitlink_init(cfs_waitlink_t *link);
-void cfs_waitq_add(cfs_waitq_t *waitq, cfs_waitlink_t *link);
-void cfs_waitq_add_exclusive(cfs_waitq_t *waitq,
- cfs_waitlink_t *link);
-void cfs_waitq_add_exclusive_head(cfs_waitq_t *waitq,
- cfs_waitlink_t *link);
-void cfs_waitq_del(cfs_waitq_t *waitq, cfs_waitlink_t *link);
-int cfs_waitq_active(cfs_waitq_t *waitq);
-void cfs_waitq_signal(cfs_waitq_t *waitq);
-void cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr);
-void cfs_waitq_broadcast(cfs_waitq_t *waitq);
-void cfs_waitq_wait(cfs_waitlink_t *link, cfs_task_state_t state);
-int64_t cfs_waitq_timedwait(cfs_waitlink_t *link, cfs_task_state_t state,
- int64_t timeout);
-
/*
* Timer
*/
/*
* Wait Queue
*/
-#define CFS_TASK_INTERRUPTIBLE TASK_INTERRUPTIBLE
-#define CFS_TASK_UNINT TASK_UNINTERRUPTIBLE
-#define CFS_TASK_RUNNING TASK_RUNNING
-#define cfs_set_current_state(state) set_current_state(state)
-#define cfs_wait_event(wq, cond) wait_event(wq, cond)
-
-typedef wait_queue_t cfs_waitlink_t;
-typedef wait_queue_head_t cfs_waitq_t;
-typedef long cfs_task_state_t;
#define CFS_DECL_WAITQ(wq) DECLARE_WAIT_QUEUE_HEAD(wq)
+#define LIBCFS_WQITQ_MACROS 1
+#define init_waitqueue_entry_current(w) init_waitqueue_entry(w, current)
+#define waitq_wait(w, s) schedule()
+#define waitq_timedwait(w, s, t) schedule_timeout(t)
+
+#ifndef HAVE___ADD_WAIT_QUEUE_EXCLUSIVE
+static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
+ wait_queue_t *wait)
+{
+ wait->flags |= WQ_FLAG_EXCLUSIVE;
+ __add_wait_queue(q, wait);
+}
+#endif /* HAVE___ADD_WAIT_QUEUE_EXCLUSIVE */
+
+/**
+ * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
+ * waiting threads, which is not always desirable because all threads will
+ * be woken up again and again, even if the user only needs a few of them
+ * to be active most of the time. This is not good for performance because
+ * the cache can be polluted by different threads.
+ *
+ * A LIFO list can resolve this problem because we always wake up the most
+ * recently active thread by default.
+ *
+ * NB: please don't call non-exclusive & exclusive wait on the same
+ * waitq if add_wait_queue_exclusive_head is used.
+ */
+#define add_wait_queue_exclusive_head(waitq, link) \
+{ \
+ unsigned long flags; \
+ \
+ spin_lock_irqsave(&((waitq)->lock), flags); \
+ __add_wait_queue_exclusive(waitq, link); \
+ spin_unlock_irqrestore(&((waitq)->lock), flags); \
+}
+
+#define schedule_timeout_and_set_state(state, timeout) \
+{ \
+ set_current_state(state); \
+ schedule_timeout(timeout); \
+}
+
+/* deschedule for a bit... */
+#define cfs_pause(ticks) \
+{ \
+ set_current_state(TASK_UNINTERRUPTIBLE); \
+ schedule_timeout(ticks); \
+}
+
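A minimal usage sketch for the helpers defined above. The names worker_waitq and worker_has_work are hypothetical and not part of this patch; the loop shape mirrors the wait loops elsewhere in this change.

    /* sketch only: a worker parking itself on a LIFO exclusive wait queue */
    static DECLARE_WAIT_QUEUE_HEAD(worker_waitq);   /* hypothetical */
    static int worker_has_work;                     /* hypothetical */

    static void worker_wait_for_work(void)
    {
            wait_queue_t wait;

            init_waitqueue_entry_current(&wait);
            /* LIFO add: the most recently idle worker is woken first */
            add_wait_queue_exclusive_head(&worker_waitq, &wait);
            while (!worker_has_work) {
                    /* sets the task state, then sleeps for up to one second */
                    schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
                                                   cfs_time_seconds(1));
            }
            remove_wait_queue(&worker_waitq, &wait);
            set_current_state(TASK_RUNNING);
    }

    /* a producer would set worker_has_work = 1 and call wake_up(&worker_waitq) */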
/*
* Task struct
*/
*/
typedef struct timer_list cfs_timer_t;
-#define CFS_MAX_SCHEDULE_TIMEOUT MAX_SCHEDULE_TIMEOUT
-
-#ifndef wait_event_timeout /* Only for RHEL3 2.4.21 kernel */
-#define __wait_event_timeout(wq, condition, timeout, ret) \
-do { \
- int __ret = 0; \
- if (!(condition)) { \
- wait_queue_t __wait; \
- unsigned long expire; \
- \
- init_waitqueue_entry(&__wait, current); \
- expire = timeout + jiffies; \
- add_wait_queue(&wq, &__wait); \
- for (;;) { \
- set_current_state(TASK_UNINTERRUPTIBLE); \
- if (condition) \
- break; \
- if (jiffies > expire) { \
- ret = jiffies - expire; \
- break; \
- } \
- schedule_timeout(timeout); \
- } \
- current->state = TASK_RUNNING; \
- remove_wait_queue(&wq, &__wait); \
- } \
-} while (0)
-/*
- retval == 0; condition met; we're good.
- retval > 0; timed out.
-*/
-#define cfs_waitq_wait_event_timeout(wq, condition, timeout, ret) \
-do { \
- ret = 0; \
- if (!(condition)) \
- __wait_event_timeout(wq, condition, timeout, ret); \
-} while (0)
-#else
-#define cfs_waitq_wait_event_timeout(wq, condition, timeout, ret) \
- ret = wait_event_timeout(wq, condition, timeout)
-#endif
-
-#define cfs_waitq_wait_event_interruptible_timeout(wq, c, timeout, ret) \
- ret = wait_event_interruptible_timeout(wq, c, timeout)
-
/*
* atomic
*/
};
struct upcall_cache_entry {
- cfs_list_t ue_hash;
- __u64 ue_key;
- cfs_atomic_t ue_refcount;
- int ue_flags;
- cfs_waitq_t ue_waitq;
- cfs_time_t ue_acquire_expire;
- cfs_time_t ue_expire;
- union {
- struct md_identity identity;
- } u;
+ cfs_list_t ue_hash;
+ __u64 ue_key;
+ cfs_atomic_t ue_refcount;
+ int ue_flags;
+ wait_queue_head_t ue_waitq;
+ cfs_time_t ue_acquire_expire;
+ cfs_time_t ue_expire;
+ union {
+ struct md_identity identity;
+ } u;
};
#define UC_CACHE_HASH_SIZE (128)
struct completion {
unsigned int done;
- cfs_waitq_t wait;
+ wait_queue_head_t wait;
};
#endif /* HAVE_LIBPTHREAD */
typedef struct cfs_waitlink {
cfs_list_t sleeping;
void *process;
-} cfs_waitlink_t;
+} wait_queue_t;
typedef struct cfs_waitq {
cfs_list_t sleepers;
-} cfs_waitq_t;
-
-#define CFS_DECL_WAITQ(wq) cfs_waitq_t wq
+} wait_queue_head_t;
+
+#define CFS_DECL_WAITQ(wq) wait_queue_head_t wq
+void init_waitqueue_head(struct cfs_waitq *waitq);
+void init_waitqueue_entry_current(struct cfs_waitlink *link);
+void add_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+void add_wait_queue_exclusive(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+void add_wait_queue_exclusive_head(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+void remove_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link);
+int waitqueue_active(struct cfs_waitq *waitq);
+void wake_up(struct cfs_waitq *waitq);
+void wake_up_nr(struct cfs_waitq *waitq, int nr);
+void wake_up_all(struct cfs_waitq *waitq);
+void waitq_wait(struct cfs_waitlink *link, long state);
+int64_t waitq_timedwait(struct cfs_waitlink *link, long state, int64_t timeout);
+void schedule_timeout_and_set_state(long state, int64_t timeout);
+void cfs_pause(cfs_duration_t d);
+int need_resched(void);
+void cond_resched(void);
/*
* Task states
*/
-typedef long cfs_task_state_t;
-
-#define CFS_TASK_INTERRUPTIBLE (0)
-#define CFS_TASK_UNINT (1)
-#define CFS_TASK_RUNNING (2)
+#define TASK_INTERRUPTIBLE (0)
+#define TASK_UNINTERRUPTIBLE (1)
+#define TASK_RUNNING (2)
-static inline void cfs_schedule(void) {}
-static inline void cfs_schedule_timeout(int64_t t) {}
+static inline void schedule(void) {}
+static inline void schedule_timeout(int64_t t) {}
/*
* Lproc
*/
-typedef int cfs_task_state_t;
-
-#define CFS_TASK_INTERRUPTIBLE 0x00000001
-#define CFS_TASK_UNINT 0x00000002
-#define CFS_TASK_RUNNING 0x00000003
-#define CFS_TASK_UNINTERRUPTIBLE CFS_TASK_UNINT
+#define TASK_INTERRUPTIBLE 0x00000001
+#define TASK_UNINTERRUPTIBLE 0x00000002
+#define TASK_RUNNING 0x00000003
+#define CFS_TASK_UNINTERRUPTIBLE TASK_UNINTERRUPTIBLE
#define CFS_WAITQ_MAGIC 'CWQM'
#define CFS_WAITLINK_MAGIC 'CWLM'
spinlock_t guard;
cfs_list_t waiters;
-} cfs_waitq_t;
+} wait_queue_head_t;
-typedef struct cfs_waitlink cfs_waitlink_t;
+typedef struct cfs_waitlink wait_queue_t;
#define CFS_WAITQ_CHANNELS (2)
typedef struct cfs_waitlink_channel {
cfs_list_t link;
- cfs_waitq_t * waitq;
- cfs_waitlink_t * waitl;
+ wait_queue_head_t * waitq;
+ wait_queue_t * waitl;
} cfs_waitlink_channel_t;
struct cfs_waitlink {
CFS_WAITQ_EXCLUSIVE = 1
};
-#define CFS_DECL_WAITQ(name) cfs_waitq_t name
+#define CFS_DECL_WAITQ(name) wait_queue_head_t name
/* Kernel thread */
* Task struct
*/
-#define CFS_MAX_SCHEDULE_TIMEOUT ((long_ptr_t)(~0UL>>12))
-#define cfs_schedule_timeout(t) cfs_schedule_timeout_and_set_state(0, t)
+#define MAX_SCHEDULE_TIMEOUT ((long_ptr_t)(~0UL>>12))
+#define schedule_timeout(t) schedule_timeout_and_set_state(0, t)
struct vfsmount;
#define current cfs_current()
-#define cfs_set_current_state(s) do {;} while (0)
-#define cfs_set_current_state(state) cfs_set_current_state(state)
+#define set_current_state(s) do {;} while (0)
-#define cfs_wait_event(wq, condition) \
+#define wait_event(wq, condition) \
do { \
- cfs_waitlink_t __wait; \
- \
- cfs_waitlink_init(&__wait); \
- while (TRUE) { \
- cfs_waitq_add(&wq, &__wait); \
- if (condition) { \
- break; \
- } \
- cfs_waitq_wait(&__wait, CFS_TASK_INTERRUPTIBLE); \
- cfs_waitq_del(&wq, &__wait); \
- } \
- cfs_waitq_del(&wq, &__wait); \
+ wait_queue_t __wait; \
+ \
+ init_waitqueue_entry_current(&__wait); \
+ while (TRUE) { \
+ add_wait_queue(&wq, &__wait); \
+ if (condition) { \
+ break; \
+ } \
+ waitq_wait(&__wait, TASK_INTERRUPTIBLE); \
+ remove_wait_queue(&wq, &__wait); \
+ } \
+ remove_wait_queue(&wq, &__wait); \
} while(0)
#define wait_event_interruptible(wq, condition) \
{ \
- cfs_waitlink_t __wait; \
+ wait_queue_t __wait; \
\
__ret = 0; \
- cfs_waitlink_init(&__wait); \
+ init_waitqueue_entry_current(&__wait); \
while (TRUE) { \
- cfs_waitq_add(&wq, &__wait); \
+ add_wait_queue(&wq, &__wait); \
if (condition) { \
break; \
} \
- cfs_waitq_wait(&__wait, CFS_TASK_INTERRUPTIBLE);\
- cfs_waitq_del(&wq, &__wait); \
+ waitq_wait(&__wait, TASK_INTERRUPTIBLE);\
+ remove_wait_queue(&wq, &__wait); \
} \
- cfs_waitq_del(&wq, &__wait); \
+ remove_wait_queue(&wq, &__wait); \
__ret; \
}
retval > 0; timed out.
*/
-#define cfs_waitq_wait_event_interruptible_timeout( \
- wq, condition, timeout, rc) \
+#define wait_event_interruptible_timeout(wq, condition, timeout)\
do { \
- cfs_waitlink_t __wait; \
- \
- rc = 0; \
- cfs_waitlink_init(&__wait); \
- while (TRUE) { \
- cfs_waitq_add(&wq, &__wait); \
- if (condition) { \
- break; \
- } \
- if (cfs_waitq_timedwait(&__wait, \
- CFS_TASK_INTERRUPTIBLE, timeout) == 0) { \
- rc = TRUE; \
- break; \
- } \
- cfs_waitq_del(&wq, &__wait); \
- } \
- cfs_waitq_del(&wq, &__wait); \
+ wait_queue_t __wait; \
+ \
+ init_waitqueue_entry_current(&__wait); \
+ while (TRUE) { \
+ add_wait_queue(&wq, &__wait); \
+ if (condition) { \
+ break; \
+ } \
+ if (waitq_timedwait(&__wait, \
+ TASK_INTERRUPTIBLE, timeout) == 0) { \
+ break; \
+ } \
+ remove_wait_queue(&wq, &__wait); \
+ } \
+ remove_wait_queue(&wq, &__wait); \
} while(0)
-
-#define cfs_waitq_wait_event_timeout \
- cfs_waitq_wait_event_interruptible_timeout
-
int init_task_manager();
void cleanup_task_manager();
cfs_task_t * cfs_current();
int wake_up_process(cfs_task_t * task);
-void sleep_on(cfs_waitq_t *waitq);
+void sleep_on(wait_queue_head_t *waitq);
#define cfs_might_sleep() do {} while(0)
#define CFS_DECL_JOURNAL_DATA
#define CFS_PUSH_JOURNAL do {;} while(0)
void lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
{
- libcfs_catastrophe = 1;
- CEMERG("LBUG: pid: %u thread: %#x\n",
+ libcfs_catastrophe = 1;
+ CEMERG("LBUG: pid: %u thread: %#x\n",
(unsigned)current_pid(), (unsigned)current_thread());
- libcfs_debug_dumplog();
- libcfs_run_lbug_upcall(msgdata);
- while (1)
- cfs_schedule();
+ libcfs_debug_dumplog();
+ libcfs_run_lbug_upcall(msgdata);
+ while (1)
+ schedule();
/* panic("lbug_with_loc(%s, %s, %d)", file, func, line) */
}
break; \
} \
spin_unlock(&(pta)->lock); \
- cfs_schedule(); \
+ schedule(); \
} while(1); \
/*
break; \
} \
spin_unlock(&(pta)->lock); \
- cfs_schedule(); \
+ schedule(); \
} while(1)
/*
break; \
} \
spin_unlock(&(pta)->lock); \
- cfs_schedule(); \
+ schedule(); \
} while (1); \
/*
}
#endif /* !__DARWIN8__ */
-void cfs_waitq_init(struct cfs_waitq *waitq)
+void init_waitqueue_head(struct cfs_waitq *waitq)
{
ksleep_chan_init(&waitq->wq_ksleep_chan);
}
-void cfs_waitlink_init(struct cfs_waitlink *link)
+void init_waitqueue_entry_current(struct cfs_waitlink *link)
{
ksleep_link_init(&link->wl_ksleep_link);
}
-void cfs_waitq_add(struct cfs_waitq *waitq, struct cfs_waitlink *link)
+void add_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link)
{
- link->wl_waitq = waitq;
+ link->wl_waitq = waitq;
ksleep_add(&waitq->wq_ksleep_chan, &link->wl_ksleep_link);
}
-void cfs_waitq_add_exclusive(struct cfs_waitq *waitq,
- struct cfs_waitlink *link)
+void add_wait_queue_exclusive(struct cfs_waitq *waitq,
+ struct cfs_waitlink *link)
{
- link->wl_waitq = waitq;
+ link->wl_waitq = waitq;
link->wl_ksleep_link.flags |= KSLEEP_EXCLUSIVE;
ksleep_add(&waitq->wq_ksleep_chan, &link->wl_ksleep_link);
}
-void cfs_waitq_del(struct cfs_waitq *waitq,
+void remove_wait_queue(struct cfs_waitq *waitq,
struct cfs_waitlink *link)
{
ksleep_del(&waitq->wq_ksleep_chan, &link->wl_ksleep_link);
}
-int cfs_waitq_active(struct cfs_waitq *waitq)
+int waitqueue_active(struct cfs_waitq *waitq)
{
return (1);
}
-void cfs_waitq_signal(struct cfs_waitq *waitq)
+void wake_up(struct cfs_waitq *waitq)
{
/*
* XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
ksleep_wake(&waitq->wq_ksleep_chan);
}
-void cfs_waitq_signal_nr(struct cfs_waitq *waitq, int nr)
+void wake_up_nr(struct cfs_waitq *waitq, int nr)
{
ksleep_wake_nr(&waitq->wq_ksleep_chan, nr);
}
-void cfs_waitq_broadcast(struct cfs_waitq *waitq)
+void wake_up_all(struct cfs_waitq *waitq)
{
ksleep_wake_all(&waitq->wq_ksleep_chan);
}
-void cfs_waitq_wait(struct cfs_waitlink *link, cfs_task_state_t state)
+void waitq_wait(struct cfs_waitlink *link, long state)
{
- ksleep_wait(&link->wl_waitq->wq_ksleep_chan, state);
+ ksleep_wait(&link->wl_waitq->wq_ksleep_chan, state);
}
-cfs_duration_t cfs_waitq_timedwait(struct cfs_waitlink *link,
- cfs_task_state_t state,
+cfs_duration_t waitq_timedwait(struct cfs_waitlink *link,
+ long state,
cfs_duration_t timeout)
{
return ksleep_timedwait(&link->wl_waitq->wq_ksleep_chan,
static int proc_fail_loc SYSCTL_HANDLER_ARGS
{
- int error = 0;
- long old_fail_loc = cfs_fail_loc;
-
- error = sysctl_handle_long(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
- if (!error && req->newptr != USER_ADDR_NULL) {
- if (old_fail_loc != cfs_fail_loc)
- cfs_waitq_signal(&cfs_race_waitq);
- } else if (req->newptr != USER_ADDR_NULL) {
- /* Something was wrong with the write request */
- printf ("sysctl fail loc fault: %d.\n", error);
- } else {
- /* Read request */
- error = SYSCTL_OUT(req, &cfs_fail_loc, sizeof cfs_fail_loc);
- }
- return error;
+ int error = 0;
+ long old_fail_loc = cfs_fail_loc;
+
+ error = sysctl_handle_long(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
+ if (!error && req->newptr != USER_ADDR_NULL) {
+ if (old_fail_loc != cfs_fail_loc)
+ wake_up(&cfs_race_waitq);
+ } else if (req->newptr != USER_ADDR_NULL) {
+ /* Something was wrong with the write request */
+ printf ("sysctl fail loc fault: %d.\n", error);
+ } else {
+ /* Read request */
+ error = SYSCTL_OUT(req, &cfs_fail_loc, sizeof cfs_fail_loc);
+ }
+ return error;
}
/*
}
}
-void ksleep_wait(struct ksleep_chan *chan, cfs_task_state_t state)
+void ksleep_wait(struct ksleep_chan *chan, long state)
{
event_t event;
int result;
* implemented), or waitq was already in the "signalled" state).
*/
int64_t ksleep_timedwait(struct ksleep_chan *chan,
- cfs_task_state_t state,
- __u64 timeout)
+ long state,
+ __u64 timeout)
{
event_t event;
cfs_atomic_t libcfs_kmemory = CFS_ATOMIC_INIT(0);
EXPORT_SYMBOL(libcfs_kmemory);
-static cfs_waitq_t debug_ctlwq;
+static wait_queue_head_t debug_ctlwq;
char libcfs_debug_file_path_arr[PATH_MAX] = LIBCFS_DEBUG_FILE_PATH_DEFAULT;
int libcfs_debug_dumplog_thread(void *arg)
{
- libcfs_debug_dumplog_internal(arg);
- cfs_waitq_signal(&debug_ctlwq);
- return 0;
+ libcfs_debug_dumplog_internal(arg);
+ wake_up(&debug_ctlwq);
+ return 0;
}
void libcfs_debug_dumplog(void)
{
- cfs_waitlink_t wait;
- cfs_task_t *dumper;
- ENTRY;
+ wait_queue_t wait;
+ cfs_task_t *dumper;
+ ENTRY;
- /* we're being careful to ensure that the kernel thread is
- * able to set our state to running as it exits before we
- * get to schedule() */
- cfs_waitlink_init(&wait);
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add(&debug_ctlwq, &wait);
+ /* we're being careful to ensure that the kernel thread is
+ * able to set our state to running as it exits before we
+ * get to schedule() */
+ init_waitqueue_entry_current(&wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&debug_ctlwq, &wait);
dumper = kthread_run(libcfs_debug_dumplog_thread,
(void *)(long)current_pid(),
if (IS_ERR(dumper))
printk(KERN_ERR "LustreError: cannot start log dump thread:"
" %ld\n", PTR_ERR(dumper));
- else
- cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
+ else
+ waitq_wait(&wait, TASK_INTERRUPTIBLE);
- /* be sure to teardown if cfs_create_thread() failed */
- cfs_waitq_del(&debug_ctlwq, &wait);
- cfs_set_current_state(CFS_TASK_RUNNING);
+ /* be sure to teardown if cfs_create_thread() failed */
+ remove_wait_queue(&debug_ctlwq, &wait);
+ set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL(libcfs_debug_dumplog);
int libcfs_debug_init(unsigned long bufsize)
{
- int rc = 0;
- unsigned int max = libcfs_debug_mb;
+ int rc = 0;
+ unsigned int max = libcfs_debug_mb;
- cfs_waitq_init(&debug_ctlwq);
+ init_waitqueue_head(&debug_ctlwq);
- if (libcfs_console_max_delay <= 0 || /* not set by user or */
- libcfs_console_min_delay <= 0 || /* set to invalid values */
- libcfs_console_min_delay >= libcfs_console_max_delay) {
- libcfs_console_max_delay = CDEBUG_DEFAULT_MAX_DELAY;
- libcfs_console_min_delay = CDEBUG_DEFAULT_MIN_DELAY;
- }
+ if (libcfs_console_max_delay <= 0 || /* not set by user or */
+ libcfs_console_min_delay <= 0 || /* set to invalid values */
+ libcfs_console_min_delay >= libcfs_console_max_delay) {
+ libcfs_console_max_delay = CDEBUG_DEFAULT_MAX_DELAY;
+ libcfs_console_min_delay = CDEBUG_DEFAULT_MIN_DELAY;
+ }
if (libcfs_debug_file_path != NULL) {
memset(libcfs_debug_file_path_arr, 0, PATH_MAX);
unsigned long cfs_fail_loc = 0;
unsigned int cfs_fail_val = 0;
-cfs_waitq_t cfs_race_waitq;
+wait_queue_head_t cfs_race_waitq;
int cfs_race_state;
EXPORT_SYMBOL(cfs_fail_loc);
int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
{
- int ret = 0;
-
- ret = __cfs_fail_check_set(id, value, set);
- if (ret) {
- CERROR("cfs_fail_timeout id %x sleeping for %dms\n",
- id, ms);
- cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
- cfs_time_seconds(ms) / 1000);
- cfs_set_current_state(CFS_TASK_RUNNING);
- CERROR("cfs_fail_timeout id %x awake\n", id);
- }
- return ret;
+ int ret = 0;
+
+ ret = __cfs_fail_check_set(id, value, set);
+ if (ret) {
+ CERROR("cfs_fail_timeout id %x sleeping for %dms\n",
+ id, ms);
+ schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
+ cfs_time_seconds(ms) / 1000);
+ set_current_state(TASK_RUNNING);
+ CERROR("cfs_fail_timeout id %x awake\n", id);
+ }
+ return ret;
}
EXPORT_SYMBOL(__cfs_fail_timeout_set);
spin_lock(&hs->hs_dep_lock);
while (hs->hs_dep_bits != 0) {
spin_unlock(&hs->hs_dep_lock);
- cfs_cond_resched();
+ cond_resched();
spin_lock(&hs->hs_dep_lock);
}
spin_unlock(&hs->hs_dep_lock);
cfs_hash_exit(hs, hnode);
}
}
- LASSERT(bd.bd_bucket->hsb_count == 0);
- cfs_hash_bd_unlock(hs, &bd, 1);
- cfs_cond_resched();
- }
+ LASSERT(bd.bd_bucket->hsb_count == 0);
+ cfs_hash_bd_unlock(hs, &bd, 1);
+ cond_resched();
+ }
LASSERT(cfs_atomic_read(&hs->hs_count) == 0);
cfs_hash_bd_unlock(hs, &bd, excl);
if (loop < CFS_HASH_LOOP_HOG)
continue;
- loop = 0;
- cfs_hash_unlock(hs, 0);
- cfs_cond_resched();
- cfs_hash_lock(hs, 0);
- }
+ loop = 0;
+ cfs_hash_unlock(hs, 0);
+ cond_resched();
+ cfs_hash_lock(hs, 0);
+ }
out:
cfs_hash_unlock(hs, 0);
cfs_hash_bd_unlock(hs, &bd, 0);
cfs_hash_unlock(hs, 0);
- rc = func(hs, &bd, hnode, data);
- if (stop_on_change)
- cfs_hash_put(hs, hnode);
- cfs_cond_resched();
- count++;
+ rc = func(hs, &bd, hnode, data);
+ if (stop_on_change)
+ cfs_hash_put(hs, hnode);
+ cond_resched();
+ count++;
cfs_hash_lock(hs, 0);
cfs_hash_bd_lock(hs, &bd, 0);
}
for (i = 2; cfs_hash_is_rehashing(hs); i++) {
- cfs_hash_unlock(hs, 1);
- /* raise console warning while waiting too long */
- CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
- "hash %s is still rehashing, rescheded %d\n",
- hs->hs_name, i - 1);
- cfs_cond_resched();
- cfs_hash_lock(hs, 1);
- }
+ cfs_hash_unlock(hs, 1);
+ /* raise console warning while waiting too long */
+ CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
+ "hash %s is still rehashing, rescheded %d\n",
+ hs->hs_name, i - 1);
+ cond_resched();
+ cfs_hash_lock(hs, 1);
+ }
}
EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);
continue;
}
- count = 0;
- cfs_hash_unlock(hs, 1);
- cfs_cond_resched();
- cfs_hash_lock(hs, 1);
- }
+ count = 0;
+ cfs_hash_unlock(hs, 1);
+ cond_resched();
+ cfs_hash_lock(hs, 1);
+ }
hs->hs_rehash_count++;
rc = set_cpus_allowed_ptr(cfs_current(), cpumask);
set_mems_allowed(*nodemask);
if (rc == 0)
- cfs_schedule(); /* switch to allowed CPU */
+ schedule(); /* switch to allowed CPU */
return rc;
}
#include <asm/kgdb.h>
#endif
-#define LINUX_WAITQ(w) ((wait_queue_t *) w)
-#define LINUX_WAITQ_HEAD(w) ((wait_queue_head_t *) w)
-
-void
-cfs_waitq_init(cfs_waitq_t *waitq)
-{
- init_waitqueue_head(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_init);
-
-void
-cfs_waitlink_init(cfs_waitlink_t *link)
-{
- init_waitqueue_entry(LINUX_WAITQ(link), current);
-}
-EXPORT_SYMBOL(cfs_waitlink_init);
-
-void
-cfs_waitq_add(cfs_waitq_t *waitq, cfs_waitlink_t *link)
-{
- add_wait_queue(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-}
-EXPORT_SYMBOL(cfs_waitq_add);
-
-#ifndef HAVE___ADD_WAIT_QUEUE_EXCLUSIVE
-
-static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
- wait_queue_t *wait)
-{
- wait->flags |= WQ_FLAG_EXCLUSIVE;
- __add_wait_queue(q, wait);
-}
-
-#endif /* HAVE___ADD_WAIT_QUEUE_EXCLUSIVE */
-
-void
-cfs_waitq_add_exclusive(cfs_waitq_t *waitq,
- cfs_waitlink_t *link)
-{
- add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-}
-EXPORT_SYMBOL(cfs_waitq_add_exclusive);
-
-/**
- * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
- * waiting threads, which is not always desirable because all threads will
- * be waken up again and again, even user only needs a few of them to be
- * active most time. This is not good for performance because cache can
- * be polluted by different threads.
- *
- * LIFO list can resolve this problem because we always wakeup the most
- * recent active thread by default.
- *
- * NB: please don't call non-exclusive & exclusive wait on the same
- * waitq if cfs_waitq_add_exclusive_head is used.
- */
-void
-cfs_waitq_add_exclusive_head(cfs_waitq_t *waitq, cfs_waitlink_t *link)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
- __add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
- spin_unlock_irqrestore(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
-}
-EXPORT_SYMBOL(cfs_waitq_add_exclusive_head);
-
-void
-cfs_waitq_del(cfs_waitq_t *waitq, cfs_waitlink_t *link)
-{
- remove_wait_queue(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-}
-EXPORT_SYMBOL(cfs_waitq_del);
-
-int
-cfs_waitq_active(cfs_waitq_t *waitq)
-{
- return waitqueue_active(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_active);
-
-void
-cfs_waitq_signal(cfs_waitq_t *waitq)
-{
- wake_up(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_signal);
-
-void
-cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr)
-{
- wake_up_nr(LINUX_WAITQ_HEAD(waitq), nr);
-}
-EXPORT_SYMBOL(cfs_waitq_signal_nr);
-
-void
-cfs_waitq_broadcast(cfs_waitq_t *waitq)
-{
- wake_up_all(LINUX_WAITQ_HEAD(waitq));
-}
-EXPORT_SYMBOL(cfs_waitq_broadcast);
-
-void
-cfs_waitq_wait(cfs_waitlink_t *link, cfs_task_state_t state)
-{
- schedule();
-}
-EXPORT_SYMBOL(cfs_waitq_wait);
-
-int64_t
-cfs_waitq_timedwait(cfs_waitlink_t *link, cfs_task_state_t state,
- int64_t timeout)
-{
- return schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(cfs_waitq_timedwait);
-
-void
-cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t timeout)
-{
- set_current_state(state);
- schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(cfs_schedule_timeout_and_set_state);
-
-void
-cfs_schedule_timeout(int64_t timeout)
-{
- schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(cfs_schedule_timeout);
-
-void
-cfs_schedule(void)
-{
- schedule();
-}
-EXPORT_SYMBOL(cfs_schedule);
-
-/* deschedule for a bit... */
-void
-cfs_pause(cfs_duration_t ticks)
-{
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(ticks);
-}
-EXPORT_SYMBOL(cfs_pause);
-
-int cfs_need_resched(void)
-{
- return need_resched();
-}
-EXPORT_SYMBOL(cfs_need_resched);
-
-void cfs_cond_resched(void)
-{
- cond_resched();
-}
-EXPORT_SYMBOL(cfs_cond_resched);
-
void cfs_init_timer(cfs_timer_t *t)
{
init_timer(t);
int LL_PROC_PROTO(proc_fail_loc)
{
- int rc;
- long old_fail_loc = cfs_fail_loc;
+ int rc;
+ long old_fail_loc = cfs_fail_loc;
- rc = ll_proc_dolongvec(table, write, filp, buffer, lenp, ppos);
- if (old_fail_loc != cfs_fail_loc)
- cfs_waitq_signal(&cfs_race_waitq);
- return rc;
+ rc = ll_proc_dolongvec(table, write, filp, buffer, lenp, ppos);
+ if (old_fail_loc != cfs_fail_loc)
+ wake_up(&cfs_race_waitq);
+ return rc;
}
static int __proc_cpt_table(void *data, int write,
if (!cfs_capable(CFS_CAP_SYS_ADMIN))
return (-EPERM);
- if (!enable) {
- LWT_EVENT(0,0,0,0);
- lwt_enabled = 0;
- cfs_mb();
- /* give people some time to stop adding traces */
- cfs_schedule_timeout(10);
- }
+ if (!enable) {
+ LWT_EVENT(0,0,0,0);
+ lwt_enabled = 0;
+ cfs_mb();
+ /* give people some time to stop adding traces */
+ schedule_timeout(10);
+ }
for (i = 0; i < num_online_cpus(); i++) {
p = lwt_cpus[i].lwtc_current_page;
mutex_init(&cfs_trace_thread_mutex);
init_rwsem(&ioctl_list_sem);
CFS_INIT_LIST_HEAD(&ioctl_list);
- cfs_waitq_init(&cfs_race_waitq);
+ init_waitqueue_head(&cfs_race_waitq);
rc = libcfs_debug_init(5 * 1024 * 1024);
if (rc < 0) {
cfs_list_add_tail(&tage->linkage, &tcd->tcd_pages);
tcd->tcd_cur_pages++;
- if (tcd->tcd_cur_pages > 8 && thread_running) {
- struct tracefiled_ctl *tctl = &trace_tctl;
- /*
- * wake up tracefiled to process some pages.
- */
- cfs_waitq_signal(&tctl->tctl_waitq);
- }
- return tage;
+ if (tcd->tcd_cur_pages > 8 && thread_running) {
+ struct tracefiled_ctl *tctl = &trace_tctl;
+ /*
+ * wake up tracefiled to process some pages.
+ */
+ wake_up(&tctl->tctl_waitq);
+ }
+ return tage;
}
return NULL;
}
spin_lock_init(&pc.pc_lock);
complete(&tctl->tctl_start);
- while (1) {
- cfs_waitlink_t __wait;
+ while (1) {
+ wait_queue_t __wait;
pc.pc_want_daemon_pages = 0;
collect_pages(&pc);
break;
}
}
- cfs_waitlink_init(&__wait);
- cfs_waitq_add(&tctl->tctl_waitq, &__wait);
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_timedwait(&__wait, CFS_TASK_INTERRUPTIBLE,
- cfs_time_seconds(1));
- cfs_waitq_del(&tctl->tctl_waitq, &__wait);
+ init_waitqueue_entry_current(&__wait);
+ add_wait_queue(&tctl->tctl_waitq, &__wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ waitq_timedwait(&__wait, TASK_INTERRUPTIBLE,
+ cfs_time_seconds(1));
+ remove_wait_queue(&tctl->tctl_waitq, &__wait);
}
complete(&tctl->tctl_stop);
return 0;
init_completion(&tctl->tctl_start);
init_completion(&tctl->tctl_stop);
- cfs_waitq_init(&tctl->tctl_waitq);
+ init_waitqueue_head(&tctl->tctl_waitq);
cfs_atomic_set(&tctl->tctl_shutdown, 0);
if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
struct tracefiled_ctl {
struct completion tctl_start;
struct completion tctl_stop;
- cfs_waitq_t tctl_waitq;
+ wait_queue_head_t tctl_waitq;
pid_t tctl_pid;
cfs_atomic_t tctl_shutdown;
};
if (!entry)
return NULL;
- UC_CACHE_SET_NEW(entry);
- CFS_INIT_LIST_HEAD(&entry->ue_hash);
- entry->ue_key = key;
- cfs_atomic_set(&entry->ue_refcount, 0);
- cfs_waitq_init(&entry->ue_waitq);
- if (cache->uc_ops->init_entry)
- cache->uc_ops->init_entry(entry, args);
- return entry;
+ UC_CACHE_SET_NEW(entry);
+ CFS_INIT_LIST_HEAD(&entry->ue_hash);
+ entry->ue_key = key;
+ cfs_atomic_set(&entry->ue_refcount, 0);
+ init_waitqueue_head(&entry->ue_waitq);
+ if (cache->uc_ops->init_entry)
+ cache->uc_ops->init_entry(entry, args);
+ return entry;
}
/* protected by cache lock */
entry->ue_acquire_expire))
return 0;
- UC_CACHE_SET_EXPIRED(entry);
- cfs_waitq_broadcast(&entry->ue_waitq);
- } else if (!UC_CACHE_IS_INVALID(entry)) {
- UC_CACHE_SET_EXPIRED(entry);
- }
+ UC_CACHE_SET_EXPIRED(entry);
+ wake_up_all(&entry->ue_waitq);
+ } else if (!UC_CACHE_IS_INVALID(entry)) {
+ UC_CACHE_SET_EXPIRED(entry);
+ }
cfs_list_del_init(&entry->ue_hash);
if (!cfs_atomic_read(&entry->ue_refcount))
struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
__u64 key, void *args)
{
- struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
- cfs_list_t *head;
- cfs_waitlink_t wait;
- int rc, found;
- ENTRY;
+ struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
+ cfs_list_t *head;
+ wait_queue_t wait;
+ int rc, found;
+ ENTRY;
LASSERT(cache);
entry->ue_acquire_expire =
cfs_time_shift(cache->uc_acquire_expire);
if (rc < 0) {
- UC_CACHE_CLEAR_ACQUIRING(entry);
- UC_CACHE_SET_INVALID(entry);
- cfs_waitq_broadcast(&entry->ue_waitq);
- if (unlikely(rc == -EREMCHG)) {
- put_entry(cache, entry);
- GOTO(out, entry = ERR_PTR(rc));
- }
+ UC_CACHE_CLEAR_ACQUIRING(entry);
+ UC_CACHE_SET_INVALID(entry);
+ wake_up_all(&entry->ue_waitq);
+ if (unlikely(rc == -EREMCHG)) {
+ put_entry(cache, entry);
+ GOTO(out, entry = ERR_PTR(rc));
+ }
}
}
/* someone (and only one) is doing upcall upon this item,
if (UC_CACHE_IS_ACQUIRING(entry)) {
long expiry = (entry == new) ?
cfs_time_seconds(cache->uc_acquire_expire) :
- CFS_MAX_SCHEDULE_TIMEOUT;
- long left;
+ MAX_SCHEDULE_TIMEOUT;
+ long left;
- cfs_waitlink_init(&wait);
- cfs_waitq_add(&entry->ue_waitq, &wait);
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+ init_waitqueue_entry_current(&wait);
+ add_wait_queue(&entry->ue_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
spin_unlock(&cache->uc_lock);
- left = cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
+ left = waitq_timedwait(&wait, TASK_INTERRUPTIBLE,
expiry);
spin_lock(&cache->uc_lock);
- cfs_waitq_del(&entry->ue_waitq, &wait);
- if (UC_CACHE_IS_ACQUIRING(entry)) {
- /* we're interrupted or upcall failed in the middle */
- rc = left > 0 ? -EINTR : -ETIMEDOUT;
- CERROR("acquire for key "LPU64": error %d\n",
- entry->ue_key, rc);
- put_entry(cache, entry);
- GOTO(out, entry = ERR_PTR(rc));
- }
+ remove_wait_queue(&entry->ue_waitq, &wait);
+ if (UC_CACHE_IS_ACQUIRING(entry)) {
+ /* we're interrupted or upcall failed in the middle */
+ rc = left > 0 ? -EINTR : -ETIMEDOUT;
+ CERROR("acquire for key "LPU64": error %d\n",
+ entry->ue_key, rc);
+ put_entry(cache, entry);
+ GOTO(out, entry = ERR_PTR(rc));
+ }
}
/* invalid means error, don't need to try again */
}
UC_CACHE_CLEAR_ACQUIRING(entry);
spin_unlock(&cache->uc_lock);
- cfs_waitq_broadcast(&entry->ue_waitq);
+ wake_up_all(&entry->ue_waitq);
put_entry(cache, entry);
RETURN(rc);
{
LASSERT(c != NULL);
c->done = 0;
- cfs_waitq_init(&c->wait);
+ init_waitqueue_head(&c->wait);
}
void fini_completion(struct completion *c)
{
LASSERT(c != NULL);
c->done = 1;
- cfs_waitq_signal(&c->wait);
+ wake_up(&c->wait);
}
void wait_for_completion(struct completion *c)
* Wait queue. No-op implementation.
*/
-void cfs_waitq_init(struct cfs_waitq *waitq)
+void init_waitqueue_head(struct cfs_waitq *waitq)
{
- LASSERT(waitq != NULL);
- (void)waitq;
+ LASSERT(waitq != NULL);
+ (void)waitq;
}
-void cfs_waitlink_init(struct cfs_waitlink *link)
+void init_waitqueue_entry_current(struct cfs_waitlink *link)
{
- LASSERT(link != NULL);
- (void)link;
+ LASSERT(link != NULL);
+ (void)link;
}
-void cfs_waitq_add(struct cfs_waitq *waitq, struct cfs_waitlink *link)
+void add_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link)
{
- LASSERT(waitq != NULL);
- LASSERT(link != NULL);
- (void)waitq;
- (void)link;
+ LASSERT(waitq != NULL);
+ LASSERT(link != NULL);
+ (void)waitq;
+ (void)link;
}
-void cfs_waitq_add_exclusive(struct cfs_waitq *waitq, struct cfs_waitlink *link)
+void add_wait_queue_exclusive(struct cfs_waitq *waitq, struct cfs_waitlink *link)
{
- LASSERT(waitq != NULL);
- LASSERT(link != NULL);
- (void)waitq;
- (void)link;
+ LASSERT(waitq != NULL);
+ LASSERT(link != NULL);
+ (void)waitq;
+ (void)link;
}
-void cfs_waitq_add_exclusive_head(struct cfs_waitq *waitq, struct cfs_waitlink *link)
+void add_wait_queue_exclusive_head(struct cfs_waitq *waitq, struct cfs_waitlink *link)
{
- cfs_waitq_add_exclusive(waitq, link);
+ add_wait_queue_exclusive(waitq, link);
}
-void cfs_waitq_del(struct cfs_waitq *waitq, struct cfs_waitlink *link)
+void remove_wait_queue(struct cfs_waitq *waitq, struct cfs_waitlink *link)
{
- LASSERT(waitq != NULL);
- LASSERT(link != NULL);
- (void)waitq;
- (void)link;
+ LASSERT(waitq != NULL);
+ LASSERT(link != NULL);
+ (void)waitq;
+ (void)link;
}
-int cfs_waitq_active(struct cfs_waitq *waitq)
+int waitqueue_active(struct cfs_waitq *waitq)
{
- LASSERT(waitq != NULL);
- (void)waitq;
- return 0;
+ LASSERT(waitq != NULL);
+ (void)waitq;
+ return 0;
}
-void cfs_waitq_signal(struct cfs_waitq *waitq)
+void wake_up(struct cfs_waitq *waitq)
{
- LASSERT(waitq != NULL);
- (void)waitq;
+ LASSERT(waitq != NULL);
+ (void)waitq;
}
-void cfs_waitq_signal_nr(struct cfs_waitq *waitq, int nr)
+void wake_up_nr(struct cfs_waitq *waitq, int nr)
{
- LASSERT(waitq != NULL);
- (void)waitq;
+ LASSERT(waitq != NULL);
+ (void)waitq;
}
-void cfs_waitq_broadcast(struct cfs_waitq *waitq)
+void wake_up_all(struct cfs_waitq *waitq)
{
- LASSERT(waitq != NULL);
- (void)waitq;
+ LASSERT(waitq != NULL);
+ (void)waitq;
}
-void cfs_waitq_wait(struct cfs_waitlink *link, cfs_task_state_t state)
+void waitq_wait(struct cfs_waitlink *link, long state)
{
- LASSERT(link != NULL);
- (void)link;
+ LASSERT(link != NULL);
+ (void)link;
- /* well, wait for something to happen */
+ /* well, wait for something to happen */
call_wait_handler(0);
}
-int64_t cfs_waitq_timedwait(struct cfs_waitlink *link, cfs_task_state_t state,
- int64_t timeout)
+int64_t waitq_timedwait(struct cfs_waitlink *link, long state,
+ int64_t timeout)
{
- LASSERT(link != NULL);
- (void)link;
+ LASSERT(link != NULL);
+ (void)link;
call_wait_handler(timeout);
- return 0;
+ return 0;
}
-void cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t timeout)
+void schedule_timeout_and_set_state(long state, int64_t timeout)
{
- cfs_waitlink_t l;
- /* sleep(timeout) here instead? */
- cfs_waitq_timedwait(&l, state, timeout);
+ wait_queue_t l;
+ /* sleep(timeout) here instead? */
+ waitq_timedwait(&l, state, timeout);
}
void
cfs_pause(cfs_duration_t d)
{
- struct timespec s;
+ struct timespec s;
- cfs_duration_nsec(d, &s);
- nanosleep(&s, NULL);
+ cfs_duration_nsec(d, &s);
+ nanosleep(&s, NULL);
}
-int cfs_need_resched(void)
+int need_resched(void)
{
- return 0;
+ return 0;
}
-void cfs_cond_resched(void)
+void cond_resched(void)
{
}
*/
static struct completion lcw_start_completion;
static struct completion lcw_stop_completion;
-static cfs_waitq_t lcw_event_waitq;
+static wait_queue_head_t lcw_event_waitq;
/*
* Set this and wake lcw_event_waitq to stop the dispatcher.
spin_lock_bh(&lcw_pending_timers_lock);
lcw->lcw_refcount++; /* +1 for pending list */
cfs_list_add(&lcw->lcw_list, &lcw_pending_timers);
- cfs_waitq_signal(&lcw_event_waitq);
+ wake_up(&lcw_event_waitq);
spin_unlock_bh(&lcw_pending_timers_lock);
spin_unlock_bh(&lcw->lcw_lock);
init_completion(&lcw_stop_completion);
init_completion(&lcw_start_completion);
- cfs_waitq_init(&lcw_event_waitq);
+ init_waitqueue_head(&lcw_event_waitq);
CDEBUG(D_INFO, "starting dispatch thread\n");
task = kthread_run(lcw_dispatch_main, NULL, "lc_watchdogd");
CDEBUG(D_INFO, "trying to stop watchdog dispatcher.\n");
set_bit(LCW_FLAG_STOP, &lcw_flags);
- cfs_waitq_signal(&lcw_event_waitq);
+ wake_up(&lcw_event_waitq);
wait_for_completion(&lcw_stop_completion);
void
cfs_pause(cfs_duration_t ticks)
{
- cfs_schedule_timeout_and_set_state(CFS_TASK_UNINTERRUPTIBLE, ticks);
+ schedule_timeout_and_set_state(CFS_TASK_UNINTERRUPTIBLE, ticks);
}
void
-cfs_schedule_timeout_and_set_state(cfs_task_state_t state, int64_t time)
+schedule_timeout_and_set_state(long state, int64_t time)
{
cfs_task_t * task = cfs_current();
PTASK_SLOT slot = NULL;
slot = CONTAINING_RECORD(task, TASK_SLOT, task);
cfs_assert(slot->Magic == TASKSLT_MAGIC);
- if (time == CFS_MAX_SCHEDULE_TIMEOUT) {
+ if (time == MAX_SCHEDULE_TIMEOUT) {
time = 0;
}
}
void
-cfs_schedule()
+schedule()
{
- cfs_schedule_timeout_and_set_state(CFS_TASK_UNINTERRUPTIBLE, 0);
+ schedule_timeout_and_set_state(CFS_TASK_UNINTERRUPTIBLE, 0);
}
int
}
void
-sleep_on(cfs_waitq_t *waitq)
+sleep_on(wait_queue_head_t *waitq)
{
- cfs_waitlink_t link;
+ wait_queue_t link;
- cfs_waitlink_init(&link);
- cfs_waitq_add(waitq, &link);
- cfs_waitq_wait(&link, CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_del(waitq, &link);
+ init_waitqueue_entry_current(&link);
+ add_wait_queue(waitq, &link);
+ waitq_wait(&link, TASK_INTERRUPTIBLE);
+ remove_wait_queue(waitq, &link);
}
EXPORT_SYMBOL(current_uid);
return NT_SUCCESS(status);
}
-int cfs_need_resched(void)
+int need_resched(void)
{
return 0;
}
-void cfs_cond_resched(void)
+void cond_resched(void)
{
}
*/
/*
- * cfs_waitq_init
+ * init_waitqueue_head
* To initialize the wait queue
*
* Arguments:
- * waitq: pointer to the cfs_waitq_t structure
+ * waitq: pointer to the wait_queue_head_t structure
*
* Return Value:
* N/A
* N/A
*/
-void cfs_waitq_init(cfs_waitq_t *waitq)
+void init_waitqueue_head(wait_queue_head_t *waitq)
{
waitq->magic = CFS_WAITQ_MAGIC;
waitq->flags = 0;
}
/*
- * cfs_waitlink_init
+ * init_waitqueue_entry_current
* To initialize the wake link node
*
* Arguments:
- * link: pointer to the cfs_waitlink_t structure
+ * link: pointer to the wait_queue_t structure
*
* Return Value:
* N/A
* N/A
*/
-void cfs_waitlink_init(cfs_waitlink_t *link)
+void init_waitqueue_entry_current(wait_queue_t *link)
{
cfs_task_t * task = cfs_current();
PTASK_SLOT slot = NULL;
slot = CONTAINING_RECORD(task, TASK_SLOT, task);
cfs_assert(slot->Magic == TASKSLT_MAGIC);
- memset(link, 0, sizeof(cfs_waitlink_t));
+ memset(link, 0, sizeof(wait_queue_t));
link->magic = CFS_WAITLINK_MAGIC;
link->flags = 0;
 * To finalize the wake link node
*
* Arguments:
- * link: pointer to the cfs_waitlink_t structure
+ * link: pointer to the wait_queue_t structure
*
* Return Value:
* N/A
* N/A
*/
-void cfs_waitlink_fini(cfs_waitlink_t *link)
+void cfs_waitlink_fini(wait_queue_t *link)
{
cfs_task_t * task = cfs_current();
PTASK_SLOT slot = NULL;
* To queue the wait link node to the wait queue
*
* Arguments:
- * waitq: pointer to the cfs_waitq_t structure
- * link: pointer to the cfs_waitlink_t structure
+ * waitq: pointer to the wait_queue_head_t structure
+ * link: pointer to the wait_queue_t structure
* int: queue no (Normal or Forward waitq)
*
* Return Value:
* N/A
*/
-void cfs_waitq_add_internal(cfs_waitq_t *waitq,
- cfs_waitlink_t *link,
+void cfs_waitq_add_internal(wait_queue_head_t *waitq,
+ wait_queue_t *link,
__u32 waitqid )
{
LASSERT(waitq != NULL);
spin_unlock(&(waitq->guard));
}
/*
- * cfs_waitq_add
+ * add_wait_queue
* To queue the wait link node to the wait queue
*
* Arguments:
- * waitq: pointer to the cfs_waitq_t structure
- * link: pointer to the cfs_waitlink_t structure
+ * waitq: pointer to the wait_queue_head_t structure
+ * link: pointer to the wait_queue_t structure
*
* Return Value:
* N/A
* N/A
*/
-void cfs_waitq_add(cfs_waitq_t *waitq,
- cfs_waitlink_t *link)
+void add_wait_queue(wait_queue_head_t *waitq,
+ wait_queue_t *link)
{
cfs_waitq_add_internal(waitq, link, CFS_WAITQ_CHAN_NORMAL);
}
/*
- * cfs_waitq_add_exclusive
+ * add_wait_queue_exclusive
* To set the wait link node to exclusive mode
* and queue it to the wait queue
*
* Arguments:
- * waitq: pointer to the cfs_waitq_t structure
+ * waitq: pointer to the wait_queue_head_t structure
* link: pointer to the cfs_wait_link structure
*
* Return Value:
* N/A
*/
-void cfs_waitq_add_exclusive( cfs_waitq_t *waitq,
- cfs_waitlink_t *link)
+void add_wait_queue_exclusive( wait_queue_head_t *waitq,
+ wait_queue_t *link)
{
LASSERT(waitq != NULL);
LASSERT(link != NULL);
LASSERT(link->magic == CFS_WAITLINK_MAGIC);
link->flags |= CFS_WAITQ_EXCLUSIVE;
- cfs_waitq_add(waitq, link);
+ add_wait_queue(waitq, link);
}
/*
- * cfs_waitq_del
+ * remove_wait_queue
* To remove the wait link node from the waitq
*
* Arguments:
* waitq: pointer to the cfs_ waitq_t structure
- * link: pointer to the cfs_waitlink_t structure
+ * link: pointer to the wait_queue_t structure
*
* Return Value:
* N/A
* N/A
*/
-void cfs_waitq_del( cfs_waitq_t *waitq,
- cfs_waitlink_t *link)
+void remove_wait_queue( wait_queue_head_t *waitq,
+ wait_queue_t *link)
{
int i = 0;
}
/*
- * cfs_waitq_active
+ * waitqueue_active
* Is the waitq active (not empty) ?
*
* Arguments:
* We always returns TRUE here, the same to Darwin.
*/
-int cfs_waitq_active(cfs_waitq_t *waitq)
+int waitqueue_active(wait_queue_head_t *waitq)
{
LASSERT(waitq != NULL);
LASSERT(waitq->magic == CFS_WAITQ_MAGIC);
}
/*
- * cfs_waitq_signal_nr
+ * wake_up_nr
* To wake up all the non-exclusive tasks plus nr exclusive
* ones in the waitq
*
* Arguments:
- * waitq: pointer to the cfs_waitq_t structure
+ * waitq: pointer to the wait_queue_head_t structure
* nr: number of exclusive tasks to be woken up
*
* Return Value:
*/
-void cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr)
+void wake_up_nr(wait_queue_head_t *waitq, int nr)
{
int result;
cfs_waitlink_channel_t * scan;
cfs_waitlink_channel_t,
link) {
- cfs_waitlink_t *waitl = scan->waitl;
+ wait_queue_t *waitl = scan->waitl;
result = cfs_wake_event(waitl->event);
LASSERT( result == FALSE || result == TRUE );
}
/*
- * cfs_waitq_signal
+ * wake_up
* To wake up all the non-exclusive tasks and 1 exclusive
*
* Arguments:
- * waitq: pointer to the cfs_waitq_t structure
+ * waitq: pointer to the wait_queue_head_t structure
*
* Return Value:
* N/A
* N/A
*/
-void cfs_waitq_signal(cfs_waitq_t *waitq)
+void wake_up(wait_queue_head_t *waitq)
{
- cfs_waitq_signal_nr(waitq, 1);
+ wake_up_nr(waitq, 1);
}
/*
- * cfs_waitq_broadcast
+ * wake_up_all
* To wake up all the tasks in the waitq
*
* Arguments:
- * waitq: pointer to the cfs_waitq_t structure
+ * waitq: pointer to the wait_queue_head_t structure
*
* Return Value:
* N/A
* N/A
*/
-void cfs_waitq_broadcast(cfs_waitq_t *waitq)
+void wake_up_all(wait_queue_head_t *waitq)
{
LASSERT(waitq != NULL);
LASSERT(waitq->magic ==CFS_WAITQ_MAGIC);
- cfs_waitq_signal_nr(waitq, 0);
+ wake_up_nr(waitq, 0);
}
/*
- * cfs_waitq_wait
+ * waitq_wait
* To wait on the link node until it is signaled.
*
* Arguments:
- * link: pointer to the cfs_waitlink_t structure
+ * link: pointer to the wait_queue_t structure
*
* Return Value:
* N/A
* N/A
*/
-void cfs_waitq_wait(cfs_waitlink_t *link, cfs_task_state_t state)
+void waitq_wait(wait_queue_t *link, long state)
{
LASSERT(link != NULL);
LASSERT(link->magic == CFS_WAITLINK_MAGIC);
}
/*
- * cfs_waitq_timedwait
+ * waitq_timedwait
* To wait the link node to be signaled with a timeout limit
*
* Arguments:
- * link: pointer to the cfs_waitlink_t structure
+ * link: pointer to the wait_queue_t structure
* timeout: the timeout limitation
*
* Return Value:
* What if it happens to be woken up at the just timeout time !?
*/
-int64_t cfs_waitq_timedwait( cfs_waitlink_t *link,
- cfs_task_state_t state,
+int64_t waitq_timedwait( wait_queue_t *link,
+ long state,
int64_t timeout)
{
/** serialised workitems */
spinlock_t ws_lock;
/** where schedulers sleep */
- cfs_waitq_t ws_waitq;
+ wait_queue_head_t ws_waitq;
#endif
/** concurrent workitems */
cfs_list_t ws_runq;
LASSERT(!cfs_in_interrupt()); /* because we use plain spinlock */
LASSERT(!sched->ws_stopping);
- cfs_wi_sched_lock(sched);
+ cfs_wi_sched_lock(sched);
- if (!wi->wi_scheduled) {
- LASSERT (cfs_list_empty(&wi->wi_list));
+ if (!wi->wi_scheduled) {
+ LASSERT (cfs_list_empty(&wi->wi_list));
- wi->wi_scheduled = 1;
+ wi->wi_scheduled = 1;
sched->ws_nscheduled++;
- if (!wi->wi_running) {
- cfs_list_add_tail(&wi->wi_list, &sched->ws_runq);
+ if (!wi->wi_running) {
+ cfs_list_add_tail(&wi->wi_list, &sched->ws_runq);
#ifdef __KERNEL__
- cfs_waitq_signal(&sched->ws_waitq);
+ wake_up(&sched->ws_waitq);
#endif
- } else {
- cfs_list_add(&wi->wi_list, &sched->ws_rerunq);
- }
- }
+ } else {
+ cfs_list_add(&wi->wi_list, &sched->ws_rerunq);
+ }
+ }
- LASSERT (!cfs_list_empty(&wi->wi_list));
- cfs_wi_sched_unlock(sched);
- return;
+ LASSERT (!cfs_list_empty(&wi->wi_list));
+ cfs_wi_sched_unlock(sched);
+ return;
}
EXPORT_SYMBOL(cfs_wi_schedule);
cfs_list_move_tail(&wi->wi_list, &sched->ws_runq);
}
- if (!cfs_list_empty(&sched->ws_runq)) {
- cfs_wi_sched_unlock(sched);
- /* don't sleep because some workitems still
- * expect me to come back soon */
- cfs_cond_resched();
- cfs_wi_sched_lock(sched);
- continue;
- }
+ if (!cfs_list_empty(&sched->ws_runq)) {
+ cfs_wi_sched_unlock(sched);
+ /* don't sleep because some workitems still
+ * expect me to come back soon */
+ cond_resched();
+ cfs_wi_sched_lock(sched);
+ continue;
+ }
cfs_wi_sched_unlock(sched);
rc = wait_event_interruptible_exclusive(sched->ws_waitq,
spin_unlock(&cfs_wi_data.wi_glock);
#ifdef __KERNEL__
- cfs_waitq_broadcast(&sched->ws_waitq);
+ wake_up_all(&sched->ws_waitq);
spin_lock(&cfs_wi_data.wi_glock);
{
#ifdef __KERNEL__
spin_lock_init(&sched->ws_lock);
- cfs_waitq_init(&sched->ws_waitq);
+ init_waitqueue_head(&sched->ws_waitq);
#endif
CFS_INIT_LIST_HEAD(&sched->ws_runq);
CFS_INIT_LIST_HEAD(&sched->ws_rerunq);
spin_lock(&cfs_wi_data.wi_glock);
while (sched->ws_starting > 0) {
spin_unlock(&cfs_wi_data.wi_glock);
- cfs_schedule();
+ schedule();
spin_lock(&cfs_wi_data.wi_glock);
}
/* nobody should contend on this list */
cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
sched->ws_stopping = 1;
- cfs_waitq_broadcast(&sched->ws_waitq);
+ wake_up_all(&sched->ws_waitq);
}
cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
/* Event Queue container */
struct lnet_res_container ln_eq_container;
#ifdef __KERNEL__
- cfs_waitq_t ln_eq_waitq;
+ wait_queue_head_t ln_eq_waitq;
spinlock_t ln_eq_wait_lock;
#else
# ifndef HAVE_LIBPTHREAD
void
mxlnd_sleep(unsigned long timeout)
{
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_schedule_timeout(timeout);
- return;
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(timeout);
+ return;
}
/**
goto failed_2;
}
- if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
- /* wakeup failover thread and teardown connection */
- if (kiblnd_dev_can_failover(dev)) {
- cfs_list_add_tail(&dev->ibd_fail_list,
- &kiblnd_data.kib_failed_devs);
- cfs_waitq_signal(&kiblnd_data.kib_failover_waitq);
- }
+ if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
+ /* wakeup failover thread and teardown connection */
+ if (kiblnd_dev_can_failover(dev)) {
+ cfs_list_add_tail(&dev->ibd_fail_list,
+ &kiblnd_data.kib_failed_devs);
+ wake_up(&kiblnd_data.kib_failover_waitq);
+ }
write_unlock_irqrestore(glock, flags);
- CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
- cmid->device->name, dev->ibd_ifname);
- goto failed_2;
- }
+ CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
+ cmid->device->name, dev->ibd_ifname);
+ goto failed_2;
+ }
kiblnd_hdev_addref_locked(dev->ibd_hdev);
conn->ibc_hdev = dev->ibd_hdev;
if (i++ % 50 == 0)
CDEBUG(D_NET, "%s: Wait for failover\n",
dev->ibd_ifname);
- cfs_schedule_timeout(cfs_time_seconds(1) / 100);
+ schedule_timeout(cfs_time_seconds(1) / 100);
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
}
spin_unlock(&fps->fps_lock);
CDEBUG(D_NET, "Another thread is allocating new "
"FMR pool, waiting for her to complete\n");
- cfs_schedule();
+ schedule();
goto again;
}
CDEBUG(D_NET, "Another thread is allocating new "
"%s pool, waiting for her to complete\n",
ps->ps_name);
- cfs_schedule();
+ schedule();
goto again;
}
LASSERT (cfs_list_empty(&kiblnd_data.kib_connd_zombies));
LASSERT (cfs_list_empty(&kiblnd_data.kib_connd_conns));
- /* flag threads to terminate; wake and wait for them to die */
- kiblnd_data.kib_shutdown = 1;
+ /* flag threads to terminate; wake and wait for them to die */
+ kiblnd_data.kib_shutdown = 1;
/* NB: we really want to stop scheduler threads net by net
* instead of the whole module, this should be improved
* with dynamic configuration LNet */
cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds)
- cfs_waitq_broadcast(&sched->ibs_waitq);
+ wake_up_all(&sched->ibs_waitq);
- cfs_waitq_broadcast(&kiblnd_data.kib_connd_waitq);
- cfs_waitq_broadcast(&kiblnd_data.kib_failover_waitq);
+ wake_up_all(&kiblnd_data.kib_connd_waitq);
+ wake_up_all(&kiblnd_data.kib_failover_waitq);
- i = 2;
- while (cfs_atomic_read(&kiblnd_data.kib_nthreads) != 0) {
+ i = 2;
+ while (cfs_atomic_read(&kiblnd_data.kib_nthreads) != 0) {
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"Waiting for %d threads to terminate\n",
CFS_INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
spin_lock_init(&kiblnd_data.kib_connd_lock);
- CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
- CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
- cfs_waitq_init(&kiblnd_data.kib_connd_waitq);
- cfs_waitq_init(&kiblnd_data.kib_failover_waitq);
+ CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
+ CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
+ init_waitqueue_head(&kiblnd_data.kib_connd_waitq);
+ init_waitqueue_head(&kiblnd_data.kib_failover_waitq);
kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(*sched));
spin_lock_init(&sched->ibs_lock);
CFS_INIT_LIST_HEAD(&sched->ibs_conns);
- cfs_waitq_init(&sched->ibs_waitq);
+ init_waitqueue_head(&sched->ibs_waitq);
nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
if (*kiblnd_tunables.kib_nscheds > 0) {
/* serialise */
spinlock_t ibs_lock;
/* schedulers sleep here */
- cfs_waitq_t ibs_waitq;
+ wait_queue_head_t ibs_waitq;
/* conns to check for rx completions */
cfs_list_t ibs_conns;
/* number of scheduler threads */
/* list head of failed devices */
cfs_list_t kib_failed_devs;
/* schedulers sleep here */
- cfs_waitq_t kib_failover_waitq;
+ wait_queue_head_t kib_failover_waitq;
cfs_atomic_t kib_nthreads; /* # live threads */
/* stabilize net/dev/peer/conn ops */
rwlock_t kib_global_lock;
/* connections with zero refcount */
cfs_list_t kib_connd_zombies;
/* connection daemon sleeps here */
- cfs_waitq_t kib_connd_waitq;
+ wait_queue_head_t kib_connd_waitq;
spinlock_t kib_connd_lock; /* serialise */
struct ib_qp_attr kib_error_qpa; /* QP->ERROR */
/* percpt data for schedulers */
spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); \
cfs_list_add_tail(&(conn)->ibc_list, \
&kiblnd_data.kib_connd_zombies); \
- cfs_waitq_signal(&kiblnd_data.kib_connd_waitq); \
+ wake_up(&kiblnd_data.kib_connd_waitq); \
spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
} \
} while (0)
kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
- if (error != 0 &&
- kiblnd_dev_can_failover(dev)) {
- cfs_list_add_tail(&dev->ibd_fail_list,
- &kiblnd_data.kib_failed_devs);
- cfs_waitq_signal(&kiblnd_data.kib_failover_waitq);
- }
+ if (error != 0 &&
+ kiblnd_dev_can_failover(dev)) {
+ cfs_list_add_tail(&dev->ibd_fail_list,
+ &kiblnd_data.kib_failed_devs);
+ wake_up(&kiblnd_data.kib_failover_waitq);
+ }
spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
cfs_list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns);
- cfs_waitq_signal(&kiblnd_data.kib_connd_waitq);
+ wake_up(&kiblnd_data.kib_connd_waitq);
spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
}
int
kiblnd_connd (void *arg)
{
- cfs_waitlink_t wait;
- unsigned long flags;
- kib_conn_t *conn;
- int timeout;
- int i;
- int dropped_lock;
- int peer_index = 0;
- unsigned long deadline = jiffies;
+ wait_queue_t wait;
+ unsigned long flags;
+ kib_conn_t *conn;
+ int timeout;
+ int i;
+ int dropped_lock;
+ int peer_index = 0;
+ unsigned long deadline = jiffies;
- cfs_block_allsigs ();
+ cfs_block_allsigs ();
- cfs_waitlink_init (&wait);
- kiblnd_data.kib_connd = current;
+ init_waitqueue_entry_current (&wait);
+ kiblnd_data.kib_connd = current;
spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
continue;
/* Nothing to do for 'timeout' */
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add(&kiblnd_data.kib_connd_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
- cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);
+ waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
- cfs_set_current_state(CFS_TASK_RUNNING);
- cfs_waitq_del(&kiblnd_data.kib_connd_waitq, &wait);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
}
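The kiblnd_connd() hunk above is the canonical open-coded sleep after the rename; a hedged sketch of the same sequence using stock kernel calls (init_waitqueue_entry() stands in for the libcfs init_waitqueue_entry_current() wrapper, and in the LNDs the surrounding spinlock is only dropped after add_wait_queue(), which is what prevents lost wake-ups; the sketch omits that lock):

#include <linux/sched.h>
#include <linux/wait.h>

/* Sketch: block on 'waitq' until woken or 'timeout' jiffies elapse.
 * Callers re-check their work lists after this returns, since a
 * wake-up, a timeout and a signal all look the same here. */
static void example_timed_sleep(wait_queue_head_t *waitq, long timeout)
{
        wait_queue_t wait;

        init_waitqueue_entry(&wait, current);

        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(waitq, &wait);

        schedule_timeout(timeout);      /* returns early on wake_up() */

        set_current_state(TASK_RUNNING);
        remove_wait_queue(waitq, &wait);
}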
conn->ibc_scheduled = 1;
cfs_list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);
- if (cfs_waitq_active(&sched->ibs_waitq))
- cfs_waitq_signal(&sched->ibs_waitq);
+ if (waitqueue_active(&sched->ibs_waitq))
+ wake_up(&sched->ibs_waitq);
}
spin_unlock_irqrestore(&sched->ibs_lock, flags);
long id = (long)arg;
struct kib_sched_info *sched;
kib_conn_t *conn;
- cfs_waitlink_t wait;
+ wait_queue_t wait;
unsigned long flags;
struct ib_wc wc;
int did_something;
cfs_block_allsigs();
- cfs_waitlink_init(&wait);
+ init_waitqueue_entry_current(&wait);
sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
if (busy_loops++ >= IBLND_RESCHED) {
spin_unlock_irqrestore(&sched->ibs_lock, flags);
- cfs_cond_resched();
+ cond_resched();
busy_loops = 0;
spin_lock_irqsave(&sched->ibs_lock, flags);
kiblnd_conn_addref(conn);
cfs_list_add_tail(&conn->ibc_sched_list,
&sched->ibs_conns);
- if (cfs_waitq_active(&sched->ibs_waitq))
- cfs_waitq_signal(&sched->ibs_waitq);
+ if (waitqueue_active(&sched->ibs_waitq))
+ wake_up(&sched->ibs_waitq);
} else {
conn->ibc_scheduled = 0;
}
if (did_something)
continue;
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add_exclusive(&sched->ibs_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
spin_unlock_irqrestore(&sched->ibs_lock, flags);
- cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
+ waitq_wait(&wait, TASK_INTERRUPTIBLE);
busy_loops = 0;
- cfs_waitq_del(&sched->ibs_waitq, &wait);
- cfs_set_current_state(CFS_TASK_RUNNING);
+ remove_wait_queue(&sched->ibs_waitq, &wait);
+ set_current_state(TASK_RUNNING);
spin_lock_irqsave(&sched->ibs_lock, flags);
}
kiblnd_failover_thread(void *arg)
{
rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_dev_t *dev;
- cfs_waitlink_t wait;
- unsigned long flags;
- int rc;
+ kib_dev_t *dev;
+ wait_queue_t wait;
+ unsigned long flags;
+ int rc;
- LASSERT (*kiblnd_tunables.kib_dev_failover != 0);
+ LASSERT (*kiblnd_tunables.kib_dev_failover != 0);
- cfs_block_allsigs ();
+ cfs_block_allsigs ();
- cfs_waitlink_init(&wait);
+ init_waitqueue_entry_current(&wait);
write_lock_irqsave(glock, flags);
while (!kiblnd_data.kib_shutdown) {
/* long sleep if no more pending failover */
long_sleep = cfs_list_empty(&kiblnd_data.kib_failed_devs);
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add(&kiblnd_data.kib_failover_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
write_unlock_irqrestore(glock, flags);
rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
cfs_time_seconds(1));
- cfs_set_current_state(CFS_TASK_RUNNING);
- cfs_waitq_del(&kiblnd_data.kib_failover_waitq, &wait);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
write_lock_irqsave(glock, flags);
if (!long_sleep || rc != 0)
kptllnd_data.kptl_shutdown = 2;
cfs_mb();
- i = 2;
- while (cfs_atomic_read (&kptllnd_data.kptl_nthreads) != 0) {
- /* Wake up all threads*/
- cfs_waitq_broadcast(&kptllnd_data.kptl_sched_waitq);
- cfs_waitq_broadcast(&kptllnd_data.kptl_watchdog_waitq);
-
- i++;
- CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
- "Waiting for %d threads to terminate\n",
- cfs_atomic_read(&kptllnd_data.kptl_nthreads));
- cfs_pause(cfs_time_seconds(1));
- }
+ i = 2;
+ while (cfs_atomic_read (&kptllnd_data.kptl_nthreads) != 0) {
+ /* Wake up all threads*/
+ wake_up_all(&kptllnd_data.kptl_sched_waitq);
+ wake_up_all(&kptllnd_data.kptl_watchdog_waitq);
+
+ i++;
+ CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
+ "Waiting for %d threads to terminate\n",
+ cfs_atomic_read(&kptllnd_data.kptl_nthreads));
+ cfs_pause(cfs_time_seconds(1));
+ }
CDEBUG(D_NET, "All Threads stopped\n");
LASSERT(cfs_list_empty(&kptllnd_data.kptl_sched_txq));
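The ptllnd shutdown above repeats the idiom used by every LND: raise a flag, wake every sleeper, then poll the live-thread count. A compact sketch with hypothetical names (cfs_pause() and cfs_time_seconds() are assumed to keep their libcfs semantics):

#include <linux/atomic.h>
#include <linux/wait.h>
#include <libcfs/libcfs.h>      /* cfs_pause(), cfs_time_seconds() */

static struct {
        int                     shutdown;
        atomic_t                nthreads;
        wait_queue_head_t       waitq;
} example_data;

/* Sketch: stop worker threads.  Each worker re-checks 'shutdown' after
 * every wake-up and decrements 'nthreads' just before it exits. */
static void example_stop_threads(void)
{
        example_data.shutdown = 1;

        /* wake_up_all() so both exclusive and non-exclusive sleepers
         * get a chance to observe the flag */
        wake_up_all(&example_data.waitq);

        while (atomic_read(&example_data.nthreads) != 0)
                cfs_pause(cfs_time_seconds(1));
}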
rwlock_init(&kptllnd_data.kptl_net_rw_lock);
CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_nets);
- /* Setup the sched locks/lists/waitq */
+ /* Setup the sched locks/lists/waitq */
spin_lock_init(&kptllnd_data.kptl_sched_lock);
- cfs_waitq_init(&kptllnd_data.kptl_sched_waitq);
- CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_txq);
- CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxq);
- CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxbq);
+ init_waitqueue_head(&kptllnd_data.kptl_sched_waitq);
+ CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_txq);
+ CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxq);
+ CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxbq);
/* Init kptl_ptlid2str_lock before any call to kptllnd_ptlid2str */
spin_lock_init(&kptllnd_data.kptl_ptlid2str_lock);
kptllnd_data.kptl_nak_msg->ptlm_srcstamp = kptllnd_data.kptl_incarnation;
rwlock_init(&kptllnd_data.kptl_peer_rw_lock);
- cfs_waitq_init(&kptllnd_data.kptl_watchdog_waitq);
- CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_closing_peers);
- CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_zombie_peers);
+ init_waitqueue_head(&kptllnd_data.kptl_watchdog_waitq);
+ CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_closing_peers);
+ CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_zombie_peers);
/* Allocate and setup the peer hash table */
kptllnd_data.kptl_peer_hash_size =
cfs_list_t kptl_nets; /* kptl_net instance*/
spinlock_t kptl_sched_lock; /* serialise... */
- cfs_waitq_t kptl_sched_waitq; /* schedulers sleep here */
- cfs_list_t kptl_sched_txq; /* tx requiring attention */
- cfs_list_t kptl_sched_rxq; /* rx requiring attention */
- cfs_list_t kptl_sched_rxbq; /* rxb requiring reposting */
+ wait_queue_head_t kptl_sched_waitq; /* schedulers sleep here */
+ cfs_list_t kptl_sched_txq; /* tx requiring attention */
+ cfs_list_t kptl_sched_rxq; /* rx requiring attention */
+ cfs_list_t kptl_sched_rxbq; /* rxb requiring reposting */
- cfs_waitq_t kptl_watchdog_waitq; /* watchdog sleeps here */
+ wait_queue_head_t kptl_watchdog_waitq; /* watchdog sleeps here */
- kptl_rx_buffer_pool_t kptl_rx_buffer_pool; /* rx buffer pool */
+ kptl_rx_buffer_pool_t kptl_rx_buffer_pool; /* rx buffer pool */
struct kmem_cache *kptl_rx_cache; /* rx descripter cache */
cfs_atomic_t kptl_ntx; /* # tx descs allocated */
cfs_list_add_tail(&rxb->rxb_repost_list,
&kptllnd_data.kptl_sched_rxbq);
- cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
+ wake_up(&kptllnd_data.kptl_sched_waitq);
spin_unlock(&kptllnd_data.kptl_sched_lock);
}
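The repost path above is one of many "queue an item, then wake a waiter" call sites touched by the rename; a minimal sketch of that handoff (structure and names are hypothetical):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct example_sched {
        spinlock_t              lock;
        struct list_head        queue;
        wait_queue_head_t       waitq;
};

/* Sketch, producer side: publish the item while holding the lock, then
 * wake one sleeper.  wake_up() wakes a single exclusive waiter, which
 * pairs with add_wait_queue_exclusive() on the scheduler side. */
static void example_post(struct example_sched *s, struct list_head *item)
{
        unsigned long flags;

        spin_lock_irqsave(&s->lock, flags);
        list_add_tail(item, &s->queue);
        wake_up(&s->waitq);
        spin_unlock_irqrestore(&s->lock, flags);
}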
int
kptllnd_watchdog(void *arg)
{
- int id = (long)arg;
- cfs_waitlink_t waitlink;
- int stamp = 0;
- int peer_index = 0;
- unsigned long deadline = jiffies;
- int timeout;
- int i;
+ int id = (long)arg;
+ wait_queue_t waitlink;
+ int stamp = 0;
+ int peer_index = 0;
+ unsigned long deadline = jiffies;
+ int timeout;
+ int i;
- cfs_block_allsigs();
+ cfs_block_allsigs();
- cfs_waitlink_init(&waitlink);
+ init_waitqueue_entry_current(&waitlink);
/* threads shut down in phase 2 after all peers have been destroyed */
while (kptllnd_data.kptl_shutdown < 2) {
kptllnd_handle_closing_peers();
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add_exclusive(&kptllnd_data.kptl_watchdog_waitq,
- &waitlink);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue_exclusive(&kptllnd_data.kptl_watchdog_waitq,
+ &waitlink);
- cfs_waitq_timedwait(&waitlink, CFS_TASK_INTERRUPTIBLE, timeout);
+ waitq_timedwait(&waitlink, TASK_INTERRUPTIBLE, timeout);
- cfs_set_current_state (CFS_TASK_RUNNING);
- cfs_waitq_del(&kptllnd_data.kptl_watchdog_waitq, &waitlink);
- }
+ set_current_state (TASK_RUNNING);
+ remove_wait_queue(&kptllnd_data.kptl_watchdog_waitq, &waitlink);
+ }
- kptllnd_thread_fini();
- CDEBUG(D_NET, "<<<\n");
- return (0);
+ kptllnd_thread_fini();
+ CDEBUG(D_NET, "<<<\n");
+ return (0);
};
int
kptllnd_scheduler (void *arg)
{
- int id = (long)arg;
- cfs_waitlink_t waitlink;
- unsigned long flags;
- int did_something;
- int counter = 0;
- kptl_rx_t *rx;
- kptl_rx_buffer_t *rxb;
- kptl_tx_t *tx;
+ int id = (long)arg;
+ wait_queue_t waitlink;
+ unsigned long flags;
+ int did_something;
+ int counter = 0;
+ kptl_rx_t *rx;
+ kptl_rx_buffer_t *rxb;
+ kptl_tx_t *tx;
- cfs_block_allsigs();
+ cfs_block_allsigs();
- cfs_waitlink_init(&waitlink);
+ init_waitqueue_entry_current(&waitlink);
spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
continue;
}
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add_exclusive(&kptllnd_data.kptl_sched_waitq,
- &waitlink);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue_exclusive(&kptllnd_data.kptl_sched_waitq,
+ &waitlink);
spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
- flags);
+ flags);
- if (!did_something)
- cfs_waitq_wait(&waitlink, CFS_TASK_INTERRUPTIBLE);
- else
- cfs_cond_resched();
+ if (!did_something)
+ waitq_wait(&waitlink, TASK_INTERRUPTIBLE);
+ else
+ cond_resched();
- cfs_set_current_state(CFS_TASK_RUNNING);
- cfs_waitq_del(&kptllnd_data.kptl_sched_waitq, &waitlink);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&kptllnd_data.kptl_sched_waitq, &waitlink);
spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
- counter = 0;
- }
+ counter = 0;
+ }
spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
void
kptllnd_peer_close_locked(kptl_peer_t *peer, int why)
{
- switch (peer->peer_state) {
- default:
- LBUG();
-
- case PEER_STATE_WAITING_HELLO:
- case PEER_STATE_ACTIVE:
- /* Ensure new peers see a new incarnation of me */
- LASSERT(peer->peer_myincarnation <= kptllnd_data.kptl_incarnation);
- if (peer->peer_myincarnation == kptllnd_data.kptl_incarnation)
- kptllnd_data.kptl_incarnation++;
-
- /* Removing from peer table */
- kptllnd_data.kptl_n_active_peers--;
- LASSERT (kptllnd_data.kptl_n_active_peers >= 0);
-
- cfs_list_del(&peer->peer_list);
- kptllnd_peer_unreserve_buffers();
-
- peer->peer_error = why; /* stash 'why' only on first close */
- peer->peer_state = PEER_STATE_CLOSING;
-
- /* Schedule for immediate attention, taking peer table's ref */
- cfs_list_add_tail(&peer->peer_list,
- &kptllnd_data.kptl_closing_peers);
- cfs_waitq_signal(&kptllnd_data.kptl_watchdog_waitq);
- break;
-
- case PEER_STATE_ZOMBIE:
- case PEER_STATE_CLOSING:
- break;
- }
+ switch (peer->peer_state) {
+ default:
+ LBUG();
+
+ case PEER_STATE_WAITING_HELLO:
+ case PEER_STATE_ACTIVE:
+ /* Ensure new peers see a new incarnation of me */
+ LASSERT(peer->peer_myincarnation <= kptllnd_data.kptl_incarnation);
+ if (peer->peer_myincarnation == kptllnd_data.kptl_incarnation)
+ kptllnd_data.kptl_incarnation++;
+
+ /* Removing from peer table */
+ kptllnd_data.kptl_n_active_peers--;
+ LASSERT (kptllnd_data.kptl_n_active_peers >= 0);
+
+ cfs_list_del(&peer->peer_list);
+ kptllnd_peer_unreserve_buffers();
+
+ peer->peer_error = why; /* stash 'why' only on first close */
+ peer->peer_state = PEER_STATE_CLOSING;
+
+ /* Schedule for immediate attention, taking peer table's ref */
+ cfs_list_add_tail(&peer->peer_list,
+ &kptllnd_data.kptl_closing_peers);
+ wake_up(&kptllnd_data.kptl_watchdog_waitq);
+ break;
+
+ case PEER_STATE_ZOMBIE:
+ case PEER_STATE_CLOSING:
+ break;
+ }
}
void
rx->rx_treceived = jiffies;
/* Queue for attention */
spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
- flags);
+ flags);
- cfs_list_add_tail(&rx->rx_list,
- &kptllnd_data.kptl_sched_rxq);
- cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
+ cfs_list_add_tail(&rx->rx_list,
+ &kptllnd_data.kptl_sched_rxq);
+ wake_up(&kptllnd_data.kptl_sched_waitq);
spin_unlock_irqrestore(&kptllnd_data. \
- kptl_sched_lock, flags);
+ kptl_sched_lock, flags);
}
}
spin_unlock_irqrestore(&peer->peer_lock, flags);
- /* drop peer's ref, but if it was the last one... */
- if (cfs_atomic_dec_and_test(&tx->tx_refcount)) {
- /* ...finalize it in thread context! */
+ /* drop peer's ref, but if it was the last one... */
+ if (cfs_atomic_dec_and_test(&tx->tx_refcount)) {
+ /* ...finalize it in thread context! */
spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
- cfs_list_add_tail(&tx->tx_list, &kptllnd_data.kptl_sched_txq);
- cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
+ cfs_list_add_tail(&tx->tx_list, &kptllnd_data.kptl_sched_txq);
+ wake_up(&kptllnd_data.kptl_sched_waitq);
spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
- flags);
- }
+ flags);
+ }
}
/**********************************************************************/
/* flag threads to terminate, wake them and wait for them to die */
kqswnal_data.kqn_shuttingdown = 2;
- cfs_waitq_broadcast (&kqswnal_data.kqn_sched_waitq);
+ wake_up_all (&kqswnal_data.kqn_sched_waitq);
while (cfs_atomic_read (&kqswnal_data.kqn_nthreads) != 0) {
CDEBUG(D_NET, "waiting for %d threads to terminate\n",
CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_readyrxds);
spin_lock_init(&kqswnal_data.kqn_sched_lock);
- cfs_waitq_init (&kqswnal_data.kqn_sched_waitq);
+ init_waitqueue_head (&kqswnal_data.kqn_sched_waitq);
/* pointers/lists/locks initialised */
kqswnal_data.kqn_init = KQN_INIT_DATA;
typedef struct
{
- char kqn_init; /* what's been initialised */
- char kqn_shuttingdown;/* I'm trying to shut down */
- cfs_atomic_t kqn_nthreads; /* # threads running */
- lnet_ni_t *kqn_ni; /* _the_ instance of me */
+ char kqn_init; /* what's been initialised */
+ char kqn_shuttingdown;/* I'm trying to shut down */
+ cfs_atomic_t kqn_nthreads; /* # threads running */
+ lnet_ni_t *kqn_ni; /* _the_ instance of me */
- kqswnal_rx_t *kqn_rxds; /* stack of all the receive descriptors */
- kqswnal_tx_t *kqn_txds; /* stack of all the transmit descriptors */
+ kqswnal_rx_t *kqn_rxds; /* stack of all the receive descriptors */
+ kqswnal_tx_t *kqn_txds; /* stack of all the transmit descriptors */
- cfs_list_t kqn_idletxds; /* transmit descriptors free to use */
- cfs_list_t kqn_activetxds; /* transmit descriptors being used */
+ cfs_list_t kqn_idletxds; /* transmit descriptors free to use */
+ cfs_list_t kqn_activetxds; /* transmit descriptors being used */
spinlock_t kqn_idletxd_lock; /* serialise idle txd access */
cfs_atomic_t kqn_pending_txs; /* # transmits being prepped */
spinlock_t kqn_sched_lock; /* serialise packet schedulers */
- cfs_waitq_t kqn_sched_waitq;/* scheduler blocks here */
-
- cfs_list_t kqn_readyrxds; /* rxds full of data */
- cfs_list_t kqn_donetxds; /* completed transmits */
- cfs_list_t kqn_delayedtxds;/* delayed transmits */
-
- EP_SYS *kqn_ep; /* elan system */
- EP_NMH *kqn_ep_tx_nmh; /* elan reserved tx vaddrs */
- EP_NMH *kqn_ep_rx_nmh; /* elan reserved rx vaddrs */
- EP_XMTR *kqn_eptx; /* elan transmitter */
- EP_RCVR *kqn_eprx_small; /* elan receiver (small messages) */
- EP_RCVR *kqn_eprx_large; /* elan receiver (large messages) */
-
- int kqn_nnodes; /* this cluster's size */
- int kqn_elanid; /* this nodes's elan ID */
-
- EP_STATUSBLK kqn_rpc_success;/* preset RPC reply status blocks */
- EP_STATUSBLK kqn_rpc_failed;
- EP_STATUSBLK kqn_rpc_version;/* reply to future version query */
- EP_STATUSBLK kqn_rpc_magic; /* reply to future version query */
-} kqswnal_data_t;
+ wait_queue_head_t kqn_sched_waitq;/* scheduler blocks here */
+
+ cfs_list_t kqn_readyrxds; /* rxds full of data */
+ cfs_list_t kqn_donetxds; /* completed transmits */
+ cfs_list_t kqn_delayedtxds;/* delayed transmits */
+
+ EP_SYS *kqn_ep; /* elan system */
+ EP_NMH *kqn_ep_tx_nmh; /* elan reserved tx vaddrs */
+ EP_NMH *kqn_ep_rx_nmh; /* elan reserved rx vaddrs */
+ EP_XMTR *kqn_eptx; /* elan transmitter */
+ EP_RCVR *kqn_eprx_small; /* elan receiver (small messages) */
+ EP_RCVR *kqn_eprx_large; /* elan receiver (large messages) */
+
+ int kqn_nnodes; /* this cluster's size */
+ int kqn_elanid; /* this node's elan ID */
+
+ EP_STATUSBLK kqn_rpc_success;/* preset RPC reply status blocks */
+ EP_STATUSBLK kqn_rpc_failed;
+ EP_STATUSBLK kqn_rpc_version;/* reply to future version query */
+ EP_STATUSBLK kqn_rpc_magic; /* reply to future version query */
+} kqswnal_data_t;
/* kqn_init state */
#define KQN_INIT_NOTHING 0 /* MUST BE ZERO so zeroed state is initialised OK */
cfs_list_add_tail(&ktx->ktx_schedlist,
&kqswnal_data.kqn_donetxds);
- cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
+ wake_up(&kqswnal_data.kqn_sched_waitq);
spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
}
cfs_list_add_tail(&ktx->ktx_schedlist,
&kqswnal_data.kqn_delayedtxds);
- cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
+ wake_up(&kqswnal_data.kqn_sched_waitq);
spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
flags);
spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
cfs_list_add_tail(&krx->krx_list, &kqswnal_data.kqn_readyrxds);
- cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
+ wake_up(&kqswnal_data.kqn_sched_waitq);
spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
}
kqn_donetxds) ||
!cfs_list_empty(&kqswnal_data. \
kqn_delayedtxds));
- LASSERT (rc == 0);
- } else if (need_resched())
- cfs_schedule ();
+ LASSERT (rc == 0);
+ } else if (need_resched())
+ schedule ();
spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
- flags);
- }
- }
+ flags);
+ }
+ }
- kqswnal_thread_fini ();
- return (0);
+ kqswnal_thread_fini ();
+ return 0;
}
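kqswnal's scheduler above (and ksocknal's further down) sleeps through wait_event() rather than the open-coded queue/schedule sequence, re-evaluating a predicate on every wake-up; a hedged sketch of that style with illustrative names:

#include <linux/list.h>
#include <linux/wait.h>

/* Sketch: block until work appears.  wait_event() re-checks the
 * condition each time the task is woken, so spurious or racing
 * wake-ups simply put it back to sleep. */
static void example_wait_for_work(wait_queue_head_t *waitq,
                                  struct list_head *queue)
{
        wait_event(*waitq, !list_empty(queue));
}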
kranal_set_conn_params(kra_conn_t *conn, kra_connreq_t *connreq,
__u32 peer_ip, int peer_port)
{
- kra_device_t *dev = conn->rac_device;
- unsigned long flags;
- RAP_RETURN rrc;
-
- /* CAVEAT EMPTOR: we're really overloading rac_last_tx + rac_keepalive
- * to do RapkCompleteSync() timekeeping (see kibnal_scheduler). */
- conn->rac_last_tx = jiffies;
- conn->rac_keepalive = 0;
-
- rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
- if (rrc != RAP_SUCCESS) {
- CERROR("Error setting riparams from %u.%u.%u.%u/%d: %d\n",
- HIPQUAD(peer_ip), peer_port, rrc);
- return -ECONNABORTED;
- }
-
- /* Schedule conn on rad_new_conns */
- kranal_conn_addref(conn);
+ kra_device_t *dev = conn->rac_device;
+ unsigned long flags;
+ RAP_RETURN rrc;
+
+ /* CAVEAT EMPTOR: we're really overloading rac_last_tx + rac_keepalive
+ * to do RapkCompleteSync() timekeeping (see kibnal_scheduler). */
+ conn->rac_last_tx = jiffies;
+ conn->rac_keepalive = 0;
+
+ rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
+ if (rrc != RAP_SUCCESS) {
+ CERROR("Error setting riparams from %u.%u.%u.%u/%d: %d\n",
+ HIPQUAD(peer_ip), peer_port, rrc);
+ return -ECONNABORTED;
+ }
+
+ /* Schedule conn on rad_new_conns */
+ kranal_conn_addref(conn);
spin_lock_irqsave(&dev->rad_lock, flags);
- cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns);
- cfs_waitq_signal(&dev->rad_waitq);
+ cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns);
+ wake_up(&dev->rad_waitq);
spin_unlock_irqrestore(&dev->rad_lock, flags);
- rrc = RapkWaitToConnect(conn->rac_rihandle);
- if (rrc != RAP_SUCCESS) {
- CERROR("Error waiting to connect to %u.%u.%u.%u/%d: %d\n",
- HIPQUAD(peer_ip), peer_port, rrc);
- return -ECONNABORTED;
- }
-
- /* Scheduler doesn't touch conn apart from to deschedule and decref it
- * after RapkCompleteSync() return success, so conn is all mine */
-
- conn->rac_peerstamp = connreq->racr_peerstamp;
- conn->rac_peer_connstamp = connreq->racr_connstamp;
- conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq->racr_timeout);
- kranal_update_reaper_timeout(conn->rac_keepalive);
- return 0;
+ rrc = RapkWaitToConnect(conn->rac_rihandle);
+ if (rrc != RAP_SUCCESS) {
+ CERROR("Error waiting to connect to %u.%u.%u.%u/%d: %d\n",
+ HIPQUAD(peer_ip), peer_port, rrc);
+ return -ECONNABORTED;
+ }
+
+ /* Scheduler doesn't touch conn apart from to deschedule and decref it
+ * after RapkCompleteSync() returns success, so conn is all mine */
+
+ conn->rac_peerstamp = connreq->racr_peerstamp;
+ conn->rac_peer_connstamp = connreq->racr_connstamp;
+ conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq->racr_timeout);
+ kranal_update_reaper_timeout(conn->rac_keepalive);
+ return 0;
}
int
int
kranal_accept (lnet_ni_t *ni, struct socket *sock)
{
- kra_acceptsock_t *ras;
- int rc;
- __u32 peer_ip;
- int peer_port;
- unsigned long flags;
+ kra_acceptsock_t *ras;
+ int rc;
+ __u32 peer_ip;
+ int peer_port;
+ unsigned long flags;
- rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
- LASSERT (rc == 0); /* we succeeded before */
+ rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
+ LASSERT (rc == 0); /* we succeeded before */
- LIBCFS_ALLOC(ras, sizeof(*ras));
- if (ras == NULL) {
- CERROR("ENOMEM allocating connection request from "
- "%u.%u.%u.%u\n", HIPQUAD(peer_ip));
- return -ENOMEM;
- }
+ LIBCFS_ALLOC(ras, sizeof(*ras));
+ if (ras == NULL) {
+ CERROR("ENOMEM allocating connection request from "
+ "%u.%u.%u.%u\n", HIPQUAD(peer_ip));
+ return -ENOMEM;
+ }
- ras->ras_sock = sock;
+ ras->ras_sock = sock;
spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
- cfs_list_add_tail(&ras->ras_list, &kranal_data.kra_connd_acceptq);
- cfs_waitq_signal(&kranal_data.kra_connd_waitq);
+ cfs_list_add_tail(&ras->ras_list, &kranal_data.kra_connd_acceptq);
+ wake_up(&kranal_data.kra_connd_waitq);
spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
- return 0;
+ return 0;
}
int
/* Flag threads to terminate */
kranal_data.kra_shutdown = 1;
- for (i = 0; i < kranal_data.kra_ndevs; i++) {
- kra_device_t *dev = &kranal_data.kra_devices[i];
+ for (i = 0; i < kranal_data.kra_ndevs; i++) {
+ kra_device_t *dev = &kranal_data.kra_devices[i];
spin_lock_irqsave(&dev->rad_lock, flags);
- cfs_waitq_signal(&dev->rad_waitq);
+ wake_up(&dev->rad_waitq);
spin_unlock_irqrestore(&dev->rad_lock, flags);
- }
+ }
spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
- cfs_waitq_broadcast(&kranal_data.kra_reaper_waitq);
+ wake_up_all(&kranal_data.kra_reaper_waitq);
spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
- LASSERT (cfs_list_empty(&kranal_data.kra_connd_peers));
+ LASSERT (cfs_list_empty(&kranal_data.kra_connd_peers));
spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
- cfs_waitq_broadcast(&kranal_data.kra_connd_waitq);
+ wake_up_all(&kranal_data.kra_connd_waitq);
spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
/* Wait for threads to exit */
rwlock_init(&kranal_data.kra_global_lock);
- for (i = 0; i < RANAL_MAXDEVS; i++ ) {
- kra_device_t *dev = &kranal_data.kra_devices[i];
+ for (i = 0; i < RANAL_MAXDEVS; i++ ) {
+ kra_device_t *dev = &kranal_data.kra_devices[i];
- dev->rad_idx = i;
- CFS_INIT_LIST_HEAD(&dev->rad_ready_conns);
- CFS_INIT_LIST_HEAD(&dev->rad_new_conns);
- cfs_waitq_init(&dev->rad_waitq);
+ dev->rad_idx = i;
+ CFS_INIT_LIST_HEAD(&dev->rad_ready_conns);
+ CFS_INIT_LIST_HEAD(&dev->rad_new_conns);
+ init_waitqueue_head(&dev->rad_waitq);
spin_lock_init(&dev->rad_lock);
- }
+ }
- kranal_data.kra_new_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
- cfs_waitq_init(&kranal_data.kra_reaper_waitq);
+ kranal_data.kra_new_min_timeout = MAX_SCHEDULE_TIMEOUT;
+ init_waitqueue_head(&kranal_data.kra_reaper_waitq);
spin_lock_init(&kranal_data.kra_reaper_lock);
- CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
- CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
- cfs_waitq_init(&kranal_data.kra_connd_waitq);
+ CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
+ CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
+ init_waitqueue_head(&kranal_data.kra_connd_waitq);
spin_lock_init(&kranal_data.kra_connd_lock);
CFS_INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
typedef struct
{
- RAP_PVOID rad_handle; /* device handle */
- RAP_PVOID rad_fma_cqh; /* FMA completion queue handle */
- RAP_PVOID rad_rdma_cqh; /* rdma completion queue handle */
- int rad_id; /* device id */
- int rad_idx; /* index in kra_devices */
- int rad_ready; /* set by device callback */
- cfs_list_t rad_ready_conns;/* connections ready to tx/rx */
- cfs_list_t rad_new_conns; /* new connections to complete */
- cfs_waitq_t rad_waitq; /* scheduler waits here */
- spinlock_t rad_lock; /* serialise */
- void *rad_scheduler; /* scheduling thread */
- unsigned int rad_nphysmap; /* # phys mappings */
- unsigned int rad_nppphysmap;/* # phys pages mapped */
- unsigned int rad_nvirtmap; /* # virt mappings */
- unsigned long rad_nobvirtmap;/* # virt bytes mapped */
+ RAP_PVOID rad_handle; /* device handle */
+ RAP_PVOID rad_fma_cqh; /* FMA completion queue handle */
+ RAP_PVOID rad_rdma_cqh; /* rdma completion queue handle */
+ int rad_id; /* device id */
+ int rad_idx; /* index in kra_devices */
+ int rad_ready; /* set by device callback */
+ cfs_list_t rad_ready_conns;/* connections ready to tx/rx */
+ cfs_list_t rad_new_conns; /* new connections to complete */
+ wait_queue_head_t rad_waitq; /* scheduler waits here */
+ spinlock_t rad_lock; /* serialise */
+ void *rad_scheduler; /* scheduling thread */
+ unsigned int rad_nphysmap; /* # phys mappings */
+ unsigned int rad_nppphysmap;/* # phys pages mapped */
+ unsigned int rad_nvirtmap; /* # virt mappings */
+ unsigned long rad_nobvirtmap;/* # virt bytes mapped */
} kra_device_t;
typedef struct
{
- int kra_init; /* initialisation state */
- int kra_shutdown; /* shut down? */
- cfs_atomic_t kra_nthreads; /* # live threads */
- lnet_ni_t *kra_ni; /* _the_ nal instance */
+ int kra_init; /* initialisation state */
+ int kra_shutdown; /* shut down? */
+ cfs_atomic_t kra_nthreads; /* # live threads */
+ lnet_ni_t *kra_ni; /* _the_ nal instance */
- kra_device_t kra_devices[RANAL_MAXDEVS]; /* device/ptag/cq */
- int kra_ndevs; /* # devices */
+ kra_device_t kra_devices[RANAL_MAXDEVS]; /* device/ptag/cq */
+ int kra_ndevs; /* # devices */
rwlock_t kra_global_lock; /* stabilize peer/conn ops */
- cfs_list_t *kra_peers; /* hash table of all my known peers */
- int kra_peer_hash_size; /* size of kra_peers */
- cfs_atomic_t kra_npeers; /* # peers extant */
- int kra_nonewpeers; /* prevent new peers */
+ cfs_list_t *kra_peers; /* hash table of all my known peers */
+ int kra_peer_hash_size; /* size of kra_peers */
+ cfs_atomic_t kra_npeers; /* # peers extant */
+ int kra_nonewpeers; /* prevent new peers */
- cfs_list_t *kra_conns; /* conns hashed by cqid */
- int kra_conn_hash_size; /* size of kra_conns */
- __u64 kra_peerstamp; /* when I started up */
- __u64 kra_connstamp; /* conn stamp generator */
- int kra_next_cqid; /* cqid generator */
- cfs_atomic_t kra_nconns; /* # connections extant */
+ cfs_list_t *kra_conns; /* conns hashed by cqid */
+ int kra_conn_hash_size; /* size of kra_conns */
+ __u64 kra_peerstamp; /* when I started up */
+ __u64 kra_connstamp; /* conn stamp generator */
+ int kra_next_cqid; /* cqid generator */
+ cfs_atomic_t kra_nconns; /* # connections extant */
- long kra_new_min_timeout; /* minimum timeout on any new conn */
- cfs_waitq_t kra_reaper_waitq; /* reaper sleeps here */
+ long kra_new_min_timeout; /* minimum timeout on any new conn */
+ wait_queue_head_t kra_reaper_waitq; /* reaper sleeps here */
spinlock_t kra_reaper_lock; /* serialise */
- cfs_list_t kra_connd_peers; /* peers waiting for a connection */
- cfs_list_t kra_connd_acceptq; /* accepted sockets to handshake */
- cfs_waitq_t kra_connd_waitq; /* connection daemons sleep here */
+ cfs_list_t kra_connd_peers; /* peers waiting for a connection */
+ cfs_list_t kra_connd_acceptq; /* accepted sockets to handshake */
+ wait_queue_head_t kra_connd_waitq; /* connection daemons sleep here */
spinlock_t kra_connd_lock; /* serialise */
- cfs_list_t kra_idle_txs; /* idle tx descriptors */
- __u64 kra_next_tx_cookie; /* RDMA completion cookie */
+ cfs_list_t kra_idle_txs; /* idle tx descriptors */
+ __u64 kra_next_tx_cookie; /* RDMA completion cookie */
spinlock_t kra_tx_lock; /* serialise */
} kra_data_t;
spin_lock_irqsave(&dev->rad_lock, flags);
- if (!dev->rad_ready) {
- dev->rad_ready = 1;
- cfs_waitq_signal(&dev->rad_waitq);
- }
+ if (!dev->rad_ready) {
+ dev->rad_ready = 1;
+ wake_up(&dev->rad_waitq);
+ }
spin_unlock_irqrestore(&dev->rad_lock, flags);
return;
spin_lock_irqsave(&dev->rad_lock, flags);
- if (!conn->rac_scheduled) {
- kranal_conn_addref(conn); /* +1 ref for scheduler */
- conn->rac_scheduled = 1;
- cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_ready_conns);
- cfs_waitq_signal(&dev->rad_waitq);
- }
+ if (!conn->rac_scheduled) {
+ kranal_conn_addref(conn); /* +1 ref for scheduler */
+ conn->rac_scheduled = 1;
+ cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_ready_conns);
+ wake_up(&dev->rad_waitq);
+ }
spin_unlock_irqrestore(&dev->rad_lock, flags);
}
spin_lock(&kranal_data.kra_connd_lock);
- cfs_list_add_tail(&peer->rap_connd_list,
- &kranal_data.kra_connd_peers);
- cfs_waitq_signal(&kranal_data.kra_connd_waitq);
+ cfs_list_add_tail(&peer->rap_connd_list,
+ &kranal_data.kra_connd_peers);
+ wake_up(&kranal_data.kra_connd_waitq);
spin_unlock(&kranal_data.kra_connd_lock);
}
int
kranal_connd (void *arg)
{
- long id = (long)arg;
- cfs_waitlink_t wait;
- unsigned long flags;
- kra_peer_t *peer;
- kra_acceptsock_t *ras;
- int did_something;
+ long id = (long)arg;
+ wait_queue_t wait;
+ unsigned long flags;
+ kra_peer_t *peer;
+ kra_acceptsock_t *ras;
+ int did_something;
- cfs_block_allsigs();
+ cfs_block_allsigs();
- cfs_waitlink_init(&wait);
+ init_waitqueue_entry_current(&wait);
spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
- while (!kranal_data.kra_shutdown) {
- did_something = 0;
+ while (!kranal_data.kra_shutdown) {
+ did_something = 0;
- if (!cfs_list_empty(&kranal_data.kra_connd_acceptq)) {
- ras = cfs_list_entry(kranal_data.kra_connd_acceptq.next,
- kra_acceptsock_t, ras_list);
- cfs_list_del(&ras->ras_list);
+ if (!cfs_list_empty(&kranal_data.kra_connd_acceptq)) {
+ ras = cfs_list_entry(kranal_data.kra_connd_acceptq.next,
+ kra_acceptsock_t, ras_list);
+ cfs_list_del(&ras->ras_list);
spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
- flags);
+ flags);
- CDEBUG(D_NET,"About to handshake someone\n");
+ CDEBUG(D_NET,"About to handshake someone\n");
- kranal_conn_handshake(ras->ras_sock, NULL);
- kranal_free_acceptsock(ras);
+ kranal_conn_handshake(ras->ras_sock, NULL);
+ kranal_free_acceptsock(ras);
- CDEBUG(D_NET,"Finished handshaking someone\n");
+ CDEBUG(D_NET,"Finished handshaking someone\n");
spin_lock_irqsave(&kranal_data.kra_connd_lock,
- flags);
- did_something = 1;
- }
+ flags);
+ did_something = 1;
+ }
- if (!cfs_list_empty(&kranal_data.kra_connd_peers)) {
- peer = cfs_list_entry(kranal_data.kra_connd_peers.next,
- kra_peer_t, rap_connd_list);
+ if (!cfs_list_empty(&kranal_data.kra_connd_peers)) {
+ peer = cfs_list_entry(kranal_data.kra_connd_peers.next,
+ kra_peer_t, rap_connd_list);
- cfs_list_del_init(&peer->rap_connd_list);
+ cfs_list_del_init(&peer->rap_connd_list);
spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
- flags);
+ flags);
- kranal_connect(peer);
- kranal_peer_decref(peer);
+ kranal_connect(peer);
+ kranal_peer_decref(peer);
spin_lock_irqsave(&kranal_data.kra_connd_lock,
- flags);
- did_something = 1;
- }
+ flags);
+ did_something = 1;
+ }
- if (did_something)
- continue;
+ if (did_something)
+ continue;
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add_exclusive(&kranal_data.kra_connd_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue_exclusive(&kranal_data.kra_connd_waitq, &wait);
spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
- cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
+ waitq_wait(&wait, TASK_INTERRUPTIBLE);
- cfs_set_current_state(CFS_TASK_RUNNING);
- cfs_waitq_del(&kranal_data.kra_connd_waitq, &wait);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&kranal_data.kra_connd_waitq, &wait);
spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
- }
+ }
spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
- kranal_thread_fini();
- return 0;
+ kranal_thread_fini();
+ return 0;
}
void
int
kranal_reaper (void *arg)
{
- cfs_waitlink_t wait;
- unsigned long flags;
- long timeout;
- int i;
- int conn_entries = kranal_data.kra_conn_hash_size;
- int conn_index = 0;
- int base_index = conn_entries - 1;
- unsigned long next_check_time = jiffies;
- long next_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
- long current_min_timeout = 1;
+ wait_queue_t wait;
+ unsigned long flags;
+ long timeout;
+ int i;
+ int conn_entries = kranal_data.kra_conn_hash_size;
+ int conn_index = 0;
+ int base_index = conn_entries - 1;
+ unsigned long next_check_time = jiffies;
+ long next_min_timeout = MAX_SCHEDULE_TIMEOUT;
+ long current_min_timeout = 1;
- cfs_block_allsigs();
+ cfs_block_allsigs();
- cfs_waitlink_init(&wait);
+ init_waitqueue_entry_current(&wait);
spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
- while (!kranal_data.kra_shutdown) {
- /* I wake up every 'p' seconds to check for timeouts on some
- * more peers. I try to check every connection 'n' times
- * within the global minimum of all keepalive and timeout
- * intervals, to ensure I attend to every connection within
- * (n+1)/n times its timeout intervals. */
- const int p = 1;
- const int n = 3;
- unsigned long min_timeout;
- int chunk;
-
- /* careful with the jiffy wrap... */
- timeout = (long)(next_check_time - jiffies);
- if (timeout > 0) {
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add(&kranal_data.kra_reaper_waitq, &wait);
+ while (!kranal_data.kra_shutdown) {
+ /* I wake up every 'p' seconds to check for timeouts on some
+ * more peers. I try to check every connection 'n' times
+ * within the global minimum of all keepalive and timeout
+ * intervals, to ensure I attend to every connection within
+ * (n+1)/n times its timeout intervals. */
+ const int p = 1;
+ const int n = 3;
+ unsigned long min_timeout;
+ int chunk;
+
+ /* careful with the jiffy wrap... */
+ timeout = (long)(next_check_time - jiffies);
+ if (timeout > 0) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&kranal_data.kra_reaper_waitq, &wait);
spin_unlock_irqrestore(&kranal_data.kra_reaper_lock,
- flags);
+ flags);
- cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
- timeout);
+ waitq_timedwait(&wait, TASK_INTERRUPTIBLE,
+ timeout);
spin_lock_irqsave(&kranal_data.kra_reaper_lock,
- flags);
+ flags);
- cfs_set_current_state(CFS_TASK_RUNNING);
- cfs_waitq_del(&kranal_data.kra_reaper_waitq, &wait);
- continue;
- }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&kranal_data.kra_reaper_waitq, &wait);
+ continue;
+ }
- if (kranal_data.kra_new_min_timeout !=
- CFS_MAX_SCHEDULE_TIMEOUT) {
- /* new min timeout set: restart min timeout scan */
- next_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
- base_index = conn_index - 1;
- if (base_index < 0)
- base_index = conn_entries - 1;
-
- if (kranal_data.kra_new_min_timeout <
- current_min_timeout) {
- current_min_timeout =
- kranal_data.kra_new_min_timeout;
- CDEBUG(D_NET, "Set new min timeout %ld\n",
- current_min_timeout);
- }
+ if (kranal_data.kra_new_min_timeout !=
+ MAX_SCHEDULE_TIMEOUT) {
+ /* new min timeout set: restart min timeout scan */
+ next_min_timeout = MAX_SCHEDULE_TIMEOUT;
+ base_index = conn_index - 1;
+ if (base_index < 0)
+ base_index = conn_entries - 1;
+
+ if (kranal_data.kra_new_min_timeout <
+ current_min_timeout) {
+ current_min_timeout =
+ kranal_data.kra_new_min_timeout;
+ CDEBUG(D_NET, "Set new min timeout %ld\n",
+ current_min_timeout);
+ }
- kranal_data.kra_new_min_timeout =
- CFS_MAX_SCHEDULE_TIMEOUT;
- }
- min_timeout = current_min_timeout;
+ kranal_data.kra_new_min_timeout =
+ MAX_SCHEDULE_TIMEOUT;
+ }
+ min_timeout = current_min_timeout;
spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
- LASSERT (min_timeout > 0);
-
- /* Compute how many table entries to check now so I get round
- * the whole table fast enough given that I do this at fixed
- * intervals of 'p' seconds) */
- chunk = conn_entries;
- if (min_timeout > n * p)
- chunk = (chunk * n * p) / min_timeout;
- if (chunk == 0)
- chunk = 1;
-
- for (i = 0; i < chunk; i++) {
- kranal_reaper_check(conn_index,
- &next_min_timeout);
- conn_index = (conn_index + 1) % conn_entries;
- }
+ LASSERT (min_timeout > 0);
+
+ /* Compute how many table entries to check now so I get round
+ * the whole table fast enough given that I do this at fixed
+ * intervals of 'p' seconds. */
+ chunk = conn_entries;
+ if (min_timeout > n * p)
+ chunk = (chunk * n * p) / min_timeout;
+ if (chunk == 0)
+ chunk = 1;
+
+ for (i = 0; i < chunk; i++) {
+ kranal_reaper_check(conn_index,
+ &next_min_timeout);
+ conn_index = (conn_index + 1) % conn_entries;
+ }
next_check_time += p * HZ;
spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
- if (((conn_index - chunk <= base_index &&
- base_index < conn_index) ||
- (conn_index - conn_entries - chunk <= base_index &&
- base_index < conn_index - conn_entries))) {
+ if (((conn_index - chunk <= base_index &&
+ base_index < conn_index) ||
+ (conn_index - conn_entries - chunk <= base_index &&
+ base_index < conn_index - conn_entries))) {
- /* Scanned all conns: set current_min_timeout... */
- if (current_min_timeout != next_min_timeout) {
- current_min_timeout = next_min_timeout;
- CDEBUG(D_NET, "Set new min timeout %ld\n",
- current_min_timeout);
- }
+ /* Scanned all conns: set current_min_timeout... */
+ if (current_min_timeout != next_min_timeout) {
+ current_min_timeout = next_min_timeout;
+ CDEBUG(D_NET, "Set new min timeout %ld\n",
+ current_min_timeout);
+ }
- /* ...and restart min timeout scan */
- next_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
- base_index = conn_index - 1;
- if (base_index < 0)
- base_index = conn_entries - 1;
- }
- }
+ /* ...and restart min timeout scan */
+ next_min_timeout = MAX_SCHEDULE_TIMEOUT;
+ base_index = conn_index - 1;
+ if (base_index < 0)
+ base_index = conn_entries - 1;
+ }
+ }
- kranal_thread_fini();
- return 0;
+ kranal_thread_fini();
+ return 0;
}
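kranal_reaper() above schedules its next pass with signed jiffy arithmetic so the deadline test survives jiffies wrap-around; a minimal sketch of just that calculation (the helper name is illustrative):

#include <linux/jiffies.h>

/* Sketch: jiffies remaining until 'deadline', wrap-safe.  Casting the
 * unsigned difference to long keeps the sign meaningful even after the
 * jiffies counter overflows; 0 means the deadline has already passed. */
static long example_time_to_deadline(unsigned long deadline)
{
        long timeout = (long)(deadline - jiffies);

        return timeout > 0 ? timeout : 0;
}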
void
int
kranal_scheduler (void *arg)
{
- kra_device_t *dev = (kra_device_t *)arg;
- cfs_waitlink_t wait;
- kra_conn_t *conn;
+ kra_device_t *dev = (kra_device_t *)arg;
+ wait_queue_t wait;
+ kra_conn_t *conn;
unsigned long flags;
unsigned long deadline;
unsigned long soonest;
cfs_block_allsigs();
- dev->rad_scheduler = current;
- cfs_waitlink_init(&wait);
+ dev->rad_scheduler = current;
+ init_waitqueue_entry_current(&wait);
spin_lock_irqsave(&dev->rad_lock, flags);
if (busy_loops++ >= RANAL_RESCHED) {
spin_unlock_irqrestore(&dev->rad_lock, flags);
- cfs_cond_resched();
- busy_loops = 0;
+ cond_resched();
+ busy_loops = 0;
spin_lock_irqsave(&dev->rad_lock, flags);
}
if (dropped_lock) /* may sleep iff I didn't drop the lock */
continue;
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add_exclusive(&dev->rad_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue_exclusive(&dev->rad_waitq, &wait);
spin_unlock_irqrestore(&dev->rad_lock, flags);
- if (nsoonest == 0) {
- busy_loops = 0;
- cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
- } else {
- timeout = (long)(soonest - jiffies);
- if (timeout > 0) {
- busy_loops = 0;
- cfs_waitq_timedwait(&wait,
- CFS_TASK_INTERRUPTIBLE,
- timeout);
- }
- }
+ if (nsoonest == 0) {
+ busy_loops = 0;
+ waitq_wait(&wait, TASK_INTERRUPTIBLE);
+ } else {
+ timeout = (long)(soonest - jiffies);
+ if (timeout > 0) {
+ busy_loops = 0;
+ waitq_timedwait(&wait,
+ TASK_INTERRUPTIBLE,
+ timeout);
+ }
+ }
- cfs_waitq_del(&dev->rad_waitq, &wait);
- cfs_set_current_state(CFS_TASK_RUNNING);
+ remove_wait_queue(&dev->rad_waitq, &wait);
+ set_current_state(TASK_RUNNING);
spin_lock_irqsave(&dev->rad_lock, flags);
- }
+ }
spin_unlock_irqrestore(&dev->rad_lock, flags);
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
- cfs_list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
- cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
+ cfs_list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
+ wake_up(&ksocknal_data.ksnd_connd_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
return 0;
cfs_list_add_tail(&conn->ksnc_list,
&ksocknal_data.ksnd_deathrow_conns);
- cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);
+ wake_up(&ksocknal_data.ksnd_reaper_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
}
&sched->kss_tx_conns);
conn->ksnc_tx_scheduled = 1;
/* extra ref for scheduler */
- ksocknal_conn_addref(conn);
+ ksocknal_conn_addref(conn);
- cfs_waitq_signal (&sched->kss_waitq);
- }
+ wake_up (&sched->kss_waitq);
+ }
spin_unlock_bh(&sched->kss_lock);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
cfs_list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
- cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);
+ wake_up(&ksocknal_data.ksnd_reaper_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
}
/* flag threads to terminate; wake and wait for them to die */
ksocknal_data.ksnd_shuttingdown = 1;
- cfs_waitq_broadcast(&ksocknal_data.ksnd_connd_waitq);
- cfs_waitq_broadcast(&ksocknal_data.ksnd_reaper_waitq);
+ wake_up_all(&ksocknal_data.ksnd_connd_waitq);
+ wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
if (ksocknal_data.ksnd_sched_info != NULL) {
cfs_percpt_for_each(info, i,
for (j = 0; j < info->ksi_nthreads_max; j++) {
sched = &info->ksi_scheds[j];
- cfs_waitq_broadcast(&sched->kss_waitq);
+ wake_up_all(&sched->kss_waitq);
}
}
}
CFS_INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
- CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_enomem_conns);
- CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_zombie_conns);
- CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_deathrow_conns);
- cfs_waitq_init(&ksocknal_data.ksnd_reaper_waitq);
+ CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_enomem_conns);
+ CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_zombie_conns);
+ CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_deathrow_conns);
+ init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
spin_lock_init(&ksocknal_data.ksnd_connd_lock);
- CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_connreqs);
- CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_routes);
- cfs_waitq_init(&ksocknal_data.ksnd_connd_waitq);
+ CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_connreqs);
+ CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_routes);
+ init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
spin_lock_init(&ksocknal_data.ksnd_tx_lock);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_idle_noop_txs);
CFS_INIT_LIST_HEAD(&sched->kss_rx_conns);
CFS_INIT_LIST_HEAD(&sched->kss_tx_conns);
CFS_INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
- cfs_waitq_init(&sched->kss_waitq);
+ init_waitqueue_head(&sched->kss_waitq);
}
}
cfs_list_t kss_tx_conns;
/* zombie noop tx list */
cfs_list_t kss_zombie_noop_txs;
- cfs_waitq_t kss_waitq; /* where scheduler sleeps */
+ wait_queue_head_t kss_waitq; /* where scheduler sleeps */
/* # connections assigned to this scheduler */
int kss_nconns;
struct ksock_sched_info *kss_info; /* owner of it */
/* schedulers information */
struct ksock_sched_info **ksnd_sched_info;
- cfs_atomic_t ksnd_nactive_txs; /* #active txs */
+ cfs_atomic_t ksnd_nactive_txs; /* #active txs */
- cfs_list_t ksnd_deathrow_conns; /* conns to close: reaper_lock*/
- cfs_list_t ksnd_zombie_conns; /* conns to free: reaper_lock */
- cfs_list_t ksnd_enomem_conns; /* conns to retry: reaper_lock*/
- cfs_waitq_t ksnd_reaper_waitq; /* reaper sleeps here */
- cfs_time_t ksnd_reaper_waketime;/* when reaper will wake */
+ cfs_list_t ksnd_deathrow_conns; /* conns to close: reaper_lock*/
+ cfs_list_t ksnd_zombie_conns; /* conns to free: reaper_lock */
+ cfs_list_t ksnd_enomem_conns; /* conns to retry: reaper_lock*/
+ wait_queue_head_t ksnd_reaper_waitq; /* reaper sleeps here */
+ cfs_time_t ksnd_reaper_waketime;/* when reaper will wake */
spinlock_t ksnd_reaper_lock; /* serialise */
- int ksnd_enomem_tx; /* test ENOMEM sender */
- int ksnd_stall_tx; /* test sluggish sender */
- int ksnd_stall_rx; /* test sluggish receiver */
-
- cfs_list_t ksnd_connd_connreqs; /* incoming connection requests */
- cfs_list_t ksnd_connd_routes; /* routes waiting to be connected */
- cfs_waitq_t ksnd_connd_waitq; /* connds sleep here */
- int ksnd_connd_connecting;/* # connds connecting */
- /** time stamp of the last failed connecting attempt */
- long ksnd_connd_failed_stamp;
- /** # starting connd */
- unsigned ksnd_connd_starting;
- /** time stamp of the last starting connd */
- long ksnd_connd_starting_stamp;
- /** # running connd */
- unsigned ksnd_connd_running;
+ int ksnd_enomem_tx; /* test ENOMEM sender */
+ int ksnd_stall_tx; /* test sluggish sender */
+ int ksnd_stall_rx; /* test sluggish receiver */
+
+ cfs_list_t ksnd_connd_connreqs; /* incoming connection requests */
+ cfs_list_t ksnd_connd_routes; /* routes waiting to be connected */
+ wait_queue_head_t ksnd_connd_waitq; /* connds sleep here */
+ int ksnd_connd_connecting;/* # connds connecting */
+ /** time stamp of the last failed connecting attempt */
+ long ksnd_connd_failed_stamp;
+ /** # starting connd */
+ unsigned ksnd_connd_starting;
+ /** time stamp of the last starting connd */
+ long ksnd_connd_starting_stamp;
+ /** # running connd */
+ unsigned ksnd_connd_running;
spinlock_t ksnd_connd_lock; /* serialise */
cfs_list_t ksnd_idle_noop_txs; /* list head for freed noop tx */
LASSERT (conn->ksnc_tx_scheduled);
cfs_list_add_tail(&conn->ksnc_tx_list,
&ksocknal_data.ksnd_enomem_conns);
- if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
- SOCKNAL_ENOMEM_RETRY),
- ksocknal_data.ksnd_reaper_waketime))
- cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq);
+ if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
+ SOCKNAL_ENOMEM_RETRY),
+ ksocknal_data.ksnd_reaper_waketime))
+ wake_up(&ksocknal_data.ksnd_reaper_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
- return (rc);
- }
+ return (rc);
+ }
/* Actual error */
LASSERT (rc < 0);
cfs_list_add_tail(&route->ksnr_connd_list,
&ksocknal_data.ksnd_connd_routes);
- cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
+ wake_up(&ksocknal_data.ksnd_connd_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
}
cfs_list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
}
- if (conn->ksnc_tx_ready && /* able to send */
- !conn->ksnc_tx_scheduled) { /* not scheduled to send */
- /* +1 ref for scheduler */
- ksocknal_conn_addref(conn);
- cfs_list_add_tail (&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
- conn->ksnc_tx_scheduled = 1;
- cfs_waitq_signal (&sched->kss_waitq);
- }
+ if (conn->ksnc_tx_ready && /* able to send */
+ !conn->ksnc_tx_scheduled) { /* not scheduled to send */
+ /* +1 ref for scheduler */
+ ksocknal_conn_addref(conn);
+ cfs_list_add_tail (&conn->ksnc_tx_list,
+ &sched->kss_tx_conns);
+ conn->ksnc_tx_scheduled = 1;
+ wake_up(&sched->kss_waitq);
+ }
spin_unlock_bh(&sched->kss_lock);
}
spin_lock_bh(&sched->kss_lock);
- switch (conn->ksnc_rx_state) {
- case SOCKNAL_RX_PARSE_WAIT:
- cfs_list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
- cfs_waitq_signal (&sched->kss_waitq);
- LASSERT (conn->ksnc_rx_ready);
- break;
+ switch (conn->ksnc_rx_state) {
+ case SOCKNAL_RX_PARSE_WAIT:
+ cfs_list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
+ wake_up(&sched->kss_waitq);
+ LASSERT(conn->ksnc_rx_ready);
+ break;
case SOCKNAL_RX_PARSE:
/* scheduler hasn't noticed I'm parsing yet */
sched->kss_waitq,
!ksocknal_sched_cansleep(sched));
LASSERT (rc == 0);
- } else {
- cfs_cond_resched();
- }
+ } else {
+ cond_resched();
+ }
spin_lock_bh(&sched->kss_lock);
}
spin_lock_bh(&sched->kss_lock);
- conn->ksnc_rx_ready = 1;
+ conn->ksnc_rx_ready = 1;
- if (!conn->ksnc_rx_scheduled) { /* not being progressed */
- cfs_list_add_tail(&conn->ksnc_rx_list,
- &sched->kss_rx_conns);
- conn->ksnc_rx_scheduled = 1;
- /* extra ref for scheduler */
- ksocknal_conn_addref(conn);
+ if (!conn->ksnc_rx_scheduled) { /* not being progressed */
+ cfs_list_add_tail(&conn->ksnc_rx_list,
+ &sched->kss_rx_conns);
+ conn->ksnc_rx_scheduled = 1;
+ /* extra ref for scheduler */
+ ksocknal_conn_addref(conn);
- cfs_waitq_signal (&sched->kss_waitq);
- }
+ wake_up (&sched->kss_waitq);
+ }
spin_unlock_bh(&sched->kss_lock);
EXIT;
* Add connection to kss_tx_conns of scheduler
* and wakeup the scheduler.
*/
-void ksocknal_write_callback (ksock_conn_t *conn)
+void ksocknal_write_callback(ksock_conn_t *conn)
{
ksock_sched_t *sched;
ENTRY;
spin_lock_bh(&sched->kss_lock);
- conn->ksnc_tx_ready = 1;
+ conn->ksnc_tx_ready = 1;
- if (!conn->ksnc_tx_scheduled && // not being progressed
- !cfs_list_empty(&conn->ksnc_tx_queue)){//packets to send
- cfs_list_add_tail (&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
- conn->ksnc_tx_scheduled = 1;
- /* extra ref for scheduler */
- ksocknal_conn_addref(conn);
+ if (!conn->ksnc_tx_scheduled && // not being progressed
+ !cfs_list_empty(&conn->ksnc_tx_queue)){//packets to send
+ cfs_list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
+ conn->ksnc_tx_scheduled = 1;
+ /* extra ref for scheduler */
+ ksocknal_conn_addref(conn);
- cfs_waitq_signal (&sched->kss_waitq);
- }
+ wake_up(&sched->kss_waitq);
+ }
spin_unlock_bh(&sched->kss_lock);
static ksock_route_t *
ksocknal_connd_get_route_locked(signed long *timeout_p)
{
- ksock_route_t *route;
- cfs_time_t now;
+ ksock_route_t *route;
+ cfs_time_t now;
- now = cfs_time_current();
+ now = cfs_time_current();
- /* connd_routes can contain both pending and ordinary routes */
- cfs_list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes,
- ksnr_connd_list) {
+ /* connd_routes can contain both pending and ordinary routes */
+ cfs_list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes,
+ ksnr_connd_list) {
- if (route->ksnr_retry_interval == 0 ||
- cfs_time_aftereq(now, route->ksnr_timeout))
- return route;
+ if (route->ksnr_retry_interval == 0 ||
+ cfs_time_aftereq(now, route->ksnr_timeout))
+ return route;
- if (*timeout_p == CFS_MAX_SCHEDULE_TIMEOUT ||
- (int)*timeout_p > (int)(route->ksnr_timeout - now))
- *timeout_p = (int)(route->ksnr_timeout - now);
- }
+ if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
+ (int)*timeout_p > (int)(route->ksnr_timeout - now))
+ *timeout_p = (int)(route->ksnr_timeout - now);
+ }
- return NULL;
+ return NULL;
}
int
ksocknal_connd (void *arg)
{
spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
- ksock_connreq_t *cr;
- cfs_waitlink_t wait;
- int nloops = 0;
- int cons_retry = 0;
+ ksock_connreq_t *cr;
+ wait_queue_t wait;
+ int nloops = 0;
+ int cons_retry = 0;
- cfs_block_allsigs ();
+ cfs_block_allsigs ();
- cfs_waitlink_init (&wait);
+ init_waitqueue_entry_current(&wait);
spin_lock_bh(connd_lock);
- LASSERT(ksocknal_data.ksnd_connd_starting > 0);
- ksocknal_data.ksnd_connd_starting--;
- ksocknal_data.ksnd_connd_running++;
+ LASSERT(ksocknal_data.ksnd_connd_starting > 0);
+ ksocknal_data.ksnd_connd_starting--;
+ ksocknal_data.ksnd_connd_running++;
- while (!ksocknal_data.ksnd_shuttingdown) {
- ksock_route_t *route = NULL;
- long sec = cfs_time_current_sec();
- long timeout = CFS_MAX_SCHEDULE_TIMEOUT;
- int dropped_lock = 0;
-
- if (ksocknal_connd_check_stop(sec, &timeout)) {
- /* wakeup another one to check stop */
- cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
- break;
- }
+ while (!ksocknal_data.ksnd_shuttingdown) {
+ ksock_route_t *route = NULL;
+ long sec = cfs_time_current_sec();
+ long timeout = MAX_SCHEDULE_TIMEOUT;
+ int dropped_lock = 0;
+
+ if (ksocknal_connd_check_stop(sec, &timeout)) {
+ /* wakeup another one to check stop */
+ wake_up(&ksocknal_data.ksnd_connd_waitq);
+ break;
+ }
if (ksocknal_connd_check_start(sec, &timeout)) {
/* created new thread */
continue;
spin_unlock_bh(connd_lock);
nloops = 0;
- cfs_cond_resched();
+ cond_resched();
spin_lock_bh(connd_lock);
continue;
}
/* Nothing to do for 'timeout' */
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
spin_unlock_bh(connd_lock);
nloops = 0;
- cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);
+ waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
- cfs_set_current_state(CFS_TASK_RUNNING);
- cfs_waitq_del(&ksocknal_data.ksnd_connd_waitq, &wait);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
spin_lock_bh(connd_lock);
}
ksocknal_data.ksnd_connd_running--;
read_unlock(&ksocknal_data.ksnd_global_lock);
}
-int
-ksocknal_reaper (void *arg)
+int ksocknal_reaper(void *arg)
{
- cfs_waitlink_t wait;
- ksock_conn_t *conn;
- ksock_sched_t *sched;
- cfs_list_t enomem_conns;
+ wait_queue_t wait;
+ ksock_conn_t *conn;
+ ksock_sched_t *sched;
+ cfs_list_t enomem_conns;
int nenomem_conns;
cfs_duration_t timeout;
int i;
cfs_block_allsigs ();
- CFS_INIT_LIST_HEAD(&enomem_conns);
- cfs_waitlink_init (&wait);
+ CFS_INIT_LIST_HEAD(&enomem_conns);
+ init_waitqueue_entry_current(&wait);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
conn->ksnc_tx_ready = 1;
cfs_list_add_tail(&conn->ksnc_tx_list,
&sched->kss_tx_conns);
- cfs_waitq_signal(&sched->kss_waitq);
+ wake_up(&sched->kss_waitq);
spin_unlock_bh(&sched->kss_lock);
nenomem_conns++;
ksocknal_data.ksnd_reaper_waketime =
cfs_time_add(cfs_time_current(), timeout);
- cfs_set_current_state (CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add (&ksocknal_data.ksnd_reaper_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
- if (!ksocknal_data.ksnd_shuttingdown &&
- cfs_list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
- cfs_list_empty (&ksocknal_data.ksnd_zombie_conns))
- cfs_waitq_timedwait (&wait, CFS_TASK_INTERRUPTIBLE,
- timeout);
+ if (!ksocknal_data.ksnd_shuttingdown &&
+ cfs_list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
+ cfs_list_empty(&ksocknal_data.ksnd_zombie_conns))
+ waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
- cfs_set_current_state (CFS_TASK_RUNNING);
- cfs_waitq_del (&ksocknal_data.ksnd_reaper_waitq, &wait);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
}
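ksocknal_reaper() above only blocks when there is truly nothing to reap: the list checks are made after the task is already on the wait queue with TASK_INTERRUPTIBLE set, so a producer that queues a conn and calls wake_up() in the gap cannot be missed. A hedged sketch of that guarded sleep (parameter names are hypothetical):

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* Sketch: sleep for up to 'timeout' jiffies, but only if there is no
 * pending work; see the ordering note above for why this is safe. */
static void example_reaper_sleep(wait_queue_head_t *waitq,
                                 struct list_head *deathrow,
                                 struct list_head *zombies,
                                 int shuttingdown, long timeout)
{
        wait_queue_t wait;

        init_waitqueue_entry(&wait, current);
        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(waitq, &wait);

        if (!shuttingdown && list_empty(deathrow) && list_empty(zombies))
                schedule_timeout(timeout);

        set_current_state(TASK_RUNNING);
        remove_wait_queue(waitq, &wait);
}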
lnet_init_locks(void)
{
spin_lock_init(&the_lnet.ln_eq_wait_lock);
- cfs_waitq_init(&the_lnet.ln_eq_waitq);
+ init_waitqueue_head(&the_lnet.ln_eq_waitq);
mutex_init(&the_lnet.ln_lnd_mutex);
mutex_init(&the_lnet.ln_api_mutex);
}
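
As a reference point, the two standard ways to set up the wait queue head that init_waitqueue_head() prepares above; the identifiers are illustrative:

/* Illustrative sketch only (assumes <linux/wait.h>). */
static DECLARE_WAIT_QUEUE_HEAD(example_waitq);	/* static initialisation */

static void example_init(wait_queue_head_t *waitq)
{
	init_waitqueue_head(waitq);	/* dynamic initialisation before first use */
}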
#ifdef __KERNEL__
/* Wake anyone waiting in LNetEQPoll() */
- if (cfs_waitq_active(&the_lnet.ln_eq_waitq))
- cfs_waitq_broadcast(&the_lnet.ln_eq_waitq);
+ if (waitqueue_active(&the_lnet.ln_eq_waitq))
+ wake_up_all(&the_lnet.ln_eq_waitq);
#else
# ifndef HAVE_LIBPTHREAD
/* LNetEQPoll() calls into _the_ LND to wait for action */
{
int tms = *timeout_ms;
int wait;
- cfs_waitlink_t wl;
+ wait_queue_t wl;
cfs_time_t now;
if (tms == 0)
return -1; /* don't want to wait and no new event */
- cfs_waitlink_init(&wl);
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add(&the_lnet.ln_eq_waitq, &wl);
+ init_waitqueue_entry_current(&wl);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&the_lnet.ln_eq_waitq, &wl);
lnet_eq_wait_unlock();
if (tms < 0) {
- cfs_waitq_wait(&wl, CFS_TASK_INTERRUPTIBLE);
+ waitq_wait(&wl, TASK_INTERRUPTIBLE);
} else {
struct timeval tv;
now = cfs_time_current();
- cfs_waitq_timedwait(&wl, CFS_TASK_INTERRUPTIBLE,
+ waitq_timedwait(&wl, TASK_INTERRUPTIBLE,
cfs_time_seconds(tms) / 1000);
cfs_duration_usec(cfs_time_sub(cfs_time_current(), now), &tv);
tms -= (int)(tv.tv_sec * 1000 + tv.tv_usec / 1000);
*timeout_ms = tms;
lnet_eq_wait_lock();
- cfs_waitq_del(&the_lnet.ln_eq_waitq, &wl);
+ remove_wait_queue(&the_lnet.ln_eq_waitq, &wl);
return wait;
}
lnet_prune_rc_data(0); /* don't wait for UNLINK */
- /* Call cfs_pause() here always adds 1 to load average
- * because kernel counts # active tasks as nr_running
- * + nr_uninterruptible. */
- cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
- cfs_time_seconds(1));
- }
+ /* Calling cfs_pause() here would always add 1 to the load average,
+ * because the kernel counts nr_running + nr_uninterruptible tasks
+ * as active. */
+ schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
+ cfs_time_seconds(1));
+ }
LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING);
* I'm just a poor body and nobody loves me */
spin_unlock(&rpc->crpc_lock);
- /* release it */
- lstcon_rpc_put(crpc);
- return;
- }
+ /* release it */
+ lstcon_rpc_put(crpc);
+ return;
+ }
- /* not an orphan RPC */
- crpc->crp_finished = 1;
+ /* not an orphan RPC */
+ crpc->crp_finished = 1;
- if (crpc->crp_stamp == 0) {
- /* not aborted */
- LASSERT (crpc->crp_status == 0);
+ if (crpc->crp_stamp == 0) {
+ /* not aborted */
+ LASSERT (crpc->crp_status == 0);
- crpc->crp_stamp = cfs_time_current();
- crpc->crp_status = rpc->crpc_status;
- }
+ crpc->crp_stamp = cfs_time_current();
+ crpc->crp_status = rpc->crpc_status;
+ }
- /* wakeup (transaction)thread if I'm the last RPC in the transaction */
- if (cfs_atomic_dec_and_test(&crpc->crp_trans->tas_remaining))
- cfs_waitq_signal(&crpc->crp_trans->tas_waitq);
+ /* wakeup (transaction)thread if I'm the last RPC in the transaction */
+ if (cfs_atomic_dec_and_test(&crpc->crp_trans->tas_remaining))
+ wake_up(&crpc->crp_trans->tas_waitq);
spin_unlock(&rpc->crpc_lock);
}
cfs_list_add_tail(&trans->tas_link, &console_session.ses_trans_list);
- CFS_INIT_LIST_HEAD(&trans->tas_rpcs_list);
- cfs_atomic_set(&trans->tas_remaining, 0);
- cfs_waitq_init(&trans->tas_waitq);
+ CFS_INIT_LIST_HEAD(&trans->tas_rpcs_list);
+ cfs_atomic_set(&trans->tas_remaining, 0);
+ init_waitqueue_head(&trans->tas_waitq);
spin_lock(&console_session.ses_rpc_lock);
trans->tas_features = console_session.ses_features;
mutex_unlock(&console_session.ses_mutex);
- cfs_waitq_wait_event_interruptible_timeout(trans->tas_waitq,
- lstcon_rpc_trans_check(trans),
- cfs_time_seconds(timeout), rc);
+ rc = wait_event_interruptible_timeout(trans->tas_waitq,
+ lstcon_rpc_trans_check(trans),
+ cfs_time_seconds(timeout));
rc = (rc > 0)? 0: ((rc < 0)? -EINTR: -ETIMEDOUT);
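
The mapping on the line above is needed because wait_event_interruptible_timeout() has a different return convention from the old wrapper; a hedged sketch of that convention (helper and flag names are illustrative):

/* Illustrative sketch only (assumes <linux/wait.h> and <linux/errno.h>):
 *   ret > 0  : condition became true, ret is the remaining jiffies
 *   ret == 0 : the timeout elapsed with the condition still false
 *   ret < 0  : -ERESTARTSYS, the sleep was interrupted by a signal
 */
static int wait_for_flag(wait_queue_head_t *waitq, int *done, long timeout)
{
	long ret = wait_event_interruptible_timeout(*waitq, *done != 0, timeout);

	if (ret > 0)
		return 0;			/* woken: condition satisfied */
	return ret == 0 ? -ETIMEDOUT : -EINTR;	/* mirrors the mapping above */
}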
trans = cfs_list_entry(pacer, lstcon_rpc_trans_t,
tas_link);
- CDEBUG(D_NET, "Session closed, wakeup transaction %s\n",
- lstcon_rpc_trans_name(trans->tas_opc));
+ CDEBUG(D_NET, "Session closed, wakeup transaction %s\n",
+ lstcon_rpc_trans_name(trans->tas_opc));
- cfs_waitq_signal(&trans->tas_waitq);
- }
+ wake_up(&trans->tas_waitq);
+ }
mutex_unlock(&console_session.ses_mutex);
} lstcon_rpc_t;
typedef struct lstcon_rpc_trans {
- cfs_list_t tas_olink; /* link chain on owner list */
- cfs_list_t tas_link; /* link chain on global list */
- int tas_opc; /* operation code of transaction */
+ cfs_list_t tas_olink; /* link chain on owner list */
+ cfs_list_t tas_link; /* link chain on global list */
+ int tas_opc; /* operation code of transaction */
/* features mask is uptodate */
unsigned tas_feats_updated;
/* test features mask */
unsigned tas_features;
- cfs_waitq_t tas_waitq; /* wait queue head */
- cfs_atomic_t tas_remaining; /* # of un-scheduled rpcs */
- cfs_list_t tas_rpcs_list; /* queued requests */
+ wait_queue_head_t tas_waitq; /* wait queue head */
+ cfs_atomic_t tas_remaining; /* # of un-scheduled rpcs */
+ cfs_list_t tas_rpcs_list; /* queued requests */
} lstcon_rpc_trans_t;
#define LST_TRANS_PRIVATE 0x1000
while (rpc->crpc_timeout != 0) {
spin_unlock(&rpc->crpc_lock);
- cfs_schedule();
+ schedule();
spin_lock(&rpc->crpc_lock);
}
(STTIMER_NSLOTS - 1))])
struct st_timer_data {
- spinlock_t stt_lock;
- /* start time of the slot processed previously */
- cfs_time_t stt_prev_slot;
- cfs_list_t stt_hash[STTIMER_NSLOTS];
- int stt_shuttingdown;
+ spinlock_t stt_lock;
+ /* start time of the slot processed previously */
+ cfs_time_t stt_prev_slot;
+ cfs_list_t stt_hash[STTIMER_NSLOTS];
+ int stt_shuttingdown;
#ifdef __KERNEL__
- cfs_waitq_t stt_waitq;
- int stt_nthreads;
+ wait_queue_head_t stt_waitq;
+ int stt_nthreads;
#endif
} stt_data;
cfs_block_allsigs();
- while (!stt_data.stt_shuttingdown) {
- stt_check_timers(&stt_data.stt_prev_slot);
+ while (!stt_data.stt_shuttingdown) {
+ stt_check_timers(&stt_data.stt_prev_slot);
- cfs_waitq_wait_event_timeout(stt_data.stt_waitq,
- stt_data.stt_shuttingdown,
- cfs_time_seconds(STTIMER_SLOTTIME),
- rc);
- rc = 0; /* Discard jiffies remaining before timeout. */
- }
+ rc = wait_event_timeout(stt_data.stt_waitq,
+ stt_data.stt_shuttingdown,
+ cfs_time_seconds(STTIMER_SLOTTIME));
+ }
spin_lock(&stt_data.stt_lock);
stt_data.stt_nthreads--;
CFS_INIT_LIST_HEAD(&stt_data.stt_hash[i]);
#ifdef __KERNEL__
- stt_data.stt_nthreads = 0;
- cfs_waitq_init(&stt_data.stt_waitq);
- rc = stt_start_timer_thread();
- if (rc != 0)
- CERROR ("Can't spawn timer thread: %d\n", rc);
+ stt_data.stt_nthreads = 0;
+ init_waitqueue_head(&stt_data.stt_waitq);
+ rc = stt_start_timer_thread();
+ if (rc != 0)
+ CERROR ("Can't spawn timer thread: %d\n", rc);
#endif
return rc;
stt_data.stt_shuttingdown = 1;
#ifdef __KERNEL__
- cfs_waitq_signal(&stt_data.stt_waitq);
- lst_wait_until(stt_data.stt_nthreads == 0, stt_data.stt_lock,
- "waiting for %d threads to terminate\n",
- stt_data.stt_nthreads);
+ wake_up(&stt_data.stt_waitq);
+ lst_wait_until(stt_data.stt_nthreads == 0, stt_data.stt_lock,
+ "waiting for %d threads to terminate\n",
+ stt_data.stt_nthreads);
#endif
spin_unlock(&stt_data.stt_lock);
}
static int seq_fid_alloc_prep(struct lu_client_seq *seq,
- cfs_waitlink_t *link)
+ wait_queue_t *link)
{
- if (seq->lcs_update) {
- cfs_waitq_add(&seq->lcs_waitq, link);
- cfs_set_current_state(CFS_TASK_UNINT);
+ if (seq->lcs_update) {
+ add_wait_queue(&seq->lcs_waitq, link);
+ set_current_state(TASK_UNINTERRUPTIBLE);
mutex_unlock(&seq->lcs_mutex);
- cfs_waitq_wait(link, CFS_TASK_UNINT);
+ waitq_wait(link, TASK_UNINTERRUPTIBLE);
mutex_lock(&seq->lcs_mutex);
- cfs_waitq_del(&seq->lcs_waitq, link);
- cfs_set_current_state(CFS_TASK_RUNNING);
- return -EAGAIN;
- }
- ++seq->lcs_update;
+ remove_wait_queue(&seq->lcs_waitq, link);
+ set_current_state(TASK_RUNNING);
+ return -EAGAIN;
+ }
+ ++seq->lcs_update;
mutex_unlock(&seq->lcs_mutex);
- return 0;
+ return 0;
}
static void seq_fid_alloc_fini(struct lu_client_seq *seq)
{
- LASSERT(seq->lcs_update == 1);
+ LASSERT(seq->lcs_update == 1);
mutex_lock(&seq->lcs_mutex);
- --seq->lcs_update;
- cfs_waitq_signal(&seq->lcs_waitq);
+ --seq->lcs_update;
+ wake_up(&seq->lcs_waitq);
}
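
The prep and flush paths around this hunk open-code their sleep because they must drop lcs_mutex before blocking; a hedged sketch of that handshake with illustrative names, assuming the waitq_wait() shim reduces to schedule() on Linux:

/* Illustrative sketch only (assumes <linux/wait.h>, <linux/sched.h>,
 * <linux/mutex.h>). */
static void wait_while_updating(struct mutex *guard, wait_queue_head_t *waitq,
				int *updating)
{
	wait_queue_t link;

	init_waitqueue_entry(&link, current);
	while (*updating) {
		add_wait_queue(waitq, &link);
		set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(guard);		/* never sleep holding the mutex */
		schedule();			/* woken by the updater's wake_up() */
		mutex_lock(guard);
		remove_wait_queue(waitq, &link);
		set_current_state(TASK_RUNNING);
	}
}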
/**
* Allocate the whole seq to the caller.
**/
int seq_client_get_seq(const struct lu_env *env,
- struct lu_client_seq *seq, seqno_t *seqnr)
+ struct lu_client_seq *seq, seqno_t *seqnr)
{
- cfs_waitlink_t link;
- int rc;
+ wait_queue_t link;
+ int rc;
- LASSERT(seqnr != NULL);
+ LASSERT(seqnr != NULL);
mutex_lock(&seq->lcs_mutex);
- cfs_waitlink_init(&link);
+ init_waitqueue_entry_current(&link);
while (1) {
rc = seq_fid_alloc_prep(seq, &link);
/* Allocate new fid on passed client @seq and save it to @fid. */
int seq_client_alloc_fid(const struct lu_env *env,
- struct lu_client_seq *seq, struct lu_fid *fid)
+ struct lu_client_seq *seq, struct lu_fid *fid)
{
- cfs_waitlink_t link;
- int rc;
- ENTRY;
+ wait_queue_t link;
+ int rc;
+ ENTRY;
- LASSERT(seq != NULL);
- LASSERT(fid != NULL);
+ LASSERT(seq != NULL);
+ LASSERT(fid != NULL);
- cfs_waitlink_init(&link);
+ init_waitqueue_entry_current(&link);
mutex_lock(&seq->lcs_mutex);
if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_EXHAUST))
*/
void seq_client_flush(struct lu_client_seq *seq)
{
- cfs_waitlink_t link;
+ wait_queue_t link;
- LASSERT(seq != NULL);
- cfs_waitlink_init(&link);
+ LASSERT(seq != NULL);
+ init_waitqueue_entry_current(&link);
mutex_lock(&seq->lcs_mutex);
- while (seq->lcs_update) {
- cfs_waitq_add(&seq->lcs_waitq, &link);
- cfs_set_current_state(CFS_TASK_UNINT);
+ while (seq->lcs_update) {
+ add_wait_queue(&seq->lcs_waitq, &link);
+ set_current_state(TASK_UNINTERRUPTIBLE);
mutex_unlock(&seq->lcs_mutex);
- cfs_waitq_wait(&link, CFS_TASK_UNINT);
+ waitq_wait(&link, TASK_UNINTERRUPTIBLE);
mutex_lock(&seq->lcs_mutex);
- cfs_waitq_del(&seq->lcs_waitq, &link);
- cfs_set_current_state(CFS_TASK_RUNNING);
- }
+ remove_wait_queue(&seq->lcs_waitq, &link);
+ set_current_state(TASK_RUNNING);
+ }
fid_zero(&seq->lcs_fid);
/**
else
seq->lcs_width = LUSTRE_DATA_SEQ_MAX_WIDTH;
- cfs_waitq_init(&seq->lcs_waitq);
+ init_waitqueue_head(&seq->lcs_waitq);
/* Make sure that things are clear before work is started. */
seq_client_flush(seq);
static void fld_enter_request(struct client_obd *cli)
{
- struct mdc_cache_waiter mcw;
- struct l_wait_info lwi = { 0 };
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
- cfs_list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
- cfs_waitq_init(&mcw.mcw_waitq);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- l_wait_event(mcw.mcw_waitq, fld_req_avail(cli, &mcw), &lwi);
- } else {
- cli->cl_r_in_flight++;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- }
+ struct mdc_cache_waiter mcw;
+ struct l_wait_info lwi = { 0 };
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
+ cfs_list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
+ init_waitqueue_head(&mcw.mcw_waitq);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ l_wait_event(mcw.mcw_waitq, fld_req_avail(cli, &mcw), &lwi);
+ } else {
+ cli->cl_r_in_flight++;
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ }
}
static void fld_exit_request(struct client_obd *cli)
{
- cfs_list_t *l, *tmp;
- struct mdc_cache_waiter *mcw;
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- cli->cl_r_in_flight--;
- cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
-
- if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
- /* No free request slots anymore */
- break;
- }
-
- mcw = cfs_list_entry(l, struct mdc_cache_waiter, mcw_entry);
- cfs_list_del_init(&mcw->mcw_entry);
- cli->cl_r_in_flight++;
- cfs_waitq_signal(&mcw->mcw_waitq);
- }
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ cfs_list_t *l, *tmp;
+ struct mdc_cache_waiter *mcw;
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ cli->cl_r_in_flight--;
+ cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
+
+ if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
+ /* No free request slots anymore */
+ break;
+ }
+
+ mcw = cfs_list_entry(l, struct mdc_cache_waiter, mcw_entry);
+ cfs_list_del_init(&mcw->mcw_entry);
+ cli->cl_r_in_flight++;
+ wake_up(&mcw->mcw_waitq);
+ }
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
}
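
fld_enter_request()/fld_exit_request() above give each waiter its own queue head embedded in the list entry, so a released slot wakes exactly one waiter in FIFO order; a hedged sketch of the release side with illustrative names:

/* Illustrative sketch only (assumes <linux/list.h> and <linux/wait.h>). */
struct slot_waiter {
	struct list_head	sw_entry;
	wait_queue_head_t	sw_waitq;
	int			sw_granted;
};

static void slot_release_one(struct list_head *waiters)
{
	struct slot_waiter *w;

	if (list_empty(waiters))
		return;
	w = list_first_entry(waiters, struct slot_waiter, sw_entry);
	list_del_init(&w->sw_entry);
	w->sw_granted = 1;		/* condition the sleeper re-checks */
	wake_up(&w->sw_waitq);		/* wakes only this waiter */
}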
static int fld_rrb_hash(struct lu_client_fld *fld,
*/
struct cl_lock_descr cll_descr;
/** Protected by cl_lock::cll_guard. */
- enum cl_lock_state cll_state;
- /** signals state changes. */
- cfs_waitq_t cll_wq;
- /**
- * Recursive lock, most fields in cl_lock{} are protected by this.
- *
- * Locking rules: this mutex is never held across network
- * communication, except when lock is being canceled.
- *
- * Lock ordering: a mutex of a sub-lock is taken first, then a mutex
- * on a top-lock. Other direction is implemented through a
- * try-lock-repeat loop. Mutices of unrelated locks can be taken only
- * by try-locking.
- *
- * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
- */
+ enum cl_lock_state cll_state;
+ /** signals state changes. */
+ wait_queue_head_t cll_wq;
+ /**
+ * Recursive lock, most fields in cl_lock{} are protected by this.
+ *
+ * Locking rules: this mutex is never held across network
+ * communication, except when lock is being canceled.
+ *
+ * Lock ordering: a mutex of a sub-lock is taken first, then a mutex
+ * on a top-lock. Other direction is implemented through a
+ * try-lock-repeat loop. Mutices of unrelated locks can be taken only
+ * by try-locking.
+ *
+ * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
+ */
struct mutex cll_guard;
cfs_task_t *cll_guarder;
int cll_depth;
/** barrier of destroy this structure */
cfs_atomic_t csi_barrier;
/** completion to be signaled when transfer is complete. */
- cfs_waitq_t csi_waitq;
+ wait_queue_head_t csi_waitq;
};
void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages);
extern struct task_struct *current;
int in_group_p(gid_t gid);
-#define cfs_set_current_state(foo) do { current->state = foo; } while (0)
+#define set_current_state(foo) do { current->state = foo; } while (0)
#define wait_event_interruptible(wq, condition) \
{ \
struct file_lock *fl_next; /* singly linked list for this inode */
cfs_list_t fl_link; /* doubly linked list of all locks */
cfs_list_t fl_block; /* circular list of blocked processes */
- void *fl_owner;
- unsigned int fl_pid;
- cfs_waitq_t fl_wait;
- struct file *fl_file;
+ void *fl_owner;
+ unsigned int fl_pid;
+ wait_queue_head_t fl_wait;
+ struct file *fl_file;
unsigned char fl_flags;
unsigned char fl_type;
loff_t fl_start;
struct fld;
struct lu_site_bkt_data {
- /**
- * number of busy object on this bucket
- */
- long lsb_busy;
- /**
- * LRU list, updated on each access to object. Protected by
- * bucket lock of lu_site::ls_obj_hash.
- *
- * "Cold" end of LRU is lu_site::ls_lru.next. Accessed object are
- * moved to the lu_site::ls_lru.prev (this is due to the non-existence
- * of list_for_each_entry_safe_reverse()).
- */
- cfs_list_t lsb_lru;
- /**
- * Wait-queue signaled when an object in this site is ultimately
- * destroyed (lu_object_free()). It is used by lu_object_find() to
- * wait before re-trying when object in the process of destruction is
- * found in the hash table.
- *
- * \see htable_lookup().
- */
- cfs_waitq_t lsb_marche_funebre;
+ /**
+ * number of busy objects on this bucket
+ */
+ long lsb_busy;
+ /**
+ * LRU list, updated on each access to object. Protected by
+ * bucket lock of lu_site::ls_obj_hash.
+ *
+ * "Cold" end of LRU is lu_site::ls_lru.next. Accessed object are
+ * moved to the lu_site::ls_lru.prev (this is due to the non-existence
+ * of list_for_each_entry_safe_reverse()).
+ */
+ cfs_list_t lsb_lru;
+ /**
+ * Wait-queue signaled when an object in this site is ultimately
+ * destroyed (lu_object_free()). It is used by lu_object_find() to
+ * wait before re-trying when an object in the process of destruction is
+ * found in the hash table.
+ *
+ * \see htable_lookup().
+ */
+ wait_queue_head_t lsb_marche_funebre;
};
enum {
* Wait queue used by __ldlm_namespace_free. Gets woken up every time
* a resource is removed.
*/
- cfs_waitq_t ns_waitq;
+ wait_queue_head_t ns_waitq;
/** LDLM pool structure for this namespace */
struct ldlm_pool ns_pool;
/** Definition of how eagerly unused locks will be released from LRU */
* it's no longer in use. If the lock is not granted, a process sleeps
* on this waitq to learn when it becomes granted.
*/
- cfs_waitq_t l_waitq;
+ wait_queue_head_t l_waitq;
/**
* Seconds. It will be updated if there is any activity related to
/* Seq-server for direct talking */
struct lu_server_seq *lcs_srv;
- /* wait queue for fid allocation and update indicator */
- cfs_waitq_t lcs_waitq;
- int lcs_update;
+ /* wait queue for fid allocation and update indicator */
+ wait_queue_head_t lcs_waitq;
+ int lcs_update;
};
/* server sequence manager interface */
cfs_time_t imp_sec_expire;
/** @} */
- /** Wait queue for those who need to wait for recovery completion */
- cfs_waitq_t imp_recovery_waitq;
+ /** Wait queue for those who need to wait for recovery completion */
+ wait_queue_head_t imp_recovery_waitq;
/** Number of requests currently in-flight */
cfs_atomic_t imp_inflight;
*/
#define __l_wait_event(wq, condition, info, ret, l_add_wait) \
do { \
- cfs_waitlink_t __wait; \
- cfs_duration_t __timeout = info->lwi_timeout; \
- cfs_sigset_t __blocked; \
- int __allow_intr = info->lwi_allow_intr; \
- \
- ret = 0; \
- if (condition) \
- break; \
- \
- cfs_waitlink_init(&__wait); \
- l_add_wait(&wq, &__wait); \
- \
- /* Block all signals (just the non-fatal ones if no timeout). */ \
- if (info->lwi_on_signal != NULL && (__timeout == 0 || __allow_intr)) \
- __blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS); \
- else \
- __blocked = cfs_block_sigsinv(0); \
- \
- for (;;) { \
- unsigned __wstate; \
- \
- __wstate = info->lwi_on_signal != NULL && \
- (__timeout == 0 || __allow_intr) ? \
- CFS_TASK_INTERRUPTIBLE : CFS_TASK_UNINT; \
- \
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE); \
- \
- if (condition) \
- break; \
- \
- if (__timeout == 0) { \
- cfs_waitq_wait(&__wait, __wstate); \
- } else { \
- cfs_duration_t interval = info->lwi_interval? \
- min_t(cfs_duration_t, \
- info->lwi_interval,__timeout):\
- __timeout; \
- cfs_duration_t remaining = cfs_waitq_timedwait(&__wait,\
- __wstate, \
- interval); \
- __timeout = cfs_time_sub(__timeout, \
- cfs_time_sub(interval, remaining));\
- if (__timeout == 0) { \
- if (info->lwi_on_timeout == NULL || \
- info->lwi_on_timeout(info->lwi_cb_data)) { \
- ret = -ETIMEDOUT; \
- break; \
- } \
- /* Take signals after the timeout expires. */ \
- if (info->lwi_on_signal != NULL) \
- (void)cfs_block_sigsinv(LUSTRE_FATAL_SIGS);\
- } \
- } \
+ wait_queue_t __wait; \
+ cfs_duration_t __timeout = info->lwi_timeout; \
+ cfs_sigset_t __blocked; \
+ int __allow_intr = info->lwi_allow_intr; \
+ \
+ ret = 0; \
+ if (condition) \
+ break; \
+ \
+ init_waitqueue_entry_current(&__wait); \
+ l_add_wait(&wq, &__wait); \
+ \
+ /* Block all signals (just the non-fatal ones if no timeout). */ \
+ if (info->lwi_on_signal != NULL && (__timeout == 0 || __allow_intr)) \
+ __blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS); \
+ else \
+ __blocked = cfs_block_sigsinv(0); \
+ \
+ for (;;) { \
+ unsigned __wstate; \
+ \
+ __wstate = info->lwi_on_signal != NULL && \
+ (__timeout == 0 || __allow_intr) ? \
+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; \
+ \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ \
+ if (condition) \
+ break; \
+ \
+ if (__timeout == 0) { \
+ waitq_wait(&__wait, __wstate); \
+ } else { \
+ cfs_duration_t interval = info->lwi_interval? \
+ min_t(cfs_duration_t, \
+ info->lwi_interval,__timeout):\
+ __timeout; \
+ cfs_duration_t remaining = waitq_timedwait(&__wait, \
+ __wstate, \
+ interval); \
+ __timeout = cfs_time_sub(__timeout, \
+ cfs_time_sub(interval, remaining));\
+ if (__timeout == 0) { \
+ if (info->lwi_on_timeout == NULL || \
+ info->lwi_on_timeout(info->lwi_cb_data)) { \
+ ret = -ETIMEDOUT; \
+ break; \
+ } \
+ /* Take signals after the timeout expires. */ \
+ if (info->lwi_on_signal != NULL) \
+ (void)cfs_block_sigsinv(LUSTRE_FATAL_SIGS);\
+ } \
+ } \
\
if (condition) \
break; \
\
cfs_restore_sigs(__blocked); \
\
- cfs_set_current_state(CFS_TASK_RUNNING); \
- cfs_waitq_del(&wq, &__wait); \
+ set_current_state(TASK_RUNNING); \
+ remove_wait_queue(&wq, &__wait); \
} while (0)
#else /* !__KERNEL__ */
#define l_wait_event(wq, condition, info) \
({ \
- int __ret; \
- struct l_wait_info *__info = (info); \
- \
- __l_wait_event(wq, condition, __info, \
- __ret, cfs_waitq_add); \
- __ret; \
+ int __ret; \
+ struct l_wait_info *__info = (info); \
+ \
+ __l_wait_event(wq, condition, __info, \
+ __ret, add_wait_queue); \
+ __ret; \
})
#define l_wait_event_exclusive(wq, condition, info) \
({ \
- int __ret; \
- struct l_wait_info *__info = (info); \
- \
- __l_wait_event(wq, condition, __info, \
- __ret, cfs_waitq_add_exclusive); \
- __ret; \
+ int __ret; \
+ struct l_wait_info *__info = (info); \
+ \
+ __l_wait_event(wq, condition, __info, \
+ __ret, add_wait_queue_exclusive); \
+ __ret; \
})
#define l_wait_event_exclusive_head(wq, condition, info) \
({ \
- int __ret; \
- struct l_wait_info *__info = (info); \
- \
- __l_wait_event(wq, condition, __info, \
- __ret, cfs_waitq_add_exclusive_head); \
- __ret; \
+ int __ret; \
+ struct l_wait_info *__info = (info); \
+ \
+ __l_wait_event(wq, condition, __info, \
+ __ret, add_wait_queue_exclusive_head); \
+ __ret; \
})
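
A hedged usage sketch of the l_wait_event() form converted above, in the shape the callers later in this patch use it (helper and flag names are illustrative):

/* Illustrative sketch only; relies on the l_wait_event()/l_wait_info
 * definitions above. */
static void wait_until_ready(wait_queue_head_t *waitq, int *ready)
{
	struct l_wait_info lwi = { 0 };		/* no timeout, no signal callback */

	l_wait_event(*waitq, *ready != 0, &lwi);
}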
#define l_wait_condition(wq, condition) \
static inline void llog_group_init(struct obd_llog_group *olg, int group)
{
- cfs_waitq_init(&olg->olg_waitq);
+ init_waitqueue_head(&olg->olg_waitq);
spin_lock_init(&olg->olg_lock);
mutex_init(&olg->olg_cat_processing);
olg->olg_seq = group;
* the common case when it isn't true. */
while (unlikely(lck->rpcl_it == MDC_FAKE_RPCL_IT)) {
mutex_unlock(&lck->rpcl_mutex);
- cfs_schedule_timeout(cfs_time_seconds(1) / 4);
+ schedule_timeout(cfs_time_seconds(1) / 4);
goto again;
}
struct mdc_cache_waiter {
- cfs_list_t mcw_entry;
- cfs_waitq_t mcw_waitq;
+ cfs_list_t mcw_entry;
+ wait_queue_head_t mcw_waitq;
};
/* mdc/mdc_locks.c */
/** number of uncompleted requests */
cfs_atomic_t set_remaining;
/** wait queue to wait on for request events */
- cfs_waitq_t set_waitq;
- cfs_waitq_t *set_wakeup_ptr;
+ wait_queue_head_t set_waitq;
+ wait_queue_head_t *set_wakeup_ptr;
/** List of requests in the set */
cfs_list_t set_requests;
/**
/** incoming request buffer */
struct ptlrpc_request_buffer_desc *rq_rqbd;
- /** client-only incoming reply */
- lnet_handle_md_t rq_reply_md_h;
- cfs_waitq_t rq_reply_waitq;
- struct ptlrpc_cb_id rq_reply_cbid;
+ /** client-only incoming reply */
+ lnet_handle_md_t rq_reply_md_h;
+ wait_queue_head_t rq_reply_waitq;
+ struct ptlrpc_cb_id rq_reply_cbid;
/** our LNet NID */
lnet_nid_t rq_self;
/** Multi-rpc bits */
/** Per-request waitq introduced by bug 21938 for recovery waiting */
- cfs_waitq_t rq_set_waitq;
+ wait_queue_head_t rq_set_waitq;
/** Link item for request set lists */
cfs_list_t rq_set_chain;
/** Link back to the request set */
struct obd_import *bd_import;
/** Back pointer to the request */
struct ptlrpc_request *bd_req;
- cfs_waitq_t bd_waitq; /* server side only WQ */
+ wait_queue_head_t bd_waitq; /* server side only WQ */
int bd_iov_count; /* # entries in bd_iov */
int bd_max_iov; /* allocated size of bd_iov */
int bd_nob; /* # bytes covered */
* the svc this thread belonged to b=18582
*/
struct ptlrpc_service_part *t_svcpt;
- cfs_waitq_t t_ctl_waitq;
+ wait_queue_head_t t_ctl_waitq;
struct lu_env *t_env;
char t_name[PTLRPC_THR_NAME_LEN];
};
* all threads sleep on this. This wait-queue is signalled when new
* incoming request arrives and when difficult reply has to be handled.
*/
- cfs_waitq_t scp_waitq;
+ wait_queue_head_t scp_waitq;
/** request history */
cfs_list_t scp_hist_reqs;
/** List of free reply_states */
cfs_list_t scp_rep_idle;
/** waitq to run, when adding stuff to srv_free_rs_list */
- cfs_waitq_t scp_rep_waitq;
+ wait_queue_head_t scp_rep_waitq;
/** # 'difficult' replies */
cfs_atomic_t scp_nreps_difficult;
};
static inline void
ptlrpc_client_wake_req(struct ptlrpc_request *req)
{
- if (req->rq_set == NULL)
- cfs_waitq_signal(&req->rq_reply_waitq);
- else
- cfs_waitq_signal(&req->rq_set->set_waitq);
+ if (req->rq_set == NULL)
+ wake_up(&req->rq_reply_waitq);
+ else
+ wake_up(&req->rq_set->set_waitq);
}
static inline void
/* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
cfs_atomic_t cl_destroy_in_flight;
- cfs_waitq_t cl_destroy_waitq;
+ wait_queue_head_t cl_destroy_waitq;
struct mdc_rpc_lock *cl_rpc_lock;
struct mdc_rpc_lock *cl_close_lock;
lq_reset:1, /* zero current penalties */
lq_statfs_in_progress:1; /* statfs op in
progress */
- /* qos statfs data */
- struct lov_statfs_data *lq_statfs_data;
- cfs_waitq_t lq_statfs_waitq; /* waitqueue to notify statfs
- * requests completion */
+ /* qos statfs data */
+ struct lov_statfs_data *lq_statfs_data;
+ wait_queue_head_t lq_statfs_waitq; /* waitqueue to notify statfs
+ * requests completion */
};
struct lov_tgt_desc {
};
struct obd_llog_group {
- int olg_seq;
- struct llog_ctxt *olg_ctxts[LLOG_MAX_CTXTS];
- cfs_waitq_t olg_waitq;
+ int olg_seq;
+ struct llog_ctxt *olg_ctxts[LLOG_MAX_CTXTS];
+ wait_queue_head_t olg_waitq;
spinlock_t olg_lock;
struct mutex olg_cat_processing;
};
* obd_next_recovery_transno value */
spinlock_t obd_recovery_task_lock;
__u64 obd_next_recovery_transno;
- int obd_replayed_requests;
- int obd_requests_queued_for_recovery;
- cfs_waitq_t obd_next_transno_waitq;
- /* protected by obd_recovery_task_lock */
- cfs_timer_t obd_recovery_timer;
+ int obd_replayed_requests;
+ int obd_requests_queued_for_recovery;
+ wait_queue_head_t obd_next_transno_waitq;
+ /* protected by obd_recovery_task_lock */
+ cfs_timer_t obd_recovery_timer;
time_t obd_recovery_start; /* seconds */
time_t obd_recovery_end; /* seconds, for lprocfs_status */
int obd_recovery_time_hard;
cfs_proc_dir_entry_t *obd_proc_exports_entry;
cfs_proc_dir_entry_t *obd_svc_procroot;
struct lprocfs_stats *obd_svc_stats;
- cfs_atomic_t obd_evict_inprogress;
- cfs_waitq_t obd_evict_inprogress_waitq;
- cfs_list_t obd_evict_list; /* protected with pet_lock */
+ cfs_atomic_t obd_evict_inprogress;
+ wait_queue_head_t obd_evict_inprogress_waitq;
+ cfs_list_t obd_evict_list; /* protected with pet_lock */
/**
* Ldlm pool part. Save last calculated SLV and Limit.
*/
static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
{
- struct lu_object_header *header = obj->co_lu.lo_header;
- cfs_waitlink_t waiter;
+ struct lu_object_header *header = obj->co_lu.lo_header;
+ wait_queue_t waiter;
- if (unlikely(cfs_atomic_read(&header->loh_ref) != 1)) {
- struct lu_site *site = obj->co_lu.lo_dev->ld_site;
- struct lu_site_bkt_data *bkt;
+ if (unlikely(cfs_atomic_read(&header->loh_ref) != 1)) {
+ struct lu_site *site = obj->co_lu.lo_dev->ld_site;
+ struct lu_site_bkt_data *bkt;
- bkt = lu_site_bkt_from_fid(site, &header->loh_fid);
+ bkt = lu_site_bkt_from_fid(site, &header->loh_fid);
- cfs_waitlink_init(&waiter);
- cfs_waitq_add(&bkt->lsb_marche_funebre, &waiter);
+ init_waitqueue_entry_current(&waiter);
+ add_wait_queue(&bkt->lsb_marche_funebre, &waiter);
- while (1) {
- cfs_set_current_state(CFS_TASK_UNINT);
- if (cfs_atomic_read(&header->loh_ref) == 1)
- break;
- cfs_waitq_wait(&waiter, CFS_TASK_UNINT);
- }
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (cfs_atomic_read(&header->loh_ref) == 1)
+ break;
+ waitq_wait(&waiter, TASK_UNINTERRUPTIBLE);
+ }
- cfs_set_current_state(CFS_TASK_RUNNING);
- cfs_waitq_del(&bkt->lsb_marche_funebre, &waiter);
- }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&bkt->lsb_marche_funebre, &waiter);
+ }
- cl_object_put(env, obj);
+ cl_object_put(env, obj);
}
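
cl_object_put_last() above relies on the classic lost-wakeup-safe ordering: register on the queue and set the sleep state before testing the condition, so a wake_up() racing with the test cannot be missed. A hedged sketch of that loop with illustrative names, assuming waitq_wait() reduces to schedule():

/* Illustrative sketch only (assumes <linux/wait.h>, <linux/sched.h>,
 * <linux/atomic.h>). */
static void wait_for_last_ref(wait_queue_head_t *waitq, atomic_t *refs)
{
	wait_queue_t waiter;

	init_waitqueue_entry(&waiter, current);
	add_wait_queue(waitq, &waiter);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(refs) == 1)	/* we hold the last reference */
			break;
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(waitq, &waiter);
}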
void cl_inode_fini(struct inode *inode)
* references being held, so that it can go away. No point in
* holding the lock even if app still believes it has it, since
* server already dropped it anyway. Only for granted locks too. */
- if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
- (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
- if (lock->l_req_mode == lock->l_granted_mode &&
- lock->l_granted_mode != LCK_NL &&
- NULL == data)
- ldlm_lock_decref_internal(lock, lock->l_req_mode);
-
- /* Need to wake up the waiter if we were evicted */
- cfs_waitq_signal(&lock->l_waitq);
- RETURN(0);
- }
+ if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
+ (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
+ if (lock->l_req_mode == lock->l_granted_mode &&
+ lock->l_granted_mode != LCK_NL &&
+ NULL == data)
+ ldlm_lock_decref_internal(lock, lock->l_req_mode);
+
+ /* Need to wake up the waiter if we were evicted */
+ wake_up(&lock->l_waitq);
+ RETURN(0);
+ }
- LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
+ LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
- if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV))) {
- if (NULL == data)
- /* mds granted the lock in the reply */
- goto granted;
- /* CP AST RPC: lock get granted, wake it up */
- cfs_waitq_signal(&lock->l_waitq);
- RETURN(0);
- }
+ if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
+ LDLM_FL_BLOCK_CONV))) {
+ if (NULL == data)
+ /* mds granted the lock in the reply */
+ goto granted;
+ /* CP AST RPC: lock get granted, wake it up */
+ wake_up(&lock->l_waitq);
+ RETURN(0);
+ }
LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
"sleeping");
CFS_INIT_LIST_HEAD(&cli->cl_lru_list);
client_obd_list_lock_init(&cli->cl_lru_list_lock);
- cfs_waitq_init(&cli->cl_destroy_waitq);
- cfs_atomic_set(&cli->cl_destroy_in_flight, 0);
+ init_waitqueue_head(&cli->cl_destroy_waitq);
+ cfs_atomic_set(&cli->cl_destroy_in_flight, 0);
#ifdef ENABLE_CHECKSUM
- /* Turn on checksumming by default. */
- cli->cl_checksum = 1;
+ /* Turn on checksumming by default. */
+ cli->cl_checksum = 1;
/*
* The supported checksum types will be worked out at connect time
* Set cl_chksum* to CRC32 for now to avoid returning screwed info
spin_unlock(&target->obd_recovery_task_lock);
}
- cfs_atomic_inc(&target->obd_req_replay_clients);
- cfs_atomic_inc(&target->obd_lock_replay_clients);
- if (cfs_atomic_inc_return(&target->obd_connected_clients) ==
- target->obd_max_recoverable_clients)
- cfs_waitq_signal(&target->obd_next_transno_waitq);
- }
+ cfs_atomic_inc(&target->obd_req_replay_clients);
+ cfs_atomic_inc(&target->obd_lock_replay_clients);
+ if (cfs_atomic_inc_return(&target->obd_connected_clients) ==
+ target->obd_max_recoverable_clients)
+ wake_up(&target->obd_next_transno_waitq);
+ }
/* Tell the client we're in recovery, when client is involved in it. */
if (target->obd_recovering && !lw_client)
* evict dead clients via health_check
*/
static int target_recovery_overseer(struct obd_device *obd,
- int (*check_routine)(struct obd_device *),
- int (*health_check)(struct obd_export *))
+ int (*check_routine)(struct obd_device *),
+ int (*health_check)(struct obd_export *))
{
repeat:
- cfs_wait_event(obd->obd_next_transno_waitq, check_routine(obd));
- if (obd->obd_abort_recovery) {
- CWARN("recovery is aborted, evict exports in recovery\n");
- /** evict exports which didn't finish recovery yet */
- class_disconnect_stale_exports(obd, exp_finished);
- return 1;
- } else if (obd->obd_recovery_expired) {
- obd->obd_recovery_expired = 0;
- /** If some clients died being recovered, evict them */
- LCONSOLE_WARN("%s: recovery is timed out, "
- "evict stale exports\n", obd->obd_name);
- /** evict cexports with no replay in queue, they are stalled */
- class_disconnect_stale_exports(obd, health_check);
- /** continue with VBR */
+ wait_event(obd->obd_next_transno_waitq, check_routine(obd));
+ if (obd->obd_abort_recovery) {
+ CWARN("recovery is aborted, evict exports in recovery\n");
+ /** evict exports which didn't finish recovery yet */
+ class_disconnect_stale_exports(obd, exp_finished);
+ return 1;
+ } else if (obd->obd_recovery_expired) {
+ obd->obd_recovery_expired = 0;
+ /** If some clients died while being recovered, evict them */
+ LCONSOLE_WARN("%s: recovery is timed out, "
+ "evict stale exports\n", obd->obd_name);
+ /** evict exports with no replay in queue, they are stalled */
+ class_disconnect_stale_exports(obd, health_check);
+ /** continue with VBR */
spin_lock(&obd->obd_dev_lock);
obd->obd_version_recov = 1;
spin_unlock(&obd->obd_dev_lock);
- /**
- * reset timer, recovery will proceed with versions now,
- * timeout is set just to handle reconnection delays
- */
- extend_recovery_timer(obd, RECONNECT_DELAY_MAX, true);
- /** Wait for recovery events again, after evicting bad clients */
- goto repeat;
- }
- return 0;
+ /**
+ * reset timer, recovery will proceed with versions now,
+ * timeout is set just to handle reconnection delays
+ */
+ extend_recovery_timer(obd, RECONNECT_DELAY_MAX, true);
+ /** Wait for recovery events again, after evicting bad clients */
+ goto repeat;
+ }
+ return 0;
}
static struct ptlrpc_request *target_next_replay_req(struct obd_device *obd)
if (obd->obd_recovering) {
CERROR("%s: Aborting recovery\n", obd->obd_name);
obd->obd_abort_recovery = 1;
- cfs_waitq_signal(&obd->obd_next_transno_waitq);
+ wake_up(&obd->obd_next_transno_waitq);
}
spin_unlock(&obd->obd_dev_lock);
wait_for_completion(&trd->trd_finishing);
static void target_recovery_expired(unsigned long castmeharder)
{
- struct obd_device *obd = (struct obd_device *)castmeharder;
- CDEBUG(D_HA, "%s: recovery timed out; %d clients are still in recovery"
- " after %lds (%d clients connected)\n",
- obd->obd_name, cfs_atomic_read(&obd->obd_lock_replay_clients),
- cfs_time_current_sec()- obd->obd_recovery_start,
- cfs_atomic_read(&obd->obd_connected_clients));
-
- obd->obd_recovery_expired = 1;
- cfs_waitq_signal(&obd->obd_next_transno_waitq);
+ struct obd_device *obd = (struct obd_device *)castmeharder;
+ CDEBUG(D_HA, "%s: recovery timed out; %d clients are still in recovery"
+ " after %lds (%d clients connected)\n",
+ obd->obd_name, cfs_atomic_read(&obd->obd_lock_replay_clients),
+ cfs_time_current_sec() - obd->obd_recovery_start,
+ cfs_atomic_read(&obd->obd_connected_clients));
+
+ obd->obd_recovery_expired = 1;
+ wake_up(&obd->obd_next_transno_waitq);
}
void target_recovery_init(struct lu_target *lut, svc_handler_t handler)
if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LOCK_REPLAY_DONE) {
/* client declares he's ready to complete recovery
* so, we put the request on th final queue */
- target_request_copy_get(req);
- DEBUG_REQ(D_HA, req, "queue final req");
- cfs_waitq_signal(&obd->obd_next_transno_waitq);
+ target_request_copy_get(req);
+ DEBUG_REQ(D_HA, req, "queue final req");
+ wake_up(&obd->obd_next_transno_waitq);
spin_lock(&obd->obd_recovery_task_lock);
if (obd->obd_recovering) {
cfs_list_add_tail(&req->rq_list,
/* client declares he's ready to replay locks */
target_request_copy_get(req);
DEBUG_REQ(D_HA, req, "queue lock replay req");
- cfs_waitq_signal(&obd->obd_next_transno_waitq);
+ wake_up(&obd->obd_next_transno_waitq);
spin_lock(&obd->obd_recovery_task_lock);
LASSERT(obd->obd_recovering);
/* usually due to recovery abort */
obd->obd_requests_queued_for_recovery++;
spin_unlock(&obd->obd_recovery_task_lock);
- cfs_waitq_signal(&obd->obd_next_transno_waitq);
+ wake_up(&obd->obd_next_transno_waitq);
RETURN(0);
}
EXPORT_SYMBOL(target_queue_recovery_request);
lock->l_resource = resource;
lu_ref_add(&resource->lr_reference, "lock", lock);
- cfs_atomic_set(&lock->l_refc, 2);
- CFS_INIT_LIST_HEAD(&lock->l_res_link);
- CFS_INIT_LIST_HEAD(&lock->l_lru);
- CFS_INIT_LIST_HEAD(&lock->l_pending_chain);
- CFS_INIT_LIST_HEAD(&lock->l_bl_ast);
- CFS_INIT_LIST_HEAD(&lock->l_cp_ast);
- CFS_INIT_LIST_HEAD(&lock->l_rk_ast);
- cfs_waitq_init(&lock->l_waitq);
- lock->l_blocking_lock = NULL;
- CFS_INIT_LIST_HEAD(&lock->l_sl_mode);
- CFS_INIT_LIST_HEAD(&lock->l_sl_policy);
- CFS_INIT_HLIST_NODE(&lock->l_exp_hash);
+ cfs_atomic_set(&lock->l_refc, 2);
+ CFS_INIT_LIST_HEAD(&lock->l_res_link);
+ CFS_INIT_LIST_HEAD(&lock->l_lru);
+ CFS_INIT_LIST_HEAD(&lock->l_pending_chain);
+ CFS_INIT_LIST_HEAD(&lock->l_bl_ast);
+ CFS_INIT_LIST_HEAD(&lock->l_cp_ast);
+ CFS_INIT_LIST_HEAD(&lock->l_rk_ast);
+ init_waitqueue_head(&lock->l_waitq);
+ lock->l_blocking_lock = NULL;
+ CFS_INIT_LIST_HEAD(&lock->l_sl_mode);
+ CFS_INIT_LIST_HEAD(&lock->l_sl_policy);
+ CFS_INIT_HLIST_NODE(&lock->l_exp_hash);
CFS_INIT_HLIST_NODE(&lock->l_exp_flock_hash);
lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats,
{
if ((lock->l_flags & LDLM_FL_FAIL_NOTIFIED) == 0) {
lock->l_flags |= LDLM_FL_FAIL_NOTIFIED;
- cfs_waitq_broadcast(&lock->l_waitq);
+ wake_up_all(&lock->l_waitq);
}
}
EXPORT_SYMBOL(ldlm_lock_fail_match_locked);
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
{
lock->l_flags |= LDLM_FL_LVB_READY;
- cfs_waitq_broadcast(&lock->l_waitq);
+ wake_up_all(&lock->l_waitq);
}
EXPORT_SYMBOL(ldlm_lock_allow_match_locked);
struct ldlm_bl_pool {
spinlock_t blp_lock;
- /*
- * blp_prio_list is used for callbacks that should be handled
- * as a priority. It is used for LDLM_FL_DISCARD_DATA requests.
- * see bug 13843
- */
- cfs_list_t blp_prio_list;
-
- /*
- * blp_list is used for all other callbacks which are likely
- * to take longer to process.
- */
- cfs_list_t blp_list;
-
- cfs_waitq_t blp_waitq;
- struct completion blp_comp;
- cfs_atomic_t blp_num_threads;
- cfs_atomic_t blp_busy_threads;
- int blp_min_threads;
- int blp_max_threads;
+ /*
+ * blp_prio_list is used for callbacks that should be handled
+ * as a priority. It is used for LDLM_FL_DISCARD_DATA requests.
+ * see bug 13843
+ */
+ cfs_list_t blp_prio_list;
+
+ /*
+ * blp_list is used for all other callbacks which are likely
+ * to take longer to process.
+ */
+ cfs_list_t blp_list;
+
+ wait_queue_head_t blp_waitq;
+ struct completion blp_comp;
+ cfs_atomic_t blp_num_threads;
+ cfs_atomic_t blp_busy_threads;
+ int blp_min_threads;
+ int blp_max_threads;
};
struct ldlm_bl_work_item {
static cfs_timer_t waiting_locks_timer;
static struct expired_lock_thread {
- cfs_waitq_t elt_waitq;
+ wait_queue_head_t elt_waitq;
int elt_state;
int elt_dump;
cfs_list_t elt_expired_locks;
*/
static int expired_lock_main(void *arg)
{
- cfs_list_t *expired = &expired_lock_thread.elt_expired_locks;
- struct l_wait_info lwi = { 0 };
- int do_dump;
+ cfs_list_t *expired = &expired_lock_thread.elt_expired_locks;
+ struct l_wait_info lwi = { 0 };
+ int do_dump;
- ENTRY;
+ ENTRY;
- expired_lock_thread.elt_state = ELT_READY;
- cfs_waitq_signal(&expired_lock_thread.elt_waitq);
+ expired_lock_thread.elt_state = ELT_READY;
+ wake_up(&expired_lock_thread.elt_waitq);
- while (1) {
- l_wait_event(expired_lock_thread.elt_waitq,
- have_expired_locks() ||
- expired_lock_thread.elt_state == ELT_TERMINATE,
- &lwi);
+ while (1) {
+ l_wait_event(expired_lock_thread.elt_waitq,
+ have_expired_locks() ||
+ expired_lock_thread.elt_state == ELT_TERMINATE,
+ &lwi);
spin_lock_bh(&waiting_locks_spinlock);
if (expired_lock_thread.elt_dump) {
libcfs_run_lbug_upcall(&msgdata);
spin_lock_bh(&waiting_locks_spinlock);
- expired_lock_thread.elt_dump = 0;
- }
+ expired_lock_thread.elt_dump = 0;
+ }
- do_dump = 0;
+ do_dump = 0;
- while (!cfs_list_empty(expired)) {
- struct obd_export *export;
- struct ldlm_lock *lock;
+ while (!cfs_list_empty(expired)) {
+ struct obd_export *export;
+ struct ldlm_lock *lock;
- lock = cfs_list_entry(expired->next, struct ldlm_lock,
- l_pending_chain);
+ lock = cfs_list_entry(expired->next, struct ldlm_lock,
+ l_pending_chain);
if ((void *)lock < LP_POISON + PAGE_CACHE_SIZE &&
(void *)lock >= LP_POISON) {
spin_unlock_bh(&waiting_locks_spinlock);
cfs_list_del_init(&lock->l_pending_chain);
if ((void *)lock->l_export <
LP_POISON + PAGE_CACHE_SIZE &&
- (void *)lock->l_export >= LP_POISON) {
- CERROR("lock with free export on elt list %p\n",
- lock->l_export);
- lock->l_export = NULL;
- LDLM_ERROR(lock, "free export");
- /* release extra ref grabbed by
- * ldlm_add_waiting_lock() or
- * ldlm_failed_ast() */
- LDLM_LOCK_RELEASE(lock);
- continue;
- }
+ (void *)lock->l_export >= LP_POISON) {
+ CERROR("lock with free export on elt list %p\n",
+ lock->l_export);
+ lock->l_export = NULL;
+ LDLM_ERROR(lock, "free export");
+ /* release extra ref grabbed by
+ * ldlm_add_waiting_lock() or
+ * ldlm_failed_ast() */
+ LDLM_LOCK_RELEASE(lock);
+ continue;
+ }
if (lock->l_flags & LDLM_FL_DESTROYED) {
/* release the lock refcount where
}
spin_unlock_bh(&waiting_locks_spinlock);
- if (do_dump && obd_dump_on_eviction) {
- CERROR("dump the log upon eviction\n");
- libcfs_debug_dumplog();
- }
+ if (do_dump && obd_dump_on_eviction) {
+ CERROR("dump the log upon eviction\n");
+ libcfs_debug_dumplog();
+ }
- if (expired_lock_thread.elt_state == ELT_TERMINATE)
- break;
- }
+ if (expired_lock_thread.elt_state == ELT_TERMINATE)
+ break;
+ }
- expired_lock_thread.elt_state = ELT_STOPPED;
- cfs_waitq_signal(&expired_lock_thread.elt_waitq);
- RETURN(0);
+ expired_lock_thread.elt_state = ELT_STOPPED;
+ wake_up(&expired_lock_thread.elt_waitq);
+ RETURN(0);
}
static int ldlm_add_waiting_lock(struct ldlm_lock *lock);
if (obd_dump_on_timeout && need_dump)
expired_lock_thread.elt_dump = __LINE__;
- cfs_waitq_signal(&expired_lock_thread.elt_waitq);
+ wake_up(&expired_lock_thread.elt_waitq);
}
/*
LDLM_LOCK_GET(lock);
cfs_list_add(&lock->l_pending_chain,
&expired_lock_thread.elt_expired_locks);
- cfs_waitq_signal(&expired_lock_thread.elt_waitq);
+ wake_up(&expired_lock_thread.elt_waitq);
spin_unlock_bh(&waiting_locks_spinlock);
#else
class_fail_export(lock->l_export);
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
int to = cfs_time_seconds(1);
while (to > 0) {
- cfs_schedule_timeout_and_set_state(
- CFS_TASK_INTERRUPTIBLE, to);
+ schedule_timeout_and_set_state(
+ TASK_INTERRUPTIBLE, to);
if (lock->l_granted_mode == lock->l_req_mode ||
lock->l_flags & LDLM_FL_DESTROYED)
break;
lock_res_and_lock(lock);
lock->l_flags |= LDLM_FL_FAILED;
unlock_res_and_lock(lock);
- cfs_waitq_signal(&lock->l_waitq);
+ wake_up(&lock->l_waitq);
}
LDLM_LOCK_RELEASE(lock);
}
}
spin_unlock(&blp->blp_lock);
- cfs_waitq_signal(&blp->blp_waitq);
+ wake_up(&blp->blp_waitq);
/* can not check blwi->blwi_flags as blwi could be already freed in
LCF_ASYNC mode */
ldlm_state->ldlm_bl_pool = blp;
spin_lock_init(&blp->blp_lock);
- CFS_INIT_LIST_HEAD(&blp->blp_list);
- CFS_INIT_LIST_HEAD(&blp->blp_prio_list);
- cfs_waitq_init(&blp->blp_waitq);
- cfs_atomic_set(&blp->blp_num_threads, 0);
- cfs_atomic_set(&blp->blp_busy_threads, 0);
+ CFS_INIT_LIST_HEAD(&blp->blp_list);
+ CFS_INIT_LIST_HEAD(&blp->blp_prio_list);
+ init_waitqueue_head(&blp->blp_waitq);
+ cfs_atomic_set(&blp->blp_num_threads, 0);
+ cfs_atomic_set(&blp->blp_busy_threads, 0);
#ifdef __KERNEL__
if (ldlm_num_threads == 0) {
# ifdef HAVE_SERVER_SUPPORT
CFS_INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
expired_lock_thread.elt_state = ELT_STOPPED;
- cfs_waitq_init(&expired_lock_thread.elt_waitq);
+ init_waitqueue_head(&expired_lock_thread.elt_waitq);
CFS_INIT_LIST_HEAD(&waiting_locks_list);
spin_lock_init(&waiting_locks_spinlock);
GOTO(out, rc);
}
- cfs_wait_event(expired_lock_thread.elt_waitq,
+ wait_event(expired_lock_thread.elt_waitq,
expired_lock_thread.elt_state == ELT_READY);
# endif /* HAVE_SERVER_SUPPORT */
spin_lock(&blp->blp_lock);
cfs_list_add_tail(&blwi.blwi_entry, &blp->blp_list);
- cfs_waitq_signal(&blp->blp_waitq);
+ wake_up(&blp->blp_waitq);
spin_unlock(&blp->blp_lock);
wait_for_completion(&blp->blp_comp);
# ifdef HAVE_SERVER_SUPPORT
if (expired_lock_thread.elt_state != ELT_STOPPED) {
expired_lock_thread.elt_state = ELT_TERMINATE;
- cfs_waitq_signal(&expired_lock_thread.elt_waitq);
- cfs_wait_event(expired_lock_thread.elt_waitq,
+ wake_up(&expired_lock_thread.elt_waitq);
+ wait_event(expired_lock_thread.elt_waitq,
expired_lock_thread.elt_state == ELT_STOPPED);
}
# endif
static int ldlm_pools_thread_main(void *arg)
{
- struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
+ struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
int s_time, c_time;
- ENTRY;
+ ENTRY;
- thread_set_flags(thread, SVC_RUNNING);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ thread_set_flags(thread, SVC_RUNNING);
+ wake_up(&thread->t_ctl_waitq);
- CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
+ CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
"ldlm_poold", current_pid());
while (1) {
thread_test_and_clear_flags(thread, SVC_EVENT);
}
- thread_set_flags(thread, SVC_STOPPED);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ thread_set_flags(thread, SVC_STOPPED);
+ wake_up(&thread->t_ctl_waitq);
- CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
+ CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
"ldlm_poold", current_pid());
complete_and_exit(&ldlm_pools_comp, 0);
RETURN(-ENOMEM);
init_completion(&ldlm_pools_comp);
- cfs_waitq_init(&ldlm_pools_thread->t_ctl_waitq);
+ init_waitqueue_head(&ldlm_pools_thread->t_ctl_waitq);
task = kthread_run(ldlm_pools_thread_main, ldlm_pools_thread,
"ldlm_poold");
static void ldlm_pools_thread_stop(void)
{
- ENTRY;
+ ENTRY;
- if (ldlm_pools_thread == NULL) {
- EXIT;
- return;
- }
+ if (ldlm_pools_thread == NULL) {
+ EXIT;
+ return;
+ }
- thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
- cfs_waitq_signal(&ldlm_pools_thread->t_ctl_waitq);
+ thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
+ wake_up(&ldlm_pools_thread->t_ctl_waitq);
- /*
- * Make sure that pools thread is finished before freeing @thread.
- * This fixes possible race and oops due to accessing freed memory
- * in pools thread.
- */
+ /*
+ * Make sure that pools thread is finished before freeing @thread.
+ * This fixes possible race and oops due to accessing freed memory
+ * in pools thread.
+ */
wait_for_completion(&ldlm_pools_comp);
- OBD_FREE_PTR(ldlm_pools_thread);
- ldlm_pools_thread = NULL;
- EXIT;
+ OBD_FREE_PTR(ldlm_pools_thread);
+ ldlm_pools_thread = NULL;
+ EXIT;
}
int ldlm_pools_init(void)
*/
int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data)
{
- ENTRY;
+ ENTRY;
- if (flags == LDLM_FL_WAIT_NOREPROC) {
- LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
- RETURN(0);
- }
+ if (flags == LDLM_FL_WAIT_NOREPROC) {
+ LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
+ RETURN(0);
+ }
- if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV))) {
- cfs_waitq_signal(&lock->l_waitq);
- RETURN(ldlm_completion_tail(lock));
- }
+ if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
+ LDLM_FL_BLOCK_CONV))) {
+ wake_up(&lock->l_waitq);
+ RETURN(ldlm_completion_tail(lock));
+ }
- LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
- "going forward");
- ldlm_reprocess_all(lock->l_resource);
- RETURN(0);
+ LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
+ "going forward");
+ ldlm_reprocess_all(lock->l_resource);
+ RETURN(0);
}
EXPORT_SYMBOL(ldlm_completion_ast_async);
goto noreproc;
}
- if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV))) {
- cfs_waitq_signal(&lock->l_waitq);
- RETURN(0);
- }
+ if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
+ LDLM_FL_BLOCK_CONV))) {
+ wake_up(&lock->l_waitq);
+ RETURN(0);
+ }
LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
"sleeping");
ns->ns_appetite = apt;
ns->ns_client = client;
- CFS_INIT_LIST_HEAD(&ns->ns_list_chain);
- CFS_INIT_LIST_HEAD(&ns->ns_unused_list);
+ CFS_INIT_LIST_HEAD(&ns->ns_list_chain);
+ CFS_INIT_LIST_HEAD(&ns->ns_unused_list);
spin_lock_init(&ns->ns_lock);
- cfs_atomic_set(&ns->ns_bref, 0);
- cfs_waitq_init(&ns->ns_waitq);
+ cfs_atomic_set(&ns->ns_bref, 0);
+ init_waitqueue_head(&ns->ns_waitq);
- ns->ns_max_nolock_size = NS_DEFAULT_MAX_NOLOCK_BYTES;
- ns->ns_contention_time = NS_DEFAULT_CONTENTION_SECONDS;
- ns->ns_contended_locks = NS_DEFAULT_CONTENDED_LOCKS;
+ ns->ns_max_nolock_size = NS_DEFAULT_MAX_NOLOCK_BYTES;
+ ns->ns_contention_time = NS_DEFAULT_CONTENTION_SECONDS;
+ ns->ns_contended_locks = NS_DEFAULT_CONTENDED_LOCKS;
ns->ns_max_parallel_ast = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
ns->ns_nr_unused = 0;
void ldlm_namespace_put(struct ldlm_namespace *ns)
{
if (cfs_atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
- cfs_waitq_signal(&ns->ns_waitq);
+ wake_up(&ns->ns_waitq);
spin_unlock(&ns->ns_lock);
}
}
spin_lock(&lfsck->li_lock);
thread_set_flags(thread, SVC_RUNNING);
spin_unlock(&lfsck->li_lock);
- cfs_waitq_broadcast(&thread->t_ctl_waitq);
+ wake_up_all(&thread->t_ctl_waitq);
if (!cfs_list_empty(&lfsck->li_list_scan) ||
cfs_list_empty(&lfsck->li_list_double_scan))
noenv:
spin_lock(&lfsck->li_lock);
thread_set_flags(thread, SVC_STOPPED);
- cfs_waitq_broadcast(&thread->t_ctl_waitq);
+ wake_up_all(&thread->t_ctl_waitq);
spin_unlock(&lfsck->li_lock);
return rc;
}
thread_set_flags(thread, SVC_STOPPING);
spin_unlock(&lfsck->li_lock);
- cfs_waitq_broadcast(&thread->t_ctl_waitq);
+ wake_up_all(&thread->t_ctl_waitq);
l_wait_event(thread->t_ctl_waitq,
thread_is_stopped(thread),
&lwi);
CFS_INIT_LIST_HEAD(&lfsck->li_list_double_scan);
CFS_INIT_LIST_HEAD(&lfsck->li_list_idle);
atomic_set(&lfsck->li_ref, 1);
- cfs_waitq_init(&lfsck->li_thread.t_ctl_waitq);
+ init_waitqueue_head(&lfsck->li_thread.t_ctl_waitq);
lfsck->li_next = next;
lfsck->li_bottom = key;
*/
static int capa_thread_main(void *unused)
{
- struct obd_capa *ocapa, *tmp, *next;
- struct inode *inode = NULL;
- struct l_wait_info lwi = { 0 };
- int rc;
- ENTRY;
+ struct obd_capa *ocapa, *tmp, *next;
+ struct inode *inode = NULL;
+ struct l_wait_info lwi = { 0 };
+ int rc;
+ ENTRY;
- thread_set_flags(&ll_capa_thread, SVC_RUNNING);
- cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
+ thread_set_flags(&ll_capa_thread, SVC_RUNNING);
+ wake_up(&ll_capa_thread.t_ctl_waitq);
- while (1) {
- l_wait_event(ll_capa_thread.t_ctl_waitq,
- !thread_is_running(&ll_capa_thread) ||
- have_expired_capa(),
- &lwi);
+ while (1) {
+ l_wait_event(ll_capa_thread.t_ctl_waitq,
+ !thread_is_running(&ll_capa_thread) ||
+ have_expired_capa(),
+ &lwi);
if (!thread_is_running(&ll_capa_thread))
break;
}
thread_set_flags(&ll_capa_thread, SVC_STOPPED);
- cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
+ wake_up(&ll_capa_thread.t_ctl_waitq);
RETURN(0);
}
void ll_capa_timer_callback(unsigned long unused)
{
- cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
+ wake_up(&ll_capa_thread.t_ctl_waitq);
}
int ll_capa_thread_start(void)
cfs_task_t *task;
ENTRY;
- cfs_waitq_init(&ll_capa_thread.t_ctl_waitq);
+ init_waitqueue_head(&ll_capa_thread.t_ctl_waitq);
task = kthread_run(capa_thread_main, NULL, "ll_capa");
if (IS_ERR(task)) {
PTR_ERR(task));
RETURN(PTR_ERR(task));
}
- cfs_wait_event(ll_capa_thread.t_ctl_waitq,
+ wait_event(ll_capa_thread.t_ctl_waitq,
thread_is_running(&ll_capa_thread));
RETURN(0);
void ll_capa_thread_stop(void)
{
- thread_set_flags(&ll_capa_thread, SVC_STOPPING);
- cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
- cfs_wait_event(ll_capa_thread.t_ctl_waitq,
- thread_is_stopped(&ll_capa_thread));
+ thread_set_flags(&ll_capa_thread, SVC_STOPPING);
+ wake_up(&ll_capa_thread.t_ctl_waitq);
+ wait_event(ll_capa_thread.t_ctl_waitq,
+ thread_is_stopped(&ll_capa_thread));
}
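
The capa thread start/stop paths above use t_ctl_waitq as a two-way handshake: the controller flips a flag, wakes the thread, then sleeps on the same queue until the thread acknowledges. A hedged sketch with illustrative names:

/* Illustrative sketch only (assumes <linux/wait.h>). */
static void stop_worker(wait_queue_head_t *ctl_waitq, int *stopping, int *stopped)
{
	*stopping = 1;				/* the worker polls this in its own wait */
	wake_up(ctl_waitq);			/* kick the worker out of its wait */
	wait_event(*ctl_waitq, *stopped != 0);	/* worker sets *stopped and wakes us */
}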
struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc)
inode->i_ino, inode->i_generation);
cfs_list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
- /* Avoid a concurrent insertion into the close thread queue:
- * an inode is already in the close thread, open(), write(),
- * close() happen, epoch is closed as the inode is marked as
- * LLIF_EPOCH_PENDING. When pages are written inode should not
- * be inserted into the queue again, clear this flag to avoid
- * it. */
- lli->lli_flags &= ~LLIF_DONE_WRITING;
-
- cfs_waitq_signal(&lcq->lcq_waitq);
+ /* Avoid a concurrent insertion into the close thread queue:
+ * the inode is already in the close thread; open(), write() and
+ * close() happen, and the epoch is closed as the inode is marked
+ * LLIF_EPOCH_PENDING. When its pages are written the inode should
+ * not be inserted into the queue again, so clear this flag to
+ * avoid it. */
+ lli->lli_flags &= ~LLIF_DONE_WRITING;
+
+ wake_up(&lcq->lcq_waitq);
spin_unlock(&lcq->lcq_lock);
}
spin_unlock(&lli->lli_lock);
spin_lock_init(&lcq->lcq_lock);
CFS_INIT_LIST_HEAD(&lcq->lcq_head);
- cfs_waitq_init(&lcq->lcq_waitq);
+ init_waitqueue_head(&lcq->lcq_waitq);
init_completion(&lcq->lcq_comp);
task = kthread_run(ll_close_thread, lcq, "ll_close");
{
init_completion(&lcq->lcq_comp);
cfs_atomic_inc(&lcq->lcq_stop);
- cfs_waitq_signal(&lcq->lcq_waitq);
+ wake_up(&lcq->lcq_waitq);
wait_for_completion(&lcq->lcq_comp);
OBD_FREE(lcq, sizeof(*lcq));
}
struct ll_close_queue {
spinlock_t lcq_lock;
cfs_list_t lcq_head;
- cfs_waitq_t lcq_waitq;
+ wait_queue_head_t lcq_waitq;
struct completion lcq_comp;
cfs_atomic_t lcq_stop;
};
unsigned int sai_miss_hidden;/* "ls -al", but first dentry
* is not a hidden one */
unsigned int sai_skip_hidden;/* skipped hidden dentry count */
- unsigned int sai_ls_all:1, /* "ls -al", do stat-ahead for
- * hidden entries */
- sai_in_readpage:1,/* statahead is in readdir()*/
- sai_agl_valid:1;/* AGL is valid for the dir */
- cfs_waitq_t sai_waitq; /* stat-ahead wait queue */
- struct ptlrpc_thread sai_thread; /* stat-ahead thread */
- struct ptlrpc_thread sai_agl_thread; /* AGL thread */
+ unsigned int sai_ls_all:1, /* "ls -al", do stat-ahead for
+ * hidden entries */
+ sai_in_readpage:1,/* statahead is in readdir()*/
+ sai_agl_valid:1;/* AGL is valid for the dir */
+ wait_queue_head_t sai_waitq; /* stat-ahead wait queue */
+ struct ptlrpc_thread sai_thread; /* stat-ahead thread */
+ struct ptlrpc_thread sai_agl_thread; /* AGL thread */
cfs_list_t sai_entries; /* entry list */
cfs_list_t sai_entries_received; /* entries returned */
cfs_list_t sai_entries_stated; /* entries stated */
OBD_FREE_PTR(ioc_data);
}
- /* Really, we'd like to wait until there are no requests outstanding,
- * and then continue. For now, we just invalidate the requests,
- * schedule() and sleep one second if needed, and hope.
- */
- cfs_schedule();
- EXIT;
+ /* Really, we'd like to wait until there are no requests outstanding,
+ * and then continue. For now, we just invalidate the requests,
+ * schedule() and sleep one second if needed, and hope.
+ */
+ schedule();
+ EXIT;
}
int ll_remount_fs(struct super_block *sb, int *flags, char *data)
};
struct lloop_device {
- int lo_number;
- int lo_refcnt;
- loff_t lo_offset;
- loff_t lo_sizelimit;
- int lo_flags;
- int (*ioctl)(struct lloop_device *, int cmd,
- unsigned long arg);
+ int lo_number;
+ int lo_refcnt;
+ loff_t lo_offset;
+ loff_t lo_sizelimit;
+ int lo_flags;
+ int (*ioctl)(struct lloop_device *, int cmd,
+ unsigned long arg);
- struct file *lo_backing_file;
- struct block_device *lo_device;
- unsigned lo_blocksize;
+ struct file *lo_backing_file;
+ struct block_device *lo_device;
+ unsigned lo_blocksize;
- int old_gfp_mask;
+ int old_gfp_mask;
spinlock_t lo_lock;
struct bio *lo_bio;
int lo_state;
struct semaphore lo_sem;
struct mutex lo_ctl_mutex;
- cfs_atomic_t lo_pending;
- cfs_waitq_t lo_bh_wait;
+ cfs_atomic_t lo_pending;
+ wait_queue_head_t lo_bh_wait;
- struct request_queue *lo_queue;
+ struct request_queue *lo_queue;
- const struct lu_env *lo_env;
- struct cl_io lo_io;
- struct ll_dio_pages lo_pvec;
+ const struct lu_env *lo_env;
+ struct cl_io lo_io;
+ struct ll_dio_pages lo_pvec;
- /* data to handle bio for lustre. */
- struct lo_request_data {
- struct page *lrd_pages[LLOOP_MAX_SEGMENTS];
- loff_t lrd_offsets[LLOOP_MAX_SEGMENTS];
- } lo_requests[1];
+ /* data to handle bio for lustre. */
+ struct lo_request_data {
+ struct page *lrd_pages[LLOOP_MAX_SEGMENTS];
+ loff_t lrd_offsets[LLOOP_MAX_SEGMENTS];
+ } lo_requests[1];
};
/*
spin_unlock_irqrestore(&lo->lo_lock, flags);
cfs_atomic_inc(&lo->lo_pending);
- if (cfs_waitq_active(&lo->lo_bh_wait))
- cfs_waitq_signal(&lo->lo_bh_wait);
+ if (waitqueue_active(&lo->lo_bh_wait))
+ wake_up(&lo->lo_bh_wait);
}
/*
up(&lo->lo_sem);
for (;;) {
- cfs_wait_event(lo->lo_bh_wait, loop_active(lo));
+ wait_event(lo->lo_bh_wait, loop_active(lo));
if (!cfs_atomic_read(&lo->lo_pending)) {
int exiting = 0;
spin_lock_irq(&lo->lo_lock);
spin_lock_irq(&lo->lo_lock);
lo->lo_state = LLOOP_RUNDOWN;
spin_unlock_irq(&lo->lo_lock);
- cfs_waitq_signal(&lo->lo_bh_wait);
+ wake_up(&lo->lo_bh_wait);
down(&lo->lo_sem);
lo->lo_backing_file = NULL;
mutex_init(&lo->lo_ctl_mutex);
sema_init(&lo->lo_sem, 0);
- cfs_waitq_init(&lo->lo_bh_wait);
+ init_waitqueue_head(&lo->lo_bh_wait);
lo->lo_number = i;
spin_lock_init(&lo->lo_lock);
disk->major = lloop_major;
}
if (added > 0)
- cfs_waitq_signal(&sai->sai_agl_thread.t_ctl_waitq);
+ wake_up(&sai->sai_agl_thread.t_ctl_waitq);
}
static struct ll_statahead_info *ll_sai_alloc(void)
{
- struct ll_statahead_info *sai;
- int i;
- ENTRY;
+ struct ll_statahead_info *sai;
+ int i;
+ ENTRY;
- OBD_ALLOC_PTR(sai);
- if (!sai)
- RETURN(NULL);
+ OBD_ALLOC_PTR(sai);
+ if (!sai)
+ RETURN(NULL);
- cfs_atomic_set(&sai->sai_refcount, 1);
+ cfs_atomic_set(&sai->sai_refcount, 1);
spin_lock(&sai_generation_lock);
sai->sai_generation = ++sai_generation;
sai->sai_generation = ++sai_generation;
spin_unlock(&sai_generation_lock);
- sai->sai_max = LL_SA_RPC_MIN;
- sai->sai_index = 1;
- cfs_waitq_init(&sai->sai_waitq);
- cfs_waitq_init(&sai->sai_thread.t_ctl_waitq);
- cfs_waitq_init(&sai->sai_agl_thread.t_ctl_waitq);
+ sai->sai_max = LL_SA_RPC_MIN;
+ sai->sai_index = 1;
+ init_waitqueue_head(&sai->sai_waitq);
+ init_waitqueue_head(&sai->sai_thread.t_ctl_waitq);
+ init_waitqueue_head(&sai->sai_agl_thread.t_ctl_waitq);
CFS_INIT_LIST_HEAD(&sai->sai_entries);
- CFS_INIT_LIST_HEAD(&sai->sai_entries_received);
- CFS_INIT_LIST_HEAD(&sai->sai_entries_stated);
- CFS_INIT_LIST_HEAD(&sai->sai_entries_agl);
+ CFS_INIT_LIST_HEAD(&sai->sai_entries_received);
+ CFS_INIT_LIST_HEAD(&sai->sai_entries_stated);
+ CFS_INIT_LIST_HEAD(&sai->sai_entries_agl);
- for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
- CFS_INIT_LIST_HEAD(&sai->sai_cache[i]);
+ for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
+ CFS_INIT_LIST_HEAD(&sai->sai_cache[i]);
spin_lock_init(&sai->sai_cache_lock[i]);
- }
- cfs_atomic_set(&sai->sai_cache_count, 0);
+ }
+ cfs_atomic_set(&sai->sai_cache_count, 0);
- RETURN(sai);
+ RETURN(sai);
}
static inline struct ll_statahead_info *
EXIT;
out:
- /* The "ll_sa_entry_to_stated()" will drop related ldlm ibits lock
- * reference count by calling "ll_intent_drop_lock()" in spite of the
- * above operations failed or not. Do not worry about calling
- * "ll_intent_drop_lock()" more than once. */
+ /* The "ll_sa_entry_to_stated()" will drop related ldlm ibits lock
+ * reference count by calling "ll_intent_drop_lock()" in spite of the
+ * above operations failed or not. Do not worry about calling
+ * "ll_intent_drop_lock()" more than once. */
rc = ll_sa_entry_to_stated(sai, entry,
rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
if (rc == 0 && entry->se_index == sai->sai_index_wait)
- cfs_waitq_signal(&sai->sai_waitq);
- ll_sa_entry_put(sai, entry);
+ wake_up(&sai->sai_waitq);
+ ll_sa_entry_put(sai, entry);
}
static int ll_statahead_interpret(struct ptlrpc_request *req,
ll_sa_entry_put(sai, entry);
if (wakeup)
- cfs_waitq_signal(&sai->sai_thread.t_ctl_waitq);
+ wake_up(&sai->sai_thread.t_ctl_waitq);
}
EXIT;
if (dentry != NULL)
dput(dentry);
- if (rc) {
- rc1 = ll_sa_entry_to_stated(sai, entry,
- rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
- if (rc1 == 0 && entry->se_index == sai->sai_index_wait)
- cfs_waitq_signal(&sai->sai_waitq);
- } else {
- sai->sai_sent++;
- }
+ if (rc) {
+ rc1 = ll_sa_entry_to_stated(sai, entry,
+ rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
+ if (rc1 == 0 && entry->se_index == sai->sai_index_wait)
+ wake_up(&sai->sai_waitq);
+ } else {
+ sai->sai_sent++;
+ }
sai->sai_index++;
/* drop one refcount on entry by ll_sa_entry_alloc */
sai->sai_agl_valid = 1;
thread_set_flags(thread, SVC_RUNNING);
spin_unlock(&plli->lli_agl_lock);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
while (1) {
l_wait_event(thread->t_ctl_waitq,
}
thread_set_flags(thread, SVC_STOPPED);
spin_unlock(&plli->lli_agl_lock);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
ll_sai_put(sai);
CDEBUG(D_READA, "agl thread stopped: [pid %d] [parent %.*s]\n",
current_pid(), parent->d_name.len, parent->d_name.name);
spin_lock(&plli->lli_sa_lock);
thread_set_flags(thread, SVC_RUNNING);
spin_unlock(&plli->lli_sa_lock);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
ll_dir_chain_init(&chain);
page = ll_get_dir_page(dir, pos, &chain);
spin_lock(&plli->lli_agl_lock);
thread_set_flags(agl_thread, SVC_STOPPING);
spin_unlock(&plli->lli_agl_lock);
- cfs_waitq_signal(&agl_thread->t_ctl_waitq);
+ wake_up(&agl_thread->t_ctl_waitq);
CDEBUG(D_READA, "stop agl thread: [pid %d]\n",
current_pid());
}
thread_set_flags(thread, SVC_STOPPED);
spin_unlock(&plli->lli_sa_lock);
- cfs_waitq_signal(&sai->sai_waitq);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&sai->sai_waitq);
+ wake_up(&thread->t_ctl_waitq);
ll_sai_put(sai);
dput(parent);
CDEBUG(D_READA, "statahead thread stopped: [pid %d] [parent %.*s]\n",
if (!thread_is_stopped(thread)) {
thread_set_flags(thread, SVC_STOPPING);
spin_unlock(&lli->lli_sa_lock);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
CDEBUG(D_READA, "stop statahead thread: [pid %d]\n",
current_pid());
}
if (!thread_is_stopped(thread))
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
EXIT;
}
OBD_ALLOC_PTR(lod->lod_qos.lq_statfs_data);
if (NULL == lod->lod_qos.lq_statfs_data)
RETURN(-ENOMEM);
- cfs_waitq_init(&lod->lod_qos.lq_statfs_waitq);
+ init_waitqueue_head(&lod->lod_qos.lq_statfs_waitq);
/* Set up OST pool environment */
lod->lod_pools_hash_body = cfs_hash_create("POOLS", HASH_POOLS_CUR_BITS,
/**
* Waitq - wait for no one else is using lo_lsm
*/
- cfs_waitq_t lo_waitq;
+ wait_queue_head_t lo_waitq;
/**
* Layout metadata. NULL if empty layout.
*/
struct lov_thread_info {
- struct cl_object_conf lti_stripe_conf;
- struct lu_fid lti_fid;
- struct cl_lock_descr lti_ldescr;
- struct ost_lvb lti_lvb;
- struct cl_2queue lti_cl2q;
- struct cl_lock_closure lti_closure;
- cfs_waitlink_t lti_waiter;
+ struct cl_object_conf lti_stripe_conf;
+ struct lu_fid lti_fid;
+ struct cl_lock_descr lti_ldescr;
+ struct ost_lvb lti_lvb;
+ struct cl_2queue lti_cl2q;
+ struct cl_lock_closure lti_closure;
+ wait_queue_t lti_waiter;
};
/**
struct brw_page *set_pga;
struct lov_lock_handles *set_lockh;
cfs_list_t set_list;
- cfs_waitq_t set_waitq;
+ wait_queue_head_t set_waitq;
spinlock_t set_lock;
};
LASSERT(cfs_atomic_read(&lov->lo_active_ios) > 0);
if (cfs_atomic_dec_and_test(&lov->lo_active_ios))
- cfs_waitq_broadcast(&lov->lo_waitq);
+ wake_up_all(&lov->lo_waitq);
EXIT;
}
ENTRY;
if (cfs_atomic_dec_and_test(&lov->lo_active_ios))
- cfs_waitq_broadcast(&lov->lo_waitq);
+ wake_up_all(&lov->lo_waitq);
EXIT;
}
}
static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
- struct lovsub_object *los, int idx)
+ struct lovsub_object *los, int idx)
{
- struct cl_object *sub;
- struct lov_layout_raid0 *r0;
- struct lu_site *site;
- struct lu_site_bkt_data *bkt;
- cfs_waitlink_t *waiter;
+ struct cl_object *sub;
+ struct lov_layout_raid0 *r0;
+ struct lu_site *site;
+ struct lu_site_bkt_data *bkt;
+ wait_queue_t *waiter;
r0 = &lov->u.raid0;
LASSERT(r0->lo_sub[idx] == los);
/* ... wait until it is actually destroyed---sub-object clears its
* ->lo_sub[] slot in lovsub_object_fini() */
- if (r0->lo_sub[idx] == los) {
- waiter = &lov_env_info(env)->lti_waiter;
- cfs_waitlink_init(waiter);
- cfs_waitq_add(&bkt->lsb_marche_funebre, waiter);
- cfs_set_current_state(CFS_TASK_UNINT);
- while (1) {
- /* this wait-queue is signaled at the end of
- * lu_object_free(). */
- cfs_set_current_state(CFS_TASK_UNINT);
+ if (r0->lo_sub[idx] == los) {
+ waiter = &lov_env_info(env)->lti_waiter;
+ init_waitqueue_entry_current(waiter);
+ add_wait_queue(&bkt->lsb_marche_funebre, waiter);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ while (1) {
+ /* this wait-queue is signaled at the end of
+ * lu_object_free(). */
+ set_current_state(TASK_UNINTERRUPTIBLE);
spin_lock(&r0->lo_sub_lock);
if (r0->lo_sub[idx] == los) {
spin_unlock(&r0->lo_sub_lock);
- cfs_waitq_wait(waiter, CFS_TASK_UNINT);
+ waitq_wait(waiter, TASK_UNINTERRUPTIBLE);
} else {
spin_unlock(&r0->lo_sub_lock);
- cfs_set_current_state(CFS_TASK_RUNNING);
- break;
- }
- }
- cfs_waitq_del(&bkt->lsb_marche_funebre, waiter);
- }
- LASSERT(r0->lo_sub[idx] == NULL);
+ set_current_state(TASK_RUNNING);
+ break;
+ }
+ }
+ remove_wait_queue(&bkt->lsb_marche_funebre, waiter);
+ }
+ LASSERT(r0->lo_sub[idx] == NULL);
}
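lov_subobject_kill() above is the open-coded equivalent of wait_event(): queue a wait entry, set the task state, re-check the condition, and sleep until the sub-object slot has been cleared. init_waitqueue_entry_current() and waitq_wait() are libcfs compatibility helpers; with stock kernel primitives the same loop would look roughly like the sketch below (illustrative only, not part of the patch).

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/* Sleep uninterruptibly until *slot is cleared by another thread,
 * which is expected to wake @wq after clearing it. */
static void wait_for_slot_clear(wait_queue_head_t *wq, void **slot,
				spinlock_t *lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(wq, &wait);
	for (;;) {
		/* Set the state before testing the condition so that a
		 * concurrent wake-up cannot be lost. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_lock(lock);
		if (*slot == NULL) {
			spin_unlock(lock);
			break;
		}
		spin_unlock(lock);
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(wq, &wait);
}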
static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
ENTRY;
init_rwsem(&lov->lo_type_guard);
cfs_atomic_set(&lov->lo_active_ios, 0);
- cfs_waitq_init(&lov->lo_waitq);
+ init_waitqueue_head(&lov->lo_waitq);
cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));
set->set_cookies = 0;
CFS_INIT_LIST_HEAD(&set->set_list);
cfs_atomic_set(&set->set_refcount, 1);
- cfs_waitq_init(&set->set_waitq);
+ init_waitqueue_head(&set->set_waitq);
spin_lock_init(&set->set_lock);
}
}
void lov_update_set(struct lov_request_set *set,
- struct lov_request *req, int rc)
+ struct lov_request *req, int rc)
{
- req->rq_complete = 1;
- req->rq_rc = rc;
+ req->rq_complete = 1;
+ req->rq_rc = rc;
- cfs_atomic_inc(&set->set_completes);
- if (rc == 0)
- cfs_atomic_inc(&set->set_success);
+ cfs_atomic_inc(&set->set_completes);
+ if (rc == 0)
+ cfs_atomic_inc(&set->set_success);
- cfs_waitq_signal(&set->set_waitq);
+ wake_up(&set->set_waitq);
}
int lov_update_common_set(struct lov_request_set *set,
*/
int lov_check_and_wait_active(struct lov_obd *lov, int ost_idx)
{
- cfs_waitq_t waitq;
+ wait_queue_head_t waitq;
struct l_wait_info lwi;
struct lov_tgt_desc *tgt;
int rc = 0;
mutex_unlock(&lov->lov_lock);
- cfs_waitq_init(&waitq);
+ init_waitqueue_head(&waitq);
lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(obd_timeout),
cfs_time_seconds(1), NULL, NULL);
* in the future - the code may need to be revisited. */
int mdc_enter_request(struct client_obd *cli)
{
- int rc = 0;
- struct mdc_cache_waiter mcw;
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
- cfs_list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
- cfs_waitq_init(&mcw.mcw_waitq);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- rc = l_wait_event(mcw.mcw_waitq, mdc_req_avail(cli, &mcw), &lwi);
- if (rc) {
- client_obd_list_lock(&cli->cl_loi_list_lock);
- if (cfs_list_empty(&mcw.mcw_entry))
- cli->cl_r_in_flight--;
- cfs_list_del_init(&mcw.mcw_entry);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- }
- } else {
- cli->cl_r_in_flight++;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- }
- return rc;
+ int rc = 0;
+ struct mdc_cache_waiter mcw;
+ struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
+ cfs_list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
+ init_waitqueue_head(&mcw.mcw_waitq);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ rc = l_wait_event(mcw.mcw_waitq, mdc_req_avail(cli, &mcw), &lwi);
+ if (rc) {
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ if (cfs_list_empty(&mcw.mcw_entry))
+ cli->cl_r_in_flight--;
+ cfs_list_del_init(&mcw.mcw_entry);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ }
+ } else {
+ cli->cl_r_in_flight++;
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ }
+ return rc;
}
void mdc_exit_request(struct client_obd *cli)
{
- cfs_list_t *l, *tmp;
- struct mdc_cache_waiter *mcw;
+ cfs_list_t *l, *tmp;
+ struct mdc_cache_waiter *mcw;
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ cli->cl_r_in_flight--;
+ cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
+ if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
+ /* No free request slots anymore */
+ break;
+ }
- client_obd_list_lock(&cli->cl_loi_list_lock);
- cli->cl_r_in_flight--;
- cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
- if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
- /* No free request slots anymore */
- break;
- }
-
- mcw = cfs_list_entry(l, struct mdc_cache_waiter, mcw_entry);
- cfs_list_del_init(&mcw->mcw_entry);
- cli->cl_r_in_flight++;
- cfs_waitq_signal(&mcw->mcw_waitq);
- }
- /* Empty waiting list? Decrease reqs in-flight number */
+ mcw = cfs_list_entry(l, struct mdc_cache_waiter, mcw_entry);
+ cfs_list_del_init(&mcw->mcw_entry);
+ cli->cl_r_in_flight++;
+ wake_up(&mcw->mcw_waitq);
+ }
+ /* Empty waiting list? Decrease reqs in-flight number */
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
}
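mdc_enter_request() and mdc_exit_request() above throttle metadata RPCs to cl_max_rpcs_in_flight: a caller that finds no free slot parks on its own mcw_waitq, and each completed request hands its slot to the first queued waiter. The sketch below shows the same idea in a deliberately simplified form, with a single shared wait queue instead of the per-waiter list; every name in it is illustrative.

#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DECLARE_WAIT_QUEUE_HEAD(slot_waitq);
static DEFINE_SPINLOCK(slot_lock);
static int slots_in_flight;
static int slots_max = 8;

/* Claim a slot if one is free; used as the wait_event condition below. */
static bool slot_claim(void)
{
	bool ok;

	spin_lock(&slot_lock);
	ok = slots_in_flight < slots_max;
	if (ok)
		slots_in_flight++;
	spin_unlock(&slot_lock);
	return ok;
}

static int enter_request(void)
{
	/* Sleeps until a slot frees up; returns -ERESTARTSYS on a signal. */
	return wait_event_interruptible(slot_waitq, slot_claim());
}

static void exit_request(void)
{
	spin_lock(&slot_lock);
	slots_in_flight--;
	spin_unlock(&slot_lock);
	wake_up(&slot_waitq);
}

The real code keeps a FIFO of waiters so that freed slots are granted in arrival order; the shared-queue version above does not preserve that fairness.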
#endif
int mdc_readpage(struct obd_export *exp, struct md_op_data *op_data,
- struct page **pages, struct ptlrpc_request **request)
-{
- struct ptlrpc_request *req;
- struct ptlrpc_bulk_desc *desc;
- int i;
- cfs_waitq_t waitq;
- int resends = 0;
- struct l_wait_info lwi;
- int rc;
- ENTRY;
+ struct page **pages, struct ptlrpc_request **request)
+{
+ struct ptlrpc_request *req;
+ struct ptlrpc_bulk_desc *desc;
+ int i;
+ wait_queue_head_t waitq;
+ int resends = 0;
+ struct l_wait_info lwi;
+ int rc;
+ ENTRY;
- *request = NULL;
- cfs_waitq_init(&waitq);
+ *request = NULL;
+ init_waitqueue_head(&waitq);
restart_bulk:
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_READPAGE);
void mdt_ck_timer_callback(unsigned long castmeharder)
{
- struct mdt_device *mdt = (struct mdt_device *)castmeharder;
- struct ptlrpc_thread *thread = &mdt->mdt_ck_thread;
+ struct mdt_device *mdt = (struct mdt_device *)castmeharder;
+ struct ptlrpc_thread *thread = &mdt->mdt_ck_thread;
- ENTRY;
- thread_add_flags(thread, SVC_EVENT);
- cfs_waitq_signal(&thread->t_ctl_waitq);
- EXIT;
+ ENTRY;
+ thread_add_flags(thread, SVC_EVENT);
+ wake_up(&thread->t_ctl_waitq);
+ EXIT;
}
static int mdt_ck_thread_main(void *args)
{
- struct mdt_device *mdt = args;
- struct ptlrpc_thread *thread = &mdt->mdt_ck_thread;
- struct lustre_capa_key *bkey = &mdt->mdt_capa_keys[0],
- *rkey = &mdt->mdt_capa_keys[1];
- struct lustre_capa_key *tmp;
- struct lu_env env;
- struct mdt_thread_info *info;
- struct md_device *next;
- struct l_wait_info lwi = { 0 };
- mdsno_t mdsnum;
- int rc;
- ENTRY;
+ struct mdt_device *mdt = args;
+ struct ptlrpc_thread *thread = &mdt->mdt_ck_thread;
+ struct lustre_capa_key *bkey = &mdt->mdt_capa_keys[0],
+ *rkey = &mdt->mdt_capa_keys[1];
+ struct lustre_capa_key *tmp;
+ struct lu_env env;
+ struct mdt_thread_info *info;
+ struct md_device *next;
+ struct l_wait_info lwi = { 0 };
+ mdsno_t mdsnum;
+ int rc;
+ ENTRY;
unshare_fs_struct();
- cfs_block_allsigs();
+ cfs_block_allsigs();
- thread_set_flags(thread, SVC_RUNNING);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ thread_set_flags(thread, SVC_RUNNING);
+ wake_up(&thread->t_ctl_waitq);
- rc = lu_env_init(&env, LCT_MD_THREAD|LCT_REMEMBER|LCT_NOREF);
- if (rc)
- RETURN(rc);
+ rc = lu_env_init(&env, LCT_MD_THREAD|LCT_REMEMBER|LCT_NOREF);
+ if (rc)
+ RETURN(rc);
- thread->t_env = &env;
- env.le_ctx.lc_thread = thread;
- env.le_ctx.lc_cookie = 0x1;
+ thread->t_env = &env;
+ env.le_ctx.lc_thread = thread;
+ env.le_ctx.lc_cookie = 0x1;
- info = lu_context_key_get(&env.le_ctx, &mdt_thread_key);
- LASSERT(info != NULL);
+ info = lu_context_key_get(&env.le_ctx, &mdt_thread_key);
+ LASSERT(info != NULL);
tmp = &info->mti_capa_key;
mdsnum = mdt_seq_site(mdt)->ss_node_id;
- while (1) {
- l_wait_event(thread->t_ctl_waitq,
- thread_is_stopping(thread) ||
- thread_is_event(thread),
- &lwi);
+ while (1) {
+ l_wait_event(thread->t_ctl_waitq,
+ thread_is_stopping(thread) ||
+ thread_is_event(thread),
+ &lwi);
- if (thread_is_stopping(thread))
- break;
- thread_clear_flags(thread, SVC_EVENT);
+ if (thread_is_stopping(thread))
+ break;
+ thread_clear_flags(thread, SVC_EVENT);
- if (cfs_time_before(cfs_time_current(), mdt->mdt_ck_expiry))
- break;
+ if (cfs_time_before(cfs_time_current(), mdt->mdt_ck_expiry))
+ break;
- *tmp = *rkey;
- make_capa_key(tmp, mdsnum, rkey->lk_keyid);
+ *tmp = *rkey;
+ make_capa_key(tmp, mdsnum, rkey->lk_keyid);
- next = mdt->mdt_child;
- rc = next->md_ops->mdo_update_capa_key(&env, next, tmp);
- if (!rc) {
+ next = mdt->mdt_child;
+ rc = next->md_ops->mdo_update_capa_key(&env, next, tmp);
+ if (!rc) {
spin_lock(&capa_lock);
*bkey = *rkey;
*rkey = *tmp;
*rkey = *bkey;
memset(bkey, 0, sizeof(*bkey));
spin_unlock(&capa_lock);
- } else {
- set_capa_key_expiry(mdt);
- DEBUG_CAPA_KEY(D_SEC, rkey, "new");
- }
- }
+ } else {
+ set_capa_key_expiry(mdt);
+ DEBUG_CAPA_KEY(D_SEC, rkey, "new");
+ }
+ }
if (rc) {
DEBUG_CAPA_KEY(D_ERROR, rkey, "update failed for");
/* next retry is in 300 sec */
mdt->mdt_ck_expiry = jiffies + 300 * HZ;
}
- cfs_timer_arm(&mdt->mdt_ck_timer, mdt->mdt_ck_expiry);
- CDEBUG(D_SEC, "mdt_ck_timer %lu\n", mdt->mdt_ck_expiry);
- }
- lu_env_fini(&env);
+ cfs_timer_arm(&mdt->mdt_ck_timer, mdt->mdt_ck_expiry);
+ CDEBUG(D_SEC, "mdt_ck_timer %lu\n", mdt->mdt_ck_expiry);
+ }
+ lu_env_fini(&env);
- thread_set_flags(thread, SVC_STOPPED);
- cfs_waitq_signal(&thread->t_ctl_waitq);
- RETURN(0);
+ thread_set_flags(thread, SVC_STOPPED);
+ wake_up(&thread->t_ctl_waitq);
+ RETURN(0);
}
int mdt_ck_thread_start(struct mdt_device *mdt)
struct ptlrpc_thread *thread = &mdt->mdt_ck_thread;
cfs_task_t *task;
- cfs_waitq_init(&thread->t_ctl_waitq);
+ init_waitqueue_head(&thread->t_ctl_waitq);
task = kthread_run(mdt_ck_thread_main, mdt, "mdt_ck");
if (IS_ERR(task)) {
CERROR("cannot start mdt_ck thread, rc = %ld\n", PTR_ERR(task));
void mdt_ck_thread_stop(struct mdt_device *mdt)
{
- struct ptlrpc_thread *thread = &mdt->mdt_ck_thread;
+ struct ptlrpc_thread *thread = &mdt->mdt_ck_thread;
- if (!thread_is_running(thread))
- return;
+ if (!thread_is_running(thread))
+ return;
- thread_set_flags(thread, SVC_STOPPING);
- cfs_waitq_signal(&thread->t_ctl_waitq);
- l_wait_condition(thread->t_ctl_waitq, thread_is_stopped(thread));
+ thread_set_flags(thread, SVC_STOPPING);
+ wake_up(&thread->t_ctl_waitq);
+ l_wait_condition(thread->t_ctl_waitq, thread_is_stopped(thread));
}
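The mdt_ck thread above follows the usual ptlrpc_thread control handshake: the thread announces SVC_RUNNING on t_ctl_waitq so its starter can return, and mdt_ck_thread_stop() sets SVC_STOPPING, wakes the thread, and waits for SVC_STOPPED. Stripped of the Lustre wrappers, the handshake reduces to the sketch below; the names are illustrative and the real code guards the flag word with a lock where it matters.

#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

#define SVC_RUNNING	0x1
#define SVC_STOPPING	0x2
#define SVC_STOPPED	0x4

struct svc_thread {
	wait_queue_head_t	t_ctl_waitq;
	unsigned long		t_flags;
};

static int svc_main(void *arg)
{
	struct svc_thread *t = arg;

	t->t_flags = SVC_RUNNING;
	wake_up(&t->t_ctl_waitq);		/* unblock the starter */

	/* ... do work until asked to stop ... */
	wait_event(t->t_ctl_waitq, t->t_flags & SVC_STOPPING);

	t->t_flags = SVC_STOPPED;
	wake_up(&t->t_ctl_waitq);		/* unblock the stopper */
	return 0;
}

static int svc_start(struct svc_thread *t)
{
	struct task_struct *task;

	init_waitqueue_head(&t->t_ctl_waitq);
	t->t_flags = 0;
	task = kthread_run(svc_main, t, "svc_demo");
	if (IS_ERR(task))
		return PTR_ERR(task);
	wait_event(t->t_ctl_waitq, t->t_flags & SVC_RUNNING);
	return 0;
}

static void svc_stop(struct svc_thread *t)
{
	t->t_flags = SVC_STOPPING;
	wake_up(&t->t_ctl_waitq);
	wait_event(t->t_ctl_waitq, t->t_flags & SVC_STOPPED);
}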
ENTRY;
cdt->cdt_thread.t_flags = SVC_RUNNING;
- cfs_waitq_signal(&cdt->cdt_thread.t_ctl_waitq);
+ wake_up(&cdt->cdt_thread.t_ctl_waitq);
CDEBUG(D_HSM, "%s: coordinator thread starting, pid=%d\n",
mdt_obd_name(mdt), current_pid());
* and cdt cleaning will be done by event sender
*/
cdt->cdt_thread.t_flags = SVC_STOPPED;
- cfs_waitq_signal(&cdt->cdt_thread.t_ctl_waitq);
+ wake_up(&cdt->cdt_thread.t_ctl_waitq);
}
if (rc != 0)
/* wake up coordinator */
cdt->cdt_thread.t_flags = SVC_EVENT;
- cfs_waitq_signal(&cdt->cdt_thread.t_ctl_waitq);
+ wake_up(&cdt->cdt_thread.t_ctl_waitq);
RETURN(0);
}
cdt->cdt_state = CDT_STOPPED;
- cfs_waitq_init(&cdt->cdt_thread.t_ctl_waitq);
+ init_waitqueue_head(&cdt->cdt_thread.t_ctl_waitq);
mutex_init(&cdt->cdt_llog_lock);
init_rwsem(&cdt->cdt_agent_lock);
init_rwsem(&cdt->cdt_request_lock);
rc = 0;
}
- cfs_wait_event(cdt->cdt_thread.t_ctl_waitq,
+ wait_event(cdt->cdt_thread.t_ctl_waitq,
(cdt->cdt_thread.t_flags & SVC_RUNNING));
cdt->cdt_state = CDT_RUNNING;
if (cdt->cdt_state != CDT_STOPPING) {
/* stop coordinator thread before cleaning */
cdt->cdt_thread.t_flags = SVC_STOPPING;
- cfs_waitq_signal(&cdt->cdt_thread.t_ctl_waitq);
- cfs_wait_event(cdt->cdt_thread.t_ctl_waitq,
- cdt->cdt_thread.t_flags & SVC_STOPPED);
+ wake_up(&cdt->cdt_thread.t_ctl_waitq);
+ wait_event(cdt->cdt_thread.t_ctl_waitq,
+ cdt->cdt_thread.t_flags & SVC_STOPPED);
}
cdt->cdt_state = CDT_STOPPED;
#define RQ_LATER 0x4
#define RQ_STOP 0x8
static int rq_state = 0;
-static cfs_waitq_t rq_waitq;
+static wait_queue_head_t rq_waitq;
static DECLARE_COMPLETION(rq_exit);
static void do_requeue(struct config_llog_data *cld)
} else {
rq_state |= RQ_NOW;
spin_unlock(&config_list_lock);
- cfs_waitq_signal(&rq_waitq);
+ wake_up(&rq_waitq);
}
EXIT;
}
rq_state |= RQ_STOP;
spin_unlock(&config_list_lock);
if (running) {
- cfs_waitq_signal(&rq_waitq);
+ wake_up(&rq_waitq);
wait_for_completion(&rq_exit);
}
}
if (cfs_atomic_inc_return(&mgc_count) == 1) {
rq_state = 0;
- cfs_waitq_init(&rq_waitq);
+ init_waitqueue_head(&rq_waitq);
/* start requeue thread */
rc = PTR_ERR(kthread_run(mgc_requeue_thread, NULL,
spin_lock(&config_list_lock);
rq_state |= RQ_NOW;
spin_unlock(&config_list_lock);
- cfs_waitq_signal(&rq_waitq);
+ wake_up(&rq_waitq);
/* TODO: Help the MGS rebuild nidtbl. -jay */
}
/* Target NIDs Table */
struct mgs_nidtbl fsdb_nidtbl;
- /* async thread to notify clients */
- struct mgs_device *fsdb_mgs;
- cfs_waitq_t fsdb_notify_waitq;
- struct completion fsdb_notify_comp;
- cfs_time_t fsdb_notify_start;
- cfs_atomic_t fsdb_notify_phase;
+ /* async thread to notify clients */
+ struct mgs_device *fsdb_mgs;
+ wait_queue_head_t fsdb_notify_waitq;
+ struct completion fsdb_notify_comp;
+ cfs_time_t fsdb_notify_start;
+ cfs_atomic_t fsdb_notify_phase;
volatile unsigned int fsdb_notify_async:1,
fsdb_notify_stop:1;
/* statistic data */
{
cfs_task_t *task;
- if (!ir_timeout)
- ir_timeout = OBD_IR_MGS_TIMEOUT;
+ if (!ir_timeout)
+ ir_timeout = OBD_IR_MGS_TIMEOUT;
- fsdb->fsdb_ir_state = IR_FULL;
- if (cfs_time_before(cfs_time_current_sec(),
- mgs->mgs_start_time + ir_timeout))
- fsdb->fsdb_ir_state = IR_STARTUP;
- fsdb->fsdb_nonir_clients = 0;
- CFS_INIT_LIST_HEAD(&fsdb->fsdb_clients);
+ fsdb->fsdb_ir_state = IR_FULL;
+ if (cfs_time_before(cfs_time_current_sec(),
+ mgs->mgs_start_time + ir_timeout))
+ fsdb->fsdb_ir_state = IR_STARTUP;
+ fsdb->fsdb_nonir_clients = 0;
+ CFS_INIT_LIST_HEAD(&fsdb->fsdb_clients);
- /* start notify thread */
+ /* start notify thread */
fsdb->fsdb_mgs = mgs;
- cfs_atomic_set(&fsdb->fsdb_notify_phase, 0);
- cfs_waitq_init(&fsdb->fsdb_notify_waitq);
+ cfs_atomic_set(&fsdb->fsdb_notify_phase, 0);
+ init_waitqueue_head(&fsdb->fsdb_notify_waitq);
init_completion(&fsdb->fsdb_notify_comp);
task = kthread_run(mgs_ir_notify, fsdb,
CERROR("Start notify thread error %ld\n", PTR_ERR(task));
mgs_nidtbl_init_fs(env, fsdb);
- return 0;
+ return 0;
}
void mgs_ir_fini_fs(struct mgs_device *mgs, struct fs_db *fsdb)
{
if (test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags))
- return;
+ return;
- mgs_fsc_cleanup_by_fsdb(fsdb);
+ mgs_fsc_cleanup_by_fsdb(fsdb);
- mgs_nidtbl_fini_fs(fsdb);
+ mgs_nidtbl_fini_fs(fsdb);
- LASSERT(cfs_list_empty(&fsdb->fsdb_clients));
+ LASSERT(cfs_list_empty(&fsdb->fsdb_clients));
- fsdb->fsdb_notify_stop = 1;
- cfs_waitq_signal(&fsdb->fsdb_notify_waitq);
+ fsdb->fsdb_notify_stop = 1;
+ wake_up(&fsdb->fsdb_notify_waitq);
wait_for_completion(&fsdb->fsdb_notify_comp);
}
}
mutex_unlock(&fsdb->fsdb_mutex);
- LASSERT(ergo(mti->mti_flags & LDD_F_IR_CAPABLE, notify));
- if (notify) {
- CDEBUG(D_MGS, "Try to revoke recover lock of %s\n",
- fsdb->fsdb_name);
- cfs_atomic_inc(&fsdb->fsdb_notify_phase);
- cfs_waitq_signal(&fsdb->fsdb_notify_waitq);
- }
- return 0;
+ LASSERT(ergo(mti->mti_flags & LDD_F_IR_CAPABLE, notify));
+ if (notify) {
+ CDEBUG(D_MGS, "Try to revoke recover lock of %s\n",
+ fsdb->fsdb_name);
+ cfs_atomic_inc(&fsdb->fsdb_notify_phase);
+ wake_up(&fsdb->fsdb_notify_waitq);
+ }
+ return 0;
}
/* NID table can be cached by two entities: Clients and MDTs */
void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
{
ENTRY;
- cfs_waitq_init(&anchor->csi_waitq);
+ init_waitqueue_head(&anchor->csi_waitq);
cfs_atomic_set(&anchor->csi_sync_nr, nrpages);
cfs_atomic_set(&anchor->csi_barrier, nrpages > 0);
anchor->csi_sync_rc = 0;
*/
LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) > 0);
if (cfs_atomic_dec_and_test(&anchor->csi_sync_nr)) {
- cfs_waitq_broadcast(&anchor->csi_waitq);
+ wake_up_all(&anchor->csi_waitq);
/* it's safe to nuke or reuse anchor now */
cfs_atomic_set(&anchor->csi_barrier, 0);
}
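cl_sync_io_init() and cl_sync_io_note() above form a counted completion: the counter starts at the number of pages in flight, each finished page decrements it, and the last decrement broadcasts csi_waitq so every waiter can proceed. A minimal sketch of the same idea follows (illustrative names); the kernel's struct completion covers the single-event case, while the counted form is what a multi-page transfer wait needs.

#include <linux/wait.h>
#include <linux/atomic.h>

struct sync_anchor {
	wait_queue_head_t	waitq;
	atomic_t		nr_pending;
};

static void anchor_init(struct sync_anchor *a, int nr)
{
	init_waitqueue_head(&a->waitq);
	atomic_set(&a->nr_pending, nr);
}

/* Called once per completed item, possibly from interrupt context;
 * wake_up_all() is safe to call there. */
static void anchor_note_one(struct sync_anchor *a)
{
	if (atomic_dec_and_test(&a->nr_pending))
		wake_up_all(&a->waitq);
}

static void anchor_wait(struct sync_anchor *a)
{
	wait_event(a->waitq, atomic_read(&a->nr_pending) == 0);
}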
cl_object_get(obj);
lu_object_ref_add_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock",
lock);
- CFS_INIT_LIST_HEAD(&lock->cll_layers);
- CFS_INIT_LIST_HEAD(&lock->cll_linkage);
- CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
- lu_ref_init(&lock->cll_reference);
- lu_ref_init(&lock->cll_holders);
+ CFS_INIT_LIST_HEAD(&lock->cll_layers);
+ CFS_INIT_LIST_HEAD(&lock->cll_linkage);
+ CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
+ lu_ref_init(&lock->cll_reference);
+ lu_ref_init(&lock->cll_holders);
mutex_init(&lock->cll_guard);
lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
- cfs_waitq_init(&lock->cll_wq);
- head = obj->co_lu.lo_header;
+ init_waitqueue_head(&lock->cll_wq);
+ head = obj->co_lu.lo_header;
CS_LOCKSTATE_INC(obj, CLS_NEW);
CS_LOCK_INC(obj, total);
CS_LOCK_INC(obj, create);
- cl_lock_lockdep_init(lock);
- cfs_list_for_each_entry(obj, &head->loh_layers,
- co_lu.lo_linkage) {
+ cl_lock_lockdep_init(lock);
+ cfs_list_for_each_entry(obj, &head->loh_layers,
+ co_lu.lo_linkage) {
int err;
err = obj->co_ops->coo_lock_init(env, obj, lock, io);
*/
int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
{
- cfs_waitlink_t waiter;
- cfs_sigset_t blocked;
- int result;
-
- ENTRY;
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_depth == 1);
- LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
-
- cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
- result = lock->cll_error;
- if (result == 0) {
- /* To avoid being interrupted by the 'non-fatal' signals
- * (SIGCHLD, for instance), we'd block them temporarily.
- * LU-305 */
- blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
-
- cfs_waitlink_init(&waiter);
- cfs_waitq_add(&lock->cll_wq, &waiter);
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cl_lock_mutex_put(env, lock);
+ wait_queue_t waiter;
+ cfs_sigset_t blocked;
+ int result;
+
+ ENTRY;
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
+ LASSERT(lock->cll_depth == 1);
+ LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
+
+ cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
+ result = lock->cll_error;
+ if (result == 0) {
+ /* To avoid being interrupted by the 'non-fatal' signals
+ * (SIGCHLD, for instance), we'd block them temporarily.
+ * LU-305 */
+ blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
+
+ init_waitqueue_entry_current(&waiter);
+ add_wait_queue(&lock->cll_wq, &waiter);
+ set_current_state(TASK_INTERRUPTIBLE);
+ cl_lock_mutex_put(env, lock);
- LASSERT(cl_lock_nr_mutexed(env) == 0);
+ LASSERT(cl_lock_nr_mutexed(env) == 0);
/* Returning ERESTARTSYS instead of EINTR so syscalls
* can be restarted if signals are pending here */
result = -ERESTARTSYS;
if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
- cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);
+ waitq_wait(&waiter, TASK_INTERRUPTIBLE);
if (!cfs_signal_pending())
result = 0;
}
- cl_lock_mutex_get(env, lock);
- cfs_set_current_state(CFS_TASK_RUNNING);
- cfs_waitq_del(&lock->cll_wq, &waiter);
+ cl_lock_mutex_get(env, lock);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&lock->cll_wq, &waiter);
- /* Restore old blocked signals */
- cfs_restore_sigs(blocked);
- }
- RETURN(result);
+ /* Restore old blocked signals */
+ cfs_restore_sigs(blocked);
+ }
+ RETURN(result);
}
EXPORT_SYMBOL(cl_lock_state_wait);
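cl_lock_state_wait() is the interruptible flavour of the open-coded sleep shown earlier: the task waits in TASK_INTERRUPTIBLE and turns a pending signal into -ERESTARTSYS so the system call can be transparently restarted, while cfs_block_sigsinv()/cfs_restore_sigs() are libcfs helpers that mask the non-fatal signals for the duration (LU-305). Leaving the signal masking aside, the single wait-and-check step is roughly the sketch below; the caller is expected to re-check its own condition and call again, as the cl_lock state machine does.

#include <linux/wait.h>
#include <linux/sched.h>

/* Sleep once until @cond becomes non-zero or a signal arrives. */
static int wait_for_flag_interruptible(wait_queue_head_t *wq, int *cond)
{
	DECLARE_WAITQUEUE(wait, current);
	int rc = 0;

	add_wait_queue(wq, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	if (!*cond) {
		schedule();
		if (signal_pending(current))
			rc = -ERESTARTSYS;	/* let the syscall restart */
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(wq, &wait);
	return rc;
}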
static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
- enum cl_lock_state state)
+ enum cl_lock_state state)
{
- const struct cl_lock_slice *slice;
+ const struct cl_lock_slice *slice;
- ENTRY;
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
+ ENTRY;
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
- cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
- if (slice->cls_ops->clo_state != NULL)
- slice->cls_ops->clo_state(env, slice, state);
- cfs_waitq_broadcast(&lock->cll_wq);
- EXIT;
+ cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
+ if (slice->cls_ops->clo_state != NULL)
+ slice->cls_ops->clo_state(env, slice, state);
+ wake_up_all(&lock->cll_wq);
+ EXIT;
}
/**
if (info->clt_next_index > descr->cld_end)
break;
- if (res == CLP_GANG_RESCHED)
- cfs_cond_resched();
- } while (res != CLP_GANG_OKAY);
+ if (res == CLP_GANG_RESCHED)
+ cond_resched();
+ } while (res != CLP_GANG_OKAY);
out:
- cl_io_fini(env, io);
- RETURN(result);
+ cl_io_fini(env, io);
+ RETURN(result);
}
EXPORT_SYMBOL(cl_lock_discard_pages);
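The CLP_GANG_RESCHED handling above keeps long page-gang scans preemption friendly: the scan works in bounded batches and, when need_resched() reports that the scheduler wants the CPU back, it drops its locks and calls cond_resched() before continuing. In isolation the pattern is simply the sketch below (illustrative):

#include <linux/kernel.h>
#include <linux/sched.h>

/* Process a long index range in batches, yielding between batches. */
static void scan_range(unsigned long start, unsigned long end)
{
	unsigned long idx = start;

	while (idx < end) {
		unsigned long batch_end = min(idx + 64UL, end);

		for (; idx < batch_end; idx++)
			;	/* ... look up and process one item ... */

		cond_resched();	/* no-op unless need_resched() is set */
	}
}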
void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_users > 0);
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
+ LASSERT(lock->cll_users > 0);
- ENTRY;
- cl_lock_used_mod(env, lock, -1);
- if (lock->cll_users == 0)
- cfs_waitq_broadcast(&lock->cll_wq);
- EXIT;
+ ENTRY;
+ cl_lock_used_mod(env, lock, -1);
+ if (lock->cll_users == 0)
+ wake_up_all(&lock->cll_wq);
+ EXIT;
}
EXPORT_SYMBOL(cl_lock_user_del);
* Return at least one page in @queue unless there is no covered page.
*/
int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io, pgoff_t start, pgoff_t end,
- cl_page_gang_cb_t cb, void *cbdata)
+ struct cl_io *io, pgoff_t start, pgoff_t end,
+ cl_page_gang_cb_t cb, void *cbdata)
{
struct cl_object_header *hdr;
struct cl_page *page;
"gang_lookup", cfs_current());
cl_page_put(env, page);
}
- if (nr < CLT_PVEC_SIZE || end_of_region)
- break;
+ if (nr < CLT_PVEC_SIZE || end_of_region)
+ break;
- if (res == CLP_GANG_OKAY && cfs_need_resched())
- res = CLP_GANG_RESCHED;
- if (res != CLP_GANG_OKAY)
- break;
+ if (res == CLP_GANG_OKAY && need_resched())
+ res = CLP_GANG_RESCHED;
+ if (res != CLP_GANG_OKAY)
+ break;
spin_lock(&hdr->coh_page_guard);
tree_lock = 1;
*/
int cl_pages_prune(const struct lu_env *env, struct cl_object *clobj)
{
- struct cl_thread_info *info;
- struct cl_object *obj = cl_object_top(clobj);
- struct cl_io *io;
- int result;
+ struct cl_thread_info *info;
+ struct cl_object *obj = cl_object_top(clobj);
+ struct cl_io *io;
+ int result;
- ENTRY;
- info = cl_env_info(env);
- io = &info->clt_io;
+ ENTRY;
+ info = cl_env_info(env);
+ io = &info->clt_io;
- /*
- * initialize the io. This is ugly since we never do IO in this
- * function, we just make cl_page_list functions happy. -jay
- */
- io->ci_obj = obj;
+ /*
+ * initialize the io. This is ugly since we never do IO in this
+ * function, we just make cl_page_list functions happy. -jay
+ */
+ io->ci_obj = obj;
io->ci_ignore_layout = 1;
- result = cl_io_init(env, io, CIT_MISC, obj);
- if (result != 0) {
- cl_io_fini(env, io);
- RETURN(io->ci_result);
- }
+ result = cl_io_init(env, io, CIT_MISC, obj);
+ if (result != 0) {
+ cl_io_fini(env, io);
+ RETURN(io->ci_result);
+ }
- do {
- result = cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF,
- page_prune_cb, NULL);
- if (result == CLP_GANG_RESCHED)
- cfs_cond_resched();
- } while (result != CLP_GANG_OKAY);
+ do {
+ result = cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF,
+ page_prune_cb, NULL);
+ if (result == CLP_GANG_RESCHED)
+ cond_resched();
+ } while (result != CLP_GANG_OKAY);
- cl_io_fini(env, io);
- RETURN(result);
+ cl_io_fini(env, io);
+ RETURN(result);
}
EXPORT_SYMBOL(cl_pages_prune);
struct obd_import *class_new_import(struct obd_device *obd)
{
- struct obd_import *imp;
+ struct obd_import *imp;
- OBD_ALLOC(imp, sizeof(*imp));
- if (imp == NULL)
- return NULL;
+ OBD_ALLOC(imp, sizeof(*imp));
+ if (imp == NULL)
+ return NULL;
CFS_INIT_LIST_HEAD(&imp->imp_pinger_chain);
- CFS_INIT_LIST_HEAD(&imp->imp_zombie_chain);
- CFS_INIT_LIST_HEAD(&imp->imp_replay_list);
- CFS_INIT_LIST_HEAD(&imp->imp_sending_list);
- CFS_INIT_LIST_HEAD(&imp->imp_delayed_list);
+ CFS_INIT_LIST_HEAD(&imp->imp_zombie_chain);
+ CFS_INIT_LIST_HEAD(&imp->imp_replay_list);
+ CFS_INIT_LIST_HEAD(&imp->imp_sending_list);
+ CFS_INIT_LIST_HEAD(&imp->imp_delayed_list);
spin_lock_init(&imp->imp_lock);
imp->imp_last_success_conn = 0;
imp->imp_state = LUSTRE_IMP_NEW;
imp->imp_obd = class_incref(obd, "import", imp);
mutex_init(&imp->imp_sec_mutex);
- cfs_waitq_init(&imp->imp_recovery_waitq);
-
- cfs_atomic_set(&imp->imp_refcount, 2);
- cfs_atomic_set(&imp->imp_unregistering, 0);
- cfs_atomic_set(&imp->imp_inflight, 0);
- cfs_atomic_set(&imp->imp_replay_inflight, 0);
- cfs_atomic_set(&imp->imp_inval_count, 0);
- CFS_INIT_LIST_HEAD(&imp->imp_conn_list);
- CFS_INIT_LIST_HEAD(&imp->imp_handle.h_link);
+ init_waitqueue_head(&imp->imp_recovery_waitq);
+
+ cfs_atomic_set(&imp->imp_refcount, 2);
+ cfs_atomic_set(&imp->imp_unregistering, 0);
+ cfs_atomic_set(&imp->imp_inflight, 0);
+ cfs_atomic_set(&imp->imp_replay_inflight, 0);
+ cfs_atomic_set(&imp->imp_inval_count, 0);
+ CFS_INIT_LIST_HEAD(&imp->imp_conn_list);
+ CFS_INIT_LIST_HEAD(&imp->imp_handle.h_link);
class_handle_hash(&imp->imp_handle, &import_handle_ops);
- init_imp_at(&imp->imp_at);
+ init_imp_at(&imp->imp_at);
- /* the default magic is V2, will be used in connect RPC, and
- * then adjusted according to the flags in request/reply. */
- imp->imp_msg_magic = LUSTRE_MSG_MAGIC_V2;
+ /* the default magic is V2, will be used in connect RPC, and
+ * then adjusted according to the flags in request/reply. */
+ imp->imp_msg_magic = LUSTRE_MSG_MAGIC_V2;
- return imp;
+ return imp;
}
EXPORT_SYMBOL(class_new_import);
spin_lock(&obd->obd_dev_lock);
while (!cfs_list_empty(&obd->obd_unlinked_exports)) {
spin_unlock(&obd->obd_dev_lock);
- cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
- cfs_time_seconds(waited));
- if (waited > 5 && IS_PO2(waited)) {
- LCONSOLE_WARN("%s is waiting for obd_unlinked_exports "
- "more than %d seconds. "
- "The obd refcount = %d. Is it stuck?\n",
- obd->obd_name, waited,
- cfs_atomic_read(&obd->obd_refcount));
- dump_exports(obd, 1);
- }
- waited *= 2;
+ schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
+ cfs_time_seconds(waited));
+ if (waited > 5 && IS_PO2(waited)) {
+ LCONSOLE_WARN("%s is waiting for obd_unlinked_exports "
+ "more than %d seconds. "
+ "The obd refcount = %d. Is it stuck?\n",
+ obd->obd_name, waited,
+ cfs_atomic_read(&obd->obd_refcount));
+ dump_exports(obd, 1);
+ }
+ waited *= 2;
spin_lock(&obd->obd_dev_lock);
}
spin_unlock(&obd->obd_dev_lock);
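schedule_timeout_and_set_state() is another libcfs compatibility wrapper; in plain kernel terms the export-drain loop above is set_current_state() followed by schedule_timeout(), doubling the sleep each round and logging a warning once the wait gets suspiciously long. A reduced sketch of that polling loop follows (the helper and its callback are illustrative):

#include <linux/sched.h>
#include <linux/jiffies.h>

/* Poll @drained with exponential backoff, sleeping uninterruptibly. */
static void wait_until_drained(int (*drained)(void *), void *arg)
{
	int waited = 1;

	while (!drained(arg)) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(waited * HZ);	/* returns in TASK_RUNNING */
		waited *= 2;
	}
}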
spin_unlock(&obd_zombie_impexp_lock);
}
- cfs_cond_resched();
+ cond_resched();
} while (import != NULL || export != NULL);
EXIT;
}
static struct completion obd_zombie_start;
static struct completion obd_zombie_stop;
static unsigned long obd_zombie_flags;
-static cfs_waitq_t obd_zombie_waitq;
+static wait_queue_head_t obd_zombie_waitq;
static pid_t obd_zombie_pid;
enum {
*/
static void obd_zombie_impexp_notify(void)
{
- /*
- * Make sure obd_zomebie_impexp_thread get this notification.
- * It is possible this signal only get by obd_zombie_barrier, and
- * barrier gulps this notification and sleeps away and hangs ensues
- */
- cfs_waitq_broadcast(&obd_zombie_waitq);
+ /*
+ * Make sure obd_zombie_impexp_thread gets this notification.
+ * It is possible that only obd_zombie_barrier receives this signal, and
+ * that the barrier gulps the notification and goes back to sleep, so a hang ensues.
+ */
+ wake_up_all(&obd_zombie_waitq);
}
/**
obd_zombie_pid = current_pid();
while (!test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags)) {
- struct l_wait_info lwi = { 0 };
+ struct l_wait_info lwi = { 0 };
- l_wait_event(obd_zombie_waitq,
- !obd_zombie_impexp_check(NULL), &lwi);
- obd_zombie_impexp_cull();
+ l_wait_event(obd_zombie_waitq,
+ !obd_zombie_impexp_check(NULL), &lwi);
+ obd_zombie_impexp_cull();
- /*
- * Notify obd_zombie_barrier callers that queues
- * may be empty.
- */
- cfs_waitq_signal(&obd_zombie_waitq);
- }
+ /*
+ * Notify obd_zombie_barrier callers that queues
+ * may be empty.
+ */
+ wake_up(&obd_zombie_waitq);
+ }
complete(&obd_zombie_stop);
spin_lock_init(&obd_zombie_impexp_lock);
init_completion(&obd_zombie_start);
init_completion(&obd_zombie_stop);
- cfs_waitq_init(&obd_zombie_waitq);
+ init_waitqueue_head(&obd_zombie_waitq);
obd_zombie_pid = 0;
#ifdef __KERNEL__
rc = CTXTP(ctxt, cleanup)(env, ctxt);
llog_ctxt_destroy(ctxt);
- cfs_waitq_signal(&olg->olg_waitq);
+ wake_up(&olg->olg_waitq);
return rc;
}
EXPORT_SYMBOL(__llog_ctxt_put);
int lprocfs_evict_client_release(struct inode *inode, struct file *f)
{
- struct proc_dir_entry *dp = PDE(f->f_dentry->d_inode);
- struct obd_device *obd = dp->data;
+ struct proc_dir_entry *dp = PDE(f->f_dentry->d_inode);
+ struct obd_device *obd = dp->data;
- cfs_atomic_dec(&obd->obd_evict_inprogress);
- cfs_waitq_signal(&obd->obd_evict_inprogress_waitq);
+ cfs_atomic_dec(&obd->obd_evict_inprogress);
+ wake_up(&obd->obd_evict_inprogress_waitq);
- return 0;
+ return 0;
}
struct file_operations lprocfs_evict_client_fops = {
cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
- if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
- if (lu_object_is_dying(top)) {
+ if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
+ if (lu_object_is_dying(top)) {
- /*
- * somebody may be waiting for this, currently only
- * used for cl_object, see cl_object_put_last().
- */
- cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
- }
- return;
- }
+ /*
+ * somebody may be waiting for this, currently only
+ * used for cl_object, see cl_object_put_last().
+ */
+ wake_up_all(&bkt->lsb_marche_funebre);
+ }
+ return;
+ }
LASSERT(bkt->lsb_busy > 0);
bkt->lsb_busy--;
*/
CFS_INIT_LIST_HEAD(&splice);
cfs_list_splice_init(layers, &splice);
- while (!cfs_list_empty(&splice)) {
- /*
- * Free layers in bottom-to-top order, so that object header
- * lives as long as possible and ->loo_object_free() methods
- * can look at its contents.
- */
- o = container_of0(splice.prev, struct lu_object, lo_linkage);
- cfs_list_del_init(&o->lo_linkage);
- LASSERT(o->lo_ops->loo_object_free != NULL);
- o->lo_ops->loo_object_free(env, o);
- }
+ while (!cfs_list_empty(&splice)) {
+ /*
+ * Free layers in bottom-to-top order, so that object header
+ * lives as long as possible and ->loo_object_free() methods
+ * can look at its contents.
+ */
+ o = container_of0(splice.prev, struct lu_object, lo_linkage);
+ cfs_list_del_init(&o->lo_linkage);
+ LASSERT(o->lo_ops->loo_object_free != NULL);
+ o->lo_ops->loo_object_free(env, o);
+ }
- if (cfs_waitq_active(&bkt->lsb_marche_funebre))
- cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
+ if (waitqueue_active(&bkt->lsb_marche_funebre))
+ wake_up_all(&bkt->lsb_marche_funebre);
}
/**
if (count > 0 && --count == 0)
break;
- }
- cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
- cfs_cond_resched();
- /*
- * Free everything on the dispose list. This is safe against
- * races due to the reasons described in lu_object_put().
- */
+ }
+ cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
+ cond_resched();
+ /*
+ * Free everything on the dispose list. This is safe against
+ * races due to the reasons described in lu_object_put().
+ */
while (!cfs_list_empty(&dispose)) {
h = container_of0(dispose.next,
struct lu_object_header, loh_lru);
EXPORT_SYMBOL(lu_object_invariant);
static struct lu_object *htable_lookup(struct lu_site *s,
- cfs_hash_bd_t *bd,
- const struct lu_fid *f,
- cfs_waitlink_t *waiter,
- __u64 *version)
+ cfs_hash_bd_t *bd,
+ const struct lu_fid *f,
+ wait_queue_t *waiter,
+ __u64 *version)
{
struct lu_site_bkt_data *bkt;
struct lu_object_header *h;
* drained), and moreover, lookup has to wait until object is freed.
*/
- cfs_waitlink_init(waiter);
- cfs_waitq_add(&bkt->lsb_marche_funebre, waiter);
- cfs_set_current_state(CFS_TASK_UNINT);
- lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
- return ERR_PTR(-EAGAIN);
+ init_waitqueue_entry_current(waiter);
+ add_wait_queue(&bkt->lsb_marche_funebre, waiter);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
+ return ERR_PTR(-EAGAIN);
}
static struct lu_object *htable_lookup_nowait(struct lu_site *s,
* Core logic of lu_object_find*() functions.
*/
static struct lu_object *lu_object_find_try(const struct lu_env *env,
- struct lu_device *dev,
- const struct lu_fid *f,
- const struct lu_object_conf *conf,
- cfs_waitlink_t *waiter)
-{
- struct lu_object *o;
- struct lu_object *shadow;
- struct lu_site *s;
- cfs_hash_t *hs;
- cfs_hash_bd_t bd;
- __u64 version = 0;
+ struct lu_device *dev,
+ const struct lu_fid *f,
+ const struct lu_object_conf *conf,
+ wait_queue_t *waiter)
+{
+ struct lu_object *o;
+ struct lu_object *shadow;
+ struct lu_site *s;
+ cfs_hash_t *hs;
+ cfs_hash_bd_t bd;
+ __u64 version = 0;
/*
* This uses standard index maintenance protocol:
* objects of different "stacking" to be created within the same site.
*/
struct lu_object *lu_object_find_at(const struct lu_env *env,
- struct lu_device *dev,
- const struct lu_fid *f,
- const struct lu_object_conf *conf)
-{
- struct lu_site_bkt_data *bkt;
- struct lu_object *obj;
- cfs_waitlink_t wait;
-
- while (1) {
- obj = lu_object_find_try(env, dev, f, conf, &wait);
- if (obj != ERR_PTR(-EAGAIN))
- return obj;
- /*
- * lu_object_find_try() already added waiter into the
- * wait queue.
- */
- cfs_waitq_wait(&wait, CFS_TASK_UNINT);
- bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
- cfs_waitq_del(&bkt->lsb_marche_funebre, &wait);
- }
+ struct lu_device *dev,
+ const struct lu_fid *f,
+ const struct lu_object_conf *conf)
+{
+ struct lu_site_bkt_data *bkt;
+ struct lu_object *obj;
+ wait_queue_t wait;
+
+ while (1) {
+ obj = lu_object_find_try(env, dev, f, conf, &wait);
+ if (obj != ERR_PTR(-EAGAIN))
+ return obj;
+ /*
+ * lu_object_find_try() already added waiter into the
+ * wait queue.
+ */
+ waitq_wait(&wait, TASK_UNINTERRUPTIBLE);
+ bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
+ remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
+ }
}
EXPORT_SYMBOL(lu_object_find_at);
return -ENOMEM;
}
- cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
- bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
- CFS_INIT_LIST_HEAD(&bkt->lsb_lru);
- cfs_waitq_init(&bkt->lsb_marche_funebre);
- }
+ cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
+ bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
+ CFS_INIT_LIST_HEAD(&bkt->lsb_lru);
+ init_waitqueue_head(&bkt->lsb_marche_funebre);
+ }
s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
if (s->ls_stats == NULL) {
struct lu_fid *old = &o->lo_header->loh_fid;
struct lu_site_bkt_data *bkt;
struct lu_object *shadow;
- cfs_waitlink_t waiter;
+ wait_queue_t waiter;
cfs_hash_t *hs;
cfs_hash_bd_t bd;
__u64 version = 0;
/* recovery data */
cfs_init_timer(&obd->obd_recovery_timer);
spin_lock_init(&obd->obd_recovery_task_lock);
- cfs_waitq_init(&obd->obd_next_transno_waitq);
- cfs_waitq_init(&obd->obd_evict_inprogress_waitq);
- CFS_INIT_LIST_HEAD(&obd->obd_req_replay_queue);
- CFS_INIT_LIST_HEAD(&obd->obd_lock_replay_queue);
- CFS_INIT_LIST_HEAD(&obd->obd_final_req_queue);
- CFS_INIT_LIST_HEAD(&obd->obd_evict_list);
+ init_waitqueue_head(&obd->obd_next_transno_waitq);
+ init_waitqueue_head(&obd->obd_evict_inprogress_waitq);
+ CFS_INIT_LIST_HEAD(&obd->obd_req_replay_queue);
+ CFS_INIT_LIST_HEAD(&obd->obd_lock_replay_queue);
+ CFS_INIT_LIST_HEAD(&obd->obd_final_req_queue);
+ CFS_INIT_LIST_HEAD(&obd->obd_evict_list);
llog_group_init(&obd->obd_olg, FID_SEQ_LLOG);
while (obd->obd_conn_inprogress > 0) {
spin_unlock(&obd->obd_dev_lock);
- cfs_cond_resched();
+ cond_resched();
spin_lock(&obd->obd_dev_lock);
}
static int echo_cleanup(struct obd_device *obd)
{
- int leaked;
- ENTRY;
+ int leaked;
+ ENTRY;
- lprocfs_obd_cleanup(obd);
- lprocfs_free_obd_stats(obd);
+ lprocfs_obd_cleanup(obd);
+ lprocfs_free_obd_stats(obd);
- ldlm_lock_decref(&obd->u.echo.eo_nl_lock, LCK_NL);
+ ldlm_lock_decref(&obd->u.echo.eo_nl_lock, LCK_NL);
- /* XXX Bug 3413; wait for a bit to ensure the BL callback has
- * happened before calling ldlm_namespace_free() */
- cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT, cfs_time_seconds(1));
+ /* XXX Bug 3413; wait for a bit to ensure the BL callback has
+ * happened before calling ldlm_namespace_free() */
+ schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE, cfs_time_seconds(1));
- ldlm_namespace_free(obd->obd_namespace, NULL, obd->obd_force);
- obd->obd_namespace = NULL;
+ ldlm_namespace_free(obd->obd_namespace, NULL, obd->obd_force);
+ obd->obd_namespace = NULL;
- leaked = cfs_atomic_read(&obd->u.echo.eo_prep);
- if (leaked != 0)
- CERROR("%d prep/commitrw pages leaked\n", leaked);
+ leaked = cfs_atomic_read(&obd->u.echo.eo_prep);
+ if (leaked != 0)
+ CERROR("%d prep/commitrw pages leaked\n", leaked);
- RETURN(0);
+ RETURN(0);
}
struct obd_ops echo_obd_ops = {
spin_unlock(&ec->ec_lock);
CERROR("echo_client still has objects at cleanup time, "
"wait for 1 second\n");
- cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
+ schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
cfs_time_seconds(1));
lu_site_purge(env, &ed->ed_site->cs_lu, -1);
spin_lock(&ec->ec_lock);
/* ----- part 2 ----- */ \
__ext->oe_grants, __ext->oe_nr_pages, \
list_empty_marker(&__ext->oe_pages), \
- cfs_waitq_active(&__ext->oe_waitq) ? '+' : '-', \
+ waitqueue_active(&__ext->oe_waitq) ? '+' : '-', \
__ext->oe_osclock, __ext->oe_mppr, __ext->oe_owner, \
/* ----- part 4 ----- */ \
## __VA_ARGS__); \
/* TODO: validate the state machine */
ext->oe_state = state;
- cfs_waitq_broadcast(&ext->oe_waitq);
+ wake_up_all(&ext->oe_waitq);
}
static struct osc_extent *osc_extent_alloc(struct osc_object *obj)
CFS_INIT_LIST_HEAD(&ext->oe_link);
ext->oe_state = OES_INV;
CFS_INIT_LIST_HEAD(&ext->oe_pages);
- cfs_waitq_init(&ext->oe_waitq);
+ init_waitqueue_head(&ext->oe_waitq);
ext->oe_osclock = NULL;
return ext;
* RPC size will be.
* The exiting condition is no avail grants and no dirty pages caching,
* that really means there is no space on the OST. */
- cfs_waitq_init(&ocw.ocw_waitq);
+ init_waitqueue_head(&ocw.ocw_waitq);
ocw.ocw_oap = oap;
ocw.ocw_grant = bytes;
while (cli->cl_dirty > 0 || cli->cl_w_in_flight > 0) {
CDEBUG(D_CACHE, "wake up %p for oap %p, avail grant %ld, %d\n",
ocw, ocw->ocw_oap, cli->cl_avail_grant, ocw->ocw_rc);
- cfs_waitq_signal(&ocw->ocw_waitq);
+ wake_up(&ocw->ocw_waitq);
}
EXIT;
pgoff_t oe_max_end;
/** waitqueue - for those who want to be notified if this extent's
* state has changed. */
- cfs_waitq_t oe_waitq;
+ wait_queue_head_t oe_waitq;
/** lock covering this extent */
struct cl_lock *oe_osclock;
/** terminator of this extent. Must be true if this extent is in IO. */
#define oap_brw_flags oap_brw_page.flag
struct osc_cache_waiter {
- cfs_list_t ocw_entry;
- cfs_waitq_t ocw_waitq;
- struct osc_async_page *ocw_oap;
+ cfs_list_t ocw_entry;
+ wait_queue_head_t ocw_waitq;
+ struct osc_async_page *ocw_oap;
int ocw_grant;
- int ocw_rc;
+ int ocw_rc;
};
int osc_create(const struct lu_env *env, struct obd_export *exp,
io->ci_obj = cl_object_top(obj);
io->ci_ignore_layout = 1;
cl_io_init(env, io, CIT_MISC, io->ci_obj);
- do {
- result = cl_page_gang_lookup(env, obj, io,
- descr->cld_start, descr->cld_end,
- check_cb, (void *)lock);
- if (result == CLP_GANG_ABORT)
- break;
- if (result == CLP_GANG_RESCHED)
- cfs_cond_resched();
- } while (result != CLP_GANG_OKAY);
- cl_io_fini(env, io);
+ do {
+ result = cl_page_gang_lookup(env, obj, io,
+ descr->cld_start, descr->cld_end,
+ check_cb, (void *)lock);
+ if (result == CLP_GANG_ABORT)
+ break;
+ if (result == CLP_GANG_RESCHED)
+ cond_resched();
+ } while (result != CLP_GANG_OKAY);
+ cl_io_fini(env, io);
mutex_unlock(&oob->oo_debug_mutex);
- cl_env_nested_put(&nest, env);
+ cl_env_nested_put(&nest, env);
- return (result == CLP_GANG_ABORT);
+ return (result == CLP_GANG_ABORT);
}
#else
static int osc_lock_has_pages(struct osc_lock *olck)
if (wakeup) {
osc_lru_shrink(cli, osc_cache_too_much(cli));
- cfs_waitq_broadcast(&osc_lru_waitq);
+ wake_up_all(&osc_lru_waitq);
}
}
if (cfs_atomic_read(&cli->cl_lru_shrinkers) == 0 &&
!memory_pressure_get())
osc_lru_shrink(cli, osc_cache_too_much(cli));
- cfs_waitq_signal(&osc_lru_waitq);
+ wake_up(&osc_lru_waitq);
}
} else {
LASSERT(cfs_list_empty(&opg->ops_lru));
if (rc > 0)
continue;
- cfs_cond_resched();
+ cond_resched();
/* slowest case, all of caching pages are busy, notifying
* other OSCs that we're lack of LRU slots. */
}
static int osc_destroy_interpret(const struct lu_env *env,
- struct ptlrpc_request *req, void *data,
- int rc)
+ struct ptlrpc_request *req, void *data,
+ int rc)
{
- struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- cfs_atomic_dec(&cli->cl_destroy_in_flight);
- cfs_waitq_signal(&cli->cl_destroy_waitq);
- return 0;
+ cfs_atomic_dec(&cli->cl_destroy_in_flight);
+ wake_up(&cli->cl_destroy_waitq);
+ return 0;
}
static int osc_can_send_destroy(struct client_obd *cli)
{
- if (cfs_atomic_inc_return(&cli->cl_destroy_in_flight) <=
- cli->cl_max_rpcs_in_flight) {
- /* The destroy request can be sent */
- return 1;
- }
- if (cfs_atomic_dec_return(&cli->cl_destroy_in_flight) <
- cli->cl_max_rpcs_in_flight) {
- /*
- * The counter has been modified between the two atomic
- * operations.
- */
- cfs_waitq_signal(&cli->cl_destroy_waitq);
- }
- return 0;
+ if (cfs_atomic_inc_return(&cli->cl_destroy_in_flight) <=
+ cli->cl_max_rpcs_in_flight) {
+ /* The destroy request can be sent */
+ return 1;
+ }
+ if (cfs_atomic_dec_return(&cli->cl_destroy_in_flight) <
+ cli->cl_max_rpcs_in_flight) {
+ /*
+ * The counter has been modified between the two atomic
+ * operations.
+ */
+ wake_up(&cli->cl_destroy_waitq);
+ }
+ return 0;
}
int osc_create(const struct lu_env *env, struct obd_export *exp,
obd_count page_count, struct brw_page **pga,
struct obd_capa *ocapa)
{
- struct ptlrpc_request *req;
- int rc;
- cfs_waitq_t waitq;
- int generation, resends = 0;
- struct l_wait_info lwi;
+ struct ptlrpc_request *req;
+ int rc;
+ wait_queue_head_t waitq;
+ int generation, resends = 0;
+ struct l_wait_info lwi;
- ENTRY;
+ ENTRY;
- cfs_waitq_init(&waitq);
- generation = exp->exp_obd->u.cli.cl_import->imp_generation;
+ init_waitqueue_head(&waitq);
+ generation = exp->exp_obd->u.cli.cl_import->imp_generation;
restart_bulk:
rc = osc_brw_prep_request(cmd, &exp->exp_obd->u.cli, oa, lsm,
OBD_FREE_PTR(oh);
}
- /* as we want IO to journal and data IO be concurrent, we don't block
- * awaiting data IO completion in osd_do_bio(), instead we wait here
- * once transaction is submitted to the journal. all reqular requests
- * don't do direct IO (except read/write), thus this wait_event becomes
- * no-op for them.
- *
- * IMPORTANT: we have to wait till any IO submited by the thread is
- * completed otherwise iobuf may be corrupted by different request
- */
- cfs_wait_event(iobuf->dr_wait,
- cfs_atomic_read(&iobuf->dr_numreqs) == 0);
- if (!rc)
- rc = iobuf->dr_error;
+ /* as we want IO to the journal and data IO to be concurrent, we don't
+ * block awaiting data IO completion in osd_do_bio(); instead we wait here
+ * once the transaction is submitted to the journal. all regular requests
+ * don't do direct IO (except read/write), thus this wait_event becomes a
+ * no-op for them.
+ *
+ * IMPORTANT: we have to wait till any IO submitted by the thread is
+ * completed, otherwise iobuf may be corrupted by a different request
+ */
+ wait_event(iobuf->dr_wait,
+ cfs_atomic_read(&iobuf->dr_numreqs) == 0);
+ if (!rc)
+ rc = iobuf->dr_error;
- RETURN(rc);
+ RETURN(rc);
}
static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
#define MAX_BLOCKS_PER_PAGE (PAGE_CACHE_SIZE / 512)
struct osd_iobuf {
- cfs_waitq_t dr_wait;
+ wait_queue_head_t dr_wait;
cfs_atomic_t dr_numreqs; /* number of reqs being processed */
int dr_max_pages;
int dr_npages;
iobuf->dr_init_at);
LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);
- cfs_waitq_init(&iobuf->dr_wait);
- cfs_atomic_set(&iobuf->dr_numreqs, 0);
- iobuf->dr_npages = 0;
- iobuf->dr_error = 0;
- iobuf->dr_dev = d;
- iobuf->dr_frags = 0;
- iobuf->dr_elapsed = 0;
- /* must be counted before, so assert */
- iobuf->dr_rw = rw;
+ init_waitqueue_head(&iobuf->dr_wait);
+ cfs_atomic_set(&iobuf->dr_numreqs, 0);
+ iobuf->dr_npages = 0;
+ iobuf->dr_error = 0;
+ iobuf->dr_dev = d;
+ iobuf->dr_frags = 0;
+ iobuf->dr_elapsed = 0;
+ /* must be counted before, so assert */
+ iobuf->dr_rw = rw;
iobuf->dr_init_at = line;
blocks = pages * (PAGE_CACHE_SIZE >> osd_sb(d)->s_blocksize_bits);
iobuf->dr_elapsed_valid = 1;
}
if (cfs_atomic_dec_and_test(&iobuf->dr_numreqs))
- cfs_waitq_signal(&iobuf->dr_wait);
+ wake_up(&iobuf->dr_wait);
/* Completed bios used to be chained off iobuf->dr_bios and freed in
* filter_clear_dreq(). It was then possible to exhaust the biovec-256
* parallel and wait for IO completion once transaction is stopped
* see osd_trans_stop() for more details -bzzz */
if (iobuf->dr_rw == 0) {
- cfs_wait_event(iobuf->dr_wait,
+ wait_event(iobuf->dr_wait,
cfs_atomic_read(&iobuf->dr_numreqs) == 0);
}
spin_lock(&scrub->os_lock);
thread_set_flags(thread, SVC_RUNNING);
spin_unlock(&scrub->os_lock);
- cfs_waitq_broadcast(&thread->t_ctl_waitq);
+ wake_up_all(&thread->t_ctl_waitq);
}
up_write(&scrub->os_rwsem);
ooc->ooc_pos_preload < scrub->os_pos_current) {
spin_lock(&scrub->os_lock);
it->ooi_waiting = 0;
- cfs_waitq_broadcast(&thread->t_ctl_waitq);
+ wake_up_all(&thread->t_ctl_waitq);
spin_unlock(&scrub->os_lock);
}
if (scrub->os_waiting && osd_scrub_has_window(scrub, ooc)) {
spin_lock(&scrub->os_lock);
scrub->os_waiting = 0;
- cfs_waitq_broadcast(&scrub->os_thread.t_ctl_waitq);
+ wake_up_all(&scrub->os_thread.t_ctl_waitq);
spin_unlock(&scrub->os_lock);
}
noenv:
spin_lock(&scrub->os_lock);
thread_set_flags(thread, SVC_STOPPED);
- cfs_waitq_broadcast(&thread->t_ctl_waitq);
+ wake_up_all(&thread->t_ctl_waitq);
spin_unlock(&scrub->os_lock);
return rc;
}
if (!thread_is_init(thread) && !thread_is_stopped(thread)) {
thread_set_flags(thread, SVC_STOPPING);
spin_unlock(&scrub->os_lock);
- cfs_waitq_broadcast(&thread->t_ctl_waitq);
+ wake_up_all(&thread->t_ctl_waitq);
l_wait_event(thread->t_ctl_waitq,
thread_is_stopped(thread),
&lwi);
ctxt->pwd = dev->od_mnt->mnt_root;
ctxt->fs = get_ds();
- cfs_waitq_init(&scrub->os_thread.t_ctl_waitq);
+ init_waitqueue_head(&scrub->os_thread.t_ctl_waitq);
init_rwsem(&scrub->os_rwsem);
spin_lock_init(&scrub->os_lock);
CFS_INIT_LIST_HEAD(&scrub->os_inconsistent_items);
if (scrub->os_waiting && osd_scrub_has_window(scrub, ooc)) {
spin_lock(&scrub->os_lock);
scrub->os_waiting = 0;
- cfs_waitq_broadcast(&scrub->os_thread.t_ctl_waitq);
+ wake_up_all(&scrub->os_thread.t_ctl_waitq);
spin_unlock(&scrub->os_lock);
}
it->ooi_user_ready = 1;
if (!scrub->os_full_speed)
- cfs_waitq_broadcast(&scrub->os_thread.t_ctl_waitq);
+ wake_up_all(&scrub->os_thread.t_ctl_waitq);
/* Unplug OSD layer iteration by the first next() call. */
rc = osd_otable_it_next(env, (struct dt_it *)it);
spin_unlock(&scrub->os_lock);
if (wakeup != 0)
- cfs_waitq_broadcast(&thread->t_ctl_waitq);
+ wake_up_all(&thread->t_ctl_waitq);
RETURN(0);
}
ENTRY;
osp->opd_recovery_completed = 1;
if (!osp->opd_connect_mdt)
- cfs_waitq_signal(&osp->opd_pre_waitq);
+ wake_up(&osp->opd_pre_waitq);
RETURN(rc);
}
if (d->opd_connect_mdt)
break;
osp_pre_update_status(d, -ENODEV);
- cfs_waitq_signal(&d->opd_pre_waitq);
+ wake_up(&d->opd_pre_waitq);
CDEBUG(D_HA, "got disconnected\n");
break;
case IMP_EVENT_INACTIVE:
if (d->opd_connect_mdt)
break;
osp_pre_update_status(d, -ENODEV);
- cfs_waitq_signal(&d->opd_pre_waitq);
+ wake_up(&d->opd_pre_waitq);
CDEBUG(D_HA, "got inactive\n");
break;
case IMP_EVENT_ACTIVE:
d->opd_imp_seen_connected = 1;
if (d->opd_connect_mdt)
break;
- cfs_waitq_signal(&d->opd_pre_waitq);
+ wake_up(&d->opd_pre_waitq);
__osp_sync_check_for_work(d);
CDEBUG(D_HA, "got connected\n");
break;
/* dedicate precreate thread */
struct ptlrpc_thread opd_pre_thread;
/* thread waits for signals about pool going empty */
- cfs_waitq_t opd_pre_waitq;
+ wait_queue_head_t opd_pre_waitq;
/* consumers (who needs new ids) wait here */
- cfs_waitq_t opd_pre_user_waitq;
+ wait_queue_head_t opd_pre_user_waitq;
/* current precreation status: working, failed, stopping? */
int opd_pre_status;
/* how many to precreate next time */
int opd_syn_prev_done;
/* found records */
struct ptlrpc_thread opd_syn_thread;
- cfs_waitq_t opd_syn_waitq;
+ wait_queue_head_t opd_syn_waitq;
/* list of remotely committed rpc */
cfs_list_t opd_syn_committed_there;
/* number of changes being under sync */
struct osp_device *d = (struct osp_device *) _d;
LASSERT(d);
- cfs_waitq_signal(&d->opd_pre_waitq);
+ wake_up(&d->opd_pre_waitq);
}
static int osp_statfs_interpret(const struct lu_env *env,
RETURN(0);
out:
/* couldn't update statfs, try again as soon as possible */
- cfs_waitq_signal(&d->opd_pre_waitq);
+ wake_up(&d->opd_pre_waitq);
if (req->rq_import_generation == imp->imp_generation)
CDEBUG(D_CACHE, "%s: couldn't update statfs: rc = %d\n",
d->opd_obd->obd_name, rc);
*/
d->opd_statfs_fresh_till = cfs_time_shift(-1);
cfs_timer_disarm(&d->opd_statfs_timer);
- cfs_waitq_signal(&d->opd_pre_waitq);
+ wake_up(&d->opd_pre_waitq);
}
}
osp_pre_update_status(d, -ENOSPC);
rc = -ENOSPC;
}
- cfs_waitq_signal(&d->opd_pre_waitq);
+ wake_up(&d->opd_pre_waitq);
GOTO(out_req, rc);
}
out_req:
/* now we can wakeup all users awaiting for objects */
osp_pre_update_status(d, rc);
- cfs_waitq_signal(&d->opd_pre_user_waitq);
+ wake_up(&d->opd_pre_user_waitq);
ptlrpc_req_finished(req);
RETURN(rc);
* this OSP isn't quite functional yet */
osp_pre_update_status(d, rc);
} else {
- cfs_waitq_signal(&d->opd_pre_user_waitq);
+ wake_up(&d->opd_pre_user_waitq);
}
}
d->opd_pre_grow_slow = 0;
d->opd_pre_grow_count = OST_MIN_PRECREATE;
spin_unlock(&d->opd_pre_lock);
- cfs_waitq_signal(&d->opd_pre_waitq);
+ wake_up(&d->opd_pre_waitq);
CDEBUG(D_INFO, "%s: no space: "LPU64" blocks, "LPU64
" free, "LPU64" used, "LPU64" avail -> %d: "
"rc = %d\n", d->opd_obd->obd_name,
}
out:
- cfs_waitq_signal(&d->opd_pre_user_waitq);
+ wake_up(&d->opd_pre_user_waitq);
}
static int osp_init_pre_fid(struct osp_device *osp)
spin_lock(&d->opd_pre_lock);
thread->t_flags = SVC_RUNNING;
spin_unlock(&d->opd_pre_lock);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
while (osp_precreate_running(d)) {
/*
thread->t_flags = SVC_STOPPED;
lu_env_fini(&env);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
RETURN(0);
}
/* XXX: don't wake up if precreation is in progress */
if (osp_precreate_near_empty_nolock(env, d) &&
!osp_precreate_end_seq_nolock(env, d))
- cfs_waitq_signal(&d->opd_pre_waitq);
+ wake_up(&d->opd_pre_waitq);
break;
}
}
/* XXX: don't wake up if precreation is in progress */
- cfs_waitq_signal(&d->opd_pre_waitq);
+ wake_up(&d->opd_pre_waitq);
lwi = LWI_TIMEOUT(expire - cfs_time_current(),
osp_precreate_timeout_condition, d);
* osp_precreate_thread() just before orphan cleanup
*/
if (unlikely(d->opd_pre_reserved == 0 && d->opd_pre_status))
- cfs_waitq_signal(&d->opd_pre_waitq);
+ wake_up(&d->opd_pre_waitq);
return 0;
}
d->opd_pre_max_grow_count = OST_MAX_PRECREATE;
spin_lock_init(&d->opd_pre_lock);
- cfs_waitq_init(&d->opd_pre_waitq);
- cfs_waitq_init(&d->opd_pre_user_waitq);
- cfs_waitq_init(&d->opd_pre_thread.t_ctl_waitq);
+ init_waitqueue_head(&d->opd_pre_waitq);
+ init_waitqueue_head(&d->opd_pre_user_waitq);
+ init_waitqueue_head(&d->opd_pre_thread.t_ctl_waitq);
/*
* Initialize statfs-related things
cfs_timer_disarm(&d->opd_statfs_timer);
thread->t_flags = SVC_STOPPING;
- cfs_waitq_signal(&d->opd_pre_waitq);
+ wake_up(&d->opd_pre_waitq);
- cfs_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
+ wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
EXIT;
}
#define osp_sync_check_for_work(d) \
{ \
if (osp_sync_has_work(d)) { \
- cfs_waitq_signal(&d->opd_syn_waitq); \
+ wake_up(&d->opd_syn_waitq); \
} \
}
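The macro above wakes the sync thread only when osp_sync_has_work() reports pending work. As a hedged sketch of the consumer side of that handshake (illustrative only; the real thread loop uses l_wait_event() with an l_wait_info rather than plain wait_event(), and the helper name below is hypothetical):

/* Sketch: the waiter that pairs with the wake_up() in the macro above.
 * osp_sync_has_work() and opd_syn_waitq come from the code in this patch;
 * this helper itself is hypothetical. */
static void osp_sync_wait_for_work_sketch(struct osp_device *d)
{
	/* sleep until a producer makes osp_sync_has_work(d) true and
	 * calls wake_up(&d->opd_syn_waitq) */
	wait_event(d->opd_syn_waitq, osp_sync_has_work(d));
}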
spin_unlock(&d->opd_syn_lock);
/* XXX: some batching wouldn't hurt */
- cfs_waitq_signal(&d->opd_syn_waitq);
+ wake_up(&d->opd_syn_waitq);
}
static int osp_sync_interpret(const struct lu_env *env,
cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
spin_unlock(&d->opd_syn_lock);
- cfs_waitq_signal(&d->opd_syn_waitq);
+ wake_up(&d->opd_syn_waitq);
} else if (rc) {
struct obd_import *imp = req->rq_import;
/*
spin_unlock(&d->opd_syn_lock);
}
- cfs_waitq_signal(&d->opd_syn_waitq);
+ wake_up(&d->opd_syn_waitq);
} else if (unlikely(d->opd_pre_status == -ENOSPC)) {
/*
* if current status is -ENOSPC (lack of free space on OST)
/* wake up the thread if requested to stop:
* it might be waiting for in-progress to complete */
if (unlikely(osp_sync_running(d) == 0))
- cfs_waitq_signal(&d->opd_syn_waitq);
+ wake_up(&d->opd_syn_waitq);
EXIT;
}
spin_lock(&d->opd_syn_lock);
thread->t_flags = SVC_RUNNING;
spin_unlock(&d->opd_syn_lock);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
if (ctxt == NULL) {
thread->t_flags = SVC_STOPPED;
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
lu_env_fini(&env);
d->opd_syn_max_rpc_in_flight = OSP_MAX_IN_FLIGHT;
d->opd_syn_max_rpc_in_progress = OSP_MAX_IN_PROGRESS;
spin_lock_init(&d->opd_syn_lock);
- cfs_waitq_init(&d->opd_syn_waitq);
- cfs_waitq_init(&d->opd_syn_thread.t_ctl_waitq);
+ init_waitqueue_head(&d->opd_syn_waitq);
+ init_waitqueue_head(&d->opd_syn_thread.t_ctl_waitq);
CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there);
rc = PTR_ERR(kthread_run(osp_sync_thread, d,
ENTRY;
thread->t_flags = SVC_STOPPING;
- cfs_waitq_signal(&d->opd_syn_waitq);
- cfs_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
+ wake_up(&d->opd_syn_waitq);
+ wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
/*
* unregister transaction callbacks only when sync thread
cfs_list_for_each_entry(d, &tr->otr_wakeup_list,
opd_syn_ontrack) {
d->opd_syn_last_committed_id = tr->otr_committed_id;
- cfs_waitq_signal(&d->opd_syn_waitq);
+ wake_up(&d->opd_syn_waitq);
}
}
spin_unlock(&tr->otr_lock);
}
/* send a bulk after reply to simulate a network delay or reordering
* by a router */
- if (unlikely(CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2))) {
- cfs_waitq_t waitq;
- struct l_wait_info lwi1;
+ if (unlikely(CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2))) {
+ wait_queue_head_t waitq;
+ struct l_wait_info lwi1;
- CDEBUG(D_INFO, "reorder BULK\n");
- cfs_waitq_init(&waitq);
+ CDEBUG(D_INFO, "reorder BULK\n");
+ init_waitqueue_head(&waitq);
- lwi1 = LWI_TIMEOUT_INTR(cfs_time_seconds(3), NULL, NULL, NULL);
- l_wait_event(waitq, 0, &lwi1);
- rc = target_bulk_io(exp, desc, &lwi);
+ lwi1 = LWI_TIMEOUT_INTR(cfs_time_seconds(3), NULL, NULL, NULL);
+ l_wait_event(waitq, 0, &lwi1);
+ rc = target_bulk_io(exp, desc, &lwi);
ptlrpc_free_bulk_nopin(desc);
- }
+ }
RETURN(rc);
}
return NULL;
spin_lock_init(&desc->bd_lock);
- cfs_waitq_init(&desc->bd_waitq);
+ init_waitqueue_head(&desc->bd_waitq);
desc->bd_max_iov = npages;
desc->bd_iov_count = 0;
desc->bd_portal = portal;
ptlrpc_at_set_req_timeout(request);
spin_lock_init(&request->rq_lock);
- CFS_INIT_LIST_HEAD(&request->rq_list);
- CFS_INIT_LIST_HEAD(&request->rq_timed_list);
- CFS_INIT_LIST_HEAD(&request->rq_replay_list);
- CFS_INIT_LIST_HEAD(&request->rq_ctx_chain);
- CFS_INIT_LIST_HEAD(&request->rq_set_chain);
- CFS_INIT_LIST_HEAD(&request->rq_history_list);
- CFS_INIT_LIST_HEAD(&request->rq_exp_list);
- cfs_waitq_init(&request->rq_reply_waitq);
- cfs_waitq_init(&request->rq_set_waitq);
- request->rq_xid = ptlrpc_next_xid();
- cfs_atomic_set(&request->rq_refcount, 1);
-
- lustre_msg_set_opc(request->rq_reqmsg, opcode);
-
- RETURN(0);
+ CFS_INIT_LIST_HEAD(&request->rq_list);
+ CFS_INIT_LIST_HEAD(&request->rq_timed_list);
+ CFS_INIT_LIST_HEAD(&request->rq_replay_list);
+ CFS_INIT_LIST_HEAD(&request->rq_ctx_chain);
+ CFS_INIT_LIST_HEAD(&request->rq_set_chain);
+ CFS_INIT_LIST_HEAD(&request->rq_history_list);
+ CFS_INIT_LIST_HEAD(&request->rq_exp_list);
+ init_waitqueue_head(&request->rq_reply_waitq);
+ init_waitqueue_head(&request->rq_set_waitq);
+ request->rq_xid = ptlrpc_next_xid();
+ cfs_atomic_set(&request->rq_refcount, 1);
+
+ lustre_msg_set_opc(request->rq_reqmsg, opcode);
+
+ RETURN(0);
out_ctx:
- sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
+ sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
out_free:
- class_import_put(imp);
- return rc;
+ class_import_put(imp);
+ return rc;
}
int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
RETURN(NULL);
cfs_atomic_set(&set->set_refcount, 1);
CFS_INIT_LIST_HEAD(&set->set_requests);
- cfs_waitq_init(&set->set_waitq);
+ init_waitqueue_head(&set->set_waitq);
cfs_atomic_set(&set->set_new_count, 0);
cfs_atomic_set(&set->set_remaining, 0);
spin_lock_init(&set->set_new_req_lock);
count = cfs_atomic_inc_return(&set->set_new_count);
spin_unlock(&set->set_new_req_lock);
- /* Only need to call wakeup once for the first entry. */
- if (count == 1) {
- cfs_waitq_signal(&set->set_waitq);
+ /* Only need to call wakeup once for the first entry. */
+ if (count == 1) {
+ wake_up(&set->set_waitq);
- /* XXX: It maybe unnecessary to wakeup all the partners. But to
- * guarantee the async RPC can be processed ASAP, we have
- * no other better choice. It maybe fixed in future. */
- for (i = 0; i < pc->pc_npartners; i++)
- cfs_waitq_signal(&pc->pc_partners[i]->pc_set->set_waitq);
- }
+ /* XXX: It may be unnecessary to wake up all the partners, but to
+ * guarantee the async RPC can be processed ASAP, we have
+ * no better choice. This may be fixed in the future. */
+ for (i = 0; i < pc->pc_npartners; i++)
+ wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
+ }
}
EXPORT_SYMBOL(ptlrpc_set_add_new_req);
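The count == 1 check above is a transition-based wake-up: only the request that takes the new-request list from empty to non-empty needs to wake the ptlrpcd set; later additions are picked up when the set is drained. A minimal, self-contained sketch of the same idea under assumed names (my_count, my_waitq and my_enqueue_sketch are hypothetical):

#include <linux/atomic.h>
#include <linux/wait.h>

static atomic_t my_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(my_waitq);

static void my_enqueue_sketch(void)
{
	/* wake the consumer only on the empty -> non-empty transition;
	 * items added while the consumer is awake are seen on its next
	 * pass over the queue */
	if (atomic_inc_return(&my_count) == 1)
		wake_up(&my_waitq);
}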
}
spin_unlock(&imp->imp_lock);
- cfs_atomic_dec(&set->set_remaining);
- cfs_waitq_broadcast(&imp->imp_recovery_waitq);
+ cfs_atomic_dec(&set->set_remaining);
+ wake_up_all(&imp->imp_recovery_waitq);
if (set->set_producer) {
/* produce a new request if possible */
for (;;) {
#ifdef __KERNEL__
/* The wq argument is ignored by user-space wait_event macros */
- cfs_waitq_t *wq = (request->rq_set != NULL) ?
- &request->rq_set->set_waitq :
- &request->rq_reply_waitq;
+ wait_queue_head_t *wq = (request->rq_set != NULL) ?
+ &request->rq_set->set_waitq :
+ &request->rq_reply_waitq;
#endif
/* Network access will complete in finite time but the HUGE
* timeout lets us CWARN for visibility of sluggish NALs */
req->rq_no_delay = req->rq_no_resend = 1;
spin_lock_init(&req->rq_lock);
- CFS_INIT_LIST_HEAD(&req->rq_list);
- CFS_INIT_LIST_HEAD(&req->rq_replay_list);
- CFS_INIT_LIST_HEAD(&req->rq_set_chain);
- CFS_INIT_LIST_HEAD(&req->rq_history_list);
- CFS_INIT_LIST_HEAD(&req->rq_exp_list);
- cfs_waitq_init(&req->rq_reply_waitq);
- cfs_waitq_init(&req->rq_set_waitq);
- cfs_atomic_set(&req->rq_refcount, 1);
-
- CLASSERT (sizeof(*args) <= sizeof(req->rq_async_args));
- args = ptlrpc_req_async_args(req);
- args->magic = PTLRPC_WORK_MAGIC;
- args->cb = cb;
- args->cbdata = cbdata;
-
- RETURN(req);
+ CFS_INIT_LIST_HEAD(&req->rq_list);
+ CFS_INIT_LIST_HEAD(&req->rq_replay_list);
+ CFS_INIT_LIST_HEAD(&req->rq_set_chain);
+ CFS_INIT_LIST_HEAD(&req->rq_history_list);
+ CFS_INIT_LIST_HEAD(&req->rq_exp_list);
+ init_waitqueue_head(&req->rq_reply_waitq);
+ init_waitqueue_head(&req->rq_set_waitq);
+ cfs_atomic_set(&req->rq_refcount, 1);
+
+ CLASSERT (sizeof(*args) <= sizeof(req->rq_async_args));
+ args = ptlrpc_req_async_args(req);
+ args->magic = PTLRPC_WORK_MAGIC;
+ args->cb = cb;
+ args->cbdata = cbdata;
+
+ RETURN(req);
}
EXPORT_SYMBOL(ptlrpcd_alloc_work);
/* NB everything can disappear under us once the request
* has been queued and we unlock, so do the wake now... */
- cfs_waitq_signal(&svcpt->scp_waitq);
+ wake_up(&svcpt->scp_waitq);
spin_unlock(&svcpt->scp_lock);
EXIT;
desc->bd_md_count--;
/* This is the last callback no matter what... */
if (desc->bd_md_count == 0)
- cfs_waitq_signal(&desc->bd_waitq);
+ wake_up(&desc->bd_waitq);
}
spin_unlock(&desc->bd_lock);
void ptlrpc_ni_fini(void)
{
- cfs_waitq_t waitq;
- struct l_wait_info lwi;
- int rc;
- int retries;
-
- /* Wait for the event queue to become idle since there may still be
- * messages in flight with pending events (i.e. the fire-and-forget
- * messages == client requests and "non-difficult" server
- * replies */
-
- for (retries = 0;; retries++) {
- rc = LNetEQFree(ptlrpc_eq_h);
- switch (rc) {
- default:
- LBUG();
-
- case 0:
- LNetNIFini();
- return;
-
- case -EBUSY:
- if (retries != 0)
- CWARN("Event queue still busy\n");
-
- /* Wait for a bit */
- cfs_waitq_init(&waitq);
- lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
- l_wait_event(waitq, 0, &lwi);
- break;
- }
- }
- /* notreached */
+ wait_queue_head_t waitq;
+ struct l_wait_info lwi;
+ int rc;
+ int retries;
+
+ /* Wait for the event queue to become idle since there may still be
+ * messages in flight with pending events (i.e. the fire-and-forget
+ * messages == client requests and "non-difficult" server
+ * replies) */
+
+ for (retries = 0;; retries++) {
+ rc = LNetEQFree(ptlrpc_eq_h);
+ switch (rc) {
+ default:
+ LBUG();
+
+ case 0:
+ LNetNIFini();
+ return;
+
+ case -EBUSY:
+ if (retries != 0)
+ CWARN("Event queue still busy\n");
+
+ /* Wait for a bit */
+ init_waitqueue_head(&waitq);
+ lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
+ l_wait_event(waitq, 0, &lwi);
+ break;
+ }
+ }
+ /* notreached */
}
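The -EBUSY branch above pauses for roughly two seconds by initializing a throwaway wait queue and sleeping on it with an always-false condition. Under the native API this patch moves to, the same pause can be written without a dummy queue; a minimal sketch, assuming only that cfs_time_seconds() converts seconds to jiffies as elsewhere in the tree:

/* Sketch: a plain timed sleep replacing the dummy-waitq idiom above. */
static inline void ptlrpc_retry_pause_sketch(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(cfs_time_seconds(2));	/* ~2s, expressed in jiffies */
}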
lnet_pid_t ptl_get_pid(void)
#define RSI_HASHMASK (RSI_HASHMAX - 1)
struct rsi {
- struct cache_head h;
- __u32 lustre_svc;
- __u64 nid;
- cfs_waitq_t waitq;
- rawobj_t in_handle, in_token;
- rawobj_t out_handle, out_token;
- int major_status, minor_status;
+ struct cache_head h;
+ __u32 lustre_svc;
+ __u64 nid;
+ wait_queue_head_t waitq;
+ rawobj_t in_handle, in_token;
+ rawobj_t out_handle, out_token;
+ int major_status, minor_status;
};
static struct cache_head *rsi_table[RSI_HASHMAX];
static inline void __rsi_init(struct rsi *new, struct rsi *item)
{
- new->out_handle = RAWOBJ_EMPTY;
- new->out_token = RAWOBJ_EMPTY;
+ new->out_handle = RAWOBJ_EMPTY;
+ new->out_token = RAWOBJ_EMPTY;
- new->in_handle = item->in_handle;
- item->in_handle = RAWOBJ_EMPTY;
- new->in_token = item->in_token;
- item->in_token = RAWOBJ_EMPTY;
+ new->in_handle = item->in_handle;
+ item->in_handle = RAWOBJ_EMPTY;
+ new->in_token = item->in_token;
+ item->in_token = RAWOBJ_EMPTY;
- new->lustre_svc = item->lustre_svc;
- new->nid = item->nid;
- cfs_waitq_init(&new->waitq);
+ new->lustre_svc = item->lustre_svc;
+ new->nid = item->nid;
+ init_waitqueue_head(&new->waitq);
}
static inline void __rsi_update(struct rsi *new, struct rsi *item)
rsip = rsi_update(&rsii, rsip);
status = 0;
out:
- rsi_free(&rsii);
- if (rsip) {
- cfs_waitq_broadcast(&rsip->waitq);
- cache_put(&rsip->h, &rsi_cache);
- } else {
- status = -ENOMEM;
- }
+ rsi_free(&rsii);
+ if (rsip) {
+ wake_up_all(&rsip->waitq);
+ cache_put(&rsip->h, &rsi_cache);
+ } else {
+ status = -ENOMEM;
+ }
- if (status)
- CERROR("rsi parse error %d\n", status);
- RETURN(status);
+ if (status)
+ CERROR("rsi parse error %d\n", status);
+ RETURN(status);
}
static struct cache_detail rsi_cache = {
static struct cache_req cache_upcall_chandle = { cache_upcall_defer };
int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
- struct gss_svc_reqctx *grctx,
- struct gss_wire_ctx *gw,
- struct obd_device *target,
- __u32 lustre_svc,
- rawobj_t *rvs_hdl,
- rawobj_t *in_token)
+ struct gss_svc_reqctx *grctx,
+ struct gss_wire_ctx *gw,
+ struct obd_device *target,
+ __u32 lustre_svc,
+ rawobj_t *rvs_hdl,
+ rawobj_t *in_token)
{
- struct ptlrpc_reply_state *rs;
- struct rsc *rsci = NULL;
- struct rsi *rsip = NULL, rsikey;
- cfs_waitlink_t wait;
- int replen = sizeof(struct ptlrpc_body);
- struct gss_rep_header *rephdr;
- int first_check = 1;
- int rc = SECSVC_DROP;
- ENTRY;
+ struct ptlrpc_reply_state *rs;
+ struct rsc *rsci = NULL;
+ struct rsi *rsip = NULL, rsikey;
+ wait_queue_t wait;
+ int replen = sizeof(struct ptlrpc_body);
+ struct gss_rep_header *rephdr;
+ int first_check = 1;
+ int rc = SECSVC_DROP;
+ ENTRY;
memset(&rsikey, 0, sizeof(rsikey));
rsikey.lustre_svc = lustre_svc;
GOTO(out, rc);
}
- cache_get(&rsip->h); /* take an extra ref */
- cfs_waitq_init(&rsip->waitq);
- cfs_waitlink_init(&wait);
- cfs_waitq_add(&rsip->waitq, &wait);
+ cache_get(&rsip->h); /* take an extra ref */
+ init_waitqueue_head(&rsip->waitq);
+ init_waitqueue_entry_current(&wait);
+ add_wait_queue(&rsip->waitq, &wait);
cache_check:
- /* Note each time cache_check() will drop a reference if return
- * non-zero. We hold an extra reference on initial rsip, but must
- * take care of following calls. */
- rc = cache_check(&rsi_cache, &rsip->h, &cache_upcall_chandle);
- switch (rc) {
- case -EAGAIN: {
+ /* Note that each call to cache_check() drops a reference when it
+ * returns non-zero. We hold an extra reference on the initial rsip,
+ * but must take care of the following calls. */
+ rc = cache_check(&rsi_cache, &rsip->h, &cache_upcall_chandle);
+ switch (rc) {
+ case -EAGAIN: {
int valid;
if (first_check) {
read_lock(&rsi_cache.hash_lock);
valid = test_bit(CACHE_VALID, &rsip->h.flags);
if (valid == 0)
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+ set_current_state(TASK_INTERRUPTIBLE);
read_unlock(&rsi_cache.hash_lock);
if (valid == 0)
- cfs_schedule_timeout(GSS_SVC_UPCALL_TIMEOUT *
+ schedule_timeout(GSS_SVC_UPCALL_TIMEOUT *
HZ);
cache_get(&rsip->h);
case 0:
/* if not the first check, we have to release the extra
* reference we just added on it. */
- if (!first_check)
- cache_put(&rsip->h, &rsi_cache);
- CDEBUG(D_SEC, "cache_check is good\n");
- break;
- }
+ if (!first_check)
+ cache_put(&rsip->h, &rsi_cache);
+ CDEBUG(D_SEC, "cache_check is good\n");
+ break;
+ }
- cfs_waitq_del(&rsip->waitq, &wait);
- cache_put(&rsip->h, &rsi_cache);
+ remove_wait_queue(&rsip->waitq, &wait);
+ cache_put(&rsip->h, &rsi_cache);
- if (rc)
- GOTO(out, rc = SECSVC_DROP);
+ if (rc)
+ GOTO(out, rc = SECSVC_DROP);
rc = SECSVC_DROP;
rsci = gss_svc_searchbyctx(&rsip->out_handle);
for (i = 0; i < 6; i++) {
if (atomic_read(&rsi_cache.readers) > 0)
break;
- cfs_set_current_state(TASK_UNINTERRUPTIBLE);
+ set_current_state(TASK_UNINTERRUPTIBLE);
LASSERT(HZ >= 4);
- cfs_schedule_timeout(HZ / 4);
+ schedule_timeout(HZ / 4);
}
if (atomic_read(&rsi_cache.readers) == 0)
}
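The loop above is a bounded poll: test the condition, sleep a quarter second in TASK_UNINTERRUPTIBLE, and give up after six tries. A self-contained sketch of the same idiom with the details generalized (the helper name and the -ETIMEDOUT return are assumptions; the original simply falls through to the check that follows):

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/sched.h>

/* Sketch of the bounded poll-and-sleep used for rsi_cache.readers above. */
static int wait_for_readers_sketch(atomic_t *readers, int max_tries)
{
	int i;

	for (i = 0; i < max_tries; i++) {
		if (atomic_read(readers) > 0)
			return 0;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ / 4);	/* quarter-second step */
	}
	return -ETIMEDOUT;
}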
} while (rc != 0);
- /*
- * Let's additionally check that no new rpcs added to import in
- * "invalidate" state.
- */
- LASSERT(cfs_atomic_read(&imp->imp_inflight) == 0);
- obd_import_event(imp->imp_obd, imp, IMP_EVENT_INVALIDATE);
- sptlrpc_import_flush_all_ctx(imp);
-
- cfs_atomic_dec(&imp->imp_inval_count);
- cfs_waitq_broadcast(&imp->imp_recovery_waitq);
+ /*
+ * Let's additionally check that no new rpcs added to import in
+ * "invalidate" state.
+ */
+ LASSERT(cfs_atomic_read(&imp->imp_inflight) == 0);
+ obd_import_event(imp->imp_obd, imp, IMP_EVENT_INVALIDATE);
+ sptlrpc_import_flush_all_ctx(imp);
+
+ cfs_atomic_dec(&imp->imp_inval_count);
+ wake_up_all(&imp->imp_recovery_waitq);
}
EXPORT_SYMBOL(ptlrpc_invalidate_import);
RETURN(-EPROTO);
}
- ptlrpc_maybe_ping_import_soon(imp);
+ ptlrpc_maybe_ping_import_soon(imp);
- CDEBUG(D_HA, "recovery of %s on %s failed (%d)\n",
- obd2cli_tgt(imp->imp_obd),
- (char *)imp->imp_connection->c_remote_uuid.uuid, rc);
- }
+ CDEBUG(D_HA, "recovery of %s on %s failed (%d)\n",
+ obd2cli_tgt(imp->imp_obd),
+ (char *)imp->imp_connection->c_remote_uuid.uuid, rc);
+ }
- cfs_waitq_broadcast(&imp->imp_recovery_waitq);
- RETURN(rc);
+ wake_up_all(&imp->imp_recovery_waitq);
+ RETURN(rc);
}
/**
libcfs_nid2str(imp->imp_connection->c_peer.nid));
}
- if (imp->imp_state == LUSTRE_IMP_FULL) {
- cfs_waitq_broadcast(&imp->imp_recovery_waitq);
- ptlrpc_wake_delayed(imp);
- }
+ if (imp->imp_state == LUSTRE_IMP_FULL) {
+ wake_up_all(&imp->imp_recovery_waitq);
+ ptlrpc_wake_delayed(imp);
+ }
out:
- RETURN(rc);
+ RETURN(rc);
}
int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
for (;;) {
#ifdef __KERNEL__
/* The wq argument is ignored by user-space wait_event macros */
- cfs_waitq_t *wq = (req->rq_set != NULL) ?
- &req->rq_set->set_waitq :
- &req->rq_reply_waitq;
+ wait_queue_head_t *wq = (req->rq_set != NULL) ?
+ &req->rq_set->set_waitq :
+ &req->rq_reply_waitq;
#endif
/* Network access will complete in finite time but the HUGE
* timeout lets us CWARN for visibility of sluggish NALs */
spin_lock(&svcpt->scp_rep_lock);
cfs_list_add(&rs->rs_list, &svcpt->scp_rep_idle);
spin_unlock(&svcpt->scp_rep_lock);
- cfs_waitq_signal(&svcpt->scp_rep_waitq);
+ wake_up(&svcpt->scp_rep_waitq);
}
int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
static int ptlrpc_pinger_main(void *arg)
{
- struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
+ struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
ENTRY;
- /* Record that the thread is running */
- thread_set_flags(thread, SVC_RUNNING);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ /* Record that the thread is running */
+ thread_set_flags(thread, SVC_RUNNING);
+ wake_up(&thread->t_ctl_waitq);
- /* And now, loop forever, pinging as needed. */
- while (1) {
- cfs_time_t this_ping = cfs_time_current();
- struct l_wait_info lwi;
- cfs_duration_t time_to_next_wake;
- struct timeout_item *item;
- cfs_list_t *iter;
+ /* And now, loop forever, pinging as needed. */
+ while (1) {
+ cfs_time_t this_ping = cfs_time_current();
+ struct l_wait_info lwi;
+ cfs_duration_t time_to_next_wake;
+ struct timeout_item *item;
+ cfs_list_t *iter;
mutex_lock(&pinger_mutex);
cfs_list_for_each_entry(item, &timeout_list, ti_chain) {
}
thread_set_flags(thread, SVC_STOPPED);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
CDEBUG(D_NET, "pinger thread exiting, process %d\n", current_pid());
return 0;
!thread_is_stopped(&pinger_thread))
RETURN(-EALREADY);
- cfs_waitq_init(&pinger_thread.t_ctl_waitq);
+ init_waitqueue_head(&pinger_thread.t_ctl_waitq);
strcpy(pinger_thread.t_name, "ll_ping");
ptlrpc_pinger_remove_timeouts();
thread_set_flags(&pinger_thread, SVC_STOPPING);
- cfs_waitq_signal(&pinger_thread.t_ctl_waitq);
+ wake_up(&pinger_thread.t_ctl_waitq);
l_wait_event(pinger_thread.t_ctl_waitq,
thread_is_stopped(&pinger_thread), &lwi);
{
#ifdef ENABLE_PINGER
thread_add_flags(&pinger_thread, SVC_EVENT);
- cfs_waitq_signal(&pinger_thread.t_ctl_waitq);
+ wake_up(&pinger_thread.t_ctl_waitq);
#endif
}
static int pet_refcount = 0;
static int pet_state;
-static cfs_waitq_t pet_waitq;
+static wait_queue_head_t pet_waitq;
CFS_LIST_HEAD(pet_list);
static DEFINE_SPINLOCK(pet_lock);
}
spin_unlock(&pet_lock);
- cfs_waitq_signal(&pet_waitq);
+ wake_up(&pet_waitq);
return 0;
}
if (++pet_refcount > 1)
return;
- cfs_waitq_init(&pet_waitq);
+ init_waitqueue_head(&pet_waitq);
task = kthread_run(ping_evictor_main, NULL, "ll_evictor");
if (IS_ERR(task)) {
return;
pet_state = PET_TERMINATE;
- cfs_waitq_signal(&pet_waitq);
+ wake_up(&pet_waitq);
}
EXPORT_SYMBOL(ping_evictor_stop);
#else /* !__KERNEL__ */
LASSERT(rq_set != NULL);
- cfs_waitq_signal(&rq_set->set_waitq);
+ wake_up(&rq_set->set_waitq);
}
EXPORT_SYMBOL(ptlrpcd_wake);
count = cfs_atomic_add_return(i, &new->set_new_count);
cfs_atomic_set(&set->set_remaining, 0);
spin_unlock(&new->set_new_req_lock);
- if (count == i) {
- cfs_waitq_signal(&new->set_waitq);
-
- /* XXX: It maybe unnecessary to wakeup all the partners. But to
- * guarantee the async RPC can be processed ASAP, we have
- * no other better choice. It maybe fixed in future. */
- for (i = 0; i < pc->pc_npartners; i++)
- cfs_waitq_signal(&pc->pc_partners[i]->pc_set->set_waitq);
- }
+ if (count == i) {
+ wake_up(&new->set_waitq);
+
+ /* XXX: It may be unnecessary to wake up all the partners, but to
+ * guarantee the async RPC can be processed ASAP, we have
+ * no better choice. This may be fixed in the future. */
+ for (i = 0; i < pc->pc_npartners; i++)
+ wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
+ }
#endif
}
EXPORT_SYMBOL(ptlrpcd_add_rqset);
/* ptlrpc_check_set will decrease the count */
cfs_atomic_inc(&req->rq_set->set_remaining);
spin_unlock(&req->rq_lock);
- cfs_waitq_signal(&req->rq_set->set_waitq);
+ wake_up(&req->rq_set->set_waitq);
return;
} else {
spin_unlock(&req->rq_lock);
set_bit(LIOD_STOP, &pc->pc_flags);
if (force)
set_bit(LIOD_FORCE, &pc->pc_flags);
- cfs_waitq_signal(&pc->pc_set->set_waitq);
+ wake_up(&pc->pc_set->set_waitq);
out:
EXIT;
"ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
newctx, newctx->cc_flags);
- cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
+ schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
HZ);
} else {
/*
RETURN(-ENOMEM);
spin_lock_init(&req->rq_lock);
- cfs_atomic_set(&req->rq_refcount, 10000);
- CFS_INIT_LIST_HEAD(&req->rq_ctx_chain);
- cfs_waitq_init(&req->rq_reply_waitq);
- cfs_waitq_init(&req->rq_set_waitq);
- req->rq_import = imp;
- req->rq_flvr = sec->ps_flvr;
- req->rq_cli_ctx = ctx;
+ cfs_atomic_set(&req->rq_refcount, 10000);
+ CFS_INIT_LIST_HEAD(&req->rq_ctx_chain);
+ init_waitqueue_head(&req->rq_reply_waitq);
+ init_waitqueue_head(&req->rq_set_waitq);
+ req->rq_import = imp;
+ req->rq_flvr = sec->ps_flvr;
+ req->rq_cli_ctx = ctx;
rc = sptlrpc_req_refresh_ctx(req, 0);
LASSERT(cfs_list_empty(&req->rq_ctx_chain));
unsigned long epp_max_pages; /* maximum pages can hold, const */
unsigned int epp_max_pools; /* number of pools, const */
- /*
- * wait queue in case of not enough free pages.
- */
- cfs_waitq_t epp_waitq; /* waiting threads */
- unsigned int epp_waitqlen; /* wait queue length */
- unsigned long epp_pages_short; /* # of pages wanted of in-q users */
- unsigned int epp_growing:1; /* during adding pages */
+ /*
+ * wait queue in case of not enough free pages.
+ */
+ wait_queue_head_t epp_waitq; /* waiting threads */
+ unsigned int epp_waitqlen; /* wait queue length */
+ unsigned long epp_pages_short; /* # of pages wanted by in-queue users */
+ unsigned int epp_growing:1; /* during adding pages */
/*
* indicating how idle the pools are, from 0 to MAX_IDLE_IDX
LASSERT(spin_is_locked(&page_pools.epp_lock));
if (unlikely(page_pools.epp_waitqlen)) {
- LASSERT(cfs_waitq_active(&page_pools.epp_waitq));
- cfs_waitq_broadcast(&page_pools.epp_waitq);
+ LASSERT(waitqueue_active(&page_pools.epp_waitq));
+ wake_up_all(&page_pools.epp_waitq);
}
}
*/
int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
{
- cfs_waitlink_t waitlink;
- unsigned long this_idle = -1;
- cfs_time_t tick = 0;
- long now;
- int p_idx, g_idx;
- int i;
+ wait_queue_t waitlink;
+ unsigned long this_idle = -1;
+ cfs_time_t tick = 0;
+ long now;
+ int p_idx, g_idx;
+ int i;
- LASSERT(desc->bd_iov_count > 0);
- LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);
+ LASSERT(desc->bd_iov_count > 0);
+ LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);
- /* resent bulk, enc iov might have been allocated previously */
- if (desc->bd_enc_iov != NULL)
- return 0;
+ /* resent bulk, enc iov might have been allocated previously */
+ if (desc->bd_enc_iov != NULL)
+ return 0;
- OBD_ALLOC(desc->bd_enc_iov,
- desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
- if (desc->bd_enc_iov == NULL)
- return -ENOMEM;
+ OBD_ALLOC(desc->bd_enc_iov,
+ desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
+ if (desc->bd_enc_iov == NULL)
+ return -ENOMEM;
spin_lock(&page_pools.epp_lock);
- page_pools.epp_st_access++;
+ page_pools.epp_st_access++;
again:
- if (unlikely(page_pools.epp_free_pages < desc->bd_iov_count)) {
- if (tick == 0)
- tick = cfs_time_current();
+ if (unlikely(page_pools.epp_free_pages < desc->bd_iov_count)) {
+ if (tick == 0)
+ tick = cfs_time_current();
- now = cfs_time_current_sec();
+ now = cfs_time_current_sec();
- page_pools.epp_st_missings++;
- page_pools.epp_pages_short += desc->bd_iov_count;
+ page_pools.epp_st_missings++;
+ page_pools.epp_pages_short += desc->bd_iov_count;
- if (enc_pools_should_grow(desc->bd_iov_count, now)) {
- page_pools.epp_growing = 1;
+ if (enc_pools_should_grow(desc->bd_iov_count, now)) {
+ page_pools.epp_growing = 1;
spin_unlock(&page_pools.epp_lock);
enc_pools_add_pages(page_pools.epp_pages_short / 2);
spin_lock(&page_pools.epp_lock);
- page_pools.epp_growing = 0;
+ page_pools.epp_growing = 0;
- enc_pools_wakeup();
- } else {
- if (++page_pools.epp_waitqlen >
- page_pools.epp_st_max_wqlen)
- page_pools.epp_st_max_wqlen =
- page_pools.epp_waitqlen;
+ enc_pools_wakeup();
+ } else {
+ if (++page_pools.epp_waitqlen >
+ page_pools.epp_st_max_wqlen)
+ page_pools.epp_st_max_wqlen =
+ page_pools.epp_waitqlen;
- cfs_set_current_state(CFS_TASK_UNINT);
- cfs_waitlink_init(&waitlink);
- cfs_waitq_add(&page_pools.epp_waitq, &waitlink);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ init_waitqueue_entry_current(&waitlink);
+ add_wait_queue(&page_pools.epp_waitq, &waitlink);
spin_unlock(&page_pools.epp_lock);
- cfs_waitq_wait(&waitlink, CFS_TASK_UNINT);
- cfs_waitq_del(&page_pools.epp_waitq, &waitlink);
+ waitq_wait(&waitlink, TASK_UNINTERRUPTIBLE);
+ remove_wait_queue(&page_pools.epp_waitq, &waitlink);
LASSERT(page_pools.epp_waitqlen > 0);
spin_lock(&page_pools.epp_lock);
- page_pools.epp_waitqlen--;
- }
+ page_pools.epp_waitqlen--;
+ }
- LASSERT(page_pools.epp_pages_short >= desc->bd_iov_count);
- page_pools.epp_pages_short -= desc->bd_iov_count;
+ LASSERT(page_pools.epp_pages_short >= desc->bd_iov_count);
+ page_pools.epp_pages_short -= desc->bd_iov_count;
- this_idle = 0;
- goto again;
- }
+ this_idle = 0;
+ goto again;
+ }
/* record max wait time */
if (unlikely(tick != 0)) {
int sptlrpc_enc_pool_init(void)
{
- /*
- * maximum capacity is 1/8 of total physical memory.
- * is the 1/8 a good number?
- */
+ /*
+ * maximum capacity is 1/8 of total physical memory.
+ * is the 1/8 a good number?
+ */
page_pools.epp_max_pages = num_physpages / 8;
- page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);
+ page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);
- cfs_waitq_init(&page_pools.epp_waitq);
- page_pools.epp_waitqlen = 0;
- page_pools.epp_pages_short = 0;
+ init_waitqueue_head(&page_pools.epp_waitq);
+ page_pools.epp_waitqlen = 0;
+ page_pools.epp_pages_short = 0;
page_pools.epp_growing = 0;
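The sptlrpc_enc_pool_get_pages() hunk above contains the full manual wait-queue sequence that the renamed primitives implement: set the task state, initialize and add the wait entry, drop the pool lock, sleep, then remove the entry and retake the lock. A condensed, hedged sketch of just that sequence (page_pools, epp_waitq and epp_lock are the fields shown above; the real code re-checks the free-page count in a loop via "goto again"):

/* Condensed sketch of the open-coded sleep in sptlrpc_enc_pool_get_pages(). */
static void enc_pool_sleep_sketch(void)
{
	wait_queue_t waitlink;

	set_current_state(TASK_UNINTERRUPTIBLE);
	init_waitqueue_entry_current(&waitlink);
	add_wait_queue(&page_pools.epp_waitq, &waitlink);

	spin_unlock(&page_pools.epp_lock);
	/* sleep until enc_pools_wakeup() calls wake_up_all() */
	waitq_wait(&waitlink, TASK_UNINTERRUPTIBLE);
	remove_wait_queue(&page_pools.epp_waitq, &waitlink);
	spin_lock(&page_pools.epp_lock);
}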
spin_unlock(&sec_gc_ctx_list_lock);
thread_add_flags(&sec_gc_thread, SVC_SIGNAL);
- cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
+ wake_up(&sec_gc_thread.t_ctl_waitq);
}
EXPORT_SYMBOL(sptlrpc_gc_add_ctx);
static int sec_gc_main(void *arg)
{
- struct ptlrpc_thread *thread = (struct ptlrpc_thread *) arg;
- struct l_wait_info lwi;
+ struct ptlrpc_thread *thread = (struct ptlrpc_thread *) arg;
+ struct l_wait_info lwi;
unshare_fs_struct();
- /* Record that the thread is running */
- thread_set_flags(thread, SVC_RUNNING);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ /* Record that the thread is running */
+ thread_set_flags(thread, SVC_RUNNING);
+ wake_up(&thread->t_ctl_waitq);
- while (1) {
- struct ptlrpc_sec *sec;
+ while (1) {
+ struct ptlrpc_sec *sec;
- thread_clear_flags(thread, SVC_SIGNAL);
- sec_process_ctx_list();
+ thread_clear_flags(thread, SVC_SIGNAL);
+ sec_process_ctx_list();
again:
- /* go through sec list do gc.
- * FIXME here we iterate through the whole list each time which
- * is not optimal. we perhaps want to use balanced binary tree
- * to trace each sec as order of expiry time.
- * another issue here is we wakeup as fixed interval instead of
- * according to each sec's expiry time */
+ /* go through the sec list and do gc.
+ * FIXME: here we iterate through the whole list each time, which
+ * is not optimal. We perhaps want to use a balanced binary tree
+ * to track each sec in order of expiry time.
+ * Another issue is that we wake up at a fixed interval instead of
+ * according to each sec's expiry time. */
mutex_lock(&sec_gc_mutex);
- cfs_list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
- /* if someone is waiting to be deleted, let it
- * proceed as soon as possible. */
- if (cfs_atomic_read(&sec_gc_wait_del)) {
- CDEBUG(D_SEC, "deletion pending, start over\n");
+ cfs_list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
+ /* if someone is waiting to be deleted, let it
+ * proceed as soon as possible. */
+ if (cfs_atomic_read(&sec_gc_wait_del)) {
+ CDEBUG(D_SEC, "deletion pending, start over\n");
mutex_unlock(&sec_gc_mutex);
- goto again;
- }
+ goto again;
+ }
- sec_do_gc(sec);
- }
+ sec_do_gc(sec);
+ }
mutex_unlock(&sec_gc_mutex);
/* check ctx list again before sleep */
thread_is_signal(thread),
&lwi);
- if (thread_test_and_clear_flags(thread, SVC_STOPPING))
- break;
- }
+ if (thread_test_and_clear_flags(thread, SVC_STOPPING))
+ break;
+ }
- thread_set_flags(thread, SVC_STOPPED);
- cfs_waitq_signal(&thread->t_ctl_waitq);
- return 0;
+ thread_set_flags(thread, SVC_STOPPED);
+ wake_up(&thread->t_ctl_waitq);
+ return 0;
}
int sptlrpc_gc_init(void)
spin_lock_init(&sec_gc_list_lock);
spin_lock_init(&sec_gc_ctx_list_lock);
- /* initialize thread control */
- memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
- cfs_waitq_init(&sec_gc_thread.t_ctl_waitq);
+ /* initialize thread control */
+ memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
+ init_waitqueue_head(&sec_gc_thread.t_ctl_waitq);
task = kthread_run(sec_gc_main, &sec_gc_thread, "sptlrpc_gc");
if (IS_ERR(task)) {
CERROR("can't start gc thread: %ld\n", PTR_ERR(task));
return PTR_ERR(task);
- }
+ }
- l_wait_event(sec_gc_thread.t_ctl_waitq,
- thread_is_running(&sec_gc_thread), &lwi);
- return 0;
+ l_wait_event(sec_gc_thread.t_ctl_waitq,
+ thread_is_running(&sec_gc_thread), &lwi);
+ return 0;
}
void sptlrpc_gc_fini(void)
{
- struct l_wait_info lwi = { 0 };
+ struct l_wait_info lwi = { 0 };
- thread_set_flags(&sec_gc_thread, SVC_STOPPING);
- cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
+ thread_set_flags(&sec_gc_thread, SVC_STOPPING);
+ wake_up(&sec_gc_thread.t_ctl_waitq);
- l_wait_event(sec_gc_thread.t_ctl_waitq,
- thread_is_stopped(&sec_gc_thread), &lwi);
+ l_wait_event(sec_gc_thread.t_ctl_waitq,
+ thread_is_stopped(&sec_gc_thread), &lwi);
}
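sptlrpc_gc_init() and sptlrpc_gc_fini() above show the start/stop handshake used by nearly every kernel thread touched in this patch: the starter waits on t_ctl_waitq for SVC_RUNNING, and stopping means setting SVC_STOPPING, waking the thread, then waiting for SVC_STOPPED. A minimal generic sketch of that handshake; struct ptlrpc_thread, thread_set_flags(), thread_is_stopped() and l_wait_event() are the existing Lustre helpers, thread_is_stopping() is assumed to exist alongside them, and both function names below are hypothetical:

/* Sketch: the generic thread start/stop handshake seen throughout the patch. */
static int example_thread_main_sketch(void *arg)
{
	struct ptlrpc_thread *thread = arg;
	struct l_wait_info lwi = { 0 };

	thread_set_flags(thread, SVC_RUNNING);
	wake_up(&thread->t_ctl_waitq);		/* unblock the starter */

	/* a real thread does its work inside this wait; here we only
	 * wait for the stop request */
	l_wait_event(thread->t_ctl_waitq, thread_is_stopping(thread), &lwi);

	thread_set_flags(thread, SVC_STOPPED);
	wake_up(&thread->t_ctl_waitq);		/* unblock the stopper */
	return 0;
}

static void example_thread_stop_sketch(struct ptlrpc_thread *thread)
{
	struct l_wait_info lwi = { 0 };

	thread_set_flags(thread, SVC_STOPPING);
	wake_up(&thread->t_ctl_waitq);
	l_wait_event(thread->t_ctl_waitq, thread_is_stopped(thread), &lwi);
}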
#else /* !__KERNEL__ */
struct ptlrpc_hr_thread {
int hrt_id; /* thread ID */
spinlock_t hrt_lock;
- cfs_waitq_t hrt_waitq;
+ wait_queue_head_t hrt_waitq;
cfs_list_t hrt_queue; /* RS queue */
struct ptlrpc_hr_partition *hrt_partition;
};
/* CPU partition table, it's just cfs_cpt_table for now */
struct cfs_cpt_table *hr_cpt_table;
/** controller sleep waitq */
- cfs_waitq_t hr_waitq;
+ wait_queue_head_t hr_waitq;
unsigned int hr_stopping;
/** roundrobin rotor for non-affinity service */
unsigned int hr_rotor;
cfs_list_splice_init(&b->rsb_replies, &hrt->hrt_queue);
spin_unlock(&hrt->hrt_lock);
- cfs_waitq_signal(&hrt->hrt_waitq);
+ wake_up(&hrt->hrt_waitq);
b->rsb_n_replies = 0;
}
}
cfs_list_add_tail(&rs->rs_list, &hrt->hrt_queue);
spin_unlock(&hrt->hrt_lock);
- cfs_waitq_signal(&hrt->hrt_waitq);
+ wake_up(&hrt->hrt_waitq);
EXIT;
#else
cfs_list_add_tail(&rs->rs_list, &rs->rs_svcpt->scp_rep_queue);
svcpt->scp_at_check = 1;
svcpt->scp_at_checktime = cfs_time_current();
- cfs_waitq_signal(&svcpt->scp_waitq);
+ wake_up(&svcpt->scp_waitq);
}
static void
CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_idle);
CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_posted);
CFS_INIT_LIST_HEAD(&svcpt->scp_req_incoming);
- cfs_waitq_init(&svcpt->scp_waitq);
+ init_waitqueue_head(&svcpt->scp_waitq);
/* history request & rqbd list */
CFS_INIT_LIST_HEAD(&svcpt->scp_hist_reqs);
CFS_INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);
CFS_INIT_LIST_HEAD(&svcpt->scp_rep_queue);
#endif
CFS_INIT_LIST_HEAD(&svcpt->scp_rep_idle);
- cfs_waitq_init(&svcpt->scp_rep_waitq);
+ init_waitqueue_head(&svcpt->scp_rep_waitq);
cfs_atomic_set(&svcpt->scp_nreps_difficult, 0);
/* adaptive timeout */
if (rc)
GOTO(err_req, rc);
- cfs_waitq_signal(&svcpt->scp_waitq);
+ wake_up(&svcpt->scp_waitq);
RETURN(1);
err_req:
ptlrpc_rs_decref (rs);
if (cfs_atomic_dec_and_test(&svcpt->scp_nreps_difficult) &&
svc->srv_is_stopping)
- cfs_waitq_broadcast(&svcpt->scp_waitq);
+ wake_up_all(&svcpt->scp_waitq);
RETURN(1);
}
lc_watchdog_disable(thread->t_watchdog);
- cfs_cond_resched();
+ cond_resched();
l_wait_event_exclusive_head(svcpt->scp_waitq,
ptlrpc_thread_stopping(thread) ||
spin_unlock(&svcpt->scp_lock);
/* wake up our creator in case he's still waiting. */
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
thread->t_watchdog = lc_watchdog_add(ptlrpc_server_get_timeout(svcpt),
NULL, NULL);
spin_lock(&svcpt->scp_rep_lock);
cfs_list_add(&rs->rs_list, &svcpt->scp_rep_idle);
- cfs_waitq_signal(&svcpt->scp_rep_waitq);
+ wake_up(&svcpt->scp_rep_waitq);
spin_unlock(&svcpt->scp_rep_lock);
CDEBUG(D_NET, "service thread %d (#%d) started\n", thread->t_id,
thread->t_id = rc;
thread_add_flags(thread, SVC_STOPPED);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
spin_unlock(&svcpt->scp_lock);
return rc;
}
cfs_atomic_inc(&hrp->hrp_nstarted);
- cfs_waitq_signal(&ptlrpc_hr.hr_waitq);
+ wake_up(&ptlrpc_hr.hr_waitq);
while (!ptlrpc_hr.hr_stopping) {
l_wait_condition(hrt->hrt_waitq, hrt_dont_sleep(hrt, &replies));
}
cfs_atomic_inc(&hrp->hrp_nstopped);
- cfs_waitq_signal(&ptlrpc_hr.hr_waitq);
+ wake_up(&ptlrpc_hr.hr_waitq);
return 0;
}
if (hrp->hrp_thrs == NULL)
continue; /* uninitialized */
for (j = 0; j < hrp->hrp_nthrs; j++)
- cfs_waitq_broadcast(&hrp->hrp_thrs[j].hrt_waitq);
+ wake_up_all(&hrp->hrp_thrs[j].hrt_waitq);
}
cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
if (hrp->hrp_thrs == NULL)
continue; /* uninitialized */
- cfs_wait_event(ptlrpc_hr.hr_waitq,
+ wait_event(ptlrpc_hr.hr_waitq,
cfs_atomic_read(&hrp->hrp_nstopped) ==
cfs_atomic_read(&hrp->hrp_nstarted));
}
if (IS_ERR_VALUE(rc))
break;
}
- cfs_wait_event(ptlrpc_hr.hr_waitq,
+ wait_event(ptlrpc_hr.hr_waitq,
cfs_atomic_read(&hrp->hrp_nstarted) == j);
if (!IS_ERR_VALUE(rc))
continue;
thread_add_flags(thread, SVC_STOPPING);
}
- cfs_waitq_broadcast(&svcpt->scp_waitq);
+ wake_up_all(&svcpt->scp_waitq);
while (!cfs_list_empty(&svcpt->scp_threads)) {
thread = cfs_list_entry(svcpt->scp_threads.next,
OBD_CPT_ALLOC_PTR(thread, svc->srv_cptable, svcpt->scp_cpt);
if (thread == NULL)
RETURN(-ENOMEM);
- cfs_waitq_init(&thread->t_ctl_waitq);
+ init_waitqueue_head(&thread->t_ctl_waitq);
spin_lock(&svcpt->scp_lock);
if (!ptlrpc_threads_increasable(svcpt)) {
if (wait) {
CDEBUG(D_INFO, "Waiting for creating thread %s #%d\n",
svc->srv_thread_name, svcpt->scp_thr_nextid);
- cfs_schedule();
+ schedule();
goto again;
}
* by ptlrpc_svcpt_stop_threads now
*/
thread_add_flags(thread, SVC_STOPPED);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
spin_unlock(&svcpt->scp_lock);
} else {
cfs_list_del(&thread->t_link);
if (ptlrpc_hr.hr_partitions == NULL)
RETURN(-ENOMEM);
- cfs_waitq_init(&ptlrpc_hr.hr_waitq);
+ init_waitqueue_head(&ptlrpc_hr.hr_waitq);
cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
hrp->hrp_cpt = i;
hrt->hrt_id = j;
hrt->hrt_partition = hrp;
- cfs_waitq_init(&hrt->hrt_waitq);
+ init_waitqueue_head(&hrt->hrt_waitq);
spin_lock_init(&hrt->hrt_lock);
CFS_INIT_LIST_HEAD(&hrt->hrt_queue);
}
"freed:%lu, repeat:%u\n", hash,
d.lid_inuse, d.lid_freed, repeat);
repeat++;
- cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
+ schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
cfs_time_seconds(1));
goto retry;
}
rwlock_t lse_lock;
/* waiter for pending request done */
- cfs_waitq_t lse_waiters;
+ wait_queue_head_t lse_waiters;
/* hint on current on-disk usage, in inodes or kbytes */
__u64 lse_usage;
/* set up and start rebalance thread */
thread_set_flags(&qmt->qmt_reba_thread, SVC_STOPPED);
- cfs_waitq_init(&qmt->qmt_reba_thread.t_ctl_waitq);
+ init_waitqueue_head(&qmt->qmt_reba_thread.t_ctl_waitq);
CFS_INIT_LIST_HEAD(&qmt->qmt_reba_list);
spin_lock_init(&qmt->qmt_reba_lock);
rc = qmt_start_reba_thread(qmt);
spin_unlock(&qmt->qmt_reba_lock);
if (added)
- cfs_waitq_signal(&qmt->qmt_reba_thread.t_ctl_waitq);
+ wake_up(&qmt->qmt_reba_thread.t_ctl_waitq);
else
lqe_putref(lqe);
EXIT;
}
thread_set_flags(thread, SVC_RUNNING);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
while (1) {
l_wait_event(thread->t_ctl_waitq,
lu_env_fini(env);
OBD_FREE_PTR(env);
thread_set_flags(thread, SVC_STOPPED);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
RETURN(rc);
}
struct l_wait_info lwi = { 0 };
thread_set_flags(thread, SVC_STOPPING);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
l_wait_event(thread->t_ctl_waitq, thread_is_stopped(thread),
&lwi);
memset(&lqe->lqe_lockh, 0, sizeof(lqe->lqe_lockh));
lqe->lqe_pending_write = 0;
lqe->lqe_pending_req = 0;
- cfs_waitq_init(&lqe->lqe_waiters);
+ init_waitqueue_head(&lqe->lqe_waiters);
lqe->lqe_usage = 0;
lqe->lqe_nopreacq = false;
}
}
lqe->lqe_pending_req--;
lqe->lqe_pending_rel = 0;
- cfs_waitq_broadcast(&lqe->lqe_waiters);
+ wake_up_all(&lqe->lqe_waiters);
}
/**
* step 3) will have to wait for qsd_start() to be called */
for (type = USRQUOTA; type < MAXQUOTAS; type++) {
struct qsd_qtype_info *qqi = qsd->qsd_type_array[type];
- cfs_waitq_signal(&qqi->qqi_reint_thread.t_ctl_waitq);
+ wake_up(&qqi->qqi_reint_thread.t_ctl_waitq);
}
RETURN(0);
qqi->qqi_glb_uptodate = false;
qqi->qqi_slv_uptodate = false;
qqi->qqi_reint = false;
- cfs_waitq_init(&qqi->qqi_reint_thread.t_ctl_waitq);
+ init_waitqueue_head(&qqi->qqi_reint_thread.t_ctl_waitq);
thread_set_flags(&qqi->qqi_reint_thread, SVC_STOPPED);
CFS_INIT_LIST_HEAD(&qqi->qqi_deferred_glb);
CFS_INIT_LIST_HEAD(&qqi->qqi_deferred_slv);
rwlock_init(&qsd->qsd_lock);
CFS_INIT_LIST_HEAD(&qsd->qsd_link);
thread_set_flags(&qsd->qsd_upd_thread, SVC_STOPPED);
- cfs_waitq_init(&qsd->qsd_upd_thread.t_ctl_waitq);
+ init_waitqueue_head(&qsd->qsd_upd_thread.t_ctl_waitq);
CFS_INIT_LIST_HEAD(&qsd->qsd_upd_list);
spin_lock_init(&qsd->qsd_adjust_lock);
CFS_INIT_LIST_HEAD(&qsd->qsd_adjust_list);
* up to usage; If usage < granted, release down to usage. */
for (type = USRQUOTA; type < MAXQUOTAS; type++) {
struct qsd_qtype_info *qqi = qsd->qsd_type_array[type];
- cfs_waitq_signal(&qqi->qqi_reint_thread.t_ctl_waitq);
+ wake_up(&qqi->qqi_reint_thread.t_ctl_waitq);
}
RETURN(rc);
lqe_write_unlock(lqe);
if (wakeup)
- cfs_waitq_broadcast(&lqe->lqe_waiters);
+ wake_up_all(&lqe->lqe_waiters);
lqe_putref(lqe);
out:
req->rq_status = rc;
lu_ref_add(&qqi->qqi_reference, "reint_thread", thread);
thread_set_flags(thread, SVC_RUNNING);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
OBD_ALLOC_PTR(env);
if (env == NULL)
lu_ref_del(&qqi->qqi_reference, "reint_thread", thread);
thread_set_flags(thread, SVC_STOPPED);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
return rc;
}
if (!thread_is_stopped(thread)) {
thread_set_flags(thread, SVC_STOPPING);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
l_wait_event(thread->t_ctl_waitq,
thread_is_stopped(thread), &lwi);
if (!qsd->qsd_stopping) {
list_add_tail(&upd->qur_link, &qsd->qsd_upd_list);
/* wake up the upd thread */
- cfs_waitq_signal(&qsd->qsd_upd_thread.t_ctl_waitq);
+ wake_up(&qsd->qsd_upd_thread.t_ctl_waitq);
} else {
CWARN("%s: discard update.\n", qsd->qsd_svname);
if (upd->qur_lqe)
spin_unlock(&qsd->qsd_adjust_lock);
if (added)
- cfs_waitq_signal(&qsd->qsd_upd_thread.t_ctl_waitq);
+ wake_up(&qsd->qsd_upd_thread.t_ctl_waitq);
else
lqe_putref(lqe);
}
}
thread_set_flags(thread, SVC_RUNNING);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
CFS_INIT_LIST_HEAD(&queue);
lwi = LWI_TIMEOUT(cfs_time_seconds(QSD_WB_INTERVAL), NULL, NULL);
lu_env_fini(env);
OBD_FREE_PTR(env);
thread_set_flags(thread, SVC_STOPPED);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
RETURN(rc);
}
if (!thread_is_stopped(thread)) {
thread_set_flags(thread, SVC_STOPPING);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
l_wait_event(thread->t_ctl_waitq, thread_is_stopped(thread),
&lwi);