#define cfs_schedule() cfs_schedule_timeout(CFS_TASK_UNINT, CFS_TICK)
#define cfs_pause(tick) cfs_schedule_timeout(CFS_TASK_UNINT, tick)
+/* XXX cfs_cond_resched() is sometimes called at each loop iteration
+ * (e.g. lustre_hash_for_each_empty()), so this definition is pretty
+ * inefficient and can be harmful if we have many elements to process */
+#define cfs_cond_resched() cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE, 1)
+
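/*
 * Illustrative sketch, not part of this patch: the per-iteration usage the
 * XXX comment above warns about.  The function name and element count are
 * hypothetical; the point is that with the one-tick definition above, a
 * walk over nelems entries sleeps for roughly nelems ticks in total.
 */
static inline void cfs_cond_resched_cost_sketch(int nelems)
{
        int i;

        for (i = 0; i < nelems; i++) {
                /* ... per-element work would go here ... */
                cfs_cond_resched();  /* one full-tick sleep per iteration */
        }
}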
#define __wait_event(wq, condition) \
do { \
struct cfs_waitlink __wait; \
#include <libcfs/darwin/darwin-prim.h>
#include <lnet/lnet.h>
-#define our_cond_resched() cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE, 1)
-
#ifdef CONFIG_SMP
#define LASSERT_SPIN_LOCKED(lock) do {} while(0) /* XXX */
#else
#define PageUptodate Page_Uptodate
#define our_recalc_sigpending(current) recalc_sigpending(current)
#define num_online_cpus() smp_num_cpus
-static inline void our_cond_resched(void)
-{
- if (current->need_resched)
- schedule ();
-}
#define work_struct_t struct tq_struct
#define cfs_get_work_data(type,field,data) (data)
#else
#define wait_on_page wait_on_page_locked
#define our_recalc_sigpending(current) recalc_sigpending()
#define strtok(a,b) strpbrk(a, b)
-static inline void our_cond_resched(void)
-{
- cond_resched();
-}
#define work_struct_t struct work_struct
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) */
#define cfs_schedule() schedule()
#define cfs_kthread_run(fn, data, fmt, arg...) kthread_run(fn, data, fmt, ##arg)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
+static inline void cfs_cond_resched(void)
+{
+ if (current->need_resched)
+ schedule();
+}
+#else
+#define cfs_cond_resched() cond_resched()
+#endif
+
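/*
 * Illustrative sketch, not part of this patch: how the scheduler loops
 * converted later in this patch use cfs_cond_resched() to bound the time
 * spent inside a spinlock-protected busy loop.  The lock, flags, counter
 * and limit parameters are hypothetical stand-ins for each caller's own
 * variables and threshold.
 */
static inline void cfs_cond_resched_usage_sketch(spinlock_t *lock,
                                                 unsigned long *flags,
                                                 int *busy_loops, int limit)
{
        if (++*busy_loops < limit)
                return;

        spin_unlock_irqrestore(lock, *flags);  /* never yield with the lock held */
        cfs_cond_resched();                    /* let other runnable tasks in */
        *busy_loops = 0;
        spin_lock_irqsave(lock, *flags);       /* re-enter the critical section */
}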
/* Kernel thread */
typedef int (*cfs_thread_t)(void *);
cfs_waitlink_t l; \
cfs_waitq_timedwait(&l, s, t); \
} while (0)
+#define cfs_cond_resched() do {} while(0)
#define CFS_TASK_INTERRUPTIBLE (0)
#define CFS_TASK_UNINT (0)
#define CFS_SYSFS_MODULE_PARM 0 /* no sysfs access to module parameters */
-
-static inline void our_cond_resched()
-{
- schedule_timeout(1i64);
-}
-
#ifdef CONFIG_SMP
#define LASSERT_SPIN_LOCKED(lock) do {} while(0) /* XXX */
#else
#define cfs_schedule_timeout(state, time) schedule_timeout(time)
void sleep_on(cfs_waitq_t *waitq);
+/* XXX cfs_cond_resched() is sometimes called at each loop iteration
+ * (e.g. lustre_hash_for_each_empty()), so this definition is pretty
+ * inefficient and can be harmful if we have many elements to process */
+static inline void cfs_cond_resched()
+{
+ schedule_timeout(1i64);
+}
+
#define CFS_DECL_JOURNAL_DATA
#define CFS_PUSH_JOURNAL do {;} while(0)
#define CFS_POP_JOURNAL do {;} while(0)
spin_unlock_irqrestore(&kibnal_data.kib_sched_lock,
flags);
- our_cond_resched();
+ cfs_cond_resched();
busy_loops = 0;
spin_lock_irqsave(&kibnal_data.kib_sched_lock, flags);
spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
flags);
- our_cond_resched();
+ cfs_cond_resched();
busy_loops = 0;
spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
!list_empty(&kibnal_data.kib_sched_rxq) ||
kibnal_data.kib_shutdown);
} else {
- our_cond_resched();
+ cfs_cond_resched();
}
spin_lock_irqsave(&kibnal_data.kib_sched_lock,
if (busy_loops++ >= RANAL_RESCHED) {
spin_unlock_irqrestore(&dev->rad_lock, flags);
- our_cond_resched();
+ cfs_cond_resched();
busy_loops = 0;
spin_lock_irqsave(&dev->rad_lock, flags);
!ksocknal_sched_cansleep(sched), rc);
LASSERT (rc == 0);
} else {
- our_cond_resched();
+ cfs_cond_resched();
}
cfs_spin_lock_bh (&sched->kss_lock);
spin_unlock_irqrestore(&kibnal_data.kib_sched_lock,
flags);
- our_cond_resched();
+ cfs_cond_resched();
busy_loops = 0;
spin_lock_irqsave(&kibnal_data.kib_sched_lock, flags);
swi_data.wi_waitq,
!swi_sched_cansleep(&swi_data.wi_runq));
else
- our_cond_resched();
+ cfs_cond_resched();
spin_lock(&swi_data.wi_lock);
}
swi_data.wi_serial_waitq,
!swi_sched_cansleep(&swi_data.wi_serial_runq));
else
- our_cond_resched();
+ cfs_cond_resched();
spin_lock(&swi_data.wi_lock);
}