* unefficient and can be harmful if we have many elements to process */
#define cfs_cond_resched() cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE, 1)
+/* Fallback stub for platforms without preempt/lock-break support: never
+ * drops the lock or reschedules, and returns 0 ("did not reschedule") to
+ * match the Linux cond_resched_lock() convention — TODO confirm callers
+ * only test the return value for truthiness. */
+static inline int cfs_cond_resched_lock(cfs_spinlock_t *lock)
+{
+ return 0;
+}
+
#define __wait_event(wq, condition) \
do { \
struct cfs_waitlink __wait; \
#define cfs_schedule() schedule()
#define cfs_kthread_run(fn, data, fmt, arg...) kthread_run(fn, data, fmt, ##arg)
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-static inline void cfs_cond_resched(void)
-{
- if (current->need_resched)
- schedule();
-}
-#else
#define cfs_cond_resched() cond_resched()
-#endif
+#define cfs_cond_resched_lock(lock) cond_resched_lock(lock)
/* Kernel thread */
typedef int (*cfs_thread_t)(void *);
cfs_waitlink_t l; \
cfs_waitq_timedwait(&l, s, t); \
} while (0)
-#define cfs_cond_resched() do {} while(0)
+#define cfs_cond_resched() do {} while(0)
+#define cfs_cond_resched_lock(l) (0)
#define CFS_TASK_INTERRUPTIBLE (0)
#define CFS_TASK_UNINT (0)
schedule_timeout(1i64);
}
+/* No-op stub: this port has no way to briefly release @lock and yield the
+ * CPU, so always report 0 (no reschedule happened), mirroring the return
+ * convention of Linux cond_resched_lock(). */
+static inline int cfs_cond_resched_lock(cfs_spinlock_t *lock)
+{
+ return 0;
+}
+
#define CFS_DECL_JOURNAL_DATA
#define CFS_PUSH_JOURNAL do {;} while(0)
#define CFS_POP_JOURNAL do {;} while(0)
Details : fix race with per-nid stats by delaying procfs cleanup until
exp_refcount == 0
+Severity : normal
+Bugzilla : 21556
+Description: extent lock cancellation on client can keep the cpu busy for too
+ long.
+
-------------------------------------------------------------------------------
2010-04-30 Oracle, Inc.
spin_lock(&lock->l_extents_list_lock);
while (!list_empty(&lock->l_extents_list)) {
+ if (unlikely(cfs_cond_resched_lock(&lock->l_extents_list_lock)))
+ continue;
+
extent = list_entry(lock->l_extents_list.next,
struct osc_async_page, oap_page_list);