Whamcloud - gitweb
Branch b1_8
author    johann <johann>    Thu, 10 Dec 2009 22:31:00 +0000 (22:31 +0000)
committer johann <johann>    Thu, 10 Dec 2009 22:31:00 +0000 (22:31 +0000)
b=19557
i=adilger
i=andrew

add cond_resched() calls to lustre_hash_for_each_empty() to avoid hogging
the CPU when there are many locks to process on disconnect/eviction.

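The lustre_hash_for_each_empty() call sites themselves do not appear in the
diffs below, which cover only the libcfs portability layer and its LNet
users; the pattern the commit message describes, though, is the classic one
of yielding the CPU once per drained element so that an eviction which must
cancel tens of thousands of locks cannot monopolize a core. A minimal
sketch of that pattern, with hypothetical names (drain_all, hash_pop_any
and struct hash_table are illustrative, not the real lustre_hash API):

    /* Sketch only -- hypothetical helpers, not the actual lustre_hash code. */
    static void drain_all(struct hash_table *ht, void (*dispose)(void *obj))
    {
            void *obj;

            /* Pop entries until the table is empty. */
            while ((obj = hash_pop_any(ht)) != NULL) {
                    dispose(obj);           /* e.g. cancel/put one LDLM lock */
                    cfs_cond_resched();     /* let other runnable tasks in */
            }
    }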
14 files changed:
lnet/include/libcfs/darwin/darwin-prim.h
lnet/include/libcfs/darwin/kp30.h
lnet/include/libcfs/linux/kp30.h
lnet/include/libcfs/linux/linux-prim.h
lnet/include/libcfs/user-prim.h
lnet/include/libcfs/winnt/kp30.h
lnet/include/libcfs/winnt/winnt-prim.h
lnet/klnds/iiblnd/iiblnd_cb.c
lnet/klnds/o2iblnd/o2iblnd_cb.c
lnet/klnds/openiblnd/openiblnd_cb.c
lnet/klnds/ralnd/ralnd_cb.c
lnet/klnds/socklnd/socklnd_cb.c
lnet/klnds/viblnd/viblnd_cb.c
lnet/selftest/workitem.c

lnet/include/libcfs/darwin/darwin-prim.h
index b0bec96..55ed1b3 100644
@@ -281,6 +281,11 @@ static inline int cfs_schedule_timeout(int state, int64_t timeout)
 #define cfs_schedule() cfs_schedule_timeout(CFS_TASK_UNINT, CFS_TICK)
 #define cfs_pause(tick)        cfs_schedule_timeout(CFS_TASK_UNINT, tick)
 
+/* XXX cfs_cond_resched() is sometimes called at each loop iteration
+ * (e.g. lustre_hash_for_each_empty()), so this definition is pretty
+ * inefficient and can be harmful if we have many elements to process */
+#define cfs_cond_resched() cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE, 1)
+
 #define __wait_event(wq, condition)                            \
 do {                                                           \
        struct cfs_waitlink __wait;                             \
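The XXX comment above flags a real trade-off: on Linux, cond_resched()
yields only when the scheduler has already marked the task for
rescheduling, so the common case is nearly free, while this fallback
sleeps with a one-tick timeout on every call. Assuming a full one-tick
sleep per call (the actual Darwin behaviour may differ), calling it per
element of a large hash adds latency linear in the element count:

    /* Back-of-the-envelope cost of the sleep-a-tick fallback, under the
     * assumed one-tick-per-call sleep: at hz = 100, draining 100,000
     * elements adds on the order of 1,000 seconds of sleep. */
    static unsigned long added_sleep_seconds(unsigned long nelems,
                                             unsigned long hz)
    {
            return nelems / hz;
    }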
lnet/include/libcfs/darwin/kp30.h
index 7d022a9..01a9f60 100644
@@ -57,8 +57,6 @@
 #include <libcfs/darwin/darwin-prim.h>
 #include <lnet/lnet.h>
 
-#define our_cond_resched() cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE, 1)
-
 #ifdef CONFIG_SMP
 #define LASSERT_SPIN_LOCKED(lock) do {} while(0) /* XXX */
 #else
lnet/include/libcfs/linux/kp30.h
index 0431bab..ec5713d 100644
@@ -98,11 +98,6 @@ do {                                                                          \
 #define PageUptodate Page_Uptodate
 #define our_recalc_sigpending(current) recalc_sigpending(current)
 #define num_online_cpus() smp_num_cpus
-static inline void our_cond_resched(void)
-{
-        if (current->need_resched)
-               schedule ();
-}
 #define work_struct_t                   struct tq_struct
 #define cfs_get_work_data(type,field,data)   (data)
 #else
@@ -130,10 +125,6 @@ do {                                                                          \
 #define wait_on_page wait_on_page_locked
 #define our_recalc_sigpending(current) recalc_sigpending()
 #define strtok(a,b) strpbrk(a, b)
-static inline void our_cond_resched(void)
-{
-        cond_resched();
-}
 #define work_struct_t      struct work_struct
 
 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) */
lnet/include/libcfs/linux/linux-prim.h
index 713718f..754e036 100644
@@ -159,6 +159,16 @@ typedef long                            cfs_task_state_t;
 #define cfs_schedule()                  schedule()
 #define cfs_kthread_run(fn, data, fmt, arg...) kthread_run(fn, data, fmt, ##arg)
 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
+static inline void cfs_cond_resched(void)
+{
+        if (current->need_resched)
+               schedule();
+}
+#else
+#define cfs_cond_resched()              cond_resched()
+#endif
+
 /* Kernel thread */
 typedef int (*cfs_thread_t)(void *);
 
lnet/include/libcfs/user-prim.h
index bdcc683..dc679a7 100644
@@ -99,6 +99,7 @@ int64_t cfs_waitq_timedwait(struct cfs_waitlink *link, int state, int64_t timeou
                 cfs_waitlink_t    l;            \
                 cfs_waitq_timedwait(&l, s, t);  \
         } while (0)
+#define cfs_cond_resched() do {} while(0)
 
 #define CFS_TASK_INTERRUPTIBLE  (0)
 #define CFS_TASK_UNINT          (0)
lnet/include/libcfs/winnt/kp30.h
index 336a649..fd12f19 100644
 
 #define CFS_SYSFS_MODULE_PARM    0 /* no sysfs access to module parameters */
 
-
-static inline void our_cond_resched()
-{
-    schedule_timeout(1i64);
-}
-
 #ifdef CONFIG_SMP
 #define LASSERT_SPIN_LOCKED(lock) do {} while(0) /* XXX */
 #else
lnet/include/libcfs/winnt/winnt-prim.h
index a980556..abcf379 100644
@@ -541,6 +541,14 @@ int     wake_up_process(cfs_task_t * task);
 #define cfs_schedule_timeout(state, time)  schedule_timeout(time)
 void sleep_on(cfs_waitq_t *waitq);
 
+/* XXX cfs_cond_resched() is sometimes called at each loop iteration
+ * (e.g. lustre_hash_for_each_empty()), so this definition is pretty
+ * inefficient and can be harmful if we have many elements to process */
+static inline void cfs_cond_resched()
+{
+    schedule_timeout(1i64);
+}
+
 #define CFS_DECL_JOURNAL_DATA  
 #define CFS_PUSH_JOURNAL           do {;} while(0)
 #define CFS_POP_JOURNAL                    do {;} while(0)
lnet/klnds/iiblnd/iiblnd_cb.c
index fb6ce60..9e7b62c 100644
@@ -3310,7 +3310,7 @@ kibnal_scheduler(void *arg)
                         spin_unlock_irqrestore(&kibnal_data.kib_sched_lock,
                                                flags);
 
-                        our_cond_resched();
+                        cfs_cond_resched();
                         busy_loops = 0;
                         
                         spin_lock_irqsave(&kibnal_data.kib_sched_lock, flags);
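This and the remaining LND scheduler diffs below are the same mechanical
rename, and all of the call sites share one shape: after a bounded number
of busy iterations the thread drops its spinlock, yields, resets the
counter, and re-takes the lock before continuing. Condensed (lock, counter
and limit names vary per LND):

    /* Condensed scheduler yield pattern; RESCHED_LIMIT stands in for the
     * per-LND constant (e.g. RANAL_RESCHED). */
    spin_lock_irqsave(&sched_lock, flags);
    while (!shutting_down) {
            /* ... dequeue and handle one rx/tx work item ... */

            if (++busy_loops >= RESCHED_LIMIT) {
                    /* Never yield while holding a spinlock. */
                    spin_unlock_irqrestore(&sched_lock, flags);
                    cfs_cond_resched();
                    busy_loops = 0;
                    spin_lock_irqsave(&sched_lock, flags);
            }
    }
    spin_unlock_irqrestore(&sched_lock, flags);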
lnet/klnds/o2iblnd/o2iblnd_cb.c
index 01c4621..2500341 100644
@@ -3184,7 +3184,7 @@ kiblnd_scheduler(void *arg)
                         spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
                                                flags);
 
-                        our_cond_resched();
+                        cfs_cond_resched();
                         busy_loops = 0;
 
                         spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
lnet/klnds/openiblnd/openiblnd_cb.c
index 85f1166..31dade0 100644
@@ -2614,7 +2614,7 @@ kibnal_scheduler(void *arg)
                                         !list_empty(&kibnal_data.kib_sched_rxq) || 
                                         kibnal_data.kib_shutdown);
                         } else {
-                                our_cond_resched();
+                                cfs_cond_resched();
                         }
 
                         spin_lock_irqsave(&kibnal_data.kib_sched_lock,
lnet/klnds/ralnd/ralnd_cb.c
index 9fa2958..5c2871f 100644
@@ -1941,7 +1941,7 @@ kranal_scheduler (void *arg)
                 if (busy_loops++ >= RANAL_RESCHED) {
                         spin_unlock_irqrestore(&dev->rad_lock, flags);
 
-                        our_cond_resched();
+                        cfs_cond_resched();
                         busy_loops = 0;
 
                         spin_lock_irqsave(&dev->rad_lock, flags);
lnet/klnds/socklnd/socklnd_cb.c
index 44384cd..89041f8 100644
@@ -1520,7 +1520,7 @@ int ksocknal_scheduler (void *arg)
                                         !ksocknal_sched_cansleep(sched), rc);
                                 LASSERT (rc == 0);
                         } else {
-                                our_cond_resched();
+                                cfs_cond_resched();
                         }
 
                         cfs_spin_lock_bh (&sched->kss_lock);
lnet/klnds/viblnd/viblnd_cb.c
index 0528b0e..3fc6376 100644
@@ -3583,7 +3583,7 @@ kibnal_scheduler(void *arg)
                         spin_unlock_irqrestore(&kibnal_data.kib_sched_lock,
                                                flags);
 
-                        our_cond_resched();
+                        cfs_cond_resched();
                         busy_loops = 0;
 
                         spin_lock_irqsave(&kibnal_data.kib_sched_lock, flags);
lnet/selftest/workitem.c
index 5e49b8b..67ff22e 100644
@@ -193,7 +193,7 @@ swi_scheduler_main (void *arg)
                                    swi_data.wi_waitq,
                                    !swi_sched_cansleep(&swi_data.wi_runq));
                 else
-                        our_cond_resched();
+                        cfs_cond_resched();
 
                 spin_lock(&swi_data.wi_lock);
         }
@@ -247,7 +247,7 @@ swi_serial_scheduler_main (void *arg)
                              swi_data.wi_serial_waitq,
                              !swi_sched_cansleep(&swi_data.wi_serial_runq));
                 else
-                        our_cond_resched();
+                        cfs_cond_resched();
 
                 spin_lock(&swi_data.wi_lock);
         }