LU-10649 llite: yield cpu after call to ll_agl_trigger 40/31240/4
author Ann Koehler <amk@cray.com>
Wed, 7 Jun 2017 19:28:03 +0000 (14:28 -0500)
committer Oleg Drokin <oleg.drokin@intel.com>
Mon, 9 Apr 2018 19:49:01 +0000 (19:49 +0000)
The statahead and agl threads loop over all entries in the
directory without yielding the CPU. If the number of entries
in the directory is large enough, these threads may trigger
soft lockups. The fix is to add a call to cond_resched()
after each call to ll_agl_trigger(), which obtains the
glimpse lock for a file.

Change-Id: I4fbc72a3c6bc77f2ffd8e3fd0daf4c8906bb954a
Cray-bug-id: LUS-2584
Signed-off-by: Chris Horn <hornc@cray.com>
Reviewed-on: https://review.whamcloud.com/31240
Reviewed-by: Patrick Farrell <paf@cray.com>
Reviewed-by: Sergey Cheremencev <c17829@cray.com>
Tested-by: Jenkins
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
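
For context, the pattern applied by this patch is a voluntary yield
inside a long-running kernel loop: after each potentially expensive
per-entry operation, cond_resched() lets the scheduler run other tasks
so the soft-lockup watchdog is not tripped. The sketch below is
illustrative only; the names example_ctx, example_entry and
process_entry are hypothetical stand-ins, not Lustre code.

    #include <linux/list.h>
    #include <linux/sched.h>

    struct example_entry {
            struct list_head list;
    };

    struct example_ctx {
            struct list_head entries;
    };

    /* Stand-in for per-entry work such as taking a glimpse lock. */
    static void process_entry(struct example_entry *entry)
    {
    }

    static int example_scan_thread(void *arg)
    {
            struct example_ctx *ctx = arg;
            struct example_entry *entry;

            list_for_each_entry(entry, &ctx->entries, list) {
                    process_entry(entry);
                    /* Yield the CPU if a reschedule is pending; without
                     * this, a very long list keeps the thread on the CPU
                     * and can trigger the soft-lockup watchdog. */
                    cond_resched();
            }
            return 0;
    }
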
lustre/llite/statahead.c

index a5d93ed..2da5070 100644
@@ -925,6 +925,7 @@ static int ll_agl_thread(void *arg)
                        list_del_init(&clli->lli_agl_list);
                        spin_unlock(&plli->lli_agl_lock);
                        ll_agl_trigger(&clli->lli_vfs_inode, sai);
+                       cond_resched();
                } else {
                        spin_unlock(&plli->lli_agl_lock);
                }
@@ -1112,7 +1113,7 @@ static int ll_statahead_thread(void *arg)
 
                                        ll_agl_trigger(&clli->lli_vfs_inode,
                                                        sai);
-
+                                       cond_resched();
                                        spin_lock(&lli->lli_agl_lock);
                                }
                                spin_unlock(&lli->lli_agl_lock);