information, please refer to bugzilla 17630.
Severity : normal
+Frequency : rare, at shutdown
+Bugzilla : 18773
+Description: panic at umount
+Details    : llap_shrinker can race with removal of the super block from the
+	     list, producing a panic via access to an already-freed pointer
+
+Severity : normal
Frequency : rare
Bugzilla : 18154
Descriptoin: don't lose wakeup for imp_recovery_waitq
#define log2(n) ffz(~(n))
#endif
-static inline void ll_pglist_fini(struct ll_sb_info *sbi)
+static void ll_pglist_fini(struct ll_sb_info *sbi)
{
struct page *page;
int i;
sbi->ll_pglist = NULL;
}
-static inline int ll_pglist_init(struct ll_sb_info *sbi)
+static int ll_pglist_init(struct ll_sb_info *sbi)
{
struct ll_pglist_data *pd;
unsigned long budget;
ENTRY;
if (sbi != NULL) {
- ll_pglist_fini(sbi);
spin_lock(&ll_sb_lock);
list_del(&sbi->ll_list);
spin_unlock(&ll_sb_lock);
+ /* don't allow the cache to be found via the sb list first */
+ ll_pglist_fini(sbi);
lcounter_destroy(&sbi->ll_async_page_count);
OBD_FREE(sbi->ll_async_page_sample,
sizeof(long) * num_possible_cpus());
struct ll_sb_info *sbi;
int count = 0;
+ /* don't race with umount */
+ spin_lock(&ll_sb_lock);
list_for_each_entry(sbi, &ll_super_blocks, ll_list)
count += llap_shrink_cache(sbi, priority);
+ spin_unlock(&ll_sb_lock);
#if defined(HAVE_CACHE_RETURN_INT)
return count;
int keep;
if (unlikely(need_resched())) {
+ list_del(&dummy_llap.llap_pglist_item);
ll_pglist_cpu_unlock(sbi, cpu);
- cond_resched();
- ll_pglist_cpu_lock(sbi, cpu);
+ /* vmscan::shrink_slab() has its own schedule() */
+ return count;
}
llap = llite_pglist_next_llap(head,