From: yangsheng
Date: Fri, 9 Sep 2011 16:15:13 +0000 (+0800)
Subject: LU-506 FC15: update shrinker to use shrink_control callback
X-Git-Tag: 2.1.53~62
X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=commitdiff_plain;h=35844df97415ce3b736104ec34927609a99d637b

LU-506 FC15: update shrinker to use shrink_control callback

Linux 3.0 memory pressure shrinker now takes "struct shrink_control"
as its argument instead of "nr_to_scan" and "gfp_mask". This was
backported to Fedora 15.

Signed-off-by: Yang Sheng
Change-Id: Id9f6a9e10efe785d2837d1ad73098d2808a2f076
Reviewed-on: http://review.whamcloud.com/1331
Tested-by: Hudson
Tested-by: Maloo
Reviewed-by: Andreas Dilger
Reviewed-by: Lai Siyao
Reviewed-by: Niu Yawei
---

diff --git a/libcfs/autoconf/lustre-libcfs.m4 b/libcfs/autoconf/lustre-libcfs.m4
index 4843432..bef4651 100644
--- a/libcfs/autoconf/lustre-libcfs.m4
+++ b/libcfs/autoconf/lustre-libcfs.m4
@@ -671,6 +671,25 @@ AC_DEFUN([LIBCFS_HAVE_OOM_H],
 ])
 ])
 
+#
+# RHEL6/2.6.32 want to have pointer to shrinker self pointer in handler function
+#
+AC_DEFUN([LC_SHRINKER_WANT_SHRINK_PTR],
+[AC_MSG_CHECKING([shrinker want self pointer in handler])
+LB_LINUX_TRY_COMPILE([
+        #include <linux/mm.h>
+],[
+        struct shrinker *tmp = NULL;
+        tmp->shrink(tmp, 0, 0);
+],[
+        AC_MSG_RESULT(yes)
+        AC_DEFINE(HAVE_SHRINKER_WANT_SHRINK_PTR, 1,
+                [shrinker want self pointer in handler])
+],[
+        AC_MSG_RESULT(no)
+])
+])
+
 # 2.6.18 store oom parameters in task struct.
 # 2.6.32 store oom parameters in signal struct
 AC_DEFUN([LIBCFS_OOMADJ_IN_SIG],
@@ -713,18 +732,22 @@ LB_LINUX_TRY_COMPILE([
 ])
 ])
 
-# RHEL6/2.6.32 want to have pointer to shrinker self pointer in handler function
-AC_DEFUN([LC_SHRINKER_WANT_SHRINK_PTR],
-[AC_MSG_CHECKING([shrinker want self pointer in handler])
+
+#
+# FC15 2.6.40-5 backported the "shrink_control" parameter to the memory
+# pressure shrinker from Linux 3.0
+#
+AC_DEFUN([LC_SHRINK_CONTROL],
+[AC_MSG_CHECKING([shrink_control is present])
 LB_LINUX_TRY_COMPILE([
         #include <linux/mm.h>
 ],[
-        struct shrinker tmp = {0};
-        tmp.shrink(NULL, 0, 0);
+        struct shrink_control tmp = {0};
+        tmp.nr_to_scan = sizeof(tmp);
 ],[
         AC_MSG_RESULT(yes)
-        AC_DEFINE(HAVE_SHRINKER_WANT_SHRINK_PTR, 1,
-                [shrinker want self pointer in handler])
+        AC_DEFINE(HAVE_SHRINK_CONTROL, 1,
+                [shrink_control is present])
 ],[
         AC_MSG_RESULT(no)
 ])
@@ -784,6 +807,8 @@ LIBCFS_HAVE_OOM_H
 LIBCFS_OOMADJ_IN_SIG
 # 2.6.34
 LIBCFS_ADD_WAIT_QUEUE_EXCLUSIVE
+# 2.6.40 fc15
+LC_SHRINK_CONTROL
 ])
 
 #
diff --git a/libcfs/include/libcfs/linux/linux-mem.h b/libcfs/include/libcfs/linux/linux-mem.h
index 744dd42..0b92c10 100644
--- a/libcfs/include/libcfs/linux/linux-mem.h
+++ b/libcfs/include/libcfs/linux/linux-mem.h
@@ -161,14 +161,25 @@ extern int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem);
  */
 #define cfs_shrinker shrinker
 
-#ifdef HAVE_SHRINKER_WANT_SHRINK_PTR
-#define SHRINKER_FIRST_ARG struct shrinker *shrinker,
+#ifdef HAVE_SHRINK_CONTROL
+# define SHRINKER_ARGS(sc, nr_to_scan, gfp_mask) \
+                       struct shrinker *shrinker, \
+                       struct shrink_control *sc
+# define shrink_param(sc, var) ((sc)->var)
 #else
-#define SHRINKER_FIRST_ARG
+# ifdef HAVE_SHRINKER_WANT_SHRINK_PTR
+#  define SHRINKER_ARGS(sc, nr_to_scan, gfp_mask) \
+                        struct shrinker *shrinker, \
+                        int nr_to_scan, gfp_t gfp_mask
+# else
+#  define SHRINKER_ARGS(sc, nr_to_scan, gfp_mask) \
+                        int nr_to_scan, gfp_t gfp_mask
+# endif
+# define shrink_param(sc, var) (var)
 #endif
 
 #ifdef HAVE_REGISTER_SHRINKER
-typedef int (*cfs_shrinker_t)(SHRINKER_FIRST_ARG int nr_to_scan, gfp_t gfp_mask);
+typedef int (*cfs_shrinker_t)(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask));
 
 static inline
 struct cfs_shrinker *cfs_set_shrinker(int seek, cfs_shrinker_t func)
diff --git a/lustre/ldlm/ldlm_pool.c b/lustre/ldlm/ldlm_pool.c
index 23e847b..4f5b42d 100644
--- a/lustre/ldlm/ldlm_pool.c
+++ b/lustre/ldlm/ldlm_pool.c
@@ -1154,16 +1154,18 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
         return cached;
 }
 
-static int ldlm_pools_srv_shrink(SHRINKER_FIRST_ARG int nr_to_scan,
-                                 unsigned int gfp_mask)
+static int ldlm_pools_srv_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
 {
-        return ldlm_pools_shrink(LDLM_NAMESPACE_SERVER, nr_to_scan, gfp_mask);
+        return ldlm_pools_shrink(LDLM_NAMESPACE_SERVER,
+                                 shrink_param(sc, nr_to_scan),
+                                 shrink_param(sc, gfp_mask));
 }
 
-static int ldlm_pools_cli_shrink(SHRINKER_FIRST_ARG int nr_to_scan,
-                                 unsigned int gfp_mask)
+static int ldlm_pools_cli_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
 {
-        return ldlm_pools_shrink(LDLM_NAMESPACE_CLIENT, nr_to_scan, gfp_mask);
+        return ldlm_pools_shrink(LDLM_NAMESPACE_CLIENT,
+                                 shrink_param(sc, nr_to_scan),
+                                 shrink_param(sc, gfp_mask));
 }
 
 void ldlm_pools_recalc(ldlm_side_t client)
diff --git a/lustre/obdclass/lu_object.c b/lustre/obdclass/lu_object.c
index f0e9535..c7bedec 100644
--- a/lustre/obdclass/lu_object.c
+++ b/lustre/obdclass/lu_object.c
@@ -1675,25 +1675,25 @@ static void lu_site_stats_get(cfs_hash_t *hs,
 }
 
 #ifdef __KERNEL__
-static int lu_cache_shrink(SHRINKER_FIRST_ARG int nr_to_scan,
-                           unsigned int gfp_mask)
+
+static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
 {
         lu_site_stats_t stats;
         struct lu_site *s;
         struct lu_site *tmp;
         int cached = 0;
-        int remain = nr_to_scan;
+        int remain = shrink_param(sc, nr_to_scan);
         CFS_LIST_HEAD(splice);
 
-        if (nr_to_scan != 0) {
-                if (!(gfp_mask & __GFP_FS))
+        if (remain != 0) {
+                if (!(shrink_param(sc, gfp_mask) & __GFP_FS))
                         return -1;
-                CDEBUG(D_INODE, "Shrink %d objects\n", nr_to_scan);
+                CDEBUG(D_INODE, "Shrink %d objects\n", remain);
         }
 
         cfs_down(&lu_sites_guard);
         cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
-                if (nr_to_scan != 0) {
+                if (shrink_param(sc, nr_to_scan) != 0) {
                         remain = lu_site_purge(&lu_shrink_env, s, remain);
                         /*
                          * Move just shrunk site to the tail of site list to
@@ -1705,14 +1705,14 @@ static int lu_cache_shrink(SHRINKER_FIRST_ARG int nr_to_scan,
                 memset(&stats, 0, sizeof(stats));
                 lu_site_stats_get(s->ls_obj_hash, &stats, 0);
                 cached += stats.lss_total - stats.lss_busy;
-                if (nr_to_scan && remain <= 0)
+                if (shrink_param(sc, nr_to_scan) && remain <= 0)
                         break;
         }
         cfs_list_splice(&splice, lu_sites.prev);
         cfs_up(&lu_sites_guard);
 
         cached = (cached / 100) * sysctl_vfs_cache_pressure;
-        if (nr_to_scan == 0)
+        if (shrink_param(sc, nr_to_scan) == 0)
                 CDEBUG(D_INODE, "%d objects cached\n", cached);
         return cached;
 }
diff --git a/lustre/ptlrpc/sec_bulk.c b/lustre/ptlrpc/sec_bulk.c
index 1fc74d0..dfe6b87 100644
--- a/lustre/ptlrpc/sec_bulk.c
+++ b/lustre/ptlrpc/sec_bulk.c
@@ -239,17 +239,20 @@ static void enc_pools_release_free_pages(long npages)
  * could be called frequently for query (@nr_to_scan == 0).
  * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
  */
-static int enc_pools_shrink(SHRINKER_FIRST_ARG int nr_to_scan,
-                            unsigned int gfp_mask)
+static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
 {
-        if (unlikely(nr_to_scan != 0)) {
+        if (unlikely(shrink_param(sc, nr_to_scan) != 0)) {
                 cfs_spin_lock(&page_pools.epp_lock);
-                nr_to_scan = min(nr_to_scan, (int) page_pools.epp_free_pages -
-                                 PTLRPC_MAX_BRW_PAGES);
-                if (nr_to_scan > 0) {
-                        enc_pools_release_free_pages(nr_to_scan);
-                        CDEBUG(D_SEC, "released %d pages, %ld left\n",
-                               nr_to_scan, page_pools.epp_free_pages);
+                shrink_param(sc, nr_to_scan) = min_t(unsigned long,
+                                               shrink_param(sc, nr_to_scan),
+                                               page_pools.epp_free_pages -
+                                               PTLRPC_MAX_BRW_PAGES);
+                if (shrink_param(sc, nr_to_scan) > 0) {
+                        enc_pools_release_free_pages(shrink_param(sc,
+                                                                  nr_to_scan));
+                        CDEBUG(D_SEC, "released %ld pages, %ld left\n",
+                               (long)shrink_param(sc, nr_to_scan),
+                               page_pools.epp_free_pages);
                         page_pools.epp_st_shrinks++;
                         page_pools.epp_last_shrink = cfs_time_current_sec();
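
For context, here is a minimal standalone sketch of how a shrinker callback is written against the SHRINKER_ARGS()/shrink_param() compatibility macros that this patch adds to libcfs/include/libcfs/linux/linux-mem.h. It is not part of the patch: the my_cache_* names and counter are invented for illustration, the include path is assumed, and CFS_DEFAULT_SEEKS and cfs_remove_shrinker() are assumed to come from libcfs of the same vintage (cfs_set_shrinker() appears in the patch context above). The query-on-zero and __GFP_FS checks mirror what lu_cache_shrink() and enc_pools_shrink() do.

/*
 * Illustrative only -- not from the patch.  A hypothetical object cache
 * registers a shrinker through the libcfs wrappers; the same source builds
 * against the shrink_control API (HAVE_SHRINK_CONTROL), the RHEL6-style
 * "self pointer" API (HAVE_SHRINKER_WANT_SHRINK_PTR), and the plain
 * (nr_to_scan, gfp_mask) API, because SHRINKER_ARGS()/shrink_param()
 * expand differently in each case.
 */
#include <libcfs/libcfs.h>      /* assumed to pull in linux-mem.h */

static int my_cache_count;      /* hypothetical count of reclaimable objects */

static int my_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
{
        int nr = shrink_param(sc, nr_to_scan);

        /* nr == 0 is only a query: report how many objects could be freed */
        if (nr == 0)
                return my_cache_count;

        /* refuse to shrink when the caller cannot re-enter the filesystem */
        if (!(shrink_param(sc, gfp_mask) & __GFP_FS))
                return -1;

        /* ... release up to "nr" cached objects here ... */

        return my_cache_count;
}

static struct cfs_shrinker *my_cache_shrinker;

static void my_cache_shrinker_init(void)
{
        my_cache_shrinker = cfs_set_shrinker(CFS_DEFAULT_SEEKS,
                                             my_cache_shrink);
}

static void my_cache_shrinker_fini(void)
{
        if (my_cache_shrinker != NULL)
                cfs_remove_shrinker(my_cache_shrinker);
}

On an FC15 2.6.40 (Linux 3.0 backport) kernel this expands to a callback taking struct shrinker * and struct shrink_control *, while on older kernels it keeps the legacy nr_to_scan/gfp_mask signature. That is why the callers touched by this patch in ldlm_pool.c, lu_object.c and sec_bulk.c only needed to switch their argument lists to SHRINKER_ARGS() and read the parameters through shrink_param().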