LU-4367 ptlrpc: add OBD_CONNECT_UNLINK_CLOSE flag
[fs/lustre-release.git] / lustre/ptlrpc/sec_bulk.c
index 488ed8a..9da60ad 100644
@@ -232,30 +232,46 @@ static void enc_pools_release_free_pages(long npages)
 }
 
 /*
- * could be called frequently for query (@nr_to_scan == 0).
  * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
  */
-static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static unsigned long enc_pools_shrink_count(struct shrinker *s,
+                                           struct shrink_control *sc)
 {
-       if (unlikely(shrink_param(sc, nr_to_scan) != 0)) {
+       /*
+        * if no pool access for a long time, we consider it's fully idle.
+        * a little race here is fine.
+        */
+       if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
+                    CACHE_QUIESCENT_PERIOD)) {
                spin_lock(&page_pools.epp_lock);
-                shrink_param(sc, nr_to_scan) = min_t(unsigned long,
-                                                   shrink_param(sc, nr_to_scan),
-                                                   page_pools.epp_free_pages -
-                                                   PTLRPC_MAX_BRW_PAGES);
-                if (shrink_param(sc, nr_to_scan) > 0) {
-                        enc_pools_release_free_pages(shrink_param(sc,
-                                                                  nr_to_scan));
-                        CDEBUG(D_SEC, "released %ld pages, %ld left\n",
-                               (long)shrink_param(sc, nr_to_scan),
-                               page_pools.epp_free_pages);
-
-                        page_pools.epp_st_shrinks++;
-                        page_pools.epp_last_shrink = cfs_time_current_sec();
-                }
+               page_pools.epp_idle_idx = IDLE_IDX_MAX;
                spin_unlock(&page_pools.epp_lock);
        }
 
+       LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
+       return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
+               (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
+}
+
+/*
+ * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
+ */
+static unsigned long enc_pools_shrink_scan(struct shrinker *s,
+                                          struct shrink_control *sc)
+{
+       spin_lock(&page_pools.epp_lock);
+       sc->nr_to_scan = min_t(unsigned long, sc->nr_to_scan,
+                             page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES);
+       if (sc->nr_to_scan > 0) {
+               enc_pools_release_free_pages(sc->nr_to_scan);
+               CDEBUG(D_SEC, "released %ld pages, %ld left\n",
+                      (long)sc->nr_to_scan, page_pools.epp_free_pages);
+
+               page_pools.epp_st_shrinks++;
+               page_pools.epp_last_shrink = cfs_time_current_sec();
+       }
+       spin_unlock(&page_pools.epp_lock);
+
        /*
         * if no pool access for a long time, we consider it's fully idle.
         * a little race here is fine.
@@ -268,10 +284,31 @@ static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
        }
 
        LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
-       return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
-               (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
+       return sc->nr_to_scan;
+}
+
+#ifndef HAVE_SHRINKER_COUNT
+/*
+ * could be called frequently for query (@nr_to_scan == 0).
+ * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
+ */
+static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+{
+       struct shrink_control scv = {
+               .nr_to_scan = shrink_param(sc, nr_to_scan),
+               .gfp_mask   = shrink_param(sc, gfp_mask)
+       };
+#if !defined(HAVE_SHRINKER_WANT_SHRINK_PTR) && !defined(HAVE_SHRINK_CONTROL)
+       struct shrinker* shrinker = NULL;
+#endif
+
+       enc_pools_shrink_scan(shrinker, &scv);
+
+       return enc_pools_shrink_count(shrinker, &scv);
 }
 
+#endif /* HAVE_SHRINKER_COUNT */
+
 static inline
 int npages_to_npools(unsigned long npages)
 {
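
The two hunks above split the single legacy ->shrink callback into the count/scan pair used by the Linux 3.12+ shrinker API: count_objects() gives a cheap estimate of how much is reclaimable (here, free pool pages above PTLRPC_MAX_BRW_PAGES, scaled by the pool's idle index), while scan_objects() actually releases up to sc->nr_to_scan pages and returns how many it freed; the old-style enc_pools_shrink() wrapper is kept only for kernels without HAVE_SHRINKER_COUNT. The sketch below shows the same registration pattern in isolation; the demo_* names and the atomic counter standing in for the page pool are hypothetical, and it assumes a kernel between 3.12 and 5.x where register_shrinker() takes only the struct shrinker pointer.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/shrinker.h>
#include <linux/atomic.h>

/* hypothetical stand-in for a cache of reclaimable objects */
static atomic_long_t demo_cached = ATOMIC_LONG_INIT(0);
#define DEMO_RESERVED	64	/* keep at least this many cached */

/* count_objects(): cheap estimate of what scan_objects() could free */
static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
	long excess = atomic_long_read(&demo_cached) - DEMO_RESERVED;

	return excess > 0 ? (unsigned long)excess : 0;
}

/* scan_objects(): free up to sc->nr_to_scan objects, return the number freed */
static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
	long excess = atomic_long_read(&demo_cached) - DEMO_RESERVED;
	unsigned long nr = min_t(unsigned long, sc->nr_to_scan,
				 excess > 0 ? (unsigned long)excess : 0);

	if (nr == 0)
		return SHRINK_STOP;	/* nothing reclaimable right now */
	atomic_long_sub(nr, &demo_cached);
	return nr;
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
};

static int __init demo_init(void)
{
	return register_shrinker(&demo_shrinker);
}

static void __exit demo_exit(void)
{
	unregister_shrinker(&demo_shrinker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");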
@@ -419,8 +456,8 @@ static int enc_pools_add_pages(int npages)
                        goto out_pools;
 
                for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
-                       pools[i][j] = alloc_page(__GFP_IO |
-                                                    __GFP_HIGHMEM);
+                       pools[i][j] = alloc_page(GFP_NOFS |
+                                                __GFP_HIGHMEM);
                        if (pools[i][j] == NULL)
                                goto out_pools;
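The allocation hunk above replaces the bare __GFP_IO flags with GFP_NOFS. A minimal hedged sketch of that allocation path follows, assuming the usual kernel semantics that GFP_NOFS allows the allocator to block and reclaim memory while masking __GFP_FS, so reclaim cannot recurse back into filesystem code from this filesystem context; the enc_pool_alloc_one()/enc_pool_free_one() helper names are hypothetical.

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * hypothetical helper: grab one pool page.  GFP_NOFS permits blocking
 * and direct reclaim but excludes __GFP_FS, avoiding re-entry into
 * filesystem code; __GFP_HIGHMEM lets the page come from highmem on
 * 32-bit kernels, matching the patch above.
 */
static struct page *enc_pool_alloc_one(void)
{
	return alloc_page(GFP_NOFS | __GFP_HIGHMEM);
}

static void enc_pool_free_one(struct page *pg)
{
	__free_page(pg);
}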
 
@@ -448,7 +485,7 @@ out:
 
 static inline void enc_pools_wakeup(void)
 {
-       LASSERT(spin_is_locked(&page_pools.epp_lock));
+       assert_spin_locked(&page_pools.epp_lock);
 
        if (unlikely(page_pools.epp_waitqlen)) {
                LASSERT(waitqueue_active(&page_pools.epp_waitq));
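The hunk above swaps LASSERT(spin_is_locked(...)) for the kernel's own assert_spin_locked(): spin_is_locked() is not a reliable "the caller holds this lock" predicate for assertions (notably on uniprocessor configurations), whereas assert_spin_locked() is the in-kernel idiom for documenting that precondition. A minimal sketch with hypothetical names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_waiters;

/* hypothetical helper that must only be called with demo_lock held */
static void demo_wakeup_locked(void)
{
	assert_spin_locked(&demo_lock);	/* documents the lock-held precondition */
	if (demo_waiters)
		demo_waiters--;
}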
@@ -706,6 +743,8 @@ static inline void enc_pools_free(void)
 
 int sptlrpc_enc_pool_init(void)
 {
+       DEF_SHRINKER_VAR(shvar, enc_pools_shrink,
+                        enc_pools_shrink_count, enc_pools_shrink_scan);
        /*
         * maximum capacity is 1/8 of total physical memory.
         * is the 1/8 a good number?
@@ -741,8 +780,7 @@ int sptlrpc_enc_pool_init(void)
         if (page_pools.epp_pools == NULL)
                 return -ENOMEM;
 
-       pools_shrinker = set_shrinker(pools_shrinker_seeks,
-                                          enc_pools_shrink);
+       pools_shrinker = set_shrinker(pools_shrinker_seeks, &shvar);
         if (pools_shrinker == NULL) {
                 enc_pools_free();
                 return -ENOMEM;
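
DEF_SHRINKER_VAR() and set_shrinker() used in the last two hunks are libcfs compatibility wrappers that choose between the legacy single ->shrink callback and the newer count/scan pair at build time via HAVE_SHRINKER_COUNT. The sketch below is only a plausible shape for such a shim, not the actual libcfs definition; the macro and helper bodies are assumptions, and error handling is elided.

#include <linux/shrinker.h>
#include <linux/slab.h>

/* assumed shape of a compat shim selecting the shrinker API at build time */
#ifdef HAVE_SHRINKER_COUNT
#define DEF_SHRINKER_VAR(name, shrink_fn, count_fn, scan_fn)	\
	struct shrinker name = {				\
		.count_objects	= (count_fn),			\
		.scan_objects	= (scan_fn),			\
		.seeks		= DEFAULT_SEEKS,		\
	}
#else
#define DEF_SHRINKER_VAR(name, shrink_fn, count_fn, scan_fn)	\
	struct shrinker name = {				\
		.shrink	= (shrink_fn),				\
		.seeks	= DEFAULT_SEEKS,			\
	}
#endif

/*
 * register a heap copy of the on-stack variable so it outlives the
 * caller's stack frame; a matching remove_shrinker() would
 * unregister_shrinker(s) and kfree(s).
 */
static inline struct shrinker *set_shrinker(int seeks, struct shrinker *var)
{
	struct shrinker *s = kmemdup(var, sizeof(*s), GFP_KERNEL);

	if (s != NULL) {
		s->seeks = seeks;
		register_shrinker(s);
	}
	return s;
}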