Whamcloud - gitweb
EX-8270 ptlrpc: replace ELEMENT_SIZE
author: Patrick Farrell <pfarrell@whamcloud.com>
Wed, 20 Sep 2023 21:55:42 +0000 (17:55 -0400)
committer: Andreas Dilger <adilger@whamcloud.com>
Fri, 22 Sep 2023 23:55:07 +0000 (23:55 +0000)
The ELEMENT_SIZE macro is fine, but it takes a pool index
and doesn't handle the pool of order 0.  Change it to a
function.  (This is marginally less efficient in one spot,
since it replaces a shift with a divide, but it should be
just fine.)

Test-Parameters: trivial
Signed-off-by: Patrick Farrell <pfarrell@whamcloud.com>
Change-Id: I322037e50bbdb8e0274b37f82618b6907b6d2906
Reviewed-on: https://review.whamcloud.com/c/ex/lustre-release/+/52445
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
lustre/ptlrpc/sec_bulk.c

index f70cfe6..7996083 100644 (file)
@@ -54,7 +54,6 @@
 #define POOLS_COUNT (PPOOL_MAX_CHUNK_BITS - PPOOL_MIN_CHUNK_BITS + 1)
 #define PPOOL_ORDER_TO_INDEX(bits) ((bits) - PPOOL_MIN_CHUNK_BITS + 1)
 #define POOL_BITS(pool) ((pool) + PPOOL_MIN_CHUNK_BITS - 1)
-#define ELEMENT_SIZE(pool) (1 << (PPOOL_MIN_CHUNK_BITS + (pool) - 1))
 #define PAGES_TO_MiB(pages)    ((pages) >> (20 - PAGE_SHIFT))
 #define MiB_TO_PAGES(mb)       ((mb) << (20 - PAGE_SHIFT))
 /* deprecated - see pool_max_memory_mb below */
@@ -133,6 +132,14 @@ static struct ptlrpc_page_pool {
        struct mutex add_pages_mutex;
 } **page_pools;
 
+/*
+ * Return the allocation size in bytes of one element in @pool.
+ * Pool index 0 is the plain-page pool, so its elements are PAGE_SIZE;
+ * higher indices are power-of-two multiples per PPOOL_MIN_CHUNK_BITS
+ * (replaces the old ELEMENT_SIZE() macro, which mishandled index 0).
+ */
+static int element_size(struct ptlrpc_page_pool *pool)
+{
+       if (pool->ppp_index == 0)
+               return PAGE_SIZE;
+
+       return 1 << (PPOOL_MIN_CHUNK_BITS + pool->ppp_index - 1);
+}
+
+
 /*
  * Keep old name (encrypt_page_pool vs page_pool) for compatibility with user
  * tools pulling stats
@@ -205,7 +212,7 @@ int page_pools_seq_show(struct seq_file *m, void *v)
                if (!pool->ppp_st_access)
                        continue;
                spin_lock(&pool->ppp_lock);
-               seq_printf(m, "  pool_%luk:\n"
+               seq_printf(m, "  pool_%dk:\n"
                           "    max_pages: %lu\n"
                           "    max_pools: %u\n"
                           "    total_pages: %lu\n"
@@ -223,8 +230,8 @@ int page_pools_seq_show(struct seq_file *m, void *v)
                           "    max_waitqueue_depth: %u\n"
                           "    max_wait_time_ms: %lld\n"
                           "    out_of_mem: %lu\n",
-                          (pool_index ? ELEMENT_SIZE(pool_index - 10) :
-                          PAGE_SIZE >> 10),
+                          /* convert from bytes to KiB */
+                          element_size(pool) >> 10,
                           pool->ppp_max_pages,
                           pool->ppp_max_pools,
                           pool->ppp_total_pages,
@@ -279,7 +286,7 @@ static void pool_release_free_pages(long npages, struct ptlrpc_page_pool *pool)
                        __free_page(pool->ppp_pools[p_idx][g_idx]);
                else
                        OBD_FREE_LARGE(pool->ppp_pools[p_idx][g_idx],
-                                      ELEMENT_SIZE(pool->ppp_index));
+                                      element_size(pool));
 
                pool->ppp_pools[p_idx][g_idx] = NULL;
 
@@ -404,7 +411,7 @@ static unsigned long pool_cleanup(void ***pools, int npools,
                                                __free_page(pools[i][j]);
                                        } else {
                                                OBD_FREE_LARGE(pools[i][j],
-                                                       ELEMENT_SIZE(pool->ppp_index));
+                                                       element_size(pool));
                                        }
                                        cleaned++;
                                }
@@ -527,13 +534,8 @@ static int pool_add_pages(int npages, struct ptlrpc_page_pool *page_pool)
        int i, j, rc = -ENOMEM;
        unsigned int pool_index = page_pool->ppp_index;
 
-       if (pool_index == PAGES_POOL) {
-               if (npages < POOL_INIT_SIZE >> PAGE_SHIFT)
-                       npages = POOL_INIT_SIZE >> PAGE_SHIFT;
-       } else {
-               if (npages < POOL_INIT_SIZE / ELEMENT_SIZE(pool_index))
-                       npages = POOL_INIT_SIZE / ELEMENT_SIZE(pool_index);
-       }
+       if (npages < POOL_INIT_SIZE / element_size(page_pool))
+               npages = POOL_INIT_SIZE / element_size(page_pool);
 
        mutex_lock(&page_pool->add_pages_mutex);
 
@@ -559,7 +561,7 @@ static int pool_add_pages(int npages, struct ptlrpc_page_pool *page_pool)
                                        __GFP_HIGHMEM);
                        else {
                                OBD_ALLOC_LARGE(pools[i][j],
-                                       ELEMENT_SIZE(pool_index));
+                                       element_size(page_pool));
                        }
                        if (pools[i][j] == NULL)
                                goto out_pools;
@@ -959,7 +961,7 @@ static bool __grow_pool_try(int needed, struct ptlrpc_page_pool *pool)
                CDEBUG(D_SEC,
                       "pool %d is %lu elements (size %d bytes), growing by %d items\n",
                        pool->ppp_index, pool->ppp_pages_short,
-                       ELEMENT_SIZE(pool->ppp_index), to_add);
+                       element_size(pool), to_add);
                /* we can't hold a spinlock over page allocation */
                rc = pool_add_pages(to_add, pool);
                if (rc == 0)