From 7273e6467670a6b22ae58be324290a0740134985 Mon Sep 17 00:00:00 2001
From: Patrick Farrell
Date: Wed, 20 Sep 2023 17:55:42 -0400
Subject: [PATCH] EX-8270 ptlrpc: replace ELEMENT_SIZE

The ELEMENT_SIZE macro is fine, but it takes a pool index and doesn't
handle the pool of order 0.  Change it to a function.

(This is marginally less efficient in one spot, since it replaces a
shift with a divide, but it should be just fine.)
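To illustrate, a minimal sketch of what the helper replaces ("size" is
a hypothetical local used only for this example, and PAGES_POOL is
assumed to be pool index 0, as the check removed from pool_add_pages()
below suggests):

	/* old: ELEMENT_SIZE(0) expands to 1 << (PPOOL_MIN_CHUNK_BITS - 1),
	 * not PAGE_SIZE, so callers had to special-case the pages pool:
	 */
	size = pool_index == PAGES_POOL ? PAGE_SIZE
					: ELEMENT_SIZE(pool_index);

	/* new: the index-0 special case lives in one place */
	size = element_size(pool);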
Test-Parameters: trivial
Signed-off-by: Patrick Farrell
Change-Id: I322037e50bbdb8e0274b37f82618b6907b6d2906
Reviewed-on: https://review.whamcloud.com/c/ex/lustre-release/+/52445
Tested-by: jenkins
Tested-by: Maloo
Reviewed-by: Andreas Dilger
---
 lustre/ptlrpc/sec_bulk.c | 32 +++++++++++++++++---------------
 1 file changed, 17 insertions(+), 15 deletions(-)

diff --git a/lustre/ptlrpc/sec_bulk.c b/lustre/ptlrpc/sec_bulk.c
index f70cfe6..7996083 100644
--- a/lustre/ptlrpc/sec_bulk.c
+++ b/lustre/ptlrpc/sec_bulk.c
@@ -54,7 +54,6 @@
 #define POOLS_COUNT (PPOOL_MAX_CHUNK_BITS - PPOOL_MIN_CHUNK_BITS + 1)
 #define PPOOL_ORDER_TO_INDEX(bits) ((bits) - PPOOL_MIN_CHUNK_BITS + 1)
 #define POOL_BITS(pool) ((pool) + PPOOL_MIN_CHUNK_BITS - 1)
-#define ELEMENT_SIZE(pool) (1 << (PPOOL_MIN_CHUNK_BITS + (pool) - 1))
 #define PAGES_TO_MiB(pages) ((pages) >> (20 - PAGE_SHIFT))
 #define MiB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
 /* deprecated - see pool_max_memory_mb below */
@@ -133,6 +132,14 @@ static struct ptlrpc_page_pool {
 	struct mutex add_pages_mutex;
 } **page_pools;
 
+static int element_size(struct ptlrpc_page_pool *pool)
+{
+	if (pool->ppp_index == 0)
+		return PAGE_SIZE;
+
+	return 1 << (PPOOL_MIN_CHUNK_BITS + pool->ppp_index - 1);
+}
+
 /*
  * Keep old name (encrypt_page_pool vs page_pool) for compatibility with user
  * tools pulling stats
@@ -205,7 +212,7 @@ int page_pools_seq_show(struct seq_file *m, void *v)
 		if (!pool->ppp_st_access)
 			continue;
 		spin_lock(&pool->ppp_lock);
-		seq_printf(m, " pool_%luk:\n"
+		seq_printf(m, " pool_%dk:\n"
			   " max_pages: %lu\n"
			   " max_pools: %u\n"
			   " total_pages: %lu\n"
@@ -223,8 +230,8 @@ int page_pools_seq_show(struct seq_file *m, void *v)
			   " max_waitqueue_depth: %u\n"
			   " max_wait_time_ms: %lld\n"
			   " out_of_mem: %lu\n",
-			   (pool_index ? ELEMENT_SIZE(pool_index - 10) :
-			    PAGE_SIZE >> 10),
+			   /* convert from bytes to KiB */
+			   element_size(pool) >> 10,
			   pool->ppp_max_pages,
			   pool->ppp_max_pools,
			   pool->ppp_total_pages,
@@ -279,7 +286,7 @@ static void pool_release_free_pages(long npages, struct ptlrpc_page_pool *pool)
 			__free_page(pool->ppp_pools[p_idx][g_idx]);
 		else
 			OBD_FREE_LARGE(pool->ppp_pools[p_idx][g_idx],
-				       ELEMENT_SIZE(pool->ppp_index));
+				       element_size(pool));
 
 		pool->ppp_pools[p_idx][g_idx] = NULL;
 
@@ -404,7 +411,7 @@ static unsigned long pool_cleanup(void ***pools, int npools,
 					__free_page(pools[i][j]);
 				} else {
 					OBD_FREE_LARGE(pools[i][j],
-						       ELEMENT_SIZE(pool->ppp_index));
+						       element_size(pool));
 				}
 				cleaned++;
 			}
@@ -527,13 +534,8 @@ static int pool_add_pages(int npages, struct ptlrpc_page_pool *page_pool)
 	int i, j, rc = -ENOMEM;
 	unsigned int pool_index = page_pool->ppp_index;
 
-	if (pool_index == PAGES_POOL) {
-		if (npages < POOL_INIT_SIZE >> PAGE_SHIFT)
-			npages = POOL_INIT_SIZE >> PAGE_SHIFT;
-	} else {
-		if (npages < POOL_INIT_SIZE / ELEMENT_SIZE(pool_index))
-			npages = POOL_INIT_SIZE / ELEMENT_SIZE(pool_index);
-	}
+	if (npages < POOL_INIT_SIZE / element_size(page_pool))
+		npages = POOL_INIT_SIZE / element_size(page_pool);
 
 	mutex_lock(&page_pool->add_pages_mutex);
 
@@ -559,7 +561,7 @@ static int pool_add_pages(int npages, struct ptlrpc_page_pool *page_pool)
 				__GFP_HIGHMEM);
 		else {
 			OBD_ALLOC_LARGE(pools[i][j],
-					ELEMENT_SIZE(pool_index));
+					element_size(page_pool));
 		}
 		if (pools[i][j] == NULL)
 			goto out_pools;
@@ -959,7 +961,7 @@ static bool __grow_pool_try(int needed, struct ptlrpc_page_pool *pool)
 	CDEBUG(D_SEC,
 	       "pool %d is %lu elements (size %d bytes), growing by %d items\n",
 	       pool->ppp_index, pool->ppp_pages_short,
-	       ELEMENT_SIZE(pool->ppp_index), to_add);
+	       element_size(pool), to_add);
 	/* we can't hold a spinlock over page allocation */
 	rc = pool_add_pages(to_add, pool);
 	if (rc == 0)
-- 
1.8.3.1