From: Patrick Farrell
Date: Sat, 16 Sep 2023 03:58:38 +0000 (-0400)
Subject: EX-8270 ptlrpc: start removing 'enc' from pool
X-Git-Url: https://git.whamcloud.com/?a=commitdiff_plain;h=426049b35e416af562376efc2a0ede9b3edc0b15;p=fs%2Flustre-release.git

EX-8270 ptlrpc: start removing 'enc' from pool

Pools are no longer encryption page pools, start renaming them
accordingly.

(The 'epp' naming in the struct has been left for the next patch.)

Test-Parameters: trivial
Signed-off-by: Patrick Farrell
Change-Id: Iba3c98641e24173d95bf8bcf0df2424bbabf3ef9
Reviewed-on: https://review.whamcloud.com/c/ex/lustre-release/+/52430
Tested-by: jenkins
Tested-by: Maloo
Reviewed-by: Andreas Dilger
---

diff --git a/lustre/include/lustre_sec.h b/lustre/include/lustre_sec.h
index 2d7fa68..9d1aa95 100644
--- a/lustre/include/lustre_sec.h
+++ b/lustre/include/lustre_sec.h
@@ -1164,15 +1164,15 @@ int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
 
 /* bulk security api */
 #define PAGES_POOL 0
-int sptlrpc_enc_pool_add_user(void);
-int sptlrpc_enc_pool_del_user(void);
-int sptlrpc_enc_pool_get_desc_pages(struct ptlrpc_bulk_desc *desc);
-int sptlrpc_enc_pool_get_pages_array(struct page **pa, unsigned int count);
-int sptlrpc_enc_pool_get_pages(void **buf, unsigned int order);
-void sptlrpc_enc_pool_put_desc_pages(struct ptlrpc_bulk_desc *desc);
-void sptlrpc_enc_pool_put_pages_array(struct page **pa, unsigned int count);
-void sptlrpc_enc_pool_put_pages(void *buf, unsigned int order);
-int sptlrpc_enc_pool_get_free_pages(unsigned int pool);
+int sptlrpc_pool_add_user(void);
+int sptlrpc_pool_del_user(void);
+int sptlrpc_pool_get_desc_pages(struct ptlrpc_bulk_desc *desc);
+int sptlrpc_pool_get_pages_array(struct page **pa, unsigned int count);
+int sptlrpc_pool_get_pages(void **buf, unsigned int order);
+void sptlrpc_pool_put_desc_pages(struct ptlrpc_bulk_desc *desc);
+void sptlrpc_pool_put_pages_array(struct page **pa, unsigned int count);
+void sptlrpc_pool_put_pages(void *buf, unsigned int order);
+int sptlrpc_pool_get_free_pages(unsigned int pool);
 int pool_is_at_full_capacity(void);
 
 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
diff --git a/lustre/llite/dir.c b/lustre/llite/dir.c
index 3983265..3ea4dbd 100644
--- a/lustre/llite/dir.c
+++ b/lustre/llite/dir.c
@@ -2482,7 +2482,7 @@ out_detach:
		rc = llcrypt_ioctl_add_key(file, (void __user *)arg);
 #ifdef CONFIG_LL_ENCRYPTION
		if (!rc)
-			sptlrpc_enc_pool_add_user();
+			sptlrpc_pool_add_user();
 #endif
		return rc;
	case LL_IOC_REMOVE_ENCRYPTION_KEY:
@@ -2491,7 +2491,7 @@ out_detach:
		rc = llcrypt_ioctl_remove_key(file, (void __user *)arg);
 #ifdef CONFIG_LL_ENCRYPTION
		if (!rc)
-			sptlrpc_enc_pool_del_user();
+			sptlrpc_pool_del_user();
 #endif
		return rc;
	case LL_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
@@ -2501,7 +2501,7 @@ out_detach:
						 (void __user *)arg);
 #ifdef CONFIG_LL_ENCRYPTION
		if (!rc)
-			sptlrpc_enc_pool_del_user();
+			sptlrpc_pool_del_user();
 #endif
		return rc;
	case LL_IOC_GET_ENCRYPTION_KEY_STATUS:
diff --git a/lustre/osc/osc_compress.c b/lustre/osc/osc_compress.c
index 916035b..6f63c6b 100644
--- a/lustre/osc/osc_compress.c
+++ b/lustre/osc/osc_compress.c
@@ -245,7 +245,7 @@ void free_cpga(struct brw_page **cpga, u32 page_count)
			continue;
 
		if (cpga[i]->bp_cmp_chunk)
-			sptlrpc_enc_pool_put_pages(&cpga[i]->bp_cmp_chunk,
+			sptlrpc_pool_put_pages(&cpga[i]->bp_cmp_chunk,
						   cpga[i]->bp_cmp_chunk_size);
 
		OBD_FREE(cpga[i], sizeof(**cpga));
@@ -351,8 +351,8 @@ int compress_request(const char *obd_name, struct obdo *oa,
	OBD_ALLOC(*cpga, *page_count * sizeof(**cpga));
	src_buf_bits = chunk_bits;
	dest_buf_bits = chunk_bits + 1;
-	sptlrpc_enc_pool_get_pages(&src, src_buf_bits);
-	sptlrpc_enc_pool_get_pages(&wrkmem, dest_buf_bits);
+	sptlrpc_pool_get_pages(&src, src_buf_bits);
+	sptlrpc_pool_get_pages(&wrkmem, dest_buf_bits);
 
	if (*cpga == NULL || wrkmem == NULL || src == NULL)
		GOTO(out, rc = -ENOMEM);
@@ -373,7 +373,7 @@ int compress_request(const char *obd_name, struct obdo *oa,
			merge_chunk(pga, chunk_start, pga_i + 1 - chunk_start,
				    src, &src_size);
			dst_size = 2 * chunk_size;
-			sptlrpc_enc_pool_get_pages(&dst, dest_buf_bits);
+			sptlrpc_pool_get_pages(&dst, dest_buf_bits);
			if (dst == NULL)
				GOTO(out, rc = -ENOMEM);
 
@@ -402,7 +402,7 @@ int compress_request(const char *obd_name, struct obdo *oa,
					   fill_bits);
 
			if (!done) {
-				sptlrpc_enc_pool_put_pages(&dst, dest_buf_bits);
+				sptlrpc_pool_put_pages(&dst, dest_buf_bits);
			} else {
				(*cpga)[cpga_i]->bp_cmp_chunk = dst;
				(*cpga)[cpga_i]->bp_cmp_chunk_size =
@@ -422,10 +422,10 @@ int compress_request(const char *obd_name, struct obdo *oa,
	       count);
 
 out:
	if (wrkmem != NULL)
-		sptlrpc_enc_pool_put_pages(&wrkmem, dest_buf_bits);
+		sptlrpc_pool_put_pages(&wrkmem, dest_buf_bits);
	if (src != NULL)
-		sptlrpc_enc_pool_put_pages(&src, src_buf_bits);
+		sptlrpc_pool_put_pages(&src, src_buf_bits);
 
	if (rc != 0 && *cpga != NULL)
		free_cpga(*cpga, *page_count);
@@ -535,9 +535,9 @@ int decompress_request(struct osc_brw_async_args *aa, int page_count)
			CDEBUG(D_SEC, "chunk_size: %i, pages_in_chunk: %i\n",
			       chunk_size, pages_in_chunk);
 
-			sptlrpc_enc_pool_get_pages((void **)&src,
+			sptlrpc_pool_get_pages((void **)&src,
						   buf_bits);
-			sptlrpc_enc_pool_get_pages((void **)&dst,
+			sptlrpc_pool_get_pages((void **)&dst,
						   buf_bits);
			if (src == NULL || dst == NULL)
				GOTO(out, rc = -ENOMEM);
@@ -575,10 +575,10 @@ int decompress_request(struct osc_brw_async_args *aa, int page_count)
	CDEBUG(D_SEC, "Decompressed %i pages (%i chunks)\n", page_count, count);
 
 out:
	if (src != NULL)
-		sptlrpc_enc_pool_put_pages(&src, buf_bits);
+		sptlrpc_pool_put_pages(&src, buf_bits);
	if (dst != NULL)
-		sptlrpc_enc_pool_put_pages(&dst, buf_bits);
+		sptlrpc_pool_put_pages(&dst, buf_bits);
 
	RETURN(rc);
 }
diff --git a/lustre/osc/osc_request.c b/lustre/osc/osc_request.c
index 69c6a00..12af7cf 100644
--- a/lustre/osc/osc_request.c
+++ b/lustre/osc/osc_request.c
@@ -1494,7 +1494,7 @@ static inline void osc_release_bounce_pages(struct brw_page **pga,
	}
 
	if (pa) {
-		sptlrpc_enc_pool_put_pages_array(pa, j);
+		sptlrpc_pool_put_pages_array(pa, j);
		OBD_FREE_PTR_ARRAY_LARGE(pa, page_count);
	}
 #endif
@@ -1621,7 +1621,7 @@ osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
			RETURN(-ENOMEM);
		}
 
-		rc = sptlrpc_enc_pool_get_pages_array(pa, page_count);
+		rc = sptlrpc_pool_get_pages_array(pa, page_count);
		if (rc) {
			CDEBUG(D_SEC,
			       "failed to allocate from enc pool: %d\n", rc);
@@ -1683,7 +1683,7 @@ retry_encrypt:
				goto retry_encrypt;
			}
 
			if (pa) {
-				sptlrpc_enc_pool_put_pages_array(pa + i,
+				sptlrpc_pool_put_pages_array(pa + i,
							page_count - i);
				OBD_FREE_PTR_ARRAY_LARGE(pa, page_count);
diff --git a/lustre/ptlrpc/client.c b/lustre/ptlrpc/client.c
index 46031ad..111ac32 100644
--- a/lustre/ptlrpc/client.c
+++ b/lustre/ptlrpc/client.c
@@ -292,7 +292,7 @@ void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
	LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
	LASSERT(desc->bd_frag_ops != NULL);
 
-	sptlrpc_enc_pool_put_desc_pages(desc);
+	sptlrpc_pool_put_desc_pages(desc);
 
	if (desc->bd_export)
		class_export_put(desc->bd_export);
@@ -1655,7 +1655,7 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
	/* do not try to go further if there is not enough memory in enc_pool */
	if (req->rq_sent && req->rq_bulk)
		if (req->rq_bulk->bd_iov_count >
-		    sptlrpc_enc_pool_get_free_pages(PAGES_POOL) &&
+		    sptlrpc_pool_get_free_pages(PAGES_POOL) &&
		    pool_is_at_full_capacity())
			RETURN(-ENOMEM);
 
diff --git a/lustre/ptlrpc/gss/gss_bulk.c b/lustre/ptlrpc/gss/gss_bulk.c
index 00db59f..a0be6d9 100644
--- a/lustre/ptlrpc/gss/gss_bulk.c
+++ b/lustre/ptlrpc/gss/gss_bulk.c
@@ -137,7 +137,7 @@ int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
		if (desc->bd_iov_count == 0)
			RETURN(0);
 
-		rc = sptlrpc_enc_pool_get_desc_pages(desc);
+		rc = sptlrpc_pool_get_desc_pages(desc);
		if (rc) {
			CERROR("bulk write: failed to allocate "
			       "encryption pages: %d\n", rc);
@@ -303,7 +303,7 @@ static int gss_prep_bulk(struct ptlrpc_bulk_desc *desc,
	if (desc->bd_iov_count == 0)
		return 0;
 
-	rc = sptlrpc_enc_pool_get_desc_pages(desc);
+	rc = sptlrpc_pool_get_desc_pages(desc);
	if (rc)
		return rc;
 
@@ -492,7 +492,7 @@ int gss_svc_wrap_bulk(struct ptlrpc_request *req,
			break;
		}
 
-		rc = sptlrpc_enc_pool_get_desc_pages(desc);
+		rc = sptlrpc_pool_get_desc_pages(desc);
		if (rc) {
			bsdv->bsd_flags |= BSD_FL_ERR;
			CERROR("bulk read: failed to allocate encryption "
diff --git a/lustre/ptlrpc/gss/sec_gss.c b/lustre/ptlrpc/gss/sec_gss.c
index aba31b4..4f2cae9 100644
--- a/lustre/ptlrpc/gss/sec_gss.c
+++ b/lustre/ptlrpc/gss/sec_gss.c
@@ -1114,7 +1114,7 @@ int gss_sec_create_common(struct gss_sec *gsec,
	}
 
	if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
-		sptlrpc_enc_pool_add_user();
+		sptlrpc_pool_add_user();
 
	CDEBUG(D_SEC, "create %s%s@%p\n", (svcctx ? "reverse " : ""),
	       policy->sp_name, gsec);
@@ -1138,7 +1138,7 @@ void gss_sec_destroy_common(struct gss_sec *gsec)
		class_import_put(sec->ps_import);
 
	if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
-		sptlrpc_enc_pool_del_user();
+		sptlrpc_pool_del_user();
 
	EXIT;
 }
diff --git a/lustre/ptlrpc/ptlrpc_internal.h b/lustre/ptlrpc/ptlrpc_internal.h
index dd1e6cd..263d306 100644
--- a/lustre/ptlrpc/ptlrpc_internal.h
+++ b/lustre/ptlrpc/ptlrpc_internal.h
@@ -291,8 +291,8 @@ int sptlrpc_plain_init(void);
 void sptlrpc_plain_fini(void);
 
 /* sec_bulk.c */
-int sptlrpc_enc_pool_init(void);
-void sptlrpc_enc_pool_fini(void);
+int sptlrpc_pool_init(void);
+void sptlrpc_pool_fini(void);
 int encrypt_page_pools_seq_show(struct seq_file *m, void *v);
 int page_pools_seq_show(struct seq_file *m, void *v);
 
diff --git a/lustre/ptlrpc/sec.c b/lustre/ptlrpc/sec.c
index ffdf75b..e5d2178 100644
--- a/lustre/ptlrpc/sec.c
+++ b/lustre/ptlrpc/sec.c
@@ -2720,7 +2720,7 @@ int sptlrpc_init(void)
	if (rc)
		goto out_gc;
 
-	rc = sptlrpc_enc_pool_init();
+	rc = sptlrpc_pool_init();
	if (rc)
		goto out_conf;
 
@@ -2743,7 +2743,7 @@ out_plain:
 out_null:
	sptlrpc_null_fini();
 out_pool:
-	sptlrpc_enc_pool_fini();
+	sptlrpc_pool_fini();
 out_conf:
	sptlrpc_conf_fini();
 out_gc:
@@ -2757,7 +2757,7 @@ void sptlrpc_fini(void)
	sptlrpc_lproc_fini();
	sptlrpc_plain_fini();
	sptlrpc_null_fini();
-	sptlrpc_enc_pool_fini();
+	sptlrpc_pool_fini();
	sptlrpc_conf_fini();
	sptlrpc_gc_fini();
 }
diff --git a/lustre/ptlrpc/sec_bulk.c b/lustre/ptlrpc/sec_bulk.c
index 84383b2..8a28056 100644
--- a/lustre/ptlrpc/sec_bulk.c
+++ b/lustre/ptlrpc/sec_bulk.c
@@ -62,7 +62,7 @@ MODULE_PARM_DESC(enc_pool_max_memory_mb,
		 "Encoding pool max memory (MB), 1/8 of total physical memory by default");
 
 /*
- * bulk encryption page pools
+ * lustre page pools
 */
 
 #define PTRS_PER_PAGE	(PAGE_SIZE / sizeof(void *))
@@ -73,7 +73,7 @@ MODULE_PARM_DESC(enc_pool_max_memory_mb,
 
 #define CACHE_QUIESCENT_PERIOD	(20)
 
-static struct ptlrpc_enc_page_pool {
+static struct ptlrpc_page_pool {
	unsigned long epp_max_pages;   /* maximum pages can hold, const */
	unsigned int epp_max_pools;    /* number of pools, const */
 
@@ -180,7 +180,7 @@ int encrypt_page_pools_seq_show(struct seq_file *m, void *v)
 int page_pools_seq_show(struct seq_file *m, void *v)
 {
	int pool_index;
-	struct ptlrpc_enc_page_pool *pool;
+	struct ptlrpc_page_pool *pool;
 
	seq_printf(m, "physical_pages: %lu\n"
		   "pages per pool: %lu\n\n"
@@ -235,11 +235,11 @@ int page_pools_seq_show(struct seq_file *m, void *v)
	return 0;
 }
 
-static void enc_pools_release_free_pages(long npages, unsigned int pool_idx)
+static void pools_release_free_pages(long npages, unsigned int pool_idx)
 {
	int p_idx, g_idx;
	int p_idx_max1, p_idx_max2;
-	struct ptlrpc_enc_page_pool *pool = page_pools[pool_idx];
+	struct ptlrpc_page_pool *pool = page_pools[pool_idx];
 
	LASSERT(npages > 0);
	LASSERT(npages <= pool->epp_free_pages);
@@ -291,11 +291,11 @@ static void enc_pools_release_free_pages(long npages, unsigned int pool_idx)
 /*
  * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
  */
-static unsigned long enc_pools_shrink_count(struct shrinker *s,
+static unsigned long pools_shrink_count(struct shrinker *s,
					    struct shrink_control *sc)
 {
	unsigned int pool_index = SEEKS_TO_INDEX(s);
-	struct ptlrpc_enc_page_pool *pool = page_pools[pool_index];
+	struct ptlrpc_page_pool *pool = page_pools[pool_index];
	/*
	 * if no pool access for a long time, we consider it's fully
	 * idle. A little race here is fine.
@@ -317,12 +317,12 @@ static unsigned long enc_pools_shrink_count(struct shrinker *s,
 /*
  * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
  */
-static unsigned long enc_pools_shrink_scan(struct shrinker *s,
+static unsigned long pools_shrink_scan(struct shrinker *s,
					   struct shrink_control *sc)
 {
	/* Get pool number passed as part of pools_shrinker_seeks value */
	unsigned int pool_index = SEEKS_TO_INDEX(s);
-	struct ptlrpc_enc_page_pool *pool = page_pools[pool_index];
+	struct ptlrpc_page_pool *pool = page_pools[pool_index];
 
	spin_lock(&pool->epp_lock);
	if (pool->epp_free_pages <= PTLRPC_MAX_BRW_PAGES)
@@ -331,7 +331,7 @@ static unsigned long enc_pools_shrink_scan(struct shrinker *s,
	sc->nr_to_scan = min_t(unsigned long, sc->nr_to_scan,
			       pool->epp_free_pages - PTLRPC_MAX_BRW_PAGES);
	if (sc->nr_to_scan > 0) {
-		enc_pools_release_free_pages(sc->nr_to_scan, pool_index);
+		pools_release_free_pages(sc->nr_to_scan, pool_index);
		CDEBUG(D_SEC, "released %ld pages, %ld left\n",
		       (long)sc->nr_to_scan, pool->epp_free_pages);
 
@@ -361,12 +361,12 @@ static unsigned long enc_pools_shrink_scan(struct shrinker *s,
  * could be called frequently for query (@nr_to_scan == 0).
  * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
  */
-static int enc_pools_shrink(struct shrinker *shrinker,
+static int pools_shrink(struct shrinker *shrinker,
			    struct shrink_control *sc)
 {
-	enc_pools_shrink_scan(shrinker, sc);
+	pools_shrink_scan(shrinker, sc);
 
-	return enc_pools_shrink_count(shrinker, sc);
+	return pools_shrink_count(shrinker, sc);
 }
 #endif /* HAVE_SHRINKER_COUNT */
 
@@ -379,7 +379,7 @@ int npages_to_npools(unsigned long npages)
 /*
  * return how many pages cleaned up.
  */
-static unsigned long enc_pools_cleanup(void ***pools, int npools, int pool_idx)
+static unsigned long pools_cleanup(void ***pools, int npools, int pool_idx)
 {
	unsigned long cleaned = 0;
	int i, j;
@@ -412,13 +412,13 @@ static unsigned long enc_pools_cleanup(void ***pools, int npools, int pool_idx)
  * we have options to avoid most memory copy with some tricks. but we choose
  * the simplest way to avoid complexity. It's not frequently called.
  */
-static void enc_pools_insert(void ***pools, int npools, int npages,
+static void pools_insert(void ***pools, int npools, int npages,
			     unsigned int pool_idx)
 {
	int freeslot;
	int op_idx, np_idx, og_idx, ng_idx;
	int cur_npools, end_npools;
-	struct ptlrpc_enc_page_pool *page_pool = page_pools[pool_idx];
+	struct ptlrpc_page_pool *page_pool = page_pools[pool_idx];
 
	LASSERT(npages > 0);
	LASSERT(page_pool->epp_total_pages+npages <= page_pool->epp_max_pages);
@@ -514,7 +514,7 @@ static int pool_add_pages(int npages, int pool_index)
	void ***pools;
	int npools, alloced = 0;
	int i, j, rc = -ENOMEM;
-	struct ptlrpc_enc_page_pool *page_pool = page_pools[pool_index];
+	struct ptlrpc_page_pool *page_pool = page_pools[pool_index];
 
	if (pool_index == PAGES_POOL) {
		if (npages < POOL_INIT_SIZE >> PAGE_SHIFT)
@@ -558,26 +558,26 @@ static int pool_add_pages(int npages, int pool_index)
	}
 
	LASSERT(alloced == npages);
-	enc_pools_insert(pools, npools, npages, pool_index);
+	pools_insert(pools, npools, npages, pool_index);
	CDEBUG(D_SEC, "added %d pages into pools\n", npages);
 
	OBD_FREE_PTR_ARRAY(pools, npools);
	rc = 0;
 
 out_pools:
	if (rc) {
-		enc_pools_cleanup(pools, npools, pool_index);
+		pools_cleanup(pools, npools, pool_index);
	}
 out:
	if (rc) {
		page_pool->epp_st_grow_fails++;
-		CERROR("Failed to allocate %d enc pages\n", npages);
+		CERROR("Failed to allocate %d pages\n", npages);
	}
	mutex_unlock(&page_pool->add_pages_mutex);
 
	return rc;
 }
 
-static inline void enc_pools_wakeup(unsigned int pool)
+static inline void pools_wakeup(unsigned int pool)
 {
	assert_spin_locked(&page_pools[pool]->epp_lock);
 
@@ -586,8 +586,8 @@ static int pool_add_pages(int npages, int pool_index)
		wake_up_all(&page_pools[pool]->epp_waitq);
 }
 
-static int enc_pools_should_grow(int page_needed, time64_t now,
-				 unsigned int pool_index)
+static int pools_should_grow(int page_needed, time64_t now,
+			     unsigned int pool_index)
 {
	/*
	 * don't grow if someone else is growing the pools right now,
@@ -620,14 +620,14 @@ static int enc_pools_should_grow(int page_needed, time64_t now,
 /*
  * Export the number of free pages in the pool
  */
-int sptlrpc_enc_pool_get_free_pages(unsigned int pool)
+int sptlrpc_pool_get_free_pages(unsigned int pool)
 {
	return page_pools[pool]->epp_free_pages;
 }
-EXPORT_SYMBOL(sptlrpc_enc_pool_get_free_pages);
+EXPORT_SYMBOL(sptlrpc_pool_get_free_pages);
 
 /*
- * Let outside world know if enc_pool full capacity is reached
+ * Let outside world know if pool full capacity is reached
 */
 int __pool_is_at_full_capacity(unsigned int pool)
 {
@@ -636,7 +636,7 @@ int __pool_is_at_full_capacity(unsigned int pool)
 }
 
 /*
- * Let outside world know if enc_pool full capacity is reached
+ * Let outside world know if pool full capacity is reached
 */
 int pool_is_at_full_capacity(void)
 {
@@ -666,11 +666,11 @@ static inline void **page_from_bufarray(void *array, int index)
 /*
  * we allocate the requested pages atomically.
 */
-static inline int __sptlrpc_enc_pool_get_pages(void *array, unsigned int count,
-					       unsigned int pool_idx,
-					       void **(*page_from)(void *, int))
+static inline int __sptlrpc_pool_get_pages(void *array, unsigned int count,
+					   unsigned int pool_idx,
+					   void **(*page_from)(void *, int))
 {
-	struct ptlrpc_enc_page_pool *page_pool = page_pools[pool_idx];
+	struct ptlrpc_page_pool *page_pool = page_pools[pool_idx];
	wait_queue_entry_t waitlink;
	unsigned long this_idle = -1;
	u64 tick_ns = 0;
@@ -697,7 +697,7 @@ again:
		page_pool->epp_st_missings++;
		page_pool->epp_pages_short += count;
 
-		if (enc_pools_should_grow(count, now, pool_idx)) {
+		if (pools_should_grow(count, now, pool_idx)) {
			page_pool->epp_growing = 1;
 
			spin_unlock(&page_pool->epp_lock);
@@ -708,7 +708,7 @@ again:
 
			page_pool->epp_growing = 0;
 
-			enc_pools_wakeup(pool_idx);
+			pools_wakeup(pool_idx);
		} else {
			if (page_pool->epp_growing) {
				if (++page_pool->epp_waitqlen >
@@ -797,7 +797,7 @@ out_unlock:
	return rc;
 }
 
-int sptlrpc_enc_pool_get_desc_pages(struct ptlrpc_bulk_desc *desc)
+int sptlrpc_pool_get_desc_pages(struct ptlrpc_bulk_desc *desc)
 {
	int rc;
 
@@ -813,7 +813,7 @@ int sptlrpc_enc_pool_get_desc_pages(struct ptlrpc_bulk_desc *desc)
	if (desc->bd_enc_vec == NULL)
		return -ENOMEM;
 
-	rc = __sptlrpc_enc_pool_get_pages((void *)desc, desc->bd_iov_count,
+	rc = __sptlrpc_pool_get_pages((void *)desc, desc->bd_iov_count,
				      PAGES_POOL, page_from_bulkdesc);
	if (rc) {
		OBD_FREE_LARGE(desc->bd_enc_vec,
@@ -823,30 +823,30 @@ int sptlrpc_enc_pool_get_desc_pages(struct ptlrpc_bulk_desc *desc)
	}
	return rc;
 }
-EXPORT_SYMBOL(sptlrpc_enc_pool_get_desc_pages);
+EXPORT_SYMBOL(sptlrpc_pool_get_desc_pages);
 
-int sptlrpc_enc_pool_get_pages_array(struct page **pa, unsigned int count)
+int sptlrpc_pool_get_pages_array(struct page **pa, unsigned int count)
 {
-	return __sptlrpc_enc_pool_get_pages((void *)pa, count, PAGES_POOL,
+	return __sptlrpc_pool_get_pages((void *)pa, count, PAGES_POOL,
					page_from_pagearray);
 }
-EXPORT_SYMBOL(sptlrpc_enc_pool_get_pages_array);
+EXPORT_SYMBOL(sptlrpc_pool_get_pages_array);
 
-int sptlrpc_enc_pool_get_pages(void **pages, unsigned int order)
+int sptlrpc_pool_get_pages(void **pages, unsigned int order)
 {
-	return __sptlrpc_enc_pool_get_pages((void *)pages, 0,
+	return __sptlrpc_pool_get_pages((void *)pages, 0,
					PPOOL_ORDER_TO_INDEX(order),
					page_from_bufarray);
 }
-EXPORT_SYMBOL(sptlrpc_enc_pool_get_pages);
+EXPORT_SYMBOL(sptlrpc_pool_get_pages);
 
-static int __sptlrpc_enc_pool_put_pages(void *array, unsigned int count,
+static int __sptlrpc_pool_put_pages(void *array, unsigned int count,
					unsigned int pool_idx,
					void **(*page_from)(void *, int))
 {
	int p_idx, g_idx;
	int i, rc = 0;
-	struct ptlrpc_enc_page_pool *page_pool;
+	struct ptlrpc_page_pool *page_pool;
 
	LASSERTF(pool_idx < POOLS_COUNT, "count %u, pool %u\n",
		 count, pool_idx);
@@ -884,21 +884,21 @@ static int __sptlrpc_enc_pool_put_pages(void *array, unsigned int count,
	}
 
	page_pool->epp_free_pages += count;
-	enc_pools_wakeup(pool_idx);
+	pools_wakeup(pool_idx);
 
 out_unlock:
	spin_unlock(&page_pool->epp_lock);
	return rc;
 }
 
-void sptlrpc_enc_pool_put_desc_pages(struct ptlrpc_bulk_desc *desc)
+void sptlrpc_pool_put_desc_pages(struct ptlrpc_bulk_desc *desc)
 {
	int rc;
 
	if (desc->bd_enc_vec == NULL)
		return;
 
-	rc = __sptlrpc_enc_pool_put_pages((void *)desc, desc->bd_iov_count,
+	rc = __sptlrpc_pool_put_pages((void *)desc, desc->bd_iov_count,
				      PAGES_POOL, page_from_bulkdesc);
	if (rc)
		CDEBUG(D_SEC, "error putting pages in enc pool: %d\n", rc);
@@ -908,29 +908,29 @@ void sptlrpc_enc_pool_put_desc_pages(struct ptlrpc_bulk_desc *desc)
	desc->bd_enc_vec = NULL;
 }
 
-void sptlrpc_enc_pool_put_pages_array(struct page **pa, unsigned int count)
+void sptlrpc_pool_put_pages_array(struct page **pa, unsigned int count)
 {
	int rc;
 
-	rc = __sptlrpc_enc_pool_put_pages((void *)pa, count, PAGES_POOL,
+	rc = __sptlrpc_pool_put_pages((void *)pa, count, PAGES_POOL,
				      page_from_pagearray);
 
	if (rc)
		CDEBUG(D_SEC, "error putting pages in enc pool: %d\n", rc);
 }
-EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages_array);
+EXPORT_SYMBOL(sptlrpc_pool_put_pages_array);
 
-void sptlrpc_enc_pool_put_pages(void *buf, unsigned int order)
+void sptlrpc_pool_put_pages(void *buf, unsigned int order)
 {
	int rc;
 
-	rc = __sptlrpc_enc_pool_put_pages(buf, 1,
+	rc = __sptlrpc_pool_put_pages(buf, 1,
				      PPOOL_ORDER_TO_INDEX(order),
				      page_from_bufarray);
	if (rc)
		CDEBUG(D_SEC, "error putting pages in enc pool: %d\n", rc);
 }
-EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages);
+EXPORT_SYMBOL(sptlrpc_pool_put_pages);
 
 
 /*
@@ -938,7 +938,7 @@ EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages);
 * initial pages in add_user() if current pools are empty, rest would be
 * handled by the pools's self-adaption.
 */
-int sptlrpc_enc_pool_add_user(void)
+int sptlrpc_pool_add_user(void)
 {
	int need_grow = 0;
 
@@ -956,20 +956,20 @@ int sptlrpc_enc_pool_add_user(void)
		spin_lock(&page_pools[PAGES_POOL]->epp_lock);
		page_pools[PAGES_POOL]->epp_growing = 0;
-		enc_pools_wakeup(PAGES_POOL);
+		pools_wakeup(PAGES_POOL);
		spin_unlock(&page_pools[PAGES_POOL]->epp_lock);
	}
 
	return 0;
 }
-EXPORT_SYMBOL(sptlrpc_enc_pool_add_user);
+EXPORT_SYMBOL(sptlrpc_pool_add_user);
 
-int sptlrpc_enc_pool_del_user(void)
+int sptlrpc_pool_del_user(void)
 {
	return 0;
 }
-EXPORT_SYMBOL(sptlrpc_enc_pool_del_user);
+EXPORT_SYMBOL(sptlrpc_pool_del_user);
 
-static inline void enc_pools_alloc(struct ptlrpc_enc_page_pool *pool)
+static inline void pools_alloc(struct ptlrpc_page_pool *pool)
 {
	LASSERT(pool->epp_max_pools);
	OBD_ALLOC_LARGE(pool->epp_pools,
@@ -977,7 +977,7 @@ static inline void enc_pools_alloc(struct ptlrpc_enc_page_pool *pool)
			sizeof(*pool->epp_pools));
 }
 
-static inline void enc_pools_free(unsigned int i)
+static inline void pools_free(unsigned int i)
 {
	LASSERT(page_pools[i]->epp_max_pools);
	LASSERT(page_pools[i]->epp_pools);
@@ -987,11 +987,11 @@ static inline void enc_pools_free(unsigned int i)
			sizeof(*page_pools[i]->epp_pools));
 }
 
-int sptlrpc_enc_pool_init(void)
+int sptlrpc_pool_init(void)
 {
	int pool_index = 0, to_revert;
	int rc = 0;
-	struct ptlrpc_enc_page_pool *pool;
+	struct ptlrpc_page_pool *pool;
 
	ENTRY;
	OBD_ALLOC(page_pools, POOLS_COUNT * sizeof(*page_pools));
@@ -1020,15 +1020,15 @@ int sptlrpc_enc_pool_init(void)
		spin_lock_init(&pool->epp_lock);
		pool->epp_st_max_wait = ktime_set(0, 0);
 
-		enc_pools_alloc(pool);
+		pools_alloc(pool);
		CDEBUG(D_SEC, "Allocated pool %i\n", pool_index);
		if (pool->epp_pools == NULL)
			GOTO(fail, rc = -ENOMEM);
 #ifdef HAVE_SHRINKER_COUNT
-		pool->pool_shrinker.count_objects = enc_pools_shrink_count;
-		pool->pool_shrinker.scan_objects = enc_pools_shrink_scan;
+		pool->pool_shrinker.count_objects = pools_shrink_count;
+		pool->pool_shrinker.scan_objects = pools_shrink_scan;
 #else
-		pool->pool_shrinker.shrink = enc_pools_shrink;
+		pool->pool_shrinker.shrink = pools_shrink;
 #endif
		pool->pool_shrinker.seeks = INDEX_TO_SEEKS(pool_index);
		/* Pass pool number as part of pools_shrinker_seeks value */
@@ -1046,7 +1046,7 @@ fail:
		if (pool) {
			unregister_shrinker(&pool->pool_shrinker);
			if (pool->epp_pools)
-				enc_pools_free(pool_index);
+				pools_free(pool_index);
			OBD_FREE(pool, sizeof(**page_pools));
		}
	}
@@ -1055,11 +1055,11 @@ fail:
	RETURN(rc);
 }
 
-void sptlrpc_enc_pool_fini(void)
+void sptlrpc_pool_fini(void)
 {
	unsigned long cleaned, npools;
	int pool_index;
-	struct ptlrpc_enc_page_pool *pool;
+	struct ptlrpc_page_pool *pool;
 
	for (pool_index = 0; pool_index < POOLS_COUNT; pool_index++) {
		pool = page_pools[pool_index];
@@ -1068,11 +1068,11 @@ void sptlrpc_enc_pool_fini(void)
		LASSERT(pool->epp_total_pages == pool->epp_free_pages);
 
		npools = npages_to_npools(pool->epp_total_pages);
-		cleaned = enc_pools_cleanup(pool->epp_pools,
+		cleaned = pools_cleanup(pool->epp_pools,
					    npools, pool_index);
		LASSERT(cleaned == pool->epp_total_pages);
 
-		enc_pools_free(pool_index);
+		pools_free(pool_index);
 
		if (pool->epp_st_access > 0) {
			CDEBUG(D_SEC,
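
Illustrative usage note (not part of the patch): the rename is mechanical and
every signature stays the same, so callers only swap the old sptlrpc_enc_pool_*
names for the new sptlrpc_pool_* ones. A minimal C sketch follows, using the
declarations from the lustre/include/lustre_sec.h hunk above; the helper
alloc_and_release_bounce_pages is hypothetical and exists only for this example.

/*
 * Sketch of a caller adapted to the renamed pool API.  Only the
 * get/put function names change relative to the old enc_pool API.
 */
static int alloc_and_release_bounce_pages(struct page **pa,
					  unsigned int count)
{
	int rc;

	/* was: sptlrpc_enc_pool_get_pages_array(pa, count) */
	rc = sptlrpc_pool_get_pages_array(pa, count);
	if (rc)
		return rc;	/* pool could not supply the pages */

	/* ... use the pages as bulk/bounce buffers ... */

	/* was: sptlrpc_enc_pool_put_pages_array(pa, count) */
	sptlrpc_pool_put_pages_array(pa, count);
	return 0;
}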