From 9821754235e2417868dbaeb987b33e22c517346f Mon Sep 17 00:00:00 2001
From: Wang Shilong
Date: Sat, 8 Feb 2020 19:41:46 +0800
Subject: [PATCH] LU-13134 osc: re-declare ops_from/to to shrink osc_page

@ops_from and @ops_to are always within PAGE_SIZE, so PAGE_SHIFT bits
are enough to hold them; on the x86_64 platform this saves another
8 bytes.

Note that @ops_to used to be exclusive, so it could equal PAGE_SIZE.
This patch makes it inclusive, which means its maximum value is
PAGE_SIZE - 1, and length calculations must be adjusted accordingly.

After this patch, cl_page shrinks from 320 to 312 bytes, so 13
objects can be allocated from the slab pool for a 4K page.

Change-Id: Ic260c0a6580292301b5397276042e399c0f07e11
Signed-off-by: Wang Shilong
Reviewed-on: https://review.whamcloud.com/37487
Reviewed-by: Andreas Dilger
Reviewed-by: Neil Brown
Tested-by: jenkins
Tested-by: Maloo
Reviewed-by: Oleg Drokin
---
 lustre/include/lustre_osc.h |  8 ++++----
 lustre/osc/osc_cache.c      |  5 +++--
 lustre/osc/osc_page.c       | 24 ++++++++++++------------
 3 files changed, 19 insertions(+), 18 deletions(-)

diff --git a/lustre/include/lustre_osc.h b/lustre/include/lustre_osc.h
index ae7b06e..4072fbd 100644
--- a/lustre/include/lustre_osc.h
+++ b/lustre/include/lustre_osc.h
@@ -496,17 +496,17 @@ struct osc_page {
 	 * An offset within page from which next transfer starts. This is used
 	 * by cl_page_clip() to submit partial page transfers.
 	 */
-	int ops_from;
+	unsigned int ops_from:PAGE_SHIFT,
 	/**
-	 * An offset within page at which next transfer ends.
+	 * An offset within page at which next transfer ends(inclusive).
 	 *
 	 * \see osc_page::ops_from.
 	 */
-	int ops_to;
+	ops_to:PAGE_SHIFT,
 	/**
 	 * Boolean, true iff page is under transfer. Used for sanity checking.
 	 */
-	unsigned ops_transfer_pinned:1,
+	ops_transfer_pinned:1,
 	/**
 	 * in LRU?
 	 */
diff --git a/lustre/osc/osc_cache.c b/lustre/osc/osc_cache.c
index e2b971d6..795040d 100644
--- a/lustre/osc/osc_cache.c
+++ b/lustre/osc/osc_cache.c
@@ -2323,7 +2323,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
 
 	oap->oap_cmd = cmd;
 	oap->oap_page_off = ops->ops_from;
-	oap->oap_count = ops->ops_to - ops->ops_from;
+	oap->oap_count = ops->ops_to - ops->ops_from + 1;
 	/* No need to hold a lock here,
 	 * since this page is not in any list yet. */
 	oap->oap_async_flags = 0;
@@ -2584,7 +2584,8 @@ int osc_queue_sync_pages(const struct lu_env *env, const struct cl_io *io,
 
 		++page_count;
 		mppr <<= (page_count > mppr);
-		if (unlikely(opg->ops_from > 0 || opg->ops_to < PAGE_SIZE))
+		if (unlikely(opg->ops_from > 0 ||
+			     opg->ops_to < PAGE_SIZE - 1))
 			can_merge = false;
 	}
 
diff --git a/lustre/osc/osc_page.c b/lustre/osc/osc_page.c
index d981845..ae7d01b 100644
--- a/lustre/osc/osc_page.c
+++ b/lustre/osc/osc_page.c
@@ -214,7 +214,8 @@ static void osc_page_clip(const struct lu_env *env,
 	struct osc_async_page *oap = &opg->ops_oap;
 
 	opg->ops_from = from;
-	opg->ops_to = to;
+	/* argument @to is exclusive, but @ops_to is inclusive */
+	opg->ops_to = to - 1;
 	spin_lock(&oap->oap_lock);
 	oap->oap_async_flags |= ASYNC_COUNT_STABLE;
 	spin_unlock(&oap->oap_lock);
@@ -249,30 +250,29 @@ static const struct cl_page_operations osc_page_ops = {
 };
 
 int osc_page_init(const struct lu_env *env, struct cl_object *obj,
-		  struct cl_page *page, pgoff_t index)
+		  struct cl_page *cl_page, pgoff_t index)
 {
 	struct osc_object *osc = cl2osc(obj);
-	struct osc_page *opg = cl_object_page_slice(obj, page);
+	struct osc_page *opg = cl_object_page_slice(obj, cl_page);
 	struct osc_io *oio = osc_env_io(env);
 	int result;
 
 	opg->ops_from = 0;
-	opg->ops_to = PAGE_SIZE;
+	opg->ops_to = PAGE_SIZE - 1;
 
 	INIT_LIST_HEAD(&opg->ops_lru);
 
-	result = osc_prep_async_page(osc, opg, page->cp_vmpage,
+	result = osc_prep_async_page(osc, opg, cl_page->cp_vmpage,
 				     cl_offset(obj, index));
 	if (result != 0)
 		return result;
 
 	opg->ops_srvlock = osc_io_srvlock(oio);
 
-	cl_page_slice_add(page, &opg->ops_cl, obj, &osc_page_ops);
-	page->cp_osc_index = index;
-
+	cl_page_slice_add(cl_page, &opg->ops_cl, obj, &osc_page_ops);
+	cl_page->cp_osc_index = index;
 	/* reserve an LRU space for this page */
-	if (page->cp_type == CPT_CACHEABLE) {
+	if (cl_page->cp_type == CPT_CACHEABLE) {
 		result = osc_lru_alloc(env, osc_cli(osc), opg);
 		if (result == 0) {
 			result = radix_tree_preload(GFP_NOFS);
@@ -310,9 +310,9 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
 	LASSERT(oap->oap_async_flags & ASYNC_READY);
 	LASSERT(oap->oap_async_flags & ASYNC_COUNT_STABLE);
 
-	oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
-	oap->oap_page_off = opg->ops_from;
-	oap->oap_count = opg->ops_to - opg->ops_from;
+	oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
+	oap->oap_page_off = opg->ops_from;
+	oap->oap_count = opg->ops_to - opg->ops_from + 1;
 	oap->oap_brw_flags = OBD_BRW_SYNC | brw_flags;
 
 	if (oio->oi_cap_sys_resource) {
-- 
1.8.3.1
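
As a standalone sketch of the packing idea (not Lustre code; the toy_page
struct and the TOY_PAGE_* macros are invented here purely for illustration),
the following user-space C program shows how two page offsets fit into
PAGE_SHIFT-wide bitfields alongside a flag bit, and how the length of the
inclusive range [ops_from, ops_to] is computed as ops_to - ops_from + 1.

/*
 * Standalone illustration only -- not Lustre code.  toy_page mirrors what
 * the patch does to struct osc_page: both offsets always fit in PAGE_SHIFT
 * bits because they are < PAGE_SIZE, and ops_to is stored inclusively.
 */
#include <stdio.h>

#define TOY_PAGE_SHIFT	12
#define TOY_PAGE_SIZE	(1U << TOY_PAGE_SHIFT)

struct toy_page {
	unsigned int	ops_from:TOY_PAGE_SHIFT,	/* first byte of transfer */
			ops_to:TOY_PAGE_SHIFT,		/* last byte (inclusive) */
			ops_transfer_pinned:1;		/* flag packed in the same word */
};

/* Transfer length; the +1 accounts for ops_to being inclusive. */
static unsigned int toy_count(const struct toy_page *p)
{
	return p->ops_to - p->ops_from + 1;
}

int main(void)
{
	struct toy_page p = {
		.ops_from = 0,
		.ops_to = TOY_PAGE_SIZE - 1,	/* whole page */
	};

	printf("sizeof(struct toy_page) = %zu\n", sizeof(p));
	printf("whole-page count = %u\n", toy_count(&p));

	p.ops_from = 512;			/* partial clip: bytes [512, 1023] */
	p.ops_to = 1023;
	printf("partial count = %u\n", toy_count(&p));
	return 0;
}

Collapsing the two former int fields and the old unsigned flags word into a
single bitfield word is consistent with the 8-byte saving on x86_64 quoted in
the commit message.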