From 5fb29cd1e77ca7bee34791138b49f5dd50d5116b Mon Sep 17 00:00:00 2001
From: Wang Shilong
Date: Sat, 8 Feb 2020 10:19:07 +0800
Subject: [PATCH] LU-13134 obdclass: re-declare cl_page variables to reduce
 its size

With the following changes:

1) declare CPS_CACHED starting from 1, consistent with CPT_CACHED
2) add CPT_NR to indicate the maximum allowed cl_page_type value
3) reserve 4 bits for @cp_state, which allows up to 15 states
4) reserve 2 bits for @cp_type, which allows up to 3 cl_page types
5) use short int for @cp_kmem_index; another 16 bits remain reserved
   for future extension
6) move @cp_lov_index after @cp_ref to fill a 4-byte hole

After this patch, the size of struct cl_page is reduced from 336 bytes
to 320 bytes.

Change-Id: I92d5652a42850890ac6ce61e54884450dda25cc7
Signed-off-by: Wang Shilong
Reviewed-on: https://review.whamcloud.com/37480
Reviewed-by: Andreas Dilger
Reviewed-by: James Simmons
Tested-by: jenkins
Tested-by: Maloo
Reviewed-by: Oleg Drokin
---
 lustre/include/cl_object.h |  48 +++++++------
 lustre/obdclass/cl_page.c  | 171 ++++++++++++++++++++++-----------------------
 2 files changed, 112 insertions(+), 107 deletions(-)

diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h
index 668158b..b65e140 100644
--- a/lustre/include/cl_object.h
+++ b/lustre/include/cl_object.h
@@ -623,7 +623,7 @@ enum cl_page_state {
	 *
	 * \invariant cl_page::cp_owner == NULL && cl_page::cp_req == NULL
	 */
-	CPS_CACHED,
+	CPS_CACHED = 1,
	/**
	 * Page is exclusively owned by some cl_io. Page may end up in this
	 * state as a result of
@@ -715,8 +715,13 @@ enum cl_page_type {
	 * to vmpage which is not belonging to the same object of cl_page.
	 * it is used in DirectIO and lockless IO. */
	CPT_TRANSIENT,
+	CPT_NR
 };
 
+#define CP_STATE_BITS	4
+#define CP_TYPE_BITS	2
+#define CP_MAX_LAYER	3
+
 /**
  * Fields are protected by the lock on struct page, except for atomics and
  * immutables.
@@ -728,46 +733,47 @@ enum cl_page_type {
  */
 struct cl_page {
	/** Reference counter. */
-	atomic_t		 cp_ref;
-	/* which slab kmem index this memory allocated from */
-	int			 cp_kmem_index;
+	atomic_t		cp_ref;
+	/** layout_entry + stripe index, composed using lov_comp_index() */
+	unsigned int		cp_lov_index;
+	pgoff_t			cp_osc_index;
	/** An object this page is a part of. Immutable after creation. */
	struct cl_object	*cp_obj;
	/** vmpage */
	struct page		*cp_vmpage;
	/** Linkage of pages within group. Pages must be owned */
-	struct list_head	 cp_batch;
+	struct list_head	cp_batch;
	/** array of slices offset. Immutable after creation. */
-	unsigned char		 cp_layer_offset[3];
+	unsigned char		cp_layer_offset[CP_MAX_LAYER]; /* 24 bits */
	/** current slice index */
-	unsigned char		 cp_layer_count:2;
+	unsigned char		cp_layer_count:2; /* 26 bits */
	/**
	 * Page state. This field is const to avoid accidental update, it is
	 * modified only internally within cl_page.c. Protected by a VM lock.
	 */
-	const enum cl_page_state cp_state;
+	enum cl_page_state	cp_state:CP_STATE_BITS; /* 30 bits */
	/**
	 * Page type. Only CPT_TRANSIENT is used so far. Immutable after
	 * creation.
	 */
-	enum cl_page_type	cp_type;
+	enum cl_page_type	cp_type:CP_TYPE_BITS; /* 32 bits */
+	/* which slab kmem index this memory allocated from */
+	short int		cp_kmem_index; /* 48 bits */
+	unsigned int		cp_unused1:16; /* 64 bits */
 
-	/**
-	 * Owning IO in cl_page_state::CPS_OWNED state. Sub-page can be owned
-	 * by sub-io. Protected by a VM lock.
-	 */
+	/**
+	 * Owning IO in cl_page_state::CPS_OWNED state. Sub-page can be owned
+	 * by sub-io. Protected by a VM lock.
+	 */
	struct cl_io		*cp_owner;
-	/** List of references to this page, for debugging. */
-	struct lu_ref		 cp_reference;
+	/** List of references to this page, for debugging. */
+	struct lu_ref		cp_reference;
	/** Link to an object, for debugging. */
-	struct lu_ref_link	 cp_obj_ref;
+	struct lu_ref_link	cp_obj_ref;
	/** Link to a queue, for debugging. */
-	struct lu_ref_link	 cp_queue_ref;
+	struct lu_ref_link	cp_queue_ref;
	/** Assigned if doing a sync_io */
-	struct cl_sync_io	*cp_sync_io;
-	/** layout_entry + stripe index, composed using lov_comp_index() */
-	unsigned int		 cp_lov_index;
-	pgoff_t			 cp_osc_index;
+	struct cl_sync_io	*cp_sync_io;
 };
 
 /**
diff --git a/lustre/obdclass/cl_page.c b/lustre/obdclass/cl_page.c
index c86b97d..9723b38 100644
--- a/lustre/obdclass/cl_page.c
+++ b/lustre/obdclass/cl_page.c
@@ -210,17 +210,6 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *cl_page,
	EXIT;
 }
 
-/**
- * Helper function updating page state. This is the only place in the code
- * where cl_page::cp_state field is mutated.
- */
-static inline void cl_page_state_set_trust(struct cl_page *page,
-					   enum cl_page_state state)
-{
-	/* bypass const. */
-	*(enum cl_page_state *)&page->cp_state = state;
-}
-
 static struct cl_page *__cl_page_alloc(struct cl_object *o)
 {
	int i = 0;
@@ -274,38 +263,45 @@ check:
	return cl_page;
 }
 
-struct cl_page *cl_page_alloc(const struct lu_env *env,
-		struct cl_object *o, pgoff_t ind, struct page *vmpage,
-		enum cl_page_type type)
+struct cl_page *cl_page_alloc(const struct lu_env *env, struct cl_object *o,
+			      pgoff_t ind, struct page *vmpage,
+			      enum cl_page_type type)
 {
-	struct cl_page *page;
+	struct cl_page *cl_page;
	struct lu_object_header *head;
 
	ENTRY;
-	page = __cl_page_alloc(o);
-	if (page != NULL) {
+	cl_page = __cl_page_alloc(o);
+	if (cl_page != NULL) {
		int result = 0;
-		atomic_set(&page->cp_ref, 1);
-		page->cp_obj = o;
+
+		/*
+		 * Please fix cl_page:cp_state/type declaration if
+		 * these assertions fail in the future.
+		 */
+		BUILD_BUG_ON((1 << CP_STATE_BITS) < CPS_NR); /* cp_state */
+		BUILD_BUG_ON((1 << CP_TYPE_BITS) < CPT_NR); /* cp_type */
+		atomic_set(&cl_page->cp_ref, 1);
+		cl_page->cp_obj = o;
		cl_object_get(o);
-		lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
-				     page);
-		page->cp_vmpage = vmpage;
-		cl_page_state_set_trust(page, CPS_CACHED);
-		page->cp_type = type;
-		INIT_LIST_HEAD(&page->cp_batch);
-		lu_ref_init(&page->cp_reference);
+		lu_object_ref_add_at(&o->co_lu, &cl_page->cp_obj_ref,
+				     "cl_page", cl_page);
+		cl_page->cp_vmpage = vmpage;
+		cl_page->cp_state = CPS_CACHED;
+		cl_page->cp_type = type;
+		INIT_LIST_HEAD(&cl_page->cp_batch);
+		lu_ref_init(&cl_page->cp_reference);
		head = o->co_lu.lo_header;
		list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) {
			if (o->co_ops->coo_page_init != NULL) {
-				result = o->co_ops->coo_page_init(env, o, page,
-								  ind);
+				result = o->co_ops->coo_page_init(env, o,
+							cl_page, ind);
				if (result != 0) {
-					cl_page_delete0(env, page);
-					cl_page_free(env, page, NULL);
-					page = ERR_PTR(result);
+					cl_page_delete0(env, cl_page);
+					cl_page_free(env, cl_page, NULL);
+					cl_page = ERR_PTR(result);
					break;
				}
			}
@@ -316,9 +312,9 @@ struct cl_page *cl_page_alloc(const struct lu_env *env,
			cs_pagestate_dec(o, CPS_CACHED);
		}
	} else {
-		page = ERR_PTR(-ENOMEM);
+		cl_page = ERR_PTR(-ENOMEM);
	}
-	RETURN(page);
+	RETURN(cl_page);
 }
 
 /**
@@ -383,62 +379,64 @@ static inline int cl_page_invariant(const struct cl_page *pg)
 }
 
 static void cl_page_state_set0(const struct lu_env *env,
-			       struct cl_page *page, enum cl_page_state state)
+			       struct cl_page *cl_page,
+			       enum cl_page_state state)
 {
-        enum cl_page_state old;
+	enum cl_page_state old;
 
-        /*
-         * Matrix of allowed state transitions [old][new], for sanity
-         * checking.
-         */
-        static const int allowed_transitions[CPS_NR][CPS_NR] = {
-                [CPS_CACHED] = {
-                        [CPS_CACHED] = 0,
-                        [CPS_OWNED] = 1, /* io finds existing cached page */
-                        [CPS_PAGEIN] = 0,
-                        [CPS_PAGEOUT] = 1, /* write-out from the cache */
-                        [CPS_FREEING] = 1, /* eviction on the memory pressure */
-                },
-                [CPS_OWNED] = {
-                        [CPS_CACHED] = 1, /* release to the cache */
-                        [CPS_OWNED] = 0,
-                        [CPS_PAGEIN] = 1, /* start read immediately */
-                        [CPS_PAGEOUT] = 1, /* start write immediately */
-                        [CPS_FREEING] = 1, /* lock invalidation or truncate */
-                },
-                [CPS_PAGEIN] = {
-                        [CPS_CACHED] = 1, /* io completion */
-                        [CPS_OWNED] = 0,
-                        [CPS_PAGEIN] = 0,
-                        [CPS_PAGEOUT] = 0,
-                        [CPS_FREEING] = 0,
-                },
-                [CPS_PAGEOUT] = {
-                        [CPS_CACHED] = 1, /* io completion */
-                        [CPS_OWNED] = 0,
-                        [CPS_PAGEIN] = 0,
-                        [CPS_PAGEOUT] = 0,
-                        [CPS_FREEING] = 0,
-                },
-                [CPS_FREEING] = {
-                        [CPS_CACHED] = 0,
-                        [CPS_OWNED] = 0,
-                        [CPS_PAGEIN] = 0,
-                        [CPS_PAGEOUT] = 0,
-                        [CPS_FREEING] = 0,
-                }
-        };
+	/*
+	 * Matrix of allowed state transitions [old][new], for sanity
+	 * checking.
+	 */
+	static const int allowed_transitions[CPS_NR][CPS_NR] = {
+		[CPS_CACHED] = {
+			[CPS_CACHED] = 0,
+			[CPS_OWNED] = 1, /* io finds existing cached page */
+			[CPS_PAGEIN] = 0,
+			[CPS_PAGEOUT] = 1, /* write-out from the cache */
+			[CPS_FREEING] = 1, /* eviction on the memory pressure */
+		},
+		[CPS_OWNED] = {
+			[CPS_CACHED] = 1, /* release to the cache */
+			[CPS_OWNED] = 0,
+			[CPS_PAGEIN] = 1, /* start read immediately */
+			[CPS_PAGEOUT] = 1, /* start write immediately */
+			[CPS_FREEING] = 1, /* lock invalidation or truncate */
+		},
+		[CPS_PAGEIN] = {
+			[CPS_CACHED] = 1, /* io completion */
+			[CPS_OWNED] = 0,
+			[CPS_PAGEIN] = 0,
+			[CPS_PAGEOUT] = 0,
+			[CPS_FREEING] = 0,
+		},
+		[CPS_PAGEOUT] = {
+			[CPS_CACHED] = 1, /* io completion */
+			[CPS_OWNED] = 0,
+			[CPS_PAGEIN] = 0,
+			[CPS_PAGEOUT] = 0,
+			[CPS_FREEING] = 0,
+		},
+		[CPS_FREEING] = {
+			[CPS_CACHED] = 0,
+			[CPS_OWNED] = 0,
+			[CPS_PAGEIN] = 0,
+			[CPS_PAGEOUT] = 0,
+			[CPS_FREEING] = 0,
+		}
+	};
 
-        ENTRY;
-        old = page->cp_state;
-        PASSERT(env, page, allowed_transitions[old][state]);
-        CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
-        PASSERT(env, page, page->cp_state == old);
-        PASSERT(env, page, equi(state == CPS_OWNED, page->cp_owner != NULL));
-
-        cs_pagestate_dec(page->cp_obj, page->cp_state);
-        cs_pagestate_inc(page->cp_obj, state);
-        cl_page_state_set_trust(page, state);
+	ENTRY;
+	old = cl_page->cp_state;
+	PASSERT(env, cl_page, allowed_transitions[old][state]);
+	CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d -> %d\n", old, state);
+	PASSERT(env, cl_page, cl_page->cp_state == old);
+	PASSERT(env, cl_page, equi(state == CPS_OWNED,
+				   cl_page->cp_owner != NULL));
+
+	cs_pagestate_dec(cl_page->cp_obj, cl_page->cp_state);
+	cs_pagestate_inc(cl_page->cp_obj, state);
+	cl_page->cp_state = state;
	EXIT;
 }
@@ -1217,6 +1215,7 @@ void cl_page_slice_add(struct cl_page *cl_page, struct cl_page_slice *slice,
				((char *)cl_page + sizeof(*cl_page));
 
	ENTRY;
+	LASSERT(cl_page->cp_layer_count < CP_MAX_LAYER);
	LASSERT(offset < (1 << sizeof(cl_page->cp_layer_offset[0]) * 8));
	cl_page->cp_layer_offset[cl_page->cp_layer_count++] = offset;
	slice->cpl_obj = obj;
-- 
1.8.3.1
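
The packing scheme above can be reproduced outside the Lustre tree. The
sketch below is illustrative only: it is not Lustre code, every name in
it (demo_page, DEMO_STATE_BITS, DS_*, DT_*) is hypothetical, and C11
_Static_assert stands in for the kernel's BUILD_BUG_ON(). Enum bit-field
layout is implementation-defined in C, but GCC and Clang pack the six
fields below into a single 8-byte word, matching the /* 24 bits */
through /* 64 bits */ annotations carried in the struct above.

	#include <stdio.h>

	#define DEMO_STATE_BITS	4	/* like CP_STATE_BITS */
	#define DEMO_TYPE_BITS	2	/* like CP_TYPE_BITS */

	/* states start from 1, mirroring CPS_CACHED = 1 above */
	enum demo_state {
		DS_CACHED = 1,
		DS_OWNED,
		DS_PAGEIN,
		DS_PAGEOUT,
		DS_FREEING,
		DS_NR			/* mirrors CPS_NR */
	};

	enum demo_type {
		DT_CACHED = 1,
		DT_TRANSIENT,
		DT_NR			/* mirrors the CPT_NR added here */
	};

	/* C11 stand-in for the BUILD_BUG_ON() checks in cl_page_alloc() */
	_Static_assert((1 << DEMO_STATE_BITS) >= DS_NR, "state too narrow");
	_Static_assert((1 << DEMO_TYPE_BITS) >= DT_NR, "type too narrow");

	/* hypothetical struct mirroring the bit-packed tail of cl_page */
	struct demo_page {
		unsigned char	layer_offset[3];	/* 24 bits */
		unsigned char	layer_count:2;		/* 26 bits */
		enum demo_state	state:DEMO_STATE_BITS;	/* 30 bits */
		enum demo_type	type:DEMO_TYPE_BITS;	/* 32 bits */
		short int	kmem_index;		/* 48 bits */
		unsigned int	unused1:16;		/* 64 bits */
	};

	int main(void)
	{
		struct demo_page p = {
			.state = DS_CACHED,
			.type = DT_TRANSIENT,
		};

		/* prints 8 with GCC/Clang on common 64-bit ABIs */
		printf("sizeof(struct demo_page) = %zu\n", sizeof(p));
		printf("state=%d type=%d\n", (int)p.state, (int)p.type);
		return 0;
	}

Assuming that layout, the saving reported in the commit message follows:
an int cp_kmem_index plus two full-width enum fields collapse into one
8-byte word shared with cp_layer_offset and cp_layer_count, and moving
cp_lov_index and cp_osc_index up fills the 4-byte hole behind cp_ref,
which is where the 16-byte drop from 336 to 320 bytes comes from. The
BUILD_BUG_ON() guards turn a future growth of CPS_NR or CPT_NR past the
reserved widths into a compile-time error instead of silent truncation.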