The cpl_index field is not necessary for the vvp layer,
since it is the same as the VM page index, and it has
already been duplicated into cp_lov_index for the lov layer.
So, by moving the osc usage of this index up into the
top-level cl_page (as cp_osc_index), we can save 24 bytes
per page (in the slices) by removing this field from
cl_page_slice, at the cost of 8 bytes in cl_page —
a net savings of 16 bytes per page.
After this patch, cl_page is reduced from 408 to
392 bytes.
Signed-off-by: Patrick Farrell <farr0186@gmail.com>
Change-Id: Ib4fd75f112afb816443799314cd2ef75ff4f4c84
Reviewed-on: https://review.whamcloud.com/37417
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Wang Shilong <wshilong@ddn.com>
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
struct cl_sync_io *cp_sync_io;
/** layout_entry + stripe index, composed using lov_comp_index() */
unsigned int cp_lov_index;
struct cl_sync_io *cp_sync_io;
/** layout_entry + stripe index, composed using lov_comp_index() */
unsigned int cp_lov_index;
*/
struct cl_page_slice {
struct cl_page *cpl_page;
*/
struct cl_page_slice {
struct cl_page *cpl_page;
/**
* Object slice corresponding to this page slice. Immutable after
* creation.
/**
* Object slice corresponding to this page slice. Immutable after
* creation.
}
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
}
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
- struct cl_object *obj, pgoff_t index,
const struct cl_page_operations *ops);
void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
struct cl_object *obj,
const struct cl_page_operations *ops);
void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
struct cl_object *obj,
static inline pgoff_t osc_index(struct osc_page *opg)
{
static inline pgoff_t osc_index(struct osc_page *opg)
{
- return opg->ops_cl.cpl_index;
+ return opg->ops_cl.cpl_page->cp_osc_index;
}
static inline struct cl_page *oap2cl_page(struct osc_async_page *oap)
}
static inline struct cl_page *oap2cl_page(struct osc_async_page *oap)
static inline pgoff_t vvp_index(struct vvp_page *vpg)
{
static inline pgoff_t vvp_index(struct vvp_page *vpg)
{
- return vpg->vpg_cl.cpl_index;
+ return vpg->vpg_page->index;
atomic_inc(&page->cp_ref);
SetPagePrivate(vmpage);
vmpage->private = (unsigned long)page;
atomic_inc(&page->cp_ref);
SetPagePrivate(vmpage);
vmpage->private = (unsigned long)page;
- cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
+ cl_page_slice_add(page, &vpg->vpg_cl, obj,
&vvp_page_ops);
} else {
struct vvp_object *clobj = cl2vvp(obj);
&vvp_page_ops);
} else {
struct vvp_object *clobj = cl2vvp(obj);
- cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
+ cl_page_slice_add(page, &vpg->vpg_cl, obj,
&vvp_transient_page_ops);
atomic_inc(&clobj->vob_transient_pages);
}
&vvp_transient_page_ops);
atomic_inc(&clobj->vob_transient_pages);
}
page->cp_lov_index = lov_comp_index(entry, stripe);
lpg->lps_layout_gen = loo->lo_lsm->lsm_layout_gen;
page->cp_lov_index = lov_comp_index(entry, stripe);
lpg->lps_layout_gen = loo->lo_lsm->lsm_layout_gen;
- cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_comp_page_ops);
+ cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_comp_page_ops);
sub = lov_sub_get(env, lio, page->cp_lov_index);
if (IS_ERR(sub))
sub = lov_sub_get(env, lio, page->cp_lov_index);
if (IS_ERR(sub))
ENTRY;
page->cp_lov_index = ~0;
ENTRY;
page->cp_lov_index = ~0;
- cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_empty_page_ops);
+ cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_empty_page_ops);
addr = kmap(page->cp_vmpage);
memset(addr, 0, cl_page_size(obj));
kunmap(page->cp_vmpage);
addr = kmap(page->cp_vmpage);
memset(addr, 0, cl_page_size(obj));
kunmap(page->cp_vmpage);
* \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
*/
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
* \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
*/
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
- struct cl_object *obj, pgoff_t index,
const struct cl_page_operations *ops)
{
ENTRY;
list_add_tail(&slice->cpl_linkage, &page->cp_layers);
slice->cpl_obj = obj;
const struct cl_page_operations *ops)
{
ENTRY;
list_add_tail(&slice->cpl_linkage, &page->cp_layers);
slice->cpl_obj = obj;
- slice->cpl_index = index;
slice->cpl_ops = ops;
slice->cpl_page = page;
EXIT;
slice->cpl_ops = ops;
slice->cpl_page = page;
EXIT;
* wait_on_bit() interface to wait for the bit to be clear.
*/
ep->ep_lock = 0;
* wait_on_bit() interface to wait for the bit to be clear.
*/
ep->ep_lock = 0;
- cl_page_slice_add(page, &ep->ep_cl, obj, index, &echo_page_ops);
+ cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
atomic_inc(&eco->eo_npages);
RETURN(0);
}
atomic_inc(&eco->eo_npages);
RETURN(0);
}
return result;
opg->ops_srvlock = osc_io_srvlock(oio);
return result;
opg->ops_srvlock = osc_io_srvlock(oio);
- cl_page_slice_add(page, &opg->ops_cl, obj, index,
- &osc_page_ops);
+ cl_page_slice_add(page, &opg->ops_cl, obj, &osc_page_ops);
+ page->cp_osc_index = index;
/* reserve an LRU space for this page */
/* reserve an LRU space for this page */