summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1: 05b9da4)
When doing readahead, profiling shows a surprisingly large amount of time
(~5-8% of CPU) spent just looking up the page index from the lov layer.
In particular, this accounts for more than half the time spent
submitting pages:
- 14.14% cl_io_submit_rw
- 13.40% lov_io_submit
- 8.24% lov_page_index
This requires several indirections, all of which can be
avoided by moving this up to the cl_page struct.
Signed-off-by: Patrick Farrell <pfarrell@whamcloud.com>
Change-Id: I99bd7eb4d6556ac89c1aa9aeb4b7afc99774b212
Reviewed-on: https://review.whamcloud.com/35470
Reviewed-by: Wang Shilong <wshilong@ddn.com>
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Li Xi <lixi@ddn.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
struct lu_ref_link cp_queue_ref;
/** Assigned if doing a sync_io */
struct cl_sync_io *cp_sync_io;
struct lu_ref_link cp_queue_ref;
/** Assigned if doing a sync_io */
struct cl_sync_io *cp_sync_io;
+ /** layout_entry + stripe index, composed using lov_comp_index() */
+ unsigned int cp_lov_index;
struct lov_page {
struct cl_page_slice lps_cl;
struct lov_page {
struct cl_page_slice lps_cl;
- /** layout_entry + stripe index, composed using lov_comp_index() */
- unsigned int lps_index;
/* the layout gen when this page was created */
__u32 lps_layout_gen;
};
/* the layout gen when this page was created */
__u32 lps_layout_gen;
};
* Lov io operations.
*
*/
* Lov io operations.
*
*/
-
-int lov_page_index(const struct cl_page *page)
-{
- const struct cl_page_slice *slice;
- ENTRY;
-
- slice = cl_page_at(page, &lov_device_type);
- LASSERT(slice != NULL);
- LASSERT(slice->cpl_obj != NULL);
-
- RETURN(cl2lov_page(slice)->lps_index);
-}
-
static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
struct cl_io *io)
{
static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
struct cl_io *io)
{
cl_2queue_init(cl2q);
cl_page_list_move(&cl2q->c2_qin, qin, page);
cl_2queue_init(cl2q);
cl_page_list_move(&cl2q->c2_qin, qin, page);
- index = lov_page_index(page);
+ index = page->cp_lov_index;
cl_page_list_for_each_safe(page, tmp, qin) {
/* this page is not on this stripe */
cl_page_list_for_each_safe(page, tmp, qin) {
/* this page is not on this stripe */
- if (index != lov_page_index(page))
+ if (index != page->cp_lov_index)
continue;
cl_page_list_move(&cl2q->c2_qin, qin, page);
continue;
cl_page_list_move(&cl2q->c2_qin, qin, page);
cl_page_list_move(plist, queue, page);
cl_page_list_move(plist, queue, page);
- index = lov_page_index(page);
+ index = page->cp_lov_index;
while (queue->pl_nr > 0) {
page = cl_page_list_first(queue);
while (queue->pl_nr > 0) {
page = cl_page_list_first(queue);
- if (index != lov_page_index(page))
+ if (index != page->cp_lov_index)
break;
cl_page_list_move(plist, queue, page);
break;
cl_page_list_move(plist, queue, page);
fio = &ios->cis_io->u.ci_fault;
lio = cl2lov_io(env, ios);
fio = &ios->cis_io->u.ci_fault;
lio = cl2lov_io(env, ios);
- sub = lov_sub_get(env, lio, lov_page_index(fio->ft_page));
+ sub = lov_sub_get(env, lio, fio->ft_page->cp_lov_index);
sub->sub_io.u.ci_fault.ft_nob = fio->ft_nob;
RETURN(lov_io_start(env, ios));
sub->sub_io.u.ci_fault.ft_nob = fio->ft_nob;
RETURN(lov_io_start(env, ios));
struct lov_page *lp = cl2lov_page(slice);
return (*printer)(env, cookie,
struct lov_page *lp = cl2lov_page(slice);
return (*printer)(env, cookie,
- LUSTRE_LOV_NAME"-page@%p, comp index: %x, gen: %u\n",
- lp, lp->lps_index, lp->lps_layout_gen);
+ LUSTRE_LOV_NAME"-page@%p, gen: %u\n",
+ lp, lp->lps_layout_gen);
}
static const struct cl_page_operations lov_comp_page_ops = {
}
static const struct cl_page_operations lov_comp_page_ops = {
rc = lov_stripe_offset(loo->lo_lsm, entry, offset, stripe, &suboff);
LASSERT(rc == 0);
rc = lov_stripe_offset(loo->lo_lsm, entry, offset, stripe, &suboff);
LASSERT(rc == 0);
- lpg->lps_index = lov_comp_index(entry, stripe);
+ page->cp_lov_index = lov_comp_index(entry, stripe);
lpg->lps_layout_gen = loo->lo_lsm->lsm_layout_gen;
cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_comp_page_ops);
lpg->lps_layout_gen = loo->lo_lsm->lsm_layout_gen;
cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_comp_page_ops);
- sub = lov_sub_get(env, lio, lpg->lps_index);
+ sub = lov_sub_get(env, lio, page->cp_lov_index);
if (IS_ERR(sub))
RETURN(PTR_ERR(sub));
if (IS_ERR(sub))
RETURN(PTR_ERR(sub));
+ page->cp_lov_index = ~0;
cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_empty_page_ops);
addr = kmap(page->cp_vmpage);
memset(addr, 0, cl_page_size(obj));
cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_empty_page_ops);
addr = kmap(page->cp_vmpage);
memset(addr, 0, cl_page_size(obj));