X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fobdclass%2Fcl_page.c;h=0a466e9041fc44ca9df1c8d6cdeb43d181805706;hb=58f3eb928f563f24f0b3ee41970fa23a59fdcb87;hp=18c5ad311e2c9d95be0a1582f1d1134d74e41559;hpb=19388c83fd9b430143387ba28276c68697bf369c;p=fs%2Flustre-release.git

diff --git a/lustre/obdclass/cl_page.c b/lustre/obdclass/cl_page.c
index 18c5ad3..0a466e9 100644
--- a/lustre/obdclass/cl_page.c
+++ b/lustre/obdclass/cl_page.c
@@ -219,14 +219,12 @@ void cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
                 for (i = 0, j = 0; i < nr; ++i) {
                         page = pvec[i];
                         pvec[i] = NULL;
+
+                        LASSERT(page->cp_type == CPT_CACHEABLE);
                         if (page->cp_index > end)
                                 break;
                         if (page->cp_state == CPS_FREEING)
                                 continue;
-                        if (page->cp_type == CPT_TRANSIENT) {
-                                /* God, we found a transient page!*/
-                                continue;
-                        }
 
                         slice = cl_page_at_trusted(page, dtype);
                         /*
@@ -357,8 +355,7 @@ static int cl_page_alloc(const struct lu_env *env, struct cl_object *o,
                                 err = o->co_ops->coo_page_init(env, o, page,
                                                                vmpage);
                                 if (err != NULL) {
-                                        cl_page_state_set_trust(page,
-                                                                CPS_FREEING);
+                                        cl_page_delete0(env, page, 0);
                                         cl_page_free(env, page);
                                         page = err;
                                         break;
@@ -398,7 +395,7 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
                                      enum cl_page_type type,
                                      struct cl_page *parent)
 {
-        struct cl_page          *page;
+        struct cl_page          *page = NULL;
         struct cl_page          *ghost = NULL;
         struct cl_object_header *hdr;
         struct cl_site          *site = cl_object_site(o);
@@ -412,7 +409,7 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
         hdr = cl_object_header(o);
         cfs_atomic_inc(&site->cs_pages.cs_lookup);
 
-        CDEBUG(D_PAGE, "%lu@"DFID" %p %lu %d\n",
+        CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
                idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
         /* fast path. */
         if (type == CPT_CACHEABLE) {
@@ -431,11 +428,8 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
                              cl_page_vmpage(env, page) == vmpage &&
                              (void *)radix_tree_lookup(&hdr->coh_tree,
                                                        idx) == page));
-        } else {
-                cfs_spin_lock(&hdr->coh_page_guard);
-                page = cl_page_lookup(hdr, idx);
-                cfs_spin_unlock(&hdr->coh_page_guard);
         }
+
         if (page != NULL) {
                 cfs_atomic_inc(&site->cs_pages.cs_hit);
                 RETURN(page);
@@ -445,6 +439,16 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
         err = cl_page_alloc(env, o, idx, vmpage, type, &page);
         if (err != 0)
                 RETURN(page);
+
+        if (type == CPT_TRANSIENT) {
+                if (parent) {
+                        LASSERT(page->cp_parent == NULL);
+                        page->cp_parent = parent;
+                        parent->cp_child = page;
+                }
+                RETURN(page);
+        }
+
         /*
          * XXX optimization: use radix_tree_preload() here, and change tree
         * gfp mask to GFP_KERNEL in cl_object_header_init().
@@ -467,27 +471,8 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
                  * which is very useful during diagnosing and debugging.
                  */
                 page = ERR_PTR(err);
-                if (err == -EEXIST) {
-                        /*
-                         * XXX in case of a lookup for CPT_TRANSIENT page,
-                         * nothing protects a CPT_CACHEABLE page from being
-                         * concurrently moved into CPS_FREEING state.
-                         */
-                        page = cl_page_lookup(hdr, idx);
-                        PASSERT(env, page, page != NULL);
-                        if (page->cp_type == CPT_TRANSIENT &&
-                            type == CPT_CACHEABLE) {
-                                /* XXX: We should make sure that inode sem
-                                 * keeps being held in the lifetime of
-                                 * transient pages, so it is impossible to
-                                 * have conflicting transient pages.
-                                 */
-                                cfs_spin_unlock(&hdr->coh_page_guard);
-                                cl_page_put(env, page);
-                                cfs_spin_lock(&hdr->coh_page_guard);
-                                page = ERR_PTR(-EBUSY);
-                        }
-                }
+                CL_PAGE_DEBUG(D_ERROR, env, ghost,
+                              "fail to insert into radix tree: %d\n", err);
         } else {
                 if (parent) {
                         LASSERT(page->cp_parent == NULL);
                         page->cp_parent = parent;
                         parent->cp_child = page;
@@ -1159,23 +1144,25 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
         cl_page_export(env, pg, 0);
         cl_page_state_set0(env, pg, CPS_FREEING);
 
-        if (!radix)
-                /*
-                 * !radix means that @pg is not yet in the radix tree, skip
-                 * removing it.
-                 */
-                tmp = pg->cp_child;
-        for (; tmp != NULL; tmp = tmp->cp_child) {
-                void *value;
-                struct cl_object_header *hdr;
-
-                hdr = cl_object_header(tmp->cp_obj);
-                cfs_spin_lock(&hdr->coh_page_guard);
-                value = radix_tree_delete(&hdr->coh_tree, tmp->cp_index);
-                PASSERT(env, tmp, value == tmp);
-                PASSERT(env, tmp, hdr->coh_pages > 0);
-                hdr->coh_pages--;
-                cfs_spin_unlock(&hdr->coh_page_guard);
+        if (tmp->cp_type == CPT_CACHEABLE) {
+                if (!radix)
+                        /* !radix means that @pg is not yet in the radix tree,
+                         * skip removing it.
+                         */
+                        tmp = pg->cp_child;
+                for (; tmp != NULL; tmp = tmp->cp_child) {
+                        void *value;
+                        struct cl_object_header *hdr;
+
+                        hdr = cl_object_header(tmp->cp_obj);
+                        cfs_spin_lock(&hdr->coh_page_guard);
+                        value = radix_tree_delete(&hdr->coh_tree,
+                                                  tmp->cp_index);
+                        PASSERT(env, tmp, value == tmp);
+                        PASSERT(env, tmp, hdr->coh_pages > 0);
+                        hdr->coh_pages--;
+                        cfs_spin_unlock(&hdr->coh_page_guard);
+                }
         }
 
         CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_delete),
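
The net effect of the patch above is that CPT_TRANSIENT pages no longer pass through the per-object radix tree at all: cl_page_find0() returns them straight after allocation, cl_page_gang_lookup() can assert that every page it pulls out of the tree is CPT_CACHEABLE, and cl_page_delete0() only unhashes CPT_CACHEABLE pages. The standalone C sketch below illustrates that idea only; it is not part of the patch, and every toy_* name in it is hypothetical.

/*
 * A minimal, self-contained sketch, not part of the patch above: only
 * "cacheable" pages are tracked in the per-object index, while "transient"
 * pages bypass it entirely. All names (toy_page, toy_object, toy_page_find,
 * toy_page_delete) are invented for illustration; the real code paths are
 * cl_page_find0(), cl_page_gang_lookup() and cl_page_delete0().
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

enum toy_page_type { TOY_CACHEABLE, TOY_TRANSIENT };

struct toy_page {
        unsigned long      index;
        enum toy_page_type type;
};

/* Stand-in for the per-object radix tree: a tiny fixed-size index. */
struct toy_object {
        struct toy_page *tree[64];  /* indexed by page->index */
        int              nr_pages;  /* like cl_object_header::coh_pages */
};

/* Analogue of the patched cl_page_find0(): transient pages skip the tree. */
static struct toy_page *toy_page_find(struct toy_object *obj,
                                      unsigned long index,
                                      enum toy_page_type type)
{
        struct toy_page *page;

        if (type == TOY_CACHEABLE && obj->tree[index] != NULL)
                return obj->tree[index];        /* fast-path hit */

        page = malloc(sizeof(*page));
        if (page == NULL)
                return NULL;
        page->index = index;
        page->type  = type;

        if (type == TOY_TRANSIENT)
                return page;                    /* never inserted */

        obj->tree[index] = page;                /* cacheable: insert */
        obj->nr_pages++;
        return page;
}

/* Analogue of the patched cl_page_delete0(): only cacheable pages unhash. */
static void toy_page_delete(struct toy_object *obj, struct toy_page *page)
{
        if (page->type == TOY_CACHEABLE) {
                assert(obj->tree[page->index] == page);
                obj->tree[page->index] = NULL;
                obj->nr_pages--;
        }
        free(page);
}

int main(void)
{
        struct toy_object obj = { { NULL }, 0 };
        struct toy_page *c = toy_page_find(&obj, 3, TOY_CACHEABLE);
        struct toy_page *t = toy_page_find(&obj, 3, TOY_TRANSIENT);

        /* A gang lookup over obj.tree only ever sees the cacheable page,
         * which is what the new LASSERT in cl_page_gang_lookup() relies on. */
        printf("tracked pages: %d\n", obj.nr_pages);  /* prints 1, not 2 */

        toy_page_delete(&obj, t);
        toy_page_delete(&obj, c);
        printf("tracked pages: %d\n", obj.nr_pages);  /* prints 0 */
        return 0;
}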