cfs_page_t *vmpage)
{
struct lov_object *loo = cl2lov(obj);
- struct lov_io *lio = lov_env_io(env);
- int result;
-
- loff_t offset;
- int stripe;
- obd_off suboff;
- struct cl_page *subpage;
- struct cl_object *subobj;
struct lov_layout_raid0 *r0 = lov_r0(loo);
- struct lov_io_sub *sub;
-
+ struct lov_io *lio = lov_env_io(env);
+ struct cl_page *subpage;
+ struct cl_object *subobj;
+ struct lov_io_sub *sub;
+ struct lov_page *lpg;
+ struct cl_page *result;
+ loff_t offset;
+ obd_off suboff;
+ int stripe;
+ int rc;
ENTRY;
offset = cl_offset(obj, page->cp_index);
stripe = lov_stripe_number(r0->lo_lsm, offset);
LASSERT(stripe < r0->lo_nr);
- result = lov_stripe_offset(r0->lo_lsm, offset, stripe,
+ rc = lov_stripe_offset(r0->lo_lsm, offset, stripe,
&suboff);
- LASSERT(result == 0);
+ LASSERT(rc == 0);
- subobj = lovsub2cl(r0->lo_sub[stripe]);
- sub = lov_sub_get(env, lio, stripe);
+ OBD_SLAB_ALLOC_PTR_GFP(lpg, lov_page_kmem, CFS_ALLOC_IO);
+ if (lpg == NULL)
+ GOTO(out, result = ERR_PTR(-ENOMEM));
+
+ lpg->lps_invalid = 1;
+ cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_page_ops);
+
+ sub = lov_sub_get(env, lio, stripe);
if (IS_ERR(sub))
- GOTO(out, result = PTR_ERR(sub));
+ GOTO(out, result = (struct cl_page *)sub);
- subpage = cl_page_find(sub->sub_env, subobj,
- cl_index(subobj, suboff), vmpage,
- page->cp_type);
+ subobj = lovsub2cl(r0->lo_sub[stripe]);
+ subpage = cl_page_find_sub(sub->sub_env, subobj,
+ cl_index(subobj, suboff), vmpage, page);
lov_sub_put(sub);
- if (!IS_ERR(subpage)) {
- struct lov_page *lpg;
-
- OBD_SLAB_ALLOC_PTR_GFP(lpg, lov_page_kmem, CFS_ALLOC_IO);
- if (lpg == NULL) {
- cl_page_put(env, subpage);
- GOTO(out, result = -ENOMEM);
- }
-
- if (subpage->cp_parent != NULL) {
- /*
- * This is only possible when TRANSIENT page
- * is being created, and CACHEABLE sub-page
- * (attached to already existing top-page) has
- * been found. Tell cl_page_find() to use
- * existing page.
- */
- LASSERT(subpage->cp_type == CPT_CACHEABLE);
- LASSERT(page->cp_type == CPT_TRANSIENT);
- lpg->lps_invalid = 1;
- cl_page_put(env, subpage);
- /*
- * XXX This assumes that lov is in the topmost
- * cl_page.
- */
- result = PTR_ERR(cl_page_top(subpage));
- } else {
- lu_ref_add(&subpage->cp_reference, "lov", page);
- subpage->cp_parent = page;
- page->cp_child = subpage;
- }
- cl_page_slice_add(page, &lpg->lps_cl,
- obj, &lov_page_ops);
- } else
- result = PTR_ERR(subpage);
+ if (IS_ERR(subpage))
+ GOTO(out, result = subpage);
+
+ if (likely(subpage->cp_parent == page)) {
+ lu_ref_add(&subpage->cp_reference, "lov", page);
+ lpg->lps_invalid = 0;
+ result = NULL;
+ } else {
+ /*
+ * This is only possible when TRANSIENT page
+ * is being created, and CACHEABLE sub-page
+ * (attached to already existing top-page) has
+ * been found. Tell cl_page_find() to use
+ * existing page.
+ */
+ LASSERT(subpage->cp_type == CPT_CACHEABLE);
+ LASSERT(page->cp_type == CPT_TRANSIENT);
+ /* TODO: this is problematic, what if the page is being freed? */
+ result = cl_page_top(subpage);
+ cl_page_get(result);
+ cl_page_put(env, subpage);
+ }
+ EXIT;
out:
- RETURN(ERR_PTR(result));
+ return(result);
}
* for osc, in case of ...
*/
PASSERT(env, page, slice != NULL);
+
page = slice->cpl_page;
/*
* Can safely call cl_page_get_trust() under
*
* \see cl_object_find(), cl_lock_find()
*/
-struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
- pgoff_t idx, struct page *vmpage,
- enum cl_page_type type)
+static struct cl_page *cl_page_find0(const struct lu_env *env,
+ struct cl_object *o,
+ pgoff_t idx, struct page *vmpage,
+ enum cl_page_type type,
+ struct cl_page *parent)
{
struct cl_page *page;
struct cl_page *ghost = NULL;
* consistent even when VM locking is somehow busted,
* which is very useful during diagnosing and debugging.
*/
+ page = ERR_PTR(err);
if (err == -EEXIST) {
/*
* XXX in case of a lookup for CPT_TRANSIENT page,
spin_lock(&hdr->coh_page_guard);
page = ERR_PTR(-EBUSY);
}
- } else
- page = ERR_PTR(err);
- } else
+ }
+ } else {
+ if (parent) {
+ LASSERT(page->cp_parent == NULL);
+ page->cp_parent = parent;
+ parent->cp_child = page;
+ }
hdr->coh_pages++;
+ }
spin_unlock(&hdr->coh_page_guard);
if (unlikely(ghost != NULL)) {
}
RETURN(page);
}
+
+/**
+ * Find or create a top-level cl_page for \a vmpage at index \a idx.
+ *
+ * Thin public wrapper around cl_page_find0() that passes a NULL parent,
+ * so no cp_parent/cp_child linkage is established for the found page.
+ */
+struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
+			     pgoff_t idx, struct page *vmpage,
+			     enum cl_page_type type)
+{
+	return cl_page_find0(env, o, idx, vmpage, type, NULL);
+}
EXPORT_SYMBOL(cl_page_find);
+
+/**
+ * Find or create a sub-page of \a parent within object \a o.
+ *
+ * The sub-page inherits \a parent's cp_type. When cl_page_find0()
+ * inserts a new page it links it to \a parent (cp_parent/cp_child)
+ * while holding coh_page_guard. \a parent must be non-NULL: its
+ * cp_type is dereferenced here unconditionally.
+ */
+struct cl_page *cl_page_find_sub(const struct lu_env *env, struct cl_object *o,
+				 pgoff_t idx, struct page *vmpage,
+				 struct cl_page *parent)
+{
+	return cl_page_find0(env, o, idx, vmpage, parent->cp_type, parent);
+}
+EXPORT_SYMBOL(cl_page_find_sub);
+
static inline int cl_page_invariant(const struct cl_page *pg)
{
struct cl_object_header *header;