X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Flov%2Flov_page.c;h=f850589b82586c3c3cc9b446f89a4a2914b88f05;hb=09fe7811cb076a00a905b747d0048294a0ef2e4d;hp=3efbc4130823ac5a0247800d7ada20205b39ae1b;hpb=fbf5870b9848929d352460f1f005b79c0b5ccc5a;p=fs%2Flustre-release.git

diff --git a/lustre/lov/lov_page.c b/lustre/lov/lov_page.c
index 3efbc41..f850589 100644
--- a/lustre/lov/lov_page.c
+++ b/lustre/lov/lov_page.c
@@ -26,7 +26,7 @@
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  */
 /*
@@ -42,7 +42,9 @@
 
 #include "lov_cl_internal.h"
 
-/** \addtogroup lov lov @{ */
+/** \addtogroup lov
+ * @{
+ */
 
 /*****************************************************************************
  *
@@ -81,8 +83,9 @@ static void lov_page_fini(const struct lu_env *env,
         EXIT;
 }
 
-static void lov_page_own(const struct lu_env *env,
-                         const struct cl_page_slice *slice, struct cl_io *io)
+static int lov_page_own(const struct lu_env *env,
+                        const struct cl_page_slice *slice, struct cl_io *io,
+                        int nonblock)
 {
         struct lov_io *lio = lov_env_io(env);
         struct lov_io_sub *sub;
@@ -97,13 +100,13 @@ static void lov_page_own(const struct lu_env *env,
                 lov_sub_put(sub);
         } else
                 LBUG(); /* Arrgh */
-        EXIT;
+        RETURN(0);
 }
 
 static void lov_page_assume(const struct lu_env *env,
                             const struct cl_page_slice *slice, struct cl_io *io)
 {
-        return lov_page_own(env, slice, io);
+        lov_page_own(env, slice, io, 0);
 }
 
 static int lov_page_print(const struct lu_env *env,
@@ -137,61 +140,68 @@ struct cl_page *lov_page_init_raid0(const struct lu_env *env,
                                     struct cl_object *obj, struct cl_page *page,
                                     cfs_page_t *vmpage)
 {
-        struct lov_page *lpg;
         struct lov_object *loo = cl2lov(obj);
-        int result;
-
+        struct lov_layout_raid0 *r0 = lov_r0(loo);
+        struct lov_io *lio = lov_env_io(env);
+        struct cl_page *subpage;
+        struct cl_object *subobj;
+        struct lov_io_sub *sub;
+        struct lov_page *lpg;
+        struct cl_page *result;
+        loff_t offset;
+        obd_off suboff;
+        int stripe;
+        int rc;
         ENTRY;
-        OBD_SLAB_ALLOC_PTR(lpg, lov_page_kmem);
-        if (lpg != NULL) {
-                loff_t offset;
-                int stripe;
-                obd_off suboff;
-                struct cl_page *subpage;
-                struct cl_object *subobj;
-                struct lov_layout_raid0 *r0 = lov_r0(loo);
-
-                offset = cl_offset(obj, page->cp_index);
-                stripe = lov_stripe_number(r0->lo_lsm, offset);
-                result = lov_stripe_offset(r0->lo_lsm, offset, stripe,
-                                           &suboff);
-                LASSERT(stripe < r0->lo_nr);
-                LASSERT(result == 0);
-
-                subobj = lovsub2cl(r0->lo_sub[stripe]);
-                subpage = cl_page_find(env, subobj,
-                                       cl_index(subobj, suboff), vmpage,
-                                       page->cp_type);
-                if (!IS_ERR(subpage)) {
-                        if (subpage->cp_parent != NULL) {
-                                /*
-                                 * This is only possible when TRANSIENT page
-                                 * is being created, and CACHEABLE sub-page
-                                 * (attached to already existing top-page) has
-                                 * been found. Tell cl_page_find() to use
-                                 * existing page.
-                                 */
-                                LASSERT(subpage->cp_type == CPT_CACHEABLE);
-                                LASSERT(page->cp_type == CPT_TRANSIENT);
-                                lpg->lps_invalid = 1;
-                                cl_page_put(env, subpage);
-                                /*
-                                 * XXX This assumes that lov is in the topmost
-                                 * cl_page.
-                                 */
-                                result = PTR_ERR(cl_page_top(subpage));
-                        } else {
-                                lu_ref_add(&subpage->cp_reference, "lov", page);
-                                subpage->cp_parent = page;
-                                page->cp_child = subpage;
-                        }
-                        cl_page_slice_add(page, &lpg->lps_cl,
-                                          obj, &lov_page_ops);
-                } else
-                        result = PTR_ERR(subpage);
-        } else
-                result = -ENOMEM;
-        RETURN(ERR_PTR(result));
+
+        offset = cl_offset(obj, page->cp_index);
+        stripe = lov_stripe_number(r0->lo_lsm, offset);
+        LASSERT(stripe < r0->lo_nr);
+        rc = lov_stripe_offset(r0->lo_lsm, offset, stripe,
+                               &suboff);
+        LASSERT(rc == 0);
+
+        OBD_SLAB_ALLOC_PTR_GFP(lpg, lov_page_kmem, CFS_ALLOC_IO);
+        if (lpg == NULL)
+                GOTO(out, result = ERR_PTR(-ENOMEM));
+
+        lpg->lps_invalid = 1;
+        cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_page_ops);
+
+        sub = lov_sub_get(env, lio, stripe);
+        if (IS_ERR(sub))
+                GOTO(out, result = (struct cl_page *)sub);
+
+        subobj = lovsub2cl(r0->lo_sub[stripe]);
+        subpage = cl_page_find_sub(sub->sub_env, subobj,
+                                   cl_index(subobj, suboff), vmpage, page);
+        lov_sub_put(sub);
+        if (IS_ERR(subpage))
+                GOTO(out, result = subpage);
+
+        if (likely(subpage->cp_parent == page)) {
+                lu_ref_add(&subpage->cp_reference, "lov", page);
+                lpg->lps_invalid = 0;
+                result = NULL;
+        } else {
+                /*
+                 * This is only possible when TRANSIENT page
+                 * is being created, and CACHEABLE sub-page
+                 * (attached to already existing top-page) has
+                 * been found. Tell cl_page_find() to use
+                 * existing page.
+                 */
+                LASSERT(subpage->cp_type == CPT_CACHEABLE);
+                LASSERT(page->cp_type == CPT_TRANSIENT);
+                /* TODO: this is problematic, what if the page is being freed? */
+                result = cl_page_top(subpage);
+                cl_page_get(result);
+                cl_page_put(env, subpage);
+        }
+
+        EXIT;
+out:
+        return(result);
 }
 
 
@@ -208,7 +218,7 @@ struct cl_page *lov_page_init_empty(const struct lu_env *env,
         int result = -ENOMEM;
         ENTRY;
 
-        OBD_SLAB_ALLOC_PTR(lpg, lov_page_kmem);
+        OBD_SLAB_ALLOC_PTR_GFP(lpg, lov_page_kmem, CFS_ALLOC_IO);
         if (lpg != NULL) {
                 void *addr;
                 cl_page_slice_add(page, &lpg->lps_cl,
@@ -216,7 +226,7 @@ struct cl_page *lov_page_init_empty(const struct lu_env *env,
                 addr = cfs_kmap(vmpage);
                 memset(addr, 0, cl_page_size(obj));
                 cfs_kunmap(vmpage);
-                cl_page_export(env, page);
+                cl_page_export(env, page, 1);
                 result = 0;
         }
         RETURN(ERR_PTR(result));