X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Flov%2Flov_page.c;h=121ad67a2686dd151a29432f697a1738556f54b7;hb=f1a2e6107c124d010d89973cfd716fbd17b689f0;hp=4609de2ea61b013e73b4ebc375f3f1f4d78b4ba2;hpb=b15f3875f46eec3c5186fe6b84cf2cda7ceb8518;p=fs%2Flustre-release.git

diff --git a/lustre/lov/lov_page.c b/lustre/lov/lov_page.c
index 4609de2..121ad67 100644
--- a/lustre/lov/lov_page.c
+++ b/lustre/lov/lov_page.c
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -26,8 +24,10 @@
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2014, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -36,6 +36,7 @@
  * Implementation of cl_page for LOV layer.
  *
  * Author: Nikita Danilov
+ * Author: Jinshan Xiong
  */
 
 #define DEBUG_SUBSYSTEM S_LOV
@@ -52,177 +53,137 @@
  *
  */
 
-static int lov_page_invariant(const struct cl_page_slice *slice)
-{
-        const struct cl_page *page = slice->cpl_page;
-        const struct cl_page *sub = lov_sub_page(slice);
-
-        return ergo(sub != NULL,
-                    page->cp_child == sub &&
-                    sub->cp_parent == page &&
-                    page->cp_state == sub->cp_state);
-}
-
-static void lov_page_fini(const struct lu_env *env,
-                          struct cl_page_slice *slice)
-{
-        struct lov_page *lp = cl2lov_page(slice);
-        struct cl_page *sub = lov_sub_page(slice);
-
-        LINVRNT(lov_page_invariant(slice));
-        ENTRY;
-
-        if (sub != NULL) {
-                LASSERT(sub->cp_state == CPS_FREEING);
-                lu_ref_del(&sub->cp_reference, "lov", sub->cp_parent);
-                sub->cp_parent = NULL;
-                slice->cpl_page->cp_child = NULL;
-                cl_page_put(env, sub);
-        }
-        OBD_SLAB_FREE_PTR(lp, lov_page_kmem);
-        EXIT;
-}
-
-static int lov_page_own(const struct lu_env *env,
-                        const struct cl_page_slice *slice, struct cl_io *io,
-                        int nonblock)
-{
-        struct lov_io *lio = lov_env_io(env);
-        struct lov_io_sub *sub;
-
-        LINVRNT(lov_page_invariant(slice));
-        LINVRNT(!cl2lov_page(slice)->lps_invalid);
-        ENTRY;
-
-        sub = lov_page_subio(env, lio, slice);
-        if (!IS_ERR(sub)) {
-                lov_sub_page(slice)->cp_owner = sub->sub_io;
-                lov_sub_put(sub);
-        } else
-                LBUG(); /* Arrgh */
-        RETURN(0);
-}
-
-static void lov_page_assume(const struct lu_env *env,
-                            const struct cl_page_slice *slice, struct cl_io *io)
+/**
+ * Adjust the stripe index according to the raid0 layout. @max_index is the
+ * maximum page index covered by an underlying DLM lock.
+ * This function converts max_index from the stripe level to the file level,
+ * and makes sure it does not go beyond the end of the current stripe.
+ */
+static int lov_raid0_page_is_under_lock(const struct lu_env *env,
+                                        const struct cl_page_slice *slice,
+                                        struct cl_io *unused,
+                                        pgoff_t *max_index)
 {
-        lov_page_own(env, slice, io, 0);
+        struct lov_object *loo = cl2lov(slice->cpl_obj);
+        struct lov_layout_raid0 *r0 = lov_r0(loo);
+        pgoff_t index = *max_index;
+        unsigned int pps; /* pages per stripe */
+        ENTRY;
+
+        CDEBUG(D_READA, DFID "*max_index = %lu, nr = %d\n",
+               PFID(lu_object_fid(lov2lu(loo))), index, r0->lo_nr);
+
+        if (index == 0) /* the page is not covered by any lock */
+                RETURN(0);
+
+        if (r0->lo_nr == 1) /* single stripe file */
+                RETURN(0);
+
+        /* max_index is stripe level, convert it into file level */
+        if (index != CL_PAGE_EOF) {
+                int stripeno = lov_page_stripe(slice->cpl_page);
+                *max_index = lov_stripe_pgoff(loo->lo_lsm, index, stripeno);
+        }
+
+        /* calculate the end of current stripe */
+        pps = loo->lo_lsm->lsm_stripe_size >> PAGE_CACHE_SHIFT;
+        index = slice->cpl_index + pps - slice->cpl_index % pps - 1;
+
+        CDEBUG(D_READA, DFID "*max_index = %lu, index = %lu, pps = %u, "
+               "stripe_size = %u, stripe no = %u, page index = %lu\n",
+               PFID(lu_object_fid(lov2lu(loo))), *max_index, index, pps,
+               loo->lo_lsm->lsm_stripe_size, lov_page_stripe(slice->cpl_page),
+               slice->cpl_index);
+
+        /* never exceed the end of the stripe */
+        *max_index = min_t(pgoff_t, *max_index, index);
+        RETURN(0);
 }
 
-static int lov_page_print(const struct lu_env *env,
-                          const struct cl_page_slice *slice,
-                          void *cookie, lu_printer_t printer)
+static int lov_raid0_page_print(const struct lu_env *env,
+                                const struct cl_page_slice *slice,
+                                void *cookie, lu_printer_t printer)
 {
-        struct lov_page *lp = cl2lov_page(slice);
+        struct lov_page *lp = cl2lov_page(slice);
 
-        return (*printer)(env, cookie, LUSTRE_LOV_NAME"-page@%p\n", lp);
+        return (*printer)(env, cookie, LUSTRE_LOV_NAME"-page@%p, raid0\n", lp);
 }
 
-static const struct cl_page_operations lov_page_ops = {
-        .cpo_fini   = lov_page_fini,
-        .cpo_own    = lov_page_own,
-        .cpo_assume = lov_page_assume,
-        .cpo_print  = lov_page_print
+static const struct cl_page_operations lov_raid0_page_ops = {
+        .cpo_is_under_lock = lov_raid0_page_is_under_lock,
+        .cpo_print = lov_raid0_page_print
 };
 
-static void lov_empty_page_fini(const struct lu_env *env,
-                                struct cl_page_slice *slice)
+int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
+                        struct cl_page *page, pgoff_t index)
 {
-        struct lov_page *lp = cl2lov_page(slice);
-
-        LASSERT(slice->cpl_page->cp_child == NULL);
-        ENTRY;
-        OBD_SLAB_FREE_PTR(lp, lov_page_kmem);
-        EXIT;
+        struct lov_object *loo = cl2lov(obj);
+        struct lov_layout_raid0 *r0 = lov_r0(loo);
+        struct lov_io *lio = lov_env_io(env);
+        struct cl_object *subobj;
+        struct cl_object *o;
+        struct lov_io_sub *sub;
+        struct lov_page *lpg = cl_object_page_slice(obj, page);
+        loff_t offset;
+        loff_t suboff;
+        int stripe;
+        int rc;
+        ENTRY;
+
+        offset = cl_offset(obj, index);
+        stripe = lov_stripe_number(loo->lo_lsm, offset);
+        LASSERT(stripe < r0->lo_nr);
+        rc = lov_stripe_offset(loo->lo_lsm, offset, stripe,
+                               &suboff);
+        LASSERT(rc == 0);
+
+        lpg->lps_stripe = stripe;
+        cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_raid0_page_ops);
+
+        sub = lov_sub_get(env, lio, stripe);
+        if (IS_ERR(sub))
+                RETURN(PTR_ERR(sub));
+
+        subobj = lovsub2cl(r0->lo_sub[stripe]);
+        list_for_each_entry(o, &subobj->co_lu.lo_header->loh_layers,
+                            co_lu.lo_linkage) {
+                if (o->co_ops->coo_page_init != NULL) {
+                        rc = o->co_ops->coo_page_init(sub->sub_env, o, page,
+                                                      cl_index(subobj, suboff));
+                        if (rc != 0)
+                                break;
+                }
+        }
+        lov_sub_put(sub);
+        RETURN(rc);
 }
 
-struct cl_page *lov_page_init_raid0(const struct lu_env *env,
-                                    struct cl_object *obj, struct cl_page *page,
-                                    cfs_page_t *vmpage)
+static int lov_empty_page_print(const struct lu_env *env,
+                                const struct cl_page_slice *slice,
+                                void *cookie, lu_printer_t printer)
 {
-        struct lov_page *lpg;
-        struct lov_object *loo = cl2lov(obj);
-        int result;
-
-        ENTRY;
-        OBD_SLAB_ALLOC_PTR_GFP(lpg, lov_page_kmem, CFS_ALLOC_IO);
-        if (lpg != NULL) {
-                loff_t offset;
-                int stripe;
-                obd_off suboff;
-                struct cl_page *subpage;
-                struct cl_object *subobj;
-                struct lov_layout_raid0 *r0 = lov_r0(loo);
-
-                offset = cl_offset(obj, page->cp_index);
-                stripe = lov_stripe_number(r0->lo_lsm, offset);
-                result = lov_stripe_offset(r0->lo_lsm, offset, stripe,
-                                           &suboff);
-                LASSERT(stripe < r0->lo_nr);
-                LASSERT(result == 0);
-
-                subobj = lovsub2cl(r0->lo_sub[stripe]);
-                subpage = cl_page_find(env, subobj,
-                                       cl_index(subobj, suboff), vmpage,
-                                       page->cp_type);
-                if (!IS_ERR(subpage)) {
-                        if (subpage->cp_parent != NULL) {
-                                /*
-                                 * This is only possible when TRANSIENT page
-                                 * is being created, and CACHEABLE sub-page
-                                 * (attached to already existing top-page) has
-                                 * been found. Tell cl_page_find() to use
-                                 * existing page.
-                                 */
-                                LASSERT(subpage->cp_type == CPT_CACHEABLE);
-                                LASSERT(page->cp_type == CPT_TRANSIENT);
-                                lpg->lps_invalid = 1;
-                                cl_page_put(env, subpage);
-                                /*
-                                 * XXX This assumes that lov is in the topmost
-                                 * cl_page.
-                                 */
-                                result = PTR_ERR(cl_page_top(subpage));
-                        } else {
-                                lu_ref_add(&subpage->cp_reference, "lov", page);
-                                subpage->cp_parent = page;
-                                page->cp_child = subpage;
-                        }
-                        cl_page_slice_add(page, &lpg->lps_cl,
-                                          obj, &lov_page_ops);
-                } else
-                        result = PTR_ERR(subpage);
-        } else
-                result = -ENOMEM;
-        RETURN(ERR_PTR(result));
-}
+        struct lov_page *lp = cl2lov_page(slice);
 
+        return (*printer)(env, cookie, LUSTRE_LOV_NAME"-page@%p, empty.\n", lp);
+}
 
 static const struct cl_page_operations lov_empty_page_ops = {
-        .cpo_fini  = lov_empty_page_fini,
-        .cpo_print = lov_page_print
+        .cpo_print = lov_empty_page_print
 };
 
-struct cl_page *lov_page_init_empty(const struct lu_env *env,
-                                    struct cl_object *obj, struct cl_page *page,
-                                    cfs_page_t *vmpage)
+int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
+                        struct cl_page *page, pgoff_t index)
 {
-        struct lov_page *lpg;
-        int result = -ENOMEM;
-        ENTRY;
-
-        OBD_SLAB_ALLOC_PTR_GFP(lpg, lov_page_kmem, CFS_ALLOC_IO);
-        if (lpg != NULL) {
-                void *addr;
-                cl_page_slice_add(page, &lpg->lps_cl,
-                                  obj, &lov_empty_page_ops);
-                addr = cfs_kmap(vmpage);
-                memset(addr, 0, cl_page_size(obj));
-                cfs_kunmap(vmpage);
-                cl_page_export(env, page, 1);
-                result = 0;
-        }
-        RETURN(ERR_PTR(result));
+        struct lov_page *lpg = cl_object_page_slice(obj, page);
+        void *addr;
+        ENTRY;
+
+        cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_empty_page_ops);
+        addr = kmap(page->cp_vmpage);
+        memset(addr, 0, cl_page_size(obj));
+        kunmap(page->cp_vmpage);
+        cl_page_export(env, page, 1);
+        RETURN(0);
 }
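
The raid0 code added by this patch leans on one piece of arithmetic: a file-level byte offset maps to a (stripe number, stripe-local offset) pair, and a stripe-local page index maps back to a file-level page index, which lov_raid0_page_is_under_lock() then clamps to the end of the current stripe chunk (pps pages wide). The stand-alone sketch below illustrates that arithmetic for a plain round-robin raid0 layout; the helpers stripe_number(), stripe_offset() and stripe_pgoff() are simplified, hypothetical stand-ins for the kernel's lov_stripe_number(), lov_stripe_offset() and lov_stripe_pgoff(), not the actual implementations.

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12                   /* assume 4K pages */

typedef unsigned long pgoff_t;

struct raid0 {
        unsigned long stripe_size;      /* bytes, multiple of the page size */
        unsigned int  stripe_count;
};

/* which stripe does a file-level byte offset land in? */
static unsigned int stripe_number(const struct raid0 *r0, unsigned long off)
{
        return (off / r0->stripe_size) % r0->stripe_count;
}

/* file-level byte offset -> byte offset inside that stripe's object */
static unsigned long stripe_offset(const struct raid0 *r0, unsigned long off)
{
        unsigned long chunk = off / (r0->stripe_size * r0->stripe_count);

        return chunk * r0->stripe_size + off % r0->stripe_size;
}

/* stripe-level page index -> file-level page index */
static pgoff_t stripe_pgoff(const struct raid0 *r0, pgoff_t stripe_index,
                            unsigned int stripe)
{
        unsigned long soff = stripe_index << PAGE_SHIFT;
        unsigned long chunk = soff / r0->stripe_size;
        unsigned long foff = (chunk * r0->stripe_count + stripe) *
                             r0->stripe_size + soff % r0->stripe_size;

        return foff >> PAGE_SHIFT;
}

int main(void)
{
        struct raid0 r0 = { .stripe_size = 1UL << 20, .stripe_count = 4 };
        unsigned long off = 5 * (1UL << 20) + 4096;      /* 5MB + 4K */
        unsigned int stripe = stripe_number(&r0, off);
        pgoff_t index = off >> PAGE_SHIFT;               /* file-level index */
        unsigned int pps = r0.stripe_size >> PAGE_SHIFT; /* pages per stripe */
        /* end of the stripe chunk holding 'index', as computed in the patch */
        pgoff_t stripe_end = index + pps - index % pps - 1;

        printf("offset %lu -> stripe %u, stripe offset %lu\n",
               off, stripe, stripe_offset(&r0, off));
        printf("page %lu: its stripe chunk ends at file page %lu\n",
               index, stripe_end);

        /* round trip: stripe-level page index back to the file level */
        assert(stripe_pgoff(&r0, stripe_offset(&r0, off) >> PAGE_SHIFT,
                            stripe) == index);
        return 0;
}

With stripe_size = 1MB and four stripes, file page 1281 (offset 5MB + 4K) lands in stripe 1 at stripe-local page 257, and its stripe chunk ends at file page 1535; clamping *max_index to that boundary is what keeps a stripe-level lock extent from being reported past the stripe it actually covers.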