* @{
*/
-/*****************************************************************************
- *
+/**
* Lov page operations.
- *
*/
int lov_page_init_composite(const struct lu_env *env, struct cl_object *obj,
struct cl_page *page, pgoff_t index)
stripe_cached = lio->lis_cached_entry != LIS_CACHE_ENTRY_NONE &&
page->cp_type == CPT_TRANSIENT;
- offset = cl_offset(obj, index);
+ offset = index << PAGE_SHIFT;
if (stripe_cached) {
entry = lio->lis_cached_entry;
+ /* if there's no layout at this offset, we'll end up here with
+ * a cached layout entry, so we must verify the layout includes
+ * this offset
+ */
+ if (!lov_io_layout_at_confirm(lio, entry, offset))
+ return -ENODATA;
stripe = lio->lis_cached_stripe;
/* Offset can never go backwards in an i/o, so this is valid */
suboff = lio->lis_cached_suboff + offset - lio->lis_cached_off;
} else {
entry = lov_io_layout_at(lio, offset);
+ if (entry < 0)
+ return -ENODATA;
stripe = lov_stripe_number(loo->lo_lsm, entry, offset);
rc = lov_stripe_offset(loo->lo_lsm, entry, offset, stripe,
cl_object_for_each(o, subobj) {
if (o->co_ops->coo_page_init) {
rc = o->co_ops->coo_page_init(sub->sub_env, o, page,
- cl_index(subobj, suboff));
+ suboff >> PAGE_SHIFT);
if (rc != 0)
break;
}
}
int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
-			struct cl_page *page, pgoff_t index)
+			struct cl_page *cl_page, pgoff_t index)
{
	void *addr;
	ENTRY;
+	/* Tag the page with the reserved CP_LOV_INDEX_EMPTY sentinel —
+	 * presumably marking it as not backed by any stripe (TODO confirm
+	 * against cp_lov_index users).  The BUILD_BUG_ON keeps the field's
+	 * type in sync with the sentinel's type at compile time.
+	 */
-	BUILD_BUG_ON(!__same_type(page->cp_lov_index, CP_LOV_INDEX_EMPTY));
-	page->cp_lov_index = CP_LOV_INDEX_EMPTY;
+	BUILD_BUG_ON(!__same_type(cl_page->cp_lov_index, CP_LOV_INDEX_EMPTY));
+	cl_page->cp_lov_index = CP_LOV_INDEX_EMPTY;
+	/* Zero-fill the full PAGE_SIZE bytes of the backing VM page and mark
+	 * it uptodate directly (replaces the old cl_page_export() call).
+	 */
-	addr = kmap(page->cp_vmpage);
-	memset(addr, 0, cl_page_size(obj));
-	kunmap(page->cp_vmpage);
-	cl_page_export(env, page, 1);
+	addr = kmap(cl_page->cp_vmpage);
+	memset(addr, 0, PAGE_SIZE);
+	kunmap(cl_page->cp_vmpage);
+	SetPageUptodate(cl_page->cp_vmpage);
	RETURN(0);
}