* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
struct cl_io *parent = lio->lis_cl.cis_io;
- switch(io->ci_type) {
- case CIT_SETATTR: {
- io->u.ci_setattr.sa_attr = parent->u.ci_setattr.sa_attr;
- io->u.ci_setattr.sa_valid = parent->u.ci_setattr.sa_valid;
- io->u.ci_setattr.sa_capa = parent->u.ci_setattr.sa_capa;
+ switch (io->ci_type) {
+ case CIT_SETATTR: {
+ io->u.ci_setattr.sa_attr = parent->u.ci_setattr.sa_attr;
+ io->u.ci_setattr.sa_attr_flags =
+ parent->u.ci_setattr.sa_attr_flags;
+ io->u.ci_setattr.sa_valid = parent->u.ci_setattr.sa_valid;
+ io->u.ci_setattr.sa_stripe_index = stripe;
+ io->u.ci_setattr.sa_parent_fid =
+ parent->u.ci_setattr.sa_parent_fid;
+ io->u.ci_setattr.sa_capa = parent->u.ci_setattr.sa_capa;
if (cl_io_is_trunc(io)) {
loff_t new_size = parent->u.ci_setattr.sa_attr.lvb_size;
LASSERT(sub->sub_stripe < lio->lis_stripe_count);
ENTRY;
+ if (unlikely(lov_r0(lov)->lo_sub[stripe] == NULL))
+ RETURN(-EIO);
+
result = 0;
sub->sub_io_initialized = 0;
sub->sub_borrowed = 0;
int lov_page_stripe(const struct cl_page *page)
{
- struct lovsub_object *subobj;
const struct cl_page_slice *slice;
ENTRY;
- slice = cl_page_at(page, &lovsub_device_type);
+ /* The stripe index is now recorded on the lov page slice itself,
+ * so the slice is looked up at the lov layer rather than via the
+ * lovsub sub-object as before. */
+ slice = cl_page_at(page, &lov_device_type);
LASSERT(slice != NULL);
LASSERT(slice->cpl_obj != NULL);
- subobj = cl2lovsub(slice->cpl_obj);
- RETURN(subobj->lso_index);
+ /* lps_stripe replaces the old lovsub_object::lso_index lookup. */
+ RETURN(cl2lov_page(slice)->lps_stripe);
}
struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio,
RETURN(result);
}
-static void lov_io_slice_init(struct lov_io *lio,
- struct lov_object *obj, struct cl_io *io)
+static int lov_io_slice_init(struct lov_io *lio,
+ struct lov_object *obj, struct cl_io *io)
{
ENTRY;
lio->lis_io_endpos = lio->lis_endpos;
if (cl_io_is_append(io)) {
LASSERT(io->ci_type == CIT_WRITE);
+
+ /* If there is a LOV EA hole, then we may not be able to locate
+ * the current file-tail exactly. */
+ if (unlikely(obj->lo_lsm->lsm_pattern &
+ LOV_PATTERN_F_HOLE))
+ RETURN(-EIO);
+
lio->lis_pos = 0;
lio->lis_endpos = OBD_OBJECT_EOF;
}
LBUG();
}
- EXIT;
+ RETURN(0);
}
static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
EXIT;
}
-static obd_off lov_offset_mod(obd_off val, int delta)
+static loff_t lov_offset_mod(loff_t val, int delta)
{
if (val != OBD_OBJECT_EOF)
val += delta;
{
struct lov_io *lio = cl2lov_io(env, ios);
struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
- struct lov_io_sub *sub;
- obd_off endpos;
- obd_off start;
- obd_off end;
+ struct lov_io_sub *sub;
+ loff_t endpos;
+ loff_t start;
+ loff_t end;
int stripe;
int rc = 0;
endpos, &start, &end))
continue;
+ if (unlikely(lov_r0(lio->lis_object)->lo_sub[stripe] == NULL)) {
+ if (ios->cis_io->ci_type == CIT_READ ||
+ ios->cis_io->ci_type == CIT_WRITE ||
+ ios->cis_io->ci_type == CIT_FAULT)
+ RETURN(-EIO);
+
+ continue;
+ }
+
end = lov_offset_mod(end, +1);
sub = lov_sub_get(env, lio, stripe);
if (!IS_ERR(sub)) {
rc = PTR_ERR(sub);
if (!rc)
- cfs_list_add_tail(&sub->sub_linkage, &lio->lis_active);
+ list_add_tail(&sub->sub_linkage, &lio->lis_active);
else
break;
}
int rc = 0;
ENTRY;
- cfs_list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
+ list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
lov_sub_enter(sub);
rc = iofunc(sub->sub_env, sub->sub_io);
lov_sub_exit(sub);
ENTRY;
rc = lov_io_call(env, lio, lov_io_iter_fini_wrapper);
LASSERT(rc == 0);
- while (!cfs_list_empty(&lio->lis_active))
- cfs_list_del_init(lio->lis_active.next);
+ while (!list_empty(&lio->lis_active))
+ list_del_init(lio->lis_active.next);
EXIT;
}
EXIT;
}
+/**
+ * lov implementation of cl_io_operations::cio_read_ahead() method.
+ *
+ * Maps \a start (a file-level page index) to the stripe covering it,
+ * forwards the read-ahead request to that stripe's sub-io, then converts
+ * the returned cra_end (a stripe-level page index) back to file level,
+ * clamped so that read-ahead never crosses the stripe boundary.
+ *
+ * \retval 0 on success, -EIO if the stripe sub-object is absent (LOV EA
+ * hole), or a negative errno propagated from lov_sub_get() or the
+ * sub-io's cio_read_ahead method.
+ */
+static int lov_io_read_ahead(const struct lu_env *env,
+			     const struct cl_io_slice *ios,
+			     pgoff_t start, struct cl_read_ahead *ra)
+{
+	struct lov_io		*lio = cl2lov_io(env, ios);
+	struct lov_object	*loo = lio->lis_object;
+	struct cl_object	*obj = lov2cl(loo);
+	struct lov_layout_raid0	*r0 = lov_r0(loo);
+	struct lov_io_sub	*sub;
+	loff_t			 suboff;
+	pgoff_t			 ra_end;
+	unsigned int		 pps; /* pages per stripe */
+	int			 stripe;
+	int			 rc;
+	ENTRY;
+
+	stripe = lov_stripe_number(loo->lo_lsm, cl_offset(obj, start));
+	if (unlikely(r0->lo_sub[stripe] == NULL))
+		RETURN(-EIO);
+
+	sub = lov_sub_get(env, lio, stripe);
+	/* lov_sub_get() returns ERR_PTR() on failure (checked with IS_ERR()
+	 * elsewhere in this file); guard before dereferencing sub below. */
+	if (IS_ERR(sub))
+		RETURN(PTR_ERR(sub));
+
+	lov_stripe_offset(loo->lo_lsm, cl_offset(obj, start), stripe, &suboff);
+	rc = cl_io_read_ahead(sub->sub_env, sub->sub_io,
+			      cl_index(lovsub2cl(r0->lo_sub[stripe]), suboff),
+			      ra);
+	lov_sub_put(sub);
+
+	CDEBUG(D_READA, DFID " cra_end = %lu, stripes = %d, rc = %d\n",
+	       PFID(lu_object_fid(lov2lu(loo))), ra->cra_end, r0->lo_nr, rc);
+	if (rc != 0)
+		RETURN(rc);
+
+	/* ra->cra_end is the maximum page index covered by an underlying
+	 * DLM lock, expressed at stripe level; it must be converted back
+	 * to file level and kept within the current stripe. */
+	if (r0->lo_nr == 1) /* single stripe file */
+		RETURN(0);
+
+	/* cra_end is stripe level, convert it into file level */
+	ra_end = ra->cra_end;
+	if (ra_end != CL_PAGE_EOF)
+		ra_end = lov_stripe_pgoff(loo->lo_lsm, ra_end, stripe);
+
+	pps = loo->lo_lsm->lsm_stripe_size >> PAGE_CACHE_SHIFT;
+
+	CDEBUG(D_READA, DFID " max_index = %lu, pps = %u, "
+	       "stripe_size = %u, stripe no = %u, start index = %lu\n",
+	       PFID(lu_object_fid(lov2lu(loo))), ra_end, pps,
+	       loo->lo_lsm->lsm_stripe_size, stripe, start);
+
+	/* never exceed the end of the stripe */
+	ra->cra_end = min_t(pgoff_t, ra_end, start + pps - start % pps - 1);
+	RETURN(0);
+}
+
/**
* lov implementation of cl_operations::cio_submit() method. It takes a list
* of pages in \a queue, splits it into per-stripe sub-lists, invokes
ENTRY;
*written = 0;
- cfs_list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
+ list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
struct cl_io *subio = sub->sub_io;
lov_sub_enter(sub);
.cio_fini = lov_io_fini
}
},
+ .cio_read_ahead = lov_io_read_ahead,
.cio_submit = lov_io_submit,
.cio_commit_async = lov_io_commit_async,
};
ENTRY;
INIT_LIST_HEAD(&lio->lis_active);
- lov_io_slice_init(lio, lov, io);
+ io->ci_result = lov_io_slice_init(lio, lov, io);
+ if (io->ci_result != 0)
+ RETURN(io->ci_result);
+
if (io->ci_result == 0) {
io->ci_result = lov_io_subio_init(env, lio, io);
if (io->ci_result == 0) {