*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* Implementation of cl_io for LOV layer.
*
*ext = (typeof(*ext)) { lio->lis_pos, lio->lis_endpos };
io->ci_need_write_intent = 0;
- if (!(io->ci_type == CIT_WRITE || cl_io_is_trunc(io) ||
- cl_io_is_mkwrite(io)))
+ if (!(io->ci_type == CIT_WRITE || cl_io_is_mkwrite(io) ||
+ cl_io_is_fallocate(io) || cl_io_is_trunc(io)))
RETURN(0);
/*
CDEBUG(D_LAYOUT, "designated I/O mirror state: %d\n",
lov_flr_state(obj));
- if ((cl_io_is_trunc(io) || io->ci_type == CIT_WRITE) &&
+ if ((cl_io_is_trunc(io) || io->ci_type == CIT_WRITE ||
+ cl_io_is_fallocate(io)) &&
(io->ci_layout_version != obj->lo_lsm->lsm_layout_gen)) {
/*
* For resync I/O, the ci_layout_version was the layout
if (!lre->lre_valid)
continue;
+ if (lre->lre_foreign)
+ continue;
+
lov_foreach_mirror_layout_entry(obj, lle, lre) {
if (!lle->lle_valid)
continue;
io->ci_result = 0;
lio->lis_object = obj;
-
- LASSERT(obj->lo_lsm != NULL);
+ lio->lis_cached_entry = LIS_CACHE_ENTRY_NONE;
switch (io->ci_type) {
case CIT_READ:
break;
}
+ case CIT_LSEEK: {
+ lio->lis_pos = io->u.ci_lseek.ls_start;
+ lio->lis_endpos = OBD_OBJECT_EOF;
+ break;
+ }
+
case CIT_GLIMPSE:
lio->lis_pos = 0;
lio->lis_endpos = OBD_OBJECT_EOF;
LBUG();
}
+	/*
+	 * CIT_MISC + ci_ignore_layout identifies an I/O issued from the OSC
+	 * layer; it does not care about or access lov layout-related info.
+	 */
+ if (io->ci_ignore_layout && io->ci_type == CIT_MISC)
+ GOTO(out, result = 0);
+
+ LASSERT(obj->lo_lsm != NULL);
+
result = lov_io_mirror_init(lio, obj, io);
if (result)
GOTO(out, result);
/* check if it needs to instantiate layout */
if (!(io->ci_type == CIT_WRITE || cl_io_is_mkwrite(io) ||
+ cl_io_is_fallocate(io) ||
(cl_io_is_trunc(io) && io->u.ci_setattr.sa_attr.lvb_size > 0)))
GOTO(out, result = 0);
{
struct lov_io *lio = cl2lov_io(env, ios);
struct lov_object *lov = cl2lov(ios->cis_obj);
+ struct lov_io_sub *sub;
ENTRY;
-
LASSERT(list_empty(&lio->lis_active));
- while (!list_empty(&lio->lis_subios)) {
- struct lov_io_sub *sub = list_entry(lio->lis_subios.next,
- struct lov_io_sub,
- sub_list);
-
+ while ((sub = list_first_entry_or_null(&lio->lis_subios,
+ struct lov_io_sub,
+ sub_list)) != NULL) {
list_del_init(&sub->sub_list);
lio->lis_nr_subios--;
LASSERT(atomic_read(&lov->lo_active_ios) > 0);
if (atomic_dec_and_test(&lov->lo_active_ios))
- wake_up_all(&lov->lo_waitq);
+ wake_up(&lov->lo_waitq);
EXIT;
}
parent->u.ci_setattr.sa_parent_fid;
/* For SETATTR(fallocate) pass the subtype to lower IO */
io->u.ci_setattr.sa_subtype = parent->u.ci_setattr.sa_subtype;
+ if (cl_io_is_fallocate(io)) {
+ io->u.ci_setattr.sa_falloc_offset = start;
+ io->u.ci_setattr.sa_falloc_end = end;
+ io->u.ci_setattr.sa_falloc_uid =
+ parent->u.ci_setattr.sa_falloc_uid;
+ io->u.ci_setattr.sa_falloc_gid =
+ parent->u.ci_setattr.sa_falloc_gid;
+ }
if (cl_io_is_trunc(io)) {
loff_t new_size = parent->u.ci_setattr.sa_attr.lvb_size;
new_size = lov_size_to_stripe(lsm, index, new_size,
stripe);
io->u.ci_setattr.sa_attr.lvb_size = new_size;
- } else if (cl_io_is_fallocate(io)) {
- io->u.ci_setattr.sa_falloc_offset = start;
- io->u.ci_setattr.sa_falloc_end = end;
- io->u.ci_setattr.sa_attr.lvb_size =
- parent->u.ci_setattr.sa_attr.lvb_size;
}
lov_lsm2layout(lsm, lsm->lsm_entries[index],
&io->u.ci_setattr.sa_layout);
io->u.ci_ladvise.li_flags = parent->u.ci_ladvise.li_flags;
break;
}
+ case CIT_LSEEK: {
+ io->u.ci_lseek.ls_start = start;
+ io->u.ci_lseek.ls_whence = parent->u.ci_lseek.ls_whence;
+ io->u.ci_lseek.ls_result = parent->u.ci_lseek.ls_result;
+ break;
+ }
case CIT_GLIMPSE:
case CIT_MISC:
default:
return val;
}
+/**
+ * Inherit the parent I/O state into sub-I/O \a sub for [start, end) and
+ * start its iteration; on success the sub is queued on lio->lis_active.
+ *
+ * \retval 0 on success; on failure the negative errno from
+ * cl_io_iter_init() (the sub iterator is finalized before returning).
+ */
+static int lov_io_add_sub(const struct lu_env *env, struct lov_io *lio,
+			  struct lov_io_sub *sub, u64 start, u64 end)
+{
+	int rc;
+
+	/* adjust the exclusive end offset via lov_offset_mod() — presumably
+	 * converts it to the inclusive form the sub-I/O expects; confirm
+	 * against lov_offset_mod()'s definition.
+	 */
+	end = lov_offset_mod(end, 1);
+	lov_io_sub_inherit(sub, lio, start, end);
+	rc = cl_io_iter_init(sub->sub_env, &sub->sub_io);
+	if (rc != 0) {
+		/* undo partial iterator setup before propagating the error */
+		cl_io_iter_fini(sub->sub_env, &sub->sub_io);
+		return rc;
+	}
+
+	list_add_tail(&sub->sub_linkage, &lio->lis_active);
+
+	return rc;
+}
static int lov_io_iter_init(const struct lu_env *env,
const struct cl_io_slice *ios)
{
u64 start;
u64 end;
int stripe;
+ bool tested_trunc_stripe = false;
+
+ r0->lo_trunc_stripeno = -1;
CDEBUG(D_VFSTRACE, "component[%d] flags %#x\n",
index, lsm->lsm_entries[index]->lsme_flags);
continue;
}
+ if (lsm_entry_is_foreign(lsm, index))
+ continue;
+
if (!le->lle_valid && !ios->cis_io->ci_designated_mirror) {
CERROR("I/O to invalid component: %d, mirror: %d\n",
index, lio->lis_mirror_index);
continue;
}
- end = lov_offset_mod(end, 1);
+ if (cl_io_is_trunc(ios->cis_io) &&
+ !tested_trunc_stripe) {
+ int prev;
+ u64 tr_start;
+
+ prev = (stripe == 0) ? r0->lo_nr - 1 :
+ stripe - 1;
+				/**
+				 * Only involve the previous stripe if the
+				 * truncate in this component starts at the
+				 * beginning of this stripe.
+				 */
+ tested_trunc_stripe = true;
+ if (ext.e_start < lsm->lsm_entries[index]->
+ lsme_extent.e_start) {
+ /* need previous stripe involvement */
+ r0->lo_trunc_stripeno = prev;
+ } else {
+ tr_start = ext.e_start;
+ tr_start = lov_do_div64(tr_start,
+ stripe_width(lsm, index));
+ /* tr_start %= stripe_swidth */
+ if (tr_start == stripe * lsm->
+ lsm_entries[index]->
+ lsme_stripe_size)
+ r0->lo_trunc_stripeno = prev;
+ }
+ }
+
+ /* if the last stripe is the trunc stripeno */
+ if (r0->lo_trunc_stripeno == stripe)
+ r0->lo_trunc_stripeno = -1;
+
sub = lov_sub_get(env, lio,
lov_comp_index(index, stripe));
- if (IS_ERR(sub)) {
- rc = PTR_ERR(sub);
+ if (IS_ERR(sub))
+ return PTR_ERR(sub);
+
+ rc = lov_io_add_sub(env, lio, sub, start, end);
+ if (rc != 0)
break;
+ }
+ if (rc != 0)
+ break;
+
+ if (r0->lo_trunc_stripeno != -1) {
+ stripe = r0->lo_trunc_stripeno;
+ if (unlikely(!r0->lo_sub[stripe])) {
+ r0->lo_trunc_stripeno = -1;
+ continue;
}
+ sub = lov_sub_get(env, lio,
+ lov_comp_index(index, stripe));
+ if (IS_ERR(sub))
+ return PTR_ERR(sub);
- lov_io_sub_inherit(sub, lio, start, end);
- rc = cl_io_iter_init(sub->sub_env, &sub->sub_io);
- if (rc != 0)
- cl_io_iter_fini(sub->sub_env, &sub->sub_io);
+			/**
+			 * The prev sub could be used by another truncate;
+			 * we'd skip it. LU-14128 happens when an expanding
+			 * truncate + read gets a wrong kms.
+			 */
+ if (!list_empty(&sub->sub_linkage)) {
+ r0->lo_trunc_stripeno = -1;
+ continue;
+ }
+
+ (void)lov_stripe_intersects(lsm, index, stripe, &ext,
+ &start, &end);
+ rc = lov_io_add_sub(env, lio, sub, start, end);
if (rc != 0)
break;
- CDEBUG(D_VFSTRACE, "shrink: %d [%llu, %llu)\n",
- stripe, start, end);
-
- list_add_tail(&sub->sub_linkage, &lio->lis_active);
}
- if (rc != 0)
- break;
}
RETURN(rc);
}
lse = lov_lse(lio->lis_object, index);
+ if (lsme_is_foreign(lse))
+ RETURN(-EINVAL);
+
next = MAX_LFS_FILESIZE;
if (lse->lsme_stripe_count > 1) {
unsigned long ssize = lse->lsme_stripe_size;
{
int rc;
+ /* Before ending each i/o, we must set lis_cached_entry to tell the
+ * next i/o not to use stale cached lis information.
+ */
+ cl2lov_io(env, ios)->lis_cached_entry = LIS_CACHE_ENTRY_NONE;
+
rc = lov_io_call(env, cl2lov_io(env, ios), lov_io_end_wrapper);
LASSERT(rc == 0);
}
offset = cl_offset(obj, start);
index = lov_io_layout_at(lio, offset);
- if (index < 0 || !lsm_entry_inited(loo->lo_lsm, index))
+ if (index < 0 || !lsm_entry_inited(loo->lo_lsm, index) ||
+ lsm_entry_is_foreign(loo->lo_lsm, index))
RETURN(-ENODATA);
/* avoid readahead to expand to stale components */
RETURN(0);
}
+/**
+ * lov implementation of cl_io_operations::cio_lru_reserve().
+ *
+ * Walk all layout components overlapping [pos, pos + bytes) and, for each
+ * stripe of a component intersecting the extent, reserve LRU page slots
+ * in the corresponding sub-I/O via cl_io_lru_reserve().
+ *
+ * \retval 0		on success
+ * \retval -EIO	I/O targets an invalid component or a missing stripe
+ * \retval negative	errno from lov_sub_get() or cl_io_lru_reserve()
+ */
+int lov_io_lru_reserve(const struct lu_env *env,
+		       const struct cl_io_slice *ios, loff_t pos, size_t bytes)
+{
+	struct lov_io *lio = cl2lov_io(env, ios);
+	struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
+	struct lov_io_sub *sub;
+	struct lu_extent ext;
+	int index;
+	int rc = 0;
+
+	ENTRY;
+
+	ext.e_start = pos;
+	ext.e_end = pos + bytes;
+	lov_foreach_io_layout(index, lio, &ext) {
+		struct lov_layout_entry *le = lov_entry(lio->lis_object, index);
+		struct lov_layout_raid0 *r0 = &le->lle_raid0;
+		u64 start;
+		u64 end;
+		int stripe;
+
+		/* skip components that are not instantiated yet */
+		if (!lsm_entry_inited(lsm, index))
+			continue;
+
+		/* only a designated-mirror I/O may touch an invalid entry */
+		if (!le->lle_valid && !ios->cis_io->ci_designated_mirror) {
+			CERROR(DFID": I/O to invalid component: %d, mirror: %d\n",
+			       PFID(lu_object_fid(lov2lu(lio->lis_object))),
+			       index, lio->lis_mirror_index);
+			RETURN(-EIO);
+		}
+
+		for (stripe = 0; stripe < r0->lo_nr; stripe++) {
+			if (!lov_stripe_intersects(lsm, index, stripe,
+						   &ext, &start, &end))
+				continue;
+
+			if (unlikely(!r0->lo_sub[stripe]))
+				RETURN(-EIO);
+
+			sub = lov_sub_get(env, lio,
+					  lov_comp_index(index, stripe));
+			if (IS_ERR(sub))
+				/* RETURN() pairs with ENTRY for debug
+				 * tracing, matching every other exit here
+				 */
+				RETURN(PTR_ERR(sub));
+
+			rc = cl_io_lru_reserve(sub->sub_env, &sub->sub_io, start,
+					       end - start + 1);
+			if (rc != 0)
+				RETURN(rc);
+		}
+	}
+
+	RETURN(0);
+}
+
/**
* lov implementation of cl_operations::cio_submit() method. It takes a list
* of pages in \a queue, splits it into per-stripe sub-lists, invokes
struct lov_io *lio = cl2lov_io(env, ios);
struct lov_io_sub *sub;
struct cl_page_list *plist = &lov_env_info(env)->lti_plist;
- struct cl_page *page;
+ struct cl_page *page = cl_page_list_first(qin);
struct cl_page *tmp;
+ bool dio = false;
int index;
int rc = 0;
ENTRY;
+ if (page->cp_type == CPT_TRANSIENT)
+ dio = true;
+
cl_page_list_init(plist);
while (qin->pl_nr > 0) {
struct cl_2queue *cl2q = &lov_env_info(env)->lti_cl2q;
cl_page_list_move(&cl2q->c2_qin, qin, page);
index = page->cp_lov_index;
- cl_page_list_for_each_safe(page, tmp, qin) {
- /* this page is not on this stripe */
- if (index != page->cp_lov_index)
- continue;
-
- cl_page_list_move(&cl2q->c2_qin, qin, page);
+ /* DIO is already split by stripe */
+ if (!dio) {
+ cl_page_list_for_each_safe(page, tmp, qin) {
+ /* this page is not on this stripe */
+ if (index != page->cp_lov_index)
+ continue;
+
+ cl_page_list_move(&cl2q->c2_qin, qin, page);
+ }
+ } else {
+ cl_page_list_splice(qin, &cl2q->c2_qin);
}
sub = lov_sub_get(env, lio, index);
break;
from = 0;
+
+ if (lov_comp_entry(index) !=
+ lov_comp_entry(page->cp_lov_index))
+ cl_io_extent_release(sub->sub_env, &sub->sub_io);
}
/* for error case, add the page back into the qin list */
struct cl_fault_io *fio;
struct lov_io *lio;
struct lov_io_sub *sub;
+ loff_t offset;
+ int entry;
+ int stripe;
ENTRY;
fio = &ios->cis_io->u.ci_fault;
lio = cl2lov_io(env, ios);
+
+ /**
+ * LU-14502: ft_page could be an existing cl_page associated with
+ * the vmpage covering the fault index, and the page may still
+ * refer to another mirror of an old IO.
+ */
+ if (lov_is_flr(lio->lis_object)) {
+ offset = cl_offset(ios->cis_obj, fio->ft_index);
+ entry = lov_io_layout_at(lio, offset);
+ if (entry < 0) {
+ CERROR(DFID": page fault index %lu invalid component: "
+ "%d, mirror: %d\n",
+ PFID(lu_object_fid(&ios->cis_obj->co_lu)),
+ fio->ft_index, entry,
+ lio->lis_mirror_index);
+ RETURN(-EIO);
+ }
+ stripe = lov_stripe_number(lio->lis_object->lo_lsm,
+ entry, offset);
+
+ if (fio->ft_page->cp_lov_index !=
+ lov_comp_index(entry, stripe)) {
+ CDEBUG(D_INFO, DFID": page fault at index %lu, "
+ "at mirror %u comp entry %u stripe %u, "
+ "been used with comp entry %u stripe %u\n",
+ PFID(lu_object_fid(&ios->cis_obj->co_lu)),
+ fio->ft_index, lio->lis_mirror_index,
+ entry, stripe,
+ lov_comp_entry(fio->ft_page->cp_lov_index),
+ lov_comp_stripe(fio->ft_page->cp_lov_index));
+
+ fio->ft_page->cp_lov_index =
+ lov_comp_index(entry, stripe);
+ }
+ }
+
sub = lov_sub_get(env, lio, fio->ft_page->cp_lov_index);
sub->sub_io.u.ci_fault.ft_nob = fio->ft_nob;
RETURN(lov_io_start(env, ios));
}
+/**
+ * cio_start for CIT_SETATTR at the lov layer.
+ *
+ * For fallocate, translate the parent's file-level size (lvb_size) into a
+ * per-stripe size for every active sub-I/O and propagate sa_avalid before
+ * delegating to the generic lov_io_start().  Plain truncate sizes are
+ * presumably mapped earlier in lov_io_sub_inherit() — confirm there.
+ */
+static int lov_io_setattr_start(const struct lu_env *env,
+				const struct cl_io_slice *ios)
+{
+	struct lov_io *lio = cl2lov_io(env, ios);
+	struct cl_io *parent = ios->cis_io;
+	struct lov_io_sub *sub;
+	struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
+
+	ENTRY;
+
+	if (cl_io_is_fallocate(parent)) {
+		list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
+			loff_t size = parent->u.ci_setattr.sa_attr.lvb_size;
+			/* sub_subio_index encodes component entry + stripe */
+			int index = lov_comp_entry(sub->sub_subio_index);
+			int stripe = lov_comp_stripe(sub->sub_subio_index);
+
+			size = lov_size_to_stripe(lsm, index, size, stripe);
+			sub->sub_io.u.ci_setattr.sa_attr.lvb_size = size;
+			sub->sub_io.u.ci_setattr.sa_avalid =
+				parent->u.ci_setattr.sa_avalid;
+		}
+	}
+
+	RETURN(lov_io_start(env, ios));
+}
+
static void lov_io_fsync_end(const struct lu_env *env,
const struct cl_io_slice *ios)
{
RETURN_EXIT;
}
+/**
+ * End of a CIT_LSEEK I/O: merge the per-stripe SEEK_HOLE/SEEK_DATA
+ * results of all active sub-I/Os into one file-level offset, stored in
+ * io->u.ci_lseek.ls_result.
+ *
+ * Each stripe-local result is mapped back to a file offset with
+ * lov_stripe_size(); the smallest valid offset wins.  For SEEK_HOLE, a
+ * component's end may be remembered as a fallback "virtual hole" when a
+ * full stripe object returned its size as the hole start.
+ */
+static void lov_io_lseek_end(const struct lu_env *env,
+			     const struct cl_io_slice *ios)
+{
+	struct lov_io *lio = cl2lov_io(env, ios);
+	struct cl_io *io = lio->lis_cl.cis_io;
+	struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
+	struct lov_io_sub *sub;
+	loff_t offset = -ENXIO;
+	__u64 hole_off = 0;
+	bool seek_hole = io->u.ci_lseek.ls_whence == SEEK_HOLE;
+
+	ENTRY;
+
+	list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
+		struct cl_io *subio = &sub->sub_io;
+		int index = lov_comp_entry(sub->sub_subio_index);
+		int stripe = lov_comp_stripe(sub->sub_subio_index);
+		loff_t sub_off, lov_off;
+		__u64 comp_end = lsm->lsm_entries[index]->lsme_extent.e_end;
+
+		/* finish the sub-I/O before reading its result */
+		lov_io_end_wrapper(sub->sub_env, subio);
+
+		/* first sub-I/O error becomes the parent's result */
+		if (io->ci_result == 0)
+			io->ci_result = sub->sub_io.ci_result;
+
+		if (io->ci_result)
+			continue;
+
+		CDEBUG(D_INFO, DFID": entry %x stripe %u: SEEK_%s from %lld\n",
+		       PFID(lu_object_fid(lov2lu(lio->lis_object))),
+		       index, stripe, seek_hole ? "HOLE" : "DATA",
+		       subio->u.ci_lseek.ls_start);
+
+		/* first subio with positive result is what we need */
+		sub_off = subio->u.ci_lseek.ls_result;
+		/* Expected error, offset is out of stripe file size */
+		if (sub_off == -ENXIO)
+			continue;
+		/* Any other errors are not expected with ci_result == 0 */
+		if (sub_off < 0) {
+			CDEBUG(D_INFO, "unexpected error: rc = %lld\n",
+			       sub_off);
+			io->ci_result = sub_off;
+			continue;
+		}
+		/* map stripe-local offset back to a file-level offset */
+		lov_off = lov_stripe_size(lsm, index, sub_off + 1, stripe) - 1;
+		if (lov_off < 0) {
+			/* the only way to get a negative lov_off here is a
+			 * too-big result. Return -EOVERFLOW then.
+			 */
+			io->ci_result = -EOVERFLOW;
+			CDEBUG(D_INFO, "offset %llu is too big: rc = %d\n",
+			       (u64)lov_off, io->ci_result);
+			continue;
+		}
+		if (lov_off < io->u.ci_lseek.ls_start) {
+			io->ci_result = -EINVAL;
+			CDEBUG(D_INFO, "offset %lld < start %lld: rc = %d\n",
+			       sub_off, io->u.ci_lseek.ls_start, io->ci_result);
+			continue;
+		}
+		/* resulting offset can be out of component range if stripe
+		 * object is full and its file size was returned as virtual
+		 * hole start. Skip this result, the next component will give
+		 * us correct lseek result but keep possible hole offset in
+		 * case there is no more components ahead
+		 */
+		if (lov_off >= comp_end) {
+			/* must be SEEK_HOLE case */
+			if (likely(seek_hole)) {
+				/* save comp end as potential hole offset */
+				hole_off = max_t(__u64, comp_end, hole_off);
+			} else {
+				io->ci_result = -EINVAL;
+				CDEBUG(D_INFO,
+				       "off %lld >= comp_end %llu: rc = %d\n",
+				       lov_off, comp_end, io->ci_result);
+			}
+			continue;
+		}
+
+		CDEBUG(D_INFO, "SEEK_%s: %lld->%lld/%lld: rc = %d\n",
+		       seek_hole ? "HOLE" : "DATA",
+		       subio->u.ci_lseek.ls_start, sub_off, lov_off,
+		       sub->sub_io.ci_result);
+		/* offset starts as -ENXIO, which as __u64 is huge, so the
+		 * unsigned min_t picks the first (smallest) valid lov_off
+		 */
+		offset = min_t(__u64, offset, lov_off);
+	}
+	/* no result but some component returns hole as component end */
+	if (seek_hole && offset == -ENXIO && hole_off > 0)
+		offset = hole_off;
+
+	io->u.ci_lseek.ls_result = offset;
+	RETURN_EXIT;
+}
+
static const struct cl_io_operations lov_io_ops = {
.op = {
[CIT_READ] = {
.cio_iter_fini = lov_io_iter_fini,
.cio_lock = lov_io_lock,
.cio_unlock = lov_io_unlock,
- .cio_start = lov_io_start,
+ .cio_start = lov_io_setattr_start,
.cio_end = lov_io_end
},
[CIT_DATA_VERSION] = {
.cio_start = lov_io_start,
.cio_end = lov_io_end
},
+ [CIT_LSEEK] = {
+ .cio_fini = lov_io_fini,
+ .cio_iter_init = lov_io_iter_init,
+ .cio_iter_fini = lov_io_iter_fini,
+ .cio_lock = lov_io_lock,
+ .cio_unlock = lov_io_unlock,
+ .cio_start = lov_io_start,
+ .cio_end = lov_io_lseek_end
+ },
[CIT_GLIMPSE] = {
.cio_fini = lov_io_fini,
},
}
},
.cio_read_ahead = lov_io_read_ahead,
+ .cio_lru_reserve = lov_io_lru_reserve,
.cio_submit = lov_io_submit,
.cio_commit_async = lov_io_commit_async,
};
ENTRY;
if (atomic_dec_and_test(&lov->lo_active_ios))
- wake_up_all(&lov->lo_waitq);
+ wake_up(&lov->lo_waitq);
EXIT;
}
break;
case CIT_FSYNC:
case CIT_LADVISE:
+ case CIT_LSEEK:
case CIT_SETATTR:
case CIT_DATA_VERSION:
result = +1;
case CIT_READ:
case CIT_WRITE:
case CIT_FAULT:
+ case CIT_LSEEK:
io->ci_restore_needed = 1;
result = -ENODATA;
break;
for (i = start_index; i <= end_index; i++) {
struct lov_layout_entry *lle = lov_entry(lov, i);
+ LASSERT(!lsme_is_foreign(lle->lle_lsme));
+
if ((offset >= lle->lle_extent->e_start &&
offset < lle->lle_extent->e_end) ||
(offset == OBD_OBJECT_EOF &&