/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for LOV layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LOV

#include "lov_cl_internal.h"
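
/*
 * Allocate a sub-io for the component/stripe identified by @index.  The
 * first sub-io of a lov_io is served from the embedded lis_single_subio
 * slot, so the common single-stripe case needs no extra allocation.
 */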

static inline struct lov_io_sub *lov_sub_alloc(struct lov_io *lio, int index)
{
        struct lov_io_sub *sub;

        if (lio->lis_nr_subios == 0) {
                LASSERT(lio->lis_single_subio_index == -1);
                sub = &lio->lis_single_subio;
                lio->lis_single_subio_index = index;
                memset(sub, 0, sizeof(*sub));
        } else {
                OBD_ALLOC_PTR(sub);
        }
        if (sub) {
                INIT_LIST_HEAD(&sub->sub_list);
                INIT_LIST_HEAD(&sub->sub_linkage);
                sub->sub_subio_index = index;
        }

        return sub;
}

static inline void lov_sub_free(struct lov_io *lio, struct lov_io_sub *sub)
{
        if (sub->sub_subio_index == lio->lis_single_subio_index) {
                LASSERT(sub == &lio->lis_single_subio);
                lio->lis_single_subio_index = -1;
        } else {
                OBD_FREE_PTR(sub);
        }
}

static void lov_io_sub_fini(const struct lu_env *env, struct lov_io *lio,
                            struct lov_io_sub *sub)
{
        cl_io_fini(sub->sub_env, &sub->sub_io);

        if (sub->sub_env != NULL && !IS_ERR(sub->sub_env)) {
                cl_env_put(sub->sub_env, &sub->sub_refcheck);
                sub->sub_env = NULL;
        }
}

static bool
is_index_within_mirror(struct lov_object *lov, int index, int mirror_index)
{
        struct lov_layout_composite *comp = &lov->u.composite;
        struct lov_mirror_entry *lre = &comp->lo_mirrors[mirror_index];

        return (index >= lre->lre_start && index <= lre->lre_end);
}
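
/*
 * Initialize sub-io @sub: resolve the sub-object for its component/stripe,
 * copy the relevant I/O flags from the parent cl_io, and set the sub-io up
 * at the layer below via cl_io_sub_init().
 */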

static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio,
                           struct lov_io_sub *sub)
{
        struct lov_object *lov = lio->lis_object;
        struct cl_io *sub_io;
        struct cl_object *sub_obj;
        struct cl_io *io = lio->lis_cl.cis_io;
        int index = lov_comp_entry(sub->sub_subio_index);
        int stripe = lov_comp_stripe(sub->sub_subio_index);
        int result = 0;

        LASSERT(sub->sub_env == NULL);

        if (unlikely(!lov_r0(lov, index)->lo_sub ||
                     !lov_r0(lov, index)->lo_sub[stripe]))
                RETURN(-EIO);

        LASSERTF(is_index_within_mirror(lov, index, lio->lis_mirror_index),
                 DFID "iot = %d, index = %d, mirror = %d\n",
                 PFID(lu_object_fid(lov2lu(lov))), io->ci_type, index,
                 lio->lis_mirror_index);

        /* obtain a new environment */
        sub->sub_env = cl_env_get(&sub->sub_refcheck);
        if (IS_ERR(sub->sub_env))
                result = PTR_ERR(sub->sub_env);

        sub_obj = lovsub2cl(lov_r0(lov, index)->lo_sub[stripe]);
        sub_io = &sub->sub_io;

        sub_io->ci_obj = sub_obj;
        sub_io->ci_result = 0;

        sub_io->ci_parent = io;
        sub_io->ci_lockreq = io->ci_lockreq;
        sub_io->ci_type = io->ci_type;
        sub_io->ci_no_srvlock = io->ci_no_srvlock;
        sub_io->ci_noatime = io->ci_noatime;
        sub_io->ci_pio = io->ci_pio;
        sub_io->ci_lock_no_expand = io->ci_lock_no_expand;
        sub_io->ci_ndelay = io->ci_ndelay;
        sub_io->ci_layout_version = io->ci_layout_version;

        result = cl_io_sub_init(sub->sub_env, sub_io, io->ci_type, sub_obj);
        if (result < 0)
                lov_io_sub_fini(env, lio, sub);

        RETURN(result);
}
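
/*
 * Look up the sub-io for @index on the lis_subios list, or allocate and
 * initialize a new one if it does not exist yet.  Returns the sub-io, or an
 * ERR_PTR on failure.
 */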

struct lov_io_sub *lov_sub_get(const struct lu_env *env,
                               struct lov_io *lio, int index)
{
        struct lov_io_sub *sub;
        int rc = 0;

        list_for_each_entry(sub, &lio->lis_subios, sub_list) {
                if (sub->sub_subio_index == index) {
                        rc = 1;
                        break;
                }
        }
        if (rc == 1)
                RETURN(sub);

        sub = lov_sub_alloc(lio, index);
        if (!sub)
                GOTO(out, rc = -ENOMEM);

        rc = lov_io_sub_init(env, lio, sub);
        if (rc < 0) {
                lov_sub_free(lio, sub);
                GOTO(out, rc);
        }

        list_add_tail(&sub->sub_list, &lio->lis_subios);
        lio->lis_nr_subios++;
out:
        if (rc < 0)
                sub = ERR_PTR(rc);

        RETURN(sub);
}

/*****************************************************************************
 *
 * Lov io operations.
 *
 */

int lov_page_index(const struct cl_page *page)
{
        const struct cl_page_slice *slice;

        slice = cl_page_at(page, &lov_device_type);
        LASSERT(slice != NULL);
        LASSERT(slice->cpl_obj != NULL);

        RETURN(cl2lov_page(slice)->lps_index);
}

static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
                             struct cl_io *io)
{
        LASSERT(lio->lis_object != NULL);

        INIT_LIST_HEAD(&lio->lis_subios);
        lio->lis_single_subio_index = -1;
        lio->lis_nr_subios = 0;

        RETURN(0);
}

/**
 * Decide whether this I/O needs a write intent RPC.
 */
static int lov_io_mirror_write_intent(struct lov_io *lio,
                                      struct lov_object *obj, struct cl_io *io)
{
        struct lov_layout_composite *comp = &obj->u.composite;
        struct lu_extent *ext = &io->ci_write_intent;
        struct lov_mirror_entry *lre;
        struct lov_mirror_entry *primary;
        struct lov_layout_entry *lle;
        size_t count = 0;

        *ext = (typeof(*ext)) { lio->lis_pos, lio->lis_endpos };
        io->ci_need_write_intent = 0;

        if (!(io->ci_type == CIT_WRITE || cl_io_is_trunc(io) ||
              cl_io_is_mkwrite(io)))
                RETURN(0);

        if (lov_flr_state(obj) == LCM_FL_RDONLY ||
            lov_flr_state(obj) == LCM_FL_SYNC_PENDING) {
                io->ci_need_write_intent = 1;
                RETURN(0);
        }

        LASSERT((lov_flr_state(obj) == LCM_FL_WRITE_PENDING));
        LASSERT(comp->lo_preferred_mirror >= 0);

        /* need to iterate over all components to see if there are
         * multiple components covering the writing component */
        primary = &comp->lo_mirrors[comp->lo_preferred_mirror];
        LASSERT(!primary->lre_stale);
        lov_foreach_mirror_layout_entry(obj, lle, primary) {
                LASSERT(lle->lle_valid);
                if (!lu_extent_is_overlapped(ext, lle->lle_extent))
                        continue;

                ext->e_start = MIN(ext->e_start, lle->lle_extent->e_start);
                ext->e_end = MAX(ext->e_end, lle->lle_extent->e_end);
                count++;
        }
        if (count == 0) {
                CERROR(DFID ": cannot find any valid components covering "
                       "file extent "DEXT", mirror: %d\n",
                       PFID(lu_object_fid(lov2lu(obj))), PEXT(ext),
                       primary->lre_mirror_id);
                RETURN(-EIO);
        }

        count = 0;
        lov_foreach_mirror_entry(obj, lre) {
                if (lre == primary)
                        continue;

                lov_foreach_mirror_layout_entry(obj, lle, lre) {
                        if (!lle->lle_valid)
                                continue;

                        if (lu_extent_is_overlapped(ext, lle->lle_extent)) {
                                count++;
                                break;
                        }
                }
        }

        CDEBUG(D_VFSTRACE, DFID "there are %zd components to be staled to "
               "modify file extent "DEXT", iot: %d\n",
               PFID(lu_object_fid(lov2lu(obj))), count, PEXT(ext), io->ci_type);

        io->ci_need_write_intent = count > 0;

        RETURN(0);
}
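
/*
 * Pick the mirror that this I/O will use.  For a non-FLR file this is
 * trivially the single (preferred) mirror; for an FLR file the preferred
 * mirror is tried first and subsequent retries round-robin over the
 * remaining mirrors that have a component covering the I/O position.
 */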

static int lov_io_mirror_init(struct lov_io *lio, struct lov_object *obj,
                              struct cl_io *io)
{
        struct lov_layout_composite *comp = &obj->u.composite;
        int index;
        int i;
        int result;

        if (!lov_is_flr(obj)) {
                LASSERT(comp->lo_preferred_mirror == 0);
                lio->lis_mirror_index = comp->lo_preferred_mirror;
                RETURN(0);
        }

        result = lov_io_mirror_write_intent(lio, obj, io);
        if (result)
                RETURN(result);

        if (io->ci_need_write_intent) {
                CDEBUG(D_VFSTRACE, DFID " need write intent for [%llu, %llu)\n",
                       PFID(lu_object_fid(lov2lu(obj))),
                       lio->lis_pos, lio->lis_endpos);

                /* stop cl_io_init() loop */
                RETURN(1);
        }

        /* transfer the layout version for verification */
        io->ci_layout_version = obj->lo_lsm->lsm_layout_gen;

        if (io->ci_ndelay_tried == 0 || /* first time to try */
            /* reset the mirror index if layout has changed */
            lio->lis_mirror_layout_gen != obj->lo_lsm->lsm_layout_gen) {
                lio->lis_mirror_layout_gen = obj->lo_lsm->lsm_layout_gen;
                index = lio->lis_mirror_index = comp->lo_preferred_mirror;
        } else {
                index = lio->lis_mirror_index;
                LASSERT(index >= 0);

                /* move mirror index to the next one */
                index = (index + 1) % comp->lo_mirror_count;
        }

        for (i = 0; i < comp->lo_mirror_count; i++) {
                struct lu_extent ext = { .e_start = lio->lis_pos,
                                         .e_end = lio->lis_pos + 1 };
                struct lov_mirror_entry *lre;
                struct lov_layout_entry *lle;
                bool found = false;

                lre = &comp->lo_mirrors[(index + i) % comp->lo_mirror_count];
                if (lre->lre_stale)
                        continue;

                lov_foreach_mirror_layout_entry(obj, lle, lre) {
                        if (!lle->lle_valid)
                                continue;

                        if (lu_extent_is_overlapped(&ext, lle->lle_extent)) {
                                found = true;
                                break;
                        }
                }

                if (found) {
                        index = (index + i) % comp->lo_mirror_count;
                        break;
                }
        }

        if (i == comp->lo_mirror_count) {
                CERROR(DFID": failed to find a component covering "
                       "I/O region at %llu\n",
                       PFID(lu_object_fid(lov2lu(obj))), lio->lis_pos);

                dump_lsm(D_ERROR, obj->lo_lsm);
                RETURN(-EIO);
        }

        CDEBUG(D_VFSTRACE, DFID ": flr state: %d, move mirror from %d to %d, "
               "have retried: %d, mirror count: %d\n",
               PFID(lu_object_fid(lov2lu(obj))), lov_flr_state(obj),
               lio->lis_mirror_index, index, io->ci_ndelay_tried,
               comp->lo_mirror_count);

        lio->lis_mirror_index = index;

        /* FLR: if all mirrors have been tried once, most likely the network
         * of this client has been partitioned.  We should relinquish CPU for
         * a while before trying again. */
        ++io->ci_ndelay_tried;
        if (io->ci_ndelay && io->ci_ndelay_tried >= comp->lo_mirror_count) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC)); /* ~1 second */
                if (signal_pending(current))
                        RETURN(-EINTR);

                /* reset retry counter */
                io->ci_ndelay_tried = 1;
        }

        CDEBUG(D_VFSTRACE, "use %sdelayed RPC state for this IO\n",
               io->ci_ndelay ? "non-" : "");

        RETURN(result);
}
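
/*
 * Derive the [lis_pos, lis_endpos) range of this I/O from the top-level
 * cl_io, pick the mirror to use, and decide whether any uninstantiated
 * component in the range needs a write intent (layout instantiation)
 * before the I/O can proceed.
 */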

static int lov_io_slice_init(struct lov_io *lio,
                             struct lov_object *obj, struct cl_io *io)
{
        struct lu_extent ext;
        int index;
        int result = 0;

        lio->lis_object = obj;

        LASSERT(obj->lo_lsm != NULL);

        switch (io->ci_type) {
        case CIT_READ:
        case CIT_WRITE:
                lio->lis_pos = io->u.ci_rw.rw_range.cir_pos;
                lio->lis_endpos = lio->lis_pos + io->u.ci_rw.rw_range.cir_count;
                lio->lis_io_endpos = lio->lis_endpos;
                if (cl_io_is_append(io)) {
                        LASSERT(io->ci_type == CIT_WRITE);

                        /* If there is a LOV EA hole, we may not be able to
                         * locate the current file tail exactly. */
                        if (unlikely(obj->lo_lsm->lsm_entries[0]->lsme_pattern &
                                     LOV_PATTERN_F_HOLE))
                                GOTO(out, result = -EIO);

                        lio->lis_pos = 0;
                        lio->lis_endpos = OBD_OBJECT_EOF;
                }
                break;

        case CIT_SETATTR:
                if (cl_io_is_trunc(io))
                        lio->lis_pos = io->u.ci_setattr.sa_attr.lvb_size;
                else
                        lio->lis_pos = 0;
                lio->lis_endpos = OBD_OBJECT_EOF;
                break;

        case CIT_DATA_VERSION:
                lio->lis_pos = 0;
                lio->lis_endpos = OBD_OBJECT_EOF;
                break;

        case CIT_FAULT: {
                pgoff_t index = io->u.ci_fault.ft_index;

                lio->lis_pos = cl_offset(io->ci_obj, index);
                lio->lis_endpos = cl_offset(io->ci_obj, index + 1);
                break;
        }

        case CIT_FSYNC:
                lio->lis_pos = io->u.ci_fsync.fi_start;
                lio->lis_endpos = io->u.ci_fsync.fi_end;
                break;

        case CIT_LADVISE:
                lio->lis_pos = io->u.ci_ladvise.li_start;
                lio->lis_endpos = io->u.ci_ladvise.li_end;
                break;

        case CIT_GLIMPSE:
                lio->lis_pos = 0;
                lio->lis_endpos = OBD_OBJECT_EOF;

                if (lov_flr_state(obj) == LCM_FL_RDONLY &&
                    !OBD_FAIL_CHECK(OBD_FAIL_FLR_GLIMPSE_IMMUTABLE))
                        /* SoM is accurate, no need to glimpse */
                        GOTO(out, result = 1);
                break;

        case CIT_MISC:
                lio->lis_pos = 0;
                lio->lis_endpos = OBD_OBJECT_EOF;
                break;

        default:
                LBUG();
        }

        result = lov_io_mirror_init(lio, obj, io);
        if (result)
                GOTO(out, result);

        /* check if it needs to instantiate layout components */
        if (!(io->ci_type == CIT_WRITE || cl_io_is_mkwrite(io) ||
              (cl_io_is_trunc(io) && io->u.ci_setattr.sa_attr.lvb_size > 0)))
                GOTO(out, result = 0);

        ext.e_start = lio->lis_pos;
        ext.e_end = lio->lis_endpos;

        /* for truncate, only the components before the truncated size need
         * to be instantiated. */
        if (cl_io_is_trunc(io))
                ext.e_end = io->u.ci_setattr.sa_attr.lvb_size;

        lov_foreach_io_layout(index, lio, &ext) {
                if (!lsm_entry_inited(obj->lo_lsm, index)) {
                        io->ci_need_write_intent = 1;
                        io->ci_write_intent = ext;
                        GOTO(out, result = 1);
                }
        }
out:
        RETURN(result);
}
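
/*
 * Finalize the lov io slice: tear down and free all sub-ios and drop the
 * reference that keeps lo_active_ios elevated for the duration of the I/O.
 */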

static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_object *lov = cl2lov(ios->cis_obj);

        LASSERT(list_empty(&lio->lis_active));

        while (!list_empty(&lio->lis_subios)) {
                struct lov_io_sub *sub = list_entry(lio->lis_subios.next,
                                                    struct lov_io_sub,
                                                    sub_list);

                list_del_init(&sub->sub_list);
                lio->lis_nr_subios--;

                lov_io_sub_fini(env, lio, sub);
                lov_sub_free(lio, sub);
        }
        LASSERT(lio->lis_nr_subios == 0);

        LASSERT(atomic_read(&lov->lo_active_ios) > 0);
        if (atomic_dec_and_test(&lov->lo_active_ios))
                wake_up_all(&lov->lo_waitq);
}
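
/*
 * Propagate the parameters of the parent cl_io into sub-io @sub, translating
 * file offsets into per-stripe offsets where necessary.
 */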

static void lov_io_sub_inherit(struct lov_io_sub *sub, struct lov_io *lio,
                               loff_t start, loff_t end)
{
        struct cl_io *io = &sub->sub_io;
        struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
        struct cl_io *parent = lio->lis_cl.cis_io;
        int index = lov_comp_entry(sub->sub_subio_index);
        int stripe = lov_comp_stripe(sub->sub_subio_index);

        io->ci_pio = parent->ci_pio;
        switch (io->ci_type) {
        case CIT_SETATTR: {
                io->u.ci_setattr.sa_attr = parent->u.ci_setattr.sa_attr;
                io->u.ci_setattr.sa_attr_flags =
                        parent->u.ci_setattr.sa_attr_flags;
                io->u.ci_setattr.sa_valid = parent->u.ci_setattr.sa_valid;
                io->u.ci_setattr.sa_stripe_index = stripe;
                io->u.ci_setattr.sa_parent_fid =
                        parent->u.ci_setattr.sa_parent_fid;
                if (cl_io_is_trunc(io)) {
                        loff_t new_size = parent->u.ci_setattr.sa_attr.lvb_size;

                        new_size = lov_size_to_stripe(lsm, index, new_size,
                                                      stripe);
                        io->u.ci_setattr.sa_attr.lvb_size = new_size;
                }
                lov_lsm2layout(lsm, lsm->lsm_entries[index],
                               &io->u.ci_setattr.sa_layout);
                break;
        }
        case CIT_DATA_VERSION: {
                io->u.ci_data_version.dv_data_version = 0;
                io->u.ci_data_version.dv_flags =
                        parent->u.ci_data_version.dv_flags;
                break;
        }
        case CIT_FAULT: {
                struct cl_object *obj = parent->ci_obj;
                loff_t off = cl_offset(obj, parent->u.ci_fault.ft_index);

                io->u.ci_fault = parent->u.ci_fault;
                off = lov_size_to_stripe(lsm, index, off, stripe);
                io->u.ci_fault.ft_index = cl_index(obj, off);
                break;
        }
        case CIT_FSYNC: {
                io->u.ci_fsync.fi_start = start;
                io->u.ci_fsync.fi_end = end;
                io->u.ci_fsync.fi_fid = parent->u.ci_fsync.fi_fid;
                io->u.ci_fsync.fi_mode = parent->u.ci_fsync.fi_mode;
                break;
        }
        case CIT_READ:
        case CIT_WRITE: {
                io->u.ci_rw.rw_ptask = parent->u.ci_rw.rw_ptask;
                io->u.ci_rw.rw_iter = parent->u.ci_rw.rw_iter;
                io->u.ci_rw.rw_iocb = parent->u.ci_rw.rw_iocb;
                io->u.ci_rw.rw_file = parent->u.ci_rw.rw_file;
                io->u.ci_rw.rw_sync = parent->u.ci_rw.rw_sync;
                if (cl_io_is_append(parent)) {
                        io->u.ci_rw.rw_append = 1;
                } else {
                        io->u.ci_rw.rw_range.cir_pos = start;
                        io->u.ci_rw.rw_range.cir_count = end - start;
                }
                break;
        }
        case CIT_LADVISE: {
                io->u.ci_ladvise.li_start = start;
                io->u.ci_ladvise.li_end = end;
                io->u.ci_ladvise.li_fid = parent->u.ci_ladvise.li_fid;
                io->u.ci_ladvise.li_advice = parent->u.ci_ladvise.li_advice;
                io->u.ci_ladvise.li_flags = parent->u.ci_ladvise.li_flags;
                break;
        }
        default:
                break;
        }
}

static loff_t lov_offset_mod(loff_t val, int delta)
{
        if (val != OBD_OBJECT_EOF)
                val += delta;
        return val;
}
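
/*
 * Start one iteration of the I/O: walk every component/stripe that
 * intersects [lis_pos, lis_endpos), set up a sub-io for each, and queue the
 * sub-io on the lis_active list.
 */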

static int lov_io_iter_init(const struct lu_env *env,
                            const struct cl_io_slice *ios)
{
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
        struct lov_io_sub *sub;
        struct lu_extent ext;
        int index;
        int rc = 0;

        ext.e_start = lio->lis_pos;
        ext.e_end = lio->lis_endpos;

        lov_foreach_io_layout(index, lio, &ext) {
                struct lov_layout_raid0 *r0 = lov_r0(lio->lis_object, index);
                u64 start;
                u64 end;
                int stripe;

                CDEBUG(D_VFSTRACE, "component[%d] flags %#x\n",
                       index, lsm->lsm_entries[index]->lsme_flags);
                if (!lsm_entry_inited(lsm, index)) {
                        /* Reads from uninitialized components should return
                         * zero-filled pages. */
                        continue;
                }

                for (stripe = 0; stripe < r0->lo_nr; stripe++) {
                        if (!lov_stripe_intersects(lsm, index, stripe,
                                                   &ext, &start, &end))
                                continue;

                        if (unlikely(r0->lo_sub[stripe] == NULL)) {
                                if (ios->cis_io->ci_type == CIT_READ ||
                                    ios->cis_io->ci_type == CIT_WRITE ||
                                    ios->cis_io->ci_type == CIT_FAULT)
                                        GOTO(out, rc = -EIO);

                                continue;
                        }

                        end = lov_offset_mod(end, 1);
                        sub = lov_sub_get(env, lio,
                                          lov_comp_index(index, stripe));
                        if (IS_ERR(sub))
                                GOTO(out, rc = PTR_ERR(sub));

                        lov_io_sub_inherit(sub, lio, start, end);
                        rc = cl_io_iter_init(sub->sub_env, &sub->sub_io);
                        if (rc != 0)
                                cl_io_iter_fini(sub->sub_env, &sub->sub_io);
                        if (rc != 0)
                                GOTO(out, rc);

                        CDEBUG(D_VFSTRACE,
                               "shrink stripe: {%d, %d} range: [%llu, %llu)\n",
                               index, stripe, start, end);

                        list_add_tail(&sub->sub_linkage, &lio->lis_active);
                }
        }
out:
        RETURN(rc);
}
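
/*
 * For read/write, clip the current iteration at the nearest stripe and
 * component boundary so that each iteration stays inside a single layout
 * component, then delegate to lov_io_iter_init().
 */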

static int lov_io_rw_iter_init(const struct lu_env *env,
                               const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_stripe_md_entry *lse;
        struct cl_io_range *range = &io->u.ci_rw.rw_range;
        loff_t start = range->cir_pos;
        loff_t next;
        int index;

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

        if (cl_io_is_append(io))
                RETURN(lov_io_iter_init(env, ios));

        index = lov_io_layout_at(lio, range->cir_pos);
        if (index < 0) { /* non-existing layout component */
                if (io->ci_type == CIT_READ) {
                        /* TODO: it needs to detect the next component and
                         * then set the next pos */
                        /* execute it in the main thread */
                        io->ci_pio = 0;
                }

                RETURN(lov_io_iter_init(env, ios));
        }

        lse = lov_lse(lio->lis_object, index);

        next = MAX_LFS_FILESIZE;
        if (lse->lsme_stripe_count > 1) {
                unsigned long ssize = lse->lsme_stripe_size;

                lov_do_div64(start, ssize);
                next = (start + 1) * ssize;
                if (next <= start * ssize)
                        next = MAX_LFS_FILESIZE;
        }

        LASSERTF(range->cir_pos >= lse->lsme_extent.e_start,
                 "pos %lld, [%lld, %lld)\n", range->cir_pos,
                 lse->lsme_extent.e_start, lse->lsme_extent.e_end);
        next = min_t(__u64, next, lse->lsme_extent.e_end);
        next = min_t(loff_t, next, lio->lis_io_endpos);

        io->ci_continue = next < lio->lis_io_endpos;
        range->cir_count = next - range->cir_pos;
        lio->lis_pos = range->cir_pos;
        lio->lis_endpos = range->cir_pos + range->cir_count;
        CDEBUG(D_VFSTRACE,
               "stripe: {%d, %llu} range: [%llu, %llu) end: %llu, count: %zd\n",
               index, start, lio->lis_pos, lio->lis_endpos,
               lio->lis_io_endpos, range->cir_count);

        if (!io->ci_continue) {
                /* the last piece of IO, execute it in the main thread */
                io->ci_pio = 0;
        }

        /*
         * XXX The following call should be optimized: we know that
         * [lio->lis_pos, lio->lis_endpos) intersects with exactly one stripe.
         */
        RETURN(lov_io_iter_init(env, ios));
}

static int lov_io_setattr_iter_init(const struct lu_env *env,
                                    const struct cl_io_slice *ios)
{
        struct lov_io *lio = cl2lov_io(env, ios);
        struct cl_io *io = ios->cis_io;
        int index;

        if (cl_io_is_trunc(io) && lio->lis_pos > 0) {
                index = lov_io_layout_at(lio, lio->lis_pos - 1);
                /* no entry found for such offset */
                if (index < 0)
                        RETURN(io->ci_result = -ENODATA);
        }

        RETURN(lov_io_iter_init(env, ios));
}
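
/*
 * Apply @iofunc to every sub-io on the lis_active list, propagating a
 * sub-io failure into the parent cl_io's ci_result.
 */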

static int lov_io_call(const struct lu_env *env, struct lov_io *lio,
                       int (*iofunc)(const struct lu_env *, struct cl_io *))
{
        struct cl_io *parent = lio->lis_cl.cis_io;
        struct lov_io_sub *sub;
        int rc = 0;

        list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
                rc = iofunc(sub->sub_env, &sub->sub_io);
                if (rc)
                        break;

                if (parent->ci_result == 0)
                        parent->ci_result = sub->sub_io.ci_result;
        }
        RETURN(rc);
}

static int lov_io_lock(const struct lu_env *env, const struct cl_io_slice *ios)
{
        RETURN(lov_io_call(env, cl2lov_io(env, ios), cl_io_lock));
}

static int lov_io_start(const struct lu_env *env, const struct cl_io_slice *ios)
{
        RETURN(lov_io_call(env, cl2lov_io(env, ios), cl_io_start));
}

static int lov_io_end_wrapper(const struct lu_env *env, struct cl_io *io)
{
        /*
         * It's possible that lov_io_start() wasn't called against this
         * sub-io, either because the previous sub-io failed, or because the
         * upper layer completed the IO early.
         */
        if (io->ci_state == CIS_IO_GOING)
                cl_io_end(env, io);
        else
                io->ci_state = CIS_IO_FINISHED;
        RETURN(0);
}

static int lov_io_iter_fini_wrapper(const struct lu_env *env, struct cl_io *io)
{
        cl_io_iter_fini(env, io);
        RETURN(0);
}

static int lov_io_unlock_wrapper(const struct lu_env *env, struct cl_io *io)
{
        cl_io_unlock(env, io);
        RETURN(0);
}

static void lov_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
{
        int rc;

        rc = lov_io_call(env, cl2lov_io(env, ios), lov_io_end_wrapper);
        LASSERT(rc == 0);
}

static void
lov_io_data_version_end(const struct lu_env *env, const struct cl_io_slice *ios)
{
        struct lov_io *lio = cl2lov_io(env, ios);
        struct cl_io *parent = lio->lis_cl.cis_io;
        struct lov_io_sub *sub;

        list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
                lov_io_end_wrapper(env, &sub->sub_io);

                parent->u.ci_data_version.dv_data_version +=
                        sub->sub_io.u.ci_data_version.dv_data_version;

                if (parent->ci_result == 0)
                        parent->ci_result = sub->sub_io.ci_result;
        }
}

static void lov_io_iter_fini(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct lov_io *lio = cl2lov_io(env, ios);
        int rc;

        rc = lov_io_call(env, lio, lov_io_iter_fini_wrapper);
        LASSERT(rc == 0);
        while (!list_empty(&lio->lis_active))
                list_del_init(lio->lis_active.next);
}

static void lov_io_unlock(const struct lu_env *env,
                          const struct cl_io_slice *ios)
{
        int rc;

        rc = lov_io_call(env, cl2lov_io(env, ios), lov_io_unlock_wrapper);
        LASSERT(rc == 0);
}
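
/*
 * Resolve the component and stripe that page @start lives in, forward the
 * read-ahead request to that sub-io, and then clamp the result so that
 * read-ahead never crosses a stripe or component boundary.
 */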

static int lov_io_read_ahead(const struct lu_env *env,
                             const struct cl_io_slice *ios,
                             pgoff_t start, struct cl_read_ahead *ra)
{
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_object *loo = lio->lis_object;
        struct cl_object *obj = lov2cl(loo);
        struct lov_layout_raid0 *r0;
        struct lov_io_sub *sub;
        loff_t offset;
        loff_t suboff;
        pgoff_t ra_end;
        unsigned int pps; /* pages per stripe */
        int stripe;
        int index;
        int rc;

        offset = cl_offset(obj, start);
        index = lov_io_layout_at(lio, offset);
        if (index < 0 || !lsm_entry_inited(loo->lo_lsm, index))
                RETURN(-ENODATA);

        /* avoid readahead expanding into stale components */
        if (!lov_entry(loo, index)->lle_valid)
                RETURN(-EINVAL);

        stripe = lov_stripe_number(loo->lo_lsm, index, offset);

        r0 = lov_r0(loo, index);
        if (unlikely(r0->lo_sub[stripe] == NULL))
                RETURN(-EIO);

        sub = lov_sub_get(env, lio, lov_comp_index(index, stripe));
        if (IS_ERR(sub))
                RETURN(PTR_ERR(sub));

        lov_stripe_offset(loo->lo_lsm, index, offset, stripe, &suboff);
        rc = cl_io_read_ahead(sub->sub_env, &sub->sub_io,
                              cl_index(lovsub2cl(r0->lo_sub[stripe]), suboff),
                              ra);

        CDEBUG(D_READA, DFID " cra_end = %lu, stripes = %d, rc = %d\n",
               PFID(lu_object_fid(lov2lu(loo))), ra->cra_end, r0->lo_nr, rc);
        if (rc)
                RETURN(rc);

        /*
         * Adjust the stripe index by the layout of the component.
         * ra->cra_end is the maximum page index covered by an underlying
         * DLM lock.  This function converts cra_end from stripe level to
         * file level and makes sure it does not go beyond the stripe and
         * component boundary.
         */

        /* cra_end is stripe level, convert it into file level */
        ra_end = ra->cra_end;
        if (ra_end != CL_PAGE_EOF)
                ra->cra_end = lov_stripe_pgoff(loo->lo_lsm, index,
                                               ra_end, stripe);

        /* boundary of the current component */
        ra_end = cl_index(obj, (loff_t)lov_io_extent(lio, index)->e_end);
        if (ra_end != CL_PAGE_EOF && ra->cra_end >= ra_end)
                ra->cra_end = ra_end - 1;

        if (r0->lo_nr == 1) /* single stripe file */
                RETURN(0);

        pps = lov_lse(loo, index)->lsme_stripe_size >> PAGE_SHIFT;

        CDEBUG(D_READA, DFID " max_index = %lu, pps = %u, index = %u, "
               "stripe_size = %u, stripe no = %u, start index = %lu\n",
               PFID(lu_object_fid(lov2lu(loo))), ra->cra_end, pps, index,
               lov_lse(loo, index)->lsme_stripe_size, stripe, start);

        /* never exceed the end of the stripe */
        ra->cra_end = min_t(pgoff_t,
                            ra->cra_end, start + pps - start % pps - 1);

        RETURN(0);
}

/**
 * lov implementation of the cl_operations::cio_submit() method.  It takes a
 * list of pages in \a queue, splits it into per-stripe sub-lists, invokes
 * cl_io_submit() on the underlying devices to submit the sub-lists, and then
 * splices everything back.
 *
 * The major complication of this function is the need to handle memory
 * cleansing: cl_io_submit() is called to write out pages as a part of VM
 * memory reclamation, and hence it must not fail due to memory shortage
 * (the system dead-locks otherwise).  To deal with this, some resources
 * (sub-lists, sub-environment, etc.) are allocated per-device on "startup"
 * (i.e., in a non-memory-cleansing context), and in case of memory shortage
 * these pre-allocated resources are used by lov_io_submit() under the
 * lov_device::ld_mutex mutex.
 */
static int lov_io_submit(const struct lu_env *env,
                         const struct cl_io_slice *ios,
                         enum cl_req_type crt, struct cl_2queue *queue)
{
        struct cl_page_list *qin = &queue->c2_qin;
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_io_sub *sub;
        struct cl_page_list *plist = &lov_env_info(env)->lti_plist;
        struct cl_page *page;
        int index;
        int rc = 0;

        if (lio->lis_nr_subios == 1) {
                int idx = lio->lis_single_subio_index;

                sub = lov_sub_get(env, lio, idx);
                LASSERT(!IS_ERR(sub));
                LASSERT(sub == &lio->lis_single_subio);
                rc = cl_io_submit_rw(sub->sub_env, &sub->sub_io,
                                     crt, queue);
                RETURN(rc);
        }

        cl_page_list_init(plist);
        while (qin->pl_nr > 0) {
                struct cl_2queue *cl2q = &lov_env_info(env)->lti_cl2q;

                cl_2queue_init(cl2q);

                page = cl_page_list_first(qin);
                cl_page_list_move(&cl2q->c2_qin, qin, page);

                index = lov_page_index(page);
                while (qin->pl_nr > 0) {
                        page = cl_page_list_first(qin);
                        if (index != lov_page_index(page))
                                break;

                        cl_page_list_move(&cl2q->c2_qin, qin, page);
                }

                sub = lov_sub_get(env, lio, index);
                if (!IS_ERR(sub)) {
                        rc = cl_io_submit_rw(sub->sub_env, &sub->sub_io,
                                             crt, cl2q);
                } else {
                        rc = PTR_ERR(sub);
                }

                cl_page_list_splice(&cl2q->c2_qin, plist);
                cl_page_list_splice(&cl2q->c2_qout, &queue->c2_qout);
                cl_2queue_fini(env, cl2q);

                if (rc != 0)
                        break;
        }

        cl_page_list_splice(plist, qin);
        cl_page_list_fini(env, plist);

        RETURN(rc);
}
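
/*
 * Group the pages in @queue by stripe and call cl_io_commit_async() on the
 * matching sub-io for each group; on error the uncommitted pages are put
 * back on @queue.
 */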

static int lov_io_commit_async(const struct lu_env *env,
                               const struct cl_io_slice *ios,
                               struct cl_page_list *queue, int from, int to,
                               cl_commit_cbt cb)
{
        struct cl_page_list *plist = &lov_env_info(env)->lti_plist;
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_io_sub *sub;
        struct cl_page *page;
        int rc = 0;

        if (lio->lis_nr_subios == 1) {
                int idx = lio->lis_single_subio_index;

                sub = lov_sub_get(env, lio, idx);
                LASSERT(!IS_ERR(sub));
                LASSERT(sub == &lio->lis_single_subio);
                rc = cl_io_commit_async(sub->sub_env, &sub->sub_io, queue,
                                        from, to, cb);
                RETURN(rc);
        }

        cl_page_list_init(plist);
        while (queue->pl_nr > 0) {
                int stripe_to = to;
                int index;

                LASSERT(plist->pl_nr == 0);
                page = cl_page_list_first(queue);
                cl_page_list_move(plist, queue, page);

                index = lov_page_index(page);
                while (queue->pl_nr > 0) {
                        page = cl_page_list_first(queue);
                        if (index != lov_page_index(page))
                                break;

                        cl_page_list_move(plist, queue, page);
                }

                if (queue->pl_nr > 0) /* still has more pages */
                        stripe_to = PAGE_SIZE;

                sub = lov_sub_get(env, lio, index);
                if (!IS_ERR(sub)) {
                        rc = cl_io_commit_async(sub->sub_env, &sub->sub_io,
                                                plist, from, stripe_to, cb);
                } else {
                        rc = PTR_ERR(sub);
                        break;
                }

                if (plist->pl_nr > 0) /* short write */
                        break;

                from = 0;
        }

        /* for the error case, add the pages back into @queue */
        LASSERT(ergo(rc == 0, plist->pl_nr == 0));
        while (plist->pl_nr > 0) {
                /* error occurred, add the uncommitted pages back into queue */
                page = cl_page_list_last(plist);
                cl_page_list_move_head(queue, plist, page);
        }

        RETURN(rc);
}

static int lov_io_fault_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct cl_fault_io *fio;
        struct lov_io *lio;
        struct lov_io_sub *sub;

        fio = &ios->cis_io->u.ci_fault;
        lio = cl2lov_io(env, ios);
        sub = lov_sub_get(env, lio, lov_page_index(fio->ft_page));
        sub->sub_io.u.ci_fault.ft_nob = fio->ft_nob;

        RETURN(lov_io_start(env, ios));
}

static void lov_io_fsync_end(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_io_sub *sub;
        unsigned int *written = &ios->cis_io->u.ci_fsync.fi_nr_written;

        *written = 0;
        list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
                struct cl_io *subio = &sub->sub_io;

                lov_io_end_wrapper(sub->sub_env, subio);

                if (subio->ci_result == 0)
                        *written += subio->u.ci_fsync.fi_nr_written;
        }
}

static const struct cl_io_operations lov_io_ops = {
        .op = {
                [CIT_READ] = {
                        .cio_fini = lov_io_fini,
                        .cio_iter_init = lov_io_rw_iter_init,
                        .cio_iter_fini = lov_io_iter_fini,
                        .cio_lock = lov_io_lock,
                        .cio_unlock = lov_io_unlock,
                        .cio_start = lov_io_start,
                        .cio_end = lov_io_end
                },
                [CIT_WRITE] = {
                        .cio_fini = lov_io_fini,
                        .cio_iter_init = lov_io_rw_iter_init,
                        .cio_iter_fini = lov_io_iter_fini,
                        .cio_lock = lov_io_lock,
                        .cio_unlock = lov_io_unlock,
                        .cio_start = lov_io_start,
                        .cio_end = lov_io_end
                },
                [CIT_SETATTR] = {
                        .cio_fini = lov_io_fini,
                        .cio_iter_init = lov_io_setattr_iter_init,
                        .cio_iter_fini = lov_io_iter_fini,
                        .cio_lock = lov_io_lock,
                        .cio_unlock = lov_io_unlock,
                        .cio_start = lov_io_start,
                        .cio_end = lov_io_end
                },
                [CIT_DATA_VERSION] = {
                        .cio_fini = lov_io_fini,
                        .cio_iter_init = lov_io_iter_init,
                        .cio_iter_fini = lov_io_iter_fini,
                        .cio_lock = lov_io_lock,
                        .cio_unlock = lov_io_unlock,
                        .cio_start = lov_io_start,
                        .cio_end = lov_io_data_version_end,
                },
                [CIT_FAULT] = {
                        .cio_fini = lov_io_fini,
                        .cio_iter_init = lov_io_iter_init,
                        .cio_iter_fini = lov_io_iter_fini,
                        .cio_lock = lov_io_lock,
                        .cio_unlock = lov_io_unlock,
                        .cio_start = lov_io_fault_start,
                        .cio_end = lov_io_end
                },
                [CIT_FSYNC] = {
                        .cio_fini = lov_io_fini,
                        .cio_iter_init = lov_io_iter_init,
                        .cio_iter_fini = lov_io_iter_fini,
                        .cio_lock = lov_io_lock,
                        .cio_unlock = lov_io_unlock,
                        .cio_start = lov_io_start,
                        .cio_end = lov_io_fsync_end
                },
                [CIT_LADVISE] = {
                        .cio_fini = lov_io_fini,
                        .cio_iter_init = lov_io_iter_init,
                        .cio_iter_fini = lov_io_iter_fini,
                        .cio_lock = lov_io_lock,
                        .cio_unlock = lov_io_unlock,
                        .cio_start = lov_io_start,
                        .cio_end = lov_io_end
                },
                [CIT_GLIMPSE] = {
                        .cio_fini = lov_io_fini,
                },
                [CIT_MISC] = {
                        .cio_fini = lov_io_fini
                }
        },
        .cio_read_ahead = lov_io_read_ahead,
        .cio_submit = lov_io_submit,
        .cio_commit_async = lov_io_commit_async,
};

/*****************************************************************************
 *
 * Empty lov io operations.
 *
 */

static void lov_empty_io_fini(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct lov_object *lov = cl2lov(ios->cis_obj);

        if (atomic_dec_and_test(&lov->lo_active_ios))
                wake_up_all(&lov->lo_waitq);
}

static int lov_empty_io_submit(const struct lu_env *env,
                               const struct cl_io_slice *ios,
                               enum cl_req_type crt, struct cl_2queue *queue)
{
        return -EBADF;
}

static void lov_empty_impossible(const struct lu_env *env,
                                 struct cl_io_slice *ios)
{
        LBUG();
}

#define LOV_EMPTY_IMPOSSIBLE ((void *)lov_empty_impossible)

/**
 * An io operation vector for files without stripes.
 */
static const struct cl_io_operations lov_empty_io_ops = {
        .op = {
                [CIT_READ] = {
                        .cio_fini = lov_empty_io_fini,
#if 0
                        .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
                        .cio_lock = LOV_EMPTY_IMPOSSIBLE,
                        .cio_start = LOV_EMPTY_IMPOSSIBLE,
                        .cio_end = LOV_EMPTY_IMPOSSIBLE
#endif
                },
                [CIT_WRITE] = {
                        .cio_fini = lov_empty_io_fini,
                        .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
                        .cio_lock = LOV_EMPTY_IMPOSSIBLE,
                        .cio_start = LOV_EMPTY_IMPOSSIBLE,
                        .cio_end = LOV_EMPTY_IMPOSSIBLE
                },
                [CIT_SETATTR] = {
                        .cio_fini = lov_empty_io_fini,
                        .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
                        .cio_lock = LOV_EMPTY_IMPOSSIBLE,
                        .cio_start = LOV_EMPTY_IMPOSSIBLE,
                        .cio_end = LOV_EMPTY_IMPOSSIBLE
                },
                [CIT_FAULT] = {
                        .cio_fini = lov_empty_io_fini,
                        .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
                        .cio_lock = LOV_EMPTY_IMPOSSIBLE,
                        .cio_start = LOV_EMPTY_IMPOSSIBLE,
                        .cio_end = LOV_EMPTY_IMPOSSIBLE
                },
                [CIT_FSYNC] = {
                        .cio_fini = lov_empty_io_fini
                },
                [CIT_LADVISE] = {
                        .cio_fini = lov_empty_io_fini
                },
                [CIT_GLIMPSE] = {
                        .cio_fini = lov_empty_io_fini
                },
                [CIT_MISC] = {
                        .cio_fini = lov_empty_io_fini
                }
        },
        .cio_submit = lov_empty_io_submit,
        .cio_commit_async = LOV_EMPTY_IMPOSSIBLE
};
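
/*
 * Entry point for I/O on an object with a composite (striped) layout:
 * initialize the lov io slice and attach it to the cl_io.
 */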

int lov_io_init_composite(const struct lu_env *env, struct cl_object *obj,
                          struct cl_io *io)
{
        struct lov_io *lio = lov_env_io(env);
        struct lov_object *lov = cl2lov(obj);
        int result;

        INIT_LIST_HEAD(&lio->lis_active);
        result = lov_io_slice_init(lio, lov, io);
        if (result)
                GOTO(out, result);

        result = lov_io_subio_init(env, lio, io);
        if (!result) {
                cl_io_slice_add(io, &lio->lis_cl, obj, &lov_io_ops);
                atomic_inc(&lov->lo_active_ios);
        }
out:
        io->ci_result = result < 0 ? result : 0;
        RETURN(io->ci_result);
}

int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
                      struct cl_io *io)
{
        struct lov_object *lov = cl2lov(obj);
        struct lov_io *lio = lov_env_io(env);
        int result;

        lio->lis_object = lov;
        switch (io->ci_type) {
        default:
                LBUG();
        case CIT_MISC:
        case CIT_GLIMPSE:
        case CIT_READ:
                result = 0;
                break;
        case CIT_FSYNC:
        case CIT_LADVISE:
        case CIT_DATA_VERSION:
        case CIT_SETATTR:
                result = +1;
                break;
        case CIT_WRITE:
                result = -EBADF;
                break;
        case CIT_FAULT:
                result = -EFAULT;
                CERROR("Page fault on a file without stripes: "DFID"\n",
                       PFID(lu_object_fid(&obj->co_lu)));
                break;
        }
        if (result == 0) {
                cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops);
                atomic_inc(&lov->lo_active_ios);
        }

        io->ci_result = result < 0 ? result : 0;
        RETURN(io->ci_result);
}

int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
                         struct cl_io *io)
{
        struct lov_object *lov = cl2lov(obj);
        struct lov_io *lio = lov_env_io(env);
        int result;

        LASSERT(lov->lo_lsm != NULL);
        lio->lis_object = lov;

        switch (io->ci_type) {
        default:
                LASSERTF(0, "invalid type %d\n", io->ci_type);
                result = -EOPNOTSUPP;
                break;
        case CIT_GLIMPSE:
        case CIT_MISC:
        case CIT_FSYNC:
        case CIT_LADVISE:
        case CIT_DATA_VERSION:
                result = +1;
                break;
        case CIT_SETATTR:
                /*
                 * the truncate to 0 is managed by MDT:
                 * - in open, for open O_TRUNC
                 * - in setattr, for truncate
                 */
                /* a truncate to size > 0 triggers a restore */
                if (cl_io_is_trunc(io)) {
                        io->ci_restore_needed = 1;
                        result = -ENODATA;
                } else {
                        result = +1;
                }
                break;
        case CIT_READ:
        case CIT_WRITE:
        case CIT_FAULT:
                io->ci_restore_needed = 1;
                result = -ENODATA;
                break;
        }

        if (result == 0) {
                cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops);
                atomic_inc(&lov->lo_active_ios);
        }

        io->ci_result = result < 0 ? result : 0;
        RETURN(io->ci_result);
}

/**
 * Return the index into composite:lo_entries for the given file offset.
 */
int lov_io_layout_at(struct lov_io *lio, __u64 offset)
{
        struct lov_object *lov = lio->lis_object;
        struct lov_layout_composite *comp = &lov->u.composite;
        int start_index = 0;
        int end_index = comp->lo_entry_count - 1;
        int i;

        LASSERT(lov->lo_type == LLT_COMP);

        /* This is an actual file offset, so nothing can cover EOF. */
        if (offset == LUSTRE_EOF)
                return -1;

        if (lov_is_flr(lov)) {
                struct lov_mirror_entry *lre;

                LASSERT(lio->lis_mirror_index >= 0);

                lre = &comp->lo_mirrors[lio->lis_mirror_index];
                start_index = lre->lre_start;
                end_index = lre->lre_end;
        }

        for (i = start_index; i <= end_index; i++) {
                struct lov_layout_entry *lle = lov_entry(lov, i);

                if ((offset >= lle->lle_extent->e_start &&
                     offset < lle->lle_extent->e_end) ||
                    (offset == OBD_OBJECT_EOF &&
                     lle->lle_extent->e_end == OBD_OBJECT_EOF))
                        return i;
        }

        return -1;
}