/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for LOV layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */
#define DEBUG_SUBSYSTEM S_LOV

#include "lov_cl_internal.h"
static inline struct lov_io_sub *lov_sub_alloc(struct lov_io *lio, int index)
{
	struct lov_io_sub *sub;

	if (lio->lis_nr_subios == 0) {
		LASSERT(lio->lis_single_subio_index == -1);
		sub = &lio->lis_single_subio;
		lio->lis_single_subio_index = index;
		memset(sub, 0, sizeof(*sub));
	} else {
		OBD_ALLOC_PTR(sub);
	}

	if (sub) {
		INIT_LIST_HEAD(&sub->sub_list);
		INIT_LIST_HEAD(&sub->sub_linkage);
		sub->sub_subio_index = index;
	}

	return sub;
}
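/*
 * Note: the first sub-io of an I/O is stored in the pre-allocated
 * lis_single_subio embedded in struct lov_io, so the common
 * single-stripe case avoids an allocation; only additional sub-ios
 * take the OBD_ALLOC_PTR() path above.
 */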
static inline void lov_sub_free(struct lov_io *lio, struct lov_io_sub *sub)
{
	if (sub->sub_subio_index == lio->lis_single_subio_index) {
		LASSERT(sub == &lio->lis_single_subio);
		lio->lis_single_subio_index = -1;
	} else {
		OBD_FREE_PTR(sub);
	}
}
static void lov_io_sub_fini(const struct lu_env *env, struct lov_io *lio,
			    struct lov_io_sub *sub)
{
	ENTRY;

	cl_io_fini(sub->sub_env, &sub->sub_io);

	if (sub->sub_env && !IS_ERR(sub->sub_env)) {
		cl_env_put(sub->sub_env, &sub->sub_refcheck);
		sub->sub_env = NULL;
	}
	EXIT;
}
static bool
is_index_within_mirror(struct lov_object *lov, int index, int mirror_index)
{
	struct lov_layout_composite *comp = &lov->u.composite;
	struct lov_mirror_entry *lre = &comp->lo_mirrors[mirror_index];

	return (index >= lre->lre_start && index <= lre->lre_end);
}
static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio,
			   struct lov_io_sub *sub)
{
	struct lov_object *lov = lio->lis_object;
	struct cl_io *sub_io;
	struct cl_object *sub_obj;
	struct cl_io *io = lio->lis_cl.cis_io;
	int index = lov_comp_entry(sub->sub_subio_index);
	int stripe = lov_comp_stripe(sub->sub_subio_index);
	int result = 0;
	LASSERT(sub->sub_env == NULL);
	ENTRY;

	if (unlikely(!lov_r0(lov, index)->lo_sub ||
		     !lov_r0(lov, index)->lo_sub[stripe]))
		RETURN(-EIO);

	LASSERTF(ergo(lov_is_flr(lov),
		      is_index_within_mirror(lov, index,
					     lio->lis_mirror_index)),
		 DFID "iot = %d, index = %d, mirror = %d\n",
		 PFID(lu_object_fid(lov2lu(lov))), io->ci_type, index,
		 lio->lis_mirror_index);

	/* obtain new environment */
	sub->sub_env = cl_env_get(&sub->sub_refcheck);
	if (IS_ERR(sub->sub_env)) {
		result = PTR_ERR(sub->sub_env);
		GOTO(out, result);
	}

	sub_obj = lovsub2cl(lov_r0(lov, index)->lo_sub[stripe]);
	sub_io = &sub->sub_io;

	sub_io->ci_obj = sub_obj;
	sub_io->ci_result = 0;

	sub_io->ci_parent = io;
	sub_io->ci_lockreq = io->ci_lockreq;
	sub_io->ci_type = io->ci_type;
	sub_io->ci_no_srvlock = io->ci_no_srvlock;
	sub_io->ci_noatime = io->ci_noatime;
	sub_io->ci_async_readahead = io->ci_async_readahead;
	sub_io->ci_lock_no_expand = io->ci_lock_no_expand;
	sub_io->ci_ndelay = io->ci_ndelay;
	sub_io->ci_layout_version = io->ci_layout_version;
	sub_io->ci_tried_all_mirrors = io->ci_tried_all_mirrors;

	result = cl_io_sub_init(sub->sub_env, sub_io, io->ci_type, sub_obj);

out:
	if (result < 0)
		lov_io_sub_fini(env, lio, sub);

	RETURN(result);
}
struct lov_io_sub *lov_sub_get(const struct lu_env *env,
			       struct lov_io *lio, int index)
{
	struct lov_io_sub *sub;
	int rc = 0;

	ENTRY;

	list_for_each_entry(sub, &lio->lis_subios, sub_list) {
		if (sub->sub_subio_index == index) {
			rc = 1;
			break;
		}
	}

	if (rc == 0) {
		sub = lov_sub_alloc(lio, index);
		if (!sub)
			GOTO(out, rc = -ENOMEM);

		rc = lov_io_sub_init(env, lio, sub);
		if (rc < 0) {
			lov_sub_free(lio, sub);
			GOTO(out, rc);
		}

		list_add_tail(&sub->sub_list, &lio->lis_subios);
		lio->lis_nr_subios++;
	}
out:
	if (rc < 0)
		sub = ERR_PTR(rc);
	else
		sub->sub_io.ci_noquota = lio->lis_cl.cis_io->ci_noquota;
	RETURN(sub);
}
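/*
 * Typical caller pattern (as in lov_io_iter_init() below): look up the
 * sub-io for a (component, stripe) pair and drive a cl_io method on it
 * within the sub-io's own environment, e.g.:
 *
 *	sub = lov_sub_get(env, lio, lov_comp_index(index, stripe));
 *	if (IS_ERR(sub))
 *		RETURN(PTR_ERR(sub));
 *	rc = cl_io_iter_init(sub->sub_env, &sub->sub_io);
 */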
/*****************************************************************************
 *
 * Lov io operations.
 *
 *****************************************************************************/
static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
			     struct cl_io *io)
{
	ENTRY;

	LASSERT(lio->lis_object != NULL);

	INIT_LIST_HEAD(&lio->lis_subios);
	lio->lis_single_subio_index = -1;
	lio->lis_nr_subios = 0;

	RETURN(0);
}
/**
 * Decide whether this I/O needs a write intent RPC.
 */
static int lov_io_mirror_write_intent(struct lov_io *lio,
				      struct lov_object *obj, struct cl_io *io)
{
	struct lov_layout_composite *comp = &obj->u.composite;
	struct lu_extent *ext = &io->ci_write_intent;
	struct lov_mirror_entry *lre;
	struct lov_mirror_entry *primary;
	struct lov_layout_entry *lle;
	size_t count = 0;
	ENTRY;

	*ext = (typeof(*ext)) { lio->lis_pos, lio->lis_endpos };
	io->ci_need_write_intent = 0;

	if (!(io->ci_type == CIT_WRITE || cl_io_is_trunc(io) ||
	      cl_io_is_mkwrite(io)))
		RETURN(0);

	/*
	 * FLR: check if a write intent RPC needs to be sent to the server.
	 * Writing to a sync_pending file needs the write intent RPC to change
	 * the file state back to write_pending, so that the layout version
	 * can be increased when the state changes to sync_pending at a later
	 * time. Otherwise there exists a chance that an evicted client may
	 * dirty the file data while the resync client is working on it.
	 * Designated I/O is allowed for resync workload.
	 */
	if (lov_flr_state(obj) == LCM_FL_RDONLY ||
	    (lov_flr_state(obj) == LCM_FL_SYNC_PENDING &&
	     io->ci_designated_mirror == 0)) {
		io->ci_need_write_intent = 1;
		RETURN(0);
	}

	LASSERT((lov_flr_state(obj) == LCM_FL_WRITE_PENDING));
	LASSERT(comp->lo_preferred_mirror >= 0);

	/*
	 * need to iterate all components to see if there are
	 * multiple components covering the writing component
	 */
	primary = &comp->lo_mirrors[comp->lo_preferred_mirror];
	LASSERT(!primary->lre_stale);
	lov_foreach_mirror_layout_entry(obj, lle, primary) {
		LASSERT(lle->lle_valid);
		if (!lu_extent_is_overlapped(ext, lle->lle_extent))
			continue;

		ext->e_start = min(ext->e_start, lle->lle_extent->e_start);
		ext->e_end = max(ext->e_end, lle->lle_extent->e_end);
		++count;
	}
	if (count == 0) {
		CERROR(DFID ": cannot find any valid components covering "
		       "file extent "DEXT", mirror: %d\n",
		       PFID(lu_object_fid(lov2lu(obj))), PEXT(ext),
		       primary->lre_mirror_id);
		RETURN(-EIO);
	}

	count = 0;
	lov_foreach_mirror_entry(obj, lre) {
		if (lre == primary)
			continue;

		lov_foreach_mirror_layout_entry(obj, lle, lre) {
			if (!lle->lle_valid)
				continue;

			if (lu_extent_is_overlapped(ext, lle->lle_extent)) {
				++count;
				break;
			}
		}
	}

	CDEBUG(D_VFSTRACE, DFID "there are %zd components to be staled to "
	       "modify file extent "DEXT", iot: %d\n",
	       PFID(lu_object_fid(lov2lu(obj))), count, PEXT(ext), io->ci_type);

	io->ci_need_write_intent = count > 0;

	RETURN(0);
}
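/*
 * Hypothetical example of the extent widening above: with primary
 * components covering [0, 1M) and [1M, EOF) and a write to [512K, 2M),
 * the first loop widens the write intent to [0, EOF); if any valid
 * component of a non-primary mirror overlaps that extent, the second
 * loop counts it and ci_need_write_intent is set so the server can
 * mark those mirrors stale before the write proceeds.
 */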
static int lov_io_mirror_init(struct lov_io *lio, struct lov_object *obj,
			      struct cl_io *io)
{
	struct lov_layout_composite *comp = &obj->u.composite;
	int index;
	int i;
	int result;
	ENTRY;

	if (!lov_is_flr(obj)) {
		LASSERT(comp->lo_preferred_mirror == 0);
		lio->lis_mirror_index = comp->lo_preferred_mirror;
		io->ci_ndelay = 0;
		RETURN(0);
	}

	/* transfer the layout version for verification */
	if (io->ci_layout_version == 0)
		io->ci_layout_version = obj->lo_lsm->lsm_layout_gen;

	/* find the corresponding mirror for designated mirror I/O */
	if (io->ci_designated_mirror > 0) {
		struct lov_mirror_entry *entry;

		LASSERT(!io->ci_ndelay);

		CDEBUG(D_LAYOUT, "designated I/O mirror state: %d\n",
		       lov_flr_state(obj));

		if ((cl_io_is_trunc(io) || io->ci_type == CIT_WRITE) &&
		    (io->ci_layout_version != obj->lo_lsm->lsm_layout_gen)) {
			/*
			 * For resync I/O, the ci_layout_version was the layout
			 * version when resync starts. If it doesn't match the
			 * current object layout version, it means the layout
			 * has been changed.
			 */
			RETURN(-ESTALE);
		}

		io->ci_layout_version |= LU_LAYOUT_RESYNC;

		index = 0;
		lio->lis_mirror_index = -1;
		lov_foreach_mirror_entry(obj, entry) {
			if (entry->lre_mirror_id ==
			    io->ci_designated_mirror) {
				lio->lis_mirror_index = index;
				break;
			}

			index++;
		}

		RETURN(lio->lis_mirror_index < 0 ? -EINVAL : 0);
	}

	result = lov_io_mirror_write_intent(lio, obj, io);
	if (result)
		RETURN(result);

	if (io->ci_need_write_intent) {
		CDEBUG(D_VFSTRACE, DFID " need write intent for [%llu, %llu)\n",
		       PFID(lu_object_fid(lov2lu(obj))),
		       lio->lis_pos, lio->lis_endpos);

		if (cl_io_is_trunc(io)) {
			/**
			 * for truncate, we use [size, EOF) to judge whether
			 * a write intent needs to be sent, but we need to
			 * restore the write extent to [0, size).
			 */
			io->ci_write_intent.e_start = 0;
			io->ci_write_intent.e_end =
					io->u.ci_setattr.sa_attr.lvb_size;
		}

		/* stop cl_io_init() loop */
		RETURN(1);
	}

	if (io->ci_ndelay_tried == 0 || /* first time to try */
	    /* reset the mirror index if layout has changed */
	    lio->lis_mirror_layout_gen != obj->lo_lsm->lsm_layout_gen) {
		lio->lis_mirror_layout_gen = obj->lo_lsm->lsm_layout_gen;
		index = lio->lis_mirror_index = comp->lo_preferred_mirror;
	} else {
		index = lio->lis_mirror_index;
		LASSERT(index >= 0);

		/* move mirror index to the next one */
		index = (index + 1) % comp->lo_mirror_count;
	}

	for (i = 0; i < comp->lo_mirror_count; i++) {
		struct lu_extent ext = { .e_start = lio->lis_pos,
					 .e_end = lio->lis_pos + 1 };
		struct lov_mirror_entry *lre;
		struct lov_layout_entry *lle;
		bool found = false;

		lre = &comp->lo_mirrors[(index + i) % comp->lo_mirror_count];
		if (!lre->lre_valid)
			continue;

		lov_foreach_mirror_layout_entry(obj, lle, lre) {
			if (!lle->lle_valid)
				continue;

			if (lu_extent_is_overlapped(&ext, lle->lle_extent)) {
				found = true;
				break;
			}
		} /* each component of the mirror */
		if (found) {
			index = (index + i) % comp->lo_mirror_count;
			break;
		}
	} /* each mirror */

	if (i == comp->lo_mirror_count) {
		CERROR(DFID": failed to find a component covering "
		       "I/O region at %llu\n",
		       PFID(lu_object_fid(lov2lu(obj))), lio->lis_pos);

		dump_lsm(D_ERROR, obj->lo_lsm);

		RETURN(-EIO);
	}

	CDEBUG(D_VFSTRACE, DFID ": flr state: %d, move mirror from %d to %d, "
	       "have retried: %d, mirror count: %d\n",
	       PFID(lu_object_fid(lov2lu(obj))), lov_flr_state(obj),
	       lio->lis_mirror_index, index, io->ci_ndelay_tried,
	       comp->lo_mirror_count);

	lio->lis_mirror_index = index;

	/*
	 * FLR: if all mirrors have been tried once, most likely the network
	 * of this client has been partitioned. We should relinquish CPU for
	 * a while before trying again.
	 */
	if (io->ci_ndelay && io->ci_ndelay_tried > 0 &&
	    (io->ci_ndelay_tried % comp->lo_mirror_count == 0)) {
		schedule_timeout_interruptible(cfs_time_seconds(1) / 100);
		if (signal_pending(current))
			RETURN(-EINTR);

		/**
		 * Set ci_tried_all_mirrors to turn off fast mirror switching
		 * for reads after we've tried all mirrors several rounds.
		 */
		io->ci_tried_all_mirrors = io->ci_ndelay_tried %
					   (comp->lo_mirror_count * 4) == 0;
	}
	++io->ci_ndelay_tried;

	CDEBUG(D_VFSTRACE, "use %sdelayed RPC state for this IO\n",
	       io->ci_ndelay ? "non-" : "");

	RETURN(0);
}
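/*
 * Hypothetical example of the retry sequence above (3 mirrors,
 * preferred mirror 0): the first attempt uses mirror 0; if the I/O is
 * restarted, the search begins at (0 + 1) % 3 = mirror 1, then mirror
 * 2, pausing about 10ms (cfs_time_seconds(1) / 100) once per full
 * round so a partitioned client does not spin.
 */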
static int lov_io_slice_init(struct lov_io *lio,
			     struct lov_object *obj, struct cl_io *io)
{
	int index;
	int result = 0;

	ENTRY;

	lio->lis_object = obj;

	LASSERT(obj->lo_lsm != NULL);

	switch (io->ci_type) {
	case CIT_READ:
	case CIT_WRITE:
		lio->lis_pos = io->u.ci_rw.crw_pos;
		lio->lis_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count;
		lio->lis_io_endpos = lio->lis_endpos;
		if (cl_io_is_append(io)) {
			LASSERT(io->ci_type == CIT_WRITE);

			/*
			 * If there is a LOV EA hole, then we may not be able
			 * to locate the current file tail exactly.
			 */
			if (unlikely(obj->lo_lsm->lsm_entries[0]->lsme_pattern &
				     LOV_PATTERN_F_HOLE))
				GOTO(out, result = -EIO);

			lio->lis_pos = 0;
			lio->lis_endpos = OBD_OBJECT_EOF;
		}
		break;

	case CIT_SETATTR:
		if (cl_io_is_fallocate(io)) {
			lio->lis_pos = io->u.ci_setattr.sa_falloc_offset;
			lio->lis_endpos = io->u.ci_setattr.sa_falloc_end;
		} else if (cl_io_is_trunc(io)) {
			lio->lis_pos = io->u.ci_setattr.sa_attr.lvb_size;
			lio->lis_endpos = OBD_OBJECT_EOF;
		} else {
			lio->lis_pos = 0;
			lio->lis_endpos = OBD_OBJECT_EOF;
		}
		break;

	case CIT_DATA_VERSION:
		lio->lis_pos = 0;
		lio->lis_endpos = OBD_OBJECT_EOF;
		break;

	case CIT_FAULT: {
		pgoff_t index = io->u.ci_fault.ft_index;

		lio->lis_pos = cl_offset(io->ci_obj, index);
		lio->lis_endpos = cl_offset(io->ci_obj, index + 1);
		break;
	}

	case CIT_FSYNC: {
		lio->lis_pos = io->u.ci_fsync.fi_start;
		lio->lis_endpos = io->u.ci_fsync.fi_end;
		break;
	}

	case CIT_LADVISE: {
		lio->lis_pos = io->u.ci_ladvise.li_start;
		lio->lis_endpos = io->u.ci_ladvise.li_end;
		break;
	}

	case CIT_GLIMPSE:
		lio->lis_pos = 0;
		lio->lis_endpos = OBD_OBJECT_EOF;

		if (lov_flr_state(obj) == LCM_FL_RDONLY &&
		    !OBD_FAIL_CHECK(OBD_FAIL_FLR_GLIMPSE_IMMUTABLE))
			/* SoM is accurate, no need to glimpse */
			GOTO(out, result = 1);
		break;

	case CIT_MISC:
		lio->lis_pos = 0;
		lio->lis_endpos = OBD_OBJECT_EOF;
		break;

	default:
		LBUG();
	}

	result = lov_io_mirror_init(lio, obj, io);
	if (result)
		GOTO(out, result);

	/* check if it needs to instantiate layout */
	if (!(io->ci_type == CIT_WRITE || cl_io_is_mkwrite(io) ||
	      (cl_io_is_trunc(io) && io->u.ci_setattr.sa_attr.lvb_size > 0)))
		GOTO(out, result = 0);

	/*
	 * for truncate, it only needs to instantiate the components
	 * before the truncated size.
	 */
	if (cl_io_is_trunc(io)) {
		io->ci_write_intent.e_start = 0;
		/*
		 * for writes, e_end is endpos, the location of the file
		 * pointer after the write is completed, so it is not accessed.
		 * For truncate, 'end' is the size, and *is* accessed.
		 * In other words, writes are [start, end), but truncate is
		 * [start, size], where both endpoints are included. So add 1
		 * to the size when creating the write intent to account for
		 * this.
		 */
		io->ci_write_intent.e_end =
			io->u.ci_setattr.sa_attr.lvb_size + 1;
	} else {
		io->ci_write_intent.e_start = lio->lis_pos;
		io->ci_write_intent.e_end = lio->lis_endpos;
	}

	lov_foreach_io_layout(index, lio, &io->ci_write_intent) {
		if (!lsm_entry_inited(obj->lo_lsm, index)) {
			io->ci_need_write_intent = 1;
			break;
		}
	}

	if (io->ci_need_write_intent && io->ci_designated_mirror > 0) {
		/*
		 * REINT_SYNC RPC has already tried to instantiate all of the
		 * components involved, and obviously it didn't succeed. Skip
		 * this mirror for now. The server won't be able to figure out
		 * which mirror it should instantiate components on.
		 */
		CERROR(DFID": trying to instantiate components for designated "
		       "I/O, file state: %d\n",
		       PFID(lu_object_fid(lov2lu(obj))), lov_flr_state(obj));

		io->ci_need_write_intent = 0;
		GOTO(out, result = -EIO);
	}

	if (io->ci_need_write_intent)
		GOTO(out, result = 1);

	EXIT;

out:
	return result;
}
static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct lov_io *lio = cl2lov_io(env, ios);
	struct lov_object *lov = cl2lov(ios->cis_obj);

	ENTRY;

	LASSERT(list_empty(&lio->lis_active));

	while (!list_empty(&lio->lis_subios)) {
		struct lov_io_sub *sub = list_entry(lio->lis_subios.next,
						    struct lov_io_sub,
						    sub_list);

		list_del_init(&sub->sub_list);
		lio->lis_nr_subios--;

		lov_io_sub_fini(env, lio, sub);
		lov_sub_free(lio, sub);
	}
	LASSERT(lio->lis_nr_subios == 0);

	LASSERT(atomic_read(&lov->lo_active_ios) > 0);
	if (atomic_dec_and_test(&lov->lo_active_ios))
		wake_up_all(&lov->lo_waitq);
	EXIT;
}
static void lov_io_sub_inherit(struct lov_io_sub *sub, struct lov_io *lio,
			       loff_t start, loff_t end)
{
	struct cl_io *io = &sub->sub_io;
	struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
	struct cl_io *parent = lio->lis_cl.cis_io;
	int index = lov_comp_entry(sub->sub_subio_index);
	int stripe = lov_comp_stripe(sub->sub_subio_index);

	switch (io->ci_type) {
	case CIT_SETATTR: {
		io->u.ci_setattr.sa_attr = parent->u.ci_setattr.sa_attr;
		io->u.ci_setattr.sa_attr_flags =
			parent->u.ci_setattr.sa_attr_flags;
		io->u.ci_setattr.sa_avalid = parent->u.ci_setattr.sa_avalid;
		io->u.ci_setattr.sa_xvalid = parent->u.ci_setattr.sa_xvalid;
		io->u.ci_setattr.sa_falloc_mode =
			parent->u.ci_setattr.sa_falloc_mode;
		io->u.ci_setattr.sa_stripe_index = stripe;
		io->u.ci_setattr.sa_parent_fid =
			parent->u.ci_setattr.sa_parent_fid;
		/* For SETATTR(fallocate) pass the subtype to lower IO */
		io->u.ci_setattr.sa_subtype = parent->u.ci_setattr.sa_subtype;
		if (cl_io_is_trunc(io)) {
			loff_t new_size = parent->u.ci_setattr.sa_attr.lvb_size;

			new_size = lov_size_to_stripe(lsm, index, new_size,
						      stripe);
			io->u.ci_setattr.sa_attr.lvb_size = new_size;
		} else if (cl_io_is_fallocate(io)) {
			io->u.ci_setattr.sa_falloc_offset = start;
			io->u.ci_setattr.sa_falloc_end = end;
			io->u.ci_setattr.sa_attr.lvb_size =
				parent->u.ci_setattr.sa_attr.lvb_size;
		}
		lov_lsm2layout(lsm, lsm->lsm_entries[index],
			       &io->u.ci_setattr.sa_layout);
		break;
	}
	case CIT_DATA_VERSION: {
		io->u.ci_data_version.dv_data_version = 0;
		io->u.ci_data_version.dv_flags =
			parent->u.ci_data_version.dv_flags;
		break;
	}
	case CIT_FAULT: {
		struct cl_object *obj = parent->ci_obj;
		loff_t off = cl_offset(obj, parent->u.ci_fault.ft_index);

		io->u.ci_fault = parent->u.ci_fault;
		off = lov_size_to_stripe(lsm, index, off, stripe);
		io->u.ci_fault.ft_index = cl_index(obj, off);
		break;
	}
	case CIT_FSYNC: {
		io->u.ci_fsync.fi_start = start;
		io->u.ci_fsync.fi_end = end;
		io->u.ci_fsync.fi_fid = parent->u.ci_fsync.fi_fid;
		io->u.ci_fsync.fi_mode = parent->u.ci_fsync.fi_mode;
		break;
	}
	case CIT_READ:
	case CIT_WRITE: {
		io->u.ci_wr.wr_sync = cl_io_is_sync_write(parent);
		io->ci_tried_all_mirrors = parent->ci_tried_all_mirrors;
		if (cl_io_is_append(parent)) {
			io->u.ci_wr.wr_append = 1;
		} else {
			io->u.ci_rw.crw_pos = start;
			io->u.ci_rw.crw_count = end - start;
		}
		break;
	}
	case CIT_LADVISE: {
		io->u.ci_ladvise.li_start = start;
		io->u.ci_ladvise.li_end = end;
		io->u.ci_ladvise.li_fid = parent->u.ci_ladvise.li_fid;
		io->u.ci_ladvise.li_advice = parent->u.ci_ladvise.li_advice;
		io->u.ci_ladvise.li_flags = parent->u.ci_ladvise.li_flags;
		break;
	}
	default:
		break;
	}
}
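/*
 * Hypothetical example of the file-to-stripe translation above: for a
 * component with 2 stripes of 1MiB, truncating the file to 1.5MiB
 * becomes, via lov_size_to_stripe(), a truncate of stripe 0 to 1MiB
 * and of stripe 1 to 512KiB.
 */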
static loff_t lov_offset_mod(loff_t val, int delta)
{
	if (val != OBD_OBJECT_EOF)
		val += delta;
	return val;
}
static int lov_io_iter_init(const struct lu_env *env,
			    const struct cl_io_slice *ios)
{
	struct lov_io *lio = cl2lov_io(env, ios);
	struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
	struct lov_io_sub *sub;
	struct lu_extent ext;
	int index;
	int rc = 0;

	ENTRY;

	ext.e_start = lio->lis_pos;
	ext.e_end = lio->lis_endpos;

	lov_foreach_io_layout(index, lio, &ext) {
		struct lov_layout_entry *le = lov_entry(lio->lis_object, index);
		struct lov_layout_raid0 *r0 = &le->lle_raid0;
		u64 start;
		u64 end;
		int stripe;

		CDEBUG(D_VFSTRACE, "component[%d] flags %#x\n",
		       index, lsm->lsm_entries[index]->lsme_flags);
		if (!lsm_entry_inited(lsm, index)) {
			/*
			 * Read from uninitialized components should return
			 * zero filled pages.
			 */
			continue;
		}

		if (!le->lle_valid && !ios->cis_io->ci_designated_mirror) {
			CERROR("I/O to invalid component: %d, mirror: %d\n",
			       index, lio->lis_mirror_index);
			RETURN(-EIO);
		}

		for (stripe = 0; stripe < r0->lo_nr; stripe++) {
			if (!lov_stripe_intersects(lsm, index, stripe,
						   &ext, &start, &end))
				continue;

			if (unlikely(!r0->lo_sub[stripe])) {
				if (ios->cis_io->ci_type == CIT_READ ||
				    ios->cis_io->ci_type == CIT_WRITE ||
				    ios->cis_io->ci_type == CIT_FAULT)
					RETURN(-EIO);

				continue;
			}

			end = lov_offset_mod(end, 1);
			sub = lov_sub_get(env, lio,
					  lov_comp_index(index, stripe));
			if (IS_ERR(sub)) {
				rc = PTR_ERR(sub);
				break;
			}

			lov_io_sub_inherit(sub, lio, start, end);
			rc = cl_io_iter_init(sub->sub_env, &sub->sub_io);
			if (rc != 0)
				cl_io_iter_fini(sub->sub_env, &sub->sub_io);
			if (rc != 0)
				break;

			CDEBUG(D_VFSTRACE, "shrink: %d [%llu, %llu)\n",
			       stripe, start, end);

			list_add_tail(&sub->sub_linkage, &lio->lis_active);
		}
		if (rc != 0)
			break;
	}
	RETURN(rc);
}
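/*
 * Only sub-ios whose cl_io_iter_init() succeeded are linked onto
 * lis_active above; lov_io_call() below iterates exactly this list, so
 * a stripe that failed iteration never sees lock/start/end calls.
 */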
static int lov_io_rw_iter_init(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct lov_io *lio = cl2lov_io(env, ios);
	struct cl_io *io = ios->cis_io;
	struct lov_stripe_md_entry *lse;
	loff_t start = io->u.ci_rw.crw_pos;
	loff_t next;
	int index;

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
	ENTRY;

	if (cl_io_is_append(io))
		RETURN(lov_io_iter_init(env, ios));

	index = lov_io_layout_at(lio, io->u.ci_rw.crw_pos);
	if (index < 0) { /* non-existing layout component */
		if (io->ci_type == CIT_READ) {
			/*
			 * TODO: it needs to detect the next component and
			 * then set the next pos
			 */
			io->ci_continue = 0;

			RETURN(lov_io_iter_init(env, ios));
		}

		RETURN(-ENODATA);
	}

	if (!lov_entry(lio->lis_object, index)->lle_valid &&
	    !io->ci_designated_mirror)
		RETURN(io->ci_type == CIT_READ ? -EAGAIN : -EIO);

	lse = lov_lse(lio->lis_object, index);

	next = MAX_LFS_FILESIZE;
	if (lse->lsme_stripe_count > 1) {
		unsigned long ssize = lse->lsme_stripe_size;

		lov_do_div64(start, ssize);
		next = (start + 1) * ssize;
		if (next <= start * ssize)
			next = MAX_LFS_FILESIZE;
	}

	LASSERTF(io->u.ci_rw.crw_pos >= lse->lsme_extent.e_start,
		 "pos %lld, [%lld, %lld)\n", io->u.ci_rw.crw_pos,
		 lse->lsme_extent.e_start, lse->lsme_extent.e_end);
	next = min_t(__u64, next, lse->lsme_extent.e_end);
	next = min_t(loff_t, next, lio->lis_io_endpos);

	io->ci_continue = next < lio->lis_io_endpos;
	io->u.ci_rw.crw_count = next - io->u.ci_rw.crw_pos;
	lio->lis_pos = io->u.ci_rw.crw_pos;
	lio->lis_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count;
	CDEBUG(D_VFSTRACE,
	       "stripe: %llu chunk: [%llu, %llu) %llu, %zd\n",
	       (__u64)start, lio->lis_pos, lio->lis_endpos,
	       (__u64)lio->lis_io_endpos, io->u.ci_rw.crw_count);

	/*
	 * XXX The following call should be optimized: we know that
	 * [lio->lis_pos, lio->lis_endpos) intersects with exactly one stripe.
	 */
	RETURN(lov_io_iter_init(env, ios));
}
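/*
 * Hypothetical example of the chunking above (1MiB stripe size,
 * stripe_count > 1): a write with crw_pos = 2.5MiB yields start = 2
 * after lov_do_div64(), so next = 3MiB and this iteration covers
 * [2.5MiB, 3MiB) only; ci_continue is set when more data remains and
 * the cl_io loop restarts at 3MiB on the next stripe.
 */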
static int lov_io_setattr_iter_init(const struct lu_env *env,
				    const struct cl_io_slice *ios)
{
	struct lov_io *lio = cl2lov_io(env, ios);
	struct cl_io *io = ios->cis_io;
	int index;
	ENTRY;

	if (cl_io_is_trunc(io) && lio->lis_pos > 0) {
		index = lov_io_layout_at(lio, lio->lis_pos - 1);
		/* no entry found for such offset */
		if (index < 0)
			RETURN(io->ci_result = -ENODATA);
	}

	RETURN(lov_io_iter_init(env, ios));
}
static int lov_io_call(const struct lu_env *env, struct lov_io *lio,
		       int (*iofunc)(const struct lu_env *, struct cl_io *))
{
	struct cl_io *parent = lio->lis_cl.cis_io;
	struct lov_io_sub *sub;
	int rc = 0;

	ENTRY;
	list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
		rc = iofunc(sub->sub_env, &sub->sub_io);
		if (rc)
			break;

		if (parent->ci_result == 0)
			parent->ci_result = sub->sub_io.ci_result;
	}
	RETURN(rc);
}
static int lov_io_lock(const struct lu_env *env, const struct cl_io_slice *ios)
{
	ENTRY;
	RETURN(lov_io_call(env, cl2lov_io(env, ios), cl_io_lock));
}

static int lov_io_start(const struct lu_env *env, const struct cl_io_slice *ios)
{
	ENTRY;
	RETURN(lov_io_call(env, cl2lov_io(env, ios), cl_io_start));
}
static int lov_io_end_wrapper(const struct lu_env *env, struct cl_io *io)
{
	ENTRY;
	/*
	 * It's possible that lov_io_start() wasn't called against this
	 * sub-io, either because a previous sub-io failed, or the upper
	 * layer completed the I/O early.
	 */
	if (io->ci_state == CIS_IO_GOING)
		cl_io_end(env, io);
	else
		io->ci_state = CIS_IO_FINISHED;
	RETURN(0);
}
static int lov_io_iter_fini_wrapper(const struct lu_env *env, struct cl_io *io)
{
	cl_io_iter_fini(env, io);
	RETURN(0);
}

static int lov_io_unlock_wrapper(const struct lu_env *env, struct cl_io *io)
{
	cl_io_unlock(env, io);
	RETURN(0);
}
static void lov_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
{
	int rc;

	ENTRY;
	rc = lov_io_call(env, cl2lov_io(env, ios), lov_io_end_wrapper);
	LASSERT(rc == 0);
	EXIT;
}
static void
lov_io_data_version_end(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct lov_io *lio = cl2lov_io(env, ios);
	struct cl_io *parent = lio->lis_cl.cis_io;
	struct cl_data_version_io *pdv = &parent->u.ci_data_version;
	struct lov_io_sub *sub;

	ENTRY;
	list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
		struct cl_data_version_io *sdv = &sub->sub_io.u.ci_data_version;

		lov_io_end_wrapper(sub->sub_env, &sub->sub_io);

		pdv->dv_data_version += sdv->dv_data_version;
		if (pdv->dv_layout_version > sdv->dv_layout_version)
			pdv->dv_layout_version = sdv->dv_layout_version;

		if (parent->ci_result == 0)
			parent->ci_result = sub->sub_io.ci_result;
	}

	EXIT;
}
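/*
 * The parent's data version is therefore the sum of the stripe data
 * versions, while the layout version reported back is the minimum seen
 * across the sub-ios, i.e. the most conservative one.
 */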
static void lov_io_iter_fini(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct lov_io *lio = cl2lov_io(env, ios);
	int rc;

	ENTRY;
	rc = lov_io_call(env, lio, lov_io_iter_fini_wrapper);
	LASSERT(rc == 0);
	while (!list_empty(&lio->lis_active))
		list_del_init(lio->lis_active.next);
	EXIT;
}
static void lov_io_unlock(const struct lu_env *env,
			  const struct cl_io_slice *ios)
{
	int rc;

	ENTRY;
	rc = lov_io_call(env, cl2lov_io(env, ios), lov_io_unlock_wrapper);
	LASSERT(rc == 0);
	EXIT;
}
static int lov_io_read_ahead(const struct lu_env *env,
			     const struct cl_io_slice *ios,
			     pgoff_t start, struct cl_read_ahead *ra)
{
	struct lov_io *lio = cl2lov_io(env, ios);
	struct lov_object *loo = lio->lis_object;
	struct cl_object *obj = lov2cl(loo);
	struct lov_layout_raid0 *r0;
	struct lov_io_sub *sub;
	loff_t offset;
	loff_t suboff;
	pgoff_t ra_end;
	unsigned int pps; /* pages per stripe */
	int stripe;
	int index;
	int rc;

	ENTRY;

	offset = cl_offset(obj, start);
	index = lov_io_layout_at(lio, offset);
	if (index < 0 || !lsm_entry_inited(loo->lo_lsm, index))
		RETURN(-ENODATA);

	/* don't allow readahead to expand into stale components */
	if (!lov_entry(loo, index)->lle_valid)
		RETURN(-EIO);

	stripe = lov_stripe_number(loo->lo_lsm, index, offset);

	r0 = lov_r0(loo, index);
	if (unlikely(!r0->lo_sub[stripe]))
		RETURN(-EIO);

	sub = lov_sub_get(env, lio, lov_comp_index(index, stripe));
	if (IS_ERR(sub))
		RETURN(PTR_ERR(sub));

	lov_stripe_offset(loo->lo_lsm, index, offset, stripe, &suboff);
	rc = cl_io_read_ahead(sub->sub_env, &sub->sub_io,
			      cl_index(lovsub2cl(r0->lo_sub[stripe]), suboff),
			      ra);

	CDEBUG(D_READA, DFID " cra_end = %lu, stripes = %d, rc = %d\n",
	       PFID(lu_object_fid(lov2lu(loo))), ra->cra_end_idx,
	       r0->lo_nr, rc);
	if (rc != 0)
		RETURN(rc);

	/**
	 * Adjust the read-ahead window according to the component layout.
	 * ra->cra_end_idx is the maximum page index covered by an underlying
	 * DLM lock. The code below converts cra_end_idx from stripe level to
	 * file level and makes sure it is not beyond the stripe and component
	 * boundaries.
	 */

	/* cra_end is stripe level, convert it into file level */
	ra_end = ra->cra_end_idx;
	if (ra_end != CL_PAGE_EOF)
		ra->cra_end_idx = lov_stripe_pgoff(loo->lo_lsm, index,
						   ra_end, stripe);

	/* boundary of current component */
	ra_end = cl_index(obj, (loff_t)lov_io_extent(lio, index)->e_end);
	if (ra_end != CL_PAGE_EOF && ra->cra_end_idx >= ra_end)
		ra->cra_end_idx = ra_end - 1;

	if (r0->lo_nr == 1) /* single stripe file */
		RETURN(0);

	pps = lov_lse(loo, index)->lsme_stripe_size >> PAGE_SHIFT;

	CDEBUG(D_READA, DFID " max_index = %lu, pps = %u, index = %d, "
	       "stripe_size = %u, stripe no = %u, start index = %lu\n",
	       PFID(lu_object_fid(lov2lu(loo))), ra->cra_end_idx, pps, index,
	       lov_lse(loo, index)->lsme_stripe_size, stripe, start);

	/* never exceed the end of the stripe */
	ra->cra_end_idx = min_t(pgoff_t, ra->cra_end_idx,
				start + pps - start % pps - 1);

	RETURN(0);
}
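/*
 * Hypothetical worked example for the clamp above (1MiB stripes, 4KiB
 * pages, so pps = 256): with start = 300, the stripe containing it
 * covers pages [256, 511], and cra_end_idx is clamped to
 * 300 + 256 - 300 % 256 - 1 = 511, the last page of that stripe.
 */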
/**
 * lov implementation of cl_operations::cio_submit() method. It takes a list
 * of pages in \a queue, splits it into per-stripe sub-lists, invokes
 * cl_io_submit() on underlying devices to submit sub-lists, and then splices
 * everything back.
 *
 * Major complication of this function is a need to handle memory cleansing:
 * cl_io_submit() is called to write out pages as a part of VM memory
 * reclamation, and hence it may not fail due to memory shortages (system
 * dead-locks otherwise). To deal with this, some resources (sub-lists,
 * sub-environment, etc.) are allocated per-device on "startup" (i.e., in a
 * non-memory-cleansing context), and in case of memory shortage, these
 * pre-allocated resources are used by lov_io_submit() under
 * lov_device::ld_mutex mutex.
 */
static int lov_io_submit(const struct lu_env *env,
			 const struct cl_io_slice *ios,
			 enum cl_req_type crt, struct cl_2queue *queue)
{
	struct cl_page_list *qin = &queue->c2_qin;
	struct lov_io *lio = cl2lov_io(env, ios);
	struct lov_io_sub *sub;
	struct cl_page_list *plist = &lov_env_info(env)->lti_plist;
	struct cl_page *page;
	struct cl_page *tmp;
	int index;
	int rc = 0;

	ENTRY;

	cl_page_list_init(plist);
	while (qin->pl_nr > 0) {
		struct cl_2queue *cl2q = &lov_env_info(env)->lti_cl2q;

		page = cl_page_list_first(qin);
		if (lov_page_is_empty(page)) {
			cl_page_list_move(&queue->c2_qout, qin, page);

			/*
			 * only a mirror read can get here, so the pages are
			 * transient. We don't care about the return code of
			 * cl_page_prep() at all.
			 */
			(void) cl_page_prep(env, ios->cis_io, page, crt);
			cl_page_completion(env, page, crt, 0);
			continue;
		}

		cl_2queue_init(cl2q);
		cl_page_list_move(&cl2q->c2_qin, qin, page);

		index = page->cp_lov_index;
		cl_page_list_for_each_safe(page, tmp, qin) {
			/* this page is not on this stripe */
			if (index != page->cp_lov_index)
				continue;

			cl_page_list_move(&cl2q->c2_qin, qin, page);
		}

		sub = lov_sub_get(env, lio, index);
		if (!IS_ERR(sub)) {
			rc = cl_io_submit_rw(sub->sub_env, &sub->sub_io,
					     crt, cl2q);
		} else {
			rc = PTR_ERR(sub);
		}

		cl_page_list_splice(&cl2q->c2_qin, plist);
		cl_page_list_splice(&cl2q->c2_qout, &queue->c2_qout);
		cl_2queue_fini(env, cl2q);

		if (rc != 0)
			break;
	}

	cl_page_list_splice(plist, qin);
	cl_page_list_fini(env, plist);

	RETURN(rc);
}
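/*
 * Pages the sub-ios did not accept accumulate in plist and are spliced
 * back onto c2_qin at the end, so on error the caller can tell exactly
 * which pages were never submitted.
 */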
static int lov_io_commit_async(const struct lu_env *env,
			       const struct cl_io_slice *ios,
			       struct cl_page_list *queue, int from, int to,
			       cl_commit_cbt cb)
{
	struct cl_page_list *plist = &lov_env_info(env)->lti_plist;
	struct lov_io *lio = cl2lov_io(env, ios);
	struct lov_io_sub *sub;
	struct cl_page *page;
	int rc = 0;

	ENTRY;

	if (lio->lis_nr_subios == 1) {
		int idx = lio->lis_single_subio_index;

		LASSERT(!lov_page_is_empty(cl_page_list_first(queue)));

		sub = lov_sub_get(env, lio, idx);
		LASSERT(!IS_ERR(sub));
		LASSERT(sub == &lio->lis_single_subio);
		rc = cl_io_commit_async(sub->sub_env, &sub->sub_io, queue,
					from, to, cb);
		RETURN(rc);
	}

	cl_page_list_init(plist);
	while (queue->pl_nr > 0) {
		int stripe_to = to;
		int index;

		LASSERT(plist->pl_nr == 0);
		page = cl_page_list_first(queue);
		LASSERT(!lov_page_is_empty(page));

		cl_page_list_move(plist, queue, page);

		index = page->cp_lov_index;
		while (queue->pl_nr > 0) {
			page = cl_page_list_first(queue);
			if (index != page->cp_lov_index)
				break;

			cl_page_list_move(plist, queue, page);
		}

		if (queue->pl_nr > 0) /* still has more pages */
			stripe_to = PAGE_SIZE;

		sub = lov_sub_get(env, lio, index);
		if (!IS_ERR(sub)) {
			rc = cl_io_commit_async(sub->sub_env, &sub->sub_io,
						plist, from, stripe_to, cb);
		} else {
			rc = PTR_ERR(sub);
			break;
		}

		if (plist->pl_nr > 0) /* short write */
			break;

		from = 0;
	}

	/* for error case, add the page back into the qin list */
	LASSERT(ergo(rc == 0, plist->pl_nr == 0));
	while (plist->pl_nr > 0) {
		/* error occurred, add the uncommitted pages back into queue */
		page = cl_page_list_last(plist);
		cl_page_list_move_head(queue, plist, page);
	}

	RETURN(rc);
}
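/*
 * Note the [from, to) handling above: only the first per-stripe batch
 * may start at a non-zero 'from', and only the last may end before
 * 'to'; every batch in between covers whole pages, hence
 * stripe_to = PAGE_SIZE and the reset of 'from' to 0 after each batch.
 */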
static int lov_io_fault_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct cl_fault_io *fio;
	struct lov_io *lio;
	struct lov_io_sub *sub;

	ENTRY;

	fio = &ios->cis_io->u.ci_fault;
	lio = cl2lov_io(env, ios);
	sub = lov_sub_get(env, lio, fio->ft_page->cp_lov_index);
	sub->sub_io.u.ci_fault.ft_nob = fio->ft_nob;

	RETURN(lov_io_start(env, ios));
}
static void lov_io_fsync_end(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct lov_io *lio = cl2lov_io(env, ios);
	struct lov_io_sub *sub;
	unsigned int *written = &ios->cis_io->u.ci_fsync.fi_nr_written;

	ENTRY;
	*written = 0;
	list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
		struct cl_io *subio = &sub->sub_io;

		lov_io_end_wrapper(sub->sub_env, subio);

		if (subio->ci_result == 0)
			*written += subio->u.ci_fsync.fi_nr_written;
	}
	RETURN_EXIT;
}
static const struct cl_io_operations lov_io_ops = {
	.op = {
		[CIT_READ] = {
			.cio_fini = lov_io_fini,
			.cio_iter_init = lov_io_rw_iter_init,
			.cio_iter_fini = lov_io_iter_fini,
			.cio_lock = lov_io_lock,
			.cio_unlock = lov_io_unlock,
			.cio_start = lov_io_start,
			.cio_end = lov_io_end
		},
		[CIT_WRITE] = {
			.cio_fini = lov_io_fini,
			.cio_iter_init = lov_io_rw_iter_init,
			.cio_iter_fini = lov_io_iter_fini,
			.cio_lock = lov_io_lock,
			.cio_unlock = lov_io_unlock,
			.cio_start = lov_io_start,
			.cio_end = lov_io_end
		},
		[CIT_SETATTR] = {
			.cio_fini = lov_io_fini,
			.cio_iter_init = lov_io_setattr_iter_init,
			.cio_iter_fini = lov_io_iter_fini,
			.cio_lock = lov_io_lock,
			.cio_unlock = lov_io_unlock,
			.cio_start = lov_io_start,
			.cio_end = lov_io_end
		},
		[CIT_DATA_VERSION] = {
			.cio_fini = lov_io_fini,
			.cio_iter_init = lov_io_iter_init,
			.cio_iter_fini = lov_io_iter_fini,
			.cio_lock = lov_io_lock,
			.cio_unlock = lov_io_unlock,
			.cio_start = lov_io_start,
			.cio_end = lov_io_data_version_end,
		},
		[CIT_FAULT] = {
			.cio_fini = lov_io_fini,
			.cio_iter_init = lov_io_iter_init,
			.cio_iter_fini = lov_io_iter_fini,
			.cio_lock = lov_io_lock,
			.cio_unlock = lov_io_unlock,
			.cio_start = lov_io_fault_start,
			.cio_end = lov_io_end
		},
		[CIT_FSYNC] = {
			.cio_fini = lov_io_fini,
			.cio_iter_init = lov_io_iter_init,
			.cio_iter_fini = lov_io_iter_fini,
			.cio_lock = lov_io_lock,
			.cio_unlock = lov_io_unlock,
			.cio_start = lov_io_start,
			.cio_end = lov_io_fsync_end
		},
		[CIT_LADVISE] = {
			.cio_fini = lov_io_fini,
			.cio_iter_init = lov_io_iter_init,
			.cio_iter_fini = lov_io_iter_fini,
			.cio_lock = lov_io_lock,
			.cio_unlock = lov_io_unlock,
			.cio_start = lov_io_start,
			.cio_end = lov_io_end
		},
		[CIT_GLIMPSE] = {
			.cio_fini = lov_io_fini,
		},
		[CIT_MISC] = {
			.cio_fini = lov_io_fini
		}
	},
	.cio_read_ahead = lov_io_read_ahead,
	.cio_submit = lov_io_submit,
	.cio_commit_async = lov_io_commit_async,
};
/*****************************************************************************
 *
 * Empty lov io operations.
 *
 *****************************************************************************/
static void lov_empty_io_fini(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct lov_object *lov = cl2lov(ios->cis_obj);
	ENTRY;

	if (atomic_dec_and_test(&lov->lo_active_ios))
		wake_up_all(&lov->lo_waitq);
	EXIT;
}
static int lov_empty_io_submit(const struct lu_env *env,
			       const struct cl_io_slice *ios,
			       enum cl_req_type crt, struct cl_2queue *queue)
{
	return 0;
}

static void lov_empty_impossible(const struct lu_env *env,
				 struct cl_io_slice *ios)
{
	LBUG();
}

#define LOV_EMPTY_IMPOSSIBLE ((void *)lov_empty_impossible)
/**
 * An io operation vector for files without stripes.
 */
static const struct cl_io_operations lov_empty_io_ops = {
	.op = {
		[CIT_READ] = {
			.cio_fini = lov_empty_io_fini,
#if 0
			.cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
			.cio_lock = LOV_EMPTY_IMPOSSIBLE,
			.cio_start = LOV_EMPTY_IMPOSSIBLE,
			.cio_end = LOV_EMPTY_IMPOSSIBLE
#endif
		},
		[CIT_WRITE] = {
			.cio_fini = lov_empty_io_fini,
			.cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
			.cio_lock = LOV_EMPTY_IMPOSSIBLE,
			.cio_start = LOV_EMPTY_IMPOSSIBLE,
			.cio_end = LOV_EMPTY_IMPOSSIBLE
		},
		[CIT_SETATTR] = {
			.cio_fini = lov_empty_io_fini,
			.cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
			.cio_lock = LOV_EMPTY_IMPOSSIBLE,
			.cio_start = LOV_EMPTY_IMPOSSIBLE,
			.cio_end = LOV_EMPTY_IMPOSSIBLE
		},
		[CIT_FAULT] = {
			.cio_fini = lov_empty_io_fini,
			.cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
			.cio_lock = LOV_EMPTY_IMPOSSIBLE,
			.cio_start = LOV_EMPTY_IMPOSSIBLE,
			.cio_end = LOV_EMPTY_IMPOSSIBLE
		},
		[CIT_FSYNC] = {
			.cio_fini = lov_empty_io_fini
		},
		[CIT_LADVISE] = {
			.cio_fini = lov_empty_io_fini
		},
		[CIT_GLIMPSE] = {
			.cio_fini = lov_empty_io_fini
		},
		[CIT_MISC] = {
			.cio_fini = lov_empty_io_fini
		}
	},
	.cio_submit = lov_empty_io_submit,
	.cio_commit_async = LOV_EMPTY_IMPOSSIBLE
};
int lov_io_init_composite(const struct lu_env *env, struct cl_object *obj,
			  struct cl_io *io)
{
	struct lov_io *lio = lov_env_io(env);
	struct lov_object *lov = cl2lov(obj);
	int result;

	ENTRY;

	INIT_LIST_HEAD(&lio->lis_active);
	result = lov_io_slice_init(lio, lov, io);
	if (result)
		GOTO(out, result);

	result = lov_io_subio_init(env, lio, io);
	if (!result) {
		cl_io_slice_add(io, &lio->lis_cl, obj, &lov_io_ops);
		atomic_inc(&lov->lo_active_ios);
	}
	EXIT;
out:
	io->ci_result = result < 0 ? result : 0;
	return io->ci_result;
}
int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
		      struct cl_io *io)
{
	struct lov_object *lov = cl2lov(obj);
	struct lov_io *lio = lov_env_io(env);
	int result;

	ENTRY;

	lio->lis_object = lov;
	switch (io->ci_type) {
	default:
		LBUG();
	case CIT_MISC:
	case CIT_GLIMPSE:
	case CIT_READ:
		result = 0;
		break;
	case CIT_FSYNC:
	case CIT_LADVISE:
	case CIT_SETATTR:
	case CIT_DATA_VERSION:
		result = +1;
		break;
	case CIT_WRITE:
		result = -EBADF;
		break;
	case CIT_FAULT:
		result = -EFAULT;
		CERROR("Page fault on a file without stripes: "DFID"\n",
		       PFID(lu_object_fid(&obj->co_lu)));
		break;
	}
	if (result == 0) {
		cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops);
		atomic_inc(&lov->lo_active_ios);
	}

	io->ci_result = result < 0 ? result : 0;
	RETURN(io->ci_result);
}
int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
			 struct cl_io *io)
{
	struct lov_object *lov = cl2lov(obj);
	struct lov_io *lio = lov_env_io(env);
	int result;

	ENTRY;

	LASSERT(lov->lo_lsm != NULL);
	lio->lis_object = lov;

	switch (io->ci_type) {
	default:
		LASSERTF(0, "invalid type %d\n", io->ci_type);
		result = -EOPNOTSUPP;
		break;
	case CIT_GLIMPSE:
	case CIT_MISC:
	case CIT_FSYNC:
	case CIT_LADVISE:
	case CIT_DATA_VERSION:
		result = 1;
		break;
	case CIT_SETATTR:
		/*
		 * the truncate to 0 is managed by MDT:
		 * - in open, for open O_TRUNC
		 * - in setattr, for truncate
		 */

		/*
		 * a truncate to size > 0 triggers a restore; prealloc/punch
		 * also trigger a restore
		 */
		if (cl_io_is_trunc(io) || cl_io_is_fallocate(io)) {
			io->ci_restore_needed = 1;
			result = -ENODATA;
		} else
			result = 1;
		break;
	case CIT_READ:
	case CIT_WRITE:
	case CIT_FAULT:
		io->ci_restore_needed = 1;
		result = -ENODATA;
		break;
	}

	if (result == 0) {
		cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops);
		atomic_inc(&lov->lo_active_ios);
	}

	io->ci_result = result < 0 ? result : 0;
	RETURN(io->ci_result);
}
/**
 * Return the index into composite lo_entries for the given file offset.
 */
int lov_io_layout_at(struct lov_io *lio, __u64 offset)
{
	struct lov_object *lov = lio->lis_object;
	struct lov_layout_composite *comp = &lov->u.composite;
	int start_index = 0;
	int end_index = comp->lo_entry_count - 1;
	int i;

	LASSERT(lov->lo_type == LLT_COMP);

	/* This is the actual file offset, so nothing can cover EOF. */
	if (offset == LUSTRE_EOF)
		return -1;

	if (lov_is_flr(lov)) {
		struct lov_mirror_entry *lre;

		LASSERT(lio->lis_mirror_index >= 0);

		lre = &comp->lo_mirrors[lio->lis_mirror_index];
		start_index = lre->lre_start;
		end_index = lre->lre_end;
	}

	for (i = start_index; i <= end_index; i++) {
		struct lov_layout_entry *lle = lov_entry(lov, i);

		if ((offset >= lle->lle_extent->e_start &&
		     offset < lle->lle_extent->e_end) ||
		    (offset == OBD_OBJECT_EOF &&
		     lle->lle_extent->e_end == OBD_OBJECT_EOF))
			return i;
	}

	return -1;
}
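/*
 * Callers treat a negative return as "no component covers this
 * offset"; e.g. lov_io_rw_iter_init() maps it to -ENODATA for writes,
 * while reads fall through to lov_io_iter_init().
 */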