/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for LOV layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */
#define DEBUG_SUBSYSTEM S_LOV

#include "lov_cl_internal.h"
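
/*
 * Allocate a sub-IO slot for the composite entry/stripe encoded in @index.
 * The first sub-IO of a LOV io (lis_nr_subios == 0) reuses the embedded
 * lio->lis_single_subio so that the common single-stripe case needs no
 * separate allocation.
 */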
static inline struct lov_io_sub *lov_sub_alloc(struct lov_io *lio, int index)
{
        struct lov_io_sub *sub;

        if (lio->lis_nr_subios == 0) {
                LASSERT(lio->lis_single_subio_index == -1);
                sub = &lio->lis_single_subio;
                lio->lis_single_subio_index = index;
                memset(sub, 0, sizeof(*sub));
        }

        INIT_LIST_HEAD(&sub->sub_list);
        INIT_LIST_HEAD(&sub->sub_linkage);
        sub->sub_subio_index = index;
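
/*
 * Release a sub-IO. If it is the embedded single sub-IO, just clear the
 * recorded index so the slot can be reused.
 */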
static inline void lov_sub_free(struct lov_io *lio, struct lov_io_sub *sub)
{
        if (sub->sub_subio_index == lio->lis_single_subio_index) {
                LASSERT(sub == &lio->lis_single_subio);
                lio->lis_single_subio_index = -1;
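
/*
 * Finalize a sub-IO: finish the sub cl_io and drop the private cl environment
 * that was obtained for it in lov_io_sub_init().
 */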
static void lov_io_sub_fini(const struct lu_env *env, struct lov_io *lio,
                            struct lov_io_sub *sub)
{
        cl_io_fini(sub->sub_env, &sub->sub_io);

        if (sub->sub_env && !IS_ERR(sub->sub_env)) {
                cl_env_put(sub->sub_env, &sub->sub_refcheck);

is_index_within_mirror(struct lov_object *lov, int index, int mirror_index)
{
        struct lov_layout_composite *comp = &lov->u.composite;
        struct lov_mirror_entry *lre = &comp->lo_mirrors[mirror_index];

        return (index >= lre->lre_start && index <= lre->lre_end);
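
/*
 * Initialize a sub-IO against the sub-object identified by the composite
 * entry/stripe pair stored in sub->sub_subio_index: obtain a private cl
 * environment, copy the relevant flags from the parent cl_io and call
 * cl_io_sub_init() on the sub-object.
 */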
static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio,
                           struct lov_io_sub *sub)
{
        struct lov_object *lov = lio->lis_object;
        struct cl_io *sub_io;
        struct cl_object *sub_obj;
        struct cl_io *io = lio->lis_cl.cis_io;
        int index = lov_comp_entry(sub->sub_subio_index);
        int stripe = lov_comp_stripe(sub->sub_subio_index);

        LASSERT(sub->sub_env == NULL);

        if (unlikely(!lov_r0(lov, index)->lo_sub ||
                     !lov_r0(lov, index)->lo_sub[stripe]))

        LASSERTF(is_index_within_mirror(lov, index, lio->lis_mirror_index),
                 DFID "iot = %d, index = %d, mirror = %d\n",
                 PFID(lu_object_fid(lov2lu(lov))), io->ci_type, index,
                 lio->lis_mirror_index);

        /* obtain new environment */
        sub->sub_env = cl_env_get(&sub->sub_refcheck);
        if (IS_ERR(sub->sub_env)) {
                result = PTR_ERR(sub->sub_env);

        sub_obj = lovsub2cl(lov_r0(lov, index)->lo_sub[stripe]);
        sub_io = &sub->sub_io;

        sub_io->ci_obj = sub_obj;
        sub_io->ci_result = 0;

        sub_io->ci_parent = io;
        sub_io->ci_lockreq = io->ci_lockreq;
        sub_io->ci_type = io->ci_type;
        sub_io->ci_no_srvlock = io->ci_no_srvlock;
        sub_io->ci_noatime = io->ci_noatime;
        sub_io->ci_async_readahead = io->ci_async_readahead;
        sub_io->ci_lock_no_expand = io->ci_lock_no_expand;
        sub_io->ci_ndelay = io->ci_ndelay;
        sub_io->ci_layout_version = io->ci_layout_version;

        result = cl_io_sub_init(sub->sub_env, sub_io, io->ci_type, sub_obj);

        lov_io_sub_fini(env, lio, sub);
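
/*
 * Look up the sub-IO for a composite index, creating and initializing it on
 * first use and linking it into lio->lis_subios.
 */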
struct lov_io_sub *lov_sub_get(const struct lu_env *env,
                               struct lov_io *lio, int index)
{
        struct lov_io_sub *sub;

        list_for_each_entry(sub, &lio->lis_subios, sub_list) {
                if (sub->sub_subio_index == index) {

        sub = lov_sub_alloc(lio, index);
        GOTO(out, rc = -ENOMEM);

        rc = lov_io_sub_init(env, lio, sub);
        lov_sub_free(lio, sub);

        list_add_tail(&sub->sub_list, &lio->lis_subios);
        lio->lis_nr_subios++;

/*****************************************************************************
 */
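
/*
 * Return the composite layout index a cl_page belongs to. The index is
 * recorded in the LOV page slice when the page is created.
 */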
int lov_page_index(const struct cl_page *page)
{
        const struct cl_page_slice *slice;

        slice = cl_page_at(page, &lov_device_type);
        LASSERT(slice != NULL);
        LASSERT(slice->cpl_obj != NULL);

        RETURN(cl2lov_page(slice)->lps_index);
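
/*
 * Reset the sub-IO bookkeeping of a lov_io before (re)use: no sub-IOs yet and
 * no single-stripe fast-path index recorded.
 */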
static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,

        LASSERT(lio->lis_object != NULL);

        INIT_LIST_HEAD(&lio->lis_subios);
        lio->lis_single_subio_index = -1;
        lio->lis_nr_subios = 0;

/*
 * Decide whether a write intent RPC will be needed for this I/O.
 */
static int lov_io_mirror_write_intent(struct lov_io *lio,
                                      struct lov_object *obj, struct cl_io *io)
{
        struct lov_layout_composite *comp = &obj->u.composite;
        struct lu_extent *ext = &io->ci_write_intent;
        struct lov_mirror_entry *lre;
        struct lov_mirror_entry *primary;
        struct lov_layout_entry *lle;

        *ext = (typeof(*ext)) { lio->lis_pos, lio->lis_endpos };
        io->ci_need_write_intent = 0;

        if (!(io->ci_type == CIT_WRITE || cl_io_is_trunc(io) ||
              cl_io_is_mkwrite(io)))

        /*
         * FLR: check if it needs to send a write intent RPC to the server.
         * Writing to a sync_pending file needs a write intent RPC to change
         * the file state back to write_pending, so that the layout version
         * can be increased when the state changes to sync_pending at a later
         * time. Otherwise there exists a chance that an evicted client may
         * dirty the file data while the resync client is working on it.
         * Designated I/O is allowed for resync workload.
         */
        if (lov_flr_state(obj) == LCM_FL_RDONLY ||
            (lov_flr_state(obj) == LCM_FL_SYNC_PENDING &&
             io->ci_designated_mirror == 0)) {
                io->ci_need_write_intent = 1;

        LASSERT((lov_flr_state(obj) == LCM_FL_WRITE_PENDING));
        LASSERT(comp->lo_preferred_mirror >= 0);

        /*
         * need to iterate all components of the primary mirror to see whether
         * the write extent overlaps multiple components
         */
        primary = &comp->lo_mirrors[comp->lo_preferred_mirror];
        LASSERT(!primary->lre_stale);
        lov_foreach_mirror_layout_entry(obj, lle, primary) {
                LASSERT(lle->lle_valid);
                if (!lu_extent_is_overlapped(ext, lle->lle_extent))

                ext->e_start = MIN(ext->e_start, lle->lle_extent->e_start);
                ext->e_end = MAX(ext->e_end, lle->lle_extent->e_end);

        CERROR(DFID ": cannot find any valid components covering "
               "file extent "DEXT", mirror: %d\n",
               PFID(lu_object_fid(lov2lu(obj))), PEXT(ext),
               primary->lre_mirror_id);

        lov_foreach_mirror_entry(obj, lre) {
                lov_foreach_mirror_layout_entry(obj, lle, lre) {
                        if (lu_extent_is_overlapped(ext, lle->lle_extent)) {

        CDEBUG(D_VFSTRACE, DFID "there are %zd components to be staled to "
               "modify file extent "DEXT", iot: %d\n",
               PFID(lu_object_fid(lov2lu(obj))), count, PEXT(ext), io->ci_type);

        io->ci_need_write_intent = count > 0;
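
/*
 * Pick the mirror this I/O will be issued against. Plain (non-FLR) files
 * always use mirror 0. For FLR files this handles the write intent check,
 * designated resync I/O, and round-robin selection of a valid read mirror,
 * backing off briefly once every mirror has been tried.
 */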
static int lov_io_mirror_init(struct lov_io *lio, struct lov_object *obj,

        struct lov_layout_composite *comp = &obj->u.composite;

        if (!lov_is_flr(obj)) {
                LASSERT(comp->lo_preferred_mirror == 0);
                lio->lis_mirror_index = comp->lo_preferred_mirror;

        /* transfer the layout version for verification */
        if (io->ci_layout_version == 0)
                io->ci_layout_version = obj->lo_lsm->lsm_layout_gen;

        /* find the corresponding mirror for designated mirror I/O */
        if (io->ci_designated_mirror > 0) {
                struct lov_mirror_entry *entry;

                LASSERT(!io->ci_ndelay);

                CDEBUG(D_LAYOUT, "designated I/O mirror state: %d\n",

                if ((cl_io_is_trunc(io) || io->ci_type == CIT_WRITE) &&
                    (io->ci_layout_version != obj->lo_lsm->lsm_layout_gen)) {
                        /*
                         * For resync I/O, the ci_layout_version was the layout
                         * version when resync starts. If it doesn't match the
                         * current object layout version, it means the layout
                         */

                io->ci_layout_version |= LU_LAYOUT_RESYNC;

                lio->lis_mirror_index = -1;
                lov_foreach_mirror_entry(obj, entry) {
                        if (entry->lre_mirror_id ==
                            io->ci_designated_mirror) {
                                lio->lis_mirror_index = index;

                RETURN(lio->lis_mirror_index < 0 ? -EINVAL : 0);

        result = lov_io_mirror_write_intent(lio, obj, io);

        if (io->ci_need_write_intent) {
                CDEBUG(D_VFSTRACE, DFID " need write intent for [%llu, %llu)\n",
                       PFID(lu_object_fid(lov2lu(obj))),
                       lio->lis_pos, lio->lis_endpos);

                if (cl_io_is_trunc(io)) {
                        /*
                         * for truncate, we use [size, EOF) to decide whether
                         * a write intent needs to be sent, but we need to
                         * restore the write extent to [0, size).
                         */
                        io->ci_write_intent.e_start = 0;
                        io->ci_write_intent.e_end =
                                io->u.ci_setattr.sa_attr.lvb_size;

                /* stop cl_io_init() loop */

        if (io->ci_ndelay_tried == 0 || /* first time to try */
            /* reset the mirror index if layout has changed */
            lio->lis_mirror_layout_gen != obj->lo_lsm->lsm_layout_gen) {
                lio->lis_mirror_layout_gen = obj->lo_lsm->lsm_layout_gen;
                index = lio->lis_mirror_index = comp->lo_last_read_mirror;

                index = lio->lis_mirror_index;

        /* move mirror index to the next one */
        spin_lock(&comp->lo_write_lock);
        if (index == comp->lo_last_read_mirror) {
                index = (index + 1) % comp->lo_mirror_count;
                if (comp->lo_mirrors[index].lre_valid)
                } while (index != comp->lo_last_read_mirror);

                /* reset last read replica so that other threads can
                 * take advantage of our retries. */
                comp->lo_last_read_mirror = index;

                /* last read index was moved by other thread */
                index = comp->lo_last_read_mirror;
        spin_unlock(&comp->lo_write_lock);

        /* make sure the mirror chosen covers the extent we'll read */
        for (i = 0; i < comp->lo_mirror_count; i++) {
                struct lu_extent ext = { .e_start = lio->lis_pos,
                                         .e_end = lio->lis_pos + 1 };
                struct lov_mirror_entry *lre;
                struct lov_layout_entry *lle;

                lre = &comp->lo_mirrors[(index + i) % comp->lo_mirror_count];

                lov_foreach_mirror_layout_entry(obj, lle, lre) {
                        if (lu_extent_is_overlapped(&ext, lle->lle_extent)) {
                                index = (index + i) % comp->lo_mirror_count;

        if (i == comp->lo_mirror_count) {
                CERROR(DFID": failed to find a component covering "
                       "I/O region at %llu\n",
                       PFID(lu_object_fid(lov2lu(obj))), lio->lis_pos);

                dump_lsm(D_ERROR, obj->lo_lsm);

        CDEBUG(D_VFSTRACE, DFID ": flr state: %d, move mirror from %d to %d, "
               "have retried: %d, mirror count: %d\n",
               PFID(lu_object_fid(lov2lu(obj))), lov_flr_state(obj),
               lio->lis_mirror_index, index, io->ci_ndelay_tried,
               comp->lo_mirror_count);

        lio->lis_mirror_index = index;

        /*
         * FLR: if all mirrors have been tried once, most likely the network
         * of this client has been partitioned. We should relinquish CPU for
         * a while before trying again.
         */
        ++io->ci_ndelay_tried;
        if (io->ci_ndelay && io->ci_ndelay_tried >= comp->lo_mirror_count) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC / 100)); /* 10ms */
                if (signal_pending(current))

                /* reset retry counter */
                io->ci_ndelay_tried = 1;

        CDEBUG(D_VFSTRACE, "use %sdelayed RPC state for this IO\n",
               io->ci_ndelay ? "non-" : "");
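
/*
 * Compute the file-level extent [lis_pos, lis_endpos) affected by this I/O,
 * pick the mirror to use, and decide whether uninstantiated components need
 * a write intent before the I/O can proceed.
 */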
static int lov_io_slice_init(struct lov_io *lio,
                             struct lov_object *obj, struct cl_io *io)
{
        lio->lis_object = obj;

        LASSERT(obj->lo_lsm != NULL);

        switch (io->ci_type) {
                lio->lis_pos = io->u.ci_rw.crw_pos;
                lio->lis_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count;
                lio->lis_io_endpos = lio->lis_endpos;
                if (cl_io_is_append(io)) {
                        LASSERT(io->ci_type == CIT_WRITE);

                        /*
                         * If there is a LOV EA hole, then we may not be able
                         * to locate the current file tail exactly.
                         */
                        if (unlikely(obj->lo_lsm->lsm_entries[0]->lsme_pattern &
                                GOTO(out, result = -EIO);

                        lio->lis_endpos = OBD_OBJECT_EOF;

                if (cl_io_is_trunc(io))
                        lio->lis_pos = io->u.ci_setattr.sa_attr.lvb_size;
                lio->lis_endpos = OBD_OBJECT_EOF;

        case CIT_DATA_VERSION:
                lio->lis_endpos = OBD_OBJECT_EOF;

                pgoff_t index = io->u.ci_fault.ft_index;

                lio->lis_pos = cl_offset(io->ci_obj, index);
                lio->lis_endpos = cl_offset(io->ci_obj, index + 1);

                lio->lis_pos = io->u.ci_fsync.fi_start;
                lio->lis_endpos = io->u.ci_fsync.fi_end;

                lio->lis_pos = io->u.ci_ladvise.li_start;
                lio->lis_endpos = io->u.ci_ladvise.li_end;

                lio->lis_endpos = OBD_OBJECT_EOF;

                if (lov_flr_state(obj) == LCM_FL_RDONLY &&
                    !OBD_FAIL_CHECK(OBD_FAIL_FLR_GLIMPSE_IMMUTABLE))
                        /* SoM is accurate, no need to send glimpse */
                        GOTO(out, result = 1);

                lio->lis_endpos = OBD_OBJECT_EOF;

        result = lov_io_mirror_init(lio, obj, io);

        /* check if it needs to instantiate layout */
        if (!(io->ci_type == CIT_WRITE || cl_io_is_mkwrite(io) ||
              (cl_io_is_trunc(io) && io->u.ci_setattr.sa_attr.lvb_size > 0)))
                GOTO(out, result = 0);

        /*
         * for truncate, it only needs to instantiate the components
         * before the truncated size.
         */
        if (cl_io_is_trunc(io)) {
                io->ci_write_intent.e_start = 0;
                io->ci_write_intent.e_end = io->u.ci_setattr.sa_attr.lvb_size;

                io->ci_write_intent.e_start = lio->lis_pos;
                io->ci_write_intent.e_end = lio->lis_endpos;

        lov_foreach_io_layout(index, lio, &io->ci_write_intent) {
                if (!lsm_entry_inited(obj->lo_lsm, index)) {
                        io->ci_need_write_intent = 1;

        if (io->ci_need_write_intent && io->ci_designated_mirror > 0) {
                /*
                 * REINT_SYNC RPC has already tried to instantiate all of the
                 * components involved, obviously it didn't succeed. Skip this
                 * mirror for now. The server won't be able to figure out
                 * which mirror it should instantiate the components for.
                 */
                CERROR(DFID": trying to instantiate components for designated "
                       "I/O, file state: %d\n",
                       PFID(lu_object_fid(lov2lu(obj))), lov_flr_state(obj));

                io->ci_need_write_intent = 0;
                GOTO(out, result = -EIO);

        if (io->ci_need_write_intent)
                GOTO(out, result = 1);
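
/*
 * Finalize the LOV io slice: tear down and free all sub-IOs and drop the
 * active I/O count on the object, waking up anyone waiting for it to drain.
 */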
static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_object *lov = cl2lov(ios->cis_obj);

        LASSERT(list_empty(&lio->lis_active));

        while (!list_empty(&lio->lis_subios)) {
                struct lov_io_sub *sub = list_entry(lio->lis_subios.next,

                list_del_init(&sub->sub_list);
                lio->lis_nr_subios--;

                lov_io_sub_fini(env, lio, sub);
                lov_sub_free(lio, sub);
        }
        LASSERT(lio->lis_nr_subios == 0);

        LASSERT(atomic_read(&lov->lo_active_ios) > 0);
        if (atomic_dec_and_test(&lov->lo_active_ios))
                wake_up_all(&lov->lo_waitq);
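
/*
 * Propagate the parameters of the parent cl_io into a sub-IO, translating
 * file-level offsets into stripe-level offsets where necessary (truncate
 * size, fault index, read/write position).
 */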
static void lov_io_sub_inherit(struct lov_io_sub *sub, struct lov_io *lio,
                               loff_t start, loff_t end)
{
        struct cl_io *io = &sub->sub_io;
        struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
        struct cl_io *parent = lio->lis_cl.cis_io;
        int index = lov_comp_entry(sub->sub_subio_index);
        int stripe = lov_comp_stripe(sub->sub_subio_index);

        switch (io->ci_type) {
                io->u.ci_setattr.sa_attr = parent->u.ci_setattr.sa_attr;
                io->u.ci_setattr.sa_attr_flags =
                        parent->u.ci_setattr.sa_attr_flags;
                io->u.ci_setattr.sa_avalid = parent->u.ci_setattr.sa_avalid;
                io->u.ci_setattr.sa_xvalid = parent->u.ci_setattr.sa_xvalid;
                io->u.ci_setattr.sa_stripe_index = stripe;
                io->u.ci_setattr.sa_parent_fid =
                        parent->u.ci_setattr.sa_parent_fid;
                if (cl_io_is_trunc(io)) {
                        loff_t new_size = parent->u.ci_setattr.sa_attr.lvb_size;

                        new_size = lov_size_to_stripe(lsm, index, new_size,
                        io->u.ci_setattr.sa_attr.lvb_size = new_size;
                }
                lov_lsm2layout(lsm, lsm->lsm_entries[index],
                               &io->u.ci_setattr.sa_layout);

        case CIT_DATA_VERSION: {
                io->u.ci_data_version.dv_data_version = 0;
                io->u.ci_data_version.dv_flags =
                        parent->u.ci_data_version.dv_flags;

                struct cl_object *obj = parent->ci_obj;
                loff_t off = cl_offset(obj, parent->u.ci_fault.ft_index);

                io->u.ci_fault = parent->u.ci_fault;
                off = lov_size_to_stripe(lsm, index, off, stripe);
                io->u.ci_fault.ft_index = cl_index(obj, off);

                io->u.ci_fsync.fi_start = start;
                io->u.ci_fsync.fi_end = end;
                io->u.ci_fsync.fi_fid = parent->u.ci_fsync.fi_fid;
                io->u.ci_fsync.fi_mode = parent->u.ci_fsync.fi_mode;

                io->u.ci_wr.wr_sync = cl_io_is_sync_write(parent);
                if (cl_io_is_append(parent)) {
                        io->u.ci_wr.wr_append = 1;

                io->u.ci_rw.crw_pos = start;
                io->u.ci_rw.crw_count = end - start;

                io->u.ci_ladvise.li_start = start;
                io->u.ci_ladvise.li_end = end;
                io->u.ci_ladvise.li_fid = parent->u.ci_ladvise.li_fid;
                io->u.ci_ladvise.li_advice = parent->u.ci_ladvise.li_advice;
                io->u.ci_ladvise.li_flags = parent->u.ci_ladvise.li_flags;

static loff_t lov_offset_mod(loff_t val, int delta)
{
        if (val != OBD_OBJECT_EOF)
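
/*
 * Set up one iteration of a striped I/O: walk the layout components that
 * overlap [lis_pos, lis_endpos), create and initialize the per-stripe
 * sub-IOs that intersect the extent, and queue them on lio->lis_active.
 */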
static int lov_io_iter_init(const struct lu_env *env,
                            const struct cl_io_slice *ios)
{
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
        struct lov_io_sub *sub;
        struct lu_extent ext;

        ext.e_start = lio->lis_pos;
        ext.e_end = lio->lis_endpos;

        lov_foreach_io_layout(index, lio, &ext) {
                struct lov_layout_entry *le = lov_entry(lio->lis_object, index);
                struct lov_layout_raid0 *r0 = &le->lle_raid0;

                CDEBUG(D_VFSTRACE, "component[%d] flags %#x\n",
                       index, lsm->lsm_entries[index]->lsme_flags);
                if (!lsm_entry_inited(lsm, index)) {
                        /*
                         * Read from uninitialized components should return
                         */

                if (!le->lle_valid && !ios->cis_io->ci_designated_mirror) {
                        CERROR("I/O to invalid component: %d, mirror: %d\n",
                               index, lio->lis_mirror_index);

                for (stripe = 0; stripe < r0->lo_nr; stripe++) {
                        if (!lov_stripe_intersects(lsm, index, stripe,

                        if (unlikely(!r0->lo_sub[stripe])) {
                                if (ios->cis_io->ci_type == CIT_READ ||
                                    ios->cis_io->ci_type == CIT_WRITE ||
                                    ios->cis_io->ci_type == CIT_FAULT)

                        end = lov_offset_mod(end, 1);
                        sub = lov_sub_get(env, lio,
                                          lov_comp_index(index, stripe));

                        lov_io_sub_inherit(sub, lio, start, end);
                        rc = cl_io_iter_init(sub->sub_env, &sub->sub_io);
                        cl_io_iter_fini(sub->sub_env, &sub->sub_io);

                        CDEBUG(D_VFSTRACE, "shrink: %d [%llu, %llu)\n",

                        list_add_tail(&sub->sub_linkage, &lio->lis_active);
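
/*
 * Start one read/write iteration: clip the current chunk so that it stays
 * within a single stripe of a single component, set io->ci_continue if more
 * data remains, and then fall through to the generic iteration setup.
 */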
static int lov_io_rw_iter_init(const struct lu_env *env,
                               const struct cl_io_slice *ios)
{
        struct lov_io *lio = cl2lov_io(env, ios);
        struct cl_io *io = ios->cis_io;
        struct lov_stripe_md_entry *lse;
        loff_t start = io->u.ci_rw.crw_pos;

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

        if (cl_io_is_append(io))
                RETURN(lov_io_iter_init(env, ios));

        index = lov_io_layout_at(lio, io->u.ci_rw.crw_pos);
        if (index < 0) { /* non-existing layout component */
                if (io->ci_type == CIT_READ) {
                        /*
                         * TODO: it needs to detect the next component and
                         * then set the next pos
                         */

                RETURN(lov_io_iter_init(env, ios));

        if (!lov_entry(lio->lis_object, index)->lle_valid &&
            !io->ci_designated_mirror)
                RETURN(io->ci_type == CIT_READ ? -EAGAIN : -EIO);

        lse = lov_lse(lio->lis_object, index);

        next = MAX_LFS_FILESIZE;
        if (lse->lsme_stripe_count > 1) {
                unsigned long ssize = lse->lsme_stripe_size;

                lov_do_div64(start, ssize);
                next = (start + 1) * ssize;
                if (next <= start * ssize)
                        next = MAX_LFS_FILESIZE;
        }

        LASSERTF(io->u.ci_rw.crw_pos >= lse->lsme_extent.e_start,
                 "pos %lld, [%lld, %lld)\n", io->u.ci_rw.crw_pos,
                 lse->lsme_extent.e_start, lse->lsme_extent.e_end);
        next = min_t(__u64, next, lse->lsme_extent.e_end);
        next = min_t(loff_t, next, lio->lis_io_endpos);

        io->ci_continue = next < lio->lis_io_endpos;
        io->u.ci_rw.crw_count = next - io->u.ci_rw.crw_pos;
        lio->lis_pos = io->u.ci_rw.crw_pos;
        lio->lis_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count;

               "stripe: %llu chunk: [%llu, %llu) %llu, %zd\n",
               (__u64)start, lio->lis_pos, lio->lis_endpos,
               (__u64)lio->lis_io_endpos, io->u.ci_rw.crw_count);

        /*
         * XXX The following call should be optimized: we know that
         * [lio->lis_pos, lio->lis_endpos) intersects with exactly one stripe.
         */
        RETURN(lov_io_iter_init(env, ios));

static int lov_io_setattr_iter_init(const struct lu_env *env,
                                    const struct cl_io_slice *ios)
{
        struct lov_io *lio = cl2lov_io(env, ios);
        struct cl_io *io = ios->cis_io;

        if (cl_io_is_trunc(io) && lio->lis_pos > 0) {
                index = lov_io_layout_at(lio, lio->lis_pos - 1);
                /* no entry found for such offset */
                RETURN(io->ci_result = -ENODATA);

        RETURN(lov_io_iter_init(env, ios));
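
/*
 * Apply @iofunc to every active sub-IO and propagate the first non-zero
 * result into the parent cl_io. Used to fan out lock/start/end operations.
 */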
static int lov_io_call(const struct lu_env *env, struct lov_io *lio,
                       int (*iofunc)(const struct lu_env *, struct cl_io *))
{
        struct cl_io *parent = lio->lis_cl.cis_io;
        struct lov_io_sub *sub;

        list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
                rc = iofunc(sub->sub_env, &sub->sub_io);

                if (parent->ci_result == 0)
                        parent->ci_result = sub->sub_io.ci_result;

static int lov_io_lock(const struct lu_env *env, const struct cl_io_slice *ios)
{
        RETURN(lov_io_call(env, cl2lov_io(env, ios), cl_io_lock));

static int lov_io_start(const struct lu_env *env, const struct cl_io_slice *ios)
{
        RETURN(lov_io_call(env, cl2lov_io(env, ios), cl_io_start));

static int lov_io_end_wrapper(const struct lu_env *env, struct cl_io *io)
{
        /*
         * It's possible that lov_io_start() wasn't called against this
         * sub-io, either because previous sub-io failed, or upper layer
         */
        if (io->ci_state == CIS_IO_GOING)

        io->ci_state = CIS_IO_FINISHED;

static int lov_io_iter_fini_wrapper(const struct lu_env *env, struct cl_io *io)
{
        cl_io_iter_fini(env, io);

static int lov_io_unlock_wrapper(const struct lu_env *env, struct cl_io *io)
{
        cl_io_unlock(env, io);

static void lov_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
{
        rc = lov_io_call(env, cl2lov_io(env, ios), lov_io_end_wrapper);
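
/*
 * Gather the per-stripe results of a data_version I/O: sum the data versions
 * of all sub-IOs and keep the smallest layout version reported.
 */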
lov_io_data_version_end(const struct lu_env *env, const struct cl_io_slice *ios)
{
        struct lov_io *lio = cl2lov_io(env, ios);
        struct cl_io *parent = lio->lis_cl.cis_io;
        struct cl_data_version_io *pdv = &parent->u.ci_data_version;
        struct lov_io_sub *sub;

        list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
                struct cl_data_version_io *sdv = &sub->sub_io.u.ci_data_version;

                lov_io_end_wrapper(sub->sub_env, &sub->sub_io);

                pdv->dv_data_version += sdv->dv_data_version;
                if (pdv->dv_layout_version > sdv->dv_layout_version)
                        pdv->dv_layout_version = sdv->dv_layout_version;

                if (parent->ci_result == 0)
                        parent->ci_result = sub->sub_io.ci_result;

static void lov_io_iter_fini(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct lov_io *lio = cl2lov_io(env, ios);

        rc = lov_io_call(env, lio, lov_io_iter_fini_wrapper);

        while (!list_empty(&lio->lis_active))
                list_del_init(lio->lis_active.next);

static void lov_io_unlock(const struct lu_env *env,
                          const struct cl_io_slice *ios)
{
        rc = lov_io_call(env, cl2lov_io(env, ios), lov_io_unlock_wrapper);
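
/*
 * Map a readahead request to the stripe that contains @start, ask the
 * sub-object how far it can read ahead, and then clamp the answer so it does
 * not cross the stripe or component boundary.
 */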
static int lov_io_read_ahead(const struct lu_env *env,
                             const struct cl_io_slice *ios,
                             pgoff_t start, struct cl_read_ahead *ra)
{
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_object *loo = lio->lis_object;
        struct cl_object *obj = lov2cl(loo);
        struct lov_layout_raid0 *r0;
        struct lov_io_sub *sub;
        unsigned int pps; /* pages per stripe */

        offset = cl_offset(obj, start);
        index = lov_io_layout_at(lio, offset);
        if (index < 0 || !lsm_entry_inited(loo->lo_lsm, index))

        /* avoid readahead to expand to stale components */
        if (!lov_entry(loo, index)->lle_valid)

        stripe = lov_stripe_number(loo->lo_lsm, index, offset);

        r0 = lov_r0(loo, index);
        if (unlikely(!r0->lo_sub[stripe]))

        sub = lov_sub_get(env, lio, lov_comp_index(index, stripe));
                RETURN(PTR_ERR(sub));

        lov_stripe_offset(loo->lo_lsm, index, offset, stripe, &suboff);
        rc = cl_io_read_ahead(sub->sub_env, &sub->sub_io,
                              cl_index(lovsub2cl(r0->lo_sub[stripe]), suboff),

        CDEBUG(D_READA, DFID " cra_end = %lu, stripes = %d, rc = %d\n",
               PFID(lu_object_fid(lov2lu(loo))), ra->cra_end, r0->lo_nr, rc);

        /*
         * Adjust the stripe index by layout of comp. ra->cra_end is the
         * maximum page index covered by an underlying DLM lock.
         * This function converts cra_end from stripe level to file level, and
         * makes sure it's not beyond the stripe and component boundary.
         */

        /* cra_end is stripe level, convert it into file level */
        ra_end = ra->cra_end;
        if (ra_end != CL_PAGE_EOF)
                ra->cra_end = lov_stripe_pgoff(loo->lo_lsm, index,

        /* boundary of current component */
        ra_end = cl_index(obj, (loff_t)lov_io_extent(lio, index)->e_end);
        if (ra_end != CL_PAGE_EOF && ra->cra_end >= ra_end)
                ra->cra_end = ra_end - 1;

        if (r0->lo_nr == 1) /* single stripe file */

        pps = lov_lse(loo, index)->lsme_stripe_size >> PAGE_SHIFT;

        CDEBUG(D_READA, DFID " max_index = %lu, pps = %u, index = %u, "
               "stripe_size = %u, stripe no = %u, start index = %lu\n",
               PFID(lu_object_fid(lov2lu(loo))), ra->cra_end, pps, index,
               lov_lse(loo, index)->lsme_stripe_size, stripe, start);

        /* never exceed the end of the stripe */
        ra->cra_end = min_t(pgoff_t,
                            ra->cra_end, start + pps - start % pps - 1);

/*
 * lov implementation of the cl_io_operations::cio_submit() method. It takes a
 * list of pages in \a queue, splits it into per-stripe sub-lists, invokes
 * cl_io_submit() on underlying devices to submit sub-lists, and then splices
 *
 * Major complication of this function is a need to handle memory cleansing:
 * cl_io_submit() is called to write out pages as a part of VM memory
 * reclamation, and hence it may not fail due to memory shortages (system
 * dead-locks otherwise). To deal with this, some resources (sub-lists,
 * sub-environment, etc.) are allocated per-device on "startup" (i.e., in a
 * not-memory cleansing context), and in case of memory shortage, these
 * pre-allocated resources are used by lov_io_submit() under
 * lov_device::ld_mutex mutex.
 */
static int lov_io_submit(const struct lu_env *env,
                         const struct cl_io_slice *ios,
                         enum cl_req_type crt, struct cl_2queue *queue)
{
        struct cl_page_list *qin = &queue->c2_qin;
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_io_sub *sub;
        struct cl_page_list *plist = &lov_env_info(env)->lti_plist;
        struct cl_page *page;

        cl_page_list_init(plist);
        while (qin->pl_nr > 0) {
                struct cl_2queue *cl2q = &lov_env_info(env)->lti_cl2q;

                page = cl_page_list_first(qin);
                if (lov_page_is_empty(page)) {
                        cl_page_list_move(&queue->c2_qout, qin, page);

                        /*
                         * it could only be mirror read to get here, therefore
                         * the pages will be transient. We don't care about
                         * the return code of cl_page_prep() at all.
                         */
                        (void) cl_page_prep(env, ios->cis_io, page, crt);
                        cl_page_completion(env, page, crt, 0);

                cl_2queue_init(cl2q);
                cl_page_list_move(&cl2q->c2_qin, qin, page);

                index = lov_page_index(page);
                while (qin->pl_nr > 0) {
                        page = cl_page_list_first(qin);
                        if (index != lov_page_index(page))

                        cl_page_list_move(&cl2q->c2_qin, qin, page);
                }

                sub = lov_sub_get(env, lio, index);
                rc = cl_io_submit_rw(sub->sub_env, &sub->sub_io,

                cl_page_list_splice(&cl2q->c2_qin, plist);
                cl_page_list_splice(&cl2q->c2_qout, &queue->c2_qout);
                cl_2queue_fini(env, cl2q);

        cl_page_list_splice(plist, qin);
        cl_page_list_fini(env, plist);
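
/*
 * Commit pages asynchronously: the single sub-IO case is passed straight
 * through, otherwise the page list is split into per-stripe batches and each
 * batch is committed through its own sub-IO. Uncommitted pages are put back
 * on the queue on error.
 */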
static int lov_io_commit_async(const struct lu_env *env,
                               const struct cl_io_slice *ios,
                               struct cl_page_list *queue, int from, int to,

        struct cl_page_list *plist = &lov_env_info(env)->lti_plist;
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_io_sub *sub;
        struct cl_page *page;

        if (lio->lis_nr_subios == 1) {
                int idx = lio->lis_single_subio_index;

                LASSERT(!lov_page_is_empty(cl_page_list_first(queue)));
                sub = lov_sub_get(env, lio, idx);
                LASSERT(!IS_ERR(sub));
                LASSERT(sub == &lio->lis_single_subio);
                rc = cl_io_commit_async(sub->sub_env, &sub->sub_io, queue,

        cl_page_list_init(plist);
        while (queue->pl_nr > 0) {

                LASSERT(plist->pl_nr == 0);
                page = cl_page_list_first(queue);
                LASSERT(!lov_page_is_empty(page));

                cl_page_list_move(plist, queue, page);

                index = lov_page_index(page);
                while (queue->pl_nr > 0) {
                        page = cl_page_list_first(queue);
                        if (index != lov_page_index(page))

                        cl_page_list_move(plist, queue, page);
                }

                if (queue->pl_nr > 0) /* still has more pages */
                        stripe_to = PAGE_SIZE;

                sub = lov_sub_get(env, lio, index);
                rc = cl_io_commit_async(sub->sub_env, &sub->sub_io,
                                        plist, from, stripe_to, cb);

                if (plist->pl_nr > 0) /* short write */

        /* for error case, add the page back into the qin list */
        LASSERT(ergo(rc == 0, plist->pl_nr == 0));
        while (plist->pl_nr > 0) {
                /* error occurred, add the uncommitted pages back into queue */
                page = cl_page_list_last(plist);
                cl_page_list_move_head(queue, plist, page);
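
/*
 * Route a page fault to the sub-IO of the stripe that the faulting page
 * belongs to, then start all active sub-IOs as usual.
 */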
static int lov_io_fault_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct cl_fault_io *fio;
        struct lov_io_sub *sub;

        fio = &ios->cis_io->u.ci_fault;
        lio = cl2lov_io(env, ios);
        sub = lov_sub_get(env, lio, lov_page_index(fio->ft_page));
        sub->sub_io.u.ci_fault.ft_nob = fio->ft_nob;

        RETURN(lov_io_start(env, ios));
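
/*
 * Finish an fsync: end every active sub-IO and accumulate the number of
 * pages written by the successful ones into the parent request.
 */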
static void lov_io_fsync_end(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_io_sub *sub;
        unsigned int *written = &ios->cis_io->u.ci_fsync.fi_nr_written;

        list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
                struct cl_io *subio = &sub->sub_io;

                lov_io_end_wrapper(sub->sub_env, subio);

                if (subio->ci_result == 0)
                        *written += subio->u.ci_fsync.fi_nr_written;

static const struct cl_io_operations lov_io_ops = {
                .cio_fini = lov_io_fini,
                .cio_iter_init = lov_io_rw_iter_init,
                .cio_iter_fini = lov_io_iter_fini,
                .cio_lock = lov_io_lock,
                .cio_unlock = lov_io_unlock,
                .cio_start = lov_io_start,
                .cio_end = lov_io_end

                .cio_fini = lov_io_fini,
                .cio_iter_init = lov_io_rw_iter_init,
                .cio_iter_fini = lov_io_iter_fini,
                .cio_lock = lov_io_lock,
                .cio_unlock = lov_io_unlock,
                .cio_start = lov_io_start,
                .cio_end = lov_io_end

                .cio_fini = lov_io_fini,
                .cio_iter_init = lov_io_setattr_iter_init,
                .cio_iter_fini = lov_io_iter_fini,
                .cio_lock = lov_io_lock,
                .cio_unlock = lov_io_unlock,
                .cio_start = lov_io_start,
                .cio_end = lov_io_end

        [CIT_DATA_VERSION] = {
                .cio_fini = lov_io_fini,
                .cio_iter_init = lov_io_iter_init,
                .cio_iter_fini = lov_io_iter_fini,
                .cio_lock = lov_io_lock,
                .cio_unlock = lov_io_unlock,
                .cio_start = lov_io_start,
                .cio_end = lov_io_data_version_end,

                .cio_fini = lov_io_fini,
                .cio_iter_init = lov_io_iter_init,
                .cio_iter_fini = lov_io_iter_fini,
                .cio_lock = lov_io_lock,
                .cio_unlock = lov_io_unlock,
                .cio_start = lov_io_fault_start,
                .cio_end = lov_io_end

                .cio_fini = lov_io_fini,
                .cio_iter_init = lov_io_iter_init,
                .cio_iter_fini = lov_io_iter_fini,
                .cio_lock = lov_io_lock,
                .cio_unlock = lov_io_unlock,
                .cio_start = lov_io_start,
                .cio_end = lov_io_fsync_end

                .cio_fini = lov_io_fini,
                .cio_iter_init = lov_io_iter_init,
                .cio_iter_fini = lov_io_iter_fini,
                .cio_lock = lov_io_lock,
                .cio_unlock = lov_io_unlock,
                .cio_start = lov_io_start,
                .cio_end = lov_io_end

                .cio_fini = lov_io_fini,

                .cio_fini = lov_io_fini

        .cio_read_ahead = lov_io_read_ahead,
        .cio_submit = lov_io_submit,
        .cio_commit_async = lov_io_commit_async,

/*****************************************************************************
 *
 * Empty lov io operations.
 *
 */

static void lov_empty_io_fini(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct lov_object *lov = cl2lov(ios->cis_obj);

        if (atomic_dec_and_test(&lov->lo_active_ios))
                wake_up_all(&lov->lo_waitq);

static int lov_empty_io_submit(const struct lu_env *env,
                               const struct cl_io_slice *ios,
                               enum cl_req_type crt, struct cl_2queue *queue)

static void lov_empty_impossible(const struct lu_env *env,
                                 struct cl_io_slice *ios)

#define LOV_EMPTY_IMPOSSIBLE ((void *)lov_empty_impossible)

/*
 * An io operation vector for files without stripes.
 */
static const struct cl_io_operations lov_empty_io_ops = {
                .cio_fini = lov_empty_io_fini,
                .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
                .cio_lock = LOV_EMPTY_IMPOSSIBLE,
                .cio_start = LOV_EMPTY_IMPOSSIBLE,
                .cio_end = LOV_EMPTY_IMPOSSIBLE

                .cio_fini = lov_empty_io_fini,
                .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
                .cio_lock = LOV_EMPTY_IMPOSSIBLE,
                .cio_start = LOV_EMPTY_IMPOSSIBLE,
                .cio_end = LOV_EMPTY_IMPOSSIBLE

                .cio_fini = lov_empty_io_fini,
                .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
                .cio_lock = LOV_EMPTY_IMPOSSIBLE,
                .cio_start = LOV_EMPTY_IMPOSSIBLE,
                .cio_end = LOV_EMPTY_IMPOSSIBLE

                .cio_fini = lov_empty_io_fini,
                .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
                .cio_lock = LOV_EMPTY_IMPOSSIBLE,
                .cio_start = LOV_EMPTY_IMPOSSIBLE,
                .cio_end = LOV_EMPTY_IMPOSSIBLE

                .cio_fini = lov_empty_io_fini

                .cio_fini = lov_empty_io_fini

                .cio_fini = lov_empty_io_fini

                .cio_fini = lov_empty_io_fini

        .cio_submit = lov_empty_io_submit,
        .cio_commit_async = LOV_EMPTY_IMPOSSIBLE
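
/*
 * Entry point for I/O on a composite (striped, possibly mirrored) object:
 * set up the lov_io slice, attach it to the cl_io and bump the active I/O
 * count on the object.
 */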
int lov_io_init_composite(const struct lu_env *env, struct cl_object *obj,

        struct lov_io *lio = lov_env_io(env);
        struct lov_object *lov = cl2lov(obj);

        INIT_LIST_HEAD(&lio->lis_active);
        result = lov_io_slice_init(lio, lov, io);

        result = lov_io_subio_init(env, lio, io);

        cl_io_slice_add(io, &lio->lis_cl, obj, &lov_io_ops);
        atomic_inc(&lov->lo_active_ios);

        io->ci_result = result < 0 ? result : 0;
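
/*
 * I/O initializer for files without stripes: page faults are rejected and
 * the empty io operations vector is installed for the remaining operations.
 */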
int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,

        struct lov_object *lov = cl2lov(obj);
        struct lov_io *lio = lov_env_io(env);

        lio->lis_object = lov;
        switch (io->ci_type) {
        case CIT_DATA_VERSION:

                CERROR("Page fault on a file without stripes: "DFID"\n",
                       PFID(lu_object_fid(&obj->co_lu)));

        cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops);
        atomic_inc(&lov->lo_active_ios);

        io->ci_result = result < 0 ? result : 0;
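
/*
 * I/O initializer for HSM-released files: truncate to a non-zero size and
 * other data-accessing operations set ci_restore_needed so the file data is
 * restored first; the empty io operations vector is installed.
 */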
int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,

        struct lov_object *lov = cl2lov(obj);
        struct lov_io *lio = lov_env_io(env);

        LASSERT(lov->lo_lsm != NULL);
        lio->lis_object = lov;

        switch (io->ci_type) {
                LASSERTF(0, "invalid type %d\n", io->ci_type);
                result = -EOPNOTSUPP;

        case CIT_DATA_VERSION:

                /*
                 * the truncate to 0 is managed by MDT:
                 * - in open, for open O_TRUNC
                 * - in setattr, for truncate
                 */
                /* the truncate is for size > 0 so triggers a restore */
                if (cl_io_is_trunc(io)) {
                        io->ci_restore_needed = 1;

                io->ci_restore_needed = 1;

        cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops);
        atomic_inc(&lov->lo_active_ios);

        io->ci_result = result < 0 ? result : 0;

/*
 * Return the index into composite lo_entries for the given file offset.
 */
int lov_io_layout_at(struct lov_io *lio, __u64 offset)
{
        struct lov_object *lov = lio->lis_object;
        struct lov_layout_composite *comp = &lov->u.composite;
        int start_index = 0;
        int end_index = comp->lo_entry_count - 1;

        LASSERT(lov->lo_type == LLT_COMP);

        /* This is actual file offset so nothing can cover eof. */
        if (offset == LUSTRE_EOF)

        if (lov_is_flr(lov)) {
                struct lov_mirror_entry *lre;

                LASSERT(lio->lis_mirror_index >= 0);

                lre = &comp->lo_mirrors[lio->lis_mirror_index];
                start_index = lre->lre_start;
                end_index = lre->lre_end;
        }

        for (i = start_index; i <= end_index; i++) {
                struct lov_layout_entry *lle = lov_entry(lov, i);

                if ((offset >= lle->lle_extent->e_start &&
                     offset < lle->lle_extent->e_end) ||
                    (offset == OBD_OBJECT_EOF &&
                     lle->lle_extent->e_end == OBD_OBJECT_EOF))