/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for LOV layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */
#define DEBUG_SUBSYSTEM S_LOV

#include "lov_cl_internal.h"
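
/*
 * Allocate a sub-io descriptor for the component/stripe @index. The first
 * sub-io of a lov_io is served from the embedded lis_single_subio, so a
 * single-stripe I/O (the common case) needs no allocation.
 */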
static inline struct lov_io_sub *lov_sub_alloc(struct lov_io *lio, int index)
{
        struct lov_io_sub *sub;

        if (lio->lis_nr_subios == 0) {
                LASSERT(lio->lis_single_subio_index == -1);
                sub = &lio->lis_single_subio;
                lio->lis_single_subio_index = index;
                memset(sub, 0, sizeof(*sub));
        } else {
                OBD_ALLOC_PTR(sub);
        }

        if (sub != NULL) {
                INIT_LIST_HEAD(&sub->sub_list);
                INIT_LIST_HEAD(&sub->sub_linkage);
                sub->sub_subio_index = index;
        }

        return sub;
}
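
/*
 * Release a sub-io. The embedded lis_single_subio is marked free by
 * resetting lis_single_subio_index; any other sub-io was allocated above
 * and is freed here.
 */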
static inline void lov_sub_free(struct lov_io *lio, struct lov_io_sub *sub)
{
        if (sub->sub_subio_index == lio->lis_single_subio_index) {
                LASSERT(sub == &lio->lis_single_subio);
                lio->lis_single_subio_index = -1;
        } else {
                OBD_FREE_PTR(sub);
        }
}
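
/* Finalize a sub-io and drop the cl environment it obtained, if any. */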
static void lov_io_sub_fini(const struct lu_env *env, struct lov_io *lio,
                            struct lov_io_sub *sub)
{
        ENTRY;

        cl_io_fini(sub->sub_env, &sub->sub_io);

        if (sub->sub_env != NULL && !IS_ERR(sub->sub_env)) {
                cl_env_put(sub->sub_env, &sub->sub_refcheck);
                sub->sub_env = NULL;
        }
        EXIT;
}
static int
is_index_within_mirror(struct lov_object *lov, int index, int mirror_index)
{
        struct lov_layout_composite *comp = &lov->u.composite;
        struct lov_mirror_entry *lre = &comp->lo_mirrors[mirror_index];

        return (index >= lre->lre_start && index <= lre->lre_end);
}
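
/*
 * Initialize a sub-io against the sub-object backing (component, stripe):
 * obtain a cl environment, inherit the relevant flags from the parent
 * cl_io, and call cl_io_sub_init() on the lower layer.
 */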
static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio,
                           struct lov_io_sub *sub)
{
        struct lov_object *lov = lio->lis_object;
        struct cl_io *sub_io;
        struct cl_object *sub_obj;
        struct cl_io *io = lio->lis_cl.cis_io;
        int index = lov_comp_entry(sub->sub_subio_index);
        int stripe = lov_comp_stripe(sub->sub_subio_index);
        int result = 0;
        LASSERT(sub->sub_env == NULL);
        ENTRY;

        if (unlikely(!lov_r0(lov, index)->lo_sub ||
                     !lov_r0(lov, index)->lo_sub[stripe]))
                RETURN(-EIO);

        LASSERTF(is_index_within_mirror(lov, index, lio->lis_mirror_index),
                 DFID "iot = %d, index = %d, mirror = %d\n",
                 PFID(lu_object_fid(lov2lu(lov))), io->ci_type, index,
                 lio->lis_mirror_index);

        /* obtain new environment */
        sub->sub_env = cl_env_get(&sub->sub_refcheck);
        if (IS_ERR(sub->sub_env)) {
                result = PTR_ERR(sub->sub_env);
                RETURN(result);
        }

        sub_obj = lovsub2cl(lov_r0(lov, index)->lo_sub[stripe]);
        sub_io = &sub->sub_io;

        sub_io->ci_obj = sub_obj;
        sub_io->ci_result = 0;

        sub_io->ci_parent = io;
        sub_io->ci_lockreq = io->ci_lockreq;
        sub_io->ci_type = io->ci_type;
        sub_io->ci_no_srvlock = io->ci_no_srvlock;
        sub_io->ci_noatime = io->ci_noatime;
        sub_io->ci_pio = io->ci_pio;
        sub_io->ci_lock_no_expand = io->ci_lock_no_expand;
        sub_io->ci_ndelay = io->ci_ndelay;
        sub_io->ci_layout_version = io->ci_layout_version;

        result = cl_io_sub_init(sub->sub_env, sub_io, io->ci_type, sub_obj);
        if (result < 0)
                lov_io_sub_fini(env, lio, sub);

        RETURN(result);
}
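
/*
 * Find the sub-io for @index on the lis_subios list, or allocate and
 * initialize a new one and link it there. Returns ERR_PTR() on failure.
 */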
struct lov_io_sub *lov_sub_get(const struct lu_env *env,
                               struct lov_io *lio, int index)
{
        struct lov_io_sub *sub;
        int rc = 0;

        ENTRY;

        list_for_each_entry(sub, &lio->lis_subios, sub_list) {
                if (sub->sub_subio_index == index) {
                        rc = 1;
                        break;
                }
        }

        if (rc == 0) {
                sub = lov_sub_alloc(lio, index);
                if (sub == NULL)
                        GOTO(out, rc = -ENOMEM);

                rc = lov_io_sub_init(env, lio, sub);
                if (rc < 0) {
                        lov_sub_free(lio, sub);
                        GOTO(out, rc);
                }

                list_add_tail(&sub->sub_list, &lio->lis_subios);
                lio->lis_nr_subios++;
        }
out:
        if (rc < 0)
                sub = ERR_PTR(rc);

        RETURN(sub);
}
/*****************************************************************************
 *
 * Lov io operations.
 *
 */
int lov_page_index(const struct cl_page *page)
{
        const struct cl_page_slice *slice;
        ENTRY;

        slice = cl_page_at(page, &lov_device_type);
        LASSERT(slice != NULL);
        LASSERT(slice->cpl_obj != NULL);

        RETURN(cl2lov_page(slice)->lps_index);
}
static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
                             struct cl_io *io)
{
        ENTRY;

        LASSERT(lio->lis_object != NULL);

        INIT_LIST_HEAD(&lio->lis_subios);
        lio->lis_single_subio_index = -1;
        lio->lis_nr_subios = 0;

        RETURN(0);
}
/**
 * Decide whether this I/O needs a write intent RPC.
 */
static int lov_io_mirror_write_intent(struct lov_io *lio,
                                      struct lov_object *obj, struct cl_io *io)
{
        struct lov_layout_composite *comp = &obj->u.composite;
        struct lu_extent *ext = &io->ci_write_intent;
        struct lov_mirror_entry *lre;
        struct lov_mirror_entry *primary;
        struct lov_layout_entry *lle;
        size_t count = 0;
        ENTRY;

        *ext = (typeof(*ext)) { lio->lis_pos, lio->lis_endpos };
        io->ci_need_write_intent = 0;

        if (!(io->ci_type == CIT_WRITE || cl_io_is_trunc(io) ||
              cl_io_is_mkwrite(io)))
                RETURN(0);

        /*
         * FLR: check whether a write intent RPC needs to be sent to the
         * server. Writing to a sync_pending file needs a write intent RPC
         * to change the file state back to write_pending, so that the
         * layout version can be increased when the state changes to
         * sync_pending at a later time. Otherwise an evicted client could
         * dirty the file data while the resync client is working on it.
         * Designated I/O is allowed for the resync workload.
         */
        if (lov_flr_state(obj) == LCM_FL_RDONLY ||
            (lov_flr_state(obj) == LCM_FL_SYNC_PENDING &&
             io->ci_designated_mirror == 0)) {
                io->ci_need_write_intent = 1;
                RETURN(0);
        }

        LASSERT(lov_flr_state(obj) == LCM_FL_WRITE_PENDING);
        LASSERT(comp->lo_preferred_mirror >= 0);

        /*
         * Iterate over all components of the primary mirror and expand the
         * write extent to fully cover every component it overlaps.
         */
        primary = &comp->lo_mirrors[comp->lo_preferred_mirror];
        LASSERT(!primary->lre_stale);
        lov_foreach_mirror_layout_entry(obj, lle, primary) {
                LASSERT(lle->lle_valid);
                if (!lu_extent_is_overlapped(ext, lle->lle_extent))
                        continue;

                ext->e_start = MIN(ext->e_start, lle->lle_extent->e_start);
                ext->e_end = MAX(ext->e_end, lle->lle_extent->e_end);
                count++;
        }
        if (count == 0) {
                CERROR(DFID ": cannot find any valid components covering "
                       "file extent "DEXT", mirror: %d\n",
                       PFID(lu_object_fid(lov2lu(obj))), PEXT(ext),
                       primary->lre_mirror_id);
                RETURN(-EIO);
        }

        /* count the other mirrors with valid components overlapping the
         * write extent; those components will have to be staled */
        count = 0;
        lov_foreach_mirror_entry(obj, lre) {
                if (lre == primary)
                        continue;

                lov_foreach_mirror_layout_entry(obj, lle, lre) {
                        if (!lle->lle_valid)
                                continue;

                        if (lu_extent_is_overlapped(ext, lle->lle_extent)) {
                                count++;
                                break;
                        }
                }
        }

        CDEBUG(D_VFSTRACE, DFID ": there are %zd components to be staled to "
               "modify file extent "DEXT", iot: %d\n",
               PFID(lu_object_fid(lov2lu(obj))), count, PEXT(ext), io->ci_type);

        io->ci_need_write_intent = count > 0;

        RETURN(0);
}
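
/*
 * Pick the mirror this I/O will be sent to. For designated mirror (resync)
 * I/O the mirror id is dictated by the caller; otherwise start from the
 * preferred mirror, or from the next mirror after a failed non-delay
 * attempt, and choose the first non-stale mirror with a valid component
 * covering the I/O position.
 */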
static int lov_io_mirror_init(struct lov_io *lio, struct lov_object *obj,
                              struct cl_io *io)
{
        struct lov_layout_composite *comp = &obj->u.composite;
        int index;
        int i;
        int result;
        ENTRY;

        if (!lov_is_flr(obj)) {
                LASSERT(comp->lo_preferred_mirror == 0);
                lio->lis_mirror_index = comp->lo_preferred_mirror;
                io->ci_ndelay = 0;
                RETURN(0);
        }

        /* transfer the layout version for verification */
        if (io->ci_layout_version == 0)
                io->ci_layout_version = obj->lo_lsm->lsm_layout_gen;

        /* find the corresponding mirror for designated mirror I/O */
        if (io->ci_designated_mirror > 0) {
                struct lov_mirror_entry *entry;

                LASSERT(!io->ci_ndelay);

                CDEBUG(D_LAYOUT, "designated I/O mirror state: %d\n",
                       lov_flr_state(obj));

                if ((cl_io_is_trunc(io) || io->ci_type == CIT_WRITE) &&
                    (io->ci_layout_version != obj->lo_lsm->lsm_layout_gen)) {
                        /*
                         * For resync I/O, ci_layout_version is the layout
                         * version at the time resync started. If it does
                         * not match the current object layout version, the
                         * layout has changed in the meantime.
                         */
                        RETURN(-ESTALE);
                }

                io->ci_layout_version |= LU_LAYOUT_RESYNC;

                index = 0;
                lio->lis_mirror_index = -1;
                lov_foreach_mirror_entry(obj, entry) {
                        if (entry->lre_mirror_id ==
                            io->ci_designated_mirror) {
                                lio->lis_mirror_index = index;
                                break;
                        }
                        index++;
                }

                RETURN(lio->lis_mirror_index < 0 ? -EINVAL : 0);
        }
        result = lov_io_mirror_write_intent(lio, obj, io);
        if (result)
                RETURN(result);

        if (io->ci_need_write_intent) {
                CDEBUG(D_VFSTRACE, DFID " need write intent for [%llu, %llu)\n",
                       PFID(lu_object_fid(lov2lu(obj))),
                       lio->lis_pos, lio->lis_endpos);

                if (cl_io_is_trunc(io)) {
                        /*
                         * For truncate, [size, EOF) is used to decide
                         * whether a write intent needs to be sent, but the
                         * write extent has to be restored to [0, size) here.
                         */
                        io->ci_write_intent.e_start = 0;
                        io->ci_write_intent.e_end =
                                io->u.ci_setattr.sa_attr.lvb_size;
                }

                /* stop the cl_io_init() loop */
                RETURN(1);
        }
        if (io->ci_ndelay_tried == 0 || /* first time to try */
            /* reset the mirror index if the layout has changed */
            lio->lis_mirror_layout_gen != obj->lo_lsm->lsm_layout_gen) {
                lio->lis_mirror_layout_gen = obj->lo_lsm->lsm_layout_gen;
                index = lio->lis_mirror_index = comp->lo_preferred_mirror;
        } else {
                index = lio->lis_mirror_index;
                LASSERT(index >= 0);

                /* move the mirror index to the next one */
                index = (index + 1) % comp->lo_mirror_count;
        }

        for (i = 0; i < comp->lo_mirror_count; i++) {
                struct lu_extent ext = { .e_start = lio->lis_pos,
                                         .e_end = lio->lis_pos + 1 };
                struct lov_mirror_entry *lre;
                struct lov_layout_entry *lle;
                bool found = false;

                lre = &comp->lo_mirrors[(index + i) % comp->lo_mirror_count];
                if (lre->lre_stale)
                        continue;

                lov_foreach_mirror_layout_entry(obj, lle, lre) {
                        if (!lle->lle_valid)
                                continue;

                        if (lu_extent_is_overlapped(&ext, lle->lle_extent)) {
                                found = true;
                                break;
                        }
                }

                if (found) {
                        index = (index + i) % comp->lo_mirror_count;
                        break;
                }
        }

        if (i == comp->lo_mirror_count) {
                CERROR(DFID ": failed to find a component covering "
                       "I/O region at %llu\n",
                       PFID(lu_object_fid(lov2lu(obj))), lio->lis_pos);

                dump_lsm(D_ERROR, obj->lo_lsm);

                RETURN(-EIO);
        }
        CDEBUG(D_VFSTRACE, DFID ": flr state: %d, move mirror from %d to %d, "
               "have retried: %d, mirror count: %d\n",
               PFID(lu_object_fid(lov2lu(obj))), lov_flr_state(obj),
               lio->lis_mirror_index, index, io->ci_ndelay_tried,
               comp->lo_mirror_count);

        lio->lis_mirror_index = index;

        /*
         * FLR: if all mirrors have been tried once, most likely the network
         * of this client has been partitioned. Relinquish the CPU for a
         * while before trying again.
         */
        ++io->ci_ndelay_tried;
        if (io->ci_ndelay && io->ci_ndelay_tried >= comp->lo_mirror_count) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC / 100)); /* 10ms */
                if (signal_pending(current))
                        RETURN(-EINTR);

                /* reset the retry counter */
                io->ci_ndelay_tried = 1;
        }

        CDEBUG(D_VFSTRACE, "use %sdelayed RPC state for this IO\n",
               io->ci_ndelay ? "non-" : "");

        RETURN(0);
}
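
/*
 * Compute the file extent [lis_pos, lis_endpos) affected by @io, select
 * the mirror to use, and decide whether a write intent must be sent to
 * instantiate components that are not yet initialized.
 */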
static int lov_io_slice_init(struct lov_io *lio,
                             struct lov_object *obj, struct cl_io *io)
{
        int index;
        int result = 0;
        ENTRY;

        lio->lis_object = obj;

        LASSERT(obj->lo_lsm != NULL);

        switch (io->ci_type) {
        case CIT_READ:
        case CIT_WRITE:
                lio->lis_pos = io->u.ci_rw.rw_range.cir_pos;
                lio->lis_endpos = lio->lis_pos + io->u.ci_rw.rw_range.cir_count;
                lio->lis_io_endpos = lio->lis_endpos;
                if (cl_io_is_append(io)) {
                        LASSERT(io->ci_type == CIT_WRITE);

                        /*
                         * If there is a LOV EA hole, we may be unable to
                         * locate the current file tail exactly.
                         */
                        if (unlikely(obj->lo_lsm->lsm_entries[0]->lsme_pattern &
                                     LOV_PATTERN_F_HOLE))
                                GOTO(out, result = -EIO);

                        lio->lis_pos = 0;
                        lio->lis_endpos = OBD_OBJECT_EOF;
                }
                break;

        case CIT_SETATTR:
                if (cl_io_is_trunc(io))
                        lio->lis_pos = io->u.ci_setattr.sa_attr.lvb_size;
                else
                        lio->lis_pos = 0;
                lio->lis_endpos = OBD_OBJECT_EOF;
                break;
        case CIT_DATA_VERSION:
                lio->lis_pos = 0;
                lio->lis_endpos = OBD_OBJECT_EOF;
                break;

        case CIT_FAULT: {
                pgoff_t index = io->u.ci_fault.ft_index;

                lio->lis_pos = cl_offset(io->ci_obj, index);
                lio->lis_endpos = cl_offset(io->ci_obj, index + 1);
                break;
        }

        case CIT_FSYNC: {
                lio->lis_pos = io->u.ci_fsync.fi_start;
                lio->lis_endpos = io->u.ci_fsync.fi_end;
                break;
        }

        case CIT_LADVISE: {
                lio->lis_pos = io->u.ci_ladvise.li_start;
                lio->lis_endpos = io->u.ci_ladvise.li_end;
                break;
        }

        case CIT_GLIMPSE:
                lio->lis_pos = 0;
                lio->lis_endpos = OBD_OBJECT_EOF;

                if (lov_flr_state(obj) == LCM_FL_RDONLY &&
                    !OBD_FAIL_CHECK(OBD_FAIL_FLR_GLIMPSE_IMMUTABLE))
                        /* SoM is accurate, no glimpse needed */
                        GOTO(out, result = 1);
                break;

        case CIT_MISC:
                lio->lis_pos = 0;
                lio->lis_endpos = OBD_OBJECT_EOF;
                break;

        default:
                LBUG();
        }
        result = lov_io_mirror_init(lio, obj, io);
        if (result)
                GOTO(out, result);

        /* check whether the layout needs to be instantiated */
        if (!(io->ci_type == CIT_WRITE || cl_io_is_mkwrite(io) ||
              (cl_io_is_trunc(io) && io->u.ci_setattr.sa_attr.lvb_size > 0)))
                GOTO(out, result = 0);

        /*
         * For truncate, only the components before the truncated size need
         * to be instantiated.
         */
        if (cl_io_is_trunc(io)) {
                io->ci_write_intent.e_start = 0;
                io->ci_write_intent.e_end = io->u.ci_setattr.sa_attr.lvb_size;
        } else {
                io->ci_write_intent.e_start = lio->lis_pos;
                io->ci_write_intent.e_end = lio->lis_endpos;
        }

        lov_foreach_io_layout(index, lio, &io->ci_write_intent) {
                if (!lsm_entry_inited(obj->lo_lsm, index)) {
                        io->ci_need_write_intent = 1;
                        break;
                }
        }

        if (io->ci_need_write_intent && io->ci_designated_mirror > 0) {
                /*
                 * The REINT_SYNC RPC has already tried to instantiate all
                 * of the components involved and obviously did not succeed.
                 * Skip this mirror for now; the server cannot figure out
                 * for which mirror it should instantiate components.
                 */
                CERROR(DFID ": trying to instantiate components for designated "
                       "I/O, file state: %d\n",
                       PFID(lu_object_fid(lov2lu(obj))), lov_flr_state(obj));

                io->ci_need_write_intent = 0;
                GOTO(out, result = -EIO);
        }

        if (io->ci_need_write_intent)
                GOTO(out, result = 1);

        EXIT;

out:
        return result;
}
static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_object *lov = cl2lov(ios->cis_obj);

        ENTRY;

        LASSERT(list_empty(&lio->lis_active));

        while (!list_empty(&lio->lis_subios)) {
                struct lov_io_sub *sub = list_entry(lio->lis_subios.next,
                                                    struct lov_io_sub,
                                                    sub_list);

                list_del_init(&sub->sub_list);
                lio->lis_nr_subios--;

                lov_io_sub_fini(env, lio, sub);
                lov_sub_free(lio, sub);
        }
        LASSERT(lio->lis_nr_subios == 0);

        LASSERT(atomic_read(&lov->lo_active_ios) > 0);
        if (atomic_dec_and_test(&lov->lo_active_ios))
                wake_up_all(&lov->lo_waitq);
        EXIT;
}
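
/*
 * Copy the per-type parameters of the parent cl_io into a sub-io,
 * translating file offsets into stripe-local values where needed.
 */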
static void lov_io_sub_inherit(struct lov_io_sub *sub, struct lov_io *lio,
                               loff_t start, loff_t end)
{
        struct cl_io *io = &sub->sub_io;
        struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
        struct cl_io *parent = lio->lis_cl.cis_io;
        int index = lov_comp_entry(sub->sub_subio_index);
        int stripe = lov_comp_stripe(sub->sub_subio_index);

        io->ci_pio = parent->ci_pio;
        switch (io->ci_type) {
        case CIT_SETATTR: {
                io->u.ci_setattr.sa_attr = parent->u.ci_setattr.sa_attr;
                io->u.ci_setattr.sa_attr_flags =
                        parent->u.ci_setattr.sa_attr_flags;
                io->u.ci_setattr.sa_valid = parent->u.ci_setattr.sa_valid;
                io->u.ci_setattr.sa_stripe_index = stripe;
                io->u.ci_setattr.sa_parent_fid =
                        parent->u.ci_setattr.sa_parent_fid;
                if (cl_io_is_trunc(io)) {
                        loff_t new_size = parent->u.ci_setattr.sa_attr.lvb_size;

                        new_size = lov_size_to_stripe(lsm, index, new_size,
                                                      stripe);
                        io->u.ci_setattr.sa_attr.lvb_size = new_size;
                }
                lov_lsm2layout(lsm, lsm->lsm_entries[index],
                               &io->u.ci_setattr.sa_layout);
                break;
        }
        case CIT_DATA_VERSION: {
                io->u.ci_data_version.dv_data_version = 0;
                io->u.ci_data_version.dv_flags =
                        parent->u.ci_data_version.dv_flags;
                break;
        }
        case CIT_FAULT: {
                struct cl_object *obj = parent->ci_obj;
                loff_t off = cl_offset(obj, parent->u.ci_fault.ft_index);

                io->u.ci_fault = parent->u.ci_fault;
                off = lov_size_to_stripe(lsm, index, off, stripe);
                io->u.ci_fault.ft_index = cl_index(obj, off);
                break;
        }
        case CIT_FSYNC: {
                io->u.ci_fsync.fi_start = start;
                io->u.ci_fsync.fi_end = end;
                io->u.ci_fsync.fi_fid = parent->u.ci_fsync.fi_fid;
                io->u.ci_fsync.fi_mode = parent->u.ci_fsync.fi_mode;
                break;
        }
        case CIT_READ:
        case CIT_WRITE: {
                io->u.ci_rw.rw_ptask = parent->u.ci_rw.rw_ptask;
                io->u.ci_rw.rw_iter = parent->u.ci_rw.rw_iter;
                io->u.ci_rw.rw_iocb = parent->u.ci_rw.rw_iocb;
                io->u.ci_rw.rw_file = parent->u.ci_rw.rw_file;
                io->u.ci_rw.rw_sync = parent->u.ci_rw.rw_sync;
                if (cl_io_is_append(parent)) {
                        io->u.ci_rw.rw_append = 1;
                } else {
                        io->u.ci_rw.rw_range.cir_pos = start;
                        io->u.ci_rw.rw_range.cir_count = end - start;
                }
                break;
        }
        case CIT_LADVISE: {
                io->u.ci_ladvise.li_start = start;
                io->u.ci_ladvise.li_end = end;
                io->u.ci_ladvise.li_fid = parent->u.ci_ladvise.li_fid;
                io->u.ci_ladvise.li_advice = parent->u.ci_ladvise.li_advice;
                io->u.ci_ladvise.li_flags = parent->u.ci_ladvise.li_flags;
                break;
        }
        default:
                break;
        }
}
static loff_t lov_offset_mod(loff_t val, int delta)
{
        if (val != OBD_OBJECT_EOF)
                val += delta;
        return val;
}
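
/*
 * Create and initialize a sub-io for every (component, stripe) pair that
 * intersects [lis_pos, lis_endpos), and put them on the lis_active list.
 */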
static int lov_io_iter_init(const struct lu_env *env,
                            const struct cl_io_slice *ios)
{
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
        struct lov_io_sub *sub;
        struct lu_extent ext;
        int index;
        int rc = 0;

        ENTRY;

        ext.e_start = lio->lis_pos;
        ext.e_end = lio->lis_endpos;

        lov_foreach_io_layout(index, lio, &ext) {
                struct lov_layout_entry *le = lov_entry(lio->lis_object, index);
                struct lov_layout_raid0 *r0 = &le->lle_raid0;
                u64 start;
                u64 end;
                int stripe;

                CDEBUG(D_VFSTRACE, "component[%d] flags %#x\n",
                       index, lsm->lsm_entries[index]->lsme_flags);
                if (!lsm_entry_inited(lsm, index)) {
                        /*
                         * Reads from uninitialized components should
                         * return zero-filled pages.
                         */
                        continue;
                }

                if (!le->lle_valid && !ios->cis_io->ci_designated_mirror) {
                        CERROR("I/O to invalid component: %d, mirror: %d\n",
                               index, lio->lis_mirror_index);
                        RETURN(-EIO);
                }

                for (stripe = 0; stripe < r0->lo_nr; stripe++) {
                        if (!lov_stripe_intersects(lsm, index, stripe,
                                                   &ext, &start, &end))
                                continue;

                        if (unlikely(r0->lo_sub[stripe] == NULL)) {
                                if (ios->cis_io->ci_type == CIT_READ ||
                                    ios->cis_io->ci_type == CIT_WRITE ||
                                    ios->cis_io->ci_type == CIT_FAULT)
                                        RETURN(-EIO);

                                continue;
                        }
                        end = lov_offset_mod(end, 1);
                        sub = lov_sub_get(env, lio,
                                          lov_comp_index(index, stripe));
                        if (IS_ERR(sub))
                                RETURN(PTR_ERR(sub));

                        lov_io_sub_inherit(sub, lio, start, end);
                        rc = cl_io_iter_init(sub->sub_env, &sub->sub_io);
                        if (rc != 0)
                                cl_io_iter_fini(sub->sub_env, &sub->sub_io);
                        if (rc != 0)
                                break;

                        CDEBUG(D_VFSTRACE,
                               "shrink stripe: {%d, %d} range: [%llu, %llu)\n",
                               index, stripe, start, end);

                        list_add_tail(&sub->sub_linkage, &lio->lis_active);
                }
                if (rc != 0)
                        break;
        }
        RETURN(rc);
}
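
/*
 * Clip a read/write to a single stripe of a single component, so that the
 * generic cl_io loop can walk the file range stripe by stripe;
 * ci_continue tells the caller whether further iterations are needed.
 */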
static int lov_io_rw_iter_init(const struct lu_env *env,
                               const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_stripe_md_entry *lse;
        struct cl_io_range *range = &io->u.ci_rw.rw_range;
        loff_t start = range->cir_pos;
        loff_t next;
        int index;

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
        ENTRY;

        if (cl_io_is_append(io))
                RETURN(lov_io_iter_init(env, ios));

        index = lov_io_layout_at(lio, range->cir_pos);
        if (index < 0) { /* non-existing layout component */
                if (io->ci_type == CIT_READ) {
                        /*
                         * TODO: the next component should be detected and
                         * the next position set accordingly
                         */
                        io->ci_continue = 0;
                        /* execute it in the main thread */
                        io->ci_pio = 0;

                        RETURN(lov_io_iter_init(env, ios));
                }

                RETURN(-ENODATA);
        }

        if (!lov_entry(lio->lis_object, index)->lle_valid &&
            !io->ci_designated_mirror)
                RETURN(io->ci_type == CIT_READ ? -EAGAIN : -EIO);
        lse = lov_lse(lio->lis_object, index);

        next = MAX_LFS_FILESIZE;
        if (lse->lsme_stripe_count > 1) {
                unsigned long ssize = lse->lsme_stripe_size;

                lov_do_div64(start, ssize);
                next = (start + 1) * ssize;
                if (next <= start * ssize)
                        next = MAX_LFS_FILESIZE;
        }

        LASSERTF(range->cir_pos >= lse->lsme_extent.e_start,
                 "pos %lld, [%lld, %lld)\n", range->cir_pos,
                 lse->lsme_extent.e_start, lse->lsme_extent.e_end);
        next = min_t(__u64, next, lse->lsme_extent.e_end);
        next = min_t(loff_t, next, lio->lis_io_endpos);

        io->ci_continue = next < lio->lis_io_endpos;
        range->cir_count = next - range->cir_pos;
        lio->lis_pos = range->cir_pos;
        lio->lis_endpos = range->cir_pos + range->cir_count;
        CDEBUG(D_VFSTRACE,
               "stripe: {%d, %llu} range: [%llu, %llu) end: %llu, count: %zd\n",
               index, start, lio->lis_pos, lio->lis_endpos,
               lio->lis_io_endpos, range->cir_count);

        if (!io->ci_continue) {
                /* the last piece of the I/O, execute it in the main thread */
                io->ci_pio = 0;
        }

        /*
         * XXX The following call should be optimized: we know that
         * [lio->lis_pos, lio->lis_endpos) intersects with exactly one stripe.
         */
        RETURN(lov_io_iter_init(env, ios));
}
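
/*
 * For a truncate, fail with -ENODATA if no component covers the byte just
 * before the new size; otherwise fall through to the generic iteration.
 */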
static int lov_io_setattr_iter_init(const struct lu_env *env,
                                    const struct cl_io_slice *ios)
{
        struct lov_io *lio = cl2lov_io(env, ios);
        struct cl_io *io = ios->cis_io;
        int index;
        ENTRY;

        if (cl_io_is_trunc(io) && lio->lis_pos > 0) {
                index = lov_io_layout_at(lio, lio->lis_pos - 1);
                /* no entry found for such offset */
                if (index < 0)
                        RETURN(io->ci_result = -ENODATA);
        }

        RETURN(lov_io_iter_init(env, ios));
}
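
/*
 * Apply @iofunc to every sub-io on the lis_active list, stopping at the
 * first failure and propagating the first sub-io result to the parent.
 */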
static int lov_io_call(const struct lu_env *env, struct lov_io *lio,
                       int (*iofunc)(const struct lu_env *, struct cl_io *))
{
        struct cl_io *parent = lio->lis_cl.cis_io;
        struct lov_io_sub *sub;
        int rc = 0;

        ENTRY;
        list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
                rc = iofunc(sub->sub_env, &sub->sub_io);
                if (rc)
                        break;

                if (parent->ci_result == 0)
                        parent->ci_result = sub->sub_io.ci_result;
        }
        RETURN(rc);
}
static int lov_io_lock(const struct lu_env *env, const struct cl_io_slice *ios)
{
        ENTRY;
        RETURN(lov_io_call(env, cl2lov_io(env, ios), cl_io_lock));
}

static int lov_io_start(const struct lu_env *env, const struct cl_io_slice *ios)
{
        ENTRY;
        RETURN(lov_io_call(env, cl2lov_io(env, ios), cl_io_start));
}
static int lov_io_end_wrapper(const struct lu_env *env, struct cl_io *io)
{
        ENTRY;

        /*
         * It's possible that lov_io_start() wasn't called against this
         * sub-io, either because a previous sub-io failed or because the
         * upper layer completed the I/O.
         */
        if (io->ci_state == CIS_IO_GOING)
                cl_io_end(env, io);
        else
                io->ci_state = CIS_IO_FINISHED;
        RETURN(0);
}
static int lov_io_iter_fini_wrapper(const struct lu_env *env, struct cl_io *io)
{
        cl_io_iter_fini(env, io);
        RETURN(0);
}

static int lov_io_unlock_wrapper(const struct lu_env *env, struct cl_io *io)
{
        cl_io_unlock(env, io);
        RETURN(0);
}
static void lov_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
{
        int rc;

        rc = lov_io_call(env, cl2lov_io(env, ios), lov_io_end_wrapper);
        LASSERT(rc == 0);
}
static void
lov_io_data_version_end(const struct lu_env *env, const struct cl_io_slice *ios)
{
        struct lov_io *lio = cl2lov_io(env, ios);
        struct cl_io *parent = lio->lis_cl.cis_io;
        struct cl_data_version_io *pdv = &parent->u.ci_data_version;
        struct lov_io_sub *sub;

        ENTRY;
        list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
                struct cl_data_version_io *sdv = &sub->sub_io.u.ci_data_version;

                lov_io_end_wrapper(sub->sub_env, &sub->sub_io);

                pdv->dv_data_version += sdv->dv_data_version;
                if (pdv->dv_layout_version > sdv->dv_layout_version)
                        pdv->dv_layout_version = sdv->dv_layout_version;

                if (parent->ci_result == 0)
                        parent->ci_result = sub->sub_io.ci_result;
        }

        EXIT;
}
static void lov_io_iter_fini(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct lov_io *lio = cl2lov_io(env, ios);
        int rc;

        ENTRY;
        rc = lov_io_call(env, lio, lov_io_iter_fini_wrapper);
        LASSERT(rc == 0);
        while (!list_empty(&lio->lis_active))
                list_del_init(lio->lis_active.next);
        EXIT;
}
static void lov_io_unlock(const struct lu_env *env,
                          const struct cl_io_slice *ios)
{
        int rc;

        ENTRY;
        rc = lov_io_call(env, cl2lov_io(env, ios), lov_io_unlock_wrapper);
        LASSERT(rc == 0);
        EXIT;
}
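
/*
 * Map a readahead request to the stripe that holds page @start, ask the
 * lower layer for its readahead window, and convert the answer back to
 * file-level page indices, clipped to stripe and component boundaries.
 */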
static int lov_io_read_ahead(const struct lu_env *env,
                             const struct cl_io_slice *ios,
                             pgoff_t start, struct cl_read_ahead *ra)
{
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_object *loo = lio->lis_object;
        struct cl_object *obj = lov2cl(loo);
        struct lov_layout_raid0 *r0;
        struct lov_io_sub *sub;
        loff_t offset;
        loff_t suboff;
        pgoff_t ra_end;
        unsigned int pps; /* pages per stripe */
        int stripe;
        int index;
        int rc;

        ENTRY;

        offset = cl_offset(obj, start);
        index = lov_io_layout_at(lio, offset);
        if (index < 0 || !lsm_entry_inited(loo->lo_lsm, index))
                RETURN(-ENODATA);

        /* avoid readahead expanding into stale components */
        if (!lov_entry(loo, index)->lle_valid)
                RETURN(-EIO);

        stripe = lov_stripe_number(loo->lo_lsm, index, offset);

        r0 = lov_r0(loo, index);
        if (unlikely(r0->lo_sub[stripe] == NULL))
                RETURN(-EIO);

        sub = lov_sub_get(env, lio, lov_comp_index(index, stripe));
        if (IS_ERR(sub))
                RETURN(PTR_ERR(sub));

        lov_stripe_offset(loo->lo_lsm, index, offset, stripe, &suboff);
        rc = cl_io_read_ahead(sub->sub_env, &sub->sub_io,
                              cl_index(lovsub2cl(r0->lo_sub[stripe]), suboff),
                              ra);

        CDEBUG(D_READA, DFID " cra_end = %lu, stripes = %d, rc = %d\n",
               PFID(lu_object_fid(lov2lu(loo))), ra->cra_end, r0->lo_nr, rc);
        if (rc != 0)
                RETURN(rc);
        /*
         * Adjust the readahead window according to the component layout.
         * ra->cra_end is the maximum page index covered by an underlying
         * DLM lock; convert it from stripe level to file level, and make
         * sure it does not go beyond the stripe and component boundaries.
         */

        /* cra_end is stripe level, convert it into file level */
        ra_end = ra->cra_end;
        if (ra_end != CL_PAGE_EOF)
                ra->cra_end = lov_stripe_pgoff(loo->lo_lsm, index,
                                               ra_end, stripe);

        /* boundary of the current component */
        ra_end = cl_index(obj, (loff_t)lov_io_extent(lio, index)->e_end);
        if (ra_end != CL_PAGE_EOF && ra->cra_end >= ra_end)
                ra->cra_end = ra_end - 1;

        if (r0->lo_nr == 1) /* single-stripe file */
                RETURN(0);

        pps = lov_lse(loo, index)->lsme_stripe_size >> PAGE_SHIFT;

        CDEBUG(D_READA, DFID " max_index = %lu, pps = %u, index = %u, "
               "stripe_size = %u, stripe no = %u, start index = %lu\n",
               PFID(lu_object_fid(lov2lu(loo))), ra->cra_end, pps, index,
               lov_lse(loo, index)->lsme_stripe_size, stripe, start);

        /* never exceed the end of the stripe */
        ra->cra_end = min_t(pgoff_t,
                            ra->cra_end, start + pps - start % pps - 1);
        RETURN(0);
}
/**
 * lov implementation of the cl_operations::cio_submit() method. It takes
 * a list of pages in \a queue, splits it into per-stripe sub-lists,
 * invokes cl_io_submit() on the underlying devices to submit the
 * sub-lists, and then splices everything back.
 *
 * The major complication of this function is the need to handle memory
 * cleansing: cl_io_submit() is called to write out pages as a part of VM
 * memory reclamation, and hence it may not fail due to memory shortage
 * (the system would dead-lock otherwise). To deal with this, some
 * resources (sub-lists, sub-environments, etc.) are allocated per-device
 * on "startup" (i.e., in a not-memory-cleansing context), and in case of
 * memory shortage these pre-allocated resources are used by
 * lov_io_submit() under the lov_device::ld_mutex mutex.
 */
static int lov_io_submit(const struct lu_env *env,
                         const struct cl_io_slice *ios,
                         enum cl_req_type crt, struct cl_2queue *queue)
{
        struct cl_page_list *qin = &queue->c2_qin;
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_io_sub *sub;
        struct cl_page_list *plist = &lov_env_info(env)->lti_plist;
        struct cl_page *page;
        int index;
        int rc = 0;
        ENTRY;

        cl_page_list_init(plist);
        while (qin->pl_nr > 0) {
                struct cl_2queue *cl2q = &lov_env_info(env)->lti_cl2q;

                page = cl_page_list_first(qin);
                if (lov_page_is_empty(page)) {
                        cl_page_list_move(&queue->c2_qout, qin, page);

                        /*
                         * Only a mirror read can get here, so the pages
                         * will be transient; the return code of
                         * cl_page_prep() does not matter.
                         */
                        (void) cl_page_prep(env, ios->cis_io, page, crt);
                        cl_page_completion(env, page, crt, 0);
                        continue;
                }
                cl_2queue_init(cl2q);
                cl_page_list_move(&cl2q->c2_qin, qin, page);

                index = lov_page_index(page);
                while (qin->pl_nr > 0) {
                        page = cl_page_list_first(qin);
                        if (index != lov_page_index(page))
                                break;

                        cl_page_list_move(&cl2q->c2_qin, qin, page);
                }

                sub = lov_sub_get(env, lio, index);
                if (!IS_ERR(sub)) {
                        rc = cl_io_submit_rw(sub->sub_env, &sub->sub_io,
                                             crt, cl2q);
                } else {
                        rc = PTR_ERR(sub);
                }

                cl_page_list_splice(&cl2q->c2_qin, plist);
                cl_page_list_splice(&cl2q->c2_qout, &queue->c2_qout);
                cl_2queue_fini(env, cl2q);

                if (rc != 0)
                        break;
        }

        cl_page_list_splice(plist, qin);
        cl_page_list_fini(env, plist);

        RETURN(rc);
}
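
/*
 * Split @queue into runs of pages that belong to the same sub-io and
 * commit each run on the corresponding lower object; @from and @to apply
 * only within the first and last page of each run.
 */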
static int lov_io_commit_async(const struct lu_env *env,
                               const struct cl_io_slice *ios,
                               struct cl_page_list *queue, int from, int to,
                               cl_commit_cbt cb)
{
        struct cl_page_list *plist = &lov_env_info(env)->lti_plist;
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_io_sub *sub;
        struct cl_page *page;
        int rc = 0;
        ENTRY;

        if (lio->lis_nr_subios == 1) {
                int idx = lio->lis_single_subio_index;

                LASSERT(!lov_page_is_empty(cl_page_list_first(queue)));

                sub = lov_sub_get(env, lio, idx);
                LASSERT(!IS_ERR(sub));
                LASSERT(sub == &lio->lis_single_subio);
                rc = cl_io_commit_async(sub->sub_env, &sub->sub_io, queue,
                                        from, to, cb);
                RETURN(rc);
        }

        cl_page_list_init(plist);
        while (queue->pl_nr > 0) {
                int stripe_to = to;
                int index;

                LASSERT(plist->pl_nr == 0);
                page = cl_page_list_first(queue);
                LASSERT(!lov_page_is_empty(page));

                cl_page_list_move(plist, queue, page);

                index = lov_page_index(page);
                while (queue->pl_nr > 0) {
                        page = cl_page_list_first(queue);
                        if (index != lov_page_index(page))
                                break;

                        cl_page_list_move(plist, queue, page);
                }

                if (queue->pl_nr > 0) /* still has more pages */
                        stripe_to = PAGE_SIZE;

                sub = lov_sub_get(env, lio, index);
                if (!IS_ERR(sub)) {
                        rc = cl_io_commit_async(sub->sub_env, &sub->sub_io,
                                                plist, from, stripe_to, cb);
                } else {
                        rc = PTR_ERR(sub);
                        break;
                }

                if (plist->pl_nr > 0) /* short write */
                        break;

                from = 0;
        }

        /* for the error case, add the pages back into the qin list */
        LASSERT(ergo(rc == 0, plist->pl_nr == 0));
        while (plist->pl_nr > 0) {
                /* an error occurred, add the uncommitted pages back into queue */
                page = cl_page_list_last(plist);
                cl_page_list_move_head(queue, plist, page);
        }

        RETURN(rc);
}
static int lov_io_fault_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct cl_fault_io *fio;
        struct lov_io *lio;
        struct lov_io_sub *sub;
        ENTRY;

        fio = &ios->cis_io->u.ci_fault;
        lio = cl2lov_io(env, ios);
        sub = lov_sub_get(env, lio, lov_page_index(fio->ft_page));
        if (IS_ERR(sub))
                RETURN(PTR_ERR(sub));
        sub->sub_io.u.ci_fault.ft_nob = fio->ft_nob;

        RETURN(lov_io_start(env, ios));
}
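
/*
 * Accumulate the number of pages written back by each sub-fsync into the
 * parent's fi_nr_written counter.
 */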
static void lov_io_fsync_end(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct lov_io *lio = cl2lov_io(env, ios);
        struct lov_io_sub *sub;
        unsigned int *written = &ios->cis_io->u.ci_fsync.fi_nr_written;
        ENTRY;

        *written = 0;
        list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
                struct cl_io *subio = &sub->sub_io;

                lov_io_end_wrapper(sub->sub_env, subio);

                if (subio->ci_result == 0)
                        *written += subio->u.ci_fsync.fi_nr_written;
        }
        EXIT;
}
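
/* An I/O operation vector for fully set up (striped) lov objects. */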
static const struct cl_io_operations lov_io_ops = {
        .op = {
                [CIT_READ] = {
                        .cio_fini = lov_io_fini,
                        .cio_iter_init = lov_io_rw_iter_init,
                        .cio_iter_fini = lov_io_iter_fini,
                        .cio_lock = lov_io_lock,
                        .cio_unlock = lov_io_unlock,
                        .cio_start = lov_io_start,
                        .cio_end = lov_io_end
                },
                [CIT_WRITE] = {
                        .cio_fini = lov_io_fini,
                        .cio_iter_init = lov_io_rw_iter_init,
                        .cio_iter_fini = lov_io_iter_fini,
                        .cio_lock = lov_io_lock,
                        .cio_unlock = lov_io_unlock,
                        .cio_start = lov_io_start,
                        .cio_end = lov_io_end
                },
                [CIT_SETATTR] = {
                        .cio_fini = lov_io_fini,
                        .cio_iter_init = lov_io_setattr_iter_init,
                        .cio_iter_fini = lov_io_iter_fini,
                        .cio_lock = lov_io_lock,
                        .cio_unlock = lov_io_unlock,
                        .cio_start = lov_io_start,
                        .cio_end = lov_io_end
                },
                [CIT_DATA_VERSION] = {
                        .cio_fini = lov_io_fini,
                        .cio_iter_init = lov_io_iter_init,
                        .cio_iter_fini = lov_io_iter_fini,
                        .cio_lock = lov_io_lock,
                        .cio_unlock = lov_io_unlock,
                        .cio_start = lov_io_start,
                        .cio_end = lov_io_data_version_end,
                },
                [CIT_FAULT] = {
                        .cio_fini = lov_io_fini,
                        .cio_iter_init = lov_io_iter_init,
                        .cio_iter_fini = lov_io_iter_fini,
                        .cio_lock = lov_io_lock,
                        .cio_unlock = lov_io_unlock,
                        .cio_start = lov_io_fault_start,
                        .cio_end = lov_io_end
                },
                [CIT_FSYNC] = {
                        .cio_fini = lov_io_fini,
                        .cio_iter_init = lov_io_iter_init,
                        .cio_iter_fini = lov_io_iter_fini,
                        .cio_lock = lov_io_lock,
                        .cio_unlock = lov_io_unlock,
                        .cio_start = lov_io_start,
                        .cio_end = lov_io_fsync_end
                },
                [CIT_LADVISE] = {
                        .cio_fini = lov_io_fini,
                        .cio_iter_init = lov_io_iter_init,
                        .cio_iter_fini = lov_io_iter_fini,
                        .cio_lock = lov_io_lock,
                        .cio_unlock = lov_io_unlock,
                        .cio_start = lov_io_start,
                        .cio_end = lov_io_end
                },
                [CIT_GLIMPSE] = {
                        .cio_fini = lov_io_fini,
                },
                [CIT_MISC] = {
                        .cio_fini = lov_io_fini
                }
        },
        .cio_read_ahead = lov_io_read_ahead,
        .cio_submit = lov_io_submit,
        .cio_commit_async = lov_io_commit_async,
};
/*****************************************************************************
 *
 * Empty lov io operations.
 *
 */
static void lov_empty_io_fini(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct lov_object *lov = cl2lov(ios->cis_obj);
        ENTRY;

        if (atomic_dec_and_test(&lov->lo_active_ios))
                wake_up_all(&lov->lo_waitq);
        EXIT;
}

static int lov_empty_io_submit(const struct lu_env *env,
                               const struct cl_io_slice *ios,
                               enum cl_req_type crt, struct cl_2queue *queue)
{
        return -EBADF;
}

static void lov_empty_impossible(const struct lu_env *env,
                                 struct cl_io_slice *ios)
{
        LBUG();
}

#define LOV_EMPTY_IMPOSSIBLE ((void *)lov_empty_impossible)
/**
 * An io operation vector for files without stripes.
 */
static const struct cl_io_operations lov_empty_io_ops = {
        .op = {
                [CIT_READ] = {
                        .cio_fini = lov_empty_io_fini,
#if 0
                        .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
                        .cio_lock = LOV_EMPTY_IMPOSSIBLE,
                        .cio_start = LOV_EMPTY_IMPOSSIBLE,
                        .cio_end = LOV_EMPTY_IMPOSSIBLE
#endif
                },
                [CIT_WRITE] = {
                        .cio_fini = lov_empty_io_fini,
                        .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
                        .cio_lock = LOV_EMPTY_IMPOSSIBLE,
                        .cio_start = LOV_EMPTY_IMPOSSIBLE,
                        .cio_end = LOV_EMPTY_IMPOSSIBLE
                },
                [CIT_SETATTR] = {
                        .cio_fini = lov_empty_io_fini,
                        .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
                        .cio_lock = LOV_EMPTY_IMPOSSIBLE,
                        .cio_start = LOV_EMPTY_IMPOSSIBLE,
                        .cio_end = LOV_EMPTY_IMPOSSIBLE
                },
                [CIT_FAULT] = {
                        .cio_fini = lov_empty_io_fini,
                        .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
                        .cio_lock = LOV_EMPTY_IMPOSSIBLE,
                        .cio_start = LOV_EMPTY_IMPOSSIBLE,
                        .cio_end = LOV_EMPTY_IMPOSSIBLE
                },
                [CIT_FSYNC] = {
                        .cio_fini = lov_empty_io_fini
                },
                [CIT_LADVISE] = {
                        .cio_fini = lov_empty_io_fini
                },
                [CIT_GLIMPSE] = {
                        .cio_fini = lov_empty_io_fini
                },
                [CIT_MISC] = {
                        .cio_fini = lov_empty_io_fini
                }
        },
        .cio_submit = lov_empty_io_submit,
        .cio_commit_async = LOV_EMPTY_IMPOSSIBLE
};
int lov_io_init_composite(const struct lu_env *env, struct cl_object *obj,
                          struct cl_io *io)
{
        struct lov_io *lio = lov_env_io(env);
        struct lov_object *lov = cl2lov(obj);
        int result;
        ENTRY;

        INIT_LIST_HEAD(&lio->lis_active);
        result = lov_io_slice_init(lio, lov, io);
        if (result)
                GOTO(out, result);

        result = lov_io_subio_init(env, lio, io);
        if (!result) {
                cl_io_slice_add(io, &lio->lis_cl, obj, &lov_io_ops);
                atomic_inc(&lov->lo_active_ios);
        }
        EXIT;
out:
        io->ci_result = result < 0 ? result : 0;
        return io->ci_result;
}
int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
                      struct cl_io *io)
{
        struct lov_object *lov = cl2lov(obj);
        struct lov_io *lio = lov_env_io(env);
        int result;
        ENTRY;

        lio->lis_object = lov;
        switch (io->ci_type) {
        default:
                LBUG();
        case CIT_MISC:
        case CIT_GLIMPSE:
        case CIT_READ:
                result = 0;
                break;
        case CIT_FSYNC:
        case CIT_LADVISE:
        case CIT_SETATTR:
        case CIT_DATA_VERSION:
                result = +1;
                break;
        case CIT_WRITE:
                result = -EBADF;
                break;
        case CIT_FAULT:
                result = -EFAULT;
                CERROR("Page fault on a file without stripes: "DFID"\n",
                       PFID(lu_object_fid(&obj->co_lu)));
                break;
        }
        if (result == 0) {
                cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops);
                atomic_inc(&lov->lo_active_ios);
        }

        io->ci_result = result < 0 ? result : 0;
        RETURN(io->ci_result);
}
int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
                         struct cl_io *io)
{
        struct lov_object *lov = cl2lov(obj);
        struct lov_io *lio = lov_env_io(env);
        int result;
        ENTRY;

        LASSERT(lov->lo_lsm != NULL);
        lio->lis_object = lov;

        switch (io->ci_type) {
        default:
                LASSERTF(0, "invalid type %d\n", io->ci_type);
                result = -EOPNOTSUPP;
                break;
        case CIT_GLIMPSE:
        case CIT_MISC:
        case CIT_FSYNC:
        case CIT_LADVISE:
        case CIT_DATA_VERSION:
                result = +1;
                break;
        case CIT_SETATTR:
                /*
                 * A truncate to 0 is managed by the MDT:
                 * - in open, for open O_TRUNC
                 * - in setattr, for truncate
                 */
                /* a truncate to size > 0 triggers a restore */
                if (cl_io_is_trunc(io)) {
                        io->ci_restore_needed = 1;
                        result = -ENODATA;
                } else
                        result = +1;
                break;
        case CIT_READ:
        case CIT_WRITE:
        case CIT_FAULT:
                io->ci_restore_needed = 1;
                result = -ENODATA;
                break;
        }

        if (result == 0) {
                cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops);
                atomic_inc(&lov->lo_active_ios);
        }

        io->ci_result = result < 0 ? result : 0;
        RETURN(io->ci_result);
}
/**
 * Return the index into composite lo_entries for the given file offset.
 */
int lov_io_layout_at(struct lov_io *lio, __u64 offset)
{
        struct lov_object *lov = lio->lis_object;
        struct lov_layout_composite *comp = &lov->u.composite;
        int start_index = 0;
        int end_index = comp->lo_entry_count - 1;
        int i;

        LASSERT(lov->lo_type == LLT_COMP);

        /* This is an actual file offset, so nothing can cover EOF. */
        if (offset == LUSTRE_EOF)
                return -1;

        if (lov_is_flr(lov)) {
                struct lov_mirror_entry *lre;

                LASSERT(lio->lis_mirror_index >= 0);

                lre = &comp->lo_mirrors[lio->lis_mirror_index];
                start_index = lre->lre_start;
                end_index = lre->lre_end;
        }

        for (i = start_index; i <= end_index; i++) {
                struct lov_layout_entry *lle = lov_entry(lov, i);

                if ((offset >= lle->lle_extent->e_start &&
                     offset < lle->lle_extent->e_end) ||
                    (offset == OBD_OBJECT_EOF &&
                     lle->lle_extent->e_end == OBD_OBJECT_EOF))