+ if (!lov_is_flr(obj)) {
+ /* only locks/pages are manipulated for CIT_MISC op, no
+ * cl_io_loop() will be called, don't check/set mirror info.
+ */
+ if (io->ci_type != CIT_MISC) {
+ LASSERT(comp->lo_preferred_mirror == 0);
+ lio->lis_mirror_index = comp->lo_preferred_mirror;
+ }
+ /* non-FLR file has a single mirror, so there is nothing to
+ * retry against — no need to delay lock requests.
+ */
+ io->ci_ndelay = 0;
+ RETURN(0);
+ }
+
+ /* transfer the layout version for verification */
+ if (io->ci_layout_version == 0)
+ io->ci_layout_version = obj->lo_lsm->lsm_layout_gen;
+
+ /* find the corresponding mirror for designated mirror IO */
+ if (io->ci_designated_mirror > 0) {
+ struct lov_mirror_entry *entry;
+
+ /* designated mirror I/O must not use the nonblocking
+ * (retry-other-mirror) path.
+ */
+ LASSERT(!io->ci_ndelay);
+
+ CDEBUG(D_LAYOUT, "designated I/O mirror state: %d\n",
+ lov_flr_state(obj));
+
+ if ((cl_io_is_trunc(io) || io->ci_type == CIT_WRITE) &&
+ (io->ci_layout_version != obj->lo_lsm->lsm_layout_gen)) {
+ /*
+ * For resync I/O, the ci_layout_version was the layout
+ * version when resync starts. If it doesn't match the
+ * current object layout version, it means the layout
+ * has been changed
+ */
+ RETURN(-ESTALE);
+ }
+
+ /* tag the layout version as resync I/O — NOTE(review):
+ * presumably so the target recognizes this write as part of
+ * mirror resynchronization; confirm server-side semantics.
+ */
+ io->ci_layout_version |= LU_LAYOUT_RESYNC;
+
+ /* linear scan for the mirror whose id matches the designated
+ * one; lis_mirror_index stays -1 if no match is found.
+ */
+ index = 0;
+ lio->lis_mirror_index = -1;
+ lov_foreach_mirror_entry(obj, entry) {
+ if (entry->lre_mirror_id ==
+ io->ci_designated_mirror) {
+ lio->lis_mirror_index = index;
+ break;
+ }
+
+ index++;
+ }
+
+ RETURN(lio->lis_mirror_index < 0 ? -EINVAL : 0);
+ }
+
+ result = lov_io_mirror_write_intent(lio, obj, io);
+ if (result)
+ RETURN(result);
+
+ if (io->ci_need_write_intent) {
+ CDEBUG(D_VFSTRACE, DFID " need write intent for [%llu, %llu)\n",
+ PFID(lu_object_fid(lov2lu(obj))),
+ lio->lis_pos, lio->lis_endpos);
+
+ if (cl_io_is_trunc(io)) {
+ /**
+ * for truncate, we use [size, EOF) to judge whether
+ * a write intent needs to be sent, but we need to
+ * restore the write extent to [0, size]; in truncate,
+ * the byte in the size position is accessed.
+ */
+ io->ci_write_intent.e_start = 0;
+ io->ci_write_intent.e_end =
+ io->u.ci_setattr.sa_attr.lvb_size + 1;
+ }
+ /* stop cl_io_init() loop */
+ RETURN(1);
+ }
+
+ /* Pick the starting mirror: use the preferred mirror on the first
+ * attempt (or after a layout change invalidated the cached index);
+ * otherwise round-robin to the next mirror for the retry.
+ */
+ if (io->ci_ndelay_tried == 0 || /* first time to try */
+ /* reset the mirror index if layout has changed */
+ lio->lis_mirror_layout_gen != obj->lo_lsm->lsm_layout_gen) {
+ lio->lis_mirror_layout_gen = obj->lo_lsm->lsm_layout_gen;
+ index = lio->lis_mirror_index = comp->lo_preferred_mirror;
+ } else {
+ index = lio->lis_mirror_index;
+ LASSERT(index >= 0);
+
+ /* move mirror index to the next one */
+ index = (index + 1) % comp->lo_mirror_count;
+ }
+
+ /* Starting from 'index', find the first valid mirror that has a
+ * valid component covering the current I/O position; wrap around
+ * all mirrors at most once.
+ */
+ for (i = 0; i < comp->lo_mirror_count; i++) {
+ struct lu_extent ext = { .e_start = lio->lis_pos,
+ .e_end = lio->lis_pos + 1 };
+ struct lov_mirror_entry *lre;
+ struct lov_layout_entry *lle;
+ bool found = false;
+
+ lre = &comp->lo_mirrors[(index + i) % comp->lo_mirror_count];
+ if (!lre->lre_valid)
+ continue;
+
+ lov_foreach_mirror_layout_entry(obj, lle, lre) {
+ if (!lle->lle_valid)
+ continue;
+
+ if (lu_extent_is_overlapped(&ext, lle->lle_extent)) {
+ found = true;
+ break;
+ }
+ } /* each component of the mirror */
+ if (found) {
+ index = (index + i) % comp->lo_mirror_count;
+ break;
+ }
+ } /* each mirror */
+
+ /* loop ran to completion without 'found' — no mirror covers the
+ * I/O position.
+ */
+ if (i == comp->lo_mirror_count) {
+ CERROR(DFID": failed to find a component covering "
+ "I/O region at %llu\n",
+ PFID(lu_object_fid(lov2lu(obj))), lio->lis_pos);
+
+ dump_lsm(D_ERROR, obj->lo_lsm);
+
+ RETURN(-EIO);
+ }
+
+ /* note: lis_mirror_index is still the OLD index here, so the
+ * message reports the "from" mirror before the update below.
+ */
+ CDEBUG(D_VFSTRACE, DFID ": flr state: %d, move mirror from %d to %d, "
+ "have retried: %d, mirror count: %d\n",
+ PFID(lu_object_fid(lov2lu(obj))), lov_flr_state(obj),
+ lio->lis_mirror_index, index, io->ci_ndelay_tried,
+ comp->lo_mirror_count);
+
+ lio->lis_mirror_index = index;