/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for LOV layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LOV

#include "lov_cl_internal.h"

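/*
 * Helpers that keep the reentry count of a per-stripe sub-io balanced
 * around calls into that sub-io.
 */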
static inline void lov_sub_enter(struct lov_io_sub *sub)
{
	sub->sub_reenter++;
}

static inline void lov_sub_exit(struct lov_io_sub *sub)
{
	sub->sub_reenter--;
}

static void lov_io_sub_fini(const struct lu_env *env, struct lov_io *lio,
			    struct lov_io_sub *sub)
{
	ENTRY;
	if (sub->sub_io != NULL) {
		if (sub->sub_io_initialized) {
			lov_sub_enter(sub);
			cl_io_fini(sub->sub_env, sub->sub_io);
			lov_sub_exit(sub);
			sub->sub_io_initialized = 0;
			lio->lis_active_subios--;
		}
		if (sub->sub_stripe == lio->lis_single_subio_index)
			lio->lis_single_subio_index = -1;
		else if (!sub->sub_borrowed)
			OBD_FREE_PTR(sub->sub_io);
		sub->sub_io = NULL;
	}
	if (sub->sub_env != NULL && !IS_ERR(sub->sub_env)) {
		if (!sub->sub_borrowed)
			cl_env_put(sub->sub_env, &sub->sub_refcheck);
		sub->sub_env = NULL;
	}
	EXIT;
}

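/*
 * Copy the parameters of the parent io into a per-stripe sub-io,
 * translating file-level offsets into stripe-level offsets where needed.
 */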
static void lov_io_sub_inherit(struct cl_io *io, struct lov_io *lio,
			       int stripe, loff_t start, loff_t end)
{
	struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
	struct cl_io *parent = lio->lis_cl.cis_io;

	switch (io->ci_type) {
	case CIT_SETATTR: {
		io->u.ci_setattr.sa_attr = parent->u.ci_setattr.sa_attr;
		io->u.ci_setattr.sa_attr_flags =
			parent->u.ci_setattr.sa_attr_flags;
		io->u.ci_setattr.sa_valid = parent->u.ci_setattr.sa_valid;
		io->u.ci_setattr.sa_stripe_index = stripe;
		io->u.ci_setattr.sa_parent_fid =
			parent->u.ci_setattr.sa_parent_fid;
		io->u.ci_setattr.sa_capa = parent->u.ci_setattr.sa_capa;
		if (cl_io_is_trunc(io)) {
			loff_t new_size = parent->u.ci_setattr.sa_attr.lvb_size;

			new_size = lov_size_to_stripe(lsm, new_size, stripe);
			io->u.ci_setattr.sa_attr.lvb_size = new_size;
		}
		break;
	}
	case CIT_DATA_VERSION: {
		io->u.ci_data_version.dv_data_version = 0;
		io->u.ci_data_version.dv_flags =
			parent->u.ci_data_version.dv_flags;
		break;
	}
	case CIT_FAULT: {
		struct cl_object *obj = parent->ci_obj;
		loff_t off = cl_offset(obj, parent->u.ci_fault.ft_index);

		io->u.ci_fault = parent->u.ci_fault;
		off = lov_size_to_stripe(lsm, off, stripe);
		io->u.ci_fault.ft_index = cl_index(obj, off);
		break;
	}
	case CIT_FSYNC: {
		io->u.ci_fsync.fi_start = start;
		io->u.ci_fsync.fi_end = end;
		io->u.ci_fsync.fi_capa = parent->u.ci_fsync.fi_capa;
		io->u.ci_fsync.fi_fid = parent->u.ci_fsync.fi_fid;
		io->u.ci_fsync.fi_mode = parent->u.ci_fsync.fi_mode;
		break;
	}
	case CIT_READ:
	case CIT_WRITE: {
		io->u.ci_wr.wr_sync = cl_io_is_sync_write(parent);
		if (cl_io_is_append(parent)) {
			io->u.ci_wr.wr_append = 1;
		} else {
			io->u.ci_rw.crw_pos = start;
			io->u.ci_rw.crw_count = end - start;
		}
		break;
	}
	default:
		break;
	}
}

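/*
 * Set up one stripe's sub-io: obtain an environment and a cl_io (borrowing
 * the per-device emergency pre-allocations when lis_mem_frozen is set),
 * wire the sub-io to its parent and run cl_io_sub_init() against the
 * stripe's sub-object.
 */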
static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio,
			   struct lov_io_sub *sub)
{
	struct lov_object *lov = lio->lis_object;
	struct lov_device *ld = lu2lov_dev(lov2cl(lov)->co_lu.lo_dev);
	struct cl_io *sub_io;
	struct cl_object *sub_obj;
	struct cl_io *io = lio->lis_cl.cis_io;

	int stripe = sub->sub_stripe;
	int result;

	LASSERT(sub->sub_io == NULL);
	LASSERT(sub->sub_env == NULL);
	LASSERT(sub->sub_stripe < lio->lis_stripe_count);
	ENTRY;

	if (unlikely(lov_r0(lov)->lo_sub[stripe] == NULL))
		RETURN(-EIO);

	result = 0;
	sub->sub_io_initialized = 0;
	sub->sub_borrowed = 0;

	if (lio->lis_mem_frozen) {
		LASSERT(mutex_is_locked(&ld->ld_mutex));
		sub->sub_io = &ld->ld_emrg[stripe]->emrg_subio;
		sub->sub_env = ld->ld_emrg[stripe]->emrg_env;
		sub->sub_borrowed = 1;
	} else {
		void *cookie;

		/* obtain new environment */
		cookie = cl_env_reenter();
		sub->sub_env = cl_env_get(&sub->sub_refcheck);
		cl_env_reexit(cookie);
		if (IS_ERR(sub->sub_env))
			result = PTR_ERR(sub->sub_env);

		if (result == 0) {
			/*
			 * First sub-io. Use ->lis_single_subio to
			 * avoid dynamic allocation.
			 */
			if (lio->lis_active_subios == 0) {
				sub->sub_io = &lio->lis_single_subio;
				lio->lis_single_subio_index = stripe;
			} else {
				OBD_ALLOC_PTR(sub->sub_io);
				if (sub->sub_io == NULL)
					result = -ENOMEM;
			}
		}
	}

	if (result == 0) {
		sub_obj = lovsub2cl(lov_r0(lov)->lo_sub[stripe]);
		sub_io = sub->sub_io;

		sub_io->ci_obj = sub_obj;
		sub_io->ci_result = 0;

		sub_io->ci_parent = io;
		sub_io->ci_lockreq = io->ci_lockreq;
		sub_io->ci_type = io->ci_type;
		sub_io->ci_no_srvlock = io->ci_no_srvlock;
		sub_io->ci_noatime = io->ci_noatime;

		lov_sub_enter(sub);
		result = cl_io_sub_init(sub->sub_env, sub_io,
					io->ci_type, sub_obj);
		lov_sub_exit(sub);
		if (result >= 0) {
			lio->lis_active_subios++;
			sub->sub_io_initialized = 1;
			result = 0;
		}
	}
	if (result != 0)
		lov_io_sub_fini(env, lio, sub);
	RETURN(result);
}

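/* Return the sub-io of \a stripe, initializing it lazily on first use. */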
struct lov_io_sub *lov_sub_get(const struct lu_env *env,
			       struct lov_io *lio, int stripe)
{
	int rc;
	struct lov_io_sub *sub = &lio->lis_subs[stripe];

	LASSERT(stripe < lio->lis_stripe_count);
	ENTRY;

	if (!sub->sub_io_initialized) {
		sub->sub_stripe = stripe;
		rc = lov_io_sub_init(env, lio, sub);
	} else
		rc = 0;
	if (rc == 0)
		lov_sub_enter(sub);
	else
		sub = ERR_PTR(rc);
	RETURN(sub);
}

void lov_sub_put(struct lov_io_sub *sub)
{
	lov_sub_exit(sub);
}

/*****************************************************************************
 *
 * Lov io operations.
 *
 */

int lov_page_stripe(const struct cl_page *page)
{
	const struct cl_page_slice *slice;
	ENTRY;

	slice = cl_page_at(page, &lov_device_type);
	LASSERT(slice != NULL);
	LASSERT(slice->cpl_obj != NULL);

	RETURN(cl2lov_page(slice)->lps_stripe);
}

struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio,
				  const struct cl_page_slice *slice)
{
	struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
	struct cl_page *page = slice->cpl_page;
	int stripe;

	LASSERT(lio->lis_cl.cis_io != NULL);
	LASSERT(cl2lov(slice->cpl_obj) == lio->lis_object);
	LASSERT(lsm != NULL);
	LASSERT(lio->lis_nr_subios > 0);
	ENTRY;

	stripe = lov_page_stripe(page);
	RETURN(lov_sub_get(env, lio, stripe));
}

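/* Allocate the array of per-stripe sub-io slots for this io. */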
static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
			     struct cl_io *io)
{
	struct lov_stripe_md *lsm;
	int result;
	ENTRY;

	LASSERT(lio->lis_object != NULL);
	lsm = lio->lis_object->lo_lsm;

	/*
	 * Need to be optimized, we can't afford to allocate a piece of memory
	 * when writing a page. -jay
	 */
	OBD_ALLOC_LARGE(lio->lis_subs,
			lsm->lsm_stripe_count * sizeof lio->lis_subs[0]);
	if (lio->lis_subs != NULL) {
		lio->lis_nr_subios = lio->lis_stripe_count;
		lio->lis_single_subio_index = -1;
		lio->lis_active_subios = 0;
		result = 0;
	} else
		result = -ENOMEM;
	RETURN(result);
}

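/*
 * Compute the file-level [lis_pos, lis_endpos) extent affected by this io
 * from the io type specific parameters.
 */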
static int lov_io_slice_init(struct lov_io *lio,
			     struct lov_object *obj, struct cl_io *io)
{
	ENTRY;

	io->ci_result = 0;
	lio->lis_object = obj;

	LASSERT(obj->lo_lsm != NULL);
	lio->lis_stripe_count = obj->lo_lsm->lsm_stripe_count;

	switch (io->ci_type) {
	case CIT_READ:
	case CIT_WRITE:
		lio->lis_pos = io->u.ci_rw.crw_pos;
		lio->lis_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count;
		lio->lis_io_endpos = lio->lis_endpos;
		if (cl_io_is_append(io)) {
			LASSERT(io->ci_type == CIT_WRITE);

			/* If there is a LOV EA hole, then we may not be able
			 * to locate the current file tail exactly. */
			if (unlikely(obj->lo_lsm->lsm_pattern &
				     LOV_PATTERN_F_HOLE))
				RETURN(-EIO);

			lio->lis_pos = 0;
			lio->lis_endpos = OBD_OBJECT_EOF;
		}
		break;

	case CIT_SETATTR:
		if (cl_io_is_trunc(io))
			lio->lis_pos = io->u.ci_setattr.sa_attr.lvb_size;
		else
			lio->lis_pos = 0;
		lio->lis_endpos = OBD_OBJECT_EOF;
		break;

	case CIT_DATA_VERSION:
		lio->lis_pos = 0;
		lio->lis_endpos = OBD_OBJECT_EOF;
		break;

	case CIT_FAULT: {
		pgoff_t index = io->u.ci_fault.ft_index;
		lio->lis_pos = cl_offset(io->ci_obj, index);
		lio->lis_endpos = cl_offset(io->ci_obj, index + 1);
		break;
	}

	case CIT_FSYNC: {
		lio->lis_pos = io->u.ci_fsync.fi_start;
		lio->lis_endpos = io->u.ci_fsync.fi_end;
		break;
	}

	case CIT_MISC:
		lio->lis_pos = 0;
		lio->lis_endpos = OBD_OBJECT_EOF;
		break;

	default:
		LBUG();
	}
	RETURN(0);
}

static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct lov_io *lio = cl2lov_io(env, ios);
	struct lov_object *lov = cl2lov(ios->cis_obj);
	int i;

	ENTRY;
	if (lio->lis_subs != NULL) {
		for (i = 0; i < lio->lis_nr_subios; i++)
			lov_io_sub_fini(env, lio, &lio->lis_subs[i]);
		OBD_FREE_LARGE(lio->lis_subs,
			       lio->lis_nr_subios * sizeof lio->lis_subs[0]);
		lio->lis_nr_subios = 0;
	}

	LASSERT(atomic_read(&lov->lo_active_ios) > 0);
	if (atomic_dec_and_test(&lov->lo_active_ios))
		wake_up_all(&lov->lo_waitq);
	EXIT;
}

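/* Shift \a val by \a delta, leaving the OBD_OBJECT_EOF sentinel untouched. */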
static loff_t lov_offset_mod(loff_t val, int delta)
{
	if (val != OBD_OBJECT_EOF)
		val += delta;
	return val;
}

static int lov_io_iter_init(const struct lu_env *env,
			    const struct cl_io_slice *ios)
{
	struct lov_io *lio = cl2lov_io(env, ios);
	struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
	struct lov_io_sub *sub;
	loff_t endpos;
	loff_t start;
	loff_t end;
	int stripe;
	int rc = 0;

	ENTRY;
	endpos = lov_offset_mod(lio->lis_endpos, -1);
	for (stripe = 0; stripe < lio->lis_stripe_count; stripe++) {
		if (!lov_stripe_intersects(lsm, stripe, lio->lis_pos,
					   endpos, &start, &end))
			continue;

		if (unlikely(lov_r0(lio->lis_object)->lo_sub[stripe] == NULL)) {
			if (ios->cis_io->ci_type == CIT_READ ||
			    ios->cis_io->ci_type == CIT_WRITE ||
			    ios->cis_io->ci_type == CIT_FAULT)
				RETURN(-EIO);

			continue;
		}

		end = lov_offset_mod(end, +1);
		sub = lov_sub_get(env, lio, stripe);
		if (!IS_ERR(sub)) {
			lov_io_sub_inherit(sub->sub_io, lio, stripe,
					   start, end);
			rc = cl_io_iter_init(sub->sub_env, sub->sub_io);
			lov_sub_put(sub);
			CDEBUG(D_VFSTRACE, "shrink: %d ["LPU64", "LPU64")\n",
			       stripe, start, end);
		} else
			rc = PTR_ERR(sub);

		if (!rc)
			list_add_tail(&sub->sub_linkage, &lio->lis_active);
		else
			break;
	}
	RETURN(rc);
}

static int lov_io_rw_iter_init(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct lov_io *lio = cl2lov_io(env, ios);
	struct cl_io *io = ios->cis_io;
	struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
	loff_t start = io->u.ci_rw.crw_pos;
	loff_t next;
	unsigned long ssize = lsm->lsm_stripe_size;

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
	ENTRY;

	/* fast path for common case. */
	if (lio->lis_nr_subios != 1 && !cl_io_is_append(io)) {
		lov_do_div64(start, ssize);
		next = (start + 1) * ssize;
		if (next <= start * ssize)
			next = ~0ull;

		io->ci_continue = next < lio->lis_io_endpos;
		io->u.ci_rw.crw_count = min_t(loff_t, lio->lis_io_endpos,
					      next) - io->u.ci_rw.crw_pos;
		lio->lis_pos = io->u.ci_rw.crw_pos;
		lio->lis_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count;
		CDEBUG(D_VFSTRACE, "stripe: "LPU64" chunk: ["LPU64", "LPU64") "
		       LPU64"\n", (__u64)start, lio->lis_pos, lio->lis_endpos,
		       (__u64)lio->lis_io_endpos);
	}
	/*
	 * XXX The following call should be optimized: we know that
	 * [lio->lis_pos, lio->lis_endpos) intersects with exactly one stripe.
	 */
	RETURN(lov_io_iter_init(env, ios));
}

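/*
 * Apply \a iofunc to every active sub-io, stopping at the first error and
 * propagating the first non-zero sub-io result to the parent io.
 */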
static int lov_io_call(const struct lu_env *env, struct lov_io *lio,
		       int (*iofunc)(const struct lu_env *, struct cl_io *))
{
	struct cl_io *parent = lio->lis_cl.cis_io;
	struct lov_io_sub *sub;
	int rc = 0;

	ENTRY;
	list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
		lov_sub_enter(sub);
		rc = iofunc(sub->sub_env, sub->sub_io);
		lov_sub_exit(sub);
		if (rc)
			break;

		if (parent->ci_result == 0)
			parent->ci_result = sub->sub_io->ci_result;
	}
	RETURN(rc);
}

static int lov_io_lock(const struct lu_env *env, const struct cl_io_slice *ios)
{
	ENTRY;
	RETURN(lov_io_call(env, cl2lov_io(env, ios), cl_io_lock));
}

static int lov_io_start(const struct lu_env *env, const struct cl_io_slice *ios)
{
	ENTRY;
	RETURN(lov_io_call(env, cl2lov_io(env, ios), cl_io_start));
}

static int lov_io_end_wrapper(const struct lu_env *env, struct cl_io *io)
{
	ENTRY;
	/*
	 * It's possible that lov_io_start() wasn't called against this
	 * sub-io, either because previous sub-io failed, or upper layer
	 * completed IO.
	 */
	if (io->ci_state == CIS_IO_GOING)
		cl_io_end(env, io);
	else
		io->ci_state = CIS_IO_FINISHED;
	RETURN(0);
}

static int lov_io_iter_fini_wrapper(const struct lu_env *env, struct cl_io *io)
{
	cl_io_iter_fini(env, io);
	RETURN(0);
}

static int lov_io_unlock_wrapper(const struct lu_env *env, struct cl_io *io)
{
	cl_io_unlock(env, io);
	RETURN(0);
}

static void lov_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
{
	int rc;

	rc = lov_io_call(env, cl2lov_io(env, ios), lov_io_end_wrapper);
	LASSERT(rc == 0);
}

static void
lov_io_data_version_end(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct lov_io *lio = cl2lov_io(env, ios);
	struct cl_io *parent = lio->lis_cl.cis_io;
	struct lov_io_sub *sub;

	ENTRY;
	list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
		lov_io_end_wrapper(env, sub->sub_io);

		parent->u.ci_data_version.dv_data_version +=
			sub->sub_io->u.ci_data_version.dv_data_version;

		if (parent->ci_result == 0)
			parent->ci_result = sub->sub_io->ci_result;
	}

	EXIT;
}

static void lov_io_iter_fini(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct lov_io *lio = cl2lov_io(env, ios);
	int rc;

	ENTRY;
	rc = lov_io_call(env, lio, lov_io_iter_fini_wrapper);
	LASSERT(rc == 0);
	while (!list_empty(&lio->lis_active))
		list_del_init(lio->lis_active.next);
	EXIT;
}

static void lov_io_unlock(const struct lu_env *env,
			  const struct cl_io_slice *ios)
{
	int rc;

	ENTRY;
	rc = lov_io_call(env, cl2lov_io(env, ios), lov_io_unlock_wrapper);
	LASSERT(rc == 0);
	EXIT;
}

static int lov_io_read_ahead(const struct lu_env *env,
			     const struct cl_io_slice *ios,
			     pgoff_t start, struct cl_read_ahead *ra)
{
	struct lov_io *lio = cl2lov_io(env, ios);
	struct lov_object *loo = lio->lis_object;
	struct cl_object *obj = lov2cl(loo);
	struct lov_layout_raid0 *r0 = lov_r0(loo);
	struct lov_io_sub *sub;
	loff_t suboff;
	pgoff_t ra_end;
	unsigned int pps; /* pages per stripe */
	int stripe;
	int rc;
	ENTRY;

	stripe = lov_stripe_number(loo->lo_lsm, cl_offset(obj, start));
	if (unlikely(r0->lo_sub[stripe] == NULL))
		RETURN(-EIO);

	sub = lov_sub_get(env, lio, stripe);
	if (IS_ERR(sub))
		RETURN(PTR_ERR(sub));

	lov_stripe_offset(loo->lo_lsm, cl_offset(obj, start), stripe, &suboff);
	rc = cl_io_read_ahead(sub->sub_env, sub->sub_io,
			      cl_index(lovsub2cl(r0->lo_sub[stripe]), suboff),
			      ra);
	lov_sub_put(sub);

	CDEBUG(D_READA, DFID " cra_end = %lu, stripes = %d, rc = %d\n",
	       PFID(lu_object_fid(lov2lu(loo))), ra->cra_end, r0->lo_nr, rc);
	if (rc != 0)
		RETURN(rc);

	/**
	 * Adjust the stripe index by the raid0 layout. ra->cra_end is the
	 * maximum page index covered by an underlying DLM lock.
	 * This function converts cra_end from stripe level to file level, and
	 * makes sure it is not beyond the stripe boundary.
	 */
	if (r0->lo_nr == 1) /* single stripe file */
		RETURN(0);

	/* cra_end is stripe level, convert it into file level */
	ra_end = ra->cra_end;
	if (ra_end != CL_PAGE_EOF)
		ra_end = lov_stripe_pgoff(loo->lo_lsm, ra_end, stripe);

	pps = loo->lo_lsm->lsm_stripe_size >> PAGE_CACHE_SHIFT;

	CDEBUG(D_READA, DFID " max_index = %lu, pps = %u, "
	       "stripe_size = %u, stripe no = %u, start index = %lu\n",
	       PFID(lu_object_fid(lov2lu(loo))), ra_end, pps,
	       loo->lo_lsm->lsm_stripe_size, stripe, start);

	/* never exceed the end of the stripe */
	ra->cra_end = min_t(pgoff_t, ra_end, start + pps - start % pps - 1);
	RETURN(0);
}

/**
 * lov implementation of cl_operations::cio_submit() method. It takes a list
 * of pages in \a queue, splits it into per-stripe sub-lists, invokes
 * cl_io_submit() on underlying devices to submit sub-lists, and then splices
 * everything back.
 *
 * The major complication of this function is the need to handle memory
 * cleansing: cl_io_submit() is called to write out pages as a part of VM
 * memory reclamation, and hence it may not fail due to memory shortages
 * (the system dead-locks otherwise). To deal with this, some resources
 * (sub-lists, sub-environment, etc.) are allocated per-device on "startup"
 * (i.e., in a not-memory-cleansing context), and in case of memory shortage,
 * these pre-allocated resources are used by lov_io_submit() under the
 * lov_device::ld_mutex mutex.
 */
static int lov_io_submit(const struct lu_env *env,
			 const struct cl_io_slice *ios,
			 enum cl_req_type crt, struct cl_2queue *queue)
{
	struct cl_page_list *qin = &queue->c2_qin;
	struct lov_io *lio = cl2lov_io(env, ios);
	struct lov_io_sub *sub;
	struct cl_page_list *plist = &lov_env_info(env)->lti_plist;
	struct cl_page *page;
	int stripe;

	int rc = 0;
	ENTRY;

	if (lio->lis_active_subios == 1) {
		int idx = lio->lis_single_subio_index;

		LASSERT(idx < lio->lis_nr_subios);
		sub = lov_sub_get(env, lio, idx);
		LASSERT(!IS_ERR(sub));
		LASSERT(sub->sub_io == &lio->lis_single_subio);
		rc = cl_io_submit_rw(sub->sub_env, sub->sub_io,
				     crt, queue);
		lov_sub_put(sub);
		RETURN(rc);
	}

	LASSERT(lio->lis_subs != NULL);

	cl_page_list_init(plist);
	while (qin->pl_nr > 0) {
		struct cl_2queue *cl2q = &lov_env_info(env)->lti_cl2q;

		cl_2queue_init(cl2q);

		/* move the first page and all following pages of the same
		 * stripe into a per-stripe sub-queue */
		page = cl_page_list_first(qin);
		cl_page_list_move(&cl2q->c2_qin, qin, page);

		stripe = lov_page_stripe(page);
		while (qin->pl_nr > 0) {
			page = cl_page_list_first(qin);
			if (stripe != lov_page_stripe(page))
				break;

			cl_page_list_move(&cl2q->c2_qin, qin, page);
		}

		sub = lov_sub_get(env, lio, stripe);
		if (!IS_ERR(sub)) {
			rc = cl_io_submit_rw(sub->sub_env, sub->sub_io,
					     crt, cl2q);
			lov_sub_put(sub);
		} else
			rc = PTR_ERR(sub);

		cl_page_list_splice(&cl2q->c2_qin, plist);
		cl_page_list_splice(&cl2q->c2_qout, &queue->c2_qout);
		cl_2queue_fini(env, cl2q);

		if (rc != 0)
			break;
	}

	cl_page_list_splice(plist, qin);
	cl_page_list_fini(env, plist);

	RETURN(rc);
}

static int lov_io_commit_async(const struct lu_env *env,
			       const struct cl_io_slice *ios,
			       struct cl_page_list *queue, int from, int to,
			       cl_commit_cbt cb)
{
	struct cl_page_list *plist = &lov_env_info(env)->lti_plist;
	struct lov_io *lio = cl2lov_io(env, ios);
	struct lov_io_sub *sub;
	struct cl_page *page;
	int rc = 0;
	ENTRY;

	if (lio->lis_active_subios == 1) {
		int idx = lio->lis_single_subio_index;

		LASSERT(idx < lio->lis_nr_subios);
		sub = lov_sub_get(env, lio, idx);
		LASSERT(!IS_ERR(sub));
		LASSERT(sub->sub_io == &lio->lis_single_subio);
		rc = cl_io_commit_async(sub->sub_env, sub->sub_io, queue,
					from, to, cb);
		lov_sub_put(sub);
		RETURN(rc);
	}

	LASSERT(lio->lis_subs != NULL);

	cl_page_list_init(plist);
	while (queue->pl_nr > 0) {
		int stripe_to = to;
		int stripe;

		LASSERT(plist->pl_nr == 0);
		page = cl_page_list_first(queue);
		cl_page_list_move(plist, queue, page);

		stripe = lov_page_stripe(page);
		while (queue->pl_nr > 0) {
			page = cl_page_list_first(queue);
			if (stripe != lov_page_stripe(page))
				break;

			cl_page_list_move(plist, queue, page);
		}

		if (queue->pl_nr > 0) /* still has more pages */
			stripe_to = PAGE_SIZE;

		sub = lov_sub_get(env, lio, stripe);
		if (!IS_ERR(sub)) {
			rc = cl_io_commit_async(sub->sub_env, sub->sub_io,
						plist, from, stripe_to, cb);
			lov_sub_put(sub);
		} else {
			rc = PTR_ERR(sub);
			break;
		}

		if (plist->pl_nr > 0) /* short write */
			break;

		from = 0;
	}

	/* for error case, add the page back into the qin list */
	LASSERT(ergo(rc == 0, plist->pl_nr == 0));
	while (plist->pl_nr > 0) {
		/* error occurred, add the uncommitted pages back into queue */
		page = cl_page_list_last(plist);
		cl_page_list_move_head(queue, plist, page);
	}

	RETURN(rc);
}

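/*
 * Propagate the fault io's ft_nob to the sub-io of the stripe that the
 * faulting page belongs to before starting all sub-ios.
 */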
static int lov_io_fault_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct cl_fault_io *fio;
	struct lov_io *lio;
	struct lov_io_sub *sub;
	ENTRY;

	fio = &ios->cis_io->u.ci_fault;
	lio = cl2lov_io(env, ios);
	sub = lov_sub_get(env, lio, lov_page_stripe(fio->ft_page));
	sub->sub_io->u.ci_fault.ft_nob = fio->ft_nob;
	lov_sub_put(sub);
	RETURN(lov_io_start(env, ios));
}

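/*
 * Finish all active sub-ios and accumulate the number of pages written by
 * each of them into the parent's fi_nr_written.
 */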
static void lov_io_fsync_end(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct lov_io *lio = cl2lov_io(env, ios);
	struct lov_io_sub *sub;
	unsigned int *written = &ios->cis_io->u.ci_fsync.fi_nr_written;
	ENTRY;

	*written = 0;
	list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
		struct cl_io *subio = sub->sub_io;

		lov_sub_enter(sub);
		lov_io_end_wrapper(sub->sub_env, subio);
		lov_sub_exit(sub);

		if (subio->ci_result == 0)
			*written += subio->u.ci_fsync.fi_nr_written;
	}
	RETURN_EXIT;
}

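/**
 * An io operation vector for striped files.
 */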
static const struct cl_io_operations lov_io_ops = {
	.op = {
		[CIT_READ] = {
			.cio_fini      = lov_io_fini,
			.cio_iter_init = lov_io_rw_iter_init,
			.cio_iter_fini = lov_io_iter_fini,
			.cio_lock      = lov_io_lock,
			.cio_unlock    = lov_io_unlock,
			.cio_start     = lov_io_start,
			.cio_end       = lov_io_end
		},
		[CIT_WRITE] = {
			.cio_fini      = lov_io_fini,
			.cio_iter_init = lov_io_rw_iter_init,
			.cio_iter_fini = lov_io_iter_fini,
			.cio_lock      = lov_io_lock,
			.cio_unlock    = lov_io_unlock,
			.cio_start     = lov_io_start,
			.cio_end       = lov_io_end
		},
		[CIT_SETATTR] = {
			.cio_fini      = lov_io_fini,
			.cio_iter_init = lov_io_iter_init,
			.cio_iter_fini = lov_io_iter_fini,
			.cio_lock      = lov_io_lock,
			.cio_unlock    = lov_io_unlock,
			.cio_start     = lov_io_start,
			.cio_end       = lov_io_end
		},
		[CIT_DATA_VERSION] = {
			.cio_fini      = lov_io_fini,
			.cio_iter_init = lov_io_iter_init,
			.cio_iter_fini = lov_io_iter_fini,
			.cio_lock      = lov_io_lock,
			.cio_unlock    = lov_io_unlock,
			.cio_start     = lov_io_start,
			.cio_end       = lov_io_data_version_end,
		},
		[CIT_FAULT] = {
			.cio_fini      = lov_io_fini,
			.cio_iter_init = lov_io_iter_init,
			.cio_iter_fini = lov_io_iter_fini,
			.cio_lock      = lov_io_lock,
			.cio_unlock    = lov_io_unlock,
			.cio_start     = lov_io_fault_start,
			.cio_end       = lov_io_end
		},
		[CIT_FSYNC] = {
			.cio_fini      = lov_io_fini,
			.cio_iter_init = lov_io_iter_init,
			.cio_iter_fini = lov_io_iter_fini,
			.cio_lock      = lov_io_lock,
			.cio_unlock    = lov_io_unlock,
			.cio_start     = lov_io_start,
			.cio_end       = lov_io_fsync_end
		},
		[CIT_MISC] = {
			.cio_fini      = lov_io_fini
		}
	},
	.cio_read_ahead   = lov_io_read_ahead,
	.cio_submit       = lov_io_submit,
	.cio_commit_async = lov_io_commit_async,
};

/*****************************************************************************
 *
 * Empty lov io operations.
 *
 */

static void lov_empty_io_fini(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct lov_object *lov = cl2lov(ios->cis_obj);
	ENTRY;

	if (atomic_dec_and_test(&lov->lo_active_ios))
		wake_up_all(&lov->lo_waitq);
	EXIT;
}

static int lov_empty_io_submit(const struct lu_env *env,
			       const struct cl_io_slice *ios,
			       enum cl_req_type crt, struct cl_2queue *queue)
{
	return -EBADF;
}

static void lov_empty_impossible(const struct lu_env *env,
				 struct cl_io_slice *ios)
{
	LBUG();
}

#define LOV_EMPTY_IMPOSSIBLE ((void *)lov_empty_impossible)

/**
 * An io operation vector for files without stripes.
 */
static const struct cl_io_operations lov_empty_io_ops = {
	.op = {
		[CIT_READ] = {
			.cio_fini      = lov_empty_io_fini,
#if 0
			.cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
			.cio_lock      = LOV_EMPTY_IMPOSSIBLE,
			.cio_start     = LOV_EMPTY_IMPOSSIBLE,
			.cio_end       = LOV_EMPTY_IMPOSSIBLE
#endif
		},
		[CIT_WRITE] = {
			.cio_fini      = lov_empty_io_fini,
			.cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
			.cio_lock      = LOV_EMPTY_IMPOSSIBLE,
			.cio_start     = LOV_EMPTY_IMPOSSIBLE,
			.cio_end       = LOV_EMPTY_IMPOSSIBLE
		},
		[CIT_SETATTR] = {
			.cio_fini      = lov_empty_io_fini,
			.cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
			.cio_lock      = LOV_EMPTY_IMPOSSIBLE,
			.cio_start     = LOV_EMPTY_IMPOSSIBLE,
			.cio_end       = LOV_EMPTY_IMPOSSIBLE
		},
		[CIT_FAULT] = {
			.cio_fini      = lov_empty_io_fini,
			.cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
			.cio_lock      = LOV_EMPTY_IMPOSSIBLE,
			.cio_start     = LOV_EMPTY_IMPOSSIBLE,
			.cio_end       = LOV_EMPTY_IMPOSSIBLE
		},
		[CIT_FSYNC] = {
			.cio_fini      = lov_empty_io_fini
		},
		[CIT_MISC] = {
			.cio_fini      = lov_empty_io_fini
		}
	},
	.cio_submit       = lov_empty_io_submit,
	.cio_commit_async = LOV_EMPTY_IMPOSSIBLE
};

int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj,
		      struct cl_io *io)
{
	struct lov_io *lio = lov_env_io(env);
	struct lov_object *lov = cl2lov(obj);

	ENTRY;
	INIT_LIST_HEAD(&lio->lis_active);
	io->ci_result = lov_io_slice_init(lio, lov, io);
	if (io->ci_result != 0)
		RETURN(io->ci_result);

	if (io->ci_result == 0) {
		io->ci_result = lov_io_subio_init(env, lio, io);
		if (io->ci_result == 0) {
			cl_io_slice_add(io, &lio->lis_cl, obj, &lov_io_ops);
			atomic_inc(&lov->lo_active_ios);
		}
	}
	RETURN(io->ci_result);
}

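/*
 * Initialize an io against a file that has no stripe objects: reads and
 * misc ios proceed with the empty ops vector, fsync/setattr/data_version
 * complete trivially, while writes and page faults fail outright.
 */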
int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
		      struct cl_io *io)
{
	struct lov_object *lov = cl2lov(obj);
	struct lov_io *lio = lov_env_io(env);
	int result;
	ENTRY;

	lio->lis_object = lov;
	switch (io->ci_type) {
	default:
		LBUG();
	case CIT_MISC:
	case CIT_READ:
		result = 0;
		break;
	case CIT_FSYNC:
	case CIT_SETATTR:
	case CIT_DATA_VERSION:
		result = +1;
		break;
	case CIT_WRITE:
		result = -EBADF;
		break;
	case CIT_FAULT:
		result = -EFAULT;
		CERROR("Page fault on a file without stripes: "DFID"\n",
		       PFID(lu_object_fid(&obj->co_lu)));
		break;
	}
	if (result == 0) {
		cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops);
		atomic_inc(&lov->lo_active_ios);
	}

	io->ci_result = result < 0 ? result : 0;
	RETURN(result != 0);
}

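/*
 * Initialize an io against a released (HSM archived) file: most io types
 * trigger a layout restore instead of performing actual io.
 */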
int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
			 struct cl_io *io)
{
	struct lov_object *lov = cl2lov(obj);
	struct lov_io *lio = lov_env_io(env);
	int result;
	ENTRY;

	LASSERT(lov->lo_lsm != NULL);
	lio->lis_object = lov;

	switch (io->ci_type) {
	default:
		LASSERTF(0, "invalid type %d\n", io->ci_type);
	case CIT_MISC:
	case CIT_FSYNC:
	case CIT_DATA_VERSION:
		result = +1;
		break;
	case CIT_SETATTR:
		/* the truncate to 0 is managed by MDT:
		 * - in open, for open O_TRUNC
		 * - in setattr, for truncate
		 */
		/* the truncate is for size > 0 so triggers a restore */
		if (cl_io_is_trunc(io))
			io->ci_restore_needed = 1;
		result = -ENODATA;
		break;
	case CIT_READ:
	case CIT_WRITE:
	case CIT_FAULT:
		io->ci_restore_needed = 1;
		result = -ENODATA;
		break;
	}
	if (result == 0) {
		cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops);
		atomic_inc(&lov->lo_active_ios);
	}

	io->ci_result = result < 0 ? result : 0;
	RETURN(result != 0);
}