 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2011, 2016, Intel Corporation.
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
#define DEBUG_SUBSYSTEM S_CLASS
#include <linux/sched.h>
#include <linux/list.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_fid.h>
#include <cl_object.h>
#include "cl_internal.h"
#include <lustre_compat.h>
/*****************************************************************************
static inline int cl_io_type_is_valid(enum cl_io_type type)
        return CIT_READ <= type && type < CIT_OP_NR;
static inline int cl_io_is_loopable(const struct cl_io *io)
        return cl_io_type_is_valid(io->ci_type) && io->ci_type != CIT_MISC;
 * cl_io invariant that holds at all times when exported cl_io_*() functions
 * are entered and left.
static int cl_io_invariant(const struct cl_io *io)
         * io can own pages only when it is ongoing. Sub-io might
         * still be in CIS_LOCKED state when top-io is in CIS_IO_GOING.
        ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING ||
             (io->ci_state == CIS_LOCKED && up != NULL));
 * Finalize \a io, by calling cl_io_operations::cio_fini() bottom-to-top.
void cl_io_fini(const struct lu_env *env, struct cl_io *io)
        struct cl_io_slice *slice;
        LINVRNT(cl_io_type_is_valid(io->ci_type));
        LINVRNT(cl_io_invariant(io));
        while (!list_empty(&io->ci_layers)) {
                slice = container_of(io->ci_layers.prev, struct cl_io_slice,
                list_del_init(&slice->cis_linkage);
                if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
                        slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
                 * Invalidate slice to catch use after free. This assumes that
                 * slices are allocated within session and can be touched
                 * after ->cio_fini() returns.
                slice->cis_io = NULL;
        io->ci_state = CIS_FINI;
        /* sanity check for layout change */
        switch (io->ci_type) {
        case CIT_DATA_VERSION:
                LASSERT(!io->ci_need_restart);
                /* Check ignore layout change conf */
                LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout,
                             !io->ci_need_restart));
EXPORT_SYMBOL(cl_io_fini);
static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
                       enum cl_io_type iot, struct cl_object *obj)
        struct cl_object *scan;
        LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI);
        LINVRNT(cl_io_type_is_valid(iot));
        LINVRNT(cl_io_invariant(io));
        INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
        INIT_LIST_HEAD(&io->ci_lockset.cls_done);
        INIT_LIST_HEAD(&io->ci_layers);
        cl_object_for_each(scan, obj) {
                if (scan->co_ops->coo_io_init != NULL) {
                        result = scan->co_ops->coo_io_init(env, scan, io);
        io->ci_state = CIS_INIT;
 * Initialize sub-io, by calling cl_io_operations::cio_init() top-to-bottom.
 * \pre obj != cl_object_top(obj)
int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
                   enum cl_io_type iot, struct cl_object *obj)
        LASSERT(obj != cl_object_top(obj));
        return cl_io_init0(env, io, iot, obj);
EXPORT_SYMBOL(cl_io_sub_init);
 * Initialize \a io, by calling cl_io_operations::cio_init() top-to-bottom.
 * Caller has to call cl_io_fini() after a call to cl_io_init(), no matter
 * what the latter returned.
 * \pre obj == cl_object_top(obj)
 * \pre cl_io_type_is_valid(iot)
 * \post cl_io_type_is_valid(io->ci_type) && io->ci_type == iot
int cl_io_init(const struct lu_env *env, struct cl_io *io,
               enum cl_io_type iot, struct cl_object *obj)
        LASSERT(obj == cl_object_top(obj));
        /* clear I/O restart from previous instance */
        io->ci_need_restart = 0;
        return cl_io_init0(env, io, iot, obj);
EXPORT_SYMBOL(cl_io_init);
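/*
 * A minimal usage sketch of the contract above, assuming "io" points at a
 * zeroed struct cl_io and "clob" is the top-level cl_object of the file
 * (both names are illustrative):
 *
 *      result = cl_io_init(env, io, CIT_MISC, clob);
 *      if (result == 0)
 *              result = cl_io_loop(env, io);
 *      cl_io_fini(env, io);
 *
 * cl_io_fini() is called unconditionally, matching the requirement that it
 * follow cl_io_init() no matter what the latter returned.
 */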
 * Initialize read or write io.
 * \pre iot == CIT_READ || iot == CIT_WRITE
int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
                  enum cl_io_type iot, loff_t pos, size_t count)
        LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
        LINVRNT(io->ci_obj != NULL);
        if (cfs_ptengine_weight(cl_io_engine) < 2)
        LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
                         "io %s range: [%llu, %llu) %s %s %s %s\n",
                         iot == CIT_READ ? "read" : "write",
                         io->u.ci_rw.rw_nonblock ? "nonblock" : "block",
                         io->u.ci_rw.rw_append ? "append" : "-",
                         io->u.ci_rw.rw_sync ? "sync" : "-",
                         io->ci_pio ? "pio" : "-");
        io->u.ci_rw.rw_range.cir_pos = pos;
        io->u.ci_rw.rw_range.cir_count = count;
        RETURN(cl_io_init(env, io, iot, io->ci_obj));
EXPORT_SYMBOL(cl_io_rw_init);
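/*
 * A short sketch of how a buffered read could be driven through
 * cl_io_rw_init(), assuming "clob", "pos" and "count" come from the caller
 * (all names illustrative):
 *
 *      io->ci_obj = clob;
 *      result = cl_io_rw_init(env, io, CIT_READ, pos, count);
 *      if (result == 0)
 *              result = cl_io_loop(env, io);
 *      cl_io_fini(env, io);
 *
 * cl_io_rw_init() records the [pos, pos + count) range in io->u.ci_rw and
 * then delegates to cl_io_init() on io->ci_obj.
 */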
static int cl_lock_descr_sort(const struct cl_lock_descr *d0,
                              const struct cl_lock_descr *d1)
        return lu_fid_cmp(lu_object_fid(&d0->cld_obj->co_lu),
                          lu_object_fid(&d1->cld_obj->co_lu));
 * Sort locks in lexicographical order of their (fid, start-offset) pairs.
static void cl_io_locks_sort(struct cl_io *io)
        /* hidden treasure: bubble sort for now. */
                struct cl_io_lock_link *curr;
                struct cl_io_lock_link *prev;
                struct cl_io_lock_link *temp;
                list_for_each_entry_safe(curr, temp, &io->ci_lockset.cls_todo,
                        switch (cl_lock_descr_sort(&prev->cill_descr,
                                                   &curr->cill_descr)) {
                                 * IMPOSSIBLE: Identical locks are
                                list_move_tail(&curr->cill_linkage,
                                               &prev->cill_linkage);
                                continue; /* don't change prev: it's
                                           * still "previous" */
                        case -1: /* already in order */
static void cl_lock_descr_merge(struct cl_lock_descr *d0,
                                const struct cl_lock_descr *d1)
        d0->cld_start = min(d0->cld_start, d1->cld_start);
        d0->cld_end = max(d0->cld_end, d1->cld_end);
        if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
                d0->cld_mode = CLM_WRITE;
        if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
                d0->cld_mode = CLM_GROUP;
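/*
 * For illustration, merging two descriptors for the same object widens the
 * extent and keeps the strongest mode (the values below are made up):
 *
 *      d0 = { .cld_start = 5, .cld_end = 20, .cld_mode = CLM_READ  };
 *      d1 = { .cld_start = 0, .cld_end = 10, .cld_mode = CLM_WRITE };
 *      cl_lock_descr_merge(&d0, &d1);
 *
 * leaves d0 covering [0, 20] with cld_mode == CLM_WRITE.
 */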
static int cl_lockset_merge(const struct cl_lockset *set,
                            const struct cl_lock_descr *need)
        struct cl_io_lock_link *scan;
        list_for_each_entry(scan, &set->cls_todo, cill_linkage) {
                if (!cl_object_same(scan->cill_descr.cld_obj, need->cld_obj))
                /* Merge locks for the same object because the ldlm lock
                 * server may expand the lock extent; otherwise there is a
                 * deadlock case if two conflicting locks are queued for the
                 * same object and the lock server expands one lock to overlap
                 * the other. The side effect is that it can generate a
                 * multi-stripe lock that may cause a cascading problem. */
                cl_lock_descr_merge(&scan->cill_descr, need);
                CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
                       scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
                       scan->cill_descr.cld_end);
static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
                           struct cl_lockset *set)
        struct cl_io_lock_link *link;
        struct cl_io_lock_link *temp;
        list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
                result = cl_lock_request(env, io, &link->cill_lock);
                list_move(&link->cill_linkage, &set->cls_done);
 * Takes locks necessary for the current iteration of io.
 * Calls cl_io_operations::cio_lock() top-to-bottom to collect locks required
 * by layers for the current iteration. Then sort locks (to avoid dead-locks),
int cl_io_lock(const struct lu_env *env, struct cl_io *io)
        const struct cl_io_slice *scan;
        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_IT_STARTED);
        LINVRNT(cl_io_invariant(io));
        list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
                result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
        cl_io_locks_sort(io);
        result = cl_lockset_lock(env, io, &io->ci_lockset);
        cl_io_unlock(env, io);
        io->ci_state = CIS_LOCKED;
EXPORT_SYMBOL(cl_io_lock);
 * Release locks taken by io.
void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
        struct cl_lockset *set;
        struct cl_io_lock_link *link;
        struct cl_io_lock_link *temp;
        const struct cl_io_slice *scan;
        LASSERT(cl_io_is_loopable(io));
        LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED);
        LINVRNT(cl_io_invariant(io));
        set = &io->ci_lockset;
        list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
                list_del_init(&link->cill_linkage);
                if (link->cill_fini != NULL)
                        link->cill_fini(env, link);
        list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
                list_del_init(&link->cill_linkage);
                cl_lock_release(env, &link->cill_lock);
                if (link->cill_fini != NULL)
                        link->cill_fini(env, link);
        list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
                        scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
        io->ci_state = CIS_UNLOCKED;
EXPORT_SYMBOL(cl_io_unlock);
 * Prepares next iteration of io.
 * Calls cl_io_operations::cio_iter_init() top-to-bottom. This exists to give
 * layers a chance to modify io parameters, e.g., so that lov can restrict io
 * to a single stripe.
int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
        const struct cl_io_slice *scan;
        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED);
        LINVRNT(cl_io_invariant(io));
        list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
                result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
        io->ci_state = CIS_IT_STARTED;
EXPORT_SYMBOL(cl_io_iter_init);
 * Finalizes io iteration.
 * Calls cl_io_operations::cio_iter_fini() bottom-to-top.
void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
        const struct cl_io_slice *scan;
        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_UNLOCKED);
        LINVRNT(cl_io_invariant(io));
        list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
                        scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
        io->ci_state = CIS_IT_ENDED;
EXPORT_SYMBOL(cl_io_iter_fini);
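/*
 * Taken together with cl_io_lock()/cl_io_unlock() and cl_io_start()/
 * cl_io_end() below, one iteration as driven by cl_io_loop() walks the state
 * machine enforced by the assertions above. A condensed sketch of the
 * per-iteration sequence (error handling omitted, "done" standing for the
 * number of bytes completed in this iteration):
 *
 *      rc = cl_io_iter_init(env, io);     CIS_INIT/CIS_IT_ENDED -> CIS_IT_STARTED
 *      rc = cl_io_lock(env, io);          CIS_IT_STARTED -> CIS_LOCKED
 *      rc = cl_io_start(env, io);         CIS_LOCKED -> CIS_IO_GOING
 *      cl_io_end(env, io);                CIS_IO_GOING -> CIS_IO_FINISHED
 *      cl_io_unlock(env, io);             -> CIS_UNLOCKED
 *      cl_io_rw_advance(env, io, done);
 *      cl_io_iter_fini(env, io);          CIS_UNLOCKED -> CIS_IT_ENDED
 */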
 * Records that read or write io progressed \a nob bytes forward.
void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, size_t nob)
        const struct cl_io_slice *scan;
        LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(cl_io_invariant(io));
        io->u.ci_rw.rw_range.cir_pos += nob;
        io->u.ci_rw.rw_range.cir_count -= nob;
        /* layers have to be notified. */
        list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
                        scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
 * Adds a lock to a lockset.
int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
                   struct cl_io_lock_link *link)
        if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr))
        list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
EXPORT_SYMBOL(cl_io_lock_add);
static void cl_free_io_lock_link(const struct lu_env *env,
                                 struct cl_io_lock_link *link)
 * Allocates new lock link, and uses it to add a lock to a lockset.
int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
                         struct cl_lock_descr *descr)
        struct cl_io_lock_link *link;
        link->cill_descr = *descr;
        link->cill_fini = cl_free_io_lock_link;
        result = cl_io_lock_add(env, io, link);
        if (result) /* lock match */
                link->cill_fini(env, link);
EXPORT_SYMBOL(cl_io_lock_alloc_add);
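/*
 * A hedged sketch of how a layer's ->cio_lock() method could queue an extent
 * lock for the current iteration; the descriptor values below are purely
 * illustrative:
 *
 *      struct cl_lock_descr descr = {
 *              .cld_obj   = obj,
 *              .cld_mode  = CLM_READ,
 *              .cld_start = start_index,
 *              .cld_end   = end_index,
 *      };
 *
 *      result = cl_io_lock_alloc_add(env, io, &descr);
 *
 * The allocated link is released through cill_fini() when the lockset is
 * torn down in cl_io_unlock(), or immediately if the descriptor was merged
 * into an existing lock.
 */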
 * Starts io by calling cl_io_operations::cio_start() top-to-bottom.
int cl_io_start(const struct lu_env *env, struct cl_io *io)
        const struct cl_io_slice *scan;
        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_LOCKED);
        LINVRNT(cl_io_invariant(io));
        io->ci_state = CIS_IO_GOING;
        list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
                result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
EXPORT_SYMBOL(cl_io_start);
 * Wait until current io iteration is finished by calling
 * cl_io_operations::cio_end() bottom-to-top.
void cl_io_end(const struct lu_env *env, struct cl_io *io)
        const struct cl_io_slice *scan;
        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_IO_GOING);
        LINVRNT(cl_io_invariant(io));
        list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
                        scan->cis_iop->op[io->ci_type].cio_end(env, scan);
                /* TODO: error handling. */
        io->ci_state = CIS_IO_FINISHED;
EXPORT_SYMBOL(cl_io_end);
 * Called by read io, to decide the readahead extent
 * \see cl_io_operations::cio_read_ahead()
int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io,
                     pgoff_t start, struct cl_read_ahead *ra)
        const struct cl_io_slice *scan;
        LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
        LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
        LINVRNT(cl_io_invariant(io));
        list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->cio_read_ahead == NULL)
                result = scan->cis_iop->cio_read_ahead(env, scan, start, ra);
        RETURN(result > 0 ? 0 : result);
EXPORT_SYMBOL(cl_io_read_ahead);
 * Commit a list of contiguous pages into writeback cache.
 * \returns 0 if all pages committed, or errcode if error occurred.
 * \see cl_io_operations::cio_commit_async()
int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
                       struct cl_page_list *queue, int from, int to,
        const struct cl_io_slice *scan;
        list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->cio_commit_async == NULL)
                result = scan->cis_iop->cio_commit_async(env, scan, queue,
EXPORT_SYMBOL(cl_io_commit_async);
 * Submits a list of pages for immediate io.
 * After the function returns, the submitted pages are moved to the
 * queue->c2_qout queue, while queue->c2_qin contains both the pages that did
 * not need to be submitted and the pages that failed to submit.
 * \returns 0 if at least one page was submitted, error code otherwise.
 * \see cl_io_operations::cio_submit()
int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
                    enum cl_req_type crt, struct cl_2queue *queue)
        const struct cl_io_slice *scan;
        list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->cio_submit == NULL)
                result = scan->cis_iop->cio_submit(env, scan, crt, queue);
         * If ->cio_submit() failed, no pages were sent.
        LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages)));
EXPORT_SYMBOL(cl_io_submit_rw);
 * Submit a sync_io and wait for the IO to finish, or for an error to happen.
 * If \a timeout is zero, wait for the IO unconditionally.
int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
                      enum cl_req_type iot, struct cl_2queue *queue,
        struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
        cl_page_list_for_each(pg, &queue->c2_qin) {
                LASSERT(pg->cp_sync_io == NULL);
                pg->cp_sync_io = anchor;
        cl_sync_io_init(anchor, queue->c2_qin.pl_nr, &cl_sync_io_end);
        rc = cl_io_submit_rw(env, io, iot, queue);
         * If some pages weren't sent for any reason (e.g.,
         * read found up-to-date pages in the cache, or write found
         * clean pages), count them as completed to avoid an infinite wait.
        cl_page_list_for_each(pg, &queue->c2_qin) {
                pg->cp_sync_io = NULL;
                cl_sync_io_note(env, anchor, 1);
        /* wait for the IO to be finished. */
        rc = cl_sync_io_wait(env, anchor, timeout);
        cl_page_list_assume(env, io, &queue->c2_qout);
        LASSERT(list_empty(&queue->c2_qout.pl_pages));
        cl_page_list_for_each(pg, &queue->c2_qin)
                pg->cp_sync_io = NULL;
EXPORT_SYMBOL(cl_io_submit_sync);
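/*
 * A minimal sketch of synchronous page submission built on the helpers
 * above, assuming "queue" points at a caller-provided struct cl_2queue and
 * "page" is a page already owned by the current io (names illustrative):
 *
 *      cl_2queue_init_page(queue, page);
 *      rc = cl_io_submit_sync(env, io, CRT_READ, queue, 0);
 *      cl_2queue_disown(env, io, queue);
 *      cl_2queue_fini(env, queue);
 *
 * A timeout of zero waits for the transfer unconditionally, as noted in the
 * comment above cl_io_submit_sync().
 */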
 * Cancel an IO which has been submitted by cl_io_submit_rw.
int cl_io_cancel(const struct lu_env *env, struct cl_io *io,
                 struct cl_page_list *queue)
        struct cl_page *page;
735 CERROR("Canceling ongoing page trasmission\n");
        cl_page_list_for_each(page, queue) {
                rc = cl_page_cancel(env, page);
                result = result ?: rc;
struct cl_io_pt *cl_io_submit_pt(struct cl_io *io, loff_t pos, size_t count)
        OBD_ALLOC(pt, sizeof(*pt));
        RETURN(ERR_PTR(-ENOMEM));
        init_sync_kiocb(&pt->cip_iocb, io->u.ci_rw.rw_file);
        pt->cip_iocb.ki_pos = pos;
#ifdef HAVE_KIOCB_KI_LEFT
        pt->cip_iocb.ki_left = count;
#elif defined(HAVE_KI_NBYTES)
        pt->cip_iocb.ki_nbytes = count;
        pt->cip_iter = io->u.ci_rw.rw_iter;
        iov_iter_truncate(&pt->cip_iter, count);
        pt->cip_file = io->u.ci_rw.rw_file;
        pt->cip_iot = io->ci_type;
        pt->cip_count = count;
        rc = cfs_ptask_init(&pt->cip_task, io->u.ci_rw.rw_ptask, pt,
                            PTF_ORDERED | PTF_COMPLETE |
                            PTF_USER_MM | PTF_RETRY, smp_processor_id());
        CDEBUG(D_VFSTRACE, "submit %s range: [%llu, %llu)\n",
               io->ci_type == CIT_READ ? "read" : "write",
        rc = cfs_ptask_submit(&pt->cip_task, cl_io_engine);
        OBD_FREE(pt, sizeof(*pt));
 * Pumps io through iterations calling
 * - cl_io_iter_init()
 * - cl_io_iter_fini()
 * repeatedly until there is no more io to do.
int cl_io_loop(const struct lu_env *env, struct cl_io *io)
        struct cl_io_pt *pt = NULL, *head = NULL;
        struct cl_io_pt **tail = &head;
        size_t last_chunk_count = 0;
        bool short_io = false;
        LINVRNT(cl_io_is_loopable(io));
        rc = cl_io_iter_init(env, io);
        cl_io_iter_fini(env, io);
        pos = io->u.ci_rw.rw_range.cir_pos;
        count = io->u.ci_rw.rw_range.cir_count;
        /* submit this range for parallel execution */
        pt = cl_io_submit_pt(io, pos, count);
        cl_io_iter_fini(env, io);
        tail = &pt->cip_next;
        size_t nob = io->ci_nob;
               "execute type %u range: [%llu, %llu) nob: %zu %s\n",
               io->ci_type, pos, pos + count, nob,
               io->ci_continue ? "continue" : "stop");
        rc = cl_io_lock(env, io);
        cl_io_iter_fini(env, io);
         * Notify layers that locks have been taken,
         *   - llite: kms, short read;
         *   - llite: generic_file_read();
        rc = cl_io_start(env, io);
         * Send any remaining pending
         *   - llite: ll_rw_stats_tally.
        cl_io_unlock(env, io);
        count = io->ci_nob - nob;
        last_chunk_count = count;
        cl_io_rw_advance(env, io, count);
        cl_io_iter_fini(env, io);
        } while (!rc && io->ci_continue);
        if (rc == -EWOULDBLOCK && io->ci_ndelay) {
                io->ci_need_restart = 1;
        CDEBUG(D_VFSTRACE, "loop type %u done: nob: %zu, rc: %d %s\n",
               io->ci_type, io->ci_nob, rc,
               io->ci_continue ? "continue" : "stop");
        while (head != NULL) {
                head = head->cip_next;
                rc2 = cfs_ptask_wait_for(&pt->cip_task);
                LASSERTF(!rc2, "wait for task error: %d\n", rc2);
                rc2 = cfs_ptask_result(&pt->cip_task);
                       "done %s range: [%llu, %llu) ret: %zd, rc: %d\n",
                       pt->cip_iot == CIT_READ ? "read" : "write",
                       pt->cip_pos, pt->cip_pos + pt->cip_count,
                       pt->cip_result, rc2);
                /* save the result of ptask */
                io->ci_need_restart |= pt->cip_need_restart;
                if (!rc2) /* IO is done by this task successfully */
                        io->ci_nob += pt->cip_result;
                if (pt->cip_result < pt->cip_count) {
                        /* short IO happened.
                         * Not necessarily an error. */
923 "incomplete range: [%llu, %llu) "
924 "last_chunk_count: %zu\n",
926 pt->cip_pos + pt->cip_count,
928 io->ci_nob -= last_chunk_count;
932 OBD_FREE(pt, sizeof(*pt));
935 CDEBUG(D_VFSTRACE, "return nob: %zu (%s io), rc: %d\n",
936 io->ci_nob, short_io ? "short" : "full", rc);
938 RETURN(rc < 0 ? rc : io->ci_result);
940 EXPORT_SYMBOL(cl_io_loop);
 * Adds io slice to the cl_io.
 * This is called by cl_object_operations::coo_io_init() methods to add a
 * per-layer state to the io. New state is added at the end of
 * cl_io::ci_layers list, that is, it is at the bottom of the stack.
 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_page_slice_add()
void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
                     struct cl_object *obj,
                     const struct cl_io_operations *ops)
        struct list_head *linkage = &slice->cis_linkage;
        LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
                list_empty(linkage));
        list_add_tail(linkage, &io->ci_layers);
        slice->cis_obj = obj;
        slice->cis_iop = ops;
EXPORT_SYMBOL(cl_io_slice_add);
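/*
 * A hedged sketch of a layer's coo_io_init() method wiring its per-layer
 * slice into the io; "example_io_slice", "example_env_io()" and
 * "example_io_ops" are hypothetical names standing in for a layer's private
 * state, env accessor and operation vector:
 *
 *      static int example_io_init(const struct lu_env *env,
 *                                 struct cl_object *obj, struct cl_io *io)
 *      {
 *              struct example_io_slice *eis = example_env_io(env);
 *
 *              cl_io_slice_add(io, &eis->eis_cl, obj, &example_io_ops);
 *              return 0;
 *      }
 *
 * Because coo_io_init() methods run top-to-bottom, the slice added last ends
 * up at the bottom of cl_io::ci_layers, as described above.
 */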
 * Initializes page list.
void cl_page_list_init(struct cl_page_list *plist)
        INIT_LIST_HEAD(&plist->pl_pages);
        plist->pl_owner = current;
EXPORT_SYMBOL(cl_page_list_init);
 * Adds a page to a page list.
void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
        /* it would be better to check that page is owned by "current" io, but
         * it is not passed here. */
        LASSERT(page->cp_owner != NULL);
        LINVRNT(plist->pl_owner == current);
        LASSERT(list_empty(&page->cp_batch));
        list_add_tail(&page->cp_batch, &plist->pl_pages);
        lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
EXPORT_SYMBOL(cl_page_list_add);
 * Removes a page from a page list.
void cl_page_list_del(const struct lu_env *env,
                      struct cl_page_list *plist, struct cl_page *page)
        LASSERT(plist->pl_nr > 0);
        LASSERT(cl_page_is_vmlocked(env, page));
        LINVRNT(plist->pl_owner == current);
        list_del_init(&page->cp_batch);
        lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
        cl_page_put(env, page);
EXPORT_SYMBOL(cl_page_list_del);
 * Moves a page from one page list to another.
void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
                       struct cl_page *page)
        LASSERT(src->pl_nr > 0);
        LINVRNT(dst->pl_owner == current);
        LINVRNT(src->pl_owner == current);
        list_move_tail(&page->cp_batch, &dst->pl_pages);
        lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
EXPORT_SYMBOL(cl_page_list_move);
 * Moves a page from one page list to the head of another list.
void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
                            struct cl_page *page)
        LASSERT(src->pl_nr > 0);
        LINVRNT(dst->pl_owner == current);
        LINVRNT(src->pl_owner == current);
        list_move(&page->cp_batch, &dst->pl_pages);
        lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
EXPORT_SYMBOL(cl_page_list_move_head);
 * Splice the cl_page_list, just as a list head does.
void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
        struct cl_page *page;
        struct cl_page *tmp;
        LINVRNT(list->pl_owner == current);
        LINVRNT(head->pl_owner == current);
        cl_page_list_for_each_safe(page, tmp, list)
                cl_page_list_move(head, list, page);
EXPORT_SYMBOL(cl_page_list_splice);
 * Disowns pages in a queue.
void cl_page_list_disown(const struct lu_env *env,
                         struct cl_io *io, struct cl_page_list *plist)
        struct cl_page *page;
        struct cl_page *temp;
        LINVRNT(plist->pl_owner == current);
        cl_page_list_for_each_safe(page, temp, plist) {
                LASSERT(plist->pl_nr > 0);
                list_del_init(&page->cp_batch);
                 * cl_page_disown0() rather than the usual cl_page_disown() is
                 * used, because pages are possibly in CPS_FREEING state
                 * already due to the call to cl_page_list_discard().
                 * XXX cl_page_disown0() will fail if page is not locked.
                cl_page_disown0(env, io, page);
                lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue",
                cl_page_put(env, page);
EXPORT_SYMBOL(cl_page_list_disown);
 * Releases pages from queue.
void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
        struct cl_page *page;
        struct cl_page *temp;
        LINVRNT(plist->pl_owner == current);
        cl_page_list_for_each_safe(page, temp, plist)
                cl_page_list_del(env, plist, page);
        LASSERT(plist->pl_nr == 0);
EXPORT_SYMBOL(cl_page_list_fini);
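/*
 * A short sketch of how the page list helpers above are typically combined,
 * assuming "plist" points at a caller-provided struct cl_page_list and each
 * "page" added is owned by the current io (names illustrative):
 *
 *      cl_page_list_init(plist);
 *      cl_page_list_add(plist, page);
 *      ... submit or commit the batch ...
 *      cl_page_list_disown(env, io, plist);
 *      cl_page_list_fini(env, plist);
 *
 * cl_page_list_disown() drops the io's ownership and removes each page from
 * the list; cl_page_list_fini() then releases anything still left on it.
 */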
 * Assumes all pages in a queue.
void cl_page_list_assume(const struct lu_env *env,
                         struct cl_io *io, struct cl_page_list *plist)
        struct cl_page *page;
        LINVRNT(plist->pl_owner == current);
        cl_page_list_for_each(page, plist)
                cl_page_assume(env, io, page);
 * Discards all pages in a queue.
void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
                          struct cl_page_list *plist)
        struct cl_page *page;
        LINVRNT(plist->pl_owner == current);
        cl_page_list_for_each(page, plist)
                cl_page_discard(env, io, page);
EXPORT_SYMBOL(cl_page_list_discard);
 * Initialize dual page queue.
void cl_2queue_init(struct cl_2queue *queue)
        cl_page_list_init(&queue->c2_qin);
        cl_page_list_init(&queue->c2_qout);
EXPORT_SYMBOL(cl_2queue_init);
 * Add a page to the incoming page list of 2-queue.
void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page)
        cl_page_list_add(&queue->c2_qin, page);
EXPORT_SYMBOL(cl_2queue_add);
 * Disown pages in both lists of a 2-queue.
void cl_2queue_disown(const struct lu_env *env,
                      struct cl_io *io, struct cl_2queue *queue)
        cl_page_list_disown(env, io, &queue->c2_qin);
        cl_page_list_disown(env, io, &queue->c2_qout);
EXPORT_SYMBOL(cl_2queue_disown);
 * Discard (truncate) pages in both lists of a 2-queue.
void cl_2queue_discard(const struct lu_env *env,
                       struct cl_io *io, struct cl_2queue *queue)
        cl_page_list_discard(env, io, &queue->c2_qin);
        cl_page_list_discard(env, io, &queue->c2_qout);
EXPORT_SYMBOL(cl_2queue_discard);
 * Assume ownership of the pages in a cl_2queue.
void cl_2queue_assume(const struct lu_env *env,
                      struct cl_io *io, struct cl_2queue *queue)
        cl_page_list_assume(env, io, &queue->c2_qin);
        cl_page_list_assume(env, io, &queue->c2_qout);
 * Finalize both page lists of a 2-queue.
void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue)
        cl_page_list_fini(env, &queue->c2_qout);
        cl_page_list_fini(env, &queue->c2_qin);
EXPORT_SYMBOL(cl_2queue_fini);
 * Initialize a 2-queue to contain \a page in its incoming page list.
void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page)
        cl_2queue_init(queue);
        cl_2queue_add(queue, page);
EXPORT_SYMBOL(cl_2queue_init_page);
 * Returns top-level io.
 * \see cl_object_top()
struct cl_io *cl_io_top(struct cl_io *io)
        while (io->ci_parent != NULL)
EXPORT_SYMBOL(cl_io_top);
 * Prints human readable representation of \a io to the \a f.
void cl_io_print(const struct lu_env *env, void *cookie,
                 lu_printer_t printer, const struct cl_io *io)
 * Fills in attributes that are passed to server together with transfer. Only
 * attributes from \a flags may be touched. This can be called multiple times
 * for the same request.
void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj,
                     struct cl_req_attr *attr)
        struct cl_object *scan;
        cl_object_for_each(scan, obj) {
                if (scan->co_ops->coo_req_attr_set != NULL)
                        scan->co_ops->coo_req_attr_set(env, scan, attr);
EXPORT_SYMBOL(cl_req_attr_set);
/* cl_sync_io_callback assumes the caller must call cl_sync_io_wait() to
 * wait for the IO to finish. */
void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor)
        wake_up_all(&anchor->csi_waitq);
        /* it's safe to nuke or reuse anchor now */
        atomic_set(&anchor->csi_barrier, 0);
EXPORT_SYMBOL(cl_sync_io_end);
 * Initialize synchronous io wait anchor
void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
                     void (*end)(const struct lu_env *, struct cl_sync_io *))
        memset(anchor, 0, sizeof(*anchor));
        init_waitqueue_head(&anchor->csi_waitq);
        atomic_set(&anchor->csi_sync_nr, nr);
        atomic_set(&anchor->csi_barrier, nr > 0);
        anchor->csi_sync_rc = 0;
        anchor->csi_end_io = end;
        LASSERT(end != NULL);
EXPORT_SYMBOL(cl_sync_io_init);
 * Wait until all IO completes. Transfer completion routine has to call
 * cl_sync_io_note() for every entity.
int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
        struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
        LASSERT(timeout >= 0);
        rc = l_wait_event(anchor->csi_waitq,
                          atomic_read(&anchor->csi_sync_nr) == 0,
                CERROR("IO failed: %d, still wait for %d remaining entries\n",
                       rc, atomic_read(&anchor->csi_sync_nr));
                lwi = (struct l_wait_info) { 0 };
                (void)l_wait_event(anchor->csi_waitq,
                                   atomic_read(&anchor->csi_sync_nr) == 0,
                rc = anchor->csi_sync_rc;
        LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
        /* wait until cl_sync_io_note() has done wakeup */
        while (unlikely(atomic_read(&anchor->csi_barrier) != 0)) {
EXPORT_SYMBOL(cl_sync_io_wait);
 * Indicate that transfer of a single page completed.
void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
        if (anchor->csi_sync_rc == 0 && ioret < 0)
                anchor->csi_sync_rc = ioret;
         * Synchronous IO done without releasing page lock (e.g., as a part of
         * ->{prepare,commit}_write(). Completion is used to signal the end of
        LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
        if (atomic_dec_and_test(&anchor->csi_sync_nr)) {
                LASSERT(anchor->csi_end_io != NULL);
                anchor->csi_end_io(env, anchor);
                /* Can't access anchor any more */
EXPORT_SYMBOL(cl_sync_io_note);
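/*
 * A condensed sketch of the anchor protocol implemented above, following the
 * way cl_io_submit_sync() uses it: the submitter initializes the anchor for
 * "nr" transfers, every completion calls cl_sync_io_note(), and the
 * submitter blocks in cl_sync_io_wait():
 *
 *      cl_sync_io_init(anchor, nr, &cl_sync_io_end);
 *      ... start nr transfers, each of which eventually calls
 *          cl_sync_io_note(env, anchor, ioret) ...
 *      rc = cl_sync_io_wait(env, anchor, timeout);
 *
 * The first negative ioret is latched in csi_sync_rc and becomes the return
 * value of cl_sync_io_wait(), while csi_barrier keeps the waiter from
 * reusing the anchor until the final wakeup in cl_sync_io_end() has run.
 */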