4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or have any questions.
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2013, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
39 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
42 #define DEBUG_SUBSYSTEM S_CLASS
44 #include <obd_class.h>
45 #include <obd_support.h>
46 #include <lustre_fid.h>
47 #include <libcfs/list.h>
48 #include <cl_object.h>
49 #include "cl_internal.h"
51 /*****************************************************************************
57 #define cl_io_for_each(slice, io) \
58 cfs_list_for_each_entry((slice), &io->ci_layers, cis_linkage)
59 #define cl_io_for_each_reverse(slice, io) \
60 cfs_list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
62 static inline int cl_io_type_is_valid(enum cl_io_type type)
64 return CIT_READ <= type && type < CIT_OP_NR;
67 static inline int cl_io_is_loopable(const struct cl_io *io)
69 return cl_io_type_is_valid(io->ci_type) && io->ci_type != CIT_MISC;
73 * Returns true iff there is an IO ongoing in the given environment.
75 int cl_io_is_going(const struct lu_env *env)
77 return cl_env_info(env)->clt_current_io != NULL;
79 EXPORT_SYMBOL(cl_io_is_going);
82 * cl_io invariant that holds at all times when exported cl_io_*() functions
83 * are entered and left.
85 static int cl_io_invariant(const struct cl_io *io)
92 * io can own pages only when it is ongoing. Sub-io might
93 * still be in CIS_LOCKED state when top-io is in CIS_IO_GOING.
96 ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING ||
97 (io->ci_state == CIS_LOCKED && up != NULL));
101 * Finalize \a io, by calling cl_io_operations::cio_fini() bottom-to-top.
103 void cl_io_fini(const struct lu_env *env, struct cl_io *io)
105 struct cl_io_slice *slice;
106 struct cl_thread_info *info;
108 LINVRNT(cl_io_type_is_valid(io->ci_type));
109 LINVRNT(cl_io_invariant(io));
112 while (!cfs_list_empty(&io->ci_layers)) {
113 slice = container_of(io->ci_layers.prev, struct cl_io_slice,
115 cfs_list_del_init(&slice->cis_linkage);
116 if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
117 slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
119 * Invalidate slice to catch use after free. This assumes that
120 * slices are allocated within session and can be touched
121 * after ->cio_fini() returns.
123 slice->cis_io = NULL;
125 io->ci_state = CIS_FINI;
126 info = cl_env_info(env);
127 if (info->clt_current_io == io)
128 info->clt_current_io = NULL;
130 /* sanity check for layout change */
131 switch (io->ci_type) {
137 LASSERT(!io->ci_need_restart);
141 /* Check ignore layout change conf */
142 LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout,
143 !io->ci_need_restart));
150 EXPORT_SYMBOL(cl_io_fini);
152 static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
153 enum cl_io_type iot, struct cl_object *obj)
155 struct cl_object *scan;
158 LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI);
159 LINVRNT(cl_io_type_is_valid(iot));
160 LINVRNT(cl_io_invariant(io));
164 CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
165 CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_curr);
166 CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_done);
167 CFS_INIT_LIST_HEAD(&io->ci_layers);
170 cl_object_for_each(scan, obj) {
171 if (scan->co_ops->coo_io_init != NULL) {
172 result = scan->co_ops->coo_io_init(env, scan, io);
178 io->ci_state = CIS_INIT;
183 * Initialize sub-io, by calling cl_io_operations::cio_init() top-to-bottom.
185 * \pre obj != cl_object_top(obj)
187 int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
188 enum cl_io_type iot, struct cl_object *obj)
190 struct cl_thread_info *info = cl_env_info(env);
192 LASSERT(obj != cl_object_top(obj));
193 if (info->clt_current_io == NULL)
194 info->clt_current_io = io;
195 return cl_io_init0(env, io, iot, obj);
197 EXPORT_SYMBOL(cl_io_sub_init);
200 * Initialize \a io, by calling cl_io_operations::cio_init() top-to-bottom.
202 * Caller has to call cl_io_fini() after a call to cl_io_init(), no matter
203 * what the latter returned.
205 * \pre obj == cl_object_top(obj)
206 * \pre cl_io_type_is_valid(iot)
207 * \post cl_io_type_is_valid(io->ci_type) && io->ci_type == iot
209 int cl_io_init(const struct lu_env *env, struct cl_io *io,
210 enum cl_io_type iot, struct cl_object *obj)
212 struct cl_thread_info *info = cl_env_info(env);
214 LASSERT(obj == cl_object_top(obj));
215 LASSERT(info->clt_current_io == NULL);
217 info->clt_current_io = io;
218 return cl_io_init0(env, io, iot, obj);
220 EXPORT_SYMBOL(cl_io_init);
223 * Initialize read or write io.
225 * \pre iot == CIT_READ || iot == CIT_WRITE
227 int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
228 enum cl_io_type iot, loff_t pos, size_t count)
230 LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
231 LINVRNT(io->ci_obj != NULL);
234 LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
235 "io range: %u ["LPU64", "LPU64") %u %u\n",
236 iot, (__u64)pos, (__u64)pos + count,
237 io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
238 io->u.ci_rw.crw_pos = pos;
239 io->u.ci_rw.crw_count = count;
240 RETURN(cl_io_init(env, io, iot, io->ci_obj));
242 EXPORT_SYMBOL(cl_io_rw_init);
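/*
 * Editor's sketch (not part of the original source): a minimal illustration of
 * the calling convention documented above -- cl_io_fini() is called no matter
 * what cl_io_rw_init() returned.  The helper name sample_read() and the way
 * the io, object and range are obtained are assumptions made for illustration.
 */
#if 0
static int sample_read(const struct lu_env *env, struct cl_io *io,
                       struct cl_object *obj, loff_t pos, size_t count)
{
        int rc;

        io->ci_obj = obj;
        rc = cl_io_rw_init(env, io, CIT_READ, pos, count);
        if (rc == 0)
                rc = cl_io_loop(env, io);       /* iterate: lock, start, end, ... */
        cl_io_fini(env, io);                    /* finalize unconditionally */
        return rc;
}
#endif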
244 static inline const struct lu_fid *
245 cl_lock_descr_fid(const struct cl_lock_descr *descr)
247 return lu_object_fid(&descr->cld_obj->co_lu);
250 static int cl_lock_descr_sort(const struct cl_lock_descr *d0,
251 const struct cl_lock_descr *d1)
253 return lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1)) ?:
254 __diff_normalize(d0->cld_start, d1->cld_start);
257 static int cl_lock_descr_cmp(const struct cl_lock_descr *d0,
258 const struct cl_lock_descr *d1)
262 ret = lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1));
265 if (d0->cld_end < d1->cld_start)
267 if (d0->cld_start > d1->cld_end) /* d0 lies entirely after d1 */
272 static void cl_lock_descr_merge(struct cl_lock_descr *d0,
273 const struct cl_lock_descr *d1)
275 d0->cld_start = min(d0->cld_start, d1->cld_start);
276 d0->cld_end = max(d0->cld_end, d1->cld_end);
278 if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
279 d0->cld_mode = CLM_WRITE;
281 if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
282 d0->cld_mode = CLM_GROUP;
286 * Sort locks in lexicographical order of their (fid, start-offset) pairs.
288 static void cl_io_locks_sort(struct cl_io *io)
293 /* hidden treasure: bubble sort for now. */
295 struct cl_io_lock_link *curr;
296 struct cl_io_lock_link *prev;
297 struct cl_io_lock_link *temp;
302 cfs_list_for_each_entry_safe(curr, temp,
303 &io->ci_lockset.cls_todo,
306 switch (cl_lock_descr_sort(&prev->cill_descr,
307 &curr->cill_descr)) {
310 * IMPOSSIBLE: Identical locks are impossible.
317 cfs_list_move_tail(&curr->cill_linkage,
318 &prev->cill_linkage);
320 continue; /* don't change prev: it's
321 * still "previous" */
322 case -1: /* already in order */
333 * Check whether \a queue contains locks matching \a need.
335 * \retval +ve there is a matching lock in the \a queue
336 * \retval 0 there are no matching locks in the \a queue
338 int cl_queue_match(const cfs_list_t *queue,
339 const struct cl_lock_descr *need)
341 struct cl_io_lock_link *scan;
344 cfs_list_for_each_entry(scan, queue, cill_linkage) {
345 if (cl_lock_descr_match(&scan->cill_descr, need))
350 EXPORT_SYMBOL(cl_queue_match);
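/*
 * Editor's sketch (not part of the original source): cl_queue_match() can be
 * used to check whether a lock requirement is already covered by links queued
 * on a lockset list, e.g. before allocating a new cl_io_lock_link.  The helper
 * name is an assumption.
 */
#if 0
static int sample_lock_already_queued(const struct cl_io *io,
                                      const struct cl_lock_descr *need)
{
        /* non-zero iff a link on the todo list matches "need" */
        return cl_queue_match(&io->ci_lockset.cls_todo, need);
}
#endif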
352 static int cl_queue_merge(const cfs_list_t *queue,
353 const struct cl_lock_descr *need)
355 struct cl_io_lock_link *scan;
358 cfs_list_for_each_entry(scan, queue, cill_linkage) {
359 if (cl_lock_descr_cmp(&scan->cill_descr, need))
361 cl_lock_descr_merge(&scan->cill_descr, need);
362 CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
363 scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
364 scan->cill_descr.cld_end);
371 static int cl_lockset_match(const struct cl_lockset *set,
372 const struct cl_lock_descr *need)
374 return cl_queue_match(&set->cls_curr, need) ||
375 cl_queue_match(&set->cls_done, need);
378 static int cl_lockset_merge(const struct cl_lockset *set,
379 const struct cl_lock_descr *need)
381 return cl_queue_merge(&set->cls_todo, need) ||
382 cl_lockset_match(set, need);
385 static int cl_lockset_lock_one(const struct lu_env *env,
386 struct cl_io *io, struct cl_lockset *set,
387 struct cl_io_lock_link *link)
389 struct cl_lock *lock;
394 lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
397 link->cill_lock = lock;
398 cfs_list_move(&link->cill_linkage, &set->cls_curr);
399 if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) {
400 result = cl_wait(env, lock);
402 cfs_list_move(&link->cill_linkage,
407 result = PTR_ERR(lock);
411 static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io,
412 struct cl_io_lock_link *link)
414 struct cl_lock *lock = link->cill_lock;
417 cfs_list_del_init(&link->cill_linkage);
419 cl_lock_release(env, lock, "io", io);
420 link->cill_lock = NULL;
422 if (link->cill_fini != NULL)
423 link->cill_fini(env, link);
427 static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
428 struct cl_lockset *set)
430 struct cl_io_lock_link *link;
431 struct cl_io_lock_link *temp;
432 struct cl_lock *lock;
437 cfs_list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
438 if (!cl_lockset_match(set, &link->cill_descr)) {
439 /* XXX some locking to guarantee that locks aren't
440 * expanded in between. */
441 result = cl_lockset_lock_one(env, io, set, link);
445 cl_lock_link_fini(env, io, link);
448 cfs_list_for_each_entry_safe(link, temp,
449 &set->cls_curr, cill_linkage) {
450 lock = link->cill_lock;
451 result = cl_wait(env, lock);
453 cfs_list_move(&link->cill_linkage,
463 * Takes locks necessary for the current iteration of io.
465 * Calls cl_io_operations::cio_lock() top-to-bottom to collect locks required
466 * by layers for the current iteration. Then sorts the locks (to avoid dead-locks) and acquires them in order.
469 int cl_io_lock(const struct lu_env *env, struct cl_io *io)
471 const struct cl_io_slice *scan;
474 LINVRNT(cl_io_is_loopable(io));
475 LINVRNT(io->ci_state == CIS_IT_STARTED);
476 LINVRNT(cl_io_invariant(io));
479 cl_io_for_each(scan, io) {
480 if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
482 result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
487 cl_io_locks_sort(io);
488 result = cl_lockset_lock(env, io, &io->ci_lockset);
491 cl_io_unlock(env, io);
493 io->ci_state = CIS_LOCKED;
496 EXPORT_SYMBOL(cl_io_lock);
499 * Releases locks taken by the io.
501 void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
503 struct cl_lockset *set;
504 struct cl_io_lock_link *link;
505 struct cl_io_lock_link *temp;
506 const struct cl_io_slice *scan;
508 LASSERT(cl_io_is_loopable(io));
509 LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED);
510 LINVRNT(cl_io_invariant(io));
513 set = &io->ci_lockset;
515 cfs_list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
516 cl_lock_link_fini(env, io, link);
518 cfs_list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage)
519 cl_lock_link_fini(env, io, link);
521 cfs_list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
522 cl_unuse(env, link->cill_lock);
523 cl_lock_link_fini(env, io, link);
525 cl_io_for_each_reverse(scan, io) {
526 if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
527 scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
529 io->ci_state = CIS_UNLOCKED;
530 LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired);
533 EXPORT_SYMBOL(cl_io_unlock);
536 * Prepares next iteration of io.
538 * Calls cl_io_operations::cio_iter_init() top-to-bottom. This exists to give
539 * layers a chance to modify io parameters, e.g., so that lov can restrict io
540 * to a single stripe.
542 int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
544 const struct cl_io_slice *scan;
547 LINVRNT(cl_io_is_loopable(io));
548 LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED);
549 LINVRNT(cl_io_invariant(io));
553 cl_io_for_each(scan, io) {
554 if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
556 result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
562 io->ci_state = CIS_IT_STARTED;
565 EXPORT_SYMBOL(cl_io_iter_init);
568 * Finalizes io iteration.
570 * Calls cl_io_operations::cio_iter_fini() bottom-to-top.
572 void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
574 const struct cl_io_slice *scan;
576 LINVRNT(cl_io_is_loopable(io));
577 LINVRNT(io->ci_state == CIS_UNLOCKED);
578 LINVRNT(cl_io_invariant(io));
581 cl_io_for_each_reverse(scan, io) {
582 if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
583 scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
585 io->ci_state = CIS_IT_ENDED;
588 EXPORT_SYMBOL(cl_io_iter_fini);
591 * Records that read or write io progressed \a nob bytes forward.
593 void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, size_t nob)
595 const struct cl_io_slice *scan;
597 LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
599 LINVRNT(cl_io_is_loopable(io));
600 LINVRNT(cl_io_invariant(io));
604 io->u.ci_rw.crw_pos += nob;
605 io->u.ci_rw.crw_count -= nob;
607 /* layers have to be notified. */
608 cl_io_for_each_reverse(scan, io) {
609 if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
610 scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
615 EXPORT_SYMBOL(cl_io_rw_advance);
618 * Adds a lock to a lockset.
620 int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
621 struct cl_io_lock_link *link)
626 if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr))
629 cfs_list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
634 EXPORT_SYMBOL(cl_io_lock_add);
636 static void cl_free_io_lock_link(const struct lu_env *env,
637 struct cl_io_lock_link *link)
643 * Allocates new lock link, and uses it to add a lock to a lockset.
645 int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
646 struct cl_lock_descr *descr)
648 struct cl_io_lock_link *link;
654 link->cill_descr = *descr;
655 link->cill_fini = cl_free_io_lock_link;
656 result = cl_io_lock_add(env, io, link);
657 if (result) /* lock match */
658 link->cill_fini(env, link);
664 EXPORT_SYMBOL(cl_io_lock_alloc_add);
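/*
 * Editor's sketch (not part of the original source): how a layer's
 * ->cio_lock() method might use cl_io_lock_alloc_add() to request an extent
 * lock covering the current read/write range.  The helper name is an
 * assumption; cl_index() is assumed to convert a byte offset on the object
 * into a page index.
 */
#if 0
static int sample_cio_lock(const struct lu_env *env,
                           const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        struct cl_lock_descr descr;

        memset(&descr, 0, sizeof(descr));
        descr.cld_obj   = ios->cis_obj;
        descr.cld_mode  = io->ci_type == CIT_READ ? CLM_READ : CLM_WRITE;
        descr.cld_start = cl_index(ios->cis_obj, io->u.ci_rw.crw_pos);
        descr.cld_end   = cl_index(ios->cis_obj, io->u.ci_rw.crw_pos +
                                   io->u.ci_rw.crw_count - 1);
        return cl_io_lock_alloc_add(env, io, &descr);
}
#endif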
667 * Starts io by calling cl_io_operations::cio_start() top-to-bottom.
669 int cl_io_start(const struct lu_env *env, struct cl_io *io)
671 const struct cl_io_slice *scan;
674 LINVRNT(cl_io_is_loopable(io));
675 LINVRNT(io->ci_state == CIS_LOCKED);
676 LINVRNT(cl_io_invariant(io));
679 io->ci_state = CIS_IO_GOING;
680 cl_io_for_each(scan, io) {
681 if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
683 result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
691 EXPORT_SYMBOL(cl_io_start);
694 * Wait until current io iteration is finished by calling
695 * cl_io_operations::cio_end() bottom-to-top.
697 void cl_io_end(const struct lu_env *env, struct cl_io *io)
699 const struct cl_io_slice *scan;
701 LINVRNT(cl_io_is_loopable(io));
702 LINVRNT(io->ci_state == CIS_IO_GOING);
703 LINVRNT(cl_io_invariant(io));
706 cl_io_for_each_reverse(scan, io) {
707 if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
708 scan->cis_iop->op[io->ci_type].cio_end(env, scan);
709 /* TODO: error handling. */
711 io->ci_state = CIS_IO_FINISHED;
714 EXPORT_SYMBOL(cl_io_end);
716 static const struct cl_page_slice *
717 cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
719 const struct cl_page_slice *slice;
721 slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type);
722 LINVRNT(slice != NULL);
727 * True iff \a page is within \a io range.
729 static int cl_page_in_io(const struct cl_page *page, const struct cl_io *io)
736 idx = page->cp_index;
737 switch (io->ci_type) {
741 * check that the [start, end) and [pos, pos + count) extents overlap
744 if (!cl_io_is_append(io)) {
745 const struct cl_io_rw_common *crw = &(io->u.ci_rw);
746 start = cl_offset(page->cp_obj, idx);
747 end = cl_offset(page->cp_obj, idx + 1);
748 result = crw->crw_pos < end &&
749 start < crw->crw_pos + crw->crw_count;
753 result = io->u.ci_fault.ft_index == idx;
762 * Called by read io when a page has to be read from the server.
764 * \see cl_io_operations::cio_read_page()
766 int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
767 struct cl_page *page)
769 const struct cl_io_slice *scan;
770 struct cl_2queue *queue;
773 LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
774 LINVRNT(cl_page_is_owned(page, io));
775 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
776 LINVRNT(cl_page_in_io(page, io));
777 LINVRNT(cl_io_invariant(io));
780 queue = &io->ci_queue;
782 cl_2queue_init(queue);
784 * ->cio_read_page() methods called in the loop below are supposed to
785 * never block waiting for network (the only subtle point is the
786 * creation of new pages for read-ahead that might result in cache
787 * shrinking, but currently only clean pages are shrunk and this
788 * requires no network io).
790 * Should this ever start blocking, a retry loop would be needed for
791 * "parallel io" (see CLO_REPEAT loops in cl_lock.c).
793 cl_io_for_each(scan, io) {
794 if (scan->cis_iop->cio_read_page != NULL) {
795 const struct cl_page_slice *slice;
797 slice = cl_io_slice_page(scan, page);
798 LINVRNT(slice != NULL);
799 result = scan->cis_iop->cio_read_page(env, scan, slice);
805 result = cl_io_submit_rw(env, io, CRT_READ, queue);
807 * Unlock unsent pages in case of error.
809 cl_page_list_disown(env, io, &queue->c2_qin);
810 cl_2queue_fini(env, queue);
813 EXPORT_SYMBOL(cl_io_read_page);
816 * Commits a list of contiguous pages into the writeback cache.
818 * \returns 0 if all pages were committed, or an error code if an error occurred.
819 * \see cl_io_operations::cio_commit_async()
821 int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
822 struct cl_page_list *queue, int from, int to,
825 const struct cl_io_slice *scan;
829 cl_io_for_each(scan, io) {
830 if (scan->cis_iop->cio_commit_async == NULL)
832 result = scan->cis_iop->cio_commit_async(env, scan, queue,
839 EXPORT_SYMBOL(cl_io_commit_async);
842 * Submits a list of pages for immediate io.
844 * After the function returns, the submitted pages are moved to
845 * queue->c2_qout, while queue->c2_qin retains both the pages that did not
846 * need to be submitted and the pages that failed to submit.
848 * \returns 0 if at least one page was submitted, error code otherwise.
849 * \see cl_io_operations::cio_submit()
851 int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
852 enum cl_req_type crt, struct cl_2queue *queue)
854 const struct cl_io_slice *scan;
858 cl_io_for_each(scan, io) {
859 if (scan->cis_iop->cio_submit == NULL)
861 result = scan->cis_iop->cio_submit(env, scan, crt, queue);
866 * If ->cio_submit() failed, no pages were sent.
868 LASSERT(ergo(result != 0, cfs_list_empty(&queue->c2_qout.pl_pages)));
871 EXPORT_SYMBOL(cl_io_submit_rw);
874 * Submits a sync io and waits until the IO completes or an error occurs.
875 * If \a timeout is zero, wait for the IO unconditionally.
877 int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
878 enum cl_req_type iot, struct cl_2queue *queue,
881 struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
885 cl_page_list_for_each(pg, &queue->c2_qin) {
886 LASSERT(pg->cp_sync_io == NULL);
887 pg->cp_sync_io = anchor;
890 cl_sync_io_init(anchor, queue->c2_qin.pl_nr);
891 rc = cl_io_submit_rw(env, io, iot, queue);
894 * If some pages weren't sent for any reason (e.g.,
895 * read found up-to-date pages in the cache, or write found
896 * clean pages), count them as completed to avoid an infinite wait.
899 cl_page_list_for_each(pg, &queue->c2_qin) {
900 pg->cp_sync_io = NULL;
901 cl_sync_io_note(anchor, +1);
904 /* wait for the IO to be finished. */
905 rc = cl_sync_io_wait(env, io, &queue->c2_qout,
908 LASSERT(cfs_list_empty(&queue->c2_qout.pl_pages));
909 cl_page_list_for_each(pg, &queue->c2_qin)
910 pg->cp_sync_io = NULL;
914 EXPORT_SYMBOL(cl_io_submit_sync);
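/*
 * Editor's sketch (not part of the original source): submitting a single
 * owned page synchronously through a 2-queue, which is how callers of
 * cl_io_submit_sync() typically operate.  The helper name and the zero
 * timeout (wait indefinitely) are illustrative choices.
 */
#if 0
static int sample_sync_write_page(const struct lu_env *env, struct cl_io *io,
                                  struct cl_page *page)
{
        struct cl_2queue *queue = &io->ci_queue;
        int rc;

        cl_2queue_init_page(queue, page);       /* c2_qin = { page } */
        rc = cl_io_submit_sync(env, io, CRT_WRITE, queue, 0);
        cl_2queue_disown(env, io, queue);       /* drop page ownership */
        cl_2queue_fini(env, queue);
        return rc;
}
#endif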
917 * Cancel an IO which has been submitted by cl_io_submit_rw.
919 int cl_io_cancel(const struct lu_env *env, struct cl_io *io,
920 struct cl_page_list *queue)
922 struct cl_page *page;
925 CERROR("Canceling ongoing page transmission\n");
926 cl_page_list_for_each(page, queue) {
929 LINVRNT(cl_page_in_io(page, io));
930 rc = cl_page_cancel(env, page);
931 result = result ?: rc;
935 EXPORT_SYMBOL(cl_io_cancel);
940 * Pumps io through iterations calling
942 *    - cl_io_iter_init()
 *    - cl_io_lock()
 *    - cl_io_start()
 *    - cl_io_end()
 *    - cl_io_unlock()
952 *    - cl_io_iter_fini()
954 * repeatedly until there is no more io to do.
956 int cl_io_loop(const struct lu_env *env, struct cl_io *io)
960 LINVRNT(cl_io_is_loopable(io));
967 result = cl_io_iter_init(env, io);
970 result = cl_io_lock(env, io);
973 * Notify layers that locks have been taken,
976 * - llite: kms, short read;
977 * - llite: generic_file_read();
979 result = cl_io_start(env, io);
981 * Send any remaining pending io, etc.
984 * - llite: ll_rw_stats_tally.
987 cl_io_unlock(env, io);
988 cl_io_rw_advance(env, io, io->ci_nob - nob);
991 cl_io_iter_fini(env, io);
992 } while (result == 0 && io->ci_continue);
994 result = io->ci_result;
995 RETURN(result < 0 ? result : 0);
997 EXPORT_SYMBOL(cl_io_loop);
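/*
 * Editor's sketch (not part of the original source): roughly what one
 * iteration of cl_io_loop() performs, open-coded with simplified error
 * handling.  The helper name is an assumption.
 */
#if 0
static int sample_one_iteration(const struct lu_env *env, struct cl_io *io)
{
        size_t nob;
        int rc;

        rc = cl_io_iter_init(env, io);          /* layers set up this iteration */
        if (rc == 0) {
                nob = io->ci_nob;               /* bytes transferred so far */
                rc = cl_io_lock(env, io);       /* collect, sort and take locks */
                if (rc == 0) {
                        rc = cl_io_start(env, io);
                        cl_io_end(env, io);     /* wait for the iteration to end */
                        cl_io_unlock(env, io);
                        cl_io_rw_advance(env, io, io->ci_nob - nob);
                }
        }
        cl_io_iter_fini(env, io);
        return rc;
}
#endif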
1000 * Adds io slice to the cl_io.
1002 * This is called by cl_object_operations::coo_io_init() methods to add a
1003 * per-layer state to the io. New state is added at the end of
1004 * cl_io::ci_layers list, that is, it is at the bottom of the stack.
1006 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_page_slice_add()
1008 void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
1009 struct cl_object *obj,
1010 const struct cl_io_operations *ops)
1012 cfs_list_t *linkage = &slice->cis_linkage;
1014 LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
1015 cfs_list_empty(linkage));
1018 cfs_list_add_tail(linkage, &io->ci_layers);
1020 slice->cis_obj = obj;
1021 slice->cis_iop = ops;
1024 EXPORT_SYMBOL(cl_io_slice_add);
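/*
 * Editor's sketch (not part of the original source): a layer's ->coo_io_init()
 * method typically embeds a cl_io_slice in its per-layer io state and
 * registers it with cl_io_slice_add().  struct sample_io, sample_env_io() and
 * sample_io_ops are illustrative assumptions, not real Lustre symbols.
 */
#if 0
struct sample_io {
        struct cl_io_slice si_cl;
        /* ... layer-private per-io state ... */
};

static const struct cl_io_operations sample_io_ops; /* layer's io methods */

static int sample_coo_io_init(const struct lu_env *env,
                              struct cl_object *obj, struct cl_io *io)
{
        struct sample_io *sio = sample_env_io(env);     /* hypothetical accessor */

        cl_io_slice_add(io, &sio->si_cl, obj, &sample_io_ops);
        return 0;
}
#endif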
1028 * Initializes page list.
1030 void cl_page_list_init(struct cl_page_list *plist)
1034 CFS_INIT_LIST_HEAD(&plist->pl_pages);
1035 plist->pl_owner = current;
1038 EXPORT_SYMBOL(cl_page_list_init);
1041 * Adds a page to a page list.
1043 void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
1046 /* it would be better to check that page is owned by "current" io, but
1047 * it is not passed here. */
1048 LASSERT(page->cp_owner != NULL);
1049 LINVRNT(plist->pl_owner == current);
1052 mutex_lock(&page->cp_mutex);
1054 LASSERT(cfs_list_empty(&page->cp_batch));
1055 cfs_list_add_tail(&page->cp_batch, &plist->pl_pages);
1057 lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
1061 EXPORT_SYMBOL(cl_page_list_add);
1064 * Removes a page from a page list.
1066 void cl_page_list_del(const struct lu_env *env,
1067 struct cl_page_list *plist, struct cl_page *page)
1069 LASSERT(plist->pl_nr > 0);
1070 LINVRNT(plist->pl_owner == current);
1073 cfs_list_del_init(&page->cp_batch);
1075 mutex_unlock(&page->cp_mutex);
1078 lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
1079 cl_page_put(env, page);
1082 EXPORT_SYMBOL(cl_page_list_del);
1085 * Moves a page from one page list to another.
1087 void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
1088 struct cl_page *page)
1090 LASSERT(src->pl_nr > 0);
1091 LINVRNT(dst->pl_owner == current);
1092 LINVRNT(src->pl_owner == current);
1095 cfs_list_move_tail(&page->cp_batch, &dst->pl_pages);
1098 lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
1102 EXPORT_SYMBOL(cl_page_list_move);
1105 * Moves a page from one page list to the head of another list.
1107 void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
1108 struct cl_page *page)
1110 LASSERT(src->pl_nr > 0);
1111 LINVRNT(dst->pl_owner == current);
1112 LINVRNT(src->pl_owner == current);
1115 cfs_list_move(&page->cp_batch, &dst->pl_pages);
1118 lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
1122 EXPORT_SYMBOL(cl_page_list_move_head);
1125 * Splices one cl_page_list onto another, just as a plain list head splice does.
1127 void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
1129 struct cl_page *page;
1130 struct cl_page *tmp;
1132 LINVRNT(list->pl_owner == current);
1133 LINVRNT(head->pl_owner == current);
1136 cl_page_list_for_each_safe(page, tmp, list)
1137 cl_page_list_move(head, list, page);
1140 EXPORT_SYMBOL(cl_page_list_splice);
1142 void cl_page_disown0(const struct lu_env *env,
1143 struct cl_io *io, struct cl_page *pg);
1146 * Disowns pages in a queue.
1148 void cl_page_list_disown(const struct lu_env *env,
1149 struct cl_io *io, struct cl_page_list *plist)
1151 struct cl_page *page;
1152 struct cl_page *temp;
1154 LINVRNT(plist->pl_owner == current);
1157 cl_page_list_for_each_safe(page, temp, plist) {
1158 LASSERT(plist->pl_nr > 0);
1160 cfs_list_del_init(&page->cp_batch);
1162 mutex_unlock(&page->cp_mutex);
1166 * cl_page_disown0() rather than the usual cl_page_disown() is used,
1167 * because pages are possibly in CPS_FREEING state already due
1168 * to the call to cl_page_list_discard().
1171 * XXX cl_page_disown0() will fail if page is not locked.
1173 cl_page_disown0(env, io, page);
1174 lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue",
1176 cl_page_put(env, page);
1180 EXPORT_SYMBOL(cl_page_list_disown);
1183 * Releases pages from queue.
1185 void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
1187 struct cl_page *page;
1188 struct cl_page *temp;
1190 LINVRNT(plist->pl_owner == current);
1193 cl_page_list_for_each_safe(page, temp, plist)
1194 cl_page_list_del(env, plist, page);
1195 LASSERT(plist->pl_nr == 0);
1198 EXPORT_SYMBOL(cl_page_list_fini);
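/*
 * Editor's sketch (not part of the original source): a typical cl_page_list
 * lifecycle -- initialize, add already-owned pages, hand the list to a
 * consumer, then finalize.  The helper name and the origin of the pages are
 * assumptions.
 */
#if 0
static void sample_queue_pages(const struct lu_env *env,
                               struct cl_page **pages, int nr)
{
        struct cl_page_list plist;
        int i;

        cl_page_list_init(&plist);
        for (i = 0; i < nr; i++)
                cl_page_list_add(&plist, pages[i]);     /* takes a queue reference */
        /* ... pass &plist to cl_io_commit_async() or a similar consumer ... */
        cl_page_list_fini(env, &plist);                 /* drops queue references */
}
#endif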
1201 * Takes ownership of all pages in a queue.
1203 int cl_page_list_own(const struct lu_env *env,
1204 struct cl_io *io, struct cl_page_list *plist)
1206 struct cl_page *page;
1207 struct cl_page *temp;
1211 LINVRNT(plist->pl_owner == current);
1215 cl_page_list_for_each_safe(page, temp, plist) {
1216 LASSERT(index <= page->cp_index);
1217 index = page->cp_index;
1218 if (cl_page_own(env, io, page) == 0)
1219 result = result ?: page->cp_error;
1221 cl_page_list_del(env, plist, page);
1225 EXPORT_SYMBOL(cl_page_list_own);
1228 * Assumes (takes over ownership of) all pages in a queue.
1230 void cl_page_list_assume(const struct lu_env *env,
1231 struct cl_io *io, struct cl_page_list *plist)
1233 struct cl_page *page;
1235 LINVRNT(plist->pl_owner == current);
1237 cl_page_list_for_each(page, plist)
1238 cl_page_assume(env, io, page);
1240 EXPORT_SYMBOL(cl_page_list_assume);
1243 * Discards all pages in a queue.
1245 void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
1246 struct cl_page_list *plist)
1248 struct cl_page *page;
1250 LINVRNT(plist->pl_owner == current);
1252 cl_page_list_for_each(page, plist)
1253 cl_page_discard(env, io, page);
1256 EXPORT_SYMBOL(cl_page_list_discard);
1259 * Initialize dual page queue.
1261 void cl_2queue_init(struct cl_2queue *queue)
1264 cl_page_list_init(&queue->c2_qin);
1265 cl_page_list_init(&queue->c2_qout);
1268 EXPORT_SYMBOL(cl_2queue_init);
1271 * Add a page to the incoming page list of 2-queue.
1273 void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page)
1276 cl_page_list_add(&queue->c2_qin, page);
1279 EXPORT_SYMBOL(cl_2queue_add);
1282 * Disown pages in both lists of a 2-queue.
1284 void cl_2queue_disown(const struct lu_env *env,
1285 struct cl_io *io, struct cl_2queue *queue)
1288 cl_page_list_disown(env, io, &queue->c2_qin);
1289 cl_page_list_disown(env, io, &queue->c2_qout);
1292 EXPORT_SYMBOL(cl_2queue_disown);
1295 * Discard (truncate) pages in both lists of a 2-queue.
1297 void cl_2queue_discard(const struct lu_env *env,
1298 struct cl_io *io, struct cl_2queue *queue)
1301 cl_page_list_discard(env, io, &queue->c2_qin);
1302 cl_page_list_discard(env, io, &queue->c2_qout);
1305 EXPORT_SYMBOL(cl_2queue_discard);
1308 * Assume ownership of the pages in both lists of a 2-queue.
1310 void cl_2queue_assume(const struct lu_env *env,
1311 struct cl_io *io, struct cl_2queue *queue)
1313 cl_page_list_assume(env, io, &queue->c2_qin);
1314 cl_page_list_assume(env, io, &queue->c2_qout);
1316 EXPORT_SYMBOL(cl_2queue_assume);
1319 * Finalize both page lists of a 2-queue.
1321 void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue)
1324 cl_page_list_fini(env, &queue->c2_qout);
1325 cl_page_list_fini(env, &queue->c2_qin);
1328 EXPORT_SYMBOL(cl_2queue_fini);
1331 * Initialize a 2-queue to contain \a page in its incoming page list.
1333 void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page)
1336 cl_2queue_init(queue);
1337 cl_2queue_add(queue, page);
1340 EXPORT_SYMBOL(cl_2queue_init_page);
1343 * Returns top-level io.
1345 * \see cl_object_top(), cl_page_top().
1347 struct cl_io *cl_io_top(struct cl_io *io)
1350 while (io->ci_parent != NULL)
1354 EXPORT_SYMBOL(cl_io_top);
1357 * Prints a human readable representation of \a io via \a printer.
1359 void cl_io_print(const struct lu_env *env, void *cookie,
1360 lu_printer_t printer, const struct cl_io *io)
1365 * Adds request slice to the compound request.
1367 * This is called by cl_device_operations::cdo_req_init() methods to add a
1368 * per-layer state to the request. New state is added at the end of
1369 * cl_req::crq_layers list, that is, it is at the bottom of the stack.
1371 * \see cl_lock_slice_add(), cl_page_slice_add(), cl_io_slice_add()
1373 void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
1374 struct cl_device *dev,
1375 const struct cl_req_operations *ops)
1378 cfs_list_add_tail(&slice->crs_linkage, &req->crq_layers);
1379 slice->crs_dev = dev;
1380 slice->crs_ops = ops;
1381 slice->crs_req = req;
1384 EXPORT_SYMBOL(cl_req_slice_add);
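/*
 * Editor's sketch (not part of the original source): mirroring the io-slice
 * case, a device's ->cdo_req_init() method adds its slice to a transfer
 * request with cl_req_slice_add().  struct sample_req, sample_env_req() and
 * sample_req_ops are illustrative assumptions.
 */
#if 0
struct sample_req {
        struct cl_req_slice sr_cl;
        /* ... layer-private per-request state ... */
};

static const struct cl_req_operations sample_req_ops; /* layer's req methods */

static int sample_cdo_req_init(const struct lu_env *env,
                               struct cl_device *dev, struct cl_req *req)
{
        struct sample_req *sreq = sample_env_req(env);  /* hypothetical accessor */

        cl_req_slice_add(req, &sreq->sr_cl, dev, &sample_req_ops);
        return 0;
}
#endif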
1386 static void cl_req_free(const struct lu_env *env, struct cl_req *req)
1390 LASSERT(cfs_list_empty(&req->crq_pages));
1391 LASSERT(req->crq_nrpages == 0);
1392 LINVRNT(cfs_list_empty(&req->crq_layers));
1393 LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL));
1396 if (req->crq_o != NULL) {
1397 for (i = 0; i < req->crq_nrobjs; ++i) {
1398 struct cl_object *obj = req->crq_o[i].ro_obj;
1400 lu_object_ref_del_at(&obj->co_lu,
1401 &req->crq_o[i].ro_obj_ref,
1403 cl_object_put(env, obj);
1406 OBD_FREE(req->crq_o, req->crq_nrobjs * sizeof req->crq_o[0]);
1412 static int cl_req_init(const struct lu_env *env, struct cl_req *req,
1413 struct cl_page *page)
1415 struct cl_device *dev;
1416 struct cl_page_slice *slice;
1421 page = cl_page_top(page);
1423 cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
1424 dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
1425 if (dev->cd_ops->cdo_req_init != NULL) {
1426 result = dev->cd_ops->cdo_req_init(env,
1432 page = page->cp_child;
1433 } while (page != NULL && result == 0);
1438 * Invokes per-request transfer completion call-backs
1439 * (cl_req_operations::cro_completion()) bottom-to-top.
1441 void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
1443 struct cl_req_slice *slice;
1447 * for the lack of list_for_each_entry_reverse_safe()...
1449 while (!cfs_list_empty(&req->crq_layers)) {
1450 slice = cfs_list_entry(req->crq_layers.prev,
1451 struct cl_req_slice, crs_linkage);
1452 cfs_list_del_init(&slice->crs_linkage);
1453 if (slice->crs_ops->cro_completion != NULL)
1454 slice->crs_ops->cro_completion(env, slice, rc);
1456 cl_req_free(env, req);
1459 EXPORT_SYMBOL(cl_req_completion);
1462 * Allocates new transfer request.
1464 struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
1465 enum cl_req_type crt, int nr_objects)
1469 LINVRNT(nr_objects > 0);
1476 OBD_ALLOC(req->crq_o, nr_objects * sizeof req->crq_o[0]);
1477 if (req->crq_o != NULL) {
1478 req->crq_nrobjs = nr_objects;
1479 req->crq_type = crt;
1480 CFS_INIT_LIST_HEAD(&req->crq_pages);
1481 CFS_INIT_LIST_HEAD(&req->crq_layers);
1482 result = cl_req_init(env, req, page);
1486 cl_req_completion(env, req, result);
1487 req = ERR_PTR(result);
1490 req = ERR_PTR(-ENOMEM);
1493 EXPORT_SYMBOL(cl_req_alloc);
1496 * Adds a page to a request.
1498 void cl_req_page_add(const struct lu_env *env,
1499 struct cl_req *req, struct cl_page *page)
1501 struct cl_object *obj;
1502 struct cl_req_obj *rqo;
1506 page = cl_page_top(page);
1508 LASSERT(cfs_list_empty(&page->cp_flight));
1509 LASSERT(page->cp_req == NULL);
1511 CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n",
1512 req, req->crq_type, req->crq_nrpages);
1514 cfs_list_add_tail(&page->cp_flight, &req->crq_pages);
1517 obj = cl_object_top(page->cp_obj);
1518 for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) {
1519 if (rqo->ro_obj == NULL) {
1522 lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref,
1527 LASSERT(i < req->crq_nrobjs);
1530 EXPORT_SYMBOL(cl_req_page_add);
1533 * Removes a page from a request.
1535 void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
1537 struct cl_req *req = page->cp_req;
1540 page = cl_page_top(page);
1542 LASSERT(!cfs_list_empty(&page->cp_flight));
1543 LASSERT(req->crq_nrpages > 0);
1545 cfs_list_del_init(&page->cp_flight);
1547 page->cp_req = NULL;
1550 EXPORT_SYMBOL(cl_req_page_done);
1553 * Notifies layers that request is about to depart by calling
1554 * cl_req_operations::cro_prep() top-to-bottom.
1556 int cl_req_prep(const struct lu_env *env, struct cl_req *req)
1560 const struct cl_req_slice *slice;
1564 * Check that the caller of cl_req_alloc() didn't lie about the number of objects.
1567 for (i = 0; i < req->crq_nrobjs; ++i)
1568 LASSERT(req->crq_o[i].ro_obj != NULL);
1571 cfs_list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
1572 if (slice->crs_ops->cro_prep != NULL) {
1573 result = slice->crs_ops->cro_prep(env, slice);
1580 EXPORT_SYMBOL(cl_req_prep);
1583 * Fills in attributes that are passed to the server together with the transfer. Only
1584 * attributes from \a flags may be touched. This can be called multiple times
1585 * for the same request.
1587 void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
1588 struct cl_req_attr *attr, obd_valid flags)
1590 const struct cl_req_slice *slice;
1591 struct cl_page *page;
1594 LASSERT(!cfs_list_empty(&req->crq_pages));
1597 /* Take any page to use as a model. */
1598 page = cfs_list_entry(req->crq_pages.next, struct cl_page, cp_flight);
1600 for (i = 0; i < req->crq_nrobjs; ++i) {
1601 cfs_list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
1602 const struct cl_page_slice *scan;
1603 const struct cl_object *obj;
1605 scan = cl_page_at(page,
1606 slice->crs_dev->cd_lu_dev.ld_type);
1607 LASSERT(scan != NULL);
1608 obj = scan->cpl_obj;
1609 if (slice->crs_ops->cro_attr_set != NULL)
1610 slice->crs_ops->cro_attr_set(env, slice, obj,
1616 EXPORT_SYMBOL(cl_req_attr_set);
1618 /* XXX complete(), init_completion(), and wait_for_completion(), until they are
1619 * implemented in libcfs. */
1621 # include <linux/sched.h>
1622 #else /* __KERNEL__ */
1623 # include <liblustre.h>
1627 * Initialize synchronous io wait anchor, for transfer of \a nrpages pages.
1629 void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
1632 init_waitqueue_head(&anchor->csi_waitq);
1633 cfs_atomic_set(&anchor->csi_sync_nr, nrpages);
1634 cfs_atomic_set(&anchor->csi_barrier, nrpages > 0);
1635 anchor->csi_sync_rc = 0;
1638 EXPORT_SYMBOL(cl_sync_io_init);
1641 * Wait until all transfers complete. The transfer completion routine has to call
1642 * cl_sync_io_note() for every page.
1644 int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
1645 struct cl_page_list *queue, struct cl_sync_io *anchor,
1648 struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
1653 LASSERT(timeout >= 0);
1655 rc = l_wait_event(anchor->csi_waitq,
1656 cfs_atomic_read(&anchor->csi_sync_nr) == 0,
1659 CERROR("SYNC IO failed with error: %d, try to cancel "
1660 "%d remaining pages\n",
1661 rc, cfs_atomic_read(&anchor->csi_sync_nr));
1663 (void)cl_io_cancel(env, io, queue);
1665 lwi = (struct l_wait_info) { 0 };
1666 (void)l_wait_event(anchor->csi_waitq,
1667 cfs_atomic_read(&anchor->csi_sync_nr) == 0,
1670 rc = anchor->csi_sync_rc;
1672 LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) == 0);
1673 cl_page_list_assume(env, io, queue);
1675 /* wait until cl_sync_io_note() has done wakeup */
1676 while (unlikely(cfs_atomic_read(&anchor->csi_barrier) != 0)) {
1682 POISON(anchor, 0x5a, sizeof *anchor);
1685 EXPORT_SYMBOL(cl_sync_io_wait);
1688 * Indicate that transfer of a single page completed.
1690 void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
1693 if (anchor->csi_sync_rc == 0 && ioret < 0)
1694 anchor->csi_sync_rc = ioret;
1696 * Synchronous IO done without releasing the page lock (e.g., as a part of
1697 * ->{prepare,commit}_write()). Completion is used to signal the end of IO.
1700 LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) > 0);
1701 if (cfs_atomic_dec_and_test(&anchor->csi_sync_nr)) {
1702 wake_up_all(&anchor->csi_waitq);
1703 /* it's safe to nuke or reuse anchor now */
1704 cfs_atomic_set(&anchor->csi_barrier, 0);
1708 EXPORT_SYMBOL(cl_sync_io_note);
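/*
 * Editor's sketch (not part of the original source): on the completion side,
 * a transfer completion handler signals the anchor attached to each page via
 * cl_sync_io_note(); cl_sync_io_wait() returns once every page has been
 * noted.  The helper name is an assumption.
 */
#if 0
static void sample_page_completed(struct cl_page *page, int ioret)
{
        struct cl_sync_io *anchor = page->cp_sync_io;

        if (anchor != NULL) {
                page->cp_sync_io = NULL;
                cl_sync_io_note(anchor, ioret); /* wakes the waiter on the last page */
        }
}
#endif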