/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#include <obd_class.h>
#include <obd_support.h>
#include <lustre_fid.h>
#include <libcfs/list.h>
/* lu_time_global_{init,fini}() */
#include <cl_object.h>
#include "cl_internal.h"
/*****************************************************************************
 *
 * cl_io interface.
 *
 */

#define cl_io_for_each(slice, io) \
        list_for_each_entry((slice), &io->ci_layers, cis_linkage)
#define cl_io_for_each_reverse(slice, io) \
        list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)

static inline int cl_io_type_is_valid(enum cl_io_type type)
{
        return CIT_READ <= type && type < CIT_OP_NR;
}

static inline int cl_io_is_loopable(const struct cl_io *io)
{
        return cl_io_type_is_valid(io->ci_type) && io->ci_type != CIT_MISC;
}

/**
 * True iff \a io is a sendfile().
 */
int cl_io_is_sendfile(const struct cl_io *io)
{
        return io->ci_type == CIT_READ && io->u.ci_rd.rd_is_sendfile;
}
EXPORT_SYMBOL(cl_io_is_sendfile);
/**
 * Returns true iff there is an IO ongoing in the given environment.
 */
int cl_io_is_going(const struct lu_env *env)
{
        return cl_env_info(env)->clt_current_io != NULL;
}
EXPORT_SYMBOL(cl_io_is_going);

/**
 * cl_io invariant that holds at all times when exported cl_io_*() functions
 * are entered and left.
 */
static int cl_io_invariant(const struct cl_io *io)
{
        struct cl_io *up = io->ci_parent;

        /*
         * io can own pages only when it is ongoing. Sub-io might
         * still be in CIS_LOCKED state when top-io is in CIS_IO_GOING.
         */
        return ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING ||
                    (io->ci_state == CIS_LOCKED && up != NULL));
}
/**
 * Finalize \a io, by calling cl_io_operations::cio_fini() bottom-to-top.
 */
void cl_io_fini(const struct lu_env *env, struct cl_io *io)
{
        struct cl_io_slice    *slice;
        struct cl_thread_info *info;

        LINVRNT(cl_io_type_is_valid(io->ci_type));
        LINVRNT(cl_io_invariant(io));

        while (!list_empty(&io->ci_layers)) {
                slice = container_of(io->ci_layers.next, struct cl_io_slice,
                                     cis_linkage);
                list_del_init(&slice->cis_linkage);
                if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
                        slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
                /*
                 * Invalidate slice to catch use after free. This assumes that
                 * slices are allocated within session and can be touched
                 * after ->cio_fini() returns.
                 */
                slice->cis_io = NULL;
        }
        io->ci_state = CIS_FINI;
        info = cl_env_info(env);
        if (info->clt_current_io == io)
                info->clt_current_io = NULL;
}
EXPORT_SYMBOL(cl_io_fini);
static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
                       enum cl_io_type iot, struct cl_object *obj)
{
        struct cl_object *scan;
        int result;

        LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI);
        LINVRNT(cl_io_type_is_valid(iot));
        LINVRNT(cl_io_invariant(io));

        io->ci_type = iot;
        CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
        CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_curr);
        CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_done);
        CFS_INIT_LIST_HEAD(&io->ci_layers);

        result = 0;
        cl_object_for_each(scan, obj) {
                if (scan->co_ops->coo_io_init != NULL) {
                        result = scan->co_ops->coo_io_init(env, scan, io);
                        if (result != 0)
                                break;
                }
        }
        if (result == 0)
                io->ci_state = CIS_INIT;
        return result;
}

/**
 * Initialize sub-io, by calling cl_io_operations::cio_init() top-to-bottom.
 *
 * \pre obj != cl_object_top(obj)
 */
int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
                   enum cl_io_type iot, struct cl_object *obj)
{
        struct cl_thread_info *info = cl_env_info(env);

        LASSERT(obj != cl_object_top(obj));
        if (info->clt_current_io == NULL)
                info->clt_current_io = io;
        return cl_io_init0(env, io, iot, obj);
}
EXPORT_SYMBOL(cl_io_sub_init);
/**
 * Initialize \a io, by calling cl_io_operations::cio_init() top-to-bottom.
 *
 * Caller has to call cl_io_fini() after a call to cl_io_init(), no matter
 * what the latter returned.
 *
 * \pre obj == cl_object_top(obj)
 * \pre cl_io_type_is_valid(iot)
 * \post cl_io_type_is_valid(io->ci_type) && io->ci_type == iot
 */
int cl_io_init(const struct lu_env *env, struct cl_io *io,
               enum cl_io_type iot, struct cl_object *obj)
{
        struct cl_thread_info *info = cl_env_info(env);

        LASSERT(obj == cl_object_top(obj));
        LASSERT(info->clt_current_io == NULL);

        info->clt_current_io = io;
        return cl_io_init0(env, io, iot, obj);
}
EXPORT_SYMBOL(cl_io_init);
/**
 * Initialize read or write io.
 *
 * \pre iot == CIT_READ || iot == CIT_WRITE
 */
int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
                  enum cl_io_type iot, loff_t pos, size_t count)
{
        LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
        LINVRNT(io->ci_obj != NULL);

        LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
                         "io range: %i [%llu, %llu) %i %i\n",
                         iot, (__u64)pos, (__u64)pos + count,
                         io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
        io->u.ci_rw.crw_pos   = pos;
        io->u.ci_rw.crw_count = count;
        RETURN(cl_io_init(env, io, iot, io->ci_obj));
}
EXPORT_SYMBOL(cl_io_rw_init);
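
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * drive a write through this interface.  It assumes \a io comes from the
 * thread info in CIS_ZERO state and that \a obj is the top-level cl_object;
 * only cl_io_rw_init(), cl_io_loop() and cl_io_fini(), all defined in this
 * file, are used.  Per the contract above, cl_io_fini() is called no matter
 * what initialization returned.
 *
 *      int write_range(const struct lu_env *env, struct cl_io *io,
 *                      struct cl_object *obj, loff_t pos, size_t count)
 *      {
 *              int rc;
 *
 *              io->ci_obj = obj;                 // object the io operates on
 *              rc = cl_io_rw_init(env, io, CIT_WRITE, pos, count);
 *              if (rc == 0)
 *                      rc = cl_io_loop(env, io); // iterate, lock, start, end
 *              cl_io_fini(env, io);              // always finalize
 *              return rc;
 *      }
 */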
static inline const struct lu_fid *
cl_lock_descr_fid(const struct cl_lock_descr *descr)
{
        return lu_object_fid(&descr->cld_obj->co_lu);
}

static int cl_lock_descr_cmp(const struct cl_lock_descr *d0,
                             const struct cl_lock_descr *d1)
{
        return lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1)) ?:
                __diff_normalize(d0->cld_start, d1->cld_start);
}
/**
 * Sort locks in lexicographical order of their (fid, start-offset) pairs.
 */
static void cl_io_locks_sort(struct cl_io *io)
{
        /* hidden treasure: bubble sort for now. */
        struct cl_io_lock_link *curr;
        struct cl_io_lock_link *prev;
        struct cl_io_lock_link *temp;

        list_for_each_entry_safe(curr, temp, &io->ci_lockset.cls_todo,
                                 cill_linkage) {
                switch (cl_lock_descr_cmp(&prev->cill_descr,
                                          &curr->cill_descr)) {
                case 0:
                        /*
                         * IMPOSSIBLE: Identical locks are
                         *             already removed at this point.
                         */
                case +1:
                        list_move_tail(&curr->cill_linkage,
                                       &prev->cill_linkage);
                        continue; /* don't change prev: it's
                                   * still "previous" */
                case -1: /* already in order */
                        break;
                }
        }
}
/**
 * Check whether \a queue contains locks matching \a need.
 *
 * \retval +ve there is a matching lock in the \a queue
 * \retval   0 there are no matching locks in the \a queue
 */
int cl_queue_match(const struct list_head *queue,
                   const struct cl_lock_descr *need)
{
        struct cl_io_lock_link *scan;

        list_for_each_entry(scan, queue, cill_linkage) {
                if (cl_lock_descr_match(&scan->cill_descr, need))
                        return +1;
        }
        return 0;
}
EXPORT_SYMBOL(cl_queue_match);

static int cl_lockset_match(const struct cl_lockset *set,
                            const struct cl_lock_descr *need, int all_queues)
{
        return (all_queues ? cl_queue_match(&set->cls_todo, need) : 0) ||
                cl_queue_match(&set->cls_curr, need) ||
                cl_queue_match(&set->cls_done, need);
}
static int cl_lockset_lock_one(const struct lu_env *env,
                               struct cl_io *io, struct cl_lockset *set,
                               struct cl_io_lock_link *link)
{
        struct cl_lock *lock;
        int             result;

        lock = cl_lock_request(env, io, &link->cill_descr, link->cill_enq_flags,
                               "io", io);
        if (!IS_ERR(lock)) {
                link->cill_lock = lock;
                list_move(&link->cill_linkage, &set->cls_curr);
                if (!(link->cill_enq_flags & CEF_ASYNC)) {
                        result = cl_wait(env, lock);
                        if (result == 0)
                                list_move(&link->cill_linkage, &set->cls_done);
                } else
                        result = 0;
        } else
                result = PTR_ERR(lock);
        return result;
}

static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io,
                              struct cl_io_lock_link *link)
{
        struct cl_lock *lock = link->cill_lock;

        list_del_init(&link->cill_linkage);
        if (lock != NULL) {
                cl_lock_release(env, lock, "io", io);
                link->cill_lock = NULL;
        }
        if (link->cill_fini != NULL)
                link->cill_fini(env, link);
}
static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
                           struct cl_lockset *set)
{
        struct cl_io_lock_link *link;
        struct cl_io_lock_link *temp;
        struct cl_lock         *lock;
        int result;

        result = 0;
        list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
                if (!cl_lockset_match(set, &link->cill_descr, 0)) {
                        /* XXX some locking to guarantee that locks aren't
                         * expanded in between. */
                        result = cl_lockset_lock_one(env, io, set, link);
                        if (result != 0)
                                break;
                } else
                        cl_lock_link_fini(env, io, link);
        }
        if (result == 0) {
                list_for_each_entry_safe(link, temp,
                                         &set->cls_curr, cill_linkage) {
                        lock = link->cill_lock;
                        result = cl_wait(env, lock);
                        if (result == 0)
                                list_move(&link->cill_linkage, &set->cls_done);
                        else
                                break;
                }
        }
        return result;
}
/**
 * Takes locks necessary for the current iteration of io.
 *
 * Calls cl_io_operations::cio_lock() top-to-bottom to collect locks required
 * by layers for the current iteration. Then sorts locks (to avoid dead-locks)
 * and acquires them.
 */
int cl_io_lock(const struct lu_env *env, struct cl_io *io)
{
        const struct cl_io_slice *scan;
        int result = 0;

        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_IT_STARTED);
        LINVRNT(cl_io_invariant(io));

        cl_io_for_each(scan, io) {
                if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
                        continue;
                result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
                if (result != 0)
                        break;
        }
        if (result == 0) {
                cl_io_locks_sort(io);
                result = cl_lockset_lock(env, io, &io->ci_lockset);
        }
        if (result != 0)
                cl_io_unlock(env, io);
        else
                io->ci_state = CIS_LOCKED;
        return result;
}
EXPORT_SYMBOL(cl_io_lock);
/**
 * Release locks taken by io.
 */
void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
{
        struct cl_lockset        *set;
        struct cl_io_lock_link   *link;
        struct cl_io_lock_link   *temp;
        const struct cl_io_slice *scan;

        LASSERT(cl_io_is_loopable(io));
        LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED);
        LINVRNT(cl_io_invariant(io));

        set = &io->ci_lockset;

        list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
                cl_lock_link_fini(env, io, link);

        list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage)
                cl_lock_link_fini(env, io, link);

        list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
                cl_unuse(env, link->cill_lock);
                cl_lock_link_fini(env, io, link);
        }
        cl_io_for_each_reverse(scan, io) {
                if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
                        scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
        }
        io->ci_state = CIS_UNLOCKED;
        LASSERT(cl_env_info(env)->clt_nr_locks_acquired == 0);
}
EXPORT_SYMBOL(cl_io_unlock);
/**
 * Prepares next iteration of io.
 *
 * Calls cl_io_operations::cio_iter_init() top-to-bottom. This exists to give
 * layers a chance to modify io parameters, e.g., so that lov can restrict io
 * to a single stripe.
 */
int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
{
        const struct cl_io_slice *scan;
        int result = 0;

        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED);
        LINVRNT(cl_io_invariant(io));

        cl_io_for_each(scan, io) {
                if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
                        continue;
                result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
                                                                      scan);
                if (result != 0)
                        break;
        }
        if (result == 0)
                io->ci_state = CIS_IT_STARTED;
        return result;
}
EXPORT_SYMBOL(cl_io_iter_init);

/**
 * Finalizes io iteration.
 *
 * Calls cl_io_operations::cio_iter_fini() bottom-to-top.
 */
void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
{
        const struct cl_io_slice *scan;

        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_UNLOCKED);
        LINVRNT(cl_io_invariant(io));

        cl_io_for_each_reverse(scan, io) {
                if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
                        scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
        }
        io->ci_state = CIS_IT_ENDED;
}
EXPORT_SYMBOL(cl_io_iter_fini);
/**
 * Records that read or write io progressed \a nob bytes forward.
 */
void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, size_t nob)
{
        const struct cl_io_slice *scan;

        LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
                nob == 0);
        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(cl_io_invariant(io));

        io->u.ci_rw.crw_pos   += nob;
        io->u.ci_rw.crw_count -= nob;

        /* layers have to be notified. */
        cl_io_for_each_reverse(scan, io) {
                if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
                        scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
                                                                   nob);
        }
}
EXPORT_SYMBOL(cl_io_rw_advance);
/**
 * Adds a lock to a lockset.
 */
int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
                   struct cl_io_lock_link *link)
{
        if (cl_lockset_match(&io->ci_lockset, &link->cill_descr, 1))
                return +1;
        list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
        return 0;
}
EXPORT_SYMBOL(cl_io_lock_add);

static void cl_free_io_lock_link(const struct lu_env *env,
                                 struct cl_io_lock_link *link)
{
        OBD_FREE_PTR(link);
}

/**
 * Allocates new lock link, and uses it to add a lock to a lockset.
 */
int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
                         struct cl_lock_descr *descr)
{
        struct cl_io_lock_link *link;
        int result;

        OBD_ALLOC_PTR(link);
        if (link == NULL)
                return -ENOMEM;

        link->cill_descr = *descr;
        link->cill_fini  = cl_free_io_lock_link;
        result = cl_io_lock_add(env, io, link);
        if (result) /* lock match */
                link->cill_fini(env, link);
        return result;
}
EXPORT_SYMBOL(cl_io_lock_alloc_add);
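
/*
 * Illustrative sketch (not from the original file): a layer's ->cio_lock()
 * method would typically describe the extent it needs and register it with
 * the io's lockset through cl_io_lock_alloc_add().  Descriptor fields other
 * than cld_obj and cld_start (end offset, lock mode) are assumptions here
 * and depend on the definition of struct cl_lock_descr in cl_object.h.
 *
 *      static int my_layer_io_lock(const struct lu_env *env,
 *                                  const struct cl_io_slice *ios)
 *      {
 *              struct cl_lock_descr descr;
 *
 *              memset(&descr, 0, sizeof descr);
 *              descr.cld_obj   = ios->cis_obj;   // object this slice covers
 *              descr.cld_start = 0;              // first page of the range
 *              // end offset and lock mode: set per cl_object.h
 *              return cl_io_lock_alloc_add(env, ios->cis_io, &descr);
 *      }
 */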
/**
 * Starts io by calling cl_io_operations::cio_start() top-to-bottom.
 */
int cl_io_start(const struct lu_env *env, struct cl_io *io)
{
        const struct cl_io_slice *scan;
        int result = 0;

        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_LOCKED);
        LINVRNT(cl_io_invariant(io));

        io->ci_state = CIS_IO_GOING;
        cl_io_for_each(scan, io) {
                if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
                        continue;
                result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
                if (result != 0)
                        break;
        }
        return result;
}
EXPORT_SYMBOL(cl_io_start);

/**
 * Wait until current io iteration is finished by calling
 * cl_io_operations::cio_end() bottom-to-top.
 */
void cl_io_end(const struct lu_env *env, struct cl_io *io)
{
        const struct cl_io_slice *scan;

        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_IO_GOING);
        LINVRNT(cl_io_invariant(io));

        cl_io_for_each_reverse(scan, io) {
                if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
                        scan->cis_iop->op[io->ci_type].cio_end(env, scan);
                /* TODO: error handling. */
        }
        io->ci_state = CIS_IO_FINISHED;
}
EXPORT_SYMBOL(cl_io_end);
static const struct cl_page_slice *
cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
{
        const struct cl_page_slice *slice;

        slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type);
        LINVRNT(slice != NULL);
        return slice;
}

/**
 * True iff \a page is within \a io range.
 */
static int cl_page_in_io(const struct cl_page *page, const struct cl_io *io)
{
        int result = 1;
        loff_t start, end;
        pgoff_t idx;

        idx = page->cp_index;
        switch (io->ci_type) {
        case CIT_READ:
        case CIT_WRITE:
                /*
                 * check that [start, end) and [pos, pos + count) extents
                 * overlap.
                 */
                start  = cl_offset(page->cp_obj, idx);
                end    = cl_offset(page->cp_obj, idx + 1);
                result = io->u.ci_rw.crw_pos < end &&
                         start < io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count;
                break;
        case CIT_FAULT:
                result = io->u.ci_fault.ft_index == idx;
                break;
        default:
                break;
        }
        return result;
}
/**
 * Called by read io, when page has to be read from the server.
 *
 * \see cl_io_operations::cio_read_page()
 */
int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
                    struct cl_page *page)
{
        const struct cl_io_slice *scan;
        struct cl_2queue         *queue;
        int                       result = 0;

        LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
        LINVRNT(cl_page_is_owned(page, io));
        LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
        LINVRNT(cl_page_in_io(page, io));
        LINVRNT(cl_io_invariant(io));

        queue = &io->ci_queue;

        cl_2queue_init(queue);
        /*
         * ->cio_read_page() methods called in the loop below are supposed to
         * never block waiting for network (the only subtle point is the
         * creation of new pages for read-ahead that might result in cache
         * shrinking, but currently only clean pages are shrunk and this
         * requires no network io).
         *
         * Should this ever start blocking, a retry loop would be needed for
         * "parallel io" (see CLO_REPEAT loops in cl_lock.c).
         */
        cl_io_for_each(scan, io) {
                if (scan->cis_iop->cio_read_page != NULL) {
                        const struct cl_page_slice *slice;

                        slice = cl_io_slice_page(scan, page);
                        LINVRNT(slice != NULL);
                        result = scan->cis_iop->cio_read_page(env, scan, slice);
                        if (result != 0)
                                break;
                }
        }
        result = cl_io_submit_rw(env, io, CRT_READ, queue);
        /*
         * Unlock unsent pages in case of error.
         */
        cl_page_list_disown(env, io, &queue->c2_qin);
        cl_2queue_fini(env, queue);
        return result;
}
EXPORT_SYMBOL(cl_io_read_page);
/**
 * Called by write io to prepare page to receive data from user buffer.
 *
 * \see cl_io_operations::cio_prepare_write()
 */
int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
                        struct cl_page *page, unsigned from, unsigned to)
{
        const struct cl_io_slice *scan;
        int result = 0;

        LINVRNT(io->ci_type == CIT_WRITE);
        LINVRNT(cl_page_is_owned(page, io));
        LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
        LINVRNT(cl_io_invariant(io));
        LASSERT(cl_page_in_io(page, io));

        cl_io_for_each_reverse(scan, io) {
                if (scan->cis_iop->cio_prepare_write != NULL) {
                        const struct cl_page_slice *slice;

                        slice = cl_io_slice_page(scan, page);
                        result = scan->cis_iop->cio_prepare_write(env, scan,
                                                                  slice,
                                                                  from, to);
                        if (result != 0)
                                break;
                }
        }
        return result;
}
EXPORT_SYMBOL(cl_io_prepare_write);

/**
 * Called by write io after user data were copied into a page.
 *
 * \see cl_io_operations::cio_commit_write()
 */
int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
                       struct cl_page *page, unsigned from, unsigned to)
{
        const struct cl_io_slice *scan;
        int result = 0;

        LINVRNT(io->ci_type == CIT_WRITE);
        LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
        LINVRNT(cl_io_invariant(io));
        /*
         * XXX Uh... not nice. Top level cl_io_commit_write() call (vvp->lov)
         * already called cl_page_cache_add(), moving page into CPS_CACHED
         * state. Better (and more general) way of dealing with such situation
         * is needed.
         */
        LASSERT(cl_page_is_owned(page, io) || page->cp_parent != NULL);
        LASSERT(cl_page_in_io(page, io));

        cl_io_for_each(scan, io) {
                if (scan->cis_iop->cio_commit_write != NULL) {
                        const struct cl_page_slice *slice;

                        slice = cl_io_slice_page(scan, page);
                        result = scan->cis_iop->cio_commit_write(env, scan,
                                                                 slice,
                                                                 from, to);
                        if (result != 0)
                                break;
                }
        }
        LINVRNT(result <= 0);
        return result;
}
EXPORT_SYMBOL(cl_io_commit_write);
/**
 * Submits a list of pages for immediate io.
 *
 * After the function returns, successfully submitted pages are moved to
 * queue->c2_qout, while queue->c2_qin contains both the pages that did not
 * need to be submitted and the pages that failed to submit.
 *
 * \returns 0 if at least one page was submitted, error code otherwise.
 * \see cl_io_operations::cio_submit()
 */
int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
                    enum cl_req_type crt, struct cl_2queue *queue)
{
        const struct cl_io_slice *scan;
        int result = 0;

        LINVRNT(crt < ARRAY_SIZE(scan->cis_iop->req_op));

        cl_io_for_each(scan, io) {
                if (scan->cis_iop->req_op[crt].cio_submit == NULL)
                        continue;
                result = scan->cis_iop->req_op[crt].cio_submit(env, scan, crt,
                                                               queue);
                if (result != 0)
                        break;
        }
        /*
         * If ->cio_submit() failed, no pages were sent.
         */
        LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages)));
        return result;
}
EXPORT_SYMBOL(cl_io_submit_rw);
/**
 * Cancel an IO which has been submitted by cl_io_submit_rw.
 */
int cl_io_cancel(const struct lu_env *env, struct cl_io *io,
                 struct cl_page_list *queue)
{
        struct cl_page *page;
        int result = 0;

        CERROR("Canceling ongoing page transmission\n");
        cl_page_list_for_each(page, queue) {
                int rc;

                LINVRNT(cl_page_in_io(page, io));
                rc = cl_page_cancel(env, page);
                result = result ?: rc;
        }
        return result;
}
EXPORT_SYMBOL(cl_io_cancel);
/**
 * Pumps io through iterations calling
 *
 *    - cl_io_iter_init()
 *    - cl_io_lock()
 *    - cl_io_start()
 *    - cl_io_end()
 *    - cl_io_unlock()
 *    - cl_io_iter_fini()
 *
 * repeatedly until there is no more io to do.
 */
int cl_io_loop(const struct lu_env *env, struct cl_io *io)
{
        int result = 0;

        LINVRNT(cl_io_is_loopable(io));

        do {
                size_t nob;

                io->ci_continue = 0;
                result = cl_io_iter_init(env, io);
                if (result == 0) {
                        nob    = io->ci_nob;
                        result = cl_io_lock(env, io);
                        if (result == 0) {
                                /*
                                 * Notify layers that locks have been taken,
                                 * and do actual i/o.
                                 *
                                 *   - llite: kms, short read;
                                 *   - llite: generic_file_read();
                                 */
                                result = cl_io_start(env, io);
                                /*
                                 * Send any remaining pending io.
                                 *
                                 *   - llite: ll_rw_stats_tally.
                                 */
                                cl_io_end(env, io);
                                cl_io_unlock(env, io);
                                cl_io_rw_advance(env, io, io->ci_nob - nob);
                        }
                }
                cl_io_iter_fini(env, io);
        } while (result == 0 && io->ci_continue);
        RETURN(result < 0 ? result : 0);
}
EXPORT_SYMBOL(cl_io_loop);
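
/*
 * Illustrative sketch (not from the original file): the loop above relies
 * on the layers to clip each iteration and request further ones.  A layer's
 * ->cio_iter_init() might shrink the current [crw_pos, crw_pos + crw_count)
 * range to what it can handle in one go and set io->ci_continue when more
 * work remains; cl_io_rw_advance() then moves crw_pos forward by the bytes
 * actually transferred (io->ci_nob - nob).  The stripe_end() helper below
 * is hypothetical.
 *
 *      static int my_layer_io_iter_init(const struct lu_env *env,
 *                                       const struct cl_io_slice *ios)
 *      {
 *              struct cl_io *io  = ios->cis_io;
 *              loff_t        end = stripe_end(io->u.ci_rw.crw_pos);
 *
 *              if (io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count > end) {
 *                      io->u.ci_rw.crw_count = end - io->u.ci_rw.crw_pos;
 *                      io->ci_continue = 1; // ask cl_io_loop() for one more pass
 *              }
 *              return 0;
 *      }
 */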
/**
 * Adds io slice to the cl_io.
 *
 * This is called by cl_object_operations::coo_io_init() methods to add a
 * per-layer state to the io. New state is added at the end of
 * cl_io::ci_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_page_slice_add()
 */
void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
                     struct cl_object *obj,
                     const struct cl_io_operations *ops)
{
        struct list_head *linkage = &slice->cis_linkage;

        LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
                list_empty(linkage));

        list_add_tail(linkage, &io->ci_layers);
        slice->cis_io  = io;
        slice->cis_obj = obj;
        slice->cis_iop = ops;
}
EXPORT_SYMBOL(cl_io_slice_add);
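
/*
 * Illustrative sketch (not from the original file): a layer's coo_io_init()
 * method would embed a cl_io_slice in its own per-io state and register it
 * with the stack.  The my_io structure, my_io_ops table and my_io_get()
 * helper (e.g. fetching per-thread state from the session) are assumptions.
 *
 *      static int my_object_io_init(const struct lu_env *env,
 *                                   struct cl_object *obj, struct cl_io *io)
 *      {
 *              struct my_io *mio = my_io_get(env);  // hypothetical per-thread state
 *
 *              cl_io_slice_add(io, &mio->mi_cl, obj, &my_io_ops);
 *              return 0;
 *      }
 */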
/**
 * Initializes page list.
 */
void cl_page_list_init(struct cl_page_list *plist)
{
        plist->pl_nr = 0;
        CFS_INIT_LIST_HEAD(&plist->pl_pages);
        plist->pl_owner = cfs_current();
}
EXPORT_SYMBOL(cl_page_list_init);

/**
 * Adds a page to a page list.
 */
void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
{
        /* it would be better to check that page is owned by "current" io, but
         * it is not passed here. */
        LASSERT(page->cp_owner != NULL);
        LINVRNT(plist->pl_owner == cfs_current());

        mutex_lock(&page->cp_mutex);

        LASSERT(list_empty(&page->cp_batch));
        list_add_tail(&page->cp_batch, &plist->pl_pages);
        ++plist->pl_nr;
        page->cp_queue_ref = lu_ref_add(&page->cp_reference, "queue", plist);
}
EXPORT_SYMBOL(cl_page_list_add);
/**
 * Removes a page from a page list.
 */
void cl_page_list_del(const struct lu_env *env,
                      struct cl_page_list *plist, struct cl_page *page)
{
        LASSERT(plist->pl_nr > 0);
        LINVRNT(plist->pl_owner == cfs_current());

        list_del_init(&page->cp_batch);
        --plist->pl_nr;
        mutex_unlock(&page->cp_mutex);

        lu_ref_del_at(&page->cp_reference, page->cp_queue_ref, "queue", plist);
        cl_page_put(env, page);
}
EXPORT_SYMBOL(cl_page_list_del);

/**
 * Moves a page from one page list to another.
 */
void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
                       struct cl_page *page)
{
        LASSERT(src->pl_nr > 0);
        LINVRNT(dst->pl_owner == cfs_current());
        LINVRNT(src->pl_owner == cfs_current());

        list_move_tail(&page->cp_batch, &dst->pl_pages);
        --src->pl_nr;
        ++dst->pl_nr;
        lu_ref_set_at(&page->cp_reference,
                      page->cp_queue_ref, "queue", src, dst);
}
EXPORT_SYMBOL(cl_page_list_move);

/**
 * Splice the cl_page_list, just as a list head does.
 */
void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
{
        struct cl_page *page;
        struct cl_page *tmp;

        LINVRNT(list->pl_owner == cfs_current());
        LINVRNT(head->pl_owner == cfs_current());

        cl_page_list_for_each_safe(page, tmp, list)
                cl_page_list_move(head, list, page);
}
EXPORT_SYMBOL(cl_page_list_splice);
void cl_page_disown0(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg);

/**
 * Disowns pages in a queue.
 */
void cl_page_list_disown(const struct lu_env *env,
                         struct cl_io *io, struct cl_page_list *plist)
{
        struct cl_page *page;
        struct cl_page *temp;

        LINVRNT(plist->pl_owner == cfs_current());

        cl_page_list_for_each_safe(page, temp, plist) {
                LASSERT(plist->pl_nr > 0);

                list_del_init(&page->cp_batch);
                --plist->pl_nr;
                mutex_unlock(&page->cp_mutex);
                /*
                 * cl_page_disown0 rather than usual cl_page_disown() is used,
                 * because pages are possibly in CPS_FREEING state already due
                 * to the call to cl_page_list_discard().
                 */
                /*
                 * XXX cl_page_disown0() will fail if page is not locked.
                 */
                cl_page_disown0(env, io, page);
                lu_ref_del(&page->cp_reference, "queue", plist);
                cl_page_put(env, page);
        }
}
EXPORT_SYMBOL(cl_page_list_disown);

/**
 * Releases pages from queue.
 */
void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
{
        struct cl_page *page;
        struct cl_page *temp;

        LINVRNT(plist->pl_owner == cfs_current());

        cl_page_list_for_each_safe(page, temp, plist)
                cl_page_list_del(env, plist, page);
        LASSERT(plist->pl_nr == 0);
}
EXPORT_SYMBOL(cl_page_list_fini);
/**
 * Owns all pages in a queue.
 */
int cl_page_list_own(const struct lu_env *env,
                     struct cl_io *io, struct cl_page_list *plist)
{
        struct cl_page *page;
        struct cl_page *temp;
        int result = 0;

        LINVRNT(plist->pl_owner == cfs_current());

        cl_page_list_for_each_safe(page, temp, plist) {
                if (cl_page_own(env, io, page) == 0)
                        result = result ?: page->cp_error;
                else
                        cl_page_list_del(env, plist, page);
        }
        return result;
}
EXPORT_SYMBOL(cl_page_list_own);

/**
 * Assumes ownership of all pages in a queue.
 */
void cl_page_list_assume(const struct lu_env *env,
                         struct cl_io *io, struct cl_page_list *plist)
{
        struct cl_page *page;

        LINVRNT(plist->pl_owner == cfs_current());

        cl_page_list_for_each(page, plist)
                cl_page_assume(env, io, page);
}
EXPORT_SYMBOL(cl_page_list_assume);

/**
 * Discards all pages in a queue.
 */
void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
                          struct cl_page_list *plist)
{
        struct cl_page *page;

        LINVRNT(plist->pl_owner == cfs_current());

        cl_page_list_for_each(page, plist)
                cl_page_discard(env, io, page);
}
EXPORT_SYMBOL(cl_page_list_discard);

/**
 * Unmaps all pages in a queue from user virtual memory.
 */
int cl_page_list_unmap(const struct lu_env *env, struct cl_io *io,
                       struct cl_page_list *plist)
{
        struct cl_page *page;
        int result = 0;

        LINVRNT(plist->pl_owner == cfs_current());

        cl_page_list_for_each(page, plist) {
                result = cl_page_unmap(env, io, page);
                if (result != 0)
                        break;
        }
        return result;
}
EXPORT_SYMBOL(cl_page_list_unmap);
/**
 * Initialize dual page queue.
 */
void cl_2queue_init(struct cl_2queue *queue)
{
        cl_page_list_init(&queue->c2_qin);
        cl_page_list_init(&queue->c2_qout);
}
EXPORT_SYMBOL(cl_2queue_init);

/**
 * Add a page to the incoming page list of 2-queue.
 */
void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page)
{
        cl_page_list_add(&queue->c2_qin, page);
}
EXPORT_SYMBOL(cl_2queue_add);

/**
 * Disown pages in both lists of a 2-queue.
 */
void cl_2queue_disown(const struct lu_env *env,
                      struct cl_io *io, struct cl_2queue *queue)
{
        cl_page_list_disown(env, io, &queue->c2_qin);
        cl_page_list_disown(env, io, &queue->c2_qout);
}
EXPORT_SYMBOL(cl_2queue_disown);

/**
 * Discard (truncate) pages in both lists of a 2-queue.
 */
void cl_2queue_discard(const struct lu_env *env,
                       struct cl_io *io, struct cl_2queue *queue)
{
        cl_page_list_discard(env, io, &queue->c2_qin);
        cl_page_list_discard(env, io, &queue->c2_qout);
}
EXPORT_SYMBOL(cl_2queue_discard);

/**
 * Assume ownership of the pages in both lists of a 2-queue.
 */
void cl_2queue_assume(const struct lu_env *env,
                      struct cl_io *io, struct cl_2queue *queue)
{
        cl_page_list_assume(env, io, &queue->c2_qin);
        cl_page_list_assume(env, io, &queue->c2_qout);
}
EXPORT_SYMBOL(cl_2queue_assume);

/**
 * Finalize both page lists of a 2-queue.
 */
void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue)
{
        cl_page_list_fini(env, &queue->c2_qout);
        cl_page_list_fini(env, &queue->c2_qin);
}
EXPORT_SYMBOL(cl_2queue_fini);

/**
 * Initialize a 2-queue to contain \a page in its incoming page list.
 */
void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page)
{
        cl_2queue_init(queue);
        cl_2queue_add(queue, page);
}
EXPORT_SYMBOL(cl_2queue_init_page);
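
/*
 * Illustrative sketch (not from the original file): queuing a group of
 * already-owned pages and submitting them for read with the helpers above.
 * Error handling and the origin of the pages are left out; only functions
 * exported from this file are used.
 *
 *      int submit_pages(const struct lu_env *env, struct cl_io *io,
 *                       struct cl_page **pages, int nr)
 *      {
 *              struct cl_2queue *queue = &io->ci_queue;
 *              int i, rc;
 *
 *              cl_2queue_init(queue);
 *              for (i = 0; i < nr; i++)
 *                      cl_2queue_add(queue, pages[i]);   // pages must be owned by io
 *              rc = cl_io_submit_rw(env, io, CRT_READ, queue);
 *              cl_page_list_disown(env, io, &queue->c2_qin); // unlock what wasn't sent
 *              cl_2queue_fini(env, queue);
 *              return rc;
 *      }
 */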
/**
 * Returns top-level io.
 *
 * \see cl_object_top(), cl_page_top().
 */
struct cl_io *cl_io_top(struct cl_io *io)
{
        while (io->ci_parent != NULL)
                io = io->ci_parent;
        return io;
}
EXPORT_SYMBOL(cl_io_top);

/**
 * Prints human readable representation of \a io to the given \a printer.
 */
void cl_io_print(const struct lu_env *env, void *cookie,
                 lu_printer_t printer, const struct cl_io *io)
/**
 * Adds request slice to the compound request.
 *
 * This is called by cl_device_operations::cdo_req_init() methods to add a
 * per-layer state to the request. New state is added at the end of
 * cl_req::crq_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_page_slice_add(), cl_io_slice_add()
 */
void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
                      struct cl_device *dev,
                      const struct cl_req_operations *ops)
{
        list_add_tail(&slice->crs_linkage, &req->crq_layers);
        slice->crs_dev = dev;
        slice->crs_ops = ops;
        slice->crs_req = req;
}
EXPORT_SYMBOL(cl_req_slice_add);

static void cl_req_free(const struct lu_env *env, struct cl_req *req)
{
        LASSERT(list_empty(&req->crq_pages));
        LASSERT(req->crq_nrpages == 0);
        LINVRNT(list_empty(&req->crq_layers));
        LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL));

        if (req->crq_o != NULL) {
                int i;

                for (i = 0; i < req->crq_nrobjs; ++i) {
                        struct cl_object *obj = req->crq_o[i].ro_obj;

                        if (obj != NULL) {
                                lu_object_ref_del_at(&obj->co_lu,
                                                     req->crq_o[i].ro_obj_ref,
                                                     "cl_req", req);
                                cl_object_put(env, obj);
                        }
                }
                OBD_FREE(req->crq_o, req->crq_nrobjs * sizeof req->crq_o[0]);
        }
static int cl_req_init(const struct lu_env *env, struct cl_req *req,
                       struct cl_page *page)
{
        struct cl_device     *dev;
        struct cl_page_slice *slice;
        int result;

        result = 0;
        page = cl_page_top(page);
        do {
                list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                        dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
                        if (dev->cd_ops->cdo_req_init != NULL) {
                                result = dev->cd_ops->cdo_req_init(env,
                                                                   dev, req);
                                if (result != 0)
                                        break;
                        }
                }
                page = page->cp_child;
        } while (page != NULL && result == 0);
        return result;
}

/**
 * Invokes per-request transfer completion call-backs
 * (cl_req_operations::cro_completion()) bottom-to-top.
 */
void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
{
        struct cl_req_slice *slice;

        /*
         * for the lack of list_for_each_entry_reverse_safe()...
         */
        while (!list_empty(&req->crq_layers)) {
                slice = list_entry(req->crq_layers.prev,
                                   struct cl_req_slice, crs_linkage);
                list_del_init(&slice->crs_linkage);
                if (slice->crs_ops->cro_completion != NULL)
                        slice->crs_ops->cro_completion(env, slice, rc);
        }
        cl_req_free(env, req);
}
EXPORT_SYMBOL(cl_req_completion);
/**
 * Allocates new transfer request.
 */
struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
                            enum cl_req_type crt, int nr_objects)
{
        struct cl_req *req;

        LINVRNT(nr_objects > 0);

        OBD_ALLOC_PTR(req);
        if (req != NULL) {
                int result;

                OBD_ALLOC(req->crq_o, nr_objects * sizeof req->crq_o[0]);
                if (req->crq_o != NULL) {
                        req->crq_nrobjs = nr_objects;
                        req->crq_type   = crt;
                        CFS_INIT_LIST_HEAD(&req->crq_pages);
                        CFS_INIT_LIST_HEAD(&req->crq_layers);
                        result = cl_req_init(env, req, page);
                } else
                        result = -ENOMEM;
                if (result != 0) {
                        cl_req_completion(env, req, result);
                        req = ERR_PTR(result);
                }
        } else
                req = ERR_PTR(-ENOMEM);
        return req;
}
EXPORT_SYMBOL(cl_req_alloc);

/**
 * Adds a page to a request.
 */
void cl_req_page_add(const struct lu_env *env,
                     struct cl_req *req, struct cl_page *page)
{
        struct cl_object  *obj;
        struct cl_req_obj *rqo;
        int i;

        page = cl_page_top(page);

        LINVRNT(cl_page_is_vmlocked(env, page));
        LASSERT(list_empty(&page->cp_flight));
        LASSERT(page->cp_req == NULL);

        list_add_tail(&page->cp_flight, &req->crq_pages);
        ++req->crq_nrpages;
        page->cp_req = req;
        obj = cl_object_top(page->cp_obj);
        for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) {
                if (rqo->ro_obj == NULL) {
                        rqo->ro_obj = obj;
                        cl_object_get(obj);
                        rqo->ro_obj_ref = lu_object_ref_add(&obj->co_lu,
                                                            "cl_req", req);
                        break;
                }
        }
        LASSERT(i < req->crq_nrobjs);
}
EXPORT_SYMBOL(cl_req_page_add);
/**
 * Removes a page from a request.
 */
void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
{
        struct cl_req *req = page->cp_req;

        page = cl_page_top(page);

        LINVRNT(cl_page_is_vmlocked(env, page));
        LASSERT(!list_empty(&page->cp_flight));
        LASSERT(req->crq_nrpages > 0);

        list_del_init(&page->cp_flight);
        --req->crq_nrpages;
        page->cp_req = NULL;
}
EXPORT_SYMBOL(cl_req_page_done);

/**
 * Notifies layers that request is about to depart by calling
 * cl_req_operations::cro_prep() top-to-bottom.
 */
int cl_req_prep(const struct lu_env *env, struct cl_req *req)
{
        int i;
        int result;
        const struct cl_req_slice *slice;

        /*
         * Check that the caller of cl_req_alloc() didn't lie about the number
         * of objects.
         */
        for (i = 0; i < req->crq_nrobjs; ++i)
                LASSERT(req->crq_o[i].ro_obj != NULL);

        result = 0;
        list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
                if (slice->crs_ops->cro_prep != NULL) {
                        result = slice->crs_ops->cro_prep(env, slice);
                        if (result != 0)
                                break;
                }
        }
        return result;
}
EXPORT_SYMBOL(cl_req_prep);
/**
 * Fills in attributes that are passed to server together with transfer. Only
 * attributes from \a flags may be touched. This can be called multiple times
 * for the same request.
 */
void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
                     struct cl_req_attr *attr, obd_valid flags)
{
        const struct cl_req_slice *slice;
        struct cl_page            *page;
        int i;

        LASSERT(!list_empty(&req->crq_pages));

        /* Take any page to use as a model. */
        page = list_entry(req->crq_pages.next, struct cl_page, cp_flight);

        for (i = 0; i < req->crq_nrobjs; ++i) {
                list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
                        const struct cl_page_slice *scan;
                        const struct cl_object     *obj;

                        scan = cl_page_at(page,
                                          slice->crs_dev->cd_lu_dev.ld_type);
                        LASSERT(scan != NULL);
                        obj = scan->cpl_obj;
                        if (slice->crs_ops->cro_attr_set != NULL)
                                slice->crs_ops->cro_attr_set(env, slice, obj,
                                                             attr + i, flags);
                }
        }
}
EXPORT_SYMBOL(cl_req_attr_set);
/* XXX complete(), init_completion(), and wait_for_completion(), until they
 * are implemented in libcfs. */
#ifdef __KERNEL__
# include <linux/sched.h>
#else /* __KERNEL__ */
# include <liblustre.h>
#endif

/**
 * Initialize synchronous io wait anchor, for transfer of \a nrpages pages.
 */
void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
{
        init_completion(&anchor->csi_sync_completion);
        atomic_set(&anchor->csi_sync_nr, nrpages);
        anchor->csi_sync_rc = 0;
}
EXPORT_SYMBOL(cl_sync_io_init);
/**
 * Wait until all transfers complete. Transfer completion routine has to call
 * cl_sync_io_note() for every page.
 */
int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
                    struct cl_page_list *queue, struct cl_sync_io *anchor)
{
        int rc;

        rc = wait_for_completion_interruptible(&anchor->csi_sync_completion);
        if (rc < 0) {
                int rc2;

                rc2 = cl_io_cancel(env, io, queue);
                if (rc2 < 0) {
                        /* Too bad, some pages are still in IO. */
                        CDEBUG(D_VFSTRACE, "Failed to cancel transfer (%i). "
                               "Waiting for %i pages\n",
                               rc2, atomic_read(&anchor->csi_sync_nr));
                        wait_for_completion(&anchor->csi_sync_completion);
                        rc = 0;
                }
        } else
                rc = anchor->csi_sync_rc;
        LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
        cl_page_list_assume(env, io, queue);
        POISON(anchor, 0x5a, sizeof *anchor);
        return rc;
}
EXPORT_SYMBOL(cl_sync_io_wait);

/**
 * Indicate that transfer of a single page completed.
 */
void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
{
        if (anchor->csi_sync_rc == 0 && ioret < 0)
                anchor->csi_sync_rc = ioret;
        /*
         * Synchronous IO done without releasing page lock (e.g., as a part of
         * ->{prepare,commit}_write()). Completion is used to signal the end
         * of IO.
         */
        if (atomic_dec_and_test(&anchor->csi_sync_nr))
                complete(&anchor->csi_sync_completion);
}
EXPORT_SYMBOL(cl_sync_io_note);
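
/*
 * Illustrative sketch (not from the original file): synchronous submission
 * of a queue of pages.  The anchor is initialized with the number of pages
 * in flight, the transfer completion path calls cl_sync_io_note() once per
 * page, and the submitter blocks in cl_sync_io_wait().  How the completion
 * path finds the anchor (e.g. through the page) is outside this file and is
 * not shown; keeping the anchor on the caller's stack is an assumption.
 *
 *      int submit_and_wait(const struct lu_env *env, struct cl_io *io,
 *                          struct cl_2queue *queue, int nrpages)
 *      {
 *              struct cl_sync_io anchor;
 *              int rc;
 *
 *              cl_sync_io_init(&anchor, nrpages);
 *              rc = cl_io_submit_rw(env, io, CRT_WRITE, queue);
 *              if (rc == 0)
 *                      rc = cl_sync_io_wait(env, io, &queue->c2_qout, &anchor);
 *              return rc;
 *      }
 *
 * The completion side (e.g. an rpc callback) would then simply do:
 *
 *      cl_sync_io_note(&anchor, error);
 */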