1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
41 #define DEBUG_SUBSYSTEM S_CLASS
43 # define EXPORT_SYMTAB
46 #include <obd_class.h>
47 #include <obd_support.h>
48 #include <lustre_fid.h>
49 #include <libcfs/list.h>
50 /* lu_time_global_{init,fini}() */
53 #include <cl_object.h>
54 #include "cl_internal.h"
56 /*****************************************************************************
62 #define cl_io_for_each(slice, io) \
63 cfs_list_for_each_entry((slice), &io->ci_layers, cis_linkage)
64 #define cl_io_for_each_reverse(slice, io) \
65 cfs_list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
67 static inline int cl_io_type_is_valid(enum cl_io_type type)
69 return CIT_READ <= type && type < CIT_OP_NR;
72 static inline int cl_io_is_loopable(const struct cl_io *io)
74 return cl_io_type_is_valid(io->ci_type) && io->ci_type != CIT_MISC;
78 * Returns true iff there is an IO ongoing in the given environment.
80 int cl_io_is_going(const struct lu_env *env)
82 return cl_env_info(env)->clt_current_io != NULL;
84 EXPORT_SYMBOL(cl_io_is_going);
87 * cl_io invariant that holds at all times when exported cl_io_*() functions
88 * are entered and left.
90 static int cl_io_invariant(const struct cl_io *io)
97 * io can own pages only when it is ongoing. Sub-io might
98 * still be in CIS_LOCKED state when top-io is in
101 ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING ||
102 (io->ci_state == CIS_LOCKED && up != NULL));
106 * Finalize \a io, by calling cl_io_operations::cio_fini() bottom-to-top.
108 void cl_io_fini(const struct lu_env *env, struct cl_io *io)
110 struct cl_io_slice *slice;
111 struct cl_thread_info *info;
113 LINVRNT(cl_io_type_is_valid(io->ci_type));
114 LINVRNT(cl_io_invariant(io));
117 while (!cfs_list_empty(&io->ci_layers)) {
118 slice = container_of(io->ci_layers.next, struct cl_io_slice,
120 cfs_list_del_init(&slice->cis_linkage);
121 if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
122 slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
124 * Invalidate slice to catch use after free. This assumes that
125 * slices are allocated within session and can be touched
126 * after ->cio_fini() returns.
128 slice->cis_io = NULL;
130 io->ci_state = CIS_FINI;
131 info = cl_env_info(env);
132 if (info->clt_current_io == io)
133 info->clt_current_io = NULL;
136 EXPORT_SYMBOL(cl_io_fini);
138 static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
139 enum cl_io_type iot, struct cl_object *obj)
141 struct cl_object *scan;
144 LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI);
145 LINVRNT(cl_io_type_is_valid(iot));
146 LINVRNT(cl_io_invariant(io));
150 CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
151 CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_curr);
152 CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_done);
153 CFS_INIT_LIST_HEAD(&io->ci_layers);
156 cl_object_for_each(scan, obj) {
157 if (scan->co_ops->coo_io_init != NULL) {
158 result = scan->co_ops->coo_io_init(env, scan, io);
164 io->ci_state = CIS_INIT;
169 * Initialize sub-io, by calling cl_io_operations::cio_init() top-to-bottom.
171 * \pre obj != cl_object_top(obj)
173 int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
174 enum cl_io_type iot, struct cl_object *obj)
176 struct cl_thread_info *info = cl_env_info(env);
178 LASSERT(obj != cl_object_top(obj));
179 if (info->clt_current_io == NULL)
180 info->clt_current_io = io;
181 return cl_io_init0(env, io, iot, obj);
183 EXPORT_SYMBOL(cl_io_sub_init);
186 * Initialize \a io, by calling cl_io_operations::cio_init() top-to-bottom.
188 * Caller has to call cl_io_fini() after a call to cl_io_init(), no matter
189 * what the latter returned.
191 * \pre obj == cl_object_top(obj)
192 * \pre cl_io_type_is_valid(iot)
193 * \post cl_io_type_is_valid(io->ci_type) && io->ci_type == iot
195 int cl_io_init(const struct lu_env *env, struct cl_io *io,
196 enum cl_io_type iot, struct cl_object *obj)
198 struct cl_thread_info *info = cl_env_info(env);
200 LASSERT(obj == cl_object_top(obj));
201 LASSERT(info->clt_current_io == NULL);
203 info->clt_current_io = io;
204 return cl_io_init0(env, io, iot, obj);
206 EXPORT_SYMBOL(cl_io_init);
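/*
 * Illustrative sketch (not part of the original source): a minimal caller
 * honouring the rule above that cl_io_fini() must follow cl_io_init() no
 * matter what the latter returned.  The env, io and top-level object are
 * assumed to be supplied by the caller; example_misc_io() is a
 * hypothetical name.
 */
#if 0 /* example only, not compiled */
static int example_misc_io(const struct lu_env *env, struct cl_io *io,
                           struct cl_object *obj)
{
        int result;

        /* obj must be the top object, see the \pre above. */
        io->ci_obj = obj;
        result = cl_io_init(env, io, CIT_MISC, obj);
        if (result == 0) {
                /* ... drive the io through the layers ... */
        }
        /* Finalize unconditionally, even if initialization failed. */
        cl_io_fini(env, io);
        return result;
}
#endif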
209 * Initialize read or write io.
211 * \pre iot == CIT_READ || iot == CIT_WRITE
213 int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
214 enum cl_io_type iot, loff_t pos, size_t count)
216 LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
217 LINVRNT(io->ci_obj != NULL);
220 LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
221 "io range: %u ["LPU64", "LPU64") %u %u\n",
222 iot, (__u64)pos, (__u64)pos + count,
223 io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
224 io->u.ci_rw.crw_pos = pos;
225 io->u.ci_rw.crw_count = count;
226 RETURN(cl_io_init(env, io, iot, io->ci_obj));
228 EXPORT_SYMBOL(cl_io_rw_init);
230 static inline const struct lu_fid *
231 cl_lock_descr_fid(const struct cl_lock_descr *descr)
233 return lu_object_fid(&descr->cld_obj->co_lu);
236 static int cl_lock_descr_cmp(const struct cl_lock_descr *d0,
237 const struct cl_lock_descr *d1)
239 return lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1)) ?:
240 __diff_normalize(d0->cld_start, d1->cld_start);
244 * Sort locks in lexicographical order of their (fid, start-offset) pairs.
246 static void cl_io_locks_sort(struct cl_io *io)
251 /* hidden treasure: bubble sort for now. */
253 struct cl_io_lock_link *curr;
254 struct cl_io_lock_link *prev;
255 struct cl_io_lock_link *temp;
260 cfs_list_for_each_entry_safe(curr, temp,
261 &io->ci_lockset.cls_todo,
264 switch (cl_lock_descr_cmp(&prev->cill_descr,
265 &curr->cill_descr)) {
268 * IMPOSSIBLE: Identical locks are
275 cfs_list_move_tail(&curr->cill_linkage,
276 &prev->cill_linkage);
278 continue; /* don't change prev: it's
279 * still "previous" */
280 case -1: /* already in order */
291 * Check whether \a queue contains locks matching \a need.
293 * \retval +ve there is a matching lock in the \a queue
294 * \retval 0 there are no matching locks in the \a queue
296 int cl_queue_match(const cfs_list_t *queue,
297 const struct cl_lock_descr *need)
299 struct cl_io_lock_link *scan;
302 cfs_list_for_each_entry(scan, queue, cill_linkage) {
303 if (cl_lock_descr_match(&scan->cill_descr, need))
308 EXPORT_SYMBOL(cl_queue_match);
310 static int cl_lockset_match(const struct cl_lockset *set,
311 const struct cl_lock_descr *need, int all_queues)
313 return (all_queues ? cl_queue_match(&set->cls_todo, need) : 0) ||
314 cl_queue_match(&set->cls_curr, need) ||
315 cl_queue_match(&set->cls_done, need);
318 static int cl_lockset_lock_one(const struct lu_env *env,
319 struct cl_io *io, struct cl_lockset *set,
320 struct cl_io_lock_link *link)
322 struct cl_lock *lock;
327 lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
329 link->cill_lock = lock;
330 cfs_list_move(&link->cill_linkage, &set->cls_curr);
331 if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) {
332 result = cl_wait(env, lock);
334 cfs_list_move(&link->cill_linkage,
339 result = PTR_ERR(lock);
343 static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io,
344 struct cl_io_lock_link *link)
346 struct cl_lock *lock = link->cill_lock;
349 cfs_list_del_init(&link->cill_linkage);
351 cl_lock_release(env, lock, "io", io);
352 link->cill_lock = NULL;
354 if (link->cill_fini != NULL)
355 link->cill_fini(env, link);
359 static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
360 struct cl_lockset *set)
362 struct cl_io_lock_link *link;
363 struct cl_io_lock_link *temp;
364 struct cl_lock *lock;
369 cfs_list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
370 if (!cl_lockset_match(set, &link->cill_descr, 0)) {
371 /* XXX some locking is needed to guarantee that locks aren't
372 * expanded in between. */
373 result = cl_lockset_lock_one(env, io, set, link);
377 cl_lock_link_fini(env, io, link);
380 cfs_list_for_each_entry_safe(link, temp,
381 &set->cls_curr, cill_linkage) {
382 lock = link->cill_lock;
383 result = cl_wait(env, lock);
385 cfs_list_move(&link->cill_linkage,
395 * Takes locks necessary for the current iteration of io.
397 * Calls cl_io_operations::cio_lock() top-to-bottom to collect locks required
398 * by layers for the current iteration. Then sort locks (to avoid dead-locks),
401 int cl_io_lock(const struct lu_env *env, struct cl_io *io)
403 const struct cl_io_slice *scan;
406 LINVRNT(cl_io_is_loopable(io));
407 LINVRNT(io->ci_state == CIS_IT_STARTED);
408 LINVRNT(cl_io_invariant(io));
411 cl_io_for_each(scan, io) {
412 if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
414 result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
419 cl_io_locks_sort(io);
420 result = cl_lockset_lock(env, io, &io->ci_lockset);
423 cl_io_unlock(env, io);
425 io->ci_state = CIS_LOCKED;
428 EXPORT_SYMBOL(cl_io_lock);
431 * Releases locks taken by io.
433 void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
435 struct cl_lockset *set;
436 struct cl_io_lock_link *link;
437 struct cl_io_lock_link *temp;
438 const struct cl_io_slice *scan;
440 LASSERT(cl_io_is_loopable(io));
441 LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED);
442 LINVRNT(cl_io_invariant(io));
445 set = &io->ci_lockset;
447 cfs_list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
448 cl_lock_link_fini(env, io, link);
450 cfs_list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage)
451 cl_lock_link_fini(env, io, link);
453 cfs_list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
454 cl_unuse(env, link->cill_lock);
455 cl_lock_link_fini(env, io, link);
457 cl_io_for_each_reverse(scan, io) {
458 if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
459 scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
461 io->ci_state = CIS_UNLOCKED;
462 LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired);
465 EXPORT_SYMBOL(cl_io_unlock);
468 * Prepares next iteration of io.
470 * Calls cl_io_operations::cio_iter_init() top-to-bottom. This exists to give
471 * layers a chance to modify io parameters, e.g., so that lov can restrict io
472 * to a single stripe.
474 int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
476 const struct cl_io_slice *scan;
479 LINVRNT(cl_io_is_loopable(io));
480 LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED);
481 LINVRNT(cl_io_invariant(io));
485 cl_io_for_each(scan, io) {
486 if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
488 result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
494 io->ci_state = CIS_IT_STARTED;
497 EXPORT_SYMBOL(cl_io_iter_init);
500 * Finalizes io iteration.
502 * Calls cl_io_operations::cio_iter_fini() bottom-to-top.
504 void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
506 const struct cl_io_slice *scan;
508 LINVRNT(cl_io_is_loopable(io));
509 LINVRNT(io->ci_state == CIS_UNLOCKED);
510 LINVRNT(cl_io_invariant(io));
513 cl_io_for_each_reverse(scan, io) {
514 if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
515 scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
517 io->ci_state = CIS_IT_ENDED;
520 EXPORT_SYMBOL(cl_io_iter_fini);
523 * Records that read or write io progressed \a nob bytes forward.
525 void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, size_t nob)
527 const struct cl_io_slice *scan;
529 LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
531 LINVRNT(cl_io_is_loopable(io));
532 LINVRNT(cl_io_invariant(io));
536 io->u.ci_rw.crw_pos += nob;
537 io->u.ci_rw.crw_count -= nob;
539 /* layers have to be notified. */
540 cl_io_for_each_reverse(scan, io) {
541 if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
542 scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
547 EXPORT_SYMBOL(cl_io_rw_advance);
550 * Adds a lock to a lockset.
552 int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
553 struct cl_io_lock_link *link)
558 if (cl_lockset_match(&io->ci_lockset, &link->cill_descr, 1))
561 cfs_list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
566 EXPORT_SYMBOL(cl_io_lock_add);
568 static void cl_free_io_lock_link(const struct lu_env *env,
569 struct cl_io_lock_link *link)
575 * Allocates new lock link, and uses it to add a lock to a lockset.
577 int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
578 struct cl_lock_descr *descr)
580 struct cl_io_lock_link *link;
586 link->cill_descr = *descr;
587 link->cill_fini = cl_free_io_lock_link;
588 result = cl_io_lock_add(env, io, link);
589 if (result) /* lock match */
590 link->cill_fini(env, link);
596 EXPORT_SYMBOL(cl_io_lock_alloc_add);
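/*
 * Illustrative sketch (not part of the original source): how a layer's
 * cio_lock() method might use cl_io_lock_alloc_add() to request a
 * whole-file read lock for the current iteration.  CLM_READ and
 * CL_PAGE_EOF are assumed to come from cl_object.h; example_io_lock() and
 * the choice of enqueue flags are assumptions.
 */
#if 0 /* example only, not compiled */
static int example_io_lock(const struct lu_env *env,
                           const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        struct cl_lock_descr descr;
        int result;

        memset(&descr, 0, sizeof descr);
        descr.cld_obj       = io->ci_obj;
        descr.cld_mode      = CLM_READ;
        descr.cld_start     = 0;
        descr.cld_end       = CL_PAGE_EOF;
        descr.cld_enq_flags = 0;
        result = cl_io_lock_alloc_add(env, io, &descr);
        /* A positive return means an equivalent lock is already queued. */
        return result > 0 ? 0 : result;
}
#endif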
599 * Starts io by calling cl_io_operations::cio_start() top-to-bottom.
601 int cl_io_start(const struct lu_env *env, struct cl_io *io)
603 const struct cl_io_slice *scan;
606 LINVRNT(cl_io_is_loopable(io));
607 LINVRNT(io->ci_state == CIS_LOCKED);
608 LINVRNT(cl_io_invariant(io));
611 io->ci_state = CIS_IO_GOING;
612 cl_io_for_each(scan, io) {
613 if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
615 result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
623 EXPORT_SYMBOL(cl_io_start);
626 * Wait until current io iteration is finished by calling
627 * cl_io_operations::cio_end() bottom-to-top.
629 void cl_io_end(const struct lu_env *env, struct cl_io *io)
631 const struct cl_io_slice *scan;
633 LINVRNT(cl_io_is_loopable(io));
634 LINVRNT(io->ci_state == CIS_IO_GOING);
635 LINVRNT(cl_io_invariant(io));
638 cl_io_for_each_reverse(scan, io) {
639 if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
640 scan->cis_iop->op[io->ci_type].cio_end(env, scan);
641 /* TODO: error handling. */
643 io->ci_state = CIS_IO_FINISHED;
646 EXPORT_SYMBOL(cl_io_end);
648 static const struct cl_page_slice *
649 cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
651 const struct cl_page_slice *slice;
653 slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type);
654 LINVRNT(slice != NULL);
659 * True iff \a page is within \a io range.
661 static int cl_page_in_io(const struct cl_page *page, const struct cl_io *io)
668 idx = page->cp_index;
669 switch (io->ci_type) {
673 * check that [start, end) and [pos, pos + count) extents
676 if (!cl_io_is_append(io)) {
677 const struct cl_io_rw_common *crw = &(io->u.ci_rw);
678 start = cl_offset(page->cp_obj, idx);
679 end = cl_offset(page->cp_obj, idx + 1);
680 result = crw->crw_pos < end &&
681 start < crw->crw_pos + crw->crw_count;
685 result = io->u.ci_fault.ft_index == idx;
694 * Called by read io, when page has to be read from the server.
696 * \see cl_io_operations::cio_read_page()
698 int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
699 struct cl_page *page)
701 const struct cl_io_slice *scan;
702 struct cl_2queue *queue;
705 LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
706 LINVRNT(cl_page_is_owned(page, io));
707 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
708 LINVRNT(cl_page_in_io(page, io));
709 LINVRNT(cl_io_invariant(io));
712 queue = &io->ci_queue;
714 cl_2queue_init(queue);
716 * ->cio_read_page() methods called in the loop below are supposed to
717 * never block waiting for network (the only subtle point is the
718 * creation of new pages for read-ahead that might result in cache
719 * shrinking, but currently only clean pages are shrunk and this
720 * requires no network io).
722 * Should this ever start blocking, a retry loop would be needed for
723 * "parallel io" (see CLO_REPEAT loops in cl_lock.c).
725 cl_io_for_each(scan, io) {
726 if (scan->cis_iop->cio_read_page != NULL) {
727 const struct cl_page_slice *slice;
729 slice = cl_io_slice_page(scan, page);
730 LINVRNT(slice != NULL);
731 result = scan->cis_iop->cio_read_page(env, scan, slice);
737 result = cl_io_submit_rw(env, io, CRT_READ, queue, CRP_NORMAL);
739 * Unlock unsent pages in case of error.
741 cl_page_list_disown(env, io, &queue->c2_qin);
742 cl_2queue_fini(env, queue);
745 EXPORT_SYMBOL(cl_io_read_page);
748 * Called by write io to prepare a page to receive data from the user buffer.
750 * \see cl_io_operations::cio_prepare_write()
752 int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
753 struct cl_page *page, unsigned from, unsigned to)
755 const struct cl_io_slice *scan;
758 LINVRNT(io->ci_type == CIT_WRITE);
759 LINVRNT(cl_page_is_owned(page, io));
760 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
761 LINVRNT(cl_io_invariant(io));
762 LASSERT(cl_page_in_io(page, io));
765 cl_io_for_each_reverse(scan, io) {
766 if (scan->cis_iop->cio_prepare_write != NULL) {
767 const struct cl_page_slice *slice;
769 slice = cl_io_slice_page(scan, page);
770 result = scan->cis_iop->cio_prepare_write(env, scan,
779 EXPORT_SYMBOL(cl_io_prepare_write);
782 * Called by write io after user data were copied into a page.
784 * \see cl_io_operations::cio_commit_write()
786 int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
787 struct cl_page *page, unsigned from, unsigned to)
789 const struct cl_io_slice *scan;
792 LINVRNT(io->ci_type == CIT_WRITE);
793 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
794 LINVRNT(cl_io_invariant(io));
796 * XXX Uh... not nice. Top level cl_io_commit_write() call (vvp->lov)
797 * already called cl_page_cache_add(), moving page into CPS_CACHED
798 * state. A better (and more general) way of dealing with such a situation
801 LASSERT(cl_page_is_owned(page, io) || page->cp_parent != NULL);
802 LASSERT(cl_page_in_io(page, io));
805 cl_io_for_each(scan, io) {
806 if (scan->cis_iop->cio_commit_write != NULL) {
807 const struct cl_page_slice *slice;
809 slice = cl_io_slice_page(scan, page);
810 result = scan->cis_iop->cio_commit_write(env, scan,
817 LINVRNT(result <= 0);
820 EXPORT_SYMBOL(cl_io_commit_write);
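/*
 * Illustrative sketch (not part of the original source): the
 * prepare_write/commit_write pairing as seen from a caller copying user
 * data into an owned page.  example_write_page() and the copy step are
 * assumptions; the two cl_io_*_write() calls use the signatures above.
 */
#if 0 /* example only, not compiled */
static int example_write_page(const struct lu_env *env, struct cl_io *io,
                              struct cl_page *page,
                              unsigned from, unsigned to)
{
        int result;

        result = cl_io_prepare_write(env, io, page, from, to);
        if (result == 0) {
                /* ... copy the [from, to) bytes of user data into the page ... */
                result = cl_io_commit_write(env, io, page, from, to);
        }
        return result;
}
#endif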
823 * Submits a list of pages for immediate io.
825 * After the function returns, the submitted pages are moved to the
826 * queue->c2_qout queue, while queue->c2_qin holds both the pages that did
827 * not need to be submitted and the pages that failed to submit.
829 * \returns 0 if at least one page was submitted, error code otherwise.
830 * \see cl_io_operations::cio_submit()
832 int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
833 enum cl_req_type crt, struct cl_2queue *queue,
834 enum cl_req_priority priority)
836 const struct cl_io_slice *scan;
839 LINVRNT(crt < ARRAY_SIZE(scan->cis_iop->req_op));
842 cl_io_for_each(scan, io) {
843 if (scan->cis_iop->req_op[crt].cio_submit == NULL)
845 result = scan->cis_iop->req_op[crt].cio_submit(env, scan, crt,
851 * If ->cio_submit() failed, no pages were sent.
853 LASSERT(ergo(result != 0, cfs_list_empty(&queue->c2_qout.pl_pages)));
856 EXPORT_SYMBOL(cl_io_submit_rw);
859 * Submits pages for synchronous io and waits until the IO finishes or an
860 * error occurs. If \a timeout is zero, the wait is unconditional.
862 int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
863 enum cl_req_type iot, struct cl_2queue *queue,
864 enum cl_req_priority prio, long timeout)
866 struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
870 LASSERT(prio == CRP_NORMAL || prio == CRP_CANCEL);
872 cl_page_list_for_each(pg, &queue->c2_qin) {
873 LASSERT(pg->cp_sync_io == NULL);
874 pg->cp_sync_io = anchor;
877 cl_sync_io_init(anchor, queue->c2_qin.pl_nr);
878 rc = cl_io_submit_rw(env, io, iot, queue, prio);
881 * If some pages weren't sent for any reason (e.g.,
882 * read found up-to-date pages in the cache, or write found
883 * clean pages), count them as completed to avoid infinite
886 cl_page_list_for_each(pg, &queue->c2_qin) {
887 pg->cp_sync_io = NULL;
888 cl_sync_io_note(anchor, +1);
891 /* wait for the IO to be finished. */
892 rc = cl_sync_io_wait(env, io, &queue->c2_qout,
895 LASSERT(cfs_list_empty(&queue->c2_qout.pl_pages));
896 cl_page_list_for_each(pg, &queue->c2_qin)
897 pg->cp_sync_io = NULL;
901 EXPORT_SYMBOL(cl_io_submit_sync);
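/*
 * Illustrative sketch (not part of the original source): reading a single
 * owned page synchronously through a 2-queue.  The page is assumed to be
 * owned by \a io already; the zero timeout (unconditional wait) is an
 * arbitrary choice and example_read_one() is a hypothetical name.
 */
#if 0 /* example only, not compiled */
static int example_read_one(const struct lu_env *env, struct cl_io *io,
                            struct cl_page *page)
{
        struct cl_2queue *queue = &io->ci_queue;
        int rc;

        cl_2queue_init_page(queue, page);
        rc = cl_io_submit_sync(env, io, CRT_READ, queue, CRP_NORMAL, 0);
        /* Pages that were not transferred stay in c2_qin; disown them all. */
        cl_2queue_disown(env, io, queue);
        cl_2queue_fini(env, queue);
        return rc;
}
#endif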
904 * Cancel an IO which has been submitted by cl_io_submit_rw.
906 int cl_io_cancel(const struct lu_env *env, struct cl_io *io,
907 struct cl_page_list *queue)
909 struct cl_page *page;
912 CERROR("Canceling ongoing page transmission\n");
913 cl_page_list_for_each(page, queue) {
916 LINVRNT(cl_page_in_io(page, io));
917 rc = cl_page_cancel(env, page);
918 result = result ?: rc;
922 EXPORT_SYMBOL(cl_io_cancel);
927 * Pumps io through iterations calling
929 * - cl_io_iter_init()
939 * - cl_io_iter_fini()
941 * repeatedly until there is no more io to do.
943 int cl_io_loop(const struct lu_env *env, struct cl_io *io)
947 LINVRNT(cl_io_is_loopable(io));
954 result = cl_io_iter_init(env, io);
957 result = cl_io_lock(env, io);
960 * Notify layers that locks have been taken,
963 * - llite: kms, short read;
964 * - llite: generic_file_read();
966 result = cl_io_start(env, io);
968 * Send any remaining pending
971 * - llite: ll_rw_stats_tally.
974 cl_io_unlock(env, io);
975 cl_io_rw_advance(env, io, io->ci_nob - nob);
978 cl_io_iter_fini(env, io);
979 } while (result == 0 && io->ci_continue);
980 RETURN(result < 0 ? result : 0);
982 EXPORT_SYMBOL(cl_io_loop);
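/*
 * Illustrative sketch (not part of the original source): a top-level read
 * driven by cl_io_loop(), following the iteration scheme documented above.
 * The env, io and top object are assumed to be supplied by the caller;
 * example_read() is a hypothetical name.
 */
#if 0 /* example only, not compiled */
static int example_read(const struct lu_env *env, struct cl_io *io,
                        struct cl_object *top, loff_t pos, size_t count)
{
        int result;

        io->ci_obj = top;
        result = cl_io_rw_init(env, io, CIT_READ, pos, count);
        if (result == 0) {
                /* Iterates iter_init, lock, start, end, unlock, rw_advance,
                 * iter_fini until io->ci_continue drops to zero. */
                result = cl_io_loop(env, io);
        }
        /* cl_io_fini() is required no matter what cl_io_rw_init() returned. */
        cl_io_fini(env, io);
        return result;
}
#endif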
985 * Adds io slice to the cl_io.
987 * This is called by cl_object_operations::coo_io_init() methods to add a
988 * per-layer state to the io. New state is added at the end of
989 * cl_io::ci_layers list, that is, it is at the bottom of the stack.
991 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_page_slice_add()
993 void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
994 struct cl_object *obj,
995 const struct cl_io_operations *ops)
997 cfs_list_t *linkage = &slice->cis_linkage;
999 LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
1000 cfs_list_empty(linkage));
1003 cfs_list_add_tail(linkage, &io->ci_layers);
1005 slice->cis_obj = obj;
1006 slice->cis_iop = ops;
1009 EXPORT_SYMBOL(cl_io_slice_add);
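/*
 * Illustrative sketch (not part of the original source): a layer's
 * coo_io_init() method attaching its per-layer state with
 * cl_io_slice_add().  struct example_io, example_io_ops and
 * example_io_storage() are all hypothetical; real layers typically keep
 * this state in the thread session.
 */
#if 0 /* example only, not compiled */
struct example_io {
        struct cl_io_slice eio_cl;   /* generic slice, linked into ci_layers */
        /* ... layer-private fields ... */
};

static const struct cl_io_operations example_io_ops;   /* per-type methods */
/* Hypothetical helper returning per-thread slice storage. */
static struct example_io *example_io_storage(const struct lu_env *env);

static int example_coo_io_init(const struct lu_env *env,
                               struct cl_object *obj, struct cl_io *io)
{
        struct example_io *eio = example_io_storage(env);

        cl_io_slice_add(io, &eio->eio_cl, obj, &example_io_ops);
        return 0;
}
#endif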
1013 * Initializes page list.
1015 void cl_page_list_init(struct cl_page_list *plist)
1019 CFS_INIT_LIST_HEAD(&plist->pl_pages);
1020 plist->pl_owner = cfs_current();
1023 EXPORT_SYMBOL(cl_page_list_init);
1026 * Adds a page to a page list.
1028 void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
1031 /* it would be better to check that page is owned by "current" io, but
1032 * it is not passed here. */
1033 LASSERT(page->cp_owner != NULL);
1034 LINVRNT(plist->pl_owner == cfs_current());
1037 cfs_mutex_lock(&page->cp_mutex);
1039 LASSERT(cfs_list_empty(&page->cp_batch));
1040 cfs_list_add_tail(&page->cp_batch, &plist->pl_pages);
1042 page->cp_queue_ref = lu_ref_add(&page->cp_reference, "queue", plist);
1046 EXPORT_SYMBOL(cl_page_list_add);
1049 * Removes a page from a page list.
1051 void cl_page_list_del(const struct lu_env *env,
1052 struct cl_page_list *plist, struct cl_page *page)
1054 LASSERT(plist->pl_nr > 0);
1055 LINVRNT(plist->pl_owner == cfs_current());
1058 cfs_list_del_init(&page->cp_batch);
1060 cfs_mutex_unlock(&page->cp_mutex);
1063 lu_ref_del_at(&page->cp_reference, page->cp_queue_ref, "queue", plist);
1064 cl_page_put(env, page);
1067 EXPORT_SYMBOL(cl_page_list_del);
1070 * Moves a page from one page list to another.
1072 void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
1073 struct cl_page *page)
1075 LASSERT(src->pl_nr > 0);
1076 LINVRNT(dst->pl_owner == cfs_current());
1077 LINVRNT(src->pl_owner == cfs_current());
1080 cfs_list_move_tail(&page->cp_batch, &dst->pl_pages);
1083 lu_ref_set_at(&page->cp_reference,
1084 page->cp_queue_ref, "queue", src, dst);
1087 EXPORT_SYMBOL(cl_page_list_move);
1090 * Splices all pages from \a list onto the tail of \a head, emptying \a list.
1092 void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
1094 struct cl_page *page;
1095 struct cl_page *tmp;
1097 LINVRNT(list->pl_owner == cfs_current());
1098 LINVRNT(head->pl_owner == cfs_current());
1101 cl_page_list_for_each_safe(page, tmp, list)
1102 cl_page_list_move(head, list, page);
1105 EXPORT_SYMBOL(cl_page_list_splice);
1107 void cl_page_disown0(const struct lu_env *env,
1108 struct cl_io *io, struct cl_page *pg);
1111 * Disowns pages in a queue.
1113 void cl_page_list_disown(const struct lu_env *env,
1114 struct cl_io *io, struct cl_page_list *plist)
1116 struct cl_page *page;
1117 struct cl_page *temp;
1119 LINVRNT(plist->pl_owner == cfs_current());
1122 cl_page_list_for_each_safe(page, temp, plist) {
1123 LASSERT(plist->pl_nr > 0);
1125 cfs_list_del_init(&page->cp_batch);
1127 cfs_mutex_unlock(&page->cp_mutex);
1131 * cl_page_disown0() rather than the usual cl_page_disown() is used,
1132 * because pages are possibly in CPS_FREEING state already due
1133 * to the call to cl_page_list_discard().
1136 * XXX cl_page_disown0() will fail if page is not locked.
1138 cl_page_disown0(env, io, page);
1139 lu_ref_del(&page->cp_reference, "queue", plist);
1140 cl_page_put(env, page);
1144 EXPORT_SYMBOL(cl_page_list_disown);
1147 * Releases pages from queue.
1149 void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
1151 struct cl_page *page;
1152 struct cl_page *temp;
1154 LINVRNT(plist->pl_owner == cfs_current());
1157 cl_page_list_for_each_safe(page, temp, plist)
1158 cl_page_list_del(env, plist, page);
1159 LASSERT(plist->pl_nr == 0);
1162 EXPORT_SYMBOL(cl_page_list_fini);
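/*
 * Illustrative sketch (not part of the original source): the basic page
 * list life cycle.  The page is assumed to be already owned by \a io, as
 * cl_page_list_add() requires; example_queue_page() is a hypothetical
 * name and the actual submission step is elided.
 */
#if 0 /* example only, not compiled */
static void example_queue_page(const struct lu_env *env, struct cl_io *io,
                               struct cl_page *page)
{
        struct cl_page_list plist;

        cl_page_list_init(&plist);
        cl_page_list_add(&plist, page);       /* takes a "queue" reference */
        /* ... hand the list to a submission or discard helper ... */
        cl_page_list_disown(env, io, &plist); /* disowns and empties the list */
        cl_page_list_fini(env, &plist);       /* releases any leftover pages */
}
#endif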
1165 * Takes ownership of all pages in a queue.
1167 int cl_page_list_own(const struct lu_env *env,
1168 struct cl_io *io, struct cl_page_list *plist)
1170 struct cl_page *page;
1171 struct cl_page *temp;
1175 LINVRNT(plist->pl_owner == cfs_current());
1179 cl_page_list_for_each_safe(page, temp, plist) {
1180 LASSERT(index <= page->cp_index);
1181 index = page->cp_index;
1182 if (cl_page_own(env, io, page) == 0)
1183 result = result ?: page->cp_error;
1185 cl_page_list_del(env, plist, page);
1189 EXPORT_SYMBOL(cl_page_list_own);
1192 * Assumes ownership of all pages in a queue.
1194 void cl_page_list_assume(const struct lu_env *env,
1195 struct cl_io *io, struct cl_page_list *plist)
1197 struct cl_page *page;
1199 LINVRNT(plist->pl_owner == cfs_current());
1201 cl_page_list_for_each(page, plist)
1202 cl_page_assume(env, io, page);
1204 EXPORT_SYMBOL(cl_page_list_assume);
1207 * Discards all pages in a queue.
1209 void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
1210 struct cl_page_list *plist)
1212 struct cl_page *page;
1214 LINVRNT(plist->pl_owner == cfs_current());
1216 cl_page_list_for_each(page, plist)
1217 cl_page_discard(env, io, page);
1220 EXPORT_SYMBOL(cl_page_list_discard);
1223 * Unmaps all pages in a queue from user virtual memory.
1225 int cl_page_list_unmap(const struct lu_env *env, struct cl_io *io,
1226 struct cl_page_list *plist)
1228 struct cl_page *page;
1231 LINVRNT(plist->pl_owner == cfs_current());
1234 cl_page_list_for_each(page, plist) {
1235 result = cl_page_unmap(env, io, page);
1241 EXPORT_SYMBOL(cl_page_list_unmap);
1244 * Initialize dual page queue.
1246 void cl_2queue_init(struct cl_2queue *queue)
1249 cl_page_list_init(&queue->c2_qin);
1250 cl_page_list_init(&queue->c2_qout);
1253 EXPORT_SYMBOL(cl_2queue_init);
1256 * Add a page to the incoming page list of 2-queue.
1258 void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page)
1261 cl_page_list_add(&queue->c2_qin, page);
1264 EXPORT_SYMBOL(cl_2queue_add);
1267 * Disown pages in both lists of a 2-queue.
1269 void cl_2queue_disown(const struct lu_env *env,
1270 struct cl_io *io, struct cl_2queue *queue)
1273 cl_page_list_disown(env, io, &queue->c2_qin);
1274 cl_page_list_disown(env, io, &queue->c2_qout);
1277 EXPORT_SYMBOL(cl_2queue_disown);
1280 * Discard (truncate) pages in both lists of a 2-queue.
1282 void cl_2queue_discard(const struct lu_env *env,
1283 struct cl_io *io, struct cl_2queue *queue)
1286 cl_page_list_discard(env, io, &queue->c2_qin);
1287 cl_page_list_discard(env, io, &queue->c2_qout);
1290 EXPORT_SYMBOL(cl_2queue_discard);
1293 * Assumes ownership of the pages in both lists of a 2-queue.
1295 void cl_2queue_assume(const struct lu_env *env,
1296 struct cl_io *io, struct cl_2queue *queue)
1298 cl_page_list_assume(env, io, &queue->c2_qin);
1299 cl_page_list_assume(env, io, &queue->c2_qout);
1301 EXPORT_SYMBOL(cl_2queue_assume);
1304 * Finalize both page lists of a 2-queue.
1306 void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue)
1309 cl_page_list_fini(env, &queue->c2_qout);
1310 cl_page_list_fini(env, &queue->c2_qin);
1313 EXPORT_SYMBOL(cl_2queue_fini);
1316 * Initialize a 2-queue to contain \a page in its incoming page list.
1318 void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page)
1321 cl_2queue_init(queue);
1322 cl_2queue_add(queue, page);
1325 EXPORT_SYMBOL(cl_2queue_init_page);
1328 * Returns top-level io.
1330 * \see cl_object_top(), cl_page_top().
1332 struct cl_io *cl_io_top(struct cl_io *io)
1335 while (io->ci_parent != NULL)
1339 EXPORT_SYMBOL(cl_io_top);
1342 * Prints a human-readable representation of \a io via the \a printer.
1344 void cl_io_print(const struct lu_env *env, void *cookie,
1345 lu_printer_t printer, const struct cl_io *io)
1350 * Adds request slice to the compound request.
1352 * This is called by cl_device_operations::cdo_req_init() methods to add a
1353 * per-layer state to the request. New state is added at the end of
1354 * cl_req::crq_layers list, that is, it is at the bottom of the stack.
1356 * \see cl_lock_slice_add(), cl_page_slice_add(), cl_io_slice_add()
1358 void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
1359 struct cl_device *dev,
1360 const struct cl_req_operations *ops)
1363 cfs_list_add_tail(&slice->crs_linkage, &req->crq_layers);
1364 slice->crs_dev = dev;
1365 slice->crs_ops = ops;
1366 slice->crs_req = req;
1369 EXPORT_SYMBOL(cl_req_slice_add);
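/*
 * Illustrative sketch (not part of the original source): a device's
 * cdo_req_init() method attaching its request slice, which is what
 * cl_req_init() below walks the page's layers to invoke.  struct
 * example_req, example_req_ops, example_req_storage() and the exact
 * cdo_req_init() signature are assumptions here.
 */
#if 0 /* example only, not compiled */
struct example_req {
        struct cl_req_slice erq_cl;  /* generic slice, linked into crq_layers */
};

static const struct cl_req_operations example_req_ops;  /* cro_* methods */
/* Hypothetical helper returning per-thread request-slice storage. */
static struct example_req *example_req_storage(const struct lu_env *env);

static int example_cdo_req_init(const struct lu_env *env,
                                struct cl_device *dev, struct cl_req *req)
{
        struct example_req *erq = example_req_storage(env);

        cl_req_slice_add(req, &erq->erq_cl, dev, &example_req_ops);
        return 0;
}
#endif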
1371 static void cl_req_free(const struct lu_env *env, struct cl_req *req)
1375 LASSERT(cfs_list_empty(&req->crq_pages));
1376 LASSERT(req->crq_nrpages == 0);
1377 LINVRNT(cfs_list_empty(&req->crq_layers));
1378 LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL));
1381 if (req->crq_o != NULL) {
1382 for (i = 0; i < req->crq_nrobjs; ++i) {
1383 struct cl_object *obj = req->crq_o[i].ro_obj;
1385 lu_object_ref_del_at(&obj->co_lu,
1386 req->crq_o[i].ro_obj_ref,
1388 cl_object_put(env, obj);
1391 OBD_FREE(req->crq_o, req->crq_nrobjs * sizeof req->crq_o[0]);
1397 static int cl_req_init(const struct lu_env *env, struct cl_req *req,
1398 struct cl_page *page)
1400 struct cl_device *dev;
1401 struct cl_page_slice *slice;
1406 page = cl_page_top(page);
1408 cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
1409 dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
1410 if (dev->cd_ops->cdo_req_init != NULL) {
1411 result = dev->cd_ops->cdo_req_init(env,
1417 page = page->cp_child;
1418 } while (page != NULL && result == 0);
1423 * Invokes per-request transfer completion call-backs
1424 * (cl_req_operations::cro_completion()) bottom-to-top.
1426 void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
1428 struct cl_req_slice *slice;
1432 * for the lack of list_for_each_entry_reverse_safe()...
1434 while (!cfs_list_empty(&req->crq_layers)) {
1435 slice = cfs_list_entry(req->crq_layers.prev,
1436 struct cl_req_slice, crs_linkage);
1437 cfs_list_del_init(&slice->crs_linkage);
1438 if (slice->crs_ops->cro_completion != NULL)
1439 slice->crs_ops->cro_completion(env, slice, rc);
1441 cl_req_free(env, req);
1444 EXPORT_SYMBOL(cl_req_completion);
1447 * Allocates new transfer request.
1449 struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
1450 enum cl_req_type crt, int nr_objects)
1454 LINVRNT(nr_objects > 0);
1461 OBD_ALLOC(req->crq_o, nr_objects * sizeof req->crq_o[0]);
1462 if (req->crq_o != NULL) {
1463 req->crq_nrobjs = nr_objects;
1464 req->crq_type = crt;
1465 CFS_INIT_LIST_HEAD(&req->crq_pages);
1466 CFS_INIT_LIST_HEAD(&req->crq_layers);
1467 result = cl_req_init(env, req, page);
1471 cl_req_completion(env, req, result);
1472 req = ERR_PTR(result);
1475 req = ERR_PTR(-ENOMEM);
1478 EXPORT_SYMBOL(cl_req_alloc);
1481 * Adds a page to a request.
1483 void cl_req_page_add(const struct lu_env *env,
1484 struct cl_req *req, struct cl_page *page)
1486 struct cl_object *obj;
1487 struct cl_req_obj *rqo;
1491 page = cl_page_top(page);
1493 LINVRNT(cl_page_is_vmlocked(env, page));
1494 LASSERT(cfs_list_empty(&page->cp_flight));
1495 LASSERT(page->cp_req == NULL);
1497 cfs_list_add_tail(&page->cp_flight, &req->crq_pages);
1500 obj = cl_object_top(page->cp_obj);
1501 for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) {
1502 if (rqo->ro_obj == NULL) {
1505 rqo->ro_obj_ref = lu_object_ref_add(&obj->co_lu,
1510 LASSERT(i < req->crq_nrobjs);
1513 EXPORT_SYMBOL(cl_req_page_add);
1516 * Removes a page from a request.
1518 void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
1520 struct cl_req *req = page->cp_req;
1523 page = cl_page_top(page);
1525 LINVRNT(cl_page_is_vmlocked(env, page));
1526 LASSERT(!cfs_list_empty(&page->cp_flight));
1527 LASSERT(req->crq_nrpages > 0);
1529 cfs_list_del_init(&page->cp_flight);
1531 page->cp_req = NULL;
1534 EXPORT_SYMBOL(cl_req_page_done);
1537 * Notifies layers that request is about to depart by calling
1538 * cl_req_operations::cro_prep() top-to-bottom.
1540 int cl_req_prep(const struct lu_env *env, struct cl_req *req)
1544 const struct cl_req_slice *slice;
1548 * Check that the caller of cl_req_alloc() didn't lie about the number
1551 for (i = 0; i < req->crq_nrobjs; ++i)
1552 LASSERT(req->crq_o[i].ro_obj != NULL);
1555 cfs_list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
1556 if (slice->crs_ops->cro_prep != NULL) {
1557 result = slice->crs_ops->cro_prep(env, slice);
1564 EXPORT_SYMBOL(cl_req_prep);
1567 * Fills in attributes that are passed to server together with transfer. Only
1568 * attributes from \a flags may be touched. This can be called multiple times
1569 * for the same request.
1571 void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
1572 struct cl_req_attr *attr, obd_valid flags)
1574 const struct cl_req_slice *slice;
1575 struct cl_page *page;
1578 LASSERT(!cfs_list_empty(&req->crq_pages));
1581 /* Take any page to use as a model. */
1582 page = cfs_list_entry(req->crq_pages.next, struct cl_page, cp_flight);
1584 for (i = 0; i < req->crq_nrobjs; ++i) {
1585 cfs_list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
1586 const struct cl_page_slice *scan;
1587 const struct cl_object *obj;
1589 scan = cl_page_at(page,
1590 slice->crs_dev->cd_lu_dev.ld_type);
1591 LASSERT(scan != NULL);
1592 obj = scan->cpl_obj;
1593 if (slice->crs_ops->cro_attr_set != NULL)
1594 slice->crs_ops->cro_attr_set(env, slice, obj,
1600 EXPORT_SYMBOL(cl_req_attr_set);
1602 /* XXX complete(), init_completion(), and wait_for_completion() are taken from
1603 * the kernel (or liblustre) until they are implemented in libcfs. */
1605 # include <linux/sched.h>
1606 #else /* __KERNEL__ */
1607 # include <liblustre.h>
1611 * Initialize synchronous io wait anchor, for transfer of \a nrpages pages.
1613 void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
1616 cfs_waitq_init(&anchor->csi_waitq);
1617 cfs_atomic_set(&anchor->csi_sync_nr, nrpages);
1618 anchor->csi_sync_rc = 0;
1621 EXPORT_SYMBOL(cl_sync_io_init);
1624 * Waits until all transfers complete. The transfer completion routine has to
1625 * call cl_sync_io_note() for every page.
1627 int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
1628 struct cl_page_list *queue, struct cl_sync_io *anchor,
1631 struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
1636 LASSERT(timeout >= 0);
1638 rc = l_wait_event(anchor->csi_waitq,
1639 cfs_atomic_read(&anchor->csi_sync_nr) == 0,
1642 CERROR("SYNC IO failed with error: %d, try to cancel "
1643 "%d remaining pages\n",
1644 rc, cfs_atomic_read(&anchor->csi_sync_nr));
1646 (void)cl_io_cancel(env, io, queue);
1648 lwi = (struct l_wait_info) { 0 };
1649 (void)l_wait_event(anchor->csi_waitq,
1650 cfs_atomic_read(&anchor->csi_sync_nr) == 0,
1653 rc = anchor->csi_sync_rc;
1655 LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) == 0);
1656 cl_page_list_assume(env, io, queue);
1657 POISON(anchor, 0x5a, sizeof *anchor);
1660 EXPORT_SYMBOL(cl_sync_io_wait);
1663 * Indicate that transfer of a single page completed.
1665 void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
1668 if (anchor->csi_sync_rc == 0 && ioret < 0)
1669 anchor->csi_sync_rc = ioret;
1671 * Synchronous IO done without releasing page lock (e.g., as a part of
1672 * ->{prepare,commit}_write()). Completion is used to signal the end of
1675 LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) > 0);
1676 if (cfs_atomic_dec_and_test(&anchor->csi_sync_nr))
1677 cfs_waitq_broadcast(&anchor->csi_waitq);
1680 EXPORT_SYMBOL(cl_sync_io_note);
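/*
 * Illustrative sketch (not part of the original source): the minimal
 * pairing around a cl_sync_io anchor, as used by cl_io_submit_sync()
 * above.  The transfer side is reduced to direct cl_sync_io_note() calls,
 * the anchor lives on the stack rather than in the session, and both
 * nr_pages and the timeout value are arbitrary.
 */
#if 0 /* example only, not compiled */
static int example_sync_wait(const struct lu_env *env, struct cl_io *io,
                             struct cl_page_list *queue, int nr_pages)
{
        struct cl_sync_io anchor;
        int i;

        cl_sync_io_init(&anchor, nr_pages);
        /* Normally each transfer completion calls cl_sync_io_note()... */
        for (i = 0; i < nr_pages; i++)
                cl_sync_io_note(&anchor, 0);    /* 0: page transferred OK */
        /* ... and the submitter waits for csi_sync_nr to drop to zero. */
        return cl_sync_io_wait(env, io, queue, &anchor, 600);
}
#endif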