1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or have any questions.
29 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
32 * Copyright (c) 2011, 2012, Whamcloud, Inc.
35 * This file is part of Lustre, http://www.lustre.org/
36 * Lustre is a trademark of Sun Microsystems, Inc.
40 * Author: Nikita Danilov <nikita.danilov@sun.com>
43 #define DEBUG_SUBSYSTEM S_CLASS
45 # define EXPORT_SYMTAB
48 #include <obd_class.h>
49 #include <obd_support.h>
50 #include <lustre_fid.h>
51 #include <libcfs/list.h>
52 /* lu_time_global_{init,fini}() */
55 #include <cl_object.h>
56 #include "cl_internal.h"
58 /*****************************************************************************
64 #define cl_io_for_each(slice, io) \
65 cfs_list_for_each_entry((slice), &io->ci_layers, cis_linkage)
66 #define cl_io_for_each_reverse(slice, io) \
67 cfs_list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
69 static inline int cl_io_type_is_valid(enum cl_io_type type)
71 return CIT_READ <= type && type < CIT_OP_NR;
74 static inline int cl_io_is_loopable(const struct cl_io *io)
76 return cl_io_type_is_valid(io->ci_type) && io->ci_type != CIT_MISC;
80 * Returns true iff there is an IO ongoing in the given environment.
82 int cl_io_is_going(const struct lu_env *env)
84 return cl_env_info(env)->clt_current_io != NULL;
86 EXPORT_SYMBOL(cl_io_is_going);
89 * cl_io invariant that holds at all times when exported cl_io_*() functions
90 * are entered and left.
92 static int cl_io_invariant(const struct cl_io *io)
* io can own pages only when it is ongoing. Sub-io might
* still be in CIS_LOCKED state when top-io is in
* CIS_IO_GOING.
103 ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING ||
104 (io->ci_state == CIS_LOCKED && up != NULL));
108 * Finalize \a io, by calling cl_io_operations::cio_fini() bottom-to-top.
110 void cl_io_fini(const struct lu_env *env, struct cl_io *io)
112 struct cl_io_slice *slice;
113 struct cl_thread_info *info;
115 LINVRNT(cl_io_type_is_valid(io->ci_type));
116 LINVRNT(cl_io_invariant(io));
119 while (!cfs_list_empty(&io->ci_layers)) {
120 slice = container_of(io->ci_layers.next, struct cl_io_slice,
122 cfs_list_del_init(&slice->cis_linkage);
123 if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
124 slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
126 * Invalidate slice to catch use after free. This assumes that
127 * slices are allocated within session and can be touched
128 * after ->cio_fini() returns.
130 slice->cis_io = NULL;
132 io->ci_state = CIS_FINI;
133 info = cl_env_info(env);
134 if (info->clt_current_io == io)
135 info->clt_current_io = NULL;
138 EXPORT_SYMBOL(cl_io_fini);
140 static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
141 enum cl_io_type iot, struct cl_object *obj)
143 struct cl_object *scan;
146 LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI);
147 LINVRNT(cl_io_type_is_valid(iot));
148 LINVRNT(cl_io_invariant(io));
152 CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
153 CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_curr);
154 CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_done);
155 CFS_INIT_LIST_HEAD(&io->ci_layers);
158 cl_object_for_each(scan, obj) {
159 if (scan->co_ops->coo_io_init != NULL) {
160 result = scan->co_ops->coo_io_init(env, scan, io);
166 io->ci_state = CIS_INIT;
171 * Initialize sub-io, by calling cl_io_operations::cio_init() top-to-bottom.
173 * \pre obj != cl_object_top(obj)
175 int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
176 enum cl_io_type iot, struct cl_object *obj)
178 struct cl_thread_info *info = cl_env_info(env);
180 LASSERT(obj != cl_object_top(obj));
181 if (info->clt_current_io == NULL)
182 info->clt_current_io = io;
183 return cl_io_init0(env, io, iot, obj);
185 EXPORT_SYMBOL(cl_io_sub_init);
188 * Initialize \a io, by calling cl_io_operations::cio_init() top-to-bottom.
190 * Caller has to call cl_io_fini() after a call to cl_io_init(), no matter
191 * what the latter returned.
193 * \pre obj == cl_object_top(obj)
194 * \pre cl_io_type_is_valid(iot)
195 * \post cl_io_type_is_valid(io->ci_type) && io->ci_type == iot
197 int cl_io_init(const struct lu_env *env, struct cl_io *io,
198 enum cl_io_type iot, struct cl_object *obj)
200 struct cl_thread_info *info = cl_env_info(env);
202 LASSERT(obj == cl_object_top(obj));
203 LASSERT(info->clt_current_io == NULL);
205 info->clt_current_io = io;
206 return cl_io_init0(env, io, iot, obj);
208 EXPORT_SYMBOL(cl_io_init);
211 * Initialize read or write io.
213 * \pre iot == CIT_READ || iot == CIT_WRITE
215 int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
216 enum cl_io_type iot, loff_t pos, size_t count)
218 LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
219 LINVRNT(io->ci_obj != NULL);
222 LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
223 "io range: %u ["LPU64", "LPU64") %u %u\n",
224 iot, (__u64)pos, (__u64)pos + count,
225 io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
226 io->u.ci_rw.crw_pos = pos;
227 io->u.ci_rw.crw_count = count;
228 RETURN(cl_io_init(env, io, iot, io->ci_obj));
230 EXPORT_SYMBOL(cl_io_rw_init);
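/*
 * Illustrative sketch (not part of the original code): how a caller that
 * already holds an environment and a cl_io typically drives a read through
 * the functions above. "env", "io", "obj", "pos" and "count" are assumed to
 * be prepared by the caller; error handling is abbreviated.
 *
 *      io->ci_obj = obj;                          // top-level cl_object
 *      result = cl_io_rw_init(env, io, CIT_READ, pos, count);
 *      if (result == 0)
 *              result = cl_io_loop(env, io);      // iterate, lock, do io
 *      cl_io_fini(env, io);                       // always finalize
 */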
232 static inline const struct lu_fid *
233 cl_lock_descr_fid(const struct cl_lock_descr *descr)
235 return lu_object_fid(&descr->cld_obj->co_lu);
238 static int cl_lock_descr_sort(const struct cl_lock_descr *d0,
239 const struct cl_lock_descr *d1)
241 return lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1)) ?:
242 __diff_normalize(d0->cld_start, d1->cld_start);
245 static int cl_lock_descr_cmp(const struct cl_lock_descr *d0,
246 const struct cl_lock_descr *d1)
250 ret = lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1));
253 if (d0->cld_end < d1->cld_start)
if (d0->cld_start > d1->cld_end)
260 static void cl_lock_descr_merge(struct cl_lock_descr *d0,
261 const struct cl_lock_descr *d1)
263 d0->cld_start = min(d0->cld_start, d1->cld_start);
264 d0->cld_end = max(d0->cld_end, d1->cld_end);
266 if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
267 d0->cld_mode = CLM_WRITE;
269 if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
270 d0->cld_mode = CLM_GROUP;
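/*
 * Worked example (added for illustration): merging a CLM_WRITE descriptor
 * covering pages [5, 20] into a CLM_READ descriptor covering [0, 9] widens
 * the extent to [0, 20] and upgrades the mode to CLM_WRITE, since the merged
 * lock has to protect both original requests.
 */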
274 * Sort locks in lexicographical order of their (fid, start-offset) pairs.
276 static void cl_io_locks_sort(struct cl_io *io)
281 /* hidden treasure: bubble sort for now. */
283 struct cl_io_lock_link *curr;
284 struct cl_io_lock_link *prev;
285 struct cl_io_lock_link *temp;
290 cfs_list_for_each_entry_safe(curr, temp,
291 &io->ci_lockset.cls_todo,
294 switch (cl_lock_descr_sort(&prev->cill_descr,
295 &curr->cill_descr)) {
* IMPOSSIBLE: Identical locks are
*             already removed at this point.
305 cfs_list_move_tail(&curr->cill_linkage,
306 &prev->cill_linkage);
continue; /* don't change prev: it is still "previous" */
310 case -1: /* already in order */
321 * Check whether \a queue contains locks matching \a need.
323 * \retval +ve there is a matching lock in the \a queue
324 * \retval 0 there are no matching locks in the \a queue
326 int cl_queue_match(const cfs_list_t *queue,
327 const struct cl_lock_descr *need)
329 struct cl_io_lock_link *scan;
332 cfs_list_for_each_entry(scan, queue, cill_linkage) {
333 if (cl_lock_descr_match(&scan->cill_descr, need))
338 EXPORT_SYMBOL(cl_queue_match);
340 static int cl_queue_merge(const cfs_list_t *queue,
341 const struct cl_lock_descr *need)
343 struct cl_io_lock_link *scan;
346 cfs_list_for_each_entry(scan, queue, cill_linkage) {
347 if (cl_lock_descr_cmp(&scan->cill_descr, need))
349 cl_lock_descr_merge(&scan->cill_descr, need);
350 CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
351 scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
352 scan->cill_descr.cld_end);
359 static int cl_lockset_match(const struct cl_lockset *set,
360 const struct cl_lock_descr *need)
362 return cl_queue_match(&set->cls_curr, need) ||
363 cl_queue_match(&set->cls_done, need);
366 static int cl_lockset_merge(const struct cl_lockset *set,
367 const struct cl_lock_descr *need)
369 return cl_queue_merge(&set->cls_todo, need) ||
370 cl_lockset_match(set, need);
373 static int cl_lockset_lock_one(const struct lu_env *env,
374 struct cl_io *io, struct cl_lockset *set,
375 struct cl_io_lock_link *link)
377 struct cl_lock *lock;
382 if (io->ci_lockreq == CILR_PEEK) {
383 lock = cl_lock_peek(env, io, &link->cill_descr, "io", io);
385 lock = ERR_PTR(-ENODATA);
387 lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
390 link->cill_lock = lock;
391 cfs_list_move(&link->cill_linkage, &set->cls_curr);
392 if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) {
393 result = cl_wait(env, lock);
395 cfs_list_move(&link->cill_linkage,
400 result = PTR_ERR(lock);
404 static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io,
405 struct cl_io_lock_link *link)
407 struct cl_lock *lock = link->cill_lock;
410 cfs_list_del_init(&link->cill_linkage);
412 cl_lock_release(env, lock, "io", io);
413 link->cill_lock = NULL;
415 if (link->cill_fini != NULL)
416 link->cill_fini(env, link);
420 static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
421 struct cl_lockset *set)
423 struct cl_io_lock_link *link;
424 struct cl_io_lock_link *temp;
425 struct cl_lock *lock;
430 cfs_list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
431 if (!cl_lockset_match(set, &link->cill_descr)) {
432 /* XXX some locking to guarantee that locks aren't
433 * expanded in between. */
434 result = cl_lockset_lock_one(env, io, set, link);
438 cl_lock_link_fini(env, io, link);
441 cfs_list_for_each_entry_safe(link, temp,
442 &set->cls_curr, cill_linkage) {
443 lock = link->cill_lock;
444 result = cl_wait(env, lock);
446 cfs_list_move(&link->cill_linkage,
456 * Takes locks necessary for the current iteration of io.
458 * Calls cl_io_operations::cio_lock() top-to-bottom to collect locks required
* by layers for the current iteration. Then sort locks (to avoid dead-locks),
* and acquire them.
462 int cl_io_lock(const struct lu_env *env, struct cl_io *io)
464 const struct cl_io_slice *scan;
467 LINVRNT(cl_io_is_loopable(io));
468 LINVRNT(io->ci_state == CIS_IT_STARTED);
469 LINVRNT(cl_io_invariant(io));
472 cl_io_for_each(scan, io) {
473 if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
475 result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
480 cl_io_locks_sort(io);
481 result = cl_lockset_lock(env, io, &io->ci_lockset);
484 cl_io_unlock(env, io);
486 io->ci_state = CIS_LOCKED;
489 EXPORT_SYMBOL(cl_io_lock);
* Release locks taken by io.
494 void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
496 struct cl_lockset *set;
497 struct cl_io_lock_link *link;
498 struct cl_io_lock_link *temp;
499 const struct cl_io_slice *scan;
501 LASSERT(cl_io_is_loopable(io));
502 LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED);
503 LINVRNT(cl_io_invariant(io));
506 set = &io->ci_lockset;
508 cfs_list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
509 cl_lock_link_fini(env, io, link);
511 cfs_list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage)
512 cl_lock_link_fini(env, io, link);
514 cfs_list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
515 cl_unuse(env, link->cill_lock);
516 cl_lock_link_fini(env, io, link);
518 cl_io_for_each_reverse(scan, io) {
519 if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
520 scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
522 io->ci_state = CIS_UNLOCKED;
523 LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired);
526 EXPORT_SYMBOL(cl_io_unlock);
529 * Prepares next iteration of io.
531 * Calls cl_io_operations::cio_iter_init() top-to-bottom. This exists to give
532 * layers a chance to modify io parameters, e.g., so that lov can restrict io
533 * to a single stripe.
535 int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
537 const struct cl_io_slice *scan;
540 LINVRNT(cl_io_is_loopable(io));
541 LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED);
542 LINVRNT(cl_io_invariant(io));
546 cl_io_for_each(scan, io) {
547 if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
549 result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
555 io->ci_state = CIS_IT_STARTED;
558 EXPORT_SYMBOL(cl_io_iter_init);
561 * Finalizes io iteration.
563 * Calls cl_io_operations::cio_iter_fini() bottom-to-top.
565 void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
567 const struct cl_io_slice *scan;
569 LINVRNT(cl_io_is_loopable(io));
570 LINVRNT(io->ci_state == CIS_UNLOCKED);
571 LINVRNT(cl_io_invariant(io));
574 cl_io_for_each_reverse(scan, io) {
575 if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
576 scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
578 io->ci_state = CIS_IT_ENDED;
581 EXPORT_SYMBOL(cl_io_iter_fini);
584 * Records that read or write io progressed \a nob bytes forward.
586 void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, size_t nob)
588 const struct cl_io_slice *scan;
590 LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
592 LINVRNT(cl_io_is_loopable(io));
593 LINVRNT(cl_io_invariant(io));
597 io->u.ci_rw.crw_pos += nob;
598 io->u.ci_rw.crw_count -= nob;
600 /* layers have to be notified. */
601 cl_io_for_each_reverse(scan, io) {
602 if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
603 scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
608 EXPORT_SYMBOL(cl_io_rw_advance);
611 * Adds a lock to a lockset.
613 int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
614 struct cl_io_lock_link *link)
619 if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr))
622 cfs_list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
627 EXPORT_SYMBOL(cl_io_lock_add);
629 static void cl_free_io_lock_link(const struct lu_env *env,
630 struct cl_io_lock_link *link)
* Allocates a new lock link and uses it to add a lock to a lockset.
638 int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
639 struct cl_lock_descr *descr)
641 struct cl_io_lock_link *link;
647 link->cill_descr = *descr;
648 link->cill_fini = cl_free_io_lock_link;
649 result = cl_io_lock_add(env, io, link);
650 if (result) /* lock match */
651 link->cill_fini(env, link);
657 EXPORT_SYMBOL(cl_io_lock_alloc_add);
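/*
 * Illustrative sketch (hypothetical layer method, not from the original
 * source): a cl_io_operations::cio_lock() implementation usually fills a
 * cl_lock_descr for the extent it needs and hands it to
 * cl_io_lock_alloc_add(), which merges it into the todo lockset. The
 * "demo_" names below are invented for the example.
 *
 *      static int demo_io_lock(const struct lu_env *env,
 *                              const struct cl_io_slice *ios)
 *      {
 *              struct cl_lock_descr descr = {
 *                      .cld_obj   = ios->cis_obj,
 *                      .cld_mode  = CLM_READ,
 *                      .cld_start = 0,    // first page index of the range
 *                      .cld_end   = 0     // last page index of the range
 *              };
 *
 *              return cl_io_lock_alloc_add(env, ios->cis_io, &descr);
 *      }
 */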
660 * Starts io by calling cl_io_operations::cio_start() top-to-bottom.
662 int cl_io_start(const struct lu_env *env, struct cl_io *io)
664 const struct cl_io_slice *scan;
667 LINVRNT(cl_io_is_loopable(io));
668 LINVRNT(io->ci_state == CIS_LOCKED);
669 LINVRNT(cl_io_invariant(io));
672 io->ci_state = CIS_IO_GOING;
673 cl_io_for_each(scan, io) {
674 if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
676 result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
684 EXPORT_SYMBOL(cl_io_start);
687 * Wait until current io iteration is finished by calling
688 * cl_io_operations::cio_end() bottom-to-top.
690 void cl_io_end(const struct lu_env *env, struct cl_io *io)
692 const struct cl_io_slice *scan;
694 LINVRNT(cl_io_is_loopable(io));
695 LINVRNT(io->ci_state == CIS_IO_GOING);
696 LINVRNT(cl_io_invariant(io));
699 cl_io_for_each_reverse(scan, io) {
700 if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
701 scan->cis_iop->op[io->ci_type].cio_end(env, scan);
702 /* TODO: error handling. */
704 io->ci_state = CIS_IO_FINISHED;
707 EXPORT_SYMBOL(cl_io_end);
709 static const struct cl_page_slice *
710 cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
712 const struct cl_page_slice *slice;
714 slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type);
715 LINVRNT(slice != NULL);
720 * True iff \a page is within \a io range.
722 static int cl_page_in_io(const struct cl_page *page, const struct cl_io *io)
729 idx = page->cp_index;
730 switch (io->ci_type) {
* check that [start, end) and [pos, pos + count) extents overlap.
737 if (!cl_io_is_append(io)) {
738 const struct cl_io_rw_common *crw = &(io->u.ci_rw);
739 start = cl_offset(page->cp_obj, idx);
740 end = cl_offset(page->cp_obj, idx + 1);
741 result = crw->crw_pos < end &&
742 start < crw->crw_pos + crw->crw_count;
746 result = io->u.ci_fault.ft_index == idx;
755 * Called by read io, when page has to be read from the server.
757 * \see cl_io_operations::cio_read_page()
759 int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
760 struct cl_page *page)
762 const struct cl_io_slice *scan;
763 struct cl_2queue *queue;
766 LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
767 LINVRNT(cl_page_is_owned(page, io));
768 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
769 LINVRNT(cl_page_in_io(page, io));
770 LINVRNT(cl_io_invariant(io));
773 queue = &io->ci_queue;
775 cl_2queue_init(queue);
777 * ->cio_read_page() methods called in the loop below are supposed to
778 * never block waiting for network (the only subtle point is the
779 * creation of new pages for read-ahead that might result in cache
780 * shrinking, but currently only clean pages are shrunk and this
781 * requires no network io).
* Should this ever start blocking, a retry loop would be needed for
784 * "parallel io" (see CLO_REPEAT loops in cl_lock.c).
786 cl_io_for_each(scan, io) {
787 if (scan->cis_iop->cio_read_page != NULL) {
788 const struct cl_page_slice *slice;
790 slice = cl_io_slice_page(scan, page);
791 LINVRNT(slice != NULL);
792 result = scan->cis_iop->cio_read_page(env, scan, slice);
798 result = cl_io_submit_rw(env, io, CRT_READ, queue, CRP_NORMAL);
800 * Unlock unsent pages in case of error.
802 cl_page_list_disown(env, io, &queue->c2_qin);
803 cl_2queue_fini(env, queue);
806 EXPORT_SYMBOL(cl_io_read_page);
* Called by write io to prepare a page to receive data from the user buffer.
811 * \see cl_io_operations::cio_prepare_write()
813 int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
814 struct cl_page *page, unsigned from, unsigned to)
816 const struct cl_io_slice *scan;
819 LINVRNT(io->ci_type == CIT_WRITE);
820 LINVRNT(cl_page_is_owned(page, io));
821 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
822 LINVRNT(cl_io_invariant(io));
823 LASSERT(cl_page_in_io(page, io));
826 cl_io_for_each_reverse(scan, io) {
827 if (scan->cis_iop->cio_prepare_write != NULL) {
828 const struct cl_page_slice *slice;
830 slice = cl_io_slice_page(scan, page);
831 result = scan->cis_iop->cio_prepare_write(env, scan,
840 EXPORT_SYMBOL(cl_io_prepare_write);
843 * Called by write io after user data were copied into a page.
845 * \see cl_io_operations::cio_commit_write()
847 int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
848 struct cl_page *page, unsigned from, unsigned to)
850 const struct cl_io_slice *scan;
853 LINVRNT(io->ci_type == CIT_WRITE);
854 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
855 LINVRNT(cl_io_invariant(io));
857 * XXX Uh... not nice. Top level cl_io_commit_write() call (vvp->lov)
858 * already called cl_page_cache_add(), moving page into CPS_CACHED
* state. A better (and more general) way of dealing with such a situation is needed.
862 LASSERT(cl_page_is_owned(page, io) || page->cp_parent != NULL);
863 LASSERT(cl_page_in_io(page, io));
866 cl_io_for_each(scan, io) {
867 if (scan->cis_iop->cio_commit_write != NULL) {
868 const struct cl_page_slice *slice;
870 slice = cl_io_slice_page(scan, page);
871 result = scan->cis_iop->cio_commit_write(env, scan,
878 LINVRNT(result <= 0);
881 EXPORT_SYMBOL(cl_io_commit_write);
884 * Submits a list of pages for immediate io.
* When this function returns, the submitted pages have been moved to the
* queue->c2_qout queue, while queue->c2_qin contains both the pages that
* did not need to be submitted and the pages that failed to be submitted.
890 * \returns 0 if at least one page was submitted, error code otherwise.
891 * \see cl_io_operations::cio_submit()
893 int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
894 enum cl_req_type crt, struct cl_2queue *queue,
895 enum cl_req_priority priority)
897 const struct cl_io_slice *scan;
900 LINVRNT(crt < ARRAY_SIZE(scan->cis_iop->req_op));
903 cl_io_for_each(scan, io) {
904 if (scan->cis_iop->req_op[crt].cio_submit == NULL)
906 result = scan->cis_iop->req_op[crt].cio_submit(env, scan, crt,
912 * If ->cio_submit() failed, no pages were sent.
914 LASSERT(ergo(result != 0, cfs_list_empty(&queue->c2_qout.pl_pages)));
917 EXPORT_SYMBOL(cl_io_submit_rw);
* Submit a sync_io and wait until the IO finishes or an error occurs.
* If \a timeout is zero, wait for the IO unconditionally.
923 int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
924 enum cl_req_type iot, struct cl_2queue *queue,
925 enum cl_req_priority prio, long timeout)
927 struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
931 LASSERT(prio == CRP_NORMAL || prio == CRP_CANCEL);
933 cl_page_list_for_each(pg, &queue->c2_qin) {
934 LASSERT(pg->cp_sync_io == NULL);
935 pg->cp_sync_io = anchor;
938 cl_sync_io_init(anchor, queue->c2_qin.pl_nr);
939 rc = cl_io_submit_rw(env, io, iot, queue, prio);
942 * If some pages weren't sent for any reason (e.g.,
943 * read found up-to-date pages in the cache, or write found
* clean pages), count them as completed to avoid infinite waiting.
947 cl_page_list_for_each(pg, &queue->c2_qin) {
948 pg->cp_sync_io = NULL;
949 cl_sync_io_note(anchor, +1);
952 /* wait for the IO to be finished. */
953 rc = cl_sync_io_wait(env, io, &queue->c2_qout,
956 LASSERT(cfs_list_empty(&queue->c2_qout.pl_pages));
957 cl_page_list_for_each(pg, &queue->c2_qin)
958 pg->cp_sync_io = NULL;
962 EXPORT_SYMBOL(cl_io_submit_sync);
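/*
 * Illustrative sketch (not part of the original code): synchronously
 * transferring a single owned page through cl_io_submit_sync(). The page
 * "pg" is assumed to be owned by "io" already; a zero timeout means wait
 * unconditionally.
 *
 *      struct cl_2queue *queue = &io->ci_queue;
 *
 *      cl_2queue_init_page(queue, pg);
 *      rc = cl_io_submit_sync(env, io, CRT_WRITE, queue, CRP_NORMAL, 0);
 *      cl_2queue_disown(env, io, queue);
 *      cl_2queue_fini(env, queue);
 */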
965 * Cancel an IO which has been submitted by cl_io_submit_rw.
967 int cl_io_cancel(const struct lu_env *env, struct cl_io *io,
968 struct cl_page_list *queue)
970 struct cl_page *page;
CERROR("Canceling ongoing page transmission\n");
974 cl_page_list_for_each(page, queue) {
977 LINVRNT(cl_page_in_io(page, io));
978 rc = cl_page_cancel(env, page);
979 result = result ?: rc;
983 EXPORT_SYMBOL(cl_io_cancel);
* Pumps io through iterations calling
*
*    - cl_io_iter_init()
*
*    - cl_io_lock()
*
*    - cl_io_start()
*
*    - cl_io_end()
*
*    - cl_io_unlock()
*
*    - cl_io_iter_fini()
*
* repeatedly until there is no more io to do.
1004 int cl_io_loop(const struct lu_env *env, struct cl_io *io)
1008 LINVRNT(cl_io_is_loopable(io));
1014 io->ci_continue = 0;
1015 result = cl_io_iter_init(env, io);
1018 result = cl_io_lock(env, io);
* Notify layers that locks have been taken,
* and do the actual i/o.
1024 * - llite: kms, short read;
1025 * - llite: generic_file_read();
1027 result = cl_io_start(env, io);
* Send any remaining pending io, etc.
1032 * - llite: ll_rw_stats_tally.
1035 cl_io_unlock(env, io);
1036 cl_io_rw_advance(env, io, io->ci_nob - nob);
1039 cl_io_iter_fini(env, io);
1040 } while (result == 0 && io->ci_continue);
1041 RETURN(result < 0 ? result : 0);
1043 EXPORT_SYMBOL(cl_io_loop);
1046 * Adds io slice to the cl_io.
1048 * This is called by cl_object_operations::coo_io_init() methods to add a
1049 * per-layer state to the io. New state is added at the end of
1050 * cl_io::ci_layers list, that is, it is at the bottom of the stack.
1052 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_page_slice_add()
1054 void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
1055 struct cl_object *obj,
1056 const struct cl_io_operations *ops)
1058 cfs_list_t *linkage = &slice->cis_linkage;
1060 LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
1061 cfs_list_empty(linkage));
1064 cfs_list_add_tail(linkage, &io->ci_layers);
1066 slice->cis_obj = obj;
1067 slice->cis_iop = ops;
1070 EXPORT_SYMBOL(cl_io_slice_add);
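/*
 * Illustrative sketch (hypothetical layer, not from the original source):
 * a cl_object_operations::coo_io_init() method typically takes its
 * per-layer io state from the session and attaches it with
 * cl_io_slice_add(). "demo_io_ops", "demo_io" and "demo_env_io" stand in
 * for the layer's real operation table and state accessor.
 *
 *      static int demo_io_init(const struct lu_env *env,
 *                              struct cl_object *obj, struct cl_io *io)
 *      {
 *              struct demo_io *dio = demo_env_io(env);  // per-thread state
 *
 *              cl_io_slice_add(io, &dio->di_cl, obj, &demo_io_ops);
 *              return 0;
 *      }
 */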
1074 * Initializes page list.
1076 void cl_page_list_init(struct cl_page_list *plist)
1080 CFS_INIT_LIST_HEAD(&plist->pl_pages);
1081 plist->pl_owner = cfs_current();
1084 EXPORT_SYMBOL(cl_page_list_init);
1087 * Adds a page to a page list.
1089 void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
1092 /* it would be better to check that page is owned by "current" io, but
1093 * it is not passed here. */
1094 LASSERT(page->cp_owner != NULL);
1095 LINVRNT(plist->pl_owner == cfs_current());
1098 cfs_mutex_lock(&page->cp_mutex);
1100 LASSERT(cfs_list_empty(&page->cp_batch));
1101 cfs_list_add_tail(&page->cp_batch, &plist->pl_pages);
1103 page->cp_queue_ref = lu_ref_add(&page->cp_reference, "queue", plist);
1107 EXPORT_SYMBOL(cl_page_list_add);
1110 * Removes a page from a page list.
1112 void cl_page_list_del(const struct lu_env *env,
1113 struct cl_page_list *plist, struct cl_page *page)
1115 LASSERT(plist->pl_nr > 0);
1116 LINVRNT(plist->pl_owner == cfs_current());
1119 cfs_list_del_init(&page->cp_batch);
1121 cfs_mutex_unlock(&page->cp_mutex);
1124 lu_ref_del_at(&page->cp_reference, page->cp_queue_ref, "queue", plist);
1125 cl_page_put(env, page);
1128 EXPORT_SYMBOL(cl_page_list_del);
1131 * Moves a page from one page list to another.
1133 void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
1134 struct cl_page *page)
1136 LASSERT(src->pl_nr > 0);
1137 LINVRNT(dst->pl_owner == cfs_current());
1138 LINVRNT(src->pl_owner == cfs_current());
1141 cfs_list_move_tail(&page->cp_batch, &dst->pl_pages);
1144 lu_ref_set_at(&page->cp_reference,
1145 page->cp_queue_ref, "queue", src, dst);
1148 EXPORT_SYMBOL(cl_page_list_move);
* Splices all pages from \a list onto the tail of \a head, just as
* list_splice() does for list heads.
1153 void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
1155 struct cl_page *page;
1156 struct cl_page *tmp;
1158 LINVRNT(list->pl_owner == cfs_current());
1159 LINVRNT(head->pl_owner == cfs_current());
1162 cl_page_list_for_each_safe(page, tmp, list)
1163 cl_page_list_move(head, list, page);
1166 EXPORT_SYMBOL(cl_page_list_splice);
1168 void cl_page_disown0(const struct lu_env *env,
1169 struct cl_io *io, struct cl_page *pg);
1172 * Disowns pages in a queue.
1174 void cl_page_list_disown(const struct lu_env *env,
1175 struct cl_io *io, struct cl_page_list *plist)
1177 struct cl_page *page;
1178 struct cl_page *temp;
1180 LINVRNT(plist->pl_owner == cfs_current());
1183 cl_page_list_for_each_safe(page, temp, plist) {
1184 LASSERT(plist->pl_nr > 0);
1186 cfs_list_del_init(&page->cp_batch);
1188 cfs_mutex_unlock(&page->cp_mutex);
1192 * cl_page_disown0 rather than usual cl_page_disown() is used,
1193 * because pages are possibly in CPS_FREEING state already due
1194 * to the call to cl_page_list_discard().
1197 * XXX cl_page_disown0() will fail if page is not locked.
1199 cl_page_disown0(env, io, page);
1200 lu_ref_del(&page->cp_reference, "queue", plist);
1201 cl_page_put(env, page);
1205 EXPORT_SYMBOL(cl_page_list_disown);
1208 * Releases pages from queue.
1210 void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
1212 struct cl_page *page;
1213 struct cl_page *temp;
1215 LINVRNT(plist->pl_owner == cfs_current());
1218 cl_page_list_for_each_safe(page, temp, plist)
1219 cl_page_list_del(env, plist, page);
1220 LASSERT(plist->pl_nr == 0);
1223 EXPORT_SYMBOL(cl_page_list_fini);
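/*
 * Illustrative sketch (not part of the original code): collecting pages
 * already owned by "io" into a list and releasing them again. "pages[]" and
 * "npages" are assumptions standing in for whatever the caller gathered.
 *
 *      struct cl_page_list plist;
 *      int i;
 *
 *      cl_page_list_init(&plist);
 *      for (i = 0; i < npages; i++)
 *              cl_page_list_add(&plist, pages[i]);   // takes a queue ref
 *      ...
 *      cl_page_list_disown(env, io, &plist);         // drop ownership + refs
 *      cl_page_list_fini(env, &plist);               // release any leftovers
 */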
1226 * Owns all pages in a queue.
1228 int cl_page_list_own(const struct lu_env *env,
1229 struct cl_io *io, struct cl_page_list *plist)
1231 struct cl_page *page;
1232 struct cl_page *temp;
1236 LINVRNT(plist->pl_owner == cfs_current());
1240 cl_page_list_for_each_safe(page, temp, plist) {
1241 LASSERT(index <= page->cp_index);
1242 index = page->cp_index;
1243 if (cl_page_own(env, io, page) == 0)
1244 result = result ?: page->cp_error;
1246 cl_page_list_del(env, plist, page);
1250 EXPORT_SYMBOL(cl_page_list_own);
* Assumes ownership of all pages in a queue.
1255 void cl_page_list_assume(const struct lu_env *env,
1256 struct cl_io *io, struct cl_page_list *plist)
1258 struct cl_page *page;
1260 LINVRNT(plist->pl_owner == cfs_current());
1262 cl_page_list_for_each(page, plist)
1263 cl_page_assume(env, io, page);
1265 EXPORT_SYMBOL(cl_page_list_assume);
1268 * Discards all pages in a queue.
1270 void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
1271 struct cl_page_list *plist)
1273 struct cl_page *page;
1275 LINVRNT(plist->pl_owner == cfs_current());
1277 cl_page_list_for_each(page, plist)
1278 cl_page_discard(env, io, page);
1281 EXPORT_SYMBOL(cl_page_list_discard);
1284 * Unmaps all pages in a queue from user virtual memory.
1286 int cl_page_list_unmap(const struct lu_env *env, struct cl_io *io,
1287 struct cl_page_list *plist)
1289 struct cl_page *page;
1292 LINVRNT(plist->pl_owner == cfs_current());
1295 cl_page_list_for_each(page, plist) {
1296 result = cl_page_unmap(env, io, page);
1302 EXPORT_SYMBOL(cl_page_list_unmap);
1305 * Initialize dual page queue.
1307 void cl_2queue_init(struct cl_2queue *queue)
1310 cl_page_list_init(&queue->c2_qin);
1311 cl_page_list_init(&queue->c2_qout);
1314 EXPORT_SYMBOL(cl_2queue_init);
1317 * Add a page to the incoming page list of 2-queue.
1319 void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page)
1322 cl_page_list_add(&queue->c2_qin, page);
1325 EXPORT_SYMBOL(cl_2queue_add);
1328 * Disown pages in both lists of a 2-queue.
1330 void cl_2queue_disown(const struct lu_env *env,
1331 struct cl_io *io, struct cl_2queue *queue)
1334 cl_page_list_disown(env, io, &queue->c2_qin);
1335 cl_page_list_disown(env, io, &queue->c2_qout);
1338 EXPORT_SYMBOL(cl_2queue_disown);
1341 * Discard (truncate) pages in both lists of a 2-queue.
1343 void cl_2queue_discard(const struct lu_env *env,
1344 struct cl_io *io, struct cl_2queue *queue)
1347 cl_page_list_discard(env, io, &queue->c2_qin);
1348 cl_page_list_discard(env, io, &queue->c2_qout);
1351 EXPORT_SYMBOL(cl_2queue_discard);
* Assumes ownership of the pages in both lists of a 2-queue.
1356 void cl_2queue_assume(const struct lu_env *env,
1357 struct cl_io *io, struct cl_2queue *queue)
1359 cl_page_list_assume(env, io, &queue->c2_qin);
1360 cl_page_list_assume(env, io, &queue->c2_qout);
1362 EXPORT_SYMBOL(cl_2queue_assume);
1365 * Finalize both page lists of a 2-queue.
1367 void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue)
1370 cl_page_list_fini(env, &queue->c2_qout);
1371 cl_page_list_fini(env, &queue->c2_qin);
1374 EXPORT_SYMBOL(cl_2queue_fini);
1377 * Initialize a 2-queue to contain \a page in its incoming page list.
1379 void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page)
1382 cl_2queue_init(queue);
1383 cl_2queue_add(queue, page);
1386 EXPORT_SYMBOL(cl_2queue_init_page);
1389 * Returns top-level io.
1391 * \see cl_object_top(), cl_page_top().
1393 struct cl_io *cl_io_top(struct cl_io *io)
1396 while (io->ci_parent != NULL)
1400 EXPORT_SYMBOL(cl_io_top);
1403 * Prints human readable representation of \a io to the \a f.
1405 void cl_io_print(const struct lu_env *env, void *cookie,
1406 lu_printer_t printer, const struct cl_io *io)
1411 * Adds request slice to the compound request.
1413 * This is called by cl_device_operations::cdo_req_init() methods to add a
1414 * per-layer state to the request. New state is added at the end of
1415 * cl_req::crq_layers list, that is, it is at the bottom of the stack.
1417 * \see cl_lock_slice_add(), cl_page_slice_add(), cl_io_slice_add()
1419 void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
1420 struct cl_device *dev,
1421 const struct cl_req_operations *ops)
1424 cfs_list_add_tail(&slice->crs_linkage, &req->crq_layers);
1425 slice->crs_dev = dev;
1426 slice->crs_ops = ops;
1427 slice->crs_req = req;
1430 EXPORT_SYMBOL(cl_req_slice_add);
1432 static void cl_req_free(const struct lu_env *env, struct cl_req *req)
1436 LASSERT(cfs_list_empty(&req->crq_pages));
1437 LASSERT(req->crq_nrpages == 0);
1438 LINVRNT(cfs_list_empty(&req->crq_layers));
1439 LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL));
1442 if (req->crq_o != NULL) {
1443 for (i = 0; i < req->crq_nrobjs; ++i) {
1444 struct cl_object *obj = req->crq_o[i].ro_obj;
1446 lu_object_ref_del_at(&obj->co_lu,
1447 req->crq_o[i].ro_obj_ref,
1449 cl_object_put(env, obj);
1452 OBD_FREE(req->crq_o, req->crq_nrobjs * sizeof req->crq_o[0]);
1458 static int cl_req_init(const struct lu_env *env, struct cl_req *req,
1459 struct cl_page *page)
1461 struct cl_device *dev;
1462 struct cl_page_slice *slice;
1467 page = cl_page_top(page);
1469 cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
1470 dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
1471 if (dev->cd_ops->cdo_req_init != NULL) {
1472 result = dev->cd_ops->cdo_req_init(env,
1478 page = page->cp_child;
1479 } while (page != NULL && result == 0);
1484 * Invokes per-request transfer completion call-backs
1485 * (cl_req_operations::cro_completion()) bottom-to-top.
1487 void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
1489 struct cl_req_slice *slice;
1493 * for the lack of list_for_each_entry_reverse_safe()...
1495 while (!cfs_list_empty(&req->crq_layers)) {
1496 slice = cfs_list_entry(req->crq_layers.prev,
1497 struct cl_req_slice, crs_linkage);
1498 cfs_list_del_init(&slice->crs_linkage);
1499 if (slice->crs_ops->cro_completion != NULL)
1500 slice->crs_ops->cro_completion(env, slice, rc);
1502 cl_req_free(env, req);
1505 EXPORT_SYMBOL(cl_req_completion);
1508 * Allocates new transfer request.
1510 struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
1511 enum cl_req_type crt, int nr_objects)
1515 LINVRNT(nr_objects > 0);
1522 OBD_ALLOC(req->crq_o, nr_objects * sizeof req->crq_o[0]);
1523 if (req->crq_o != NULL) {
1524 req->crq_nrobjs = nr_objects;
1525 req->crq_type = crt;
1526 CFS_INIT_LIST_HEAD(&req->crq_pages);
1527 CFS_INIT_LIST_HEAD(&req->crq_layers);
1528 result = cl_req_init(env, req, page);
1532 cl_req_completion(env, req, result);
1533 req = ERR_PTR(result);
1536 req = ERR_PTR(-ENOMEM);
1539 EXPORT_SYMBOL(cl_req_alloc);
1542 * Adds a page to a request.
1544 void cl_req_page_add(const struct lu_env *env,
1545 struct cl_req *req, struct cl_page *page)
1547 struct cl_object *obj;
1548 struct cl_req_obj *rqo;
1552 page = cl_page_top(page);
1554 LASSERT(cfs_list_empty(&page->cp_flight));
1555 LASSERT(page->cp_req == NULL);
1557 CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n",
1558 req, req->crq_type, req->crq_nrpages);
1560 cfs_list_add_tail(&page->cp_flight, &req->crq_pages);
1563 obj = cl_object_top(page->cp_obj);
1564 for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) {
1565 if (rqo->ro_obj == NULL) {
1568 rqo->ro_obj_ref = lu_object_ref_add(&obj->co_lu,
1573 LASSERT(i < req->crq_nrobjs);
1576 EXPORT_SYMBOL(cl_req_page_add);
1579 * Removes a page from a request.
1581 void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
1583 struct cl_req *req = page->cp_req;
1586 page = cl_page_top(page);
1588 LASSERT(!cfs_list_empty(&page->cp_flight));
1589 LASSERT(req->crq_nrpages > 0);
1591 cfs_list_del_init(&page->cp_flight);
1593 page->cp_req = NULL;
1596 EXPORT_SYMBOL(cl_req_page_done);
1599 * Notifies layers that request is about to depart by calling
1600 * cl_req_operations::cro_prep() top-to-bottom.
1602 int cl_req_prep(const struct lu_env *env, struct cl_req *req)
1606 const struct cl_req_slice *slice;
* Check that the caller of cl_req_alloc() didn't lie about the number of objects.
1613 for (i = 0; i < req->crq_nrobjs; ++i)
1614 LASSERT(req->crq_o[i].ro_obj != NULL);
1617 cfs_list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
1618 if (slice->crs_ops->cro_prep != NULL) {
1619 result = slice->crs_ops->cro_prep(env, slice);
1626 EXPORT_SYMBOL(cl_req_prep);
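/*
 * Illustrative sketch (not part of the original code): how a transfer
 * engine might assemble and retire a cl_req. "pages[]", "npages" and
 * "nr_objects" are assumptions; error handling is abbreviated.
 *
 *      struct cl_req *req;
 *      int i;
 *
 *      req = cl_req_alloc(env, pages[0], CRT_WRITE, nr_objects);
 *      if (IS_ERR(req))
 *              return PTR_ERR(req);
 *      for (i = 0; i < npages; i++)
 *              cl_req_page_add(env, req, pages[i]);
 *      rc = cl_req_prep(env, req);
 *      // ... transfer; completion calls cl_req_page_done() on every page,
 *      // and finally:
 *      cl_req_completion(env, req, rc);   // also frees the request
 */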
1629 * Fills in attributes that are passed to server together with transfer. Only
1630 * attributes from \a flags may be touched. This can be called multiple times
1631 * for the same request.
1633 void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
1634 struct cl_req_attr *attr, obd_valid flags)
1636 const struct cl_req_slice *slice;
1637 struct cl_page *page;
1640 LASSERT(!cfs_list_empty(&req->crq_pages));
1643 /* Take any page to use as a model. */
1644 page = cfs_list_entry(req->crq_pages.next, struct cl_page, cp_flight);
1646 for (i = 0; i < req->crq_nrobjs; ++i) {
1647 cfs_list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
1648 const struct cl_page_slice *scan;
1649 const struct cl_object *obj;
1651 scan = cl_page_at(page,
1652 slice->crs_dev->cd_lu_dev.ld_type);
1653 LASSERT(scan != NULL);
1654 obj = scan->cpl_obj;
1655 if (slice->crs_ops->cro_attr_set != NULL)
1656 slice->crs_ops->cro_attr_set(env, slice, obj,
1662 EXPORT_SYMBOL(cl_req_attr_set);
1664 /* XXX complete(), init_completion(), and wait_for_completion(), until they are
1665 * implemented in libcfs. */
1667 # include <linux/sched.h>
1668 #else /* __KERNEL__ */
1669 # include <liblustre.h>
1673 * Initialize synchronous io wait anchor, for transfer of \a nrpages pages.
1675 void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
1678 cfs_waitq_init(&anchor->csi_waitq);
1679 cfs_atomic_set(&anchor->csi_sync_nr, nrpages);
1680 anchor->csi_sync_rc = 0;
1683 EXPORT_SYMBOL(cl_sync_io_init);
* Wait until all transfers complete. The transfer completion routine has to
* call cl_sync_io_note() for every page.
1689 int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
1690 struct cl_page_list *queue, struct cl_sync_io *anchor,
1693 struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
1698 LASSERT(timeout >= 0);
1700 rc = l_wait_event(anchor->csi_waitq,
1701 cfs_atomic_read(&anchor->csi_sync_nr) == 0,
CERROR("SYNC IO failed with error: %d, trying to cancel "
       "%d remaining pages\n",
1706 rc, cfs_atomic_read(&anchor->csi_sync_nr));
1708 (void)cl_io_cancel(env, io, queue);
1710 lwi = (struct l_wait_info) { 0 };
1711 (void)l_wait_event(anchor->csi_waitq,
1712 cfs_atomic_read(&anchor->csi_sync_nr) == 0,
1715 rc = anchor->csi_sync_rc;
1717 LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) == 0);
1718 cl_page_list_assume(env, io, queue);
1719 POISON(anchor, 0x5a, sizeof *anchor);
1722 EXPORT_SYMBOL(cl_sync_io_wait);
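/*
 * Illustrative sketch (not part of the original code): the anchor pattern
 * implemented by cl_sync_io_init() and cl_sync_io_wait(), essentially what
 * cl_io_submit_sync() does. The submitter sizes the anchor to the number of
 * queued pages and points each page's cp_sync_io at it, transfer completion
 * calls cl_sync_io_note() once per page, and the waiter blocks until the
 * counter drains (a zero timeout waits forever).
 *
 *      struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
 *      struct cl_page *pg;
 *
 *      cl_page_list_for_each(pg, &queue->c2_qin)
 *              pg->cp_sync_io = anchor;
 *      cl_sync_io_init(anchor, queue->c2_qin.pl_nr);
 *      rc = cl_io_submit_rw(env, io, CRT_READ, queue, CRP_NORMAL);
 *      if (rc == 0)
 *              rc = cl_sync_io_wait(env, io, &queue->c2_qout, anchor, 0);
 */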
1725 * Indicate that transfer of a single page completed.
1727 void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
1730 if (anchor->csi_sync_rc == 0 && ioret < 0)
1731 anchor->csi_sync_rc = ioret;
* Synchronous IO done without releasing page lock (e.g., as a part of
* ->{prepare,commit}_write()). Completion is used to signal the end of
* IO.
1737 LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) > 0);
1738 if (cfs_atomic_dec_and_test(&anchor->csi_sync_nr))
1739 cfs_waitq_broadcast(&anchor->csi_waitq);
1742 EXPORT_SYMBOL(cl_sync_io_note);