4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2012, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
41 #define DEBUG_SUBSYSTEM S_CLASS
43 #include <obd_class.h>
44 #include <obd_support.h>
45 #include <lustre_fid.h>
46 #include <libcfs/list.h>
47 #include <cl_object.h>
48 #include "cl_internal.h"
/*****************************************************************************
 *
 * cl_io interface.
 *
 */
56 #define cl_io_for_each(slice, io) \
57 cfs_list_for_each_entry((slice), &io->ci_layers, cis_linkage)
58 #define cl_io_for_each_reverse(slice, io) \
59 cfs_list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
static inline int cl_io_type_is_valid(enum cl_io_type type)
{
	return CIT_READ <= type && type < CIT_OP_NR;
}

static inline int cl_io_is_loopable(const struct cl_io *io)
{
	return cl_io_type_is_valid(io->ci_type) && io->ci_type != CIT_MISC;
}
72 * Returns true iff there is an IO ongoing in the given environment.
int cl_io_is_going(const struct lu_env *env)
{
	return cl_env_info(env)->clt_current_io != NULL;
}
EXPORT_SYMBOL(cl_io_is_going);
81 * cl_io invariant that holds at all times when exported cl_io_*() functions
82 * are entered and left.
static int cl_io_invariant(const struct cl_io *io)
{
	struct cl_io *up = io->ci_parent;

	return
		/*
		 * io can own pages only when it is ongoing. Sub-io might
		 * still be in CIS_LOCKED state when top-io is in
		 * CIS_IO_GOING state.
		 */
		ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING ||
		     (io->ci_state == CIS_LOCKED && up != NULL));
}
100 * Finalize \a io, by calling cl_io_operations::cio_fini() bottom-to-top.
102 void cl_io_fini(const struct lu_env *env, struct cl_io *io)
104 struct cl_io_slice *slice;
105 struct cl_thread_info *info;
107 LINVRNT(cl_io_type_is_valid(io->ci_type));
108 LINVRNT(cl_io_invariant(io));
111 while (!cfs_list_empty(&io->ci_layers)) {
slice = container_of(io->ci_layers.prev, struct cl_io_slice,
		     cis_linkage);
cfs_list_del_init(&slice->cis_linkage);
115 if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
116 slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
118 * Invalidate slice to catch use after free. This assumes that
119 * slices are allocated within session and can be touched
120 * after ->cio_fini() returns.
122 slice->cis_io = NULL;
124 io->ci_state = CIS_FINI;
125 info = cl_env_info(env);
126 if (info->clt_current_io == io)
127 info->clt_current_io = NULL;
129 /* sanity check for layout change */
130 switch(io->ci_type) {
136 LASSERT(!io->ci_need_restart);
140 /* Check ignore layout change conf */
141 LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout,
142 !io->ci_need_restart));
149 EXPORT_SYMBOL(cl_io_fini);
151 static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
152 enum cl_io_type iot, struct cl_object *obj)
154 struct cl_object *scan;
157 LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI);
158 LINVRNT(cl_io_type_is_valid(iot));
159 LINVRNT(cl_io_invariant(io));
163 CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
164 CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_curr);
165 CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_done);
166 CFS_INIT_LIST_HEAD(&io->ci_layers);
169 cl_object_for_each(scan, obj) {
170 if (scan->co_ops->coo_io_init != NULL) {
171 result = scan->co_ops->coo_io_init(env, scan, io);
177 io->ci_state = CIS_INIT;
182 * Initialize sub-io, by calling cl_io_operations::cio_init() top-to-bottom.
184 * \pre obj != cl_object_top(obj)
186 int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
187 enum cl_io_type iot, struct cl_object *obj)
189 struct cl_thread_info *info = cl_env_info(env);
191 LASSERT(obj != cl_object_top(obj));
192 if (info->clt_current_io == NULL)
193 info->clt_current_io = io;
194 return cl_io_init0(env, io, iot, obj);
196 EXPORT_SYMBOL(cl_io_sub_init);
199 * Initialize \a io, by calling cl_io_operations::cio_init() top-to-bottom.
201 * Caller has to call cl_io_fini() after a call to cl_io_init(), no matter
202 * what the latter returned.
204 * \pre obj == cl_object_top(obj)
205 * \pre cl_io_type_is_valid(iot)
206 * \post cl_io_type_is_valid(io->ci_type) && io->ci_type == iot
208 int cl_io_init(const struct lu_env *env, struct cl_io *io,
209 enum cl_io_type iot, struct cl_object *obj)
211 struct cl_thread_info *info = cl_env_info(env);
213 LASSERT(obj == cl_object_top(obj));
214 LASSERT(info->clt_current_io == NULL);
216 info->clt_current_io = io;
217 return cl_io_init0(env, io, iot, obj);
219 EXPORT_SYMBOL(cl_io_init);
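/**
 * A minimal usage sketch of the contract above (the names env, io, obj and
 * iot are assumed to be supplied by the caller); note that cl_io_fini() is
 * required no matter what cl_io_init() returned:
 *
 * \code
 *	result = cl_io_init(env, io, iot, obj);
 *	if (result == 0)
 *		result = cl_io_loop(env, io);
 *	cl_io_fini(env, io);
 * \endcode
 */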
222 * Initialize read or write io.
224 * \pre iot == CIT_READ || iot == CIT_WRITE
226 int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
227 enum cl_io_type iot, loff_t pos, size_t count)
229 LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
230 LINVRNT(io->ci_obj != NULL);
233 LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
234 "io range: %u ["LPU64", "LPU64") %u %u\n",
235 iot, (__u64)pos, (__u64)pos + count,
236 io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
237 io->u.ci_rw.crw_pos = pos;
238 io->u.ci_rw.crw_count = count;
239 RETURN(cl_io_init(env, io, iot, io->ci_obj));
241 EXPORT_SYMBOL(cl_io_rw_init);
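/**
 * For example (an illustrative sketch), a write of \a count bytes at offset
 * \a pos against object obj could be set up as:
 *
 * \code
 *	io->ci_obj = obj;
 *	result = cl_io_rw_init(env, io, CIT_WRITE, pos, count);
 * \endcode
 */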
static inline const struct lu_fid *
cl_lock_descr_fid(const struct cl_lock_descr *descr)
{
	return lu_object_fid(&descr->cld_obj->co_lu);
}

static int cl_lock_descr_sort(const struct cl_lock_descr *d0,
			      const struct cl_lock_descr *d1)
{
	return lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1)) ?:
		__diff_normalize(d0->cld_start, d1->cld_start);
}
static int cl_lock_descr_cmp(const struct cl_lock_descr *d0,
			     const struct cl_lock_descr *d1)
{
	int ret = lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1));

	if (ret != 0)
		return ret;
	if (d0->cld_end < d1->cld_start)
		return -1;
	if (d0->cld_start > d1->cld_end)
		return +1;
	return 0;
}
static void cl_lock_descr_merge(struct cl_lock_descr *d0,
				const struct cl_lock_descr *d1)
{
	d0->cld_start = min(d0->cld_start, d1->cld_start);
	d0->cld_end = max(d0->cld_end, d1->cld_end);

	if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
		d0->cld_mode = CLM_WRITE;

	if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
		d0->cld_mode = CLM_GROUP;
}
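/*
 * For example (illustrative values): merging d1 = { CLM_WRITE, [5, 20] }
 * into d0 = { CLM_READ, [0, 10] } leaves d0 = { CLM_WRITE, [0, 20] }: the
 * extent becomes the union of both extents, and the mode is widened to
 * CLM_WRITE (or CLM_GROUP) when either descriptor carries it.
 */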
285 * Sort locks in lexicographical order of their (fid, start-offset) pairs.
287 static void cl_io_locks_sort(struct cl_io *io)
292 /* hidden treasure: bubble sort for now. */
294 struct cl_io_lock_link *curr;
295 struct cl_io_lock_link *prev;
296 struct cl_io_lock_link *temp;
301 cfs_list_for_each_entry_safe(curr, temp,
302 &io->ci_lockset.cls_todo,
305 switch (cl_lock_descr_sort(&prev->cill_descr,
306 &curr->cill_descr)) {
* IMPOSSIBLE: Identical locks are already removed at this point.
316 cfs_list_move_tail(&curr->cill_linkage,
317 &prev->cill_linkage);
319 continue; /* don't change prev: it's
320 * still "previous" */
321 case -1: /* already in order */
332 * Check whether \a queue contains locks matching \a need.
334 * \retval +ve there is a matching lock in the \a queue
335 * \retval 0 there are no matching locks in the \a queue
337 int cl_queue_match(const cfs_list_t *queue,
338 const struct cl_lock_descr *need)
340 struct cl_io_lock_link *scan;
343 cfs_list_for_each_entry(scan, queue, cill_linkage) {
344 if (cl_lock_descr_match(&scan->cill_descr, need))
349 EXPORT_SYMBOL(cl_queue_match);
351 static int cl_queue_merge(const cfs_list_t *queue,
352 const struct cl_lock_descr *need)
354 struct cl_io_lock_link *scan;
357 cfs_list_for_each_entry(scan, queue, cill_linkage) {
358 if (cl_lock_descr_cmp(&scan->cill_descr, need))
360 cl_lock_descr_merge(&scan->cill_descr, need);
361 CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
362 scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
363 scan->cill_descr.cld_end);
static int cl_lockset_match(const struct cl_lockset *set,
			    const struct cl_lock_descr *need)
{
	return cl_queue_match(&set->cls_curr, need) ||
	       cl_queue_match(&set->cls_done, need);
}

static int cl_lockset_merge(const struct cl_lockset *set,
			    const struct cl_lock_descr *need)
{
	return cl_queue_merge(&set->cls_todo, need) ||
	       cl_lockset_match(set, need);
}
384 static int cl_lockset_lock_one(const struct lu_env *env,
385 struct cl_io *io, struct cl_lockset *set,
386 struct cl_io_lock_link *link)
388 struct cl_lock *lock;
393 if (io->ci_lockreq == CILR_PEEK) {
394 lock = cl_lock_peek(env, io, &link->cill_descr, "io", io);
396 lock = ERR_PTR(-ENODATA);
398 lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
401 link->cill_lock = lock;
402 cfs_list_move(&link->cill_linkage, &set->cls_curr);
403 if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) {
404 result = cl_wait(env, lock);
406 cfs_list_move(&link->cill_linkage,
411 result = PTR_ERR(lock);
415 static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io,
416 struct cl_io_lock_link *link)
418 struct cl_lock *lock = link->cill_lock;
421 cfs_list_del_init(&link->cill_linkage);
423 cl_lock_release(env, lock, "io", io);
424 link->cill_lock = NULL;
426 if (link->cill_fini != NULL)
427 link->cill_fini(env, link);
431 static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
432 struct cl_lockset *set)
434 struct cl_io_lock_link *link;
435 struct cl_io_lock_link *temp;
436 struct cl_lock *lock;
441 cfs_list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
442 if (!cl_lockset_match(set, &link->cill_descr)) {
443 /* XXX some locking to guarantee that locks aren't
444 * expanded in between. */
445 result = cl_lockset_lock_one(env, io, set, link);
449 cl_lock_link_fini(env, io, link);
452 cfs_list_for_each_entry_safe(link, temp,
453 &set->cls_curr, cill_linkage) {
454 lock = link->cill_lock;
455 result = cl_wait(env, lock);
457 cfs_list_move(&link->cill_linkage,
467 * Takes locks necessary for the current iteration of io.
469 * Calls cl_io_operations::cio_lock() top-to-bottom to collect locks required
* by layers for the current iteration. Then sorts the collected locks (to
* avoid dead-locks) and acquires them.
473 int cl_io_lock(const struct lu_env *env, struct cl_io *io)
475 const struct cl_io_slice *scan;
478 LINVRNT(cl_io_is_loopable(io));
479 LINVRNT(io->ci_state == CIS_IT_STARTED);
480 LINVRNT(cl_io_invariant(io));
483 cl_io_for_each(scan, io) {
484 if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
486 result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
491 cl_io_locks_sort(io);
492 result = cl_lockset_lock(env, io, &io->ci_lockset);
495 cl_io_unlock(env, io);
497 io->ci_state = CIS_LOCKED;
500 EXPORT_SYMBOL(cl_io_lock);
* Releases locks taken by io.
505 void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
507 struct cl_lockset *set;
508 struct cl_io_lock_link *link;
509 struct cl_io_lock_link *temp;
510 const struct cl_io_slice *scan;
512 LASSERT(cl_io_is_loopable(io));
513 LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED);
514 LINVRNT(cl_io_invariant(io));
517 set = &io->ci_lockset;
519 cfs_list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
520 cl_lock_link_fini(env, io, link);
522 cfs_list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage)
523 cl_lock_link_fini(env, io, link);
525 cfs_list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
526 cl_unuse(env, link->cill_lock);
527 cl_lock_link_fini(env, io, link);
529 cl_io_for_each_reverse(scan, io) {
530 if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
531 scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
533 io->ci_state = CIS_UNLOCKED;
534 LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired);
537 EXPORT_SYMBOL(cl_io_unlock);
540 * Prepares next iteration of io.
542 * Calls cl_io_operations::cio_iter_init() top-to-bottom. This exists to give
543 * layers a chance to modify io parameters, e.g., so that lov can restrict io
544 * to a single stripe.
546 int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
548 const struct cl_io_slice *scan;
551 LINVRNT(cl_io_is_loopable(io));
552 LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED);
553 LINVRNT(cl_io_invariant(io));
557 cl_io_for_each(scan, io) {
558 if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
560 result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
566 io->ci_state = CIS_IT_STARTED;
569 EXPORT_SYMBOL(cl_io_iter_init);
572 * Finalizes io iteration.
574 * Calls cl_io_operations::cio_iter_fini() bottom-to-top.
576 void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
578 const struct cl_io_slice *scan;
580 LINVRNT(cl_io_is_loopable(io));
581 LINVRNT(io->ci_state == CIS_UNLOCKED);
582 LINVRNT(cl_io_invariant(io));
585 cl_io_for_each_reverse(scan, io) {
586 if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
587 scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
589 io->ci_state = CIS_IT_ENDED;
592 EXPORT_SYMBOL(cl_io_iter_fini);
595 * Records that read or write io progressed \a nob bytes forward.
597 void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, size_t nob)
599 const struct cl_io_slice *scan;
601 LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
603 LINVRNT(cl_io_is_loopable(io));
604 LINVRNT(cl_io_invariant(io));
608 io->u.ci_rw.crw_pos += nob;
609 io->u.ci_rw.crw_count -= nob;
611 /* layers have to be notified. */
612 cl_io_for_each_reverse(scan, io) {
613 if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
614 scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
619 EXPORT_SYMBOL(cl_io_rw_advance);
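/*
 * Typical usage mirrors cl_io_loop() below (a sketch): sample io->ci_nob
 * before an iteration and advance by the delta once the iteration is done:
 *
 *	nob = io->ci_nob;
 *	result = cl_io_start(env, io);
 *	cl_io_end(env, io);
 *	cl_io_rw_advance(env, io, io->ci_nob - nob);
 */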
622 * Adds a lock to a lockset.
624 int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
625 struct cl_io_lock_link *link)
630 if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr))
633 cfs_list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
638 EXPORT_SYMBOL(cl_io_lock_add);
640 static void cl_free_io_lock_link(const struct lu_env *env,
641 struct cl_io_lock_link *link)
* Allocates a new lock link and uses it to add a lock to a lockset.
649 int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
650 struct cl_lock_descr *descr)
652 struct cl_io_lock_link *link;
658 link->cill_descr = *descr;
659 link->cill_fini = cl_free_io_lock_link;
660 result = cl_io_lock_add(env, io, link);
661 if (result) /* lock match */
662 link->cill_fini(env, link);
668 EXPORT_SYMBOL(cl_io_lock_alloc_add);
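/*
 * A layer's ->cio_lock() method could use this helper roughly as below (a
 * sketch; the object pointer and the extent values are illustrative
 * assumptions):
 *
 *	struct cl_lock_descr descr = {
 *		.cld_obj   = obj,
 *		.cld_mode  = CLM_READ,
 *		.cld_start = start,
 *		.cld_end   = end
 *	};
 *
 *	return cl_io_lock_alloc_add(env, io, &descr);
 */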
671 * Starts io by calling cl_io_operations::cio_start() top-to-bottom.
673 int cl_io_start(const struct lu_env *env, struct cl_io *io)
675 const struct cl_io_slice *scan;
678 LINVRNT(cl_io_is_loopable(io));
679 LINVRNT(io->ci_state == CIS_LOCKED);
680 LINVRNT(cl_io_invariant(io));
683 io->ci_state = CIS_IO_GOING;
684 cl_io_for_each(scan, io) {
685 if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
687 result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
695 EXPORT_SYMBOL(cl_io_start);
698 * Wait until current io iteration is finished by calling
699 * cl_io_operations::cio_end() bottom-to-top.
701 void cl_io_end(const struct lu_env *env, struct cl_io *io)
703 const struct cl_io_slice *scan;
705 LINVRNT(cl_io_is_loopable(io));
706 LINVRNT(io->ci_state == CIS_IO_GOING);
707 LINVRNT(cl_io_invariant(io));
710 cl_io_for_each_reverse(scan, io) {
711 if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
712 scan->cis_iop->op[io->ci_type].cio_end(env, scan);
713 /* TODO: error handling. */
715 io->ci_state = CIS_IO_FINISHED;
718 EXPORT_SYMBOL(cl_io_end);
720 static const struct cl_page_slice *
721 cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
723 const struct cl_page_slice *slice;
725 slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type);
LINVRNT(slice != NULL);
return slice;
}
731 * True iff \a page is within \a io range.
733 static int cl_page_in_io(const struct cl_page *page, const struct cl_io *io)
740 idx = page->cp_index;
741 switch (io->ci_type) {
* check that [start, end) and [pos, pos + count) extents overlap.
748 if (!cl_io_is_append(io)) {
749 const struct cl_io_rw_common *crw = &(io->u.ci_rw);
750 start = cl_offset(page->cp_obj, idx);
751 end = cl_offset(page->cp_obj, idx + 1);
752 result = crw->crw_pos < end &&
753 start < crw->crw_pos + crw->crw_count;
757 result = io->u.ci_fault.ft_index == idx;
766 * Called by read io, when page has to be read from the server.
768 * \see cl_io_operations::cio_read_page()
770 int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
771 struct cl_page *page)
773 const struct cl_io_slice *scan;
774 struct cl_2queue *queue;
777 LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
778 LINVRNT(cl_page_is_owned(page, io));
779 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
780 LINVRNT(cl_page_in_io(page, io));
781 LINVRNT(cl_io_invariant(io));
784 queue = &io->ci_queue;
786 cl_2queue_init(queue);
788 * ->cio_read_page() methods called in the loop below are supposed to
789 * never block waiting for network (the only subtle point is the
790 * creation of new pages for read-ahead that might result in cache
791 * shrinking, but currently only clean pages are shrunk and this
792 * requires no network io).
* Should this ever start blocking, a retry loop would be needed for
795 * "parallel io" (see CLO_REPEAT loops in cl_lock.c).
797 cl_io_for_each(scan, io) {
798 if (scan->cis_iop->cio_read_page != NULL) {
799 const struct cl_page_slice *slice;
801 slice = cl_io_slice_page(scan, page);
802 LINVRNT(slice != NULL);
803 result = scan->cis_iop->cio_read_page(env, scan, slice);
809 result = cl_io_submit_rw(env, io, CRT_READ, queue);
811 * Unlock unsent pages in case of error.
813 cl_page_list_disown(env, io, &queue->c2_qin);
814 cl_2queue_fini(env, queue);
817 EXPORT_SYMBOL(cl_io_read_page);
820 * Called by write io to prepare page to receive data from user buffer.
822 * \see cl_io_operations::cio_prepare_write()
824 int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
825 struct cl_page *page, unsigned from, unsigned to)
827 const struct cl_io_slice *scan;
830 LINVRNT(io->ci_type == CIT_WRITE);
831 LINVRNT(cl_page_is_owned(page, io));
832 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
833 LINVRNT(cl_io_invariant(io));
834 LASSERT(cl_page_in_io(page, io));
837 cl_io_for_each_reverse(scan, io) {
838 if (scan->cis_iop->cio_prepare_write != NULL) {
839 const struct cl_page_slice *slice;
841 slice = cl_io_slice_page(scan, page);
842 result = scan->cis_iop->cio_prepare_write(env, scan,
851 EXPORT_SYMBOL(cl_io_prepare_write);
854 * Called by write io after user data were copied into a page.
856 * \see cl_io_operations::cio_commit_write()
858 int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
859 struct cl_page *page, unsigned from, unsigned to)
861 const struct cl_io_slice *scan;
864 LINVRNT(io->ci_type == CIT_WRITE);
865 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
866 LINVRNT(cl_io_invariant(io));
868 * XXX Uh... not nice. Top level cl_io_commit_write() call (vvp->lov)
869 * already called cl_page_cache_add(), moving page into CPS_CACHED
* state. A better (and more general) way of dealing with such situations is needed.
873 LASSERT(cl_page_is_owned(page, io) || page->cp_parent != NULL);
874 LASSERT(cl_page_in_io(page, io));
877 cl_io_for_each(scan, io) {
878 if (scan->cis_iop->cio_commit_write != NULL) {
879 const struct cl_page_slice *slice;
881 slice = cl_io_slice_page(scan, page);
882 result = scan->cis_iop->cio_commit_write(env, scan,
889 LINVRNT(result <= 0);
892 EXPORT_SYMBOL(cl_io_commit_write);
895 * Submits a list of pages for immediate io.
* After the function returns, the submitted pages have been moved to the
* queue->c2_qout queue; queue->c2_qin contains both the pages that did not
* need to be submitted and the pages that failed to submit.
901 * \returns 0 if at least one page was submitted, error code otherwise.
902 * \see cl_io_operations::cio_submit()
904 int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
905 enum cl_req_type crt, struct cl_2queue *queue)
907 const struct cl_io_slice *scan;
910 LINVRNT(crt < ARRAY_SIZE(scan->cis_iop->req_op));
913 cl_io_for_each(scan, io) {
914 if (scan->cis_iop->req_op[crt].cio_submit == NULL)
916 result = scan->cis_iop->req_op[crt].cio_submit(env, scan, crt,
922 * If ->cio_submit() failed, no pages were sent.
924 LASSERT(ergo(result != 0, cfs_list_empty(&queue->c2_qout.pl_pages)));
927 EXPORT_SYMBOL(cl_io_submit_rw);
* Submits pages for synchronous io and waits until the transfer finishes or
* an error occurs. A \a timeout of zero means wait for the IO unconditionally.
933 int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
enum cl_req_type iot, struct cl_2queue *queue,
		      long timeout)
937 struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
941 cl_page_list_for_each(pg, &queue->c2_qin) {
942 LASSERT(pg->cp_sync_io == NULL);
943 pg->cp_sync_io = anchor;
946 cl_sync_io_init(anchor, queue->c2_qin.pl_nr);
947 rc = cl_io_submit_rw(env, io, iot, queue);
950 * If some pages weren't sent for any reason (e.g.,
951 * read found up-to-date pages in the cache, or write found
* clean pages), count them as completed to avoid an infinite wait.
955 cl_page_list_for_each(pg, &queue->c2_qin) {
956 pg->cp_sync_io = NULL;
957 cl_sync_io_note(anchor, +1);
960 /* wait for the IO to be finished. */
rc = cl_sync_io_wait(env, io, &queue->c2_qout,
		     anchor, timeout);
964 LASSERT(cfs_list_empty(&queue->c2_qout.pl_pages));
965 cl_page_list_for_each(pg, &queue->c2_qin)
966 pg->cp_sync_io = NULL;
970 EXPORT_SYMBOL(cl_io_submit_sync);
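/*
 * Sketch of synchronously writing out a single owned page with the helpers
 * above ("page" is assumed to be owned by "io"; a timeout of 0 waits
 * unconditionally):
 *
 *	struct cl_2queue *queue = &io->ci_queue;
 *
 *	cl_2queue_init_page(queue, page);
 *	rc = cl_io_submit_sync(env, io, CRT_WRITE, queue, 0);
 *	cl_2queue_disown(env, io, queue);
 *	cl_2queue_fini(env, queue);
 */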
* Cancels an IO that has been submitted by cl_io_submit_rw().
975 int cl_io_cancel(const struct lu_env *env, struct cl_io *io,
976 struct cl_page_list *queue)
978 struct cl_page *page;
CERROR("Canceling ongoing page transmission\n");
982 cl_page_list_for_each(page, queue) {
985 LINVRNT(cl_page_in_io(page, io));
986 rc = cl_page_cancel(env, page);
987 result = result ?: rc;
991 EXPORT_SYMBOL(cl_io_cancel);
* Pumps io through iterations calling
*
* - cl_io_iter_init()
* - cl_io_lock()
* - cl_io_start()
* - cl_io_end()
* - cl_io_unlock()
* - cl_io_rw_advance()
* - cl_io_iter_fini()
*
* repeatedly until there is no more io to do.
1012 int cl_io_loop(const struct lu_env *env, struct cl_io *io)
1016 LINVRNT(cl_io_is_loopable(io));
1022 io->ci_continue = 0;
1023 result = cl_io_iter_init(env, io);
1026 result = cl_io_lock(env, io);
* Notify layers that locks have been taken,
* and do the actual i/o.
1032 * - llite: kms, short read;
1033 * - llite: generic_file_read();
1035 result = cl_io_start(env, io);
* Send any remaining pending io, etc.
1040 * - llite: ll_rw_stats_tally.
1043 cl_io_unlock(env, io);
1044 cl_io_rw_advance(env, io, io->ci_nob - nob);
1047 cl_io_iter_fini(env, io);
1048 } while (result == 0 && io->ci_continue);
1050 result = io->ci_result;
1051 RETURN(result < 0 ? result : 0);
1053 EXPORT_SYMBOL(cl_io_loop);
1056 * Adds io slice to the cl_io.
1058 * This is called by cl_object_operations::coo_io_init() methods to add a
1059 * per-layer state to the io. New state is added at the end of
1060 * cl_io::ci_layers list, that is, it is at the bottom of the stack.
1062 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_page_slice_add()
1064 void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
1065 struct cl_object *obj,
1066 const struct cl_io_operations *ops)
1068 cfs_list_t *linkage = &slice->cis_linkage;
1070 LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
1071 cfs_list_empty(linkage));
1074 cfs_list_add_tail(linkage, &io->ci_layers);
1076 slice->cis_obj = obj;
1077 slice->cis_iop = ops;
1080 EXPORT_SYMBOL(cl_io_slice_add);
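/*
 * Sketch of a layer's cl_object_operations::coo_io_init() method calling
 * cl_io_slice_add(); "mylayer_io", "mylayer_env_io()" and "mylayer_io_ops"
 * are hypothetical names for a layer's slice type, its per-thread storage
 * accessor and its operation vector:
 *
 *	static int mylayer_io_init(const struct lu_env *env,
 *				   struct cl_object *obj, struct cl_io *io)
 *	{
 *		struct mylayer_io *mio = mylayer_env_io(env);
 *
 *		cl_io_slice_add(io, &mio->mi_cl, obj, &mylayer_io_ops);
 *		return 0;
 *	}
 */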
1084 * Initializes page list.
1086 void cl_page_list_init(struct cl_page_list *plist)
1090 CFS_INIT_LIST_HEAD(&plist->pl_pages);
1091 plist->pl_owner = cfs_current();
1094 EXPORT_SYMBOL(cl_page_list_init);
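/*
 * The usual life-cycle of a page list (a sketch; "page" is assumed to be
 * owned by the current io):
 *
 *	struct cl_page_list plist;
 *
 *	cl_page_list_init(&plist);
 *	cl_page_list_add(&plist, page);
 *	... use the list ...
 *	cl_page_list_fini(env, &plist);
 */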
1097 * Adds a page to a page list.
1099 void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
1102 /* it would be better to check that page is owned by "current" io, but
1103 * it is not passed here. */
1104 LASSERT(page->cp_owner != NULL);
1105 LINVRNT(plist->pl_owner == cfs_current());
1108 mutex_lock(&page->cp_mutex);
1110 LASSERT(cfs_list_empty(&page->cp_batch));
1111 cfs_list_add_tail(&page->cp_batch, &plist->pl_pages);
1113 page->cp_queue_ref = lu_ref_add(&page->cp_reference, "queue", plist);
1117 EXPORT_SYMBOL(cl_page_list_add);
1120 * Removes a page from a page list.
1122 void cl_page_list_del(const struct lu_env *env,
1123 struct cl_page_list *plist, struct cl_page *page)
1125 LASSERT(plist->pl_nr > 0);
1126 LINVRNT(plist->pl_owner == cfs_current());
1129 cfs_list_del_init(&page->cp_batch);
1131 mutex_unlock(&page->cp_mutex);
1134 lu_ref_del_at(&page->cp_reference, page->cp_queue_ref, "queue", plist);
1135 cl_page_put(env, page);
1138 EXPORT_SYMBOL(cl_page_list_del);
1141 * Moves a page from one page list to another.
1143 void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
1144 struct cl_page *page)
1146 LASSERT(src->pl_nr > 0);
1147 LINVRNT(dst->pl_owner == cfs_current());
1148 LINVRNT(src->pl_owner == cfs_current());
1151 cfs_list_move_tail(&page->cp_batch, &dst->pl_pages);
1154 lu_ref_set_at(&page->cp_reference,
1155 page->cp_queue_ref, "queue", src, dst);
1158 EXPORT_SYMBOL(cl_page_list_move);
* Splices all pages from one page list onto the tail of another, as
* list_splice() does for list heads.
1163 void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
1165 struct cl_page *page;
1166 struct cl_page *tmp;
1168 LINVRNT(list->pl_owner == cfs_current());
1169 LINVRNT(head->pl_owner == cfs_current());
1172 cl_page_list_for_each_safe(page, tmp, list)
1173 cl_page_list_move(head, list, page);
1176 EXPORT_SYMBOL(cl_page_list_splice);
1178 void cl_page_disown0(const struct lu_env *env,
1179 struct cl_io *io, struct cl_page *pg);
1182 * Disowns pages in a queue.
1184 void cl_page_list_disown(const struct lu_env *env,
1185 struct cl_io *io, struct cl_page_list *plist)
1187 struct cl_page *page;
1188 struct cl_page *temp;
1190 LINVRNT(plist->pl_owner == cfs_current());
1193 cl_page_list_for_each_safe(page, temp, plist) {
1194 LASSERT(plist->pl_nr > 0);
1196 cfs_list_del_init(&page->cp_batch);
1198 mutex_unlock(&page->cp_mutex);
1202 * cl_page_disown0 rather than usual cl_page_disown() is used,
1203 * because pages are possibly in CPS_FREEING state already due
1204 * to the call to cl_page_list_discard().
1207 * XXX cl_page_disown0() will fail if page is not locked.
1209 cl_page_disown0(env, io, page);
1210 lu_ref_del(&page->cp_reference, "queue", plist);
1211 cl_page_put(env, page);
1215 EXPORT_SYMBOL(cl_page_list_disown);
1218 * Releases pages from queue.
1220 void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
1222 struct cl_page *page;
1223 struct cl_page *temp;
1225 LINVRNT(plist->pl_owner == cfs_current());
1228 cl_page_list_for_each_safe(page, temp, plist)
1229 cl_page_list_del(env, plist, page);
1230 LASSERT(plist->pl_nr == 0);
1233 EXPORT_SYMBOL(cl_page_list_fini);
1236 * Owns all pages in a queue.
1238 int cl_page_list_own(const struct lu_env *env,
1239 struct cl_io *io, struct cl_page_list *plist)
1241 struct cl_page *page;
1242 struct cl_page *temp;
1246 LINVRNT(plist->pl_owner == cfs_current());
1250 cl_page_list_for_each_safe(page, temp, plist) {
1251 LASSERT(index <= page->cp_index);
1252 index = page->cp_index;
1253 if (cl_page_own(env, io, page) == 0)
1254 result = result ?: page->cp_error;
else
	cl_page_list_del(env, plist, page);
1260 EXPORT_SYMBOL(cl_page_list_own);
* Assumes (takes over ownership of) all pages in a queue.
1265 void cl_page_list_assume(const struct lu_env *env,
1266 struct cl_io *io, struct cl_page_list *plist)
1268 struct cl_page *page;
1270 LINVRNT(plist->pl_owner == cfs_current());
1272 cl_page_list_for_each(page, plist)
1273 cl_page_assume(env, io, page);
1275 EXPORT_SYMBOL(cl_page_list_assume);
1278 * Discards all pages in a queue.
1280 void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
1281 struct cl_page_list *plist)
1283 struct cl_page *page;
1285 LINVRNT(plist->pl_owner == cfs_current());
1287 cl_page_list_for_each(page, plist)
1288 cl_page_discard(env, io, page);
1291 EXPORT_SYMBOL(cl_page_list_discard);
1294 * Unmaps all pages in a queue from user virtual memory.
1296 int cl_page_list_unmap(const struct lu_env *env, struct cl_io *io,
1297 struct cl_page_list *plist)
1299 struct cl_page *page;
1302 LINVRNT(plist->pl_owner == cfs_current());
1305 cl_page_list_for_each(page, plist) {
1306 result = cl_page_unmap(env, io, page);
1312 EXPORT_SYMBOL(cl_page_list_unmap);
1315 * Initialize dual page queue.
1317 void cl_2queue_init(struct cl_2queue *queue)
1320 cl_page_list_init(&queue->c2_qin);
1321 cl_page_list_init(&queue->c2_qout);
1324 EXPORT_SYMBOL(cl_2queue_init);
1327 * Add a page to the incoming page list of 2-queue.
1329 void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page)
1332 cl_page_list_add(&queue->c2_qin, page);
1335 EXPORT_SYMBOL(cl_2queue_add);
1338 * Disown pages in both lists of a 2-queue.
1340 void cl_2queue_disown(const struct lu_env *env,
1341 struct cl_io *io, struct cl_2queue *queue)
1344 cl_page_list_disown(env, io, &queue->c2_qin);
1345 cl_page_list_disown(env, io, &queue->c2_qout);
1348 EXPORT_SYMBOL(cl_2queue_disown);
1351 * Discard (truncate) pages in both lists of a 2-queue.
1353 void cl_2queue_discard(const struct lu_env *env,
1354 struct cl_io *io, struct cl_2queue *queue)
1357 cl_page_list_discard(env, io, &queue->c2_qin);
1358 cl_page_list_discard(env, io, &queue->c2_qout);
1361 EXPORT_SYMBOL(cl_2queue_discard);
* Assumes ownership of the pages in both lists of a 2-queue.
1366 void cl_2queue_assume(const struct lu_env *env,
1367 struct cl_io *io, struct cl_2queue *queue)
1369 cl_page_list_assume(env, io, &queue->c2_qin);
1370 cl_page_list_assume(env, io, &queue->c2_qout);
1372 EXPORT_SYMBOL(cl_2queue_assume);
1375 * Finalize both page lists of a 2-queue.
1377 void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue)
1380 cl_page_list_fini(env, &queue->c2_qout);
1381 cl_page_list_fini(env, &queue->c2_qin);
1384 EXPORT_SYMBOL(cl_2queue_fini);
1387 * Initialize a 2-queue to contain \a page in its incoming page list.
1389 void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page)
1392 cl_2queue_init(queue);
1393 cl_2queue_add(queue, page);
1396 EXPORT_SYMBOL(cl_2queue_init_page);
1399 * Returns top-level io.
1401 * \see cl_object_top(), cl_page_top().
struct cl_io *cl_io_top(struct cl_io *io)
{
	while (io->ci_parent != NULL)
		io = io->ci_parent;
	return io;
}
EXPORT_SYMBOL(cl_io_top);
* Prints a human-readable representation of \a io via \a printer.
1415 void cl_io_print(const struct lu_env *env, void *cookie,
1416 lu_printer_t printer, const struct cl_io *io)
1421 * Adds request slice to the compound request.
1423 * This is called by cl_device_operations::cdo_req_init() methods to add a
1424 * per-layer state to the request. New state is added at the end of
1425 * cl_req::crq_layers list, that is, it is at the bottom of the stack.
1427 * \see cl_lock_slice_add(), cl_page_slice_add(), cl_io_slice_add()
1429 void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
1430 struct cl_device *dev,
1431 const struct cl_req_operations *ops)
1434 cfs_list_add_tail(&slice->crs_linkage, &req->crq_layers);
1435 slice->crs_dev = dev;
1436 slice->crs_ops = ops;
1437 slice->crs_req = req;
1440 EXPORT_SYMBOL(cl_req_slice_add);
1442 static void cl_req_free(const struct lu_env *env, struct cl_req *req)
1446 LASSERT(cfs_list_empty(&req->crq_pages));
1447 LASSERT(req->crq_nrpages == 0);
1448 LINVRNT(cfs_list_empty(&req->crq_layers));
1449 LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL));
1452 if (req->crq_o != NULL) {
1453 for (i = 0; i < req->crq_nrobjs; ++i) {
1454 struct cl_object *obj = req->crq_o[i].ro_obj;
1456 lu_object_ref_del_at(&obj->co_lu,
1457 req->crq_o[i].ro_obj_ref,
1459 cl_object_put(env, obj);
1462 OBD_FREE(req->crq_o, req->crq_nrobjs * sizeof req->crq_o[0]);
1468 static int cl_req_init(const struct lu_env *env, struct cl_req *req,
1469 struct cl_page *page)
1471 struct cl_device *dev;
1472 struct cl_page_slice *slice;
1477 page = cl_page_top(page);
1479 cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
1480 dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
1481 if (dev->cd_ops->cdo_req_init != NULL) {
1482 result = dev->cd_ops->cdo_req_init(env,
1488 page = page->cp_child;
1489 } while (page != NULL && result == 0);
1494 * Invokes per-request transfer completion call-backs
1495 * (cl_req_operations::cro_completion()) bottom-to-top.
1497 void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
1499 struct cl_req_slice *slice;
1503 * for the lack of list_for_each_entry_reverse_safe()...
1505 while (!cfs_list_empty(&req->crq_layers)) {
1506 slice = cfs_list_entry(req->crq_layers.prev,
1507 struct cl_req_slice, crs_linkage);
1508 cfs_list_del_init(&slice->crs_linkage);
1509 if (slice->crs_ops->cro_completion != NULL)
1510 slice->crs_ops->cro_completion(env, slice, rc);
1512 cl_req_free(env, req);
1515 EXPORT_SYMBOL(cl_req_completion);
1518 * Allocates new transfer request.
1520 struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
1521 enum cl_req_type crt, int nr_objects)
1525 LINVRNT(nr_objects > 0);
1532 OBD_ALLOC(req->crq_o, nr_objects * sizeof req->crq_o[0]);
1533 if (req->crq_o != NULL) {
1534 req->crq_nrobjs = nr_objects;
1535 req->crq_type = crt;
1536 CFS_INIT_LIST_HEAD(&req->crq_pages);
1537 CFS_INIT_LIST_HEAD(&req->crq_layers);
1538 result = cl_req_init(env, req, page);
1542 cl_req_completion(env, req, result);
1543 req = ERR_PTR(result);
1546 req = ERR_PTR(-ENOMEM);
1549 EXPORT_SYMBOL(cl_req_alloc);
1552 * Adds a page to a request.
1554 void cl_req_page_add(const struct lu_env *env,
1555 struct cl_req *req, struct cl_page *page)
1557 struct cl_object *obj;
1558 struct cl_req_obj *rqo;
1562 page = cl_page_top(page);
1564 LASSERT(cfs_list_empty(&page->cp_flight));
1565 LASSERT(page->cp_req == NULL);
1567 CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n",
1568 req, req->crq_type, req->crq_nrpages);
1570 cfs_list_add_tail(&page->cp_flight, &req->crq_pages);
1573 obj = cl_object_top(page->cp_obj);
1574 for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) {
1575 if (rqo->ro_obj == NULL) {
1578 rqo->ro_obj_ref = lu_object_ref_add(&obj->co_lu,
1583 LASSERT(i < req->crq_nrobjs);
1586 EXPORT_SYMBOL(cl_req_page_add);
1589 * Removes a page from a request.
1591 void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
1593 struct cl_req *req = page->cp_req;
1596 page = cl_page_top(page);
1598 LASSERT(!cfs_list_empty(&page->cp_flight));
1599 LASSERT(req->crq_nrpages > 0);
1601 cfs_list_del_init(&page->cp_flight);
1603 page->cp_req = NULL;
1606 EXPORT_SYMBOL(cl_req_page_done);
1609 * Notifies layers that request is about to depart by calling
1610 * cl_req_operations::cro_prep() top-to-bottom.
1612 int cl_req_prep(const struct lu_env *env, struct cl_req *req)
1616 const struct cl_req_slice *slice;
* Check that the caller of cl_req_alloc() didn't lie about the number of
* objects.
1623 for (i = 0; i < req->crq_nrobjs; ++i)
1624 LASSERT(req->crq_o[i].ro_obj != NULL);
1627 cfs_list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
1628 if (slice->crs_ops->cro_prep != NULL) {
1629 result = slice->crs_ops->cro_prep(env, slice);
1636 EXPORT_SYMBOL(cl_req_prep);
1639 * Fills in attributes that are passed to server together with transfer. Only
1640 * attributes from \a flags may be touched. This can be called multiple times
1641 * for the same request.
1643 void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
1644 struct cl_req_attr *attr, obd_valid flags)
1646 const struct cl_req_slice *slice;
1647 struct cl_page *page;
1650 LASSERT(!cfs_list_empty(&req->crq_pages));
1653 /* Take any page to use as a model. */
1654 page = cfs_list_entry(req->crq_pages.next, struct cl_page, cp_flight);
1656 for (i = 0; i < req->crq_nrobjs; ++i) {
1657 cfs_list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
1658 const struct cl_page_slice *scan;
1659 const struct cl_object *obj;
1661 scan = cl_page_at(page,
1662 slice->crs_dev->cd_lu_dev.ld_type);
1663 LASSERT(scan != NULL);
1664 obj = scan->cpl_obj;
1665 if (slice->crs_ops->cro_attr_set != NULL)
1666 slice->crs_ops->cro_attr_set(env, slice, obj,
1672 EXPORT_SYMBOL(cl_req_attr_set);
1674 /* XXX complete(), init_completion(), and wait_for_completion(), until they are
1675 * implemented in libcfs. */
#ifdef __KERNEL__
# include <linux/sched.h>
#else /* __KERNEL__ */
# include <liblustre.h>
#endif /* __KERNEL__ */
1683 * Initialize synchronous io wait anchor, for transfer of \a nrpages pages.
1685 void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
1688 cfs_waitq_init(&anchor->csi_waitq);
1689 cfs_atomic_set(&anchor->csi_sync_nr, nrpages);
1690 cfs_atomic_set(&anchor->csi_barrier, nrpages > 0);
1691 anchor->csi_sync_rc = 0;
1694 EXPORT_SYMBOL(cl_sync_io_init);
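/*
 * The anchor supports the pattern used by cl_io_submit_sync() above (a
 * sketch): initialize it for the number of pages in flight, let every
 * completion call cl_sync_io_note(), and wait:
 *
 *	cl_sync_io_init(anchor, queue->c2_qin.pl_nr);
 *	rc = cl_io_submit_rw(env, io, crt, queue);
 *	if (rc == 0)
 *		rc = cl_sync_io_wait(env, io, &queue->c2_qout, anchor,
 *			             timeout);
 */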
* Waits until all transfers complete. The transfer completion routine has to
* call cl_sync_io_note() for every page.
1700 int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
1701 struct cl_page_list *queue, struct cl_sync_io *anchor,
struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
					  NULL, NULL, NULL);
1709 LASSERT(timeout >= 0);
1711 rc = l_wait_event(anchor->csi_waitq,
1712 cfs_atomic_read(&anchor->csi_sync_nr) == 0,
1715 CERROR("SYNC IO failed with error: %d, try to cancel "
1716 "%d remaining pages\n",
1717 rc, cfs_atomic_read(&anchor->csi_sync_nr));
1719 (void)cl_io_cancel(env, io, queue);
1721 lwi = (struct l_wait_info) { 0 };
1722 (void)l_wait_event(anchor->csi_waitq,
1723 cfs_atomic_read(&anchor->csi_sync_nr) == 0,
1726 rc = anchor->csi_sync_rc;
1728 LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) == 0);
1729 cl_page_list_assume(env, io, queue);
1731 /* wait until cl_sync_io_note() has done wakeup */
1732 while (unlikely(cfs_atomic_read(&anchor->csi_barrier) != 0)) {
1738 POISON(anchor, 0x5a, sizeof *anchor);
1741 EXPORT_SYMBOL(cl_sync_io_wait);
1744 * Indicate that transfer of a single page completed.
1746 void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
1749 if (anchor->csi_sync_rc == 0 && ioret < 0)
1750 anchor->csi_sync_rc = ioret;
1752 * Synchronous IO done without releasing page lock (e.g., as a part of
* ->{prepare,commit}_write()). Completion is used to signal the end of io.
1756 LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) > 0);
1757 if (cfs_atomic_dec_and_test(&anchor->csi_sync_nr)) {
1758 cfs_waitq_broadcast(&anchor->csi_waitq);
1759 /* it's safe to nuke or reuse anchor now */
1760 cfs_atomic_set(&anchor->csi_barrier, 0);
1764 EXPORT_SYMBOL(cl_sync_io_note);