4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2014, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
39 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
42 #define DEBUG_SUBSYSTEM S_CLASS
44 #include <linux/sched.h>
45 #include <obd_class.h>
46 #include <obd_support.h>
47 #include <lustre_fid.h>
48 #include <libcfs/list.h>
49 #include <cl_object.h>
50 #include "cl_internal.h"
52 /*****************************************************************************
58 #define cl_io_for_each(slice, io) \
59 list_for_each_entry((slice), &io->ci_layers, cis_linkage)
60 #define cl_io_for_each_reverse(slice, io) \
61 list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
63 static inline int cl_io_type_is_valid(enum cl_io_type type)
65 return CIT_READ <= type && type < CIT_OP_NR;
68 static inline int cl_io_is_loopable(const struct cl_io *io)
70 return cl_io_type_is_valid(io->ci_type) && io->ci_type != CIT_MISC;
74 * Returns true iff there is an IO ongoing in the given environment.
76 int cl_io_is_going(const struct lu_env *env)
78 return cl_env_info(env)->clt_current_io != NULL;
82 * cl_io invariant that holds at all times when exported cl_io_*() functions
83 * are entered and left.
85 static int cl_io_invariant(const struct cl_io *io)
92 * io can own pages only when it is ongoing. Sub-io might
93 * still be in CIS_LOCKED state when top-io is in
96 ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING ||
97 (io->ci_state == CIS_LOCKED && up != NULL));
101 * Finalize \a io, by calling cl_io_operations::cio_fini() bottom-to-top.
103 void cl_io_fini(const struct lu_env *env, struct cl_io *io)
105 struct cl_io_slice *slice;
106 struct cl_thread_info *info;
108 LINVRNT(cl_io_type_is_valid(io->ci_type));
109 LINVRNT(cl_io_invariant(io));
112 while (!list_empty(&io->ci_layers)) {
113 slice = container_of(io->ci_layers.prev, struct cl_io_slice,
115 list_del_init(&slice->cis_linkage);
116 if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
117 slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
119 * Invalidate slice to catch use after free. This assumes that
120 * slices are allocated within session and can be touched
121 * after ->cio_fini() returns.
123 slice->cis_io = NULL;
125 io->ci_state = CIS_FINI;
126 info = cl_env_info(env);
127 if (info->clt_current_io == io)
128 info->clt_current_io = NULL;
130 /* sanity check for layout change */
131 switch(io->ci_type) {
137 LASSERT(!io->ci_need_restart);
141 /* Check ignore layout change conf */
142 LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout,
143 !io->ci_need_restart));
150 EXPORT_SYMBOL(cl_io_fini);
152 static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
153 enum cl_io_type iot, struct cl_object *obj)
155 struct cl_object *scan;
158 LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI);
159 LINVRNT(cl_io_type_is_valid(iot));
160 LINVRNT(cl_io_invariant(io));
164 INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
165 INIT_LIST_HEAD(&io->ci_lockset.cls_done);
166 INIT_LIST_HEAD(&io->ci_layers);
169 cl_object_for_each(scan, obj) {
170 if (scan->co_ops->coo_io_init != NULL) {
171 result = scan->co_ops->coo_io_init(env, scan, io);
177 io->ci_state = CIS_INIT;
182 * Initialize sub-io, by calling cl_io_operations::cio_init() top-to-bottom.
184 * \pre obj != cl_object_top(obj)
186 int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
187 enum cl_io_type iot, struct cl_object *obj)
189 struct cl_thread_info *info = cl_env_info(env);
191 LASSERT(obj != cl_object_top(obj));
192 if (info->clt_current_io == NULL)
193 info->clt_current_io = io;
194 return cl_io_init0(env, io, iot, obj);
196 EXPORT_SYMBOL(cl_io_sub_init);
199 * Initialize \a io, by calling cl_io_operations::cio_init() top-to-bottom.
201 * Caller has to call cl_io_fini() after a call to cl_io_init(), no matter
202 * what the latter returned.
204 * \pre obj == cl_object_top(obj)
205 * \pre cl_io_type_is_valid(iot)
206 * \post cl_io_type_is_valid(io->ci_type) && io->ci_type == iot
208 int cl_io_init(const struct lu_env *env, struct cl_io *io,
209 enum cl_io_type iot, struct cl_object *obj)
211 struct cl_thread_info *info = cl_env_info(env);
213 LASSERT(obj == cl_object_top(obj));
214 LASSERT(info->clt_current_io == NULL);
216 info->clt_current_io = io;
217 return cl_io_init0(env, io, iot, obj);
219 EXPORT_SYMBOL(cl_io_init);
222 * Initialize read or write io.
224 * \pre iot == CIT_READ || iot == CIT_WRITE
226 int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
227 enum cl_io_type iot, loff_t pos, size_t count)
229 LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
230 LINVRNT(io->ci_obj != NULL);
233 LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
234 "io range: %u ["LPU64", "LPU64") %u %u\n",
235 iot, (__u64)pos, (__u64)pos + count,
236 io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
237 io->u.ci_rw.crw_pos = pos;
238 io->u.ci_rw.crw_count = count;
239 RETURN(cl_io_init(env, io, iot, io->ci_obj));
241 EXPORT_SYMBOL(cl_io_rw_init);
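/*
 * Illustrative sketch, not part of this module: the canonical top-level
 * read path built from cl_io_rw_init(), cl_io_loop() and cl_io_fini().
 * It assumes the caller provides a valid environment and a cl_io whose
 * ci_obj points at the top cl_object and whose state is CIS_ZERO or
 * CIS_FINI; the function name is hypothetical.
 */
#if 0
static int example_read(const struct lu_env *env, struct cl_io *io,
			loff_t pos, size_t count)
{
	int rc;

	rc = cl_io_rw_init(env, io, CIT_READ, pos, count);
	if (rc == 0)
		rc = cl_io_loop(env, io);
	/*
	 * cl_io_fini() has to be called no matter what cl_io_init() (and
	 * hence cl_io_rw_init()) returned, see the comment above cl_io_init().
	 */
	cl_io_fini(env, io);
	return rc;
}
#endif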
243 static int cl_lock_descr_sort(const struct cl_lock_descr *d0,
244 const struct cl_lock_descr *d1)
246 return lu_fid_cmp(lu_object_fid(&d0->cld_obj->co_lu),
247 lu_object_fid(&d1->cld_obj->co_lu));
251 * Sort locks in lexicographical order of their (fid, start-offset) pairs.
253 static void cl_io_locks_sort(struct cl_io *io)
258 /* hidden treasure: bubble sort for now. */
260 struct cl_io_lock_link *curr;
261 struct cl_io_lock_link *prev;
262 struct cl_io_lock_link *temp;
267 list_for_each_entry_safe(curr, temp, &io->ci_lockset.cls_todo,
270 switch (cl_lock_descr_sort(&prev->cill_descr,
271 &curr->cill_descr)) {
274 * IMPOSSIBLE: Identical locks are
281 list_move_tail(&curr->cill_linkage,
282 &prev->cill_linkage);
284 continue; /* don't change prev: it's
285 * still "previous" */
286 case -1: /* already in order */
296 static void cl_lock_descr_merge(struct cl_lock_descr *d0,
297 const struct cl_lock_descr *d1)
299 d0->cld_start = min(d0->cld_start, d1->cld_start);
300 d0->cld_end = max(d0->cld_end, d1->cld_end);
302 if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
303 d0->cld_mode = CLM_WRITE;
305 if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
306 d0->cld_mode = CLM_GROUP;
309 static int cl_lockset_merge(const struct cl_lockset *set,
310 const struct cl_lock_descr *need)
312 struct cl_io_lock_link *scan;
315 list_for_each_entry(scan, &set->cls_todo, cill_linkage) {
316 if (!cl_object_same(scan->cill_descr.cld_obj, need->cld_obj))
319 /* Merge locks for the same object, because the ldlm lock server
320 * may expand the lock extent; otherwise there is a deadlock
321 * case if two conflicting locks are queued for the same object
322 * and the lock server expands one lock to overlap the other.
323 * The side effect is that it can generate a multi-stripe lock
324 * that may cause cascading problems. */
325 cl_lock_descr_merge(&scan->cill_descr, need);
326 CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
327 scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
328 scan->cill_descr.cld_end);
334 static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
335 struct cl_lockset *set)
337 struct cl_io_lock_link *link;
338 struct cl_io_lock_link *temp;
343 list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
344 result = cl_lock_request(env, io, &link->cill_lock);
348 list_move(&link->cill_linkage, &set->cls_done);
354 * Takes locks necessary for the current iteration of io.
356 * Calls cl_io_operations::cio_lock() top-to-bottom to collect locks required
357 * by layers for the current iteration. Then sort locks (to avoid dead-locks),
360 int cl_io_lock(const struct lu_env *env, struct cl_io *io)
362 const struct cl_io_slice *scan;
365 LINVRNT(cl_io_is_loopable(io));
366 LINVRNT(io->ci_state == CIS_IT_STARTED);
367 LINVRNT(cl_io_invariant(io));
370 cl_io_for_each(scan, io) {
371 if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
373 result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
378 cl_io_locks_sort(io);
379 result = cl_lockset_lock(env, io, &io->ci_lockset);
382 cl_io_unlock(env, io);
384 io->ci_state = CIS_LOCKED;
387 EXPORT_SYMBOL(cl_io_lock);
390 * Release locks taken by io.
392 void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
394 struct cl_lockset *set;
395 struct cl_io_lock_link *link;
396 struct cl_io_lock_link *temp;
397 const struct cl_io_slice *scan;
399 LASSERT(cl_io_is_loopable(io));
400 LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED);
401 LINVRNT(cl_io_invariant(io));
404 set = &io->ci_lockset;
406 list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
407 list_del_init(&link->cill_linkage);
408 if (link->cill_fini != NULL)
409 link->cill_fini(env, link);
412 list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
413 list_del_init(&link->cill_linkage);
414 cl_lock_release(env, &link->cill_lock);
415 if (link->cill_fini != NULL)
416 link->cill_fini(env, link);
419 cl_io_for_each_reverse(scan, io) {
420 if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
421 scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
423 io->ci_state = CIS_UNLOCKED;
424 LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired);
427 EXPORT_SYMBOL(cl_io_unlock);
430 * Prepares next iteration of io.
432 * Calls cl_io_operations::cio_iter_init() top-to-bottom. This exists to give
433 * layers a chance to modify io parameters, e.g., so that lov can restrict io
434 * to a single stripe.
436 int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
438 const struct cl_io_slice *scan;
441 LINVRNT(cl_io_is_loopable(io));
442 LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED);
443 LINVRNT(cl_io_invariant(io));
447 cl_io_for_each(scan, io) {
448 if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
450 result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
456 io->ci_state = CIS_IT_STARTED;
459 EXPORT_SYMBOL(cl_io_iter_init);
462 * Finalizes io iteration.
464 * Calls cl_io_operations::cio_iter_fini() bottom-to-top.
466 void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
468 const struct cl_io_slice *scan;
470 LINVRNT(cl_io_is_loopable(io));
471 LINVRNT(io->ci_state == CIS_UNLOCKED);
472 LINVRNT(cl_io_invariant(io));
475 cl_io_for_each_reverse(scan, io) {
476 if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
477 scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
479 io->ci_state = CIS_IT_ENDED;
482 EXPORT_SYMBOL(cl_io_iter_fini);
485 * Records that read or write io progressed \a nob bytes forward.
487 void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, size_t nob)
489 const struct cl_io_slice *scan;
491 LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
493 LINVRNT(cl_io_is_loopable(io));
494 LINVRNT(cl_io_invariant(io));
498 io->u.ci_rw.crw_pos += nob;
499 io->u.ci_rw.crw_count -= nob;
501 /* layers have to be notified. */
502 cl_io_for_each_reverse(scan, io) {
503 if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
504 scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
511 * Adds a lock to a lockset.
513 int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
514 struct cl_io_lock_link *link)
519 if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr))
522 list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
527 EXPORT_SYMBOL(cl_io_lock_add);
529 static void cl_free_io_lock_link(const struct lu_env *env,
530 struct cl_io_lock_link *link)
536 * Allocates new lock link, and uses it to add a lock to a lockset.
538 int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
539 struct cl_lock_descr *descr)
541 struct cl_io_lock_link *link;
547 link->cill_descr = *descr;
548 link->cill_fini = cl_free_io_lock_link;
549 result = cl_io_lock_add(env, io, link);
550 if (result) /* lock match */
551 link->cill_fini(env, link);
557 EXPORT_SYMBOL(cl_io_lock_alloc_add);
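/*
 * Illustrative sketch, not part of this module: how a layer's ->cio_lock()
 * method might use cl_io_lock_alloc_add() to request a whole-object read
 * lock for the current iteration. The function name is hypothetical, and
 * CL_PAGE_EOF is assumed to denote the maximal page index;
 * cl_lockset_merge() may coalesce this descriptor with an existing lock
 * on the same object.
 */
#if 0
static int example_io_lock(const struct lu_env *env,
			   const struct cl_io_slice *ios)
{
	struct cl_lock_descr descr = {
		.cld_obj   = ios->cis_obj,
		.cld_start = 0,
		.cld_end   = CL_PAGE_EOF,
		.cld_mode  = CLM_READ,
	};

	return cl_io_lock_alloc_add(env, ios->cis_io, &descr);
}
#endif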
560 * Starts io by calling cl_io_operations::cio_start() top-to-bottom.
562 int cl_io_start(const struct lu_env *env, struct cl_io *io)
564 const struct cl_io_slice *scan;
567 LINVRNT(cl_io_is_loopable(io));
568 LINVRNT(io->ci_state == CIS_LOCKED);
569 LINVRNT(cl_io_invariant(io));
572 io->ci_state = CIS_IO_GOING;
573 cl_io_for_each(scan, io) {
574 if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
576 result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
584 EXPORT_SYMBOL(cl_io_start);
587 * Wait until current io iteration is finished by calling
588 * cl_io_operations::cio_end() bottom-to-top.
590 void cl_io_end(const struct lu_env *env, struct cl_io *io)
592 const struct cl_io_slice *scan;
594 LINVRNT(cl_io_is_loopable(io));
595 LINVRNT(io->ci_state == CIS_IO_GOING);
596 LINVRNT(cl_io_invariant(io));
599 cl_io_for_each_reverse(scan, io) {
600 if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
601 scan->cis_iop->op[io->ci_type].cio_end(env, scan);
602 /* TODO: error handling. */
604 io->ci_state = CIS_IO_FINISHED;
607 EXPORT_SYMBOL(cl_io_end);
609 static const struct cl_page_slice *
610 cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
612 const struct cl_page_slice *slice;
614 slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type);
615 LINVRNT(slice != NULL);
620 * Called by read io when a page has to be read from the server.
622 * \see cl_io_operations::cio_read_page()
624 int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
625 struct cl_page *page)
627 const struct cl_io_slice *scan;
628 struct cl_2queue *queue;
631 LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
632 LINVRNT(cl_page_is_owned(page, io));
633 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
634 LINVRNT(cl_io_invariant(io));
637 queue = &io->ci_queue;
639 cl_2queue_init(queue);
641 * ->cio_read_page() methods called in the loop below are supposed to
642 * never block waiting for network (the only subtle point is the
643 * creation of new pages for read-ahead that might result in cache
644 * shrinking, but currently only clean pages are shrunk and this
645 * requires no network io).
647 * Should this ever start blocking, a retry loop would be needed for
648 * "parallel io" (see CLO_REPEAT loops in cl_lock.c).
650 cl_io_for_each(scan, io) {
651 if (scan->cis_iop->cio_read_page != NULL) {
652 const struct cl_page_slice *slice;
654 slice = cl_io_slice_page(scan, page);
655 LINVRNT(slice != NULL);
656 result = scan->cis_iop->cio_read_page(env, scan, slice);
661 if (result == 0 && queue->c2_qin.pl_nr > 0)
662 result = cl_io_submit_rw(env, io, CRT_READ, queue);
664 * Unlock unsent pages in case of error.
666 cl_page_list_disown(env, io, &queue->c2_qin);
667 cl_2queue_fini(env, queue);
670 EXPORT_SYMBOL(cl_io_read_page);
673 * Commit a list of contiguous pages into writeback cache.
675 * \returns 0 if all pages were committed, or an error code if an error occurred.
676 * \see cl_io_operations::cio_commit_async()
678 int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
679 struct cl_page_list *queue, int from, int to,
682 const struct cl_io_slice *scan;
686 cl_io_for_each(scan, io) {
687 if (scan->cis_iop->cio_commit_async == NULL)
689 result = scan->cis_iop->cio_commit_async(env, scan, queue,
696 EXPORT_SYMBOL(cl_io_commit_async);
699 * Submits a list of pages for immediate io.
701 * After the function returns, the submitted pages are moved to the
702 * queue->c2_qout queue, and queue->c2_qin contains both the pages that did
703 * not need to be submitted and the pages that failed to submit.
705 * \returns 0 if at least one page was submitted, error code otherwise.
706 * \see cl_io_operations::cio_submit()
708 int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
709 enum cl_req_type crt, struct cl_2queue *queue)
711 const struct cl_io_slice *scan;
715 cl_io_for_each(scan, io) {
716 if (scan->cis_iop->cio_submit == NULL)
718 result = scan->cis_iop->cio_submit(env, scan, crt, queue);
723 * If ->cio_submit() failed, no pages were sent.
725 LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages)));
728 EXPORT_SYMBOL(cl_io_submit_rw);
731 * Submit a sync_io and wait until the IO finishes or an error occurs.
732 * If \a timeout is zero, wait for the IO unconditionally.
734 int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
735 enum cl_req_type iot, struct cl_2queue *queue,
738 struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
742 cl_page_list_for_each(pg, &queue->c2_qin) {
743 LASSERT(pg->cp_sync_io == NULL);
744 pg->cp_sync_io = anchor;
747 cl_sync_io_init(anchor, queue->c2_qin.pl_nr, &cl_sync_io_end);
748 rc = cl_io_submit_rw(env, io, iot, queue);
751 * If some pages weren't sent for any reason (e.g.,
752 * read found up-to-date pages in the cache, or write found
753 * clean pages), count them as completed to avoid infinite
756 cl_page_list_for_each(pg, &queue->c2_qin) {
757 pg->cp_sync_io = NULL;
758 cl_sync_io_note(env, anchor, 1);
761 /* wait for the IO to be finished. */
762 rc = cl_sync_io_wait(env, anchor, timeout);
763 cl_page_list_assume(env, io, &queue->c2_qout);
765 LASSERT(list_empty(&queue->c2_qout.pl_pages));
766 cl_page_list_for_each(pg, &queue->c2_qin)
767 pg->cp_sync_io = NULL;
771 EXPORT_SYMBOL(cl_io_submit_sync);
774 * Cancel an IO which has been submitted by cl_io_submit_rw.
776 int cl_io_cancel(const struct lu_env *env, struct cl_io *io,
777 struct cl_page_list *queue)
779 struct cl_page *page;
782 CERROR("Canceling ongoing page transmission\n");
783 cl_page_list_for_each(page, queue) {
786 rc = cl_page_cancel(env, page);
787 result = result ?: rc;
795 * Pumps io through iterations calling
797 * - cl_io_iter_init()
807 * - cl_io_iter_fini()
809 * repeatedly until there is no more io to do.
811 int cl_io_loop(const struct lu_env *env, struct cl_io *io)
815 LINVRNT(cl_io_is_loopable(io));
822 result = cl_io_iter_init(env, io);
825 result = cl_io_lock(env, io);
828 * Notify layers that locks have been taken,
831 * - llite: kms, short read;
832 * - llite: generic_file_read();
834 result = cl_io_start(env, io);
836 * Send any remaining pending
839 * - llite: ll_rw_stats_tally.
842 cl_io_unlock(env, io);
843 cl_io_rw_advance(env, io, io->ci_nob - nob);
846 cl_io_iter_fini(env, io);
847 } while (result == 0 && io->ci_continue);
849 result = io->ci_result;
850 RETURN(result < 0 ? result : 0);
852 EXPORT_SYMBOL(cl_io_loop);
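/*
 * Illustrative sketch, not part of this module: the per-iteration call
 * sequence that cl_io_loop() performs above, written out linearly with
 * error handling omitted. "nob" snapshots io->ci_nob so that
 * cl_io_rw_advance() is told how many bytes this iteration transferred.
 */
#if 0
static void example_one_iteration(const struct lu_env *env, struct cl_io *io)
{
	size_t nob = io->ci_nob;

	cl_io_iter_init(env, io);	/* layers trim io to this iteration */
	cl_io_lock(env, io);		/* collect, sort and take locks */
	cl_io_start(env, io);		/* start the transfer */
	cl_io_end(env, io);		/* wait for this iteration to finish */
	cl_io_unlock(env, io);		/* release the locks */
	cl_io_rw_advance(env, io, io->ci_nob - nob);
	cl_io_iter_fini(env, io);
	/* cl_io_loop() repeats this while result == 0 && io->ci_continue */
}
#endif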
855 * Adds io slice to the cl_io.
857 * This is called by cl_object_operations::coo_io_init() methods to add a
858 * per-layer state to the io. New state is added at the end of
859 * cl_io::ci_layers list, that is, it is at the bottom of the stack.
861 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_page_slice_add()
863 void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
864 struct cl_object *obj,
865 const struct cl_io_operations *ops)
867 struct list_head *linkage = &slice->cis_linkage;
869 LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
870 list_empty(linkage));
873 list_add_tail(linkage, &io->ci_layers);
875 slice->cis_obj = obj;
876 slice->cis_iop = ops;
879 EXPORT_SYMBOL(cl_io_slice_add);
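/*
 * Illustrative sketch, not part of this module: how a layer's
 * ->coo_io_init() method typically registers its per-layer io state via
 * cl_io_slice_add(). The slice container, the example_env_io() accessor
 * that returns per-thread storage, and the operations vector are all
 * hypothetical.
 */
#if 0
struct example_io {
	struct cl_io_slice ei_cl;
};

static const struct cl_io_operations example_io_ops;
static struct example_io *example_env_io(const struct lu_env *env);

static int example_coo_io_init(const struct lu_env *env,
			       struct cl_object *obj, struct cl_io *io)
{
	struct example_io *eio = example_env_io(env);

	/* appends the slice to io->ci_layers, i.e. below existing layers */
	cl_io_slice_add(io, &eio->ei_cl, obj, &example_io_ops);
	return 0;
}
#endif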
883 * Initializes page list.
885 void cl_page_list_init(struct cl_page_list *plist)
889 INIT_LIST_HEAD(&plist->pl_pages);
890 plist->pl_owner = current;
893 EXPORT_SYMBOL(cl_page_list_init);
896 * Adds a page to a page list.
898 void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
901 /* it would be better to check that page is owned by "current" io, but
902 * it is not passed here. */
903 LASSERT(page->cp_owner != NULL);
904 LINVRNT(plist->pl_owner == current);
906 LASSERT(list_empty(&page->cp_batch));
907 list_add_tail(&page->cp_batch, &plist->pl_pages);
909 lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
913 EXPORT_SYMBOL(cl_page_list_add);
916 * Removes a page from a page list.
918 void cl_page_list_del(const struct lu_env *env,
919 struct cl_page_list *plist, struct cl_page *page)
921 LASSERT(plist->pl_nr > 0);
922 LASSERT(cl_page_is_vmlocked(env, page));
923 LINVRNT(plist->pl_owner == current);
926 list_del_init(&page->cp_batch);
928 lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
929 cl_page_put(env, page);
932 EXPORT_SYMBOL(cl_page_list_del);
935 * Moves a page from one page list to another.
937 void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
938 struct cl_page *page)
940 LASSERT(src->pl_nr > 0);
941 LINVRNT(dst->pl_owner == current);
942 LINVRNT(src->pl_owner == current);
945 list_move_tail(&page->cp_batch, &dst->pl_pages);
948 lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
952 EXPORT_SYMBOL(cl_page_list_move);
955 * Moves a page from one page list to the head of another list.
957 void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
958 struct cl_page *page)
960 LASSERT(src->pl_nr > 0);
961 LINVRNT(dst->pl_owner == current);
962 LINVRNT(src->pl_owner == current);
965 list_move(&page->cp_batch, &dst->pl_pages);
968 lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
972 EXPORT_SYMBOL(cl_page_list_move_head);
975 * Splice the cl_page_list: move all pages from \a list onto the tail of \a head.
977 void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
979 struct cl_page *page;
982 LINVRNT(list->pl_owner == current);
983 LINVRNT(head->pl_owner == current);
986 cl_page_list_for_each_safe(page, tmp, list)
987 cl_page_list_move(head, list, page);
990 EXPORT_SYMBOL(cl_page_list_splice);
993 * Disowns pages in a queue.
995 void cl_page_list_disown(const struct lu_env *env,
996 struct cl_io *io, struct cl_page_list *plist)
998 struct cl_page *page;
999 struct cl_page *temp;
1001 LINVRNT(plist->pl_owner == current);
1004 cl_page_list_for_each_safe(page, temp, plist) {
1005 LASSERT(plist->pl_nr > 0);
1007 list_del_init(&page->cp_batch);
1010 * cl_page_disown0() rather than the usual cl_page_disown() is used,
1011 * because pages are possibly in CPS_FREEING state already due
1012 * to the call to cl_page_list_discard().
1015 * XXX cl_page_disown0() will fail if page is not locked.
1017 cl_page_disown0(env, io, page);
1018 lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue",
1020 cl_page_put(env, page);
1024 EXPORT_SYMBOL(cl_page_list_disown);
1027 * Releases pages from queue.
1029 void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
1031 struct cl_page *page;
1032 struct cl_page *temp;
1034 LINVRNT(plist->pl_owner == current);
1037 cl_page_list_for_each_safe(page, temp, plist)
1038 cl_page_list_del(env, plist, page);
1039 LASSERT(plist->pl_nr == 0);
1042 EXPORT_SYMBOL(cl_page_list_fini);
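/*
 * Illustrative sketch, not part of this module: the basic cl_page_list
 * lifecycle. "page" is assumed to be already owned by the calling io,
 * since cl_page_list_add() asserts page->cp_owner != NULL; how the page
 * was obtained is outside the scope of this sketch.
 */
#if 0
static void example_page_list(const struct lu_env *env, struct cl_io *io,
			      struct cl_page *page)
{
	struct cl_page_list plist;

	cl_page_list_init(&plist);
	cl_page_list_add(&plist, page);		/* adds a "queue" reference */
	/* ... pass the list to the io machinery, e.g. via a cl_2queue ... */
	cl_page_list_disown(env, io, &plist);	/* drop ownership of queued pages */
	cl_page_list_fini(env, &plist);		/* release anything still queued */
}
#endif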
1045 * Owns all pages in a queue.
1047 int cl_page_list_own(const struct lu_env *env,
1048 struct cl_io *io, struct cl_page_list *plist)
1050 struct cl_page *page;
1051 struct cl_page *temp;
1054 LINVRNT(plist->pl_owner == current);
1058 cl_page_list_for_each_safe(page, temp, plist) {
1059 if (cl_page_own(env, io, page) == 0)
1060 result = result ?: page->cp_error;
1062 cl_page_list_del(env, plist, page);
1068 * Assumes (takes ownership of) all pages in a queue.
1070 void cl_page_list_assume(const struct lu_env *env,
1071 struct cl_io *io, struct cl_page_list *plist)
1073 struct cl_page *page;
1075 LINVRNT(plist->pl_owner == current);
1077 cl_page_list_for_each(page, plist)
1078 cl_page_assume(env, io, page);
1082 * Discards all pages in a queue.
1084 void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
1085 struct cl_page_list *plist)
1087 struct cl_page *page;
1089 LINVRNT(plist->pl_owner == current);
1091 cl_page_list_for_each(page, plist)
1092 cl_page_discard(env, io, page);
1097 * Initialize dual page queue.
1099 void cl_2queue_init(struct cl_2queue *queue)
1102 cl_page_list_init(&queue->c2_qin);
1103 cl_page_list_init(&queue->c2_qout);
1106 EXPORT_SYMBOL(cl_2queue_init);
1109 * Add a page to the incoming page list of 2-queue.
1111 void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page)
1114 cl_page_list_add(&queue->c2_qin, page);
1117 EXPORT_SYMBOL(cl_2queue_add);
1120 * Disown pages in both lists of a 2-queue.
1122 void cl_2queue_disown(const struct lu_env *env,
1123 struct cl_io *io, struct cl_2queue *queue)
1126 cl_page_list_disown(env, io, &queue->c2_qin);
1127 cl_page_list_disown(env, io, &queue->c2_qout);
1130 EXPORT_SYMBOL(cl_2queue_disown);
1133 * Discard (truncate) pages in both lists of a 2-queue.
1135 void cl_2queue_discard(const struct lu_env *env,
1136 struct cl_io *io, struct cl_2queue *queue)
1139 cl_page_list_discard(env, io, &queue->c2_qin);
1140 cl_page_list_discard(env, io, &queue->c2_qout);
1143 EXPORT_SYMBOL(cl_2queue_discard);
1146 * Assume ownership of the pages in both lists of a cl_2queue.
1148 void cl_2queue_assume(const struct lu_env *env,
1149 struct cl_io *io, struct cl_2queue *queue)
1151 cl_page_list_assume(env, io, &queue->c2_qin);
1152 cl_page_list_assume(env, io, &queue->c2_qout);
1156 * Finalize both page lists of a 2-queue.
1158 void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue)
1161 cl_page_list_fini(env, &queue->c2_qout);
1162 cl_page_list_fini(env, &queue->c2_qin);
1165 EXPORT_SYMBOL(cl_2queue_fini);
1168 * Initialize a 2-queue to contain \a page in its incoming page list.
1170 void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page)
1173 cl_2queue_init(queue);
1174 cl_2queue_add(queue, page);
1177 EXPORT_SYMBOL(cl_2queue_init_page);
1180 * Returns top-level io.
1182 * \see cl_object_top()
1184 struct cl_io *cl_io_top(struct cl_io *io)
1187 while (io->ci_parent != NULL)
1191 EXPORT_SYMBOL(cl_io_top);
1194 * Prints a human-readable representation of \a io through \a printer.
1196 void cl_io_print(const struct lu_env *env, void *cookie,
1197 lu_printer_t printer, const struct cl_io *io)
1202 * Adds request slice to the compound request.
1204 * This is called by cl_device_operations::cdo_req_init() methods to add a
1205 * per-layer state to the request. New state is added at the end of
1206 * cl_req::crq_layers list, that is, it is at the bottom of the stack.
1208 * \see cl_lock_slice_add(), cl_page_slice_add(), cl_io_slice_add()
1210 void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
1211 struct cl_device *dev,
1212 const struct cl_req_operations *ops)
1215 list_add_tail(&slice->crs_linkage, &req->crq_layers);
1216 slice->crs_dev = dev;
1217 slice->crs_ops = ops;
1218 slice->crs_req = req;
1221 EXPORT_SYMBOL(cl_req_slice_add);
1223 static void cl_req_free(const struct lu_env *env, struct cl_req *req)
1227 LASSERT(list_empty(&req->crq_pages));
1228 LASSERT(req->crq_nrpages == 0);
1229 LINVRNT(list_empty(&req->crq_layers));
1230 LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL));
1233 if (req->crq_o != NULL) {
1234 for (i = 0; i < req->crq_nrobjs; ++i) {
1235 struct cl_object *obj = req->crq_o[i].ro_obj;
1237 lu_object_ref_del_at(&obj->co_lu,
1238 &req->crq_o[i].ro_obj_ref,
1240 cl_object_put(env, obj);
1243 OBD_FREE(req->crq_o, req->crq_nrobjs * sizeof req->crq_o[0]);
1249 static int cl_req_init(const struct lu_env *env, struct cl_req *req,
1250 struct cl_page *page)
1252 struct cl_device *dev;
1253 struct cl_page_slice *slice;
1258 list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
1259 dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
1260 if (dev->cd_ops->cdo_req_init != NULL) {
1261 result = dev->cd_ops->cdo_req_init(env,
1271 * Invokes per-request transfer completion call-backs
1272 * (cl_req_operations::cro_completion()) bottom-to-top.
1274 void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
1276 struct cl_req_slice *slice;
1280 * for the lack of list_for_each_entry_reverse_safe()...
1282 while (!list_empty(&req->crq_layers)) {
1283 slice = list_entry(req->crq_layers.prev,
1284 struct cl_req_slice, crs_linkage);
1285 list_del_init(&slice->crs_linkage);
1286 if (slice->crs_ops->cro_completion != NULL)
1287 slice->crs_ops->cro_completion(env, slice, rc);
1289 cl_req_free(env, req);
1292 EXPORT_SYMBOL(cl_req_completion);
1295 * Allocates new transfer request.
1297 struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
1298 enum cl_req_type crt, int nr_objects)
1302 LINVRNT(nr_objects > 0);
1309 req->crq_type = crt;
1310 INIT_LIST_HEAD(&req->crq_pages);
1311 INIT_LIST_HEAD(&req->crq_layers);
1313 OBD_ALLOC(req->crq_o, nr_objects * sizeof req->crq_o[0]);
1314 if (req->crq_o != NULL) {
1315 req->crq_nrobjs = nr_objects;
1316 result = cl_req_init(env, req, page);
1320 cl_req_completion(env, req, result);
1321 req = ERR_PTR(result);
1324 req = ERR_PTR(-ENOMEM);
1327 EXPORT_SYMBOL(cl_req_alloc);
1330 * Adds a page to a request.
1332 void cl_req_page_add(const struct lu_env *env,
1333 struct cl_req *req, struct cl_page *page)
1335 struct cl_object *obj;
1336 struct cl_req_obj *rqo;
1341 LASSERT(list_empty(&page->cp_flight));
1342 LASSERT(page->cp_req == NULL);
1344 CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n",
1345 req, req->crq_type, req->crq_nrpages);
1347 list_add_tail(&page->cp_flight, &req->crq_pages);
1350 obj = cl_object_top(page->cp_obj);
1351 for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) {
1352 if (rqo->ro_obj == NULL) {
1355 lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref,
1360 LASSERT(i < req->crq_nrobjs);
1363 EXPORT_SYMBOL(cl_req_page_add);
1366 * Removes a page from a request.
1368 void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
1370 struct cl_req *req = page->cp_req;
1374 LASSERT(!list_empty(&page->cp_flight));
1375 LASSERT(req->crq_nrpages > 0);
1377 list_del_init(&page->cp_flight);
1379 page->cp_req = NULL;
1382 EXPORT_SYMBOL(cl_req_page_done);
1385 * Notifies layers that request is about to depart by calling
1386 * cl_req_operations::cro_prep() top-to-bottom.
1388 int cl_req_prep(const struct lu_env *env, struct cl_req *req)
1392 const struct cl_req_slice *slice;
1396 * Check that the caller of cl_req_alloc() didn't lie about the number
1399 for (i = 0; i < req->crq_nrobjs; ++i)
1400 LASSERT(req->crq_o[i].ro_obj != NULL);
1403 list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
1404 if (slice->crs_ops->cro_prep != NULL) {
1405 result = slice->crs_ops->cro_prep(env, slice);
1412 EXPORT_SYMBOL(cl_req_prep);
1415 * Fills in attributes that are passed to server together with transfer. Only
1416 * attributes from \a flags may be touched. This can be called multiple times
1417 * for the same request.
1419 void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
1420 struct cl_req_attr *attr, u64 flags)
1422 const struct cl_req_slice *slice;
1423 struct cl_page *page;
1426 LASSERT(!list_empty(&req->crq_pages));
1429 /* Take any page to use as a model. */
1430 page = list_entry(req->crq_pages.next, struct cl_page, cp_flight);
1432 for (i = 0; i < req->crq_nrobjs; ++i) {
1433 list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
1434 const struct cl_page_slice *scan;
1435 const struct cl_object *obj;
1437 scan = cl_page_at(page,
1438 slice->crs_dev->cd_lu_dev.ld_type);
1439 LASSERT(scan != NULL);
1440 obj = scan->cpl_obj;
1441 if (slice->crs_ops->cro_attr_set != NULL)
1442 slice->crs_ops->cro_attr_set(env, slice, obj,
1448 EXPORT_SYMBOL(cl_req_attr_set);
1450 /* cl_sync_io_callback assumes the caller must call cl_sync_io_wait() to
1451 * wait for the IO to finish. */
1452 void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor)
1454 wake_up_all(&anchor->csi_waitq);
1456 /* it's safe to nuke or reuse anchor now */
1457 atomic_set(&anchor->csi_barrier, 0);
1459 EXPORT_SYMBOL(cl_sync_io_end);
1462 * Initialize synchronous io wait anchor
1464 void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
1465 void (*end)(const struct lu_env *, struct cl_sync_io *))
1468 memset(anchor, 0, sizeof(*anchor));
1469 init_waitqueue_head(&anchor->csi_waitq);
1470 atomic_set(&anchor->csi_sync_nr, nr);
1471 atomic_set(&anchor->csi_barrier, nr > 0);
1472 anchor->csi_sync_rc = 0;
1473 anchor->csi_end_io = end;
1474 LASSERT(end != NULL);
1477 EXPORT_SYMBOL(cl_sync_io_init);
1480 * Wait until all IO completes. Transfer completion routine has to call
1481 * cl_sync_io_note() for every entity.
1483 int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
1486 struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
1491 LASSERT(timeout >= 0);
1493 rc = l_wait_event(anchor->csi_waitq,
1494 atomic_read(&anchor->csi_sync_nr) == 0,
1497 CERROR("IO failed: %d, still waiting for %d remaining entries\n",
1498 rc, atomic_read(&anchor->csi_sync_nr));
1500 lwi = (struct l_wait_info) { 0 };
1501 (void)l_wait_event(anchor->csi_waitq,
1502 atomic_read(&anchor->csi_sync_nr) == 0,
1505 rc = anchor->csi_sync_rc;
1507 LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
1509 /* wait until cl_sync_io_note() has done wakeup */
1510 while (unlikely(atomic_read(&anchor->csi_barrier) != 0)) {
1515 EXPORT_SYMBOL(cl_sync_io_wait);
1518 * Indicate that transfer of a single page completed.
1520 void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
1524 if (anchor->csi_sync_rc == 0 && ioret < 0)
1525 anchor->csi_sync_rc = ioret;
1527 * Synchronous IO done without releasing page lock (e.g., as a part of
1528 * ->{prepare,commit}_write()). Completion is used to signal the end of
1531 LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
1532 if (atomic_dec_and_test(&anchor->csi_sync_nr)) {
1533 LASSERT(anchor->csi_end_io != NULL);
1534 anchor->csi_end_io(env, anchor);
1535 /* Can't access anchor any more */
1539 EXPORT_SYMBOL(cl_sync_io_note);
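/*
 * Illustrative sketch, not part of this module: the cl_sync_io handshake
 * implemented by cl_io_submit_sync() above, reduced to its three steps.
 * The anchor lives on the caller's stack here; "nr_transfers" is the
 * number of completions that will eventually call cl_sync_io_note(), and
 * the function name is hypothetical.
 */
#if 0
static int example_sync_wait(const struct lu_env *env, int nr_transfers)
{
	struct cl_sync_io anchor;

	cl_sync_io_init(&anchor, nr_transfers, &cl_sync_io_end);
	/*
	 * ... submit nr_transfers transfers; every completion handler must
	 * call cl_sync_io_note(env, &anchor, ioret) exactly once ...
	 */
	return cl_sync_io_wait(env, &anchor, 0);	/* 0: wait unconditionally */
}
#endif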