4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
34 * Author: Nikita Danilov <nikita.danilov@sun.com>
35 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
38 #define DEBUG_SUBSYSTEM S_CLASS
40 #include <linux/sched.h>
41 #include <linux/list.h>
42 #include <obd_class.h>
43 #include <obd_support.h>
44 #include <lustre_fid.h>
45 #include <cl_object.h>
46 #include "cl_internal.h"
48 /*****************************************************************************
54 static inline int cl_io_type_is_valid(enum cl_io_type type)
56 return CIT_READ <= type && type < CIT_OP_NR;
59 static inline int cl_io_is_loopable(const struct cl_io *io)
61 return cl_io_type_is_valid(io->ci_type) && io->ci_type != CIT_MISC;
65 * cl_io invariant that holds at all times when exported cl_io_*() functions
66 * are entered and left.
68 static int cl_io_invariant(const struct cl_io *io)
75 * io can own pages only when it is ongoing. Sub-io might
76 * still be in CIS_LOCKED state when top-io is in CIS_IO_GOING state.
79 ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING ||
80 (io->ci_state == CIS_LOCKED && up != NULL));
84 * Finalize \a io, by calling cl_io_operations::cio_fini() bottom-to-top.
86 void cl_io_fini(const struct lu_env *env, struct cl_io *io)
88 struct cl_io_slice *slice;
90 LINVRNT(cl_io_type_is_valid(io->ci_type));
91 LINVRNT(cl_io_invariant(io));
94 while (!list_empty(&io->ci_layers)) {
95 slice = container_of(io->ci_layers.prev, struct cl_io_slice,
97 list_del_init(&slice->cis_linkage);
98 if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
99 slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
101 * Invalidate slice to catch use after free. This assumes that
102 * slices are allocated within session and can be touched
103 * after ->cio_fini() returns.
105 slice->cis_io = NULL;
107 io->ci_state = CIS_FINI;
109 /* sanity check for layout change */
110 switch (io->ci_type) {
113 case CIT_DATA_VERSION:
117 LASSERT(!io->ci_need_restart);
121 /* Check ignore layout change conf */
122 LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout,
123 !io->ci_need_restart));
133 EXPORT_SYMBOL(cl_io_fini);
135 static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
136 enum cl_io_type iot, struct cl_object *obj)
138 struct cl_object *scan;
141 LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI);
142 LINVRNT(cl_io_type_is_valid(iot));
143 LINVRNT(cl_io_invariant(io));
147 INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
148 INIT_LIST_HEAD(&io->ci_lockset.cls_done);
149 INIT_LIST_HEAD(&io->ci_layers);
152 cl_object_for_each(scan, obj) {
153 if (scan->co_ops->coo_io_init != NULL) {
154 result = scan->co_ops->coo_io_init(env, scan, io);
160 io->ci_state = CIS_INIT;
165 * Initialize sub-io, by calling cl_io_operations::cio_init() top-to-bottom.
167 * \pre obj != cl_object_top(obj)
169 int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
170 enum cl_io_type iot, struct cl_object *obj)
172 LASSERT(obj != cl_object_top(obj));
174 return cl_io_init0(env, io, iot, obj);
176 EXPORT_SYMBOL(cl_io_sub_init);
179 * Initialize \a io, by calling cl_io_operations::cio_init() top-to-bottom.
181 * Caller has to call cl_io_fini() after a call to cl_io_init(), no matter
182 * what the latter returned.
184 * \pre obj == cl_object_top(obj)
185 * \pre cl_io_type_is_valid(iot)
186 * \post cl_io_type_is_valid(io->ci_type) && io->ci_type == iot
188 int cl_io_init(const struct lu_env *env, struct cl_io *io,
189 enum cl_io_type iot, struct cl_object *obj)
191 LASSERT(obj == cl_object_top(obj));
193 /* clear I/O restart from previous instance */
194 io->ci_need_restart = 0;
196 return cl_io_init0(env, io, iot, obj);
198 EXPORT_SYMBOL(cl_io_init);
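/**
 * Example (illustrative sketch, not part of the original source): the
 * init/fini contract for a top-level io. cl_io_fini() must be called even
 * when cl_io_init() fails; the env, io and obj values are assumed to have
 * been obtained by the caller elsewhere.
 *
 * \code
 *	int rc;
 *
 *	io->ci_obj = obj;
 *	rc = cl_io_init(env, io, CIT_MISC, obj);
 *	if (rc == 0) {
 *		// ... use the initialized io ...
 *	}
 *	cl_io_fini(env, io);	// mandatory regardless of rc
 * \endcode
 */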
201 * Initialize read or write io.
203 * \pre iot == CIT_READ || iot == CIT_WRITE
205 int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
206 enum cl_io_type iot, loff_t pos, size_t count)
208 LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
209 LINVRNT(io->ci_obj != NULL);
212 LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
213 "io range: %u [%llu, %llu) %u %u\n",
214 iot, (__u64)pos, (__u64)pos + count,
215 io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
216 io->u.ci_rw.crw_pos = pos;
217 io->u.ci_rw.crw_count = count;
218 RETURN(cl_io_init(env, io, iot, io->ci_obj));
220 EXPORT_SYMBOL(cl_io_rw_init);
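/**
 * Example (illustrative sketch): preparing a buffered write io for the byte
 * range [pos, pos + count). io->ci_obj must already point at the top
 * cl_object; env, io, obj, pos and count are assumed to come from the
 * caller, and a real caller's restart handling is omitted here.
 *
 * \code
 *	int rc;
 *
 *	io->ci_obj = cl_object_top(obj);
 *	rc = cl_io_rw_init(env, io, CIT_WRITE, pos, count);
 *	if (rc == 0) {
 *		io->u.ci_wr.wr_append = 0;
 *		rc = cl_io_loop(env, io);
 *	}
 *	cl_io_fini(env, io);
 * \endcode
 */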
222 static int cl_lock_descr_sort(const struct cl_lock_descr *d0,
223 const struct cl_lock_descr *d1)
225 return lu_fid_cmp(lu_object_fid(&d0->cld_obj->co_lu),
226 lu_object_fid(&d1->cld_obj->co_lu));
230 * Sort locks in lexicographical order of their (fid, start-offset) pairs.
232 static void cl_io_locks_sort(struct cl_io *io)
237 /* hidden treasure: bubble sort for now. */
239 struct cl_io_lock_link *curr;
240 struct cl_io_lock_link *prev;
241 struct cl_io_lock_link *temp;
246 list_for_each_entry_safe(curr, temp, &io->ci_lockset.cls_todo,
249 switch (cl_lock_descr_sort(&prev->cill_descr,
250 &curr->cill_descr)) {
253 * IMPOSSIBLE: Identical locks are
260 list_move_tail(&curr->cill_linkage,
261 &prev->cill_linkage);
263 continue; /* don't change prev: it's
264 * still "previous" */
265 case -1: /* already in order */
275 static void cl_lock_descr_merge(struct cl_lock_descr *d0,
276 const struct cl_lock_descr *d1)
278 d0->cld_start = min(d0->cld_start, d1->cld_start);
279 d0->cld_end = max(d0->cld_end, d1->cld_end);
281 if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
282 d0->cld_mode = CLM_WRITE;
284 if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
285 d0->cld_mode = CLM_GROUP;
288 static int cl_lockset_merge(const struct cl_lockset *set,
289 const struct cl_lock_descr *need)
291 struct cl_io_lock_link *scan;
294 list_for_each_entry(scan, &set->cls_todo, cill_linkage) {
295 if (!cl_object_same(scan->cill_descr.cld_obj, need->cld_obj))
298 /* Merge locks for the same object because the LDLM lock server
299 * may expand the lock extent; otherwise there is a deadlock
300 * case if two conflicting locks are queued for the same object
301 * and the lock server expands one lock to overlap the other.
302 * The side effect is that it can generate a multi-stripe lock
303 * that may cause cascading problems. */
304 cl_lock_descr_merge(&scan->cill_descr, need);
305 CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
306 scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
307 scan->cill_descr.cld_end);
313 static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
314 struct cl_lockset *set)
316 struct cl_io_lock_link *link;
317 struct cl_io_lock_link *temp;
322 list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
323 result = cl_lock_request(env, io, &link->cill_lock);
327 list_move(&link->cill_linkage, &set->cls_done);
333 * Takes locks necessary for the current iteration of io.
335 * Calls cl_io_operations::cio_lock() top-to-bottom to collect locks required
336 * by layers for the current iteration. Then sort locks (to avoid deadlocks) and acquire them.
339 int cl_io_lock(const struct lu_env *env, struct cl_io *io)
341 const struct cl_io_slice *scan;
344 LINVRNT(cl_io_is_loopable(io));
345 LINVRNT(io->ci_state == CIS_IT_STARTED);
346 LINVRNT(cl_io_invariant(io));
349 list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
350 if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
352 result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
357 cl_io_locks_sort(io);
358 result = cl_lockset_lock(env, io, &io->ci_lockset);
361 cl_io_unlock(env, io);
363 io->ci_state = CIS_LOCKED;
366 EXPORT_SYMBOL(cl_io_lock);
369 * Releases locks taken by io.
371 void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
373 struct cl_lockset *set;
374 struct cl_io_lock_link *link;
375 struct cl_io_lock_link *temp;
376 const struct cl_io_slice *scan;
378 LASSERT(cl_io_is_loopable(io));
379 LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED);
380 LINVRNT(cl_io_invariant(io));
383 set = &io->ci_lockset;
385 list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
386 list_del_init(&link->cill_linkage);
387 if (link->cill_fini != NULL)
388 link->cill_fini(env, link);
391 list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
392 list_del_init(&link->cill_linkage);
393 cl_lock_release(env, &link->cill_lock);
394 if (link->cill_fini != NULL)
395 link->cill_fini(env, link);
398 list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
399 if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
400 scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
402 io->ci_state = CIS_UNLOCKED;
405 EXPORT_SYMBOL(cl_io_unlock);
408 * Prepares next iteration of io.
410 * Calls cl_io_operations::cio_iter_init() top-to-bottom. This exists to give
411 * layers a chance to modify io parameters, e.g., so that lov can restrict io
412 * to a single stripe.
414 int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
416 const struct cl_io_slice *scan;
419 LINVRNT(cl_io_is_loopable(io));
420 LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED);
421 LINVRNT(cl_io_invariant(io));
425 list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
426 if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
428 result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
434 io->ci_state = CIS_IT_STARTED;
437 EXPORT_SYMBOL(cl_io_iter_init);
440 * Finalizes io iteration.
442 * Calls cl_io_operations::cio_iter_fini() bottom-to-top.
444 void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
446 const struct cl_io_slice *scan;
448 LINVRNT(cl_io_is_loopable(io));
449 LINVRNT(io->ci_state <= CIS_IT_STARTED ||
450 io->ci_state > CIS_IO_FINISHED);
451 LINVRNT(cl_io_invariant(io));
454 list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
455 if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
456 scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
458 io->ci_state = CIS_IT_ENDED;
461 EXPORT_SYMBOL(cl_io_iter_fini);
464 * Records that read or write io progressed \a nob bytes forward.
466 void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, size_t nob)
468 const struct cl_io_slice *scan;
472 LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
474 LINVRNT(cl_io_is_loopable(io));
475 LINVRNT(cl_io_invariant(io));
477 io->u.ci_rw.crw_pos += nob;
478 io->u.ci_rw.crw_count -= nob;
480 /* layers have to be notified. */
481 list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
482 if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
483 scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
490 * Adds a lock to a lockset.
492 int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
493 struct cl_io_lock_link *link)
498 if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr))
501 list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
506 EXPORT_SYMBOL(cl_io_lock_add);
508 static void cl_free_io_lock_link(const struct lu_env *env,
509 struct cl_io_lock_link *link)
515 * Allocates a new lock link and uses it to add a lock to a lockset.
517 int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
518 struct cl_lock_descr *descr)
520 struct cl_io_lock_link *link;
526 link->cill_descr = *descr;
527 link->cill_fini = cl_free_io_lock_link;
528 result = cl_io_lock_add(env, io, link);
529 if (result) /* lock match */
530 link->cill_fini(env, link);
536 EXPORT_SYMBOL(cl_io_lock_alloc_add);
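/**
 * Example (illustrative sketch): how a layer's ->cio_lock() method could
 * request an extent lock for the current iteration. The start_index and
 * end_index page indices are hypothetical; the cl_lock_descr fields and
 * cl_io_lock_alloc_add() are the ones used in this file, and the descriptor
 * may live on the stack because cl_io_lock_alloc_add() copies it into the
 * allocated link.
 *
 * \code
 *	struct cl_lock_descr descr = { 0 };
 *
 *	descr.cld_obj   = io->ci_obj;
 *	descr.cld_start = start_index;
 *	descr.cld_end   = end_index;
 *	descr.cld_mode  = CLM_WRITE;
 *
 *	return cl_io_lock_alloc_add(env, io, &descr);
 * \endcode
 */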
539 * Starts io by calling cl_io_operations::cio_start() top-to-bottom.
541 int cl_io_start(const struct lu_env *env, struct cl_io *io)
543 const struct cl_io_slice *scan;
546 LINVRNT(cl_io_is_loopable(io));
547 LINVRNT(io->ci_state == CIS_LOCKED);
548 LINVRNT(cl_io_invariant(io));
551 io->ci_state = CIS_IO_GOING;
552 list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
553 if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
555 result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
563 EXPORT_SYMBOL(cl_io_start);
566 * Wait until current io iteration is finished by calling
567 * cl_io_operations::cio_end() bottom-to-top.
569 void cl_io_end(const struct lu_env *env, struct cl_io *io)
571 const struct cl_io_slice *scan;
573 LINVRNT(cl_io_is_loopable(io));
574 LINVRNT(io->ci_state == CIS_IO_GOING);
575 LINVRNT(cl_io_invariant(io));
578 list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
579 if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
580 scan->cis_iop->op[io->ci_type].cio_end(env, scan);
581 /* TODO: error handling. */
583 io->ci_state = CIS_IO_FINISHED;
586 EXPORT_SYMBOL(cl_io_end);
589 * Called by read io to decide the readahead extent.
591 * \see cl_io_operations::cio_read_ahead()
593 int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io,
594 pgoff_t start, struct cl_read_ahead *ra)
596 const struct cl_io_slice *scan;
599 LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
600 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
601 LINVRNT(cl_io_invariant(io));
604 list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
605 if (scan->cis_iop->cio_read_ahead == NULL)
608 result = scan->cis_iop->cio_read_ahead(env, scan, start, ra);
612 RETURN(result > 0 ? 0 : result);
614 EXPORT_SYMBOL(cl_io_read_ahead);
617 * Commit a list of contiguous pages into writeback cache.
619 * \returns 0 if all pages were committed, or an error code if an error occurred.
620 * \see cl_io_operations::cio_commit_async()
622 int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
623 struct cl_page_list *queue, int from, int to,
626 const struct cl_io_slice *scan;
630 list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
631 if (scan->cis_iop->cio_commit_async == NULL)
633 result = scan->cis_iop->cio_commit_async(env, scan, queue,
640 EXPORT_SYMBOL(cl_io_commit_async);
643 * Submits a list of pages for immediate io.
645 * After the function returns, the submitted pages are moved to the
646 * queue->c2_qout queue, while queue->c2_qin contains both the pages that did
647 * not need to be submitted and the pages that failed to submit.
649 * \returns 0 if at least one page was submitted, error code otherwise.
650 * \see cl_io_operations::cio_submit()
652 int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
653 enum cl_req_type crt, struct cl_2queue *queue)
655 const struct cl_io_slice *scan;
659 list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
660 if (scan->cis_iop->cio_submit == NULL)
662 result = scan->cis_iop->cio_submit(env, scan, crt, queue);
667 * If ->cio_submit() failed, no pages were sent.
669 LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages)));
672 EXPORT_SYMBOL(cl_io_submit_rw);
675 * Submit a sync_io and wait until the IO finishes or an error occurs.
676 * If \a timeout is zero, wait for the IO unconditionally.
678 int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
679 enum cl_req_type iot, struct cl_2queue *queue,
682 struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
686 cl_page_list_for_each(pg, &queue->c2_qin) {
687 LASSERT(pg->cp_sync_io == NULL);
688 pg->cp_sync_io = anchor;
691 cl_sync_io_init(anchor, queue->c2_qin.pl_nr, &cl_sync_io_end);
692 rc = cl_io_submit_rw(env, io, iot, queue);
695 * If some pages weren't sent for any reason (e.g.,
696 * read found up-to-date pages in the cache, or write found
697 * clean pages), count them as completed to avoid an infinite wait.
700 cl_page_list_for_each(pg, &queue->c2_qin) {
701 pg->cp_sync_io = NULL;
702 cl_sync_io_note(env, anchor, 1);
705 /* wait for the IO to be finished. */
706 rc = cl_sync_io_wait(env, anchor, timeout);
707 cl_page_list_assume(env, io, &queue->c2_qout);
709 LASSERT(list_empty(&queue->c2_qout.pl_pages));
710 cl_page_list_for_each(pg, &queue->c2_qin)
711 pg->cp_sync_io = NULL;
715 EXPORT_SYMBOL(cl_io_submit_sync);
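/**
 * Example (illustrative sketch): synchronously transferring one owned page
 * through a 2-queue. CRT_READ and the zero timeout (wait unconditionally)
 * are assumptions about the caller's context; the page must already be
 * owned by this io, since cl_page_list_add() below asserts cp_owner != NULL.
 *
 * \code
 *	struct cl_2queue queue;
 *	int rc;
 *
 *	cl_2queue_init_page(&queue, page);
 *	rc = cl_io_submit_sync(env, io, CRT_READ, &queue, 0);
 *	cl_2queue_disown(env, io, &queue);
 *	cl_2queue_fini(env, &queue);
 * \endcode
 */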
718 * Cancel an IO which has been submitted by cl_io_submit_rw.
720 int cl_io_cancel(const struct lu_env *env, struct cl_io *io,
721 struct cl_page_list *queue)
723 struct cl_page *page;
726 CERROR("Canceling ongoing page trasmission\n");
727 cl_page_list_for_each(page, queue) {
730 rc = cl_page_cancel(env, page);
731 result = result ?: rc;
739 * Pumps io through iterations calling
741 * - cl_io_iter_init()
751 * - cl_io_iter_fini()
753 * repeatedly until there is no more io to do.
755 int cl_io_loop(const struct lu_env *env, struct cl_io *io)
759 LINVRNT(cl_io_is_loopable(io));
766 result = cl_io_iter_init(env, io);
769 result = cl_io_lock(env, io);
772 * Notify layers that locks have been taken,
775 * - llite: kms, short read;
776 * - llite: generic_file_read();
778 result = cl_io_start(env, io);
780 * Send any remaining pending io.
783 * - llite: ll_rw_stats_tally.
786 cl_io_unlock(env, io);
787 cl_io_rw_advance(env, io, io->ci_nob - nob);
790 cl_io_iter_fini(env, io);
791 } while (result == 0 && io->ci_continue);
793 if (result == -EWOULDBLOCK && io->ci_ndelay) {
794 io->ci_need_restart = 1;
799 result = io->ci_result;
800 RETURN(result < 0 ? result : 0);
802 EXPORT_SYMBOL(cl_io_loop);
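/**
 * Example (illustrative sketch): one way a caller can drive an io and honour
 * the ci_need_restart flag that cl_io_loop() may leave set (e.g., after a
 * layout change). The restart policy, and how pos and count would be
 * refreshed for the retry, are assumptions about the caller rather than
 * something this file enforces.
 *
 * \code
 *	int rc;
 *	int restart;
 *
 *	do {
 *		io->ci_obj = obj;
 *		rc = cl_io_rw_init(env, io, CIT_READ, pos, count);
 *		if (rc == 0)
 *			rc = cl_io_loop(env, io);
 *		restart = io->ci_need_restart;
 *		cl_io_fini(env, io);
 *	} while (restart);
 * \endcode
 */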
805 * Adds io slice to the cl_io.
807 * This is called by cl_object_operations::coo_io_init() methods to add a
808 * per-layer state to the io. New state is added at the end of
809 * cl_io::ci_layers list, that is, it is at the bottom of the stack.
811 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_page_slice_add()
813 void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
814 struct cl_object *obj,
815 const struct cl_io_operations *ops)
817 struct list_head *linkage = &slice->cis_linkage;
819 LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
820 list_empty(linkage));
823 list_add_tail(linkage, &io->ci_layers);
825 slice->cis_obj = obj;
826 slice->cis_iop = ops;
829 EXPORT_SYMBOL(cl_io_slice_add);
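/**
 * Example (illustrative sketch): the usual shape of a layer's
 * cl_object_operations::coo_io_init() method, which attaches its io slice
 * with cl_io_slice_add(). The foo_* names, the per-session slice storage and
 * the contents of the operation vector are hypothetical placeholders; only
 * the cl_io_slice_add() call and the op[]/cio_* layout come from this file.
 *
 * \code
 *	static const struct cl_io_operations foo_io_ops = {
 *		.op = {
 *			[CIT_READ] = {
 *				.cio_fini = foo_io_fini,
 *			},
 *		},
 *	};
 *
 *	static int foo_io_init(const struct lu_env *env,
 *			       struct cl_object *obj, struct cl_io *io)
 *	{
 *		struct foo_io *fio = foo_env_io(env);	// per-session slice
 *
 *		cl_io_slice_add(io, &fio->fi_cl, obj, &foo_io_ops);
 *		return 0;
 *	}
 * \endcode
 */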
833 * Initializes page list.
835 void cl_page_list_init(struct cl_page_list *plist)
839 INIT_LIST_HEAD(&plist->pl_pages);
840 plist->pl_owner = current;
843 EXPORT_SYMBOL(cl_page_list_init);
846 * Adds a page to a page list.
848 void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
851 /* it would be better to check that page is owned by "current" io, but
852 * it is not passed here. */
853 LASSERT(page->cp_owner != NULL);
854 LINVRNT(plist->pl_owner == current);
856 LASSERT(list_empty(&page->cp_batch));
857 list_add_tail(&page->cp_batch, &plist->pl_pages);
859 lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
863 EXPORT_SYMBOL(cl_page_list_add);
866 * Removes a page from a page list.
868 void cl_page_list_del(const struct lu_env *env,
869 struct cl_page_list *plist, struct cl_page *page)
871 LASSERT(plist->pl_nr > 0);
872 LASSERT(cl_page_is_vmlocked(env, page));
873 LINVRNT(plist->pl_owner == current);
876 list_del_init(&page->cp_batch);
878 lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
879 cl_page_put(env, page);
882 EXPORT_SYMBOL(cl_page_list_del);
885 * Moves a page from one page list to another.
887 void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
888 struct cl_page *page)
890 LASSERT(src->pl_nr > 0);
891 LINVRNT(dst->pl_owner == current);
892 LINVRNT(src->pl_owner == current);
895 list_move_tail(&page->cp_batch, &dst->pl_pages);
898 lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
902 EXPORT_SYMBOL(cl_page_list_move);
905 * Moves a page from one page list to the head of another list.
907 void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
908 struct cl_page *page)
910 LASSERT(src->pl_nr > 0);
911 LINVRNT(dst->pl_owner == current);
912 LINVRNT(src->pl_owner == current);
915 list_move(&page->cp_batch, &dst->pl_pages);
918 lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
922 EXPORT_SYMBOL(cl_page_list_move_head);
925 * Splice one cl_page_list onto another, just as list_splice() does for a list head.
927 void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
929 struct cl_page *page;
932 LINVRNT(list->pl_owner == current);
933 LINVRNT(head->pl_owner == current);
936 cl_page_list_for_each_safe(page, tmp, list)
937 cl_page_list_move(head, list, page);
940 EXPORT_SYMBOL(cl_page_list_splice);
943 * Disowns pages in a queue.
945 void cl_page_list_disown(const struct lu_env *env,
946 struct cl_io *io, struct cl_page_list *plist)
948 struct cl_page *page;
949 struct cl_page *temp;
951 LINVRNT(plist->pl_owner == current);
954 cl_page_list_for_each_safe(page, temp, plist) {
955 LASSERT(plist->pl_nr > 0);
957 list_del_init(&page->cp_batch);
960 * cl_page_disown0() rather than the usual cl_page_disown() is used,
961 * because pages are possibly in CPS_FREEING state already due
962 * to the call to cl_page_list_discard().
965 * XXX cl_page_disown0() will fail if page is not locked.
967 cl_page_disown0(env, io, page);
968 lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue",
970 cl_page_put(env, page);
974 EXPORT_SYMBOL(cl_page_list_disown);
977 * Releases pages from queue.
979 void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
981 struct cl_page *page;
982 struct cl_page *temp;
984 LINVRNT(plist->pl_owner == current);
987 cl_page_list_for_each_safe(page, temp, plist)
988 cl_page_list_del(env, plist, page);
989 LASSERT(plist->pl_nr == 0);
992 EXPORT_SYMBOL(cl_page_list_fini);
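/**
 * Example (illustrative sketch): building up and tearing down a page list.
 * The page0 and page1 pages are placeholders and must already be owned by
 * the current io, since cl_page_list_add() asserts cp_owner != NULL.
 *
 * \code
 *	struct cl_page_list plist;
 *
 *	cl_page_list_init(&plist);
 *	cl_page_list_add(&plist, page0);
 *	cl_page_list_add(&plist, page1);
 *	// ... submit or otherwise process plist ...
 *	cl_page_list_fini(env, &plist);
 * \endcode
 */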
995 * Assumes (takes ownership of) all pages in a queue.
997 void cl_page_list_assume(const struct lu_env *env,
998 struct cl_io *io, struct cl_page_list *plist)
1000 struct cl_page *page;
1002 LINVRNT(plist->pl_owner == current);
1004 cl_page_list_for_each(page, plist)
1005 cl_page_assume(env, io, page);
1009 * Discards all pages in a queue.
1011 void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
1012 struct cl_page_list *plist)
1014 struct cl_page *page;
1016 LINVRNT(plist->pl_owner == current);
1018 cl_page_list_for_each(page, plist)
1019 cl_page_discard(env, io, page);
1022 EXPORT_SYMBOL(cl_page_list_discard);
1025 * Initialize dual page queue.
1027 void cl_2queue_init(struct cl_2queue *queue)
1030 cl_page_list_init(&queue->c2_qin);
1031 cl_page_list_init(&queue->c2_qout);
1034 EXPORT_SYMBOL(cl_2queue_init);
1037 * Add a page to the incoming page list of 2-queue.
1039 void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page)
1042 cl_page_list_add(&queue->c2_qin, page);
1045 EXPORT_SYMBOL(cl_2queue_add);
1048 * Disown pages in both lists of a 2-queue.
1050 void cl_2queue_disown(const struct lu_env *env,
1051 struct cl_io *io, struct cl_2queue *queue)
1054 cl_page_list_disown(env, io, &queue->c2_qin);
1055 cl_page_list_disown(env, io, &queue->c2_qout);
1058 EXPORT_SYMBOL(cl_2queue_disown);
1061 * Discard (truncate) pages in both lists of a 2-queue.
1063 void cl_2queue_discard(const struct lu_env *env,
1064 struct cl_io *io, struct cl_2queue *queue)
1067 cl_page_list_discard(env, io, &queue->c2_qin);
1068 cl_page_list_discard(env, io, &queue->c2_qout);
1071 EXPORT_SYMBOL(cl_2queue_discard);
1074 * Assume ownership of the pages in a cl_2queue.
1076 void cl_2queue_assume(const struct lu_env *env,
1077 struct cl_io *io, struct cl_2queue *queue)
1079 cl_page_list_assume(env, io, &queue->c2_qin);
1080 cl_page_list_assume(env, io, &queue->c2_qout);
1084 * Finalize both page lists of a 2-queue.
1086 void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue)
1089 cl_page_list_fini(env, &queue->c2_qout);
1090 cl_page_list_fini(env, &queue->c2_qin);
1093 EXPORT_SYMBOL(cl_2queue_fini);
1096 * Initialize a 2-queue to contain \a page in its incoming page list.
1098 void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page)
1101 cl_2queue_init(queue);
1102 cl_2queue_add(queue, page);
1105 EXPORT_SYMBOL(cl_2queue_init_page);
1108 * Returns top-level io.
1110 * \see cl_object_top()
1112 struct cl_io *cl_io_top(struct cl_io *io)
1115 while (io->ci_parent != NULL)
1119 EXPORT_SYMBOL(cl_io_top);
1122 * Prints human readable representation of \a io to the \a f.
1124 void cl_io_print(const struct lu_env *env, void *cookie,
1125 lu_printer_t printer, const struct cl_io *io)
1130 * Fills in attributes that are passed to the server together with the transfer. Only
1131 * attributes from \a flags may be touched. This can be called multiple times
1132 * for the same request.
1134 void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj,
1135 struct cl_req_attr *attr)
1137 struct cl_object *scan;
1140 cl_object_for_each(scan, obj) {
1141 if (scan->co_ops->coo_req_attr_set != NULL)
1142 scan->co_ops->coo_req_attr_set(env, scan, attr);
1146 EXPORT_SYMBOL(cl_req_attr_set);
1148 /* cl_sync_io_callback assumes that the caller will call cl_sync_io_wait() to
1149 * wait for the IO to finish. */
1150 void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor)
1152 wake_up_all(&anchor->csi_waitq);
1154 /* it's safe to nuke or reuse anchor now */
1155 atomic_set(&anchor->csi_barrier, 0);
1157 EXPORT_SYMBOL(cl_sync_io_end);
1160 * Initialize synchronous io wait anchor
1162 void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
1163 void (*end)(const struct lu_env *, struct cl_sync_io *))
1166 memset(anchor, 0, sizeof(*anchor));
1167 init_waitqueue_head(&anchor->csi_waitq);
1168 atomic_set(&anchor->csi_sync_nr, nr);
1169 atomic_set(&anchor->csi_barrier, nr > 0);
1170 anchor->csi_sync_rc = 0;
1171 anchor->csi_end_io = end;
1172 LASSERT(end != NULL);
1175 EXPORT_SYMBOL(cl_sync_io_init);
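/**
 * Example (illustrative sketch): the anchor protocol for waiting on a batch
 * of transfers. The nr_pages count, the per-page submission step and the
 * zero timeout are assumptions about the caller; cl_sync_io_end() is the
 * completion callback defined above, and every completion path must call
 * cl_sync_io_note() exactly once per page.
 *
 * \code
 *	struct cl_sync_io anchor;
 *	int i;
 *	int rc;
 *
 *	cl_sync_io_init(&anchor, nr_pages, cl_sync_io_end);
 *	for (i = 0; i < nr_pages; i++) {
 *		// start the asynchronous transfer of page i; its completion
 *		// handler calls cl_sync_io_note(env, &anchor, ioret)
 *	}
 *	rc = cl_sync_io_wait(env, &anchor, 0);
 * \endcode
 */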
1178 * Wait until all IO completes. The transfer completion routine has to call
1179 * cl_sync_io_note() for every entity.
1181 int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
1184 struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
1189 LASSERT(timeout >= 0);
1191 rc = l_wait_event(anchor->csi_waitq,
1192 atomic_read(&anchor->csi_sync_nr) == 0,
1195 CERROR("IO failed: %d, still wait for %d remaining entries\n",
1196 rc, atomic_read(&anchor->csi_sync_nr));
1198 lwi = (struct l_wait_info) { 0 };
1199 (void)l_wait_event(anchor->csi_waitq,
1200 atomic_read(&anchor->csi_sync_nr) == 0,
1203 rc = anchor->csi_sync_rc;
1205 LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
1207 /* wait until cl_sync_io_note() has done wakeup */
1208 while (unlikely(atomic_read(&anchor->csi_barrier) != 0)) {
1213 EXPORT_SYMBOL(cl_sync_io_wait);
1216 * Indicate that transfer of a single page completed.
1218 void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
1222 if (anchor->csi_sync_rc == 0 && ioret < 0)
1223 anchor->csi_sync_rc = ioret;
1225 * Synchronous IO done without releasing the page lock (e.g., as a part of
1226 * ->{prepare,commit}_write()). Completion is used to signal the end of IO.
1229 LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
1230 if (atomic_dec_and_test(&anchor->csi_sync_nr)) {
1231 LASSERT(anchor->csi_end_io != NULL);
1232 anchor->csi_end_io(env, anchor);
1233 /* Can't access anchor any more */
1237 EXPORT_SYMBOL(cl_sync_io_note);