 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include <linux/sched.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_fid.h>
#include <libcfs/list.h>
#include <cl_object.h>
#include "cl_internal.h"
/*****************************************************************************
 *
 * cl_io interface.
 *
 */
#define cl_io_for_each(slice, io) \
        list_for_each_entry((slice), &io->ci_layers, cis_linkage)
#define cl_io_for_each_reverse(slice, io) \
        list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
static inline int cl_io_type_is_valid(enum cl_io_type type)
{
        return CIT_READ <= type && type < CIT_OP_NR;
}

static inline int cl_io_is_loopable(const struct cl_io *io)
{
        return cl_io_type_is_valid(io->ci_type) && io->ci_type != CIT_MISC;
}
/**
 * Returns true iff there is an IO ongoing in the given environment.
 */
int cl_io_is_going(const struct lu_env *env)
{
        return cl_env_info(env)->clt_current_io != NULL;
}
/**
 * cl_io invariant that holds at all times when exported cl_io_*() functions
 * are entered and left.
 */
static int cl_io_invariant(const struct cl_io *io)
                /*
                 * io can own pages only when it is ongoing. Sub-io might
                 * still be in CIS_LOCKED state when top-io is in
                 * CIS_IO_GOING.
                 */
                ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING ||
                     (io->ci_state == CIS_LOCKED && up != NULL));
/**
 * Finalize \a io, by calling cl_io_operations::cio_fini() bottom-to-top.
 */
void cl_io_fini(const struct lu_env *env, struct cl_io *io)
        struct cl_io_slice *slice;
        struct cl_thread_info *info;

        LINVRNT(cl_io_type_is_valid(io->ci_type));
        LINVRNT(cl_io_invariant(io));

        while (!list_empty(&io->ci_layers)) {
                slice = container_of(io->ci_layers.prev, struct cl_io_slice,
                                     cis_linkage);
                list_del_init(&slice->cis_linkage);
                if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
                        slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
                /*
                 * Invalidate slice to catch use after free. This assumes that
                 * slices are allocated within session and can be touched
                 * after ->cio_fini() returns.
                 */
                slice->cis_io = NULL;
        }
        io->ci_state = CIS_FINI;
        info = cl_env_info(env);
        if (info->clt_current_io == io)
                info->clt_current_io = NULL;

        /* sanity check for layout change */
        switch (io->ci_type) {
        case CIT_DATA_VERSION:
                LASSERT(!io->ci_need_restart);
                /* Check ignore layout change conf */
                LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout,
                             !io->ci_need_restart));
EXPORT_SYMBOL(cl_io_fini);
static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
                       enum cl_io_type iot, struct cl_object *obj)
        struct cl_object *scan;

        LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI);
        LINVRNT(cl_io_type_is_valid(iot));
        LINVRNT(cl_io_invariant(io));

        INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
        INIT_LIST_HEAD(&io->ci_lockset.cls_done);
        INIT_LIST_HEAD(&io->ci_layers);

        cl_object_for_each(scan, obj) {
                if (scan->co_ops->coo_io_init != NULL) {
                        result = scan->co_ops->coo_io_init(env, scan, io);
        io->ci_state = CIS_INIT;
/**
 * Initialize sub-io, by calling cl_io_operations::cio_init() top-to-bottom.
 *
 * \pre obj != cl_object_top(obj)
 */
int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
                   enum cl_io_type iot, struct cl_object *obj)
        struct cl_thread_info *info = cl_env_info(env);

        LASSERT(obj != cl_object_top(obj));
        if (info->clt_current_io == NULL)
                info->clt_current_io = io;
        return cl_io_init0(env, io, iot, obj);
EXPORT_SYMBOL(cl_io_sub_init);
/**
 * Initialize \a io, by calling cl_io_operations::cio_init() top-to-bottom.
 *
 * Caller has to call cl_io_fini() after a call to cl_io_init(), no matter
 * what the latter returned.
 *
 * \pre obj == cl_object_top(obj)
 * \pre cl_io_type_is_valid(iot)
 * \post cl_io_type_is_valid(io->ci_type) && io->ci_type == iot
 */
int cl_io_init(const struct lu_env *env, struct cl_io *io,
               enum cl_io_type iot, struct cl_object *obj)
        struct cl_thread_info *info = cl_env_info(env);

        LASSERT(obj == cl_object_top(obj));
        LASSERT(info->clt_current_io == NULL);

        info->clt_current_io = io;
        return cl_io_init0(env, io, iot, obj);
EXPORT_SYMBOL(cl_io_init);
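
/*
 * Usage sketch (illustrative only, not taken from this file): per the
 * contract above, a caller always pairs cl_io_init() with cl_io_fini(),
 * even when initialization fails. The io and iot variables are hypothetical
 * and owned by the caller; obj must be the top object of its stack.
 *
 *	rc = cl_io_init(env, io, iot, cl_object_top(obj));
 *	if (rc == 0) {
 *		... drive the io, e.g. with cl_io_loop() for loopable types ...
 *	}
 *	cl_io_fini(env, io);	(called unconditionally)
 */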
/**
 * Initialize read or write io.
 *
 * \pre iot == CIT_READ || iot == CIT_WRITE
 */
int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
                  enum cl_io_type iot, loff_t pos, size_t count)
        LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
        LINVRNT(io->ci_obj != NULL);

        LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
                         "io range: %u ["LPU64", "LPU64") %u %u\n",
                         iot, (__u64)pos, (__u64)pos + count,
                         io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
        io->u.ci_rw.crw_pos = pos;
        io->u.ci_rw.crw_count = count;
        RETURN(cl_io_init(env, io, iot, io->ci_obj));
EXPORT_SYMBOL(cl_io_rw_init);
static int cl_lock_descr_sort(const struct cl_lock_descr *d0,
                              const struct cl_lock_descr *d1)
{
        return lu_fid_cmp(lu_object_fid(&d0->cld_obj->co_lu),
                          lu_object_fid(&d1->cld_obj->co_lu));
}
/**
 * Sort locks in lexicographical order of their (fid, start-offset) pairs.
 */
static void cl_io_locks_sort(struct cl_io *io)
        /* hidden treasure: bubble sort for now. */
                struct cl_io_lock_link *curr;
                struct cl_io_lock_link *prev;
                struct cl_io_lock_link *temp;

                list_for_each_entry_safe(curr, temp, &io->ci_lockset.cls_todo,
                                         cill_linkage) {
                        switch (cl_lock_descr_sort(&prev->cill_descr,
                                                   &curr->cill_descr)) {
                                 * IMPOSSIBLE: Identical locks are
                                list_move_tail(&curr->cill_linkage,
                                               &prev->cill_linkage);
                                continue; /* don't change prev: it's
                                           * still "previous" */
                        case -1: /* already in order */
static void cl_lock_descr_merge(struct cl_lock_descr *d0,
                                const struct cl_lock_descr *d1)
{
        d0->cld_start = min(d0->cld_start, d1->cld_start);
        d0->cld_end = max(d0->cld_end, d1->cld_end);

        if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
                d0->cld_mode = CLM_WRITE;

        if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
                d0->cld_mode = CLM_GROUP;
}
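
/*
 * Worked example (illustrative only): merging a CLM_WRITE descriptor
 * covering pages [8, 16] into an existing descriptor covering [0, 10]
 * widens the existing extent to [0, 16] and upgrades its mode to CLM_WRITE;
 * a CLM_GROUP descriptor would likewise force the merged mode to CLM_GROUP.
 */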
static int cl_lockset_merge(const struct cl_lockset *set,
                            const struct cl_lock_descr *need)
        struct cl_io_lock_link *scan;

        list_for_each_entry(scan, &set->cls_todo, cill_linkage) {
                if (!cl_object_same(scan->cill_descr.cld_obj, need->cld_obj))
                        continue;

                /* Merge locks for the same object, because the ldlm lock
                 * server may expand the lock extent; otherwise there is a
                 * deadlock case if two conflicting locks are queued for the
                 * same object and the lock server expands one lock to overlap
                 * the other. The side effect is that it can generate a
                 * multi-stripe lock that may cause a cascading problem. */
                cl_lock_descr_merge(&scan->cill_descr, need);
                CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
                       scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
                       scan->cill_descr.cld_end);
static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
                           struct cl_lockset *set)
        struct cl_io_lock_link *link;
        struct cl_io_lock_link *temp;

        list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
                result = cl_lock_request(env, io, &link->cill_lock);

                list_move(&link->cill_linkage, &set->cls_done);
/**
 * Takes locks necessary for the current iteration of io.
 *
 * Calls cl_io_operations::cio_lock() top-to-bottom to collect locks required
 * by layers for the current iteration. Then sorts the locks (to avoid
 * deadlocks) and acquires them.
 */
int cl_io_lock(const struct lu_env *env, struct cl_io *io)
        const struct cl_io_slice *scan;

        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_IT_STARTED);
        LINVRNT(cl_io_invariant(io));

        cl_io_for_each(scan, io) {
                if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
                        continue;
                result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);

                cl_io_locks_sort(io);
                result = cl_lockset_lock(env, io, &io->ci_lockset);

                cl_io_unlock(env, io);

                io->ci_state = CIS_LOCKED;
EXPORT_SYMBOL(cl_io_lock);
/**
 * Release locks taken by io.
 */
void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
        struct cl_lockset *set;
        struct cl_io_lock_link *link;
        struct cl_io_lock_link *temp;
        const struct cl_io_slice *scan;

        LASSERT(cl_io_is_loopable(io));
        LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED);
        LINVRNT(cl_io_invariant(io));

        set = &io->ci_lockset;

        list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
                list_del_init(&link->cill_linkage);
                if (link->cill_fini != NULL)
                        link->cill_fini(env, link);
        }

        list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
                list_del_init(&link->cill_linkage);
                cl_lock_release(env, &link->cill_lock);
                if (link->cill_fini != NULL)
                        link->cill_fini(env, link);
        }

        cl_io_for_each_reverse(scan, io) {
                if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
                        scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
        }
        io->ci_state = CIS_UNLOCKED;
        LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired);
EXPORT_SYMBOL(cl_io_unlock);
/**
 * Prepares next iteration of io.
 *
 * Calls cl_io_operations::cio_iter_init() top-to-bottom. This exists to give
 * layers a chance to modify io parameters, e.g., so that lov can restrict io
 * to a single stripe.
 */
int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
        const struct cl_io_slice *scan;

        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED);
        LINVRNT(cl_io_invariant(io));

        cl_io_for_each(scan, io) {
                if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
                        continue;
                result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
                                                                      scan);

                io->ci_state = CIS_IT_STARTED;
EXPORT_SYMBOL(cl_io_iter_init);

/**
 * Finalizes io iteration.
 *
 * Calls cl_io_operations::cio_iter_fini() bottom-to-top.
 */
void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
        const struct cl_io_slice *scan;

        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_UNLOCKED);
        LINVRNT(cl_io_invariant(io));

        cl_io_for_each_reverse(scan, io) {
                if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
                        scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
        }
        io->ci_state = CIS_IT_ENDED;
EXPORT_SYMBOL(cl_io_iter_fini);
/**
 * Records that read or write io progressed \a nob bytes forward.
 */
void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, size_t nob)
        const struct cl_io_slice *scan;

        LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(cl_io_invariant(io));

        io->u.ci_rw.crw_pos += nob;
        io->u.ci_rw.crw_count -= nob;

        /* layers have to be notified. */
        cl_io_for_each_reverse(scan, io) {
                if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
                        scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
                                                                   nob);
        }
/**
 * Adds a lock to a lockset.
 */
int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
                   struct cl_io_lock_link *link)
        if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr))

        list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
EXPORT_SYMBOL(cl_io_lock_add);

static void cl_free_io_lock_link(const struct lu_env *env,
                                 struct cl_io_lock_link *link)

/**
 * Allocates new lock link, and uses it to add a lock to a lockset.
 */
int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
                         struct cl_lock_descr *descr)
        struct cl_io_lock_link *link;

        link->cill_descr = *descr;
        link->cill_fini = cl_free_io_lock_link;
        result = cl_io_lock_add(env, io, link);
        if (result) /* lock match */
                link->cill_fini(env, link);
EXPORT_SYMBOL(cl_io_lock_alloc_add);
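
/*
 * Usage sketch (illustrative only): a layer's cio_lock() method would
 * typically fill a cl_lock_descr for its own object and extent and queue it
 * through cl_io_lock_alloc_add(). The obj, start and end variables below are
 * hypothetical; the descriptor is copied, so a stack variable is fine.
 *
 *	struct cl_lock_descr descr = { 0 };
 *
 *	descr.cld_obj   = obj;
 *	descr.cld_start = start;
 *	descr.cld_end   = end;
 *	descr.cld_mode  = CLM_WRITE;
 *	rc = cl_io_lock_alloc_add(env, io, &descr);
 */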
/**
 * Starts io by calling cl_io_operations::cio_start() top-to-bottom.
 */
int cl_io_start(const struct lu_env *env, struct cl_io *io)
        const struct cl_io_slice *scan;

        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_LOCKED);
        LINVRNT(cl_io_invariant(io));

        io->ci_state = CIS_IO_GOING;
        cl_io_for_each(scan, io) {
                if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
                        continue;
                result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
EXPORT_SYMBOL(cl_io_start);
/**
 * Wait until current io iteration is finished by calling
 * cl_io_operations::cio_end() bottom-to-top.
 */
void cl_io_end(const struct lu_env *env, struct cl_io *io)
        const struct cl_io_slice *scan;

        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_IO_GOING);
        LINVRNT(cl_io_invariant(io));

        cl_io_for_each_reverse(scan, io) {
                if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
                        scan->cis_iop->op[io->ci_type].cio_end(env, scan);
                /* TODO: error handling. */
        }
        io->ci_state = CIS_IO_FINISHED;
EXPORT_SYMBOL(cl_io_end);
/**
 * Called by read io to determine the readahead extent.
 *
 * \see cl_io_operations::cio_read_ahead()
 */
int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io,
                     pgoff_t start, struct cl_read_ahead *ra)
        const struct cl_io_slice *scan;

        LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
        LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
        LINVRNT(cl_io_invariant(io));

        cl_io_for_each(scan, io) {
                if (scan->cis_iop->cio_read_ahead == NULL)
                        continue;
                result = scan->cis_iop->cio_read_ahead(env, scan, start, ra);

        RETURN(result > 0 ? 0 : result);
EXPORT_SYMBOL(cl_io_read_ahead);
/**
 * Commit a list of contiguous pages into writeback cache.
 *
 * \returns 0 if all pages committed, or an error code if an error occurred.
 * \see cl_io_operations::cio_commit_async()
 */
int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
                       struct cl_page_list *queue, int from, int to,
        const struct cl_io_slice *scan;

        cl_io_for_each(scan, io) {
                if (scan->cis_iop->cio_commit_async == NULL)
                        continue;
                result = scan->cis_iop->cio_commit_async(env, scan, queue,
EXPORT_SYMBOL(cl_io_commit_async);
/**
 * Submits a list of pages for immediate io.
 *
 * After the function returns, the submitted pages are moved to
 * queue->c2_qout, while queue->c2_qin contains both the pages that did not
 * need to be submitted and the pages that failed to submit.
 *
 * \returns 0 if at least one page was submitted, error code otherwise.
 * \see cl_io_operations::cio_submit()
 */
int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
                    enum cl_req_type crt, struct cl_2queue *queue)
        const struct cl_io_slice *scan;

        cl_io_for_each(scan, io) {
                if (scan->cis_iop->cio_submit == NULL)
                        continue;
                result = scan->cis_iop->cio_submit(env, scan, crt, queue);

        /*
         * If ->cio_submit() failed, no pages were sent.
         */
        LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages)));
EXPORT_SYMBOL(cl_io_submit_rw);
/**
 * Submit a sync_io and wait for the IO to finish, or for an error to happen.
 * If \a timeout is zero, wait for the IO unconditionally.
 */
int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
                      enum cl_req_type iot, struct cl_2queue *queue,
        struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;

        cl_page_list_for_each(pg, &queue->c2_qin) {
                LASSERT(pg->cp_sync_io == NULL);
                pg->cp_sync_io = anchor;
        }

        cl_sync_io_init(anchor, queue->c2_qin.pl_nr, &cl_sync_io_end);
        rc = cl_io_submit_rw(env, io, iot, queue);
                /*
                 * If some pages weren't sent for any reason (e.g.,
                 * read found up-to-date pages in the cache, or write found
                 * clean pages), count them as completed to avoid infinite
                 * wait.
                 */
                cl_page_list_for_each(pg, &queue->c2_qin) {
                        pg->cp_sync_io = NULL;
                        cl_sync_io_note(env, anchor, 1);
                }

                /* wait for the IO to be finished. */
                rc = cl_sync_io_wait(env, anchor, timeout);
                cl_page_list_assume(env, io, &queue->c2_qout);

                LASSERT(list_empty(&queue->c2_qout.pl_pages));
                cl_page_list_for_each(pg, &queue->c2_qin)
                        pg->cp_sync_io = NULL;
EXPORT_SYMBOL(cl_io_submit_sync);
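
/*
 * Usage sketch (illustrative only): synchronously transfer the pages queued
 * on a cl_2queue. The io and page variables are hypothetical; the pages are
 * assumed to be already owned by \a io, and crt is a cl_req_type (e.g.
 * CRT_READ). A zero timeout waits unconditionally, per the comment above.
 *
 *	struct cl_2queue *queue = ...;
 *
 *	cl_2queue_init(queue);
 *	cl_2queue_add(queue, page);
 *	rc = cl_io_submit_sync(env, io, crt, queue, 0);
 *	cl_2queue_disown(env, io, queue);
 *	cl_2queue_fini(env, queue);
 */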
/**
 * Cancel an IO which has been submitted by cl_io_submit_rw.
 */
int cl_io_cancel(const struct lu_env *env, struct cl_io *io,
                 struct cl_page_list *queue)
        struct cl_page *page;

        CERROR("Canceling ongoing page transmission\n");
        cl_page_list_for_each(page, queue) {
                int rc;

                rc = cl_page_cancel(env, page);
                result = result ?: rc;
        }
/**
 * Pumps io through iterations calling
 *
 *    - cl_io_iter_init()
 *
 *    - cl_io_lock()
 *
 *    - cl_io_start()
 *
 *    - cl_io_end()
 *
 *    - cl_io_unlock()
 *
 *    - cl_io_iter_fini()
 *
 * repeatedly until there is no more io to do.
 */
int cl_io_loop(const struct lu_env *env, struct cl_io *io)
        LINVRNT(cl_io_is_loopable(io));

                result = cl_io_iter_init(env, io);

                        result = cl_io_lock(env, io);
                                /*
                                 * Notify layers that locks have been taken,
                                 * and do actual i/o.
                                 *
                                 *   - llite: kms, short read;
                                 *   - llite: generic_file_read();
                                 */
                                result = cl_io_start(env, io);
                                /*
                                 * Send any remaining pending
                                 * io, etc.
                                 *
                                 *   - llite: ll_rw_stats_tally.
                                 */
                        cl_io_unlock(env, io);
                        cl_io_rw_advance(env, io, io->ci_nob - nob);
                cl_io_iter_fini(env, io);
        } while (result == 0 && io->ci_continue);

        result = io->ci_result;
        RETURN(result < 0 ? result : 0);
EXPORT_SYMBOL(cl_io_loop);
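
/*
 * Usage sketch (illustrative only): a typical buffered read/write caller
 * initializes the io for a byte range and lets cl_io_loop() drive the
 * iteration steps listed above. The io, pos and count variables are
 * hypothetical and owned by the caller.
 *
 *	rc = cl_io_rw_init(env, io, CIT_WRITE, pos, count);
 *	if (rc == 0)
 *		rc = cl_io_loop(env, io);
 *	cl_io_fini(env, io);	(required even if initialization failed)
 */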
/**
 * Adds io slice to the cl_io.
 *
 * This is called by cl_object_operations::coo_io_init() methods to add a
 * per-layer state to the io. New state is added at the end of
 * cl_io::ci_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_page_slice_add()
 */
void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
                     struct cl_object *obj,
                     const struct cl_io_operations *ops)
        struct list_head *linkage = &slice->cis_linkage;

        LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
                list_empty(linkage));

        list_add_tail(linkage, &io->ci_layers);

        slice->cis_obj = obj;
        slice->cis_iop = ops;
EXPORT_SYMBOL(cl_io_slice_add);
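
/*
 * Usage sketch (illustrative only): a layer's coo_io_init() method attaches
 * its per-layer slice to the io. The foo_io type, its embedded cl_io_slice
 * field and the foo_io_ops operations vector below are hypothetical.
 *
 *	static int foo_io_init(const struct lu_env *env,
 *			       struct cl_object *obj, struct cl_io *io)
 *	{
 *		struct foo_io *fio = ...;	(per-layer io state)
 *
 *		cl_io_slice_add(io, &fio->fi_cl, obj, &foo_io_ops);
 *		return 0;
 *	}
 */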
/**
 * Initializes page list.
 */
void cl_page_list_init(struct cl_page_list *plist)
        INIT_LIST_HEAD(&plist->pl_pages);
        plist->pl_owner = current;
EXPORT_SYMBOL(cl_page_list_init);

/**
 * Adds a page to a page list.
 */
void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
        /* it would be better to check that page is owned by "current" io, but
         * it is not passed here. */
        LASSERT(page->cp_owner != NULL);
        LINVRNT(plist->pl_owner == current);

        LASSERT(list_empty(&page->cp_batch));
        list_add_tail(&page->cp_batch, &plist->pl_pages);

        lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
EXPORT_SYMBOL(cl_page_list_add);
/**
 * Removes a page from a page list.
 */
void cl_page_list_del(const struct lu_env *env,
                      struct cl_page_list *plist, struct cl_page *page)
        LASSERT(plist->pl_nr > 0);
        LASSERT(cl_page_is_vmlocked(env, page));
        LINVRNT(plist->pl_owner == current);

        list_del_init(&page->cp_batch);

        lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
        cl_page_put(env, page);
EXPORT_SYMBOL(cl_page_list_del);

/**
 * Moves a page from one page list to another.
 */
void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
                       struct cl_page *page)
        LASSERT(src->pl_nr > 0);
        LINVRNT(dst->pl_owner == current);
        LINVRNT(src->pl_owner == current);

        list_move_tail(&page->cp_batch, &dst->pl_pages);

        lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
                      src, dst);
EXPORT_SYMBOL(cl_page_list_move);

/**
 * Moves a page from one page list to the head of another list.
 */
void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
                            struct cl_page *page)
        LASSERT(src->pl_nr > 0);
        LINVRNT(dst->pl_owner == current);
        LINVRNT(src->pl_owner == current);

        list_move(&page->cp_batch, &dst->pl_pages);

        lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
                      src, dst);
EXPORT_SYMBOL(cl_page_list_move_head);
/**
 * Splice all pages from \a list onto the tail of \a head, leaving \a list
 * empty.
 */
void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
        struct cl_page *page;

        LINVRNT(list->pl_owner == current);
        LINVRNT(head->pl_owner == current);

        cl_page_list_for_each_safe(page, tmp, list)
                cl_page_list_move(head, list, page);
EXPORT_SYMBOL(cl_page_list_splice);
/**
 * Disowns pages in a queue.
 */
void cl_page_list_disown(const struct lu_env *env,
                         struct cl_io *io, struct cl_page_list *plist)
        struct cl_page *page;
        struct cl_page *temp;

        LINVRNT(plist->pl_owner == current);

        cl_page_list_for_each_safe(page, temp, plist) {
                LASSERT(plist->pl_nr > 0);

                list_del_init(&page->cp_batch);
                /*
                 * cl_page_disown0 rather than the usual cl_page_disown() is
                 * used, because pages are possibly in CPS_FREEING state
                 * already due to the call to cl_page_list_discard().
                 */
                /*
                 * XXX cl_page_disown0() will fail if page is not locked.
                 */
                cl_page_disown0(env, io, page);
                lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue",
                              plist);
                cl_page_put(env, page);
        }
EXPORT_SYMBOL(cl_page_list_disown);

/**
 * Releases pages from queue.
 */
void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
        struct cl_page *page;
        struct cl_page *temp;

        LINVRNT(plist->pl_owner == current);

        cl_page_list_for_each_safe(page, temp, plist)
                cl_page_list_del(env, plist, page);
        LASSERT(plist->pl_nr == 0);
EXPORT_SYMBOL(cl_page_list_fini);
/**
 * Owns all pages in a queue.
 */
int cl_page_list_own(const struct lu_env *env,
                     struct cl_io *io, struct cl_page_list *plist)
        struct cl_page *page;
        struct cl_page *temp;

        LINVRNT(plist->pl_owner == current);

        cl_page_list_for_each_safe(page, temp, plist) {
                if (cl_page_own(env, io, page) == 0)
                        result = result ?: page->cp_error;
                else
                        cl_page_list_del(env, plist, page);
        }

/**
 * Assumes all pages in a queue.
 */
void cl_page_list_assume(const struct lu_env *env,
                         struct cl_io *io, struct cl_page_list *plist)
        struct cl_page *page;

        LINVRNT(plist->pl_owner == current);

        cl_page_list_for_each(page, plist)
                cl_page_assume(env, io, page);

/**
 * Discards all pages in a queue.
 */
void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
                          struct cl_page_list *plist)
        struct cl_page *page;

        LINVRNT(plist->pl_owner == current);

        cl_page_list_for_each(page, plist)
                cl_page_discard(env, io, page);
/**
 * Initialize dual page queue.
 */
void cl_2queue_init(struct cl_2queue *queue)
        cl_page_list_init(&queue->c2_qin);
        cl_page_list_init(&queue->c2_qout);
EXPORT_SYMBOL(cl_2queue_init);

/**
 * Add a page to the incoming page list of 2-queue.
 */
void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page)
        cl_page_list_add(&queue->c2_qin, page);
EXPORT_SYMBOL(cl_2queue_add);

/**
 * Disown pages in both lists of a 2-queue.
 */
void cl_2queue_disown(const struct lu_env *env,
                      struct cl_io *io, struct cl_2queue *queue)
        cl_page_list_disown(env, io, &queue->c2_qin);
        cl_page_list_disown(env, io, &queue->c2_qout);
EXPORT_SYMBOL(cl_2queue_disown);

/**
 * Discard (truncate) pages in both lists of a 2-queue.
 */
void cl_2queue_discard(const struct lu_env *env,
                       struct cl_io *io, struct cl_2queue *queue)
        cl_page_list_discard(env, io, &queue->c2_qin);
        cl_page_list_discard(env, io, &queue->c2_qout);
EXPORT_SYMBOL(cl_2queue_discard);
/**
 * Assume ownership of all pages in a cl_2queue.
 */
void cl_2queue_assume(const struct lu_env *env,
                      struct cl_io *io, struct cl_2queue *queue)
        cl_page_list_assume(env, io, &queue->c2_qin);
        cl_page_list_assume(env, io, &queue->c2_qout);

/**
 * Finalize both page lists of a 2-queue.
 */
void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue)
        cl_page_list_fini(env, &queue->c2_qout);
        cl_page_list_fini(env, &queue->c2_qin);
EXPORT_SYMBOL(cl_2queue_fini);

/**
 * Initialize a 2-queue to contain \a page in its incoming page list.
 */
void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page)
        cl_2queue_init(queue);
        cl_2queue_add(queue, page);
EXPORT_SYMBOL(cl_2queue_init_page);
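
/*
 * Usage sketch (illustrative only): cl_2queue_init_page() is a shorthand for
 * wrapping a single page. A caller that wants to drop the page (e.g. on
 * truncation) could discard both lists before disowning them; the io and
 * page variables are hypothetical.
 *
 *	cl_2queue_init_page(queue, page);
 *	... drive the io against the page ...
 *	cl_2queue_discard(env, io, queue);
 *	cl_2queue_disown(env, io, queue);
 *	cl_2queue_fini(env, queue);
 */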
/**
 * Returns top-level io.
 *
 * \see cl_object_top()
 */
struct cl_io *cl_io_top(struct cl_io *io)
        while (io->ci_parent != NULL)
                io = io->ci_parent;
EXPORT_SYMBOL(cl_io_top);

/**
 * Prints human readable representation of \a io via \a printer.
 */
void cl_io_print(const struct lu_env *env, void *cookie,
                 lu_printer_t printer, const struct cl_io *io)
/**
 * Fills in attributes that are passed to the server together with transfer.
 * Only attributes from \a flags may be touched. This can be called multiple
 * times for the same request.
 */
void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj,
                     struct cl_req_attr *attr)
        struct cl_object *scan;

        cl_object_for_each(scan, obj) {
                if (scan->co_ops->coo_req_attr_set != NULL)
                        scan->co_ops->coo_req_attr_set(env, scan, attr);
        }
EXPORT_SYMBOL(cl_req_attr_set);
/* cl_sync_io_callback assumes the caller must call cl_sync_io_wait() to
 * wait for the IO to finish. */
void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor)
        wake_up_all(&anchor->csi_waitq);

        /* it's safe to nuke or reuse anchor now */
        atomic_set(&anchor->csi_barrier, 0);
EXPORT_SYMBOL(cl_sync_io_end);
/**
 * Initialize synchronous io wait anchor.
 */
void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
                     void (*end)(const struct lu_env *, struct cl_sync_io *))
        memset(anchor, 0, sizeof(*anchor));
        init_waitqueue_head(&anchor->csi_waitq);
        atomic_set(&anchor->csi_sync_nr, nr);
        atomic_set(&anchor->csi_barrier, nr > 0);
        anchor->csi_sync_rc = 0;
        anchor->csi_end_io = end;
        LASSERT(end != NULL);
EXPORT_SYMBOL(cl_sync_io_init);
/**
 * Wait until all IO completes. Transfer completion routine has to call
 * cl_sync_io_note() for every entity.
 */
int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
        struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),

        LASSERT(timeout >= 0);

        rc = l_wait_event(anchor->csi_waitq,
                          atomic_read(&anchor->csi_sync_nr) == 0,
                CERROR("IO failed: %d, still wait for %d remaining entries\n",
                       rc, atomic_read(&anchor->csi_sync_nr));

                lwi = (struct l_wait_info) { 0 };
                (void)l_wait_event(anchor->csi_waitq,
                                   atomic_read(&anchor->csi_sync_nr) == 0,
                rc = anchor->csi_sync_rc;

        LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);

        /* wait until cl_sync_io_note() has done wakeup */
        while (unlikely(atomic_read(&anchor->csi_barrier) != 0)) {
                cpu_relax();
        }
EXPORT_SYMBOL(cl_sync_io_wait);
/**
 * Indicate that transfer of a single page completed.
 */
void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
                     int ioret)
        if (anchor->csi_sync_rc == 0 && ioret < 0)
                anchor->csi_sync_rc = ioret;
        /*
         * Synchronous IO done without releasing page lock (e.g., as a part of
         * ->{prepare,commit}_write(). Completion is used to signal the end of
         * IO.
         */
        LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
        if (atomic_dec_and_test(&anchor->csi_sync_nr)) {
                LASSERT(anchor->csi_end_io != NULL);
                anchor->csi_end_io(env, anchor);
                /* Can't access anchor any more */
        }
EXPORT_SYMBOL(cl_sync_io_note);
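
/*
 * Usage sketch (illustrative only): the anchor counts outstanding transfers.
 * A submitter initializes it for a number of pages, each completion calls
 * cl_sync_io_note(), and the submitter waits for all of them; nr_pages,
 * ioret and timeout below are hypothetical values supplied by the caller.
 *
 *	struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
 *
 *	cl_sync_io_init(anchor, nr_pages, &cl_sync_io_end);
 *	... submit nr_pages pages; each completion calls
 *	    cl_sync_io_note(env, anchor, ioret) ...
 *	rc = cl_sync_io_wait(env, anchor, timeout);
 */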