/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Client IO.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include <linux/sched.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_fid.h>
#include <cl_object.h>
#include "cl_internal.h"
/*****************************************************************************
 *
 * cl_io interface.
 *
 */

static inline int cl_io_type_is_valid(enum cl_io_type type)
{
	return CIT_READ <= type && type < CIT_OP_NR;
}

static inline int cl_io_is_loopable(const struct cl_io *io)
{
	return cl_io_type_is_valid(io->ci_type) && io->ci_type != CIT_MISC;
}
/**
 * cl_io invariant that holds at all times when exported cl_io_*() functions
 * are entered and left.
 */
static int cl_io_invariant(const struct cl_io *io)
{
	struct cl_io *up;

	up = io->ci_parent;
	return
		/*
		 * io can own pages only when it is ongoing. Sub-io might
		 * still be in CIS_LOCKED state when top-io is in
		 * CIS_IO_GOING.
		 */
		ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING ||
		     (io->ci_state == CIS_LOCKED && up != NULL));
}
/**
 * Finalize \a io, by calling cl_io_operations::cio_fini() bottom-to-top.
 */
void cl_io_fini(const struct lu_env *env, struct cl_io *io)
{
	struct cl_io_slice *slice;

	LINVRNT(cl_io_type_is_valid(io->ci_type));
	LINVRNT(cl_io_invariant(io));
	ENTRY;

	while (!list_empty(&io->ci_layers)) {
		slice = container_of(io->ci_layers.prev, struct cl_io_slice,
				     cis_linkage);
		list_del_init(&slice->cis_linkage);
		if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
			slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
		/*
		 * Invalidate slice to catch use after free. This assumes that
		 * slices are allocated within session and can be touched
		 * after ->cio_fini() returns.
		 */
		slice->cis_io = NULL;
	}
	io->ci_state = CIS_FINI;

	/* sanity check for layout change */
	switch(io->ci_type) {
	case CIT_READ:
	case CIT_WRITE:
	case CIT_DATA_VERSION:
	case CIT_FAULT:
		break;
	case CIT_FSYNC:
		LASSERT(!io->ci_need_restart);
		break;
	case CIT_SETATTR:
	case CIT_MISC:
		/* Check ignore layout change conf */
		LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout,
			     !io->ci_need_restart));
		break;
	case CIT_GLIMPSE:
		break;
	case CIT_LADVISE:
	case CIT_LSEEK:
		break;
	default:
		LBUG();
	}
	EXIT;
}
EXPORT_SYMBOL(cl_io_fini);
static int __cl_io_init(const struct lu_env *env, struct cl_io *io,
			enum cl_io_type iot, struct cl_object *obj)
{
	struct cl_object *scan;
	int result;

	LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI);
	LINVRNT(cl_io_type_is_valid(iot));
	LINVRNT(cl_io_invariant(io));
	ENTRY;

	io->ci_type = iot;
	INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
	INIT_LIST_HEAD(&io->ci_lockset.cls_done);
	INIT_LIST_HEAD(&io->ci_layers);

	result = 0;
	cl_object_for_each(scan, obj) {
		if (scan->co_ops->coo_io_init != NULL) {
			result = scan->co_ops->coo_io_init(env, scan, io);
			if (result != 0)
				break;
		}
	}
	if (result == 0)
		io->ci_state = CIS_INIT;
	RETURN(result);
}
/**
 * Initialize sub-io, by calling cl_io_operations::cio_init() top-to-bottom.
 *
 * \pre obj != cl_object_top(obj)
 */
int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
		   enum cl_io_type iot, struct cl_object *obj)
{
	LASSERT(obj != cl_object_top(obj));

	return __cl_io_init(env, io, iot, obj);
}
EXPORT_SYMBOL(cl_io_sub_init);
/**
 * Initialize \a io, by calling cl_io_operations::cio_init() top-to-bottom.
 *
 * Caller has to call cl_io_fini() after a call to cl_io_init(), no matter
 * what the latter returned.
 *
 * \pre obj == cl_object_top(obj)
 * \pre cl_io_type_is_valid(iot)
 * \post cl_io_type_is_valid(io->ci_type) && io->ci_type == iot
 */
int cl_io_init(const struct lu_env *env, struct cl_io *io,
	       enum cl_io_type iot, struct cl_object *obj)
{
	LASSERT(obj == cl_object_top(obj));

	/* clear I/O restart from previous instance */
	io->ci_need_restart = 0;

	return __cl_io_init(env, io, iot, obj);
}
EXPORT_SYMBOL(cl_io_init);
/**
 * Initialize read or write io.
 *
 * \pre iot == CIT_READ || iot == CIT_WRITE
 */
int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
		  enum cl_io_type iot, loff_t pos, size_t count)
{
	LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
	LINVRNT(io->ci_obj != NULL);
	ENTRY;

	LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
			 "io range: %u [%llu, %llu) %u %u\n",
			 iot, (__u64)pos, (__u64)pos + count,
			 io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
	io->u.ci_rw.crw_pos   = pos;
	io->u.ci_rw.crw_count = count;
	RETURN(cl_io_init(env, io, iot, io->ci_obj));
}
EXPORT_SYMBOL(cl_io_rw_init);
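
/*
 * Usage sketch (illustrative only, not compiled): a caller typically pairs
 * cl_io_rw_init() with cl_io_loop() and cl_io_fini(). The "do_read" wrapper
 * below and its error handling are assumptions for illustration, not part
 * of this file's API.
 *
 *	static ssize_t do_read(const struct lu_env *env, struct cl_io *io,
 *			       loff_t pos, size_t count)
 *	{
 *		ssize_t result;
 *
 *		// Fill the [pos, pos + count) range and run cl_io_init()
 *		// on the top object.
 *		result = cl_io_rw_init(env, io, CIT_READ, pos, count);
 *		if (result == 0)
 *			result = cl_io_loop(env, io);
 *		// cl_io_fini() must run even if initialization failed.
 *		cl_io_fini(env, io);
 *		return result < 0 ? result : io->ci_nob;
 *	}
 */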
#ifdef HAVE_LIST_CMP_FUNC_T
static int cl_lock_descr_cmp(void *priv,
			     const struct list_head *a,
			     const struct list_head *b)
#else /* !HAVE_LIST_CMP_FUNC_T */
static int cl_lock_descr_cmp(void *priv,
			     struct list_head *a, struct list_head *b)
#endif /* HAVE_LIST_CMP_FUNC_T */
{
	const struct cl_io_lock_link *l0 = list_entry(a, struct cl_io_lock_link,
						      cill_linkage);
	const struct cl_io_lock_link *l1 = list_entry(b, struct cl_io_lock_link,
						      cill_linkage);
	const struct cl_lock_descr *d0 = &l0->cill_descr;
	const struct cl_lock_descr *d1 = &l1->cill_descr;

	return lu_fid_cmp(lu_object_fid(&d0->cld_obj->co_lu),
			  lu_object_fid(&d1->cld_obj->co_lu));
}
static void cl_lock_descr_merge(struct cl_lock_descr *d0,
				const struct cl_lock_descr *d1)
{
	d0->cld_start = min(d0->cld_start, d1->cld_start);
	d0->cld_end = max(d0->cld_end, d1->cld_end);

	if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
		d0->cld_mode = CLM_WRITE;

	if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
		d0->cld_mode = CLM_GROUP;
}
static int cl_lockset_merge(const struct cl_lockset *set,
			    const struct cl_lock_descr *need)
{
	struct cl_io_lock_link *scan;

	ENTRY;
	list_for_each_entry(scan, &set->cls_todo, cill_linkage) {
		if (!cl_object_same(scan->cill_descr.cld_obj, need->cld_obj))
			continue;

		/* Merge locks for the same object, because the ldlm lock
		 * server may expand the lock extent; otherwise there is a
		 * deadlock case if two conflicting locks are queued for the
		 * same object and the lock server expands one lock to overlap
		 * the other. The side effect is that it can generate a
		 * multi-stripe lock that may cause a cascading problem.
		 */
		cl_lock_descr_merge(&scan->cill_descr, need);
		CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
		       scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
		       scan->cill_descr.cld_end);
		RETURN(+1);
	}
	RETURN(0);
}
static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
			   struct cl_lockset *set)
{
	struct cl_io_lock_link *link;
	struct cl_io_lock_link *temp;
	int result;

	ENTRY;
	result = 0;
	list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
		result = cl_lock_request(env, io, &link->cill_lock);
		if (result < 0)
			break;

		list_move(&link->cill_linkage, &set->cls_done);
	}
	RETURN(result);
}
/**
 * Takes locks necessary for the current iteration of io.
 *
 * Calls cl_io_operations::cio_lock() top-to-bottom to collect locks required
 * by layers for the current iteration. Then sort locks (to avoid dead-locks),
 * and acquire them.
 */
int cl_io_lock(const struct lu_env *env, struct cl_io *io)
{
	const struct cl_io_slice *scan;
	int result = 0;

	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(io->ci_state == CIS_IT_STARTED);
	LINVRNT(cl_io_invariant(io));

	ENTRY;
	list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
		if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
			continue;
		result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
		if (result != 0)
			break;
	}
	if (result == 0) {
		/*
		 * Sort locks in lexicographical order of their (fid,
		 * start-offset) pairs to avoid deadlocks.
		 */
		list_sort(NULL, &io->ci_lockset.cls_todo, cl_lock_descr_cmp);
		result = cl_lockset_lock(env, io, &io->ci_lockset);
	}
	if (result != 0)
		cl_io_unlock(env, io);
	else
		io->ci_state = CIS_LOCKED;
	RETURN(result);
}
EXPORT_SYMBOL(cl_io_lock);
/**
 * Release locks taken by io.
 */
void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
{
	struct cl_lockset *set;
	struct cl_io_lock_link *link;
	struct cl_io_lock_link *temp;
	const struct cl_io_slice *scan;

	LASSERT(cl_io_is_loopable(io));
	LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED);
	LINVRNT(cl_io_invariant(io));

	ENTRY;
	set = &io->ci_lockset;

	list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
		list_del_init(&link->cill_linkage);
		if (link->cill_fini != NULL)
			link->cill_fini(env, link);
	}

	list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
		list_del_init(&link->cill_linkage);
		cl_lock_release(env, &link->cill_lock);
		if (link->cill_fini != NULL)
			link->cill_fini(env, link);
	}

	list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
		if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
			scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
	}
	io->ci_state = CIS_UNLOCKED;
	EXIT;
}
EXPORT_SYMBOL(cl_io_unlock);
/**
 * Prepares next iteration of io.
 *
 * Calls cl_io_operations::cio_iter_init() top-to-bottom. This exists to give
 * layers a chance to modify io parameters, e.g., so that lov can restrict io
 * to a single stripe.
 */
int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
{
	const struct cl_io_slice *scan;
	int result;

	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED);
	LINVRNT(cl_io_invariant(io));

	ENTRY;
	result = 0;
	list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
		if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
			continue;
		result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
								      scan);
		if (result != 0)
			break;
	}
	if (result == 0)
		io->ci_state = CIS_IT_STARTED;
	RETURN(result);
}
EXPORT_SYMBOL(cl_io_iter_init);
/**
 * Finalizes io iteration.
 *
 * Calls cl_io_operations::cio_iter_fini() bottom-to-top.
 */
void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
{
	const struct cl_io_slice *scan;

	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(io->ci_state <= CIS_IT_STARTED ||
		io->ci_state > CIS_IO_FINISHED);
	LINVRNT(cl_io_invariant(io));

	ENTRY;
	list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
		if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
			scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
	}
	io->ci_state = CIS_IT_ENDED;
	EXIT;
}
EXPORT_SYMBOL(cl_io_iter_fini);
/**
 * Records that read or write io progressed \a nob bytes forward.
 */
void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, size_t nob)
{
	const struct cl_io_slice *scan;

	ENTRY;

	LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
		nob == 0);
	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(cl_io_invariant(io));

	io->u.ci_rw.crw_pos   += nob;
	io->u.ci_rw.crw_count -= nob;

	/* layers have to be notified. */
	list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
		if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
			scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
								   nob);
	}
	EXIT;
}
EXPORT_SYMBOL(cl_io_rw_advance);
/**
 * Adds a lock to a lockset.
 */
int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
		   struct cl_io_lock_link *link)
{
	int result;

	ENTRY;
	if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr))
		result = +1;
	else {
		list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
		result = 0;
	}
	RETURN(result);
}
EXPORT_SYMBOL(cl_io_lock_add);
static void cl_free_io_lock_link(const struct lu_env *env,
				 struct cl_io_lock_link *link)
{
	OBD_FREE_PTR(link);
}
/**
 * Allocates new lock link, and uses it to add a lock to a lockset.
 */
int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
			 struct cl_lock_descr *descr)
{
	struct cl_io_lock_link *link;
	int result;

	ENTRY;
	OBD_ALLOC_PTR(link);
	if (link != NULL) {
		link->cill_descr = *descr;
		link->cill_fini  = cl_free_io_lock_link;
		result = cl_io_lock_add(env, io, link);
		if (result) /* lock match */
			link->cill_fini(env, link);
	} else
		result = -ENOMEM;

	RETURN(result);
}
EXPORT_SYMBOL(cl_io_lock_alloc_add);
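
/*
 * Usage sketch (illustrative only): a layer's ->cio_lock() method would
 * typically build a cl_lock_descr covering the pages it is about to touch
 * and queue it with cl_io_lock_alloc_add(). The "foo_io_rw_lock" function
 * and the exact descriptor arithmetic below are assumptions for
 * illustration, not code from this file.
 *
 *	static int foo_io_rw_lock(const struct lu_env *env,
 *				  const struct cl_io_slice *ios)
 *	{
 *		struct cl_io *io = ios->cis_io;
 *		struct cl_lock_descr descr = {
 *			.cld_obj   = ios->cis_obj,
 *			.cld_start = io->u.ci_rw.crw_pos >> PAGE_SHIFT,
 *			.cld_end   = (io->u.ci_rw.crw_pos +
 *				      io->u.ci_rw.crw_count - 1) >> PAGE_SHIFT,
 *			.cld_mode  = io->ci_type == CIT_WRITE ?
 *				     CLM_WRITE : CLM_READ,
 *		};
 *		int rc = cl_io_lock_alloc_add(env, io, &descr);
 *
 *		// +1 means the range was merged into an existing request.
 *		return rc > 0 ? 0 : rc;
 *	}
 */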
/**
 * Starts io by calling cl_io_operations::cio_start() top-to-bottom.
 */
int cl_io_start(const struct lu_env *env, struct cl_io *io)
{
	const struct cl_io_slice *scan;
	int result = 0;

	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(io->ci_state == CIS_LOCKED);
	LINVRNT(cl_io_invariant(io));
	ENTRY;

	io->ci_state = CIS_IO_GOING;
	list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
		if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
			continue;
		result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
		if (result != 0)
			break;
	}
	if (result >= 0)
		result = 0;
	RETURN(result);
}
EXPORT_SYMBOL(cl_io_start);
/**
 * Wait until current io iteration is finished by calling
 * cl_io_operations::cio_end() bottom-to-top.
 */
void cl_io_end(const struct lu_env *env, struct cl_io *io)
{
	const struct cl_io_slice *scan;

	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(io->ci_state == CIS_IO_GOING);
	LINVRNT(cl_io_invariant(io));
	ENTRY;

	list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
		if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
			scan->cis_iop->op[io->ci_type].cio_end(env, scan);
		/* TODO: error handling. */
	}
	io->ci_state = CIS_IO_FINISHED;
	EXIT;
}
EXPORT_SYMBOL(cl_io_end);
/**
 * Called by read io, to decide the readahead extent
 *
 * \see cl_io_operations::cio_read_ahead()
 */
int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io,
		     pgoff_t start, struct cl_read_ahead *ra)
{
	const struct cl_io_slice *scan;
	int result = 0;

	LINVRNT(io->ci_type == CIT_READ ||
		io->ci_type == CIT_FAULT ||
		io->ci_type == CIT_WRITE);
	LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
	LINVRNT(cl_io_invariant(io));
	ENTRY;

	list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
		if (scan->cis_iop->cio_read_ahead == NULL)
			continue;

		result = scan->cis_iop->cio_read_ahead(env, scan, start, ra);
		if (result != 0)
			break;
	}
	RETURN(result > 0 ? 0 : result);
}
EXPORT_SYMBOL(cl_io_read_ahead);
/**
 * Called before io start, to reserve enough LRU slots to avoid
 * deadlock.
 *
 * \see cl_io_operations::cio_lru_reserve()
 */
int cl_io_lru_reserve(const struct lu_env *env, struct cl_io *io,
		      loff_t pos, size_t bytes)
{
	const struct cl_io_slice *scan;
	int result = 0;

	LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
	LINVRNT(cl_io_invariant(io));
	ENTRY;

	list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
		if (scan->cis_iop->cio_lru_reserve) {
			result = scan->cis_iop->cio_lru_reserve(env, scan,
								pos, bytes);
			if (result)
				break;
		}
	}

	RETURN(result);
}
EXPORT_SYMBOL(cl_io_lru_reserve);
/**
 * Commit a list of contiguous pages into writeback cache.
 *
 * \returns 0 if all pages committed, or errcode if error occurred.
 * \see cl_io_operations::cio_commit_async()
 */
int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
		       struct cl_page_list *queue, int from, int to,
		       cl_commit_cbt cb)
{
	const struct cl_io_slice *scan;
	int result = 0;

	ENTRY;
	list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
		if (scan->cis_iop->cio_commit_async == NULL)
			continue;
		result = scan->cis_iop->cio_commit_async(env, scan, queue,
							 from, to, cb);
		if (result != 0)
			break;
	}
	RETURN(result);
}
EXPORT_SYMBOL(cl_io_commit_async);
void cl_io_extent_release(const struct lu_env *env, struct cl_io *io)
{
	const struct cl_io_slice *scan;

	ENTRY;
	list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
		if (scan->cis_iop->cio_extent_release == NULL)
			continue;
		scan->cis_iop->cio_extent_release(env, scan);
	}
	EXIT;
}
EXPORT_SYMBOL(cl_io_extent_release);
/**
 * Submits a list of pages for immediate io.
 *
 * On return, pages that were successfully sent have been moved to the
 * queue->c2_qout queue, while queue->c2_qin holds both the pages that did
 * not need to be submitted and the pages that failed to submit.
 *
 * \returns 0 if at least one page was submitted, error code otherwise.
 * \see cl_io_operations::cio_submit()
 */
int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
		    enum cl_req_type crt, struct cl_2queue *queue)
{
	const struct cl_io_slice *scan;
	int result = 0;

	ENTRY;
	list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
		if (scan->cis_iop->cio_submit == NULL)
			continue;
		result = scan->cis_iop->cio_submit(env, scan, crt, queue);
		if (result != 0)
			break;
	}
	/*
	 * If ->cio_submit() failed, no pages were sent.
	 */
	LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages)));
	RETURN(result);
}
EXPORT_SYMBOL(cl_io_submit_rw);
/**
 * Submit a sync_io and wait for the IO to finish, or until an error happens.
 * If \a timeout is zero, it means to wait for the IO unconditionally.
 */
int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
		      enum cl_req_type iot, struct cl_2queue *queue,
		      long timeout)
{
	struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
	struct cl_page *pg;
	int rc;

	ENTRY;
	cl_page_list_for_each(pg, &queue->c2_qin) {
		LASSERT(pg->cp_sync_io == NULL);
		pg->cp_sync_io = anchor;
	}

	cl_sync_io_init(anchor, queue->c2_qin.pl_nr);
	rc = cl_io_submit_rw(env, io, iot, queue);
	if (rc == 0) {
		/*
		 * If some pages weren't sent for any reason (e.g.,
		 * read found up-to-date pages in the cache, or write found
		 * clean pages), count them as completed to avoid infinite
		 * wait.
		 */
		cl_page_list_for_each(pg, &queue->c2_qin) {
			pg->cp_sync_io = NULL;
			cl_sync_io_note(env, anchor, 1);
		}

		/* wait for the IO to be finished. */
		rc = cl_sync_io_wait(env, anchor, timeout);
		cl_page_list_assume(env, io, &queue->c2_qout);
	} else {
		LASSERT(list_empty(&queue->c2_qout.pl_pages));
		cl_page_list_for_each(pg, &queue->c2_qin)
			pg->cp_sync_io = NULL;
	}
	RETURN(rc);
}
EXPORT_SYMBOL(cl_io_submit_sync);
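
/*
 * Usage sketch (illustrative only): synchronously reading one locked page,
 * assuming the caller already owns @page within @io. Only functions defined
 * in this file are used; the surrounding setup is assumed.
 *
 *	struct cl_2queue *queue = &io->ci_queue;
 *	int rc;
 *
 *	cl_2queue_init_page(queue, page);
 *	// A timeout of 0 waits for the transfer unconditionally.
 *	rc = cl_io_submit_sync(env, io, CRT_READ, queue, 0);
 *	// On success the page has migrated to c2_qout and is owned again.
 *	cl_2queue_disown(env, queue);
 *	cl_2queue_fini(env, queue);
 */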
/**
 * Pumps io through iterations calling
 *
 *    - cl_io_iter_init()
 *    - cl_io_lock()
 *    - cl_io_start()
 *    - cl_io_end()
 *    - cl_io_unlock()
 *    - cl_io_iter_fini()
 *
 * repeatedly until there is no more io to do.
 */
int cl_io_loop(const struct lu_env *env, struct cl_io *io)
{
	int result = 0;
	int rc = 0;

	LINVRNT(cl_io_is_loopable(io));
	ENTRY;

	do {
		size_t nob;

		io->ci_continue = 0;
		result = cl_io_iter_init(env, io);
		if (result == 0) {
			nob = io->ci_nob;
			result = cl_io_lock(env, io);
			if (result == 0) {
				/*
				 * Notify layers that locks have been taken,
				 * and do actual i/o.
				 *
				 *   - llite: kms, short read;
				 *   - llite: generic_file_read();
				 */
				result = cl_io_start(env, io);
				/*
				 * Send any remaining pending
				 * io, etc.
				 *
				 *   - llite: ll_rw_stats_tally.
				 */
				cl_io_end(env, io);
				cl_io_unlock(env, io);
				cl_io_rw_advance(env, io, io->ci_nob - nob);
			}
		}
		cl_io_iter_fini(env, io);
		if (result)
			rc = result;
	} while ((result == 0 || result == -EIOCBQUEUED) &&
		 io->ci_continue);

	if (rc && !result)
		result = rc;

	if (result == -EAGAIN && io->ci_ndelay && !io->ci_iocb_nowait) {
		io->ci_need_restart = 1;
		result = 0;
	}

	if (result == 0)
		result = io->ci_result;
	RETURN(result < 0 ? result : 0);
}
EXPORT_SYMBOL(cl_io_loop);
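
/*
 * Usage sketch (illustrative only): because cl_io_loop() may set
 * io->ci_need_restart (e.g. for a layout change, or for an -EAGAIN retry
 * when ci_ndelay is set), top-level callers usually re-initialize and
 * re-run the io. The loop below mirrors the pattern used by llite callers
 * and is an assumption about the caller, not code from this file.
 *
 *	restart:
 *		rc = cl_io_rw_init(env, io, iot, pos, count);
 *		if (rc == 0)
 *			rc = cl_io_loop(env, io);
 *		cl_io_fini(env, io);
 *		if (io->ci_need_restart)
 *			goto restart;
 */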
/**
 * Adds io slice to the cl_io.
 *
 * This is called by cl_object_operations::coo_io_init() methods to add a
 * per-layer state to the io. New state is added at the end of
 * cl_io::ci_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_page_slice_add()
 */
void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
		     struct cl_object *obj,
		     const struct cl_io_operations *ops)
{
	struct list_head *linkage = &slice->cis_linkage;

	LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
		list_empty(linkage));
	ENTRY;

	list_add_tail(linkage, &io->ci_layers);
	slice->cis_io  = io;
	slice->cis_obj = obj;
	slice->cis_iop = ops;
	EXIT;
}
EXPORT_SYMBOL(cl_io_slice_add);
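
/*
 * Usage sketch (illustrative only): a layer registers its per-io state from
 * its cl_object_operations::coo_io_init() method. The "foo" layer, its ops
 * table, and the foo_env_io() accessor are hypothetical; only
 * cl_io_slice_add() is from this file.
 *
 *	static const struct cl_io_operations foo_io_ops = { ... };
 *
 *	static int foo_io_init(const struct lu_env *env,
 *			       struct cl_object *obj, struct cl_io *io)
 *	{
 *		struct foo_io *fio = foo_env_io(env);
 *
 *		cl_io_slice_add(io, &fio->fi_cl, obj, &foo_io_ops);
 *		return 0;
 *	}
 */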
/**
 * Initializes page list.
 */
void cl_page_list_init(struct cl_page_list *plist)
{
	ENTRY;
	plist->pl_nr = 0;
	INIT_LIST_HEAD(&plist->pl_pages);
	EXIT;
}
EXPORT_SYMBOL(cl_page_list_init);
/**
 * Adds a page to a page list.
 */
void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page,
		      bool getref)
{
	ENTRY;
	/* it would be better to check that page is owned by "current" io, but
	 * it is not passed here. */
	LASSERT(page->cp_owner != NULL);

	LASSERT(list_empty(&page->cp_batch));
	list_add_tail(&page->cp_batch, &plist->pl_pages);
	++plist->pl_nr;
	lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
	if (getref)
		cl_page_get(page);
	EXIT;
}
EXPORT_SYMBOL(cl_page_list_add);
/**
 * Removes a page from a page list.
 */
void cl_page_list_del(const struct lu_env *env,
		      struct cl_page_list *plist, struct cl_page *page)
{
	LASSERT(plist->pl_nr > 0);
	ENTRY;

	list_del_init(&page->cp_batch);
	--plist->pl_nr;
	lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
	cl_page_put(env, page);
	EXIT;
}
EXPORT_SYMBOL(cl_page_list_del);
/**
 * Moves a page from one page list to another.
 */
void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
		       struct cl_page *page)
{
	LASSERT(src->pl_nr > 0);
	ENTRY;

	list_move_tail(&page->cp_batch, &dst->pl_pages);
	--src->pl_nr;
	++dst->pl_nr;
	lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
		      src, dst);
	EXIT;
}
EXPORT_SYMBOL(cl_page_list_move);
/**
 * Moves a page from one page list to the head of another list.
 */
void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
			    struct cl_page *page)
{
	LASSERT(src->pl_nr > 0);
	ENTRY;

	list_move(&page->cp_batch, &dst->pl_pages);
	--src->pl_nr;
	++dst->pl_nr;
	lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
		      src, dst);
	EXIT;
}
EXPORT_SYMBOL(cl_page_list_move_head);
/**
 * Splice the cl_page_list, just as a list_head splice does.
 */
void cl_page_list_splice(struct cl_page_list *src, struct cl_page_list *dst)
{
#ifdef CONFIG_LUSTRE_DEBUG_LU_REF
	struct cl_page *page;
	struct cl_page *tmp;

	cl_page_list_for_each_safe(page, tmp, src)
		lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref,
			      "queue", src, dst);
#endif
	ENTRY;
	dst->pl_nr += src->pl_nr;
	src->pl_nr = 0;
	list_splice_tail_init(&src->pl_pages, &dst->pl_pages);
	EXIT;
}
EXPORT_SYMBOL(cl_page_list_splice);
/**
 * Disowns pages in a queue.
 */
void cl_page_list_disown(const struct lu_env *env, struct cl_page_list *plist)
{
	struct cl_page *page;
	struct cl_page *temp;

	ENTRY;
	cl_page_list_for_each_safe(page, temp, plist) {
		LASSERT(plist->pl_nr > 0);
		--plist->pl_nr;
		list_del_init(&page->cp_batch);
		/*
		 * __cl_page_disown rather than usual cl_page_disown() is used,
		 * because pages are possibly in CPS_FREEING state already due
		 * to the call to cl_page_list_discard().
		 *
		 * XXX __cl_page_disown() will fail if page is not locked.
		 */
		__cl_page_disown(env, page);
		lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue",
			      plist);
		cl_page_put(env, page);
	}
	EXIT;
}
EXPORT_SYMBOL(cl_page_list_disown);
/**
 * Releases pages from queue.
 */
void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
{
	struct cl_page *page;
	struct cl_page *temp;

	ENTRY;
	cl_page_list_for_each_safe(page, temp, plist)
		cl_page_list_del(env, plist, page);
	LASSERT(plist->pl_nr == 0);
	EXIT;
}
EXPORT_SYMBOL(cl_page_list_fini);
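
/*
 * Usage sketch (illustrative only): a cl_page_list is a simple counted
 * batch of owned pages. A typical producer/consumer pairing looks like the
 * following, where "do_something" is a hypothetical per-page operation and
 * the ownership rules are the ones asserted in cl_page_list_add() above.
 *
 *	struct cl_page_list plist;
 *	struct cl_page *page;
 *
 *	cl_page_list_init(&plist);
 *	cl_page_list_add(&plist, page, true);	// takes a page reference
 *	cl_page_list_for_each(page, &plist)
 *		do_something(page);
 *	cl_page_list_fini(env, &plist);		// drops the references
 */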
/**
 * Assumes all pages in a queue.
 */
void cl_page_list_assume(const struct lu_env *env,
			 struct cl_io *io, struct cl_page_list *plist)
{
	struct cl_page *page;

	cl_page_list_for_each(page, plist)
		cl_page_assume(env, io, page);
}
/**
 * Discards all pages in a queue.
 */
void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
			  struct cl_page_list *plist)
{
	struct cl_page *page;

	ENTRY;
	cl_page_list_for_each(page, plist)
		cl_page_discard(env, io, page);
	EXIT;
}
EXPORT_SYMBOL(cl_page_list_discard);
/**
 * Initialize dual page queue.
 */
void cl_2queue_init(struct cl_2queue *queue)
{
	ENTRY;
	cl_page_list_init(&queue->c2_qin);
	cl_page_list_init(&queue->c2_qout);
	EXIT;
}
EXPORT_SYMBOL(cl_2queue_init);

/**
 * Disown pages in both lists of a 2-queue.
 */
void cl_2queue_disown(const struct lu_env *env, struct cl_2queue *queue)
{
	ENTRY;
	cl_page_list_disown(env, &queue->c2_qin);
	cl_page_list_disown(env, &queue->c2_qout);
	EXIT;
}
EXPORT_SYMBOL(cl_2queue_disown);

/**
 * Discard (truncate) pages in both lists of a 2-queue.
 */
void cl_2queue_discard(const struct lu_env *env,
		       struct cl_io *io, struct cl_2queue *queue)
{
	ENTRY;
	cl_page_list_discard(env, io, &queue->c2_qin);
	cl_page_list_discard(env, io, &queue->c2_qout);
	EXIT;
}
EXPORT_SYMBOL(cl_2queue_discard);

/**
 * Assume to own the pages in cl_2queue
 */
void cl_2queue_assume(const struct lu_env *env,
		      struct cl_io *io, struct cl_2queue *queue)
{
	cl_page_list_assume(env, io, &queue->c2_qin);
	cl_page_list_assume(env, io, &queue->c2_qout);
}

/**
 * Finalize both page lists of a 2-queue.
 */
void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue)
{
	ENTRY;
	cl_page_list_fini(env, &queue->c2_qout);
	cl_page_list_fini(env, &queue->c2_qin);
	EXIT;
}
EXPORT_SYMBOL(cl_2queue_fini);
/**
 * Initialize a 2-queue to contain \a page in its incoming page list.
 */
void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page)
{
	ENTRY;
	cl_2queue_init(queue);
	/*
	 * Add a page to the incoming page list of 2-queue.
	 */
	cl_page_list_add(&queue->c2_qin, page, true);
	EXIT;
}
EXPORT_SYMBOL(cl_2queue_init_page);
/**
 * Returns top-level io.
 *
 * \see cl_object_top()
 */
struct cl_io *cl_io_top(struct cl_io *io)
{
	ENTRY;
	while (io->ci_parent != NULL)
		io = io->ci_parent;
	RETURN(io);
}
EXPORT_SYMBOL(cl_io_top);
/**
 * Prints human readable representation of \a io to the \a f.
 */
void cl_io_print(const struct lu_env *env, void *cookie,
		 lu_printer_t printer, const struct cl_io *io)
{
}
/**
 * Fills in attributes that are passed to server together with transfer. Only
 * attributes from \a flags may be touched. This can be called multiple times
 * for the same request.
 */
void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj,
		     struct cl_req_attr *attr)
{
	struct cl_object *scan;

	ENTRY;
	cl_object_for_each(scan, obj) {
		if (scan->co_ops->coo_req_attr_set != NULL)
			scan->co_ops->coo_req_attr_set(env, scan, attr);
	}
	EXIT;
}
EXPORT_SYMBOL(cl_req_attr_set);
/**
 * Initialize synchronous io wait \a anchor for \a nr pages with optional
 * \a end handler.
 *
 * \param anchor owned by caller, initialized here.
 * \param nr number of pages initially pending in sync.
 * \param end optional callback on sync_io completion, can be used to
 *	  trigger erasure coding, integrity, dedupe, or similar operation.
 *	  \a end is called with a spinlock on anchor->csi_waitq.lock
 */
void cl_sync_io_init_notify(struct cl_sync_io *anchor, int nr,
			    void *dio_aio, cl_sync_io_end_t *end)
{
	ENTRY;
	memset(anchor, 0, sizeof(*anchor));
	init_waitqueue_head(&anchor->csi_waitq);
	atomic_set(&anchor->csi_sync_nr, nr);
	anchor->csi_sync_rc = 0;
	anchor->csi_end_io = end;
	anchor->csi_dio_aio = dio_aio;
	EXIT;
}
EXPORT_SYMBOL(cl_sync_io_init_notify);
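
/*
 * Usage sketch (illustrative only): the common pattern splits one waiter
 * from many completers. cl_io_submit_sync() above uses the plain
 * cl_sync_io_init() wrapper, which is equivalent to initializing with no
 * dio/aio cookie and no end callback:
 *
 *	struct cl_sync_io anchor;
 *	int rc;
 *
 *	cl_sync_io_init_notify(&anchor, nr_pages, NULL, NULL);
 *	// ... submit nr_pages transfers; each completion path calls:
 *	//	cl_sync_io_note(env, &anchor, ioret);
 *	rc = cl_sync_io_wait(env, &anchor, timeout);
 */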
/**
 * Wait until all IO completes. Transfer completion routine has to call
 * cl_sync_io_note() for every entity.
 */
int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
		    long timeout)
{
	int rc = 0;

	ENTRY;
	LASSERT(timeout >= 0);

	if (timeout > 0 &&
	    wait_event_idle_timeout(anchor->csi_waitq,
				    atomic_read(&anchor->csi_sync_nr) == 0,
				    cfs_time_seconds(timeout)) == 0) {
		rc = -ETIMEDOUT;
		CERROR("IO failed: %d, still wait for %d remaining entries\n",
		       rc, atomic_read(&anchor->csi_sync_nr));
	}

	wait_event_idle(anchor->csi_waitq,
			atomic_read(&anchor->csi_sync_nr) == 0);
	if (!rc)
		rc = anchor->csi_sync_rc;

	/* We take the lock to ensure that cl_sync_io_note() has finished */
	spin_lock(&anchor->csi_waitq.lock);
	LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
	spin_unlock(&anchor->csi_waitq.lock);

	RETURN(rc);
}
EXPORT_SYMBOL(cl_sync_io_wait);
static inline void dio_aio_complete(struct kiocb *iocb, ssize_t res)
{
#ifdef HAVE_AIO_COMPLETE
	aio_complete(iocb, res, 0);
#else
	if (iocb->ki_complete)
# ifdef HAVE_KIOCB_COMPLETE_2ARGS
		iocb->ki_complete(iocb, res);
# else
		iocb->ki_complete(iocb, res, 0);
# endif
#endif
}
static void cl_dio_aio_end(const struct lu_env *env, struct cl_sync_io *anchor)
{
	struct cl_dio_aio *aio = container_of(anchor, typeof(*aio), cda_sync);
	ssize_t ret = anchor->csi_sync_rc;

	ENTRY;

	if (!aio->cda_no_aio_complete)
		dio_aio_complete(aio->cda_iocb, ret ?: aio->cda_bytes);

	EXIT;
}
static void cl_sub_dio_end(const struct lu_env *env, struct cl_sync_io *anchor)
{
	struct cl_sub_dio *sdio = container_of(anchor, typeof(*sdio), csd_sync);
	ssize_t ret = anchor->csi_sync_rc;

	ENTRY;

	/* release pages */
	while (sdio->csd_pages.pl_nr > 0) {
		struct cl_page *page = cl_page_list_first(&sdio->csd_pages);

		cl_page_delete(env, page);
		cl_page_list_del(env, &sdio->csd_pages, page);
	}

	ll_release_user_pages(sdio->csd_dio_pages.ldp_pages,
			      sdio->csd_dio_pages.ldp_count);
	cl_sync_io_note(env, &sdio->csd_ll_aio->cda_sync, ret);

	EXIT;
}
struct cl_dio_aio *cl_dio_aio_alloc(struct kiocb *iocb, struct cl_object *obj,
				    bool is_aio)
{
	struct cl_dio_aio *aio;

	OBD_SLAB_ALLOC_PTR_GFP(aio, cl_dio_aio_kmem, GFP_NOFS);
	if (aio != NULL) {
		/*
		 * Hold one ref so that it won't be released until
		 * every page is added.
		 */
		cl_sync_io_init_notify(&aio->cda_sync, 1, aio, cl_dio_aio_end);
		aio->cda_iocb = iocb;
		aio->cda_no_aio_complete = !is_aio;
		/* if this is true AIO, the memory is freed by the last call
		 * to cl_sync_io_note (when all the I/O is complete), because
		 * no one is waiting (in the kernel) for this to complete
		 *
		 * in other cases, the last user is cl_sync_io_wait, and in
		 * that case, the creator frees the struct after that call
		 */
		aio->cda_creator_free = !is_aio;

		cl_object_get(obj);
		aio->cda_obj = obj;
	}
	return aio;
}
EXPORT_SYMBOL(cl_dio_aio_alloc);
struct cl_sub_dio *cl_sub_dio_alloc(struct cl_dio_aio *ll_aio, bool sync)
{
	struct cl_sub_dio *sdio;

	OBD_SLAB_ALLOC_PTR_GFP(sdio, cl_sub_dio_kmem, GFP_NOFS);
	if (sdio != NULL) {
		/*
		 * Hold one ref so that it won't be released until
		 * every page is added.
		 */
		cl_sync_io_init_notify(&sdio->csd_sync, 1, sdio,
				       cl_sub_dio_end);
		cl_page_list_init(&sdio->csd_pages);

		sdio->csd_ll_aio = ll_aio;
		atomic_add(1, &ll_aio->cda_sync.csi_sync_nr);
		sdio->csd_creator_free = sync;
	}
	return sdio;
}
EXPORT_SYMBOL(cl_sub_dio_alloc);
void cl_dio_aio_free(const struct lu_env *env, struct cl_dio_aio *aio)
{
	if (aio) {
		cl_object_put(env, aio->cda_obj);
		OBD_SLAB_FREE_PTR(aio, cl_dio_aio_kmem);
	}
}
EXPORT_SYMBOL(cl_dio_aio_free);

void cl_sub_dio_free(struct cl_sub_dio *sdio)
{
	if (sdio)
		OBD_SLAB_FREE_PTR(sdio, cl_sub_dio_kmem);
}
EXPORT_SYMBOL(cl_sub_dio_free);
/*
 * ll_release_user_pages - tear down page struct array
 * @pages: array of page struct pointers underlying target buffer
 */
void ll_release_user_pages(struct page **pages, int npages)
{
	int i;

	if (npages == 0) {
		LASSERT(!pages);
		return;
	}

	for (i = 0; i < npages; i++) {
		if (!pages[i])
			break;
		put_page(pages[i]);
	}

#if defined(HAVE_DIO_ITER)
	kvfree(pages);
#else
	OBD_FREE_PTR_ARRAY_LARGE(pages, npages);
#endif
}
EXPORT_SYMBOL(ll_release_user_pages);
/**
 * Indicate that transfer of a single page completed.
 */
void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
		     int ioret)
{
	ENTRY;

	if (anchor->csi_sync_rc == 0 && ioret < 0)
		anchor->csi_sync_rc = ioret;
	/*
	 * Synchronous IO done without releasing page lock (e.g., as a part of
	 * ->{prepare,commit}_write(). Completion is used to signal the end of
	 * IO.
	 */
	LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
	if (atomic_dec_and_lock(&anchor->csi_sync_nr,
				&anchor->csi_waitq.lock)) {
		struct cl_sub_dio *sub_dio_aio = NULL;
		struct cl_dio_aio *dio_aio = NULL;
		void *csi_dio_aio = NULL;
		bool creator_free = true;

		cl_sync_io_end_t *end_io = anchor->csi_end_io;

		/*
		 * Holding the lock across both the decrement and
		 * the wakeup ensures cl_sync_io_wait() doesn't complete
		 * before the wakeup completes and the contents of
		 * anchor become unsafe to access as the owner is free
		 * to immediately reclaim anchor when cl_sync_io_wait()
		 * completes.
		 */
		wake_up_locked(&anchor->csi_waitq);
		if (end_io)
			end_io(env, anchor);

		csi_dio_aio = anchor->csi_dio_aio;
		sub_dio_aio = csi_dio_aio;
		dio_aio = csi_dio_aio;

		if (csi_dio_aio && end_io == cl_dio_aio_end)
			creator_free = dio_aio->cda_creator_free;
		else if (csi_dio_aio && end_io == cl_sub_dio_end)
			creator_free = sub_dio_aio->csd_creator_free;

		spin_unlock(&anchor->csi_waitq.lock);

		if (csi_dio_aio && !creator_free) {
			if (end_io == cl_dio_aio_end) {
				cl_dio_aio_free(env, dio_aio);
			} else if (end_io == cl_sub_dio_end) {
				cl_sub_dio_free(sub_dio_aio);
			}
		}
	}
	EXIT;
}
EXPORT_SYMBOL(cl_sync_io_note);
int cl_sync_io_wait_recycle(const struct lu_env *env, struct cl_sync_io *anchor,
			    long timeout, int ioret)
{
	int rc = 0;

	/*
	 * @anchor was inited as 1 to prevent end_io from being
	 * called before we add all pages for IO, so drop
	 * one extra reference to make sure we could wait
	 * for the count to reach zero.
	 */
	cl_sync_io_note(env, anchor, ioret);
	/* Wait for completion of normal dio.
	 * This replaces the EIOCBQUEUED return from the DIO/AIO
	 * path, and this is where AIO and DIO implementations
	 * split.
	 */
	rc = cl_sync_io_wait(env, anchor, timeout);
	/*
	 * One extra reference again, as if @anchor is
	 * reused we assume it as 1 before using.
	 */
	atomic_add(1, &anchor->csi_sync_nr);

	return rc;
}
EXPORT_SYMBOL(cl_sync_io_wait_recycle);