1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
5 * Use is subject to license terms.
7 * Copyright (c) 2011, 2017, Intel Corporation.
12 * This file is part of Lustre, http://www.lustre.org/
16 * Author: Nikita Danilov <nikita.danilov@sun.com>
17 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
21 #define DEBUG_SUBSYSTEM S_CLASS
23 #include <linux/sched.h>
24 #include <linux/list.h>
25 #include <linux/list_sort.h>
26 #include <obd_class.h>
27 #include <obd_support.h>
28 #include <lustre_fid.h>
29 #include <cl_object.h>
30 #include "cl_internal.h"
36 static inline int cl_io_type_is_valid(enum cl_io_type type)
38 return CIT_READ <= type && type < CIT_OP_NR;
41 static inline int cl_io_is_loopable(const struct cl_io *io)
43 return cl_io_type_is_valid(io->ci_type) && io->ci_type != CIT_MISC;
47 * cl_io invariant that holds at all times when exported cl_io_*() functions
48 * are entered and left.
50 static inline int cl_io_invariant(const struct cl_io *io)
53 * io can own pages only when it is ongoing. Sub-io might
* still be in CIS_LOCKED state when top-io is in CIS_IO_GOING.
57 return ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING ||
58 (io->ci_state == CIS_LOCKED && io->ci_parent != NULL));
62 * Finalize \a io, by calling cl_io_operations::cio_fini() bottom-to-top.
64 void cl_io_fini(const struct lu_env *env, struct cl_io *io)
66 struct cl_io_slice *slice;
68 LINVRNT(cl_io_type_is_valid(io->ci_type));
69 LINVRNT(cl_io_invariant(io));
72 while (!list_empty(&io->ci_layers)) {
73 slice = container_of(io->ci_layers.prev, struct cl_io_slice,
75 list_del_init(&slice->cis_linkage);
76 if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
77 slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
79 * Invalidate slice to catch use after free. This assumes that
80 * slices are allocated within session and can be touched
81 * after ->cio_fini() returns.
85 io->ci_state = CIS_FINI;
87 /* sanity check for layout change */
91 case CIT_DATA_VERSION:
95 LASSERT(!io->ci_need_restart);
99 /* Check ignore layout change conf */
100 LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout,
101 !io->ci_need_restart));
112 EXPORT_SYMBOL(cl_io_fini);
114 static int __cl_io_init(const struct lu_env *env, struct cl_io *io,
115 enum cl_io_type iot, struct cl_object *obj)
117 struct cl_object *scan;
120 LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI);
121 LINVRNT(cl_io_type_is_valid(iot));
122 LINVRNT(cl_io_invariant(io));
126 INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
127 INIT_LIST_HEAD(&io->ci_lockset.cls_done);
128 INIT_LIST_HEAD(&io->ci_layers);
131 cl_object_for_each(scan, obj) {
132 if (scan->co_ops->coo_io_init != NULL) {
133 result = scan->co_ops->coo_io_init(env, scan, io);
139 io->ci_state = CIS_INIT;
144 * Initialize sub-io, by calling cl_io_operations::cio_init() top-to-bottom.
146 * \pre obj != cl_object_top(obj)
148 int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
149 enum cl_io_type iot, struct cl_object *obj)
151 LASSERT(obj != cl_object_top(obj));
153 return __cl_io_init(env, io, iot, obj);
155 EXPORT_SYMBOL(cl_io_sub_init);
158 * Initialize \a io, by calling cl_io_operations::cio_init() top-to-bottom.
160 * Caller has to call cl_io_fini() after a call to cl_io_init(), no matter
161 * what the latter returned.
163 * \pre obj == cl_object_top(obj)
164 * \pre cl_io_type_is_valid(iot)
165 * \post cl_io_type_is_valid(io->ci_type) && io->ci_type == iot
167 int cl_io_init(const struct lu_env *env, struct cl_io *io,
168 enum cl_io_type iot, struct cl_object *obj)
170 LASSERT(obj == cl_object_top(obj));
172 /* clear I/O restart from previous instance */
173 io->ci_need_restart = 0;
175 return __cl_io_init(env, io, iot, obj);
177 EXPORT_SYMBOL(cl_io_init);
180 * Initialize read or write io.
182 * \pre iot == CIT_READ || iot == CIT_WRITE
184 int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
185 enum cl_io_type iot, loff_t pos, size_t count)
187 LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
188 LINVRNT(io->ci_obj != NULL);
191 LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
192 "io range: %u [%llu, %llu) %u %u\n",
193 iot, (__u64)pos, (__u64)pos + count,
194 io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
195 io->u.ci_rw.crw_pos = pos;
196 io->u.ci_rw.crw_count = count;
197 RETURN(cl_io_init(env, io, iot, io->ci_obj));
199 EXPORT_SYMBOL(cl_io_rw_init);
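/*
 * Illustrative sketch (not part of the original source): the typical way a
 * read/write caller drives the io machinery defined above.  cl_io_fini()
 * must be called regardless of what cl_io_rw_init() returned, and the whole
 * sequence is repeated while the layers ask for a restart (for example after
 * a layout change).  How the io and environment are obtained, and the error
 * handling, are simplified here; the helper name is hypothetical.
 *
 *	static ssize_t example_rw(const struct lu_env *env, struct cl_io *io,
 *				  struct cl_object *obj, enum cl_io_type iot,
 *				  loff_t pos, size_t count)
 *	{
 *		int rc;
 *
 *		do {
 *			io->ci_obj = obj;
 *			rc = cl_io_rw_init(env, io, iot, pos, count);
 *			if (rc == 0)
 *				rc = cl_io_loop(env, io);
 *			else
 *				rc = io->ci_result;
 *			cl_io_fini(env, io);
 *		} while (rc == 0 && io->ci_need_restart);
 *
 *		return rc;
 *	}
 */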
201 #ifdef HAVE_LIST_CMP_FUNC_T
202 static int cl_lock_descr_cmp(void *priv,
203 const struct list_head *a,
204 const struct list_head *b)
205 #else /* !HAVE_LIST_CMP_FUNC_T */
206 static int cl_lock_descr_cmp(void *priv,
207 struct list_head *a, struct list_head *b)
208 #endif /* HAVE_LIST_CMP_FUNC_T */
const struct cl_io_lock_link *l0 = list_entry(a, struct cl_io_lock_link,
					      cill_linkage);
const struct cl_io_lock_link *l1 = list_entry(b, struct cl_io_lock_link,
					      cill_linkage);
214 const struct cl_lock_descr *d0 = &l0->cill_descr;
215 const struct cl_lock_descr *d1 = &l1->cill_descr;
217 return lu_fid_cmp(lu_object_fid(&d0->cld_obj->co_lu),
218 lu_object_fid(&d1->cld_obj->co_lu));
221 static void cl_lock_descr_merge(struct cl_lock_descr *d0,
222 const struct cl_lock_descr *d1)
224 d0->cld_start = min(d0->cld_start, d1->cld_start);
225 d0->cld_end = max(d0->cld_end, d1->cld_end);
227 if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
228 d0->cld_mode = CLM_WRITE;
230 if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
231 d0->cld_mode = CLM_GROUP;
234 static int cl_lockset_merge(const struct cl_lockset *set,
235 const struct cl_lock_descr *need)
237 struct cl_io_lock_link *scan;
240 list_for_each_entry(scan, &set->cls_todo, cill_linkage) {
241 if (!cl_object_same(scan->cill_descr.cld_obj, need->cld_obj))
/* Merge locks for the same object because the ldlm lock server
 * may expand the lock extent; otherwise there is a deadlock
 * case if two conflicting locks are queued for the same object
 * and the lock server expands one lock to overlap the other.
 * The side effect is that it can generate a multi-stripe lock
 * that may cause cascading problems.
 */
250 cl_lock_descr_merge(&scan->cill_descr, need);
251 CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
252 scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
253 scan->cill_descr.cld_end);
259 static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
260 struct cl_lockset *set)
262 struct cl_io_lock_link *link;
263 struct cl_io_lock_link *temp;
268 list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
269 result = cl_lock_request(env, io, &link->cill_lock);
273 list_move(&link->cill_linkage, &set->cls_done);
279 * Takes locks necessary for the current iteration of io.
281 * Calls cl_io_operations::cio_lock() top-to-bottom to collect locks required
* by layers for the current iteration. Then sort locks (to avoid deadlocks)
* and acquire them.
285 int cl_io_lock(const struct lu_env *env, struct cl_io *io)
287 const struct cl_io_slice *scan;
290 LINVRNT(cl_io_is_loopable(io));
291 LINVRNT(io->ci_state == CIS_IT_STARTED);
292 LINVRNT(cl_io_invariant(io));
295 list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
296 if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
298 result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
304 * Sort locks in lexicographical order of their (fid,
305 * start-offset) pairs to avoid deadlocks.
307 list_sort(NULL, &io->ci_lockset.cls_todo, cl_lock_descr_cmp);
308 result = cl_lockset_lock(env, io, &io->ci_lockset);
311 cl_io_unlock(env, io);
313 io->ci_state = CIS_LOCKED;
316 EXPORT_SYMBOL(cl_io_lock);
* Release locks taken by io.
321 void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
323 struct cl_lockset *set;
324 struct cl_io_lock_link *link;
325 struct cl_io_lock_link *temp;
326 const struct cl_io_slice *scan;
328 LASSERT(cl_io_is_loopable(io));
329 LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED);
330 LINVRNT(cl_io_invariant(io));
333 set = &io->ci_lockset;
335 list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
336 list_del_init(&link->cill_linkage);
337 if (link->cill_fini != NULL)
338 link->cill_fini(env, link);
341 list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
342 list_del_init(&link->cill_linkage);
343 cl_lock_release(env, &link->cill_lock);
344 if (link->cill_fini != NULL)
345 link->cill_fini(env, link);
348 list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
349 if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
350 scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
352 io->ci_state = CIS_UNLOCKED;
355 EXPORT_SYMBOL(cl_io_unlock);
358 * Prepares next iteration of io.
360 * Calls cl_io_operations::cio_iter_init() top-to-bottom. This exists to give
361 * layers a chance to modify io parameters, e.g., so that lov can restrict io
362 * to a single stripe.
364 int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
366 const struct cl_io_slice *scan;
369 LINVRNT(cl_io_is_loopable(io));
370 LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED);
371 LINVRNT(cl_io_invariant(io));
375 list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
376 if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
						      scan);
384 io->ci_state = CIS_IT_STARTED;
387 EXPORT_SYMBOL(cl_io_iter_init);
390 * Finalizes io iteration.
392 * Calls cl_io_operations::cio_iter_fini() bottom-to-top.
394 void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
396 const struct cl_io_slice *scan;
398 LINVRNT(cl_io_is_loopable(io));
399 LINVRNT(io->ci_state <= CIS_IT_STARTED ||
400 io->ci_state > CIS_IO_FINISHED);
401 LINVRNT(cl_io_invariant(io));
404 list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
405 if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
406 scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
408 io->ci_state = CIS_IT_ENDED;
411 EXPORT_SYMBOL(cl_io_iter_fini);
414 * Records that read or write io progressed \a nob bytes forward.
416 void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, size_t nob)
418 const struct cl_io_slice *scan;
422 LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
424 LINVRNT(cl_io_is_loopable(io));
425 LINVRNT(cl_io_invariant(io));
427 io->u.ci_rw.crw_pos += nob;
428 io->u.ci_rw.crw_count -= nob;
430 /* layers have to be notified. */
431 list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
432 if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
					   nob);
440 * Adds a lock to a lockset.
442 int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
443 struct cl_io_lock_link *link)
448 if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr))
451 list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
456 EXPORT_SYMBOL(cl_io_lock_add);
458 static void cl_free_io_lock_link(const struct lu_env *env,
459 struct cl_io_lock_link *link)
* Allocates a new lock link and uses it to add a lock to a lockset.
467 int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
468 struct cl_lock_descr *descr)
470 struct cl_io_lock_link *link;
476 link->cill_descr = *descr;
477 link->cill_fini = cl_free_io_lock_link;
478 result = cl_io_lock_add(env, io, link);
479 if (result) /* lock match */
480 link->cill_fini(env, link);
486 EXPORT_SYMBOL(cl_io_lock_alloc_add);
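/*
 * Illustrative sketch (not part of the original source): how a layer's
 * cio_lock() method typically queues the extent lock it needs.  The lock
 * descriptor is only added to the lockset's todo list here; the locks are
 * sorted and actually acquired later by cl_lockset_lock() from cl_io_lock().
 * The helper name, the full-object extent, and the use of CL_PAGE_EOF and
 * CLM_READ are assumptions for the example.
 *
 *	static int example_queue_full_object_lock(const struct lu_env *env,
 *						  struct cl_io *io,
 *						  struct cl_object *obj)
 *	{
 *		struct cl_lock_descr descr = {
 *			.cld_obj   = obj,
 *			.cld_start = 0,
 *			.cld_end   = CL_PAGE_EOF,
 *			.cld_mode  = io->ci_type == CIT_WRITE ?
 *				     CLM_WRITE : CLM_READ,
 *		};
 *
 *		return cl_io_lock_alloc_add(env, io, &descr);
 *	}
 */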
489 * Starts io by calling cl_io_operations::cio_start() top-to-bottom.
491 int cl_io_start(const struct lu_env *env, struct cl_io *io)
493 const struct cl_io_slice *scan;
496 LINVRNT(cl_io_is_loopable(io));
497 LINVRNT(io->ci_state == CIS_LOCKED);
498 LINVRNT(cl_io_invariant(io));
501 io->ci_state = CIS_IO_GOING;
502 list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
503 if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
505 result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
513 EXPORT_SYMBOL(cl_io_start);
516 * Wait until current io iteration is finished by calling
517 * cl_io_operations::cio_end() bottom-to-top.
519 void cl_io_end(const struct lu_env *env, struct cl_io *io)
521 const struct cl_io_slice *scan;
523 LINVRNT(cl_io_is_loopable(io));
524 LINVRNT(io->ci_state == CIS_IO_GOING);
525 LINVRNT(cl_io_invariant(io));
528 list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
529 if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
530 scan->cis_iop->op[io->ci_type].cio_end(env, scan);
531 /* TODO: error handling. */
533 io->ci_state = CIS_IO_FINISHED;
536 EXPORT_SYMBOL(cl_io_end);
539 * Called by read io, to decide the readahead extent
541 * \see cl_io_operations::cio_read_ahead()
543 int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io,
544 pgoff_t start, struct cl_read_ahead *ra)
546 const struct cl_io_slice *scan;
549 LINVRNT(io->ci_type == CIT_READ ||
550 io->ci_type == CIT_FAULT ||
551 io->ci_type == CIT_WRITE);
552 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
553 LINVRNT(cl_io_invariant(io));
556 list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
557 if (scan->cis_iop->cio_read_ahead == NULL)
560 result = scan->cis_iop->cio_read_ahead(env, scan, start, ra);
564 RETURN(result > 0 ? 0 : result);
566 EXPORT_SYMBOL(cl_io_read_ahead);
569 * Called before io start, to reserve enough LRU slots to avoid
572 * \see cl_io_operations::cio_lru_reserve()
574 int cl_io_lru_reserve(const struct lu_env *env, struct cl_io *io,
575 loff_t pos, size_t bytes)
577 const struct cl_io_slice *scan;
580 LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
581 LINVRNT(cl_io_invariant(io));
584 list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
585 if (scan->cis_iop->cio_lru_reserve) {
586 result = scan->cis_iop->cio_lru_reserve(env, scan,
595 EXPORT_SYMBOL(cl_io_lru_reserve);
598 * Commit a list of contiguous pages into writeback cache.
600 * \returns 0 if all pages committed, or errcode if error occurred.
601 * \see cl_io_operations::cio_commit_async()
603 int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
604 struct cl_page_list *queue, int from, int to,
607 const struct cl_io_slice *scan;
611 list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
612 if (scan->cis_iop->cio_commit_async == NULL)
614 result = scan->cis_iop->cio_commit_async(env, scan, queue,
621 EXPORT_SYMBOL(cl_io_commit_async);
623 void cl_io_extent_release(const struct lu_env *env, struct cl_io *io)
625 const struct cl_io_slice *scan;
628 list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
629 if (scan->cis_iop->cio_extent_release == NULL)
631 scan->cis_iop->cio_extent_release(env, scan);
635 EXPORT_SYMBOL(cl_io_extent_release);
638 * Submits a list of pages for immediate io.
* After the function returns, the submitted pages are moved to
* queue->c2_qout, and queue->c2_qin contains both the pages that did not
* need to be submitted and the pages that failed to submit.
644 * \returns 0 if at least one page was submitted, error code otherwise.
645 * \see cl_io_operations::cio_submit()
647 int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
648 enum cl_req_type crt, struct cl_2queue *queue)
650 const struct cl_io_slice *scan;
654 list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
655 if (scan->cis_iop->cio_submit == NULL)
657 result = scan->cis_iop->cio_submit(env, scan, crt, queue);
662 * If ->cio_submit() failed, no pages were sent.
664 LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages)));
667 EXPORT_SYMBOL(cl_io_submit_rw);
* Submit a sync_io and wait until the IO finishes or an error occurs.
* If \a timeout is zero, wait for the IO unconditionally.
673 int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
674 enum cl_req_type iot, struct cl_2queue *queue,
677 struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
682 cl_page_list_for_each(pg, &queue->c2_qin) {
683 LASSERT(pg->cp_sync_io == NULL);
684 pg->cp_sync_io = anchor;
687 cl_sync_io_init(anchor, queue->c2_qin.pl_nr);
688 rc = cl_io_submit_rw(env, io, iot, queue);
691 * If some pages weren't sent for any reason (e.g.,
692 * read found up-to-date pages in the cache, or write found
* clean pages), count them as completed to avoid an infinite wait.
696 cl_page_list_for_each(pg, &queue->c2_qin) {
697 pg->cp_sync_io = NULL;
698 cl_sync_io_note(env, anchor, 1);
701 /* wait for the IO to be finished. */
702 rc = cl_sync_io_wait(env, anchor, timeout);
703 cl_page_list_assume(env, io, &queue->c2_qout);
705 LASSERT(list_empty(&queue->c2_qout.pl_pages));
706 cl_page_list_for_each(pg, &queue->c2_qin)
707 pg->cp_sync_io = NULL;
711 EXPORT_SYMBOL(cl_io_submit_sync);
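/*
 * Illustrative sketch (not part of the original source): synchronously
 * reading a single page through the immediate-io path.  The page is fed in
 * via the 2-queue's incoming list, cl_io_submit_sync() waits for transfer
 * completion, and the queue is disowned and finalized afterwards.  The use
 * of io->ci_queue, CRT_READ as the request type, and the zero (unbounded)
 * timeout follow common callers and are assumptions here.
 *
 *	static int example_page_sync_read(const struct lu_env *env,
 *					  struct cl_io *io,
 *					  struct cl_page *page)
 *	{
 *		struct cl_2queue *queue = &io->ci_queue;
 *		int rc;
 *
 *		cl_2queue_init_page(queue, page);
 *		rc = cl_io_submit_sync(env, io, CRT_READ, queue, 0);
 *		cl_2queue_disown(env, queue);
 *		cl_2queue_fini(env, queue);
 *		return rc;
 *	}
 */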
716 * Pumps io through iterations calling
* - cl_io_iter_init()
* - cl_io_lock()
* - cl_io_start()
* - cl_io_end()
* - cl_io_unlock()
* - cl_io_rw_advance()
* - cl_io_iter_fini()
730 * repeatedly until there is no more io to do.
732 int cl_io_loop(const struct lu_env *env, struct cl_io *io)
737 LINVRNT(cl_io_is_loopable(io));
744 result = cl_io_iter_init(env, io);
747 result = cl_io_lock(env, io);
* Notify layers that locks have been taken,
753 * - llite: kms, short read;
754 * - llite: generic_file_read();
756 result = cl_io_start(env, io);
758 * Send any remaining pending
* - llite: ll_rw_stats_tally.
764 cl_io_unlock(env, io);
765 cl_io_rw_advance(env, io, io->ci_nob - nob);
768 cl_io_iter_fini(env, io);
771 } while ((result == 0 || result == -EIOCBQUEUED) &&
777 if (result == -EAGAIN && io->ci_ndelay && !io->ci_iocb_nowait) {
778 io->ci_need_restart = 1;
783 result = io->ci_result;
784 RETURN(result < 0 ? result : 0);
786 EXPORT_SYMBOL(cl_io_loop);
789 * Adds io slice to the cl_io.
791 * This is called by cl_object_operations::coo_io_init() methods to add a
792 * per-layer state to the io. New state is added at the end of
793 * cl_io::ci_layers list, that is, it is at the bottom of the stack.
795 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_page_slice_add()
797 void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
798 struct cl_object *obj,
799 const struct cl_io_operations *ops)
801 struct list_head *linkage = &slice->cis_linkage;
803 LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
804 list_empty(linkage));
807 list_add_tail(linkage, &io->ci_layers);
809 slice->cis_obj = obj;
810 slice->cis_iop = ops;
813 EXPORT_SYMBOL(cl_io_slice_add);
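/*
 * Illustrative sketch (not part of the original source): how a layer's
 * cl_object_operations::coo_io_init() method typically registers its slice,
 * so that the cl_io_*() phase functions above call back into the layer.
 * The layer-private structure, its operations table, and the way the slice
 * is obtained from the environment (example_env_io()) are hypothetical.
 *
 *	struct example_io {
 *		struct cl_io_slice eio_cl;
 *	};
 *
 *	static const struct cl_io_operations example_io_ops;
 *
 *	static int example_coo_io_init(const struct lu_env *env,
 *				       struct cl_object *obj, struct cl_io *io)
 *	{
 *		struct example_io *eio = example_env_io(env);
 *
 *		cl_io_slice_add(io, &eio->eio_cl, obj, &example_io_ops);
 *		return 0;
 *	}
 */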
817 * Initializes page list.
819 void cl_page_list_init(struct cl_page_list *plist)
823 INIT_LIST_HEAD(&plist->pl_pages);
826 EXPORT_SYMBOL(cl_page_list_init);
829 * Adds a page to a page list.
831 void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page,
835 /* it would be better to check that page is owned by "current" io, but
836 * it is not passed here. */
837 LASSERT(page->cp_owner != NULL);
839 LASSERT(list_empty(&page->cp_batch));
840 list_add_tail(&page->cp_batch, &plist->pl_pages);
842 lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
847 EXPORT_SYMBOL(cl_page_list_add);
850 * Removes a page from a page list.
852 void cl_page_list_del(const struct lu_env *env,
853 struct cl_page_list *plist, struct cl_page *page)
855 LASSERT(plist->pl_nr > 0);
858 list_del_init(&page->cp_batch);
860 lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
861 cl_page_put(env, page);
864 EXPORT_SYMBOL(cl_page_list_del);
867 * Moves a page from one page list to another.
869 void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
870 struct cl_page *page)
872 LASSERT(src->pl_nr > 0);
875 list_move_tail(&page->cp_batch, &dst->pl_pages);
878 lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
882 EXPORT_SYMBOL(cl_page_list_move);
885 * Moves a page from one page list to the head of another list.
887 void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
888 struct cl_page *page)
890 LASSERT(src->pl_nr > 0);
893 list_move(&page->cp_batch, &dst->pl_pages);
896 lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
900 EXPORT_SYMBOL(cl_page_list_move_head);
* Splice one page list onto the tail of another, just as list_splice_tail()
* does for a plain list_head.
905 void cl_page_list_splice(struct cl_page_list *src, struct cl_page_list *dst)
907 #ifdef CONFIG_LUSTRE_DEBUG_LU_REF
908 struct cl_page *page;
912 cl_page_list_for_each_safe(page, tmp, src)
913 lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref,
918 dst->pl_nr += src->pl_nr;
920 list_splice_tail_init(&src->pl_pages, &dst->pl_pages);
924 EXPORT_SYMBOL(cl_page_list_splice);
927 * Disowns pages in a queue.
929 void cl_page_list_disown(const struct lu_env *env, struct cl_page_list *plist)
931 struct cl_page *page;
932 struct cl_page *temp;
935 cl_page_list_for_each_safe(page, temp, plist) {
936 LASSERT(plist->pl_nr > 0);
938 list_del_init(&page->cp_batch);
* __cl_page_disown() rather than the usual cl_page_disown() is used,
942 * because pages are possibly in CPS_FREEING state already due
943 * to the call to cl_page_list_discard().
946 * XXX __cl_page_disown() will fail if page is not locked.
948 __cl_page_disown(env, page);
949 lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue",
951 cl_page_put(env, page);
955 EXPORT_SYMBOL(cl_page_list_disown);
958 * Releases pages from queue.
960 void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
962 struct cl_page *page;
963 struct cl_page *temp;
966 cl_page_list_for_each_safe(page, temp, plist)
967 cl_page_list_del(env, plist, page);
968 LASSERT(plist->pl_nr == 0);
971 EXPORT_SYMBOL(cl_page_list_fini);
* Assumes (takes ownership of) all pages in a queue.
976 void cl_page_list_assume(const struct lu_env *env,
977 struct cl_io *io, struct cl_page_list *plist)
979 struct cl_page *page;
981 cl_page_list_for_each(page, plist)
982 cl_page_assume(env, io, page);
986 * Discards all pages in a queue.
988 void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
989 struct cl_page_list *plist)
991 struct cl_page *page;
994 cl_page_list_for_each(page, plist)
995 cl_page_discard(env, io, page);
998 EXPORT_SYMBOL(cl_page_list_discard);
1001 * Initialize dual page queue.
1003 void cl_2queue_init(struct cl_2queue *queue)
1006 cl_page_list_init(&queue->c2_qin);
1007 cl_page_list_init(&queue->c2_qout);
1010 EXPORT_SYMBOL(cl_2queue_init);
1013 * Disown pages in both lists of a 2-queue.
1015 void cl_2queue_disown(const struct lu_env *env, struct cl_2queue *queue)
1018 cl_page_list_disown(env, &queue->c2_qin);
1019 cl_page_list_disown(env, &queue->c2_qout);
1022 EXPORT_SYMBOL(cl_2queue_disown);
1025 * Discard (truncate) pages in both lists of a 2-queue.
1027 void cl_2queue_discard(const struct lu_env *env,
1028 struct cl_io *io, struct cl_2queue *queue)
1031 cl_page_list_discard(env, io, &queue->c2_qin);
1032 cl_page_list_discard(env, io, &queue->c2_qout);
1035 EXPORT_SYMBOL(cl_2queue_discard);
* Assume ownership of the pages in a cl_2queue.
1040 void cl_2queue_assume(const struct lu_env *env,
1041 struct cl_io *io, struct cl_2queue *queue)
1043 cl_page_list_assume(env, io, &queue->c2_qin);
1044 cl_page_list_assume(env, io, &queue->c2_qout);
1048 * Finalize both page lists of a 2-queue.
1050 void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue)
1053 cl_page_list_fini(env, &queue->c2_qout);
1054 cl_page_list_fini(env, &queue->c2_qin);
1057 EXPORT_SYMBOL(cl_2queue_fini);
1060 * Initialize a 2-queue to contain \a page in its incoming page list.
1062 void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page)
1065 cl_2queue_init(queue);
1067 * Add a page to the incoming page list of 2-queue.
1069 cl_page_list_add(&queue->c2_qin, page, true);
1072 EXPORT_SYMBOL(cl_2queue_init_page);
1075 * Returns top-level io.
1077 * \see cl_object_top()
1079 struct cl_io *cl_io_top(struct cl_io *io)
1082 while (io->ci_parent != NULL)
1086 EXPORT_SYMBOL(cl_io_top);
1089 * Fills in attributes that are passed to server together with transfer. Only
1090 * attributes from \a flags may be touched. This can be called multiple times
1091 * for the same request.
1093 void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj,
1094 struct cl_req_attr *attr)
1096 struct cl_object *scan;
1099 cl_object_for_each(scan, obj) {
1100 if (scan->co_ops->coo_req_attr_set != NULL)
1101 scan->co_ops->coo_req_attr_set(env, scan, attr);
1105 EXPORT_SYMBOL(cl_req_attr_set);
* Initialize synchronous io wait \a anchor for \a nr pages with an optional
* completion callback.
*
* \param anchor owned by caller, initialized here.
* \param nr number of pages initially pending in sync.
* \param end optional callback called on sync_io completion, can be used to
* trigger erasure coding, integrity, dedupe, or similar operations.
* \a end is called with the anchor->csi_waitq.lock spinlock held.
1116 void cl_sync_io_init_notify(struct cl_sync_io *anchor, int nr,
1117 void *dio_aio, cl_sync_io_end_t *end)
1120 memset(anchor, 0, sizeof(*anchor));
1121 init_waitqueue_head(&anchor->csi_waitq);
1122 atomic_set(&anchor->csi_sync_nr, nr);
1123 anchor->csi_sync_rc = 0;
1124 anchor->csi_end_io = end;
1125 anchor->csi_dio_aio = dio_aio;
1128 EXPORT_SYMBOL(cl_sync_io_init_notify);
1131 * Wait until all IO completes. Transfer completion routine has to call
1132 * cl_sync_io_note() for every entity.
1134 int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
1140 LASSERT(timeout >= 0);
1143 wait_event_idle_timeout(anchor->csi_waitq,
1144 atomic_read(&anchor->csi_sync_nr) == 0,
1145 cfs_time_seconds(timeout)) == 0) {
1147 CERROR("IO failed: %d, still wait for %d remaining entries\n",
1148 rc, atomic_read(&anchor->csi_sync_nr));
1151 wait_event_idle(anchor->csi_waitq,
1152 atomic_read(&anchor->csi_sync_nr) == 0);
1154 rc = anchor->csi_sync_rc;
1156 /* We take the lock to ensure that cl_sync_io_note() has finished */
1157 spin_lock(&anchor->csi_waitq.lock);
1158 LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
1159 spin_unlock(&anchor->csi_waitq.lock);
1163 EXPORT_SYMBOL(cl_sync_io_wait);
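/*
 * Illustrative sketch (not part of the original source): the basic anchor
 * pattern also used by cl_io_submit_sync() above.  The anchor is initialized
 * for the number of in-flight pages, each transfer completion calls
 * cl_sync_io_note(), and the submitter blocks in cl_sync_io_wait() until the
 * pending count drops to zero.  example_submit_one() and the 60-second
 * timeout are hypothetical; pages that fail to submit are noted immediately
 * so the count still reaches zero.
 *
 *	static int example_sync_transfer(const struct lu_env *env,
 *					 struct cl_page_list *plist)
 *	{
 *		struct cl_sync_io anchor;
 *		struct cl_page *page;
 *
 *		cl_sync_io_init_notify(&anchor, plist->pl_nr, NULL, NULL);
 *		cl_page_list_for_each(page, plist) {
 *			page->cp_sync_io = &anchor;
 *			if (example_submit_one(env, page) < 0)
 *				cl_sync_io_note(env, &anchor, -EIO);
 *		}
 *		return cl_sync_io_wait(env, &anchor, 60);
 *	}
 */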
1165 static inline void dio_aio_complete(struct kiocb *iocb, ssize_t res)
1167 #ifdef HAVE_AIO_COMPLETE
1168 aio_complete(iocb, res, 0);
1170 if (iocb->ki_complete)
1171 # ifdef HAVE_KIOCB_COMPLETE_2ARGS
1172 iocb->ki_complete(iocb, res);
1174 iocb->ki_complete(iocb, res, 0);
1179 static void cl_dio_aio_end(const struct lu_env *env, struct cl_sync_io *anchor)
1181 struct cl_dio_aio *aio = container_of(anchor, typeof(*aio), cda_sync);
1182 ssize_t ret = anchor->csi_sync_rc;
1186 if (!aio->cda_no_aio_complete)
1187 dio_aio_complete(aio->cda_iocb, ret ?: aio->cda_bytes);
1192 static void cl_sub_dio_end(const struct lu_env *env, struct cl_sync_io *anchor)
1194 struct cl_sub_dio *sdio = container_of(anchor, typeof(*sdio), csd_sync);
1195 ssize_t ret = anchor->csi_sync_rc;
1200 while (sdio->csd_pages.pl_nr > 0) {
1201 struct cl_page *page = cl_page_list_first(&sdio->csd_pages);
1203 cl_page_delete(env, page);
1204 cl_page_list_del(env, &sdio->csd_pages, page);
1207 ll_release_user_pages(sdio->csd_dio_pages.ldp_pages,
1208 sdio->csd_dio_pages.ldp_count);
1209 cl_sync_io_note(env, &sdio->csd_ll_aio->cda_sync, ret);
1214 struct cl_dio_aio *cl_dio_aio_alloc(struct kiocb *iocb, struct cl_object *obj,
1217 struct cl_dio_aio *aio;
1219 OBD_SLAB_ALLOC_PTR_GFP(aio, cl_dio_aio_kmem, GFP_NOFS);
1222 * Hold one ref so that it won't be released until
* every page is added.
1225 cl_sync_io_init_notify(&aio->cda_sync, 1, aio, cl_dio_aio_end);
1226 aio->cda_iocb = iocb;
1227 aio->cda_no_aio_complete = !is_aio;
1228 /* if this is true AIO, the memory is freed by the last call
1229 * to cl_sync_io_note (when all the I/O is complete), because
1230 * no one is waiting (in the kernel) for this to complete
1232 * in other cases, the last user is cl_sync_io_wait, and in
1233 * that case, the creator frees the struct after that call
1235 aio->cda_creator_free = !is_aio;
1242 EXPORT_SYMBOL(cl_dio_aio_alloc);
1244 struct cl_sub_dio *cl_sub_dio_alloc(struct cl_dio_aio *ll_aio,
1245 struct iov_iter *iter, bool write,
1248 struct cl_sub_dio *sdio;
1250 OBD_SLAB_ALLOC_PTR_GFP(sdio, cl_sub_dio_kmem, GFP_NOFS);
1253 * Hold one ref so that it won't be released until
* every page is added.
cl_sync_io_init_notify(&sdio->csd_sync, 1, sdio,
		       cl_sub_dio_end);
1258 cl_page_list_init(&sdio->csd_pages);
1260 sdio->csd_ll_aio = ll_aio;
1261 atomic_add(1, &ll_aio->cda_sync.csi_sync_nr);
1262 sdio->csd_creator_free = sync;
1263 sdio->csd_write = write;
/* we need to make a copy of the user iovec at this point in
 * time, in order to:
1267 * A) have the correct state of the iovec for this chunk of I/O
1268 * B) have a chunk-local copy; some of the things we want to
1269 * do to the iovec modify it, so to process each chunk from a
1270 * separate thread requires a local copy of the iovec
1272 memcpy(&sdio->csd_iter, iter,
1273 sizeof(struct iov_iter));
1274 OBD_ALLOC_PTR(sdio->csd_iter.iov);
1275 if (sdio->csd_iter.iov == NULL) {
1276 cl_sub_dio_free(sdio);
1280 memcpy((void *) sdio->csd_iter.iov, iter->iov,
1281 sizeof(struct iovec));
1286 EXPORT_SYMBOL(cl_sub_dio_alloc);
1288 void cl_dio_aio_free(const struct lu_env *env, struct cl_dio_aio *aio)
1291 cl_object_put(env, aio->cda_obj);
1292 OBD_SLAB_FREE_PTR(aio, cl_dio_aio_kmem);
1295 EXPORT_SYMBOL(cl_dio_aio_free);
1297 void cl_sub_dio_free(struct cl_sub_dio *sdio)
1300 void *tmp = (void *) sdio->csd_iter.iov;
1303 OBD_FREE(tmp, sizeof(struct iovec));
1304 OBD_SLAB_FREE_PTR(sdio, cl_sub_dio_kmem);
1307 EXPORT_SYMBOL(cl_sub_dio_free);
1310 * ll_release_user_pages - tear down page struct array
1311 * @pages: array of page struct pointers underlying target buffer
1313 void ll_release_user_pages(struct page **pages, int npages)
1322 for (i = 0; i < npages; i++) {
1328 #if defined(HAVE_DIO_ITER)
1331 OBD_FREE_PTR_ARRAY_LARGE(pages, npages);
1334 EXPORT_SYMBOL(ll_release_user_pages);
1337 * Indicate that transfer of a single page completed.
1339 void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
1344 if (anchor->csi_sync_rc == 0 && ioret < 0)
1345 anchor->csi_sync_rc = ioret;
* Synchronous IO done without releasing page lock (e.g., as a part of
* ->{prepare,commit}_write()). Completion is used to signal the end of io.
1351 LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
1352 if (atomic_dec_and_lock(&anchor->csi_sync_nr,
1353 &anchor->csi_waitq.lock)) {
1354 struct cl_sub_dio *sub_dio_aio = NULL;
1355 struct cl_dio_aio *dio_aio = NULL;
1356 void *csi_dio_aio = NULL;
1357 bool creator_free = true;
1359 cl_sync_io_end_t *end_io = anchor->csi_end_io;
1362 * Holding the lock across both the decrement and
1363 * the wakeup ensures cl_sync_io_wait() doesn't complete
* before the wakeup completes and the contents of
* anchor become unsafe to access, as the owner is free
* to immediately reclaim anchor when cl_sync_io_wait()
* returns.
1369 wake_up_locked(&anchor->csi_waitq);
1371 end_io(env, anchor);
1373 csi_dio_aio = anchor->csi_dio_aio;
1374 sub_dio_aio = csi_dio_aio;
1375 dio_aio = csi_dio_aio;
1377 if (csi_dio_aio && end_io == cl_dio_aio_end)
1378 creator_free = dio_aio->cda_creator_free;
1379 else if (csi_dio_aio && end_io == cl_sub_dio_end)
1380 creator_free = sub_dio_aio->csd_creator_free;
1382 spin_unlock(&anchor->csi_waitq.lock);
1384 if (csi_dio_aio && !creator_free) {
1385 if (end_io == cl_dio_aio_end)
1386 cl_dio_aio_free(env, dio_aio);
1387 else if (end_io == cl_sub_dio_end)
1388 cl_sub_dio_free(sub_dio_aio);
1393 EXPORT_SYMBOL(cl_sync_io_note);
/* This function waits for completion of outstanding io and then re-initializes
 * the anchor used to track it. This is used to wait for DIO to complete before
 * returning to userspace, and is never called for true AIO.
 */
1399 int cl_sync_io_wait_recycle(const struct lu_env *env, struct cl_sync_io *anchor,
1400 long timeout, int ioret)
* @anchor was initialized as 1 to prevent end_io from being
* called before we add all pages for the IO, so drop the
* extra reference to make sure we can wait for the IO to complete.
1410 cl_sync_io_note(env, anchor, ioret);
1411 /* Wait for completion of outstanding dio before re-initializing for
1414 rc = cl_sync_io_wait(env, anchor, timeout);
* Take one extra reference again, since if @anchor is
* reused we assume it starts at 1 before use.
1419 atomic_add(1, &anchor->csi_sync_nr);
1423 EXPORT_SYMBOL(cl_sync_io_wait_recycle);
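/*
 * Illustrative sketch (not part of the original source): how a chunked DIO
 * submitter can reuse a single anchor across several chunks by calling
 * cl_sync_io_wait_recycle() between them instead of re-initializing the
 * anchor each time.  example_submit_chunk(), the chunking loop, and the
 * unbounded (zero) timeout are hypothetical.
 *
 *	rc = 0;
 *	while (bytes_left > 0 && rc == 0) {
 *		example_submit_chunk(env, aio, &pos, &bytes_left);
 *		rc = cl_sync_io_wait_recycle(env, &aio->cda_sync, 0, 0);
 *	}
 */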