/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include <linux/sched.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_fid.h>
#include <cl_object.h>
#include "cl_internal.h"
#include <libcfs/crypto/llcrypt.h>
/*****************************************************************************
 *
 * cl_io interface.
 *
 */

static inline int cl_io_type_is_valid(enum cl_io_type type)
{
        return CIT_READ <= type && type < CIT_OP_NR;
}

static inline int cl_io_is_loopable(const struct cl_io *io)
{
        return cl_io_type_is_valid(io->ci_type) && io->ci_type != CIT_MISC;
}
/**
 * cl_io invariant that holds at all times when exported cl_io_*() functions
 * are entered and left.
 */
static int cl_io_invariant(const struct cl_io *io)
{
        struct cl_io *up = io->ci_parent;

        return
                /*
                 * io can own pages only when it is ongoing. Sub-io might
                 * still be in CIS_LOCKED state when top-io is in
                 * CIS_IO_GOING.
                 */
                ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING ||
                     (io->ci_state == CIS_LOCKED && up != NULL));
}
/**
 * Finalize \a io, by calling cl_io_operations::cio_fini() bottom-to-top.
 */
void cl_io_fini(const struct lu_env *env, struct cl_io *io)
{
        struct cl_io_slice *slice;

        LINVRNT(cl_io_type_is_valid(io->ci_type));
        LINVRNT(cl_io_invariant(io));

        while (!list_empty(&io->ci_layers)) {
                slice = container_of(io->ci_layers.prev, struct cl_io_slice,
                                     cis_linkage);
                list_del_init(&slice->cis_linkage);
                if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
                        slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
                /*
                 * Invalidate slice to catch use after free. This assumes that
                 * slices are allocated within session and can be touched
                 * after ->cio_fini() returns.
                 */
                slice->cis_io = NULL;
        }
        io->ci_state = CIS_FINI;

        /* sanity check for layout change */
        switch (io->ci_type) {
        case CIT_DATA_VERSION:
                LASSERT(!io->ci_need_restart);
                /* Check ignore layout change conf */
                LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout,
                             !io->ci_need_restart));
        }
}
EXPORT_SYMBOL(cl_io_fini);
static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
                       enum cl_io_type iot, struct cl_object *obj)
{
        struct cl_object *scan;
        int result = 0;

        LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI);
        LINVRNT(cl_io_type_is_valid(iot));
        LINVRNT(cl_io_invariant(io));

        INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
        INIT_LIST_HEAD(&io->ci_lockset.cls_done);
        INIT_LIST_HEAD(&io->ci_layers);

        cl_object_for_each(scan, obj) {
                if (scan->co_ops->coo_io_init != NULL) {
                        result = scan->co_ops->coo_io_init(env, scan, io);
                }
        }
        io->ci_state = CIS_INIT;
        RETURN(result);
}
/**
 * Initialize sub-io, by calling cl_io_operations::cio_init() top-to-bottom.
 *
 * \pre obj != cl_object_top(obj)
 */
int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
                   enum cl_io_type iot, struct cl_object *obj)
{
        LASSERT(obj != cl_object_top(obj));

        return cl_io_init0(env, io, iot, obj);
}
EXPORT_SYMBOL(cl_io_sub_init);
/**
 * Initialize \a io, by calling cl_io_operations::cio_init() top-to-bottom.
 *
 * Caller has to call cl_io_fini() after a call to cl_io_init(), no matter
 * what the latter returned.
 *
 * \pre obj == cl_object_top(obj)
 * \pre cl_io_type_is_valid(iot)
 * \post cl_io_type_is_valid(io->ci_type) && io->ci_type == iot
 */
int cl_io_init(const struct lu_env *env, struct cl_io *io,
               enum cl_io_type iot, struct cl_object *obj)
{
        LASSERT(obj == cl_object_top(obj));

        /* clear I/O restart from previous instance */
        io->ci_need_restart = 0;

        return cl_io_init0(env, io, iot, obj);
}
EXPORT_SYMBOL(cl_io_init);
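/*
 * A minimal sketch of the init/fini contract documented above, assuming the
 * caller already holds a valid lu_env and the top cl_object of the file
 * (both obtained outside this file).  Note that cl_io_fini() runs even when
 * cl_io_init() fails:
 *
 *      struct cl_io *io = ...;         // e.g. a per-thread io slot
 *      int rc;
 *
 *      rc = cl_io_init(env, io, CIT_MISC, cl_object_top(obj));
 *      if (rc == 0) {
 *              // ... perform the (non-looping) CIT_MISC work here ...
 *      }
 *      cl_io_fini(env, io);            // always, even if cl_io_init() failed
 */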
/**
 * Initialize read or write io.
 *
 * \pre iot == CIT_READ || iot == CIT_WRITE
 */
int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
                  enum cl_io_type iot, loff_t pos, size_t count)
{
        LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
        LINVRNT(io->ci_obj != NULL);

        LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
                         "io range: %u [%llu, %llu) %u %u\n",
                         iot, (__u64)pos, (__u64)pos + count,
                         io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
        io->u.ci_rw.crw_pos = pos;
        io->u.ci_rw.crw_count = count;
        RETURN(cl_io_init(env, io, iot, io->ci_obj));
}
EXPORT_SYMBOL(cl_io_rw_init);
#ifdef HAVE_LIST_CMP_FUNC_T
static int cl_lock_descr_cmp(void *priv,
                             const struct list_head *a,
                             const struct list_head *b)
#else /* !HAVE_LIST_CMP_FUNC_T */
static int cl_lock_descr_cmp(void *priv,
                             struct list_head *a, struct list_head *b)
#endif /* HAVE_LIST_CMP_FUNC_T */
{
        const struct cl_io_lock_link *l0 = list_entry(a, struct cl_io_lock_link,
                                                      cill_linkage);
        const struct cl_io_lock_link *l1 = list_entry(b, struct cl_io_lock_link,
                                                      cill_linkage);
        const struct cl_lock_descr *d0 = &l0->cill_descr;
        const struct cl_lock_descr *d1 = &l1->cill_descr;

        return lu_fid_cmp(lu_object_fid(&d0->cld_obj->co_lu),
                          lu_object_fid(&d1->cld_obj->co_lu));
}
static void cl_lock_descr_merge(struct cl_lock_descr *d0,
                                const struct cl_lock_descr *d1)
{
        d0->cld_start = min(d0->cld_start, d1->cld_start);
        d0->cld_end = max(d0->cld_end, d1->cld_end);

        if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
                d0->cld_mode = CLM_WRITE;

        if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
                d0->cld_mode = CLM_GROUP;
}
static int cl_lockset_merge(const struct cl_lockset *set,
                            const struct cl_lock_descr *need)
{
        struct cl_io_lock_link *scan;

        list_for_each_entry(scan, &set->cls_todo, cill_linkage) {
                if (!cl_object_same(scan->cill_descr.cld_obj, need->cld_obj))
                        continue;

                /* Merge locks for the same object because the ldlm lock
                 * server may expand the lock extent; otherwise there is a
                 * deadlock case if two conflicting locks are queued for the
                 * same object and the lock server expands one lock to overlap
                 * the other. The side effect is that it can generate a
                 * multi-stripe lock that may cause cascading problems. */
                cl_lock_descr_merge(&scan->cill_descr, need);
                CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
                       scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
                       scan->cill_descr.cld_end);
        }
}
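/*
 * A small worked example of the merge above (a sketch, not taken from the
 * original file): if the todo list already holds a CLM_READ lock on pages
 * [0, 100] of an object and a layer then needs a CLM_WRITE lock on [50, 200]
 * of the same object, cl_lock_descr_merge() widens the existing descriptor
 * instead of queueing a second, potentially conflicting lock:
 *
 *      struct cl_lock_descr d0 = { .cld_start = 0,  .cld_end = 100,
 *                                  .cld_mode = CLM_READ };
 *      struct cl_lock_descr d1 = { .cld_start = 50, .cld_end = 200,
 *                                  .cld_mode = CLM_WRITE };
 *
 *      cl_lock_descr_merge(&d0, &d1);
 *      // d0 is now [0, 200] with cld_mode == CLM_WRITE
 */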
static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
                           struct cl_lockset *set)
{
        struct cl_io_lock_link *link;
        struct cl_io_lock_link *temp;
        int result = 0;

        list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
                result = cl_lock_request(env, io, &link->cill_lock);
                list_move(&link->cill_linkage, &set->cls_done);
        }
        RETURN(result);
}
/**
 * Takes locks necessary for the current iteration of io.
 *
 * Calls cl_io_operations::cio_lock() top-to-bottom to collect locks required
 * by layers for the current iteration. Then sort locks (to avoid deadlocks)
 * and acquire them.
 */
int cl_io_lock(const struct lu_env *env, struct cl_io *io)
{
        const struct cl_io_slice *scan;
        int result = 0;

        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_IT_STARTED);
        LINVRNT(cl_io_invariant(io));

        list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
                        continue;
                result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
        }
        /*
         * Sort locks in lexicographical order of their (fid,
         * start-offset) pairs to avoid deadlocks.
         */
        list_sort(NULL, &io->ci_lockset.cls_todo, cl_lock_descr_cmp);
        result = cl_lockset_lock(env, io, &io->ci_lockset);
        if (result != 0)
                cl_io_unlock(env, io);
        else
                io->ci_state = CIS_LOCKED;
        RETURN(result);
}
EXPORT_SYMBOL(cl_io_lock);
/**
 * Releases locks taken by io.
 */
void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
{
        struct cl_lockset *set;
        struct cl_io_lock_link *link;
        struct cl_io_lock_link *temp;
        const struct cl_io_slice *scan;

        LASSERT(cl_io_is_loopable(io));
        LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED);
        LINVRNT(cl_io_invariant(io));

        set = &io->ci_lockset;

        list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
                list_del_init(&link->cill_linkage);
                if (link->cill_fini != NULL)
                        link->cill_fini(env, link);
        }

        list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
                list_del_init(&link->cill_linkage);
                cl_lock_release(env, &link->cill_lock);
                if (link->cill_fini != NULL)
                        link->cill_fini(env, link);
        }

        list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
                        scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
        }
        io->ci_state = CIS_UNLOCKED;
}
EXPORT_SYMBOL(cl_io_unlock);
/**
 * Prepares next iteration of io.
 *
 * Calls cl_io_operations::cio_iter_init() top-to-bottom. This exists to give
 * layers a chance to modify io parameters, e.g., so that lov can restrict io
 * to a single stripe.
 */
int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
{
        const struct cl_io_slice *scan;
        int result = 0;

        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED);
        LINVRNT(cl_io_invariant(io));

        list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
                        continue;
                result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
                                                                      scan);
        }
        io->ci_state = CIS_IT_STARTED;
        RETURN(result);
}
EXPORT_SYMBOL(cl_io_iter_init);
/**
 * Finalizes io iteration.
 *
 * Calls cl_io_operations::cio_iter_fini() bottom-to-top.
 */
void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
{
        const struct cl_io_slice *scan;

        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state <= CIS_IT_STARTED ||
                io->ci_state > CIS_IO_FINISHED);
        LINVRNT(cl_io_invariant(io));

        list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
                        scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
        }
        io->ci_state = CIS_IT_ENDED;
}
EXPORT_SYMBOL(cl_io_iter_fini);
/**
 * Records that read or write io progressed \a nob bytes forward.
 */
void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, size_t nob)
{
        const struct cl_io_slice *scan;

        LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(cl_io_invariant(io));

        io->u.ci_rw.crw_pos += nob;
        io->u.ci_rw.crw_count -= nob;

        /* layers have to be notified. */
        list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
                        scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
                                                                   nob);
        }
}
/**
 * Adds a lock to a lockset.
 */
int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
                   struct cl_io_lock_link *link)
{
        int result;

        if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr)) {
                result = +1;
        } else {
                list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
                result = 0;
        }
        RETURN(result);
}
EXPORT_SYMBOL(cl_io_lock_add);

static void cl_free_io_lock_link(const struct lu_env *env,
                                 struct cl_io_lock_link *link)
{
        OBD_FREE_PTR(link);
}

/**
 * Allocates a new lock link, and uses it to add a lock to a lockset.
 */
int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
                         struct cl_lock_descr *descr)
{
        struct cl_io_lock_link *link;
        int result;

        OBD_ALLOC_PTR(link);
        if (link != NULL) {
                link->cill_descr = *descr;
                link->cill_fini = cl_free_io_lock_link;
                result = cl_io_lock_add(env, io, link);
                if (result) /* lock match */
                        link->cill_fini(env, link);
        } else {
                result = -ENOMEM;
        }
        RETURN(result);
}
EXPORT_SYMBOL(cl_io_lock_alloc_add);
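/*
 * A minimal sketch of how a layer might queue an extent lock from its
 * cl_io_operations::cio_lock() method.  The helper name and the byte-to-page
 * index conversion are illustrative assumptions, not taken from this file:
 *
 *      static int my_io_rw_lock(const struct lu_env *env, struct cl_io *io,
 *                               enum cl_lock_mode mode, loff_t start, loff_t end)
 *      {
 *              struct cl_lock_descr descr = {
 *                      .cld_obj   = io->ci_obj,
 *                      .cld_mode  = mode,
 *                      .cld_start = start >> PAGE_SHIFT,
 *                      .cld_end   = end >> PAGE_SHIFT,
 *              };
 *
 *              return cl_io_lock_alloc_add(env, io, &descr);
 *      }
 */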
/**
 * Starts io by calling cl_io_operations::cio_start() top-to-bottom.
 */
int cl_io_start(const struct lu_env *env, struct cl_io *io)
{
        const struct cl_io_slice *scan;
        int result = 0;

        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_LOCKED);
        LINVRNT(cl_io_invariant(io));

        io->ci_state = CIS_IO_GOING;
        list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
                        continue;
                result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
        }
        RETURN(result);
}
EXPORT_SYMBOL(cl_io_start);
/**
 * Wait until current io iteration is finished by calling
 * cl_io_operations::cio_end() bottom-to-top.
 */
void cl_io_end(const struct lu_env *env, struct cl_io *io)
{
        const struct cl_io_slice *scan;

        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_IO_GOING);
        LINVRNT(cl_io_invariant(io));

        list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
                        scan->cis_iop->op[io->ci_type].cio_end(env, scan);
                /* TODO: error handling. */
        }
        io->ci_state = CIS_IO_FINISHED;
}
EXPORT_SYMBOL(cl_io_end);
/**
 * Called by read io to decide the readahead extent.
 *
 * \see cl_io_operations::cio_read_ahead()
 */
int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io,
                     pgoff_t start, struct cl_read_ahead *ra)
{
        const struct cl_io_slice *scan;
        int result = 0;

        LINVRNT(io->ci_type == CIT_READ ||
                io->ci_type == CIT_FAULT ||
                io->ci_type == CIT_WRITE);
        LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
        LINVRNT(cl_io_invariant(io));

        list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->cio_read_ahead == NULL)
                        continue;
                result = scan->cis_iop->cio_read_ahead(env, scan, start, ra);
        }
        RETURN(result > 0 ? 0 : result);
}
EXPORT_SYMBOL(cl_io_read_ahead);
/**
 * Called before io start, to reserve enough LRU slots to avoid exhausting
 * them during the IO.
 *
 * \see cl_io_operations::cio_lru_reserve()
 */
int cl_io_lru_reserve(const struct lu_env *env, struct cl_io *io,
                      loff_t pos, size_t bytes)
{
        const struct cl_io_slice *scan;
        int result = 0;

        LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
        LINVRNT(cl_io_invariant(io));

        list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->cio_lru_reserve) {
                        result = scan->cis_iop->cio_lru_reserve(env, scan,
                                                                pos, bytes);
                }
        }
        RETURN(result);
}
EXPORT_SYMBOL(cl_io_lru_reserve);
/**
 * Commit a list of contiguous pages into writeback cache.
 *
 * \returns 0 if all pages committed, or errcode if error occurred.
 * \see cl_io_operations::cio_commit_async()
 */
int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
                       struct cl_page_list *queue, int from, int to,
                       cl_commit_cbt cb)
{
        const struct cl_io_slice *scan;
        int result = 0;

        list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->cio_commit_async == NULL)
                        continue;
                result = scan->cis_iop->cio_commit_async(env, scan, queue,
                                                         from, to, cb);
        }
        RETURN(result);
}
EXPORT_SYMBOL(cl_io_commit_async);
void cl_io_extent_release(const struct lu_env *env, struct cl_io *io)
{
        const struct cl_io_slice *scan;

        list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->cio_extent_release == NULL)
                        continue;
                scan->cis_iop->cio_extent_release(env, scan);
        }
}
EXPORT_SYMBOL(cl_io_extent_release);
/**
 * Submits a list of pages for immediate io.
 *
 * After the function returns, the submitted pages are moved to
 * queue->c2_qout, while queue->c2_qin contains both the pages that did not
 * need to be submitted and the pages that failed to submit.
 *
 * \returns 0 if at least one page was submitted, error code otherwise.
 * \see cl_io_operations::cio_submit()
 */
int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
                    enum cl_req_type crt, struct cl_2queue *queue)
{
        const struct cl_io_slice *scan;
        int result = 0;

        list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->cio_submit == NULL)
                        continue;
                result = scan->cis_iop->cio_submit(env, scan, crt, queue);
        }
        /*
         * If ->cio_submit() failed, no pages were sent.
         */
        LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages)));
        RETURN(result);
}
EXPORT_SYMBOL(cl_io_submit_rw);
/**
 * Submit a sync_io and wait until the IO completes or an error occurs.
 * If \a timeout is zero, wait for the IO unconditionally.
 */
int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
                      enum cl_req_type iot, struct cl_2queue *queue,
                      long timeout)
{
        struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
        struct cl_page *pg;
        int rc;

        cl_page_list_for_each(pg, &queue->c2_qin) {
                LASSERT(pg->cp_sync_io == NULL);
                pg->cp_sync_io = anchor;
        }

        cl_sync_io_init(anchor, queue->c2_qin.pl_nr);
        rc = cl_io_submit_rw(env, io, iot, queue);
        if (rc == 0) {
                /*
                 * If some pages weren't sent for any reason (e.g.,
                 * read found up-to-date pages in the cache, or write found
                 * clean pages), count them as completed to avoid infinite
                 * wait.
                 */
                cl_page_list_for_each(pg, &queue->c2_qin) {
                        pg->cp_sync_io = NULL;
                        cl_sync_io_note(env, anchor, 1);
                }

                /* wait for the IO to be finished. */
                rc = cl_sync_io_wait(env, anchor, timeout);
                cl_page_list_assume(env, io, &queue->c2_qout);
        } else {
                LASSERT(list_empty(&queue->c2_qout.pl_pages));
                cl_page_list_for_each(pg, &queue->c2_qin)
                        pg->cp_sync_io = NULL;
        }
        RETURN(rc);
}
EXPORT_SYMBOL(cl_io_submit_sync);
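/*
 * A minimal usage sketch (not taken from the original file): synchronously
 * submitting a single page for read.  It assumes the caller already owns
 * "page" within a running io and that env/io come from the surrounding code;
 * a zero timeout means "wait unconditionally" as documented above:
 *
 *      struct cl_2queue queue;
 *      int rc;
 *
 *      cl_2queue_init(&queue);
 *      cl_2queue_add(&queue, page, true);
 *      rc = cl_io_submit_sync(env, io, CRT_READ, &queue, 0);
 *      cl_2queue_disown(env, io, &queue);
 *      cl_2queue_fini(env, &queue);
 */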
/**
 * Pumps io through iterations calling
 *
 * - cl_io_iter_init()
 *
 * - cl_io_lock()
 *
 * - cl_io_start()
 *
 * - cl_io_end()
 *
 * - cl_io_unlock()
 *
 * - cl_io_rw_advance()
 *
 * - cl_io_iter_fini()
 *
 * repeatedly until there is no more io to do.
 */
int cl_io_loop(const struct lu_env *env, struct cl_io *io)
{
        int result = 0;
        size_t nob;

        LINVRNT(cl_io_is_loopable(io));

        do {
                nob = io->ci_nob;
                result = cl_io_iter_init(env, io);
                if (result == 0)
                        result = cl_io_lock(env, io);
                if (result == 0) {
                        /*
                         * Notify layers that locks have been taken,
                         * e.g.:
                         *
                         * - llite: kms, short read;
                         * - llite: generic_file_read();
                         */
                        result = cl_io_start(env, io);
                        /*
                         * Send any remaining pending
                         * output, e.g.:
                         *
                         * - llite: ll_rw_stats_tally.
                         */
                        cl_io_end(env, io);
                        cl_io_unlock(env, io);
                        cl_io_rw_advance(env, io, io->ci_nob - nob);
                }
                cl_io_iter_fini(env, io);
        } while ((result == 0 || result == -EIOCBQUEUED) &&
                 io->ci_continue);

        if (result == -EAGAIN && io->ci_ndelay) {
                io->ci_need_restart = 1;
        }

        result = io->ci_result;
        RETURN(result < 0 ? result : 0);
}
EXPORT_SYMBOL(cl_io_loop);
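/*
 * A minimal sketch of how a caller typically drives a read through the loop
 * above, including the layout-change restart implied by ci_need_restart.
 * The io slot, env, pos and count are assumed to come from the surrounding
 * code, and the restart policy is simplified:
 *
 *      struct cl_io *io = ...;         // per-thread io, obtained elsewhere
 *      int rc;
 *
 *      do {
 *              rc = cl_io_rw_init(env, io, CIT_READ, pos, count);
 *              if (rc == 0)
 *                      rc = cl_io_loop(env, io);
 *              cl_io_fini(env, io);
 *      } while (rc == 0 && io->ci_need_restart);
 */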
/**
 * Adds io slice to the cl_io.
 *
 * This is called by cl_object_operations::coo_io_init() methods to add a
 * per-layer state to the io. New state is added at the end of
 * cl_io::ci_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_page_slice_add()
 */
void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
                     struct cl_object *obj,
                     const struct cl_io_operations *ops)
{
        struct list_head *linkage = &slice->cis_linkage;

        LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
                list_empty(linkage));

        list_add_tail(linkage, &io->ci_layers);

        slice->cis_obj = obj;
        slice->cis_iop = ops;
}
EXPORT_SYMBOL(cl_io_slice_add);
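/*
 * A minimal sketch of how a layer wires its per-layer io state into the
 * ci_layers list from its cl_object_operations::coo_io_init() method.  The
 * structure and operation table names ("my_io", "my_io_ops") are
 * illustrative only, and where the per-layer state is allocated from is an
 * assumption:
 *
 *      struct my_io {
 *              struct cl_io_slice mio_cl;
 *              // ... per-layer state ...
 *      };
 *
 *      static const struct cl_io_operations my_io_ops = {
 *              // per-type cio_lock/cio_start/cio_fini methods go here
 *      };
 *
 *      static int my_object_io_init(const struct lu_env *env,
 *                                   struct cl_object *obj, struct cl_io *io)
 *      {
 *              struct my_io *mio = ...;   // typically taken from the session
 *
 *              cl_io_slice_add(io, &mio->mio_cl, obj, &my_io_ops);
 *              return 0;
 *      }
 */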
/**
 * Initializes page list.
 */
void cl_page_list_init(struct cl_page_list *plist)
{
        plist->pl_nr = 0;
        INIT_LIST_HEAD(&plist->pl_pages);
}
EXPORT_SYMBOL(cl_page_list_init);

/**
 * Adds a page to a page list.
 */
void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page,
                      bool get_ref)
{
        /* it would be better to check that the page is owned by the "current"
         * io, but the io is not passed here. */
        LASSERT(page->cp_owner != NULL);

        LASSERT(list_empty(&page->cp_batch));
        list_add_tail(&page->cp_batch, &plist->pl_pages);
        plist->pl_nr++;
        lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
        if (get_ref)
                cl_page_get(page);
}
EXPORT_SYMBOL(cl_page_list_add);
/**
 * Removes a page from a page list.
 */
void cl_page_list_del(const struct lu_env *env,
                      struct cl_page_list *plist, struct cl_page *page)
{
        LASSERT(plist->pl_nr > 0);
        LASSERT(cl_page_is_vmlocked(env, page));

        list_del_init(&page->cp_batch);
        plist->pl_nr--;
        lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
        cl_page_put(env, page);
}
EXPORT_SYMBOL(cl_page_list_del);
/**
 * Moves a page from one page list to another.
 */
void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
                       struct cl_page *page)
{
        LASSERT(src->pl_nr > 0);

        list_move_tail(&page->cp_batch, &dst->pl_pages);
        src->pl_nr--;
        dst->pl_nr++;
        lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
                      src, dst);
}
EXPORT_SYMBOL(cl_page_list_move);

/**
 * Moves a page from one page list to the head of another list.
 */
void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
                            struct cl_page *page)
{
        LASSERT(src->pl_nr > 0);

        list_move(&page->cp_batch, &dst->pl_pages);
        src->pl_nr--;
        dst->pl_nr++;
        lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
                      src, dst);
}
EXPORT_SYMBOL(cl_page_list_move_head);
/**
 * Splice the cl_page_list, just as a list head splice does.
 */
void cl_page_list_splice(struct cl_page_list *src, struct cl_page_list *dst)
{
#ifdef CONFIG_LUSTRE_DEBUG_LU_REF
        struct cl_page *page;
        struct cl_page *tmp;

        cl_page_list_for_each_safe(page, tmp, src)
                lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref,
                              "queue", src, dst);
#endif
        dst->pl_nr += src->pl_nr;
        src->pl_nr = 0;
        list_splice_tail_init(&src->pl_pages, &dst->pl_pages);
}
EXPORT_SYMBOL(cl_page_list_splice);
/**
 * Disowns pages in a queue.
 */
void cl_page_list_disown(const struct lu_env *env,
                         struct cl_io *io, struct cl_page_list *plist)
{
        struct cl_page *page;
        struct cl_page *temp;

        cl_page_list_for_each_safe(page, temp, plist) {
                LASSERT(plist->pl_nr > 0);

                list_del_init(&page->cp_batch);
                plist->pl_nr--;
                /*
                 * cl_page_disown0() rather than the usual cl_page_disown() is
                 * used, because pages are possibly in CPS_FREEING state
                 * already due to the call to cl_page_list_discard().
                 *
                 * XXX cl_page_disown0() will fail if page is not locked.
                 */
                cl_page_disown0(env, io, page);
                lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue",
                              plist);
                cl_page_put(env, page);
        }
}
EXPORT_SYMBOL(cl_page_list_disown);
/**
 * Releases pages from queue.
 */
void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
{
        struct cl_page *page;
        struct cl_page *temp;

        cl_page_list_for_each_safe(page, temp, plist)
                cl_page_list_del(env, plist, page);
        LASSERT(plist->pl_nr == 0);
}
EXPORT_SYMBOL(cl_page_list_fini);
/**
 * Assumes all pages in a queue.
 */
void cl_page_list_assume(const struct lu_env *env,
                         struct cl_io *io, struct cl_page_list *plist)
{
        struct cl_page *page;

        cl_page_list_for_each(page, plist)
                cl_page_assume(env, io, page);
}

/**
 * Discards all pages in a queue.
 */
void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
                          struct cl_page_list *plist)
{
        struct cl_page *page;

        cl_page_list_for_each(page, plist)
                cl_page_discard(env, io, page);
}
EXPORT_SYMBOL(cl_page_list_discard);
/**
 * Initialize dual page queue.
 */
void cl_2queue_init(struct cl_2queue *queue)
{
        cl_page_list_init(&queue->c2_qin);
        cl_page_list_init(&queue->c2_qout);
}
EXPORT_SYMBOL(cl_2queue_init);

/**
 * Add a page to the incoming page list of 2-queue.
 */
void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page, bool get_ref)
{
        cl_page_list_add(&queue->c2_qin, page, get_ref);
}
EXPORT_SYMBOL(cl_2queue_add);

/**
 * Disown pages in both lists of a 2-queue.
 */
void cl_2queue_disown(const struct lu_env *env,
                      struct cl_io *io, struct cl_2queue *queue)
{
        cl_page_list_disown(env, io, &queue->c2_qin);
        cl_page_list_disown(env, io, &queue->c2_qout);
}
EXPORT_SYMBOL(cl_2queue_disown);

/**
 * Discard (truncate) pages in both lists of a 2-queue.
 */
void cl_2queue_discard(const struct lu_env *env,
                       struct cl_io *io, struct cl_2queue *queue)
{
        cl_page_list_discard(env, io, &queue->c2_qin);
        cl_page_list_discard(env, io, &queue->c2_qout);
}
EXPORT_SYMBOL(cl_2queue_discard);

/**
 * Assume ownership of the pages in both lists of a cl_2queue.
 */
void cl_2queue_assume(const struct lu_env *env,
                      struct cl_io *io, struct cl_2queue *queue)
{
        cl_page_list_assume(env, io, &queue->c2_qin);
        cl_page_list_assume(env, io, &queue->c2_qout);
}

/**
 * Finalize both page lists of a 2-queue.
 */
void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue)
{
        cl_page_list_fini(env, &queue->c2_qout);
        cl_page_list_fini(env, &queue->c2_qin);
}
EXPORT_SYMBOL(cl_2queue_fini);

/**
 * Initialize a 2-queue to contain \a page in its incoming page list.
 */
void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page)
{
        cl_2queue_init(queue);
        cl_2queue_add(queue, page, true);
}
EXPORT_SYMBOL(cl_2queue_init_page);
/**
 * Returns top-level io.
 *
 * \see cl_object_top()
 */
struct cl_io *cl_io_top(struct cl_io *io)
{
        while (io->ci_parent != NULL)
                io = io->ci_parent;
        return io;
}
EXPORT_SYMBOL(cl_io_top);

/**
 * Prints human readable representation of \a io via \a printer.
 */
void cl_io_print(const struct lu_env *env, void *cookie,
                 lu_printer_t printer, const struct cl_io *io)
/**
 * Fills in attributes that are passed to server together with transfer. Only
 * attributes from \a flags may be touched. This can be called multiple times
 * for the same request.
 */
void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj,
                     struct cl_req_attr *attr)
{
        struct cl_object *scan;

        cl_object_for_each(scan, obj) {
                if (scan->co_ops->coo_req_attr_set != NULL)
                        scan->co_ops->coo_req_attr_set(env, scan, attr);
        }
}
EXPORT_SYMBOL(cl_req_attr_set);
/**
 * Initialize synchronous io wait \a anchor for \a nr pages with an optional
 * \a end callback.
 *
 * \param anchor owned by caller, initialized here.
 * \param nr number of pages initially pending in sync.
 * \param end optional callback run on sync_io completion, can be used to
 *            trigger erasure coding, integrity, dedupe, or similar operations.
 *            \a end is called with the spinlock on anchor->csi_waitq.lock held.
 */
void cl_sync_io_init_notify(struct cl_sync_io *anchor, int nr,
                            struct cl_dio_aio *aio, cl_sync_io_end_t *end)
{
        memset(anchor, 0, sizeof(*anchor));
        init_waitqueue_head(&anchor->csi_waitq);
        atomic_set(&anchor->csi_sync_nr, nr);
        anchor->csi_sync_rc = 0;
        anchor->csi_end_io = end;
        anchor->csi_aio = aio;
}
EXPORT_SYMBOL(cl_sync_io_init_notify);
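/*
 * A minimal sketch of the anchor life cycle (a simplification, not taken
 * from the original file): the submitter initializes the anchor for a number
 * of transfers, every completion path calls cl_sync_io_note() exactly once,
 * and the submitter then waits for the count to drain.  nr_pages, timeout
 * and ioret are assumed to come from the caller:
 *
 *      struct cl_sync_io anchor;
 *      int rc;
 *
 *      cl_sync_io_init_notify(&anchor, nr_pages, NULL, NULL);
 *      // ... submit nr_pages transfers; each completion path ends with
 *      //     cl_sync_io_note(env, &anchor, ioret);
 *      rc = cl_sync_io_wait(env, &anchor, timeout);
 */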
/**
 * Wait until all IO completes. Transfer completion routine has to call
 * cl_sync_io_note() for every entity.
 */
int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
                    long timeout)
{
        int rc = 0;

        LASSERT(timeout >= 0);

        if (timeout > 0 &&
            wait_event_idle_timeout(anchor->csi_waitq,
                                    atomic_read(&anchor->csi_sync_nr) == 0,
                                    cfs_time_seconds(timeout)) == 0) {
                rc = -ETIMEDOUT;
                CERROR("IO failed: %d, still wait for %d remaining entries\n",
                       rc, atomic_read(&anchor->csi_sync_nr));
        }

        wait_event_idle(anchor->csi_waitq,
                        atomic_read(&anchor->csi_sync_nr) == 0);
        rc = anchor->csi_sync_rc;

        /* We take the lock to ensure that cl_sync_io_note() has finished */
        spin_lock(&anchor->csi_waitq.lock);
        LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
        spin_unlock(&anchor->csi_waitq.lock);

        RETURN(rc);
}
EXPORT_SYMBOL(cl_sync_io_wait);
#ifndef HAVE_AIO_COMPLETE
static inline void aio_complete(struct kiocb *iocb, ssize_t res, ssize_t res2)
{
        if (iocb->ki_complete)
                iocb->ki_complete(iocb, res, res2);
}
#endif

static void cl_aio_end(const struct lu_env *env, struct cl_sync_io *anchor)
{
        struct cl_dio_aio *aio = container_of(anchor, typeof(*aio), cda_sync);
        ssize_t ret = anchor->csi_sync_rc;

        while (aio->cda_pages.pl_nr > 0) {
                struct cl_page *page = cl_page_list_first(&aio->cda_pages);

                cl_page_list_del(env, &aio->cda_pages, page);
                cl_page_delete(env, page);
                cl_page_put(env, page);
        }

        if (!is_sync_kiocb(aio->cda_iocb) && !aio->cda_no_aio_complete)
                aio_complete(aio->cda_iocb, ret ?: aio->cda_bytes, 0);
}
struct cl_dio_aio *cl_aio_alloc(struct kiocb *iocb, struct cl_object *obj)
{
        struct cl_dio_aio *aio;

        OBD_SLAB_ALLOC_PTR_GFP(aio, cl_dio_aio_kmem, GFP_NOFS);
        if (aio != NULL) {
                /*
                 * Hold one ref so that it won't be released until
                 * every page is added.
                 */
                cl_sync_io_init_notify(&aio->cda_sync, 1, is_sync_kiocb(iocb) ?
                                       NULL : aio, cl_aio_end);
                cl_page_list_init(&aio->cda_pages);
                aio->cda_iocb = iocb;
                aio->cda_no_aio_complete = 0;
                aio->cda_obj = obj;
                cl_object_get(obj);
        }
        return aio;
}
EXPORT_SYMBOL(cl_aio_alloc);

void cl_aio_free(const struct lu_env *env, struct cl_dio_aio *aio)
{
        if (aio) {
                cl_object_put(env, aio->cda_obj);
                OBD_SLAB_FREE_PTR(aio, cl_dio_aio_kmem);
        }
}
EXPORT_SYMBOL(cl_aio_free);
/**
 * Indicate that transfer of a single page completed.
 */
void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
                     int ioret)
{
        if (anchor->csi_sync_rc == 0 && ioret < 0)
                anchor->csi_sync_rc = ioret;
        /*
         * Synchronous IO done without releasing page lock (e.g., as a part of
         * ->{prepare,commit}_write()). Completion is used to signal the end
         * of io.
         */
        LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
        if (atomic_dec_and_lock(&anchor->csi_sync_nr,
                                &anchor->csi_waitq.lock)) {
                struct cl_dio_aio *aio = NULL;

                cl_sync_io_end_t *end_io = anchor->csi_end_io;

                /*
                 * Holding the lock across both the decrement and
                 * the wakeup ensures cl_sync_io_wait() doesn't complete
                 * before the wakeup completes and the contents of
                 * anchor become unsafe to access as the owner is free
                 * to immediately reclaim anchor when cl_sync_io_wait()
                 * returns.
                 */
                wake_up_locked(&anchor->csi_waitq);
                if (end_io)
                        end_io(env, anchor);
                if (anchor->csi_aio)
                        aio = anchor->csi_aio;

                spin_unlock(&anchor->csi_waitq.lock);

                /*
                 * If anchor->csi_aio is set, we are responsible for freeing
                 * memory here rather than when cl_sync_io_wait() completes.
                 */
                cl_aio_free(env, aio);
        }
}
EXPORT_SYMBOL(cl_sync_io_note);
int cl_sync_io_wait_recycle(const struct lu_env *env, struct cl_sync_io *anchor,
                            long timeout, int ioret)
{
        int rc;

        /*
         * @anchor was initialized with nr == 1 to prevent end_io from being
         * called before all pages are added for IO, so drop the one extra
         * reference to make sure we can wait for completion.
         */
        cl_sync_io_note(env, anchor, ioret);
        /* Wait for completion of normal dio.
         * This replaces the EIOCBQUEUED return from the DIO/AIO
         * path, and this is where AIO and DIO implementations
         * differ.
         */
        rc = cl_sync_io_wait(env, anchor, timeout);
        /*
         * Take one extra reference again, since if @anchor is
         * reused we assume it is initialized to 1 before use.
         */
        atomic_add(1, &anchor->csi_sync_nr);

        RETURN(rc);
}
EXPORT_SYMBOL(cl_sync_io_wait_recycle);