/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Implementation of cl_io for OSC layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_OSC

#include <lustre_obdo.h>
#include <lustre_osc.h>
#include <linux/pagevec.h>
#include <linux/falloc.h>

#include "osc_internal.h"
#include <lnet/lnet_rdma.h>

/*****************************************************************************
 *
 * io operations.
 *
 */
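
/* Generic per-io cleanup: the OSC io slice keeps no state that needs to be
 * torn down here, so this is intentionally a no-op.
 */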
static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io)
{
}

void osc_read_ahead_release(const struct lu_env *env, struct cl_read_ahead *ra)
{
	struct ldlm_lock *dlmlock = ra->cra_dlmlock;
	struct osc_io *oio = ra->cra_oio;
	struct lustre_handle lockh;

	oio->oi_is_readahead = 0;
	ldlm_lock2handle(dlmlock, &lockh);
	ldlm_lock_decref(&lockh, LCK_PR);
	LDLM_LOCK_PUT(dlmlock);
}
EXPORT_SYMBOL(osc_read_ahead_release);

static int osc_io_read_ahead(const struct lu_env *env,
			     const struct cl_io_slice *ios,
			     pgoff_t start, struct cl_read_ahead *ra)
{
	struct osc_object *osc = cl2osc(ios->cis_obj);
	struct osc_io *oio = cl2osc_io(env, ios);
	struct ldlm_lock *dlmlock;
	int result = -ENODATA;

	ENTRY;

	oio->oi_is_readahead = true;
	dlmlock = osc_dlmlock_at_pgoff(env, osc, start, 0);
	if (dlmlock != NULL) {
		struct lov_oinfo *oinfo = osc->oo_oinfo;

		LASSERT(dlmlock->l_ast_data == osc);
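		/* If the lock is not already held in PR mode, take an extra
		 * PR reference and drop the original-mode reference, so the
		 * pages read ahead stay covered by a PR lock that
		 * osc_read_ahead_release() drops when readahead completes.
		 */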
		if (dlmlock->l_req_mode != LCK_PR) {
			struct lustre_handle lockh;

			ldlm_lock2handle(dlmlock, &lockh);
			ldlm_lock_addref(&lockh, LCK_PR);
			ldlm_lock_decref(&lockh, dlmlock->l_req_mode);
		}

		ra->cra_rpc_pages = osc_cli(osc)->cl_max_pages_per_rpc;
		ra->cra_end_idx =
			dlmlock->l_policy_data.l_extent.end >> PAGE_SHIFT;
		ra->cra_release = osc_read_ahead_release;
		ra->cra_dlmlock = dlmlock;
		ra->cra_oio = oio;
		if (ra->cra_end_idx != CL_PAGE_EOF)
			ra->cra_contention = true;
		ra->cra_end_idx = min_t(pgoff_t,
					ra->cra_end_idx,
					(oinfo->loi_kms - 1) >> PAGE_SHIFT);
		result = 0;
	}

	RETURN(result);
}

/**
 * An implementation of cl_io_operations::cio_io_submit() method for osc
 * layer. Iterates over pages in the in-queue, prepares each for io by calling
 * cl_page_prep() and then either submits them through osc_io_submit_page()
 * or, if page is already submitted, changes osc flags through
 * osc_set_async_flags().
 */
int osc_io_submit(const struct lu_env *env, struct cl_io *io,
		  const struct cl_io_slice *ios, enum cl_req_type crt,
		  struct cl_2queue *queue)
{
	struct cl_page *page;
	struct cl_page *tmp;
	struct cl_io *top_io = cl_io_top(io);
	struct client_obd *cli = NULL;
	struct osc_object *osc = NULL; /* to keep gcc happy */
	struct osc_page *opg;
	LIST_HEAD(list);

	struct cl_page_list *qin = &queue->c2_qin;
	struct cl_page_list *qout = &queue->c2_qout;
	unsigned int queued = 0;
	int result = 0;
	int brw_flags;
	unsigned int max_pages;
	unsigned int ppc_bits; /* pages per chunk bits */
	unsigned int ppc;
	bool sync_queue = false;

	LASSERT(qin->pl_nr > 0);

	CDEBUG(D_CACHE|D_READA, "%d %d\n", qin->pl_nr, crt);

	osc = cl2osc(ios->cis_obj);
	cli = osc_cli(osc);
	max_pages = cli->cl_max_pages_per_rpc;
	ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
	ppc = 1 << ppc_bits;

	brw_flags = osc_io_srvlock(cl2osc_io(env, ios)) ? OBD_BRW_SRVLOCK : 0;
	brw_flags |= crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
	if (crt == CRT_READ && ios->cis_io->ci_ndelay)
		brw_flags |= OBD_BRW_NDELAY;

	page = cl_page_list_first(qin);
	if (page->cp_type == CPT_TRANSIENT)
		brw_flags |= OBD_BRW_NOCACHE;
	if (lnet_is_rdma_only_page(page->cp_vmpage))
		brw_flags |= OBD_BRW_RDMA_ONLY;

	/*
	 * NOTE: here @page is a top-level page. This is done to avoid
	 *       creation of sub-page-list.
	 */
	cl_page_list_for_each_safe(page, tmp, qin) {
		struct osc_async_page *oap;

		LASSERT(top_io != NULL);

		opg = osc_cl_page_osc(page, osc);
		oap = &opg->ops_oap;

		if (!list_empty(&oap->oap_pending_item) ||
		    !list_empty(&oap->oap_rpc_item)) {
			CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
			       oap, opg);
			result = -EBUSY;
			break;
		}

		result = cl_page_prep(env, top_io, page, crt);
		if (result != 0) {
			if (result != -EALREADY)
				break;
			/*
			 * Handle -EALREADY error: for read case, the page is
			 * already in UPTODATE state; for write, the page
			 * is not dirty.
			 */
			result = 0;
			continue;
		}

		if (page->cp_type != CPT_TRANSIENT) {
			oap->oap_async_flags = ASYNC_URGENT|ASYNC_READY|ASYNC_COUNT_STABLE;
		}

		osc_page_submit(env, opg, crt, brw_flags);
		list_add_tail(&oap->oap_pending_item, &list);
		queued++;

		if (page->cp_sync_io != NULL)
			cl_page_list_move(qout, qin, page);
		else /* async IO */
			cl_page_list_del(env, qin, page, true);
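
		/* Flush the pages accumulated so far as one sync RPC either
		 * when a full RPC worth of pages (max_pages) is queued or,
		 * for writes, when adding one more page would exceed the
		 * per-RPC chunk limit checked below.
		 */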
		if (queued == max_pages) {
			sync_queue = true;
		} else if (crt == CRT_WRITE) {
			unsigned int chunks;
			unsigned int next_chunks;

			chunks = (queued + ppc - 1) >> ppc_bits;
			/* chunk number if we add another page */
			next_chunks = (queued + ppc) >> ppc_bits;

			/* next page would exceed the write chunk limit */
			if (chunks == osc_max_write_chunks(cli) &&
			    next_chunks > chunks)
				sync_queue = true;
		}

		if (sync_queue) {
			result = osc_queue_sync_pages(env, top_io, osc, &list,
						      brw_flags);
			if (result < 0)
				break;
			queued = 0;
			sync_queue = false;
		}
	}

	if (queued > 0)
		result = osc_queue_sync_pages(env, top_io, osc, &list,
					      brw_flags);

	/* Update c/mtime for sync write. LU-7310 */
	if (crt == CRT_WRITE && qout->pl_nr > 0 && result == 0) {
		struct cl_object *obj = ios->cis_obj;
		struct cl_attr *attr = &osc_env_info(env)->oti_attr;

		cl_object_attr_lock(obj);
		attr->cat_mtime = attr->cat_ctime = ktime_get_real_seconds();
		cl_object_attr_update(env, obj, attr, CAT_MTIME | CAT_CTIME);
		cl_object_attr_unlock(obj);
	}

	CDEBUG(D_INFO, "%d/%d %d\n", qin->pl_nr, qout->pl_nr, result);
	return qout->pl_nr > 0 ? 0 : result;
}
EXPORT_SYMBOL(osc_io_submit);

/**
 * This is called to update the attributes when modifying a specific page,
 * both when making new pages and when doing updates to existing cached pages.
 *
 * Expand stripe KMS if necessary.
 */
void osc_page_touch_at(const struct lu_env *env, struct cl_object *obj,
		       pgoff_t idx, size_t to)
{
	struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	int valid;
	__u64 kms;

	/* offset within stripe */
	kms = (idx << PAGE_SHIFT) + to;
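
	/* kms is one byte past the last byte written in this stripe; below
	 * it is compared against the cached stripe KMS and size, and the
	 * attributes are expanded if the new value is larger.
	 */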
	cl_object_attr_lock(obj);
	CDEBUG(D_INODE, "stripe KMS %sincreasing %llu->%llu %llu\n",
	       kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
	       loi->loi_lvb.lvb_size);

	attr->cat_mtime = attr->cat_ctime = ktime_get_real_seconds();
	valid = CAT_MTIME | CAT_CTIME;
	if (kms > loi->loi_kms) {
		attr->cat_kms = kms;
		valid |= CAT_KMS;
	}
	if (kms > loi->loi_lvb.lvb_size) {
		attr->cat_size = kms;
		valid |= CAT_SIZE;
	}
	cl_object_attr_update(env, obj, attr, valid);
	cl_object_attr_unlock(obj);
}

int osc_io_commit_async(const struct lu_env *env,
			const struct cl_io_slice *ios,
			struct cl_page_list *qin, int from, int to,
			cl_commit_cbt cb)
{
	struct cl_io *io = ios->cis_io;
	struct osc_io *oio = cl2osc_io(env, ios);
	struct osc_object *osc = cl2osc(ios->cis_obj);
	struct cl_page *page;
	struct cl_page *last_page;
	struct osc_page *opg;
	struct folio_batch *fbatch = &osc_env_info(env)->oti_fbatch;
	int result = 0;

	LASSERT(qin->pl_nr > 0);

	/* Handle partial page cases */
	last_page = cl_page_list_last(qin);
	if (oio->oi_lockless) {
		page = cl_page_list_first(qin);
		if (page == last_page) {
			cl_page_clip(env, page, from, to);
		} else {
			if (from != 0)
				cl_page_clip(env, page, from, PAGE_SIZE);
			if (to != PAGE_SIZE)
				cl_page_clip(env, last_page, 0, to);
		}
	}

	ll_folio_batch_init(fbatch, 0);
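
	/* Pages are accumulated in fbatch and handed to the commit callback
	 * in batches: the callback runs each time the batch fills up, and
	 * once more below for any leftover pages.
	 */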
	while (qin->pl_nr > 0) {
		struct osc_async_page *oap;

		page = cl_page_list_first(qin);
		opg = osc_cl_page_osc(page, osc);
		oap = &opg->ops_oap;

		if (!list_empty(&oap->oap_rpc_item)) {
			CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
			       oap, opg);
			result = -EBUSY;
			break;
		}

		/* The page may be already in dirty cache. */
		if (list_empty(&oap->oap_pending_item)) {
			result = osc_page_cache_add(env, osc, opg, io, cb);
			if (result != 0)
				break;
		}

		osc_page_touch_at(env, osc2cl(osc), osc_index(opg),
				  page == last_page ? to : PAGE_SIZE);

		cl_page_list_del(env, qin, page, true);

		/* if there are no more slots, do the callback & reinit */
		if (!folio_batch_add_page(fbatch, page->cp_vmpage)) {
			(*cb)(env, io, fbatch);
			folio_batch_reinit(fbatch);
		}
	}
	/* The shrink interval is in seconds, so we can update it once per
	 * write, rather than once per page.
	 */
	osc_update_next_shrink(osc_cli(osc));

	/* Clean up any partially full folio_batches */
	if (folio_batch_count(fbatch) != 0)
		(*cb)(env, io, fbatch);

	/* Can't access these pages any more. Page can be in transfer and
	 * complete at any time. */

	/* for sync write, kernel will wait for this page to be flushed before
	 * osc_io_end() is called, so release it earlier.
	 * for mkwrite(), it's known there are no further pages. */
	if (cl_io_is_sync_write(io) && oio->oi_active != NULL) {
		osc_extent_release(env, oio->oi_active);
		oio->oi_active = NULL;
	}

	CDEBUG(D_INFO, "%d %d\n", qin->pl_nr, result);
	return result;
}
EXPORT_SYMBOL(osc_io_commit_async);

void osc_io_extent_release(const struct lu_env *env,
			   const struct cl_io_slice *ios)
{
	struct osc_io *oio = cl2osc_io(env, ios);

	if (oio->oi_active != NULL) {
		osc_extent_release(env, oio->oi_active);
		oio->oi_active = NULL;
	}
}
EXPORT_SYMBOL(osc_io_extent_release);
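
/* An import is treated as unhealthy unless it is valid, active, and in the
 * FULL or IDLE state; osc_io_iter_init() uses this to let non-delay reads
 * fail over to another mirror quickly instead of waiting for recovery.
 */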
static bool osc_import_not_healthy(struct obd_import *imp)
{
	return imp->imp_invalid || imp->imp_deactive ||
	       !(imp->imp_state == LUSTRE_IMP_FULL ||
		 imp->imp_state == LUSTRE_IMP_IDLE);
}

int osc_io_iter_init(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct osc_object *osc = cl2osc(ios->cis_obj);
	struct obd_import *imp = osc_cli(osc)->cl_import;
	struct osc_io *oio = osc_env_io(env);
	int rc = -EIO;

	ENTRY;

	spin_lock(&imp->imp_lock);
	/*
	 * Check whether this OSC device is available for a non-delay read;
	 * switch to another mirror quickly if we haven't tried all mirrors.
	 */
	if (ios->cis_io->ci_type == CIT_READ && ios->cis_io->ci_ndelay &&
	    !ios->cis_io->ci_tried_all_mirrors && osc_import_not_healthy(imp)) {
		rc = -EAGAIN;
	} else if (likely(!imp->imp_invalid)) {
		atomic_inc(&osc->oo_nr_ios);
		oio->oi_is_active = 1;
		rc = 0;
	}
	spin_unlock(&imp->imp_lock);

	if (capable(CAP_SYS_RESOURCE))
		oio->oi_cap_sys_resource = 1;

	RETURN(rc);
}
EXPORT_SYMBOL(osc_io_iter_init);

void osc_io_iter_fini(const struct lu_env *env,
		      const struct cl_io_slice *ios)
{
	struct osc_io *oio = osc_env_io(env);

	if (oio->oi_is_active) {
		struct osc_object *osc = cl2osc(ios->cis_obj);

		oio->oi_is_active = 0;
		LASSERT(atomic_read(&osc->oo_nr_ios) > 0);
		if (atomic_dec_and_test(&osc->oo_nr_ios))
			wake_up(&osc->oo_io_waitq);
	}
}
EXPORT_SYMBOL(osc_io_iter_fini);

void osc_io_rw_iter_fini(const struct lu_env *env,
			 const struct cl_io_slice *ios)
{
	struct osc_io *oio = osc_env_io(env);
	struct osc_object *osc = cl2osc(ios->cis_obj);

	if (oio->oi_lru_reserved > 0) {
		osc_lru_unreserve(osc_cli(osc), oio->oi_lru_reserved);
		oio->oi_lru_reserved = 0;
	}
	oio->oi_write_osclock = NULL;
	oio->oi_read_osclock = NULL;

	osc_io_iter_fini(env, ios);
}
EXPORT_SYMBOL(osc_io_rw_iter_fini);

int osc_io_fault_start(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct cl_io *io;
	struct cl_fault_io *fio;

	ENTRY;

	io = ios->cis_io;
	fio = &io->u.ci_fault;
	CDEBUG(D_INFO, "%lu %d %zu\n",
	       fio->ft_index, fio->ft_writable, fio->ft_bytes);
	/*
	 * If mapping is writeable, adjust kms to cover this page,
	 * but do not extend kms beyond actual file size.
	 */
	if (fio->ft_writable)
		osc_page_touch_at(env, ios->cis_obj,
				  fio->ft_index, fio->ft_bytes);

	RETURN(0);
}
EXPORT_SYMBOL(osc_io_fault_start);

static int osc_async_upcall(void *a, int rc)
{
	struct osc_async_cbargs *args = a;

	args->opc_rc = rc;
	complete(&args->opc_sync);
	return 0;
}

/**
 * Checks that there are no pages being written in the extent being truncated.
 */
static bool trunc_check_cb(const struct lu_env *env, struct cl_io *io,
			   void **pvec, int count, void *cbdata)
{
	int i;

	for (i = 0; i < count; i++) {
		struct osc_page *ops = pvec[i];
		struct cl_page *page = ops->ops_cl.cpl_page;
		struct osc_async_page *oap;
		__u64 start = *(__u64 *)cbdata;

		oap = &ops->ops_oap;
		if (oap->oap_cmd & OBD_BRW_WRITE &&
		    !list_empty(&oap->oap_pending_item))
			CL_PAGE_DEBUG(D_ERROR, env, page, "exists %llu/%s.\n",
				      start, current->comm);

		if (PageLocked(page->cp_vmpage))
			CDEBUG(D_CACHE, "page %p index %lu locked for cmd=%d\n",
			       ops, osc_index(ops), oap->oap_cmd);
	}
	return true;
}

static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
			    struct osc_io *oio, __u64 size)
{
	struct cl_object *clob;
	int partial;
	pgoff_t start;

	clob = oio->oi_cl.cis_obj;
	start = size >> PAGE_SHIFT;
	partial = (start << PAGE_SHIFT) < size;
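
	/* When the new size is not page-aligned, the first page of the
	 * truncated region is only partially truncated and may legitimately
	 * stay cached, so the check below starts at the following page
	 * (start + partial).
	 */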
	/*
	 * Complain if there are pages in the truncated region.
	 */
	osc_page_gang_lookup(env, io, cl2osc(clob),
			     start + partial, CL_PAGE_EOF,
			     trunc_check_cb, (void *)&size);
}

/**
 * Flush affected pages prior to punch.
 * We shouldn't discard them locally first, because that could cause data
 * loss if the server doesn't support fallocate punch; the data also needs
 * to be flushed first to prevent re-ordering with the punch.
 */
int osc_punch_start(const struct lu_env *env, struct cl_io *io,
		    struct cl_object *obj)
{
	struct osc_object *osc = cl2osc(obj);
	pgoff_t pg_start = io->u.ci_setattr.sa_falloc_offset >> PAGE_SHIFT;
	pgoff_t pg_end = (io->u.ci_setattr.sa_falloc_end - 1) >> PAGE_SHIFT;
	int rc;

	ENTRY;

	rc = osc_cache_writeback_range(env, osc, pg_start, pg_end, 1, 0);
	if (rc < 0)
		RETURN(rc);

	osc_page_gang_lookup(env, io, osc, pg_start, pg_end, osc_discard_cb,
			     osc);
	RETURN(0);
}
EXPORT_SYMBOL(osc_punch_start);

static int osc_io_setattr_start(const struct lu_env *env,
				const struct cl_io_slice *slice)
{
	struct cl_io *io = slice->cis_io;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct cl_object *obj = slice->cis_obj;
	struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	struct obdo *oa = &oio->oi_oa;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	unsigned int ia_avalid = io->u.ci_setattr.sa_avalid;
	enum op_xvalid ia_xvalid = io->u.ci_setattr.sa_xvalid;
	int result = 0;
	__u64 size = io->u.ci_setattr.sa_attr.lvb_size;
	bool io_is_falloc = cl_io_is_fallocate(io);

	ENTRY;

	/* truncate cache dirty pages first */
	if (cl_io_is_trunc(io))
		result = osc_cache_truncate_start(env, cl2osc(obj), size,
						  &oio->oi_trunc);
	/* flush local pages prior to punching them on the server */
	else if (io_is_falloc &&
		 io->u.ci_setattr.sa_falloc_mode & FALLOC_FL_PUNCH_HOLE)
		result = osc_punch_start(env, io, obj);

	if (result == 0 && oio->oi_lockless == 0) {
		cl_object_attr_lock(obj);
		result = cl_object_attr_get(env, obj, attr);
		if (result == 0) {
			struct ost_lvb *lvb = &io->u.ci_setattr.sa_attr;
			unsigned int cl_valid = 0;

			if (ia_avalid & ATTR_SIZE) {
				attr->cat_size = size;
				attr->cat_kms = size;
				cl_valid = (CAT_SIZE | CAT_KMS);
			}
			if (ia_avalid & ATTR_MTIME_SET) {
				attr->cat_mtime = lvb->lvb_mtime;
				cl_valid |= CAT_MTIME;
			}
			if (ia_avalid & ATTR_ATIME_SET) {
				attr->cat_atime = lvb->lvb_atime;
				cl_valid |= CAT_ATIME;
			}
			if (ia_xvalid & OP_XVALID_CTIME_SET) {
				attr->cat_ctime = lvb->lvb_ctime;
				cl_valid |= CAT_CTIME;
			}
			result = cl_object_attr_update(env, obj, attr,
						       cl_valid);
		}
		cl_object_attr_unlock(obj);
	}

	memset(oa, 0, sizeof(*oa));
	if (result == 0) {
		oa->o_oi = loi->loi_oi;
		obdo_set_parent_fid(oa, io->u.ci_setattr.sa_parent_fid);
		oa->o_stripe_idx = io->u.ci_setattr.sa_stripe_index;
		oa->o_layout = io->u.ci_setattr.sa_layout;
		oa->o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP |
			       OBD_MD_FLOSTLAYOUT;
		if (ia_avalid & ATTR_CTIME) {
			oa->o_valid |= OBD_MD_FLCTIME;
			oa->o_ctime = attr->cat_ctime;
		}
		if (ia_avalid & ATTR_ATIME) {
			oa->o_valid |= OBD_MD_FLATIME;
			oa->o_atime = attr->cat_atime;
		}
		if (ia_avalid & ATTR_MTIME) {
			oa->o_valid |= OBD_MD_FLMTIME;
			oa->o_mtime = attr->cat_mtime;
		}

		if (ia_avalid & ATTR_SIZE || io_is_falloc) {
			if (oio->oi_lockless) {
				oa->o_flags = OBD_FL_SRVLOCK;
				oa->o_valid |= OBD_MD_FLFLAGS;
			}

			if (io->ci_layout_version > 0) {
				/* verify layout version */
				oa->o_valid |= OBD_MD_LAYOUT_VERSION;
				oa->o_layout_version = io->ci_layout_version;
			}
		} else {
			LASSERT(oio->oi_lockless == 0);
		}

		if (ia_xvalid & OP_XVALID_FLAGS) {
			oa->o_flags = io->u.ci_setattr.sa_attr_flags;
			oa->o_valid |= OBD_MD_FLFLAGS;
		}

		init_completion(&cbargs->opc_sync);

		if (io_is_falloc) {
			int falloc_mode = io->u.ci_setattr.sa_falloc_mode;

			oa->o_size = io->u.ci_setattr.sa_falloc_offset;
			oa->o_blocks = io->u.ci_setattr.sa_falloc_end;
			oa->o_uid = io->u.ci_setattr.sa_falloc_uid;
			oa->o_gid = io->u.ci_setattr.sa_falloc_gid;
			oa->o_projid = io->u.ci_setattr.sa_falloc_projid;
			oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
				OBD_MD_FLUID | OBD_MD_FLGID | OBD_MD_FLPROJID;

			CDEBUG(D_INODE,
			       "size %llu blocks %llu uid %u gid %u prjid %u\n",
			       oa->o_size, oa->o_blocks, oa->o_uid, oa->o_gid,
			       oa->o_projid);
			result = osc_fallocate_base(osc_export(cl2osc(obj)),
						    oa, osc_async_upcall,
						    cbargs, falloc_mode);
		} else if (ia_avalid & ATTR_SIZE) {
			oa->o_size = size;
			oa->o_blocks = OBD_OBJECT_EOF;
			oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
			result = osc_punch_send(osc_export(cl2osc(obj)),
						oa, osc_async_upcall, cbargs);
		} else {
			result = osc_setattr_async(osc_export(cl2osc(obj)),
						   oa, osc_async_upcall,
						   cbargs, PTLRPCD_SET);
		}
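
		/* Record whether an RPC was actually sent, so that
		 * osc_io_setattr_end() only waits on opc_sync when there is
		 * a completion coming.
		 */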
		cbargs->opc_rpc_sent = result == 0;
	}

	RETURN(result);
}

void osc_io_setattr_end(const struct lu_env *env,
			const struct cl_io_slice *slice)
{
	struct cl_io *io = slice->cis_io;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct cl_object *obj = slice->cis_obj;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	struct obdo *oa = &oio->oi_oa;
	unsigned int cl_valid = 0;
	int result = 0;

	if (cbargs->opc_rpc_sent) {
		wait_for_completion(&cbargs->opc_sync);
		result = io->ci_result = cbargs->opc_rc;
	}

	if (cl_io_is_trunc(io)) {
		__u64 size = io->u.ci_setattr.sa_attr.lvb_size;

		if (result == 0) {
			cl_object_attr_lock(obj);
			if (oa->o_valid & OBD_MD_FLBLOCKS) {
				attr->cat_blocks = oa->o_blocks;
				cl_valid |= CAT_BLOCKS;
			}

			cl_object_attr_update(env, obj, attr, cl_valid);
			cl_object_attr_unlock(obj);
		}
		osc_trunc_check(env, io, oio, size);
		osc_cache_truncate_end(env, oio->oi_trunc);
		oio->oi_trunc = NULL;
	}

	if (cl_io_is_fallocate(io)) {
		if (result == 0) {
			cl_object_attr_lock(obj);
			/* update blocks */
			if (oa->o_valid & OBD_MD_FLBLOCKS) {
				attr->cat_blocks = oa->o_blocks;
				cl_valid |= CAT_BLOCKS;
			}

			cl_object_attr_update(env, obj, attr, cl_valid);
			cl_object_attr_unlock(obj);
		}
	}
}
EXPORT_SYMBOL(osc_io_setattr_end);

struct osc_data_version_args {
	struct osc_io *dva_oio;
};

static int
osc_data_version_interpret(const struct lu_env *env, struct ptlrpc_request *req,
			   void *args, int rc)
{
	struct osc_data_version_args *dva = args;
	struct osc_io *oio = dva->dva_oio;
	const struct ost_body *body;

	ENTRY;

	if (rc < 0)
		GOTO(out, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, &oio->oi_oa,
			     &body->oa);
	EXIT;
out:
	oio->oi_cbarg.opc_rc = rc;
	complete(&oio->oi_cbarg.opc_sync);
	return 0;
}

static int osc_io_data_version_start(const struct lu_env *env,
				     const struct cl_io_slice *slice)
{
	struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct obdo *oa = &oio->oi_oa;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	struct osc_object *obj = cl2osc(slice->cis_obj);
	struct lov_oinfo *loi = obj->oo_oinfo;
	struct obd_export *exp = osc_export(obj);
	struct ptlrpc_request *req;
	struct ost_body *body;
	struct osc_data_version_args *dva;
	int rc;

	ENTRY;

	memset(oa, 0, sizeof(*oa));
	oa->o_oi = loi->loi_oi;
	oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
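
	/* For LL_DV_RD_FLUSH/LL_DV_WR_FLUSH the OST takes the lock server
	 * side (OBD_FL_SRVLOCK); OBD_FL_FLUSH additionally asks it to flush
	 * dirty data, so the returned version reflects outstanding writes.
	 */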
	if (dv->dv_flags & (LL_DV_RD_FLUSH | LL_DV_WR_FLUSH)) {
		oa->o_valid |= OBD_MD_FLFLAGS;
		oa->o_flags |= OBD_FL_SRVLOCK;
		if (dv->dv_flags & LL_DV_WR_FLUSH)
			oa->o_flags |= OBD_FL_FLUSH;
	}

	init_completion(&cbargs->opc_sync);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
	if (rc < 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);
	req->rq_interpret_reply = osc_data_version_interpret;
	dva = ptlrpc_req_async_args(dva, req);
	dva->dva_oio = oio;

	ptlrpcd_add_req(req);

	RETURN(0);
}

static void osc_io_data_version_end(const struct lu_env *env,
				    const struct cl_io_slice *slice)
{
	struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct cl_object *obj = slice->cis_obj;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	struct obdo *oa = &oio->oi_oa;
	unsigned int cl_valid = 0;

	ENTRY;
	wait_for_completion(&cbargs->opc_sync);

	if (cbargs->opc_rc != 0) {
		slice->cis_io->ci_result = cbargs->opc_rc;
	} else {
		slice->cis_io->ci_result = 0;
		if (!(oa->o_valid &
		      (OBD_MD_LAYOUT_VERSION | OBD_MD_FLDATAVERSION)))
			slice->cis_io->ci_result = -EOPNOTSUPP;

		if (oa->o_valid & OBD_MD_LAYOUT_VERSION)
			dv->dv_layout_version = oa->o_layout_version;
		if (oa->o_valid & OBD_MD_FLDATAVERSION)
			dv->dv_data_version = oa->o_data_version;

		if (dv->dv_flags & LL_DV_SZ_UPDATE) {
			if (oa->o_valid & OBD_MD_FLSIZE) {
				attr->cat_size = oa->o_size;
				cl_valid |= CAT_SIZE;
			}

			if (oa->o_valid & OBD_MD_FLBLOCKS) {
				attr->cat_blocks = oa->o_blocks;
				cl_valid |= CAT_BLOCKS;
			}

			cl_object_attr_lock(obj);
			cl_object_attr_update(env, obj, attr, cl_valid);
			cl_object_attr_unlock(obj);
		}
	}

	EXIT;
}

int osc_io_read_start(const struct lu_env *env,
		      const struct cl_io_slice *slice)
{
	struct cl_object *obj = slice->cis_obj;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	int rc = 0;

	ENTRY;

	if (!slice->cis_io->ci_noatime) {
		cl_object_attr_lock(obj);
		attr->cat_atime = ktime_get_real_seconds();
		rc = cl_object_attr_update(env, obj, attr, CAT_ATIME);
		cl_object_attr_unlock(obj);
	}

	RETURN(rc);
}
EXPORT_SYMBOL(osc_io_read_start);

int osc_io_write_start(const struct lu_env *env,
		       const struct cl_io_slice *slice)
{
	struct cl_object *obj = slice->cis_obj;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	int rc = 0;

	ENTRY;

	CFS_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1);
	cl_object_attr_lock(obj);
	attr->cat_mtime = attr->cat_ctime = ktime_get_real_seconds();
	rc = cl_object_attr_update(env, obj, attr, CAT_MTIME | CAT_CTIME);
	cl_object_attr_unlock(obj);

	RETURN(rc);
}
EXPORT_SYMBOL(osc_io_write_start);

int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
		  struct cl_fsync_io *fio)
{
	struct osc_io *oio = osc_env_io(env);
	struct obdo *oa = &oio->oi_oa;
	struct lov_oinfo *loi = obj->oo_oinfo;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	int rc = 0;

	ENTRY;

	memset(oa, 0, sizeof(*oa));
	oa->o_oi = loi->loi_oi;
	oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;

	/* reload size and blocks for start and end of sync range */
	oa->o_size = fio->fi_start;
	oa->o_blocks = fio->fi_end;
	oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;

	obdo_set_parent_fid(oa, fio->fi_fid);

	init_completion(&cbargs->opc_sync);

	rc = osc_sync_base(obj, oa, osc_async_upcall, cbargs, PTLRPCD_SET);
	RETURN(rc);
}
EXPORT_SYMBOL(osc_fsync_ost);

static int osc_io_fsync_start(const struct lu_env *env,
			      const struct cl_io_slice *slice)
{
	struct cl_io *io = slice->cis_io;
	struct cl_fsync_io *fio = &io->u.ci_fsync;
	struct cl_object *obj = slice->cis_obj;
	struct osc_object *osc = cl2osc(obj);
	pgoff_t start = fio->fi_start >> PAGE_SHIFT;
	pgoff_t end = fio->fi_end >> PAGE_SHIFT;
	int result = 0;

	ENTRY;
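
	/* fi_start/fi_end are byte offsets, while the cache writeback and
	 * wait calls below operate on the page index range [start, end].
	 */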
	if (fio->fi_mode == CL_FSYNC_RECLAIM) {
		struct client_obd *cli = osc_cli(osc);

		if (!atomic_long_read(&cli->cl_unstable_count)) {
			/* Stop flush when there are no unstable pages? */
			CDEBUG(D_CACHE, "unstable count is zero\n");
			RETURN(0);
		}
	}

	if (fio->fi_end == OBD_OBJECT_EOF)
		end = CL_PAGE_EOF;

	result = osc_cache_writeback_range(env, osc, start, end, 0,
					   fio->fi_mode == CL_FSYNC_DISCARD);
	if (result < 0 && fio->fi_mode == CL_FSYNC_DISCARD) {
		CDEBUG(D_CACHE,
		       "%s: ignore error %d on discarding "DFID":[%lu-%lu]\n",
		       cli_name(osc_cli(osc)), result, PFID(fio->fi_fid),
		       start, end);
		result = 0;
	}
	if (result > 0) {
		fio->fi_nr_written += result;
		result = 0;
	}
	if (fio->fi_mode == CL_FSYNC_ALL || fio->fi_mode == CL_FSYNC_RECLAIM) {
		struct osc_io *oio = cl2osc_io(env, slice);
		struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
		int rc;

		/* we have to wait for writeback to finish before we can
		 * send OST_SYNC RPC. This is bad because it causes extents
		 * to be written osc by osc. However, we usually start
		 * writeback before CL_FSYNC_ALL so this won't have any real
		 * problem.
		 * We do not have to wait for writeback to finish in the
		 * memory reclaim environment.
		 */
		if (fio->fi_mode == CL_FSYNC_ALL) {
			rc = osc_cache_wait_range(env, osc, start, end);
			if (result == 0)
				result = rc;
		}

		rc = osc_fsync_ost(env, osc, fio);
		if (result == 0) {
			cbargs->opc_rpc_sent = 1;
			result = rc;
		}
	}

	RETURN(result);
}

void osc_io_fsync_end(const struct lu_env *env,
		      const struct cl_io_slice *slice)
{
	struct cl_fsync_io *fio = &slice->cis_io->u.ci_fsync;
	struct cl_object *obj = slice->cis_obj;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	pgoff_t start = fio->fi_start >> PAGE_SHIFT;
	pgoff_t end = fio->fi_end >> PAGE_SHIFT;
	int result = 0;

	if (fio->fi_mode == CL_FSYNC_LOCAL) {
		result = osc_cache_wait_range(env, cl2osc(obj), start, end);
	} else if (cbargs->opc_rpc_sent && (fio->fi_mode == CL_FSYNC_ALL ||
					    fio->fi_mode == CL_FSYNC_RECLAIM)) {
		wait_for_completion(&cbargs->opc_sync);
		if (result == 0)
			result = cbargs->opc_rc;
	}
	slice->cis_io->ci_result = result;
}
EXPORT_SYMBOL(osc_io_fsync_end);

static int osc_io_ladvise_start(const struct lu_env *env,
				const struct cl_io_slice *slice)
{
	int result = 0;
	struct cl_io *io = slice->cis_io;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct cl_object *obj = slice->cis_obj;
	struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
	struct cl_ladvise_io *lio = &io->u.ci_ladvise;
	struct obdo *oa = &oio->oi_oa;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	struct lu_ladvise *ladvise;
	struct ladvise_hdr *ladvise_hdr;
	int buf_size;
	int num_advise = 1;

	ENTRY;

	/* TODO: add multiple ladvise support in CLIO */
	buf_size = offsetof(typeof(*ladvise_hdr), lah_advise[num_advise]);
	if (osc_env_info(env)->oti_ladvise_buf.lb_len < buf_size)
		lu_buf_realloc(&osc_env_info(env)->oti_ladvise_buf, buf_size);

	ladvise_hdr = osc_env_info(env)->oti_ladvise_buf.lb_buf;
	if (ladvise_hdr == NULL)
		RETURN(-ENOMEM);

	memset(ladvise_hdr, 0, buf_size);
	ladvise_hdr->lah_magic = LADVISE_MAGIC;
	ladvise_hdr->lah_count = num_advise;
	ladvise_hdr->lah_flags = lio->lio_flags;

	memset(oa, 0, sizeof(*oa));
	oa->o_oi = loi->loi_oi;
	oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
	obdo_set_parent_fid(oa, lio->lio_fid);

	ladvise = ladvise_hdr->lah_advise;
	ladvise->lla_start = lio->lio_start;
	ladvise->lla_end = lio->lio_end;
	ladvise->lla_advice = lio->lio_advice;

	if (lio->lio_flags & LF_ASYNC) {
		result = osc_ladvise_base(osc_export(cl2osc(obj)), oa,
					  ladvise_hdr, NULL, NULL, NULL);
	} else {
		init_completion(&cbargs->opc_sync);
		result = osc_ladvise_base(osc_export(cl2osc(obj)), oa,
					  ladvise_hdr, osc_async_upcall,
					  cbargs, PTLRPCD_SET);
		cbargs->opc_rpc_sent = result == 0;
	}
	RETURN(result);
}

static void osc_io_ladvise_end(const struct lu_env *env,
			       const struct cl_io_slice *slice)
{
	struct cl_io *io = slice->cis_io;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	int result = 0;
	struct cl_ladvise_io *lio = &io->u.ci_ladvise;

	if ((!(lio->lio_flags & LF_ASYNC)) && cbargs->opc_rpc_sent) {
		wait_for_completion(&cbargs->opc_sync);
		result = cbargs->opc_rc;
	}
	slice->cis_io->ci_result = result;
}

void osc_io_end(const struct lu_env *env, const struct cl_io_slice *slice)
{
	struct osc_io *oio = cl2osc_io(env, slice);

	if (oio->oi_active) {
		osc_extent_release(env, oio->oi_active);
		oio->oi_active = NULL;
	}
}
EXPORT_SYMBOL(osc_io_end);

struct osc_lseek_args {
	struct osc_io *lsa_oio;
};

static int osc_lseek_interpret(const struct lu_env *env,
			       struct ptlrpc_request *req,
			       void *arg, int rc)
{
	struct ost_body *reply;
	struct osc_lseek_args *lsa = arg;
	struct osc_io *oio = lsa->lsa_oio;
	struct cl_io *io = oio->oi_cl.cis_io;
	struct cl_lseek_io *lsio = &io->u.ci_lseek;

	ENTRY;

	if (rc != 0)
		GOTO(out, rc);

	reply = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (reply == NULL)
		GOTO(out, rc = -EPROTO);

	lsio->ls_result = reply->oa.o_size;
out:
	osc_async_upcall(&oio->oi_cbarg, rc);
	RETURN(rc);
}

int osc_io_lseek_start(const struct lu_env *env,
		       const struct cl_io_slice *slice)
{
	struct cl_io *io = slice->cis_io;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct cl_object *obj = slice->cis_obj;
	struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
	struct cl_lseek_io *lsio = &io->u.ci_lseek;
	struct obdo *oa = &oio->oi_oa;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	struct obd_export *exp = osc_export(cl2osc(obj));
	struct ptlrpc_request *req;
	struct ost_body *body;
	struct osc_lseek_args *lsa;
	int rc = 0;

	ENTRY;

	/* No negative values at this point */
	LASSERT(lsio->ls_start >= 0);
	LASSERT(lsio->ls_whence == SEEK_HOLE || lsio->ls_whence == SEEK_DATA);

	/* with the IO lock taken we have the object size in the LVB and can
	 * check boundaries prior to sending the LSEEK RPC
	 */
	if (lsio->ls_start >= loi->loi_lvb.lvb_size) {
		/* treat the area beyond the end of the object as a hole */
		if (lsio->ls_whence == SEEK_HOLE)
			lsio->ls_result = lsio->ls_start;
		else
			lsio->ls_result = -ENXIO;
		RETURN(0);
	}

	/* if the LSEEK RPC is not supported by the server, treat the whole
	 * stripe object as data, with a hole after the end of the object
	 */
	if (!exp_connect_lseek(exp)) {
		if (lsio->ls_whence == SEEK_HOLE)
			lsio->ls_result = loi->loi_lvb.lvb_size;
		else
			lsio->ls_result = lsio->ls_start;
		RETURN(0);
	}

	memset(oa, 0, sizeof(*oa));
	oa->o_oi = loi->loi_oi;
	oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
	oa->o_size = lsio->ls_start;
	oa->o_mode = lsio->ls_whence;
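
	/* The obdo carries the request: o_size holds the starting offset
	 * and o_mode the SEEK_HOLE/SEEK_DATA selector; the reply returns
	 * the resulting offset in o_size (see osc_lseek_interpret()).
	 */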
	if (oio->oi_lockless) {
		oa->o_flags = OBD_FL_SRVLOCK;
		oa->o_valid |= OBD_MD_FLFLAGS;
	}

	init_completion(&cbargs->opc_sync);
	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SEEK);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SEEK);
	if (rc < 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
	ptlrpc_request_set_replen(req);
	req->rq_interpret_reply = osc_lseek_interpret;
	lsa = ptlrpc_req_async_args(lsa, req);
	lsa->lsa_oio = oio;

	ptlrpcd_add_req(req);
	cbargs->opc_rpc_sent = 1;

	RETURN(0);
}
EXPORT_SYMBOL(osc_io_lseek_start);

void osc_io_lseek_end(const struct lu_env *env,
		      const struct cl_io_slice *slice)
{
	struct osc_io *oio = cl2osc_io(env, slice);
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	int rc = 0;

	if (cbargs->opc_rpc_sent) {
		wait_for_completion(&cbargs->opc_sync);
		rc = cbargs->opc_rc;
	}
	slice->cis_io->ci_result = rc;
}
EXPORT_SYMBOL(osc_io_lseek_end);

int osc_io_lru_reserve(const struct lu_env *env,
		       const struct cl_io_slice *ios,
		       loff_t pos, size_t bytes)
{
	struct osc_object *osc = cl2osc(ios->cis_obj);
	struct osc_io *oio = osc_env_io(env);
	unsigned long npages = 0;
	size_t page_offset;

	ENTRY;
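
	/* Count how many pages [pos, pos + bytes) touches: a possibly
	 * partial leading page first, then the remaining bytes rounded up
	 * to whole pages.
	 */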
	page_offset = pos & ~PAGE_MASK;
	if (page_offset) {
		++npages;
		if (bytes > PAGE_SIZE - page_offset)
			bytes -= (PAGE_SIZE - page_offset);
		else
			bytes = 0;
	}
	npages += (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
	oio->oi_lru_reserved = osc_lru_reserve(osc_cli(osc), npages);

	RETURN(0);
}
EXPORT_SYMBOL(osc_io_lru_reserve);

static const struct cl_io_operations osc_io_ops = {
	.op = {
		[CIT_READ] = {
			.cio_iter_init = osc_io_iter_init,
			.cio_iter_fini = osc_io_rw_iter_fini,
			.cio_start = osc_io_read_start,
			.cio_fini = osc_io_fini
		},
		[CIT_WRITE] = {
			.cio_iter_init = osc_io_iter_init,
			.cio_iter_fini = osc_io_rw_iter_fini,
			.cio_start = osc_io_write_start,
			.cio_end = osc_io_end,
			.cio_fini = osc_io_fini
		},
		[CIT_SETATTR] = {
			.cio_iter_init = osc_io_iter_init,
			.cio_iter_fini = osc_io_iter_fini,
			.cio_start = osc_io_setattr_start,
			.cio_end = osc_io_setattr_end
		},
		[CIT_DATA_VERSION] = {
			.cio_start = osc_io_data_version_start,
			.cio_end = osc_io_data_version_end,
		},
		[CIT_FAULT] = {
			.cio_iter_init = osc_io_iter_init,
			.cio_iter_fini = osc_io_iter_fini,
			.cio_start = osc_io_fault_start,
			.cio_end = osc_io_end,
			.cio_fini = osc_io_fini
		},
		[CIT_FSYNC] = {
			.cio_start = osc_io_fsync_start,
			.cio_end = osc_io_fsync_end,
			.cio_fini = osc_io_fini
		},
		[CIT_LADVISE] = {
			.cio_start = osc_io_ladvise_start,
			.cio_end = osc_io_ladvise_end,
			.cio_fini = osc_io_fini
		},
		[CIT_LSEEK] = {
			.cio_start = osc_io_lseek_start,
			.cio_end = osc_io_lseek_end,
			.cio_fini = osc_io_fini
		},
		[CIT_MISC] = {
			.cio_fini = osc_io_fini
		}
	},
	.cio_read_ahead = osc_io_read_ahead,
	.cio_lru_reserve = osc_io_lru_reserve,
	.cio_submit = osc_io_submit,
	.cio_commit_async = osc_io_commit_async,
	.cio_extent_release = osc_io_extent_release
};

/*****************************************************************************
 *
 * Transfer operations.
 *
 */

int osc_io_init(const struct lu_env *env,
		struct cl_object *obj, struct cl_io *io)
{
	struct obd_export *exp = osc_export(cl2osc(obj));
	struct osc_io *oio = osc_env_io(env);

	CL_IO_SLICE_CLEAN(oio, oi_cl);
	cl_io_slice_add(io, &oio->oi_cl, obj, &osc_io_ops);

	if (!exp_connect_unaligned_dio(exp))
		cl_io_top(io)->ci_allow_unaligned_dio = false;

	return 0;
}