/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for OSC layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */
#define DEBUG_SUBSYSTEM S_OSC

#include <lustre_obdo.h>
#include <lustre_osc.h>
#include <linux/pagevec.h>
#include <linux/falloc.h>

#include "osc_internal.h"
/*****************************************************************************
 *
 * io operations.
 *
 */
static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io)
{
}
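/*
 * Release callback for read-ahead: drop the LCK_PR reference taken in
 * osc_io_read_ahead() and put the DLM lock once read-ahead is done with it.
 */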
void osc_read_ahead_release(const struct lu_env *env, void *cbdata)
{
	struct ldlm_lock *dlmlock = cbdata;
	struct lustre_handle lockh;

	ldlm_lock2handle(dlmlock, &lockh);
	ldlm_lock_decref(&lockh, LCK_PR);
	LDLM_LOCK_PUT(dlmlock);
}
EXPORT_SYMBOL(osc_read_ahead_release);
static int osc_io_read_ahead(const struct lu_env *env,
			     const struct cl_io_slice *ios,
			     pgoff_t start, struct cl_read_ahead *ra)
{
	struct osc_object *osc = cl2osc(ios->cis_obj);
	struct ldlm_lock *dlmlock;
	int result = -ENODATA;

	ENTRY;

	dlmlock = osc_dlmlock_at_pgoff(env, osc, start, 0);
	if (dlmlock != NULL) {
		LASSERT(dlmlock->l_ast_data == osc);
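		/*
		 * Take an LCK_PR reference and drop the original mode
		 * reference, so the reference released later by
		 * osc_read_ahead_release() always matches LCK_PR.
		 */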
		if (dlmlock->l_req_mode != LCK_PR) {
			struct lustre_handle lockh;

			ldlm_lock2handle(dlmlock, &lockh);
			ldlm_lock_addref(&lockh, LCK_PR);
			ldlm_lock_decref(&lockh, dlmlock->l_req_mode);
		}

		ra->cra_rpc_pages = osc_cli(osc)->cl_max_pages_per_rpc;
		ra->cra_end_idx = cl_index(osc2cl(osc),
					   dlmlock->l_policy_data.l_extent.end);
		ra->cra_release = osc_read_ahead_release;
		ra->cra_cbdata = dlmlock;
		if (ra->cra_end_idx != CL_PAGE_EOF)
			ra->cra_contention = true;
		result = 0;
	}

	RETURN(result);
}
/**
 * An implementation of cl_io_operations::cio_submit() method for the osc
 * layer. Iterates over pages in the in-queue, prepares each for io by
 * calling cl_page_prep() and then submits it through osc_page_submit();
 * pages that are already queued for an RPC abort the submission with
 * -EBUSY.
 */
int osc_io_submit(const struct lu_env *env, const struct cl_io_slice *ios,
		  enum cl_req_type crt, struct cl_2queue *queue)
{
	struct cl_page *page;
	struct cl_page *tmp;
	struct client_obd *cli = NULL;
	struct osc_object *osc = NULL; /* to keep gcc happy */
	struct osc_page *opg;
	struct cl_io *io;
	LIST_HEAD(list);

	struct cl_page_list *qin = &queue->c2_qin;
	struct cl_page_list *qout = &queue->c2_qout;
	unsigned int queued = 0;
	int result = 0;
	int brw_flags;
	unsigned int max_pages;
	unsigned int ppc_bits; /* pages per chunk bits */
	unsigned int ppc;
	bool sync_queue = false;

	LASSERT(qin->pl_nr > 0);

	CDEBUG(D_CACHE|D_READA, "%d %d\n", qin->pl_nr, crt);

	osc = cl2osc(ios->cis_obj);
	cli = osc_cli(osc);
	max_pages = cli->cl_max_pages_per_rpc;
	ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
	ppc = 1 << ppc_bits;

	brw_flags = osc_io_srvlock(cl2osc_io(env, ios)) ? OBD_BRW_SRVLOCK : 0;
	brw_flags |= crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
	if (crt == CRT_READ && ios->cis_io->ci_ndelay)
		brw_flags |= OBD_BRW_NDELAY;
	page = cl_page_list_first(qin);
	if (page->cp_type == CPT_TRANSIENT)
		brw_flags |= OBD_BRW_NOCACHE;

	/*
	 * NOTE: here @page is a top-level page. This is done to avoid
	 * creation of sub-page-list.
	 */
	cl_page_list_for_each_safe(page, tmp, qin) {
		struct osc_async_page *oap;

		/* Top level IO. */
		io = page->cp_sync_io != NULL ? page->cp_sync_io :
						ios->cis_io;

		opg = osc_cl_page_osc(page, osc);
		oap = &opg->ops_oap;
		LASSERT(osc == oap->oap_obj);

		if (!list_empty(&oap->oap_pending_item) ||
		    !list_empty(&oap->oap_rpc_item)) {
			CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
			       oap, opg);
			result = -EBUSY;
			break;
		}

		result = cl_page_prep(env, io, page, crt);
		if (result != 0) {
			if (result != -EALREADY)
				break;
			/*
			 * Handle -EALREADY error: for read case, the page is
			 * already in UPTODATE state; for write, the page
			 * is not dirty.
			 */
			result = 0;
			continue;
		}
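		/*
		 * The page is prepped: it is ready for IO and its byte count
		 * will not change, so mark it urgent to have the RPC engine
		 * send it without waiting for further batching.
		 */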
		spin_lock(&oap->oap_lock);
		oap->oap_async_flags = ASYNC_URGENT|ASYNC_READY;
		oap->oap_async_flags |= ASYNC_COUNT_STABLE;
		spin_unlock(&oap->oap_lock);

		osc_page_submit(env, opg, crt, brw_flags);
		list_add_tail(&oap->oap_pending_item, &list);

		if (page->cp_sync_io != NULL)
			cl_page_list_move(qout, qin, page);
		else /* async IO */
			cl_page_list_del(env, qin, page);
		queued++;
		if (queued == max_pages) {
			sync_queue = true;
		} else if (crt == CRT_WRITE) {
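			/*
			 * Writes are accounted in chunk-sized units; flush
			 * the batch once one more page would exceed the
			 * per-IO write chunk budget.
			 */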
			unsigned int chunks;
			unsigned int next_chunks;

			chunks = (queued + ppc - 1) >> ppc_bits;
			/* chunk number if we add another page */
			next_chunks = (queued + ppc) >> ppc_bits;

			/* next page will exceed write chunk limit */
			if (chunks == osc_max_write_chunks(cli) &&
			    next_chunks > chunks)
				sync_queue = true;
		}

		if (sync_queue) {
			result = osc_queue_sync_pages(env, io, osc, &list,
						      brw_flags);
			if (result < 0)
				break;
			queued = 0;
			sync_queue = false;
		}
	}

	if (queued > 0)
		result = osc_queue_sync_pages(env, io, osc, &list, brw_flags);
	/* Update c/mtime for sync write. LU-7310 */
	if (crt == CRT_WRITE && qout->pl_nr > 0 && result == 0) {
		struct cl_object *obj = ios->cis_obj;
		struct cl_attr *attr = &osc_env_info(env)->oti_attr;

		cl_object_attr_lock(obj);
		attr->cat_mtime = attr->cat_ctime = ktime_get_real_seconds();
		cl_object_attr_update(env, obj, attr, CAT_MTIME | CAT_CTIME);
		cl_object_attr_unlock(obj);
	}

	CDEBUG(D_INFO, "%d/%d %d\n", qin->pl_nr, qout->pl_nr, result);
	return qout->pl_nr > 0 ? 0 : result;
}
EXPORT_SYMBOL(osc_io_submit);
/**
 * This is called to update the attributes when modifying a specific page,
 * both when making new pages and when doing updates to existing cached pages.
 *
 * Expand stripe KMS if necessary.
 */
void osc_page_touch_at(const struct lu_env *env, struct cl_object *obj,
		       pgoff_t idx, size_t to)
{
	struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	int valid;
	__u64 kms;

	ENTRY;

	/* offset within stripe */
	kms = cl_offset(obj, idx) + to;

	cl_object_attr_lock(obj);
	CDEBUG(D_INODE, "stripe KMS %sincreasing %llu->%llu %llu\n",
	       kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
	       loi->loi_lvb.lvb_size);

	attr->cat_mtime = attr->cat_ctime = ktime_get_real_seconds();
	valid = CAT_MTIME | CAT_CTIME;
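	/*
	 * KMS ("known minimum size") records how far this stripe object is
	 * known to extend; grow it, and the cached size, when the write
	 * reaches past the current values.
	 */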
	if (kms > loi->loi_kms) {
		attr->cat_kms = kms;
		valid |= CAT_KMS;
	}
	if (kms > loi->loi_lvb.lvb_size) {
		attr->cat_size = kms;
		valid |= CAT_SIZE;
	}
	cl_object_attr_update(env, obj, attr, valid);
	cl_object_attr_unlock(obj);

	EXIT;
}
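/**
 * An implementation of cl_io_operations::cio_commit_async() for the osc
 * layer: adds each page in @qin to the osc dirty cache and hands the
 * committed vmpages back to the caller through @cb in pagevec batches.
 */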
int osc_io_commit_async(const struct lu_env *env,
			const struct cl_io_slice *ios,
			struct cl_page_list *qin, int from, int to,
			cl_commit_cbt cb)
{
	struct cl_io *io = ios->cis_io;
	struct osc_io *oio = cl2osc_io(env, ios);
	struct osc_object *osc = cl2osc(ios->cis_obj);
	struct cl_page *page;
	struct cl_page *last_page;
	struct osc_page *opg;
	struct pagevec *pvec = &osc_env_info(env)->oti_pagevec;
	int result = 0;

	ENTRY;

	LASSERT(qin->pl_nr > 0);
	/* Handle partial page cases */
	last_page = cl_page_list_last(qin);
	if (oio->oi_lockless) {
		page = cl_page_list_first(qin);
		if (page == last_page) {
			cl_page_clip(env, page, from, to);
		} else {
			if (from != 0)
				cl_page_clip(env, page, from, PAGE_SIZE);
			if (to != PAGE_SIZE)
				cl_page_clip(env, last_page, 0, to);
		}
	}

	ll_pagevec_init(pvec, 0);
	while (qin->pl_nr > 0) {
		struct osc_async_page *oap;

		page = cl_page_list_first(qin);
		opg = osc_cl_page_osc(page, osc);
		oap = &opg->ops_oap;

		LASSERTF(osc == oap->oap_obj,
			 "obj mismatch: %p / %p\n", osc, oap->oap_obj);

		if (!list_empty(&oap->oap_rpc_item)) {
			CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
			       oap, opg);
			result = -EBUSY;
			break;
		}

		/* The page may be already in dirty cache. */
		if (list_empty(&oap->oap_pending_item)) {
			result = osc_page_cache_add(env, opg, io, cb);
			if (result != 0)
				break;
		}

		osc_page_touch_at(env, osc2cl(osc), osc_index(opg),
				  page == last_page ? to : PAGE_SIZE);

		cl_page_list_del(env, qin, page);

		/* if there are no more slots, do the callback & reinit */
		if (pagevec_add(pvec, page->cp_vmpage) == 0) {
			(*cb)(env, io, pvec);
			pagevec_reinit(pvec);
		}
	}
	/* Clean up any partially full pagevecs */
	if (pagevec_count(pvec) != 0)
		(*cb)(env, io, pvec);

	/* Can't access these pages any more. Page can be in transfer and
	 * complete at any time. */

	/* for sync write, kernel will wait for this page to be flushed before
	 * osc_io_end() is called, so release it earlier.
	 * for mkwrite(), it's known there are no further pages. */
	if (cl_io_is_sync_write(io) && oio->oi_active != NULL) {
		osc_extent_release(env, oio->oi_active);
		oio->oi_active = NULL;
	}

	CDEBUG(D_INFO, "%d %d\n", qin->pl_nr, result);
	RETURN(result);
}
EXPORT_SYMBOL(osc_io_commit_async);
void osc_io_extent_release(const struct lu_env *env,
			   const struct cl_io_slice *ios)
{
	struct osc_io *oio = cl2osc_io(env, ios);

	if (oio->oi_active != NULL) {
		osc_extent_release(env, oio->oi_active);
		oio->oi_active = NULL;
	}
}
EXPORT_SYMBOL(osc_io_extent_release);
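/* An import is unusable if it has been invalidated or administratively
 * deactivated, or if it is not in a fully connected (FULL or IDLE) state. */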
static bool osc_import_not_healthy(struct obd_import *imp)
{
	return imp->imp_invalid || imp->imp_deactive ||
	       !(imp->imp_state == LUSTRE_IMP_FULL ||
		 imp->imp_state == LUSTRE_IMP_IDLE);
}
int osc_io_iter_init(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct osc_object *osc = cl2osc(ios->cis_obj);
	struct obd_import *imp = osc_cli(osc)->cl_import;
	struct osc_io *oio = osc_env_io(env);
	int rc = -EIO;

	ENTRY;

	spin_lock(&imp->imp_lock);
	/*
	 * check whether this OSC device is available for non-delay read,
	 * fast switching mirror if we haven't tried all mirrors.
	 */
	if (ios->cis_io->ci_type == CIT_READ && ios->cis_io->ci_ndelay &&
	    !ios->cis_io->ci_tried_all_mirrors && osc_import_not_healthy(imp)) {
		rc = -EWOULDBLOCK;
	} else if (likely(!imp->imp_invalid)) {
		atomic_inc(&osc->oo_nr_ios);
		oio->oi_is_active = 1;
		rc = 0;
	}
	spin_unlock(&imp->imp_lock);

	if (cfs_capable(CAP_SYS_RESOURCE))
		oio->oi_cap_sys_resource = 1;

	RETURN(rc);
}
EXPORT_SYMBOL(osc_io_iter_init);
int osc_io_rw_iter_init(const struct lu_env *env,
			const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct osc_io *oio = osc_env_io(env);
	struct osc_object *osc = cl2osc(ios->cis_obj);
	unsigned long npages;

	ENTRY;

	if (cl_io_is_append(io))
		RETURN(osc_io_iter_init(env, ios));

	npages = io->u.ci_rw.crw_count >> PAGE_SHIFT;
	if (io->u.ci_rw.crw_pos & ~PAGE_MASK)
		++npages;
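	/* reserve LRU slots up front so the pages of this IO cannot be
	 * reclaimed by other IOs while it is in flight */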
	oio->oi_lru_reserved = osc_lru_reserve(osc_cli(osc), npages);

	RETURN(osc_io_iter_init(env, ios));
}
EXPORT_SYMBOL(osc_io_rw_iter_init);
void osc_io_iter_fini(const struct lu_env *env,
		      const struct cl_io_slice *ios)
{
	struct osc_io *oio = osc_env_io(env);

	if (oio->oi_is_active) {
		struct osc_object *osc = cl2osc(ios->cis_obj);

		oio->oi_is_active = 0;
		LASSERT(atomic_read(&osc->oo_nr_ios) > 0);
		if (atomic_dec_and_test(&osc->oo_nr_ios))
			wake_up_all(&osc->oo_io_waitq);
	}
}
EXPORT_SYMBOL(osc_io_iter_fini);
void osc_io_rw_iter_fini(const struct lu_env *env,
			 const struct cl_io_slice *ios)
{
	struct osc_io *oio = osc_env_io(env);
	struct osc_object *osc = cl2osc(ios->cis_obj);

	if (oio->oi_lru_reserved > 0) {
		osc_lru_unreserve(osc_cli(osc), oio->oi_lru_reserved);
		oio->oi_lru_reserved = 0;
	}
	oio->oi_write_osclock = NULL;

	osc_io_iter_fini(env, ios);
}
EXPORT_SYMBOL(osc_io_rw_iter_fini);
int osc_io_fault_start(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct cl_io *io;
	struct cl_fault_io *fio;

	ENTRY;

	io = ios->cis_io;
	fio = &io->u.ci_fault;
	CDEBUG(D_INFO, "%lu %d %zu\n",
	       fio->ft_index, fio->ft_writable, fio->ft_nob);

	/*
	 * If mapping is writable, adjust kms to cover this page,
	 * but do not extend kms beyond actual file size.
	 */
	if (fio->ft_writable)
		osc_page_touch_at(env, ios->cis_obj,
				  fio->ft_index, fio->ft_nob);

	RETURN(0);
}
EXPORT_SYMBOL(osc_io_fault_start);
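/* Common upcall for asynchronous OST RPCs: record the return code and wake
 * the thread waiting in the matching *_end() method. */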
static int osc_async_upcall(void *a, int rc)
{
	struct osc_async_cbargs *args = a;

	args->opc_rc = rc;
	complete(&args->opc_sync);
	return 0;
}
/**
 * Checks that there are no pages being written in the extent being truncated.
 */
static bool trunc_check_cb(const struct lu_env *env, struct cl_io *io,
			   struct osc_page *ops, void *cbdata)
{
	struct cl_page *page = ops->ops_cl.cpl_page;
	struct osc_async_page *oap;
	__u64 start = *(__u64 *)cbdata;

	oap = &ops->ops_oap;
	if (oap->oap_cmd & OBD_BRW_WRITE &&
	    !list_empty(&oap->oap_pending_item))
		CL_PAGE_DEBUG(D_ERROR, env, page, "exists %llu/%s.\n",
			      start, current->comm);

	if (PageLocked(page->cp_vmpage))
		CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n",
		       ops, osc_index(ops), oap->oap_cmd & OBD_BRW_RWMASK);

	return true;
}
static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
			    struct osc_io *oio, __u64 size)
{
	struct cl_object *clob;
	int partial;
	pgoff_t start;

	clob = oio->oi_cl.cis_obj;
	start = cl_index(clob, size);
	partial = cl_offset(clob, start) < size;

	/*
	 * Complain if there are pages in the truncated region.
	 */
	osc_page_gang_lookup(env, io, cl2osc(clob),
			     start + partial, CL_PAGE_EOF,
			     trunc_check_cb, (void *)&size);
}
static int osc_io_setattr_start(const struct lu_env *env,
				const struct cl_io_slice *slice)
{
	struct cl_io *io = slice->cis_io;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct cl_object *obj = slice->cis_obj;
	struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	struct obdo *oa = &oio->oi_oa;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	unsigned int ia_avalid = io->u.ci_setattr.sa_avalid;
	enum op_xvalid ia_xvalid = io->u.ci_setattr.sa_xvalid;
	int result = 0;
	__u64 size = io->u.ci_setattr.sa_attr.lvb_size;
	__u64 end = OBD_OBJECT_EOF;
	bool io_is_falloc = false;

	ENTRY;

	/* truncate cache dirty pages first */
	if (cl_io_is_trunc(io)) {
		result = osc_cache_truncate_start(env, cl2osc(obj), size,
						  &oio->oi_trunc);
	} else if (cl_io_is_fallocate(io)) {
		io_is_falloc = true;
		size = io->u.ci_setattr.sa_falloc_offset;
		end = io->u.ci_setattr.sa_falloc_end;
	}
	if (result == 0 && oio->oi_lockless == 0) {
		cl_object_attr_lock(obj);
		result = cl_object_attr_get(env, obj, attr);
		if (result == 0) {
			struct ost_lvb *lvb = &io->u.ci_setattr.sa_attr;
			unsigned int cl_valid = 0;

			if (ia_avalid & ATTR_SIZE) {
				if (io_is_falloc) {
					attr->cat_size =
						io->u.ci_setattr.sa_attr.lvb_size;
					attr->cat_kms = attr->cat_size;
				} else {
					attr->cat_size = size;
					attr->cat_kms = size;
				}
				cl_valid = (CAT_SIZE | CAT_KMS);
			}
			if (ia_avalid & ATTR_MTIME_SET) {
				attr->cat_mtime = lvb->lvb_mtime;
				cl_valid |= CAT_MTIME;
			}
			if (ia_avalid & ATTR_ATIME_SET) {
				attr->cat_atime = lvb->lvb_atime;
				cl_valid |= CAT_ATIME;
			}
			if (ia_xvalid & OP_XVALID_CTIME_SET) {
				attr->cat_ctime = lvb->lvb_ctime;
				cl_valid |= CAT_CTIME;
			}
			result = cl_object_attr_update(env, obj, attr,
						       cl_valid);
		}
		cl_object_attr_unlock(obj);
	}
	memset(oa, 0, sizeof(*oa));
	if (result == 0) {
		oa->o_oi = loi->loi_oi;
		obdo_set_parent_fid(oa, io->u.ci_setattr.sa_parent_fid);
		oa->o_stripe_idx = io->u.ci_setattr.sa_stripe_index;
		oa->o_layout = io->u.ci_setattr.sa_layout;
		oa->o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP |
			       OBD_MD_FLOSTLAYOUT;
		if (ia_avalid & ATTR_CTIME) {
			oa->o_valid |= OBD_MD_FLCTIME;
			oa->o_ctime = attr->cat_ctime;
		}
		if (ia_avalid & ATTR_ATIME) {
			oa->o_valid |= OBD_MD_FLATIME;
			oa->o_atime = attr->cat_atime;
		}
		if (ia_avalid & ATTR_MTIME) {
			oa->o_valid |= OBD_MD_FLMTIME;
			oa->o_mtime = attr->cat_mtime;
		}
		if (ia_avalid & ATTR_SIZE) {
			if (io_is_falloc) {
				oa->o_size = size;
				oa->o_blocks = end;
				oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
			} else {
				oa->o_size = size;
				oa->o_blocks = OBD_OBJECT_EOF;
				oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
			}
			if (oio->oi_lockless) {
				oa->o_flags = OBD_FL_SRVLOCK;
				oa->o_valid |= OBD_MD_FLFLAGS;
			}

			if (io->ci_layout_version > 0) {
				/* verify layout version */
				oa->o_valid |= OBD_MD_LAYOUT_VERSION;
				oa->o_layout_version = io->ci_layout_version;
			}
		} else {
			LASSERT(oio->oi_lockless == 0);
		}

		if (ia_xvalid & OP_XVALID_FLAGS) {
			oa->o_flags = io->u.ci_setattr.sa_attr_flags;
			oa->o_valid |= OBD_MD_FLFLAGS;
		}

		init_completion(&cbargs->opc_sync);
		if (io_is_falloc) {
			int falloc_mode = io->u.ci_setattr.sa_falloc_mode;

			result = osc_fallocate_base(osc_export(cl2osc(obj)),
						    oa, osc_async_upcall,
						    cbargs, falloc_mode);
		} else if (ia_avalid & ATTR_SIZE) {
			result = osc_punch_send(osc_export(cl2osc(obj)),
						oa, osc_async_upcall, cbargs);
		} else {
			result = osc_setattr_async(osc_export(cl2osc(obj)),
						   oa, osc_async_upcall,
						   cbargs, PTLRPCD_SET);
		}
		cbargs->opc_rpc_sent = result == 0;
	}

	RETURN(result);
}
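/*
 * Wait for the setattr/punch/fallocate RPC issued by osc_io_setattr_start()
 * and fold the returned attributes back into the cl_object.
 */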
void osc_io_setattr_end(const struct lu_env *env,
			const struct cl_io_slice *slice)
{
	struct cl_io *io = slice->cis_io;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct cl_object *obj = slice->cis_obj;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	struct obdo *oa = &oio->oi_oa;
	unsigned int cl_valid = 0;
	int result = 0;

	if (cbargs->opc_rpc_sent) {
		wait_for_completion(&cbargs->opc_sync);
		result = io->ci_result = cbargs->opc_rc;
	}

	if (result == 0) {
		if (oio->oi_lockless) {
			/* lockless truncate or fallocate */
			struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);

			LASSERT(cl_io_is_trunc(io) || cl_io_is_fallocate(io));
			/* XXX: Need a lock. */
			osd->od_stats.os_lockless_truncates++;
		}
	}
	if (cl_io_is_trunc(io)) {
		__u64 size = io->u.ci_setattr.sa_attr.lvb_size;

		if (result == 0) {
			cl_object_attr_lock(obj);
			if (oa->o_valid & OBD_MD_FLBLOCKS) {
				attr->cat_blocks = oa->o_blocks;
				cl_valid |= CAT_BLOCKS;
			}

			cl_object_attr_update(env, obj, attr, cl_valid);
			cl_object_attr_unlock(obj);
		}
		osc_trunc_check(env, io, oio, size);
		osc_cache_truncate_end(env, oio->oi_trunc);
		oio->oi_trunc = NULL;
	}
	if (cl_io_is_fallocate(io)) {
		if (result == 0) {
			cl_object_attr_lock(obj);
			if (oa->o_valid & OBD_MD_FLBLOCKS) {
				attr->cat_blocks = oa->o_blocks;
				cl_valid |= CAT_BLOCKS;
			}

			cl_object_attr_update(env, obj, attr, cl_valid);
			cl_object_attr_unlock(obj);
		}
	}
}
EXPORT_SYMBOL(osc_io_setattr_end);
struct osc_data_version_args {
	struct osc_io *dva_oio;
};
static int
osc_data_version_interpret(const struct lu_env *env, struct ptlrpc_request *req,
			   void *args, int rc)
{
	struct osc_data_version_args *dva = args;
	struct osc_io *oio = dva->dva_oio;
	const struct ost_body *body;

	ENTRY;

	if (rc < 0)
		GOTO(out, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, &oio->oi_oa,
			     &body->oa);
	EXIT;
out:
	oio->oi_cbarg.opc_rc = rc;
	complete(&oio->oi_cbarg.opc_sync);

	RETURN(0);
}
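/*
 * Ask the OST for the object's current data version (and layout version,
 * when the server reports one) with an OST_GETATTR RPC. LL_DV_RD_FLUSH and
 * LL_DV_WR_FLUSH request a server-side lock so other clients' caches are
 * flushed first; LL_DV_WR_FLUSH additionally forces data to disk.
 */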
static int osc_io_data_version_start(const struct lu_env *env,
				     const struct cl_io_slice *slice)
{
	struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct obdo *oa = &oio->oi_oa;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	struct osc_object *obj = cl2osc(slice->cis_obj);
	struct lov_oinfo *loi = obj->oo_oinfo;
	struct obd_export *exp = osc_export(obj);
	struct ptlrpc_request *req;
	struct ost_body *body;
	struct osc_data_version_args *dva;
	int rc;

	ENTRY;

	memset(oa, 0, sizeof(*oa));
	oa->o_oi = loi->loi_oi;
	oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;

	if (dv->dv_flags & (LL_DV_RD_FLUSH | LL_DV_WR_FLUSH)) {
		oa->o_valid |= OBD_MD_FLFLAGS;
		oa->o_flags |= OBD_FL_SRVLOCK;
		if (dv->dv_flags & LL_DV_WR_FLUSH)
			oa->o_flags |= OBD_FL_FLUSH;
	}

	init_completion(&cbargs->opc_sync);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
	if (rc < 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);
	req->rq_interpret_reply = osc_data_version_interpret;
	dva = ptlrpc_req_async_args(dva, req);
	dva->dva_oio = oio;

	ptlrpcd_add_req(req);

	RETURN(0);
}
static void osc_io_data_version_end(const struct lu_env *env,
				    const struct cl_io_slice *slice)
{
	struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;

	ENTRY;
	wait_for_completion(&cbargs->opc_sync);

	if (cbargs->opc_rc != 0) {
		slice->cis_io->ci_result = cbargs->opc_rc;
	} else {
		slice->cis_io->ci_result = 0;
		if (!(oio->oi_oa.o_valid &
		      (OBD_MD_LAYOUT_VERSION | OBD_MD_FLDATAVERSION)))
			slice->cis_io->ci_result = -ENOTSUPP;

		if (oio->oi_oa.o_valid & OBD_MD_LAYOUT_VERSION)
			dv->dv_layout_version = oio->oi_oa.o_layout_version;
		if (oio->oi_oa.o_valid & OBD_MD_FLDATAVERSION)
			dv->dv_data_version = oio->oi_oa.o_data_version;
	}

	EXIT;
}
int osc_io_read_start(const struct lu_env *env,
		      const struct cl_io_slice *slice)
{
	struct cl_object *obj = slice->cis_obj;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	int rc = 0;

	ENTRY;

	if (!slice->cis_io->ci_noatime) {
		cl_object_attr_lock(obj);
		attr->cat_atime = ktime_get_real_seconds();
		rc = cl_object_attr_update(env, obj, attr, CAT_ATIME);
		cl_object_attr_unlock(obj);
	}

	RETURN(rc);
}
EXPORT_SYMBOL(osc_io_read_start);
int osc_io_write_start(const struct lu_env *env,
		       const struct cl_io_slice *slice)
{
	struct cl_object *obj = slice->cis_obj;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	int rc = 0;

	ENTRY;

	OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1);
	cl_object_attr_lock(obj);
	attr->cat_mtime = attr->cat_ctime = ktime_get_real_seconds();
	rc = cl_object_attr_update(env, obj, attr, CAT_MTIME | CAT_CTIME);
	cl_object_attr_unlock(obj);

	RETURN(rc);
}
EXPORT_SYMBOL(osc_io_write_start);
int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
		  struct cl_fsync_io *fio)
{
	struct osc_io *oio = osc_env_io(env);
	struct obdo *oa = &oio->oi_oa;
	struct lov_oinfo *loi = obj->oo_oinfo;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	int rc = 0;

	ENTRY;

	memset(oa, 0, sizeof(*oa));
	oa->o_oi = loi->loi_oi;
	oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;

	/* reload size and blocks for start and end of sync range */
	oa->o_size = fio->fi_start;
	oa->o_blocks = fio->fi_end;
	oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;

	obdo_set_parent_fid(oa, fio->fi_fid);

	init_completion(&cbargs->opc_sync);

	rc = osc_sync_base(obj, oa, osc_async_upcall, cbargs, PTLRPCD_SET);
	RETURN(rc);
}
EXPORT_SYMBOL(osc_fsync_ost);
int osc_io_fsync_start(const struct lu_env *env,
		       const struct cl_io_slice *slice)
{
	struct cl_io *io = slice->cis_io;
	struct cl_fsync_io *fio = &io->u.ci_fsync;
	struct cl_object *obj = slice->cis_obj;
	struct osc_object *osc = cl2osc(obj);
	pgoff_t start = cl_index(obj, fio->fi_start);
	pgoff_t end = cl_index(obj, fio->fi_end);
	int result = 0;

	ENTRY;

	if (fio->fi_end == OBD_OBJECT_EOF)
		end = CL_PAGE_EOF;

	result = osc_cache_writeback_range(env, osc, start, end, 0,
					   fio->fi_mode == CL_FSYNC_DISCARD);
	if (result > 0) {
		fio->fi_nr_written += result;
		result = 0;
	}
	if (fio->fi_mode == CL_FSYNC_ALL) {
		int rc;

		/* we have to wait for writeback to finish before we can
		 * send OST_SYNC RPC. This is bad because it causes extents
		 * to be written osc by osc. However, we usually start
		 * writeback before CL_FSYNC_ALL so this won't have any real
		 * problem. */
		rc = osc_cache_wait_range(env, osc, start, end);
		if (result == 0)
			result = rc;
		rc = osc_fsync_ost(env, osc, fio);
		if (result == 0)
			result = rc;
	}

	RETURN(result);
}
void osc_io_fsync_end(const struct lu_env *env,
		      const struct cl_io_slice *slice)
{
	struct cl_fsync_io *fio = &slice->cis_io->u.ci_fsync;
	struct cl_object *obj = slice->cis_obj;
	pgoff_t start = cl_index(obj, fio->fi_start);
	pgoff_t end = cl_index(obj, fio->fi_end);
	int result = 0;

	if (fio->fi_mode == CL_FSYNC_LOCAL) {
		result = osc_cache_wait_range(env, cl2osc(obj), start, end);
	} else if (fio->fi_mode == CL_FSYNC_ALL) {
		struct osc_io *oio = cl2osc_io(env, slice);
		struct osc_async_cbargs *cbargs = &oio->oi_cbarg;

		wait_for_completion(&cbargs->opc_sync);
		if (result == 0)
			result = cbargs->opc_rc;
	}
	slice->cis_io->ci_result = result;
}
EXPORT_SYMBOL(osc_io_fsync_end);
static int osc_io_ladvise_start(const struct lu_env *env,
				const struct cl_io_slice *slice)
{
	int result = 0;
	struct cl_io *io = slice->cis_io;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct cl_object *obj = slice->cis_obj;
	struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
	struct cl_ladvise_io *lio = &io->u.ci_ladvise;
	struct obdo *oa = &oio->oi_oa;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	struct lu_ladvise *ladvise;
	struct ladvise_hdr *ladvise_hdr;
	int buf_size;
	int num_advise = 1;

	ENTRY;

	/* TODO: add multiple ladvise support in CLIO */
	buf_size = offsetof(typeof(*ladvise_hdr), lah_advise[num_advise]);
	if (osc_env_info(env)->oti_ladvise_buf.lb_len < buf_size)
		lu_buf_realloc(&osc_env_info(env)->oti_ladvise_buf, buf_size);

	ladvise_hdr = osc_env_info(env)->oti_ladvise_buf.lb_buf;
	if (ladvise_hdr == NULL)
		RETURN(-ENOMEM);

	memset(ladvise_hdr, 0, buf_size);
	ladvise_hdr->lah_magic = LADVISE_MAGIC;
	ladvise_hdr->lah_count = num_advise;
	ladvise_hdr->lah_flags = lio->li_flags;

	memset(oa, 0, sizeof(*oa));
	oa->o_oi = loi->loi_oi;
	oa->o_valid = OBD_MD_FLID;
	obdo_set_parent_fid(oa, lio->li_fid);

	ladvise = ladvise_hdr->lah_advise;
	ladvise->lla_start = lio->li_start;
	ladvise->lla_end = lio->li_end;
	ladvise->lla_advice = lio->li_advice;

	if (lio->li_flags & LF_ASYNC) {
		result = osc_ladvise_base(osc_export(cl2osc(obj)), oa,
					  ladvise_hdr, NULL, NULL, NULL);
	} else {
		init_completion(&cbargs->opc_sync);
		result = osc_ladvise_base(osc_export(cl2osc(obj)), oa,
					  ladvise_hdr, osc_async_upcall,
					  cbargs, PTLRPCD_SET);
		cbargs->opc_rpc_sent = result == 0;
	}
	RETURN(result);
}
static void osc_io_ladvise_end(const struct lu_env *env,
			       const struct cl_io_slice *slice)
{
	struct cl_io *io = slice->cis_io;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	int result = 0;
	struct cl_ladvise_io *lio = &io->u.ci_ladvise;

	if ((!(lio->li_flags & LF_ASYNC)) && cbargs->opc_rpc_sent) {
		wait_for_completion(&cbargs->opc_sync);
		result = cbargs->opc_rc;
	}
	slice->cis_io->ci_result = result;
}
void osc_io_end(const struct lu_env *env, const struct cl_io_slice *slice)
{
	struct osc_io *oio = cl2osc_io(env, slice);

	if (oio->oi_active) {
		osc_extent_release(env, oio->oi_active);
		oio->oi_active = NULL;
	}
}
EXPORT_SYMBOL(osc_io_end);
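/*
 * SEEK_HOLE/SEEK_DATA support: ask the OST where the next hole or data
 * region starts within this stripe object, short-circuiting locally when
 * the answer is already known from the LVB or the server lacks support.
 */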
struct osc_lseek_args {
	struct osc_io *lsa_oio;
};

static int osc_lseek_interpret(const struct lu_env *env,
			       struct ptlrpc_request *req,
			       void *arg, int rc)
{
	struct ost_body *reply;
	struct osc_lseek_args *lsa = arg;
	struct osc_io *oio = lsa->lsa_oio;
	struct cl_io *io = oio->oi_cl.cis_io;
	struct cl_lseek_io *lsio = &io->u.ci_lseek;

	ENTRY;

	if (rc != 0)
		GOTO(out, rc);

	reply = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (reply == NULL)
		GOTO(out, rc = -EPROTO);

	lsio->ls_result = reply->oa.o_size;
out:
	osc_async_upcall(&oio->oi_cbarg, rc);
	RETURN(rc);
}
int osc_io_lseek_start(const struct lu_env *env,
		       const struct cl_io_slice *slice)
{
	struct cl_io *io = slice->cis_io;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct cl_object *obj = slice->cis_obj;
	struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
	struct cl_lseek_io *lsio = &io->u.ci_lseek;
	struct obdo *oa = &oio->oi_oa;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	struct obd_export *exp = osc_export(cl2osc(obj));
	struct ptlrpc_request *req;
	struct ost_body *body;
	struct osc_lseek_args *lsa;
	int rc = 0;

	ENTRY;

	/* No negative values at this point */
	LASSERT(lsio->ls_start >= 0);
	LASSERT(lsio->ls_whence == SEEK_HOLE || lsio->ls_whence == SEEK_DATA);

	/* with IO lock taken we have object size in LVB and can check
	 * boundaries prior sending LSEEK RPC
	 */
	if (lsio->ls_start >= loi->loi_lvb.lvb_size) {
		/* consider area beyond end of object as hole */
		if (lsio->ls_whence == SEEK_HOLE)
			lsio->ls_result = lsio->ls_start;
		else
			lsio->ls_result = -ENXIO;
		RETURN(0);
	}

	/* if LSEEK RPC is not supported by server, consider whole stripe
	 * object is data with hole after end of object
	 */
	if (!exp_connect_lseek(exp)) {
		if (lsio->ls_whence == SEEK_HOLE)
			lsio->ls_result = loi->loi_lvb.lvb_size;
		else
			lsio->ls_result = lsio->ls_start;
		RETURN(0);
	}

	memset(oa, 0, sizeof(*oa));
	oa->o_oi = loi->loi_oi;
	oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
	oa->o_size = lsio->ls_start;
	oa->o_mode = lsio->ls_whence;
	if (oio->oi_lockless) {
		oa->o_flags = OBD_FL_SRVLOCK;
		oa->o_valid |= OBD_MD_FLFLAGS;
	}

	init_completion(&cbargs->opc_sync);
	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SEEK);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SEEK);
	if (rc < 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
	ptlrpc_request_set_replen(req);
	req->rq_interpret_reply = osc_lseek_interpret;
	lsa = ptlrpc_req_async_args(lsa, req);
	lsa->lsa_oio = oio;

	ptlrpcd_add_req(req);
	cbargs->opc_rpc_sent = 1;

	RETURN(0);
}
EXPORT_SYMBOL(osc_io_lseek_start);
void osc_io_lseek_end(const struct lu_env *env,
		      const struct cl_io_slice *slice)
{
	struct osc_io *oio = cl2osc_io(env, slice);
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	int rc = 0;

	if (cbargs->opc_rpc_sent) {
		wait_for_completion(&cbargs->opc_sync);
		rc = cbargs->opc_rc;
	}
	slice->cis_io->ci_result = rc;
}
EXPORT_SYMBOL(osc_io_lseek_end);
static const struct cl_io_operations osc_io_ops = {
	.op = {
		[CIT_READ] = {
			.cio_iter_init = osc_io_rw_iter_init,
			.cio_iter_fini = osc_io_rw_iter_fini,
			.cio_start = osc_io_read_start,
			.cio_fini = osc_io_fini
		},
		[CIT_WRITE] = {
			.cio_iter_init = osc_io_rw_iter_init,
			.cio_iter_fini = osc_io_rw_iter_fini,
			.cio_start = osc_io_write_start,
			.cio_end = osc_io_end,
			.cio_fini = osc_io_fini
		},
		[CIT_SETATTR] = {
			.cio_iter_init = osc_io_iter_init,
			.cio_iter_fini = osc_io_iter_fini,
			.cio_start = osc_io_setattr_start,
			.cio_end = osc_io_setattr_end
		},
		[CIT_DATA_VERSION] = {
			.cio_start = osc_io_data_version_start,
			.cio_end = osc_io_data_version_end,
		},
		[CIT_FAULT] = {
			.cio_iter_init = osc_io_iter_init,
			.cio_iter_fini = osc_io_iter_fini,
			.cio_start = osc_io_fault_start,
			.cio_end = osc_io_end,
			.cio_fini = osc_io_fini
		},
		[CIT_FSYNC] = {
			.cio_start = osc_io_fsync_start,
			.cio_end = osc_io_fsync_end,
			.cio_fini = osc_io_fini
		},
		[CIT_LADVISE] = {
			.cio_start = osc_io_ladvise_start,
			.cio_end = osc_io_ladvise_end,
			.cio_fini = osc_io_fini
		},
		[CIT_LSEEK] = {
			.cio_start = osc_io_lseek_start,
			.cio_end = osc_io_lseek_end,
			.cio_fini = osc_io_fini
		},
		[CIT_MISC] = {
			.cio_fini = osc_io_fini
		}
	},
	.cio_read_ahead = osc_io_read_ahead,
	.cio_submit = osc_io_submit,
	.cio_commit_async = osc_io_commit_async,
	.cio_extent_release = osc_io_extent_release
};
/*****************************************************************************
 *
 * Transfer operations.
 *
 */
int osc_io_init(const struct lu_env *env,
		struct cl_object *obj, struct cl_io *io)
{
	struct osc_io *oio = osc_env_io(env);

	CL_IO_SLICE_CLEAN(oio, oi_cl);
	cl_io_slice_add(io, &oio->oi_cl, obj, &osc_io_ops);
	return 0;
}
EXPORT_SYMBOL(osc_io_init);