/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for OSC layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */
#define DEBUG_SUBSYSTEM S_OSC

#include <lustre_obdo.h>

#include "osc_cl_internal.h"
/*****************************************************************************
 *
 * Type conversions.
 *
 */
static struct osc_io *cl2osc_io(const struct lu_env *env,
                                const struct cl_io_slice *slice)
{
        struct osc_io *oio = container_of0(slice, struct osc_io, oi_cl);

        LINVRNT(oio == osc_env_io(env));
        return oio;
}
/*****************************************************************************
 *
 * io operations.
 *
 */
static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io)
{
}
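
/* Read-ahead release callback installed by osc_io_read_ahead() below: drop
 * the LCK_PR reference taken there and put the DLM lock. */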
static void osc_read_ahead_release(const struct lu_env *env,
                                   void *cbdata)
{
        struct ldlm_lock *dlmlock = cbdata;
        struct lustre_handle lockh;

        ldlm_lock2handle(dlmlock, &lockh);
        ldlm_lock_decref(&lockh, LCK_PR);
        LDLM_LOCK_PUT(dlmlock);
}
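
/* cio_read_ahead() for OSC: look up a cached DLM lock covering page @start.
 * If one is found, convert the held reference to LCK_PR, report how far
 * read-ahead may extend under that lock, and hand the reference over to the
 * release callback above. Returns -ENODATA when no lock covers the page. */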
static int osc_io_read_ahead(const struct lu_env *env,
                             const struct cl_io_slice *ios,
                             pgoff_t start, struct cl_read_ahead *ra)
{
        struct osc_object *osc = cl2osc(ios->cis_obj);
        struct ldlm_lock *dlmlock;
        int result = -ENODATA;
        ENTRY;

        dlmlock = osc_dlmlock_at_pgoff(env, osc, start, 0);
        if (dlmlock != NULL) {
                LASSERT(dlmlock->l_ast_data == osc);
                if (dlmlock->l_req_mode != LCK_PR) {
                        struct lustre_handle lockh;
                        ldlm_lock2handle(dlmlock, &lockh);
                        ldlm_lock_addref(&lockh, LCK_PR);
                        ldlm_lock_decref(&lockh, dlmlock->l_req_mode);
                }

                ra->cra_rpc_size = osc_cli(osc)->cl_max_pages_per_rpc;
                ra->cra_end = cl_index(osc2cl(osc),
                                       dlmlock->l_policy_data.l_extent.end);
                ra->cra_release = osc_read_ahead_release;
                ra->cra_cbdata = dlmlock;
                result = 0;
        }

        RETURN(result);
}
/**
 * An implementation of cl_io_operations::cio_submit() method for the osc
 * layer. Iterates over pages in the in-queue, prepares each for io by
 * calling cl_page_prep() and submits it through osc_page_submit(); the
 * queued pages are then flushed with osc_queue_sync_pages().
 */
static int osc_io_submit(const struct lu_env *env,
                         const struct cl_io_slice *ios,
                         enum cl_req_type crt, struct cl_2queue *queue)
{
        struct cl_page    *page;
        struct cl_page    *tmp;
        struct client_obd *cli = NULL;
        struct osc_object *osc = NULL; /* to keep gcc happy */
        struct osc_page   *opg;
        struct cl_io      *io;
        struct list_head  list = LIST_HEAD_INIT(list);

        struct cl_page_list *qin  = &queue->c2_qin;
        struct cl_page_list *qout = &queue->c2_qout;
        unsigned int queued = 0;
        int result = 0;
        int cmd;
        int brw_flags;
        unsigned int max_pages;

        LASSERT(qin->pl_nr > 0);

        CDEBUG(D_CACHE|D_READA, "%d %d\n", qin->pl_nr, crt);

        osc = cl2osc(ios->cis_obj);
        cli = osc_cli(osc);
        max_pages = cli->cl_max_pages_per_rpc;

        cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
        brw_flags = osc_io_srvlock(cl2osc_io(env, ios)) ? OBD_BRW_SRVLOCK : 0;

        /*
         * NOTE: here @page is a top-level page. This is done to avoid
         *       creation of sub-page-list.
         */
        cl_page_list_for_each_safe(page, tmp, qin) {
                struct osc_async_page *oap;

                /* Top level IO. */
                io = page->cp_owner;
                LASSERT(io != NULL);

                opg = osc_cl_page_osc(page, osc);
                oap = &opg->ops_oap;
                LASSERT(osc == oap->oap_obj);

                if (!list_empty(&oap->oap_pending_item) ||
                    !list_empty(&oap->oap_rpc_item)) {
                        CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
                               oap, opg);
                        result = -EBUSY;
                        break;
                }

                result = cl_page_prep(env, io, page, crt);
                if (result != 0) {
                        LASSERT(result < 0);
                        if (result != -EALREADY)
                                break;
                        /*
                         * Handle -EALREADY error: for read case, the page is
                         * already in UPTODATE state; for write, the page
                         * is not dirty.
                         */
                        result = 0;
                        continue;
                }

                spin_lock(&oap->oap_lock);
                oap->oap_async_flags = ASYNC_URGENT|ASYNC_READY;
                oap->oap_async_flags |= ASYNC_COUNT_STABLE;
                spin_unlock(&oap->oap_lock);

                osc_page_submit(env, opg, crt, brw_flags);
                list_add_tail(&oap->oap_pending_item, &list);

                if (page->cp_sync_io != NULL)
                        cl_page_list_move(qout, qin, page);
                else /* async IO */
                        cl_page_list_del(env, qin, page);

                if (++queued == max_pages) {
                        queued = 0;
                        result = osc_queue_sync_pages(env, osc, &list, cmd,
                                                      brw_flags);
                        if (result < 0)
                                break;
                }
        }

        if (queued > 0)
                result = osc_queue_sync_pages(env, osc, &list, cmd, brw_flags);

        /* Update c/mtime for sync write. LU-7310 */
        if (qout->pl_nr > 0 && result == 0) {
                struct cl_object *obj = ios->cis_obj;
                struct cl_attr *attr = &osc_env_info(env)->oti_attr;

                cl_object_attr_lock(obj);
                attr->cat_mtime = attr->cat_ctime = LTIME_S(CURRENT_TIME);
                cl_object_attr_update(env, obj, attr, CAT_MTIME | CAT_CTIME);
                cl_object_attr_unlock(obj);
        }

        CDEBUG(D_INFO, "%d/%d %d\n", qin->pl_nr, qout->pl_nr, result);
        return qout->pl_nr > 0 ? 0 : result;
}
/**
 * This is called when a page is accessed within a file in a way that creates
 * a new page, if one were missing (i.e., if there was a hole at that place in
 * the file, or if the accessed page is beyond the current file size).
 *
 * Expand stripe KMS if necessary.
 */
static void osc_page_touch_at(const struct lu_env *env,
                              struct cl_object *obj, pgoff_t idx, size_t to)
{
        struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        int valid;
        __u64 kms;

        /* offset within stripe */
        kms = cl_offset(obj, idx) + to;

        cl_object_attr_lock(obj);
        /* XXX old code used
         *         ll_inode_size_lock(inode, 0); lov_stripe_lock(lsm);
         * here */
        CDEBUG(D_INODE, "stripe KMS %sincreasing %llu->%llu %llu\n",
               kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
               loi->loi_lvb.lvb_size);

        attr->cat_mtime = attr->cat_ctime = LTIME_S(CURRENT_TIME);
        valid = CAT_MTIME | CAT_CTIME;
        if (kms > loi->loi_kms) {
                attr->cat_kms = kms;
                valid |= CAT_KMS;
        }
        if (kms > loi->loi_lvb.lvb_size) {
                attr->cat_size = kms;
                valid |= CAT_SIZE;
        }
        cl_object_attr_update(env, obj, attr, valid);
        cl_object_attr_unlock(obj);
}
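
/* cio_commit_async() for OSC: move pages dirtied by a buffered write into the
 * OSC cache. For lockless io the partial first/last pages are clipped, KMS is
 * pushed forward through osc_page_touch_at(), and @cb is invoked for every
 * page once it has been handed over to the cache. */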
static int osc_io_commit_async(const struct lu_env *env,
                               const struct cl_io_slice *ios,
                               struct cl_page_list *qin, int from, int to,
                               cl_commit_cbt cb)
{
        struct cl_io *io = ios->cis_io;
        struct osc_io *oio = cl2osc_io(env, ios);
        struct osc_object *osc = cl2osc(ios->cis_obj);
        struct cl_page *page;
        struct cl_page *last_page;
        struct osc_page *opg;
        int result = 0;
        ENTRY;

        LASSERT(qin->pl_nr > 0);

        /* Handle partial page cases */
        last_page = cl_page_list_last(qin);
        if (oio->oi_lockless) {
                page = cl_page_list_first(qin);
                if (page == last_page) {
                        cl_page_clip(env, page, from, to);
                } else {
                        if (from != 0)
                                cl_page_clip(env, page, from, PAGE_SIZE);
                        if (to != PAGE_SIZE)
                                cl_page_clip(env, last_page, 0, to);
                }
        }

        while (qin->pl_nr > 0) {
                struct osc_async_page *oap;

                page = cl_page_list_first(qin);
                opg = osc_cl_page_osc(page, osc);
                oap = &opg->ops_oap;

                if (!list_empty(&oap->oap_rpc_item)) {
                        CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
                               oap, opg);
                        result = -EBUSY;
                        break;
                }

                /* The page may be already in dirty cache. */
                if (list_empty(&oap->oap_pending_item)) {
                        result = osc_page_cache_add(env, &opg->ops_cl, io);
                        if (result != 0)
                                break;
                }

                osc_page_touch_at(env, osc2cl(osc), osc_index(opg),
                                  page == last_page ? to : PAGE_SIZE);

                cl_page_list_del(env, qin, page);

                (*cb)(env, io, page);
                /* Can't access page any more. Page can be in transfer and
                 * complete at any time. */
        }

        /* For sync write, the kernel will wait for this page to be flushed
         * before osc_io_end() is called, so release it earlier.
         * For mkwrite(), it is known that there are no further pages. */
        if (cl_io_is_sync_write(io) && oio->oi_active != NULL) {
                osc_extent_release(env, oio->oi_active);
                oio->oi_active = NULL;
        }

        CDEBUG(D_INFO, "%d %d\n", qin->pl_nr, result);
        RETURN(result);
}
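
/* Per-iteration setup: fail if the OST import has been invalidated, otherwise
 * mark the io active and pin the object through oo_nr_ios. */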
static int osc_io_iter_init(const struct lu_env *env,
                            const struct cl_io_slice *ios)
{
        struct osc_object *osc = cl2osc(ios->cis_obj);
        struct obd_import *imp = osc_cli(osc)->cl_import;
        int rc = -EIO;

        spin_lock(&imp->imp_lock);
        if (likely(!imp->imp_invalid)) {
                struct osc_io *oio = osc_env_io(env);

                atomic_inc(&osc->oo_nr_ios);
                oio->oi_is_active = 1;
                rc = 0;
        }
        spin_unlock(&imp->imp_lock);

        return rc;
}
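
/* Write iterations additionally reserve LRU slots for the pages that may be
 * dirtied, so cache usage is accounted up front; the reservation is returned
 * in osc_io_write_iter_fini(). */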
static int osc_io_write_iter_init(const struct lu_env *env,
                                  const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        struct osc_io *oio = osc_env_io(env);
        struct osc_object *osc = cl2osc(ios->cis_obj);
        unsigned long npages;
        ENTRY;

        if (cl_io_is_append(io))
                RETURN(osc_io_iter_init(env, ios));

        npages = io->u.ci_rw.crw_count >> PAGE_SHIFT;
        if (io->u.ci_rw.crw_pos & ~PAGE_MASK)
                ++npages;

        oio->oi_lru_reserved = osc_lru_reserve(osc_cli(osc), npages);

        RETURN(osc_io_iter_init(env, ios));
}
static void osc_io_iter_fini(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct osc_io *oio = osc_env_io(env);

        if (oio->oi_is_active) {
                struct osc_object *osc = cl2osc(ios->cis_obj);

                oio->oi_is_active = 0;
                LASSERT(atomic_read(&osc->oo_nr_ios) > 0);
                if (atomic_dec_and_test(&osc->oo_nr_ios))
                        wake_up_all(&osc->oo_io_waitq);
        }
}
static void osc_io_write_iter_fini(const struct lu_env *env,
                                   const struct cl_io_slice *ios)
{
        struct osc_io *oio = osc_env_io(env);
        struct osc_object *osc = cl2osc(ios->cis_obj);

        if (oio->oi_lru_reserved > 0) {
                osc_lru_unreserve(osc_cli(osc), oio->oi_lru_reserved);
                oio->oi_lru_reserved = 0;
        }
        oio->oi_write_osclock = NULL;

        osc_io_iter_fini(env, ios);
}
static int osc_io_fault_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct cl_io *io;
        struct cl_fault_io *fio;
        ENTRY;

        io = ios->cis_io;
        fio = &io->u.ci_fault;
        CDEBUG(D_INFO, "%lu %d %zu\n",
               fio->ft_index, fio->ft_writable, fio->ft_nob);
        /*
         * If mapping is writeable, adjust kms to cover this page,
         * but do not extend kms beyond actual file size.
         */
        if (fio->ft_writable)
                osc_page_touch_at(env, ios->cis_obj,
                                  fio->ft_index, fio->ft_nob);
        RETURN(0);
}
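
/* Completion upcall shared by the asynchronous OST RPCs issued below
 * (setattr, punch, sync, ladvise): record the return code and wake up the
 * waiter in the corresponding *_end() method. */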
static int osc_async_upcall(void *a, int rc)
{
        struct osc_async_cbargs *args = a;

        args->opc_rc = rc;
        complete(&args->opc_sync);
        return 0;
}
/**
 * Checks that there are no pages being written in the extent being truncated.
 */
static int trunc_check_cb(const struct lu_env *env, struct cl_io *io,
                          struct osc_page *ops, void *cbdata)
{
        struct cl_page *page = ops->ops_cl.cpl_page;
        struct osc_async_page *oap;
        __u64 start = *(__u64 *)cbdata;

        oap = &ops->ops_oap;
        if (oap->oap_cmd & OBD_BRW_WRITE &&
            !list_empty(&oap->oap_pending_item))
                CL_PAGE_DEBUG(D_ERROR, env, page, "exists %llu/%s.\n",
                              start, current->comm);

        if (PageLocked(page->cp_vmpage))
                CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n",
                       ops, osc_index(ops), oap->oap_cmd & OBD_BRW_RWMASK);

        return CLP_GANG_OKAY;
}
static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
                            struct osc_io *oio, __u64 size)
{
        struct cl_object *clob;
        int partial;
        pgoff_t start;

        clob = oio->oi_cl.cis_obj;
        start = cl_index(clob, size);
        partial = cl_offset(clob, start) < size;

        /*
         * Complain if there are pages in the truncated region.
         */
        osc_page_gang_lookup(env, io, cl2osc(clob),
                             start + partial, CL_PAGE_EOF,
                             trunc_check_cb, (void *)&size);
}
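
/* cio_start() for CIT_SETATTR: for truncate, flush or drop cached pages
 * beyond the new size first, then update the cl_object attributes and send an
 * asynchronous OST_PUNCH (size change) or OST_SETATTR (time/flag change) to
 * the OST. The reply is waited for in osc_io_setattr_end(). */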
static int osc_io_setattr_start(const struct lu_env *env,
                                const struct cl_io_slice *slice)
{
        struct cl_io *io = slice->cis_io;
        struct osc_io *oio = cl2osc_io(env, slice);
        struct cl_object *obj = slice->cis_obj;
        struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        struct obdo *oa = &oio->oi_oa;
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
        __u64 size = io->u.ci_setattr.sa_attr.lvb_size;
        unsigned int ia_valid = io->u.ci_setattr.sa_valid;
        int result = 0;
        ENTRY;

        /* truncate cache dirty pages first */
        if (cl_io_is_trunc(io))
                result = osc_cache_truncate_start(env, cl2osc(obj), size,
                                                  &oio->oi_trunc);

        if (result == 0 && oio->oi_lockless == 0) {
                cl_object_attr_lock(obj);
                result = cl_object_attr_get(env, obj, attr);
                if (result == 0) {
                        struct ost_lvb *lvb = &io->u.ci_setattr.sa_attr;
                        unsigned int cl_valid = 0;

                        if (ia_valid & ATTR_SIZE) {
                                attr->cat_size = attr->cat_kms = size;
                                cl_valid = (CAT_SIZE | CAT_KMS);
                        }
                        if (ia_valid & ATTR_MTIME_SET) {
                                attr->cat_mtime = lvb->lvb_mtime;
                                cl_valid |= CAT_MTIME;
                        }
                        if (ia_valid & ATTR_ATIME_SET) {
                                attr->cat_atime = lvb->lvb_atime;
                                cl_valid |= CAT_ATIME;
                        }
                        if (ia_valid & ATTR_CTIME_SET) {
                                attr->cat_ctime = lvb->lvb_ctime;
                                cl_valid |= CAT_CTIME;
                        }
                        result = cl_object_attr_update(env, obj, attr,
                                                       cl_valid);
                }
                cl_object_attr_unlock(obj);
        }
        memset(oa, 0, sizeof(*oa));
        if (result == 0) {
                oa->o_oi = loi->loi_oi;
                obdo_set_parent_fid(oa, io->u.ci_setattr.sa_parent_fid);
                oa->o_stripe_idx = io->u.ci_setattr.sa_stripe_index;
                oa->o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP;
                if (ia_valid & ATTR_CTIME) {
                        oa->o_valid |= OBD_MD_FLCTIME;
                        oa->o_ctime = attr->cat_ctime;
                }
                if (ia_valid & ATTR_ATIME) {
                        oa->o_valid |= OBD_MD_FLATIME;
                        oa->o_atime = attr->cat_atime;
                }
                if (ia_valid & ATTR_MTIME) {
                        oa->o_valid |= OBD_MD_FLMTIME;
                        oa->o_mtime = attr->cat_mtime;
                }
                if (ia_valid & ATTR_SIZE) {
                        oa->o_size = size;
                        oa->o_blocks = OBD_OBJECT_EOF;
                        oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;

                        if (oio->oi_lockless) {
                                oa->o_flags = OBD_FL_SRVLOCK;
                                oa->o_valid |= OBD_MD_FLFLAGS;
                        }
                } else {
                        LASSERT(oio->oi_lockless == 0);
                }

                if (ia_valid & ATTR_ATTR_FLAG) {
                        oa->o_flags = io->u.ci_setattr.sa_attr_flags;
                        oa->o_valid |= OBD_MD_FLFLAGS;
                }

                init_completion(&cbargs->opc_sync);

                if (ia_valid & ATTR_SIZE)
                        result = osc_punch_base(osc_export(cl2osc(obj)),
                                                oa, osc_async_upcall,
                                                cbargs, PTLRPCD_SET);
                else
                        result = osc_setattr_async(osc_export(cl2osc(obj)),
                                                   oa, osc_async_upcall,
                                                   cbargs, PTLRPCD_SET);
                cbargs->opc_rpc_sent = result == 0;
        }

        RETURN(result);
}
static void osc_io_setattr_end(const struct lu_env *env,
                               const struct cl_io_slice *slice)
{
        struct cl_io *io = slice->cis_io;
        struct osc_io *oio = cl2osc_io(env, slice);
        struct cl_object *obj = slice->cis_obj;
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
        int result = 0;

        if (cbargs->opc_rpc_sent) {
                wait_for_completion(&cbargs->opc_sync);
                result = io->ci_result = cbargs->opc_rc;
        }
        if (result == 0) {
                if (oio->oi_lockless) {
                        /* lockless truncate */
                        struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);

                        LASSERT(cl_io_is_trunc(io));
                        /* XXX: Need a lock. */
                        osd->od_stats.os_lockless_truncates++;
                }
        }

        if (cl_io_is_trunc(io)) {
                __u64 size = io->u.ci_setattr.sa_attr.lvb_size;

                osc_trunc_check(env, io, oio, size);
                osc_cache_truncate_end(env, oio->oi_trunc);
                oio->oi_trunc = NULL;
        }
}
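
/* Argument block handed to the interpret callback of the data-version
 * OST_GETATTR request below. */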
struct osc_data_version_args {
        struct osc_io *dva_oio;
};
static int
osc_data_version_interpret(const struct lu_env *env, struct ptlrpc_request *req,
                           void *arg, int rc)
{
        struct osc_data_version_args *dva = arg;
        struct osc_io *oio = dva->dva_oio;
        const struct ost_body *body;

        if (rc < 0)
                GOTO(out, rc);
        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);
        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, &oio->oi_oa,
                             &body->oa);
out:
        oio->oi_cbarg.opc_rc = rc;
        complete(&oio->oi_cbarg.opc_sync);
        return 0;
}
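
/* cio_start() for CIT_DATA_VERSION: fetch the object version from the OST
 * with an OST_GETATTR RPC; the LL_DV_RD_FLUSH/LL_DV_WR_FLUSH flags are
 * translated into OBD_FL_SRVLOCK/OBD_FL_FLUSH in the request obdo. */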
static int osc_io_data_version_start(const struct lu_env *env,
                                     const struct cl_io_slice *slice)
{
        struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
        struct osc_io *oio = cl2osc_io(env, slice);
        struct obdo *oa = &oio->oi_oa;
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
        struct osc_object *obj = cl2osc(slice->cis_obj);
        struct lov_oinfo *loi = obj->oo_oinfo;
        struct obd_export *exp = osc_export(obj);
        struct ptlrpc_request *req;
        struct ost_body *body;
        struct osc_data_version_args *dva;
        int rc;
        ENTRY;

        memset(oa, 0, sizeof(*oa));
        oa->o_oi = loi->loi_oi;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;

        if (dv->dv_flags & (LL_DV_RD_FLUSH | LL_DV_WR_FLUSH)) {
                oa->o_valid |= OBD_MD_FLFLAGS;
                oa->o_flags |= OBD_FL_SRVLOCK;
                if (dv->dv_flags & LL_DV_WR_FLUSH)
                        oa->o_flags |= OBD_FL_FLUSH;
        }

        init_completion(&cbargs->opc_sync);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        if (rc < 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = osc_data_version_interpret;
        CLASSERT(sizeof(*dva) <= sizeof(req->rq_async_args));
        dva = ptlrpc_req_async_args(req);
        dva->dva_oio = oio;

        ptlrpcd_add_req(req);

        RETURN(0);
}
static void osc_io_data_version_end(const struct lu_env *env,
                                    const struct cl_io_slice *slice)
{
        struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
        struct osc_io *oio = cl2osc_io(env, slice);
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
        ENTRY;

        wait_for_completion(&cbargs->opc_sync);

        if (cbargs->opc_rc != 0) {
                slice->cis_io->ci_result = cbargs->opc_rc;
        } else if (!(oio->oi_oa.o_valid & OBD_MD_FLDATAVERSION)) {
                slice->cis_io->ci_result = -EOPNOTSUPP;
        } else {
                dv->dv_data_version = oio->oi_oa.o_data_version;
                slice->cis_io->ci_result = 0;
        }

        EXIT;
}
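
/* Update the local atime at the start of a read unless the io asked for
 * noatime; osc_io_write_start() below does the same for m/ctime. */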
static int osc_io_read_start(const struct lu_env *env,
                             const struct cl_io_slice *slice)
{
        struct cl_object *obj = slice->cis_obj;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        int rc = 0;
        ENTRY;

        if (!slice->cis_io->ci_noatime) {
                cl_object_attr_lock(obj);
                attr->cat_atime = LTIME_S(CURRENT_TIME);
                rc = cl_object_attr_update(env, obj, attr, CAT_ATIME);
                cl_object_attr_unlock(obj);
        }

        RETURN(rc);
}
static int osc_io_write_start(const struct lu_env *env,
                              const struct cl_io_slice *slice)
{
        struct cl_object *obj = slice->cis_obj;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        int rc = 0;
        ENTRY;

        OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1);
        cl_object_attr_lock(obj);
        attr->cat_mtime = attr->cat_ctime = LTIME_S(CURRENT_TIME);
        rc = cl_object_attr_update(env, obj, attr, CAT_MTIME | CAT_CTIME);
        cl_object_attr_unlock(obj);

        RETURN(rc);
}
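
/* Send an OST_SYNC RPC covering the byte range [fi_start, fi_end]; completion
 * is reported through osc_async_upcall(). */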
static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
                         struct cl_fsync_io *fio)
{
        struct osc_io *oio = osc_env_io(env);
        struct obdo *oa = &oio->oi_oa;
        struct lov_oinfo *loi = obj->oo_oinfo;
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
        int rc = 0;
        ENTRY;

        memset(oa, 0, sizeof(*oa));
        oa->o_oi = loi->loi_oi;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;

        /* reload size and blocks for start and end of sync range */
        oa->o_size = fio->fi_start;
        oa->o_blocks = fio->fi_end;
        oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;

        obdo_set_parent_fid(oa, fio->fi_fid);

        init_completion(&cbargs->opc_sync);

        rc = osc_sync_base(obj, oa, osc_async_upcall, cbargs, PTLRPCD_SET);

        RETURN(rc);
}
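
/* cio_start() for CIT_FSYNC: start writeback (or discard, for
 * CL_FSYNC_DISCARD) of the range; for CL_FSYNC_ALL also wait for writeback to
 * finish and then sync the OST object through osc_fsync_ost(). */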
static int osc_io_fsync_start(const struct lu_env *env,
                              const struct cl_io_slice *slice)
{
        struct cl_io *io = slice->cis_io;
        struct cl_fsync_io *fio = &io->u.ci_fsync;
        struct cl_object *obj = slice->cis_obj;
        struct osc_object *osc = cl2osc(obj);
        pgoff_t start = cl_index(obj, fio->fi_start);
        pgoff_t end = cl_index(obj, fio->fi_end);
        int result = 0;
        ENTRY;

        if (fio->fi_end == OBD_OBJECT_EOF)
                end = CL_PAGE_EOF;

        result = osc_cache_writeback_range(env, osc, start, end, 0,
                                           fio->fi_mode == CL_FSYNC_DISCARD);
        if (result > 0) {
                fio->fi_nr_written += result;
                result = 0;
        }
        if (fio->fi_mode == CL_FSYNC_ALL) {
                int rc;

                /* we have to wait for writeback to finish before we can
                 * send OST_SYNC RPC. This is bad because it causes extents
                 * to be written osc by osc. However, we usually start
                 * writeback before CL_FSYNC_ALL so this won't have any real
                 * problem. */
                rc = osc_cache_wait_range(env, osc, start, end);
                if (result == 0)
                        result = rc;
                rc = osc_fsync_ost(env, osc, fio);
                if (result == 0)
                        result = rc;
        }

        RETURN(result);
}
static void osc_io_fsync_end(const struct lu_env *env,
                             const struct cl_io_slice *slice)
{
        struct cl_fsync_io *fio = &slice->cis_io->u.ci_fsync;
        struct cl_object *obj = slice->cis_obj;
        pgoff_t start = cl_index(obj, fio->fi_start);
        pgoff_t end = cl_index(obj, fio->fi_end);
        int result = 0;

        if (fio->fi_mode == CL_FSYNC_LOCAL) {
                result = osc_cache_wait_range(env, cl2osc(obj), start, end);
        } else if (fio->fi_mode == CL_FSYNC_ALL) {
                struct osc_io *oio = cl2osc_io(env, slice);
                struct osc_async_cbargs *cbargs = &oio->oi_cbarg;

                wait_for_completion(&cbargs->opc_sync);
                if (result == 0)
                        result = cbargs->opc_rc;
        }
        slice->cis_io->ci_result = result;
}
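
/* cio_start() for CIT_LADVISE: pack a single ladvise descriptor and send it
 * with osc_ladvise_base(); LF_ASYNC requests do not wait for the reply. */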
static int osc_io_ladvise_start(const struct lu_env *env,
                                const struct cl_io_slice *slice)
{
        int result = 0;
        struct cl_io *io = slice->cis_io;
        struct osc_io *oio = cl2osc_io(env, slice);
        struct cl_object *obj = slice->cis_obj;
        struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
        struct cl_ladvise_io *lio = &io->u.ci_ladvise;
        struct obdo *oa = &oio->oi_oa;
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
        struct lu_ladvise *ladvise;
        struct ladvise_hdr *ladvise_hdr;
        int buf_size;
        int num_advise = 1;
        ENTRY;

        /* TODO: add multiple ladvise support in CLIO */
        buf_size = offsetof(typeof(*ladvise_hdr), lah_advise[num_advise]);
        if (osc_env_info(env)->oti_ladvise_buf.lb_len < buf_size)
                lu_buf_realloc(&osc_env_info(env)->oti_ladvise_buf, buf_size);

        ladvise_hdr = osc_env_info(env)->oti_ladvise_buf.lb_buf;
        if (ladvise_hdr == NULL)
                RETURN(-ENOMEM);

        memset(ladvise_hdr, 0, buf_size);
        ladvise_hdr->lah_magic = LADVISE_MAGIC;
        ladvise_hdr->lah_count = num_advise;
        ladvise_hdr->lah_flags = lio->li_flags;

        memset(oa, 0, sizeof(*oa));
        oa->o_oi = loi->loi_oi;
        oa->o_valid = OBD_MD_FLID;
        obdo_set_parent_fid(oa, lio->li_fid);

        ladvise = ladvise_hdr->lah_advise;
        ladvise->lla_start = lio->li_start;
        ladvise->lla_end = lio->li_end;
        ladvise->lla_advice = lio->li_advice;

        if (lio->li_flags & LF_ASYNC) {
                result = osc_ladvise_base(osc_export(cl2osc(obj)), oa,
                                          ladvise_hdr, NULL, NULL, NULL);
        } else {
                init_completion(&cbargs->opc_sync);
                result = osc_ladvise_base(osc_export(cl2osc(obj)), oa,
                                          ladvise_hdr, osc_async_upcall,
                                          cbargs, PTLRPCD_SET);
                cbargs->opc_rpc_sent = result == 0;
        }
        RETURN(result);
}
static void osc_io_ladvise_end(const struct lu_env *env,
                               const struct cl_io_slice *slice)
{
        struct cl_io *io = slice->cis_io;
        struct osc_io *oio = cl2osc_io(env, slice);
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
        int result = 0;
        struct cl_ladvise_io *lio = &io->u.ci_ladvise;

        if ((!(lio->li_flags & LF_ASYNC)) && cbargs->opc_rpc_sent) {
                wait_for_completion(&cbargs->opc_sync);
                result = cbargs->opc_rc;
        }
        slice->cis_io->ci_result = result;
}
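
/* Generic end method for write and fault io: release the extent kept active
 * for the io, if any. */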
static void osc_io_end(const struct lu_env *env,
                       const struct cl_io_slice *slice)
{
        struct osc_io *oio = cl2osc_io(env, slice);

        if (oio->oi_active) {
                osc_extent_release(env, oio->oi_active);
                oio->oi_active = NULL;
        }
}
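
/* Method table wired up in osc_io_init(): per-io-type vectors plus the
 * page-level read-ahead/submit/commit methods shared by all io types. */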
static const struct cl_io_operations osc_io_ops = {
        .op = {
                [CIT_READ] = {
                        .cio_iter_init = osc_io_iter_init,
                        .cio_iter_fini = osc_io_iter_fini,
                        .cio_start     = osc_io_read_start,
                        .cio_fini      = osc_io_fini
                },
                [CIT_WRITE] = {
                        .cio_iter_init = osc_io_write_iter_init,
                        .cio_iter_fini = osc_io_write_iter_fini,
                        .cio_start     = osc_io_write_start,
                        .cio_end       = osc_io_end,
                        .cio_fini      = osc_io_fini
                },
                [CIT_SETATTR] = {
                        .cio_iter_init = osc_io_iter_init,
                        .cio_iter_fini = osc_io_iter_fini,
                        .cio_start     = osc_io_setattr_start,
                        .cio_end       = osc_io_setattr_end
                },
                [CIT_DATA_VERSION] = {
                        .cio_start = osc_io_data_version_start,
                        .cio_end   = osc_io_data_version_end,
                },
                [CIT_FAULT] = {
                        .cio_iter_init = osc_io_iter_init,
                        .cio_iter_fini = osc_io_iter_fini,
                        .cio_start     = osc_io_fault_start,
                        .cio_end       = osc_io_end,
                        .cio_fini      = osc_io_fini
                },
                [CIT_FSYNC] = {
                        .cio_start = osc_io_fsync_start,
                        .cio_end   = osc_io_fsync_end,
                        .cio_fini  = osc_io_fini
                },
                [CIT_LADVISE] = {
                        .cio_start = osc_io_ladvise_start,
                        .cio_end   = osc_io_ladvise_end,
                        .cio_fini  = osc_io_fini
                },
                [CIT_MISC] = {
                        .cio_fini = osc_io_fini
                }
        },
        .cio_read_ahead   = osc_io_read_ahead,
        .cio_submit       = osc_io_submit,
        .cio_commit_async = osc_io_commit_async
};
/*****************************************************************************
 *
 * Transfer operations.
 *
 */
int osc_io_init(const struct lu_env *env,
                struct cl_object *obj, struct cl_io *io)
{
        struct osc_io *oio = osc_env_io(env);

        CL_IO_SLICE_CLEAN(oio, oi_cl);
        cl_io_slice_add(io, &oio->oi_cl, obj, &osc_io_ops);
        return 0;
}