/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for OSC layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */
#define DEBUG_SUBSYSTEM S_OSC

#include <lustre_obdo.h>
#include <lustre_osc.h>

#include "osc_internal.h"
/*****************************************************************************
 *
 * io operations.
 *
 */

static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io)
{
}
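/**
 * Release callback for read-ahead locks: drops the LCK_PR reference taken
 * on the DLM lock in osc_io_read_ahead() and puts the lock.
 */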
void osc_read_ahead_release(const struct lu_env *env, void *cbdata)
{
        struct ldlm_lock *dlmlock = cbdata;
        struct lustre_handle lockh;

        ldlm_lock2handle(dlmlock, &lockh);
        ldlm_lock_decref(&lockh, LCK_PR);
        LDLM_LOCK_PUT(dlmlock);
}
EXPORT_SYMBOL(osc_read_ahead_release);
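/**
 * An implementation of cl_io_operations::cio_read_ahead() method for osc
 * layer. Looks up the DLM lock covering the page at @start; if one is found,
 * takes an LCK_PR reference on it and tells the caller how far read-ahead
 * may extend under that lock. Returns -ENODATA when no lock covers @start.
 */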
static int osc_io_read_ahead(const struct lu_env *env,
                             const struct cl_io_slice *ios,
                             pgoff_t start, struct cl_read_ahead *ra)
{
        struct osc_object *osc = cl2osc(ios->cis_obj);
        struct ldlm_lock *dlmlock;
        int result = -ENODATA;

        ENTRY;

        dlmlock = osc_dlmlock_at_pgoff(env, osc, start, 0);
        if (dlmlock != NULL) {
                LASSERT(dlmlock->l_ast_data == osc);
                if (dlmlock->l_req_mode != LCK_PR) {
                        struct lustre_handle lockh;

                        ldlm_lock2handle(dlmlock, &lockh);
                        ldlm_lock_addref(&lockh, LCK_PR);
                        ldlm_lock_decref(&lockh, dlmlock->l_req_mode);
                }

                ra->cra_rpc_size = osc_cli(osc)->cl_max_pages_per_rpc;
                ra->cra_end = cl_index(osc2cl(osc),
                                       dlmlock->l_policy_data.l_extent.end);
                ra->cra_release = osc_read_ahead_release;
                ra->cra_cbdata = dlmlock;
                result = 0;
        }

        RETURN(result);
}
/**
 * An implementation of cl_io_operations::cio_io_submit() method for osc
 * layer. Iterates over pages in the in-queue, prepares each for io by calling
 * cl_page_prep() and then either submits them through osc_io_submit_page()
 * or, if page is already submitted, changes osc flags through
 * osc_set_async_flags().
 */
int osc_io_submit(const struct lu_env *env, const struct cl_io_slice *ios,
                  enum cl_req_type crt, struct cl_2queue *queue)
{
        struct cl_page *page;
        struct cl_page *tmp;
        struct client_obd *cli = NULL;
        struct osc_object *osc = NULL; /* to keep gcc happy */
        struct osc_page *opg;
        struct cl_io *io;
        struct list_head list = LIST_HEAD_INIT(list);

        struct cl_page_list *qin = &queue->c2_qin;
        struct cl_page_list *qout = &queue->c2_qout;
        unsigned int queued = 0;
        int result = 0;
        int brw_flags;
        unsigned int max_pages;

        LASSERT(qin->pl_nr > 0);

        CDEBUG(D_CACHE|D_READA, "%d %d\n", qin->pl_nr, crt);

        osc = cl2osc(ios->cis_obj);
        cli = osc_cli(osc);
        max_pages = cli->cl_max_pages_per_rpc;

        brw_flags = osc_io_srvlock(cl2osc_io(env, ios)) ? OBD_BRW_SRVLOCK : 0;
        brw_flags |= crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
        if (crt == CRT_READ && ios->cis_io->ci_ndelay)
                brw_flags |= OBD_BRW_NDELAY;

        /*
         * NOTE: here @page is a top-level page. This is done to avoid
         *       creation of sub-page-list.
         */
        cl_page_list_for_each_safe(page, tmp, qin) {
                struct osc_async_page *oap;

                /* Top level IO. */
                io = page->cp_owner;
                LASSERT(io != NULL);

                opg = osc_cl_page_osc(page, osc);
                oap = &opg->ops_oap;
                LASSERT(osc == oap->oap_obj);

                if (!list_empty(&oap->oap_pending_item) ||
                    !list_empty(&oap->oap_rpc_item)) {
                        CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
                               oap, opg);
                        result = -EBUSY;
                        break;
                }

                result = cl_page_prep(env, io, page, crt);
                if (result != 0) {
                        LASSERT(result < 0);
                        if (result != -EALREADY)
                                break;
                        /*
                         * Handle -EALREADY error: for read case, the page is
                         * already in UPTODATE state; for write, the page
                         * is not dirty.
                         */
                        result = 0;
                        continue;
                }

                spin_lock(&oap->oap_lock);
                oap->oap_async_flags = ASYNC_URGENT|ASYNC_READY;
                oap->oap_async_flags |= ASYNC_COUNT_STABLE;
                spin_unlock(&oap->oap_lock);

                osc_page_submit(env, opg, crt, brw_flags);
                list_add_tail(&oap->oap_pending_item, &list);

                if (page->cp_sync_io != NULL)
                        cl_page_list_move(qout, qin, page);
                else /* async IO */
                        cl_page_list_del(env, qin, page);

                if (++queued == max_pages) {
                        queued = 0;
                        result = osc_queue_sync_pages(env, io, osc, &list,
                                                      brw_flags);
                        if (result < 0)
                                break;
                }
        }

        if (queued > 0)
                result = osc_queue_sync_pages(env, io, osc, &list, brw_flags);

        /* Update c/mtime for sync write. LU-7310 */
        if (crt == CRT_WRITE && qout->pl_nr > 0 && result == 0) {
                struct cl_object *obj = ios->cis_obj;
                struct cl_attr *attr = &osc_env_info(env)->oti_attr;

                cl_object_attr_lock(obj);
                attr->cat_mtime = attr->cat_ctime = ktime_get_real_seconds();
                cl_object_attr_update(env, obj, attr, CAT_MTIME | CAT_CTIME);
                cl_object_attr_unlock(obj);
        }

        CDEBUG(D_INFO, "%d/%d %d\n", qin->pl_nr, qout->pl_nr, result);
        return qout->pl_nr > 0 ? 0 : result;
}
EXPORT_SYMBOL(osc_io_submit);
/**
 * This is called to update the attributes when modifying a specific page,
 * both when making new pages and when doing updates to existing cached pages.
 *
 * Expand stripe KMS if necessary.
 */
void osc_page_touch_at(const struct lu_env *env, struct cl_object *obj,
                       pgoff_t idx, size_t to)
{
        struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        int valid;
        __u64 kms;

        ENTRY;

        /* offset within stripe */
        kms = cl_offset(obj, idx) + to;

        cl_object_attr_lock(obj);
        CDEBUG(D_INODE, "stripe KMS %sincreasing %llu->%llu %llu\n",
               kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
               loi->loi_lvb.lvb_size);

        attr->cat_mtime = attr->cat_ctime = ktime_get_real_seconds();
        valid = CAT_MTIME | CAT_CTIME;
        if (kms > loi->loi_kms) {
                attr->cat_kms = kms;
                valid |= CAT_KMS;
        }
        if (kms > loi->loi_lvb.lvb_size) {
                attr->cat_size = kms;
                valid |= CAT_SIZE;
        }
        cl_object_attr_update(env, obj, attr, valid);
        cl_object_attr_unlock(obj);

        EXIT;
}
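/**
 * An implementation of cl_io_operations::cio_commit_async() method for osc
 * layer. Clips partial first/last pages of a lockless io, adds each page to
 * the dirty cache, updates attributes via osc_page_touch_at() and then hands
 * the page over to the commit callback @cb.
 */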
int osc_io_commit_async(const struct lu_env *env,
                        const struct cl_io_slice *ios,
                        struct cl_page_list *qin, int from, int to,
                        cl_commit_cbt cb)
{
        struct cl_io *io = ios->cis_io;
        struct osc_io *oio = cl2osc_io(env, ios);
        struct osc_object *osc = cl2osc(ios->cis_obj);
        struct cl_page *page;
        struct cl_page *last_page;
        struct osc_page *opg;
        int result = 0;
        ENTRY;

        LASSERT(qin->pl_nr > 0);

        /* Handle partial page cases */
        last_page = cl_page_list_last(qin);
        if (oio->oi_lockless) {
                page = cl_page_list_first(qin);
                if (page == last_page) {
                        cl_page_clip(env, page, from, to);
                } else {
                        if (from != 0)
                                cl_page_clip(env, page, from, PAGE_SIZE);
                        if (to != PAGE_SIZE)
                                cl_page_clip(env, last_page, 0, to);
                }
        }

        while (qin->pl_nr > 0) {
                struct osc_async_page *oap;

                page = cl_page_list_first(qin);
                opg = osc_cl_page_osc(page, osc);
                oap = &opg->ops_oap;

                LASSERTF(osc == oap->oap_obj,
                         "obj mismatch: %p / %p\n", osc, oap->oap_obj);

                if (!list_empty(&oap->oap_rpc_item)) {
                        CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
                               oap, opg);
                        result = -EBUSY;
                        break;
                }

                /* The page may be already in dirty cache. */
                if (list_empty(&oap->oap_pending_item)) {
                        result = osc_page_cache_add(env, &opg->ops_cl, io);
                        if (result != 0)
                                break;
                }

                osc_page_touch_at(env, osc2cl(osc), osc_index(opg),
                                  page == last_page ? to : PAGE_SIZE);

                cl_page_list_del(env, qin, page);

                (*cb)(env, io, page);
                /* Can't access page any more. Page can be in transfer and
                 * complete at any time. */
        }

        /* for sync write, kernel will wait for this page to be flushed before
         * osc_io_end() is called, so release it earlier.
         * for mkwrite(), it's known there is no further pages. */
        if (cl_io_is_sync_write(io) && oio->oi_active != NULL) {
                osc_extent_release(env, oio->oi_active);
                oio->oi_active = NULL;
        }

        CDEBUG(D_INFO, "%d %d\n", qin->pl_nr, result);
        RETURN(result);
}
EXPORT_SYMBOL(osc_io_commit_async);
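/**
 * Called at the beginning of an io iteration. Returns -EIO if the import has
 * been invalidated; otherwise marks the io active and counts it against the
 * object in oo_nr_ios.
 */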
int osc_io_iter_init(const struct lu_env *env, const struct cl_io_slice *ios)
{
        struct osc_object *osc = cl2osc(ios->cis_obj);
        struct obd_import *imp = osc_cli(osc)->cl_import;
        int rc = -EIO;

        spin_lock(&imp->imp_lock);
        if (likely(!imp->imp_invalid)) {
                struct osc_io *oio = osc_env_io(env);

                atomic_inc(&osc->oo_nr_ios);
                oio->oi_is_active = 1;
                rc = 0;
        }
        spin_unlock(&imp->imp_lock);

        return rc;
}
EXPORT_SYMBOL(osc_io_iter_init);
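/**
 * Write variant of cio_iter_init(): additionally reserves LRU slots for the
 * pages the write range may dirty. Append writes skip the reservation since
 * their range is not known in advance.
 */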
int osc_io_write_iter_init(const struct lu_env *env,
                           const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        struct osc_io *oio = osc_env_io(env);
        struct osc_object *osc = cl2osc(ios->cis_obj);
        unsigned long npages;
        ENTRY;

        if (cl_io_is_append(io))
                RETURN(osc_io_iter_init(env, ios));

        npages = io->u.ci_rw.rw_range.cir_count >> PAGE_SHIFT;
        if (io->u.ci_rw.rw_range.cir_pos & ~PAGE_MASK)
                ++npages;

        oio->oi_lru_reserved = osc_lru_reserve(osc_cli(osc), npages);

        RETURN(osc_io_iter_init(env, ios));
}
EXPORT_SYMBOL(osc_io_write_iter_init);
void osc_io_iter_fini(const struct lu_env *env,
                      const struct cl_io_slice *ios)
{
        struct osc_io *oio = osc_env_io(env);

        if (oio->oi_is_active) {
                struct osc_object *osc = cl2osc(ios->cis_obj);

                oio->oi_is_active = 0;
                LASSERT(atomic_read(&osc->oo_nr_ios) > 0);
                if (atomic_dec_and_test(&osc->oo_nr_ios))
                        wake_up_all(&osc->oo_io_waitq);
        }
}
EXPORT_SYMBOL(osc_io_iter_fini);
void osc_io_write_iter_fini(const struct lu_env *env,
                            const struct cl_io_slice *ios)
{
        struct osc_io *oio = osc_env_io(env);
        struct osc_object *osc = cl2osc(ios->cis_obj);

        if (oio->oi_lru_reserved > 0) {
                osc_lru_unreserve(osc_cli(osc), oio->oi_lru_reserved);
                oio->oi_lru_reserved = 0;
        }
        oio->oi_write_osclock = NULL;

        osc_io_iter_fini(env, ios);
}
EXPORT_SYMBOL(osc_io_write_iter_fini);
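/**
 * An implementation of cl_io_operations::cio_start() method for CIT_FAULT
 * io at the osc layer.
 */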
int osc_io_fault_start(const struct lu_env *env, const struct cl_io_slice *ios)
{
        struct cl_io *io;
        struct cl_fault_io *fio;
        ENTRY;

        io = ios->cis_io;
        fio = &io->u.ci_fault;
        CDEBUG(D_INFO, "%lu %d %zu\n",
               fio->ft_index, fio->ft_writable, fio->ft_nob);
        /*
         * If mapping is writeable, adjust kms to cover this page,
         * but do not extend kms beyond actual file size.
         * See bug 10919.
         */
        if (fio->ft_writable)
                osc_page_touch_at(env, ios->cis_obj,
                                  fio->ft_index, fio->ft_nob);
        RETURN(0);
}
EXPORT_SYMBOL(osc_io_fault_start);
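/**
 * Common upcall for asynchronous OST requests: stores the return code and
 * wakes up the thread waiting on osc_async_cbargs::opc_sync.
 */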
static int osc_async_upcall(void *a, int rc)
{
        struct osc_async_cbargs *args = a;

        args->opc_rc = rc;
        complete(&args->opc_sync);
        return 0;
}
/**
 * Checks that there are no pages being written in the extent being truncated.
 */
static int trunc_check_cb(const struct lu_env *env, struct cl_io *io,
                          struct osc_page *ops, void *cbdata)
{
        struct cl_page *page = ops->ops_cl.cpl_page;
        struct osc_async_page *oap;
        __u64 start = *(__u64 *)cbdata;

        oap = &ops->ops_oap;
        if (oap->oap_cmd & OBD_BRW_WRITE &&
            !list_empty(&oap->oap_pending_item))
                CL_PAGE_DEBUG(D_ERROR, env, page, "exists %llu/%s.\n",
                              start, current->comm);

        if (PageLocked(page->cp_vmpage))
                CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n",
                       ops, osc_index(ops), oap->oap_cmd & OBD_BRW_RWMASK);

        return CLP_GANG_OKAY;
}
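/**
 * Debugging aid for truncate: walks the pages beyond the new size and
 * complains through trunc_check_cb() about any that are still dirty or
 * locked.
 */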
static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
                            struct osc_io *oio, __u64 size)
{
        struct cl_object *clob;
        int partial;
        pgoff_t start;

        clob = oio->oi_cl.cis_obj;
        start = cl_index(clob, size);
        partial = cl_offset(clob, start) < size;

        /*
         * Complain if there are pages in the truncated region.
         */
        osc_page_gang_lookup(env, io, cl2osc(clob),
                             start + partial, CL_PAGE_EOF,
                             trunc_check_cb, (void *)&size);
}
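/**
 * An implementation of cl_io_operations::cio_start() method for CIT_SETATTR
 * io at the osc layer: truncates cached dirty pages first when the size
 * changes, updates the cl_object attributes, and then sends an asynchronous
 * OST_PUNCH (size change) or OST_SETATTR request to the server.
 */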
static int osc_io_setattr_start(const struct lu_env *env,
                                const struct cl_io_slice *slice)
{
        struct cl_io *io = slice->cis_io;
        struct osc_io *oio = cl2osc_io(env, slice);
        struct cl_object *obj = slice->cis_obj;
        struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        struct obdo *oa = &oio->oi_oa;
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
        __u64 size = io->u.ci_setattr.sa_attr.lvb_size;
        unsigned int ia_avalid = io->u.ci_setattr.sa_avalid;
        enum op_xvalid ia_xvalid = io->u.ci_setattr.sa_xvalid;
        int result = 0;

        ENTRY;
        /* truncate cache dirty pages first */
        if (cl_io_is_trunc(io))
                result = osc_cache_truncate_start(env, cl2osc(obj), size,
                                                  &oio->oi_trunc);

        if (result == 0 && oio->oi_lockless == 0) {
                cl_object_attr_lock(obj);
                result = cl_object_attr_get(env, obj, attr);
                if (result == 0) {
                        struct ost_lvb *lvb = &io->u.ci_setattr.sa_attr;
                        unsigned int cl_valid = 0;

                        if (ia_avalid & ATTR_SIZE) {
                                attr->cat_size = size;
                                attr->cat_kms = size;
                                cl_valid = (CAT_SIZE | CAT_KMS);
                        }
                        if (ia_avalid & ATTR_MTIME_SET) {
                                attr->cat_mtime = lvb->lvb_mtime;
                                cl_valid |= CAT_MTIME;
                        }
                        if (ia_avalid & ATTR_ATIME_SET) {
                                attr->cat_atime = lvb->lvb_atime;
                                cl_valid |= CAT_ATIME;
                        }
                        if (ia_xvalid & OP_XVALID_CTIME_SET) {
                                attr->cat_ctime = lvb->lvb_ctime;
                                cl_valid |= CAT_CTIME;
                        }
                        result = cl_object_attr_update(env, obj, attr,
                                                       cl_valid);
                }
                cl_object_attr_unlock(obj);
        }
        memset(oa, 0, sizeof(*oa));
        if (result == 0) {
                oa->o_oi = loi->loi_oi;
                obdo_set_parent_fid(oa, io->u.ci_setattr.sa_parent_fid);
                oa->o_stripe_idx = io->u.ci_setattr.sa_stripe_index;
                oa->o_layout = io->u.ci_setattr.sa_layout;
                oa->o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP |
                               OBD_MD_FLOSTLAYOUT;
                if (ia_avalid & ATTR_CTIME) {
                        oa->o_valid |= OBD_MD_FLCTIME;
                        oa->o_ctime = attr->cat_ctime;
                }
                if (ia_avalid & ATTR_ATIME) {
                        oa->o_valid |= OBD_MD_FLATIME;
                        oa->o_atime = attr->cat_atime;
                }
                if (ia_avalid & ATTR_MTIME) {
                        oa->o_valid |= OBD_MD_FLMTIME;
                        oa->o_mtime = attr->cat_mtime;
                }
                if (ia_avalid & ATTR_SIZE) {
                        oa->o_size = size;
                        oa->o_blocks = OBD_OBJECT_EOF;
                        oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;

                        if (oio->oi_lockless) {
                                oa->o_flags = OBD_FL_SRVLOCK;
                                oa->o_valid |= OBD_MD_FLFLAGS;
                        }

                        if (io->ci_layout_version > 0) {
                                /* verify layout version */
                                oa->o_valid |= OBD_MD_LAYOUT_VERSION;
                                oa->o_layout_version = io->ci_layout_version;
                        }
                } else {
                        LASSERT(oio->oi_lockless == 0);
                }

                if (ia_xvalid & OP_XVALID_FLAGS) {
                        oa->o_flags = io->u.ci_setattr.sa_attr_flags;
                        oa->o_valid |= OBD_MD_FLFLAGS;
                }

                init_completion(&cbargs->opc_sync);

                if (ia_avalid & ATTR_SIZE)
                        result = osc_punch_send(osc_export(cl2osc(obj)),
                                                oa, osc_async_upcall, cbargs);
                else
                        result = osc_setattr_async(osc_export(cl2osc(obj)),
                                                   oa, osc_async_upcall,
                                                   cbargs, PTLRPCD_SET);

                cbargs->opc_rpc_sent = result == 0;
        }

        RETURN(result);
}
void osc_io_setattr_end(const struct lu_env *env,
                        const struct cl_io_slice *slice)
{
        struct cl_io *io = slice->cis_io;
        struct osc_io *oio = cl2osc_io(env, slice);
        struct cl_object *obj = slice->cis_obj;
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        struct obdo *oa = &oio->oi_oa;
        unsigned int cl_valid = 0;
        int result = 0;

        if (cbargs->opc_rpc_sent) {
                wait_for_completion(&cbargs->opc_sync);
                result = io->ci_result = cbargs->opc_rc;
        }

        if (result == 0) {
                if (oio->oi_lockless) {
                        /* lockless truncate */
                        struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);

                        LASSERT(cl_io_is_trunc(io));
                        /* XXX: Need a lock. */
                        osd->od_stats.os_lockless_truncates++;
                }
        }

        if (cl_io_is_trunc(io)) {
                __u64 size = io->u.ci_setattr.sa_attr.lvb_size;
                cl_object_attr_lock(obj);
                if (oa->o_valid & OBD_MD_FLBLOCKS) {
                        attr->cat_blocks = oa->o_blocks;
                        cl_valid |= CAT_BLOCKS;
                }

                cl_object_attr_update(env, obj, attr, cl_valid);
                cl_object_attr_unlock(obj);
                osc_trunc_check(env, io, oio, size);
                osc_cache_truncate_end(env, oio->oi_trunc);
                oio->oi_trunc = NULL;
        }
}
EXPORT_SYMBOL(osc_io_setattr_end);
struct osc_data_version_args {
        struct osc_io *dva_oio;
};
static int
osc_data_version_interpret(const struct lu_env *env, struct ptlrpc_request *req,
                           void *arg, int rc)
{
        struct osc_data_version_args *dva = arg;
        struct osc_io *oio = dva->dva_oio;
        const struct ost_body *body;

        ENTRY;
        if (rc < 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, &oio->oi_oa,
                             &body->oa);
        EXIT;
out:
        oio->oi_cbarg.opc_rc = rc;
        complete(&oio->oi_cbarg.opc_sync);

        return 0;
}
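/**
 * Sends an OST_GETATTR RPC to fetch the object's data version; when
 * LL_DV_RD_FLUSH or LL_DV_WR_FLUSH is set the server is also asked to flush
 * the object under a server-side lock. The reply is consumed asynchronously
 * by osc_data_version_interpret().
 */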
static int osc_io_data_version_start(const struct lu_env *env,
                                     const struct cl_io_slice *slice)
{
        struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
        struct osc_io *oio = cl2osc_io(env, slice);
        struct obdo *oa = &oio->oi_oa;
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
        struct osc_object *obj = cl2osc(slice->cis_obj);
        struct lov_oinfo *loi = obj->oo_oinfo;
        struct obd_export *exp = osc_export(obj);
        struct ptlrpc_request *req;
        struct ost_body *body;
        struct osc_data_version_args *dva;
        int rc;

        ENTRY;
        memset(oa, 0, sizeof(*oa));
        oa->o_oi = loi->loi_oi;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;

        if (dv->dv_flags & (LL_DV_RD_FLUSH | LL_DV_WR_FLUSH)) {
                oa->o_valid |= OBD_MD_FLFLAGS;
                oa->o_flags |= OBD_FL_SRVLOCK;
                if (dv->dv_flags & LL_DV_WR_FLUSH)
                        oa->o_flags |= OBD_FL_FLUSH;
        }

        init_completion(&cbargs->opc_sync);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        if (rc < 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = osc_data_version_interpret;
        CLASSERT(sizeof(*dva) <= sizeof(req->rq_async_args));
        dva = ptlrpc_req_async_args(req);
        dva->dva_oio = oio;

        ptlrpcd_add_req(req);

        RETURN(0);
}
static void osc_io_data_version_end(const struct lu_env *env,
                                    const struct cl_io_slice *slice)
{
        struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
        struct osc_io *oio = cl2osc_io(env, slice);
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;

        ENTRY;
        wait_for_completion(&cbargs->opc_sync);

        if (cbargs->opc_rc != 0) {
                slice->cis_io->ci_result = cbargs->opc_rc;
        } else {
                slice->cis_io->ci_result = 0;
                if (!(oio->oi_oa.o_valid &
                      (OBD_MD_LAYOUT_VERSION | OBD_MD_FLDATAVERSION)))
                        slice->cis_io->ci_result = -ENOTSUPP;

                if (oio->oi_oa.o_valid & OBD_MD_LAYOUT_VERSION)
                        dv->dv_layout_version = oio->oi_oa.o_layout_version;
                if (oio->oi_oa.o_valid & OBD_MD_FLDATAVERSION)
                        dv->dv_data_version = oio->oi_oa.o_data_version;
        }

        EXIT;
}
int osc_io_read_start(const struct lu_env *env,
                      const struct cl_io_slice *slice)
{
        struct cl_object *obj = slice->cis_obj;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        int rc = 0;
        ENTRY;

        if (!slice->cis_io->ci_noatime) {
                cl_object_attr_lock(obj);
                attr->cat_atime = ktime_get_real_seconds();
                rc = cl_object_attr_update(env, obj, attr, CAT_ATIME);
                cl_object_attr_unlock(obj);
        }

        RETURN(rc);
}
EXPORT_SYMBOL(osc_io_read_start);
int osc_io_write_start(const struct lu_env *env,
                       const struct cl_io_slice *slice)
{
        struct cl_object *obj = slice->cis_obj;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        int rc = 0;
        ENTRY;

        OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1);
        cl_object_attr_lock(obj);
        attr->cat_mtime = attr->cat_ctime = ktime_get_real_seconds();
        rc = cl_object_attr_update(env, obj, attr, CAT_MTIME | CAT_CTIME);
        cl_object_attr_unlock(obj);

        RETURN(rc);
}
EXPORT_SYMBOL(osc_io_write_start);
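/**
 * Sends an OST_SYNC RPC for the [fi_start, fi_end] byte range of the object;
 * completion is signalled through osc_async_upcall().
 */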
int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
                  struct cl_fsync_io *fio)
{
        struct osc_io *oio = osc_env_io(env);
        struct obdo *oa = &oio->oi_oa;
        struct lov_oinfo *loi = obj->oo_oinfo;
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
        int rc = 0;
        ENTRY;

        memset(oa, 0, sizeof(*oa));
        oa->o_oi = loi->loi_oi;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;

        /* reload size and blocks for start and end of sync range */
        oa->o_size = fio->fi_start;
        oa->o_blocks = fio->fi_end;
        oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;

        obdo_set_parent_fid(oa, fio->fi_fid);

        init_completion(&cbargs->opc_sync);

        rc = osc_sync_base(obj, oa, osc_async_upcall, cbargs, PTLRPCD_SET);
        RETURN(rc);
}
EXPORT_SYMBOL(osc_fsync_ost);
int osc_io_fsync_start(const struct lu_env *env,
                       const struct cl_io_slice *slice)
{
        struct cl_io *io = slice->cis_io;
        struct cl_fsync_io *fio = &io->u.ci_fsync;
        struct cl_object *obj = slice->cis_obj;
        struct osc_object *osc = cl2osc(obj);
        pgoff_t start = cl_index(obj, fio->fi_start);
        pgoff_t end = cl_index(obj, fio->fi_end);
        int result = 0;
        ENTRY;

        if (fio->fi_end == OBD_OBJECT_EOF)
                end = CL_PAGE_EOF;

        result = osc_cache_writeback_range(env, osc, start, end, 0,
                                           fio->fi_mode == CL_FSYNC_DISCARD);
        if (result > 0) {
                fio->fi_nr_written += result;
                result = 0;
        }
        if (fio->fi_mode == CL_FSYNC_ALL) {
                int rc;

                /* we have to wait for writeback to finish before we can
                 * send OST_SYNC RPC. This is bad because it causes extents
                 * to be written osc by osc. However, we usually start
                 * writeback before CL_FSYNC_ALL so this won't have any real
                 * problem. */
                rc = osc_cache_wait_range(env, osc, start, end);
                if (result == 0)
                        result = rc;
                rc = osc_fsync_ost(env, osc, fio);
                if (result == 0)
                        result = rc;
        }

        RETURN(result);
}
void osc_io_fsync_end(const struct lu_env *env,
                      const struct cl_io_slice *slice)
{
        struct cl_fsync_io *fio = &slice->cis_io->u.ci_fsync;
        struct cl_object *obj = slice->cis_obj;
        pgoff_t start = cl_index(obj, fio->fi_start);
        pgoff_t end = cl_index(obj, fio->fi_end);
        int result = 0;

        if (fio->fi_mode == CL_FSYNC_LOCAL) {
                result = osc_cache_wait_range(env, cl2osc(obj), start, end);
        } else if (fio->fi_mode == CL_FSYNC_ALL) {
                struct osc_io *oio = cl2osc_io(env, slice);
                struct osc_async_cbargs *cbargs = &oio->oi_cbarg;

                wait_for_completion(&cbargs->opc_sync);
                if (result == 0)
                        result = cbargs->opc_rc;
        }
        slice->cis_io->ci_result = result;
}
EXPORT_SYMBOL(osc_io_fsync_end);
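/**
 * An implementation of cl_io_operations::cio_start() method for CIT_LADVISE
 * io: packs a single advice into a ladvise_hdr buffer and sends it to the
 * OST. Unless LF_ASYNC is set, the reply is waited for in
 * osc_io_ladvise_end().
 */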
static int osc_io_ladvise_start(const struct lu_env *env,
                                const struct cl_io_slice *slice)
{
        int result = 0;
        struct cl_io *io = slice->cis_io;
        struct osc_io *oio = cl2osc_io(env, slice);
        struct cl_object *obj = slice->cis_obj;
        struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
        struct cl_ladvise_io *lio = &io->u.ci_ladvise;
        struct obdo *oa = &oio->oi_oa;
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
        struct lu_ladvise *ladvise;
        struct ladvise_hdr *ladvise_hdr;
        int buf_size;
        int num_advise = 1;
        ENTRY;

        /* TODO: add multiple ladvise support in CLIO */
        buf_size = offsetof(typeof(*ladvise_hdr), lah_advise[num_advise]);
        if (osc_env_info(env)->oti_ladvise_buf.lb_len < buf_size)
                lu_buf_realloc(&osc_env_info(env)->oti_ladvise_buf, buf_size);

        ladvise_hdr = osc_env_info(env)->oti_ladvise_buf.lb_buf;
        if (ladvise_hdr == NULL)
                RETURN(-ENOMEM);

        memset(ladvise_hdr, 0, buf_size);
        ladvise_hdr->lah_magic = LADVISE_MAGIC;
        ladvise_hdr->lah_count = num_advise;
        ladvise_hdr->lah_flags = lio->li_flags;

        memset(oa, 0, sizeof(*oa));
        oa->o_oi = loi->loi_oi;
        oa->o_valid = OBD_MD_FLID;
        obdo_set_parent_fid(oa, lio->li_fid);

        ladvise = ladvise_hdr->lah_advise;
        ladvise->lla_start = lio->li_start;
        ladvise->lla_end = lio->li_end;
        ladvise->lla_advice = lio->li_advice;

        if (lio->li_flags & LF_ASYNC) {
                result = osc_ladvise_base(osc_export(cl2osc(obj)), oa,
                                          ladvise_hdr, NULL, NULL, NULL);
        } else {
                init_completion(&cbargs->opc_sync);
                result = osc_ladvise_base(osc_export(cl2osc(obj)), oa,
                                          ladvise_hdr, osc_async_upcall,
                                          cbargs, PTLRPCD_SET);
                cbargs->opc_rpc_sent = result == 0;
        }
        RETURN(result);
}
static void osc_io_ladvise_end(const struct lu_env *env,
                               const struct cl_io_slice *slice)
{
        struct cl_io *io = slice->cis_io;
        struct osc_io *oio = cl2osc_io(env, slice);
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
        int result = 0;
        struct cl_ladvise_io *lio = &io->u.ci_ladvise;

        if ((!(lio->li_flags & LF_ASYNC)) && cbargs->opc_rpc_sent) {
                wait_for_completion(&cbargs->opc_sync);
                result = cbargs->opc_rc;
        }
        slice->cis_io->ci_result = result;
}
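/**
 * Common cio_end() for write and fault io: releases the extent still held
 * active by the io, if any.
 */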
void osc_io_end(const struct lu_env *env, const struct cl_io_slice *slice)
{
        struct osc_io *oio = cl2osc_io(env, slice);

        if (oio->oi_active) {
                osc_extent_release(env, oio->oi_active);
                oio->oi_active = NULL;
        }
}
EXPORT_SYMBOL(osc_io_end);
static const struct cl_io_operations osc_io_ops = {
        .op = {
                [CIT_READ] = {
                        .cio_iter_init = osc_io_iter_init,
                        .cio_iter_fini = osc_io_iter_fini,
                        .cio_start     = osc_io_read_start,
                        .cio_fini      = osc_io_fini
                },
                [CIT_WRITE] = {
                        .cio_iter_init = osc_io_write_iter_init,
                        .cio_iter_fini = osc_io_write_iter_fini,
                        .cio_start     = osc_io_write_start,
                        .cio_end       = osc_io_end,
                        .cio_fini      = osc_io_fini
                },
                [CIT_SETATTR] = {
                        .cio_iter_init = osc_io_iter_init,
                        .cio_iter_fini = osc_io_iter_fini,
                        .cio_start     = osc_io_setattr_start,
                        .cio_end       = osc_io_setattr_end
                },
                [CIT_DATA_VERSION] = {
                        .cio_start = osc_io_data_version_start,
                        .cio_end   = osc_io_data_version_end,
                },
                [CIT_FAULT] = {
                        .cio_iter_init = osc_io_iter_init,
                        .cio_iter_fini = osc_io_iter_fini,
                        .cio_start     = osc_io_fault_start,
                        .cio_end       = osc_io_end,
                        .cio_fini      = osc_io_fini
                },
                [CIT_FSYNC] = {
                        .cio_start = osc_io_fsync_start,
                        .cio_end   = osc_io_fsync_end,
                        .cio_fini  = osc_io_fini
                },
                [CIT_LADVISE] = {
                        .cio_start = osc_io_ladvise_start,
                        .cio_end   = osc_io_ladvise_end,
                        .cio_fini  = osc_io_fini
                },
                [CIT_MISC] = {
                        .cio_fini = osc_io_fini
                }
        },
        .cio_read_ahead   = osc_io_read_ahead,
        .cio_submit       = osc_io_submit,
        .cio_commit_async = osc_io_commit_async
};
/*****************************************************************************
 *
 * Transfer operations.
 *
 */
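/**
 * Initializes the osc io slice and adds it to the cl_io; called when a cl_io
 * is being set up on an osc object.
 */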
int osc_io_init(const struct lu_env *env,
                struct cl_object *obj, struct cl_io *io)
{
        struct osc_io *oio = osc_env_io(env);

        CL_IO_SLICE_CLEAN(oio, oi_cl);
        cl_io_slice_add(io, &oio->oi_cl, obj, &osc_io_ops);
        return 0;
}