/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for OSC layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */
#define DEBUG_SUBSYSTEM S_OSC

#include <lustre_obdo.h>
#include <lustre_osc.h>
#include <linux/pagevec.h>
#include <linux/falloc.h>

#include "osc_internal.h"

/** \addtogroup osc
 *  @{
 */
/*****************************************************************************
 *
 * io operations.
 *
 */

static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io)
{
}

void osc_read_ahead_release(const struct lu_env *env, void *cbdata)
{
	struct ldlm_lock *dlmlock = cbdata;
	struct lustre_handle lockh;

	ldlm_lock2handle(dlmlock, &lockh);
	ldlm_lock_decref(&lockh, LCK_PR);
	LDLM_LOCK_PUT(dlmlock);
}
EXPORT_SYMBOL(osc_read_ahead_release);
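
/*
 * Implementation of cl_io_operations::cio_read_ahead() for the OSC layer:
 * look up a DLM lock covering the read-ahead start and, if one is found,
 * let its extent bound the read-ahead window; otherwise return -ENODATA
 * so read-ahead stops at @start.
 */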
static int osc_io_read_ahead(const struct lu_env *env,
			     const struct cl_io_slice *ios,
			     pgoff_t start, struct cl_read_ahead *ra)
{
	struct osc_object *osc = cl2osc(ios->cis_obj);
	struct ldlm_lock *dlmlock;
	int result = -ENODATA;

	ENTRY;

	dlmlock = osc_dlmlock_at_pgoff(env, osc, start, 0);
	if (dlmlock != NULL) {
		LASSERT(dlmlock->l_ast_data == osc);
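		/*
		 * Read-ahead pins this lock in PR mode until the release
		 * callback above runs, so if the lock is currently held in
		 * another mode (e.g. PW), take a PR reference first and
		 * drop the original-mode reference.
		 */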
		if (dlmlock->l_req_mode != LCK_PR) {
			struct lustre_handle lockh;

			ldlm_lock2handle(dlmlock, &lockh);
			ldlm_lock_addref(&lockh, LCK_PR);
			ldlm_lock_decref(&lockh, dlmlock->l_req_mode);
		}

		ra->cra_rpc_pages = osc_cli(osc)->cl_max_pages_per_rpc;
		ra->cra_end_idx = cl_index(osc2cl(osc),
					   dlmlock->l_policy_data.l_extent.end);
		ra->cra_release = osc_read_ahead_release;
		ra->cra_cbdata = dlmlock;
		if (ra->cra_end_idx != CL_PAGE_EOF)
			ra->cra_contention = true;
		result = 0;
	}

	RETURN(result);
}
/**
 * An implementation of cl_io_operations::cio_io_submit() method for osc
 * layer. Iterates over pages in the in-queue, prepares each for io by calling
 * cl_page_prep() and then either submits them through osc_io_submit_page()
 * or, if page is already submitted, changes osc flags through
 * osc_set_async_flags().
 */
int osc_io_submit(const struct lu_env *env, const struct cl_io_slice *ios,
		  enum cl_req_type crt, struct cl_2queue *queue)
{
	struct cl_page *page;
	struct cl_page *tmp;
	struct client_obd *cli = NULL;
	struct osc_object *osc = NULL; /* to keep gcc happy */
	struct osc_page *opg;
	struct cl_io *io;
	LIST_HEAD(list);

	struct cl_page_list *qin = &queue->c2_qin;
	struct cl_page_list *qout = &queue->c2_qout;
	unsigned int queued = 0;
	int result = 0;
	int brw_flags;
	unsigned int max_pages;
	unsigned int ppc_bits; /* pages per chunk bits */
	unsigned int ppc;
	bool sync_queue = false;

	LASSERT(qin->pl_nr > 0);

	CDEBUG(D_CACHE|D_READA, "%d %d\n", qin->pl_nr, crt);

	osc = cl2osc(ios->cis_obj);
	cli = osc_cli(osc);
	max_pages = cli->cl_max_pages_per_rpc;
	ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
	ppc = 1 << ppc_bits;
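	/*
	 * Worked example (assuming 4KB pages, PAGE_SHIFT == 12, and a
	 * hypothetical 64KB chunk size, cl_chunkbits == 16): ppc_bits == 4,
	 * so each chunk holds ppc == 2^4 == 16 pages.
	 */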

	brw_flags = osc_io_srvlock(cl2osc_io(env, ios)) ? OBD_BRW_SRVLOCK : 0;
	brw_flags |= crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
	if (crt == CRT_READ && ios->cis_io->ci_ndelay)
		brw_flags |= OBD_BRW_NDELAY;

	page = cl_page_list_first(qin);
	if (page->cp_type == CPT_TRANSIENT)
		brw_flags |= OBD_BRW_NOCACHE;

	/*
	 * NOTE: here @page is a top-level page. This is done to avoid
	 *       creation of sub-page-list.
	 */
	cl_page_list_for_each_safe(page, tmp, qin) {
		struct osc_async_page *oap;

		/* Top level IO. */
		io = page->cp_sync_io;
		LASSERT(io != NULL);

		opg = osc_cl_page_osc(page, osc);
		oap = &opg->ops_oap;
		LASSERT(osc == oap->oap_obj);

		if (!list_empty(&oap->oap_pending_item) ||
		    !list_empty(&oap->oap_rpc_item)) {
			CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
			       oap, page);
			result = -EBUSY;
			break;
		}

		result = cl_page_prep(env, io, page, crt);
		if (result != 0) {
			LASSERT(result < 0);
			if (result != -EALREADY)
				break;
			/*
			 * Handle -EALREADY error: for read case, the page is
			 * already in UPTODATE state; for write, the page
			 * is not dirty.
			 */
			result = 0;
			continue;
		}

		spin_lock(&oap->oap_lock);
		oap->oap_async_flags = ASYNC_URGENT|ASYNC_READY;
		oap->oap_async_flags |= ASYNC_COUNT_STABLE;
		spin_unlock(&oap->oap_lock);

		osc_page_submit(env, opg, crt, brw_flags);
		list_add_tail(&oap->oap_pending_item, &list);

		if (page->cp_sync_io != NULL)
			cl_page_list_move(qout, qin, page);
		else /* async IO */
			cl_page_list_del(env, qin, page);

		queued++;
		if (queued == max_pages) {
			sync_queue = true;
		} else if (crt == CRT_WRITE) {
			unsigned int chunks;
			unsigned int next_chunks;

			chunks = (queued + ppc - 1) >> ppc_bits;
			/* chunk count if one more page is added */
			next_chunks = (queued + ppc) >> ppc_bits;
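			/*
			 * Example with the values above (ppc == 16): at
			 * queued == 16, chunks == (16 + 15) >> 4 == 1 while
			 * next_chunks == (16 + 16) >> 4 == 2, i.e. one more
			 * page would start a new chunk.
			 */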
			/* next page would exceed the write chunk limit */
			if (chunks == osc_max_write_chunks(cli) &&
			    next_chunks > chunks)
				sync_queue = true;
		}

		if (sync_queue) {
			result = osc_queue_sync_pages(env, io, osc, &list,
						      brw_flags);
			if (result < 0)
				break;
			queued = 0;
			sync_queue = false;
		}
	}

	if (queued > 0)
		result = osc_queue_sync_pages(env, io, osc, &list, brw_flags);

	/* Update c/mtime for sync write. LU-7310 */
	if (crt == CRT_WRITE && qout->pl_nr > 0 && result == 0) {
		struct cl_object *obj = ios->cis_obj;
		struct cl_attr *attr = &osc_env_info(env)->oti_attr;

		cl_object_attr_lock(obj);
		attr->cat_mtime = attr->cat_ctime = ktime_get_real_seconds();
		cl_object_attr_update(env, obj, attr, CAT_MTIME | CAT_CTIME);
		cl_object_attr_unlock(obj);
	}

	CDEBUG(D_INFO, "%d/%d %d\n", qin->pl_nr, qout->pl_nr, result);
	return qout->pl_nr > 0 ? 0 : result;
}
EXPORT_SYMBOL(osc_io_submit);
/**
 * This is called to update the attributes when modifying a specific page,
 * both when making new pages and when doing updates to existing cached pages.
 *
 * Expand stripe KMS if necessary.
 */
void osc_page_touch_at(const struct lu_env *env, struct cl_object *obj,
		       pgoff_t idx, size_t to)
{
	struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	int valid;
	__u64 kms;

	ENTRY;

	/* offset within stripe */
	kms = cl_offset(obj, idx) + to;
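	/*
	 * Example (assuming 4KB pages): touching bytes [0, 100) of page
	 * index 2 gives kms = 2 * 4096 + 100 = 8292, i.e. the client now
	 * knows at least 8292 bytes of this stripe object are valid.
	 */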

	cl_object_attr_lock(obj);
	CDEBUG(D_INODE, "stripe KMS %sincreasing %llu->%llu %llu\n",
	       kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
	       loi->loi_lvb.lvb_size);

	attr->cat_mtime = attr->cat_ctime = ktime_get_real_seconds();
	valid = CAT_MTIME | CAT_CTIME;
	if (kms > loi->loi_kms) {
		attr->cat_kms = kms;
		valid |= CAT_KMS;
	}
	if (kms > loi->loi_lvb.lvb_size) {
		attr->cat_size = kms;
		valid |= CAT_SIZE;
	}
	cl_object_attr_update(env, obj, attr, valid);
	cl_object_attr_unlock(obj);

	EXIT;
}
int osc_io_commit_async(const struct lu_env *env,
			const struct cl_io_slice *ios,
			struct cl_page_list *qin, int from, int to,
			cl_commit_cbt cb)
{
	struct cl_io *io = ios->cis_io;
	struct osc_io *oio = cl2osc_io(env, ios);
	struct osc_object *osc = cl2osc(ios->cis_obj);
	struct cl_page *page;
	struct cl_page *last_page;
	struct osc_page *opg;
	struct pagevec *pvec = &osc_env_info(env)->oti_pagevec;
	int result = 0;

	ENTRY;

	LASSERT(qin->pl_nr > 0);

	/* Handle partial page cases */
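	/*
	 * Only the first and the last page of the range can be partial:
	 * everything in between covers whole pages, so at most the two
	 * boundary pages need clipping.
	 */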
	last_page = cl_page_list_last(qin);
	if (oio->oi_lockless) {
		page = cl_page_list_first(qin);
		if (page == last_page) {
			cl_page_clip(env, page, from, to);
		} else {
			if (from != 0)
				cl_page_clip(env, page, from, PAGE_SIZE);
			if (to != PAGE_SIZE)
				cl_page_clip(env, last_page, 0, to);
		}
	}

	ll_pagevec_init(pvec, 0);
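	/*
	 * Committed pages are handed back to the caller in pagevec-sized
	 * batches: once the pagevec fills up, the callback runs and the
	 * vector is reinitialized (see the loop below).
	 */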

	while (qin->pl_nr > 0) {
		struct osc_async_page *oap;

		page = cl_page_list_first(qin);
		opg = osc_cl_page_osc(page, osc);
		oap = &opg->ops_oap;

		LASSERTF(osc == oap->oap_obj,
			 "obj mismatch: %p / %p\n", osc, oap->oap_obj);

		if (!list_empty(&oap->oap_rpc_item)) {
			CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
			       oap, page);
			result = -EBUSY;
			break;
		}

		/* The page may be already in dirty cache. */
		if (list_empty(&oap->oap_pending_item)) {
			result = osc_page_cache_add(env, opg, io, cb);
			if (result != 0)
				break;
		}

		osc_page_touch_at(env, osc2cl(osc), osc_index(opg),
				  page == last_page ? to : PAGE_SIZE);

		cl_page_list_del(env, qin, page);

		/* if there are no more slots, do the callback & reinit */
		if (pagevec_add(pvec, page->cp_vmpage) == 0) {
			(*cb)(env, io, pvec);
			pagevec_reinit(pvec);
		}
	}

	/* Clean up any partially full pagevecs */
	if (pagevec_count(pvec) != 0)
		(*cb)(env, io, pvec);

	/*
	 * Can't access these pages any more. A page can be in transfer and
	 * complete at any time.
	 */

	/*
	 * For a sync write, the kernel will wait for these pages to be
	 * flushed before osc_io_end() is called, so release them earlier.
	 * For mkwrite(), it is known there are no further pages.
	 */
	if (cl_io_is_sync_write(io) && oio->oi_active != NULL) {
		osc_extent_release(env, oio->oi_active);
		oio->oi_active = NULL;
	}

	CDEBUG(D_INFO, "%d %d\n", qin->pl_nr, result);
	RETURN(result);
}
EXPORT_SYMBOL(osc_io_commit_async);
static bool osc_import_not_healthy(struct obd_import *imp)
{
	return imp->imp_invalid || imp->imp_deactive ||
	       !(imp->imp_state == LUSTRE_IMP_FULL ||
		 imp->imp_state == LUSTRE_IMP_IDLE);
}
int osc_io_iter_init(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct osc_object *osc = cl2osc(ios->cis_obj);
	struct obd_import *imp = osc_cli(osc)->cl_import;
	struct osc_io *oio = osc_env_io(env);
	int rc = -EIO;

	ENTRY;

	spin_lock(&imp->imp_lock);
	/*
	 * Check whether this OSC device is available for a non-delay read;
	 * switch to another mirror quickly if not all mirrors have been
	 * tried yet.
	 */
	if (ios->cis_io->ci_type == CIT_READ && ios->cis_io->ci_ndelay &&
	    !ios->cis_io->ci_tried_all_mirrors && osc_import_not_healthy(imp)) {
		rc = -EWOULDBLOCK;
	} else if (likely(!imp->imp_invalid)) {
		atomic_inc(&osc->oo_nr_ios);
		oio->oi_is_active = 1;
		rc = 0;
	}
	spin_unlock(&imp->imp_lock);

	if (cfs_capable(CFS_CAP_SYS_RESOURCE))
		oio->oi_cap_sys_resource = 1;

	RETURN(rc);
}
EXPORT_SYMBOL(osc_io_iter_init);
int osc_io_rw_iter_init(const struct lu_env *env,
			const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct osc_io *oio = osc_env_io(env);
	struct osc_object *osc = cl2osc(ios->cis_obj);
	unsigned long npages;

	ENTRY;

	if (cl_io_is_append(io))
		RETURN(osc_io_iter_init(env, ios));
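
	/*
	 * Reserve one LRU slot per page the range can touch. Example
	 * (assuming 4KB pages): pos = 6144 and count = 8192 span pages
	 * 1..3, so 8192 >> PAGE_SHIFT = 2, plus one extra page for the
	 * unaligned start.
	 */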
	npages = io->u.ci_rw.crw_count >> PAGE_SHIFT;
	if (io->u.ci_rw.crw_pos & ~PAGE_MASK)
		++npages;

	oio->oi_lru_reserved = osc_lru_reserve(osc_cli(osc), npages);

	RETURN(osc_io_iter_init(env, ios));
}
EXPORT_SYMBOL(osc_io_rw_iter_init);
void osc_io_iter_fini(const struct lu_env *env,
		      const struct cl_io_slice *ios)
{
	struct osc_io *oio = osc_env_io(env);

	if (oio->oi_is_active) {
		struct osc_object *osc = cl2osc(ios->cis_obj);

		oio->oi_is_active = 0;
		LASSERT(atomic_read(&osc->oo_nr_ios) > 0);
		if (atomic_dec_and_test(&osc->oo_nr_ios))
			wake_up_all(&osc->oo_io_waitq);
	}
}
EXPORT_SYMBOL(osc_io_iter_fini);
void osc_io_rw_iter_fini(const struct lu_env *env,
			 const struct cl_io_slice *ios)
{
	struct osc_io *oio = osc_env_io(env);
	struct osc_object *osc = cl2osc(ios->cis_obj);

	if (oio->oi_lru_reserved > 0) {
		osc_lru_unreserve(osc_cli(osc), oio->oi_lru_reserved);
		oio->oi_lru_reserved = 0;
	}
	oio->oi_write_osclock = NULL;

	osc_io_iter_fini(env, ios);
}
EXPORT_SYMBOL(osc_io_rw_iter_fini);
int osc_io_fault_start(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct cl_io *io;
	struct cl_fault_io *fio;

	ENTRY;

	io = ios->cis_io;
	fio = &io->u.ci_fault;
	CDEBUG(D_INFO, "%lu %d %zu\n",
	       fio->ft_index, fio->ft_writable, fio->ft_nob);
	/*
	 * If mapping is writeable, adjust kms to cover this page,
	 * but do not extend kms beyond actual file size.
	 */
	if (fio->ft_writable)
		osc_page_touch_at(env, ios->cis_obj,
				  fio->ft_index, fio->ft_nob);
	RETURN(0);
}
EXPORT_SYMBOL(osc_io_fault_start);

static int osc_async_upcall(void *a, int rc)
{
	struct osc_async_cbargs *args = a;

	args->opc_rc = rc;
	complete(&args->opc_sync);
	return 0;
}
/**
 * Checks that there are no pages being written in the extent being truncated.
 */
static bool trunc_check_cb(const struct lu_env *env, struct cl_io *io,
			   struct osc_page *ops, void *cbdata)
{
	struct cl_page *page = ops->ops_cl.cpl_page;
	struct osc_async_page *oap;
	__u64 start = *(__u64 *)cbdata;

	oap = &ops->ops_oap;
	if (oap->oap_cmd & OBD_BRW_WRITE &&
	    !list_empty(&oap->oap_pending_item))
		CL_PAGE_DEBUG(D_ERROR, env, page, "exists %llu/%s.\n",
			      start, current->comm);

	if (PageLocked(page->cp_vmpage))
		CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n",
		       ops, osc_index(ops), oap->oap_cmd & OBD_BRW_RWMASK);

	return true;
}
static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
			    struct osc_io *oio, __u64 size)
{
	struct cl_object *clob;
	int partial;
	pgoff_t start;

	ENTRY;

	clob = oio->oi_cl.cis_obj;
	start = cl_index(clob, size);
	partial = cl_offset(clob, start) < size;
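	/*
	 * Example (assuming 4KB pages): truncating to size 10000 gives
	 * start = 2 and partial = true (offset 8192 < 10000), so page 2
	 * keeps valid data and the scan below starts at index 3.
	 */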

	/*
	 * Complain if there are pages in the truncated region.
	 */
	osc_page_gang_lookup(env, io, cl2osc(clob),
			     start + partial, CL_PAGE_EOF,
			     trunc_check_cb, (void *)&size);
	EXIT;
}
static int osc_io_setattr_start(const struct lu_env *env,
				const struct cl_io_slice *slice)
{
	struct cl_io *io = slice->cis_io;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct cl_object *obj = slice->cis_obj;
	struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	struct obdo *oa = &oio->oi_oa;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	unsigned int ia_avalid = io->u.ci_setattr.sa_avalid;
	enum op_xvalid ia_xvalid = io->u.ci_setattr.sa_xvalid;
	int result = 0;
	__u64 size = io->u.ci_setattr.sa_attr.lvb_size;
	__u64 end = OBD_OBJECT_EOF;
	bool io_is_falloc = false;

	ENTRY;

	/* truncate cache dirty pages first */
	if (cl_io_is_trunc(io)) {
		result = osc_cache_truncate_start(env, cl2osc(obj), size,
						  &oio->oi_trunc);
	} else if (cl_io_is_fallocate(io)) {
		io_is_falloc = true;
		size = io->u.ci_setattr.sa_falloc_offset;
		end = io->u.ci_setattr.sa_falloc_end;
	}
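	/*
	 * For fallocate, [size, end) is the byte range handed to the OST
	 * below via oa->o_size / oa->o_blocks.
	 */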

	if (result == 0 && oio->oi_lockless == 0) {
		cl_object_attr_lock(obj);
		result = cl_object_attr_get(env, obj, attr);
		if (result == 0) {
			struct ost_lvb *lvb = &io->u.ci_setattr.sa_attr;
			unsigned int cl_valid = 0;

			if (ia_avalid & ATTR_SIZE) {
				attr->cat_size = size;
				attr->cat_kms = size;
				cl_valid = (CAT_SIZE | CAT_KMS);
			}
			if (ia_avalid & ATTR_MTIME_SET) {
				attr->cat_mtime = lvb->lvb_mtime;
				cl_valid |= CAT_MTIME;
			}
			if (ia_avalid & ATTR_ATIME_SET) {
				attr->cat_atime = lvb->lvb_atime;
				cl_valid |= CAT_ATIME;
			}
			if (ia_xvalid & OP_XVALID_CTIME_SET) {
				attr->cat_ctime = lvb->lvb_ctime;
				cl_valid |= CAT_CTIME;
			}
			result = cl_object_attr_update(env, obj, attr,
						       cl_valid);
		}
		cl_object_attr_unlock(obj);
	}
	memset(oa, 0, sizeof(*oa));
	if (result == 0) {
		oa->o_oi = loi->loi_oi;
		obdo_set_parent_fid(oa, io->u.ci_setattr.sa_parent_fid);
		oa->o_stripe_idx = io->u.ci_setattr.sa_stripe_index;
		oa->o_layout = io->u.ci_setattr.sa_layout;
		oa->o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP |
			       OBD_MD_FLOSTLAYOUT;
		if (ia_avalid & ATTR_CTIME) {
			oa->o_valid |= OBD_MD_FLCTIME;
			oa->o_ctime = attr->cat_ctime;
		}
		if (ia_avalid & ATTR_ATIME) {
			oa->o_valid |= OBD_MD_FLATIME;
			oa->o_atime = attr->cat_atime;
		}
		if (ia_avalid & ATTR_MTIME) {
			oa->o_valid |= OBD_MD_FLMTIME;
			oa->o_mtime = attr->cat_mtime;
		}
		if (ia_avalid & ATTR_SIZE) {
			if (io_is_falloc) {
				oa->o_size = io->u.ci_setattr.sa_falloc_offset;
				oa->o_blocks = io->u.ci_setattr.sa_falloc_end;
				oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
			} else {
				oa->o_size = size;
				oa->o_blocks = OBD_OBJECT_EOF;
				oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
			}

			if (oio->oi_lockless) {
				oa->o_flags = OBD_FL_SRVLOCK;
				oa->o_valid |= OBD_MD_FLFLAGS;
			}

			if (io->ci_layout_version > 0) {
				/* verify layout version */
				oa->o_valid |= OBD_MD_LAYOUT_VERSION;
				oa->o_layout_version = io->ci_layout_version;
			}
		} else {
			LASSERT(oio->oi_lockless == 0);
		}

		if (ia_xvalid & OP_XVALID_FLAGS) {
			oa->o_flags = io->u.ci_setattr.sa_attr_flags;
			oa->o_valid |= OBD_MD_FLFLAGS;
		}

		init_completion(&cbargs->opc_sync);
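
		/*
		 * Dispatch: fallocate goes out as an OST fallocate request,
		 * truncate as OST_PUNCH, and a plain attribute change as
		 * OST_SETATTR; all three complete through osc_async_upcall().
		 */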
		if (io_is_falloc) {
			int falloc_mode = io->u.ci_setattr.sa_falloc_mode;

			result = osc_fallocate_base(osc_export(cl2osc(obj)),
						    oa, osc_async_upcall,
						    cbargs, falloc_mode);
		} else if (ia_avalid & ATTR_SIZE) {
			result = osc_punch_send(osc_export(cl2osc(obj)),
						oa, osc_async_upcall, cbargs);
		} else {
			result = osc_setattr_async(osc_export(cl2osc(obj)),
						   oa, osc_async_upcall,
						   cbargs, PTLRPCD_SET);
		}
		cbargs->opc_rpc_sent = result == 0;
	}

	RETURN(result);
}
void osc_io_setattr_end(const struct lu_env *env,
			const struct cl_io_slice *slice)
{
	struct cl_io *io = slice->cis_io;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct cl_object *obj = slice->cis_obj;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	struct obdo *oa = &oio->oi_oa;
	unsigned int cl_valid = 0;
	int result = 0;

	if (cbargs->opc_rpc_sent) {
		wait_for_completion(&cbargs->opc_sync);
		result = io->ci_result = cbargs->opc_rc;
	}

	if (result == 0) {
		if (oio->oi_lockless) {
			/* lockless truncate or fallocate */
			struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);

			LASSERT(cl_io_is_trunc(io) || cl_io_is_fallocate(io));
			/* XXX: Need a lock. */
			osd->od_stats.os_lockless_truncates++;
		}
	}

	if (cl_io_is_trunc(io)) {
		__u64 size = io->u.ci_setattr.sa_attr.lvb_size;

		cl_object_attr_lock(obj);
		if (oa->o_valid & OBD_MD_FLBLOCKS) {
			attr->cat_blocks = oa->o_blocks;
			cl_valid |= CAT_BLOCKS;
		}

		cl_object_attr_update(env, obj, attr, cl_valid);
		cl_object_attr_unlock(obj);
		osc_trunc_check(env, io, oio, size);
		osc_cache_truncate_end(env, oio->oi_trunc);
		oio->oi_trunc = NULL;
	}

	if (cl_io_is_fallocate(io)) {
		cl_object_attr_lock(obj);

		/* update blocks */
		if (oa->o_valid & OBD_MD_FLBLOCKS) {
			attr->cat_blocks = oa->o_blocks;
			cl_valid |= CAT_BLOCKS;
		}

		/* update size */
		if (oa->o_valid & OBD_MD_FLSIZE) {
			attr->cat_size = oa->o_size;
			cl_valid |= CAT_SIZE;
		}

		cl_object_attr_update(env, obj, attr, cl_valid);
		cl_object_attr_unlock(obj);
	}
}
EXPORT_SYMBOL(osc_io_setattr_end);
struct osc_data_version_args {
	struct osc_io *dva_oio;
};

static int
osc_data_version_interpret(const struct lu_env *env, struct ptlrpc_request *req,
			   void *args, int rc)
{
	struct osc_data_version_args *dva = args;
	struct osc_io *oio = dva->dva_oio;
	const struct ost_body *body;

	ENTRY;

	if (rc < 0)
		GOTO(out, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, &oio->oi_oa,
			     &body->oa);
	EXIT;
out:
	oio->oi_cbarg.opc_rc = rc;
	complete(&oio->oi_cbarg.opc_sync);

	return 0;
}

static int osc_io_data_version_start(const struct lu_env *env,
				     const struct cl_io_slice *slice)
{
	struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct obdo *oa = &oio->oi_oa;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	struct osc_object *obj = cl2osc(slice->cis_obj);
	struct lov_oinfo *loi = obj->oo_oinfo;
	struct obd_export *exp = osc_export(obj);
	struct ptlrpc_request *req;
	struct ost_body *body;
	struct osc_data_version_args *dva;
	int rc;

	ENTRY;

	memset(oa, 0, sizeof(*oa));
	oa->o_oi = loi->loi_oi;
	oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;

	if (dv->dv_flags & (LL_DV_RD_FLUSH | LL_DV_WR_FLUSH)) {
		oa->o_valid |= OBD_MD_FLFLAGS;
		oa->o_flags |= OBD_FL_SRVLOCK;
		if (dv->dv_flags & LL_DV_WR_FLUSH)
			oa->o_flags |= OBD_FL_FLUSH;
	}

	init_completion(&cbargs->opc_sync);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
	if (rc < 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);
	req->rq_interpret_reply = osc_data_version_interpret;
	dva = ptlrpc_req_async_args(dva, req);
	dva->dva_oio = oio;

	ptlrpcd_add_req(req);

	RETURN(0);
}

static void osc_io_data_version_end(const struct lu_env *env,
				    const struct cl_io_slice *slice)
{
	struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;

	ENTRY;

	wait_for_completion(&cbargs->opc_sync);

	if (cbargs->opc_rc != 0) {
		slice->cis_io->ci_result = cbargs->opc_rc;
	} else {
		slice->cis_io->ci_result = 0;
		if (!(oio->oi_oa.o_valid &
		      (OBD_MD_LAYOUT_VERSION | OBD_MD_FLDATAVERSION)))
			slice->cis_io->ci_result = -ENOTSUPP;

		if (oio->oi_oa.o_valid & OBD_MD_LAYOUT_VERSION)
			dv->dv_layout_version = oio->oi_oa.o_layout_version;
		if (oio->oi_oa.o_valid & OBD_MD_FLDATAVERSION)
			dv->dv_data_version = oio->oi_oa.o_data_version;
	}

	EXIT;
}

int osc_io_read_start(const struct lu_env *env,
		      const struct cl_io_slice *slice)
{
	struct cl_object *obj = slice->cis_obj;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	int rc = 0;

	ENTRY;

	if (!slice->cis_io->ci_noatime) {
		cl_object_attr_lock(obj);
		attr->cat_atime = ktime_get_real_seconds();
		rc = cl_object_attr_update(env, obj, attr, CAT_ATIME);
		cl_object_attr_unlock(obj);
	}

	RETURN(rc);
}
EXPORT_SYMBOL(osc_io_read_start);

int osc_io_write_start(const struct lu_env *env,
		       const struct cl_io_slice *slice)
{
	struct cl_object *obj = slice->cis_obj;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	int rc = 0;

	ENTRY;

	OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1);
	cl_object_attr_lock(obj);
	attr->cat_mtime = attr->cat_ctime = ktime_get_real_seconds();
	rc = cl_object_attr_update(env, obj, attr, CAT_MTIME | CAT_CTIME);
	cl_object_attr_unlock(obj);

	RETURN(rc);
}
EXPORT_SYMBOL(osc_io_write_start);

int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
		  struct cl_fsync_io *fio)
{
	struct osc_io *oio = osc_env_io(env);
	struct obdo *oa = &oio->oi_oa;
	struct lov_oinfo *loi = obj->oo_oinfo;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	int rc = 0;

	ENTRY;

	memset(oa, 0, sizeof(*oa));
	oa->o_oi = loi->loi_oi;
	oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;

	/* reload size and blocks for start and end of sync range */
	oa->o_size = fio->fi_start;
	oa->o_blocks = fio->fi_end;
	oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;

	obdo_set_parent_fid(oa, fio->fi_fid);

	init_completion(&cbargs->opc_sync);

	rc = osc_sync_base(obj, oa, osc_async_upcall, cbargs, PTLRPCD_SET);
	RETURN(rc);
}
EXPORT_SYMBOL(osc_fsync_ost);

int osc_io_fsync_start(const struct lu_env *env,
		       const struct cl_io_slice *slice)
{
	struct cl_io *io = slice->cis_io;
	struct cl_fsync_io *fio = &io->u.ci_fsync;
	struct cl_object *obj = slice->cis_obj;
	struct osc_object *osc = cl2osc(obj);
	pgoff_t start = cl_index(obj, fio->fi_start);
	pgoff_t end = cl_index(obj, fio->fi_end);
	int result = 0;

	ENTRY;

	if (fio->fi_end == OBD_OBJECT_EOF)
		end = CL_PAGE_EOF;

	result = osc_cache_writeback_range(env, osc, start, end, 0,
					   fio->fi_mode == CL_FSYNC_DISCARD);
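	/*
	 * A positive return counts the pages queued for write-back; fold it
	 * into fi_nr_written and treat the call as a success.
	 */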
	if (result > 0) {
		fio->fi_nr_written += result;
		result = 0;
	}
	if (fio->fi_mode == CL_FSYNC_ALL) {
		int rc;

		/*
		 * We have to wait for write-back to finish before we can
		 * send the OST_SYNC RPC. This is bad because it causes
		 * extents to be written osc by osc. However, we usually
		 * start write-back before CL_FSYNC_ALL, so this won't have
		 * any real problem.
		 */
		rc = osc_cache_wait_range(env, osc, start, end);
		if (result == 0)
			result = rc;
		rc = osc_fsync_ost(env, osc, fio);
		if (result == 0)
			result = rc;
	}

	RETURN(result);
}

void osc_io_fsync_end(const struct lu_env *env,
		      const struct cl_io_slice *slice)
{
	struct cl_fsync_io *fio = &slice->cis_io->u.ci_fsync;
	struct cl_object *obj = slice->cis_obj;
	pgoff_t start = cl_index(obj, fio->fi_start);
	pgoff_t end = cl_index(obj, fio->fi_end);
	int result = 0;

	if (fio->fi_mode == CL_FSYNC_LOCAL) {
		result = osc_cache_wait_range(env, cl2osc(obj), start, end);
	} else if (fio->fi_mode == CL_FSYNC_ALL) {
		struct osc_io *oio = cl2osc_io(env, slice);
		struct osc_async_cbargs *cbargs = &oio->oi_cbarg;

		wait_for_completion(&cbargs->opc_sync);
		if (result == 0)
			result = cbargs->opc_rc;
	}
	slice->cis_io->ci_result = result;
}
EXPORT_SYMBOL(osc_io_fsync_end);

static int osc_io_ladvise_start(const struct lu_env *env,
				const struct cl_io_slice *slice)
{
	int result = 0;
	struct cl_io *io = slice->cis_io;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct cl_object *obj = slice->cis_obj;
	struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
	struct cl_ladvise_io *lio = &io->u.ci_ladvise;
	struct obdo *oa = &oio->oi_oa;
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	struct lu_ladvise *ladvise;
	struct ladvise_hdr *ladvise_hdr;
	int buf_size;
	int num_advise = 1;

	ENTRY;

	/* TODO: add multiple ladvise support in CLIO */
	buf_size = offsetof(typeof(*ladvise_hdr), lah_advise[num_advise]);
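	/*
	 * buf_size covers the header plus num_advise trailing records: with
	 * a single advice it is offsetof(struct ladvise_hdr, lah_advise[1]),
	 * i.e. the header size plus one struct lu_ladvise.
	 */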
	if (osc_env_info(env)->oti_ladvise_buf.lb_len < buf_size)
		lu_buf_realloc(&osc_env_info(env)->oti_ladvise_buf, buf_size);

	ladvise_hdr = osc_env_info(env)->oti_ladvise_buf.lb_buf;
	if (ladvise_hdr == NULL)
		RETURN(-ENOMEM);

	memset(ladvise_hdr, 0, buf_size);
	ladvise_hdr->lah_magic = LADVISE_MAGIC;
	ladvise_hdr->lah_count = num_advise;
	ladvise_hdr->lah_flags = lio->li_flags;

	memset(oa, 0, sizeof(*oa));
	oa->o_oi = loi->loi_oi;
	oa->o_valid = OBD_MD_FLID;
	obdo_set_parent_fid(oa, lio->li_fid);

	ladvise = ladvise_hdr->lah_advise;
	ladvise->lla_start = lio->li_start;
	ladvise->lla_end = lio->li_end;
	ladvise->lla_advice = lio->li_advice;

	if (lio->li_flags & LF_ASYNC) {
		result = osc_ladvise_base(osc_export(cl2osc(obj)), oa,
					  ladvise_hdr, NULL, NULL, NULL);
	} else {
		init_completion(&cbargs->opc_sync);
		result = osc_ladvise_base(osc_export(cl2osc(obj)), oa,
					  ladvise_hdr, osc_async_upcall,
					  cbargs, PTLRPCD_SET);
		cbargs->opc_rpc_sent = result == 0;
	}
	RETURN(result);
}

static void osc_io_ladvise_end(const struct lu_env *env,
			       const struct cl_io_slice *slice)
{
	struct cl_io *io = slice->cis_io;
	struct osc_io *oio = cl2osc_io(env, slice);
	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
	int result = 0;
	struct cl_ladvise_io *lio = &io->u.ci_ladvise;

	if ((!(lio->li_flags & LF_ASYNC)) && cbargs->opc_rpc_sent) {
		wait_for_completion(&cbargs->opc_sync);
		result = cbargs->opc_rc;
	}
	slice->cis_io->ci_result = result;
}

void osc_io_end(const struct lu_env *env, const struct cl_io_slice *slice)
{
	struct osc_io *oio = cl2osc_io(env, slice);

	if (oio->oi_active) {
		osc_extent_release(env, oio->oi_active);
		oio->oi_active = NULL;
	}
}
EXPORT_SYMBOL(osc_io_end);

static const struct cl_io_operations osc_io_ops = {
	.op = {
		[CIT_READ] = {
			.cio_iter_init = osc_io_rw_iter_init,
			.cio_iter_fini = osc_io_rw_iter_fini,
			.cio_start     = osc_io_read_start,
			.cio_fini      = osc_io_fini
		},
		[CIT_WRITE] = {
			.cio_iter_init = osc_io_rw_iter_init,
			.cio_iter_fini = osc_io_rw_iter_fini,
			.cio_start     = osc_io_write_start,
			.cio_end       = osc_io_end,
			.cio_fini      = osc_io_fini
		},
		[CIT_SETATTR] = {
			.cio_iter_init = osc_io_iter_init,
			.cio_iter_fini = osc_io_iter_fini,
			.cio_start     = osc_io_setattr_start,
			.cio_end       = osc_io_setattr_end
		},
		[CIT_DATA_VERSION] = {
			.cio_start = osc_io_data_version_start,
			.cio_end   = osc_io_data_version_end,
		},
		[CIT_FAULT] = {
			.cio_iter_init = osc_io_iter_init,
			.cio_iter_fini = osc_io_iter_fini,
			.cio_start     = osc_io_fault_start,
			.cio_end       = osc_io_end,
			.cio_fini      = osc_io_fini
		},
		[CIT_FSYNC] = {
			.cio_start = osc_io_fsync_start,
			.cio_end   = osc_io_fsync_end,
			.cio_fini  = osc_io_fini
		},
		[CIT_LADVISE] = {
			.cio_start = osc_io_ladvise_start,
			.cio_end   = osc_io_ladvise_end,
			.cio_fini  = osc_io_fini
		},
		[CIT_MISC] = {
			.cio_fini = osc_io_fini
		}
	},
	.cio_read_ahead   = osc_io_read_ahead,
	.cio_submit       = osc_io_submit,
	.cio_commit_async = osc_io_commit_async
};

/*****************************************************************************
 *
 * Transfer operations.
 *
 */

int osc_io_init(const struct lu_env *env,
		struct cl_object *obj, struct cl_io *io)
{
	struct osc_io *oio = osc_env_io(env);

	CL_IO_SLICE_CLEAN(oio, oi_cl);
	cl_io_slice_add(io, &oio->oi_cl, obj, &osc_io_ops);
	return 0;
}

/** @} osc */